hle: kernel: k_memory_manager: Rework for latest kernel behavior.
- Updates the KMemoryManager implementation against latest documentation.
- Reworks KMemoryLayout to be accessed throughout the kernel.
- Fixes an issue with pool sizes being incorrectly reported.
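For orientation: the allocation "option" used throughout these hunks packs a Pool and an allocation Direction into a single u32 via KMemoryManager::EncodeOption()/DecodeOption(), which are called below but not shown in this diff. A minimal sketch of the assumed packing (the exact bit positions here are hypothetical):

    #include <cstdint>
    #include <utility>

    enum class Pool : std::uint32_t { Application, Applet, System, SystemNonSecure, Count };
    enum class Direction : std::uint32_t { FromFront, FromBack };

    // Assumed layout: pool in the low nibble, direction in the bits above it.
    constexpr std::uint32_t EncodeOption(Pool pool, Direction dir) {
        return static_cast<std::uint32_t>(pool) | (static_cast<std::uint32_t>(dir) << 4);
    }

    constexpr std::pair<Pool, Direction> DecodeOption(std::uint32_t option) {
        return {static_cast<Pool>(option & 0xF), static_cast<Direction>(option >> 4)};
    }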
src/core/hle/kernel/k_memory_manager.cpp
@@ -10,189 +10,412 @@
#include "common/scope_exit.h"
 | 
			
		||||
#include "core/core.h"
 | 
			
		||||
#include "core/device_memory.h"
 | 
			
		||||
#include "core/hle/kernel/initial_process.h"
 | 
			
		||||
#include "core/hle/kernel/k_memory_manager.h"
 | 
			
		||||
#include "core/hle/kernel/k_page_linked_list.h"
 | 
			
		||||
#include "core/hle/kernel/kernel.h"
 | 
			
		||||
#include "core/hle/kernel/svc_results.h"
 | 
			
		||||
#include "core/memory.h"
 | 
			
		||||
 | 
			
		||||
namespace Kernel {
 | 
			
		||||
 | 
			
		||||
KMemoryManager::KMemoryManager(Core::System& system_) : system{system_} {}
namespace {

std::size_t KMemoryManager::Impl::Initialize(Pool new_pool, u64 start_address, u64 end_address) {
    const auto size{end_address - start_address};

    // Calculate metadata sizes
    const auto ref_count_size{(size / PageSize) * sizeof(u16)};
    const auto optimize_map_size{(Common::AlignUp((size / PageSize), 64) / 64) * sizeof(u64)};
    const auto manager_size{Common::AlignUp(optimize_map_size + ref_count_size, PageSize)};
    const auto page_heap_size{KPageHeap::CalculateManagementOverheadSize(size)};
    const auto total_metadata_size{manager_size + page_heap_size};
    ASSERT(manager_size <= total_metadata_size);
    ASSERT(Common::IsAligned(total_metadata_size, PageSize));

    // Setup region
    pool = new_pool;

    // Initialize the manager's KPageHeap
    heap.Initialize(start_address, size, page_heap_size);

    // Free the memory to the heap
    heap.Free(start_address, size / PageSize);

    // Update the heap's used size
    heap.UpdateUsedSize();

    return total_metadata_size;
constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) {
    if ((type | KMemoryRegionType_DramApplicationPool) == type) {
        return KMemoryManager::Pool::Application;
    } else if ((type | KMemoryRegionType_DramAppletPool) == type) {
        return KMemoryManager::Pool::Applet;
    } else if ((type | KMemoryRegionType_DramSystemPool) == type) {
        return KMemoryManager::Pool::System;
    } else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) {
        return KMemoryManager::Pool::SystemNonSecure;
    } else {
        UNREACHABLE_MSG("InvalidMemoryRegionType for conversion to Pool");
        return {};
    }
}

void KMemoryManager::InitializeManager(Pool pool, u64 start_address, u64 end_address) {
    ASSERT(pool < Pool::Count);
    managers[static_cast<std::size_t>(pool)].Initialize(pool, start_address, end_address);
} // namespace

KMemoryManager::KMemoryManager(Core::System& system_)
    : system{system_}, pool_locks{
                           KLightLock{system_.Kernel()},
                           KLightLock{system_.Kernel()},
                           KLightLock{system_.Kernel()},
                           KLightLock{system_.Kernel()},
                       } {}

void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) {

    // Clear the management region to zero.
    const VAddr management_region_end = management_region + management_region_size;

    // Reset our manager count.
    num_managers = 0;

    // Traverse the physical memory region tree, initializing each manager as appropriate.
    while (num_managers != MaxManagerCount) {
        // Locate the region that should initialize the current manager.
        PAddr region_address = 0;
        size_t region_size = 0;
        Pool region_pool = Pool::Count;
        for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
            // We only care about regions that we need to create managers for.
            if (!it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
                continue;
            }

            // We want to initialize the managers in order.
            if (it.GetAttributes() != num_managers) {
                continue;
            }

            const PAddr cur_start = it.GetAddress();
            const PAddr cur_end = it.GetEndAddress();

            // Validate the region.
            ASSERT(cur_end != 0);
            ASSERT(cur_start != 0);
            ASSERT(it.GetSize() > 0);

            // Update the region's extents.
            if (region_address == 0) {
                region_address = cur_start;
                region_size = it.GetSize();
                region_pool = GetPoolFromMemoryRegionType(it.GetType());
            } else {
                ASSERT(cur_start == region_address + region_size);

                // Update the size.
                region_size = cur_end - region_address;
                ASSERT(GetPoolFromMemoryRegionType(it.GetType()) == region_pool);
            }
        }

        // If we didn't find a region, we're done.
        if (region_size == 0) {
            break;
        }

        // Initialize a new manager for the region.
        Impl* manager = std::addressof(managers[num_managers++]);
        ASSERT(num_managers <= managers.size());

        const size_t cur_size = manager->Initialize(region_address, region_size, management_region,
                                                    management_region_end, region_pool);
        management_region += cur_size;
        ASSERT(management_region <= management_region_end);

        // Insert the manager into the pool list.
        const auto region_pool_index = static_cast<u32>(region_pool);
        if (pool_managers_tail[region_pool_index] == nullptr) {
            pool_managers_head[region_pool_index] = manager;
        } else {
            pool_managers_tail[region_pool_index]->SetNext(manager);
            manager->SetPrev(pool_managers_tail[region_pool_index]);
        }
        pool_managers_tail[region_pool_index] = manager;
    }

    // Free each region to its corresponding heap.
    size_t reserved_sizes[MaxManagerCount] = {};
    const PAddr ini_start = GetInitialProcessBinaryPhysicalAddress();
    const PAddr ini_end = ini_start + InitialProcessBinarySizeMax;
    const PAddr ini_last = ini_end - 1;
    for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
        if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
            // Get the manager for the region.
            auto index = it.GetAttributes();
            auto& manager = managers[index];

            const PAddr cur_start = it.GetAddress();
            const PAddr cur_last = it.GetLastAddress();
            const PAddr cur_end = it.GetEndAddress();

            if (cur_start <= ini_start && ini_last <= cur_last) {
                // Free memory before the ini to the heap.
                if (cur_start != ini_start) {
                    manager.Free(cur_start, (ini_start - cur_start) / PageSize);
                }

                // Open/reserve the ini memory.
                manager.OpenFirst(ini_start, InitialProcessBinarySizeMax / PageSize);
                reserved_sizes[it.GetAttributes()] += InitialProcessBinarySizeMax;

                // Free memory after the ini to the heap.
                if (ini_last != cur_last) {
                    ASSERT(cur_end != 0);
                    manager.Free(ini_end, (cur_end - ini_end) / PageSize);
                }
            } else {
                // Ensure there's no partial overlap with the ini image.
                if (cur_start <= ini_last) {
                    ASSERT(cur_last < ini_start);
                } else {
                    // Otherwise, check the region for general validity.
                    ASSERT(cur_end != 0);
                }

                // Free the memory to the heap.
                manager.Free(cur_start, it.GetSize() / PageSize);
            }
        }
    }

    // Update the used size for all managers.
    for (size_t i = 0; i < num_managers; ++i) {
        managers[i].SetInitialUsedHeapSize(reserved_sizes[i]);
    }
}

VAddr KMemoryManager::AllocateAndOpenContinuous(std::size_t num_pages, std::size_t align_pages,
                                                u32 option) {
    // Early return if we're allocating no pages
PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) {
    // Early return if we're allocating no pages.
    if (num_pages == 0) {
        return {};
        return 0;
    }

    // Lock the pool that we're allocating from
    // Lock the pool that we're allocating from.
    const auto [pool, dir] = DecodeOption(option);
    const auto pool_index{static_cast<std::size_t>(pool)};
    std::lock_guard lock{pool_locks[pool_index]};
    KScopedLightLock lk(pool_locks[static_cast<std::size_t>(pool)]);

    // Choose a heap based on our page size request
    const s32 heap_index{KPageHeap::GetAlignedBlockIndex(num_pages, align_pages)};
    // Choose a heap based on our page size request.
    const s32 heap_index = KPageHeap::GetAlignedBlockIndex(num_pages, align_pages);

    // Loop, trying to iterate from each block
    // TODO (bunnei): Support multiple managers
    Impl& chosen_manager{managers[pool_index]};
    VAddr allocated_block{chosen_manager.AllocateBlock(heap_index, false)};

    // If we failed to allocate, quit now
    if (!allocated_block) {
        return {};
    // Loop, trying to iterate from each block.
    Impl* chosen_manager = nullptr;
    PAddr allocated_block = 0;
    for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr;
         chosen_manager = this->GetNextManager(chosen_manager, dir)) {
        allocated_block = chosen_manager->AllocateBlock(heap_index, true);
        if (allocated_block != 0) {
            break;
        }
    }

    // If we allocated more than we need, free some
    const auto allocated_pages{KPageHeap::GetBlockNumPages(heap_index)};
    // If we failed to allocate, quit now.
    if (allocated_block == 0) {
        return 0;
    }

    // If we allocated more than we need, free some.
    const size_t allocated_pages = KPageHeap::GetBlockNumPages(heap_index);
    if (allocated_pages > num_pages) {
        chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
        chosen_manager->Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
    }

    // Open the first reference to the pages.
    chosen_manager->OpenFirst(allocated_block, num_pages);

    return allocated_block;
}

ResultCode KMemoryManager::Allocate(KPageLinkedList& page_list, std::size_t num_pages, Pool pool,
                                    Direction dir, u32 heap_fill_value) {
    ASSERT(page_list.GetNumPages() == 0);
ResultCode KMemoryManager::AllocatePageGroupImpl(KPageLinkedList* out, size_t num_pages, Pool pool,
                                                 Direction dir, bool random) {
    // Choose a heap based on our page size request.
    const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
    R_UNLESS(0 <= heap_index, ResultOutOfMemory);

    // Early return if we're allocating no pages
    if (num_pages == 0) {
        return ResultSuccess;
    }

    // Lock the pool that we're allocating from
    const auto pool_index{static_cast<std::size_t>(pool)};
    std::lock_guard lock{pool_locks[pool_index]};

    // Choose a heap based on our page size request
    const s32 heap_index{KPageHeap::GetBlockIndex(num_pages)};
    if (heap_index < 0) {
        return ResultOutOfMemory;
    }

    // TODO (bunnei): Support multiple managers
    Impl& chosen_manager{managers[pool_index]};

    // Ensure that we don't leave anything un-freed
    auto group_guard = detail::ScopeExit([&] {
        for (const auto& it : page_list.Nodes()) {
            const auto min_num_pages{std::min<size_t>(
                it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)};
            chosen_manager.Free(it.GetAddress(), min_num_pages);
    // Ensure that we don't leave anything un-freed.
    auto group_guard = SCOPE_GUARD({
        for (const auto& it : out->Nodes()) {
            auto& manager = this->GetManager(system.Kernel().MemoryLayout(), it.GetAddress());
            const size_t num_pages_to_free =
                std::min(it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
            manager.Free(it.GetAddress(), num_pages_to_free);
        }
    });

    // Keep allocating until we've allocated all our pages
    for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) {
        const auto pages_per_alloc{KPageHeap::GetBlockNumPages(index)};

        while (num_pages >= pages_per_alloc) {
            // Allocate a block
            VAddr allocated_block{chosen_manager.AllocateBlock(index, false)};
            if (!allocated_block) {
                break;
            }

            // Safely add it to our group
            {
                auto block_guard = detail::ScopeExit(
                    [&] { chosen_manager.Free(allocated_block, pages_per_alloc); });

                if (const ResultCode result{page_list.AddBlock(allocated_block, pages_per_alloc)};
                    result.IsError()) {
                    return result;
    // Keep allocating until we've allocated all our pages.
    for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) {
        const size_t pages_per_alloc = KPageHeap::GetBlockNumPages(index);
        for (Impl* cur_manager = this->GetFirstManager(pool, dir); cur_manager != nullptr;
             cur_manager = this->GetNextManager(cur_manager, dir)) {
            while (num_pages >= pages_per_alloc) {
                // Allocate a block.
                PAddr allocated_block = cur_manager->AllocateBlock(index, random);
                if (allocated_block == 0) {
                    break;
                }

                block_guard.Cancel();
            }
                // Safely add it to our group.
                {
                    auto block_guard =
                        SCOPE_GUARD({ cur_manager->Free(allocated_block, pages_per_alloc); });
                    R_TRY(out->AddBlock(allocated_block, pages_per_alloc));
                    block_guard.Cancel();
                }

            num_pages -= pages_per_alloc;
                num_pages -= pages_per_alloc;
            }
        }
    }

    // Clear allocated memory.
    for (const auto& it : page_list.Nodes()) {
        std::memset(system.DeviceMemory().GetPointer(it.GetAddress()), heap_fill_value,
                    it.GetSize());
    }

    // Only succeed if we allocated as many pages as we wanted
    if (num_pages) {
        return ResultOutOfMemory;
    }
    // Only succeed if we allocated as many pages as we wanted.
    R_UNLESS(num_pages == 0, ResultOutOfMemory);

    // We succeeded!
    group_guard.Cancel();

    return ResultSuccess;
}

ResultCode KMemoryManager::Free(KPageLinkedList& page_list, std::size_t num_pages, Pool pool,
                                Direction dir, u32 heap_fill_value) {
    // Early return if we're freeing no pages
    if (!num_pages) {
        return ResultSuccess;
    }
ResultCode KMemoryManager::AllocateAndOpen(KPageLinkedList* out, size_t num_pages, u32 option) {
    ASSERT(out != nullptr);
    ASSERT(out->GetNumPages() == 0);

    // Lock the pool that we're freeing from
    const auto pool_index{static_cast<std::size_t>(pool)};
    std::lock_guard lock{pool_locks[pool_index]};
    // Early return if we're allocating no pages.
    R_SUCCEED_IF(num_pages == 0);

    // TODO (bunnei): Support multiple managers
    Impl& chosen_manager{managers[pool_index]};
    // Lock the pool that we're allocating from.
    const auto [pool, dir] = DecodeOption(option);
    KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]);

    // Free all of the pages
    for (const auto& it : page_list.Nodes()) {
        const auto min_num_pages{std::min<size_t>(
            it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)};
        chosen_manager.Free(it.GetAddress(), min_num_pages);
    // Allocate the page group.
    R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false));

    // Open the first reference to the pages.
    for (const auto& block : out->Nodes()) {
        PAddr cur_address = block.GetAddress();
        size_t remaining_pages = block.GetNumPages();
        while (remaining_pages > 0) {
            // Get the manager for the current address.
            auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address);

            // Process part or all of the block.
            const size_t cur_pages =
                std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
            manager.OpenFirst(cur_address, cur_pages);

            // Advance.
            cur_address += cur_pages * PageSize;
            remaining_pages -= cur_pages;
        }
    }

    return ResultSuccess;
}

std::size_t KMemoryManager::Impl::CalculateManagementOverheadSize(std::size_t region_size) {
    const std::size_t ref_count_size = (region_size / PageSize) * sizeof(u16);
    const std::size_t optimize_map_size =
ResultCode KMemoryManager::AllocateAndOpenForProcess(KPageLinkedList* out, size_t num_pages,
                                                     u32 option, u64 process_id, u8 fill_pattern) {
    ASSERT(out != nullptr);
    ASSERT(out->GetNumPages() == 0);

    // Decode the option.
    const auto [pool, dir] = DecodeOption(option);

    // Allocate the memory.
    {
        // Lock the pool that we're allocating from.
        KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]);

        // Allocate the page group.
        R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false));

        // Open the first reference to the pages.
        for (const auto& block : out->Nodes()) {
            PAddr cur_address = block.GetAddress();
            size_t remaining_pages = block.GetNumPages();
            while (remaining_pages > 0) {
                // Get the manager for the current address.
                auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address);

                // Process part or all of the block.
                const size_t cur_pages =
                    std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
                manager.OpenFirst(cur_address, cur_pages);

                // Advance.
                cur_address += cur_pages * PageSize;
                remaining_pages -= cur_pages;
            }
        }
    }

    // Set all the allocated memory.
    for (const auto& block : out->Nodes()) {
        std::memset(system.DeviceMemory().GetPointer(block.GetAddress()), fill_pattern,
                    block.GetSize());
    }

    return ResultSuccess;
}

void KMemoryManager::Open(PAddr address, size_t num_pages) {
    // Repeatedly open references until we've done so for all pages.
    while (num_pages) {
        auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address);
        const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));

        {
            KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]);
            manager.Open(address, cur_pages);
        }

        num_pages -= cur_pages;
        address += cur_pages * PageSize;
    }
}

void KMemoryManager::Close(PAddr address, size_t num_pages) {
    // Repeatedly close references until we've done so for all pages.
    while (num_pages) {
        auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address);
        const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));

        {
            KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]);
            manager.Close(address, cur_pages);
        }

        num_pages -= cur_pages;
        address += cur_pages * PageSize;
    }
}

void KMemoryManager::Close(const KPageLinkedList& pg) {
    for (const auto& node : pg.Nodes()) {
        Close(node.GetAddress(), node.GetNumPages());
    }
}
void KMemoryManager::Open(const KPageLinkedList& pg) {
    for (const auto& node : pg.Nodes()) {
        Open(node.GetAddress(), node.GetNumPages());
    }
}

size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr management,
                                        VAddr management_end, Pool p) {
    // Calculate management sizes.
    const size_t ref_count_size = (size / PageSize) * sizeof(u16);
    const size_t optimize_map_size = CalculateOptimizedProcessOverheadSize(size);
    const size_t manager_size = Common::AlignUp(optimize_map_size + ref_count_size, PageSize);
    const size_t page_heap_size = KPageHeap::CalculateManagementOverheadSize(size);
    const size_t total_management_size = manager_size + page_heap_size;
    ASSERT(manager_size <= total_management_size);
    ASSERT(management + total_management_size <= management_end);
    ASSERT(Common::IsAligned(total_management_size, PageSize));

    // Setup region.
    pool = p;
    management_region = management;
    page_reference_counts.resize(
        Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize() / PageSize);
    ASSERT(Common::IsAligned(management_region, PageSize));

    // Initialize the manager's KPageHeap.
    heap.Initialize(address, size, management + manager_size, page_heap_size);

    return total_management_size;
}

size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) {
    const size_t ref_count_size = (region_size / PageSize) * sizeof(u16);
    const size_t optimize_map_size =
        (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
         Common::BitSize<u64>()) *
        sizeof(u64);
    const std::size_t manager_meta_size =
        Common::AlignUp(optimize_map_size + ref_count_size, PageSize);
    const std::size_t page_heap_size = KPageHeap::CalculateManagementOverheadSize(region_size);
    const size_t manager_meta_size = Common::AlignUp(optimize_map_size + ref_count_size, PageSize);
    const size_t page_heap_size = KPageHeap::CalculateManagementOverheadSize(region_size);
    return manager_meta_size + page_heap_size;
}

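Note on locking: the rework replaces std::mutex/std::lock_guard with KLightLock guarded by KScopedLightLock, neither of which is defined in this diff. A sketch of the assumed RAII behavior only (placeholder lock bodies; the real KLightLock suspends the calling thread inside the kernel):

    // Assumption: KScopedLightLock mirrors std::scoped_lock over a KLightLock.
    class KLightLock {
    public:
        void Lock() {}   // placeholder; real implementation blocks the caller
        void Unlock() {} // placeholder
    };

    class KScopedLightLock {
    public:
        explicit KScopedLightLock(KLightLock& lock) : lock_{lock} {
            lock_.Lock(); // acquire for the guard's lifetime
        }
        ~KScopedLightLock() {
            lock_.Unlock();
        }
        KScopedLightLock(const KScopedLightLock&) = delete;
        KScopedLightLock& operator=(const KScopedLightLock&) = delete;

    private:
        KLightLock& lock_;
    };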
src/core/hle/kernel/k_memory_manager.h
@@ -5,11 +5,12 @@
#pragma once

#include <array>
#include <mutex>
#include <tuple>

#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_page_heap.h"
#include "core/hle/result.h"

@@ -52,22 +53,33 @@ public:

    explicit KMemoryManager(Core::System& system_);

    constexpr std::size_t GetSize(Pool pool) const {
        return managers[static_cast<std::size_t>(pool)].GetSize();
    void Initialize(VAddr management_region, size_t management_region_size);

    constexpr size_t GetSize(Pool pool) const {
        constexpr Direction GetSizeDirection = Direction::FromFront;
        size_t total = 0;
        for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr;
             manager = this->GetNextManager(manager, GetSizeDirection)) {
            total += manager->GetSize();
        }
        return total;
    }

    void InitializeManager(Pool pool, u64 start_address, u64 end_address);
    PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
    ResultCode AllocateAndOpen(KPageLinkedList* out, size_t num_pages, u32 option);
    ResultCode AllocateAndOpenForProcess(KPageLinkedList* out, size_t num_pages, u32 option,
                                         u64 process_id, u8 fill_pattern);

    VAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
    ResultCode Allocate(KPageLinkedList& page_list, std::size_t num_pages, Pool pool, Direction dir,
                        u32 heap_fill_value = 0);
    ResultCode Free(KPageLinkedList& page_list, std::size_t num_pages, Pool pool, Direction dir,
                    u32 heap_fill_value = 0);
    static constexpr size_t MaxManagerCount = 10;

    static constexpr std::size_t MaxManagerCount = 10;
    void Close(PAddr address, size_t num_pages);
    void Close(const KPageLinkedList& pg);

    void Open(PAddr address, size_t num_pages);
    void Open(const KPageLinkedList& pg);

public:
    static std::size_t CalculateManagementOverheadSize(std::size_t region_size) {
    static size_t CalculateManagementOverheadSize(size_t region_size) {
        return Impl::CalculateManagementOverheadSize(region_size);
    }

@@ -100,17 +112,26 @@ private:
        Impl() = default;
        ~Impl() = default;

        std::size_t Initialize(Pool new_pool, u64 start_address, u64 end_address);
        size_t Initialize(PAddr address, size_t size, VAddr management, VAddr management_end,
                          Pool p);

        VAddr AllocateBlock(s32 index, bool random) {
            return heap.AllocateBlock(index, random);
        }

        void Free(VAddr addr, std::size_t num_pages) {
        void Free(VAddr addr, size_t num_pages) {
            heap.Free(addr, num_pages);
        }

        constexpr std::size_t GetSize() const {
        void SetInitialUsedHeapSize(size_t reserved_size) {
            heap.SetInitialUsedSize(reserved_size);
        }

        constexpr Pool GetPool() const {
            return pool;
        }

        constexpr size_t GetSize() const {
            return heap.GetSize();
        }

@@ -122,10 +143,88 @@ private:
            return heap.GetEndAddress();
        }

        static std::size_t CalculateManagementOverheadSize(std::size_t region_size);
        constexpr size_t GetPageOffset(PAddr address) const {
            return heap.GetPageOffset(address);
        }

        static constexpr std::size_t CalculateOptimizedProcessOverheadSize(
            std::size_t region_size) {
        constexpr size_t GetPageOffsetToEnd(PAddr address) const {
            return heap.GetPageOffsetToEnd(address);
        }

        constexpr void SetNext(Impl* n) {
            next = n;
        }

        constexpr void SetPrev(Impl* n) {
            prev = n;
        }

        constexpr Impl* GetNext() const {
            return next;
        }

        constexpr Impl* GetPrev() const {
            return prev;
        }

        void OpenFirst(PAddr address, size_t num_pages) {
            size_t index = this->GetPageOffset(address);
            const size_t end = index + num_pages;
            while (index < end) {
                const RefCount ref_count = (++page_reference_counts[index]);
                ASSERT(ref_count == 1);

                index++;
            }
        }

        void Open(PAddr address, size_t num_pages) {
            size_t index = this->GetPageOffset(address);
            const size_t end = index + num_pages;
            while (index < end) {
                const RefCount ref_count = (++page_reference_counts[index]);
                ASSERT(ref_count > 1);

                index++;
            }
        }

        void Close(PAddr address, size_t num_pages) {
            size_t index = this->GetPageOffset(address);
            const size_t end = index + num_pages;

            size_t free_start = 0;
            size_t free_count = 0;
            while (index < end) {
                ASSERT(page_reference_counts[index] > 0);
                const RefCount ref_count = (--page_reference_counts[index]);

                // Keep track of how many zero refcounts we see in a row, to minimize calls to free.
                if (ref_count == 0) {
                    if (free_count > 0) {
                        free_count++;
                    } else {
                        free_start = index;
                        free_count = 1;
                    }
                } else {
                    if (free_count > 0) {
                        this->Free(heap.GetAddress() + free_start * PageSize, free_count);
                        free_count = 0;
                    }
                }

                index++;
            }

            if (free_count > 0) {
                this->Free(heap.GetAddress() + free_start * PageSize, free_count);
            }
        }

        static size_t CalculateManagementOverheadSize(size_t region_size);

        static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) {
            return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
                    Common::BitSize<u64>()) *
                   sizeof(u64);
@@ -135,13 +234,45 @@ private:
        using RefCount = u16;

        KPageHeap heap;
        std::vector<RefCount> page_reference_counts;
        VAddr management_region{};
        Pool pool{};
        Impl* next{};
        Impl* prev{};
    };

private:
    Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) {
        return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
    }

    const Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) const {
        return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
    }

    constexpr Impl* GetFirstManager(Pool pool, Direction dir) const {
        return dir == Direction::FromBack ? pool_managers_tail[static_cast<size_t>(pool)]
                                          : pool_managers_head[static_cast<size_t>(pool)];
    }

    constexpr Impl* GetNextManager(Impl* cur, Direction dir) const {
        if (dir == Direction::FromBack) {
            return cur->GetPrev();
        } else {
            return cur->GetNext();
        }
    }

    ResultCode AllocatePageGroupImpl(KPageLinkedList* out, size_t num_pages, Pool pool,
                                     Direction dir, bool random);

private:
    Core::System& system;
    std::array<std::mutex, static_cast<std::size_t>(Pool::Count)> pool_locks;
    std::array<KLightLock, static_cast<size_t>(Pool::Count)> pool_locks;
    std::array<Impl*, MaxManagerCount> pool_managers_head{};
    std::array<Impl*, MaxManagerCount> pool_managers_tail{};
    std::array<Impl, MaxManagerCount> managers;
    size_t num_managers{};
};

} // namespace Kernel

src/core/hle/kernel/k_page_table.cpp
@@ -273,11 +273,12 @@ ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemory
    R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
                                 KMemoryPermission::None, KMemoryPermission::None,
                                 KMemoryAttribute::None, KMemoryAttribute::None));
    KPageLinkedList pg;
    R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
        &pg, num_pages,
        KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, allocation_option)));

    KPageLinkedList page_linked_list;
    R_TRY(system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool,
                                                   allocation_option));
    R_TRY(Operate(addr, num_pages, page_linked_list, OperationType::MapGroup));
    R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup));

    block_manager->Update(addr, num_pages, state, perm);

@@ -443,9 +444,10 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
            R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);

            // Allocate pages for the new memory.
            KPageLinkedList page_linked_list;
            R_TRY(system.Kernel().MemoryManager().Allocate(
                page_linked_list, (size - mapped_size) / PageSize, memory_pool, allocation_option));
            KPageLinkedList pg;
            R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess(
                &pg, (size - mapped_size) / PageSize,
                KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0));

            // Map the memory.
            {
@@ -547,7 +549,7 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
                });

                // Iterate over the memory.
                auto pg_it = page_linked_list.Nodes().begin();
                auto pg_it = pg.Nodes().begin();
                PAddr pg_phys_addr = pg_it->GetAddress();
                size_t pg_pages = pg_it->GetNumPages();

@@ -571,7 +573,7 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
                            // Check if we're at the end of the physical block.
                            if (pg_pages == 0) {
                                // Ensure there are more pages to map.
                                ASSERT(pg_it != page_linked_list.Nodes().end());
                                ASSERT(pg_it != pg.Nodes().end());

                                // Advance our physical block.
                                ++pg_it;
@@ -841,10 +843,14 @@ ResultCode KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
    process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);

    // Update memory blocks.
    system.Kernel().MemoryManager().Free(pg, size / PageSize, memory_pool, allocation_option);
    block_manager->Update(address, size / PageSize, KMemoryState::Free, KMemoryPermission::None,
                          KMemoryAttribute::None);

    // TODO(bunnei): This is a workaround until the next set of changes, where we add reference
    // counting for mapped pages. Until then, we must manually close the reference to the page
    // group.
    system.Kernel().MemoryManager().Close(pg);

    // We succeeded.
    remap_guard.Cancel();

@@ -1270,9 +1276,16 @@ ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
    R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);

    // Allocate pages for the heap extension.
    KPageLinkedList page_linked_list;
    R_TRY(system.Kernel().MemoryManager().Allocate(page_linked_list, allocation_size / PageSize,
                                                   memory_pool, allocation_option));
    KPageLinkedList pg;
    R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
        &pg, allocation_size / PageSize,
        KMemoryManager::EncodeOption(memory_pool, allocation_option)));

    // Clear all the newly allocated pages.
    for (const auto& it : pg.Nodes()) {
        std::memset(system.DeviceMemory().GetPointer(it.GetAddress()), heap_fill_value,
                    it.GetSize());
    }

    // Map the pages.
    {
@@ -1291,7 +1304,7 @@ ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {

        // Map the pages.
        const auto num_pages = allocation_size / PageSize;
        R_TRY(Operate(current_heap_end, num_pages, page_linked_list, OperationType::MapGroup));
        R_TRY(Operate(current_heap_end, num_pages, pg, OperationType::MapGroup));

        // Clear all the newly allocated pages.
        for (std::size_t cur_page = 0; cur_page < num_pages; ++cur_page) {
@@ -1339,8 +1352,9 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages,
        R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
    } else {
        KPageLinkedList page_group;
        R_TRY(system.Kernel().MemoryManager().Allocate(page_group, needed_num_pages, memory_pool,
                                                       allocation_option));
        R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess(
            &page_group, needed_num_pages,
            KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0));
        R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
    }

src/core/hle/kernel/k_page_table.h
@@ -310,6 +310,8 @@ private:
    bool is_kernel{};
    bool is_aslr_enabled{};

    u32 heap_fill_value{};

    KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application};
    KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront};

src/core/hle/kernel/kernel.cpp
@@ -70,13 +70,12 @@ struct KernelCore::Impl {

        // Derive the initial memory layout from the emulated board
        Init::InitializeSlabResourceCounts(kernel);
        KMemoryLayout memory_layout;
        DeriveInitialMemoryLayout(memory_layout);
        DeriveInitialMemoryLayout();
        Init::InitializeSlabHeaps(system, memory_layout);

        // Initialize kernel memory and resources.
        InitializeSystemResourceLimit(kernel, system.CoreTiming(), memory_layout);
        InitializeMemoryLayout(memory_layout);
        InitializeSystemResourceLimit(kernel, system.CoreTiming());
        InitializeMemoryLayout();
        InitializePageSlab();
        InitializeSchedulers();
        InitializeSuspendThreads();
@@ -219,8 +218,7 @@ struct KernelCore::Impl {

    // Creates the default system resource limit
    void InitializeSystemResourceLimit(KernelCore& kernel,
                                       const Core::Timing::CoreTiming& core_timing,
                                       const KMemoryLayout& memory_layout) {
                                       const Core::Timing::CoreTiming& core_timing) {
        system_resource_limit = KResourceLimit::Create(system.Kernel());
        system_resource_limit->Initialize(&core_timing);

@@ -353,7 +351,7 @@ struct KernelCore::Impl {
        return schedulers[thread_id]->GetCurrentThread();
    }

    void DeriveInitialMemoryLayout(KMemoryLayout& memory_layout) {
    void DeriveInitialMemoryLayout() {
        // Insert the root region for the virtual memory tree, from which all other regions will
        // derive.
        memory_layout.GetVirtualMemoryRegionTree().InsertDirectly(
@@ -616,20 +614,16 @@ struct KernelCore::Impl {
                                                        linear_region_start);
    }

    void InitializeMemoryLayout(const KMemoryLayout& memory_layout) {
    void InitializeMemoryLayout() {
        const auto system_pool = memory_layout.GetKernelSystemPoolRegionPhysicalExtents();
        const auto applet_pool = memory_layout.GetKernelAppletPoolRegionPhysicalExtents();
        const auto application_pool = memory_layout.GetKernelApplicationPoolRegionPhysicalExtents();

        // Initialize memory managers
        // Initialize the memory manager.
        memory_manager = std::make_unique<KMemoryManager>(system);
        memory_manager->InitializeManager(KMemoryManager::Pool::Application,
                                          application_pool.GetAddress(),
                                          application_pool.GetEndAddress());
        memory_manager->InitializeManager(KMemoryManager::Pool::Applet, applet_pool.GetAddress(),
                                          applet_pool.GetEndAddress());
        memory_manager->InitializeManager(KMemoryManager::Pool::System, system_pool.GetAddress(),
                                          system_pool.GetEndAddress());
        const auto& management_region = memory_layout.GetPoolManagementRegion();
        ASSERT(management_region.GetEndAddress() != 0);
        memory_manager->Initialize(management_region.GetAddress(), management_region.GetSize());

        // Setup memory regions for emulated processes
        // TODO(bunnei): These should not be hardcoded regions initialized within the kernel
@@ -770,6 +764,9 @@ struct KernelCore::Impl {
    Kernel::KSharedMemory* irs_shared_mem{};
    Kernel::KSharedMemory* time_shared_mem{};

    // Memory layout
    KMemoryLayout memory_layout;

    // Threads used for services
    std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads;
    Common::ThreadWorker service_threads_manager;
@@ -1135,6 +1132,10 @@ const KWorkerTaskManager& KernelCore::WorkerTaskManager() const {
    return impl->worker_task_manager;
}

const KMemoryLayout& KernelCore::MemoryLayout() const {
    return impl->memory_layout;
}

bool KernelCore::IsPhantomModeForSingleCore() const {
    return impl->IsPhantomModeForSingleCore();
}

src/core/hle/kernel/kernel.h
@@ -41,6 +41,7 @@ class KClientSession;
class KEvent;
class KHandleTable;
class KLinkedListNode;
class KMemoryLayout;
class KMemoryManager;
class KPort;
class KProcess;
@@ -350,6 +351,9 @@ public:
    /// Gets the current worker task manager, used for dispatching KThread/KProcess tasks.
    const KWorkerTaskManager& WorkerTaskManager() const;

    /// Gets the memory layout.
    const KMemoryLayout& MemoryLayout() const;

private:
    friend class KProcess;
    friend class KThread;
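With KMemoryLayout now owned by KernelCore::Impl and exposed through KernelCore::MemoryLayout(), any kernel component holding the system instance can consult the layout directly. A usage sketch consistent with the calls in this diff (types and accessors as used above; not a new API):

    // Sums the byte size of all DRAM user-pool regions, mirroring how
    // KMemoryManager::Initialize() walks the physical memory region tree.
    size_t CountUserPoolBytes(Core::System& system) {
        size_t total = 0;
        for (const auto& region : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
            if (region.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
                total += region.GetSize();
            }
        }
        return total;
    }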