code: dodge PAGE_SIZE #define

Some header files, specifically on OSX and with Musl libc, define PAGE_SIZE to be a number. This is great, except that Citra uses PAGE_SIZE as a variable, for example `static constexpr u64 PAGE_SIZE = u64(1) << PAGE_BITS;`. PAGE_SIZE, PAGE_BITS and PAGE_MASK are all variables of this kind, so they are renamed with a CITRA_ prefix: CITRA_PAGE_SIZE, CITRA_PAGE_BITS and CITRA_PAGE_MASK.
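For context, a minimal, self-contained sketch of the collision and of why the rename fixes it (the 4096 value and the exact system header are illustrative; they vary by platform):

```cpp
#include <cstdint>

// On macOS and Musl, a system header may define PAGE_SIZE as an
// object-like macro, roughly equivalent to:
#define PAGE_SIZE 4096

namespace Memory {
// constexpr std::uint32_t PAGE_SIZE = 0x1000;
// ^ would no longer compile: the preprocessor rewrites the identifier
//   first, producing "constexpr std::uint32_t 4096 = 0x1000;".

// The renamed constants are unaffected, since no system macro is
// spelled CITRA_PAGE_SIZE:
constexpr std::uint32_t CITRA_PAGE_SIZE = 0x1000;
constexpr std::uint32_t CITRA_PAGE_MASK = CITRA_PAGE_SIZE - 1;
} // namespace Memory

int main() {
    // Sanity check: masking an address down to its page base.
    return ((0x1234 & ~Memory::CITRA_PAGE_MASK) == 0x1000) ? 0 : 1;
}
```

The alternative was to keep `#undef`-ing the macro at every affected include site (as the removed microprofile.h block below did); the rename removes the conflict once and for all.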
@@ -23,12 +23,3 @@ typedef void* HANDLE;
 #include <microprofile.h>
 
 #define MP_RGB(r, g, b) ((r) << 16 | (g) << 8 | (b) << 0)
-
-// On OS X, some Mach header included by MicroProfile defines these as macros, conflicting with
-// identifiers we use.
-#ifdef PAGE_SIZE
-#undef PAGE_SIZE
-#endif
-#ifdef PAGE_MASK
-#undef PAGE_MASK
-#endif
@@ -138,10 +138,10 @@ ResultCode TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySy
             u32 size = static_cast<u32>(descInfo.size);
             IPC::MappedBufferPermissions permissions = descInfo.perms;
 
-            VAddr page_start = Common::AlignDown(source_address, Memory::PAGE_SIZE);
+            VAddr page_start = Common::AlignDown(source_address, Memory::CITRA_PAGE_SIZE);
             u32 page_offset = source_address - page_start;
             u32 num_pages =
-                Common::AlignUp(page_offset + size, Memory::PAGE_SIZE) >> Memory::PAGE_BITS;
+                Common::AlignUp(page_offset + size, Memory::CITRA_PAGE_SIZE) >> Memory::CITRA_PAGE_BITS;
 
             // Skip when the size is zero and num_pages == 0
             if (size == 0) {
@@ -171,8 +171,8 @@ ResultCode TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySy
                          found->target_address, size);
            }
 
-            VAddr prev_reserve = page_start - Memory::PAGE_SIZE;
-            VAddr next_reserve = page_start + num_pages * Memory::PAGE_SIZE;
+            VAddr prev_reserve = page_start - Memory::CITRA_PAGE_SIZE;
+            VAddr next_reserve = page_start + num_pages * Memory::CITRA_PAGE_SIZE;
 
             auto& prev_vma = src_process->vm_manager.FindVMA(prev_reserve)->second;
             auto& next_vma = src_process->vm_manager.FindVMA(next_reserve)->second;
@@ -181,7 +181,7 @@ ResultCode TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySy
 
             // Unmap the buffer and guard pages from the source process
             ResultCode result = src_process->vm_manager.UnmapRange(
-                page_start - Memory::PAGE_SIZE, (num_pages + 2) * Memory::PAGE_SIZE);
+                page_start - Memory::CITRA_PAGE_SIZE, (num_pages + 2) * Memory::CITRA_PAGE_SIZE);
             ASSERT(result == RESULT_SUCCESS);
 
             mapped_buffer_context.erase(found);
@@ -196,13 +196,13 @@ ResultCode TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySy
 
             // Reserve a page of memory before the mapped buffer
             std::shared_ptr<BackingMem> reserve_buffer =
-                std::make_shared<BufferMem>(Memory::PAGE_SIZE);
+                std::make_shared<BufferMem>(Memory::CITRA_PAGE_SIZE);
             dst_process->vm_manager.MapBackingMemoryToBase(
                 Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, reserve_buffer,
-                Memory::PAGE_SIZE, Kernel::MemoryState::Reserved);
+                Memory::CITRA_PAGE_SIZE, Kernel::MemoryState::Reserved);
 
             std::shared_ptr<BackingMem> buffer =
-                std::make_shared<BufferMem>(num_pages * Memory::PAGE_SIZE);
+                std::make_shared<BufferMem>(num_pages * Memory::CITRA_PAGE_SIZE);
             memory.ReadBlock(*src_process, source_address, buffer->GetPtr() + page_offset, size);
 
             // Map the page(s) into the target process' address space.
@@ -127,7 +127,7 @@ void Process::ParseKernelCaps(const u32* kernel_caps, std::size_t len) {
             // Mapped memory page
             AddressMapping mapping;
             mapping.address = descriptor << 12;
-            mapping.size = Memory::PAGE_SIZE;
+            mapping.size = Memory::CITRA_PAGE_SIZE;
             mapping.read_only = false;
             mapping.unk_flag = false;
 
@@ -217,10 +217,10 @@ ResultCode SVC::ControlMemory(u32* out_addr, u32 addr0, u32 addr1, u32 size, u32
               "size=0x{:X}, permissions=0x{:08X}",
               operation, addr0, addr1, size, permissions);
 
-    if ((addr0 & Memory::PAGE_MASK) != 0 || (addr1 & Memory::PAGE_MASK) != 0) {
+    if ((addr0 & Memory::CITRA_PAGE_MASK) != 0 || (addr1 & Memory::CITRA_PAGE_MASK) != 0) {
         return ERR_MISALIGNED_ADDRESS;
     }
-    if ((size & Memory::PAGE_MASK) != 0) {
+    if ((size & Memory::CITRA_PAGE_MASK) != 0) {
         return ERR_MISALIGNED_SIZE;
     }
 
@@ -1286,7 +1286,7 @@ s64 SVC::GetSystemTick() {
 /// Creates a memory block at the specified address with the specified permissions and size
 ResultCode SVC::CreateMemoryBlock(Handle* out_handle, u32 addr, u32 size, u32 my_permission,
                                   u32 other_permission) {
-    if (size % Memory::PAGE_SIZE != 0)
+    if (size % Memory::CITRA_PAGE_SIZE != 0)
         return ERR_MISALIGNED_SIZE;
 
     std::shared_ptr<SharedMemory> shared_memory = nullptr;
@@ -1507,7 +1507,7 @@ ResultCode SVC::GetProcessInfo(s64* out, Handle process_handle, u32 type) {
         // TODO(yuriks): Type 0 returns a slightly higher number than type 2, but I'm not sure
         // what's the difference between them.
         *out = process->memory_used;
-        if (*out % Memory::PAGE_SIZE != 0) {
+        if (*out % Memory::CITRA_PAGE_SIZE != 0) {
             LOG_ERROR(Kernel_SVC, "called, memory size not page-aligned");
             return ERR_MISALIGNED_SIZE;
         }
@@ -105,9 +105,9 @@ void Thread::Stop() {
     ReleaseThreadMutexes(this);
 
     // Mark the TLS slot in the thread's page as free.
-    u32 tls_page = (tls_address - Memory::TLS_AREA_VADDR) / Memory::PAGE_SIZE;
+    u32 tls_page = (tls_address - Memory::TLS_AREA_VADDR) / Memory::CITRA_PAGE_SIZE;
     u32 tls_slot =
-        ((tls_address - Memory::TLS_AREA_VADDR) % Memory::PAGE_SIZE) / Memory::TLS_ENTRY_SIZE;
+        ((tls_address - Memory::TLS_AREA_VADDR) % Memory::CITRA_PAGE_SIZE) / Memory::TLS_ENTRY_SIZE;
     ASSERT(owner_process.lock());
     owner_process.lock()->tls_slots[tls_page].reset(tls_slot);
 }
@@ -373,13 +373,13 @@ ResultVal<std::shared_ptr<Thread>> KernelSystem::CreateThread(
         auto memory_region = GetMemoryRegion(MemoryRegion::BASE);
 
         // Allocate some memory from the end of the linear heap for this region.
-        auto offset = memory_region->LinearAllocate(Memory::PAGE_SIZE);
+        auto offset = memory_region->LinearAllocate(Memory::CITRA_PAGE_SIZE);
         if (!offset) {
             LOG_ERROR(Kernel_SVC,
                       "Not enough space in region to allocate a new TLS page for thread");
             return ERR_OUT_OF_MEMORY;
         }
-        owner_process->memory_used += Memory::PAGE_SIZE;
+        owner_process->memory_used += Memory::CITRA_PAGE_SIZE;
 
         tls_slots.emplace_back(0); // The page is completely available at the start
         available_page = tls_slots.size() - 1;
@@ -389,14 +389,14 @@ ResultVal<std::shared_ptr<Thread>> KernelSystem::CreateThread(
 
         // Map the page to the current process' address space.
         vm_manager.MapBackingMemory(
-            Memory::TLS_AREA_VADDR + static_cast<VAddr>(available_page) * Memory::PAGE_SIZE,
-            memory.GetFCRAMRef(*offset), Memory::PAGE_SIZE, MemoryState::Locked);
+            Memory::TLS_AREA_VADDR + static_cast<VAddr>(available_page) * Memory::CITRA_PAGE_SIZE,
+            memory.GetFCRAMRef(*offset), Memory::CITRA_PAGE_SIZE, MemoryState::Locked);
     }
 
     // Mark the slot as used
     tls_slots[available_page].set(available_slot);
     thread->tls_address = Memory::TLS_AREA_VADDR +
-                          static_cast<VAddr>(available_page) * Memory::PAGE_SIZE +
+                          static_cast<VAddr>(available_page) * Memory::CITRA_PAGE_SIZE +
                           static_cast<VAddr>(available_slot) * Memory::TLS_ENTRY_SIZE;
 
     memory.ZeroBlock(*owner_process, thread->tls_address, Memory::TLS_ENTRY_SIZE);
@@ -260,8 +260,8 @@ VMManager::VMAIter VMManager::StripIterConstness(const VMAHandle& iter) {
 }
 
 ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) {
-    ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: {:#10X}", size);
-    ASSERT_MSG((base & Memory::PAGE_MASK) == 0, "non-page aligned base: {:#010X}", base);
+    ASSERT_MSG((size & Memory::CITRA_PAGE_MASK) == 0, "non-page aligned size: {:#10X}", size);
+    ASSERT_MSG((base & Memory::CITRA_PAGE_MASK) == 0, "non-page aligned base: {:#010X}", base);
 
     VMAIter vma_handle = StripIterConstness(FindVMA(base));
     if (vma_handle == vma_map.end()) {
@@ -296,8 +296,8 @@ ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) {
 }
 
 ResultVal<VMManager::VMAIter> VMManager::CarveVMARange(VAddr target, u32 size) {
-    ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: {:#10X}", size);
-    ASSERT_MSG((target & Memory::PAGE_MASK) == 0, "non-page aligned base: {:#010X}", target);
+    ASSERT_MSG((size & Memory::CITRA_PAGE_MASK) == 0, "non-page aligned size: {:#10X}", size);
+    ASSERT_MSG((target & Memory::CITRA_PAGE_MASK) == 0, "non-page aligned base: {:#010X}", target);
 
     const VAddr target_end = target + size;
     ASSERT(target_end >= target);
@@ -192,7 +192,7 @@ static_assert(sizeof(CaptureState) == 0x8, "CaptureState structure size is wrong
 
 void CSND_SND::Initialize(Kernel::HLERequestContext& ctx) {
     IPC::RequestParser rp(ctx, 0x01, 5, 0);
-    const u32 size = Common::AlignUp(rp.Pop<u32>(), Memory::PAGE_SIZE);
+    const u32 size = Common::AlignUp(rp.Pop<u32>(), Memory::CITRA_PAGE_SIZE);
     master_state_offset = rp.Pop<u32>();
     channel_state_offset = rp.Pop<u32>();
     capture_state_offset = rp.Pop<u32>();
@@ -1502,7 +1502,7 @@ u32 CROHelper::Fix(u32 fix_level) {
         }
     }
 
-    fix_end = Common::AlignUp(fix_end, Memory::PAGE_SIZE);
+    fix_end = Common::AlignUp(fix_end, Memory::CITRA_PAGE_SIZE);
 
     u32 fixed_size = fix_end - module_address;
     SetField(FixedSize, fixed_size);
@@ -1525,8 +1525,8 @@ std::tuple<VAddr, u32> CROHelper::GetExecutablePages() const {
         SegmentEntry entry;
         GetEntry(system.Memory(), i, entry);
         if (entry.type == SegmentType::Code && entry.size != 0) {
-            VAddr begin = Common::AlignDown(entry.offset, Memory::PAGE_SIZE);
-            VAddr end = Common::AlignUp(entry.offset + entry.size, Memory::PAGE_SIZE);
+            VAddr begin = Common::AlignDown(entry.offset, Memory::CITRA_PAGE_SIZE);
+            VAddr end = Common::AlignUp(entry.offset + entry.size, Memory::CITRA_PAGE_SIZE);
             return std::make_tuple(begin, end - begin);
         }
     }
@@ -87,19 +87,19 @@ void RO::Initialize(Kernel::HLERequestContext& ctx) {
         return;
     }
 
-    if (crs_buffer_ptr & Memory::PAGE_MASK) {
+    if (crs_buffer_ptr & Memory::CITRA_PAGE_MASK) {
         LOG_ERROR(Service_LDR, "CRS original address is not aligned");
         rb.Push(ERROR_MISALIGNED_ADDRESS);
         return;
     }
 
-    if (crs_address & Memory::PAGE_MASK) {
+    if (crs_address & Memory::CITRA_PAGE_MASK) {
         LOG_ERROR(Service_LDR, "CRS mapping address is not aligned");
         rb.Push(ERROR_MISALIGNED_ADDRESS);
         return;
     }
 
-    if (crs_size & Memory::PAGE_MASK) {
+    if (crs_size & Memory::CITRA_PAGE_MASK) {
         LOG_ERROR(Service_LDR, "CRS size is not aligned");
         rb.Push(ERROR_MISALIGNED_SIZE);
         return;
@@ -207,21 +207,21 @@ void RO::LoadCRO(Kernel::HLERequestContext& ctx, bool link_on_load_bug_fix) {
         return;
     }
 
-    if (cro_buffer_ptr & Memory::PAGE_MASK) {
+    if (cro_buffer_ptr & Memory::CITRA_PAGE_MASK) {
         LOG_ERROR(Service_LDR, "CRO original address is not aligned");
         rb.Push(ERROR_MISALIGNED_ADDRESS);
         rb.Push<u32>(0);
         return;
     }
 
-    if (cro_address & Memory::PAGE_MASK) {
+    if (cro_address & Memory::CITRA_PAGE_MASK) {
         LOG_ERROR(Service_LDR, "CRO mapping address is not aligned");
         rb.Push(ERROR_MISALIGNED_ADDRESS);
         rb.Push<u32>(0);
         return;
     }
 
-    if (cro_size & Memory::PAGE_MASK) {
+    if (cro_size & Memory::CITRA_PAGE_MASK) {
         LOG_ERROR(Service_LDR, "CRO size is not aligned");
         rb.Push(ERROR_MISALIGNED_SIZE);
         rb.Push<u32>(0);
@@ -354,7 +354,7 @@ void RO::UnloadCRO(Kernel::HLERequestContext& ctx) {
         return;
     }
 
-    if (cro_address & Memory::PAGE_MASK) {
+    if (cro_address & Memory::CITRA_PAGE_MASK) {
         LOG_ERROR(Service_LDR, "CRO address is not aligned");
         rb.Push(ERROR_MISALIGNED_ADDRESS);
         return;
@@ -421,7 +421,7 @@ void RO::LinkCRO(Kernel::HLERequestContext& ctx) {
         return;
     }
 
-    if (cro_address & Memory::PAGE_MASK) {
+    if (cro_address & Memory::CITRA_PAGE_MASK) {
         LOG_ERROR(Service_LDR, "CRO address is not aligned");
         rb.Push(ERROR_MISALIGNED_ADDRESS);
         return;
@@ -461,7 +461,7 @@ void RO::UnlinkCRO(Kernel::HLERequestContext& ctx) {
         return;
     }
 
-    if (cro_address & Memory::PAGE_MASK) {
+    if (cro_address & Memory::CITRA_PAGE_MASK) {
         LOG_ERROR(Service_LDR, "CRO address is not aligned");
         rb.Push(ERROR_MISALIGNED_ADDRESS);
         return;
@@ -94,13 +94,13 @@ ResultStatus AppLoader_NCCH::LoadExec(std::shared_ptr<Kernel::Process>& process)
         codeset->CodeSegment().offset = 0;
         codeset->CodeSegment().addr = overlay_ncch->exheader_header.codeset_info.text.address;
         codeset->CodeSegment().size =
-            overlay_ncch->exheader_header.codeset_info.text.num_max_pages * Memory::PAGE_SIZE;
+            overlay_ncch->exheader_header.codeset_info.text.num_max_pages * Memory::CITRA_PAGE_SIZE;
 
         codeset->RODataSegment().offset =
             codeset->CodeSegment().offset + codeset->CodeSegment().size;
         codeset->RODataSegment().addr = overlay_ncch->exheader_header.codeset_info.ro.address;
         codeset->RODataSegment().size =
-            overlay_ncch->exheader_header.codeset_info.ro.num_max_pages * Memory::PAGE_SIZE;
+            overlay_ncch->exheader_header.codeset_info.ro.num_max_pages * Memory::CITRA_PAGE_SIZE;
 
         // TODO(yuriks): Not sure if the bss size is added to the page-aligned .data size or just
         // to the regular size. Playing it safe for now.
@@ -111,7 +111,7 @@ ResultStatus AppLoader_NCCH::LoadExec(std::shared_ptr<Kernel::Process>& process)
             codeset->RODataSegment().offset + codeset->RODataSegment().size;
         codeset->DataSegment().addr = overlay_ncch->exheader_header.codeset_info.data.address;
         codeset->DataSegment().size =
-            overlay_ncch->exheader_header.codeset_info.data.num_max_pages * Memory::PAGE_SIZE +
+            overlay_ncch->exheader_header.codeset_info.data.num_max_pages * Memory::CITRA_PAGE_SIZE +
             bss_page_size;
 
         // Apply patches now that the entire codeset (including .bss) has been allocated
@@ -51,20 +51,20 @@ public:
 private:
     bool* At(VAddr addr) {
         if (addr >= VRAM_VADDR && addr < VRAM_VADDR_END) {
-            return &vram[(addr - VRAM_VADDR) / PAGE_SIZE];
+            return &vram[(addr - VRAM_VADDR) / CITRA_PAGE_SIZE];
         }
         if (addr >= LINEAR_HEAP_VADDR && addr < LINEAR_HEAP_VADDR_END) {
-            return &linear_heap[(addr - LINEAR_HEAP_VADDR) / PAGE_SIZE];
+            return &linear_heap[(addr - LINEAR_HEAP_VADDR) / CITRA_PAGE_SIZE];
         }
         if (addr >= NEW_LINEAR_HEAP_VADDR && addr < NEW_LINEAR_HEAP_VADDR_END) {
-            return &new_linear_heap[(addr - NEW_LINEAR_HEAP_VADDR) / PAGE_SIZE];
+            return &new_linear_heap[(addr - NEW_LINEAR_HEAP_VADDR) / CITRA_PAGE_SIZE];
         }
         return nullptr;
     }
 
-    std::array<bool, VRAM_SIZE / PAGE_SIZE> vram{};
-    std::array<bool, LINEAR_HEAP_SIZE / PAGE_SIZE> linear_heap{};
-    std::array<bool, NEW_LINEAR_HEAP_SIZE / PAGE_SIZE> new_linear_heap{};
+    std::array<bool, VRAM_SIZE / CITRA_PAGE_SIZE> vram{};
+    std::array<bool, LINEAR_HEAP_SIZE / CITRA_PAGE_SIZE> linear_heap{};
+    std::array<bool, NEW_LINEAR_HEAP_SIZE / CITRA_PAGE_SIZE> new_linear_heap{};
 
     static_assert(sizeof(bool) == 1);
     friend class boost::serialization::access;
@@ -147,12 +147,12 @@ public:
         auto& page_table = *process.vm_manager.page_table;
 
         std::size_t remaining_size = size;
-        std::size_t page_index = src_addr >> PAGE_BITS;
-        std::size_t page_offset = src_addr & PAGE_MASK;
+        std::size_t page_index = src_addr >> CITRA_PAGE_BITS;
+        std::size_t page_offset = src_addr & CITRA_PAGE_MASK;
 
         while (remaining_size > 0) {
-            const std::size_t copy_amount = std::min(PAGE_SIZE - page_offset, remaining_size);
-            const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
+            const std::size_t copy_amount = std::min(CITRA_PAGE_SIZE - page_offset, remaining_size);
+            const VAddr current_vaddr = static_cast<VAddr>((page_index << CITRA_PAGE_BITS) + page_offset);
 
             switch (page_table.attributes[page_index]) {
             case PageType::Unmapped: {
@@ -356,10 +356,10 @@ std::shared_ptr<PageTable> MemorySystem::GetCurrentPageTable() const {
 
 void MemorySystem::MapPages(PageTable& page_table, u32 base, u32 size, MemoryRef memory,
                             PageType type) {
-    LOG_DEBUG(HW_Memory, "Mapping {} onto {:08X}-{:08X}", (void*)memory.GetPtr(), base * PAGE_SIZE,
-              (base + size) * PAGE_SIZE);
+    LOG_DEBUG(HW_Memory, "Mapping {} onto {:08X}-{:08X}", (void*)memory.GetPtr(), base * CITRA_PAGE_SIZE,
+              (base + size) * CITRA_PAGE_SIZE);
 
-    RasterizerFlushVirtualRegion(base << PAGE_BITS, size * PAGE_SIZE,
+    RasterizerFlushVirtualRegion(base << CITRA_PAGE_BITS, size * CITRA_PAGE_SIZE,
                                  FlushMode::FlushAndInvalidate);
 
     u32 end = base + size;
@@ -370,36 +370,36 @@ void MemorySystem::MapPages(PageTable& page_table, u32 base, u32 size, MemoryRef
         page_table.pointers[base] = memory;
 
         // If the memory to map is already rasterizer-cached, mark the page
-        if (type == PageType::Memory && impl->cache_marker.IsCached(base * PAGE_SIZE)) {
+        if (type == PageType::Memory && impl->cache_marker.IsCached(base * CITRA_PAGE_SIZE)) {
             page_table.attributes[base] = PageType::RasterizerCachedMemory;
             page_table.pointers[base] = nullptr;
         }
 
         base += 1;
-        if (memory != nullptr && memory.GetSize() > PAGE_SIZE)
-            memory += PAGE_SIZE;
+        if (memory != nullptr && memory.GetSize() > CITRA_PAGE_SIZE)
+            memory += CITRA_PAGE_SIZE;
     }
 }
 
 void MemorySystem::MapMemoryRegion(PageTable& page_table, VAddr base, u32 size, MemoryRef target) {
-    ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:08X}", size);
-    ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:08X}", base);
-    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory);
+    ASSERT_MSG((size & CITRA_PAGE_MASK) == 0, "non-page aligned size: {:08X}", size);
+    ASSERT_MSG((base & CITRA_PAGE_MASK) == 0, "non-page aligned base: {:08X}", base);
+    MapPages(page_table, base / CITRA_PAGE_SIZE, size / CITRA_PAGE_SIZE, target, PageType::Memory);
 }
 
 void MemorySystem::MapIoRegion(PageTable& page_table, VAddr base, u32 size,
                                MMIORegionPointer mmio_handler) {
-    ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:08X}", size);
-    ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:08X}", base);
-    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special);
+    ASSERT_MSG((size & CITRA_PAGE_MASK) == 0, "non-page aligned size: {:08X}", size);
+    ASSERT_MSG((base & CITRA_PAGE_MASK) == 0, "non-page aligned base: {:08X}", base);
+    MapPages(page_table, base / CITRA_PAGE_SIZE, size / CITRA_PAGE_SIZE, nullptr, PageType::Special);
 
     page_table.special_regions.emplace_back(SpecialRegion{base, size, mmio_handler});
 }
 
 void MemorySystem::UnmapRegion(PageTable& page_table, VAddr base, u32 size) {
-    ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:08X}", size);
-    ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:08X}", base);
-    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped);
+    ASSERT_MSG((size & CITRA_PAGE_MASK) == 0, "non-page aligned size: {:08X}", size);
+    ASSERT_MSG((base & CITRA_PAGE_MASK) == 0, "non-page aligned base: {:08X}", base);
+    MapPages(page_table, base / CITRA_PAGE_SIZE, size / CITRA_PAGE_SIZE, nullptr, PageType::Unmapped);
 }
 
 MemoryRef MemorySystem::GetPointerForRasterizerCache(VAddr addr) const {
@@ -422,15 +422,15 @@ T ReadMMIO(MMIORegionPointer mmio_handler, VAddr addr);
 
 template <typename T>
 T MemorySystem::Read(const VAddr vaddr) {
-    const u8* page_pointer = impl->current_page_table->pointers[vaddr >> PAGE_BITS];
+    const u8* page_pointer = impl->current_page_table->pointers[vaddr >> CITRA_PAGE_BITS];
     if (page_pointer) {
         // NOTE: Avoid adding any extra logic to this fast-path block
         T value;
-        std::memcpy(&value, &page_pointer[vaddr & PAGE_MASK], sizeof(T));
+        std::memcpy(&value, &page_pointer[vaddr & CITRA_PAGE_MASK], sizeof(T));
         return value;
     }
 
-    PageType type = impl->current_page_table->attributes[vaddr >> PAGE_BITS];
+    PageType type = impl->current_page_table->attributes[vaddr >> CITRA_PAGE_BITS];
    switch (type) {
    case PageType::Unmapped:
        LOG_ERROR(HW_Memory, "unmapped Read{} @ 0x{:08X} at PC 0x{:08X}", sizeof(T) * 8, vaddr,
@@ -460,14 +460,14 @@ void WriteMMIO(MMIORegionPointer mmio_handler, VAddr addr, const T data);
 
 template <typename T>
 void MemorySystem::Write(const VAddr vaddr, const T data) {
-    u8* page_pointer = impl->current_page_table->pointers[vaddr >> PAGE_BITS];
+    u8* page_pointer = impl->current_page_table->pointers[vaddr >> CITRA_PAGE_BITS];
     if (page_pointer) {
         // NOTE: Avoid adding any extra logic to this fast-path block
-        std::memcpy(&page_pointer[vaddr & PAGE_MASK], &data, sizeof(T));
+        std::memcpy(&page_pointer[vaddr & CITRA_PAGE_MASK], &data, sizeof(T));
         return;
     }
 
-    PageType type = impl->current_page_table->attributes[vaddr >> PAGE_BITS];
+    PageType type = impl->current_page_table->attributes[vaddr >> CITRA_PAGE_BITS];
    switch (type) {
    case PageType::Unmapped:
        LOG_ERROR(HW_Memory, "unmapped Write{} 0x{:08X} @ 0x{:08X} at PC 0x{:08X}",
@@ -492,14 +492,14 @@ void MemorySystem::Write(const VAddr vaddr, const T data) {
 bool MemorySystem::IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) {
     auto& page_table = *process.vm_manager.page_table;
 
-    auto page_pointer = page_table.pointers[vaddr >> PAGE_BITS];
+    auto page_pointer = page_table.pointers[vaddr >> CITRA_PAGE_BITS];
     if (page_pointer)
         return true;
 
-    if (page_table.attributes[vaddr >> PAGE_BITS] == PageType::RasterizerCachedMemory)
+    if (page_table.attributes[vaddr >> CITRA_PAGE_BITS] == PageType::RasterizerCachedMemory)
         return true;
 
-    if (page_table.attributes[vaddr >> PAGE_BITS] != PageType::Special)
+    if (page_table.attributes[vaddr >> CITRA_PAGE_BITS] != PageType::Special)
         return false;
 
     MMIORegionPointer mmio_region = impl->GetMMIOHandler(page_table, vaddr);
@@ -542,12 +542,12 @@ PAddr MemorySystem::ClampPhysicalAddress(PAddr base, PAddr address) const {
 }
 
 u8* MemorySystem::GetPointer(const VAddr vaddr) {
-    u8* page_pointer = impl->current_page_table->pointers[vaddr >> PAGE_BITS];
+    u8* page_pointer = impl->current_page_table->pointers[vaddr >> CITRA_PAGE_BITS];
     if (page_pointer) {
-        return page_pointer + (vaddr & PAGE_MASK);
+        return page_pointer + (vaddr & CITRA_PAGE_MASK);
     }
 
-    if (impl->current_page_table->attributes[vaddr >> PAGE_BITS] ==
+    if (impl->current_page_table->attributes[vaddr >> CITRA_PAGE_BITS] ==
         PageType::RasterizerCachedMemory) {
         return GetPointerForRasterizerCache(vaddr);
     }
@@ -558,12 +558,12 @@ u8* MemorySystem::GetPointer(const VAddr vaddr) {
 }
 
 const u8* MemorySystem::GetPointer(const VAddr vaddr) const {
-    const u8* page_pointer = impl->current_page_table->pointers[vaddr >> PAGE_BITS];
+    const u8* page_pointer = impl->current_page_table->pointers[vaddr >> CITRA_PAGE_BITS];
     if (page_pointer) {
-        return page_pointer + (vaddr & PAGE_MASK);
+        return page_pointer + (vaddr & CITRA_PAGE_MASK);
     }
 
-    if (impl->current_page_table->attributes[vaddr >> PAGE_BITS] ==
+    if (impl->current_page_table->attributes[vaddr >> CITRA_PAGE_BITS] ==
         PageType::RasterizerCachedMemory) {
         return GetPointerForRasterizerCache(vaddr);
     }
@@ -671,14 +671,14 @@ void MemorySystem::RasterizerMarkRegionCached(PAddr start, u32 size, bool cached
         return;
     }
 
-    u32 num_pages = ((start + size - 1) >> PAGE_BITS) - (start >> PAGE_BITS) + 1;
+    u32 num_pages = ((start + size - 1) >> CITRA_PAGE_BITS) - (start >> CITRA_PAGE_BITS) + 1;
     PAddr paddr = start;
 
-    for (unsigned i = 0; i < num_pages; ++i, paddr += PAGE_SIZE) {
+    for (unsigned i = 0; i < num_pages; ++i, paddr += CITRA_PAGE_SIZE) {
         for (VAddr vaddr : PhysicalToVirtualAddressForRasterizer(paddr)) {
             impl->cache_marker.Mark(vaddr, cached);
             for (auto page_table : impl->page_table_list) {
-                PageType& page_type = page_table->attributes[vaddr >> PAGE_BITS];
+                PageType& page_type = page_table->attributes[vaddr >> CITRA_PAGE_BITS];
 
                 if (cached) {
                     // Switch page type to cached if now cached
@@ -689,7 +689,7 @@ void MemorySystem::RasterizerMarkRegionCached(PAddr start, u32 size, bool cached
                     break;
                 case PageType::Memory:
                     page_type = PageType::RasterizerCachedMemory;
-                    page_table->pointers[vaddr >> PAGE_BITS] = nullptr;
+                    page_table->pointers[vaddr >> CITRA_PAGE_BITS] = nullptr;
                     break;
                 default:
                     UNREACHABLE();
@@ -703,8 +703,8 @@ void MemorySystem::RasterizerMarkRegionCached(PAddr start, u32 size, bool cached
                     break;
                 case PageType::RasterizerCachedMemory: {
                     page_type = PageType::Memory;
-                    page_table->pointers[vaddr >> PAGE_BITS] =
-                        GetPointerForRasterizerCache(vaddr & ~PAGE_MASK);
+                    page_table->pointers[vaddr >> CITRA_PAGE_BITS] =
+                        GetPointerForRasterizerCache(vaddr & ~CITRA_PAGE_MASK);
                     break;
                 }
                 default:
@@ -845,7 +845,7 @@ void MemorySystem::WriteBlock(const VAddr dest_addr, const void* src_buffer, con
 
 void MemorySystem::ZeroBlock(const Kernel::Process& process, const VAddr dest_addr,
                              const std::size_t size) {
-    static const std::array<u8, PAGE_SIZE> zeros{0};
+    static const std::array<u8, CITRA_PAGE_SIZE> zeros{0};
 
     impl->WalkBlock(
         process, dest_addr, size,
@@ -879,7 +879,7 @@ void MemorySystem::CopyBlock(const Kernel::Process& process, VAddr dest_addr, VA
 void MemorySystem::CopyBlock(const Kernel::Process& dest_process,
                              const Kernel::Process& src_process, VAddr dest_addr, VAddr src_addr,
                              std::size_t size) {
-    std::array<u8, PAGE_SIZE> copy_buffer{};
+    std::array<u8, CITRA_PAGE_SIZE> copy_buffer{};
 
     impl->WalkBlock(
         src_process, src_addr, size,
@@ -24,17 +24,14 @@ class DspInterface;
 
 namespace Memory {
 
-// Are defined in a system header
-#undef PAGE_SIZE
-#undef PAGE_MASK
 /**
  * Page size used by the ARM architecture. This is the smallest granularity with which memory can
  * be mapped.
  */
-constexpr u32 PAGE_SIZE = 0x1000;
-constexpr u32 PAGE_MASK = PAGE_SIZE - 1;
-constexpr int PAGE_BITS = 12;
-constexpr std::size_t PAGE_TABLE_NUM_ENTRIES = 1 << (32 - PAGE_BITS);
+constexpr u32 CITRA_PAGE_SIZE = 0x1000;
+constexpr u32 CITRA_PAGE_MASK = CITRA_PAGE_SIZE - 1;
+constexpr int CITRA_PAGE_BITS = 12;
+constexpr std::size_t PAGE_TABLE_NUM_ENTRIES = 1 << (32 - CITRA_PAGE_BITS);
 
 enum class PageType {
     /// Page is unmapped and should cause an access error.
@@ -137,7 +137,7 @@ TEST_CASE("HLERequestContext::PopulateFromIncomingCommandBuffer", "[core][kernel
     }
 
     SECTION("translates StaticBuffer descriptors") {
-        auto mem = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
+        auto mem = std::make_shared<BufferMem>(Memory::CITRA_PAGE_SIZE);
         MemoryRef buffer{mem};
         std::fill(buffer.GetPtr(), buffer.GetPtr() + buffer.GetSize(), 0xAB);
 
@@ -161,7 +161,7 @@ TEST_CASE("HLERequestContext::PopulateFromIncomingCommandBuffer", "[core][kernel
     }
 
     SECTION("translates MappedBuffer descriptors") {
-        auto mem = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
+        auto mem = std::make_shared<BufferMem>(Memory::CITRA_PAGE_SIZE);
         MemoryRef buffer{mem};
         std::fill(buffer.GetPtr(), buffer.GetPtr() + buffer.GetSize(), 0xCD);
 
@@ -187,11 +187,11 @@ TEST_CASE("HLERequestContext::PopulateFromIncomingCommandBuffer", "[core][kernel
     }
 
     SECTION("translates mixed params") {
-        auto mem_static = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
+        auto mem_static = std::make_shared<BufferMem>(Memory::CITRA_PAGE_SIZE);
         MemoryRef buffer_static{mem_static};
         std::fill(buffer_static.GetPtr(), buffer_static.GetPtr() + buffer_static.GetSize(), 0xCE);
 
-        auto mem_mapped = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
+        auto mem_mapped = std::make_shared<BufferMem>(Memory::CITRA_PAGE_SIZE);
         MemoryRef buffer_mapped{mem_mapped};
         std::fill(buffer_mapped.GetPtr(), buffer_mapped.GetPtr() + buffer_mapped.GetSize(), 0xDF);
 
@@ -321,12 +321,12 @@ TEST_CASE("HLERequestContext::WriteToOutgoingCommandBuffer", "[core][kernel]") {
     }
 
     SECTION("translates StaticBuffer descriptors") {
-        std::vector<u8> input_buffer(Memory::PAGE_SIZE);
+        std::vector<u8> input_buffer(Memory::CITRA_PAGE_SIZE);
         std::fill(input_buffer.begin(), input_buffer.end(), 0xAB);
 
         context.AddStaticBuffer(0, input_buffer);
 
-        auto output_mem = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
+        auto output_mem = std::make_shared<BufferMem>(Memory::CITRA_PAGE_SIZE);
         MemoryRef output_buffer{output_mem};
 
         VAddr target_address = 0x10000000;
@@ -355,10 +355,10 @@ TEST_CASE("HLERequestContext::WriteToOutgoingCommandBuffer", "[core][kernel]") {
     }
 
     SECTION("translates StaticBuffer descriptors") {
-        std::vector<u8> input_buffer(Memory::PAGE_SIZE);
+        std::vector<u8> input_buffer(Memory::CITRA_PAGE_SIZE);
         std::fill(input_buffer.begin(), input_buffer.end(), 0xAB);
 
-        auto output_mem = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
+        auto output_mem = std::make_shared<BufferMem>(Memory::CITRA_PAGE_SIZE);
         MemoryRef output_buffer{output_mem};
 
         VAddr target_address = 0x10000000;
@@ -10,7 +10,7 @@
 #include "core/memory.h"
 
 TEST_CASE("Memory Basics", "[kernel][memory]") {
-    auto mem = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
+    auto mem = std::make_shared<BufferMem>(Memory::CITRA_PAGE_SIZE);
     MemoryRef block{mem};
     Memory::MemorySystem memory;
     SECTION("mapping memory") {
@@ -10,8 +10,8 @@
 namespace VideoCore {
 
 void RasterizerAccelerated::UpdatePagesCachedCount(PAddr addr, u32 size, int delta) {
-    const u32 page_start = addr >> Memory::PAGE_BITS;
-    const u32 page_end = ((addr + size - 1) >> Memory::PAGE_BITS);
+    const u32 page_start = addr >> Memory::CITRA_PAGE_BITS;
+    const u32 page_end = ((addr + size - 1) >> Memory::CITRA_PAGE_BITS);
 
     u32 uncache_start_addr = 0;
     u32 cache_start_addr = 0;
@@ -36,10 +36,10 @@ void RasterizerAccelerated::UpdatePagesCachedCount(PAddr addr, u32 size, int del
         // Assume delta is either -1 or 1
         if (count == 0) {
             if (uncache_bytes == 0) {
-                uncache_start_addr = page << Memory::PAGE_BITS;
+                uncache_start_addr = page << Memory::CITRA_PAGE_BITS;
             }
 
-            uncache_bytes += Memory::PAGE_SIZE;
+            uncache_bytes += Memory::CITRA_PAGE_SIZE;
         } else if (uncache_bytes > 0) {
             VideoCore::g_memory->RasterizerMarkRegionCached(uncache_start_addr, uncache_bytes,
                                                             false);
@@ -48,10 +48,10 @@ void RasterizerAccelerated::UpdatePagesCachedCount(PAddr addr, u32 size, int del
 
         if (count == 1 && delta > 0) {
             if (cache_bytes == 0) {
-                cache_start_addr = page << Memory::PAGE_BITS;
+                cache_start_addr = page << Memory::CITRA_PAGE_BITS;
             }
 
-            cache_bytes += Memory::PAGE_SIZE;
+            cache_bytes += Memory::CITRA_PAGE_SIZE;
         } else if (cache_bytes > 0) {
             VideoCore::g_memory->RasterizerMarkRegionCached(cache_start_addr, cache_bytes,
                                                             true);
@@ -893,8 +893,8 @@ void RasterizerCache::ClearAll(bool flush) {
     for (auto& pair : RangeFromInterval(cached_pages, flush_interval)) {
         const auto interval = pair.first & flush_interval;
 
-        const PAddr interval_start_addr = boost::icl::first(interval) << Memory::PAGE_BITS;
-        const PAddr interval_end_addr = boost::icl::last_next(interval) << Memory::PAGE_BITS;
+        const PAddr interval_start_addr = boost::icl::first(interval) << Memory::CITRA_PAGE_BITS;
+        const PAddr interval_end_addr = boost::icl::last_next(interval) << Memory::CITRA_PAGE_BITS;
         const u32 interval_size = interval_end_addr - interval_start_addr;
 
         VideoCore::g_memory->RasterizerMarkRegionCached(interval_start_addr, interval_size, false);