From 25a6da50efd4c1c69e20d6da7030fde3858a004d Mon Sep 17 00:00:00 2001
From: Kyle Kienapfel
Date: Thu, 8 Sep 2022 23:01:45 +0300
Subject: [PATCH] code: dodge PAGE_SIZE #define

Some header files, specifically on OSX and with musl libc, define
PAGE_SIZE to be a number. This is fine, except that in Citra we use
PAGE_SIZE as a variable.

Specific example: `static constexpr u64 PAGE_SIZE = u64(1) << PAGE_BITS;`

PAGE_SIZE, PAGE_BITS and PAGE_MASK are all affected in the same way, so
each of them is renamed with a CITRA_ prefix to dodge the system macros.
---
 src/common/microprofile.h                  |  9 --
 src/core/hle/kernel/ipc.cpp                | 16 ++--
 src/core/hle/kernel/process.cpp            |  2 +-
 src/core/hle/kernel/svc.cpp                |  8 +-
 src/core/hle/kernel/thread.cpp             | 14 +--
 src/core/hle/kernel/vm_manager.cpp         |  8 +-
 src/core/hle/service/csnd/csnd_snd.cpp     |  2 +-
 src/core/hle/service/ldr_ro/cro_helper.cpp |  6 +-
 src/core/hle/service/ldr_ro/ldr_ro.cpp     | 18 ++--
 src/core/loader/ncch.cpp                   |  6 +-
 src/core/memory.cpp                        | 96 +++++++++----------
 src/core/memory.h                          | 11 +--
 src/tests/core/hle/kernel/hle_ipc.cpp      | 16 ++--
 src/tests/core/memory/vm_manager.cpp       |  2 +-
 src/video_core/rasterizer_accelerated.cpp  | 12 +--
 .../rasterizer_cache/rasterizer_cache.cpp  |  4 +-
 16 files changed, 109 insertions(+), 121 deletions(-)

diff --git a/src/common/microprofile.h b/src/common/microprofile.h
index 54e7f3cc4..027a3ad94 100644
--- a/src/common/microprofile.h
+++ b/src/common/microprofile.h
@@ -23,12 +23,3 @@ typedef void* HANDLE;
 #include <microprofile.h>
 
 #define MP_RGB(r, g, b) ((r) << 16 | (g) << 8 | (b) << 0)
-
-// On OS X, some Mach header included by MicroProfile defines these as macros, conflicting with
-// identifiers we use.
-#ifdef PAGE_SIZE
-#undef PAGE_SIZE
-#endif
-#ifdef PAGE_MASK
-#undef PAGE_MASK
-#endif
diff --git a/src/core/hle/kernel/ipc.cpp b/src/core/hle/kernel/ipc.cpp
index 817f58216..8687c22ba 100644
--- a/src/core/hle/kernel/ipc.cpp
+++ b/src/core/hle/kernel/ipc.cpp
@@ -138,10 +138,10 @@ ResultCode TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySy
             u32 size = static_cast<u32>(descInfo.size);
             IPC::MappedBufferPermissions permissions = descInfo.perms;
 
-            VAddr page_start = Common::AlignDown(source_address, Memory::PAGE_SIZE);
+            VAddr page_start = Common::AlignDown(source_address, Memory::CITRA_PAGE_SIZE);
             u32 page_offset = source_address - page_start;
             u32 num_pages =
-                Common::AlignUp(page_offset + size, Memory::PAGE_SIZE) >> Memory::PAGE_BITS;
+                Common::AlignUp(page_offset + size, Memory::CITRA_PAGE_SIZE) >> Memory::CITRA_PAGE_BITS;
 
             // Skip when the size is zero and num_pages == 0
             if (size == 0) {
@@ -171,8 +171,8 @@ ResultCode TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySy
                           found->target_address, size);
             }
 
-            VAddr prev_reserve = page_start - Memory::PAGE_SIZE;
-            VAddr next_reserve = page_start + num_pages * Memory::PAGE_SIZE;
+            VAddr prev_reserve = page_start - Memory::CITRA_PAGE_SIZE;
+            VAddr next_reserve = page_start + num_pages * Memory::CITRA_PAGE_SIZE;
 
             auto& prev_vma = src_process->vm_manager.FindVMA(prev_reserve)->second;
             auto& next_vma = src_process->vm_manager.FindVMA(next_reserve)->second;
@@ -181,7 +181,7 @@ ResultCode TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySy
 
             // Unmap the buffer and guard pages from the source process
             ResultCode result = src_process->vm_manager.UnmapRange(
-                page_start - Memory::PAGE_SIZE, (num_pages + 2) * Memory::PAGE_SIZE);
+                page_start - Memory::CITRA_PAGE_SIZE, (num_pages + 2) * Memory::CITRA_PAGE_SIZE);
             ASSERT(result == RESULT_SUCCESS);
 
             mapped_buffer_context.erase(found);
@@ -196,13 +196,13 @@ ResultCode TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySy
             // Reserve a page of memory before the mapped buffer
             std::shared_ptr<BackingMem> reserve_buffer =
-                std::make_shared<BufferMem>(Memory::PAGE_SIZE);
+                std::make_shared<BufferMem>(Memory::CITRA_PAGE_SIZE);
 
             dst_process->vm_manager.MapBackingMemoryToBase(
                 Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, reserve_buffer,
-                Memory::PAGE_SIZE, Kernel::MemoryState::Reserved);
+                Memory::CITRA_PAGE_SIZE, Kernel::MemoryState::Reserved);
 
             std::shared_ptr<BackingMem> buffer =
-                std::make_shared<BufferMem>(num_pages * Memory::PAGE_SIZE);
+                std::make_shared<BufferMem>(num_pages * Memory::CITRA_PAGE_SIZE);
             memory.ReadBlock(*src_process, source_address, buffer->GetPtr() + page_offset, size);
 
             // Map the page(s) into the target process' address space.
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 2478d2218..e78c4a1bb 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -127,7 +127,7 @@ void Process::ParseKernelCaps(const u32* kernel_caps, std::size_t len) {
             // Mapped memory page
             AddressMapping mapping;
             mapping.address = descriptor << 12;
-            mapping.size = Memory::PAGE_SIZE;
+            mapping.size = Memory::CITRA_PAGE_SIZE;
             mapping.read_only = false;
             mapping.unk_flag = false;
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index e38791145..8f93be85e 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -217,10 +217,10 @@ ResultCode SVC::ControlMemory(u32* out_addr, u32 addr0, u32 addr1, u32 size, u32
               "size=0x{:X}, permissions=0x{:08X}",
               operation, addr0, addr1, size, permissions);
 
-    if ((addr0 & Memory::PAGE_MASK) != 0 || (addr1 & Memory::PAGE_MASK) != 0) {
+    if ((addr0 & Memory::CITRA_PAGE_MASK) != 0 || (addr1 & Memory::CITRA_PAGE_MASK) != 0) {
         return ERR_MISALIGNED_ADDRESS;
     }
-    if ((size & Memory::PAGE_MASK) != 0) {
+    if ((size & Memory::CITRA_PAGE_MASK) != 0) {
         return ERR_MISALIGNED_SIZE;
     }
 
@@ -1286,7 +1286,7 @@ s64 SVC::GetSystemTick() {
 /// Creates a memory block at the specified address with the specified permissions and size
 ResultCode SVC::CreateMemoryBlock(Handle* out_handle, u32 addr, u32 size, u32 my_permission,
                                   u32 other_permission) {
-    if (size % Memory::PAGE_SIZE != 0)
+    if (size % Memory::CITRA_PAGE_SIZE != 0)
         return ERR_MISALIGNED_SIZE;
 
     std::shared_ptr<SharedMemory> shared_memory = nullptr;
@@ -1507,7 +1507,7 @@ ResultCode SVC::GetProcessInfo(s64* out, Handle process_handle, u32 type) {
         // TODO(yuriks): Type 0 returns a slightly higher number than type 2, but I'm not sure
         // what's the difference between them.
         *out = process->memory_used;
-        if (*out % Memory::PAGE_SIZE != 0) {
+        if (*out % Memory::CITRA_PAGE_SIZE != 0) {
             LOG_ERROR(Kernel_SVC, "called, memory size not page-aligned");
             return ERR_MISALIGNED_SIZE;
         }
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index a354aff71..e470a3687 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -105,9 +105,9 @@ void Thread::Stop() {
     ReleaseThreadMutexes(this);
 
     // Mark the TLS slot in the thread's page as free.
-    u32 tls_page = (tls_address - Memory::TLS_AREA_VADDR) / Memory::PAGE_SIZE;
+    u32 tls_page = (tls_address - Memory::TLS_AREA_VADDR) / Memory::CITRA_PAGE_SIZE;
     u32 tls_slot =
-        ((tls_address - Memory::TLS_AREA_VADDR) % Memory::PAGE_SIZE) / Memory::TLS_ENTRY_SIZE;
+        ((tls_address - Memory::TLS_AREA_VADDR) % Memory::CITRA_PAGE_SIZE) / Memory::TLS_ENTRY_SIZE;
     ASSERT(owner_process.lock());
     owner_process.lock()->tls_slots[tls_page].reset(tls_slot);
 }
@@ -373,13 +373,13 @@ ResultVal<std::shared_ptr<Thread>> KernelSystem::CreateThread(
         auto memory_region = GetMemoryRegion(MemoryRegion::BASE);
 
         // Allocate some memory from the end of the linear heap for this region.
-        auto offset = memory_region->LinearAllocate(Memory::PAGE_SIZE);
+        auto offset = memory_region->LinearAllocate(Memory::CITRA_PAGE_SIZE);
         if (!offset) {
             LOG_ERROR(Kernel_SVC, "Not enough space in region to allocate a new TLS page for thread");
             return ERR_OUT_OF_MEMORY;
         }
-        owner_process->memory_used += Memory::PAGE_SIZE;
+        owner_process->memory_used += Memory::CITRA_PAGE_SIZE;
 
         tls_slots.emplace_back(0); // The page is completely available at the start
         available_page = tls_slots.size() - 1;
@@ -389,14 +389,14 @@ ResultVal<std::shared_ptr<Thread>> KernelSystem::CreateThread(
 
         // Map the page to the current process' address space.
         vm_manager.MapBackingMemory(
-            Memory::TLS_AREA_VADDR + static_cast<VAddr>(available_page) * Memory::PAGE_SIZE,
-            memory.GetFCRAMRef(*offset), Memory::PAGE_SIZE, MemoryState::Locked);
+            Memory::TLS_AREA_VADDR + static_cast<VAddr>(available_page) * Memory::CITRA_PAGE_SIZE,
+            memory.GetFCRAMRef(*offset), Memory::CITRA_PAGE_SIZE, MemoryState::Locked);
     }
 
     // Mark the slot as used
     tls_slots[available_page].set(available_slot);
 
     thread->tls_address = Memory::TLS_AREA_VADDR +
-                          static_cast<VAddr>(available_page) * Memory::PAGE_SIZE +
+                          static_cast<VAddr>(available_page) * Memory::CITRA_PAGE_SIZE +
                           static_cast<VAddr>(available_slot) * Memory::TLS_ENTRY_SIZE;
 
     memory.ZeroBlock(*owner_process, thread->tls_address, Memory::TLS_ENTRY_SIZE);
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 48a9ae9bc..7ecc22870 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -260,8 +260,8 @@ VMManager::VMAIter VMManager::StripIterConstness(const VMAHandle& iter) {
 }
 
 ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) {
-    ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: {:#10X}", size);
-    ASSERT_MSG((base & Memory::PAGE_MASK) == 0, "non-page aligned base: {:#010X}", base);
+    ASSERT_MSG((size & Memory::CITRA_PAGE_MASK) == 0, "non-page aligned size: {:#10X}", size);
+    ASSERT_MSG((base & Memory::CITRA_PAGE_MASK) == 0, "non-page aligned base: {:#010X}", base);
 
     VMAIter vma_handle = StripIterConstness(FindVMA(base));
     if (vma_handle == vma_map.end()) {
@@ -296,8 +296,8 @@ ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) {
 }
 
 ResultVal<VMManager::VMAIter> VMManager::CarveVMARange(VAddr target, u32 size) {
-    ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: {:#10X}", size);
-    ASSERT_MSG((target & Memory::PAGE_MASK) == 0, "non-page aligned base: {:#010X}", target);
+    ASSERT_MSG((size & Memory::CITRA_PAGE_MASK) == 0, "non-page aligned size: {:#10X}", size);
+    ASSERT_MSG((target & Memory::CITRA_PAGE_MASK) == 0, "non-page aligned base: {:#010X}", target);
 
     const VAddr target_end = target + size;
     ASSERT(target_end >= target);
diff --git a/src/core/hle/service/csnd/csnd_snd.cpp b/src/core/hle/service/csnd/csnd_snd.cpp
index 160ce313f..7eda52e09 100644
--- a/src/core/hle/service/csnd/csnd_snd.cpp
+++ b/src/core/hle/service/csnd/csnd_snd.cpp
@@ -192,7 +192,7 @@ static_assert(sizeof(CaptureState) == 0x8, "CaptureState structure size is wrong
 void CSND_SND::Initialize(Kernel::HLERequestContext& ctx) {
     IPC::RequestParser rp(ctx, 0x01, 5, 0);
-    const u32 size = Common::AlignUp(rp.Pop<u32>(), Memory::PAGE_SIZE);
+    const u32 size = Common::AlignUp(rp.Pop<u32>(), Memory::CITRA_PAGE_SIZE);
     master_state_offset = rp.Pop<u32>();
     channel_state_offset = rp.Pop<u32>();
     capture_state_offset = rp.Pop<u32>();
diff --git a/src/core/hle/service/ldr_ro/cro_helper.cpp b/src/core/hle/service/ldr_ro/cro_helper.cpp
index 78a03063f..73713a3b6 100644
--- a/src/core/hle/service/ldr_ro/cro_helper.cpp
+++ b/src/core/hle/service/ldr_ro/cro_helper.cpp
@@ -1502,7 +1502,7 @@ u32 CROHelper::Fix(u32 fix_level) {
         }
     }
 
-    fix_end = Common::AlignUp(fix_end, Memory::PAGE_SIZE);
+    fix_end = Common::AlignUp(fix_end, Memory::CITRA_PAGE_SIZE);
 
     u32 fixed_size = fix_end - module_address;
     SetField(FixedSize, fixed_size);
@@ -1525,8 +1525,8 @@ std::tuple<VAddr, u32> CROHelper::GetExecutablePages() const {
         SegmentEntry entry;
         GetEntry(system.Memory(), i, entry);
         if (entry.type == SegmentType::Code && entry.size != 0) {
-            VAddr begin = Common::AlignDown(entry.offset, Memory::PAGE_SIZE);
-            VAddr end = Common::AlignUp(entry.offset + entry.size, Memory::PAGE_SIZE);
+            VAddr begin = Common::AlignDown(entry.offset, Memory::CITRA_PAGE_SIZE);
+            VAddr end = Common::AlignUp(entry.offset + entry.size, Memory::CITRA_PAGE_SIZE);
             return std::make_tuple(begin, end - begin);
         }
     }
diff --git a/src/core/hle/service/ldr_ro/ldr_ro.cpp b/src/core/hle/service/ldr_ro/ldr_ro.cpp
index a2f8379b1..96359512d 100644
--- a/src/core/hle/service/ldr_ro/ldr_ro.cpp
+++ b/src/core/hle/service/ldr_ro/ldr_ro.cpp
@@ -87,19 +87,19 @@ void RO::Initialize(Kernel::HLERequestContext& ctx) {
         return;
     }
 
-    if (crs_buffer_ptr & Memory::PAGE_MASK) {
+    if (crs_buffer_ptr & Memory::CITRA_PAGE_MASK) {
         LOG_ERROR(Service_LDR, "CRS original address is not aligned");
         rb.Push(ERROR_MISALIGNED_ADDRESS);
         return;
     }
 
-    if (crs_address & Memory::PAGE_MASK) {
+    if (crs_address & Memory::CITRA_PAGE_MASK) {
         LOG_ERROR(Service_LDR, "CRS mapping address is not aligned");
         rb.Push(ERROR_MISALIGNED_ADDRESS);
         return;
     }
 
-    if (crs_size & Memory::PAGE_MASK) {
+    if (crs_size & Memory::CITRA_PAGE_MASK) {
         LOG_ERROR(Service_LDR, "CRS size is not aligned");
         rb.Push(ERROR_MISALIGNED_SIZE);
         return;
@@ -207,21 +207,21 @@ void RO::LoadCRO(Kernel::HLERequestContext& ctx, bool link_on_load_bug_fix) {
         return;
     }
 
-    if (cro_buffer_ptr & Memory::PAGE_MASK) {
+    if (cro_buffer_ptr & Memory::CITRA_PAGE_MASK) {
         LOG_ERROR(Service_LDR, "CRO original address is not aligned");
         rb.Push(ERROR_MISALIGNED_ADDRESS);
         rb.Push(0);
         return;
     }
 
-    if (cro_address & Memory::PAGE_MASK) {
+    if (cro_address & Memory::CITRA_PAGE_MASK) {
         LOG_ERROR(Service_LDR, "CRO mapping address is not aligned");
         rb.Push(ERROR_MISALIGNED_ADDRESS);
         rb.Push(0);
         return;
     }
 
-    if (cro_size & Memory::PAGE_MASK) {
+    if (cro_size & Memory::CITRA_PAGE_MASK) {
         LOG_ERROR(Service_LDR, "CRO size is not aligned");
         rb.Push(ERROR_MISALIGNED_SIZE);
         rb.Push(0);
@@ -354,7 +354,7 @@ void RO::UnloadCRO(Kernel::HLERequestContext& ctx) {
         return;
     }
 
-    if (cro_address & Memory::PAGE_MASK) {
+    if (cro_address & Memory::CITRA_PAGE_MASK) {
         LOG_ERROR(Service_LDR, "CRO address is not aligned");
         rb.Push(ERROR_MISALIGNED_ADDRESS);
         return;
@@ -421,7 +421,7 @@ void RO::LinkCRO(Kernel::HLERequestContext& ctx) {
         return;
     }
 
-    if (cro_address & Memory::PAGE_MASK) {
+    if (cro_address & Memory::CITRA_PAGE_MASK) {
         LOG_ERROR(Service_LDR, "CRO address is not aligned");
         rb.Push(ERROR_MISALIGNED_ADDRESS);
         return;
@@ -461,7 +461,7 @@ void RO::UnlinkCRO(Kernel::HLERequestContext& ctx) {
         return;
     }
 
-    if (cro_address & Memory::PAGE_MASK) {
+    if (cro_address & Memory::CITRA_PAGE_MASK) {
         LOG_ERROR(Service_LDR, "CRO address is not aligned");
         rb.Push(ERROR_MISALIGNED_ADDRESS);
         return;
diff --git a/src/core/loader/ncch.cpp b/src/core/loader/ncch.cpp
index 3fade83a9..24f9143af 100644
--- a/src/core/loader/ncch.cpp
+++ b/src/core/loader/ncch.cpp
@@ -94,13 +94,13 @@ ResultStatus AppLoader_NCCH::LoadExec(std::shared_ptr<Kernel::Process>& process)
     codeset->CodeSegment().offset = 0;
     codeset->CodeSegment().addr = overlay_ncch->exheader_header.codeset_info.text.address;
     codeset->CodeSegment().size =
-        overlay_ncch->exheader_header.codeset_info.text.num_max_pages * Memory::PAGE_SIZE;
+        overlay_ncch->exheader_header.codeset_info.text.num_max_pages * Memory::CITRA_PAGE_SIZE;
 
     codeset->RODataSegment().offset = codeset->CodeSegment().offset + codeset->CodeSegment().size;
     codeset->RODataSegment().addr = overlay_ncch->exheader_header.codeset_info.ro.address;
     codeset->RODataSegment().size =
-        overlay_ncch->exheader_header.codeset_info.ro.num_max_pages * Memory::PAGE_SIZE;
+        overlay_ncch->exheader_header.codeset_info.ro.num_max_pages * Memory::CITRA_PAGE_SIZE;
 
     // TODO(yuriks): Not sure if the bss size is added to the page-aligned .data size or just
     // to the regular size. Playing it safe for now.
@@ -111,7 +111,7 @@ ResultStatus AppLoader_NCCH::LoadExec(std::shared_ptr<Kernel::Process>& process)
         codeset->RODataSegment().offset + codeset->RODataSegment().size;
     codeset->DataSegment().addr = overlay_ncch->exheader_header.codeset_info.data.address;
     codeset->DataSegment().size =
-        overlay_ncch->exheader_header.codeset_info.data.num_max_pages * Memory::PAGE_SIZE +
+        overlay_ncch->exheader_header.codeset_info.data.num_max_pages * Memory::CITRA_PAGE_SIZE +
         bss_page_size;
 
     // Apply patches now that the entire codeset (including .bss) has been allocated
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 481e04868..c9293bc55 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -51,20 +51,20 @@ public:
 private:
     bool* At(VAddr addr) {
         if (addr >= VRAM_VADDR && addr < VRAM_VADDR_END) {
-            return &vram[(addr - VRAM_VADDR) / PAGE_SIZE];
+            return &vram[(addr - VRAM_VADDR) / CITRA_PAGE_SIZE];
         }
         if (addr >= LINEAR_HEAP_VADDR && addr < LINEAR_HEAP_VADDR_END) {
-            return &linear_heap[(addr - LINEAR_HEAP_VADDR) / PAGE_SIZE];
+            return &linear_heap[(addr - LINEAR_HEAP_VADDR) / CITRA_PAGE_SIZE];
         }
         if (addr >= NEW_LINEAR_HEAP_VADDR && addr < NEW_LINEAR_HEAP_VADDR_END) {
-            return &new_linear_heap[(addr - NEW_LINEAR_HEAP_VADDR) / PAGE_SIZE];
+            return &new_linear_heap[(addr - NEW_LINEAR_HEAP_VADDR) / CITRA_PAGE_SIZE];
         }
         return nullptr;
     }
 
-    std::array<bool, VRAM_SIZE / PAGE_SIZE> vram{};
-    std::array<bool, LINEAR_HEAP_SIZE / PAGE_SIZE> linear_heap{};
-    std::array<bool, NEW_LINEAR_HEAP_SIZE / PAGE_SIZE> new_linear_heap{};
+    std::array<bool, VRAM_SIZE / CITRA_PAGE_SIZE> vram{};
+    std::array<bool, LINEAR_HEAP_SIZE / CITRA_PAGE_SIZE> linear_heap{};
+    std::array<bool, NEW_LINEAR_HEAP_SIZE / CITRA_PAGE_SIZE> new_linear_heap{};
 
     static_assert(sizeof(bool) == 1);
 
     friend class boost::serialization::access;
@@ -147,12 +147,12 @@ public:
         auto& page_table = *process.vm_manager.page_table;
 
         std::size_t remaining_size = size;
-        std::size_t page_index = src_addr >> PAGE_BITS;
-        std::size_t page_offset = src_addr & PAGE_MASK;
+        std::size_t page_index = src_addr >> CITRA_PAGE_BITS;
+        std::size_t page_offset = src_addr & CITRA_PAGE_MASK;
 
         while (remaining_size > 0) {
-            const std::size_t copy_amount = std::min(PAGE_SIZE - page_offset, remaining_size);
-            const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
+            const std::size_t copy_amount =
                std::min(CITRA_PAGE_SIZE - page_offset, remaining_size);
+            const VAddr current_vaddr = static_cast<VAddr>((page_index << CITRA_PAGE_BITS) + page_offset);
 
             switch (page_table.attributes[page_index]) {
             case PageType::Unmapped: {
@@ -356,10 +356,10 @@ std::shared_ptr<PageTable> MemorySystem::GetCurrentPageTable() const {
 
 void MemorySystem::MapPages(PageTable& page_table, u32 base, u32 size, MemoryRef memory,
                             PageType type) {
-    LOG_DEBUG(HW_Memory, "Mapping {} onto {:08X}-{:08X}", (void*)memory.GetPtr(), base * PAGE_SIZE,
-              (base + size) * PAGE_SIZE);
+    LOG_DEBUG(HW_Memory, "Mapping {} onto {:08X}-{:08X}", (void*)memory.GetPtr(),
+              base * CITRA_PAGE_SIZE, (base + size) * CITRA_PAGE_SIZE);
 
-    RasterizerFlushVirtualRegion(base << PAGE_BITS, size * PAGE_SIZE,
+    RasterizerFlushVirtualRegion(base << CITRA_PAGE_BITS, size * CITRA_PAGE_SIZE,
                                  FlushMode::FlushAndInvalidate);
 
     u32 end = base + size;
@@ -370,36 +370,36 @@ void MemorySystem::MapPages(PageTable& page_table, u32 base, u32 size, MemoryRef
         page_table.pointers[base] = memory;
 
         // If the memory to map is already rasterizer-cached, mark the page
-        if (type == PageType::Memory && impl->cache_marker.IsCached(base * PAGE_SIZE)) {
+        if (type == PageType::Memory && impl->cache_marker.IsCached(base * CITRA_PAGE_SIZE)) {
             page_table.attributes[base] = PageType::RasterizerCachedMemory;
             page_table.pointers[base] = nullptr;
         }
 
         base += 1;
-        if (memory != nullptr && memory.GetSize() > PAGE_SIZE)
-            memory += PAGE_SIZE;
+        if (memory != nullptr && memory.GetSize() > CITRA_PAGE_SIZE)
+            memory += CITRA_PAGE_SIZE;
     }
 }
 
 void MemorySystem::MapMemoryRegion(PageTable& page_table, VAddr base, u32 size, MemoryRef target) {
-    ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:08X}", size);
-    ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:08X}", base);
-    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory);
+    ASSERT_MSG((size & CITRA_PAGE_MASK) == 0, "non-page aligned size: {:08X}", size);
+    ASSERT_MSG((base & CITRA_PAGE_MASK) == 0, "non-page aligned base: {:08X}", base);
+    MapPages(page_table, base / CITRA_PAGE_SIZE, size / CITRA_PAGE_SIZE, target, PageType::Memory);
 }
 
 void MemorySystem::MapIoRegion(PageTable& page_table, VAddr base, u32 size,
                                MMIORegionPointer mmio_handler) {
-    ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:08X}", size);
-    ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:08X}", base);
-    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special);
+    ASSERT_MSG((size & CITRA_PAGE_MASK) == 0, "non-page aligned size: {:08X}", size);
+    ASSERT_MSG((base & CITRA_PAGE_MASK) == 0, "non-page aligned base: {:08X}", base);
+    MapPages(page_table, base / CITRA_PAGE_SIZE, size / CITRA_PAGE_SIZE, nullptr, PageType::Special);
 
     page_table.special_regions.emplace_back(SpecialRegion{base, size, mmio_handler});
 }
 
 void MemorySystem::UnmapRegion(PageTable& page_table, VAddr base, u32 size) {
-    ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:08X}", size);
-    ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:08X}", base);
-    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped);
+    ASSERT_MSG((size & CITRA_PAGE_MASK) == 0, "non-page aligned size: {:08X}", size);
+    ASSERT_MSG((base & CITRA_PAGE_MASK) == 0, "non-page aligned base: {:08X}", base);
+    MapPages(page_table, base / CITRA_PAGE_SIZE, size / CITRA_PAGE_SIZE, nullptr, PageType::Unmapped);
 }
 
 MemoryRef MemorySystem::GetPointerForRasterizerCache(VAddr addr) const {
@@ -422,15 +422,15 @@ T ReadMMIO(MMIORegionPointer mmio_handler, VAddr addr);
 
 template <typename T>
 T MemorySystem::Read(const VAddr vaddr) {
-    const u8* page_pointer = impl->current_page_table->pointers[vaddr >> PAGE_BITS];
+    const u8* page_pointer = impl->current_page_table->pointers[vaddr >> CITRA_PAGE_BITS];
     if (page_pointer) {
         // NOTE: Avoid adding any extra logic to this fast-path block
         T value;
-        std::memcpy(&value, &page_pointer[vaddr & PAGE_MASK], sizeof(T));
+        std::memcpy(&value, &page_pointer[vaddr & CITRA_PAGE_MASK], sizeof(T));
         return value;
     }
 
-    PageType type = impl->current_page_table->attributes[vaddr >> PAGE_BITS];
+    PageType type = impl->current_page_table->attributes[vaddr >> CITRA_PAGE_BITS];
     switch (type) {
     case PageType::Unmapped:
         LOG_ERROR(HW_Memory, "unmapped Read{} @ 0x{:08X} at PC 0x{:08X}", sizeof(T) * 8, vaddr,
@@ -460,14 +460,14 @@ void WriteMMIO(MMIORegionPointer mmio_handler, VAddr addr, const T data);
 
 template <typename T>
 void MemorySystem::Write(const VAddr vaddr, const T data) {
-    u8* page_pointer = impl->current_page_table->pointers[vaddr >> PAGE_BITS];
+    u8* page_pointer = impl->current_page_table->pointers[vaddr >> CITRA_PAGE_BITS];
     if (page_pointer) {
         // NOTE: Avoid adding any extra logic to this fast-path block
-        std::memcpy(&page_pointer[vaddr & PAGE_MASK], &data, sizeof(T));
+        std::memcpy(&page_pointer[vaddr & CITRA_PAGE_MASK], &data, sizeof(T));
         return;
     }
 
-    PageType type = impl->current_page_table->attributes[vaddr >> PAGE_BITS];
+    PageType type = impl->current_page_table->attributes[vaddr >> CITRA_PAGE_BITS];
     switch (type) {
     case PageType::Unmapped:
         LOG_ERROR(HW_Memory, "unmapped Write{} 0x{:08X} @ 0x{:08X} at PC 0x{:08X}",
@@ -492,14 +492,14 @@ void MemorySystem::Write(const VAddr vaddr, const T data) {
 bool MemorySystem::IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) {
     auto& page_table = *process.vm_manager.page_table;
 
-    auto page_pointer = page_table.pointers[vaddr >> PAGE_BITS];
+    auto page_pointer = page_table.pointers[vaddr >> CITRA_PAGE_BITS];
     if (page_pointer)
         return true;
 
-    if (page_table.attributes[vaddr >> PAGE_BITS] == PageType::RasterizerCachedMemory)
+    if (page_table.attributes[vaddr >> CITRA_PAGE_BITS] == PageType::RasterizerCachedMemory)
         return true;
 
-    if (page_table.attributes[vaddr >> PAGE_BITS] != PageType::Special)
+    if (page_table.attributes[vaddr >> CITRA_PAGE_BITS] != PageType::Special)
         return false;
 
     MMIORegionPointer mmio_region = impl->GetMMIOHandler(page_table, vaddr);
@@ -542,12 +542,12 @@ PAddr MemorySystem::ClampPhysicalAddress(PAddr base, PAddr address) const {
 }
 
 u8* MemorySystem::GetPointer(const VAddr vaddr) {
-    u8* page_pointer = impl->current_page_table->pointers[vaddr >> PAGE_BITS];
+    u8* page_pointer = impl->current_page_table->pointers[vaddr >> CITRA_PAGE_BITS];
     if (page_pointer) {
-        return page_pointer + (vaddr & PAGE_MASK);
+        return page_pointer + (vaddr & CITRA_PAGE_MASK);
     }
 
-    if (impl->current_page_table->attributes[vaddr >> PAGE_BITS] ==
+    if (impl->current_page_table->attributes[vaddr >> CITRA_PAGE_BITS] ==
         PageType::RasterizerCachedMemory) {
         return GetPointerForRasterizerCache(vaddr);
     }
@@ -558,12 +558,12 @@ u8* MemorySystem::GetPointer(const VAddr vaddr) {
 }
 
 const u8* MemorySystem::GetPointer(const VAddr vaddr) const {
-    const u8* page_pointer = impl->current_page_table->pointers[vaddr >> PAGE_BITS];
+    const u8* page_pointer = impl->current_page_table->pointers[vaddr >> CITRA_PAGE_BITS];
     if (page_pointer) {
-        return page_pointer + (vaddr & PAGE_MASK);
+        return page_pointer + (vaddr & CITRA_PAGE_MASK);
     }
 
-    if (impl->current_page_table->attributes[vaddr >> PAGE_BITS] ==
+    if (impl->current_page_table->attributes[vaddr >> CITRA_PAGE_BITS] ==
         PageType::RasterizerCachedMemory) {
         return GetPointerForRasterizerCache(vaddr);
     }
@@ -671,14 +671,14 @@ void MemorySystem::RasterizerMarkRegionCached(PAddr start, u32 size, bool cached
         return;
     }
 
-    u32 num_pages = ((start + size - 1) >> PAGE_BITS) - (start >> PAGE_BITS) + 1;
+    u32 num_pages = ((start + size - 1) >> CITRA_PAGE_BITS) - (start >> CITRA_PAGE_BITS) + 1;
     PAddr paddr = start;
 
-    for (unsigned i = 0; i < num_pages; ++i, paddr += PAGE_SIZE) {
+    for (unsigned i = 0; i < num_pages; ++i, paddr += CITRA_PAGE_SIZE) {
         for (VAddr vaddr : PhysicalToVirtualAddressForRasterizer(paddr)) {
             impl->cache_marker.Mark(vaddr, cached);
             for (auto page_table : impl->page_table_list) {
-                PageType& page_type = page_table->attributes[vaddr >> PAGE_BITS];
+                PageType& page_type = page_table->attributes[vaddr >> CITRA_PAGE_BITS];
 
                 if (cached) {
                     // Switch page type to cached if now cached
@@ -689,7 +689,7 @@ void MemorySystem::RasterizerMarkRegionCached(PAddr start, u32 size, bool cached
                         break;
                     case PageType::Memory:
                         page_type = PageType::RasterizerCachedMemory;
-                        page_table->pointers[vaddr >> PAGE_BITS] = nullptr;
+                        page_table->pointers[vaddr >> CITRA_PAGE_BITS] = nullptr;
                         break;
                     default:
                         UNREACHABLE();
@@ -703,8 +703,8 @@ void MemorySystem::RasterizerMarkRegionCached(PAddr start, u32 size, bool cached
                         break;
                     case PageType::RasterizerCachedMemory: {
                         page_type = PageType::Memory;
-                        page_table->pointers[vaddr >> PAGE_BITS] =
-                            GetPointerForRasterizerCache(vaddr & ~PAGE_MASK);
+                        page_table->pointers[vaddr >> CITRA_PAGE_BITS] =
+                            GetPointerForRasterizerCache(vaddr & ~CITRA_PAGE_MASK);
                         break;
                     }
                     default:
@@ -845,7 +845,7 @@ void MemorySystem::WriteBlock(const VAddr dest_addr, const void* src_buffer, con
 
 void MemorySystem::ZeroBlock(const Kernel::Process& process, const VAddr dest_addr,
                              const std::size_t size) {
-    static const std::array<u8, PAGE_SIZE> zeros{0};
+    static const std::array<u8, CITRA_PAGE_SIZE> zeros{0};
 
     impl->WalkBlock(
         process, dest_addr, size,
@@ -879,7 +879,7 @@ void MemorySystem::CopyBlock(const Kernel::Process& process, VAddr dest_addr, VA
 void MemorySystem::CopyBlock(const Kernel::Process& dest_process,
                              const Kernel::Process& src_process, VAddr dest_addr, VAddr src_addr,
                              std::size_t size) {
-    std::array<u8, PAGE_SIZE> copy_buffer{};
+    std::array<u8, CITRA_PAGE_SIZE> copy_buffer{};
 
     impl->WalkBlock(
         src_process, src_addr, size,
diff --git a/src/core/memory.h b/src/core/memory.h
index 8ee3c1d60..25bd9905f 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -24,17 +24,14 @@ class DspInterface;
 
 namespace Memory {
 
-// Are defined in a system header
-#undef PAGE_SIZE
-#undef PAGE_MASK
 /**
  * Page size used by the ARM architecture. This is the smallest granularity with which memory can
  * be mapped.
  */
-constexpr u32 PAGE_SIZE = 0x1000;
-constexpr u32 PAGE_MASK = PAGE_SIZE - 1;
-constexpr int PAGE_BITS = 12;
-constexpr std::size_t PAGE_TABLE_NUM_ENTRIES = 1 << (32 - PAGE_BITS);
+constexpr u32 CITRA_PAGE_SIZE = 0x1000;
+constexpr u32 CITRA_PAGE_MASK = CITRA_PAGE_SIZE - 1;
+constexpr int CITRA_PAGE_BITS = 12;
+constexpr std::size_t PAGE_TABLE_NUM_ENTRIES = 1 << (32 - CITRA_PAGE_BITS);
 
 enum class PageType {
     /// Page is unmapped and should cause an access error.
diff --git a/src/tests/core/hle/kernel/hle_ipc.cpp b/src/tests/core/hle/kernel/hle_ipc.cpp
index b852e565b..a0711f1cf 100644
--- a/src/tests/core/hle/kernel/hle_ipc.cpp
+++ b/src/tests/core/hle/kernel/hle_ipc.cpp
@@ -137,7 +137,7 @@ TEST_CASE("HLERequestContext::PopulateFromIncomingCommandBuffer", "[core][kernel
     }
 
     SECTION("translates StaticBuffer descriptors") {
-        auto mem = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
+        auto mem = std::make_shared<BufferMem>(Memory::CITRA_PAGE_SIZE);
         MemoryRef buffer{mem};
         std::fill(buffer.GetPtr(), buffer.GetPtr() + buffer.GetSize(), 0xAB);
 
@@ -161,7 +161,7 @@ TEST_CASE("HLERequestContext::PopulateFromIncomingCommandBuffer", "[core][kernel
     }
 
    SECTION("translates MappedBuffer descriptors") {
-        auto mem = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
+        auto mem = std::make_shared<BufferMem>(Memory::CITRA_PAGE_SIZE);
         MemoryRef buffer{mem};
         std::fill(buffer.GetPtr(), buffer.GetPtr() + buffer.GetSize(), 0xCD);
 
@@ -187,11 +187,11 @@ TEST_CASE("HLERequestContext::PopulateFromIncomingCommandBuffer", "[core][kernel
     }
 
     SECTION("translates mixed params") {
-        auto mem_static = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
+        auto mem_static = std::make_shared<BufferMem>(Memory::CITRA_PAGE_SIZE);
         MemoryRef buffer_static{mem_static};
         std::fill(buffer_static.GetPtr(), buffer_static.GetPtr() + buffer_static.GetSize(), 0xCE);
 
-        auto mem_mapped = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
+        auto mem_mapped = std::make_shared<BufferMem>(Memory::CITRA_PAGE_SIZE);
         MemoryRef buffer_mapped{mem_mapped};
         std::fill(buffer_mapped.GetPtr(), buffer_mapped.GetPtr() + buffer_mapped.GetSize(), 0xDF);
 
@@ -321,12 +321,12 @@ TEST_CASE("HLERequestContext::WriteToOutgoingCommandBuffer", "[core][kernel]") {
     }
 
     SECTION("translates StaticBuffer descriptors") {
-        std::vector<u8> input_buffer(Memory::PAGE_SIZE);
+        std::vector<u8> input_buffer(Memory::CITRA_PAGE_SIZE);
         std::fill(input_buffer.begin(), input_buffer.end(), 0xAB);
 
         context.AddStaticBuffer(0, input_buffer);
 
-        auto output_mem = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
+        auto output_mem = std::make_shared<BufferMem>(Memory::CITRA_PAGE_SIZE);
         MemoryRef output_buffer{output_mem};
 
         VAddr target_address = 0x10000000;
@@ -355,10 +355,10 @@ TEST_CASE("HLERequestContext::WriteToOutgoingCommandBuffer", "[core][kernel]") {
     }
 
     SECTION("translates StaticBuffer descriptors") {
-        std::vector<u8> input_buffer(Memory::PAGE_SIZE);
+        std::vector<u8> input_buffer(Memory::CITRA_PAGE_SIZE);
         std::fill(input_buffer.begin(), input_buffer.end(), 0xAB);
 
-        auto output_mem = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
+        auto output_mem = std::make_shared<BufferMem>(Memory::CITRA_PAGE_SIZE);
         MemoryRef output_buffer{output_mem};
 
         VAddr target_address = 0x10000000;
diff --git a/src/tests/core/memory/vm_manager.cpp b/src/tests/core/memory/vm_manager.cpp
index 92e013cdb..f8244b357 100644
--- a/src/tests/core/memory/vm_manager.cpp
+++ b/src/tests/core/memory/vm_manager.cpp
@@ -10,7 +10,7 @@
 #include "core/memory.h"
 
 TEST_CASE("Memory Basics", "[kernel][memory]") {
-    auto mem = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
+    auto mem = std::make_shared<BufferMem>(Memory::CITRA_PAGE_SIZE);
     MemoryRef block{mem};
     Memory::MemorySystem memory;
     SECTION("mapping memory") {
diff --git a/src/video_core/rasterizer_accelerated.cpp b/src/video_core/rasterizer_accelerated.cpp
index ecacabd12..e0d25cca5 100644
--- a/src/video_core/rasterizer_accelerated.cpp
+++ b/src/video_core/rasterizer_accelerated.cpp
@@ -10,8 +10,8 @@
 namespace VideoCore {
 
 void RasterizerAccelerated::UpdatePagesCachedCount(PAddr addr, u32 size, int delta) {
-    const u32 page_start = addr >> Memory::PAGE_BITS;
-    const u32 page_end = ((addr + size - 1) >> Memory::PAGE_BITS);
+    const u32 page_start = addr >> Memory::CITRA_PAGE_BITS;
+    const u32 page_end = ((addr + size - 1) >> Memory::CITRA_PAGE_BITS);
 
     u32 uncache_start_addr = 0;
     u32 cache_start_addr = 0;
@@ -36,10 +36,10 @@ void RasterizerAccelerated::UpdatePagesCachedCount(PAddr addr, u32 size, int del
         // Assume delta is either -1 or 1
         if (count == 0) {
             if (uncache_bytes == 0) {
-                uncache_start_addr = page << Memory::PAGE_BITS;
+                uncache_start_addr = page << Memory::CITRA_PAGE_BITS;
             }
-            uncache_bytes += Memory::PAGE_SIZE;
+            uncache_bytes += Memory::CITRA_PAGE_SIZE;
         } else if (uncache_bytes > 0) {
             VideoCore::g_memory->RasterizerMarkRegionCached(uncache_start_addr, uncache_bytes,
                                                             false);
@@ -48,10 +48,10 @@ void RasterizerAccelerated::UpdatePagesCachedCount(PAddr addr, u32 size, int del
 
         if (count == 1 && delta > 0) {
            if (cache_bytes == 0) {
-                cache_start_addr = page << Memory::PAGE_BITS;
+                cache_start_addr = page << Memory::CITRA_PAGE_BITS;
             }
-            cache_bytes += Memory::PAGE_SIZE;
+            cache_bytes += Memory::CITRA_PAGE_SIZE;
         } else if (cache_bytes > 0) {
             VideoCore::g_memory->RasterizerMarkRegionCached(cache_start_addr, cache_bytes, true);
 
diff --git a/src/video_core/rasterizer_cache/rasterizer_cache.cpp b/src/video_core/rasterizer_cache/rasterizer_cache.cpp
index ff5f9ab09..68b7e5813 100644
--- a/src/video_core/rasterizer_cache/rasterizer_cache.cpp
+++ b/src/video_core/rasterizer_cache/rasterizer_cache.cpp
@@ -893,8 +893,8 @@ void RasterizerCache::ClearAll(bool flush) {
     for (auto& pair : RangeFromInterval(cached_pages, flush_interval)) {
         const auto interval = pair.first & flush_interval;
-        const PAddr interval_start_addr = boost::icl::first(interval) << Memory::PAGE_BITS;
-        const PAddr interval_end_addr = boost::icl::last_next(interval) << Memory::PAGE_BITS;
+        const PAddr interval_start_addr = boost::icl::first(interval) << Memory::CITRA_PAGE_BITS;
+        const PAddr interval_end_addr = boost::icl::last_next(interval) << Memory::CITRA_PAGE_BITS;
         const u32 interval_size = interval_end_addr - interval_start_addr;
 
         VideoCore::g_memory->RasterizerMarkRegionCached(interval_start_addr, interval_size, false);
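
Note for reviewers: the failure mode dodged here is plain textual macro substitution. Below is a minimal stand-alone sketch of the collision and of the page math the renamed constants are used for (not Citra code; the 4096 value and the main() driver are illustrative assumptions):

    // Stand-in for a system header: musl's limits.h and the Mach headers
    // pulled in on OSX can both leak an object-like PAGE_SIZE macro.
    #define PAGE_SIZE 4096

    #include <cstdint>

    namespace Memory {
    // The pre-rename declaration would be rewritten by the preprocessor to
    //   constexpr std::uint32_t 4096 = 0x1000;   // hard compile error
    // constexpr std::uint32_t PAGE_SIZE = 0x1000;

    // The prefixed names are untouched by the macro and always compile:
    constexpr std::uint32_t CITRA_PAGE_SIZE = 0x1000;
    constexpr std::uint32_t CITRA_PAGE_MASK = CITRA_PAGE_SIZE - 1;
    constexpr int CITRA_PAGE_BITS = 12;
    } // namespace Memory

    int main() {
        // The page-math idioms this patch touches, spelled out once:
        std::uint32_t addr = 0x12345678;
        std::uint32_t page_base = addr & ~Memory::CITRA_PAGE_MASK;  // 0x12345000
        std::uint32_t page_index = addr >> Memory::CITRA_PAGE_BITS; // 0x12345
        return (page_base == (page_index << Memory::CITRA_PAGE_BITS)) ? 0 : 1;
    }

The previous workaround (#undef PAGE_SIZE / PAGE_MASK in common/microprofile.h and core/memory.h, both removed by this patch) only protected translation units that included those headers after the offending system header; renaming the constants removes the collision everywhere.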