diff --git a/src/core/arm/dynarmic/arm_dynarmic.cpp b/src/core/arm/dynarmic/arm_dynarmic.cpp
index 0a0b91590..34c5aa381 100644
--- a/src/core/arm/dynarmic/arm_dynarmic.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic.cpp
@@ -56,7 +56,9 @@ static Dynarmic::UserCallbacks GetUserCallbacks(
     user_callbacks.memory.Write16 = &Memory::Write16;
     user_callbacks.memory.Write32 = &Memory::Write32;
     user_callbacks.memory.Write64 = &Memory::Write64;
-    user_callbacks.page_table = Memory::GetCurrentPageTablePointers();
+    // TODO(Subv): Re-add the page table pointers once dynarmic supports switching page tables at
+    // runtime.
+    user_callbacks.page_table = nullptr;
     user_callbacks.coprocessors[15] = std::make_shared<DynarmicCP15>(interpeter_state);
     return user_callbacks;
 }
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 5332318cf..59b8768e7 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -137,7 +137,6 @@ void System::Reschedule() {
 }
 
 System::ResultStatus System::Init(EmuWindow* emu_window, u32 system_mode) {
-    Memory::InitMemoryMap();
     LOG_DEBUG(HW_Memory, "initialized OK");
 
     if (Settings::values.use_cpu_jit) {
diff --git a/src/core/hle/kernel/memory.cpp b/src/core/hle/kernel/memory.cpp
index 496d07cb5..7f27e9655 100644
--- a/src/core/hle/kernel/memory.cpp
+++ b/src/core/hle/kernel/memory.cpp
@@ -8,7 +8,6 @@
 #include <memory>
 #include <utility>
 #include <vector>
-#include "audio_core/audio_core.h"
 #include "common/assert.h"
 #include "common/common_types.h"
 #include "common/logging/log.h"
@@ -24,7 +23,7 @@
 
 namespace Kernel {
 
-static MemoryRegionInfo memory_regions[3];
+MemoryRegionInfo memory_regions[3];
 
 /// Size of the APPLICATION, SYSTEM and BASE memory regions (respectively) for each system
 /// memory configuration type.
@@ -96,9 +95,6 @@ MemoryRegionInfo* GetMemoryRegion(MemoryRegion region) {
     }
 }
 
-std::array<u8, Memory::VRAM_SIZE> vram;
-std::array<u8, Memory::N3DS_EXTRA_RAM_SIZE> n3ds_extra_ram;
-
 void HandleSpecialMapping(VMManager& address_space, const AddressMapping& mapping) {
     using namespace Memory;
 
@@ -143,30 +139,14 @@ void HandleSpecialMapping(VMManager& address_space, const AddressMapping& mappin
         return;
     }
 
-    // TODO(yuriks): Use GetPhysicalPointer when that becomes independent of the virtual
-    // mappings.
-    u8* target_pointer = nullptr;
-    switch (area->paddr_base) {
-    case VRAM_PADDR:
-        target_pointer = vram.data();
-        break;
-    case DSP_RAM_PADDR:
-        target_pointer = AudioCore::GetDspMemory().data();
-        break;
-    case N3DS_EXTRA_RAM_PADDR:
-        target_pointer = n3ds_extra_ram.data();
-        break;
-    default:
-        UNREACHABLE();
-    }
+    u8* target_pointer = Memory::GetPhysicalPointer(area->paddr_base + offset_into_region);
 
     // TODO(yuriks): This flag seems to have some other effect, but it's unknown what
     MemoryState memory_state = mapping.unk_flag ? MemoryState::Static : MemoryState::IO;
 
-    auto vma = address_space
-                   .MapBackingMemory(mapping.address, target_pointer + offset_into_region,
-                                     mapping.size, memory_state)
-                   .Unwrap();
+    auto vma =
+        address_space.MapBackingMemory(mapping.address, target_pointer, mapping.size, memory_state)
+            .Unwrap();
     address_space.Reprotect(vma,
                             mapping.read_only ? VMAPermission::Read : VMAPermission::ReadWrite);
 }
diff --git a/src/core/hle/kernel/memory.h b/src/core/hle/kernel/memory.h
index 08c1a9989..da6bb3563 100644
--- a/src/core/hle/kernel/memory.h
+++ b/src/core/hle/kernel/memory.h
@@ -26,4 +26,6 @@ MemoryRegionInfo* GetMemoryRegion(MemoryRegion region);
 
 void HandleSpecialMapping(VMManager& address_space, const AddressMapping& mapping);
 void MapSharedPages(VMManager& address_space);
+
+extern MemoryRegionInfo memory_regions[3];
 } // namespace Kernel
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index b957c45dd..324415a36 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -171,6 +171,8 @@ static void SwitchContext(Thread* new_thread) {
         // Cancel any outstanding wakeup events for this thread
         CoreTiming::UnscheduleEvent(ThreadWakeupEventType, new_thread->callback_handle);
 
+        auto previous_process = Kernel::g_current_process;
+
         current_thread = new_thread;
 
         ready_queue.remove(new_thread->current_priority, new_thread);
@@ -178,8 +180,18 @@ static void SwitchContext(Thread* new_thread) {
 
         Core::CPU().LoadContext(new_thread->context);
         Core::CPU().SetCP15Register(CP15_THREAD_URO, new_thread->GetTLSAddress());
+
+        if (previous_process != current_thread->owner_process) {
+            Kernel::g_current_process = current_thread->owner_process;
+            Memory::current_page_table = &Kernel::g_current_process->vm_manager.page_table;
+            // We have switched processes and thus page tables; clear the instruction cache so we
+            // don't keep stale data from the previous process.
+            Core::CPU().ClearInstructionCache();
+        }
     } else {
         current_thread = nullptr;
+        // Note: We do not reset the current process and current page table when idling because
+        // technically we haven't changed processes; our threads are just paused.
     }
 }
 
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index cef1f7fa8..7a007c065 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -56,6 +56,10 @@ void VMManager::Reset() {
     initial_vma.size = MAX_ADDRESS;
     vma_map.emplace(initial_vma.base, initial_vma);
 
+    page_table.pointers.fill(nullptr);
+    page_table.attributes.fill(Memory::PageType::Unmapped);
+    page_table.cached_res_count.fill(0);
+
     UpdatePageTableForVMA(initial_vma);
 }
 
@@ -328,16 +332,17 @@ VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) {
 void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
     switch (vma.type) {
     case VMAType::Free:
-        Memory::UnmapRegion(vma.base, vma.size);
+        Memory::UnmapRegion(page_table, vma.base, vma.size);
         break;
     case VMAType::AllocatedMemoryBlock:
-        Memory::MapMemoryRegion(vma.base, vma.size, vma.backing_block->data() + vma.offset);
+        Memory::MapMemoryRegion(page_table, vma.base, vma.size,
+                                vma.backing_block->data() + vma.offset);
         break;
     case VMAType::BackingMemory:
-        Memory::MapMemoryRegion(vma.base, vma.size, vma.backing_memory);
+        Memory::MapMemoryRegion(page_table, vma.base, vma.size, vma.backing_memory);
         break;
     case VMAType::MMIO:
-        Memory::MapIoRegion(vma.base, vma.size, vma.mmio_handler);
+        Memory::MapIoRegion(page_table, vma.base, vma.size, vma.mmio_handler);
         break;
     }
 }
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 38e0d74d0..1302527bb 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -9,6 +9,7 @@
 #include <vector>
 #include "common/common_types.h"
 #include "core/hle/result.h"
+#include "core/memory.h"
 #include "core/mmio.h"
 
 namespace Kernel {
@@ -102,7 +103,6 @@ struct VirtualMemoryArea {
  *  - http://duartes.org/gustavo/blog/post/page-cache-the-affair-between-memory-and-files/
  */
 class VMManager final {
-    // TODO(yuriks): Make page tables switchable to support multiple VMManagers
 public:
     /**
      * The maximum amount of address space managed by the kernel. Addresses above this are never
@@ -184,6 +184,10 @@ public:
     /// Dumps the address space layout to the log, for debugging
     void LogLayout(Log::Level log_level) const;
 
+    /// Each VMManager has its own page table, which becomes the active page table when the owning
+    /// process is scheduled.
+    Memory::PageTable page_table;
+
 private:
     using VMAIter = decltype(vma_map)::iterator;
 
diff --git a/src/core/loader/3dsx.cpp b/src/core/loader/3dsx.cpp
index 74e336487..69cdc0867 100644
--- a/src/core/loader/3dsx.cpp
+++ b/src/core/loader/3dsx.cpp
@@ -270,6 +270,7 @@ ResultStatus AppLoader_THREEDSX::Load() {
     Kernel::g_current_process = Kernel::Process::Create(std::move(codeset));
     Kernel::g_current_process->svc_access_mask.set();
     Kernel::g_current_process->address_mappings = default_address_mappings;
+    Memory::current_page_table = &Kernel::g_current_process->vm_manager.page_table;
 
     // Attach the default resource limit (APPLICATION) to the process
     Kernel::g_current_process->resource_limit =
diff --git a/src/core/loader/elf.cpp b/src/core/loader/elf.cpp
index cfcde9167..2f27606a1 100644
--- a/src/core/loader/elf.cpp
+++ b/src/core/loader/elf.cpp
@@ -397,6 +397,7 @@ ResultStatus AppLoader_ELF::Load() {
     Kernel::g_current_process = Kernel::Process::Create(std::move(codeset));
     Kernel::g_current_process->svc_access_mask.set();
     Kernel::g_current_process->address_mappings = default_address_mappings;
+    Memory::current_page_table = &Kernel::g_current_process->vm_manager.page_table;
 
     // Attach the default resource limit (APPLICATION) to the process
     Kernel::g_current_process->resource_limit =
diff --git a/src/core/loader/ncch.cpp b/src/core/loader/ncch.cpp
index 7aff7f29b..79ea50147 100644
--- a/src/core/loader/ncch.cpp
+++ b/src/core/loader/ncch.cpp
@@ -172,6 +172,7 @@ ResultStatus AppLoader_NCCH::LoadExec() {
         codeset->memory = std::make_shared<std::vector<u8>>(std::move(code));
 
         Kernel::g_current_process = Kernel::Process::Create(std::move(codeset));
+        Memory::current_page_table = &Kernel::g_current_process->vm_manager.page_table;
 
         // Attach a resource limit to the process based on the resource limit category
         Kernel::g_current_process->resource_limit =
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 097bc5b47..68a6b1ac2 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -4,83 +4,31 @@
 
 #include <array>
 #include <cstring>
+#include "audio_core/audio_core.h"
 #include "common/assert.h"
 #include "common/common_types.h"
 #include "common/logging/log.h"
 #include "common/swap.h"
+#include "core/hle/kernel/memory.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/lock.h"
 #include "core/memory.h"
 #include "core/memory_setup.h"
-#include "core/mmio.h"
 #include "video_core/renderer_base.h"
 #include "video_core/video_core.h"
 
 namespace Memory {
 
-enum class PageType {
-    /// Page is unmapped and should cause an access error.
-    Unmapped,
-    /// Page is mapped to regular memory. This is the only type you can get pointers to.
-    Memory,
-    /// Page is mapped to regular memory, but also needs to check for rasterizer cache flushing and
-    /// invalidation
-    RasterizerCachedMemory,
-    /// Page is mapped to a I/O region. Writing and reading to this page is handled by functions.
-    Special,
-    /// Page is mapped to a I/O region, but also needs to check for rasterizer cache flushing and
-    /// invalidation
-    RasterizerCachedSpecial,
-};
+static std::array<u8, Memory::VRAM_SIZE> vram;
+static std::array<u8, Memory::N3DS_EXTRA_RAM_SIZE> n3ds_extra_ram;
 
-struct SpecialRegion {
-    VAddr base;
-    u32 size;
-    MMIORegionPointer handler;
-};
-
-/**
- * A (reasonably) fast way of allowing switchable and remappable process address spaces. It loosely
- * mimics the way a real CPU page table works, but instead is optimized for minimal decoding and
- * fetching requirements when accessing. In the usual case of an access to regular memory, it only
- * requires an indexed fetch and a check for NULL.
- */
-struct PageTable {
-    /**
-     * Array of memory pointers backing each page. An entry can only be non-null if the
-     * corresponding entry in the `attributes` array is of type `Memory`.
-     */
-    std::array<u8*, PAGE_TABLE_NUM_ENTRIES> pointers;
-
-    /**
-     * Contains MMIO handlers that back memory regions whose entries in the `attribute` array is of
-     * type `Special`.
-     */
-    std::vector<SpecialRegion> special_regions;
-
-    /**
-     * Array of fine grained page attributes. If it is set to any value other than `Memory`, then
-     * the corresponding entry in `pointers` MUST be set to null.
-     */
-    std::array<PageType, PAGE_TABLE_NUM_ENTRIES> attributes;
-
-    /**
-     * Indicates the number of externally cached resources touching a page that should be
-     * flushed before the memory is accessed
-     */
-    std::array<u8, PAGE_TABLE_NUM_ENTRIES> cached_res_count;
-};
-
-/// Singular page table used for the singleton process
-static PageTable main_page_table;
-/// Currently active page table
-static PageTable* current_page_table = &main_page_table;
+PageTable* current_page_table = nullptr;
 
 std::array<u8*, PAGE_TABLE_NUM_ENTRIES>* GetCurrentPageTablePointers() {
     return &current_page_table->pointers;
 }
 
-static void MapPages(u32 base, u32 size, u8* memory, PageType type) {
+static void MapPages(PageTable& page_table, u32 base, u32 size, u8* memory, PageType type) {
     LOG_DEBUG(HW_Memory, "Mapping %p onto %08X-%08X", memory, base * PAGE_SIZE,
               (base + size) * PAGE_SIZE);
 
@@ -91,9 +39,9 @@ static void MapPages(u32 base, u32 size, u8* memory, PageType type) {
     while (base != end) {
         ASSERT_MSG(base < PAGE_TABLE_NUM_ENTRIES, "out of range mapping at %08X", base);
 
-        current_page_table->attributes[base] = type;
-        current_page_table->pointers[base] = memory;
-        current_page_table->cached_res_count[base] = 0;
+        page_table.attributes[base] = type;
+        page_table.pointers[base] = memory;
+        page_table.cached_res_count[base] = 0;
 
         base += 1;
         if (memory != nullptr)
@@ -101,30 +49,24 @@ static void MapPages(u32 base, u32 size, u8* memory, PageType type) {
     }
 }
 
-void InitMemoryMap() {
-    main_page_table.pointers.fill(nullptr);
-    main_page_table.attributes.fill(PageType::Unmapped);
-    main_page_table.cached_res_count.fill(0);
-}
-
-void MapMemoryRegion(VAddr base, u32 size, u8* target) {
+void MapMemoryRegion(PageTable& page_table, VAddr base, u32 size, u8* target) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
-    MapPages(base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory);
+    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory);
 }
 
-void MapIoRegion(VAddr base, u32 size, MMIORegionPointer mmio_handler) {
+void MapIoRegion(PageTable& page_table, VAddr base, u32 size, MMIORegionPointer mmio_handler) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
-    MapPages(base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special);
+    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special);
 
-    current_page_table->special_regions.emplace_back(SpecialRegion{base, size, mmio_handler});
+    page_table.special_regions.emplace_back(SpecialRegion{base, size, mmio_handler});
 }
 
-void UnmapRegion(VAddr base, u32 size) {
+void UnmapRegion(PageTable& page_table, VAddr base, u32 size) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
-    MapPages(base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped);
+    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped);
 }
 
 /**
@@ -273,8 +215,7 @@ bool IsValidVirtualAddress(const VAddr vaddr) {
 }
 
 bool IsValidPhysicalAddress(const PAddr paddr) {
-    boost::optional<VAddr> vaddr = PhysicalToVirtualAddress(paddr);
-    return vaddr && IsValidVirtualAddress(*vaddr);
+    return GetPhysicalPointer(paddr) != nullptr;
 }
 
 u8* GetPointer(const VAddr vaddr) {
@@ -306,9 +247,63 @@ std::string ReadCString(VAddr vaddr, std::size_t max_length) {
 }
 
 u8* GetPhysicalPointer(PAddr address) {
-    // TODO(Subv): This call should not go through the application's memory mapping.
-    boost::optional<VAddr> vaddr = PhysicalToVirtualAddress(address);
-    return vaddr ? GetPointer(*vaddr) : nullptr;
+    struct MemoryArea {
+        PAddr paddr_base;
+        u32 size;
+    };
+
+    static constexpr MemoryArea memory_areas[] = {
+        {VRAM_PADDR, VRAM_SIZE},
+        {IO_AREA_PADDR, IO_AREA_SIZE},
+        {DSP_RAM_PADDR, DSP_RAM_SIZE},
+        {FCRAM_PADDR, FCRAM_N3DS_SIZE},
+        {N3DS_EXTRA_RAM_PADDR, N3DS_EXTRA_RAM_SIZE},
+    };
+
+    const auto area =
+        std::find_if(std::begin(memory_areas), std::end(memory_areas), [&](const auto& area) {
+            return address >= area.paddr_base && address < area.paddr_base + area.size;
+        });
+
+    if (area == std::end(memory_areas)) {
+        LOG_ERROR(HW_Memory, "unknown GetPhysicalPointer @ 0x%08X", address);
+        return nullptr;
+    }
+
+    if (area->paddr_base == IO_AREA_PADDR) {
+        LOG_ERROR(HW_Memory, "MMIO mappings are not supported yet. phys_addr=0x%08X", address);
+        return nullptr;
+    }
+
+    u32 offset_into_region = address - area->paddr_base;
+
+    u8* target_pointer = nullptr;
+    switch (area->paddr_base) {
+    case VRAM_PADDR:
+        target_pointer = vram.data() + offset_into_region;
+        break;
+    case DSP_RAM_PADDR:
+        target_pointer = AudioCore::GetDspMemory().data() + offset_into_region;
+        break;
+    case FCRAM_PADDR:
+        for (const auto& region : Kernel::memory_regions) {
+            if (offset_into_region >= region.base &&
+                offset_into_region < region.base + region.size) {
+                target_pointer =
+                    region.linear_heap_memory->data() + offset_into_region - region.base;
+                break;
+            }
+        }
+        ASSERT_MSG(target_pointer != nullptr, "Invalid FCRAM address");
+        break;
+    case N3DS_EXTRA_RAM_PADDR:
+        target_pointer = n3ds_extra_ram.data() + offset_into_region;
+        break;
+    default:
+        UNREACHABLE();
+    }
+
+    return target_pointer;
 }
 
 void RasterizerMarkRegionCached(PAddr start, u32 size, int count_delta) {
diff --git a/src/core/memory.h b/src/core/memory.h
index c8c56babd..b228a48c2 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -7,8 +7,10 @@
 #include <array>
 #include <cstddef>
 #include <string>
+#include <vector>
 #include <boost/optional.hpp>
 #include "common/common_types.h"
+#include "core/mmio.h"
 
 namespace Memory {
 
@@ -21,6 +23,59 @@ const u32 PAGE_MASK = PAGE_SIZE - 1;
 const int PAGE_BITS = 12;
 const size_t PAGE_TABLE_NUM_ENTRIES = 1 << (32 - PAGE_BITS);
 
+enum class PageType {
+    /// Page is unmapped and should cause an access error.
+    Unmapped,
+    /// Page is mapped to regular memory. This is the only type you can get pointers to.
+    Memory,
+    /// Page is mapped to regular memory, but also needs to check for rasterizer cache flushing and
+    /// invalidation.
+    RasterizerCachedMemory,
+    /// Page is mapped to an I/O region. Reads and writes to this page are handled by functions.
+    Special,
+    /// Page is mapped to an I/O region, but also needs to check for rasterizer cache flushing and
+    /// invalidation.
+    RasterizerCachedSpecial,
+};
+
+struct SpecialRegion {
+    VAddr base;
+    u32 size;
+    MMIORegionPointer handler;
+};
+
+/**
+ * A (reasonably) fast way of allowing switchable and remappable process address spaces. It loosely
+ * mimics the way a real CPU page table works, but instead is optimized for minimal decoding and
+ * fetching requirements when accessing. In the usual case of an access to regular memory, it only
+ * requires an indexed fetch and a check for NULL.
+ */
+struct PageTable {
+    /**
+     * Array of memory pointers backing each page. An entry can only be non-null if the
+     * corresponding entry in the `attributes` array is of type `Memory`.
+     */
+    std::array<u8*, PAGE_TABLE_NUM_ENTRIES> pointers;
+
+    /**
+     * Contains MMIO handlers that back memory regions whose entries in the `attributes` array are
+     * of type `Special`.
+     */
+    std::vector<SpecialRegion> special_regions;
+
+    /**
+     * Array of fine-grained page attributes. If an entry is set to any value other than `Memory`,
+     * then the corresponding entry in `pointers` MUST be set to null.
+     */
+    std::array<PageType, PAGE_TABLE_NUM_ENTRIES> attributes;
+
+    /**
+     * Indicates the number of externally cached resources touching a page that should be
+     * flushed before the memory is accessed.
+     */
+    std::array<u8, PAGE_TABLE_NUM_ENTRIES> cached_res_count;
+};
+
 /// Physical memory regions as seen from the ARM11
 enum : PAddr {
     /// IO register area
@@ -126,6 +181,9 @@ enum : VAddr {
     NEW_LINEAR_HEAP_VADDR_END = NEW_LINEAR_HEAP_VADDR + NEW_LINEAR_HEAP_SIZE,
 };
 
+/// Currently active page table
+extern PageTable* current_page_table;
+
 bool IsValidVirtualAddress(const VAddr addr);
 bool IsValidPhysicalAddress(const PAddr addr);
 
@@ -169,8 +227,6 @@ boost::optional<VAddr> PhysicalToVirtualAddress(PAddr addr);
 
 /**
  * Gets a pointer to the memory region beginning at the specified physical address.
- *
- * @note This is currently implemented using PhysicalToVirtualAddress().
  */
 u8* GetPhysicalPointer(PAddr address);
 
@@ -209,4 +265,4 @@ void RasterizerFlushVirtualRegion(VAddr start, u32 size, FlushMode mode);
  * retrieve the current page table for that purpose.
  */
 std::array<u8*, PAGE_TABLE_NUM_ENTRIES>* GetCurrentPageTablePointers();
-}
+} // namespace Memory
diff --git a/src/core/memory_setup.h b/src/core/memory_setup.h
index 3fdf3a87d..c58baa50b 100644
--- a/src/core/memory_setup.h
+++ b/src/core/memory_setup.h
@@ -9,24 +9,24 @@
 
 namespace Memory {
 
-void InitMemoryMap();
-
 /**
  * Maps an allocated buffer onto a region of the emulated process address space.
  *
+ * @param page_table The page table of the emulated process.
  * @param base The address to start mapping at. Must be page-aligned.
  * @param size The amount of bytes to map. Must be page-aligned.
  * @param target Buffer with the memory backing the mapping. Must be of length at least `size`.
  */
-void MapMemoryRegion(VAddr base, u32 size, u8* target);
+void MapMemoryRegion(PageTable& page_table, VAddr base, u32 size, u8* target);
 
 /**
  * Maps a region of the emulated process address space as a IO region.
+ * @param page_table The page table of the emulated process.
  * @param base The address to start mapping at. Must be page-aligned.
  * @param size The amount of bytes to map. Must be page-aligned.
  * @param mmio_handler The handler that backs the mapping.
  */
-void MapIoRegion(VAddr base, u32 size, MMIORegionPointer mmio_handler);
+void MapIoRegion(PageTable& page_table, VAddr base, u32 size, MMIORegionPointer mmio_handler);
 
-void UnmapRegion(VAddr base, u32 size);
+void UnmapRegion(PageTable& page_table, VAddr base, u32 size);
 }
diff --git a/src/tests/core/arm/arm_test_common.cpp b/src/tests/core/arm/arm_test_common.cpp
index 1df6c5677..8384ce744 100644
--- a/src/tests/core/arm/arm_test_common.cpp
+++ b/src/tests/core/arm/arm_test_common.cpp
@@ -3,20 +3,30 @@
 // Refer to the license.txt file included.
 
 #include "core/core.h"
+#include "core/memory.h"
 #include "core/memory_setup.h"
 #include "tests/core/arm/arm_test_common.h"
 
 namespace ArmTests {
 
+static Memory::PageTable page_table;
+
 TestEnvironment::TestEnvironment(bool mutable_memory_)
     : mutable_memory(mutable_memory_), test_memory(std::make_shared<TestMemory>(this)) {
-    Memory::MapIoRegion(0x00000000, 0x80000000, test_memory);
-    Memory::MapIoRegion(0x80000000, 0x80000000, test_memory);
+
+    page_table.pointers.fill(nullptr);
+    page_table.attributes.fill(Memory::PageType::Unmapped);
+    page_table.cached_res_count.fill(0);
+
+    Memory::MapIoRegion(page_table, 0x00000000, 0x80000000, test_memory);
+    Memory::MapIoRegion(page_table, 0x80000000, 0x80000000, test_memory);
+
+    Memory::current_page_table = &page_table;
 }
 
 TestEnvironment::~TestEnvironment() {
-    Memory::UnmapRegion(0x80000000, 0x80000000);
-    Memory::UnmapRegion(0x00000000, 0x80000000);
+    Memory::UnmapRegion(page_table, 0x80000000, 0x80000000);
+    Memory::UnmapRegion(page_table, 0x00000000, 0x80000000);
 }
 
 void TestEnvironment::SetMemory64(VAddr vaddr, u64 value) {