diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 721685bb7..506885659 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -179,6 +179,8 @@ add_library(core STATIC
     hle/kernel/k_client_port.h
     hle/kernel/k_client_session.cpp
     hle/kernel/k_client_session.h
+    hle/kernel/k_code_memory.cpp
+    hle/kernel/k_code_memory.h
     hle/kernel/k_condition_variable.cpp
     hle/kernel/k_condition_variable.h
     hle/kernel/k_event.cpp
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp
index 8ff0f695d..36fc0944a 100644
--- a/src/core/hle/kernel/init/init_slab_setup.cpp
+++ b/src/core/hle/kernel/init/init_slab_setup.cpp
@@ -9,6 +9,7 @@
 #include "core/core.h"
 #include "core/hardware_properties.h"
 #include "core/hle/kernel/init/init_slab_setup.h"
+#include "core/hle/kernel/k_code_memory.h"
 #include "core/hle/kernel/k_event.h"
 #include "core/hle/kernel/k_memory_layout.h"
 #include "core/hle/kernel/k_memory_manager.h"
@@ -32,6 +33,7 @@ namespace Kernel::Init {
     HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__)                                             \
     HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__)                             \
     HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__)                         \
+    HANDLER(KCodeMemory, (SLAB_COUNT(KCodeMemory)), ##__VA_ARGS__)                                 \
     HANDLER(KSession, (SLAB_COUNT(KSession)), ##__VA_ARGS__)                                       \
     HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__)
 
diff --git a/src/core/hle/kernel/k_class_token.cpp b/src/core/hle/kernel/k_class_token.cpp
index 0be0027be..21e2fe494 100644
--- a/src/core/hle/kernel/k_class_token.cpp
+++ b/src/core/hle/kernel/k_class_token.cpp
@@ -6,6 +6,7 @@
 #include "core/hle/kernel/k_class_token.h"
 #include "core/hle/kernel/k_client_port.h"
 #include "core/hle/kernel/k_client_session.h"
+#include "core/hle/kernel/k_code_memory.h"
 #include "core/hle/kernel/k_event.h"
 #include "core/hle/kernel/k_port.h"
 #include "core/hle/kernel/k_process.h"
@@ -48,7 +49,7 @@ static_assert(ClassToken<KWritableEvent> == 0b10001001'00000000);
 static_assert(ClassToken<KTransferMemory> == 0b10010001'00000000);
 // static_assert(ClassToken<KDeviceAddressSpace> == 0b01100001'00000000);
 // static_assert(ClassToken<KSessionRequest> == 0b10100001'00000000);
-// static_assert(ClassToken<KCodeMemory> == 0b11000001'00000000);
+static_assert(ClassToken<KCodeMemory> == 0b11000001'00000000);
 
 // Ensure that the token hierarchy is correct.
 
@@ -79,7 +80,7 @@ static_assert(ClassToken<KWritableEvent> == ((0b10001001 << 8) | ClassToken<KAut
 static_assert(ClassToken<KTransferMemory> == ((0b10010001 << 8) | ClassToken<KAutoObject>));
 // static_assert(ClassToken<KDeviceAddressSpace> == ((0b01100001 << 8) | ClassToken<KAutoObject>));
 // static_assert(ClassToken<KSessionRequest> == ((0b10100001 << 8) | ClassToken<KAutoObject>));
-// static_assert(ClassToken<KCodeMemory> == ((0b11000001 << 8) | ClassToken<KAutoObject>));
+static_assert(ClassToken<KCodeMemory> == ((0b11000001 << 8) | ClassToken<KAutoObject>));
 
 // Ensure that the token hierarchy reflects the class hierarchy.
 
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
new file mode 100644
index 000000000..d69f7ffb7
--- /dev/null
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -0,0 +1,149 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <cstring>
+
+#include "common/common_types.h"
+#include "core/device_memory.h"
+#include "core/hle/kernel/k_auto_object.h"
+#include "core/hle/kernel/k_code_memory.h"
+#include "core/hle/kernel/k_light_lock.h"
+#include "core/hle/kernel/k_memory_block.h"
+#include "core/hle/kernel/k_page_linked_list.h"
+#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/slab_helpers.h"
+#include "core/hle/kernel/svc_types.h"
+#include "core/hle/result.h"
+
+namespace Kernel {
+
+KCodeMemory::KCodeMemory(KernelCore& kernel_)
+    : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_lock(kernel_) {}
+
+ResultCode KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) {
+    // Set members.
+    m_owner = kernel.CurrentProcess();
+
+    // Get the owner page table.
+    auto& page_table = m_owner->PageTable();
+
+    // Construct the page group.
+    KMemoryInfo kBlockInfo = page_table.QueryInfo(addr);
+    m_page_group = KPageLinkedList(kBlockInfo.GetAddress(), kBlockInfo.GetNumPages());
+
+    // Lock the memory.
+    R_TRY(page_table.LockForCodeMemory(addr, size));
+
+    // Clear the memory.
+    for (const auto& block : m_page_group.Nodes()) {
+        std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
+    }
+
+    // Set remaining tracking members.
+    m_address = addr;
+    m_is_initialized = true;
+    m_is_owner_mapped = false;
+    m_is_mapped = false;
+
+    // We succeeded.
+    return ResultSuccess;
+}
+
+void KCodeMemory::Finalize() {
+    // Unlock.
+    if (!m_is_mapped && !m_is_owner_mapped) {
+        const size_t size = m_page_group.GetNumPages() * PageSize;
+        m_owner->PageTable().UnlockForCodeMemory(m_address, size);
+    }
+}
+
+ResultCode KCodeMemory::Map(VAddr address, size_t size) {
+    // Validate the size.
+    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+
+    // Lock ourselves.
+    KScopedLightLock lk(m_lock);
+
+    // Ensure we're not already mapped.
+    R_UNLESS(!m_is_mapped, ResultInvalidState);
+
+    // Map the memory.
+    R_TRY(kernel.CurrentProcess()->PageTable().MapPages(
+        address, m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
+
+    // Mark ourselves as mapped.
+    m_is_mapped = true;
+
+    return ResultSuccess;
+}
+
+ResultCode KCodeMemory::Unmap(VAddr address, size_t size) {
+    // Validate the size.
+    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+
+    // Lock ourselves.
+    KScopedLightLock lk(m_lock);
+
+    // Unmap the memory.
+    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, m_page_group,
+                                                          KMemoryState::CodeOut));
+
+    // Mark ourselves as unmapped.
+    m_is_mapped = false;
+
+    return ResultSuccess;
+}
+
+ResultCode KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
+    // Validate the size.
+    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+
+    // Lock ourselves.
+    KScopedLightLock lk(m_lock);
+
+    // Ensure we're not already mapped.
+    R_UNLESS(!m_is_owner_mapped, ResultInvalidState);
+
+    // Convert the memory permission.
+    KMemoryPermission k_perm{};
+    switch (perm) {
+    case Svc::MemoryPermission::Read:
+        k_perm = KMemoryPermission::UserRead;
+        break;
+    case Svc::MemoryPermission::ReadExecute:
+        k_perm = KMemoryPermission::UserReadExecute;
+        break;
+    default:
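+        // Other permissions are rejected by the caller (svcControlCodeMemory).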
+        break;
+    }
+
+    // Map the memory.
+    R_TRY(
+        m_owner->PageTable().MapPages(address, m_page_group, KMemoryState::GeneratedCode, k_perm));
+
+    // Mark ourselves as mapped.
+    m_is_owner_mapped = true;
+
+    return ResultSuccess;
+}
+
+ResultCode KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
+    // Validate the size.
+    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+
+    // Lock ourselves.
+    KScopedLightLock lk(m_lock);
+
+    // Unmap the memory.
+    R_TRY(m_owner->PageTable().UnmapPages(address, m_page_group, KMemoryState::GeneratedCode));
+
+    // Mark ourselves as unmapped.
+    m_is_owner_mapped = false;
+
+    return ResultSuccess;
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_code_memory.h b/src/core/hle/kernel/k_code_memory.h
new file mode 100644
index 000000000..e0ba19a53
--- /dev/null
+++ b/src/core/hle/kernel/k_code_memory.h
@@ -0,0 +1,66 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_types.h"
+#include "core/device_memory.h"
+#include "core/hle/kernel/k_auto_object.h"
+#include "core/hle/kernel/k_light_lock.h"
+#include "core/hle/kernel/k_page_linked_list.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/slab_helpers.h"
+#include "core/hle/kernel/svc_types.h"
+#include "core/hle/result.h"
+
+namespace Kernel {
+
+enum class CodeMemoryOperation : u32 {
+    Map = 0,
+    MapToOwner = 1,
+    Unmap = 2,
+    UnmapFromOwner = 3,
+};
+
+class KCodeMemory final
+    : public KAutoObjectWithSlabHeapAndContainer<KCodeMemory, KAutoObjectWithList> {
+    KERNEL_AUTOOBJECT_TRAITS(KCodeMemory, KAutoObject);
+
+public:
+    explicit KCodeMemory(KernelCore& kernel_);
+
+    ResultCode Initialize(Core::DeviceMemory& device_memory, VAddr address, size_t size);
+    void Finalize();
+
+    ResultCode Map(VAddr address, size_t size);
+    ResultCode Unmap(VAddr address, size_t size);
+    ResultCode MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm);
+    ResultCode UnmapFromOwner(VAddr address, size_t size);
+
+    bool IsInitialized() const {
+        return m_is_initialized;
+    }
+    static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
+
+    KProcess* GetOwner() const {
+        return m_owner;
+    }
+    VAddr GetSourceAddress() const {
+        return m_address;
+    }
+    size_t GetSize() const {
+        return m_is_initialized ? m_page_group.GetNumPages() * PageSize : 0;
+    }
+
+private:
+    KPageLinkedList m_page_group{};
+    KProcess* m_owner{};
+    VAddr m_address{};
+    KLightLock m_lock;
+    bool m_is_initialized{};
+    bool m_is_owner_mapped{};
+    bool m_is_mapped{};
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h
index a7fdb5fb8..fd491146f 100644
--- a/src/core/hle/kernel/k_memory_block.h
+++ b/src/core/hle/kernel/k_memory_block.h
@@ -131,6 +131,26 @@ enum class KMemoryPermission : u8 {
 
     UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write |
                                Svc::MemoryPermission::Execute),
+
+    KernelShift = 3,
+
+    KernelRead = Read << KernelShift,
+    KernelWrite = Write << KernelShift,
+    KernelExecute = Execute << KernelShift,
+
+    NotMapped = (1 << (2 * KernelShift)),
+
+    KernelReadWrite = KernelRead | KernelWrite,
+    KernelReadExecute = KernelRead | KernelExecute,
+
+    UserRead = Read | KernelRead,
+    UserWrite = Write | KernelWrite,
+    UserExecute = Execute,
+
+    UserReadWrite = UserRead | UserWrite,
+    UserReadExecute = UserRead | UserExecute,
+
+    IpcLockChangeMask = NotMapped | UserReadWrite
 };
 DECLARE_ENUM_FLAG_OPERATORS(KMemoryPermission);
 
diff --git a/src/core/hle/kernel/k_page_linked_list.h b/src/core/hle/kernel/k_page_linked_list.h
index 3362fb236..0e2ae582a 100644
--- a/src/core/hle/kernel/k_page_linked_list.h
+++ b/src/core/hle/kernel/k_page_linked_list.h
@@ -27,6 +27,10 @@ public:
             return num_pages;
         }
 
+        constexpr std::size_t GetSize() const {
+            return GetNumPages() * PageSize;
+        }
+
     private:
         u64 addr{};
         std::size_t num_pages{};
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 9bda5c5b2..99982e5a3 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -368,6 +368,35 @@ ResultCode KPageTable::UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, st
     return ResultSuccess;
 }
 
+ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
+                                          KPageTable& src_page_table, VAddr src_addr) {
+    std::lock_guard lock{page_table_lock};
+
+    const std::size_t num_pages{size / PageSize};
+
+    // Check that the memory is mapped in the destination process.
+    size_t num_allocator_blocks;
+    R_TRY(CheckMemoryState(&num_allocator_blocks, dst_addr, size, KMemoryState::All,
+                           KMemoryState::SharedCode, KMemoryPermission::UserReadWrite,
+                           KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
+                           KMemoryAttribute::None));
+
+    // Check that the memory is mapped in the source process.
+    R_TRY(src_page_table.CheckMemoryState(src_addr, size, KMemoryState::FlagCanMapProcess,
+                                          KMemoryState::FlagCanMapProcess, KMemoryPermission::None,
+                                          KMemoryPermission::None, KMemoryAttribute::All,
+                                          KMemoryAttribute::None));
+
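+    // Unmap the memory.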
+    R_TRY(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap));
+
+    // Apply the memory block update.
+    block_manager->Update(dst_addr, num_pages, KMemoryState::Free, KMemoryPermission::None,
+                          KMemoryAttribute::None);
+
+    return ResultSuccess;
+}
+
 void KPageTable::MapPhysicalMemory(KPageLinkedList& page_linked_list, VAddr start, VAddr end) {
     auto node{page_linked_list.Nodes().begin()};
     PAddr map_addr{node->GetAddress()};
@@ -942,6 +969,65 @@ ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size)
     return ResultSuccess;
 }
 
+ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) {
+    std::lock_guard lock{page_table_lock};
+
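+    // While locked, the pages are tracked as kernel read/write and not user-mapped.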
+    KMemoryPermission new_perm = KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite;
+
+    KMemoryPermission old_perm{};
+
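+    // Verify that the range is code-memory capable and currently user read/write.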
+    if (const ResultCode result{CheckMemoryState(
+            nullptr, &old_perm, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
+            KMemoryState::FlagCanCodeMemory, KMemoryPermission::Mask,
+            KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None)};
+        result.IsError()) {
+        return result;
+    }
+
+    new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
+
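+    // Update the blocks; the device-share lock count is reused to track the code memory lock.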
+    block_manager->UpdateLock(
+        addr, size / PageSize,
+        [](KMemoryBlockManager::iterator block, KMemoryPermission permission) {
+            block->ShareToDevice(permission);
+        },
+        new_perm);
+
+    return ResultSuccess;
+}
+
+ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size) {
+    std::lock_guard lock{page_table_lock};
+
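+    // Restore the default user read/write permission.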
+    KMemoryPermission new_perm = KMemoryPermission::UserReadWrite;
+
+    KMemoryPermission old_perm{};
+
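+    // Verify that the range is code-memory capable and currently locked.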
+    if (const ResultCode result{CheckMemoryState(
+            nullptr, &old_perm, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
+            KMemoryState::FlagCanCodeMemory, KMemoryPermission::None, KMemoryPermission::None,
+            KMemoryAttribute::All, KMemoryAttribute::Locked)};
+        result.IsError()) {
+        return result;
+    }
+
+    new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
+
+    block_manager->UpdateLock(
+        addr, size / PageSize,
+        [](KMemoryBlockManager::iterator block, KMemoryPermission permission) {
+            block->UnshareToDevice(permission);
+        },
+        new_perm);
+
+    return ResultSuccess;
+}
+
 ResultCode KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
     block_manager = std::make_unique<KMemoryBlockManager>(start, end);
 
@@ -1231,4 +1312,42 @@ ResultCode KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermissi
     return ResultSuccess;
 }
 
+ResultCode KPageTable::CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size,
+                                        KMemoryState state_mask, KMemoryState state,
+                                        KMemoryPermission perm_mask, KMemoryPermission perm,
+                                        KMemoryAttribute attr_mask, KMemoryAttribute attr) const {
+    // Get information about the first block.
+    const VAddr last_addr = addr + size - 1;
+    KMemoryBlockManager::const_iterator it{block_manager->FindIterator(addr)};
+    KMemoryInfo info = it->GetMemoryInfo();
+
+    // If the start address isn't aligned, we need a block.
+    const size_t blocks_for_start_align =
+        (Common::AlignDown(addr, PageSize) != info.GetAddress()) ? 1 : 0;
+
+    while (true) {
+        // Validate against the provided masks.
+        R_TRY(CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
+
+        // Break once we're done.
+        if (last_addr <= info.GetLastAddress()) {
+            break;
+        }
+
+        // Advance our iterator.
+        it++;
+        info = it->GetMemoryInfo();
+    }
+
+    // If the end address isn't aligned, we need a block.
+    const size_t blocks_for_end_align =
+        (Common::AlignUp(addr + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
+
+    if (out_blocks_needed != nullptr) {
+        *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
+    }
+
+    return ResultSuccess;
+}
+
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index b7ec38f06..d784aa67e 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -33,6 +33,8 @@ public:
                               KMemoryPermission perm);
     ResultCode MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
     ResultCode UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
+    ResultCode UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
+                                  VAddr src_addr);
     ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
     ResultCode UnmapPhysicalMemory(VAddr addr, std::size_t size);
     ResultCode UnmapMemory(VAddr addr, std::size_t size);
@@ -55,6 +57,8 @@ public:
                                           KMemoryPermission perm, PAddr map_addr = 0);
     ResultCode LockForDeviceAddressSpace(VAddr addr, std::size_t size);
     ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
+    ResultCode LockForCodeMemory(VAddr addr, std::size_t size);
+    ResultCode UnlockForCodeMemory(VAddr addr, std::size_t size);
 
     Common::PageTable& PageTableImpl() {
         return page_table_impl;
@@ -115,6 +119,10 @@ private:
         return CheckMemoryState(nullptr, nullptr, nullptr, addr, size, state_mask, state, perm_mask,
                                 perm, attr_mask, attr, ignore_attr);
     }
+    ResultCode CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size,
+                                KMemoryState state_mask, KMemoryState state,
+                                KMemoryPermission perm_mask, KMemoryPermission perm,
+                                KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
 
     std::recursive_mutex page_table_lock;
     std::unique_ptr<KMemoryBlockManager> block_manager;
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index d2ceae950..d847fd0c5 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -53,6 +53,7 @@ class KSharedMemoryInfo;
 class KThread;
 class KTransferMemory;
 class KWritableEvent;
+class KCodeMemory;
 class PhysicalCore;
 class ServiceThread;
 class Synchronization;
@@ -326,6 +327,8 @@ public:
             return slab_heap_container->transfer_memory;
         } else if constexpr (std::is_same_v<T, KWritableEvent>) {
             return slab_heap_container->writeable_event;
+        } else if constexpr (std::is_same_v<T, KCodeMemory>) {
+            return slab_heap_container->code_memory;
         }
     }
 
@@ -377,6 +380,7 @@ private:
         KSlabHeap<KThread> thread;
         KSlabHeap<KTransferMemory> transfer_memory;
         KSlabHeap<KWritableEvent> writeable_event;
+        KSlabHeap<KCodeMemory> code_memory;
     };
 
     std::unique_ptr<SlabHeapContainer> slab_heap_container;
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index f0cd8471e..b37db918e 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -18,6 +18,7 @@
 #include "core/core_timing.h"
 #include "core/hle/kernel/k_client_port.h"
 #include "core/hle/kernel/k_client_session.h"
+#include "core/hle/kernel/k_code_memory.h"
 #include "core/hle/kernel/k_event.h"
 #include "core/hle/kernel/k_handle_table.h"
 #include "core/hle/kernel/k_memory_block.h"
@@ -1197,6 +1198,22 @@ constexpr bool IsValidProcessMemoryPermission(Svc::MemoryPermission perm) {
     }
 }
 
+constexpr bool IsValidMapCodeMemoryPermission(Svc::MemoryPermission perm) {
+    return perm == Svc::MemoryPermission::ReadWrite;
+}
+
+constexpr bool IsValidMapToOwnerCodeMemoryPermission(Svc::MemoryPermission perm) {
+    return perm == Svc::MemoryPermission::Read || perm == Svc::MemoryPermission::ReadExecute;
+}
+
+constexpr bool IsValidUnmapCodeMemoryPermission(Svc::MemoryPermission perm) {
+    return perm == Svc::MemoryPermission::None;
+}
+
+constexpr bool IsValidUnmapFromOwnerCodeMemoryPermission(Svc::MemoryPermission perm) {
+    return perm == Svc::MemoryPermission::None;
+}
+
 } // Anonymous namespace
 
 static ResultCode MapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address,
@@ -1306,6 +1323,195 @@ static ResultCode SetProcessMemoryPermission(Core::System& system, Handle proces
     return page_table.SetProcessMemoryPermission(address, size, ConvertToKMemoryPermission(perm));
 }
 
+static ResultCode MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
+                                   VAddr src_address, u64 size) {
+    LOG_TRACE(Kernel_SVC,
+              "called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}",
+              dst_address, process_handle, src_address, size);
+
+    // Validate the address/size.
+    R_UNLESS(Common::IsAligned(dst_address, PageSize), ResultInvalidAddress);
+    R_UNLESS(Common::IsAligned(src_address, PageSize), ResultInvalidAddress);
+    R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+    R_UNLESS(size > 0, ResultInvalidSize);
+    R_UNLESS((dst_address < dst_address + size), ResultInvalidCurrentMemory);
+    R_UNLESS((src_address < src_address + size), ResultInvalidCurrentMemory);
+
+    // Get the processes.
+    KProcess* dst_process = system.CurrentProcess();
+    KScopedAutoObject src_process =
+        dst_process->GetHandleTable().GetObjectWithoutPseudoHandle<KProcess>(process_handle);
+    R_UNLESS(src_process.IsNotNull(), ResultInvalidHandle);
+
+    // Get the page tables.
+    auto& dst_pt = dst_process->PageTable();
+    auto& src_pt = src_process->PageTable();
+
+    // Validate that the mapping is in range.
+    R_UNLESS(src_pt.Contains(src_address, size), ResultInvalidCurrentMemory);
+    R_UNLESS(dst_pt.CanContain(dst_address, size, KMemoryState::SharedCode),
+             ResultInvalidMemoryRegion);
+
+    // Create a new page group.
+    KMemoryInfo kBlockInfo = dst_pt.QueryInfo(dst_address);
+    KPageLinkedList pg(kBlockInfo.GetAddress(), kBlockInfo.GetNumPages());
+
+    // Map the group.
+    R_TRY(dst_pt.MapPages(dst_address, pg, KMemoryState::SharedCode,
+                          KMemoryPermission::UserReadWrite));
+
+    return ResultSuccess;
+}
+
+static ResultCode UnmapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
+                                     VAddr src_address, u64 size) {
+    LOG_TRACE(Kernel_SVC,
+              "called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}",
+              dst_address, process_handle, src_address, size);
+
+    // Validate the address/size.
+    R_UNLESS(Common::IsAligned(dst_address, PageSize), ResultInvalidAddress);
+    R_UNLESS(Common::IsAligned(src_address, PageSize), ResultInvalidAddress);
+    R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+    R_UNLESS(size > 0, ResultInvalidSize);
+    R_UNLESS((dst_address < dst_address + size), ResultInvalidCurrentMemory);
+    R_UNLESS((src_address < src_address + size), ResultInvalidCurrentMemory);
+
+    // Get the processes.
+    KProcess* dst_process = system.CurrentProcess();
+    KScopedAutoObject src_process =
+        dst_process->GetHandleTable().GetObjectWithoutPseudoHandle<KProcess>(process_handle);
+    R_UNLESS(src_process.IsNotNull(), ResultInvalidHandle);
+
+    // Get the page tables.
+    auto& dst_pt = dst_process->PageTable();
+    auto& src_pt = src_process->PageTable();
+
+    // Validate that the mapping is in range.
+    R_UNLESS(src_pt.Contains(src_address, size), ResultInvalidCurrentMemory);
+    R_UNLESS(dst_pt.CanContain(dst_address, size, KMemoryState::SharedCode),
+             ResultInvalidMemoryRegion);
+
+    // Unmap the memory.
+    R_TRY(dst_pt.UnmapProcessMemory(dst_address, size, src_pt, src_address));
+
+    return ResultSuccess;
+}
+
+static ResultCode CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size) {
+    LOG_TRACE(Kernel_SVC, "called, handle_out=0x{:X}, address=0x{:X}, size=0x{:X}",
+              static_cast<void*>(out), address, size);
+    // Get kernel instance.
+    auto& kernel = system.Kernel();
+
+    // Validate address / size.
+    R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
+    R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+    R_UNLESS(size > 0, ResultInvalidSize);
+    R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+
+    // Create the code memory.
+
+    KCodeMemory* code_mem = KCodeMemory::Create(kernel);
+    R_UNLESS(code_mem != nullptr, ResultOutOfResource);
+
+    // Verify that the region is in range.
+    R_UNLESS(system.CurrentProcess()->PageTable().Contains(address, size),
+             ResultInvalidCurrentMemory);
+
+    // Initialize the code memory.
+    R_TRY(code_mem->Initialize(system.DeviceMemory(), address, size));
+
+    // Register the code memory.
+    KCodeMemory::Register(kernel, code_mem);
+
+    // Add the code memory to the handle table.
+    R_TRY(system.CurrentProcess()->GetHandleTable().Add(out, code_mem));
+
+    code_mem->Close();
+
+    return ResultSuccess;
+}
+
+static ResultCode ControlCodeMemory(Core::System& system, Handle code_memory_handle, u32 operation,
+                                    VAddr address, size_t size, Svc::MemoryPermission perm) {
+
+    LOG_TRACE(Kernel_SVC,
+              "called, code_memory_handle=0x{:X}, operation=0x{:X}, address=0x{:X}, size=0x{:X}, "
+              "permission=0x{:X}",
+              code_memory_handle, operation, address, size, perm);
+
+    // Validate the address / size.
+    R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
+    R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+    R_UNLESS(size > 0, ResultInvalidSize);
+    R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+
+    // Get the code memory from its handle.
+    KScopedAutoObject code_mem =
+        system.CurrentProcess()->GetHandleTable().GetObject<KCodeMemory>(code_memory_handle);
+    R_UNLESS(code_mem.IsNotNull(), ResultInvalidHandle);
+
+    // NOTE: Here, Atmosphere extends the SVC to allow code memory operations on one's own process.
+    // This enables homebrew usage of these SVCs for JIT.
+
+    // Perform the operation.
+    switch (static_cast<CodeMemoryOperation>(operation)) {
+    case CodeMemoryOperation::Map: {
+        // Check that the region is in range.
+        R_UNLESS(
+            system.CurrentProcess()->PageTable().CanContain(address, size, KMemoryState::CodeOut),
+            ResultInvalidMemoryRegion);
+
+        // Check the memory permission.
+        R_UNLESS(IsValidMapCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
+
+        // Map the memory.
+        R_TRY(code_mem->Map(address, size));
+    } break;
+    case CodeMemoryOperation::Unmap: {
+        // Check that the region is in range.
+        R_UNLESS(
+            system.CurrentProcess()->PageTable().CanContain(address, size, KMemoryState::CodeOut),
+            ResultInvalidMemoryRegion);
+
+        // Check the memory permission.
+        R_UNLESS(IsValidUnmapCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
+
+        // Unmap the memory.
+        R_TRY(code_mem->Unmap(address, size));
+    } break;
+    case CodeMemoryOperation::MapToOwner: {
+        // Check that the region is in range.
+        R_UNLESS(code_mem->GetOwner()->PageTable().CanContain(address, size,
+                                                              KMemoryState::GeneratedCode),
+                 ResultInvalidMemoryRegion);
+
+        // Check the memory permission.
+        R_UNLESS(IsValidMapToOwnerCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
+
+        // Map the memory to its owner.
+        R_TRY(code_mem->MapToOwner(address, size, perm));
+    } break;
+    case CodeMemoryOperation::UnmapFromOwner: {
+        // Check that the region is in range.
+        R_UNLESS(code_mem->GetOwner()->PageTable().CanContain(address, size,
+                                                              KMemoryState::GeneratedCode),
+                 ResultInvalidMemoryRegion);
+
+        // Check the memory permission.
+        R_UNLESS(IsValidUnmapFromOwnerCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
+
+        // Unmap the memory from its owner.
+        R_TRY(code_mem->UnmapFromOwner(address, size));
+    } break;
+    default:
+        return ResultInvalidEnumValue;
+    }
+
+    return ResultSuccess;
+}
+
 static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address,
                                      VAddr page_info_address, Handle process_handle,
                                      VAddr address) {
@@ -2600,8 +2806,8 @@ static const FunctionDef SVC_Table_64[] = {
     {0x48, nullptr, "MapPhysicalMemoryUnsafe"},
     {0x49, nullptr, "UnmapPhysicalMemoryUnsafe"},
     {0x4A, nullptr, "SetUnsafeLimit"},
-    {0x4B, nullptr, "CreateCodeMemory"},
-    {0x4C, nullptr, "ControlCodeMemory"},
+    {0x4B, SvcWrap64<CreateCodeMemory>, "CreateCodeMemory"},
+    {0x4C, SvcWrap64<ControlCodeMemory>, "ControlCodeMemory"},
     {0x4D, nullptr, "SleepSystem"},
     {0x4E, nullptr, "ReadWriteRegister"},
     {0x4F, nullptr, "SetProcessActivity"},
@@ -2641,8 +2847,8 @@ static const FunctionDef SVC_Table_64[] = {
     {0x71, nullptr, "ManageNamedPort"},
     {0x72, nullptr, "ConnectToPort"},
     {0x73, SvcWrap64<SetProcessMemoryPermission>, "SetProcessMemoryPermission"},
-    {0x74, nullptr, "MapProcessMemory"},
-    {0x75, nullptr, "UnmapProcessMemory"},
+    {0x74, SvcWrap64<MapProcessMemory>, "MapProcessMemory"},
+    {0x75, SvcWrap64<UnmapProcessMemory>, "UnmapProcessMemory"},
     {0x76, SvcWrap64<QueryProcessMemory>, "QueryProcessMemory"},
     {0x77, SvcWrap64<MapProcessCodeMemory>, "MapProcessCodeMemory"},
     {0x78, SvcWrap64<UnmapProcessCodeMemory>, "UnmapProcessCodeMemory"},
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 6e62e656f..86255fe6d 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -73,6 +73,23 @@ void SvcWrap64(Core::System& system) {
                            .raw);
 }
 
+// Used by MapProcessMemory and UnmapProcessMemory
+template <ResultCode func(Core::System&, u64, u32, u64, u64)>
+void SvcWrap64(Core::System& system) {
+    FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1)),
+                            Param(system, 2), Param(system, 3))
+                           .raw);
+}
+
+// Used by ControlCodeMemory
+template <ResultCode func(Core::System&, Handle, u32, u64, u64, Svc::MemoryPermission)>
+void SvcWrap64(Core::System& system) {
+    FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)),
+                            static_cast<u32>(Param(system, 1)), Param(system, 2), Param(system, 3),
+                            static_cast<Svc::MemoryPermission>(Param(system, 4)))
+                           .raw);
+}
+
 template <ResultCode func(Core::System&, u32*)>
 void SvcWrap64(Core::System& system) {
     u32 param = 0;
@@ -301,6 +318,16 @@ void SvcWrap64(Core::System& system) {
     FuncReturn(system, retval);
 }
 
+// Used by CreateCodeMemory
+template <ResultCode func(Core::System&, Handle*, u64, u64)>
+void SvcWrap64(Core::System& system) {
+    u32 param_1 = 0;
+    const u32 retval = func(system, &param_1, Param(system, 1), Param(system, 2)).raw;
+
+    system.CurrentArmInterface().SetReg(1, param_1);
+    FuncReturn(system, retval);
+}
+
 template <ResultCode func(Core::System&, Handle*, u64, u32, u32)>
 void SvcWrap64(Core::System& system) {
     u32 param_1 = 0;