From 51d53a62815dfb8fd20655403af58aed357e2637 Mon Sep 17 00:00:00 2001
From: NarcolepticK
Date: Sun, 30 Sep 2018 21:21:51 -0400
Subject: [PATCH] LLE Mapped Buffer: addressed comments

---
 src/core/hle/kernel/ipc.cpp      | 159 +++++++++++++++++--------------
 src/core/hle/kernel/vm_manager.h |   3 -
 src/core/memory.cpp              |  52 ++++++++++
 src/core/memory.h                |   2 +
 4 files changed, 140 insertions(+), 76 deletions(-)

diff --git a/src/core/hle/kernel/ipc.cpp b/src/core/hle/kernel/ipc.cpp
index 8a6214409..070bb6619 100644
--- a/src/core/hle/kernel/ipc.cpp
+++ b/src/core/hle/kernel/ipc.cpp
@@ -33,6 +33,17 @@ ResultCode TranslateCommandBuffer(SharedPtr<Thread> src_thread, SharedPtr<Thread
     std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;
     Memory::ReadBlock(*src_process, src_address, cmd_buf.data(), command_size * sizeof(u32));
 
+    // Create a copy of the target's command buffer
+    IPC::Header dst_header;
+    Memory::ReadBlock(*dst_process, dst_address, &dst_header.raw, sizeof(dst_header.raw));
+
+    std::size_t dst_untranslated_size = 1u + dst_header.normal_params_size;
+    std::size_t dst_command_size = dst_untranslated_size + dst_header.translate_params_size;
+
+    std::array<u32, IPC::COMMAND_BUFFER_LENGTH> dst_cmd_buf;
+    Memory::ReadBlock(*dst_process, dst_address, dst_cmd_buf.data(),
+                      dst_command_size * sizeof(u32));
+
     std::size_t i = untranslated_size;
     while (i < command_size) {
         u32 descriptor = cmd_buf[i];
@@ -128,36 +139,63 @@ ResultCode TranslateCommandBuffer(SharedPtr<Thread> src_thread, SharedPtr<Thread
             u32 num_pages =
                 Common::AlignUp(page_offset + size, Memory::PAGE_SIZE) >> Memory::PAGE_BITS;
 
-            // Skip when the size is zero
+            // Skip when the size is zero and num_pages == 0
             if (size == 0) {
-                i += 1;
+                cmd_buf[i++] = 0;
                 break;
             }
 
+            ASSERT(num_pages >= 1);
             if (reply) {
-                // TODO(Subv): Scan the target's command buffer to make sure that there was a
-                // MappedBuffer descriptor in the original request. The real kernel panics if you
-                // try to reply with an unsolicited MappedBuffer.
+                // Scan the target's command buffer for the matching mapped buffer
+                std::size_t j = dst_untranslated_size;
+                while (j < dst_command_size) {
+                    u32 desc = dst_cmd_buf[j++];
 
-                // Unmap the buffers. Readonly buffers do not need to be copied over to the target
-                // process again because they were (presumably) not modified. This behavior is
-                // consistent with the real kernel.
-                if (permissions == IPC::MappedBufferPermissions::R) {
-                    ResultCode result = src_process->vm_manager.UnmapRange(
-                        page_start, num_pages * Memory::PAGE_SIZE);
-                    ASSERT(result == RESULT_SUCCESS);
-                } else {
-                    const auto vma_iter = src_process->vm_manager.vma_map.find(source_address);
-                    const auto& vma = vma_iter->second;
-                    const VAddr dest_address = vma.originating_buffer_address;
+                    if (IPC::GetDescriptorType(desc) == IPC::DescriptorType::MappedBuffer) {
+                        IPC::MappedBufferDescInfo dest_descInfo{desc};
+                        VAddr dest_address = dst_cmd_buf[j];
 
-                    auto buffer = std::make_shared<std::vector<u8>>(size);
-                    Memory::ReadBlock(*src_process, source_address, buffer->data(), size);
-                    Memory::WriteBlock(*dst_process, dest_address, buffer->data(), size);
+                        u32 dest_size = static_cast<u32>(dest_descInfo.size);
+                        IPC::MappedBufferPermissions dest_permissions = dest_descInfo.perms;
 
-                    ResultCode result = src_process->vm_manager.UnmapRange(
-                        page_start, num_pages * Memory::PAGE_SIZE);
-                    ASSERT(result == RESULT_SUCCESS);
+                        if (permissions == dest_permissions && size == dest_size) {
+                            // Readonly buffers do not need to be copied over to the target
+                            // process again because they were (presumably) not modified. This
+                            // behavior is consistent with the real kernel.
+                            if (permissions != IPC::MappedBufferPermissions::R) {
+                                // Copy the modified buffer back into the target process
+                                Memory::CopyBlock(*src_process, *dst_process, source_address,
+                                                  dest_address, size);
+                            }
+
+                            // Unmap the Reserved page before the buffer
+                            ResultCode result = src_process->vm_manager.UnmapRange(
+                                page_start - Memory::PAGE_SIZE, Memory::PAGE_SIZE);
+                            ASSERT(result == RESULT_SUCCESS);
+
+                            // Unmap the buffer from the source process
+                            result = src_process->vm_manager.UnmapRange(
+                                page_start, num_pages * Memory::PAGE_SIZE);
+                            ASSERT(result == RESULT_SUCCESS);
+
+                            // Check if this is the last mapped buffer
+                            VAddr next_reserve = page_start + num_pages * Memory::PAGE_SIZE;
+                            auto& vma =
+                                src_process->vm_manager.FindVMA(next_reserve + Memory::PAGE_SIZE)
+                                    ->second;
+                            if (vma.type == VMAType::Free) {
+                                // Unmap the Reserved page after the last buffer
+                                result = src_process->vm_manager.UnmapRange(next_reserve,
+                                                                            Memory::PAGE_SIZE);
+                                ASSERT(result == RESULT_SUCCESS);
+                            }
+
+                            break;
+                        }
+                    }
+
+                    j += 1;
                 }
 
                 i += 1;
@@ -166,63 +204,38 @@ ResultCode TranslateCommandBuffer(SharedPtr<Thread> src_thread, SharedPtr<Thread
 
             VAddr target_address = 0;
 
-            auto IsPageAligned = [](VAddr address) -> bool {
-                return (address & Memory::PAGE_MASK) == 0;
-            };
 
-            // TODO(Subv): Perform permission checks.
 
-            // TODO(Subv): Leave a page of unmapped memory before the first page and after the last
-            // page.
+            // Reserve a page of memory before the mapped buffer
+            auto reserve_buffer = std::make_shared<std::vector<u8>>(Memory::PAGE_SIZE);
+            dst_process->vm_manager.MapMemoryBlockToBase(
+                Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, reserve_buffer, 0,
+                static_cast<u32>(reserve_buffer->size()), Kernel::MemoryState::Reserved);
 
-            if (num_pages == 1 && !IsPageAligned(source_address) &&
-                !IsPageAligned(source_address + size)) {
-                // If the address of the source buffer is not page-aligned or if the buffer doesn't
-                // fill an entire page, then we have to allocate a page of memory in the target
-                // process and copy over the data from the input buffer. This allocated buffer will
-                // be copied back to the source process and deallocated when the server replies to
-                // the request via ReplyAndReceive.
+            auto buffer = std::make_shared<std::vector<u8>>(num_pages * Memory::PAGE_SIZE);
+            Memory::ReadBlock(*src_process, source_address, buffer->data() + page_offset, size);
 
-                auto buffer = std::make_shared<std::vector<u8>>(Memory::PAGE_SIZE);
-
-                // Number of bytes until the next page.
-                std::size_t difference_to_page =
-                    Common::AlignUp(source_address, Memory::PAGE_SIZE) - source_address;
-                // If the data fits in one page we can just copy the required size instead of the
-                // entire page.
-                std::size_t read_size =
-                    num_pages == 1 ? static_cast<std::size_t>(size) : difference_to_page;
-
-                Memory::ReadBlock(*src_process, source_address, buffer->data() + page_offset,
-                                  read_size);
-
-                // Map the page into the target process' address space.
-                target_address =
-                    dst_process->vm_manager
-                        .MapMemoryBlockToBase(Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE,
-                                              buffer, 0, static_cast<u32>(buffer->size()),
-                                              Kernel::MemoryState::Shared)
-                        .Unwrap();
-            } else {
-                auto buffer = std::make_shared<std::vector<u8>>(num_pages * Memory::PAGE_SIZE);
-                Memory::ReadBlock(*src_process, source_address, buffer->data() + page_offset, size);
-
-                // Map the pages into the target process' address space.
-                target_address =
-                    dst_process->vm_manager
-                        .MapMemoryBlockToBase(Memory::IPC_MAPPING_VADDR + Memory::PAGE_SIZE,
-                                              Memory::IPC_MAPPING_SIZE - Memory::PAGE_SIZE, buffer,
-                                              0, static_cast<u32>(buffer->size()),
-                                              Kernel::MemoryState::Shared)
-                        .Unwrap();
-            }
-            // Save the original address we copied the buffer from so that we can copy the modified
-            // buffer back, if needed
-            auto vma_iter = dst_process->vm_manager.vma_map.find(target_address + page_offset);
-            auto& vma = vma_iter->second;
-            vma.originating_buffer_address = source_address;
+            // Map the page(s) into the target process' address space.
+            target_address = dst_process->vm_manager
+                                 .MapMemoryBlockToBase(
+                                     Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, buffer, 0,
+                                     static_cast<u32>(buffer->size()), Kernel::MemoryState::Shared)
+                                 .Unwrap();
 
             cmd_buf[i++] = target_address + page_offset;
+
+            // Check if this is the last mapped buffer
+            if (i < command_size) {
+                u32 next_descriptor = cmd_buf[i];
+                if (IPC::GetDescriptorType(next_descriptor) == IPC::DescriptorType::MappedBuffer) {
+                    break;
+                }
+            }
+
+            // Reserve a page of memory after the last mapped buffer
+            dst_process->vm_manager.MapMemoryBlockToBase(
+                Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, reserve_buffer, 0,
+                static_cast<u32>(reserve_buffer->size()), Kernel::MemoryState::Reserved);
             break;
         }
         default:
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index fcb107c06..7ac5c3b01 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -86,9 +86,6 @@ struct VirtualMemoryArea {
     PAddr paddr = 0;
     Memory::MMIORegionPointer mmio_handler = nullptr;
 
-    /// Originating address of the IPC mapped buffer
-    VAddr originating_buffer_address = 0;
-
     /// Tests if this area can be merged to the right with `next`.
     bool CanBeMergedWith(const VirtualMemoryArea& next) const;
 };
 
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 26967ad36..70baef93a 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -700,6 +700,58 @@ void CopyBlock(VAddr dest_addr, VAddr src_addr, const std::size_t size) {
     CopyBlock(*Kernel::g_current_process, dest_addr, src_addr, size);
 }
 
+void CopyBlock(const Kernel::Process& src_process, const Kernel::Process& dest_process,
+               VAddr src_addr, VAddr dest_addr, std::size_t size) {
+    auto& page_table = src_process.vm_manager.page_table;
+    std::size_t remaining_size = size;
+    std::size_t page_index = src_addr >> PAGE_BITS;
+    std::size_t page_offset = src_addr & PAGE_MASK;
+
+    while (remaining_size > 0) {
+        const std::size_t copy_amount = std::min(PAGE_SIZE - page_offset, remaining_size);
+        const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
+
+        switch (page_table.attributes[page_index]) {
+        case PageType::Unmapped: {
+            LOG_ERROR(HW_Memory,
+                      "unmapped CopyBlock @ 0x{:08X} (start address = 0x{:08X}, size = {})",
+                      current_vaddr, src_addr, size);
+            ZeroBlock(dest_process, dest_addr, copy_amount);
+            break;
+        }
+        case PageType::Memory: {
+            DEBUG_ASSERT(page_table.pointers[page_index]);
+            const u8* src_ptr = page_table.pointers[page_index] + page_offset;
+            WriteBlock(dest_process, dest_addr, src_ptr, copy_amount);
+            break;
+        }
+        case PageType::Special: {
+            MMIORegionPointer handler = GetMMIOHandler(page_table, current_vaddr);
+            DEBUG_ASSERT(handler);
+            std::vector<u8> buffer(copy_amount);
+            handler->ReadBlock(current_vaddr, buffer.data(), buffer.size());
+            WriteBlock(dest_process, dest_addr, buffer.data(), buffer.size());
+            break;
+        }
+        case PageType::RasterizerCachedMemory: {
+            RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount),
+                                         FlushMode::Flush);
+            WriteBlock(dest_process, dest_addr, GetPointerFromVMA(src_process, current_vaddr),
+                       copy_amount);
+            break;
+        }
+        default:
+            UNREACHABLE();
+        }
+
+        page_index++;
+        page_offset = 0;
+        dest_addr += static_cast<VAddr>(copy_amount);
+        src_addr += static_cast<VAddr>(copy_amount);
+        remaining_size -= copy_amount;
+    }
+}
+
 template <>
 u8 ReadMMIO<u8>(MMIORegionPointer mmio_handler, VAddr addr) {
     return mmio_handler->Read8(addr);
diff --git a/src/core/memory.h b/src/core/memory.h
index 73dbe091c..e78754705 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -205,6 +205,8 @@ void ZeroBlock(const Kernel::Process& process, VAddr dest_addr, const std::size_
 void ZeroBlock(VAddr dest_addr, const std::size_t size);
 void CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr, std::size_t size);
 void CopyBlock(VAddr dest_addr, VAddr src_addr, std::size_t size);
+void CopyBlock(const Kernel::Process& src_process, const Kernel::Process& dest_process,
+               VAddr src_addr, VAddr dest_addr, std::size_t size);
 u8* GetPointer(VAddr vaddr);
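A note on the reply path in the ipc.cpp hunk above: with VirtualMemoryArea::originating_buffer_address removed, the destination address is rediscovered by scanning the target's own command buffer for a MappedBuffer descriptor whose size and permissions match the buffer being returned. The sketch below is a minimal, self-contained model of that scan, assuming the 3DS mapped-buffer descriptor layout (flag bit 0x8, permissions in bits 1-2, size from bit 4 up) and using plain integer types in place of Citra's IPC::MappedBufferDescInfo and IPC::GetDescriptorType; the names sketch::MakeMappedBufferDesc and sketch::FindReplyTarget are illustrative only and do not exist in the codebase.

// Standalone sketch (not part of the patch): models the reply-path descriptor scan with plain
// integers instead of Citra's IPC::MappedBufferDescInfo/GetDescriptorType helpers. Assumes the
// 3DS mapped-buffer descriptor layout: flag bit 0x8, permissions in bits 1-2, size from bit 4 up.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <optional>
#include <vector>

namespace sketch {

// Build a mapped-buffer translation descriptor (perms: 1 = R, 2 = W, 3 = RW).
constexpr std::uint32_t MakeMappedBufferDesc(std::uint32_t size, std::uint32_t perms) {
    return 0x8u | (perms << 1) | (size << 4);
}

constexpr bool IsMappedBufferDesc(std::uint32_t desc) {
    return (desc & 0x8u) != 0;
}

// Scan the target's translate-params area for a mapped-buffer descriptor whose size and
// permissions match the buffer being replied to, and return the address stored next to it.
// Like the patch, this steps one descriptor/address pair at a time.
std::optional<std::uint32_t> FindReplyTarget(const std::vector<std::uint32_t>& dst_cmd_buf,
                                             std::size_t untranslated_size, std::uint32_t size,
                                             std::uint32_t perms) {
    std::size_t j = untranslated_size;
    while (j + 1 < dst_cmd_buf.size()) {
        const std::uint32_t desc = dst_cmd_buf[j++];
        if (IsMappedBufferDesc(desc)) {
            const std::uint32_t desc_perms = (desc >> 1) & 0x3u;
            const std::uint32_t desc_size = desc >> 4;
            if (desc_perms == perms && desc_size == size) {
                return dst_cmd_buf[j]; // the VAddr word that follows the descriptor
            }
        }
        j += 1; // skip the address word paired with this descriptor
    }
    return std::nullopt; // no matching descriptor: the mapped buffer was unsolicited
}

} // namespace sketch

int main() {
    // Fake destination command buffer: header (command 0x0001, one normal parameter, two
    // translate words), one normal parameter, then a RW mapped buffer of 0x1000 bytes.
    const std::vector<std::uint32_t> dst_cmd_buf{
        0x00010042, 0xdeadbeef, sketch::MakeMappedBufferDesc(0x1000, 3), 0x08000000};
    const std::size_t untranslated_size = 2; // header word + normal params

    if (const auto addr = sketch::FindReplyTarget(dst_cmd_buf, untranslated_size, 0x1000, 3)) {
        std::printf("reply buffer maps back to 0x%08X\n", static_cast<unsigned>(*addr));
    }
    return 0;
}

Matching on size and permissions before copying back and unmapping addresses the TODO this patch removes: the real kernel panics if a reply carries a mapped buffer that the original request never asked for.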