Kernel/IPC: Partially implement MappedBuffer translation.

Right now, only MappedBuffers that span a single page and are not page-aligned are implemented.

MappedBuffers are unmapped during the reply part of ReplyAndReceive. Only unmapping of ReadOnly buffers is currently implemented.
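
For context, a rough sketch of the case this commit covers: a MappedBuffer whose data starts at an unaligned address but still fits inside the page that contains it. Only that page needs to be mapped into the target process, and the translated pointer keeps the original offset within the page. The snippet below is a minimal, self-contained illustration of that idea; the helper names, the PAGE_SIZE/PAGE_MASK constants and the example addresses are assumptions for this sketch, not Citra's actual IPC code.

// Sketch only: hypothetical helpers, not the emulator's real implementation.
#include <cstdint>
#include <cstdio>

using VAddr = std::uint32_t;
constexpr std::uint32_t PAGE_SIZE = 0x1000;
constexpr std::uint32_t PAGE_MASK = PAGE_SIZE - 1;

// True when the buffer starts at an unaligned address and still fits inside
// the single page that contains it -- the only case this commit translates.
bool IsSinglePageUnaligned(VAddr source_address, std::uint32_t size) {
    const VAddr page_offset = source_address & PAGE_MASK;
    return page_offset != 0 && page_offset + size <= PAGE_SIZE;
}

// The containing page is mapped at a free, page-aligned address in the target
// process; the translated address keeps the offset inside that page.
VAddr TranslateMappedBuffer(VAddr source_address, VAddr target_page) {
    return target_page + (source_address & PAGE_MASK);
}

int main() {
    const VAddr src = 0x08001234;     // unaligned source buffer in the caller (made-up address)
    const std::uint32_t size = 0x100; // fits within one page
    if (IsSinglePageUnaligned(src, size)) {
        // target_page would come from something like MapMemoryBlockToBase()
        // finding a free spot in the target process (see the diff below).
        const VAddr target_page = 0x04000000;
        std::printf("translated address: 0x%08X\n",
                    static_cast<unsigned>(TranslateMappedBuffer(src, target_page)));
    }
    return 0;
}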
Author: Subv
Date: 2017-11-07 14:35:17 -05:00
commit a7a5c5aa0d (parent 928202f744)
5 changed files with 102 additions and 14 deletions

@@ -93,7 +93,8 @@ ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
     return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
 }
-ResultVal<VAddr> VMManager::MapMemoryBlockToBase(VAddr base, std::shared_ptr<std::vector<u8>> block,
+ResultVal<VAddr> VMManager::MapMemoryBlockToBase(VAddr base, u32 region_size,
+                                                 std::shared_ptr<std::vector<u8>> block,
                                                  size_t offset, u32 size, MemoryState state) {
     // Find the first Free VMA.
@@ -105,13 +106,15 @@ ResultVal<VAddr> VMManager::MapMemoryBlockToBase(VAddr base, std::shared_ptr<std
         return vma_end > base && vma_end >= base + size;
     });
-    if (vma_handle == vma_map.end()) {
+    VAddr target = std::max(base, vma_handle->second.base);
+    // Do not try to allocate the block if there are no available addresses within the desired
+    // region.
+    if (vma_handle == vma_map.end() || target + size > base + region_size) {
         return ResultCode(ErrorDescription::OutOfMemory, ErrorModule::Kernel,
                           ErrorSummary::OutOfResource, ErrorLevel::Permanent);
     }
-    VAddr target = std::max(base, vma_handle->second.base);
     auto result = MapMemoryBlock(target, block, offset, size, state);
     if (result.Failed())
@@ -373,4 +376,4 @@ void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
         break;
     }
 }
-}
+} // namespace Kernel
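
The new region_size parameter bounds where the block may be placed: the first free address at or above base is rejected if the block would end past base + region_size, and the function then reports an out-of-memory error instead of spilling outside the desired region (presumably the area this commit maps translated IPC buffers into). A small standalone illustration of that bounds check follows; the addresses and sizes are made up for the example.

#include <cstdint>
#include <cstdio>

using VAddr = std::uint32_t;

// Mirrors the check added in the diff: accept the candidate address only if
// the whole block still ends within base + region_size.
bool FitsInRegion(VAddr base, std::uint32_t region_size, VAddr target, std::uint32_t size) {
    return target + size <= base + region_size;
}

int main() {
    const VAddr base = 0x04000000;       // start of the desired region (example value)
    const std::uint32_t region = 0x8000; // region_size
    std::printf("%d\n", FitsInRegion(base, region, 0x04007000, 0x1000)); // 1: ends exactly at the limit
    std::printf("%d\n", FitsInRegion(base, region, 0x04007800, 0x1000)); // 0: would spill past the region
    return 0;
}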