general: rework usages of UNREACHABLE macro
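The hunks below all follow one pattern: call sites that used UNREACHABLE()/UNREACHABLE_MSG() for conditions that can actually occur at runtime (bad guest input, an exhausted GPU address space, double initialisation) are switched to ASSERT(false)/ASSERT_MSG(false, ...), so the failure is reported and the surrounding error-handling path (the `return 0;`, `return NvFence{};`, or `return NvResult::...;` that follows) can still run. The sketch below illustrates the macro semantics this change appears to rely on; it is a hypothetical stand-in, not yuzu's actual common/assert.h, whose definitions differ in detail (logging backend, debug breaks, build-type behaviour).

// Minimal sketch of the assumed macro semantics (hypothetical definitions,
// not the real common/assert.h).
#include <cstdio>
#include <cstdlib>

// Reports the failure and continues; the caller's fallback return still runs.
#define ASSERT_MSG(cond, ...)                                              \
    do {                                                                   \
        if (!(cond)) {                                                     \
            std::fprintf(stderr, "Assertion failed: " __VA_ARGS__);        \
            std::fputc('\n', stderr);                                      \
        }                                                                  \
    } while (0)

// Treated as a hard failure: reports and aborts, so code after it is dead.
#define UNREACHABLE_MSG(...)                                               \
    do {                                                                   \
        std::fprintf(stderr, "Unreachable code reached: " __VA_ARGS__);    \
        std::fputc('\n', stderr);                                          \
        std::abort();                                                      \
    } while (0)

// Before: the abort makes the recovery return unreachable.
unsigned ReserveBefore(bool already_reserved) {
    if (already_reserved) {
        UNREACHABLE_MSG("Requested syncpoint is in use");
        return 0; // never executed
    }
    return 1;
}

// After: the failure is logged and the function still returns a safe value.
unsigned ReserveAfter(bool already_reserved) {
    if (already_reserved) {
        ASSERT_MSG(false, "Requested syncpoint is in use");
        return 0; // executed; the caller can handle the error
    }
    return 1;
}

Under this reading, the fallback returns in the SyncpointManager and nvhost_as_gpu hunks become live error paths rather than dead code following an abort.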
@@ -34,7 +34,7 @@ MAP_MEMBER_CONST()::FlatAddressSpaceMap(VaType va_limit_,
                                          std::function<void(VaType, VaType)> unmap_callback_)
     : va_limit{va_limit_}, unmap_callback{std::move(unmap_callback_)} {
     if (va_limit > VaMaximum) {
-        UNREACHABLE_MSG("Invalid VA limit!");
+        ASSERT_MSG(false, "Invalid VA limit!");
     }
 }

@@ -42,14 +42,14 @@ MAP_MEMBER(void)::MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInf
     VaType virt_end{virt + size};

     if (virt_end > va_limit) {
-        UNREACHABLE_MSG(
-            "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}", virt_end,
-            va_limit);
+        ASSERT_MSG(false,
+                   "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}",
+                   virt_end, va_limit);
     }

     auto block_end_successor{std::lower_bound(blocks.begin(), blocks.end(), virt_end)};
     if (block_end_successor == blocks.begin()) {
-        UNREACHABLE_MSG("Trying to map a block before the VA start: virt_end: 0x{:X}", virt_end);
+        ASSERT_MSG(false, "Trying to map a block before the VA start: virt_end: 0x{:X}", virt_end);
     }

     auto block_end_predecessor{std::prev(block_end_successor)};
@@ -124,7 +124,7 @@ MAP_MEMBER(void)::MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInf

     // Check that the start successor is either the end block or something in between
     if (block_start_successor->virt > virt_end) {
-        UNREACHABLE_MSG("Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt);
+        ASSERT_MSG(false, "Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt);
     } else if (block_start_successor->virt == virt_end) {
         // We need to create a new block as there are none spare that we would overwrite
         blocks.insert(block_start_successor, Block(virt, phys, extra_info));
@@ -149,14 +149,15 @@ MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) {
     VaType virt_end{virt + size};

     if (virt_end > va_limit) {
-        UNREACHABLE_MSG(
-            "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}", virt_end,
-            va_limit);
+        ASSERT_MSG(false,
+                   "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}",
+                   virt_end, va_limit);
     }

     auto block_end_successor{std::lower_bound(blocks.begin(), blocks.end(), virt_end)};
     if (block_end_successor == blocks.begin()) {
-        UNREACHABLE_MSG("Trying to unmap a block before the VA start: virt_end: 0x{:X}", virt_end);
+        ASSERT_MSG(false, "Trying to unmap a block before the VA start: virt_end: 0x{:X}",
+                   virt_end);
     }

     auto block_end_predecessor{std::prev(block_end_successor)};
@@ -190,7 +191,7 @@ MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) {
         if (eraseEnd != blocks.end() &&
             (eraseEnd == block_start_successor ||
              (block_start_predecessor->Unmapped() && eraseEnd->Unmapped()))) {
-            UNREACHABLE_MSG("Multiple contiguous unmapped regions are unsupported!");
+            ASSERT_MSG(false, "Multiple contiguous unmapped regions are unsupported!");
         }

         blocks.erase(block_start_successor, eraseEnd);
@@ -217,7 +218,7 @@ MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) {
         return; // The region is unmapped here and doesn't need splitting, bail out early
     } else if (block_end_successor == blocks.end()) {
         // This should never happen as the end should always follow an unmapped block
-        UNREACHABLE_MSG("Unexpected Memory Manager state!");
+        ASSERT_MSG(false, "Unexpected Memory Manager state!");
     } else if (block_end_successor->virt != virt_end) {
         // If one block is directly in front then we don't have to add a tail

@@ -256,7 +257,7 @@ MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) {
     auto block_start_successor{std::next(block_start_predecessor)};

     if (block_start_successor->virt > virt_end) {
-        UNREACHABLE_MSG("Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt);
+        ASSERT_MSG(false, "Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt);
     } else if (block_start_successor->virt == virt_end) {
         // There are no blocks between the start and the end that would let us skip inserting a new
         // one for head
@@ -298,7 +299,7 @@ ALLOC_MEMBER(VaType)::Allocate(VaType size) {
         auto alloc_end_successor{
             std::lower_bound(this->blocks.begin(), this->blocks.end(), alloc_end)};
         if (alloc_end_successor == this->blocks.begin()) {
-            UNREACHABLE_MSG("First block in AS map is invalid!");
+            ASSERT_MSG(false, "First block in AS map is invalid!");
         }

         auto alloc_end_predecessor{std::prev(alloc_end_successor)};
@@ -332,7 +333,7 @@ ALLOC_MEMBER(VaType)::Allocate(VaType size) {
         current_linear_alloc_end = alloc_start + size;
     } else { // If linear allocation overflows the AS then find a gap
         if (this->blocks.size() <= 2) {
-            UNREACHABLE_MSG("Unexpected allocator state!");
+            ASSERT_MSG(false, "Unexpected allocator state!");
         }

         auto search_predecessor{this->blocks.begin()};

@@ -29,7 +29,7 @@ SyncpointManager::~SyncpointManager() = default;

 u32 SyncpointManager::ReserveSyncpoint(u32 id, bool clientManaged) {
     if (syncpoints.at(id).reserved) {
-        UNREACHABLE_MSG("Requested syncpoint is in use");
+        ASSERT_MSG(false, "Requested syncpoint is in use");
         return 0;
     }

@@ -45,7 +45,7 @@ u32 SyncpointManager::FindFreeSyncpoint() {
             return i;
         }
     }
-    UNREACHABLE_MSG("Failed to find a free syncpoint!");
+    ASSERT_MSG(false, "Failed to find a free syncpoint!");
     return 0;
 }

@@ -68,7 +68,7 @@ bool SyncpointManager::HasSyncpointExpired(u32 id, u32 threshold) {
     const SyncpointInfo& syncpoint{syncpoints.at(id)};

     if (!syncpoint.reserved) {
-        UNREACHABLE();
+        ASSERT(false);
         return 0;
     }

@@ -83,7 +83,7 @@ bool SyncpointManager::HasSyncpointExpired(u32 id, u32 threshold) {

 u32 SyncpointManager::IncrementSyncpointMaxExt(u32 id, u32 amount) {
     if (!syncpoints.at(id).reserved) {
-        UNREACHABLE();
+        ASSERT(false);
         return 0;
     }

@@ -92,7 +92,7 @@ u32 SyncpointManager::IncrementSyncpointMaxExt(u32 id, u32 amount) {

 u32 SyncpointManager::ReadSyncpointMinValue(u32 id) {
     if (!syncpoints.at(id).reserved) {
-        UNREACHABLE();
+        ASSERT(false);
         return 0;
     }

@@ -101,7 +101,7 @@ u32 SyncpointManager::ReadSyncpointMinValue(u32 id) {

 u32 SyncpointManager::UpdateMin(u32 id) {
     if (!syncpoints.at(id).reserved) {
-        UNREACHABLE();
+        ASSERT(false);
         return 0;
     }

@@ -111,7 +111,7 @@ u32 SyncpointManager::UpdateMin(u32 id) {

 NvFence SyncpointManager::GetSyncpointFence(u32 id) {
     if (!syncpoints.at(id).reserved) {
-        UNREACHABLE();
+        ASSERT(false);
         return NvFence{};
     }

@@ -96,7 +96,7 @@ NvResult nvhost_as_gpu::AllocAsEx(const std::vector<u8>& input, std::vector<u8>&
     std::scoped_lock lock(mutex);

     if (vm.initialised) {
-        UNREACHABLE_MSG("Cannot initialise an address space twice!");
+        ASSERT_MSG(false, "Cannot initialise an address space twice!");
         return NvResult::InvalidState;
     }

@@ -174,7 +174,7 @@ NvResult nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<
     } else {
         params.offset = static_cast<u64>(allocator.Allocate(params.pages)) << page_size_bits;
         if (!params.offset) {
-            UNREACHABLE_MSG("Failed to allocate free space in the GPU AS!");
+            ASSERT_MSG(false, "Failed to allocate free space in the GPU AS!");
             return NvResult::InsufficientMemory;
         }
     }
@@ -372,7 +372,7 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
         else if (Common::IsAligned(handle->align, VM::YUZU_PAGESIZE))
             return false;
         else {
-            UNREACHABLE();
+            ASSERT(false);
             return false;
         }
     }()};
@@ -382,7 +382,7 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8

         if (alloc-- == allocation_map.begin() ||
             (params.offset - alloc->first) + size > alloc->second.size) {
-            UNREACHABLE_MSG("Cannot perform a fixed mapping into an unallocated region!");
+            ASSERT_MSG(false, "Cannot perform a fixed mapping into an unallocated region!");
             return NvResult::BadValue;
         }

@@ -403,7 +403,7 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
                             static_cast<u32>(Common::AlignUp(size, page_size) >> page_size_bits)))
                         << page_size_bits;
         if (!params.offset) {
-            UNREACHABLE_MSG("Failed to allocate free space in the GPU AS!");
+            ASSERT_MSG(false, "Failed to allocate free space in the GPU AS!");
             return NvResult::InsufficientMemory;
         }

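For the nvhost_as_gpu hunks specifically, the point of the non-aborting assert is that the ioctl handler can hand an NvResult error back to the guest driver instead of terminating emulation. A rough caller-side illustration follows, assuming a simplified NvResult enum and a hypothetical handler name; neither is the real nvdrv interface.

// Hypothetical, simplified illustration of the converted error path.
#include <cstdio>

enum class NvResult { Success, BadValue, InsufficientMemory };

// Stand-in for an ioctl handler such as MapBufferEx after the change:
// the bad request is reported, then surfaced as a recoverable error code.
NvResult MapFixedIntoUnallocatedRegion() {
    std::fprintf(stderr, "Cannot perform a fixed mapping into an unallocated region!\n");
    return NvResult::BadValue;
}

int main() {
    // The guest-facing caller sees an error code and emulation keeps running,
    // where the old UNREACHABLE_MSG path would have aborted instead.
    if (MapFixedIntoUnallocatedRegion() != NvResult::Success) {
        std::fprintf(stderr, "ioctl rejected, continuing\n");
    }
    return 0;
}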