Compare commits

7 Commits

android-25...android-24

| Author | SHA1 | Date |
|---|---|---|
| | 89ad221c32 | |
| | bda226e29f | |
| | 370c234147 | |
| | 3d86ff4283 | |
| | 7e378a6dfa | |
| | 4a77f7b383 | |
| | d6e29ad2fe | |
```diff
@@ -4,9 +4,8 @@
 | [12461](https://github.com/yuzu-emu/yuzu//pull/12461) | [`acc26667b`](https://github.com/yuzu-emu/yuzu//pull/12461/files) | Rework Nvdec and VIC to fix out-of-order videos, and speed up decoding. | [Kelebek1](https://github.com/Kelebek1/) | Yes |
 | [12749](https://github.com/yuzu-emu/yuzu//pull/12749) | [`aad4b0d6f`](https://github.com/yuzu-emu/yuzu//pull/12749/files) | general: workarounds for SMMU syncing issues | [liamwhite](https://github.com/liamwhite/) | Yes |
 | [13000](https://github.com/yuzu-emu/yuzu//pull/13000) | [`461eaca7e`](https://github.com/yuzu-emu/yuzu//pull/13000/files) | device_memory_manager: skip unregistered interfaces on invalidate | [liamwhite](https://github.com/liamwhite/) | Yes |
-| [13006](https://github.com/yuzu-emu/yuzu//pull/13006) | [`3067bfd12`](https://github.com/yuzu-emu/yuzu//pull/13006/files) | buffer_cache: use mapped range with large vertex buffer size | [liamwhite](https://github.com/liamwhite/) | Yes |
-| [13017](https://github.com/yuzu-emu/yuzu//pull/13017) | [`af4248256`](https://github.com/yuzu-emu/yuzu//pull/13017/files) | kernel: add and enable system suspend type | [liamwhite](https://github.com/liamwhite/) | Yes |
-| [13026](https://github.com/yuzu-emu/yuzu//pull/13026) | [`462ea921e`](https://github.com/yuzu-emu/yuzu//pull/13026/files) | shader_recompiler: fix non-const offset for arrayed image types | [liamwhite](https://github.com/liamwhite/) | Yes |
+| [13011](https://github.com/yuzu-emu/yuzu//pull/13011) | [`1842df1da`](https://github.com/yuzu-emu/yuzu//pull/13011/files) | vi: rewrite for new IPC | [liamwhite](https://github.com/liamwhite/) | Yes |
+| [13016](https://github.com/yuzu-emu/yuzu//pull/13016) | [`caf16982d`](https://github.com/yuzu-emu/yuzu//pull/13016/files) | service: set: Migrate ISystemSettingsServer to new IPC | [german77](https://github.com/german77/) | Yes |
 
 
 End of merge log. You can find the original README.md below the break.
```

```diff
@@ -242,7 +242,7 @@ struct System::Impl {
     void Run() {
         std::unique_lock<std::mutex> lk(suspend_guard);
 
-        kernel.SuspendEmulation(false);
+        kernel.SuspendApplication(false);
         core_timing.SyncPause(false);
         is_paused.store(false, std::memory_order_relaxed);
     }
@@ -251,7 +251,7 @@ struct System::Impl {
         std::unique_lock<std::mutex> lk(suspend_guard);
 
         core_timing.SyncPause(true);
-        kernel.SuspendEmulation(true);
+        kernel.SuspendApplication(true);
         is_paused.store(true, std::memory_order_relaxed);
     }
 
@@ -261,7 +261,7 @@ struct System::Impl {
 
     std::unique_lock<std::mutex> StallApplication() {
         std::unique_lock<std::mutex> lk(suspend_guard);
-        kernel.SuspendEmulation(true);
+        kernel.SuspendApplication(true);
         core_timing.SyncPause(true);
         return lk;
     }
@@ -269,7 +269,7 @@ struct System::Impl {
     void UnstallApplication() {
         if (!IsPaused()) {
            core_timing.SyncPause(false);
-           kernel.SuspendEmulation(false);
+           kernel.SuspendApplication(false);
         }
     }
 
@@ -459,7 +459,7 @@ struct System::Impl {
         }
 
         Network::CancelPendingSocketOperations();
-        kernel.SuspendEmulation(true);
+        kernel.SuspendApplication(true);
         if (services) {
             services->KillNVNFlinger();
         }
```

```diff
@@ -66,7 +66,6 @@ enum class SuspendType : u32 {
     Debug = 2,
     Backtrace = 3,
     Init = 4,
-    System = 5,
 
     Count,
 };
@@ -85,9 +84,8 @@ enum class ThreadState : u16 {
     DebugSuspended = (1 << (2 + SuspendShift)),
     BacktraceSuspended = (1 << (3 + SuspendShift)),
     InitSuspended = (1 << (4 + SuspendShift)),
-    SystemSuspended = (1 << (5 + SuspendShift)),
 
-    SuspendFlagMask = ((1 << 6) - 1) << SuspendShift,
+    SuspendFlagMask = ((1 << 5) - 1) << SuspendShift,
 };
 DECLARE_ENUM_FLAG_OPERATORS(ThreadState);
 
```

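
The `SuspendFlagMask` change above follows directly from dropping `SystemSuspended`: with the sixth suspend reason gone, the mask only needs to cover five flag bits. A standalone sketch of the bit arithmetic (not yuzu code; `SuspendShift = 4` is assumed here purely for illustration):

```cpp
#include <cstdint>

// Minimal sketch (not yuzu code) of the mask arithmetic behind the change above.
// SuspendShift = 4 is assumed here purely for illustration.
constexpr std::uint32_t SuspendShift = 4;

constexpr std::uint32_t DebugSuspended = 1 << (2 + SuspendShift);
constexpr std::uint32_t BacktraceSuspended = 1 << (3 + SuspendShift);
constexpr std::uint32_t InitSuspended = 1 << (4 + SuspendShift);
constexpr std::uint32_t SystemSuspended = 1 << (5 + SuspendShift); // removed by this change

constexpr std::uint32_t OldMask = ((1 << 6) - 1) << SuspendShift;
constexpr std::uint32_t NewMask = ((1 << 5) - 1) << SuspendShift;

// The old six-bit mask is exactly the new five-bit mask plus the bit that
// SystemSuspended used to occupy; the new mask still covers the remaining flags.
static_assert(OldMask == (NewMask | SystemSuspended));
static_assert((NewMask & SystemSuspended) == 0);
static_assert((NewMask & (DebugSuspended | BacktraceSuspended | InitSuspended)) ==
              (DebugSuspended | BacktraceSuspended | InitSuspended));

int main() {}
```
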
```diff
@@ -1204,49 +1204,40 @@ const Kernel::KSharedMemory& KernelCore::GetHidBusSharedMem() const {
     return *impl->hidbus_shared_mem;
 }
 
-void KernelCore::SuspendEmulation(bool suspended) {
+void KernelCore::SuspendApplication(bool suspended) {
     const bool should_suspend{exception_exited || suspended};
-    auto processes = GetProcessList();
-
-    for (auto& process : processes) {
-        KScopedLightLock ll{process->GetListLock()};
-
-        for (auto& thread : process->GetThreadList()) {
-            if (should_suspend) {
-                thread.RequestSuspend(SuspendType::System);
-            } else {
-                thread.Resume(SuspendType::System);
-            }
-        }
-    }
+    const auto activity =
+        should_suspend ? Svc::ProcessActivity::Paused : Svc::ProcessActivity::Runnable;
 
-    if (!should_suspend) {
+    // Get the application process.
+    KScopedAutoObject<KProcess> process = ApplicationProcess();
+    if (process.IsNull()) {
         return;
     }
 
+    // Set the new activity.
+    process->SetActivity(activity);
+
     // Wait for process execution to stop.
-    // KernelCore::SuspendEmulation must be called from locked context,
-    // or we could race another call, interfering with waiting.
-    const auto TryWait = [&]() {
-        KScopedSchedulerLock sl{*this};
+    bool must_wait{should_suspend};
 
-        for (auto& process : processes) {
-            for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
-                if (Scheduler(i).GetSchedulerCurrentThread()->GetOwnerProcess() ==
-                    process.GetPointerUnsafe()) {
-                    // A thread has not finished running yet.
-                    // Continue waiting.
-                    return false;
-                }
-            }
-        }
+    // KernelCore::SuspendApplication must be called from locked context,
+    // or we could race another call to SetActivity, interfering with waiting.
+    while (must_wait) {
+        KScopedSchedulerLock sl{*this};
 
-        return true;
-    };
+        // Assume that all threads have finished running.
+        must_wait = false;
 
-    while (!TryWait()) {
-        // ...
+        for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
+            if (Scheduler(i).GetSchedulerCurrentThread()->GetOwnerProcess() ==
+                process.GetPointerUnsafe()) {
+                // A thread has not finished running yet.
+                // Continue waiting.
+                must_wait = true;
+            }
+        }
     }
 }
 
 void KernelCore::ShutdownCores() {
```

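
The rewritten `SuspendApplication` pauses the application process via `SetActivity` and then, under the scheduler lock, polls every core until none of them is still running one of that process's threads. Below is a minimal sketch of that wait-until-stopped pattern outside yuzu; the real code re-takes `KScopedSchedulerLock` on each iteration, which this sketch stands in for with atomics, and names such as `CoreSlot` and `WaitForOwnerToStop` are illustrative only.

```cpp
#include <array>
#include <atomic>
#include <cstddef>
#include <thread>

// Minimal sketch (not yuzu code) of the wait-for-stop pattern in the hunk above:
// after requesting a pause, keep polling every core slot until none of them is
// still executing work owned by the paused process.
struct CoreSlot {
    std::atomic<int> current_owner{-1}; // -1 means the core is idle
};

constexpr std::size_t NumCores = 4;
std::array<CoreSlot, NumCores> cores;

void WaitForOwnerToStop(int owner) {
    bool must_wait = true;
    while (must_wait) {
        // Assume every core has already finished the owner's work...
        must_wait = false;
        for (const auto& core : cores) {
            if (core.current_owner.load(std::memory_order_acquire) == owner) {
                // ...but keep waiting if some core is still running it.
                must_wait = true;
            }
        }
        std::this_thread::yield();
    }
}

int main() {
    cores[0].current_owner = 7; // core 0 is running the "application"
    std::thread worker([] { cores[0].current_owner.store(-1, std::memory_order_release); });
    WaitForOwnerToStop(7);
    worker.join();
}
```
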
```diff
@@ -1269,7 +1260,7 @@ bool KernelCore::IsShuttingDown() const {
 
 void KernelCore::ExceptionalExitApplication() {
     exception_exited = true;
-    SuspendEmulation(true);
+    SuspendApplication(true);
 }
 
 void KernelCore::EnterSVCProfile() {
```

```diff
@@ -258,8 +258,8 @@ public:
     /// Gets the shared memory object for HIDBus services.
     const Kernel::KSharedMemory& GetHidBusSharedMem() const;
 
-    /// Suspend/unsuspend emulated processes.
-    void SuspendEmulation(bool suspend);
+    /// Suspend/unsuspend application process.
+    void SuspendApplication(bool suspend);
 
     /// Exceptional exit application process.
     void ExceptionalExitApplication();
```

```diff
@@ -60,10 +60,11 @@ public:
         Add(spv::ImageOperandsMask::ConstOffsets, offsets);
     }
 
-    explicit ImageOperands(Id lod, Id ms) {
+    explicit ImageOperands(EmitContext& ctx, const IR::Value& offset, Id lod, Id ms) {
         if (Sirit::ValidId(lod)) {
             Add(spv::ImageOperandsMask::Lod, lod);
         }
+        AddOffset(ctx, offset, ImageFetchOffsetAllowed);
         if (Sirit::ValidId(ms)) {
             Add(spv::ImageOperandsMask::Sample, ms);
         }
```

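
With this change the texel offset for image fetches is handed to `ImageOperands` and emitted as an image operand, rather than folded into the coordinates (the `AddOffsetToCoordinates` helper is removed in the next hunk), which is why the constructor now takes the `EmitContext` and the raw `IR::Value` offset. A rough sketch of the operand-builder pattern the class implements; names and mask values here are illustrative, not the Sirit or SPIR-V definitions:

```cpp
#include <cstdint>
#include <optional>
#include <vector>

// Rough sketch (not the Sirit/SPIR-V API) of the builder pattern ImageOperands
// implements: accumulate an optional-operands bitmask plus the matching operand
// words, then hand both to the image instruction.
enum class OperandBit : std::uint32_t {
    Lod = 1u << 1,
    Offset = 1u << 4,
    Sample = 1u << 6,
};

class OperandBuilder {
public:
    void Add(OperandBit bit, std::uint32_t value) {
        mask |= static_cast<std::uint32_t>(bit);
        operands.push_back(value);
    }

    std::optional<std::uint32_t> MaskOptional() const {
        if (operands.empty()) {
            return std::nullopt;
        }
        return mask;
    }

    const std::vector<std::uint32_t>& Span() const {
        return operands;
    }

private:
    std::uint32_t mask{};
    std::vector<std::uint32_t> operands;
};

int main() {
    OperandBuilder ops;
    ops.Add(OperandBit::Lod, 0);     // explicit LOD word
    ops.Add(OperandBit::Offset, 42); // the fetch offset now travels here,
                                     // not pre-added to the coordinates
    return ops.MaskOptional().has_value() ? 0 : 1;
}
```
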
```diff
@@ -311,43 +312,6 @@ Id ImageGatherSubpixelOffset(EmitContext& ctx, const IR::TextureInstInfo& info,
         return coords;
     }
 }
 
-void AddOffsetToCoordinates(EmitContext& ctx, const IR::TextureInstInfo& info, Id& coords,
-                            Id offset) {
-    if (!Sirit::ValidId(offset)) {
-        return;
-    }
-
-    Id result_type{};
-    switch (info.type) {
-    case TextureType::Buffer:
-    case TextureType::Color1D: {
-        result_type = ctx.U32[1];
-        break;
-    }
-    case TextureType::ColorArray1D:
-        offset = ctx.OpCompositeConstruct(ctx.U32[2], offset, ctx.u32_zero_value);
-        [[fallthrough]];
-    case TextureType::Color2D:
-    case TextureType::Color2DRect: {
-        result_type = ctx.U32[2];
-        break;
-    }
-    case TextureType::ColorArray2D:
-        offset = ctx.OpCompositeConstruct(ctx.U32[3], ctx.OpCompositeExtract(ctx.U32[1], coords, 0),
-                                          ctx.OpCompositeExtract(ctx.U32[1], coords, 1),
-                                          ctx.u32_zero_value);
-        [[fallthrough]];
-    case TextureType::Color3D: {
-        result_type = ctx.U32[3];
-        break;
-    }
-    case TextureType::ColorCube:
-    case TextureType::ColorArrayCube:
-        return;
-    }
-    coords = ctx.OpIAdd(result_type, coords, offset);
-}
-
 } // Anonymous namespace
 
 Id EmitBindlessImageSampleImplicitLod(EmitContext&) {
```

```diff
@@ -530,10 +494,9 @@ Id EmitImageGatherDref(EmitContext& ctx, IR::Inst* inst, const IR::Value& index,
                 operands.Span());
 }
 
-Id EmitImageFetch(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords, Id offset,
-                  Id lod, Id ms) {
+Id EmitImageFetch(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+                  const IR::Value& offset, Id lod, Id ms) {
     const auto info{inst->Flags<IR::TextureInstInfo>()};
-    AddOffsetToCoordinates(ctx, info, coords, offset);
     if (info.type == TextureType::Buffer) {
         lod = Id{};
     }
@@ -541,7 +504,7 @@ Id EmitImageFetch(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id c
         // This image is multisampled, lod must be implicit
         lod = Id{};
     }
-    const ImageOperands operands(lod, ms);
+    const ImageOperands operands(ctx, offset, lod, ms);
     return Emit(&EmitContext::OpImageSparseFetch, &EmitContext::OpImageFetch, ctx, inst, ctx.F32[4],
                 TextureImage(ctx, info, index), coords, operands.MaskOptional(), operands.Span());
 }
```

```diff
@@ -537,8 +537,8 @@ Id EmitImageGather(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id
                    const IR::Value& offset, const IR::Value& offset2);
 Id EmitImageGatherDref(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
                        const IR::Value& offset, const IR::Value& offset2, Id dref);
-Id EmitImageFetch(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords, Id offset,
-                  Id lod, Id ms);
+Id EmitImageFetch(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+                  const IR::Value& offset, Id lod, Id ms);
 Id EmitImageQueryDimensions(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id lod,
                             const IR::Value& skip_mips);
 Id EmitImageQueryLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords);
```

```diff
@@ -1130,7 +1130,7 @@ void BufferCache<P>::UpdateVertexBuffer(u32 index) {
         channel_state->vertex_buffers[index] = NULL_BINDING;
         return;
     }
-    if (!gpu_memory->IsWithinGPUAddressRange(gpu_addr_end) || size >= 64_MiB) {
+    if (!gpu_memory->IsWithinGPUAddressRange(gpu_addr_end)) {
         size = static_cast<u32>(gpu_memory->MaxContinuousRange(gpu_addr_begin, size));
     }
     const BufferId buffer_id = FindBuffer(*device_addr, size);
```