nvdrv: add ioctl command serialization, convert nvhost_as_gpu
parent 008d7e8c5f
commit 6256e3ca8e
@@ -0,0 +1,107 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <span>
#include <vector>

#include "common/concepts.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"

namespace Service::Nvidia::Devices {

struct Ioctl1Traits {
    template <typename T, typename R, typename A>
    static T GetClassImpl(R (T::*)(A));

    template <typename T, typename R, typename A>
    static A GetArgImpl(R (T::*)(A));
};

struct Ioctl23Traits {
    template <typename T, typename R, typename A, typename B>
    static T GetClassImpl(R (T::*)(A, B));

    template <typename T, typename R, typename A, typename B>
    static A GetArgImpl(R (T::*)(A, B));
};

template <typename T>
struct ContainerType {
    using ValueType = T;
};

template <Common::IsContiguousContainer T>
struct ContainerType<T> {
    using ValueType = T::value_type;
};

template <typename InnerArg, typename F, typename Self, typename... Rest>
NvResult Wrap(std::span<const u8> input, std::span<u8> output, Self* self, F&& callable,
              Rest&&... rest) {
    using Arg = ContainerType<InnerArg>::ValueType;
    constexpr bool ArgumentIsContainer = Common::IsContiguousContainer<InnerArg>;

    // Verify that the input and output sizes are valid.
    const size_t in_params = input.size() / sizeof(Arg);
    const size_t out_params = output.size() / sizeof(Arg);
    if (in_params * sizeof(Arg) != input.size()) {
        return NvResult::InvalidSize;
    }
    if (out_params * sizeof(Arg) != output.size()) {
        return NvResult::InvalidSize;
    }
    if (in_params == 0 && out_params == 0 && !ArgumentIsContainer) {
        return NvResult::InvalidSize;
    }

    // Copy inputs, if needed.
    std::vector<Arg> params(std::max(in_params, out_params));
    if (in_params > 0) {
        std::memcpy(params.data(), input.data(), input.size());
    }

    // Perform the call.
    NvResult result;
    if constexpr (ArgumentIsContainer) {
        result = (self->*callable)(params, std::forward<Rest>(rest)...);
    } else {
        result = (self->*callable)(params.front(), std::forward<Rest>(rest)...);
    }

    // Copy outputs, if needed.
    if (out_params > 0) {
        std::memcpy(output.data(), params.data(), output.size());
    }

    return result;
}

template <typename F>
NvResult nvdevice::Wrap1(F&& callable, std::span<const u8> input, std::span<u8> output) {
    using Self = decltype(Ioctl1Traits::GetClassImpl(callable));
    using InnerArg = std::remove_reference_t<decltype(Ioctl1Traits::GetArgImpl(callable))>;

    return Wrap<InnerArg>(input, output, static_cast<Self*>(this), callable);
}

template <typename F>
NvResult nvdevice::Wrap2(F&& callable, std::span<const u8> input, std::span<const u8> inline_input,
                         std::span<u8> output) {
    using Self = decltype(Ioctl23Traits::GetClassImpl(callable));
    using InnerArg = std::remove_reference_t<decltype(Ioctl23Traits::GetArgImpl(callable))>;

    return Wrap<InnerArg>(input, output, static_cast<Self*>(this), callable, inline_input);
}

template <typename F>
NvResult nvdevice::Wrap3(F&& callable, std::span<const u8> input, std::span<u8> output,
                         std::span<u8> inline_output) {
    using Self = decltype(Ioctl23Traits::GetClassImpl(callable));
    using InnerArg = std::remove_reference_t<decltype(Ioctl23Traits::GetArgImpl(callable))>;

    return Wrap<InnerArg>(input, output, static_cast<Self*>(this), callable, inline_output);
}

} // namespace Service::Nvidia::Devices
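The Ioctl1Traits/Ioctl23Traits helpers above are never defined; they exist only so decltype can pull the device class and the handler's first parameter type out of a member-function pointer. A minimal sketch of that deduction, using the header above plus yuzu's NvResult/u32 types (the ExampleDevice/IoctlExample/DoExample names are hypothetical and not part of the commit):

#include <type_traits>

struct ExampleDevice {
    struct IoctlExample {
        u32 value;
    };
    NvResult DoExample(IoctlExample& params);
};

// &ExampleDevice::DoExample has type NvResult (ExampleDevice::*)(IoctlExample&), so
// the traits recover the class type and the (reference-stripped) argument type.
using Self = decltype(Ioctl1Traits::GetClassImpl(&ExampleDevice::DoExample));
using InnerArg = std::remove_reference_t<decltype(Ioctl1Traits::GetArgImpl(&ExampleDevice::DoExample))>;
static_assert(std::is_same_v<Self, ExampleDevice>);
static_assert(std::is_same_v<InnerArg, ExampleDevice::IoctlExample>);

// Wrap1 performs exactly this deduction for the member function it is handed and then
// forwards to Wrap<InnerArg>, which copies `input` into an IoctlExample, calls
// DoExample(params), and copies the struct back into `output`.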
@@ -74,6 +74,18 @@ public:
        return nullptr;
    }

+protected:
+    template <typename F>
+    NvResult Wrap1(F&& callable, std::span<const u8> input, std::span<u8> output);
+
+    template <typename F>
+    NvResult Wrap2(F&& callable, std::span<const u8> input, std::span<const u8> inline_input,
+                   std::span<u8> output);
+
+    template <typename F>
+    NvResult Wrap3(F&& callable, std::span<const u8> input, std::span<u8> output,
+                   std::span<u8> inline_output);
+
protected:
    Core::System& system;
};
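The three wrappers declared above mirror the three ioctl entry points: Wrap1 for plain commands, Wrap2 for commands that also receive an inline input buffer, and Wrap3 for commands that also produce an inline output buffer. A hedged sketch of how a converted Ioctl3-style handler plugs in (hypothetical device and struct names; the constructor and the remaining nvdevice overrides are elided, so this shows only the shape):

class example_device : public nvdevice {
public:
    NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                    std::span<u8> inline_output) override {
        // Ioctl23Traits deduces only the first handler parameter (IoctlQuery); the
        // inline buffer is forwarded unchanged through Wrap's trailing parameter pack.
        return Wrap3(&example_device::Query, input, output, inline_output);
    }

private:
    struct IoctlQuery {
        u32 count;
    };

    NvResult Query(IoctlQuery& params, std::span<u8> inline_output);
};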
@@ -11,6 +11,7 @@
#include "core/core.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/core/nvmap.h"
+#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
#include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h"
#include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
#include "core/hle/service/nvdrv/nvdrv.h"
@@ -33,21 +34,21 @@ NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> i
    case 'A':
        switch (command.cmd) {
        case 0x1:
-            return BindChannel(input, output);
+            return Wrap1(&nvhost_as_gpu::BindChannel, input, output);
        case 0x2:
-            return AllocateSpace(input, output);
+            return Wrap1(&nvhost_as_gpu::AllocateSpace, input, output);
        case 0x3:
-            return FreeSpace(input, output);
+            return Wrap1(&nvhost_as_gpu::FreeSpace, input, output);
        case 0x5:
-            return UnmapBuffer(input, output);
+            return Wrap1(&nvhost_as_gpu::UnmapBuffer, input, output);
        case 0x6:
-            return MapBufferEx(input, output);
+            return Wrap1(&nvhost_as_gpu::MapBufferEx, input, output);
        case 0x8:
-            return GetVARegions(input, output);
+            return Wrap1(&nvhost_as_gpu::GetVARegions1, input, output);
        case 0x9:
-            return AllocAsEx(input, output);
+            return Wrap1(&nvhost_as_gpu::AllocAsEx, input, output);
        case 0x14:
-            return Remap(input, output);
+            return Wrap1(&nvhost_as_gpu::Remap, input, output);
        default:
            break;
        }
@@ -72,7 +73,7 @@ NvResult nvhost_as_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> i
    case 'A':
        switch (command.cmd) {
        case 0x8:
-            return GetVARegions(input, output, inline_output);
+            return Wrap3(&nvhost_as_gpu::GetVARegions3, input, output, inline_output);
        default:
            break;
        }
@@ -87,10 +88,7 @@ NvResult nvhost_as_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> i
void nvhost_as_gpu::OnOpen(DeviceFD fd) {}
void nvhost_as_gpu::OnClose(DeviceFD fd) {}

-NvResult nvhost_as_gpu::AllocAsEx(std::span<const u8> input, std::span<u8> output) {
-    IoctlAllocAsEx params{};
-    std::memcpy(&params, input.data(), input.size());
-
+NvResult nvhost_as_gpu::AllocAsEx(IoctlAllocAsEx& params) {
    LOG_DEBUG(Service_NVDRV, "called, big_page_size=0x{:X}", params.big_page_size);

    std::scoped_lock lock(mutex);
@@ -141,10 +139,7 @@ NvResult nvhost_as_gpu::AllocAsEx(std::span<const u8> input, std::span<u8> outpu
    return NvResult::Success;
}

-NvResult nvhost_as_gpu::AllocateSpace(std::span<const u8> input, std::span<u8> output) {
-    IoctlAllocSpace params{};
-    std::memcpy(&params, input.data(), input.size());
-
+NvResult nvhost_as_gpu::AllocateSpace(IoctlAllocSpace& params) {
    LOG_DEBUG(Service_NVDRV, "called, pages={:X}, page_size={:X}, flags={:X}", params.pages,
              params.page_size, params.flags);

@@ -194,7 +189,6 @@ NvResult nvhost_as_gpu::AllocateSpace(std::span<const u8> input, std::span<u8> o
        .big_pages = params.page_size != VM::YUZU_PAGESIZE,
    };

-    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

@@ -222,10 +216,7 @@ void nvhost_as_gpu::FreeMappingLocked(u64 offset) {
    mapping_map.erase(offset);
}

-NvResult nvhost_as_gpu::FreeSpace(std::span<const u8> input, std::span<u8> output) {
-    IoctlFreeSpace params{};
-    std::memcpy(&params, input.data(), input.size());
-
+NvResult nvhost_as_gpu::FreeSpace(IoctlFreeSpace& params) {
    LOG_DEBUG(Service_NVDRV, "called, offset={:X}, pages={:X}, page_size={:X}", params.offset,
              params.pages, params.page_size);

@@ -264,18 +255,11 @@ NvResult nvhost_as_gpu::FreeSpace(std::span<const u8> input, std::span<u8> outpu
        return NvResult::BadValue;
    }

-    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

-NvResult nvhost_as_gpu::Remap(std::span<const u8> input, std::span<u8> output) {
-    const auto num_entries = input.size() / sizeof(IoctlRemapEntry);
-
-    LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", num_entries);
-
-    std::scoped_lock lock(mutex);
-    entries.resize_destructive(num_entries);
-    std::memcpy(entries.data(), input.data(), input.size());
+NvResult nvhost_as_gpu::Remap(std::span<IoctlRemapEntry> entries) {
+    LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", entries.size());

    if (!vm.initialised) {
        return NvResult::BadValue;
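Remap is the one converted handler whose parameter is a container rather than a single struct, so it exercises the ContainerType/IsContiguousContainer branch of Wrap instead of the single-struct path. A hedged walk-through of that path, reusing the definitions from ioctl_serialization.h above (the entry count is illustrative):

// InnerArg deduced from &nvhost_as_gpu::Remap is std::span<IoctlRemapEntry>, which
// satisfies Common::IsContiguousContainer, so Arg becomes the element type.
using InnerArg = std::span<IoctlRemapEntry>;
using Arg = ContainerType<InnerArg>::ValueType;
static_assert(std::is_same_v<Arg, IoctlRemapEntry>);

// For an input payload of, say, 3 entries:
//   in_params = input.size() / sizeof(IoctlRemapEntry) == 3
//   params    = std::vector<IoctlRemapEntry>(3), filled by memcpy from input
//   (self->*callable)(params)   // the vector converts implicitly to std::span<IoctlRemapEntry>
// Because ArgumentIsContainer is true, an empty payload (0 entries) is still accepted
// rather than being rejected by the "both sizes zero" check in Wrap.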
@@ -317,14 +301,10 @@ NvResult nvhost_as_gpu::Remap(std::span<const u8> input, std::span<u8> output) {
        }
    }

-    std::memcpy(output.data(), entries.data(), output.size());
    return NvResult::Success;
}

-NvResult nvhost_as_gpu::MapBufferEx(std::span<const u8> input, std::span<u8> output) {
-    IoctlMapBufferEx params{};
-    std::memcpy(&params, input.data(), input.size());
-
+NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
    LOG_DEBUG(Service_NVDRV,
              "called, flags={:X}, nvmap_handle={:X}, buffer_offset={}, mapping_size={}"
              ", offset={}",
@@ -421,14 +401,10 @@ NvResult nvhost_as_gpu::MapBufferEx(std::span<const u8> input, std::span<u8> out
        mapping_map[params.offset] = mapping;
    }

-    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

-NvResult nvhost_as_gpu::UnmapBuffer(std::span<const u8> input, std::span<u8> output) {
-    IoctlUnmapBuffer params{};
-    std::memcpy(&params, input.data(), input.size());
-
+NvResult nvhost_as_gpu::UnmapBuffer(IoctlUnmapBuffer& params) {
    LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset);

    std::scoped_lock lock(mutex);
@@ -464,9 +440,7 @@ NvResult nvhost_as_gpu::UnmapBuffer(std::span<const u8> input, std::span<u8> out
    return NvResult::Success;
}

-NvResult nvhost_as_gpu::BindChannel(std::span<const u8> input, std::span<u8> output) {
-    IoctlBindChannel params{};
-    std::memcpy(&params, input.data(), input.size());
+NvResult nvhost_as_gpu::BindChannel(IoctlBindChannel& params) {
    LOG_DEBUG(Service_NVDRV, "called, fd={:X}", params.fd);

    auto gpu_channel_device = module.GetDevice<nvhost_gpu>(params.fd);
@@ -493,10 +467,7 @@ void nvhost_as_gpu::GetVARegionsImpl(IoctlGetVaRegions& params) {
    };
}

-NvResult nvhost_as_gpu::GetVARegions(std::span<const u8> input, std::span<u8> output) {
-    IoctlGetVaRegions params{};
-    std::memcpy(&params, input.data(), input.size());
-
+NvResult nvhost_as_gpu::GetVARegions1(IoctlGetVaRegions& params) {
    LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
              params.buf_size);

@@ -508,15 +479,10 @@ NvResult nvhost_as_gpu::GetVARegions(std::span<const u8> input, std::span<u8> ou

    GetVARegionsImpl(params);

-    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

-NvResult nvhost_as_gpu::GetVARegions(std::span<const u8> input, std::span<u8> output,
-                                     std::span<u8> inline_output) {
-    IoctlGetVaRegions params{};
-    std::memcpy(&params, input.data(), input.size());
-
+NvResult nvhost_as_gpu::GetVARegions3(IoctlGetVaRegions& params, std::span<u8> inline_output) {
    LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
              params.buf_size);

@@ -528,9 +494,7 @@ NvResult nvhost_as_gpu::GetVARegions(std::span<const u8> input, std::span<u8> ou

    GetVARegionsImpl(params);

-    std::memcpy(output.data(), &params, output.size());
-    std::memcpy(inline_output.data(), &params.regions[0], sizeof(VaRegion));
-    std::memcpy(inline_output.data() + sizeof(VaRegion), &params.regions[1], sizeof(VaRegion));
+    std::memcpy(inline_output.data(), params.regions.data(), 2 * sizeof(VaRegion));

    return NvResult::Success;
}
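The two per-region copies collapse into a single contiguous copy because the regions member provides contiguous storage (the size static_assert in the next hunk suggests a std::array<VaRegion, 2>, though that declaration is not shown in this diff). A hedged equivalence check under that assumption:

// Assuming IoctlGetVaRegions::regions is std::array<VaRegion, 2>, copying
// 2 * sizeof(VaRegion) bytes from regions.data() writes exactly the same bytes as
// the two element-wise memcpys it replaces.
static_assert(sizeof(decltype(IoctlGetVaRegions::regions)) == 2 * sizeof(VaRegion));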
@@ -139,18 +139,17 @@ private:
    static_assert(sizeof(IoctlGetVaRegions) == 16 + sizeof(VaRegion) * 2,
                  "IoctlGetVaRegions is incorrect size");

-    NvResult AllocAsEx(std::span<const u8> input, std::span<u8> output);
-    NvResult AllocateSpace(std::span<const u8> input, std::span<u8> output);
-    NvResult Remap(std::span<const u8> input, std::span<u8> output);
-    NvResult MapBufferEx(std::span<const u8> input, std::span<u8> output);
-    NvResult UnmapBuffer(std::span<const u8> input, std::span<u8> output);
-    NvResult FreeSpace(std::span<const u8> input, std::span<u8> output);
-    NvResult BindChannel(std::span<const u8> input, std::span<u8> output);
+    NvResult AllocAsEx(IoctlAllocAsEx& params);
+    NvResult AllocateSpace(IoctlAllocSpace& params);
+    NvResult Remap(std::span<IoctlRemapEntry> params);
+    NvResult MapBufferEx(IoctlMapBufferEx& params);
+    NvResult UnmapBuffer(IoctlUnmapBuffer& params);
+    NvResult FreeSpace(IoctlFreeSpace& params);
+    NvResult BindChannel(IoctlBindChannel& params);

    void GetVARegionsImpl(IoctlGetVaRegions& params);
-    NvResult GetVARegions(std::span<const u8> input, std::span<u8> output);
-    NvResult GetVARegions(std::span<const u8> input, std::span<u8> output,
-                          std::span<u8> inline_output);
+    NvResult GetVARegions1(IoctlGetVaRegions& params);
+    NvResult GetVARegions3(IoctlGetVaRegions& params, std::span<u8> inline_output);

    void FreeMappingLocked(u64 offset);

@@ -213,7 +212,6 @@ private:
        bool initialised{};
    } vm;
    std::shared_ptr<Tegra::MemoryManager> gmmu;
-    Common::ScratchBuffer<IoctlRemapEntry> entries;

    // s32 channel{};
    // u32 big_page_size{VM::DEFAULT_BIG_PAGE_SIZE};
@@ -134,7 +134,7 @@ NvResult nvhost_gpu::SetClientData(std::span<const u8> input, std::span<u8> outp
    LOG_DEBUG(Service_NVDRV, "called");

    IoctlClientData params{};
-    std::memcpy(&params, input.data(), input.size());
+    std::memcpy(&params, input.data(), std::min(sizeof(IoctlClientData), input.size()));
    user_data = params.data;
    return NvResult::Success;
}
@@ -143,9 +143,9 @@ NvResult nvhost_gpu::GetClientData(std::span<const u8> input, std::span<u8> outp
    LOG_DEBUG(Service_NVDRV, "called");

    IoctlClientData params{};
-    std::memcpy(&params, input.data(), input.size());
+    std::memcpy(&params, input.data(), std::min(sizeof(IoctlClientData), input.size()));
    params.data = user_data;
-    std::memcpy(output.data(), &params, output.size());
+    std::memcpy(output.data(), &params, std::min(sizeof(IoctlClientData), output.size()));
    return NvResult::Success;
}

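The nvhost_gpu changes are a small hardening tweak riding along with the conversion: the copy length is clamped so a guest payload larger than IoctlClientData can no longer overflow the local struct, and the reply copy never reads past it. The same pattern as a stand-alone sketch (the helper names are hypothetical and not part of the commit; u8 is yuzu's byte alias):

#include <algorithm>
#include <cstring>
#include <span>

// Copy at most sizeof(T) bytes in either direction: extra guest bytes are ignored,
// and a short buffer simply provides or receives fewer bytes.
template <typename T>
void CopyInClamped(T& params, std::span<const u8> input) {
    std::memcpy(&params, input.data(), std::min(sizeof(T), input.size()));
}

template <typename T>
void CopyOutClamped(std::span<u8> output, const T& params) {
    std::memcpy(output.data(), &params, std::min(sizeof(T), output.size()));
}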