Compare commits

...

21 Commits

Author SHA1 Message Date
8800daa974 Android #10 2023-07-16 00:58:33 +00:00
04868ab9da file_sys/content_archive: Detect compressed NCAs (#11047) 2023-07-12 23:17:18 +02:00
28598c9090 Merge pull request #10985 from liamwhite/handle-translate
k_server_session: translate special header for non-HLE requests
2023-07-11 16:49:24 -07:00
11cb4d88f0 Merge pull request #11070 from t895/home-setting-warning
android: Visualize disabled home options
2023-07-11 16:48:57 -07:00
4b870b28e8 android: Visualize disabled home options
Allow the home options list to display disabled options, along with messages that explain why they are disabled.

This includes reasoning for the GPU driver installation button.
2023-07-10 22:17:19 -04:00
ce7c418e0c Merge pull request #10996 from Kelebek1/readblock_optimisation
Use spans over guest memory where possible instead of copying data
2023-07-10 18:54:19 -07:00
169b198d08 Merge pull request #11050 from SuperSamus/sdl-button-labels
input_common: set `SDL_HINT_GAMECONTROLLER_USE_BUTTON_LABELS` to 0
2023-07-10 18:53:16 -07:00
e32ce6cc69 Merge pull request #11067 from t895/fragile-data
android: Don't prompt to save user data on uninstall
2023-07-09 17:20:14 -07:00
190748546d Merge pull request #11064 from lat9nq/mingw-no-install-pefile
ci/mingw: Remove pefile installation step
2023-07-09 19:27:52 -04:00
79e289404b Merge pull request #11055 from lat9nq/tzdb-catch-
settings: Catch runtime error from STL
2023-07-09 19:27:41 -04:00
a6e467cd55 android: Don't prompt to save user data on uninstall
While this can be convenient in some scenarios, it is a big problem for users trying to sideload different APK versions. If they forget which one they last installed, they could run into problems installing a new copy.
2023-07-09 19:02:42 -04:00
8a87a41f2d Merge pull request #11063 from liamwhite/oops
arm_interface: correct breakpoint rewind condition
2023-07-09 16:24:49 -04:00
f02226283e ci/mingw: Remove pefile installation step
This is unnecessary here: pefile is already installed on the container.
This step also causes issues with upcoming changes to the container.
2023-07-09 16:07:43 -04:00
82568412f6 arm_interface: correct breakpoint rewind condition 2023-07-09 12:03:25 -04:00
1255196731 settings: Catch runtime error from STL
This function throws a runtime error on old Windows 10 installs, so
catch it here rather than disabling this path for everybody.
2023-07-09 02:26:58 -04:00
9ce5d39829 Merge pull request #11030 from lat9nq/tz-restrict-msvc
settings: Disable C++20 time zone path on MSVC
2023-07-09 01:45:00 -04:00
4540bcfaf7 k_server_session: translate special header for non-HLE requests 2023-07-08 01:01:49 -04:00
aa882cdaa8 input_common: set SDL_HINT_GAMECONTROLLER_USE_BUTTON_LABELS to 0
This allows the mappings to be shared between Nintendo and non-Nintendo controllers.
Breaks the controller configuration for existing users who are using a Nintendo controller.

(Documentation of the hint 92b3c53c92/include/SDL_hints.h (L512-L532))
2023-07-07 21:59:47 +02:00
302a735135 settings: Disable C++20 path on MSVC
Even though it compiles and runs fine on the latest Windows versions,
older LTSC builds crash due to missing support somewhere in the OS.

For now just disable it for MSVC until either Microsoft fixes this or we
no longer support 1809 LTSC.
2023-07-05 15:58:12 -04:00
f1cfd9c219 Fix ScratchBuffer moves 2023-07-04 16:02:58 +01:00
6f7cb69c94 Use spans over guest memory where possible instead of copying data. 2023-07-02 23:09:48 +01:00
39 changed files with 1395 additions and 343 deletions

View File

@ -56,7 +56,6 @@ for i in package/*.exe; do
x86_64-w64-mingw32-strip "${i}"
done
pip3 install pefile
python3 .ci/scripts/windows/scan_dll.py package/*.exe package/imageformats/*.dll "package/"
# copy FFmpeg libraries

View File

@ -1,3 +1,11 @@
| Pull Request | Commit | Title | Author | Merged? |
|----|----|----|----|----|
End of merge log. You can find the original README.md below the break.
-----
<!--
SPDX-FileCopyrightText: 2018 yuzu Emulator Project
SPDX-License-Identifier: GPL-2.0-or-later

View File

@ -22,7 +22,7 @@ SPDX-License-Identifier: GPL-3.0-or-later
android:label="@string/app_name_suffixed"
android:icon="@drawable/ic_launcher"
android:allowBackup="true"
android:hasFragileUserData="true"
android:hasFragileUserData="false"
android:supportsRtl="true"
android:isGame="true"
android:localeConfig="@xml/locales_config"

View File

@ -12,6 +12,7 @@ import androidx.core.content.res.ResourcesCompat
import androidx.recyclerview.widget.RecyclerView
import org.yuzu.yuzu_emu.R
import org.yuzu.yuzu_emu.databinding.CardHomeOptionBinding
import org.yuzu.yuzu_emu.fragments.MessageDialogFragment
import org.yuzu.yuzu_emu.model.HomeSetting
class HomeSettingAdapter(private val activity: AppCompatActivity, var options: List<HomeSetting>) :
@ -34,7 +35,14 @@ class HomeSettingAdapter(private val activity: AppCompatActivity, var options: List<HomeSetting>) :
override fun onClick(view: View) {
val holder = view.tag as HomeOptionViewHolder
holder.option.onClick.invoke()
if (holder.option.isEnabled.invoke()) {
holder.option.onClick.invoke()
} else {
MessageDialogFragment.newInstance(
holder.option.disabledTitleId,
holder.option.disabledMessageId
).show(activity.supportFragmentManager, MessageDialogFragment.TAG)
}
}
inner class HomeOptionViewHolder(val binding: CardHomeOptionBinding) :
@ -65,6 +73,12 @@ class HomeSettingAdapter(private val activity: AppCompatActivity, var options: List<HomeSetting>) :
R.drawable.premium_background
)
}
if (!option.isEnabled.invoke()) {
binding.optionTitle.alpha = 0.5f
binding.optionDescription.alpha = 0.5f
binding.optionIcon.alpha = 0.5f
}
}
}
}

View File

@ -73,102 +73,113 @@ class HomeSettingsFragment : Fragment() {
HomeSetting(
R.string.advanced_settings,
R.string.settings_description,
R.drawable.ic_settings
) { SettingsActivity.launch(requireContext(), SettingsFile.FILE_NAME_CONFIG, "") }
R.drawable.ic_settings,
{ SettingsActivity.launch(requireContext(), SettingsFile.FILE_NAME_CONFIG, "") }
)
)
add(
HomeSetting(
R.string.open_user_folder,
R.string.open_user_folder_description,
R.drawable.ic_folder_open
) { openFileManager() }
R.drawable.ic_folder_open,
{ openFileManager() }
)
)
add(
HomeSetting(
R.string.preferences_theme,
R.string.theme_and_color_description,
R.drawable.ic_palette
) { SettingsActivity.launch(requireContext(), Settings.SECTION_THEME, "") }
)
if (GpuDriverHelper.supportsCustomDriverLoading()) {
add(
HomeSetting(
R.string.install_gpu_driver,
R.string.install_gpu_driver_description,
R.drawable.ic_exit
) { driverInstaller() }
R.drawable.ic_palette,
{ SettingsActivity.launch(requireContext(), Settings.SECTION_THEME, "") }
)
}
)
add(
HomeSetting(
R.string.install_gpu_driver,
R.string.install_gpu_driver_description,
R.drawable.ic_exit,
{ driverInstaller() },
{ GpuDriverHelper.supportsCustomDriverLoading() },
R.string.custom_driver_not_supported,
R.string.custom_driver_not_supported_description
)
)
add(
HomeSetting(
R.string.install_amiibo_keys,
R.string.install_amiibo_keys_description,
R.drawable.ic_nfc
) { mainActivity.getAmiiboKey.launch(arrayOf("*/*")) }
R.drawable.ic_nfc,
{ mainActivity.getAmiiboKey.launch(arrayOf("*/*")) }
)
)
add(
HomeSetting(
R.string.install_game_content,
R.string.install_game_content_description,
R.drawable.ic_system_update_alt
) { mainActivity.installGameUpdate.launch(arrayOf("*/*")) }
R.drawable.ic_system_update_alt,
{ mainActivity.installGameUpdate.launch(arrayOf("*/*")) }
)
)
add(
HomeSetting(
R.string.select_games_folder,
R.string.select_games_folder_description,
R.drawable.ic_add
) {
mainActivity.getGamesDirectory.launch(
Intent(Intent.ACTION_OPEN_DOCUMENT_TREE).data
)
}
R.drawable.ic_add,
{
mainActivity.getGamesDirectory.launch(
Intent(Intent.ACTION_OPEN_DOCUMENT_TREE).data
)
}
)
)
add(
HomeSetting(
R.string.manage_save_data,
R.string.import_export_saves_description,
R.drawable.ic_save
) {
ImportExportSavesFragment().show(
parentFragmentManager,
ImportExportSavesFragment.TAG
)
}
R.drawable.ic_save,
{
ImportExportSavesFragment().show(
parentFragmentManager,
ImportExportSavesFragment.TAG
)
}
)
)
add(
HomeSetting(
R.string.install_prod_keys,
R.string.install_prod_keys_description,
R.drawable.ic_unlock
) { mainActivity.getProdKey.launch(arrayOf("*/*")) }
R.drawable.ic_unlock,
{ mainActivity.getProdKey.launch(arrayOf("*/*")) }
)
)
add(
HomeSetting(
R.string.install_firmware,
R.string.install_firmware_description,
R.drawable.ic_firmware
) { mainActivity.getFirmware.launch(arrayOf("application/zip")) }
R.drawable.ic_firmware,
{ mainActivity.getFirmware.launch(arrayOf("application/zip")) }
)
)
add(
HomeSetting(
R.string.share_log,
R.string.share_log_description,
R.drawable.ic_log
) { shareLog() }
R.drawable.ic_log,
{ shareLog() }
)
)
add(
HomeSetting(
R.string.about,
R.string.about_description,
R.drawable.ic_info_outline
) {
exitTransition = MaterialSharedAxis(MaterialSharedAxis.X, true)
parentFragmentManager.primaryNavigationFragment?.findNavController()
?.navigate(R.id.action_homeSettingsFragment_to_aboutFragment)
}
R.drawable.ic_info_outline,
{
exitTransition = MaterialSharedAxis(MaterialSharedAxis.X, true)
parentFragmentManager.primaryNavigationFragment?.findNavController()
?.navigate(R.id.action_homeSettingsFragment_to_aboutFragment)
}
)
)
}
@ -178,12 +189,13 @@ class HomeSettingsFragment : Fragment() {
HomeSetting(
R.string.get_early_access,
R.string.get_early_access_description,
R.drawable.ic_diamond
) {
exitTransition = MaterialSharedAxis(MaterialSharedAxis.X, true)
parentFragmentManager.primaryNavigationFragment?.findNavController()
?.navigate(R.id.action_homeSettingsFragment_to_earlyAccessFragment)
}
R.drawable.ic_diamond,
{
exitTransition = MaterialSharedAxis(MaterialSharedAxis.X, true)
parentFragmentManager.primaryNavigationFragment?.findNavController()
?.navigate(R.id.action_homeSettingsFragment_to_earlyAccessFragment)
}
)
)
}

View File

@ -7,5 +7,8 @@ data class HomeSetting(
val titleId: Int,
val descriptionId: Int,
val iconId: Int,
val onClick: () -> Unit
val onClick: () -> Unit,
val isEnabled: () -> Boolean = { true },
val disabledTitleId: Int = 0,
val disabledMessageId: Int = 0
)

View File

@ -113,6 +113,8 @@
<string name="install_game_content_success_install">%1$d installed successfully</string>
<string name="install_game_content_success_overwrite">%1$d overwritten successfully</string>
<string name="install_game_content_help_link">https://yuzu-emu.org/help/quickstart/#dumping-installed-updates</string>
<string name="custom_driver_not_supported">Custom drivers not supported</string>
<string name="custom_driver_not_supported_description">Custom driver loading isn\'t currently supported for this device.\nCheck this option again in the future to see if support was added!</string>
<!-- About screen strings -->
<string name="gaia_is_not_real">Gaia isn\'t real</string>

View File

@ -92,9 +92,9 @@ void DeviceSession::AppendBuffers(std::span<const AudioBuffer> buffers) {
if (type == Sink::StreamType::In) {
stream->AppendBuffer(new_buffer, tmp_samples);
} else {
system.ApplicationMemory().ReadBlockUnsafe(buffer.samples, tmp_samples.data(),
buffer.size);
stream->AppendBuffer(new_buffer, tmp_samples);
Core::Memory::CpuGuestMemory<s16, Core::Memory::GuestMemoryFlags::UnsafeRead> samples(
system.ApplicationMemory(), buffer.samples, buffer.size / sizeof(s16));
stream->AppendBuffer(new_buffer, samples);
}
}
}
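
The change above replaces an explicit ReadBlockUnsafe copy into a temporary buffer with a span-like view over guest memory. A minimal, self-contained sketch of the same idea (the FakeGuestMemory type and ReadGuest helper are hypothetical, not the project's CpuGuestMemory API): hand out a pointer into the backing store when the range is contiguous, and fall back to a copy otherwise.

#include <cstdint>
#include <cstring>
#include <span>
#include <vector>

// Simplified stand-in for guest memory: a single contiguous host allocation.
struct FakeGuestMemory {
    std::vector<std::uint8_t> backing;

    // Returns a host pointer when [addr, addr + size) is fully contiguous, else nullptr.
    std::uint8_t* GetSpan(std::size_t addr, std::size_t size) {
        return addr + size <= backing.size() ? backing.data() + addr : nullptr;
    }

    void ReadBlockUnsafe(std::size_t addr, void* dest, std::size_t size) {
        std::memcpy(dest, backing.data() + addr, size);
    }
};

// Read `count` elements of T: borrow a span when possible, copy otherwise.
template <typename T>
std::span<T> ReadGuest(FakeGuestMemory& mem, std::size_t addr, std::size_t count,
                       std::vector<T>& fallback) {
    if (auto* ptr = mem.GetSpan(addr, count * sizeof(T))) {
        return {reinterpret_cast<T*>(ptr), count}; // zero-copy path
    }
    fallback.resize(count);
    mem.ReadBlockUnsafe(addr, fallback.data(), count * sizeof(T));
    return fallback; // copy path
}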

View File

@ -28,7 +28,6 @@ constexpr std::array<u8, 3> PitchBySrcQuality = {4, 8, 4};
template <typename T>
static u32 DecodePcm(Core::Memory::Memory& memory, std::span<s16> out_buffer,
const DecodeArg& req) {
std::array<T, TempBufferSize> tmp_samples{};
constexpr s32 min{std::numeric_limits<s16>::min()};
constexpr s32 max{std::numeric_limits<s16>::max()};
@ -49,19 +48,18 @@ static u32 DecodePcm(Core::Memory::Memory& memory, std::span<s16> out_buffer,
const VAddr source{req.buffer +
(((req.start_offset + req.offset) * channel_count) * sizeof(T))};
const u64 size{channel_count * samples_to_decode};
const u64 size_bytes{size * sizeof(T)};
memory.ReadBlockUnsafe(source, tmp_samples.data(), size_bytes);
Core::Memory::CpuGuestMemory<T, Core::Memory::GuestMemoryFlags::UnsafeRead> samples(
memory, source, size);
if constexpr (std::is_floating_point_v<T>) {
for (u32 i = 0; i < samples_to_decode; i++) {
auto sample{static_cast<s32>(tmp_samples[i * channel_count + req.target_channel] *
auto sample{static_cast<s32>(samples[i * channel_count + req.target_channel] *
std::numeric_limits<s16>::max())};
out_buffer[i] = static_cast<s16>(std::clamp(sample, min, max));
}
} else {
for (u32 i = 0; i < samples_to_decode; i++) {
out_buffer[i] = tmp_samples[i * channel_count + req.target_channel];
out_buffer[i] = samples[i * channel_count + req.target_channel];
}
}
} break;
@ -74,16 +72,17 @@ static u32 DecodePcm(Core::Memory::Memory& memory, std::span<s16> out_buffer,
}
const VAddr source{req.buffer + ((req.start_offset + req.offset) * sizeof(T))};
memory.ReadBlockUnsafe(source, tmp_samples.data(), samples_to_decode * sizeof(T));
Core::Memory::CpuGuestMemory<T, Core::Memory::GuestMemoryFlags::UnsafeRead> samples(
memory, source, samples_to_decode);
if constexpr (std::is_floating_point_v<T>) {
for (u32 i = 0; i < samples_to_decode; i++) {
auto sample{static_cast<s32>(tmp_samples[i * channel_count + req.target_channel] *
auto sample{static_cast<s32>(samples[i * channel_count + req.target_channel] *
std::numeric_limits<s16>::max())};
out_buffer[i] = static_cast<s16>(std::clamp(sample, min, max));
}
} else {
std::memcpy(out_buffer.data(), tmp_samples.data(), samples_to_decode * sizeof(s16));
std::memcpy(out_buffer.data(), samples.data(), samples_to_decode * sizeof(s16));
}
break;
}
@ -101,7 +100,6 @@ static u32 DecodePcm(Core::Memory::Memory& memory, std::span<s16> out_buffer,
*/
static u32 DecodeAdpcm(Core::Memory::Memory& memory, std::span<s16> out_buffer,
const DecodeArg& req) {
std::array<u8, TempBufferSize> wavebuffer{};
constexpr u32 SamplesPerFrame{14};
constexpr u32 NibblesPerFrame{16};
@ -139,7 +137,8 @@ static u32 DecodeAdpcm(Core::Memory::Memory& memory, std::span<s16> out_buffer,
}
const auto size{std::max((samples_to_process / 8U) * SamplesPerFrame, 8U)};
memory.ReadBlockUnsafe(req.buffer + position_in_frame / 2, wavebuffer.data(), size);
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead> wavebuffer(
memory, req.buffer + position_in_frame / 2, size);
auto context{req.adpcm_context};
auto header{context->header};

View File

@ -21,23 +21,13 @@ static void ResetAuxBufferDsp(Core::Memory::Memory& memory, const CpuAddr aux_in
}
AuxInfo::AuxInfoDsp info{};
auto info_ptr{&info};
bool host_safe{(aux_info & Core::Memory::YUZU_PAGEMASK) <=
(Core::Memory::YUZU_PAGESIZE - sizeof(AuxInfo::AuxInfoDsp))};
memory.ReadBlockUnsafe(aux_info, &info, sizeof(AuxInfo::AuxInfoDsp));
if (host_safe) [[likely]] {
info_ptr = memory.GetPointer<AuxInfo::AuxInfoDsp>(aux_info);
} else {
memory.ReadBlockUnsafe(aux_info, info_ptr, sizeof(AuxInfo::AuxInfoDsp));
}
info.read_offset = 0;
info.write_offset = 0;
info.total_sample_count = 0;
info_ptr->read_offset = 0;
info_ptr->write_offset = 0;
info_ptr->total_sample_count = 0;
if (!host_safe) [[unlikely]] {
memory.WriteBlockUnsafe(aux_info, info_ptr, sizeof(AuxInfo::AuxInfoDsp));
}
memory.WriteBlockUnsafe(aux_info, &info, sizeof(AuxInfo::AuxInfoDsp));
}
/**
@ -86,17 +76,9 @@ static u32 WriteAuxBufferDsp(Core::Memory::Memory& memory, CpuAddr send_info_,
}
AuxInfo::AuxInfoDsp send_info{};
auto send_ptr = &send_info;
bool host_safe = (send_info_ & Core::Memory::YUZU_PAGEMASK) <=
(Core::Memory::YUZU_PAGESIZE - sizeof(AuxInfo::AuxInfoDsp));
memory.ReadBlockUnsafe(send_info_, &send_info, sizeof(AuxInfo::AuxInfoDsp));
if (host_safe) [[likely]] {
send_ptr = memory.GetPointer<AuxInfo::AuxInfoDsp>(send_info_);
} else {
memory.ReadBlockUnsafe(send_info_, send_ptr, sizeof(AuxInfo::AuxInfoDsp));
}
u32 target_write_offset{send_ptr->write_offset + write_offset};
u32 target_write_offset{send_info.write_offset + write_offset};
if (target_write_offset > count_max) {
return 0;
}
@ -105,15 +87,9 @@ static u32 WriteAuxBufferDsp(Core::Memory::Memory& memory, CpuAddr send_info_,
u32 read_pos{0};
while (write_count > 0) {
u32 to_write{std::min(count_max - target_write_offset, write_count)};
const auto write_addr = send_buffer + target_write_offset * sizeof(s32);
bool write_safe{(write_addr & Core::Memory::YUZU_PAGEMASK) <=
(Core::Memory::YUZU_PAGESIZE - (write_addr + to_write * sizeof(s32)))};
if (write_safe) [[likely]] {
auto ptr = memory.GetPointer(write_addr);
std::memcpy(ptr, &input[read_pos], to_write * sizeof(s32));
} else {
memory.WriteBlockUnsafe(send_buffer + target_write_offset * sizeof(s32),
&input[read_pos], to_write * sizeof(s32));
if (to_write > 0) {
const auto write_addr = send_buffer + target_write_offset * sizeof(s32);
memory.WriteBlockUnsafe(write_addr, &input[read_pos], to_write * sizeof(s32));
}
target_write_offset = (target_write_offset + to_write) % count_max;
write_count -= to_write;
@ -121,13 +97,10 @@ static u32 WriteAuxBufferDsp(Core::Memory::Memory& memory, CpuAddr send_info_,
}
if (update_count) {
send_ptr->write_offset = (send_ptr->write_offset + update_count) % count_max;
}
if (!host_safe) [[unlikely]] {
memory.WriteBlockUnsafe(send_info_, send_ptr, sizeof(AuxInfo::AuxInfoDsp));
send_info.write_offset = (send_info.write_offset + update_count) % count_max;
}
memory.WriteBlockUnsafe(send_info_, &send_info, sizeof(AuxInfo::AuxInfoDsp));
return write_count_;
}
@ -174,17 +147,9 @@ static u32 ReadAuxBufferDsp(Core::Memory::Memory& memory, CpuAddr return_info_,
}
AuxInfo::AuxInfoDsp return_info{};
auto return_ptr = &return_info;
bool host_safe = (return_info_ & Core::Memory::YUZU_PAGEMASK) <=
(Core::Memory::YUZU_PAGESIZE - sizeof(AuxInfo::AuxInfoDsp));
memory.ReadBlockUnsafe(return_info_, &return_info, sizeof(AuxInfo::AuxInfoDsp));
if (host_safe) [[likely]] {
return_ptr = memory.GetPointer<AuxInfo::AuxInfoDsp>(return_info_);
} else {
memory.ReadBlockUnsafe(return_info_, return_ptr, sizeof(AuxInfo::AuxInfoDsp));
}
u32 target_read_offset{return_ptr->read_offset + read_offset};
u32 target_read_offset{return_info.read_offset + read_offset};
if (target_read_offset > count_max) {
return 0;
}
@ -193,15 +158,9 @@ static u32 ReadAuxBufferDsp(Core::Memory::Memory& memory, CpuAddr return_info_,
u32 write_pos{0};
while (read_count > 0) {
u32 to_read{std::min(count_max - target_read_offset, read_count)};
const auto read_addr = return_buffer + target_read_offset * sizeof(s32);
bool read_safe{(read_addr & Core::Memory::YUZU_PAGEMASK) <=
(Core::Memory::YUZU_PAGESIZE - (read_addr + to_read * sizeof(s32)))};
if (read_safe) [[likely]] {
auto ptr = memory.GetPointer(read_addr);
std::memcpy(&output[write_pos], ptr, to_read * sizeof(s32));
} else {
memory.ReadBlockUnsafe(return_buffer + target_read_offset * sizeof(s32),
&output[write_pos], to_read * sizeof(s32));
if (to_read > 0) {
const auto read_addr = return_buffer + target_read_offset * sizeof(s32);
memory.ReadBlockUnsafe(read_addr, &output[write_pos], to_read * sizeof(s32));
}
target_read_offset = (target_read_offset + to_read) % count_max;
read_count -= to_read;
@ -209,13 +168,10 @@ static u32 ReadAuxBufferDsp(Core::Memory::Memory& memory, CpuAddr return_info_,
}
if (update_count) {
return_ptr->read_offset = (return_ptr->read_offset + update_count) % count_max;
}
if (!host_safe) [[unlikely]] {
memory.WriteBlockUnsafe(return_info_, return_ptr, sizeof(AuxInfo::AuxInfoDsp));
return_info.read_offset = (return_info.read_offset + update_count) % count_max;
}
memory.WriteBlockUnsafe(return_info_, &return_info, sizeof(AuxInfo::AuxInfoDsp));
return read_count_;
}
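
For reference, the write loop in WriteAuxBufferDsp above is a circular-buffer copy: write up to the end of the buffer, wrap the offset with a modulo, and continue. A self-contained sketch of that pattern (toy WriteRing function; the names are assumptions):

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <vector>

// Writes `count` samples into a circular buffer of `capacity` samples starting
// at `offset`, wrapping around like the aux-buffer loop above.
// Assumes capacity > 0, offset <= capacity and ring.size() >= capacity.
void WriteRing(std::vector<std::int32_t>& ring, std::size_t capacity, std::size_t offset,
               const std::int32_t* input, std::size_t count) {
    std::size_t read_pos = 0;
    while (count > 0) {
        const std::size_t to_write = std::min(capacity - offset, count);
        if (to_write > 0) {
            std::memcpy(ring.data() + offset, input + read_pos, to_write * sizeof(std::int32_t));
        }
        offset = (offset + to_write) % capacity;
        count -= to_write;
        read_pos += to_write;
    }
}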

View File

@ -66,6 +66,7 @@ void PageTable::Resize(std::size_t address_space_width_in_bits, std::size_t page
<< (address_space_width_in_bits - page_size_in_bits)};
pointers.resize(num_page_table_entries);
backing_addr.resize(num_page_table_entries);
blocks.resize(num_page_table_entries);
current_address_space_width_in_bits = address_space_width_in_bits;
page_size = 1ULL << page_size_in_bits;
}

View File

@ -122,6 +122,7 @@ struct PageTable {
* corresponding attribute element is of type `Memory`.
*/
VirtualBuffer<PageInfo> pointers;
VirtualBuffer<u64> blocks;
VirtualBuffer<u64> backing_addr;

View File

@ -40,8 +40,21 @@ public:
~ScratchBuffer() = default;
ScratchBuffer(const ScratchBuffer&) = delete;
ScratchBuffer& operator=(const ScratchBuffer&) = delete;
ScratchBuffer(ScratchBuffer&&) = default;
ScratchBuffer& operator=(ScratchBuffer&&) = default;
ScratchBuffer(ScratchBuffer&& other) noexcept {
swap(other);
other.last_requested_size = 0;
other.buffer_capacity = 0;
other.buffer.reset();
}
ScratchBuffer& operator=(ScratchBuffer&& other) noexcept {
swap(other);
other.last_requested_size = 0;
other.buffer_capacity = 0;
other.buffer.reset();
return *this;
}
/// This will only grow the buffer's capacity if size is greater than the current capacity.
/// The previously held data will remain intact.
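
The fix above ensures a moved-from ScratchBuffer ends up genuinely empty instead of holding the target's old state after a plain swap. A self-contained sketch of that move-semantics pattern (toy Buffer class, not the project's type):

#include <cstddef>
#include <memory>
#include <utility>

class Buffer {
public:
    Buffer() = default;

    // Leave `other` empty rather than merely swapped, so reusing the moved-from
    // object never observes stale capacity or data.
    Buffer(Buffer&& other) noexcept
        : data{std::exchange(other.data, nullptr)},
          size{std::exchange(other.size, 0)},
          capacity{std::exchange(other.capacity, 0)} {}

    Buffer& operator=(Buffer&& other) noexcept {
        data = std::exchange(other.data, nullptr);
        size = std::exchange(other.size, 0);
        capacity = std::exchange(other.capacity, 0);
        return *this;
    }

private:
    std::unique_ptr<int[]> data;
    std::size_t size{};
    std::size_t capacity{};
};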

View File

@ -27,8 +27,8 @@ std::string GetTimeZoneString() {
std::string location_name;
if (time_zone_index == 0) { // Auto
#if __cpp_lib_chrono >= 201907L
const struct std::chrono::tzdb& time_zone_data = std::chrono::get_tzdb();
try {
const struct std::chrono::tzdb& time_zone_data = std::chrono::get_tzdb();
const std::chrono::time_zone* current_zone = time_zone_data.current_zone();
std::string_view current_zone_name = current_zone->name();
location_name = current_zone_name;
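
Taken together, the two time-zone commits guard the C++20 tzdb lookup: skip it under MSVC and catch the runtime error it can throw on old Windows 10 installs. A self-contained sketch of that pattern (the function name and the "Etc/UTC" fallback are illustrative assumptions, not the project's code):

#include <chrono>
#include <exception>
#include <string>

// std::chrono::get_tzdb() can throw std::runtime_error when the OS lacks the
// required time-zone support (seen on old Windows 10 LTSC installs).
std::string CurrentTimeZoneOrDefault() {
#if __cpp_lib_chrono >= 201907L && !defined(_MSC_VER)
    try {
        const auto& tzdb = std::chrono::get_tzdb();
        return std::string{tzdb.current_zone()->name()};
    } catch (const std::exception&) {
        // Missing OS support; fall through to the default below.
    }
#endif
    return "Etc/UTC"; // assumed fallback for illustration
}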

View File

@ -285,6 +285,7 @@ add_library(core STATIC
hle/kernel/kernel.cpp
hle/kernel/kernel.h
hle/kernel/memory_types.h
hle/kernel/message_buffer.h
hle/kernel/physical_core.cpp
hle/kernel/physical_core.h
hle/kernel/physical_memory.h

View File

@ -185,7 +185,7 @@ void ARM_Interface::Run() {
// Notify the debugger and go to sleep if a breakpoint was hit,
// or if the thread is unable to continue for any reason.
if (True(hr & HaltReason::InstructionBreakpoint) || True(hr & HaltReason::PrefetchAbort)) {
if (!True(hr & HaltReason::InstructionBreakpoint)) {
if (!True(hr & HaltReason::PrefetchAbort)) {
RewindBreakpointInstruction();
}
if (system.DebuggerEnabled()) {
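
The corrected condition rewinds the program counter only when the halt was not a prefetch abort, i.e. for breakpoint-style halts. A self-contained sketch of the bit-flag test involved (toy HaltReason values and True helper mirroring the usage above):

#include <cstdint>

enum class HaltReason : std::uint64_t {
    InstructionBreakpoint = 1ULL << 0,
    PrefetchAbort = 1ULL << 1,
};

constexpr HaltReason operator&(HaltReason a, HaltReason b) {
    return static_cast<HaltReason>(static_cast<std::uint64_t>(a) & static_cast<std::uint64_t>(b));
}

constexpr bool True(HaltReason hr) {
    return static_cast<std::uint64_t>(hr) != 0;
}

// Rewind only when the halt was not caused by a prefetch abort.
constexpr bool ShouldRewind(HaltReason hr) {
    return !True(hr & HaltReason::PrefetchAbort);
}

static_assert(ShouldRewind(HaltReason::InstructionBreakpoint));
static_assert(!ShouldRewind(HaltReason::PrefetchAbort));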

View File

@ -70,7 +70,7 @@ void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
-> std::optional<std::chrono::nanoseconds> { return std::nullopt; };
ev_lost = CreateEvent("_lost_event", empty_timed_callback);
if (is_multicore) {
timer_thread = std::make_unique<std::thread>(ThreadEntry, std::ref(*this));
timer_thread = std::make_unique<std::jthread>(ThreadEntry, std::ref(*this));
}
}
@ -255,7 +255,6 @@ void CoreTiming::ThreadLoop() {
#ifdef _WIN32
while (!paused && !event.IsSet() && wait_time > 0) {
wait_time = *next_time - GetGlobalTimeNs().count();
if (wait_time >= timer_resolution_ns) {
Common::Windows::SleepForOneTick();
} else {

View File

@ -163,7 +163,7 @@ private:
Common::Event pause_event{};
std::mutex basic_lock;
std::mutex advance_lock;
std::unique_ptr<std::thread> timer_thread;
std::unique_ptr<std::jthread> timer_thread;
std::atomic<bool> paused{};
std::atomic<bool> paused_set{};
std::atomic<bool> wait_set{};
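
Switching the timer thread from std::thread to std::jthread means its destructor joins automatically, where an unjoined std::thread would call std::terminate. A minimal, self-contained sketch (toy CoreTimingSketch type, not the project's class):

#include <atomic>
#include <chrono>
#include <memory>
#include <thread>

struct CoreTimingSketch {
    std::atomic<bool> shutting_down{false};
    std::unique_ptr<std::jthread> timer_thread;

    void Initialize() {
        timer_thread = std::make_unique<std::jthread>([this] {
            while (!shutting_down.load(std::memory_order_relaxed)) {
                std::this_thread::sleep_for(std::chrono::milliseconds(1));
            }
        });
    }

    ~CoreTimingSketch() {
        shutting_down = true;
        // timer_thread's destructor joins the worker automatically.
    }
};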

View File

@ -57,11 +57,34 @@ struct NCASectionHeaderBlock {
};
static_assert(sizeof(NCASectionHeaderBlock) == 0x8, "NCASectionHeaderBlock has incorrect size.");
struct NCABucketInfo {
u64 table_offset;
u64 table_size;
std::array<u8, 0x10> table_header;
};
static_assert(sizeof(NCABucketInfo) == 0x20, "NCABucketInfo has incorrect size.");
struct NCASparseInfo {
NCABucketInfo bucket;
u64 physical_offset;
u16 generation;
INSERT_PADDING_BYTES_NOINIT(0x6);
};
static_assert(sizeof(NCASparseInfo) == 0x30, "NCASparseInfo has incorrect size.");
struct NCACompressionInfo {
NCABucketInfo bucket;
INSERT_PADDING_BYTES_NOINIT(0x8);
};
static_assert(sizeof(NCACompressionInfo) == 0x28, "NCACompressionInfo has incorrect size.");
struct NCASectionRaw {
NCASectionHeaderBlock header;
std::array<u8, 0x138> block_data;
std::array<u8, 0x8> section_ctr;
INSERT_PADDING_BYTES_NOINIT(0xB8);
NCASparseInfo sparse_info;
NCACompressionInfo compression_info;
INSERT_PADDING_BYTES_NOINIT(0x60);
};
static_assert(sizeof(NCASectionRaw) == 0x200, "NCASectionRaw has incorrect size.");
@ -225,6 +248,20 @@ bool NCA::ReadSections(const std::vector<NCASectionHeader>& sections, u64 bktr_base_ivfc_offset) {
for (std::size_t i = 0; i < sections.size(); ++i) {
const auto& section = sections[i];
if (section.raw.sparse_info.bucket.table_offset != 0 &&
section.raw.sparse_info.bucket.table_size != 0) {
LOG_ERROR(Loader, "Sparse NCAs are not supported.");
status = Loader::ResultStatus::ErrorSparseNCA;
return false;
}
if (section.raw.compression_info.bucket.table_offset != 0 &&
section.raw.compression_info.bucket.table_size != 0) {
LOG_ERROR(Loader, "Compressed NCAs are not supported.");
status = Loader::ResultStatus::ErrorCompressedNCA;
return false;
}
if (section.raw.header.filesystem_type == NCASectionFilesystemType::ROMFS) {
if (!ReadRomFSSection(section, header.section_tables[i], bktr_base_ivfc_offset)) {
return false;
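
The new checks reject a section only when its bucket actually points at a table, i.e. both the table offset and table size are non-zero. A self-contained sketch of that test (hypothetical BucketInfo/UsesBucket names mirroring NCABucketInfo above):

#include <array>
#include <cstdint>

// Hypothetical, simplified bucket layout mirroring NCABucketInfo above.
struct BucketInfo {
    std::uint64_t table_offset;
    std::uint64_t table_size;
    std::array<std::uint8_t, 0x10> table_header;
};
static_assert(sizeof(BucketInfo) == 0x20, "BucketInfo has incorrect size.");

// A section uses the feature (sparse or compressed storage) only when its
// bucket actually points at a table, i.e. both offset and size are non-zero.
constexpr bool UsesBucket(const BucketInfo& bucket) {
    return bucket.table_offset != 0 && bucket.table_size != 0;
}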

View File

@ -20,12 +20,132 @@
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/message_buffer.h"
#include "core/hle/service/hle_ipc.h"
#include "core/hle/service/ipc_helpers.h"
#include "core/memory.h"
namespace Kernel {
namespace {
template <bool MoveHandleAllowed>
Result ProcessMessageSpecialData(KProcess& dst_process, KProcess& src_process, KThread& src_thread,
MessageBuffer& dst_msg, const MessageBuffer& src_msg,
MessageBuffer::SpecialHeader& src_special_header) {
// Copy the special header to the destination.
s32 offset = dst_msg.Set(src_special_header);
// Copy the process ID.
if (src_special_header.GetHasProcessId()) {
offset = dst_msg.SetProcessId(offset, src_process.GetProcessId());
}
// Prepare to process handles.
auto& dst_handle_table = dst_process.GetHandleTable();
auto& src_handle_table = src_process.GetHandleTable();
Result result = ResultSuccess;
// Process copy handles.
for (auto i = 0; i < src_special_header.GetCopyHandleCount(); ++i) {
// Get the handles.
const Handle src_handle = src_msg.GetHandle(offset);
Handle dst_handle = Svc::InvalidHandle;
// If we're in a success state, try to move the handle to the new table.
if (R_SUCCEEDED(result) && src_handle != Svc::InvalidHandle) {
KScopedAutoObject obj =
src_handle_table.GetObjectForIpc(src_handle, std::addressof(src_thread));
if (obj.IsNotNull()) {
Result add_result =
dst_handle_table.Add(std::addressof(dst_handle), obj.GetPointerUnsafe());
if (R_FAILED(add_result)) {
result = add_result;
dst_handle = Svc::InvalidHandle;
}
} else {
result = ResultInvalidHandle;
}
}
// Set the handle.
offset = dst_msg.SetHandle(offset, dst_handle);
}
// Process move handles.
if constexpr (MoveHandleAllowed) {
for (auto i = 0; i < src_special_header.GetMoveHandleCount(); ++i) {
// Get the handles.
const Handle src_handle = src_msg.GetHandle(offset);
Handle dst_handle = Svc::InvalidHandle;
// Whether or not we've succeeded, we need to remove the handles from the source table.
if (src_handle != Svc::InvalidHandle) {
if (R_SUCCEEDED(result)) {
KScopedAutoObject obj =
src_handle_table.GetObjectForIpcWithoutPseudoHandle(src_handle);
if (obj.IsNotNull()) {
Result add_result = dst_handle_table.Add(std::addressof(dst_handle),
obj.GetPointerUnsafe());
src_handle_table.Remove(src_handle);
if (R_FAILED(add_result)) {
result = add_result;
dst_handle = Svc::InvalidHandle;
}
} else {
result = ResultInvalidHandle;
}
} else {
src_handle_table.Remove(src_handle);
}
}
// Set the handle.
offset = dst_msg.SetHandle(offset, dst_handle);
}
}
R_RETURN(result);
}
void CleanupSpecialData(KProcess& dst_process, u32* dst_msg_ptr, size_t dst_buffer_size) {
// Parse the message.
const MessageBuffer dst_msg(dst_msg_ptr, dst_buffer_size);
const MessageBuffer::MessageHeader dst_header(dst_msg);
const MessageBuffer::SpecialHeader dst_special_header(dst_msg, dst_header);
// Check that the size is big enough.
if (MessageBuffer::GetMessageBufferSize(dst_header, dst_special_header) > dst_buffer_size) {
return;
}
// Set the special header.
int offset = dst_msg.Set(dst_special_header);
// Clear the process id, if needed.
if (dst_special_header.GetHasProcessId()) {
offset = dst_msg.SetProcessId(offset, 0);
}
// Clear handles, as relevant.
auto& dst_handle_table = dst_process.GetHandleTable();
for (auto i = 0;
i < (dst_special_header.GetCopyHandleCount() + dst_special_header.GetMoveHandleCount());
++i) {
const Handle handle = dst_msg.GetHandle(offset);
if (handle != Svc::InvalidHandle) {
dst_handle_table.Remove(handle);
}
offset = dst_msg.SetHandle(offset, Svc::InvalidHandle);
}
}
} // namespace
using ThreadQueueImplForKServerSessionRequest = KThreadQueue;
KServerSession::KServerSession(KernelCore& kernel)
@ -223,12 +343,27 @@ Result KServerSession::SendReply(bool is_hle) {
// the reply has already been written in this case.
} else {
Core::Memory::Memory& memory{client_thread->GetOwnerProcess()->GetMemory()};
KThread* server_thread{GetCurrentThreadPointer(m_kernel)};
KThread* server_thread = GetCurrentThreadPointer(m_kernel);
KProcess& src_process = *client_thread->GetOwnerProcess();
KProcess& dst_process = *server_thread->GetOwnerProcess();
UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());
auto* src_msg_buffer = memory.GetPointer(server_thread->GetTlsAddress());
auto* dst_msg_buffer = memory.GetPointer(client_message);
auto* src_msg_buffer = memory.GetPointer<u32>(server_thread->GetTlsAddress());
auto* dst_msg_buffer = memory.GetPointer<u32>(client_message);
std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size);
// Translate special header ad-hoc.
MessageBuffer src_msg(src_msg_buffer, client_buffer_size);
MessageBuffer::MessageHeader src_header(src_msg);
MessageBuffer::SpecialHeader src_special_header(src_msg, src_header);
if (src_header.GetHasSpecialHeader()) {
MessageBuffer dst_msg(dst_msg_buffer, client_buffer_size);
result = ProcessMessageSpecialData<true>(dst_process, src_process, *server_thread,
dst_msg, src_msg, src_special_header);
if (R_FAILED(result)) {
CleanupSpecialData(dst_process, dst_msg_buffer, client_buffer_size);
}
}
}
} else {
result = ResultSessionClosed;
@ -330,12 +465,28 @@ Result KServerSession::ReceiveRequest(std::shared_ptr<Service::HLERequestContext
->PopulateFromIncomingCommandBuffer(client_thread->GetOwnerProcess()->GetHandleTable(),
cmd_buf);
} else {
KThread* server_thread{GetCurrentThreadPointer(m_kernel)};
UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());
KThread* server_thread = GetCurrentThreadPointer(m_kernel);
KProcess& src_process = *client_thread->GetOwnerProcess();
KProcess& dst_process = *server_thread->GetOwnerProcess();
UNIMPLEMENTED_IF(client_thread->GetOwnerProcess() != server_thread->GetOwnerProcess());
auto* src_msg_buffer = memory.GetPointer(client_message);
auto* dst_msg_buffer = memory.GetPointer(server_thread->GetTlsAddress());
auto* src_msg_buffer = memory.GetPointer<u32>(client_message);
auto* dst_msg_buffer = memory.GetPointer<u32>(server_thread->GetTlsAddress());
std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size);
// Translate special header ad-hoc.
// TODO: fix this mess
MessageBuffer src_msg(src_msg_buffer, client_buffer_size);
MessageBuffer::MessageHeader src_header(src_msg);
MessageBuffer::SpecialHeader src_special_header(src_msg, src_header);
if (src_header.GetHasSpecialHeader()) {
MessageBuffer dst_msg(dst_msg_buffer, client_buffer_size);
Result res = ProcessMessageSpecialData<false>(dst_process, src_process, *client_thread,
dst_msg, src_msg, src_special_header);
if (R_FAILED(res)) {
CleanupSpecialData(dst_process, dst_msg_buffer, client_buffer_size);
}
}
}
// We succeeded.
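
ProcessMessageSpecialData above moves or copies kernel object handles from one process's handle table to another: copy handles stay valid in the source table, while move handles are removed from it even when adding to the destination fails. A much-simplified, self-contained sketch of that translation (toy HandleTable and TranslateHandles types, not the kernel classes):

#include <cstdint>
#include <map>
#include <memory>
#include <vector>

using Handle = std::uint32_t;
constexpr Handle InvalidHandle = 0;

// Toy handle table: maps handles to reference-counted objects.
struct HandleTable {
    std::map<Handle, std::shared_ptr<int>> objects;
    Handle next = 1;

    Handle Add(std::shared_ptr<int> obj) {
        const Handle h = next++;
        objects.emplace(h, std::move(obj));
        return h;
    }
    void Remove(Handle h) { objects.erase(h); }
    std::shared_ptr<int> Get(Handle h) const {
        const auto it = objects.find(h);
        return it != objects.end() ? it->second : nullptr;
    }
};

// Copy handles stay in the source table; move handles are always removed from it.
std::vector<Handle> TranslateHandles(HandleTable& dst, HandleTable& src,
                                     const std::vector<Handle>& copy_handles,
                                     const std::vector<Handle>& move_handles) {
    std::vector<Handle> out;
    for (const Handle h : copy_handles) {
        const auto obj = src.Get(h);
        out.push_back(obj ? dst.Add(obj) : InvalidHandle);
    }
    for (const Handle h : move_handles) {
        const auto obj = src.Get(h);
        out.push_back(obj ? dst.Add(obj) : InvalidHandle);
        src.Remove(h);
    }
    return out;
}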

View File

@ -0,0 +1,612 @@
// SPDX-FileCopyrightText: 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/alignment.h"
#include "common/bit_field.h"
#include "core/hle/kernel/k_thread.h"
namespace Kernel {
constexpr inline size_t MessageBufferSize = 0x100;
class MessageBuffer {
public:
class MessageHeader {
private:
static constexpr inline u64 NullTag = 0;
public:
enum class ReceiveListCountType : u32 {
None = 0,
ToMessageBuffer = 1,
ToSingleBuffer = 2,
CountOffset = 2,
CountMax = 13,
};
private:
union {
std::array<u32, 2> raw;
struct {
// Define fields for the first header word.
union {
BitField<0, 16, u16> tag;
BitField<16, 4, u32> pointer_count;
BitField<20, 4, u32> send_count;
BitField<24, 4, u32> receive_count;
BitField<28, 4, u32> exchange_count;
};
// Define fields for the second header word.
union {
BitField<0, 10, u32> raw_count;
BitField<10, 4, ReceiveListCountType> receive_list_count;
BitField<14, 6, u32> reserved0;
BitField<20, 11, u32> receive_list_offset;
BitField<31, 1, u32> has_special_header;
};
};
} m_header;
public:
constexpr MessageHeader() : m_header{} {}
constexpr MessageHeader(u16 tag, bool special, s32 ptr, s32 send, s32 recv, s32 exch,
s32 raw, ReceiveListCountType recv_list)
: m_header{} {
m_header.raw[0] = 0;
m_header.raw[1] = 0;
m_header.tag.Assign(tag);
m_header.pointer_count.Assign(ptr);
m_header.send_count.Assign(send);
m_header.receive_count.Assign(recv);
m_header.exchange_count.Assign(exch);
m_header.raw_count.Assign(raw);
m_header.receive_list_count.Assign(recv_list);
m_header.has_special_header.Assign(special);
}
explicit MessageHeader(const MessageBuffer& buf) : m_header{} {
buf.Get(0, m_header.raw.data(), 2);
}
explicit MessageHeader(const u32* msg) : m_header{{msg[0], msg[1]}} {}
constexpr u16 GetTag() const {
return m_header.tag;
}
constexpr s32 GetPointerCount() const {
return m_header.pointer_count;
}
constexpr s32 GetSendCount() const {
return m_header.send_count;
}
constexpr s32 GetReceiveCount() const {
return m_header.receive_count;
}
constexpr s32 GetExchangeCount() const {
return m_header.exchange_count;
}
constexpr s32 GetMapAliasCount() const {
return this->GetSendCount() + this->GetReceiveCount() + this->GetExchangeCount();
}
constexpr s32 GetRawCount() const {
return m_header.raw_count;
}
constexpr ReceiveListCountType GetReceiveListCount() const {
return m_header.receive_list_count;
}
constexpr s32 GetReceiveListOffset() const {
return m_header.receive_list_offset;
}
constexpr bool GetHasSpecialHeader() const {
return m_header.has_special_header.Value() != 0;
}
constexpr void SetReceiveListCount(ReceiveListCountType recv_list) {
m_header.receive_list_count.Assign(recv_list);
}
constexpr const u32* GetData() const {
return m_header.raw.data();
}
static constexpr size_t GetDataSize() {
return sizeof(m_header);
}
};
class SpecialHeader {
private:
union {
std::array<u32, 1> raw;
// Define fields for the header word.
BitField<0, 1, u32> has_process_id;
BitField<1, 4, u32> copy_handle_count;
BitField<5, 4, u32> move_handle_count;
} m_header;
bool m_has_header;
public:
constexpr explicit SpecialHeader(bool pid, s32 copy, s32 move)
: m_header{}, m_has_header(true) {
m_header.has_process_id.Assign(pid);
m_header.copy_handle_count.Assign(copy);
m_header.move_handle_count.Assign(move);
}
constexpr explicit SpecialHeader(bool pid, s32 copy, s32 move, bool _has_header)
: m_header{}, m_has_header(_has_header) {
m_header.has_process_id.Assign(pid);
m_header.copy_handle_count.Assign(copy);
m_header.move_handle_count.Assign(move);
}
explicit SpecialHeader(const MessageBuffer& buf, const MessageHeader& hdr)
: m_header{}, m_has_header(hdr.GetHasSpecialHeader()) {
if (m_has_header) {
buf.Get(static_cast<s32>(MessageHeader::GetDataSize() / sizeof(u32)),
m_header.raw.data(), sizeof(m_header) / sizeof(u32));
}
}
constexpr bool GetHasProcessId() const {
return m_header.has_process_id.Value() != 0;
}
constexpr s32 GetCopyHandleCount() const {
return m_header.copy_handle_count;
}
constexpr s32 GetMoveHandleCount() const {
return m_header.move_handle_count;
}
constexpr const u32* GetHeader() const {
return m_header.raw.data();
}
constexpr size_t GetHeaderSize() const {
if (m_has_header) {
return sizeof(m_header);
} else {
return 0;
}
}
constexpr size_t GetDataSize() const {
if (m_has_header) {
return (this->GetHasProcessId() ? sizeof(u64) : 0) +
(this->GetCopyHandleCount() * sizeof(Handle)) +
(this->GetMoveHandleCount() * sizeof(Handle));
} else {
return 0;
}
}
};
class MapAliasDescriptor {
public:
enum class Attribute : u32 {
Ipc = 0,
NonSecureIpc = 1,
NonDeviceIpc = 3,
};
private:
static constexpr u32 SizeLowCount = 32;
static constexpr u32 SizeHighCount = 4;
static constexpr u32 AddressLowCount = 32;
static constexpr u32 AddressMidCount = 4;
constexpr u32 GetAddressMid(u64 address) {
return static_cast<u32>(address >> AddressLowCount) & ((1U << AddressMidCount) - 1);
}
constexpr u32 GetAddressHigh(u64 address) {
return static_cast<u32>(address >> (AddressLowCount + AddressMidCount));
}
private:
union {
std::array<u32, 3> raw;
struct {
// Define fields for the first two words.
u32 size_low;
u32 address_low;
// Define fields for the packed descriptor word.
union {
BitField<0, 2, Attribute> attributes;
BitField<2, 3, u32> address_high;
BitField<5, 19, u32> reserved;
BitField<24, 4, u32> size_high;
BitField<28, 4, u32> address_mid;
};
};
} m_data;
public:
constexpr MapAliasDescriptor() : m_data{} {}
MapAliasDescriptor(const void* buffer, size_t _size, Attribute attr = Attribute::Ipc)
: m_data{} {
const u64 address = reinterpret_cast<u64>(buffer);
const u64 size = static_cast<u64>(_size);
m_data.size_low = static_cast<u32>(size);
m_data.address_low = static_cast<u32>(address);
m_data.attributes.Assign(attr);
m_data.address_mid.Assign(GetAddressMid(address));
m_data.size_high.Assign(static_cast<u32>(size >> SizeLowCount));
m_data.address_high.Assign(GetAddressHigh(address));
}
MapAliasDescriptor(const MessageBuffer& buf, s32 index) : m_data{} {
buf.Get(index, m_data.raw.data(), 3);
}
constexpr uintptr_t GetAddress() const {
return (static_cast<u64>((m_data.address_high << AddressMidCount) | m_data.address_mid)
<< AddressLowCount) |
m_data.address_low;
}
constexpr uintptr_t GetSize() const {
return (static_cast<u64>(m_data.size_high) << SizeLowCount) | m_data.size_low;
}
constexpr Attribute GetAttribute() const {
return m_data.attributes;
}
constexpr const u32* GetData() const {
return m_data.raw.data();
}
static constexpr size_t GetDataSize() {
return sizeof(m_data);
}
};
class PointerDescriptor {
private:
static constexpr u32 AddressLowCount = 32;
static constexpr u32 AddressMidCount = 4;
constexpr u32 GetAddressMid(u64 address) {
return static_cast<u32>(address >> AddressLowCount) & ((1u << AddressMidCount) - 1);
}
constexpr u32 GetAddressHigh(u64 address) {
return static_cast<u32>(address >> (AddressLowCount + AddressMidCount));
}
private:
union {
std::array<u32, 2> raw;
struct {
// Define fields for the packed descriptor word.
union {
BitField<0, 4, u32> index;
BitField<4, 2, u32> reserved0;
BitField<6, 3, u32> address_high;
BitField<9, 3, u32> reserved1;
BitField<12, 4, u32> address_mid;
BitField<16, 16, u32> size;
};
// Define fields for the second word.
u32 address_low;
};
} m_data;
public:
constexpr PointerDescriptor() : m_data{} {}
PointerDescriptor(const void* buffer, size_t size, s32 index) : m_data{} {
const u64 address = reinterpret_cast<u64>(buffer);
m_data.index.Assign(index);
m_data.address_high.Assign(GetAddressHigh(address));
m_data.address_mid.Assign(GetAddressMid(address));
m_data.size.Assign(static_cast<u32>(size));
m_data.address_low = static_cast<u32>(address);
}
PointerDescriptor(const MessageBuffer& buf, s32 index) : m_data{} {
buf.Get(index, m_data.raw.data(), 2);
}
constexpr s32 GetIndex() const {
return m_data.index;
}
constexpr uintptr_t GetAddress() const {
return (static_cast<u64>((m_data.address_high << AddressMidCount) | m_data.address_mid)
<< AddressLowCount) |
m_data.address_low;
}
constexpr size_t GetSize() const {
return m_data.size;
}
constexpr const u32* GetData() const {
return m_data.raw.data();
}
static constexpr size_t GetDataSize() {
return sizeof(m_data);
}
};
class ReceiveListEntry {
private:
static constexpr u32 AddressLowCount = 32;
constexpr u32 GetAddressHigh(u64 address) {
return static_cast<u32>(address >> (AddressLowCount));
}
private:
union {
std::array<u32, 2> raw;
struct {
// Define fields for the first word.
u32 address_low;
// Define fields for the packed descriptor word.
union {
BitField<0, 7, u32> address_high;
BitField<7, 9, u32> reserved;
BitField<16, 16, u32> size;
};
};
} m_data;
public:
constexpr ReceiveListEntry() : m_data{} {}
ReceiveListEntry(const void* buffer, size_t size) : m_data{} {
const u64 address = reinterpret_cast<u64>(buffer);
m_data.address_low = static_cast<u32>(address);
m_data.address_high.Assign(GetAddressHigh(address));
m_data.size.Assign(static_cast<u32>(size));
}
ReceiveListEntry(u32 a, u32 b) : m_data{{a, b}} {}
constexpr uintptr_t GetAddress() const {
return (static_cast<u64>(m_data.address_high) << AddressLowCount) | m_data.address_low;
}
constexpr size_t GetSize() const {
return m_data.size;
}
constexpr const u32* GetData() const {
return m_data.raw.data();
}
static constexpr size_t GetDataSize() {
return sizeof(m_data);
}
};
private:
u32* m_buffer;
size_t m_size;
public:
constexpr MessageBuffer(u32* b, size_t sz) : m_buffer(b), m_size(sz) {}
constexpr explicit MessageBuffer(u32* b) : m_buffer(b), m_size(MessageBufferSize) {}
constexpr void* GetBufferForDebug() const {
return m_buffer;
}
constexpr size_t GetBufferSize() const {
return m_size;
}
void Get(s32 index, u32* dst, size_t count) const {
// Ensure that this doesn't get re-ordered.
std::atomic_thread_fence(std::memory_order_seq_cst);
// Get the words.
static_assert(sizeof(*dst) == sizeof(*m_buffer));
memcpy(dst, m_buffer + index, count * sizeof(*dst));
}
s32 Set(s32 index, u32* src, size_t count) const {
// Ensure that this doesn't get re-ordered.
std::atomic_thread_fence(std::memory_order_seq_cst);
// Set the words.
memcpy(m_buffer + index, src, count * sizeof(*src));
// Ensure that this doesn't get re-ordered.
std::atomic_thread_fence(std::memory_order_seq_cst);
return static_cast<s32>(index + count);
}
template <typename T>
const T& GetRaw(s32 index) const {
return *reinterpret_cast<const T*>(m_buffer + index);
}
template <typename T>
s32 SetRaw(s32 index, const T& val) const {
*reinterpret_cast<const T*>(m_buffer + index) = val;
return index + (Common::AlignUp(sizeof(val), sizeof(*m_buffer)) / sizeof(*m_buffer));
}
void GetRawArray(s32 index, void* dst, size_t len) const {
memcpy(dst, m_buffer + index, len);
}
void SetRawArray(s32 index, const void* src, size_t len) const {
memcpy(m_buffer + index, src, len);
}
void SetNull() const {
this->Set(MessageHeader());
}
s32 Set(const MessageHeader& hdr) const {
memcpy(m_buffer, hdr.GetData(), hdr.GetDataSize());
return static_cast<s32>(hdr.GetDataSize() / sizeof(*m_buffer));
}
s32 Set(const SpecialHeader& spc) const {
const s32 index = static_cast<s32>(MessageHeader::GetDataSize() / sizeof(*m_buffer));
memcpy(m_buffer + index, spc.GetHeader(), spc.GetHeaderSize());
return static_cast<s32>(index + (spc.GetHeaderSize() / sizeof(*m_buffer)));
}
s32 SetHandle(s32 index, const Handle& hnd) const {
memcpy(m_buffer + index, std::addressof(hnd), sizeof(hnd));
return static_cast<s32>(index + (sizeof(hnd) / sizeof(*m_buffer)));
}
s32 SetProcessId(s32 index, const u64 pid) const {
memcpy(m_buffer + index, std::addressof(pid), sizeof(pid));
return static_cast<s32>(index + (sizeof(pid) / sizeof(*m_buffer)));
}
s32 Set(s32 index, const MapAliasDescriptor& desc) const {
memcpy(m_buffer + index, desc.GetData(), desc.GetDataSize());
return static_cast<s32>(index + (desc.GetDataSize() / sizeof(*m_buffer)));
}
s32 Set(s32 index, const PointerDescriptor& desc) const {
memcpy(m_buffer + index, desc.GetData(), desc.GetDataSize());
return static_cast<s32>(index + (desc.GetDataSize() / sizeof(*m_buffer)));
}
s32 Set(s32 index, const ReceiveListEntry& desc) const {
memcpy(m_buffer + index, desc.GetData(), desc.GetDataSize());
return static_cast<s32>(index + (desc.GetDataSize() / sizeof(*m_buffer)));
}
s32 Set(s32 index, const u32 val) const {
memcpy(m_buffer + index, std::addressof(val), sizeof(val));
return static_cast<s32>(index + (sizeof(val) / sizeof(*m_buffer)));
}
Result GetAsyncResult() const {
MessageHeader hdr(m_buffer);
MessageHeader null{};
if (memcmp(hdr.GetData(), null.GetData(), MessageHeader::GetDataSize()) != 0) [[unlikely]] {
R_SUCCEED();
}
return Result(m_buffer[MessageHeader::GetDataSize() / sizeof(*m_buffer)]);
}
void SetAsyncResult(Result res) const {
const s32 index = this->Set(MessageHeader());
const auto value = res.raw;
memcpy(m_buffer + index, std::addressof(value), sizeof(value));
}
u32 Get32(s32 index) const {
return m_buffer[index];
}
u64 Get64(s32 index) const {
u64 value;
memcpy(std::addressof(value), m_buffer + index, sizeof(value));
return value;
}
u64 GetProcessId(s32 index) const {
return this->Get64(index);
}
Handle GetHandle(s32 index) const {
static_assert(sizeof(Handle) == sizeof(*m_buffer));
return Handle(m_buffer[index]);
}
static constexpr s32 GetSpecialDataIndex(const MessageHeader& hdr, const SpecialHeader& spc) {
return static_cast<s32>((MessageHeader::GetDataSize() / sizeof(u32)) +
(spc.GetHeaderSize() / sizeof(u32)));
}
static constexpr s32 GetPointerDescriptorIndex(const MessageHeader& hdr,
const SpecialHeader& spc) {
return static_cast<s32>(GetSpecialDataIndex(hdr, spc) + (spc.GetDataSize() / sizeof(u32)));
}
static constexpr s32 GetMapAliasDescriptorIndex(const MessageHeader& hdr,
const SpecialHeader& spc) {
return GetPointerDescriptorIndex(hdr, spc) +
static_cast<s32>(hdr.GetPointerCount() * PointerDescriptor::GetDataSize() /
sizeof(u32));
}
static constexpr s32 GetRawDataIndex(const MessageHeader& hdr, const SpecialHeader& spc) {
return GetMapAliasDescriptorIndex(hdr, spc) +
static_cast<s32>(hdr.GetMapAliasCount() * MapAliasDescriptor::GetDataSize() /
sizeof(u32));
}
static constexpr s32 GetReceiveListIndex(const MessageHeader& hdr, const SpecialHeader& spc) {
if (const s32 recv_list_index = hdr.GetReceiveListOffset()) {
return recv_list_index;
} else {
return GetRawDataIndex(hdr, spc) + hdr.GetRawCount();
}
}
static constexpr size_t GetMessageBufferSize(const MessageHeader& hdr,
const SpecialHeader& spc) {
// Get the size of the plain message.
size_t msg_size = GetReceiveListIndex(hdr, spc) * sizeof(u32);
// Add the size of the receive list.
const auto count = hdr.GetReceiveListCount();
switch (count) {
case MessageHeader::ReceiveListCountType::None:
break;
case MessageHeader::ReceiveListCountType::ToMessageBuffer:
break;
case MessageHeader::ReceiveListCountType::ToSingleBuffer:
msg_size += ReceiveListEntry::GetDataSize();
break;
default:
msg_size += (static_cast<s32>(count) -
static_cast<s32>(MessageHeader::ReceiveListCountType::CountOffset)) *
ReceiveListEntry::GetDataSize();
break;
}
return msg_size;
}
};
} // namespace Kernel
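
As a usage reference for the layout defined above, the sketch below decodes the two header words with plain shifts and masks instead of the BitField helpers; the field widths are taken from MessageHeader above, and the example values are arbitrary.

#include <cstdint>

struct DecodedHeader {
    std::uint16_t tag;
    std::uint32_t pointer_count;
    std::uint32_t send_count;
    std::uint32_t receive_count;
    std::uint32_t exchange_count;
    std::uint32_t raw_count;
    bool has_special_header;
};

constexpr DecodedHeader DecodeHeader(std::uint32_t w0, std::uint32_t w1) {
    return DecodedHeader{
        static_cast<std::uint16_t>(w0 & 0xFFFF), // bits 0..15: tag
        (w0 >> 16) & 0xF,                        // bits 16..19: pointer descriptor count
        (w0 >> 20) & 0xF,                        // bits 20..23: send descriptor count
        (w0 >> 24) & 0xF,                        // bits 24..27: receive descriptor count
        (w0 >> 28) & 0xF,                        // bits 28..31: exchange descriptor count
        w1 & 0x3FF,                              // bits 0..9: raw data word count
        ((w1 >> 31) & 1) != 0,                   // bit 31: special header present
    };
}

static_assert(DecodeHeader(0x00010004, 0x80000009).tag == 4);
static_assert(DecodeHeader(0x00010004, 0x80000009).pointer_count == 1);
static_assert(DecodeHeader(0x00010004, 0x80000009).raw_count == 9);
static_assert(DecodeHeader(0x00010004, 0x80000009).has_special_header);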

View File

@ -329,8 +329,22 @@ std::vector<u8> HLERequestContext::ReadBufferCopy(std::size_t buffer_index) cons
}
std::span<const u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const {
static thread_local std::array<Common::ScratchBuffer<u8>, 2> read_buffer_a;
static thread_local std::array<Common::ScratchBuffer<u8>, 2> read_buffer_x;
static thread_local std::array read_buffer_a{
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
};
static thread_local std::array read_buffer_data_a{
Common::ScratchBuffer<u8>(),
Common::ScratchBuffer<u8>(),
};
static thread_local std::array read_buffer_x{
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
};
static thread_local std::array read_buffer_data_x{
Common::ScratchBuffer<u8>(),
Common::ScratchBuffer<u8>(),
};
const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
BufferDescriptorA()[buffer_index].Size()};
@ -339,19 +353,17 @@ std::span<const u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const {
BufferDescriptorA().size() > buffer_index, { return {}; },
"BufferDescriptorA invalid buffer_index {}", buffer_index);
auto& read_buffer = read_buffer_a[buffer_index];
read_buffer.resize_destructive(BufferDescriptorA()[buffer_index].Size());
memory.ReadBlock(BufferDescriptorA()[buffer_index].Address(), read_buffer.data(),
read_buffer.size());
return read_buffer;
return read_buffer.Read(BufferDescriptorA()[buffer_index].Address(),
BufferDescriptorA()[buffer_index].Size(),
&read_buffer_data_a[buffer_index]);
} else {
ASSERT_OR_EXECUTE_MSG(
BufferDescriptorX().size() > buffer_index, { return {}; },
"BufferDescriptorX invalid buffer_index {}", buffer_index);
auto& read_buffer = read_buffer_x[buffer_index];
read_buffer.resize_destructive(BufferDescriptorX()[buffer_index].Size());
memory.ReadBlock(BufferDescriptorX()[buffer_index].Address(), read_buffer.data(),
read_buffer.size());
return read_buffer;
return read_buffer.Read(BufferDescriptorX()[buffer_index].Address(),
BufferDescriptorX()[buffer_index].Size(),
&read_buffer_data_x[buffer_index]);
}
}
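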

View File

@ -79,6 +79,8 @@ enum class ResultStatus : u16 {
ErrorBadPFSHeader,
ErrorIncorrectPFSFileSize,
ErrorBadNCAHeader,
ErrorCompressedNCA,
ErrorSparseNCA,
ErrorMissingProductionKeyFile,
ErrorMissingHeaderKey,
ErrorIncorrectHeaderKey,

View File

@ -266,6 +266,22 @@ struct Memory::Impl {
ReadBlockImpl<true>(*system.ApplicationProcess(), src_addr, dest_buffer, size);
}
const u8* GetSpan(const VAddr src_addr, const std::size_t size) const {
if (current_page_table->blocks[src_addr >> YUZU_PAGEBITS] ==
current_page_table->blocks[(src_addr + size) >> YUZU_PAGEBITS]) {
return GetPointerSilent(src_addr);
}
return nullptr;
}
u8* GetSpan(const VAddr src_addr, const std::size_t size) {
if (current_page_table->blocks[src_addr >> YUZU_PAGEBITS] ==
current_page_table->blocks[(src_addr + size) >> YUZU_PAGEBITS]) {
return GetPointerSilent(src_addr);
}
return nullptr;
}
template <bool UNSAFE>
void WriteBlockImpl(const Kernel::KProcess& process, const Common::ProcessAddress dest_addr,
const void* src_buffer, const std::size_t size) {
@ -559,7 +575,7 @@ struct Memory::Impl {
}
}
const Common::ProcessAddress end = base + size;
const auto end = base + size;
ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",
base + page_table.pointers.size());
@ -570,14 +586,18 @@ struct Memory::Impl {
while (base != end) {
page_table.pointers[base].Store(nullptr, type);
page_table.backing_addr[base] = 0;
page_table.blocks[base] = 0;
base += 1;
}
} else {
auto orig_base = base;
while (base != end) {
page_table.pointers[base].Store(
system.DeviceMemory().GetPointer<u8>(target) - (base << YUZU_PAGEBITS), type);
page_table.backing_addr[base] = GetInteger(target) - (base << YUZU_PAGEBITS);
auto host_ptr =
system.DeviceMemory().GetPointer<u8>(target) - (base << YUZU_PAGEBITS);
auto backing = GetInteger(target) - (base << YUZU_PAGEBITS);
page_table.pointers[base].Store(host_ptr, type);
page_table.backing_addr[base] = backing;
page_table.blocks[base] = orig_base << YUZU_PAGEBITS;
ASSERT_MSG(page_table.pointers[base].Pointer(),
"memory mapping base yield a nullptr within the table");
@ -747,6 +767,14 @@ struct Memory::Impl {
VAddr last_address;
};
void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size) {
system.GPU().InvalidateRegion(GetInteger(dest_addr), size);
}
void FlushRegion(Common::ProcessAddress dest_addr, size_t size) {
system.GPU().FlushRegion(GetInteger(dest_addr), size);
}
Core::System& system;
Common::PageTable* current_page_table = nullptr;
std::array<VideoCore::RasterizerDownloadArea, Core::Hardware::NUM_CPU_CORES>
@ -881,6 +909,14 @@ void Memory::ReadBlockUnsafe(const Common::ProcessAddress src_addr, void* dest_b
impl->ReadBlockUnsafe(src_addr, dest_buffer, size);
}
const u8* Memory::GetSpan(const VAddr src_addr, const std::size_t size) const {
return impl->GetSpan(src_addr, size);
}
u8* Memory::GetSpan(const VAddr src_addr, const std::size_t size) {
return impl->GetSpan(src_addr, size);
}
void Memory::WriteBlock(const Common::ProcessAddress dest_addr, const void* src_buffer,
const std::size_t size) {
impl->WriteBlock(dest_addr, src_buffer, size);
@ -924,4 +960,12 @@ void Memory::MarkRegionDebug(Common::ProcessAddress vaddr, u64 size, bool debug)
impl->MarkRegionDebug(GetInteger(vaddr), size, debug);
}
void Memory::InvalidateRegion(Common::ProcessAddress dest_addr, size_t size) {
impl->InvalidateRegion(dest_addr, size);
}
void Memory::FlushRegion(Common::ProcessAddress dest_addr, size_t size) {
impl->FlushRegion(dest_addr, size);
}
} // namespace Core::Memory
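
GetSpan above only hands out a raw pointer when the first and last byte of the requested range fall inside the same contiguous mapping, which is what the new PageTable::blocks array records. A toy sketch of that eligibility check (hypothetical ToyPageTable, not the real page table):

#include <cstddef>
#include <cstdint>
#include <vector>

constexpr std::size_t PageBits = 12; // 4 KiB pages, analogous to YUZU_PAGEBITS

// Toy page table: a host pointer per page plus the id of the contiguous
// mapping ("block") the page belongs to. Both vectors have one entry per page.
struct ToyPageTable {
    std::vector<std::uint8_t*> pointers;
    std::vector<std::uint64_t> blocks;
};

// A direct span is only safe when the range stays inside one contiguous block;
// otherwise callers must fall back to the copying path.
std::uint8_t* GetSpan(const ToyPageTable& table, std::uint64_t addr, std::size_t size) {
    const std::uint64_t first_page = addr >> PageBits;
    const std::uint64_t last_page = (addr + size) >> PageBits;
    if (last_page >= table.blocks.size()) {
        return nullptr;
    }
    if (table.blocks[first_page] != table.blocks[last_page]) {
        return nullptr;
    }
    return table.pointers[first_page] + (addr & ((std::uint64_t{1} << PageBits) - 1));
}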

View File

@ -5,8 +5,12 @@
#include <cstddef>
#include <memory>
#include <optional>
#include <span>
#include <string>
#include <vector>
#include "common/scratch_buffer.h"
#include "common/typed_address.h"
#include "core/hle/result.h"
@ -24,6 +28,10 @@ class PhysicalMemory;
class KProcess;
} // namespace Kernel
namespace Tegra {
class MemoryManager;
}
namespace Core::Memory {
/**
@ -343,6 +351,9 @@ public:
*/
void ReadBlockUnsafe(Common::ProcessAddress src_addr, void* dest_buffer, std::size_t size);
const u8* GetSpan(const VAddr src_addr, const std::size_t size) const;
u8* GetSpan(const VAddr src_addr, const std::size_t size);
/**
* Writes a range of bytes into the current process' address space at the specified
* virtual address.
@ -461,6 +472,8 @@ public:
void MarkRegionDebug(Common::ProcessAddress vaddr, u64 size, bool debug);
void SetGPUDirtyManagers(std::span<Core::GPUDirtyMemoryManager> managers);
void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size);
void FlushRegion(Common::ProcessAddress dest_addr, size_t size);
private:
Core::System& system;
@ -469,4 +482,203 @@ private:
std::unique_ptr<Impl> impl;
};
enum GuestMemoryFlags : u32 {
Read = 1 << 0,
Write = 1 << 1,
Safe = 1 << 2,
Cached = 1 << 3,
SafeRead = Read | Safe,
SafeWrite = Write | Safe,
SafeReadWrite = SafeRead | SafeWrite,
SafeReadCachedWrite = SafeReadWrite | Cached,
UnsafeRead = Read,
UnsafeWrite = Write,
UnsafeReadWrite = UnsafeRead | UnsafeWrite,
UnsafeReadCachedWrite = UnsafeReadWrite | Cached,
};
namespace {
template <typename M, typename T, GuestMemoryFlags FLAGS>
class GuestMemory {
using iterator = T*;
using const_iterator = const T*;
using value_type = T;
using element_type = T;
using iterator_category = std::contiguous_iterator_tag;
public:
GuestMemory() = delete;
explicit GuestMemory(M& memory_, u64 addr_, std::size_t size_,
Common::ScratchBuffer<T>* backup = nullptr)
: memory{memory_}, addr{addr_}, size{size_} {
static_assert(FLAGS & GuestMemoryFlags::Read || FLAGS & GuestMemoryFlags::Write);
if constexpr (FLAGS & GuestMemoryFlags::Read) {
Read(addr, size, backup);
}
}
~GuestMemory() = default;
T* data() noexcept {
return data_span.data();
}
const T* data() const noexcept {
return data_span.data();
}
[[nodiscard]] T* begin() noexcept {
return data();
}
[[nodiscard]] const T* begin() const noexcept {
return data();
}
[[nodiscard]] T* end() noexcept {
return data() + size;
}
[[nodiscard]] const T* end() const noexcept {
return data() + size;
}
T& operator[](size_t index) noexcept {
return data_span[index];
}
const T& operator[](size_t index) const noexcept {
return data_span[index];
}
void SetAddressAndSize(u64 addr_, std::size_t size_) noexcept {
addr = addr_;
size = size_;
addr_changed = true;
}
std::span<T> Read(u64 addr_, std::size_t size_,
Common::ScratchBuffer<T>* backup = nullptr) noexcept {
addr = addr_;
size = size_;
if (size == 0) {
is_data_copy = true;
return {};
}
if (TrySetSpan()) {
if constexpr (FLAGS & GuestMemoryFlags::Safe) {
memory.FlushRegion(addr, size * sizeof(T));
}
} else {
if (backup) {
backup->resize_destructive(size);
data_span = *backup;
} else {
data_copy.resize(size);
data_span = std::span(data_copy);
}
is_data_copy = true;
span_valid = true;
if constexpr (FLAGS & GuestMemoryFlags::Safe) {
memory.ReadBlock(addr, data_span.data(), size * sizeof(T));
} else {
memory.ReadBlockUnsafe(addr, data_span.data(), size * sizeof(T));
}
}
return data_span;
}
void Write(std::span<T> write_data) noexcept {
if constexpr (FLAGS & GuestMemoryFlags::Cached) {
memory.WriteBlockCached(addr, write_data.data(), size * sizeof(T));
} else if constexpr (FLAGS & GuestMemoryFlags::Safe) {
memory.WriteBlock(addr, write_data.data(), size * sizeof(T));
} else {
memory.WriteBlockUnsafe(addr, write_data.data(), size * sizeof(T));
}
}
bool TrySetSpan() noexcept {
if (u8* ptr = memory.GetSpan(addr, size * sizeof(T)); ptr) {
data_span = {reinterpret_cast<T*>(ptr), size};
span_valid = true;
return true;
}
return false;
}
protected:
bool IsDataCopy() const noexcept {
return is_data_copy;
}
bool AddressChanged() const noexcept {
return addr_changed;
}
M& memory;
u64 addr;
size_t size;
std::span<T> data_span{};
std::vector<T> data_copy;
bool span_valid{false};
bool is_data_copy{false};
bool addr_changed{false};
};
template <typename M, typename T, GuestMemoryFlags FLAGS>
class GuestMemoryScoped : public GuestMemory<M, T, FLAGS> {
public:
GuestMemoryScoped() = delete;
explicit GuestMemoryScoped(M& memory_, u64 addr_, std::size_t size_,
Common::ScratchBuffer<T>* backup = nullptr)
: GuestMemory<M, T, FLAGS>(memory_, addr_, size_, backup) {
if constexpr (!(FLAGS & GuestMemoryFlags::Read)) {
if (!this->TrySetSpan()) {
if (backup) {
this->data_span = *backup;
this->span_valid = true;
this->is_data_copy = true;
}
}
}
}
~GuestMemoryScoped() {
if constexpr (FLAGS & GuestMemoryFlags::Write) {
if (this->size == 0) [[unlikely]] {
return;
}
if (this->AddressChanged() || this->IsDataCopy()) {
ASSERT(this->span_valid);
if constexpr (FLAGS & GuestMemoryFlags::Cached) {
this->memory.WriteBlockCached(this->addr, this->data_span.data(),
this->size * sizeof(T));
} else if constexpr (FLAGS & GuestMemoryFlags::Safe) {
this->memory.WriteBlock(this->addr, this->data_span.data(),
this->size * sizeof(T));
} else {
this->memory.WriteBlockUnsafe(this->addr, this->data_span.data(),
this->size * sizeof(T));
}
} else if constexpr (FLAGS & GuestMemoryFlags::Safe) {
this->memory.InvalidateRegion(this->addr, this->size * sizeof(T));
}
}
}
};
} // namespace
template <typename T, GuestMemoryFlags FLAGS>
using CpuGuestMemory = GuestMemory<Memory, T, FLAGS>;
template <typename T, GuestMemoryFlags FLAGS>
using CpuGuestMemoryScoped = GuestMemoryScoped<Memory, T, FLAGS>;
template <typename T, GuestMemoryFlags FLAGS>
using GpuGuestMemory = GuestMemory<Tegra::MemoryManager, T, FLAGS>;
template <typename T, GuestMemoryFlags FLAGS>
using GpuGuestMemoryScoped = GuestMemoryScoped<Tegra::MemoryManager, T, FLAGS>;
} // namespace Core::Memory
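For orientation, the call sites changed in the hunks below follow two patterns enabled by these wrappers. The sketch below is illustrative only and is not part of the change: the helper name CopyGuestBytes and its parameters are hypothetical, while the Core::Memory types, the flag names, and the SetAddressAndSize retargeting idiom come from the header above and reappear in the BufferCache::DMACopy and MemoryManager::CopyBlock hunks further down.

// Hypothetical sketch, assuming yuzu's Tegra::MemoryManager, GPUVAddr and
// Common::ScratchBuffer<u8> as used elsewhere in this change.
void CopyGuestBytes(Tegra::MemoryManager& memory_manager, GPUVAddr src, GPUVAddr dst,
                    size_t num_bytes, Common::ScratchBuffer<u8>& scratch) {
    // SafeReadCachedWrite: the constructor either aliases guest memory directly
    // (flushing the region first) or falls back to a copy into `scratch` via ReadBlock.
    Core::Memory::GpuGuestMemoryScoped<u8, Core::Memory::GuestMemoryFlags::SafeReadCachedWrite>
        data(memory_manager, src, num_bytes, &scratch);
    // Because the address is retargeted, the destructor writes the span out to `dst`
    // through WriteBlockCached instead of writing it back to `src`.
    data.SetAddressAndSize(dst, num_bytes);
}

Read-only consumers instead construct a non-scoped GpuGuestMemory (or CpuGuestMemory) with SafeRead or UnsafeRead and consume the resulting span directly, as the DmaPusher and texture cache hunks below do.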


@ -523,6 +523,8 @@ SDLDriver::SDLDriver(std::string input_engine_) : InputEngine(std::move(input_en
}
SDL_SetHint(SDL_HINT_JOYSTICK_HIDAPI_SWITCH_PLAYER_LED, "1");
// Use the same button mapping for Nintendo and non-Nintendo controllers
SDL_SetHint(SDL_HINT_GAMECONTROLLER_USE_BUTTON_LABELS, "0");
// Disable hidapi driver for xbox. Already default on Windows, this causes conflict with native
// driver on Linux.
@ -800,16 +802,9 @@ ButtonMapping SDLDriver::GetButtonMappingForDevice(const Common::ParamPackage& p
// This list is missing ZL/ZR since those are not considered buttons in SDL GameController.
// We will add those afterwards
// This list also excludes Screenshot since there's not really a mapping for that
ButtonBindings switch_to_sdl_button;
if (SDL_GameControllerGetType(controller) == SDL_CONTROLLER_TYPE_NINTENDO_SWITCH_PRO ||
SDL_GameControllerGetType(controller) == SDL_CONTROLLER_TYPE_NINTENDO_SWITCH_JOYCON_LEFT ||
SDL_GameControllerGetType(controller) == SDL_CONTROLLER_TYPE_NINTENDO_SWITCH_JOYCON_RIGHT) {
switch_to_sdl_button = GetNintendoButtonBinding(joystick);
} else {
switch_to_sdl_button = GetDefaultButtonBinding();
}
switch_to_sdl_button = GetDefaultButtonBinding(joystick);
// Add the missing bindings for ZL/ZR
static constexpr ZButtonBindings switch_to_sdl_axis{{
@ -830,32 +825,9 @@ ButtonMapping SDLDriver::GetButtonMappingForDevice(const Common::ParamPackage& p
return GetSingleControllerMapping(joystick, switch_to_sdl_button, switch_to_sdl_axis);
}
ButtonBindings SDLDriver::GetDefaultButtonBinding() const {
return {
std::pair{Settings::NativeButton::A, SDL_CONTROLLER_BUTTON_B},
{Settings::NativeButton::B, SDL_CONTROLLER_BUTTON_A},
{Settings::NativeButton::X, SDL_CONTROLLER_BUTTON_Y},
{Settings::NativeButton::Y, SDL_CONTROLLER_BUTTON_X},
{Settings::NativeButton::LStick, SDL_CONTROLLER_BUTTON_LEFTSTICK},
{Settings::NativeButton::RStick, SDL_CONTROLLER_BUTTON_RIGHTSTICK},
{Settings::NativeButton::L, SDL_CONTROLLER_BUTTON_LEFTSHOULDER},
{Settings::NativeButton::R, SDL_CONTROLLER_BUTTON_RIGHTSHOULDER},
{Settings::NativeButton::Plus, SDL_CONTROLLER_BUTTON_START},
{Settings::NativeButton::Minus, SDL_CONTROLLER_BUTTON_BACK},
{Settings::NativeButton::DLeft, SDL_CONTROLLER_BUTTON_DPAD_LEFT},
{Settings::NativeButton::DUp, SDL_CONTROLLER_BUTTON_DPAD_UP},
{Settings::NativeButton::DRight, SDL_CONTROLLER_BUTTON_DPAD_RIGHT},
{Settings::NativeButton::DDown, SDL_CONTROLLER_BUTTON_DPAD_DOWN},
{Settings::NativeButton::SL, SDL_CONTROLLER_BUTTON_LEFTSHOULDER},
{Settings::NativeButton::SR, SDL_CONTROLLER_BUTTON_RIGHTSHOULDER},
{Settings::NativeButton::Home, SDL_CONTROLLER_BUTTON_GUIDE},
{Settings::NativeButton::Screenshot, SDL_CONTROLLER_BUTTON_MISC1},
};
}
ButtonBindings SDLDriver::GetNintendoButtonBinding(
ButtonBindings SDLDriver::GetDefaultButtonBinding(
const std::shared_ptr<SDLJoystick>& joystick) const {
// Default SL/SR mapping for pro controllers
// Default SL/SR mapping for other controllers
auto sl_button = SDL_CONTROLLER_BUTTON_LEFTSHOULDER;
auto sr_button = SDL_CONTROLLER_BUTTON_RIGHTSHOULDER;
@ -869,10 +841,10 @@ ButtonBindings SDLDriver::GetNintendoButtonBinding(
}
return {
std::pair{Settings::NativeButton::A, SDL_CONTROLLER_BUTTON_A},
{Settings::NativeButton::B, SDL_CONTROLLER_BUTTON_B},
{Settings::NativeButton::X, SDL_CONTROLLER_BUTTON_X},
{Settings::NativeButton::Y, SDL_CONTROLLER_BUTTON_Y},
std::pair{Settings::NativeButton::A, SDL_CONTROLLER_BUTTON_B},
{Settings::NativeButton::B, SDL_CONTROLLER_BUTTON_A},
{Settings::NativeButton::X, SDL_CONTROLLER_BUTTON_Y},
{Settings::NativeButton::Y, SDL_CONTROLLER_BUTTON_X},
{Settings::NativeButton::LStick, SDL_CONTROLLER_BUTTON_LEFTSTICK},
{Settings::NativeButton::RStick, SDL_CONTROLLER_BUTTON_RIGHTSTICK},
{Settings::NativeButton::L, SDL_CONTROLLER_BUTTON_LEFTSHOULDER},


@ -100,11 +100,8 @@ private:
int axis_y, float offset_x,
float offset_y) const;
/// Returns the default button bindings list for generic controllers
ButtonBindings GetDefaultButtonBinding() const;
/// Returns the default button bindings list for nintendo controllers
ButtonBindings GetNintendoButtonBinding(const std::shared_ptr<SDLJoystick>& joystick) const;
/// Returns the default button bindings list
ButtonBindings GetDefaultButtonBinding(const std::shared_ptr<SDLJoystick>& joystick) const;
/// Returns the button mappings from a single controller
ButtonMapping GetSingleControllerMapping(const std::shared_ptr<SDLJoystick>& joystick,


@ -234,9 +234,10 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
if (has_new_downloads) {
memory_tracker.MarkRegionAsGpuModified(*cpu_dest_address, amount);
}
tmp_buffer.resize_destructive(amount);
cpu_memory.ReadBlockUnsafe(*cpu_src_address, tmp_buffer.data(), amount);
cpu_memory.WriteBlockUnsafe(*cpu_dest_address, tmp_buffer.data(), amount);
Core::Memory::CpuGuestMemoryScoped<u8, Core::Memory::GuestMemoryFlags::UnsafeReadWrite> tmp(
cpu_memory, *cpu_src_address, amount, &tmp_buffer);
tmp.SetAddressAndSize(*cpu_dest_address, amount);
return true;
}


@ -5,6 +5,7 @@
#include "common/microprofile.h"
#include "common/settings.h"
#include "core/core.h"
#include "core/memory.h"
#include "video_core/dma_pusher.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/gpu.h"
@ -12,6 +13,8 @@
namespace Tegra {
constexpr u32 MacroRegistersStart = 0xE00;
DmaPusher::DmaPusher(Core::System& system_, GPU& gpu_, MemoryManager& memory_manager_,
Control::ChannelState& channel_state_)
: gpu{gpu_}, system{system_}, memory_manager{memory_manager_}, puller{gpu_, memory_manager_,
@ -74,25 +77,16 @@ bool DmaPusher::Step() {
}
// Push buffer non-empty, read a word
command_headers.resize_destructive(command_list_header.size);
constexpr u32 MacroRegistersStart = 0xE00;
if (dma_state.method < MacroRegistersStart) {
if (Settings::IsGPULevelHigh()) {
memory_manager.ReadBlock(dma_state.dma_get, command_headers.data(),
command_list_header.size * sizeof(u32));
} else {
memory_manager.ReadBlockUnsafe(dma_state.dma_get, command_headers.data(),
command_list_header.size * sizeof(u32));
}
} else {
const size_t copy_size = command_list_header.size * sizeof(u32);
if (dma_state.method >= MacroRegistersStart) {
if (subchannels[dma_state.subchannel]) {
subchannels[dma_state.subchannel]->current_dirty =
memory_manager.IsMemoryDirty(dma_state.dma_get, copy_size);
subchannels[dma_state.subchannel]->current_dirty = memory_manager.IsMemoryDirty(
dma_state.dma_get, command_list_header.size * sizeof(u32));
}
memory_manager.ReadBlockUnsafe(dma_state.dma_get, command_headers.data(), copy_size);
}
ProcessCommands(command_headers);
Core::Memory::GpuGuestMemory<Tegra::CommandHeader,
Core::Memory::GuestMemoryFlags::UnsafeRead>
headers(memory_manager, dma_state.dma_get, command_list_header.size, &command_headers);
ProcessCommands(headers);
}
return true;


@ -5,6 +5,7 @@
#include "common/algorithm.h"
#include "common/assert.h"
#include "core/memory.h"
#include "video_core/engines/engine_upload.h"
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_interface.h"
@ -46,15 +47,11 @@ void State::ProcessData(const u32* data, size_t num_data) {
void State::ProcessData(std::span<const u8> read_buffer) {
const GPUVAddr address{regs.dest.Address()};
if (is_linear) {
if (regs.line_count == 1) {
rasterizer->AccelerateInlineToMemory(address, copy_size, read_buffer);
} else {
for (size_t line = 0; line < regs.line_count; ++line) {
const GPUVAddr dest_line = address + line * regs.dest.pitch;
std::span<const u8> buffer(read_buffer.data() + line * regs.line_length_in,
regs.line_length_in);
rasterizer->AccelerateInlineToMemory(dest_line, regs.line_length_in, buffer);
}
for (size_t line = 0; line < regs.line_count; ++line) {
const GPUVAddr dest_line = address + line * regs.dest.pitch;
std::span<const u8> buffer(read_buffer.data() + line * regs.line_length_in,
regs.line_length_in);
rasterizer->AccelerateInlineToMemory(dest_line, regs.line_length_in, buffer);
}
} else {
u32 width = regs.dest.width;
@ -70,13 +67,14 @@ void State::ProcessData(std::span<const u8> read_buffer) {
const std::size_t dst_size = Tegra::Texture::CalculateSize(
true, bytes_per_pixel, width, regs.dest.height, regs.dest.depth,
regs.dest.BlockHeight(), regs.dest.BlockDepth());
tmp_buffer.resize_destructive(dst_size);
memory_manager.ReadBlock(address, tmp_buffer.data(), dst_size);
Tegra::Texture::SwizzleSubrect(tmp_buffer, read_buffer, bytes_per_pixel, width,
regs.dest.height, regs.dest.depth, x_offset, regs.dest.y,
x_elements, regs.line_count, regs.dest.BlockHeight(),
Core::Memory::GpuGuestMemoryScoped<u8, Core::Memory::GuestMemoryFlags::SafeReadCachedWrite>
tmp(memory_manager, address, dst_size, &tmp_buffer);
Tegra::Texture::SwizzleSubrect(tmp, read_buffer, bytes_per_pixel, width, regs.dest.height,
regs.dest.depth, x_offset, regs.dest.y, x_elements,
regs.line_count, regs.dest.BlockHeight(),
regs.dest.BlockDepth(), regs.line_length_in);
memory_manager.WriteBlockCached(address, tmp_buffer.data(), dst_size);
}
}


@ -84,7 +84,6 @@ Texture::TICEntry KeplerCompute::GetTICEntry(u32 tic_index) const {
Texture::TICEntry tic_entry;
memory_manager.ReadBlockUnsafe(tic_address_gpu, &tic_entry, sizeof(Texture::TICEntry));
return tic_entry;
}


@ -9,6 +9,7 @@
#include "common/settings.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/memory.h"
#include "video_core/dirty_flags.h"
#include "video_core/engines/draw_manager.h"
#include "video_core/engines/maxwell_3d.h"
@ -679,17 +680,14 @@ void Maxwell3D::ProcessCBData(u32 value) {
Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const {
const GPUVAddr tic_address_gpu{regs.tex_header.Address() +
tic_index * sizeof(Texture::TICEntry)};
Texture::TICEntry tic_entry;
memory_manager.ReadBlockUnsafe(tic_address_gpu, &tic_entry, sizeof(Texture::TICEntry));
return tic_entry;
}
Texture::TSCEntry Maxwell3D::GetTSCEntry(u32 tsc_index) const {
const GPUVAddr tsc_address_gpu{regs.tex_sampler.Address() +
tsc_index * sizeof(Texture::TSCEntry)};
Texture::TSCEntry tsc_entry;
memory_manager.ReadBlockUnsafe(tsc_address_gpu, &tsc_entry, sizeof(Texture::TSCEntry));
return tsc_entry;


@ -7,6 +7,7 @@
#include "common/microprofile.h"
#include "common/settings.h"
#include "core/core.h"
#include "core/memory.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/engines/maxwell_dma.h"
#include "video_core/memory_manager.h"
@ -130,11 +131,12 @@ void MaxwellDMA::Launch() {
UNIMPLEMENTED_IF(regs.offset_out % 16 != 0);
read_buffer.resize_destructive(16);
for (u32 offset = 0; offset < regs.line_length_in; offset += 16) {
memory_manager.ReadBlock(
convert_linear_2_blocklinear_addr(regs.offset_in + offset),
read_buffer.data(), read_buffer.size());
memory_manager.WriteBlockCached(regs.offset_out + offset, read_buffer.data(),
read_buffer.size());
Core::Memory::GpuGuestMemoryScoped<
u8, Core::Memory::GuestMemoryFlags::SafeReadCachedWrite>
tmp_write_buffer(memory_manager,
convert_linear_2_blocklinear_addr(regs.offset_in + offset),
16, &read_buffer);
tmp_write_buffer.SetAddressAndSize(regs.offset_out + offset, 16);
}
} else if (is_src_pitch && !is_dst_pitch) {
UNIMPLEMENTED_IF(regs.line_length_in % 16 != 0);
@ -142,20 +144,19 @@ void MaxwellDMA::Launch() {
UNIMPLEMENTED_IF(regs.offset_out % 16 != 0);
read_buffer.resize_destructive(16);
for (u32 offset = 0; offset < regs.line_length_in; offset += 16) {
memory_manager.ReadBlock(regs.offset_in + offset, read_buffer.data(),
read_buffer.size());
memory_manager.WriteBlockCached(
convert_linear_2_blocklinear_addr(regs.offset_out + offset),
read_buffer.data(), read_buffer.size());
Core::Memory::GpuGuestMemoryScoped<
u8, Core::Memory::GuestMemoryFlags::SafeReadCachedWrite>
tmp_write_buffer(memory_manager, regs.offset_in + offset, 16, &read_buffer);
tmp_write_buffer.SetAddressAndSize(
convert_linear_2_blocklinear_addr(regs.offset_out + offset), 16);
}
} else {
if (!accelerate.BufferCopy(regs.offset_in, regs.offset_out, regs.line_length_in)) {
read_buffer.resize_destructive(regs.line_length_in);
memory_manager.ReadBlock(regs.offset_in, read_buffer.data(),
regs.line_length_in,
VideoCommon::CacheType::NoBufferCache);
memory_manager.WriteBlockCached(regs.offset_out, read_buffer.data(),
regs.line_length_in);
Core::Memory::GpuGuestMemoryScoped<
u8, Core::Memory::GuestMemoryFlags::SafeReadCachedWrite>
tmp_write_buffer(memory_manager, regs.offset_in, regs.line_length_in,
&read_buffer);
tmp_write_buffer.SetAddressAndSize(regs.offset_out, regs.line_length_in);
}
}
}
@ -222,17 +223,15 @@ void MaxwellDMA::CopyBlockLinearToPitch() {
CalculateSize(true, bytes_per_pixel, width, height, depth, block_height, block_depth);
const size_t dst_size = dst_operand.pitch * regs.line_count;
read_buffer.resize_destructive(src_size);
write_buffer.resize_destructive(dst_size);
memory_manager.ReadBlock(src_operand.address, read_buffer.data(), src_size);
memory_manager.ReadBlock(dst_operand.address, write_buffer.data(), dst_size);
Core::Memory::GpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead> tmp_read_buffer(
memory_manager, src_operand.address, src_size, &read_buffer);
Core::Memory::GpuGuestMemoryScoped<u8, Core::Memory::GuestMemoryFlags::SafeReadCachedWrite>
tmp_write_buffer(memory_manager, dst_operand.address, dst_size, &write_buffer);
UnswizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset,
src_params.origin.y, x_elements, regs.line_count, block_height, block_depth,
dst_operand.pitch);
memory_manager.WriteBlockCached(regs.offset_out, write_buffer.data(), dst_size);
UnswizzleSubrect(tmp_write_buffer, tmp_read_buffer, bytes_per_pixel, width, height, depth,
x_offset, src_params.origin.y, x_elements, regs.line_count, block_height,
block_depth, dst_operand.pitch);
}
void MaxwellDMA::CopyPitchToBlockLinear() {
@ -287,18 +286,17 @@ void MaxwellDMA::CopyPitchToBlockLinear() {
CalculateSize(true, bytes_per_pixel, width, height, depth, block_height, block_depth);
const size_t src_size = static_cast<size_t>(regs.pitch_in) * regs.line_count;
read_buffer.resize_destructive(src_size);
write_buffer.resize_destructive(dst_size);
GPUVAddr src_addr = regs.offset_in;
GPUVAddr dst_addr = regs.offset_out;
Core::Memory::GpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead> tmp_read_buffer(
memory_manager, src_addr, src_size, &read_buffer);
Core::Memory::GpuGuestMemoryScoped<u8, Core::Memory::GuestMemoryFlags::SafeReadCachedWrite>
tmp_write_buffer(memory_manager, dst_addr, dst_size, &write_buffer);
memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size);
// If the input is linear and the output is tiled, swizzle the input and copy it over.
SwizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset,
dst_params.origin.y, x_elements, regs.line_count, block_height, block_depth,
regs.pitch_in);
memory_manager.WriteBlockCached(regs.offset_out, write_buffer.data(), dst_size);
// If the input is linear and the output is tiled, swizzle the input and copy it over.
SwizzleSubrect(tmp_write_buffer, tmp_read_buffer, bytes_per_pixel, width, height, depth,
x_offset, dst_params.origin.y, x_elements, regs.line_count, block_height,
block_depth, regs.pitch_in);
}
void MaxwellDMA::CopyBlockLinearToBlockLinear() {
@ -342,23 +340,20 @@ void MaxwellDMA::CopyBlockLinearToBlockLinear() {
const u32 pitch = x_elements * bytes_per_pixel;
const size_t mid_buffer_size = pitch * regs.line_count;
read_buffer.resize_destructive(src_size);
write_buffer.resize_destructive(dst_size);
intermediate_buffer.resize_destructive(mid_buffer_size);
memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
Core::Memory::GpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead> tmp_read_buffer(
memory_manager, regs.offset_in, src_size, &read_buffer);
Core::Memory::GpuGuestMemoryScoped<u8, Core::Memory::GuestMemoryFlags::SafeReadCachedWrite>
tmp_write_buffer(memory_manager, regs.offset_out, dst_size, &write_buffer);
UnswizzleSubrect(intermediate_buffer, read_buffer, bytes_per_pixel, src_width, src.height,
UnswizzleSubrect(intermediate_buffer, tmp_read_buffer, bytes_per_pixel, src_width, src.height,
src.depth, src_x_offset, src.origin.y, x_elements, regs.line_count,
src.block_size.height, src.block_size.depth, pitch);
SwizzleSubrect(write_buffer, intermediate_buffer, bytes_per_pixel, dst_width, dst.height,
SwizzleSubrect(tmp_write_buffer, intermediate_buffer, bytes_per_pixel, dst_width, dst.height,
dst.depth, dst_x_offset, dst.origin.y, x_elements, regs.line_count,
dst.block_size.height, dst.block_size.depth, pitch);
memory_manager.WriteBlockCached(regs.offset_out, write_buffer.data(), dst_size);
}
void MaxwellDMA::ReleaseSemaphore() {


@ -159,11 +159,11 @@ bool SoftwareBlitEngine::Blit(Fermi2D::Surface& src, Fermi2D::Surface& dst,
const auto src_bytes_per_pixel = BytesPerBlock(PixelFormatFromRenderTargetFormat(src.format));
const auto dst_bytes_per_pixel = BytesPerBlock(PixelFormatFromRenderTargetFormat(dst.format));
const size_t src_size = get_surface_size(src, src_bytes_per_pixel);
impl->tmp_buffer.resize_destructive(src_size);
memory_manager.ReadBlock(src.Address(), impl->tmp_buffer.data(), src_size);
Core::Memory::GpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead> tmp_buffer(
memory_manager, src.Address(), src_size, &impl->tmp_buffer);
const size_t src_copy_size = src_extent_x * src_extent_y * src_bytes_per_pixel;
const size_t dst_copy_size = dst_extent_x * dst_extent_y * dst_bytes_per_pixel;
impl->src_buffer.resize_destructive(src_copy_size);
@ -200,12 +200,11 @@ bool SoftwareBlitEngine::Blit(Fermi2D::Surface& src, Fermi2D::Surface& dst,
impl->dst_buffer.resize_destructive(dst_copy_size);
if (src.linear == Fermi2D::MemoryLayout::BlockLinear) {
UnswizzleSubrect(impl->src_buffer, impl->tmp_buffer, src_bytes_per_pixel, src.width,
src.height, src.depth, config.src_x0, config.src_y0, src_extent_x,
src_extent_y, src.block_height, src.block_depth,
src_extent_x * src_bytes_per_pixel);
UnswizzleSubrect(impl->src_buffer, tmp_buffer, src_bytes_per_pixel, src.width, src.height,
src.depth, config.src_x0, config.src_y0, src_extent_x, src_extent_y,
src.block_height, src.block_depth, src_extent_x * src_bytes_per_pixel);
} else {
process_pitch_linear(false, impl->tmp_buffer, impl->src_buffer, src_extent_x, src_extent_y,
process_pitch_linear(false, tmp_buffer, impl->src_buffer, src_extent_x, src_extent_y,
src.pitch, config.src_x0, config.src_y0, src_bytes_per_pixel);
}
@ -221,20 +220,18 @@ bool SoftwareBlitEngine::Blit(Fermi2D::Surface& src, Fermi2D::Surface& dst,
}
const size_t dst_size = get_surface_size(dst, dst_bytes_per_pixel);
impl->tmp_buffer.resize_destructive(dst_size);
memory_manager.ReadBlock(dst.Address(), impl->tmp_buffer.data(), dst_size);
Core::Memory::GpuGuestMemoryScoped<u8, Core::Memory::GuestMemoryFlags::SafeReadWrite>
tmp_buffer2(memory_manager, dst.Address(), dst_size, &impl->tmp_buffer);
if (dst.linear == Fermi2D::MemoryLayout::BlockLinear) {
SwizzleSubrect(impl->tmp_buffer, impl->dst_buffer, dst_bytes_per_pixel, dst.width,
dst.height, dst.depth, config.dst_x0, config.dst_y0, dst_extent_x,
dst_extent_y, dst.block_height, dst.block_depth,
dst_extent_x * dst_bytes_per_pixel);
SwizzleSubrect(tmp_buffer2, impl->dst_buffer, dst_bytes_per_pixel, dst.width, dst.height,
dst.depth, config.dst_x0, config.dst_y0, dst_extent_x, dst_extent_y,
dst.block_height, dst.block_depth, dst_extent_x * dst_bytes_per_pixel);
} else {
process_pitch_linear(true, impl->dst_buffer, impl->tmp_buffer, dst_extent_x, dst_extent_y,
process_pitch_linear(true, impl->dst_buffer, tmp_buffer2, dst_extent_x, dst_extent_y,
dst.pitch, config.dst_x0, config.dst_y0,
static_cast<size_t>(dst_bytes_per_pixel));
}
memory_manager.WriteBlock(dst.Address(), impl->tmp_buffer.data(), dst_size);
return true;
}


@ -10,13 +10,13 @@
#include "core/device_memory.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/memory.h"
#include "video_core/invalidation_accumulator.h"
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/renderer_base.h"
namespace Tegra {
using Core::Memory::GuestMemoryFlags;
std::atomic<size_t> MemoryManager::unique_identifier_generator{};
@ -587,13 +587,10 @@ void MemoryManager::InvalidateRegion(GPUVAddr gpu_addr, size_t size,
void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size,
VideoCommon::CacheType which) {
tmp_buffer.resize_destructive(size);
ReadBlock(gpu_src_addr, tmp_buffer.data(), size, which);
// The output block must be flushed in case it has data modified from the GPU.
// Fixes NPC geometry in Zombie Panic in Wonderland DX
Core::Memory::GpuGuestMemoryScoped<u8, GuestMemoryFlags::SafeReadWrite> data(
*this, gpu_src_addr, size);
data.SetAddressAndSize(gpu_dest_addr, size);
FlushRegion(gpu_dest_addr, size, which);
WriteBlock(gpu_dest_addr, tmp_buffer.data(), size, which);
}
bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const {
@ -758,4 +755,23 @@ void MemoryManager::FlushCaching() {
accumulator->Clear();
}
const u8* MemoryManager::GetSpan(const GPUVAddr src_addr, const std::size_t size) const {
if (!IsContinuousRange(src_addr, size)) {
return nullptr;
}
auto cpu_addr = GpuToCpuAddress(src_addr);
if (cpu_addr) {
return memory.GetSpan(*cpu_addr, size);
}
return nullptr;
}
u8* MemoryManager::GetSpan(const GPUVAddr src_addr, const std::size_t size) {
if (!IsContinuousRange(src_addr, size)) {
return nullptr;
}
auto cpu_addr = GpuToCpuAddress(src_addr);
if (cpu_addr) {
return memory.GetSpan(*cpu_addr, size);
}
return nullptr;
}
} // namespace Tegra


@ -15,6 +15,7 @@
#include "common/range_map.h"
#include "common/scratch_buffer.h"
#include "common/virtual_buffer.h"
#include "core/memory.h"
#include "video_core/cache_types.h"
#include "video_core/pte_kind.h"
@ -62,6 +63,20 @@ public:
[[nodiscard]] u8* GetPointer(GPUVAddr addr);
[[nodiscard]] const u8* GetPointer(GPUVAddr addr) const;
template <typename T>
[[nodiscard]] T* GetPointer(GPUVAddr addr) {
const auto address{GpuToCpuAddress(addr)};
if (!address) {
return {};
}
return memory.GetPointer(*address);
}
template <typename T>
[[nodiscard]] const T* GetPointer(GPUVAddr addr) const {
const auto address{GpuToCpuAddress(addr)};
if (!address) {
return {};
}
return memory.GetPointer(*address);
}
/**
* ReadBlock and WriteBlock are full read and write operations over virtual
* GPU Memory. It's important to use these when GPU memory may not be continuous
@ -139,6 +154,9 @@ public:
void FlushCaching();
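/// Returns a host pointer spanning [src_addr, src_addr + size) when the range maps to
/// contiguous memory, or nullptr otherwise, in which case callers fall back to
/// ReadBlock/WriteBlock.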
const u8* GetSpan(const GPUVAddr src_addr, const std::size_t size) const;
u8* GetSpan(const GPUVAddr src_addr, const std::size_t size);
private:
template <bool is_big_pages, typename FuncMapped, typename FuncReserved, typename FuncUnmapped>
inline void MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size, FuncMapped&& func_mapped,


@ -8,6 +8,7 @@
#include "common/alignment.h"
#include "common/settings.h"
#include "core/memory.h"
#include "video_core/control/channel_state.h"
#include "video_core/dirty_flags.h"
#include "video_core/engines/kepler_compute.h"
@ -1026,19 +1027,19 @@ void TextureCache<P>::UploadImageContents(Image& image, StagingBuffer& staging)
runtime.AccelerateImageUpload(image, staging, uploads);
return;
}
const size_t guest_size_bytes = image.guest_size_bytes;
swizzle_data_buffer.resize_destructive(guest_size_bytes);
gpu_memory->ReadBlockUnsafe(gpu_addr, swizzle_data_buffer.data(), guest_size_bytes);
Core::Memory::GpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead> swizzle_data(
*gpu_memory, gpu_addr, image.guest_size_bytes, &swizzle_data_buffer);
if (True(image.flags & ImageFlagBits::Converted)) {
unswizzle_data_buffer.resize_destructive(image.unswizzled_size_bytes);
auto copies = UnswizzleImage(*gpu_memory, gpu_addr, image.info, swizzle_data_buffer,
unswizzle_data_buffer);
auto copies =
UnswizzleImage(*gpu_memory, gpu_addr, image.info, swizzle_data, unswizzle_data_buffer);
ConvertImage(unswizzle_data_buffer, image.info, mapped_span, copies);
image.UploadMemory(staging, copies);
} else {
const auto copies =
UnswizzleImage(*gpu_memory, gpu_addr, image.info, swizzle_data_buffer, mapped_span);
UnswizzleImage(*gpu_memory, gpu_addr, image.info, swizzle_data, mapped_span);
image.UploadMemory(staging, copies);
}
}
@ -1231,11 +1232,12 @@ void TextureCache<P>::QueueAsyncDecode(Image& image, ImageId image_id) {
decode->image_id = image_id;
async_decodes.push_back(std::move(decode));
Common::ScratchBuffer<u8> local_unswizzle_data_buffer(image.unswizzled_size_bytes);
const size_t guest_size_bytes = image.guest_size_bytes;
swizzle_data_buffer.resize_destructive(guest_size_bytes);
gpu_memory->ReadBlockUnsafe(image.gpu_addr, swizzle_data_buffer.data(), guest_size_bytes);
auto copies = UnswizzleImage(*gpu_memory, image.gpu_addr, image.info, swizzle_data_buffer,
static Common::ScratchBuffer<u8> local_unswizzle_data_buffer;
local_unswizzle_data_buffer.resize_destructive(image.unswizzled_size_bytes);
Core::Memory::GpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead> swizzle_data(
*gpu_memory, image.gpu_addr, image.guest_size_bytes, &swizzle_data_buffer);
auto copies = UnswizzleImage(*gpu_memory, image.gpu_addr, image.info, swizzle_data,
local_unswizzle_data_buffer);
const size_t out_size = MapSizeBytes(image);


@ -20,6 +20,7 @@
#include "common/div_ceil.h"
#include "common/scratch_buffer.h"
#include "common/settings.h"
#include "core/memory.h"
#include "video_core/compatible_formats.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/memory_manager.h"
@ -544,17 +545,15 @@ void SwizzleBlockLinearImage(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr
tile_size.height, info.tile_width_spacing);
const size_t subresource_size = sizes[level];
tmp_buffer.resize_destructive(subresource_size);
const std::span<u8> dst(tmp_buffer);
for (s32 layer = 0; layer < info.resources.layers; ++layer) {
const std::span<const u8> src = input.subspan(host_offset);
gpu_memory.ReadBlockUnsafe(gpu_addr + guest_offset, dst.data(), dst.size_bytes());
{
Core::Memory::GpuGuestMemoryScoped<u8, Core::Memory::GuestMemoryFlags::UnsafeReadWrite>
dst(gpu_memory, gpu_addr + guest_offset, subresource_size, &tmp_buffer);
SwizzleTexture(dst, src, bytes_per_block, num_tiles.width, num_tiles.height,
num_tiles.depth, block.height, block.depth);
gpu_memory.WriteBlockUnsafe(gpu_addr + guest_offset, dst.data(), dst.size_bytes());
SwizzleTexture(dst, src, bytes_per_block, num_tiles.width, num_tiles.height,
num_tiles.depth, block.height, block.depth);
}
host_offset += host_bytes_per_layer;
guest_offset += layer_stride;
@ -837,6 +836,7 @@ boost::container::small_vector<BufferImageCopy, 16> UnswizzleImage(Tegra::Memory
const Extent3D size = info.size;
if (info.type == ImageType::Linear) {
ASSERT(output.size_bytes() >= guest_size_bytes);
gpu_memory.ReadBlockUnsafe(gpu_addr, output.data(), guest_size_bytes);
ASSERT((info.pitch >> bpp_log2) << bpp_log2 == info.pitch);
@ -904,16 +904,6 @@ boost::container::small_vector<BufferImageCopy, 16> UnswizzleImage(Tegra::Memory
return copies;
}
BufferCopy UploadBufferCopy(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr,
const ImageBase& image, std::span<u8> output) {
gpu_memory.ReadBlockUnsafe(gpu_addr, output.data(), image.guest_size_bytes);
return BufferCopy{
.src_offset = 0,
.dst_offset = 0,
.size = image.guest_size_bytes,
};
}
void ConvertImage(std::span<const u8> input, const ImageInfo& info, std::span<u8> output,
std::span<BufferImageCopy> copies) {
u32 output_offset = 0;


@ -66,9 +66,6 @@ struct OverlapResult {
Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr, const ImageInfo& info,
std::span<const u8> input, std::span<u8> output);
[[nodiscard]] BufferCopy UploadBufferCopy(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr,
const ImageBase& image, std::span<u8> output);
void ConvertImage(std::span<const u8> input, const ImageInfo& info, std::span<u8> output,
std::span<BufferImageCopy> copies);