Compare commits
13 Commits: enforce-se ... file-watch

SHA1
----------
aa79505ddd
2b7faf60a3
fd32a82b4e
5193a5d222
1f6393e7d5
9b2a5926a6
e13735b624
3218af38d0
1cf64ffaef
998b9a9525
27bad3a699
1570aeffcb
09ee80f590
@@ -1,10 +1,5 @@
#!/bin/bash -ex

# TODO: Work around pip install issues with Python 3.12 in the GitHub runner image.
# See: https://github.com/actions/runner-images/issues/8709
PYTHON_PATH=$(brew --prefix python@3.11)
export PATH=$PYTHON_PATH/bin:$PYTHON_PATH/libexec/bin:$PATH

mkdir build && cd build
cmake .. -GNinja \
    -DCMAKE_BUILD_TYPE=Release \
@@ -1,10 +1,5 @@
#!/bin/bash -ex

# TODO: Work around pip install issues with Python 3.12 in the GitHub runner image.
# See: https://github.com/actions/runner-images/issues/8709
PYTHON_PATH=$(brew --prefix python@3.11)
export PATH=$PYTHON_PATH/bin:$PYTHON_PATH/libexec/bin:$PATH

mkdir build && cd build
cmake .. -GNinja \
    -DCMAKE_BUILD_TYPE=Release \
6 .gitmodules (vendored)
@@ -79,9 +79,15 @@
[submodule "sirit"]
    path = externals/sirit
    url = https://github.com/yuzu-emu/sirit
[submodule "faad2"]
    path = externals/faad2/faad2
    url = https://github.com/knik0/faad2
[submodule "library-headers"]
    path = externals/library-headers
    url = https://github.com/citra-emu/ext-library-headers.git
[submodule "libadrenotools"]
    path = externals/libadrenotools
    url = https://github.com/bylaws/libadrenotools
[submodule "oaknut"]
    path = externals/oaknut
    url = https://github.com/merryhime/oaknut.git
@@ -81,9 +81,6 @@ CMAKE_DEPENDENT_OPTION(ENABLE_LIBUSB "Enable libusb for GameCube Adapter support

option(USE_DISCORD_PRESENCE "Enables Discord Rich Presence" OFF)

CMAKE_DEPENDENT_OPTION(ENABLE_MF "Use Media Foundation decoder (preferred over FFmpeg)" ON "WIN32" OFF)
CMAKE_DEPENDENT_OPTION(ENABLE_AUDIOTOOLBOX "Use AudioToolbox decoder (preferred over FFmpeg)" ON "APPLE" OFF)

CMAKE_DEPENDENT_OPTION(CITRA_ENABLE_BUNDLE_TARGET "Enable the distribution bundling target." ON "NOT ANDROID AND NOT IOS" OFF)

# Compile options
@@ -67,13 +67,22 @@ function(download_qt target)

if (NOT EXISTS "${prefix}")
message(STATUS "Downloading binaries for Qt...")
set(AQT_PREBUILD_BASE_URL "https://github.com/miurahr/aqtinstall/releases/download/v3.1.9")
if (WIN32)
set(aqt_path "${base_path}/aqt.exe")
file(DOWNLOAD
https://github.com/miurahr/aqtinstall/releases/download/v3.1.7/aqt.exe
${AQT_PREBUILD_BASE_URL}/aqt.exe
${aqt_path} SHOW_PROGRESS)
execute_process(COMMAND ${aqt_path} ${install_args}
WORKING_DIRECTORY ${base_path})
elseif (APPLE)
set(aqt_path "${base_path}/aqt-macos")
file(DOWNLOAD
${AQT_PREBUILD_BASE_URL}/aqt-macos
${aqt_path} SHOW_PROGRESS)
execute_process(COMMAND chmod +x ${aqt_path})
execute_process(COMMAND ${aqt_path} ${install_args}
WORKING_DIRECTORY ${base_path})
else()
# aqt does not offer binary releases for other platforms, so download and run from pip.
set(aqt_install_path "${base_path}/aqt")
@@ -10,16 +10,20 @@ set(HASH_FILES
"${VIDEO_CORE}/renderer_opengl/gl_shader_util.h"
"${VIDEO_CORE}/renderer_vulkan/vk_shader_util.cpp"
"${VIDEO_CORE}/renderer_vulkan/vk_shader_util.h"
"${VIDEO_CORE}/shader/generator/glsl_fs_shader_gen.cpp"
"${VIDEO_CORE}/shader/generator/glsl_fs_shader_gen.h"
"${VIDEO_CORE}/shader/generator/glsl_shader_decompiler.cpp"
"${VIDEO_CORE}/shader/generator/glsl_shader_decompiler.h"
"${VIDEO_CORE}/shader/generator/glsl_shader_gen.cpp"
"${VIDEO_CORE}/shader/generator/glsl_shader_gen.h"
"${VIDEO_CORE}/shader/generator/pica_fs_config.cpp"
"${VIDEO_CORE}/shader/generator/pica_fs_config.h"
"${VIDEO_CORE}/shader/generator/shader_gen.cpp"
"${VIDEO_CORE}/shader/generator/shader_gen.h"
"${VIDEO_CORE}/shader/generator/shader_uniforms.cpp"
"${VIDEO_CORE}/shader/generator/shader_uniforms.h"
"${VIDEO_CORE}/shader/generator/spv_shader_gen.cpp"
"${VIDEO_CORE}/shader/generator/spv_shader_gen.h"
"${VIDEO_CORE}/shader/generator/spv_fs_shader_gen.cpp"
"${VIDEO_CORE}/shader/generator/spv_fs_shader_gen.h"
"${VIDEO_CORE}/shader/shader.cpp"
"${VIDEO_CORE}/shader/shader.h"
"${VIDEO_CORE}/pica.cpp"
23 externals/CMakeLists.txt (vendored)
@@ -85,6 +85,11 @@ if ("x86_64" IN_LIST ARCHITECTURE)
endif()
endif()

# Oaknut
if ("arm64" IN_LIST ARCHITECTURE)
add_subdirectory(oaknut EXCLUDE_FROM_ALL)
endif()

# Dynarmic
if ("x86_64" IN_LIST ARCHITECTURE OR "arm64" IN_LIST ARCHITECTURE)
if(USE_SYSTEM_DYNARMIC)
@@ -156,24 +161,12 @@ endif()
# Open Source Archives
add_subdirectory(open_source_archives)

# faad2
add_subdirectory(faad2 EXCLUDE_FROM_ALL)

# Dynamic library headers
add_library(library-headers INTERFACE)

if (USE_SYSTEM_FDK_AAC_HEADERS)
find_path(SYSTEM_FDK_AAC_INCLUDES NAMES fdk-aac/aacdecoder_lib.h)
if (SYSTEM_FDK_AAC_INCLUDES STREQUAL "SYSTEM_FDK_AAC_INCLUDES-NOTFOUND")
message(WARNING "System fdk-aac headers not found. Falling back on bundled headers.")
else()
message(STATUS "Using system fdk_aac headers.")
target_include_directories(library-headers SYSTEM INTERFACE ${SYSTEM_FDK_AAC_INCLUDES})
set(FOUND_FDK_AAC_HEADERS ON)
endif()
endif()
if (NOT FOUND_FDK_AAC_HEADERS)
message(STATUS "Using bundled fdk_aac headers.")
target_include_directories(library-headers SYSTEM INTERFACE ./library-headers/fdk-aac/include)
endif()

if (USE_SYSTEM_FFMPEG_HEADERS)
find_path(SYSTEM_FFMPEG_INCLUDES NAMES libavutil/avutil.h)
if (SYSTEM_FFMPEG_INCLUDES STREQUAL "SYSTEM_FFMPEG_INCLUDES-NOTFOUND")
@@ -15,7 +15,6 @@ option(USE_SYSTEM_DYNARMIC "Use the system dynarmic (instead of the bundled one)
option(USE_SYSTEM_FMT "Use the system fmt (instead of the bundled one)" OFF)
option(USE_SYSTEM_XBYAK "Use the system xbyak (instead of the bundled one)" OFF)
option(USE_SYSTEM_INIH "Use the system inih (instead of the bundled one)" OFF)
option(USE_SYSTEM_FDK_AAC_HEADERS "Use the system fdk-aac headers (instead of the bundled one)" OFF)
option(USE_SYSTEM_FFMPEG_HEADERS "Use the system FFmpeg headers (instead of the bundled one)" OFF)
option(USE_SYSTEM_GLSLANG "Use the system glslang and SPIR-V libraries (instead of the bundled ones)" OFF)
option(USE_SYSTEM_ZSTD "Use the system Zstandard library (instead of the bundled one)" OFF)
@@ -36,7 +35,6 @@ CMAKE_DEPENDENT_OPTION(DISABLE_SYSTEM_DYNARMIC "Disable system Dynarmic" OFF "US
CMAKE_DEPENDENT_OPTION(DISABLE_SYSTEM_FMT "Disable system fmt" OFF "USE_SYSTEM_LIBS" OFF)
CMAKE_DEPENDENT_OPTION(DISABLE_SYSTEM_XBYAK "Disable system xbyak" OFF "USE_SYSTEM_LIBS" OFF)
CMAKE_DEPENDENT_OPTION(DISABLE_SYSTEM_INIH "Disable system inih" OFF "USE_SYSTEM_LIBS" OFF)
CMAKE_DEPENDENT_OPTION(DISABLE_SYSTEM_FDK_AAC_HEADERS "Disable system fdk_aac" OFF "USE_SYSTEM_LIBS" OFF)
CMAKE_DEPENDENT_OPTION(DISABLE_SYSTEM_FFMPEG_HEADERS "Disable system ffmpeg" OFF "USE_SYSTEM_LIBS" OFF)
CMAKE_DEPENDENT_OPTION(DISABLE_SYSTEM_GLSLANG "Disable system glslang" OFF "USE_SYSTEM_LIBS" OFF)
CMAKE_DEPENDENT_OPTION(DISABLE_SYSTEM_ZSTD "Disable system Zstandard" OFF "USE_SYSTEM_LIBS" OFF)
@@ -57,7 +55,6 @@ set(LIB_VAR_LIST
FMT
XBYAK
INIH
FDK_AAC_HEADERS
FFMPEG_HEADERS
GLSLANG
ZSTD
102 externals/faad2/CMakeLists.txt (vendored, new file)
@@ -0,0 +1,102 @@
# Copy source to build directory for some modifications.
set(FAAD2_SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/faad2/libfaad")
if (NOT EXISTS "${FAAD2_SOURCE_DIR}")
file(COPY faad2/libfaad/ DESTINATION "${FAAD2_SOURCE_DIR}/")

# These are fixed defines for some reason and not controllable with compile flags.
file(READ "${FAAD2_SOURCE_DIR}/common.h" FAAD2_COMMON_H)
# Disable SBR decoding since we don't want it for AAC-LC.
string(REGEX REPLACE "#define SBR_DEC" "" FAAD2_COMMON_H "${FAAD2_COMMON_H}")
# Disable PS decoding. This can cause mono to be upmixed to stereo, which we don't want.
string(REGEX REPLACE "#define PS_DEC" "" FAAD2_COMMON_H "${FAAD2_COMMON_H}")
file(WRITE "${FAAD2_SOURCE_DIR}/common.h" "${FAAD2_COMMON_H}")
endif()

# Source list from faad2/libfaad/Makefile.am, cut down to just what we need for AAC-LC.
add_library(faad2 STATIC EXCLUDE_FROM_ALL
"${FAAD2_SOURCE_DIR}/bits.c"
"${FAAD2_SOURCE_DIR}/cfft.c"
"${FAAD2_SOURCE_DIR}/common.c"
"${FAAD2_SOURCE_DIR}/decoder.c"
"${FAAD2_SOURCE_DIR}/drc.c"
"${FAAD2_SOURCE_DIR}/error.c"
"${FAAD2_SOURCE_DIR}/filtbank.c"
"${FAAD2_SOURCE_DIR}/huffman.c"
"${FAAD2_SOURCE_DIR}/is.c"
"${FAAD2_SOURCE_DIR}/mdct.c"
"${FAAD2_SOURCE_DIR}/mp4.c"
"${FAAD2_SOURCE_DIR}/ms.c"
"${FAAD2_SOURCE_DIR}/output.c"
"${FAAD2_SOURCE_DIR}/pns.c"
"${FAAD2_SOURCE_DIR}/pulse.c"
"${FAAD2_SOURCE_DIR}/specrec.c"
"${FAAD2_SOURCE_DIR}/syntax.c"
"${FAAD2_SOURCE_DIR}/tns.c"
)
target_include_directories(faad2 PUBLIC faad2/include PRIVATE "${FAAD2_SOURCE_DIR}")

# Configure compile definitions.

# Read version from autoconf script for configuring constant.
file(READ faad2/configure.ac CONFIGURE_SCRIPT)
string(REGEX MATCH "AC_INIT\\(faad2, ([0-9.]+)\\)" _ ${CONFIGURE_SCRIPT})
set(FAAD_VERSION ${CMAKE_MATCH_1})
message(STATUS "Building faad2 version ${FAAD_VERSION}")

# Check for functions and headers.
include(CheckFunctionExists)
include(CheckIncludeFiles)
check_function_exists(getpwuid HAVE_GETPWUID)
check_function_exists(lrintf HAVE_LRINTF)
check_function_exists(memcpy HAVE_MEMCPY)
check_function_exists(strchr HAVE_STRCHR)
check_function_exists(strsep HAVE_STRSEP)
check_include_files(dlfcn.h HAVE_DLFCN_H)
check_include_files(errno.h HAVE_ERRNO_H)
check_include_files(float.h HAVE_FLOAT_H)
check_include_files(inttypes.h HAVE_INTTYPES_H)
check_include_files(IOKit/IOKitLib.h HAVE_IOKIT_IOKITLIB_H)
check_include_files(limits.h HAVE_LIMITS_H)
check_include_files(mathf.h HAVE_MATHF_H)
check_include_files(stdint.h HAVE_STDINT_H)
check_include_files(stdio.h HAVE_STDIO_H)
check_include_files(stdlib.h HAVE_STDLIB_H)
check_include_files(strings.h HAVE_STRINGS_H)
check_include_files(string.h HAVE_STRING_H)
check_include_files(sysfs/libsysfs.h HAVE_SYSFS_LIBSYSFS_H)
check_include_files(sys/stat.h HAVE_SYS_STAT_H)
check_include_files(sys/time.h HAVE_SYS_TIME_H)
check_include_files(sys/types.h HAVE_SYS_TYPES_H)
check_include_files(unistd.h HAVE_UNISTD_H)

# faad2 uses a relative include for its config.h which breaks under CMake.
# We can use target_compile_definitions to pass on the configuration instead.
target_compile_definitions(faad2 PRIVATE
-DFAAD_VERSION=${FAAD_VERSION}
-DPACKAGE_VERSION=\"${FAAD_VERSION}\"
-DSTDC_HEADERS
-DHAVE_GETPWUID=${HAVE_GETPWUID}
-DHAVE_LRINTF=${HAVE_LRINTF}
-DHAVE_MEMCPY=${HAVE_MEMCPY}
-DHAVE_STRCHR=${HAVE_STRCHR}
-DHAVE_STRSEP=${HAVE_STRSEP}
-DHAVE_DLFCN_H=${HAVE_DLFCN_H}
-DHAVE_ERRNO_H=${HAVE_ERRNO_H}
-DHAVE_FLOAT_H=${HAVE_FLOAT_H}
-DHAVE_INTTYPES_H=${HAVE_INTTYPES_H}
-DHAVE_IOKIT_IOKITLIB_H=${HAVE_IOKIT_IOKITLIB_H}
-DHAVE_LIMITS_H=${HAVE_LIMITS_H}
-DHAVE_MATHF_H=${HAVE_MATHF_H}
-DHAVE_STDINT_H=${HAVE_STDINT_H}
-DHAVE_STDIO_H=${HAVE_STDIO_H}
-DHAVE_STDLIB_H=${HAVE_STDLIB_H}
-DHAVE_STRINGS_H=${HAVE_STRINGS_H}
-DHAVE_STRING_H=${HAVE_STRING_H}
-DHAVE_SYSFS_LIBSYSFS_H=${HAVE_SYSFS_LIBSYSFS_H}
-DHAVE_SYS_STAT_H=${HAVE_SYS_STAT_H}
-DHAVE_SYS_TIME_H=${HAVE_SYS_TIME_H}
-DHAVE_SYS_TYPES_H=${HAVE_SYS_TYPES_H}
-DHAVE_UNISTD_H=${HAVE_UNISTD_H}
# Only compile for AAC-LC decoding.
-DLC_ONLY_DECODER
)
1 externals/faad2/faad2 (vendored submodule)
Submodule externals/faad2/faad2 added at 3918dee560
1 externals/oaknut (vendored submodule)
Submodule externals/oaknut added at e6eecc3f94
@@ -4,15 +4,11 @@ add_library(audio_core STATIC
codec.h
dsp_interface.cpp
dsp_interface.h
hle/adts.h
hle/adts_reader.cpp
hle/common.h
hle/decoder.cpp
hle/decoder.h
hle/fdk_decoder.cpp
hle/fdk_decoder.h
hle/ffmpeg_decoder.cpp
hle/ffmpeg_decoder.h
hle/faad2_decoder.cpp
hle/faad2_decoder.h
hle/filter.cpp
hle/filter.h
hle/hle.cpp
@@ -48,36 +44,7 @@ add_library(audio_core STATIC
create_target_directory_groups(audio_core)

target_link_libraries(audio_core PUBLIC citra_common citra_core)
target_link_libraries(audio_core PRIVATE SoundTouch teakra)

if(ENABLE_MF)
target_sources(audio_core PRIVATE
hle/wmf_decoder.cpp
hle/wmf_decoder.h
hle/wmf_decoder_utils.cpp
hle/wmf_decoder_utils.h
)
# We dynamically load the required symbols from mf.dll and mfplat.dll but mfuuid is not a dll
# just a static library of GUIDS so include that one directly.
target_link_libraries(audio_core PRIVATE mfuuid.lib)
target_compile_definitions(audio_core PUBLIC HAVE_MF)
elseif(ENABLE_AUDIOTOOLBOX)
target_sources(audio_core PRIVATE
hle/audiotoolbox_decoder.cpp
hle/audiotoolbox_decoder.h
)
find_library(AUDIOTOOLBOX AudioToolbox)
target_link_libraries(audio_core PRIVATE ${AUDIOTOOLBOX})
target_compile_definitions(audio_core PUBLIC HAVE_AUDIOTOOLBOX)
endif()

if(ANDROID)
target_sources(audio_core PRIVATE
hle/mediandk_decoder.cpp
hle/mediandk_decoder.h
)
target_link_libraries(audio_core PRIVATE mediandk)
endif()
target_link_libraries(audio_core PRIVATE faad2 SoundTouch teakra)

if(ENABLE_SDL2)
target_link_libraries(audio_core PRIVATE SDL2::SDL2)
@@ -1,28 +0,0 @@
// Copyright 2019 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once

#include "common/common_types.h"

namespace AudioCore {

struct ADTSData {
u8 header_length = 0;
bool mpeg2 = false;
u8 profile = 0;
u8 channels = 0;
u8 channel_idx = 0;
u8 framecount = 0;
u8 samplerate_idx = 0;
u32 length = 0;
u32 samplerate = 0;
};

ADTSData ParseADTS(const u8* buffer);

// last two bytes of MF AAC decoder user data
// see https://docs.microsoft.com/en-us/windows/desktop/medfound/aac-decoder#example-media-types
u16 MFGetAACTag(const ADTSData& input);

} // namespace AudioCore
@@ -1,79 +0,0 @@
// Copyright 2019 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <array>
#include "adts.h"
#include "common/bit_field.h"

namespace AudioCore {
constexpr std::array<u32, 16> freq_table = {96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050,
16000, 12000, 11025, 8000, 7350, 0, 0, 0};
constexpr std::array<u8, 8> channel_table = {0, 1, 2, 3, 4, 5, 6, 8};

struct ADTSHeader {
union {
std::array<u8, 7> raw{};
BitFieldBE<52, 12, u64> sync_word;
BitFieldBE<51, 1, u64> mpeg2;
BitFieldBE<49, 2, u64> layer;
BitFieldBE<48, 1, u64> protection_absent;
BitFieldBE<46, 2, u64> profile;
BitFieldBE<42, 4, u64> samplerate_idx;
BitFieldBE<41, 1, u64> private_bit;
BitFieldBE<38, 3, u64> channel_idx;
BitFieldBE<37, 1, u64> originality;
BitFieldBE<36, 1, u64> home;
BitFieldBE<35, 1, u64> copyright_id;
BitFieldBE<34, 1, u64> copyright_id_start;
BitFieldBE<21, 13, u64> frame_length;
BitFieldBE<10, 11, u64> buffer_fullness;
BitFieldBE<8, 2, u64> frame_count;
};
};

ADTSData ParseADTS(const u8* buffer) {
ADTSHeader header;
memcpy(header.raw.data(), buffer, sizeof(header.raw));

// sync word 0xfff
if (header.sync_word != 0xfff) {
return {};
}

ADTSData out{};
// bit 16 = no CRC
out.header_length = header.protection_absent ? 7 : 9;
out.mpeg2 = static_cast<bool>(header.mpeg2);
// bit 17 to 18
out.profile = static_cast<u8>(header.profile) + 1;
// bit 19 to 22
out.samplerate_idx = static_cast<u8>(header.samplerate_idx);
out.samplerate = header.samplerate_idx > 15 ? 0 : freq_table[header.samplerate_idx];
// bit 24 to 26
out.channel_idx = static_cast<u8>(header.channel_idx);
out.channels = (header.channel_idx > 7) ? 0 : channel_table[header.channel_idx];
// bit 55 to 56
out.framecount = static_cast<u8>(header.frame_count + 1);
// bit 31 to 43
out.length = static_cast<u32>(header.frame_length);

return out;
}

// last two bytes of MF AAC decoder user data
// Audio object type (5 bits)
// Sample rate profile (4 bits)
// Channel configuration profile (4 bits)
// Frame length flag (1 bit)
// Depends on core coder (1 bit)
// Extension flag (1 bit)
u16 MFGetAACTag(const ADTSData& input) {
u16 tag = 0;

tag |= input.profile << 11;
tag |= input.samplerate_idx << 7;
tag |= input.channel_idx << 3;

return tag;
}
} // namespace AudioCore
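The removed MFGetAACTag helper packs the parsed ADTS fields into the two trailing bytes of the Media Foundation AAC user data. A quick worked illustration of that bit layout, using hypothetical input values for a typical AAC-LC stereo 48 kHz stream (the values are examples, not taken from this diff):

// Hypothetical illustration of the MFGetAACTag bit packing shown above.
#include <cstdint>
#include <cstdio>

int main() {
    std::uint16_t profile = 2;        // AAC-LC audio object type (ADTS profile field + 1)
    std::uint16_t samplerate_idx = 3; // 48000 Hz in the ADTS sampling-frequency table
    std::uint16_t channel_idx = 2;    // stereo channel configuration
    std::uint16_t tag = 0;
    tag |= profile << 11;             // top 5 bits: audio object type
    tag |= samplerate_idx << 7;       // next 4 bits: sample rate index
    tag |= channel_idx << 3;          // next 4 bits: channel configuration
    std::printf("0x%04x\n", tag);     // prints 0x1190
    return 0;
}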
@@ -1,264 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <AudioToolbox/AudioToolbox.h>
#include "audio_core/audio_types.h"
#include "audio_core/hle/adts.h"
#include "audio_core/hle/audiotoolbox_decoder.h"

namespace AudioCore::HLE {

static constexpr auto bytes_per_sample = sizeof(s16);
static constexpr auto aac_frames_per_packet = 1024;
static constexpr auto error_out_of_data = -1932;

class AudioToolboxDecoder::Impl {
public:
explicit Impl(Memory::MemorySystem& memory);
~Impl();
std::optional<BinaryMessage> ProcessRequest(const BinaryMessage& request);

private:
std::optional<BinaryMessage> Initalize(const BinaryMessage& request);
std::optional<BinaryMessage> Decode(const BinaryMessage& request);

void Clear();
bool InitializeDecoder(AudioCore::ADTSData& adts_header);

static OSStatus DataFunc(AudioConverterRef in_audio_converter, u32* io_number_data_packets,
AudioBufferList* io_data,
AudioStreamPacketDescription** out_data_packet_description,
void* in_user_data);

Memory::MemorySystem& memory;

AudioCore::ADTSData adts_config;
AudioStreamBasicDescription output_format = {};
AudioConverterRef converter = nullptr;

u8* curr_data = nullptr;
u32 curr_data_len = 0;

AudioStreamPacketDescription packet_description;
};

AudioToolboxDecoder::Impl::Impl(Memory::MemorySystem& memory_) : memory(memory_) {}

std::optional<BinaryMessage> AudioToolboxDecoder::Impl::Initalize(const BinaryMessage& request) {
BinaryMessage response = request;
response.header.result = ResultStatus::Success;

Clear();
return response;
}

AudioToolboxDecoder::Impl::~Impl() {
Clear();
}

void AudioToolboxDecoder::Impl::Clear() {
curr_data = nullptr;
curr_data_len = 0;

adts_config = {};
output_format = {};

if (converter) {
AudioConverterDispose(converter);
converter = nullptr;
}
}

std::optional<BinaryMessage> AudioToolboxDecoder::Impl::ProcessRequest(
const BinaryMessage& request) {
if (request.header.codec != DecoderCodec::DecodeAAC) {
LOG_ERROR(Audio_DSP, "AudioToolbox AAC Decoder cannot handle such codec: {}",
static_cast<u16>(request.header.codec));
return {};
}

switch (request.header.cmd) {
case DecoderCommand::Init: {
return Initalize(request);
}
case DecoderCommand::EncodeDecode: {
return Decode(request);
}
case DecoderCommand::Shutdown:
case DecoderCommand::SaveState:
case DecoderCommand::LoadState: {
LOG_WARNING(Audio_DSP, "Got unimplemented binary request: {}",
static_cast<u16>(request.header.cmd));
BinaryMessage response = request;
response.header.result = ResultStatus::Success;
return response;
}
default:
LOG_ERROR(Audio_DSP, "Got unknown binary request: {}",
static_cast<u16>(request.header.cmd));
return {};
}
}

bool AudioToolboxDecoder::Impl::InitializeDecoder(AudioCore::ADTSData& adts_header) {
if (converter) {
if (adts_config.channels == adts_header.channels &&
adts_config.samplerate == adts_header.samplerate) {
return true;
} else {
Clear();
}
}

AudioStreamBasicDescription input_format = {
.mSampleRate = static_cast<Float64>(adts_header.samplerate),
.mFormatID = kAudioFormatMPEG4AAC,
.mFramesPerPacket = aac_frames_per_packet,
.mChannelsPerFrame = adts_header.channels,
};

u32 bytes_per_frame = input_format.mChannelsPerFrame * bytes_per_sample;
output_format = {
.mSampleRate = input_format.mSampleRate,
.mFormatID = kAudioFormatLinearPCM,
.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked,
.mBytesPerPacket = bytes_per_frame,
.mFramesPerPacket = 1,
.mBytesPerFrame = bytes_per_frame,
.mChannelsPerFrame = input_format.mChannelsPerFrame,
.mBitsPerChannel = bytes_per_sample * 8,
};

auto status = AudioConverterNew(&input_format, &output_format, &converter);
if (status != noErr) {
LOG_ERROR(Audio_DSP, "Could not create AAC audio converter: {}", status);
Clear();
return false;
}

adts_config = adts_header;
return true;
}

OSStatus AudioToolboxDecoder::Impl::DataFunc(
AudioConverterRef in_audio_converter, u32* io_number_data_packets, AudioBufferList* io_data,
AudioStreamPacketDescription** out_data_packet_description, void* in_user_data) {
auto impl = reinterpret_cast<Impl*>(in_user_data);
if (!impl || !impl->curr_data || impl->curr_data_len == 0) {
*io_number_data_packets = 0;
return error_out_of_data;
}

io_data->mNumberBuffers = 1;
io_data->mBuffers[0].mNumberChannels = 0;
io_data->mBuffers[0].mDataByteSize = impl->curr_data_len;
io_data->mBuffers[0].mData = impl->curr_data;
*io_number_data_packets = 1;

if (out_data_packet_description != nullptr) {
impl->packet_description.mStartOffset = 0;
impl->packet_description.mVariableFramesInPacket = 0;
impl->packet_description.mDataByteSize = impl->curr_data_len;
*out_data_packet_description = &impl->packet_description;
}

impl->curr_data = nullptr;
impl->curr_data_len = 0;

return noErr;
}

std::optional<BinaryMessage> AudioToolboxDecoder::Impl::Decode(const BinaryMessage& request) {
BinaryMessage response{};
response.header.codec = request.header.codec;
response.header.cmd = request.header.cmd;
response.decode_aac_response.size = request.decode_aac_request.size;

if (request.decode_aac_request.src_addr < Memory::FCRAM_PADDR ||
request.decode_aac_request.src_addr + request.decode_aac_request.size >
Memory::FCRAM_PADDR + Memory::FCRAM_SIZE) {
LOG_ERROR(Audio_DSP, "Got out of bounds src_addr {:08x}",
request.decode_aac_request.src_addr);
return {};
}

const auto data =
memory.GetFCRAMPointer(request.decode_aac_request.src_addr - Memory::FCRAM_PADDR);
auto adts_header = AudioCore::ParseADTS(data);
curr_data = data + adts_header.header_length;
curr_data_len = request.decode_aac_request.size - adts_header.header_length;

if (!InitializeDecoder(adts_header)) {
return std::nullopt;
}

// Up to 2048 samples, up to 2 channels each
s16 decoder_output[4096];
AudioBufferList out_buffer{1,
{{
output_format.mChannelsPerFrame,
sizeof(decoder_output),
decoder_output,
}}};

u32 num_packets = sizeof(decoder_output) / output_format.mBytesPerPacket;
auto status = AudioConverterFillComplexBuffer(converter, DataFunc, this, &num_packets,
&out_buffer, nullptr);
if (status != noErr && status != error_out_of_data) {
LOG_ERROR(Audio_DSP, "Could not decode AAC data: {}", status);
Clear();
return std::nullopt;
}

// De-interleave samples.
std::array<std::vector<s16>, 2> out_streams;
auto num_frames = num_packets * output_format.mFramesPerPacket;
for (u32 frame = 0; frame < num_frames; frame++) {
for (u32 ch = 0; ch < output_format.mChannelsPerFrame; ch++) {
out_streams[ch].push_back(
decoder_output[(frame * output_format.mChannelsPerFrame) + ch]);
}
}

curr_data = nullptr;
curr_data_len = 0;

response.decode_aac_response.sample_rate =
GetSampleRateEnum(static_cast<u32>(output_format.mSampleRate));
response.decode_aac_response.num_channels = output_format.mChannelsPerFrame;
response.decode_aac_response.num_samples = num_frames;

// transfer the decoded buffer from vector to the FCRAM
for (std::size_t ch = 0; ch < out_streams.size(); ch++) {
if (!out_streams[ch].empty()) {
auto byte_size = out_streams[ch].size() * bytes_per_sample;
auto dst = ch == 0 ? request.decode_aac_request.dst_addr_ch0
: request.decode_aac_request.dst_addr_ch1;
if (dst < Memory::FCRAM_PADDR ||
dst + byte_size > Memory::FCRAM_PADDR + Memory::FCRAM_SIZE) {
LOG_ERROR(Audio_DSP, "Got out of bounds dst_addr_ch{} {:08x}", ch, dst);
return {};
}
std::memcpy(memory.GetFCRAMPointer(dst - Memory::FCRAM_PADDR), out_streams[ch].data(),
byte_size);
}
}

return response;
}

AudioToolboxDecoder::AudioToolboxDecoder(Memory::MemorySystem& memory)
: impl(std::make_unique<Impl>(memory)) {}

AudioToolboxDecoder::~AudioToolboxDecoder() = default;

std::optional<BinaryMessage> AudioToolboxDecoder::ProcessRequest(const BinaryMessage& request) {
return impl->ProcessRequest(request);
}

bool AudioToolboxDecoder::IsValid() const {
return true;
}

} // namespace AudioCore::HLE
186 src/audio_core/hle/faad2_decoder.cpp (new file)
@@ -0,0 +1,186 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <neaacdec.h>
#include "audio_core/hle/faad2_decoder.h"

namespace AudioCore::HLE {

class FAAD2Decoder::Impl {
public:
explicit Impl(Memory::MemorySystem& memory);
~Impl();
std::optional<BinaryMessage> ProcessRequest(const BinaryMessage& request);
bool IsValid() const {
return decoder != nullptr;
}

private:
std::optional<BinaryMessage> Initalize(const BinaryMessage& request);

std::optional<BinaryMessage> Decode(const BinaryMessage& request);

Memory::MemorySystem& memory;

NeAACDecHandle decoder = nullptr;
};

FAAD2Decoder::Impl::Impl(Memory::MemorySystem& memory) : memory(memory) {
decoder = NeAACDecOpen();
if (decoder == nullptr) {
LOG_CRITICAL(Audio_DSP, "Could not open FAAD2 decoder.");
return;
}

auto config = NeAACDecGetCurrentConfiguration(decoder);
config->defObjectType = LC;
config->outputFormat = FAAD_FMT_16BIT;
if (!NeAACDecSetConfiguration(decoder, config)) {
LOG_CRITICAL(Audio_DSP, "Could not configure FAAD2 decoder.");
NeAACDecClose(decoder);
decoder = nullptr;
return;
}

LOG_INFO(Audio_DSP, "Created FAAD2 AAC decoder.");
}

FAAD2Decoder::Impl::~Impl() {
if (decoder) {
NeAACDecClose(decoder);
decoder = nullptr;

LOG_INFO(Audio_DSP, "Destroyed FAAD2 AAC decoder.");
}
}

std::optional<BinaryMessage> FAAD2Decoder::Impl::ProcessRequest(const BinaryMessage& request) {
if (request.header.codec != DecoderCodec::DecodeAAC) {
LOG_ERROR(Audio_DSP, "FAAD2 AAC Decoder cannot handle such codec: {}",
static_cast<u16>(request.header.codec));
return {};
}

switch (request.header.cmd) {
case DecoderCommand::Init: {
return Initalize(request);
}
case DecoderCommand::EncodeDecode: {
return Decode(request);
}
case DecoderCommand::Shutdown:
case DecoderCommand::SaveState:
case DecoderCommand::LoadState: {
LOG_WARNING(Audio_DSP, "Got unimplemented binary request: {}",
static_cast<u16>(request.header.cmd));
BinaryMessage response = request;
response.header.result = ResultStatus::Success;
return response;
}
default:
LOG_ERROR(Audio_DSP, "Got unknown binary request: {}",
static_cast<u16>(request.header.cmd));
return {};
}
}

std::optional<BinaryMessage> FAAD2Decoder::Impl::Initalize(const BinaryMessage& request) {
BinaryMessage response = request;
response.header.result = ResultStatus::Success;
return response;
}

std::optional<BinaryMessage> FAAD2Decoder::Impl::Decode(const BinaryMessage& request) {
BinaryMessage response{};
response.header.codec = request.header.codec;
response.header.cmd = request.header.cmd;
response.decode_aac_response.size = request.decode_aac_request.size;
// This is a hack to continue games when a failure occurs.
response.decode_aac_response.sample_rate = DecoderSampleRate::Rate48000;
response.decode_aac_response.num_channels = 2;
response.decode_aac_response.num_samples = 1024;

if (request.decode_aac_request.src_addr < Memory::FCRAM_PADDR ||
request.decode_aac_request.src_addr + request.decode_aac_request.size >
Memory::FCRAM_PADDR + Memory::FCRAM_SIZE) {
LOG_ERROR(Audio_DSP, "Got out of bounds src_addr {:08x}",
request.decode_aac_request.src_addr);
return response;
}
u8* data = memory.GetFCRAMPointer(request.decode_aac_request.src_addr - Memory::FCRAM_PADDR);
u32 data_len = request.decode_aac_request.size;

unsigned long sample_rate;
u8 num_channels;
auto init_result = NeAACDecInit(decoder, data, data_len, &sample_rate, &num_channels);
if (init_result < 0) {
LOG_ERROR(Audio_DSP, "Could not initialize FAAD2 AAC decoder for request: {}", init_result);
return response;
}

// Advance past the frame header if needed.
data += init_result;
data_len -= init_result;

std::array<std::vector<s16>, 2> out_streams;

while (data_len > 0) {
NeAACDecFrameInfo frame_info;
auto curr_sample_buffer =
static_cast<s16*>(NeAACDecDecode(decoder, &frame_info, data, data_len));
if (curr_sample_buffer == nullptr || frame_info.error != 0) {
LOG_ERROR(Audio_DSP, "Failed to decode AAC buffer using FAAD2: {}", frame_info.error);
return response;
}

// Split the decode result into channels.
u32 num_samples = frame_info.samples / frame_info.channels;
for (u32 sample = 0; sample < num_samples; sample++) {
for (u32 ch = 0; ch < frame_info.channels; ch++) {
out_streams[ch].push_back(curr_sample_buffer[(sample * frame_info.channels) + ch]);
}
}

data += frame_info.bytesconsumed;
data_len -= frame_info.bytesconsumed;
}

// Transfer the decoded buffer from vector to the FCRAM.
for (std::size_t ch = 0; ch < out_streams.size(); ch++) {
if (out_streams[ch].empty()) {
continue;
}
auto byte_size = out_streams[ch].size() * sizeof(s16);
auto dst = ch == 0 ? request.decode_aac_request.dst_addr_ch0
: request.decode_aac_request.dst_addr_ch1;
if (dst < Memory::FCRAM_PADDR ||
dst + byte_size > Memory::FCRAM_PADDR + Memory::FCRAM_SIZE) {
LOG_ERROR(Audio_DSP, "Got out of bounds dst_addr_ch{} {:08x}", ch, dst);
return response;
}
std::memcpy(memory.GetFCRAMPointer(dst - Memory::FCRAM_PADDR), out_streams[ch].data(),
byte_size);
}

// Set the output frame info.
response.decode_aac_response.sample_rate = GetSampleRateEnum(sample_rate);
response.decode_aac_response.num_channels = num_channels;
response.decode_aac_response.num_samples = static_cast<u32_le>(out_streams[0].size());

return response;
}

FAAD2Decoder::FAAD2Decoder(Memory::MemorySystem& memory) : impl(std::make_unique<Impl>(memory)) {}

FAAD2Decoder::~FAAD2Decoder() = default;

std::optional<BinaryMessage> FAAD2Decoder::ProcessRequest(const BinaryMessage& request) {
return impl->ProcessRequest(request);
}

bool FAAD2Decoder::IsValid() const {
return impl->IsValid();
}

} // namespace AudioCore::HLE
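For reference, the FAAD2 calls in the new decoder follow the standard NeAACDec open/configure/init/decode sequence. A minimal, self-contained sketch of that flow, stripped of the emulator-specific plumbing; the input buffer and its length are placeholder arguments, not values from this diff:

// Minimal FAAD2 decode-loop sketch mirroring the sequence used by FAAD2Decoder::Impl above.
#include <vector>
#include <neaacdec.h>

std::vector<short> DecodeAdtsBuffer(unsigned char* data, unsigned long data_len) {
    std::vector<short> samples;
    NeAACDecHandle decoder = NeAACDecOpen();
    NeAACDecConfigurationPtr config = NeAACDecGetCurrentConfiguration(decoder);
    config->defObjectType = LC;            // AAC-LC only, as in the decoder above
    config->outputFormat = FAAD_FMT_16BIT; // signed 16-bit PCM output
    NeAACDecSetConfiguration(decoder, config);

    unsigned long sample_rate = 0;
    unsigned char channels = 0;
    long skip = NeAACDecInit(decoder, data, data_len, &sample_rate, &channels);
    if (skip < 0) {
        NeAACDecClose(decoder);
        return samples; // could not sync to the stream
    }
    data += skip;       // skip past any leading header bytes
    data_len -= skip;

    while (data_len > 0) {
        NeAACDecFrameInfo info;
        short* out = static_cast<short*>(NeAACDecDecode(decoder, &info, data, data_len));
        if (out == nullptr || info.error != 0) {
            break; // decode error; stop like the decoder above does
        }
        samples.insert(samples.end(), out, out + info.samples); // interleaved PCM
        data += info.bytesconsumed;
        data_len -= info.bytesconsumed;
    }
    NeAACDecClose(decoder);
    return samples;
}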
@@ -8,10 +8,10 @@

namespace AudioCore::HLE {

class AudioToolboxDecoder final : public DecoderBase {
class FAAD2Decoder final : public DecoderBase {
public:
explicit AudioToolboxDecoder(Memory::MemorySystem& memory);
~AudioToolboxDecoder() override;
explicit FAAD2Decoder(Memory::MemorySystem& memory);
~FAAD2Decoder() override;
std::optional<BinaryMessage> ProcessRequest(const BinaryMessage& request) override;
bool IsValid() const override;
@@ -1,236 +0,0 @@
// Copyright 2019 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "audio_core/hle/fdk_decoder.h"
#include "common/dynamic_library/fdk-aac.h"

using namespace DynamicLibrary;

namespace AudioCore::HLE {

class FDKDecoder::Impl {
public:
explicit Impl(Memory::MemorySystem& memory);
~Impl();
std::optional<BinaryMessage> ProcessRequest(const BinaryMessage& request);
bool IsValid() const {
return decoder != nullptr;
}

private:
std::optional<BinaryMessage> Initalize(const BinaryMessage& request);

std::optional<BinaryMessage> Decode(const BinaryMessage& request);

void Clear();

Memory::MemorySystem& memory;

HANDLE_AACDECODER decoder = nullptr;
};

FDKDecoder::Impl::Impl(Memory::MemorySystem& memory) : memory(memory) {
if (!FdkAac::LoadFdkAac()) {
return;
}

// allocate an array of LIB_INFO structures
// if we don't pre-fill the whole segment with zeros, when we call `aacDecoder_GetLibInfo`
// it will segfault, upon investigation, there is some code in fdk_aac depends on your initial
// values in this array
LIB_INFO decoder_info[FDK_MODULE_LAST] = {};
// get library information and fill the struct
if (FdkAac::aacDecoder_GetLibInfo(decoder_info) != 0) {
LOG_ERROR(Audio_DSP, "Failed to retrieve fdk_aac library information!");
return;
}

LOG_INFO(Audio_DSP, "Using fdk_aac version {} (build date: {})", decoder_info[0].versionStr,
decoder_info[0].build_date);

// choose the input format when initializing: 1 layer of ADTS
decoder = FdkAac::aacDecoder_Open(TRANSPORT_TYPE::TT_MP4_ADTS, 1);
// set maximum output channel to two (stereo)
// if the input samples have more channels, fdk_aac will perform a downmix
AAC_DECODER_ERROR ret = FdkAac::aacDecoder_SetParam(decoder, AAC_PCM_MAX_OUTPUT_CHANNELS, 2);
if (ret != AAC_DEC_OK) {
// unable to set this parameter reflects the decoder implementation might be broken
// we'd better shuts down everything
FdkAac::aacDecoder_Close(decoder);
decoder = nullptr;
LOG_ERROR(Audio_DSP, "Unable to set downmix parameter: {}", ret);
return;
}
}

std::optional<BinaryMessage> FDKDecoder::Impl::Initalize(const BinaryMessage& request) {
BinaryMessage response = request;
response.header.result = ResultStatus::Success;

if (decoder) {
LOG_INFO(Audio_DSP, "FDK Decoder initialized");
Clear();
} else {
LOG_ERROR(Audio_DSP, "Decoder not initialized");
}

return response;
}

FDKDecoder::Impl::~Impl() {
if (decoder) {
FdkAac::aacDecoder_Close(decoder);
}
}

void FDKDecoder::Impl::Clear() {
s16 decoder_output[8192];
// flush and re-sync the decoder, discarding the internal buffer
// we actually don't care if this succeeds or not
// FLUSH - flush internal buffer
// INTR - treat the current internal buffer as discontinuous
// CONCEAL - try to interpolate and smooth out the samples
if (decoder) {
FdkAac::aacDecoder_DecodeFrame(decoder, decoder_output, 8192,
AACDEC_FLUSH & AACDEC_INTR & AACDEC_CONCEAL);
}
}

std::optional<BinaryMessage> FDKDecoder::Impl::ProcessRequest(const BinaryMessage& request) {
if (request.header.codec != DecoderCodec::DecodeAAC) {
LOG_ERROR(Audio_DSP, "FDK AAC Decoder cannot handle such codec: {}",
static_cast<u16>(request.header.codec));
return {};
}

switch (request.header.cmd) {
case DecoderCommand::Init: {
return Initalize(request);
}
case DecoderCommand::EncodeDecode: {
return Decode(request);
}
case DecoderCommand::Shutdown:
case DecoderCommand::SaveState:
case DecoderCommand::LoadState: {
LOG_WARNING(Audio_DSP, "Got unimplemented binary request: {}",
static_cast<u16>(request.header.cmd));
BinaryMessage response = request;
response.header.result = ResultStatus::Success;
return response;
}
default:
LOG_ERROR(Audio_DSP, "Got unknown binary request: {}",
static_cast<u16>(request.header.cmd));
return {};
}
}

std::optional<BinaryMessage> FDKDecoder::Impl::Decode(const BinaryMessage& request) {
BinaryMessage response{};
response.header.codec = request.header.codec;
response.header.cmd = request.header.cmd;
response.decode_aac_response.size = request.decode_aac_request.size;

if (!decoder) {
LOG_DEBUG(Audio_DSP, "Decoder not initalized");
// This is a hack to continue games that are not compiled with the aac codec
response.decode_aac_response.num_channels = 2;
response.decode_aac_response.num_samples = 1024;
return response;
}

if (request.decode_aac_request.src_addr < Memory::FCRAM_PADDR ||
request.decode_aac_request.src_addr + request.decode_aac_request.size >
Memory::FCRAM_PADDR + Memory::FCRAM_SIZE) {
LOG_ERROR(Audio_DSP, "Got out of bounds src_addr {:08x}",
request.decode_aac_request.src_addr);
return {};
}
u8* data = memory.GetFCRAMPointer(request.decode_aac_request.src_addr - Memory::FCRAM_PADDR);

std::array<std::vector<s16>, 2> out_streams;

u32 data_size = request.decode_aac_request.size;

// decoding loops
AAC_DECODER_ERROR result = AAC_DEC_OK;
// Up to 2048 samples, up to 2 channels each
s16 decoder_output[4096];
// note that we don't free this pointer as it is automatically freed by fdk_aac
CStreamInfo* stream_info;
// how many bytes to be queued into the decoder, decrementing from the buffer size
u32 buffer_remaining = data_size;
// alias the data_size as an u32
u32 input_size = data_size;

while (buffer_remaining) {
// queue the input buffer, fdk_aac will automatically slice out the buffer it needs
// from the input buffer
result = FdkAac::aacDecoder_Fill(decoder, &data, &input_size, &buffer_remaining);
if (result != AAC_DEC_OK) {
// there are some issues when queuing the input buffer
LOG_ERROR(Audio_DSP, "Failed to enqueue the input samples");
return std::nullopt;
}
// get output from decoder
result = FdkAac::aacDecoder_DecodeFrame(decoder, decoder_output,
sizeof(decoder_output) / sizeof(s16), 0);
if (result == AAC_DEC_OK) {
// get the stream information
stream_info = FdkAac::aacDecoder_GetStreamInfo(decoder);
// fill the stream information for binary response
response.decode_aac_response.sample_rate = GetSampleRateEnum(stream_info->sampleRate);
response.decode_aac_response.num_channels = stream_info->numChannels;
response.decode_aac_response.num_samples = stream_info->frameSize;
// fill the output
// the sample size = frame_size * channel_counts
for (int sample = 0; sample < stream_info->frameSize; sample++) {
for (int ch = 0; ch < stream_info->numChannels; ch++) {
out_streams[ch].push_back(
decoder_output[(sample * stream_info->numChannels) + ch]);
}
}
} else if (result == AAC_DEC_TRANSPORT_SYNC_ERROR) {
// decoder has some synchronization problems, try again with new samples,
// using old samples might trigger this error again
continue;
} else {
LOG_ERROR(Audio_DSP, "Error decoding the sample: {}", result);
return std::nullopt;
}
}

// transfer the decoded buffer from vector to the FCRAM
for (std::size_t ch = 0; ch < out_streams.size(); ch++) {
if (!out_streams[ch].empty()) {
auto byte_size = out_streams[ch].size() * sizeof(s16);
auto dst = ch == 0 ? request.decode_aac_request.dst_addr_ch0
: request.decode_aac_request.dst_addr_ch1;
if (dst < Memory::FCRAM_PADDR ||
dst + byte_size > Memory::FCRAM_PADDR + Memory::FCRAM_SIZE) {
LOG_ERROR(Audio_DSP, "Got out of bounds dst_addr_ch{} {:08x}", ch, dst);
return {};
}
std::memcpy(memory.GetFCRAMPointer(dst - Memory::FCRAM_PADDR), out_streams[ch].data(),
byte_size);
}
}

return response;
}

FDKDecoder::FDKDecoder(Memory::MemorySystem& memory) : impl(std::make_unique<Impl>(memory)) {}

FDKDecoder::~FDKDecoder() = default;

std::optional<BinaryMessage> FDKDecoder::ProcessRequest(const BinaryMessage& request) {
return impl->ProcessRequest(request);
}

bool FDKDecoder::IsValid() const {
return impl->IsValid();
}

} // namespace AudioCore::HLE
@@ -1,23 +0,0 @@
// Copyright 2019 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "audio_core/hle/decoder.h"

namespace AudioCore::HLE {

class FDKDecoder final : public DecoderBase {
public:
explicit FDKDecoder(Memory::MemorySystem& memory);
~FDKDecoder() override;
std::optional<BinaryMessage> ProcessRequest(const BinaryMessage& request) override;
bool IsValid() const override;

private:
class Impl;
std::unique_ptr<Impl> impl;
};

} // namespace AudioCore::HLE
@@ -1,290 +0,0 @@
// Copyright 2018 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "audio_core/hle/ffmpeg_decoder.h"
#include "common/dynamic_library/ffmpeg.h"

using namespace DynamicLibrary;

namespace AudioCore::HLE {

class FFMPEGDecoder::Impl {
public:
explicit Impl(Memory::MemorySystem& memory);
~Impl();
std::optional<BinaryMessage> ProcessRequest(const BinaryMessage& request);
bool IsValid() const {
return have_ffmpeg_dl;
}

private:
std::optional<BinaryMessage> Initalize(const BinaryMessage& request);

void Clear();

std::optional<BinaryMessage> Decode(const BinaryMessage& request);

struct AVPacketDeleter {
void operator()(AVPacket* packet) const {
FFmpeg::av_packet_free(&packet);
}
};

struct AVCodecContextDeleter {
void operator()(AVCodecContext* context) const {
FFmpeg::avcodec_free_context(&context);
}
};

struct AVCodecParserContextDeleter {
void operator()(AVCodecParserContext* parser) const {
FFmpeg::av_parser_close(parser);
}
};

struct AVFrameDeleter {
void operator()(AVFrame* frame) const {
FFmpeg::av_frame_free(&frame);
}
};

bool initalized = false;
bool have_ffmpeg_dl;

Memory::MemorySystem& memory;

const AVCodec* codec;
std::unique_ptr<AVCodecContext, AVCodecContextDeleter> av_context;
std::unique_ptr<AVCodecParserContext, AVCodecParserContextDeleter> parser;
std::unique_ptr<AVPacket, AVPacketDeleter> av_packet;
std::unique_ptr<AVFrame, AVFrameDeleter> decoded_frame;
};

FFMPEGDecoder::Impl::Impl(Memory::MemorySystem& memory) : memory(memory) {
have_ffmpeg_dl = FFmpeg::LoadFFmpeg();
}

FFMPEGDecoder::Impl::~Impl() = default;

std::optional<BinaryMessage> FFMPEGDecoder::Impl::ProcessRequest(const BinaryMessage& request) {
if (request.header.codec != DecoderCodec::DecodeAAC) {
LOG_ERROR(Audio_DSP, "Got wrong codec {}", static_cast<u16>(request.header.codec));
return {};
}

switch (request.header.cmd) {
case DecoderCommand::Init: {
return Initalize(request);
}
case DecoderCommand::EncodeDecode: {
return Decode(request);
}
case DecoderCommand::Shutdown:
case DecoderCommand::SaveState:
case DecoderCommand::LoadState: {
LOG_WARNING(Audio_DSP, "Got unimplemented binary request: {}",
static_cast<u16>(request.header.cmd));
BinaryMessage response = request;
response.header.result = ResultStatus::Success;
return response;
}
default:
LOG_ERROR(Audio_DSP, "Got unknown binary request: {}",
static_cast<u16>(request.header.cmd));
return {};
}
}

std::optional<BinaryMessage> FFMPEGDecoder::Impl::Initalize(const BinaryMessage& request) {
if (initalized) {
Clear();
}

BinaryMessage response = request;
response.header.result = ResultStatus::Success;

if (!have_ffmpeg_dl) {
return response;
}

av_packet.reset(FFmpeg::av_packet_alloc());

codec = FFmpeg::avcodec_find_decoder(AV_CODEC_ID_AAC);
if (!codec) {
LOG_ERROR(Audio_DSP, "Codec not found\n");
return response;
}

parser.reset(FFmpeg::av_parser_init(codec->id));
if (!parser) {
LOG_ERROR(Audio_DSP, "Parser not found\n");
return response;
}

av_context.reset(FFmpeg::avcodec_alloc_context3(codec));
if (!av_context) {
LOG_ERROR(Audio_DSP, "Could not allocate audio codec context\n");
return response;
}

if (FFmpeg::avcodec_open2(av_context.get(), codec, nullptr) < 0) {
LOG_ERROR(Audio_DSP, "Could not open codec\n");
return response;
}

initalized = true;
return response;
}

void FFMPEGDecoder::Impl::Clear() {
if (!have_ffmpeg_dl) {
return;
}

av_context.reset();
parser.reset();
decoded_frame.reset();
av_packet.reset();
}

std::optional<BinaryMessage> FFMPEGDecoder::Impl::Decode(const BinaryMessage& request) {
BinaryMessage response{};
response.header.codec = request.header.codec;
response.header.cmd = request.header.cmd;
response.decode_aac_response.size = request.decode_aac_request.size;

if (!initalized) {
LOG_DEBUG(Audio_DSP, "Decoder not initalized");
// This is a hack to continue games that are not compiled with the aac codec
response.decode_aac_response.num_channels = 2;
response.decode_aac_response.num_samples = 1024;
return response;
}

if (request.decode_aac_request.src_addr < Memory::FCRAM_PADDR ||
request.decode_aac_request.src_addr + request.decode_aac_request.size >
Memory::FCRAM_PADDR + Memory::FCRAM_SIZE) {
LOG_ERROR(Audio_DSP, "Got out of bounds src_addr {:08x}",
request.decode_aac_request.src_addr);
return {};
}
u8* data = memory.GetFCRAMPointer(request.decode_aac_request.src_addr - Memory::FCRAM_PADDR);

std::array<std::vector<u8>, 2> out_streams;

std::size_t data_size = request.decode_aac_request.size;
while (data_size > 0) {
if (!decoded_frame) {
decoded_frame.reset(FFmpeg::av_frame_alloc());
if (!decoded_frame) {
LOG_ERROR(Audio_DSP, "Could not allocate audio frame");
return {};
}
}

int ret = FFmpeg::av_parser_parse2(parser.get(), av_context.get(), &av_packet->data,
&av_packet->size, data, static_cast<int>(data_size),
AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
if (ret < 0) {
LOG_ERROR(Audio_DSP, "Error while parsing");
return {};
}
data += ret;
data_size -= ret;

ret = FFmpeg::avcodec_send_packet(av_context.get(), av_packet.get());
if (ret < 0) {
LOG_ERROR(Audio_DSP, "Error submitting the packet to the decoder");
return {};
}

if (av_packet->size) {
while (ret >= 0) {
ret = FFmpeg::avcodec_receive_frame(av_context.get(), decoded_frame.get());
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
break;
else if (ret < 0) {
LOG_ERROR(Audio_DSP, "Error during decoding");
return {};
}
int bytes_per_sample = FFmpeg::av_get_bytes_per_sample(av_context->sample_fmt);
if (bytes_per_sample < 0) {
LOG_ERROR(Audio_DSP, "Failed to calculate data size");
return {};
}

#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100)
auto num_channels = static_cast<u32>(decoded_frame->ch_layout.nb_channels);
#else
auto num_channels = static_cast<u32>(decoded_frame->channels);
#endif

ASSERT(num_channels <= out_streams.size());

std::size_t size = bytes_per_sample * (decoded_frame->nb_samples);

response.decode_aac_response.sample_rate =
GetSampleRateEnum(decoded_frame->sample_rate);
response.decode_aac_response.num_channels = num_channels;
response.decode_aac_response.num_samples += decoded_frame->nb_samples;

// FFmpeg converts to 32 signed floating point PCM, we need s16 PCM so we need to
// convert it
f32 val_float;
for (std::size_t current_pos(0); current_pos < size;) {
for (std::size_t channel(0); channel < num_channels; channel++) {
std::memcpy(&val_float, decoded_frame->data[channel] + current_pos,
sizeof(val_float));
val_float = std::clamp(val_float, -1.0f, 1.0f);
s16 val = static_cast<s16>(0x7FFF * val_float);
out_streams[channel].push_back(val & 0xFF);
out_streams[channel].push_back(val >> 8);
}
current_pos += sizeof(val_float);
}
}
}
}

if (out_streams[0].size() != 0) {
if (request.decode_aac_request.dst_addr_ch0 < Memory::FCRAM_PADDR ||
request.decode_aac_request.dst_addr_ch0 + out_streams[0].size() >
Memory::FCRAM_PADDR + Memory::FCRAM_SIZE) {
LOG_ERROR(Audio_DSP, "Got out of bounds dst_addr_ch0 {:08x}",
request.decode_aac_request.dst_addr_ch0);
return {};
}
std::memcpy(
memory.GetFCRAMPointer(request.decode_aac_request.dst_addr_ch0 - Memory::FCRAM_PADDR),
out_streams[0].data(), out_streams[0].size());
}

if (out_streams[1].size() != 0) {
if (request.decode_aac_request.dst_addr_ch1 < Memory::FCRAM_PADDR ||
request.decode_aac_request.dst_addr_ch1 + out_streams[1].size() >
Memory::FCRAM_PADDR + Memory::FCRAM_SIZE) {
LOG_ERROR(Audio_DSP, "Got out of bounds dst_addr_ch1 {:08x}",
request.decode_aac_request.dst_addr_ch1);
return {};
}
std::memcpy(
memory.GetFCRAMPointer(request.decode_aac_request.dst_addr_ch1 - Memory::FCRAM_PADDR),
out_streams[1].data(), out_streams[1].size());
}
return response;
}

FFMPEGDecoder::FFMPEGDecoder(Memory::MemorySystem& memory) : impl(std::make_unique<Impl>(memory)) {}

FFMPEGDecoder::~FFMPEGDecoder() = default;

std::optional<BinaryMessage> FFMPEGDecoder::ProcessRequest(const BinaryMessage& request) {
return impl->ProcessRequest(request);
}

bool FFMPEGDecoder::IsValid() const {
return impl->IsValid();
}

} // namespace AudioCore::HLE
@ -1,23 +0,0 @@
// Copyright 2018 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "audio_core/hle/decoder.h"

namespace AudioCore::HLE {

class FFMPEGDecoder final : public DecoderBase {
public:
    explicit FFMPEGDecoder(Memory::MemorySystem& memory);
    ~FFMPEGDecoder() override;
    std::optional<BinaryMessage> ProcessRequest(const BinaryMessage& request) override;
    bool IsValid() const override;

private:
    class Impl;
    std::unique_ptr<Impl> impl;
};

} // namespace AudioCore::HLE
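The faad2_decoder.h include added in the next hunk implies a FAAD2Decoder with the same pimpl shape as the headers removed here. A hypothetical skeleton is shown below; the names are assumed from the established pattern, not taken from the new file.

// audio_core/hle/faad2_decoder.h (hypothetical skeleton)
#pragma once

#include "audio_core/hle/decoder.h"

namespace AudioCore::HLE {

class FAAD2Decoder final : public DecoderBase {
public:
    explicit FAAD2Decoder(Memory::MemorySystem& memory);
    ~FAAD2Decoder() override;
    std::optional<BinaryMessage> ProcessRequest(const BinaryMessage& request) override;
    bool IsValid() const override;

private:
    class Impl; // would wrap the libfaad2 decoder handle
    std::unique_ptr<Impl> impl;
};

} // namespace AudioCore::HLE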
@ -8,23 +8,15 @@
#include <boost/serialization/vector.hpp>
#include <boost/serialization/weak_ptr.hpp>
#include "audio_core/audio_types.h"
#include "common/archives.h"
#ifdef HAVE_MF
#include "audio_core/hle/wmf_decoder.h"
#elif HAVE_AUDIOTOOLBOX
#include "audio_core/hle/audiotoolbox_decoder.h"
#elif ANDROID
#include "audio_core/hle/mediandk_decoder.h"
#endif
#include "audio_core/hle/common.h"
#include "audio_core/hle/decoder.h"
#include "audio_core/hle/fdk_decoder.h"
#include "audio_core/hle/ffmpeg_decoder.h"
#include "audio_core/hle/faad2_decoder.h"
#include "audio_core/hle/hle.h"
#include "audio_core/hle/mixers.h"
#include "audio_core/hle/shared_memory.h"
#include "audio_core/hle/source.h"
#include "audio_core/sink.h"
#include "common/archives.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "common/hash.h"
@ -121,26 +113,8 @@ private:

static std::vector<std::function<std::unique_ptr<HLE::DecoderBase>(Memory::MemorySystem&)>>
    decoder_backends = {
#if defined(HAVE_MF)
        [](Memory::MemorySystem& memory) -> std::unique_ptr<HLE::DecoderBase> {
            return std::make_unique<HLE::WMFDecoder>(memory);
        },
#endif
#if defined(HAVE_AUDIOTOOLBOX)
        [](Memory::MemorySystem& memory) -> std::unique_ptr<HLE::DecoderBase> {
            return std::make_unique<HLE::AudioToolboxDecoder>(memory);
        },
#endif
#if ANDROID
        [](Memory::MemorySystem& memory) -> std::unique_ptr<HLE::DecoderBase> {
            return std::make_unique<HLE::MediaNDKDecoder>(memory);
        },
#endif
        [](Memory::MemorySystem& memory) -> std::unique_ptr<HLE::DecoderBase> {
            return std::make_unique<HLE::FDKDecoder>(memory);
        },
        [](Memory::MemorySystem& memory) -> std::unique_ptr<HLE::DecoderBase> {
            return std::make_unique<HLE::FFMPEGDecoder>(memory);
            return std::make_unique<HLE::FAAD2Decoder>(memory);
        },
    };

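The hunk above only defines the candidate list; selection happens elsewhere in hle.cpp. A minimal sketch of how such a prioritized list is typically consumed, assuming each backend reports readiness through DecoderBase::IsValid() (illustrative, not the exact code from this file):

static std::unique_ptr<HLE::DecoderBase> CreateDecoder(Memory::MemorySystem& memory) {
    // Walk the platform-ordered backends and keep the first one that
    // initialized successfully; the last entry acts as the portable fallback.
    for (const auto& make_backend : decoder_backends) {
        auto decoder = make_backend(memory);
        if (decoder && decoder->IsValid()) {
            return decoder;
        }
    }
    return nullptr; // no AAC decoding available
}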
@ -1,253 +0,0 @@
|
||||
// Copyright 2019 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <media/NdkMediaCodec.h>
|
||||
#include <media/NdkMediaError.h>
|
||||
#include <media/NdkMediaFormat.h>
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "audio_core/hle/adts.h"
|
||||
#include "audio_core/hle/mediandk_decoder.h"
|
||||
|
||||
namespace AudioCore::HLE {
|
||||
|
||||
struct AMediaCodecRelease {
|
||||
void operator()(AMediaCodec* codec) const {
|
||||
AMediaCodec_stop(codec);
|
||||
AMediaCodec_delete(codec);
|
||||
};
|
||||
};
|
||||
|
||||
class MediaNDKDecoder::Impl {
|
||||
public:
|
||||
explicit Impl(Memory::MemorySystem& memory);
|
||||
~Impl();
|
||||
std::optional<BinaryMessage> ProcessRequest(const BinaryMessage& request);
|
||||
|
||||
bool SetMediaType(const AudioCore::ADTSData& adts_data);
|
||||
|
||||
private:
|
||||
std::optional<BinaryMessage> Initalize(const BinaryMessage& request);
|
||||
std::optional<BinaryMessage> Decode(const BinaryMessage& request);
|
||||
|
||||
Memory::MemorySystem& memory;
|
||||
std::unique_ptr<AMediaCodec, AMediaCodecRelease> decoder;
|
||||
// default: 2 channles, 48000 samplerate
|
||||
AudioCore::ADTSData mADTSData{
|
||||
/*header_length*/ 7, /*mpeg2*/ false, /*profile*/ 2,
|
||||
/*channels*/ 2, /*channel_idx*/ 2, /*framecount*/ 0,
|
||||
/*samplerate_idx*/ 3, /*length*/ 0, /*samplerate*/ 48000};
|
||||
};
|
||||
|
||||
MediaNDKDecoder::Impl::Impl(Memory::MemorySystem& memory_) : memory(memory_) {
|
||||
SetMediaType(mADTSData);
|
||||
}
|
||||
|
||||
MediaNDKDecoder::Impl::~Impl() = default;
|
||||
|
||||
std::optional<BinaryMessage> MediaNDKDecoder::Impl::Initalize(const BinaryMessage& request) {
|
||||
BinaryMessage response = request;
|
||||
response.header.result = ResultStatus::Success;
|
||||
return response;
|
||||
}
|
||||
|
||||
bool MediaNDKDecoder::Impl::SetMediaType(const AudioCore::ADTSData& adts_data) {
|
||||
const char* mime = "audio/mp4a-latm";
|
||||
if (decoder && mADTSData.profile == adts_data.profile &&
|
||||
mADTSData.channel_idx == adts_data.channel_idx &&
|
||||
mADTSData.samplerate_idx == adts_data.samplerate_idx) {
|
||||
return true;
|
||||
}
|
||||
decoder.reset(AMediaCodec_createDecoderByType(mime));
|
||||
if (decoder == nullptr) {
|
||||
return false;
|
||||
}
|
||||
|
||||
u8 csd_0[2];
|
||||
csd_0[0] = static_cast<u8>((adts_data.profile << 3) | (adts_data.samplerate_idx >> 1));
|
||||
csd_0[1] =
|
||||
static_cast<u8>(((adts_data.samplerate_idx << 7) & 0x80) | (adts_data.channel_idx << 3));
|
||||
AMediaFormat* format = AMediaFormat_new();
|
||||
AMediaFormat_setString(format, AMEDIAFORMAT_KEY_MIME, mime);
|
||||
AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_SAMPLE_RATE, adts_data.samplerate);
|
||||
AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_CHANNEL_COUNT, adts_data.channels);
|
||||
AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_IS_ADTS, 1);
|
||||
AMediaFormat_setBuffer(format, "csd-0", csd_0, sizeof(csd_0));
|
||||
|
||||
media_status_t status = AMediaCodec_configure(decoder.get(), format, NULL, NULL, 0);
|
||||
if (status != AMEDIA_OK) {
|
||||
AMediaFormat_delete(format);
|
||||
decoder.reset();
|
||||
return false;
|
||||
}
|
||||
|
||||
status = AMediaCodec_start(decoder.get());
|
||||
if (status != AMEDIA_OK) {
|
||||
AMediaFormat_delete(format);
|
||||
decoder.reset();
|
||||
return false;
|
||||
}
|
||||
|
||||
AMediaFormat_delete(format);
|
||||
mADTSData = adts_data;
|
||||
return true;
|
||||
}
|
||||
|
||||
std::optional<BinaryMessage> MediaNDKDecoder::Impl::ProcessRequest(const BinaryMessage& request) {
|
||||
if (request.header.codec != DecoderCodec::DecodeAAC) {
|
||||
LOG_ERROR(Audio_DSP, "AAC Decoder cannot handle such codec: {}",
|
||||
static_cast<u16>(request.header.codec));
|
||||
return {};
|
||||
}
|
||||
|
||||
switch (request.header.cmd) {
|
||||
case DecoderCommand::Init: {
|
||||
return Initalize(request);
|
||||
}
|
||||
case DecoderCommand::EncodeDecode: {
|
||||
return Decode(request);
|
||||
}
|
||||
case DecoderCommand::Shutdown:
|
||||
case DecoderCommand::SaveState:
|
||||
case DecoderCommand::LoadState: {
|
||||
LOG_WARNING(Audio_DSP, "Got unimplemented binary request: {}",
|
||||
static_cast<u16>(request.header.cmd));
|
||||
BinaryMessage response = request;
|
||||
response.header.result = ResultStatus::Success;
|
||||
return response;
|
||||
}
|
||||
default:
|
||||
LOG_ERROR(Audio_DSP, "Got unknown binary request: {}",
|
||||
static_cast<u16>(request.header.cmd));
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
std::optional<BinaryMessage> MediaNDKDecoder::Impl::Decode(const BinaryMessage& request) {
|
||||
BinaryMessage response{};
|
||||
response.header.codec = request.header.codec;
|
||||
response.header.cmd = request.header.cmd;
|
||||
response.decode_aac_response.size = request.decode_aac_request.size;
|
||||
response.decode_aac_response.num_samples = 1024;
|
||||
|
||||
if (request.decode_aac_request.src_addr < Memory::FCRAM_PADDR ||
|
||||
request.decode_aac_request.src_addr + request.decode_aac_request.size >
|
||||
Memory::FCRAM_PADDR + Memory::FCRAM_SIZE) {
|
||||
LOG_ERROR(Audio_DSP, "Got out of bounds src_addr {:08x}",
|
||||
request.decode_aac_request.src_addr);
|
||||
return response;
|
||||
}
|
||||
|
||||
const u8* data =
|
||||
memory.GetFCRAMPointer(request.decode_aac_request.src_addr - Memory::FCRAM_PADDR);
|
||||
ADTSData adts_data = AudioCore::ParseADTS(data);
|
||||
SetMediaType(adts_data);
|
||||
response.decode_aac_response.sample_rate = GetSampleRateEnum(adts_data.samplerate);
|
||||
response.decode_aac_response.num_channels = adts_data.channels;
|
||||
if (!decoder) {
|
||||
LOG_ERROR(Audio_DSP, "Missing decoder for profile: {}, channels: {}, samplerate: {}",
|
||||
adts_data.profile, adts_data.channels, adts_data.samplerate);
|
||||
return {};
|
||||
}
|
||||
|
||||
// input
|
||||
constexpr int timeout = 160;
|
||||
std::size_t buffer_size = 0;
|
||||
u8* buffer = nullptr;
|
||||
ssize_t buffer_index = AMediaCodec_dequeueInputBuffer(decoder.get(), timeout);
|
||||
if (buffer_index < 0) {
|
||||
LOG_ERROR(Audio_DSP, "Failed to enqueue the input samples: {}", buffer_index);
|
||||
return response;
|
||||
}
|
||||
buffer = AMediaCodec_getInputBuffer(decoder.get(), buffer_index, &buffer_size);
|
||||
if (buffer_size < request.decode_aac_request.size) {
|
||||
return response;
|
||||
}
|
||||
std::memcpy(buffer, data, request.decode_aac_request.size);
|
||||
media_status_t status = AMediaCodec_queueInputBuffer(decoder.get(), buffer_index, 0,
|
||||
request.decode_aac_request.size, 0, 0);
|
||||
if (status != AMEDIA_OK) {
|
||||
LOG_WARNING(Audio_DSP, "Try queue input buffer again later!");
|
||||
return response;
|
||||
}
|
||||
|
||||
// output
|
||||
AMediaCodecBufferInfo info;
|
||||
std::array<std::vector<u16>, 2> out_streams;
|
||||
buffer_index = AMediaCodec_dequeueOutputBuffer(decoder.get(), &info, timeout);
|
||||
switch (buffer_index) {
|
||||
case AMEDIACODEC_INFO_TRY_AGAIN_LATER:
|
||||
LOG_WARNING(Audio_DSP, "Failed to dequeue output buffer: timeout!");
|
||||
break;
|
||||
case AMEDIACODEC_INFO_OUTPUT_BUFFERS_CHANGED:
|
||||
LOG_WARNING(Audio_DSP, "Failed to dequeue output buffer: buffers changed!");
|
||||
break;
|
||||
case AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED: {
|
||||
AMediaFormat* format = AMediaCodec_getOutputFormat(decoder.get());
|
||||
LOG_WARNING(Audio_DSP, "output format: {}", AMediaFormat_toString(format));
|
||||
AMediaFormat_delete(format);
|
||||
buffer_index = AMediaCodec_dequeueOutputBuffer(decoder.get(), &info, timeout);
|
||||
}
|
||||
default: {
|
||||
int offset = info.offset;
|
||||
buffer = AMediaCodec_getOutputBuffer(decoder.get(), buffer_index, &buffer_size);
|
||||
while (offset < info.size) {
|
||||
for (int channel = 0; channel < response.decode_aac_response.num_channels; channel++) {
|
||||
u16 pcm_data;
|
||||
std::memcpy(&pcm_data, buffer + offset, sizeof(pcm_data));
|
||||
out_streams[channel].push_back(pcm_data);
|
||||
offset += sizeof(pcm_data);
|
||||
}
|
||||
}
|
||||
AMediaCodec_releaseOutputBuffer(decoder.get(), buffer_index, info.size != 0);
|
||||
}
|
||||
}
|
||||
|
||||
// transfer the decoded buffer from vector to the FCRAM
|
||||
size_t stream0_size = out_streams[0].size() * sizeof(u16);
|
||||
if (stream0_size != 0) {
|
||||
if (request.decode_aac_request.dst_addr_ch0 < Memory::FCRAM_PADDR ||
|
||||
request.decode_aac_request.dst_addr_ch0 + stream0_size >
|
||||
Memory::FCRAM_PADDR + Memory::FCRAM_SIZE) {
|
||||
LOG_ERROR(Audio_DSP, "Got out of bounds dst_addr_ch0 {:08x}",
|
||||
request.decode_aac_request.dst_addr_ch0);
|
||||
return response;
|
||||
}
|
||||
std::memcpy(
|
||||
memory.GetFCRAMPointer(request.decode_aac_request.dst_addr_ch0 - Memory::FCRAM_PADDR),
|
||||
out_streams[0].data(), stream0_size);
|
||||
}
|
||||
|
||||
size_t stream1_size = out_streams[1].size() * sizeof(u16);
|
||||
if (stream1_size != 0) {
|
||||
if (request.decode_aac_request.dst_addr_ch1 < Memory::FCRAM_PADDR ||
|
||||
request.decode_aac_request.dst_addr_ch1 + stream1_size >
|
||||
Memory::FCRAM_PADDR + Memory::FCRAM_SIZE) {
|
||||
LOG_ERROR(Audio_DSP, "Got out of bounds dst_addr_ch1 {:08x}",
|
||||
request.decode_aac_request.dst_addr_ch1);
|
||||
return response;
|
||||
}
|
||||
std::memcpy(
|
||||
memory.GetFCRAMPointer(request.decode_aac_request.dst_addr_ch1 - Memory::FCRAM_PADDR),
|
||||
out_streams[1].data(), stream1_size);
|
||||
}
|
||||
return response;
|
||||
}
|
||||
|
||||
MediaNDKDecoder::MediaNDKDecoder(Memory::MemorySystem& memory)
|
||||
: impl(std::make_unique<Impl>(memory)) {}
|
||||
|
||||
MediaNDKDecoder::~MediaNDKDecoder() = default;
|
||||
|
||||
std::optional<BinaryMessage> MediaNDKDecoder::ProcessRequest(const BinaryMessage& request) {
|
||||
return impl->ProcessRequest(request);
|
||||
}
|
||||
|
||||
bool MediaNDKDecoder::IsValid() const {
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace AudioCore::HLE
|
@ -1,22 +0,0 @@
// Copyright 2019 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once

#include "audio_core/hle/decoder.h"

namespace AudioCore::HLE {

class MediaNDKDecoder final : public DecoderBase {
public:
    explicit MediaNDKDecoder(Memory::MemorySystem& memory);
    ~MediaNDKDecoder() override;
    std::optional<BinaryMessage> ProcessRequest(const BinaryMessage& request) override;
    bool IsValid() const override;

private:
    class Impl;
    std::unique_ptr<Impl> impl;
};

} // namespace AudioCore::HLE
@ -1,313 +0,0 @@
|
||||
// Copyright 2018 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "audio_core/hle/wmf_decoder.h"
|
||||
#include "audio_core/hle/wmf_decoder_utils.h"
|
||||
|
||||
namespace AudioCore::HLE {
|
||||
|
||||
using namespace MFDecoder;
|
||||
|
||||
class WMFDecoder::Impl {
|
||||
public:
|
||||
explicit Impl(Memory::MemorySystem& memory);
|
||||
~Impl();
|
||||
std::optional<BinaryMessage> ProcessRequest(const BinaryMessage& request);
|
||||
bool IsValid() const {
|
||||
return is_valid;
|
||||
}
|
||||
|
||||
private:
|
||||
std::optional<BinaryMessage> Initalize(const BinaryMessage& request);
|
||||
|
||||
std::optional<BinaryMessage> Decode(const BinaryMessage& request);
|
||||
|
||||
MFOutputState DecodingLoop(AudioCore::ADTSData adts_header,
|
||||
std::array<std::vector<u8>, 2>& out_streams);
|
||||
|
||||
bool transform_initialized = false;
|
||||
bool format_selected = false;
|
||||
|
||||
Memory::MemorySystem& memory;
|
||||
|
||||
unique_mfptr<IMFTransform> transform;
|
||||
DWORD in_stream_id = 0;
|
||||
DWORD out_stream_id = 0;
|
||||
bool is_valid = false;
|
||||
bool mf_started = false;
|
||||
bool coinited = false;
|
||||
};
|
||||
|
||||
WMFDecoder::Impl::Impl(Memory::MemorySystem& memory) : memory(memory) {
|
||||
// Attempt to load the symbols for mf.dll
|
||||
if (!InitMFDLL()) {
|
||||
LOG_CRITICAL(Audio_DSP,
|
||||
"Unable to load mf.dll. AAC audio through media foundation unavailable");
|
||||
return;
|
||||
}
|
||||
|
||||
HRESULT hr = S_OK;
|
||||
hr = CoInitialize(NULL);
|
||||
// S_FALSE will be returned when COM has already been initialized
|
||||
if (hr != S_OK && hr != S_FALSE) {
|
||||
ReportError("Failed to start COM components", hr);
|
||||
} else {
|
||||
coinited = true;
|
||||
}
|
||||
|
||||
// lite startup is faster and all what we need is included
|
||||
hr = MFDecoder::MFStartup(MF_VERSION, MFSTARTUP_LITE);
|
||||
if (hr != S_OK) {
|
||||
// Do you know you can't initialize MF in test mode or safe mode?
|
||||
ReportError("Failed to initialize Media Foundation", hr);
|
||||
} else {
|
||||
mf_started = true;
|
||||
}
|
||||
|
||||
LOG_INFO(Audio_DSP, "Media Foundation activated");
|
||||
|
||||
// initialize transform
|
||||
transform = MFDecoderInit();
|
||||
if (transform == nullptr) {
|
||||
LOG_CRITICAL(Audio_DSP, "Can't initialize decoder");
|
||||
return;
|
||||
}
|
||||
|
||||
hr = transform->GetStreamIDs(1, &in_stream_id, 1, &out_stream_id);
|
||||
if (hr == E_NOTIMPL) {
|
||||
// if not implemented, it means this MFT does not assign stream ID for you
|
||||
in_stream_id = 0;
|
||||
out_stream_id = 0;
|
||||
} else if (FAILED(hr)) {
|
||||
ReportError("Decoder failed to initialize the stream ID", hr);
|
||||
return;
|
||||
}
|
||||
transform_initialized = true;
|
||||
is_valid = true;
|
||||
}
|
||||
|
||||
WMFDecoder::Impl::~Impl() {
|
||||
if (transform_initialized) {
|
||||
MFFlush(transform.get());
|
||||
// delete the transform object before shutting down MF
|
||||
// otherwise access violation will occur
|
||||
transform.reset();
|
||||
}
|
||||
if (mf_started) {
|
||||
MFDecoder::MFShutdown();
|
||||
}
|
||||
if (coinited) {
|
||||
CoUninitialize();
|
||||
}
|
||||
}
|
||||
|
||||
std::optional<BinaryMessage> WMFDecoder::Impl::ProcessRequest(const BinaryMessage& request) {
|
||||
if (request.header.codec != DecoderCodec::DecodeAAC) {
|
||||
LOG_ERROR(Audio_DSP, "Got unknown codec {}", static_cast<u16>(request.header.codec));
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
switch (request.header.cmd) {
|
||||
case DecoderCommand::Init: {
|
||||
LOG_INFO(Audio_DSP, "WMFDecoder initializing");
|
||||
return Initalize(request);
|
||||
}
|
||||
case DecoderCommand::EncodeDecode: {
|
||||
return Decode(request);
|
||||
}
|
||||
case DecoderCommand::Shutdown:
|
||||
case DecoderCommand::SaveState:
|
||||
case DecoderCommand::LoadState: {
|
||||
LOG_WARNING(Audio_DSP, "Got unimplemented binary request: {}",
|
||||
static_cast<u16>(request.header.cmd));
|
||||
BinaryMessage response = request;
|
||||
response.header.result = ResultStatus::Success;
|
||||
return response;
|
||||
}
|
||||
default:
|
||||
LOG_ERROR(Audio_DSP, "Got unknown binary request: {}",
|
||||
static_cast<u16>(request.header.cmd));
|
||||
return std::nullopt;
|
||||
}
|
||||
}
|
||||
|
||||
std::optional<BinaryMessage> WMFDecoder::Impl::Initalize(const BinaryMessage& request) {
|
||||
BinaryMessage response = request;
|
||||
response.header.result = ResultStatus::Success;
|
||||
|
||||
format_selected = false; // select format again if application request initialize the DSP
|
||||
return response;
|
||||
}
|
||||
|
||||
MFOutputState WMFDecoder::Impl::DecodingLoop(AudioCore::ADTSData adts_header,
|
||||
std::array<std::vector<u8>, 2>& out_streams) {
|
||||
std::optional<std::vector<f32>> output_buffer;
|
||||
|
||||
while (true) {
|
||||
auto [output_status, output] = ReceiveSample(transform.get(), out_stream_id);
|
||||
|
||||
// 0 -> okay; 3 -> okay but more data available (buffer too small)
|
||||
if (output_status == MFOutputState::OK || output_status == MFOutputState::HaveMoreData) {
|
||||
output_buffer = CopySampleToBuffer(output.get());
|
||||
|
||||
// the following was taken from ffmpeg version of the decoder
|
||||
f32 val_f32;
|
||||
for (std::size_t i = 0; i < output_buffer->size();) {
|
||||
for (std::size_t channel = 0; channel < adts_header.channels; channel++) {
|
||||
val_f32 = std::clamp(output_buffer->at(i), -1.0f, 1.0f);
|
||||
s16 val = static_cast<s16>(0x7FFF * val_f32);
|
||||
out_streams[channel].push_back(val & 0xFF);
|
||||
out_streams[channel].push_back(val >> 8);
|
||||
// i is incremented on per channel basis
|
||||
i++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If we return OK here, the decoder won't be in a state to receive new data and will fail
|
||||
// on the next call; instead treat it like the HaveMoreData case
|
||||
if (output_status == MFOutputState::OK)
|
||||
continue;
|
||||
|
||||
// for status = 2, reset MF
|
||||
if (output_status == MFOutputState::NeedReconfig) {
|
||||
format_selected = false;
|
||||
return MFOutputState::NeedReconfig;
|
||||
}
|
||||
|
||||
// for status = 3, try again with new buffer
|
||||
if (output_status == MFOutputState::HaveMoreData)
|
||||
continue;
|
||||
|
||||
// according to MS document, this is not an error (?!)
|
||||
if (output_status == MFOutputState::NeedMoreInput)
|
||||
return MFOutputState::NeedMoreInput;
|
||||
|
||||
return MFOutputState::FatalError; // return on other status
|
||||
}
|
||||
|
||||
return MFOutputState::FatalError;
|
||||
}
|
||||
|
||||
std::optional<BinaryMessage> WMFDecoder::Impl::Decode(const BinaryMessage& request) {
|
||||
BinaryMessage response{};
|
||||
response.header.codec = request.header.codec;
|
||||
response.header.cmd = request.header.cmd;
|
||||
response.decode_aac_response.size = request.decode_aac_request.size;
|
||||
response.decode_aac_response.num_channels = 2;
|
||||
response.decode_aac_response.num_samples = 1024;
|
||||
|
||||
if (!transform_initialized) {
|
||||
LOG_DEBUG(Audio_DSP, "Decoder not initialized");
|
||||
// This is a hack to continue games when decoder failed to initialize
|
||||
return response;
|
||||
}
|
||||
|
||||
if (request.decode_aac_request.src_addr < Memory::FCRAM_PADDR ||
|
||||
request.decode_aac_request.src_addr + request.decode_aac_request.size >
|
||||
Memory::FCRAM_PADDR + Memory::FCRAM_SIZE) {
|
||||
LOG_ERROR(Audio_DSP, "Got out of bounds src_addr {:08x}",
|
||||
request.decode_aac_request.src_addr);
|
||||
return std::nullopt;
|
||||
}
|
||||
const u8* data =
|
||||
memory.GetFCRAMPointer(request.decode_aac_request.src_addr - Memory::FCRAM_PADDR);
|
||||
|
||||
std::array<std::vector<u8>, 2> out_streams;
|
||||
unique_mfptr<IMFSample> sample;
|
||||
MFInputState input_status = MFInputState::OK;
|
||||
MFOutputState output_status = MFOutputState::OK;
|
||||
std::optional<ADTSMeta> adts_meta = DetectMediaType(data, request.decode_aac_request.size);
|
||||
|
||||
if (!adts_meta) {
|
||||
LOG_ERROR(Audio_DSP, "Unable to deduce decoding parameters from ADTS stream");
|
||||
return response;
|
||||
}
|
||||
|
||||
response.decode_aac_response.sample_rate = GetSampleRateEnum(adts_meta->ADTSHeader.samplerate);
|
||||
response.decode_aac_response.num_channels = adts_meta->ADTSHeader.channels;
|
||||
|
||||
if (!format_selected) {
|
||||
LOG_DEBUG(Audio_DSP, "New ADTS stream: channels = {}, sample rate = {}",
|
||||
adts_meta->ADTSHeader.channels, adts_meta->ADTSHeader.samplerate);
|
||||
SelectInputMediaType(transform.get(), in_stream_id, adts_meta->ADTSHeader,
|
||||
adts_meta->AACTag, 14);
|
||||
SelectOutputMediaType(transform.get(), out_stream_id);
|
||||
SendSample(transform.get(), in_stream_id, nullptr);
|
||||
// cache the result from detect_mediatype and call select_*_mediatype only once
|
||||
// This could increase performance very slightly
|
||||
transform->ProcessMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0);
|
||||
format_selected = true;
|
||||
}
|
||||
|
||||
sample = CreateSample(data, request.decode_aac_request.size, 1, 0);
|
||||
sample->SetUINT32(MFSampleExtension_CleanPoint, 1);
|
||||
|
||||
while (true) {
|
||||
input_status = SendSample(transform.get(), in_stream_id, sample.get());
|
||||
output_status = DecodingLoop(adts_meta->ADTSHeader, out_streams);
|
||||
|
||||
if (output_status == MFOutputState::FatalError) {
|
||||
// if the decode issues are caused by MFT not accepting new samples, try again
|
||||
// NOTICE: you are required to check the output even if you already knew/guessed
|
||||
// MFT didn't accept the input sample
|
||||
if (input_status == MFInputState::NotAccepted) {
|
||||
// try again
|
||||
continue;
|
||||
}
|
||||
|
||||
LOG_ERROR(Audio_DSP, "Errors occurred when receiving output");
|
||||
return response;
|
||||
} else if (output_status == MFOutputState::NeedReconfig) {
|
||||
// flush the transform
|
||||
MFFlush(transform.get());
|
||||
// decode again
|
||||
return this->Decode(request);
|
||||
}
|
||||
|
||||
break; // jump out of the loop if at least we don't have obvious issues
|
||||
}
|
||||
|
||||
if (out_streams[0].size() != 0) {
|
||||
if (request.decode_aac_request.dst_addr_ch0 < Memory::FCRAM_PADDR ||
|
||||
request.decode_aac_request.dst_addr_ch0 + out_streams[0].size() >
|
||||
Memory::FCRAM_PADDR + Memory::FCRAM_SIZE) {
|
||||
LOG_ERROR(Audio_DSP, "Got out of bounds dst_addr_ch0 {:08x}",
|
||||
request.decode_aac_request.dst_addr_ch0);
|
||||
return std::nullopt;
|
||||
}
|
||||
std::memcpy(
|
||||
memory.GetFCRAMPointer(request.decode_aac_request.dst_addr_ch0 - Memory::FCRAM_PADDR),
|
||||
out_streams[0].data(), out_streams[0].size());
|
||||
}
|
||||
|
||||
if (out_streams[1].size() != 0) {
|
||||
if (request.decode_aac_request.dst_addr_ch1 < Memory::FCRAM_PADDR ||
|
||||
request.decode_aac_request.dst_addr_ch1 + out_streams[1].size() >
|
||||
Memory::FCRAM_PADDR + Memory::FCRAM_SIZE) {
|
||||
LOG_ERROR(Audio_DSP, "Got out of bounds dst_addr_ch1 {:08x}",
|
||||
request.decode_aac_request.dst_addr_ch1);
|
||||
return std::nullopt;
|
||||
}
|
||||
std::memcpy(
|
||||
memory.GetFCRAMPointer(request.decode_aac_request.dst_addr_ch1 - Memory::FCRAM_PADDR),
|
||||
out_streams[1].data(), out_streams[1].size());
|
||||
}
|
||||
|
||||
return response;
|
||||
}
|
||||
|
||||
WMFDecoder::WMFDecoder(Memory::MemorySystem& memory) : impl(std::make_unique<Impl>(memory)) {}
|
||||
|
||||
WMFDecoder::~WMFDecoder() = default;
|
||||
|
||||
std::optional<BinaryMessage> WMFDecoder::ProcessRequest(const BinaryMessage& request) {
|
||||
return impl->ProcessRequest(request);
|
||||
}
|
||||
|
||||
bool WMFDecoder::IsValid() const {
|
||||
return impl->IsValid();
|
||||
}
|
||||
|
||||
} // namespace AudioCore::HLE
|
@ -1,23 +0,0 @@
// Copyright 2018 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "audio_core/hle/decoder.h"

namespace AudioCore::HLE {

class WMFDecoder final : public DecoderBase {
public:
    explicit WMFDecoder(Memory::MemorySystem& memory);
    ~WMFDecoder() override;
    std::optional<BinaryMessage> ProcessRequest(const BinaryMessage& request) override;
    bool IsValid() const override;

private:
    class Impl;
    std::unique_ptr<Impl> impl;
};

} // namespace AudioCore::HLE
@ -1,464 +0,0 @@
|
||||
// Copyright 2019 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
#include "common/logging/log.h"
|
||||
#include "common/string_util.h"
|
||||
#include "wmf_decoder_utils.h"
|
||||
|
||||
namespace MFDecoder {
|
||||
|
||||
// utility functions
|
||||
void ReportError(std::string msg, HRESULT hr) {
|
||||
if (SUCCEEDED(hr)) {
|
||||
return;
|
||||
}
|
||||
LPWSTR err;
|
||||
FormatMessageW(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER |
|
||||
FORMAT_MESSAGE_IGNORE_INSERTS,
|
||||
nullptr, hr,
|
||||
// hardcode to use en_US because if any user had problems with this
|
||||
// we can help them w/o translating anything
|
||||
// default is to use the language currently active on the operating system
|
||||
MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US), (LPWSTR)&err, 0, nullptr);
|
||||
if (err != nullptr) {
|
||||
LOG_CRITICAL(Audio_DSP, "{}: {}", msg, Common::UTF16ToUTF8(err));
|
||||
LocalFree(err);
|
||||
}
|
||||
LOG_CRITICAL(Audio_DSP, "{}: {:08x}", msg, hr);
|
||||
}
|
||||
|
||||
unique_mfptr<IMFTransform> MFDecoderInit(GUID audio_format) {
|
||||
|
||||
HRESULT hr = S_OK;
|
||||
MFT_REGISTER_TYPE_INFO reg{};
|
||||
GUID category = MFT_CATEGORY_AUDIO_DECODER;
|
||||
IMFActivate** activate;
|
||||
unique_mfptr<IMFTransform> transform;
|
||||
UINT32 num_activate;
|
||||
|
||||
reg.guidMajorType = MFMediaType_Audio;
|
||||
reg.guidSubtype = audio_format;
|
||||
|
||||
hr = MFTEnumEx(category,
|
||||
MFT_ENUM_FLAG_SYNCMFT | MFT_ENUM_FLAG_LOCALMFT | MFT_ENUM_FLAG_SORTANDFILTER,
|
||||
®, nullptr, &activate, &num_activate);
|
||||
if (FAILED(hr) || num_activate < 1) {
|
||||
ReportError("Failed to enumerate decoders", hr);
|
||||
CoTaskMemFree(activate);
|
||||
return nullptr;
|
||||
}
|
||||
LOG_INFO(Audio_DSP, "Windows(R) Media Foundation found {} suitable decoder(s)", num_activate);
|
||||
for (unsigned int n = 0; n < num_activate; n++) {
|
||||
hr = activate[n]->ActivateObject(
|
||||
IID_IMFTransform,
|
||||
reinterpret_cast<void**>(static_cast<IMFTransform**>(Amp(transform))));
|
||||
if (FAILED(hr))
|
||||
transform = nullptr;
|
||||
activate[n]->Release();
|
||||
if (SUCCEEDED(hr))
|
||||
break;
|
||||
}
|
||||
if (transform == nullptr) {
|
||||
ReportError("Failed to initialize MFT", hr);
|
||||
CoTaskMemFree(activate);
|
||||
return nullptr;
|
||||
}
|
||||
CoTaskMemFree(activate);
|
||||
return transform;
|
||||
}
|
||||
|
||||
unique_mfptr<IMFSample> CreateSample(const void* data, DWORD len, DWORD alignment,
|
||||
LONGLONG duration) {
|
||||
HRESULT hr = S_OK;
|
||||
unique_mfptr<IMFMediaBuffer> buf;
|
||||
unique_mfptr<IMFSample> sample;
|
||||
|
||||
hr = MFCreateSample(Amp(sample));
|
||||
if (FAILED(hr)) {
|
||||
ReportError("Unable to allocate a sample", hr);
|
||||
return nullptr;
|
||||
}
|
||||
// Yes, the argument for alignment is the actual alignment - 1
|
||||
hr = MFCreateAlignedMemoryBuffer(len, alignment - 1, Amp(buf));
|
||||
if (FAILED(hr)) {
|
||||
ReportError("Unable to allocate a memory buffer for sample", hr);
|
||||
return nullptr;
|
||||
}
|
||||
if (data) {
|
||||
BYTE* buffer;
|
||||
// lock the MediaBuffer
|
||||
// this is actually not a thread-safe lock
|
||||
hr = buf->Lock(&buffer, nullptr, nullptr);
|
||||
if (FAILED(hr)) {
|
||||
ReportError("Unable to lock down MediaBuffer", hr);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
std::memcpy(buffer, data, len);
|
||||
|
||||
buf->SetCurrentLength(len);
|
||||
buf->Unlock();
|
||||
}
|
||||
|
||||
sample->AddBuffer(buf.get());
|
||||
hr = sample->SetSampleDuration(duration);
|
||||
if (FAILED(hr)) {
|
||||
// MFT will take a guess for you in this case
|
||||
ReportError("Unable to set sample duration, but continuing anyway", hr);
|
||||
}
|
||||
|
||||
return sample;
|
||||
}
|
||||
|
||||
bool SelectInputMediaType(IMFTransform* transform, int in_stream_id,
|
||||
const AudioCore::ADTSData& adts, const UINT8* user_data,
|
||||
UINT32 user_data_len, GUID audio_format) {
|
||||
HRESULT hr = S_OK;
|
||||
unique_mfptr<IMFMediaType> t;
|
||||
|
||||
// actually you can get rid of the whole block of searching and filtering mess
|
||||
// if you know the exact parameters of your media stream
|
||||
hr = MFCreateMediaType(Amp(t));
|
||||
if (FAILED(hr)) {
|
||||
ReportError("Unable to create an empty MediaType", hr);
|
||||
return false;
|
||||
}
|
||||
|
||||
// basic definition
|
||||
t->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Audio);
|
||||
t->SetGUID(MF_MT_SUBTYPE, audio_format);
|
||||
|
||||
t->SetUINT32(MF_MT_AAC_PAYLOAD_TYPE, 1);
|
||||
t->SetUINT32(MF_MT_AUDIO_NUM_CHANNELS, adts.channels);
|
||||
t->SetUINT32(MF_MT_AUDIO_SAMPLES_PER_SECOND, adts.samplerate);
|
||||
// 0xfe = 254 = "unspecified"
|
||||
t->SetUINT32(MF_MT_AAC_AUDIO_PROFILE_LEVEL_INDICATION, 254);
|
||||
t->SetUINT32(MF_MT_AUDIO_BLOCK_ALIGNMENT, 1);
|
||||
t->SetBlob(MF_MT_USER_DATA, user_data, user_data_len);
|
||||
hr = transform->SetInputType(in_stream_id, t.get(), 0);
|
||||
if (FAILED(hr)) {
|
||||
ReportError("failed to select input types for MFT", hr);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool SelectOutputMediaType(IMFTransform* transform, int out_stream_id, GUID audio_format) {
|
||||
HRESULT hr = S_OK;
|
||||
UINT32 tmp;
|
||||
unique_mfptr<IMFMediaType> type;
|
||||
|
||||
// If you know what you need and what you are doing, you can specify the conditions instead of
|
||||
// searching but it's better to use search since MFT may or may not support your output
|
||||
// parameters
|
||||
for (DWORD i = 0;; i++) {
|
||||
hr = transform->GetOutputAvailableType(out_stream_id, i, Amp(type));
|
||||
if (hr == MF_E_NO_MORE_TYPES || hr == E_NOTIMPL) {
|
||||
return true;
|
||||
}
|
||||
if (FAILED(hr)) {
|
||||
ReportError("failed to get output types for MFT", hr);
|
||||
return false;
|
||||
}
|
||||
|
||||
hr = type->GetUINT32(MF_MT_AUDIO_BITS_PER_SAMPLE, &tmp);
|
||||
|
||||
if (FAILED(hr))
|
||||
continue;
|
||||
// select PCM-16 format
|
||||
if (tmp == 32) {
|
||||
hr = type->SetUINT32(MF_MT_AUDIO_BLOCK_ALIGNMENT, 1);
|
||||
if (FAILED(hr)) {
|
||||
ReportError("failed to set MF_MT_AUDIO_BLOCK_ALIGNMENT for MFT on output stream",
|
||||
hr);
|
||||
return false;
|
||||
}
|
||||
hr = transform->SetOutputType(out_stream_id, type.get(), 0);
|
||||
if (FAILED(hr)) {
|
||||
ReportError("failed to select output types for MFT", hr);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
ReportError("MFT: Unable to find preferred output format", E_NOTIMPL);
|
||||
return false;
|
||||
}
|
||||
|
||||
std::optional<ADTSMeta> DetectMediaType(const u8* buffer, std::size_t len) {
|
||||
if (len < 7) {
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
AudioCore::ADTSData tmp;
|
||||
ADTSMeta result;
|
||||
// see https://docs.microsoft.com/en-us/windows/desktop/api/mmreg/ns-mmreg-heaacwaveinfo_tag
|
||||
// for the meaning of the byte array below
|
||||
|
||||
// it might be a good idea to wrap the parameters into a struct
|
||||
// and pass that struct into the function but doing that will lead to messier code
|
||||
// const UINT8 aac_data[] = { 0x01, 0x00, 0xfe, 00, 00, 00, 00, 00, 00, 00, 00, 00, 0x11, 0x90
|
||||
// }; first byte: 0: raw aac 1: adts 2: adif 3: latm/laos
|
||||
UINT8 aac_tmp[] = {0x01, 0x00, 0xfe, 00, 00, 00, 00, 00, 00, 00, 00, 00, 0x00, 0x00};
|
||||
uint16_t tag = 0;
|
||||
|
||||
tmp = AudioCore::ParseADTS(buffer);
|
||||
if (tmp.length == 0) {
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
tag = MFGetAACTag(tmp);
|
||||
aac_tmp[12] |= (tag & 0xff00) >> 8;
|
||||
aac_tmp[13] |= (tag & 0x00ff);
|
||||
std::memcpy(&(result.ADTSHeader), &tmp, sizeof(AudioCore::ADTSData));
|
||||
std::memcpy(&(result.AACTag), aac_tmp, 14);
|
||||
return result;
|
||||
}
|
||||
|
||||
void MFFlush(IMFTransform* transform) {
|
||||
HRESULT hr = transform->ProcessMessage(MFT_MESSAGE_COMMAND_FLUSH, 0);
|
||||
if (FAILED(hr)) {
|
||||
ReportError("MFT: Flush command failed", hr);
|
||||
}
|
||||
hr = transform->ProcessMessage(MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0);
|
||||
if (FAILED(hr)) {
|
||||
ReportError("Failed to end streaming for MFT", hr);
|
||||
}
|
||||
}
|
||||
|
||||
MFInputState SendSample(IMFTransform* transform, DWORD in_stream_id, IMFSample* in_sample) {
|
||||
HRESULT hr = S_OK;
|
||||
|
||||
if (in_sample) {
|
||||
hr = transform->ProcessInput(in_stream_id, in_sample, 0);
|
||||
if (hr == MF_E_NOTACCEPTING) {
|
||||
return MFInputState::NotAccepted; // try again
|
||||
} else if (FAILED(hr)) {
|
||||
ReportError("MFT: Failed to process input", hr);
|
||||
return MFInputState::FatalError;
|
||||
} // FAILED(hr)
|
||||
} else {
|
||||
hr = transform->ProcessMessage(MFT_MESSAGE_COMMAND_DRAIN, 0);
|
||||
if (FAILED(hr)) {
|
||||
ReportError("MFT: Failed to drain when processing input", hr);
|
||||
}
|
||||
}
|
||||
|
||||
return MFInputState::OK;
|
||||
}
|
||||
|
||||
std::tuple<MFOutputState, unique_mfptr<IMFSample>> ReceiveSample(IMFTransform* transform,
|
||||
DWORD out_stream_id) {
|
||||
HRESULT hr;
|
||||
MFT_OUTPUT_DATA_BUFFER out_buffers;
|
||||
MFT_OUTPUT_STREAM_INFO out_info;
|
||||
DWORD status = 0;
|
||||
unique_mfptr<IMFSample> sample;
|
||||
bool mft_create_sample = false;
|
||||
|
||||
hr = transform->GetOutputStreamInfo(out_stream_id, &out_info);
|
||||
|
||||
if (FAILED(hr)) {
|
||||
ReportError("MFT: Failed to get stream info", hr);
|
||||
return std::make_tuple(MFOutputState::FatalError, std::move(sample));
|
||||
}
|
||||
mft_create_sample = (out_info.dwFlags & MFT_OUTPUT_STREAM_PROVIDES_SAMPLES) ||
|
||||
(out_info.dwFlags & MFT_OUTPUT_STREAM_CAN_PROVIDE_SAMPLES);
|
||||
|
||||
while (true) {
|
||||
status = 0;
|
||||
|
||||
if (!mft_create_sample) {
|
||||
sample = CreateSample(nullptr, out_info.cbSize, out_info.cbAlignment);
|
||||
if (!sample.get()) {
|
||||
ReportError("MFT: Unable to allocate memory for samples", hr);
|
||||
return std::make_tuple(MFOutputState::FatalError, std::move(sample));
|
||||
}
|
||||
}
|
||||
|
||||
out_buffers.dwStreamID = out_stream_id;
|
||||
out_buffers.pSample = sample.get();
|
||||
|
||||
hr = transform->ProcessOutput(0, 1, &out_buffers, &status);
|
||||
|
||||
if (!FAILED(hr)) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
|
||||
// Most likely reasons: data corrupted; your actions not expected by MFT
|
||||
return std::make_tuple(MFOutputState::NeedMoreInput, std::move(sample));
|
||||
}
|
||||
|
||||
if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
|
||||
ReportError("MFT: stream format changed, re-configuration required", hr);
|
||||
return std::make_tuple(MFOutputState::NeedReconfig, std::move(sample));
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
if (out_buffers.dwStatus & MFT_OUTPUT_DATA_BUFFER_INCOMPLETE) {
|
||||
// this status is also unreliable but whatever
|
||||
return std::make_tuple(MFOutputState::HaveMoreData, std::move(sample));
|
||||
}
|
||||
|
||||
if (out_buffers.pSample == nullptr) {
|
||||
ReportError("MFT: decoding failure", hr);
|
||||
return std::make_tuple(MFOutputState::FatalError, std::move(sample));
|
||||
}
|
||||
|
||||
return std::make_tuple(MFOutputState::OK, std::move(sample));
|
||||
}
|
||||
|
||||
std::optional<std::vector<f32>> CopySampleToBuffer(IMFSample* sample) {
|
||||
unique_mfptr<IMFMediaBuffer> buffer;
|
||||
HRESULT hr = S_OK;
|
||||
std::optional<std::vector<f32>> output;
|
||||
std::vector<f32> output_buffer;
|
||||
BYTE* data;
|
||||
DWORD len = 0;
|
||||
|
||||
hr = sample->GetTotalLength(&len);
|
||||
if (FAILED(hr)) {
|
||||
ReportError("Failed to get the length of sample buffer", hr);
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
hr = sample->ConvertToContiguousBuffer(Amp(buffer));
|
||||
if (FAILED(hr)) {
|
||||
ReportError("Failed to get sample buffer", hr);
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
hr = buffer->Lock(&data, nullptr, nullptr);
|
||||
if (FAILED(hr)) {
|
||||
ReportError("Failed to lock the buffer", hr);
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
output_buffer.resize(len / sizeof(f32));
|
||||
std::memcpy(output_buffer.data(), data, len);
|
||||
output = output_buffer;
|
||||
|
||||
// if buffer unlock fails, then... whatever, we have already got data
|
||||
buffer->Unlock();
|
||||
return output;
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
struct LibraryDeleter {
|
||||
using pointer = HMODULE;
|
||||
void operator()(HMODULE h) const {
|
||||
if (h != nullptr)
|
||||
FreeLibrary(h);
|
||||
}
|
||||
};
|
||||
|
||||
std::unique_ptr<HMODULE, LibraryDeleter> mf_dll{nullptr};
|
||||
std::unique_ptr<HMODULE, LibraryDeleter> mfplat_dll{nullptr};
|
||||
|
||||
} // namespace
|
||||
|
||||
bool InitMFDLL() {
|
||||
|
||||
mf_dll.reset(LoadLibrary(TEXT("mf.dll")));
|
||||
if (!mf_dll) {
|
||||
DWORD error_message_id = GetLastError();
|
||||
LPSTR message_buffer = nullptr;
|
||||
size_t size =
|
||||
FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
|
||||
FORMAT_MESSAGE_IGNORE_INSERTS,
|
||||
nullptr, error_message_id, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
|
||||
reinterpret_cast<LPSTR>(&message_buffer), 0, nullptr);
|
||||
|
||||
std::string message(message_buffer, size);
|
||||
|
||||
LocalFree(message_buffer);
|
||||
LOG_ERROR(Audio_DSP, "Could not load mf.dll: {}", message);
|
||||
return false;
|
||||
}
|
||||
|
||||
mfplat_dll.reset(LoadLibrary(TEXT("mfplat.dll")));
|
||||
if (!mfplat_dll) {
|
||||
DWORD error_message_id = GetLastError();
|
||||
LPSTR message_buffer = nullptr;
|
||||
size_t size =
|
||||
FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
|
||||
FORMAT_MESSAGE_IGNORE_INSERTS,
|
||||
nullptr, error_message_id, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
|
||||
reinterpret_cast<LPSTR>(&message_buffer), 0, nullptr);
|
||||
|
||||
std::string message(message_buffer, size);
|
||||
|
||||
LocalFree(message_buffer);
|
||||
LOG_ERROR(Audio_DSP, "Could not load mfplat.dll: {}", message);
|
||||
return false;
|
||||
}
|
||||
|
||||
MFStartup = Symbol<HRESULT(ULONG, DWORD)>(mfplat_dll.get(), "MFStartup");
|
||||
if (!MFStartup) {
|
||||
LOG_ERROR(Audio_DSP, "Cannot load function MFStartup");
|
||||
return false;
|
||||
}
|
||||
|
||||
MFShutdown = Symbol<HRESULT(void)>(mfplat_dll.get(), "MFShutdown");
|
||||
if (!MFShutdown) {
|
||||
LOG_ERROR(Audio_DSP, "Cannot load function MFShutdown");
|
||||
return false;
|
||||
}
|
||||
|
||||
MFShutdownObject = Symbol<HRESULT(IUnknown*)>(mf_dll.get(), "MFShutdownObject");
|
||||
if (!MFShutdownObject) {
|
||||
LOG_ERROR(Audio_DSP, "Cannot load function MFShutdownObject");
|
||||
return false;
|
||||
}
|
||||
|
||||
MFCreateAlignedMemoryBuffer = Symbol<HRESULT(DWORD, DWORD, IMFMediaBuffer**)>(
|
||||
mfplat_dll.get(), "MFCreateAlignedMemoryBuffer");
|
||||
if (!MFCreateAlignedMemoryBuffer) {
|
||||
LOG_ERROR(Audio_DSP, "Cannot load function MFCreateAlignedMemoryBuffer");
|
||||
return false;
|
||||
}
|
||||
|
||||
MFCreateSample = Symbol<HRESULT(IMFSample**)>(mfplat_dll.get(), "MFCreateSample");
|
||||
if (!MFCreateSample) {
|
||||
LOG_ERROR(Audio_DSP, "Cannot load function MFCreateSample");
|
||||
return false;
|
||||
}
|
||||
|
||||
MFTEnumEx =
|
||||
Symbol<HRESULT(GUID, UINT32, const MFT_REGISTER_TYPE_INFO*, const MFT_REGISTER_TYPE_INFO*,
|
||||
IMFActivate***, UINT32*)>(mfplat_dll.get(), "MFTEnumEx");
|
||||
if (!MFTEnumEx) {
|
||||
LOG_ERROR(Audio_DSP, "Cannot load function MFTEnumEx");
|
||||
return false;
|
||||
}
|
||||
|
||||
MFCreateMediaType = Symbol<HRESULT(IMFMediaType**)>(mfplat_dll.get(), "MFCreateMediaType");
|
||||
if (!MFCreateMediaType) {
|
||||
LOG_ERROR(Audio_DSP, "Cannot load function MFCreateMediaType");
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
Symbol<HRESULT(ULONG, DWORD)> MFStartup;
|
||||
Symbol<HRESULT(void)> MFShutdown;
|
||||
Symbol<HRESULT(IUnknown*)> MFShutdownObject;
|
||||
Symbol<HRESULT(DWORD, DWORD, IMFMediaBuffer**)> MFCreateAlignedMemoryBuffer;
|
||||
Symbol<HRESULT(IMFSample**)> MFCreateSample;
|
||||
Symbol<HRESULT(GUID, UINT32, const MFT_REGISTER_TYPE_INFO*, const MFT_REGISTER_TYPE_INFO*,
|
||||
IMFActivate***, UINT32*)>
|
||||
MFTEnumEx;
|
||||
Symbol<HRESULT(IMFMediaType**)> MFCreateMediaType;
|
||||
|
||||
} // namespace MFDecoder
|
@ -1,125 +0,0 @@
|
||||
// Copyright 2019 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
#include <tuple>
|
||||
#include <vector>
|
||||
#include <comdef.h>
|
||||
#include <mfapi.h>
|
||||
#include <mferror.h>
|
||||
#include <mfidl.h>
|
||||
#include <mftransform.h>
|
||||
|
||||
#include "adts.h"
|
||||
|
||||
namespace MFDecoder {
|
||||
|
||||
template <typename T>
|
||||
struct Symbol {
|
||||
Symbol() = default;
|
||||
Symbol(HMODULE dll, const char* name) {
|
||||
if (dll) {
|
||||
ptr_symbol = reinterpret_cast<T*>(GetProcAddress(dll, name));
|
||||
}
|
||||
}
|
||||
|
||||
operator T*() const {
|
||||
return ptr_symbol;
|
||||
}
|
||||
|
||||
explicit operator bool() const {
|
||||
return ptr_symbol != nullptr;
|
||||
}
|
||||
|
||||
T* ptr_symbol = nullptr;
|
||||
};
|
||||
|
||||
// Runtime load the MF symbols to prevent mf.dll not found errors on citra load
|
||||
extern Symbol<HRESULT(ULONG, DWORD)> MFStartup;
|
||||
extern Symbol<HRESULT(void)> MFShutdown;
|
||||
extern Symbol<HRESULT(IUnknown*)> MFShutdownObject;
|
||||
extern Symbol<HRESULT(DWORD, DWORD, IMFMediaBuffer**)> MFCreateAlignedMemoryBuffer;
|
||||
extern Symbol<HRESULT(IMFSample**)> MFCreateSample;
|
||||
extern Symbol<HRESULT(GUID, UINT32, const MFT_REGISTER_TYPE_INFO*, const MFT_REGISTER_TYPE_INFO*,
|
||||
IMFActivate***, UINT32*)>
|
||||
MFTEnumEx;
|
||||
extern Symbol<HRESULT(IMFMediaType**)> MFCreateMediaType;
|
||||
|
||||
enum class MFOutputState { FatalError, OK, NeedMoreInput, NeedReconfig, HaveMoreData };
|
||||
enum class MFInputState { FatalError, OK, NotAccepted };
|
||||
|
||||
// utility functions / templates
|
||||
template <class T>
|
||||
struct MFRelease {
|
||||
void operator()(T* pointer) const {
|
||||
pointer->Release();
|
||||
};
|
||||
};
|
||||
|
||||
template <>
|
||||
struct MFRelease<IMFTransform> {
|
||||
void operator()(IMFTransform* pointer) const {
|
||||
MFShutdownObject(pointer);
|
||||
pointer->Release();
|
||||
};
|
||||
};
|
||||
|
||||
// wrapper facilities for dealing with pointers
|
||||
template <typename T>
|
||||
using unique_mfptr = std::unique_ptr<T, MFRelease<T>>;
|
||||
|
||||
template <typename SmartPtr, typename RawPtr>
|
||||
class AmpImpl {
|
||||
public:
|
||||
AmpImpl(SmartPtr& smart_ptr) : smart_ptr(smart_ptr) {}
|
||||
~AmpImpl() {
|
||||
smart_ptr.reset(raw_ptr);
|
||||
}
|
||||
|
||||
operator RawPtr*() {
|
||||
return &raw_ptr;
|
||||
}
|
||||
|
||||
private:
|
||||
SmartPtr& smart_ptr;
|
||||
RawPtr raw_ptr = nullptr;
|
||||
};
|
||||
|
||||
template <typename SmartPtr>
|
||||
auto Amp(SmartPtr& smart_ptr) {
|
||||
return AmpImpl<SmartPtr, decltype(smart_ptr.get())>(smart_ptr);
|
||||
}
|
||||
|
||||
// convient function for formatting error messages
|
||||
void ReportError(std::string msg, HRESULT hr);
|
||||
|
||||
// data type for transferring ADTS metadata between functions
|
||||
struct ADTSMeta {
|
||||
AudioCore::ADTSData ADTSHeader;
|
||||
u8 AACTag[14];
|
||||
};
|
||||
|
||||
// exported functions
|
||||
|
||||
/// Loads the symbols from mf.dll at runtime. Returns false if the symbols can't be loaded
|
||||
bool InitMFDLL();
|
||||
unique_mfptr<IMFTransform> MFDecoderInit(GUID audio_format = MFAudioFormat_AAC);
|
||||
unique_mfptr<IMFSample> CreateSample(const void* data, DWORD len, DWORD alignment = 1,
|
||||
LONGLONG duration = 0);
|
||||
bool SelectInputMediaType(IMFTransform* transform, int in_stream_id,
|
||||
const AudioCore::ADTSData& adts, const UINT8* user_data,
|
||||
UINT32 user_data_len, GUID audio_format = MFAudioFormat_AAC);
|
||||
std::optional<ADTSMeta> DetectMediaType(const u8* buffer, std::size_t len);
|
||||
bool SelectOutputMediaType(IMFTransform* transform, int out_stream_id,
|
||||
GUID audio_format = MFAudioFormat_PCM);
|
||||
void MFFlush(IMFTransform* transform);
|
||||
MFInputState SendSample(IMFTransform* transform, DWORD in_stream_id, IMFSample* in_sample);
|
||||
std::tuple<MFOutputState, unique_mfptr<IMFSample>> ReceiveSample(IMFTransform* transform,
|
||||
DWORD out_stream_id);
|
||||
std::optional<std::vector<f32>> CopySampleToBuffer(IMFSample* sample);
|
||||
|
||||
} // namespace MFDecoder
|
@ -42,10 +42,8 @@ private:
    SDL_GLContext context;
};

EmuWindow_SDL2_GL::EmuWindow_SDL2_GL(Core::System& system_, bool fullscreen, bool is_secondary)
    : EmuWindow_SDL2{system_, is_secondary} {
    // Initialize the window
    if (Settings::values.use_gles) {
static SDL_Window* CreateGLWindow(const std::string& window_title, bool gles) {
    if (gles) {
        SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
        SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 2);
        SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_ES);
@ -54,7 +52,16 @@ EmuWindow_SDL2_GL::EmuWindow_SDL2_GL(Core::System& system_, bool fullscreen, boo
        SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 3);
        SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
    }
    return SDL_CreateWindow(window_title.c_str(),
                            SDL_WINDOWPOS_UNDEFINED, // x position
                            SDL_WINDOWPOS_UNDEFINED, // y position
                            Core::kScreenTopWidth,
                            Core::kScreenTopHeight + Core::kScreenBottomHeight,
                            SDL_WINDOW_OPENGL | SDL_WINDOW_RESIZABLE | SDL_WINDOW_ALLOW_HIGHDPI);
}

EmuWindow_SDL2_GL::EmuWindow_SDL2_GL(Core::System& system_, bool fullscreen, bool is_secondary)
    : EmuWindow_SDL2{system_, is_secondary} {
    SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
    SDL_GL_SetAttribute(SDL_GL_RED_SIZE, 8);
    SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE, 8);
@ -71,16 +78,16 @@ EmuWindow_SDL2_GL::EmuWindow_SDL2_GL(Core::System& system_, bool fullscreen, boo

    std::string window_title = fmt::format("Citra {} | {}-{}", Common::g_build_fullname,
                                           Common::g_scm_branch, Common::g_scm_desc);
    render_window =
        SDL_CreateWindow(window_title.c_str(),
                         SDL_WINDOWPOS_UNDEFINED, // x position
                         SDL_WINDOWPOS_UNDEFINED, // y position
                         Core::kScreenTopWidth, Core::kScreenTopHeight + Core::kScreenBottomHeight,
                         SDL_WINDOW_OPENGL | SDL_WINDOW_RESIZABLE | SDL_WINDOW_ALLOW_HIGHDPI);

    // First, try to create a context with the requested type.
    render_window = CreateGLWindow(window_title, Settings::values.use_gles.GetValue());
    if (render_window == nullptr) {
        LOG_CRITICAL(Frontend, "Failed to create SDL2 window: {}", SDL_GetError());
        exit(1);
        // On failure, fall back to context with flipped type.
        render_window = CreateGLWindow(window_title, !Settings::values.use_gles.GetValue());
        if (render_window == nullptr) {
            LOG_CRITICAL(Frontend, "Failed to create SDL2 window: {}", SDL_GetError());
            exit(1);
        }
    }

    strict_context_required = std::strcmp(SDL_GetCurrentVideoDriver(), "wayland") == 0;
@ -106,7 +113,11 @@ EmuWindow_SDL2_GL::EmuWindow_SDL2_GL(Core::System& system_, bool fullscreen, boo
    }

    render_window_id = SDL_GetWindowID(render_window);
    auto gl_load_func = Settings::values.use_gles ? gladLoadGLES2Loader : gladLoadGLLoader;

    int profile_mask = 0;
    SDL_GL_GetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, &profile_mask);
    auto gl_load_func =
        profile_mask == SDL_GL_CONTEXT_PROFILE_ES ? gladLoadGLES2Loader : gladLoadGLLoader;

    if (!gl_load_func(static_cast<GLADloadproc>(SDL_GL_GetProcAddress))) {
        LOG_CRITICAL(Frontend, "Failed to initialize GL functions: {}", SDL_GetError());
@ -138,37 +138,50 @@ void EmuThread::run() {
}

#ifdef HAS_OPENGL
static std::unique_ptr<QOpenGLContext> CreateQOpenGLContext(bool gles) {
    QSurfaceFormat format;
    if (gles) {
        format.setRenderableType(QSurfaceFormat::RenderableType::OpenGLES);
        format.setVersion(3, 2);
    } else {
        format.setRenderableType(QSurfaceFormat::RenderableType::OpenGL);
        format.setVersion(4, 3);
    }
    format.setProfile(QSurfaceFormat::CoreProfile);

    if (Settings::values.renderer_debug) {
        format.setOption(QSurfaceFormat::FormatOption::DebugContext);
    }

    // TODO: expose a setting for buffer value (ie default/single/double/triple)
    format.setSwapBehavior(QSurfaceFormat::DefaultSwapBehavior);
    format.setSwapInterval(0);

    auto context = std::make_unique<QOpenGLContext>();
    context->setFormat(format);
    if (!context->create()) {
        LOG_ERROR(Frontend, "Unable to create OpenGL context with GLES = {}", gles);
        return nullptr;
    }
    return context;
}

class OpenGLSharedContext : public Frontend::GraphicsContext {
public:
    /// Create the original context that should be shared from
    explicit OpenGLSharedContext() {
        QSurfaceFormat format;

        if (Settings::values.use_gles) {
            format.setRenderableType(QSurfaceFormat::RenderableType::OpenGLES);
            format.setVersion(3, 2);
        } else {
            format.setRenderableType(QSurfaceFormat::RenderableType::OpenGL);
            format.setVersion(4, 3);
        }
        format.setProfile(QSurfaceFormat::CoreProfile);

        if (Settings::values.renderer_debug) {
            format.setOption(QSurfaceFormat::FormatOption::DebugContext);
        }

        // TODO: expose a setting for buffer value (ie default/single/double/triple)
        format.setSwapBehavior(QSurfaceFormat::DefaultSwapBehavior);
        format.setSwapInterval(0);

        context = std::make_unique<QOpenGLContext>();
        context->setFormat(format);
        if (!context->create()) {
            LOG_ERROR(Frontend, "Unable to create main openGL context");
        // First, try to create a context with the requested type.
        context = CreateQOpenGLContext(Settings::values.use_gles.GetValue());
        if (context == nullptr) {
            // On failure, fall back to context with flipped type.
            context = CreateQOpenGLContext(!Settings::values.use_gles.GetValue());
            if (context == nullptr) {
                LOG_ERROR(Frontend, "Unable to create any OpenGL context.");
            }
        }

        offscreen_surface = std::make_unique<QOffscreenSurface>(nullptr);
        offscreen_surface->setFormat(format);
        offscreen_surface->setFormat(context->format());
        offscreen_surface->create();
        surface = offscreen_surface.get();
    }
@ -184,7 +197,7 @@ public:
        context->setShareContext(share_context);
        context->setFormat(format);
        if (!context->create()) {
            LOG_ERROR(Frontend, "Unable to create shared openGL context");
            LOG_ERROR(Frontend, "Unable to create shared OpenGL context");
        }

        surface = main_surface;
@ -194,6 +207,10 @@ public:
        OpenGLSharedContext::DoneCurrent();
    }

    bool IsGLES() override {
        return context->format().renderableType() == QSurfaceFormat::RenderableType::OpenGLES;
    }

    void SwapBuffers() override {
        context->swapBuffers(surface);
    }
@ -739,8 +756,9 @@ bool GRenderWindow::LoadOpenGL() {
#ifdef HAS_OPENGL
    auto context = CreateSharedContext();
    auto scope = context->Acquire();
    const auto gles = context->IsGLES();

    auto gl_load_func = Settings::values.use_gles ? gladLoadGLES2Loader : gladLoadGLLoader;
    auto gl_load_func = gles ? gladLoadGLES2Loader : gladLoadGLLoader;
    if (!gl_load_func(GetProcAddressGL)) {
        QMessageBox::warning(
            this, tr("Error while initializing OpenGL!"),
@ -751,14 +769,14 @@ bool GRenderWindow::LoadOpenGL() {
    const QString renderer =
        QString::fromUtf8(reinterpret_cast<const char*>(glGetString(GL_RENDERER)));

    if (!Settings::values.use_gles && !GLAD_GL_VERSION_4_3) {
    if (!gles && !GLAD_GL_VERSION_4_3) {
        LOG_ERROR(Frontend, "GPU does not support OpenGL 4.3: {}", renderer.toStdString());
        QMessageBox::warning(this, tr("Error while initializing OpenGL 4.3!"),
                             tr("Your GPU may not support OpenGL 4.3, or you do not have the "
                                "latest graphics driver.<br><br>GL Renderer:<br>%1")
                                 .arg(renderer));
        return false;
    } else if (Settings::values.use_gles && !GLAD_GL_ES_VERSION_3_2) {
    } else if (gles && !GLAD_GL_ES_VERSION_3_2) {
        LOG_ERROR(Frontend, "GPU does not support OpenGL ES 3.2: {}", renderer.toStdString());
        QMessageBox::warning(this, tr("Error while initializing OpenGL ES 3.2!"),
                             tr("Your GPU may not support OpenGL ES 3.2, or you do not have the "
@ -560,7 +560,9 @@ void GameList::AddGamePopup(QMenu& context_menu, const QString& path, const QStr
    QAction* properties = context_menu.addAction(tr("Properties"));

    const u32 program_id_high = (program_id >> 32) & 0xFFFFFFFF;
    const bool is_application = program_id_high == 0x00040000 || program_id_high == 0x00040010;
    // TODO: Use proper bitmasks for these kinds of checks.
    const bool is_application = program_id_high == 0x00040000 || program_id_high == 0x00040002 ||
                                program_id_high == 0x00040010;

    bool opengl_cache_exists = false;
    ForEachOpenGLCacheFile(
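The TODO in the hunk above asks for bitmask-based checks. One way that could look, splitting the title-ID high word into platform and category fields, is sketched below; the mask constants and category values are assumptions for illustration, not code from this change.

#include <cstdint>

using u32 = std::uint32_t;

// Title ID high word = platform (upper 16 bits) | category (lower 16 bits).
constexpr u32 PLATFORM_MASK = 0xFFFF0000;
constexpr u32 CATEGORY_MASK = 0x0000FFFF;
constexpr u32 PLATFORM_CTR = 0x00040000;

static bool IsApplicationTitle(u32 program_id_high) {
    if ((program_id_high & PLATFORM_MASK) != PLATFORM_CTR) {
        return false;
    }
    const u32 category = program_id_high & CATEGORY_MASK;
    // Matches the literals used above: 0x0 (application), 0x2 (demo), 0x10 (system application).
    return category == 0x0 || category == 0x2 || category == 0x10;
}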
@ -21,16 +21,20 @@ add_custom_command(OUTPUT scm_rev.cpp
|
||||
"${VIDEO_CORE}/renderer_opengl/gl_shader_util.h"
|
||||
"${VIDEO_CORE}/renderer_vulkan/vk_shader_util.cpp"
|
||||
"${VIDEO_CORE}/renderer_vulkan/vk_shader_util.h"
|
||||
"${VIDEO_CORE}/shader/generator/glsl_fs_shader_gen.cpp"
|
||||
"${VIDEO_CORE}/shader/generator/glsl_fs_shader_gen.h"
|
||||
"${VIDEO_CORE}/shader/generator/glsl_shader_decompiler.cpp"
|
||||
"${VIDEO_CORE}/shader/generator/glsl_shader_decompiler.h"
|
||||
"${VIDEO_CORE}/shader/generator/glsl_shader_gen.cpp"
|
||||
"${VIDEO_CORE}/shader/generator/glsl_shader_gen.h"
|
||||
"${VIDEO_CORE}/shader/generator/pica_fs_config.cpp"
|
||||
"${VIDEO_CORE}/shader/generator/pica_fs_config.h"
|
||||
"${VIDEO_CORE}/shader/generator/shader_gen.cpp"
|
||||
"${VIDEO_CORE}/shader/generator/shader_gen.h"
|
||||
"${VIDEO_CORE}/shader/generator/shader_uniforms.cpp"
|
||||
"${VIDEO_CORE}/shader/generator/shader_uniforms.h"
|
||||
"${VIDEO_CORE}/shader/generator/spv_shader_gen.cpp"
|
||||
"${VIDEO_CORE}/shader/generator/spv_shader_gen.h"
|
||||
"${VIDEO_CORE}/shader/generator/spv_fs_shader_gen.cpp"
|
||||
"${VIDEO_CORE}/shader/generator/spv_fs_shader_gen.h"
|
||||
"${VIDEO_CORE}/shader/shader.cpp"
|
||||
"${VIDEO_CORE}/shader/shader.h"
|
||||
"${VIDEO_CORE}/pica.cpp"
|
||||
@ -53,6 +57,8 @@ add_custom_command(OUTPUT scm_rev.cpp
|
||||
add_library(citra_common STATIC
|
||||
aarch64/cpu_detect.cpp
|
||||
aarch64/cpu_detect.h
|
||||
aarch64/oaknut_abi.h
|
||||
aarch64/oaknut_util.h
|
||||
alignment.h
|
||||
android_storage.h
|
||||
android_storage.cpp
|
||||
@ -76,8 +82,6 @@ add_library(citra_common STATIC
|
||||
construct.h
|
||||
dynamic_library/dynamic_library.cpp
|
||||
dynamic_library/dynamic_library.h
|
||||
dynamic_library/fdk-aac.cpp
|
||||
dynamic_library/fdk-aac.h
|
||||
dynamic_library/ffmpeg.cpp
|
||||
dynamic_library/ffmpeg.h
|
||||
error.cpp
|
||||
@ -85,6 +89,8 @@ add_library(citra_common STATIC
|
||||
expected.h
|
||||
file_util.cpp
|
||||
file_util.h
|
||||
file_watcher.cpp
|
||||
file_watcher.h
|
||||
hash.h
|
||||
linear_disk_cache.h
|
||||
literals.h
|
||||
@ -181,6 +187,10 @@ if ("x86_64" IN_LIST ARCHITECTURE)
|
||||
target_link_libraries(citra_common PRIVATE xbyak)
|
||||
endif()
|
||||
|
||||
if ("arm64" IN_LIST ARCHITECTURE)
|
||||
target_link_libraries(citra_common PRIVATE oaknut)
|
||||
endif()
|
||||
|
||||
if (CITRA_USE_PRECOMPILED_HEADERS)
|
||||
target_precompile_headers(citra_common PRIVATE precompiled_headers.h)
|
||||
endif()
|
||||
|
155 src/common/aarch64/oaknut_abi.h Normal file
@ -0,0 +1,155 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/arch.h"
#if CITRA_ARCH(arm64)

#include <bitset>
#include <initializer_list>
#include <oaknut/oaknut.hpp>
#include "common/assert.h"

namespace Common::A64 {

constexpr std::size_t RegToIndex(const oaknut::Reg& reg) {
    ASSERT(reg.index() != 31); // ZR not allowed
    return reg.index() + (reg.is_vector() ? 32 : 0);
}

constexpr oaknut::XReg IndexToXReg(std::size_t reg_index) {
    ASSERT(reg_index <= 30);
    return oaknut::XReg(static_cast<int>(reg_index));
}

constexpr oaknut::VReg IndexToVReg(std::size_t reg_index) {
    ASSERT(reg_index >= 32 && reg_index < 64);
    return oaknut::QReg(static_cast<int>(reg_index - 32));
}

constexpr oaknut::Reg IndexToReg(std::size_t reg_index) {
    if (reg_index < 32) {
        return IndexToXReg(reg_index);
    } else {
        return IndexToVReg(reg_index);
    }
}

inline constexpr std::bitset<64> BuildRegSet(std::initializer_list<oaknut::Reg> regs) {
    std::bitset<64> bits;
    for (const oaknut::Reg& reg : regs) {
        bits.set(RegToIndex(reg));
    }
    return bits;
}

constexpr inline std::bitset<64> ABI_ALL_GPRS(0x00000000'7FFFFFFF);
constexpr inline std::bitset<64> ABI_ALL_FPRS(0xFFFFFFFF'00000000);

constexpr inline oaknut::XReg ABI_RETURN = oaknut::util::X0;
constexpr inline oaknut::XReg ABI_PARAM1 = oaknut::util::X0;
constexpr inline oaknut::XReg ABI_PARAM2 = oaknut::util::X1;
constexpr inline oaknut::XReg ABI_PARAM3 = oaknut::util::X2;
constexpr inline oaknut::XReg ABI_PARAM4 = oaknut::util::X3;

constexpr std::bitset<64> ABI_ALL_CALLER_SAVED = 0xffffffff'4000ffff;
constexpr std::bitset<64> ABI_ALL_CALLEE_SAVED = 0x0000ff00'7ff80000;

struct ABIFrameInfo {
    u32 subtraction;
    u32 fprs_offset;
};

inline ABIFrameInfo ABI_CalculateFrameSize(std::bitset<64> regs, std::size_t frame_size) {
    const size_t gprs_count = (regs & ABI_ALL_GPRS).count();
    const size_t fprs_count = (regs & ABI_ALL_FPRS).count();

    const size_t gprs_size = (gprs_count + 1) / 2 * 16;
    const size_t fprs_size = fprs_count * 16;

    size_t total_size = 0;
    total_size += gprs_size;
    const size_t fprs_base_subtraction = total_size;
    total_size += fprs_size;
    total_size += frame_size;

    return ABIFrameInfo{static_cast<u32>(total_size), static_cast<u32>(fprs_base_subtraction)};
}

inline void ABI_PushRegisters(oaknut::CodeGenerator& code, std::bitset<64> regs,
                              std::size_t frame_size = 0) {
    using namespace oaknut;
    using namespace oaknut::util;
    auto frame_info = ABI_CalculateFrameSize(regs, frame_size);

    // Allocate stack-space
    if (frame_info.subtraction != 0) {
        code.SUB(SP, SP, frame_info.subtraction);
    }

    // TODO(wunk): Push pairs of registers at a time with STP
    std::size_t offset = 0;
    for (std::size_t i = 0; i < 32; ++i) {
        if (regs[i] && ABI_ALL_GPRS[i]) {
            const XReg reg = IndexToXReg(i);
            code.STR(reg, SP, offset);
            offset += 8;
        }
    }

    offset = 0;
    for (std::size_t i = 32; i < 64; ++i) {
        if (regs[i] && ABI_ALL_FPRS[i]) {
            const VReg reg = IndexToVReg(i);
            code.STR(reg.toQ(), SP, u16(frame_info.fprs_offset + offset));
            offset += 16;
        }
    }

    // Allocate frame-space
    if (frame_size != 0) {
        code.SUB(SP, SP, frame_size);
    }
}

inline void ABI_PopRegisters(oaknut::CodeGenerator& code, std::bitset<64> regs,
                             std::size_t frame_size = 0) {
    using namespace oaknut;
    using namespace oaknut::util;
    auto frame_info = ABI_CalculateFrameSize(regs, frame_size);

    // Free frame-space
    if (frame_size != 0) {
        code.ADD(SP, SP, frame_size);
    }

    // TODO(wunk): Pop pairs of registers at a time with LDP
    std::size_t offset = 0;
    for (std::size_t i = 0; i < 32; ++i) {
        if (regs[i] && ABI_ALL_GPRS[i]) {
            const XReg reg = IndexToXReg(i);
            code.LDR(reg, SP, offset);
            offset += 8;
        }
    }

    offset = 0;
    for (std::size_t i = 32; i < 64; ++i) {
        if (regs[i] && ABI_ALL_FPRS[i]) {
            const VReg reg = IndexToVReg(i);
            code.LDR(reg.toQ(), SP, frame_info.fprs_offset + offset);
            offset += 16;
        }
    }

    // Free stack-space
    if (frame_info.subtraction != 0) {
        code.ADD(SP, SP, frame_info.subtraction);
    }
}

} // namespace Common::A64

#endif // CITRA_ARCH(arm64)
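Editorial aside: a minimal sketch of how these helpers are meant to compose inside a JIT prologue/epilogue. The wrapper function, the choice of register set, and the placement of the body are placeholders, not code from this change:

// Hypothetical usage sketch: spill the callee-saved set around generated code.
void EmitWithSavedRegisters(oaknut::CodeGenerator& code) {
    const std::bitset<64> to_save = Common::A64::ABI_ALL_CALLEE_SAVED;
    Common::A64::ABI_PushRegisters(code, to_save); // SUB SP, then STR each selected register
    // ... emit the body of the compiled routine here ...
    Common::A64::ABI_PopRegisters(code, to_save);  // LDR each register back, then ADD SP
    code.RET();
}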
43 src/common/aarch64/oaknut_util.h Normal file
@ -0,0 +1,43 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/arch.h"
#if CITRA_ARCH(arm64)

#include <type_traits>
#include <oaknut/oaknut.hpp>
#include "common/aarch64/oaknut_abi.h"

namespace Common::A64 {

// BL can only reach targets within +-128MiB (26-bit signed immediate)
inline bool IsWithin128M(uintptr_t ref, uintptr_t target) {
    const u64 distance = target - (ref + 4);
    return !(distance >= 0x800'0000ULL && distance <= ~0x800'0000ULL);
}

inline bool IsWithin128M(const oaknut::CodeGenerator& code, uintptr_t target) {
    return IsWithin128M(code.ptr<uintptr_t>(), target);
}

template <typename T>
inline void CallFarFunction(oaknut::CodeGenerator& code, const T f) {
    static_assert(std::is_pointer_v<T>, "Argument must be a (function) pointer.");
    const std::uintptr_t addr = reinterpret_cast<std::uintptr_t>(f);
    if (IsWithin128M(code, addr)) {
        code.BL(reinterpret_cast<const void*>(f));
    } else {
        // X16(IP0) and X17(IP1) are the standard veneer registers
        // LR is also available as an intermediate register
        // https://developer.arm.com/documentation/102374/0101/Procedure-Call-Standard
        code.MOVP2R(oaknut::util::X16, reinterpret_cast<const void*>(f));
        code.BLR(oaknut::util::X16);
    }
}

} // namespace Common::A64

#endif // CITRA_ARCH(arm64)
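Editorial aside: IsWithin128M relies on unsigned wrap-around; the subtraction is taken modulo 2^64, so the pair of comparisons rejects everything outside the signed range [-0x800'0000, 0x800'0000). An equivalent restatement (illustrative only, not part of this change) that makes the intent explicit:

#include <cstdint>
#include "common/common_types.h"

// Same predicate, written with a signed cast instead of the wrap-around trick.
inline bool IsWithin128M_Signed(uintptr_t ref, uintptr_t target) {
    const s64 distance = static_cast<s64>(target - (ref + 4));
    return distance >= -0x800'0000 && distance < 0x800'0000;
}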
@ -1,57 +0,0 @@
|
||||
// Copyright 2023 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "common/dynamic_library/dynamic_library.h"
|
||||
#include "common/dynamic_library/fdk-aac.h"
|
||||
#include "common/logging/log.h"
|
||||
|
||||
namespace DynamicLibrary::FdkAac {
|
||||
|
||||
aacDecoder_GetLibInfo_func aacDecoder_GetLibInfo;
|
||||
aacDecoder_Open_func aacDecoder_Open;
|
||||
aacDecoder_Close_func aacDecoder_Close;
|
||||
aacDecoder_SetParam_func aacDecoder_SetParam;
|
||||
aacDecoder_GetStreamInfo_func aacDecoder_GetStreamInfo;
|
||||
aacDecoder_DecodeFrame_func aacDecoder_DecodeFrame;
|
||||
aacDecoder_Fill_func aacDecoder_Fill;
|
||||
|
||||
static std::unique_ptr<Common::DynamicLibrary> fdk_aac;
|
||||
|
||||
#define LOAD_SYMBOL(library, name) \
|
||||
any_failed = any_failed || (name = library->GetSymbol<name##_func>(#name)) == nullptr
|
||||
|
||||
bool LoadFdkAac() {
|
||||
if (fdk_aac) {
|
||||
return true;
|
||||
}
|
||||
|
||||
fdk_aac = std::make_unique<Common::DynamicLibrary>("fdk-aac", 2);
|
||||
if (!fdk_aac->IsLoaded()) {
|
||||
LOG_WARNING(Common, "Could not dynamically load libfdk-aac: {}", fdk_aac->GetLoadError());
|
||||
fdk_aac.reset();
|
||||
return false;
|
||||
}
|
||||
|
||||
auto any_failed = false;
|
||||
LOAD_SYMBOL(fdk_aac, aacDecoder_GetLibInfo);
|
||||
LOAD_SYMBOL(fdk_aac, aacDecoder_Open);
|
||||
LOAD_SYMBOL(fdk_aac, aacDecoder_Close);
|
||||
LOAD_SYMBOL(fdk_aac, aacDecoder_SetParam);
|
||||
LOAD_SYMBOL(fdk_aac, aacDecoder_GetStreamInfo);
|
||||
LOAD_SYMBOL(fdk_aac, aacDecoder_DecodeFrame);
|
||||
LOAD_SYMBOL(fdk_aac, aacDecoder_Fill);
|
||||
|
||||
if (any_failed) {
|
||||
LOG_WARNING(Common, "Could not find all required functions in libfdk-aac.");
|
||||
fdk_aac.reset();
|
||||
return false;
|
||||
}
|
||||
|
||||
LOG_INFO(Common, "Successfully loaded libfdk-aac.");
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace DynamicLibrary::FdkAac
|
@ -1,34 +0,0 @@
|
||||
// Copyright 2023 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
extern "C" {
|
||||
#include <fdk-aac/aacdecoder_lib.h>
|
||||
}
|
||||
|
||||
namespace DynamicLibrary::FdkAac {
|
||||
|
||||
typedef INT (*aacDecoder_GetLibInfo_func)(LIB_INFO* info);
|
||||
typedef HANDLE_AACDECODER (*aacDecoder_Open_func)(TRANSPORT_TYPE transportFmt, UINT nrOfLayers);
|
||||
typedef void (*aacDecoder_Close_func)(HANDLE_AACDECODER self);
|
||||
typedef AAC_DECODER_ERROR (*aacDecoder_SetParam_func)(const HANDLE_AACDECODER self,
|
||||
const AACDEC_PARAM param, const INT value);
|
||||
typedef CStreamInfo* (*aacDecoder_GetStreamInfo_func)(HANDLE_AACDECODER self);
|
||||
typedef AAC_DECODER_ERROR (*aacDecoder_DecodeFrame_func)(HANDLE_AACDECODER self, INT_PCM* pTimeData,
|
||||
const INT timeDataSize, const UINT flags);
|
||||
typedef AAC_DECODER_ERROR (*aacDecoder_Fill_func)(HANDLE_AACDECODER self, UCHAR* pBuffer[],
|
||||
const UINT bufferSize[], UINT* bytesValid);
|
||||
|
||||
extern aacDecoder_GetLibInfo_func aacDecoder_GetLibInfo;
|
||||
extern aacDecoder_Open_func aacDecoder_Open;
|
||||
extern aacDecoder_Close_func aacDecoder_Close;
|
||||
extern aacDecoder_SetParam_func aacDecoder_SetParam;
|
||||
extern aacDecoder_GetStreamInfo_func aacDecoder_GetStreamInfo;
|
||||
extern aacDecoder_DecodeFrame_func aacDecoder_DecodeFrame;
|
||||
extern aacDecoder_Fill_func aacDecoder_Fill;
|
||||
|
||||
bool LoadFdkAac();
|
||||
|
||||
} // namespace DynamicLibrary::FdkAac
|
@ -534,7 +534,6 @@ u64 ScanDirectoryTree(const std::string& directory, FSTEntry& parent_entry,
}

void GetAllFilesFromNestedEntries(FSTEntry& directory, std::vector<FSTEntry>& output) {
    std::vector<FSTEntry> files;
    for (auto& entry : directory.children) {
        if (entry.isDirectory) {
            GetAllFilesFromNestedEntries(entry, output);
148 src/common/file_watcher.cpp Normal file
@ -0,0 +1,148 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <windows.h>

#include <array>
#include <atomic>
#include <string>
#include <thread>

#include "common/assert.h"
#include "common/file_watcher.h"

namespace Common {

static FileAction Win32ActionToFileAction(DWORD action) {
    switch (action) {
    case FILE_ACTION_ADDED:
        return FileAction::Added;
    case FILE_ACTION_REMOVED:
        return FileAction::Removed;
    case FILE_ACTION_MODIFIED:
        return FileAction::Modified;
    case FILE_ACTION_RENAMED_OLD_NAME:
        return FileAction::RenamedOldName;
    case FILE_ACTION_RENAMED_NEW_NAME:
        return FileAction::RenamedNewName;
    default:
        UNREACHABLE_MSG("Unknown action {}", action);
        return FileAction::Invalid;
    }
}

struct FileWatcher::Impl {
    explicit Impl(const std::string& path, FileWatcher::Callback&& callback_)
        : callback{callback_} {
        // Create file handle for the directory we are watching.
        dir_handle =
            CreateFile(path.c_str(), FILE_LIST_DIRECTORY | GENERIC_READ,
                       FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, NULL, OPEN_EXISTING,
                       FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OVERLAPPED, NULL);
        ASSERT_MSG(dir_handle != INVALID_HANDLE_VALUE, "Unable to create watch file");

        // Create an event that will terminate the thread when fired.
        termination_event = CreateEvent(NULL, TRUE, FALSE, NULL);
        ASSERT_MSG(termination_event != INVALID_HANDLE_VALUE, "Unable to create watch event");

        // Create an event that will wake up the watcher thread on filesystem changes.
        overlapped.hEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
        ASSERT_MSG(overlapped.hEvent != INVALID_HANDLE_VALUE, "Unable to create watch event");

        // Create the watcher thread.
        watch_thread = std::thread([this] { WatcherThread(); });
    }

    ~Impl() {
        // Signal watcher thread to terminate.
        SetEvent(termination_event);

        // Wait for said termination.
        if (watch_thread.joinable()) {
            watch_thread.join();
        }

        // Close used handles.
        CancelIo(dir_handle);
        GetOverlappedResult(dir_handle, &overlapped, &num_bytes_read, TRUE);
        CloseHandle(termination_event);
        CloseHandle(overlapped.hEvent);
    }

    void WatcherThread() {
        const std::array wait_handles{overlapped.hEvent, termination_event};
        while (is_running) {
            bool result =
                ReadDirectoryChangesW(dir_handle, buffer.data(), buffer.size(), TRUE,
                                      FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_DIR_NAME |
                                          FILE_NOTIFY_CHANGE_LAST_WRITE,
                                      NULL, &overlapped, NULL);
            ASSERT_MSG(result, "Unable to read directory changes: {}", GetLastErrorMsg());

            // Sleep until we receive a file changed notification or a termination event.
            switch (
                WaitForMultipleObjects(wait_handles.size(), wait_handles.data(), FALSE, INFINITE)) {
            case WAIT_OBJECT_0: {
                // Retrieve asynchronously the data from ReadDirectoryChangesW.
                result = GetOverlappedResult(dir_handle, &overlapped, &num_bytes_read, TRUE);
                ASSERT_MSG(result, "Unable to retrieve overlapped result: {}", GetLastErrorMsg());

                // Notify about file changes.
                NotifyFileChanges();
                break;
            }
            case WAIT_OBJECT_0 + 1:
                is_running = false;
                break;
            case WAIT_FAILED:
                UNREACHABLE_MSG("Failed waiting for file watcher events: {}", GetLastErrorMsg());
                break;
            }
        }
    }

    void NotifyFileChanges() {
        // If no data was read we have nothing to do.
        if (num_bytes_read == 0) [[unlikely]] {
            return;
        }

        u32 next_entry_offset{};
        while (true) {
            // Retrieve file notify information.
            auto fni =
                reinterpret_cast<FILE_NOTIFY_INFORMATION*>(buffer.data() + next_entry_offset);

            // Call the callback function informing about the change.
            if (fni->Action != 0) {
                std::string file_name(fni->FileNameLength / sizeof(WCHAR), ' ');
                WideCharToMultiByte(CP_UTF8, 0, fni->FileName, fni->FileNameLength / sizeof(WCHAR),
                                    file_name.data(), file_name.size(), NULL, NULL);
                const FileAction action = Win32ActionToFileAction(fni->Action);
                callback(file_name, action);
            }

            // If this was the last action, break.
            if (fni->NextEntryOffset == 0) {
                break;
            }

            // Move to next fni structure.
            next_entry_offset += fni->NextEntryOffset;
        }
    }

private:
    static constexpr size_t DirectoryWatcherBufferSize = 4096;
    FileWatcher::Callback callback;
    HANDLE dir_handle{};
    HANDLE termination_event{};
    OVERLAPPED overlapped{};
    std::array<u8, DirectoryWatcherBufferSize> buffer{};
    std::atomic_bool is_running{true};
    DWORD num_bytes_read{};
    std::thread watch_thread;
};

FileWatcher::FileWatcher(const std::string& log_dir, Callback&& callback)
    : impl{std::make_unique<Impl>(log_dir, std::move(callback))} {}

FileWatcher::~FileWatcher() = default;

} // namespace Common

33 src/common/file_watcher.h Normal file
@ -0,0 +1,33 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <functional>
#include <limits>
#include <memory>
#include <string>

#include "common/common_types.h"

namespace Common {

enum class FileAction : u8 {
    Added,
    Removed,
    Modified,
    RenamedOldName,
    RenamedNewName,
    Invalid = std::numeric_limits<u8>::max(),
};

class FileWatcher {
    using Callback = std::function<void(const std::string&, FileAction)>;

public:
    explicit FileWatcher(const std::string& log_dir, Callback&& callback);
    ~FileWatcher();

private:
    struct Impl;
    std::unique_ptr<Impl> impl;
};

} // namespace Common
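Editorial aside: the implementation above is Windows-only (it is built on ReadDirectoryChangesW). A minimal usage sketch of the new watcher follows; the directory path, the helper name, and the callback body are placeholders, while the custom texture manager changes later in this diff wire it up for real:

#include <memory>
#include <string>
#include "common/file_watcher.h"
#include "common/logging/log.h"

// Keep the returned watcher alive for as long as changes should be observed;
// destroying it joins the internal watcher thread.
std::unique_ptr<Common::FileWatcher> WatchTextureDir(const std::string& path) {
    return std::make_unique<Common::FileWatcher>(
        path, [](const std::string& file, Common::FileAction action) {
            LOG_INFO(Common, "File '{}' changed (action={})", file, static_cast<u32>(action));
        });
}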
@ -25,7 +25,7 @@ std::size_t DirectRomFSReader::ReadFile(std::size_t offset, std::size_t length,
            d.Seek(crypto_offset + offset);
            d.ProcessData(buffer, buffer, length);
        }
        // LOG_INFO(Service_FS, "Cache SKIP: offset={}, length={}", offset, length);
        LOG_TRACE(Service_FS, "RomFS Cache SKIP: offset={}, length={}", offset, length);
        return length;
    }

@ -44,11 +44,11 @@ std::size_t DirectRomFSReader::ReadFile(std::size_t offset, std::size_t length,
                d.Seek(crypto_offset + page);
                d.ProcessData(cache_entry.second.data(), cache_entry.second.data(), read_size);
            }
            // LOG_INFO(Service_FS, "Cache MISS: page={}, length={}, into={}", page, seg.second,
            //          (seg.first - page));
            LOG_TRACE(Service_FS, "RomFS Cache MISS: page={}, length={}, into={}", page, seg.second,
                      (seg.first - page));
        } else {
            // LOG_INFO(Service_FS, "Cache HIT: page={}, length={}, into={}", page, seg.second,
            //          (seg.first - page));
            LOG_TRACE(Service_FS, "RomFS Cache HIT: page={}, length={}, into={}", page, seg.second,
                      (seg.first - page));
        }
        size_t copy_amount =
            (read_size > (seg.first - page))

@ -77,6 +77,11 @@ class GraphicsContext {
public:
    virtual ~GraphicsContext();

    /// Checks whether this context uses OpenGL ES.
    virtual bool IsGLES() {
        return false;
    }

    /// Inform the driver to swap the front/back buffers and present the current image
    virtual void SwapBuffers(){};
|
||||
|
@ -6,7 +6,6 @@
#include "common/archives.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "core/global.h"
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/kernel.h"
@ -21,7 +20,7 @@ void AddressArbiter::WaitThread(std::shared_ptr<Thread> thread, VAddr wait_addre
    waiting_threads.emplace_back(std::move(thread));
}

void AddressArbiter::ResumeAllThreads(VAddr address) {
u64 AddressArbiter::ResumeAllThreads(VAddr address) {
    // Determine which threads are waiting on this address, those should be woken up.
    auto itr = std::stable_partition(waiting_threads.begin(), waiting_threads.end(),
                                     [address](const auto& thread) {
@ -31,13 +30,15 @@ void AddressArbiter::ResumeAllThreads(VAddr address) {
                                     });

    // Wake up all the found threads
    const u64 num_threads = std::distance(itr, waiting_threads.end());
    std::for_each(itr, waiting_threads.end(), [](auto& thread) { thread->ResumeFromWait(); });

    // Remove the woken up threads from the wait list.
    waiting_threads.erase(itr, waiting_threads.end());
    return num_threads;
}

std::shared_ptr<Thread> AddressArbiter::ResumeHighestPriorityThread(VAddr address) {
bool AddressArbiter::ResumeHighestPriorityThread(VAddr address) {
    // Determine which threads are waiting on this address, those should be considered for wakeup.
    auto matches_start = std::stable_partition(
        waiting_threads.begin(), waiting_threads.end(), [address](const auto& thread) {
@ -54,14 +55,15 @@ std::shared_ptr<Thread> AddressArbiter::ResumeHighestPriorityThread(VAddr addres
        return lhs->current_priority < rhs->current_priority;
    });

    if (itr == waiting_threads.end())
        return nullptr;
    if (itr == waiting_threads.end()) {
        return false;
    }

    auto thread = *itr;
    thread->ResumeFromWait();

    waiting_threads.erase(itr);
    return thread;

    return true;
}

AddressArbiter::AddressArbiter(KernelSystem& kernel)
@ -107,17 +109,28 @@ ResultCode AddressArbiter::ArbitrateAddress(std::shared_ptr<Thread> thread, Arbi
    switch (type) {

    // Signal thread(s) waiting for arbitrate address...
    case ArbitrationType::Signal:
    case ArbitrationType::Signal: {
        u64 num_threads{};

        // Negative value means resume all threads
        if (value < 0) {
            ResumeAllThreads(address);
            num_threads = ResumeAllThreads(address);
        } else {
            // Resume first N threads
            for (int i = 0; i < value; i++)
                ResumeHighestPriorityThread(address);
            for (s32 i = 0; i < value; i++) {
                num_threads += ResumeHighestPriorityThread(address);
            }
        }

        // Prevents lag from low priority threads that spam svcArbitrateAddress and wake no threads
        // The tick count is taken directly from official HOS kernel. The priority value is one less
        // than official kernel as the affected FMV threads don't meet the priority threshold of 50.
        // TODO: Revisit this when scheduler is rewritten and adjust if there isn't a problem there.
        if (num_threads == 0 && thread->current_priority >= 49) {
            kernel.current_cpu->GetTimer().AddTicks(1614u);
        }
        break;
    }
    // Wait current thread (acquire the arbiter)...
    case ArbitrationType::WaitIfLessThan:
        if ((s32)kernel.memory.Read32(address) < value) {

@ -65,11 +65,11 @@ private:
    void WaitThread(std::shared_ptr<Thread> thread, VAddr wait_address);

    /// Resume all threads found to be waiting on the address under this address arbiter
    void ResumeAllThreads(VAddr address);
    u64 ResumeAllThreads(VAddr address);

    /// Resume one thread found to be waiting on the address under this address arbiter and return
    /// the resumed thread.
    std::shared_ptr<Thread> ResumeHighestPriorityThread(VAddr address);
    bool ResumeHighestPriorityThread(VAddr address);

    /// Threads waiting for the address arbiter to be signaled.
    std::vector<std::shared_ptr<Thread>> waiting_threads;
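Editorial aside, for scale: assuming the 268,111,856 Hz ARM11 base clock Citra has historically used for core timing (treat that constant as an assumption here, it is not part of this diff), the 1614-tick charge added above works out to roughly 1614 / 268,111,856 s ≈ 6.0 µs per signal that wakes no threads. That is enough to stop a low-priority thread spinning on svcArbitrateAddress from monopolizing the emulated scheduler, while a caller that actually wakes a waiter pays nothing extra.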
@ -12,11 +12,10 @@ add_executable(tests
|
||||
core/memory/vm_manager.cpp
|
||||
precompiled_headers.h
|
||||
audio_core/hle/hle.cpp
|
||||
audio_core/hle/adts_reader.cpp
|
||||
audio_core/lle/lle.cpp
|
||||
audio_core/audio_fixures.h
|
||||
audio_core/decoder_tests.cpp
|
||||
video_core/shader/shader_jit_x64_compiler.cpp
|
||||
video_core/shader/shader_jit_compiler.cpp
|
||||
)
|
||||
|
||||
create_target_directory_groups(tests)
|
||||
|
@ -1,77 +0,0 @@
|
||||
// Copyright 2023 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <catch2/catch_test_macros.hpp>
|
||||
#include <fmt/format.h>
|
||||
|
||||
#include "audio_core/hle/adts.h"
|
||||
|
||||
namespace {
|
||||
constexpr std::array<u32, 16> freq_table = {96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050,
|
||||
16000, 12000, 11025, 8000, 7350, 0, 0, 0};
|
||||
constexpr std::array<u8, 8> channel_table = {0, 1, 2, 3, 4, 5, 6, 8};
|
||||
|
||||
AudioCore::ADTSData ParseADTS_Old(const unsigned char* buffer) {
|
||||
u32 tmp = 0;
|
||||
AudioCore::ADTSData out{};
|
||||
|
||||
// sync word 0xfff
|
||||
tmp = (buffer[0] << 8) | (buffer[1] & 0xf0);
|
||||
if ((tmp & 0xffff) != 0xfff0) {
|
||||
out.length = 0;
|
||||
return out;
|
||||
}
|
||||
// bit 16 = no CRC
|
||||
out.header_length = (buffer[1] & 0x1) ? 7 : 9;
|
||||
out.mpeg2 = (buffer[1] >> 3) & 0x1;
|
||||
// bit 17 to 18
|
||||
out.profile = (buffer[2] >> 6) + 1;
|
||||
// bit 19 to 22
|
||||
tmp = (buffer[2] >> 2) & 0xf;
|
||||
out.samplerate_idx = tmp;
|
||||
out.samplerate = (tmp > 15) ? 0 : freq_table[tmp];
|
||||
// bit 24 to 26
|
||||
tmp = ((buffer[2] & 0x1) << 2) | ((buffer[3] >> 6) & 0x3);
|
||||
out.channel_idx = tmp;
|
||||
out.channels = (tmp > 7) ? 0 : channel_table[tmp];
|
||||
|
||||
// bit 55 to 56
|
||||
out.framecount = (buffer[6] & 0x3) + 1;
|
||||
|
||||
// bit 31 to 43
|
||||
tmp = (buffer[3] & 0x3) << 11;
|
||||
tmp |= (buffer[4] << 3) & 0x7f8;
|
||||
tmp |= (buffer[5] >> 5) & 0x7;
|
||||
|
||||
out.length = tmp;
|
||||
|
||||
return out;
|
||||
}
|
||||
} // namespace
|
||||
|
||||
TEST_CASE("ParseADTS fuzz", "[audio_core][hle]") {
|
||||
for (u32 i = 0; i < 0x10000; i++) {
|
||||
std::array<u8, 7> adts_header;
|
||||
std::string adts_header_string = "ADTS Header: ";
|
||||
for (auto& it : adts_header) {
|
||||
it = static_cast<u8>(rand());
|
||||
adts_header_string.append(fmt::format("{:2X} ", it));
|
||||
}
|
||||
INFO(adts_header_string);
|
||||
|
||||
AudioCore::ADTSData out_old_impl =
|
||||
ParseADTS_Old(reinterpret_cast<const unsigned char*>(adts_header.data()));
|
||||
AudioCore::ADTSData out = AudioCore::ParseADTS(adts_header.data());
|
||||
|
||||
REQUIRE(out_old_impl.length == out.length);
|
||||
REQUIRE(out_old_impl.channels == out.channels);
|
||||
REQUIRE(out_old_impl.channel_idx == out.channel_idx);
|
||||
REQUIRE(out_old_impl.framecount == out.framecount);
|
||||
REQUIRE(out_old_impl.header_length == out.header_length);
|
||||
REQUIRE(out_old_impl.mpeg2 == out.mpeg2);
|
||||
REQUIRE(out_old_impl.profile == out.profile);
|
||||
REQUIRE(out_old_impl.samplerate == out.samplerate);
|
||||
REQUIRE(out_old_impl.samplerate_idx == out.samplerate_idx);
|
||||
}
|
||||
}
|
@ -1,9 +1,9 @@
|
||||
// Copyright 2017 Citra Emulator Project
|
||||
// Copyright 2023 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "common/arch.h"
|
||||
#if CITRA_ARCH(x86_64)
|
||||
#if CITRA_ARCH(x86_64) || CITRA_ARCH(arm64)
|
||||
|
||||
#include <algorithm>
|
||||
#include <cmath>
|
||||
@ -14,7 +14,11 @@
|
||||
#include <fmt/format.h>
|
||||
#include <nihstro/inline_assembly.h>
|
||||
#include "video_core/shader/shader_interpreter.h"
|
||||
#if CITRA_ARCH(x86_64)
|
||||
#include "video_core/shader/shader_jit_x64_compiler.h"
|
||||
#elif CITRA_ARCH(arm64)
|
||||
#include "video_core/shader/shader_jit_a64_compiler.h"
|
||||
#endif
|
||||
|
||||
using JitShader = Pica::Shader::JitShader;
|
||||
using ShaderInterpreter = Pica::Shader::InterpreterEngine;
|
||||
@ -31,6 +35,18 @@ static constexpr Common::Vec4f vec4_zero = Common::Vec4f::AssignToAll(0.0f);
|
||||
|
||||
namespace Catch {
|
||||
template <>
|
||||
struct StringMaker<Common::Vec2f> {
|
||||
static std::string convert(Common::Vec2f value) {
|
||||
return fmt::format("({}, {})", value.x, value.y);
|
||||
}
|
||||
};
|
||||
template <>
|
||||
struct StringMaker<Common::Vec3f> {
|
||||
static std::string convert(Common::Vec3f value) {
|
||||
return fmt::format("({}, {}, {})", value.r(), value.g(), value.b());
|
||||
}
|
||||
};
|
||||
template <>
|
||||
struct StringMaker<Common::Vec4f> {
|
||||
static std::string convert(Common::Vec4f value) {
|
||||
return fmt::format("({}, {}, {}, {})", value.r(), value.g(), value.b(), value.a());
|
||||
@ -59,6 +75,11 @@ public:
|
||||
shader_jit.Compile(&shader_setup->program_code, &shader_setup->swizzle_data);
|
||||
}
|
||||
|
||||
explicit ShaderTest(std::unique_ptr<Pica::Shader::ShaderSetup> input_shader_setup)
|
||||
: shader_setup(std::move(input_shader_setup)) {
|
||||
shader_jit.Compile(&shader_setup->program_code, &shader_setup->swizzle_data);
|
||||
}
|
||||
|
||||
Common::Vec4f Run(std::span<const Common::Vec4f> inputs) {
|
||||
Pica::Shader::UnitState shader_unit;
|
||||
RunJit(shader_unit, inputs);
|
||||
@ -144,6 +165,41 @@ TEST_CASE("ADD", "[video_core][shader][shader_jit]") {
|
||||
REQUIRE(std::isinf(shader.Run({INFINITY, -1.0f}).x));
|
||||
}
|
||||
|
||||
TEST_CASE("CALL", "[video_core][shader][shader_jit]") {
|
||||
const auto sh_input = SourceRegister::MakeInput(0);
|
||||
const auto sh_output = DestRegister::MakeOutput(0);
|
||||
|
||||
auto shader_setup = CompileShaderSetup({
|
||||
{OpCode::Id::NOP}, // call foo
|
||||
{OpCode::Id::END},
|
||||
// .proc foo
|
||||
{OpCode::Id::NOP}, // call ex2
|
||||
{OpCode::Id::END},
|
||||
// .proc ex2
|
||||
{OpCode::Id::EX2, sh_output, sh_input},
|
||||
{OpCode::Id::END},
|
||||
});
|
||||
|
||||
// nihstro does not support the CALL* instructions, so the instruction-binary must be manually
|
||||
// inserted here:
|
||||
nihstro::Instruction CALL = {};
|
||||
CALL.opcode = nihstro::OpCode(nihstro::OpCode::Id::CALL);
|
||||
|
||||
// call foo
|
||||
CALL.flow_control.dest_offset = 2;
|
||||
CALL.flow_control.num_instructions = 1;
|
||||
shader_setup->program_code[0] = CALL.hex;
|
||||
|
||||
// call ex2
|
||||
CALL.flow_control.dest_offset = 4;
|
||||
CALL.flow_control.num_instructions = 1;
|
||||
shader_setup->program_code[2] = CALL.hex;
|
||||
|
||||
auto shader = ShaderTest(std::move(shader_setup));
|
||||
|
||||
REQUIRE(shader.Run(0.f).x == Catch::Approx(1.f));
|
||||
}
|
||||
|
||||
TEST_CASE("DP3", "[video_core][shader][shader_jit]") {
|
||||
const auto sh_input1 = SourceRegister::MakeInput(0);
|
||||
const auto sh_input2 = SourceRegister::MakeInput(1);
|
||||
@ -395,6 +451,39 @@ TEST_CASE("RSQ", "[video_core][shader][shader_jit]") {
|
||||
REQUIRE(shader.Run({0.0625f}).x == Catch::Approx(4.0f).margin(0.004f));
|
||||
}
|
||||
|
||||
TEST_CASE("Uniform Read", "[video_core][shader][shader_jit]") {
|
||||
const auto sh_input = SourceRegister::MakeInput(0);
|
||||
const auto sh_c0 = SourceRegister::MakeFloat(0);
|
||||
const auto sh_output = DestRegister::MakeOutput(0);
|
||||
|
||||
auto shader = ShaderTest({
|
||||
// mova a0.x, sh_input.x
|
||||
{OpCode::Id::MOVA, DestRegister{}, "x", sh_input, "x", SourceRegister{}, "",
|
||||
nihstro::InlineAsm::RelativeAddress::A1},
|
||||
// mov sh_output.xyzw, c0[a0.x].xyzw
|
||||
{OpCode::Id::MOV, sh_output, "xyzw", sh_c0, "xyzw", SourceRegister{}, "",
|
||||
nihstro::InlineAsm::RelativeAddress::A1},
|
||||
{OpCode::Id::END},
|
||||
});
|
||||
|
||||
// Prepare shader uniforms
|
||||
std::array<Common::Vec4f, 96> f_uniforms = {};
|
||||
for (u32 i = 0; i < 96; ++i) {
|
||||
const float color = (i * 2.0f) / 255.0f;
|
||||
const auto color_f24 = Pica::f24::FromFloat32(color);
|
||||
shader.shader_setup->uniforms.f[i] = {color_f24, color_f24, color_f24, Pica::f24::One()};
|
||||
f_uniforms[i] = {color, color, color, 1.0f};
|
||||
}
|
||||
|
||||
for (u32 i = 0; i < 96; ++i) {
|
||||
const float index = static_cast<float>(i);
|
||||
// Add some fractional values to test proper float->integer truncation
|
||||
const float fractional = (i % 17) / 17.0f;
|
||||
|
||||
REQUIRE(shader.Run(index + fractional) == f_uniforms[i]);
|
||||
}
|
||||
}
|
||||
|
||||
TEST_CASE("Address Register Offset", "[video_core][shader][shader_jit]") {
|
||||
const auto sh_input = SourceRegister::MakeInput(0);
|
||||
const auto sh_c40 = SourceRegister::MakeFloat(40);
|
||||
@ -445,23 +534,83 @@ TEST_CASE("Address Register Offset", "[video_core][shader][shader_jit]") {
|
||||
REQUIRE(shader.Run(-129.f) == f_uniforms[40]);
|
||||
}
|
||||
|
||||
// TODO: Requires fix from https://github.com/neobrain/nihstro/issues/68
|
||||
// TEST_CASE("MAD", "[video_core][shader][shader_jit]") {
|
||||
// const auto sh_input1 = SourceRegister::MakeInput(0);
|
||||
// const auto sh_input2 = SourceRegister::MakeInput(1);
|
||||
// const auto sh_input3 = SourceRegister::MakeInput(2);
|
||||
// const auto sh_output = DestRegister::MakeOutput(0);
|
||||
TEST_CASE("Dest Mask", "[video_core][shader][shader_jit]") {
|
||||
const auto sh_input = SourceRegister::MakeInput(0);
|
||||
const auto sh_output = DestRegister::MakeOutput(0);
|
||||
|
||||
// auto shader = ShaderTest({
|
||||
// {OpCode::Id::MAD, sh_output, sh_input1, sh_input2, sh_input3},
|
||||
// {OpCode::Id::END},
|
||||
// });
|
||||
const auto shader = [&sh_input, &sh_output](const char* dest_mask) {
|
||||
return std::unique_ptr<ShaderTest>(new ShaderTest{
|
||||
{OpCode::Id::MOV, sh_output, dest_mask, sh_input, "xyzw", SourceRegister{}, ""},
|
||||
{OpCode::Id::END},
|
||||
});
|
||||
};
|
||||
|
||||
// REQUIRE(shader.Run({vec4_inf, vec4_zero, vec4_zero}).x == 0.0f);
|
||||
// REQUIRE(std::isnan(shader.Run({vec4_nan, vec4_zero, vec4_zero}).x));
|
||||
const Common::Vec4f iota_vec = {1.0f, 2.0f, 3.0f, 4.0f};
|
||||
|
||||
// REQUIRE(shader.Run({vec4_one, vec4_one, vec4_one}).x == 2.0f);
|
||||
// }
|
||||
REQUIRE(shader("x")->Run({iota_vec}).x == iota_vec.x);
|
||||
REQUIRE(shader("y")->Run({iota_vec}).y == iota_vec.y);
|
||||
REQUIRE(shader("z")->Run({iota_vec}).z == iota_vec.z);
|
||||
REQUIRE(shader("w")->Run({iota_vec}).w == iota_vec.w);
|
||||
REQUIRE(shader("xy")->Run({iota_vec}).xy() == iota_vec.xy());
|
||||
REQUIRE(shader("xz")->Run({iota_vec}).xz() == iota_vec.xz());
|
||||
REQUIRE(shader("xw")->Run({iota_vec}).xw() == iota_vec.xw());
|
||||
REQUIRE(shader("yz")->Run({iota_vec}).yz() == iota_vec.yz());
|
||||
REQUIRE(shader("yw")->Run({iota_vec}).yw() == iota_vec.yw());
|
||||
REQUIRE(shader("zw")->Run({iota_vec}).zw() == iota_vec.zw());
|
||||
REQUIRE(shader("xyz")->Run({iota_vec}).xyz() == iota_vec.xyz());
|
||||
REQUIRE(shader("xyw")->Run({iota_vec}).xyw() == iota_vec.xyw());
|
||||
REQUIRE(shader("xzw")->Run({iota_vec}).xzw() == iota_vec.xzw());
|
||||
REQUIRE(shader("yzw")->Run({iota_vec}).yzw() == iota_vec.yzw());
|
||||
REQUIRE(shader("xyzw")->Run({iota_vec}) == iota_vec);
|
||||
}
|
||||
|
||||
TEST_CASE("MAD", "[video_core][shader][shader_jit]") {
|
||||
const auto sh_input1 = SourceRegister::MakeInput(0);
|
||||
const auto sh_input2 = SourceRegister::MakeInput(1);
|
||||
const auto sh_input3 = SourceRegister::MakeInput(2);
|
||||
const auto sh_output = DestRegister::MakeOutput(0);
|
||||
|
||||
auto shader_setup = CompileShaderSetup({
|
||||
// TODO: Requires fix from https://github.com/neobrain/nihstro/issues/68
|
||||
// {OpCode::Id::MAD, sh_output, sh_input1, sh_input2, sh_input3},
|
||||
{OpCode::Id::NOP},
|
||||
{OpCode::Id::END},
|
||||
});
|
||||
|
||||
// nihstro does not support the MAD* instructions, so the instruction-binary must be manually
|
||||
// inserted here:
|
||||
nihstro::Instruction MAD = {};
|
||||
MAD.opcode = nihstro::OpCode::Id::MAD;
|
||||
MAD.mad.operand_desc_id = 0;
|
||||
MAD.mad.src1 = sh_input1;
|
||||
MAD.mad.src2 = sh_input2;
|
||||
MAD.mad.src3 = sh_input3;
|
||||
MAD.mad.dest = sh_output;
|
||||
shader_setup->program_code[0] = MAD.hex;
|
||||
|
||||
nihstro::SwizzlePattern swizzle = {};
|
||||
swizzle.dest_mask = 0b1111;
|
||||
swizzle.SetSelectorSrc1(0, SwizzlePattern::Selector::x);
|
||||
swizzle.SetSelectorSrc1(1, SwizzlePattern::Selector::y);
|
||||
swizzle.SetSelectorSrc1(2, SwizzlePattern::Selector::z);
|
||||
swizzle.SetSelectorSrc1(3, SwizzlePattern::Selector::w);
|
||||
swizzle.SetSelectorSrc2(0, SwizzlePattern::Selector::x);
|
||||
swizzle.SetSelectorSrc2(1, SwizzlePattern::Selector::y);
|
||||
swizzle.SetSelectorSrc2(2, SwizzlePattern::Selector::z);
|
||||
swizzle.SetSelectorSrc2(3, SwizzlePattern::Selector::w);
|
||||
swizzle.SetSelectorSrc3(0, SwizzlePattern::Selector::x);
|
||||
swizzle.SetSelectorSrc3(1, SwizzlePattern::Selector::y);
|
||||
swizzle.SetSelectorSrc3(2, SwizzlePattern::Selector::z);
|
||||
swizzle.SetSelectorSrc3(3, SwizzlePattern::Selector::w);
|
||||
shader_setup->swizzle_data[0] = swizzle.hex;
|
||||
|
||||
auto shader = ShaderTest(std::move(shader_setup));
|
||||
|
||||
REQUIRE(shader.Run({vec4_zero, vec4_zero, vec4_zero}) == vec4_zero);
|
||||
REQUIRE(shader.Run({vec4_one, vec4_one, vec4_one}) == (vec4_one * 2.0f));
|
||||
REQUIRE(shader.Run({vec4_inf, vec4_zero, vec4_zero}) == vec4_zero);
|
||||
REQUIRE(shader.Run({vec4_nan, vec4_zero, vec4_zero}) == vec4_nan);
|
||||
}
|
||||
|
||||
TEST_CASE("Nested Loop", "[video_core][shader][shader_jit]") {
|
||||
const auto sh_input = SourceRegister::MakeInput(0);
|
||||
@ -518,4 +667,42 @@ TEST_CASE("Nested Loop", "[video_core][shader][shader_jit]") {
|
||||
}
|
||||
}
|
||||
|
||||
#endif // CITRA_ARCH(x86_64)
|
||||
TEST_CASE("Source Swizzle", "[video_core][shader][shader_jit]") {
|
||||
const auto sh_input = SourceRegister::MakeInput(0);
|
||||
const auto sh_output = DestRegister::MakeOutput(0);
|
||||
|
||||
const auto shader = [&sh_input, &sh_output](const char* swizzle) {
|
||||
return std::unique_ptr<ShaderTest>(new ShaderTest{
|
||||
{OpCode::Id::MOV, sh_output, "xyzw", sh_input, swizzle, SourceRegister{}, ""},
|
||||
{OpCode::Id::END},
|
||||
});
|
||||
};
|
||||
|
||||
const Common::Vec4f iota_vec = {1.0f, 2.0f, 3.0f, 4.0f};
|
||||
|
||||
REQUIRE(shader("x")->Run({iota_vec}).x == iota_vec.x);
|
||||
REQUIRE(shader("y")->Run({iota_vec}).x == iota_vec.y);
|
||||
REQUIRE(shader("z")->Run({iota_vec}).x == iota_vec.z);
|
||||
REQUIRE(shader("w")->Run({iota_vec}).x == iota_vec.w);
|
||||
REQUIRE(shader("xy")->Run({iota_vec}).xy() == iota_vec.xy());
|
||||
REQUIRE(shader("xz")->Run({iota_vec}).xy() == iota_vec.xz());
|
||||
REQUIRE(shader("xw")->Run({iota_vec}).xy() == iota_vec.xw());
|
||||
REQUIRE(shader("yz")->Run({iota_vec}).xy() == iota_vec.yz());
|
||||
REQUIRE(shader("yw")->Run({iota_vec}).xy() == iota_vec.yw());
|
||||
REQUIRE(shader("zw")->Run({iota_vec}).xy() == iota_vec.zw());
|
||||
REQUIRE(shader("yy")->Run({iota_vec}).xy() == iota_vec.yy());
|
||||
REQUIRE(shader("wx")->Run({iota_vec}).xy() == iota_vec.wx());
|
||||
REQUIRE(shader("xyz")->Run({iota_vec}).xyz() == iota_vec.xyz());
|
||||
REQUIRE(shader("xyw")->Run({iota_vec}).xyz() == iota_vec.xyw());
|
||||
REQUIRE(shader("xzw")->Run({iota_vec}).xyz() == iota_vec.xzw());
|
||||
REQUIRE(shader("yzw")->Run({iota_vec}).xyz() == iota_vec.yzw());
|
||||
REQUIRE(shader("yyy")->Run({iota_vec}).xyz() == iota_vec.yyy());
|
||||
REQUIRE(shader("yxw")->Run({iota_vec}).xyz() == iota_vec.yxw());
|
||||
REQUIRE(shader("xyzw")->Run({iota_vec}) == iota_vec);
|
||||
REQUIRE(shader("wzxy")->Run({iota_vec}) ==
|
||||
Common::Vec4f(iota_vec.w, iota_vec.z, iota_vec.x, iota_vec.y));
|
||||
REQUIRE(shader("yyyy")->Run({iota_vec}) ==
|
||||
Common::Vec4f(iota_vec.y, iota_vec.y, iota_vec.y, iota_vec.y));
|
||||
}
|
||||
|
||||
#endif // CITRA_ARCH(x86_64) || CITRA_ARCH(arm64)
|
@ -135,20 +135,29 @@ add_library(video_core STATIC
|
||||
renderer_vulkan/vk_texture_runtime.cpp
|
||||
renderer_vulkan/vk_texture_runtime.h
|
||||
shader/debug_data.h
|
||||
shader/generator/glsl_fs_shader_gen.cpp
|
||||
shader/generator/glsl_fs_shader_gen.h
|
||||
shader/generator/glsl_shader_decompiler.cpp
|
||||
shader/generator/glsl_shader_decompiler.h
|
||||
shader/generator/glsl_shader_gen.cpp
|
||||
shader/generator/glsl_shader_gen.h
|
||||
shader/generator/pica_fs_config.cpp
|
||||
shader/generator/pica_fs_config.h
|
||||
shader/generator/profile.h
|
||||
shader/generator/shader_gen.cpp
|
||||
shader/generator/shader_gen.h
|
||||
shader/generator/shader_uniforms.cpp
|
||||
shader/generator/shader_uniforms.h
|
||||
shader/generator/spv_shader_gen.cpp
|
||||
shader/generator/spv_shader_gen.h
|
||||
shader/generator/spv_fs_shader_gen.cpp
|
||||
shader/generator/spv_fs_shader_gen.h
|
||||
shader/shader.cpp
|
||||
shader/shader.h
|
||||
shader/shader_interpreter.cpp
|
||||
shader/shader_interpreter.h
|
||||
shader/shader_jit_a64.cpp
|
||||
shader/shader_jit_a64_compiler.cpp
|
||||
shader/shader_jit_a64.h
|
||||
shader/shader_jit_a64_compiler.h
|
||||
shader/shader_jit_x64.cpp
|
||||
shader/shader_jit_x64_compiler.cpp
|
||||
shader/shader_jit_x64.h
|
||||
@ -177,6 +186,10 @@ if ("x86_64" IN_LIST ARCHITECTURE)
|
||||
target_link_libraries(video_core PUBLIC xbyak)
|
||||
endif()
|
||||
|
||||
if ("arm64" IN_LIST ARCHITECTURE)
|
||||
target_link_libraries(video_core PUBLIC oaknut)
|
||||
endif()
|
||||
|
||||
if (CITRA_USE_PRECOMPILED_HEADERS)
|
||||
target_precompile_headers(video_core PRIVATE precompiled_headers.h)
|
||||
endif()
|
||||
|
@ -1,9 +1,10 @@
|
||||
// Copyright 2023 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma clang optimize off
|
||||
#include <json.hpp>
|
||||
#include "common/file_util.h"
|
||||
#include "common/file_watcher.h"
|
||||
#include "common/memory_detect.h"
|
||||
#include "common/microprofile.h"
|
||||
#include "common/settings.h"
|
||||
@ -16,6 +17,7 @@
|
||||
#include "video_core/custom_textures/custom_tex_manager.h"
|
||||
#include "video_core/rasterizer_cache/surface_params.h"
|
||||
#include "video_core/rasterizer_cache/utils.h"
|
||||
#include "video_core/renderer_base.h"
|
||||
|
||||
namespace VideoCore {
|
||||
|
||||
@ -63,17 +65,17 @@ void CustomTexManager::TickFrame() {
|
||||
return;
|
||||
}
|
||||
std::size_t num_uploads = 0;
|
||||
for (auto it = async_uploads.begin(); it != async_uploads.end();) {
|
||||
for (auto it = async_actions.begin(); it != async_actions.end();) {
|
||||
if (num_uploads >= MAX_UPLOADS_PER_TICK) {
|
||||
return;
|
||||
}
|
||||
switch (it->material->state) {
|
||||
case DecodeState::Decoded:
|
||||
it->func();
|
||||
it->func(it->material);
|
||||
num_uploads++;
|
||||
[[fallthrough]];
|
||||
case DecodeState::Failed:
|
||||
it = async_uploads.erase(it);
|
||||
it = async_actions.erase(it);
|
||||
continue;
|
||||
default:
|
||||
it++;
|
||||
@ -102,7 +104,7 @@ void CustomTexManager::FindCustomTextures() {
|
||||
if (file.isDirectory) {
|
||||
continue;
|
||||
}
|
||||
custom_textures.push_back(std::make_unique<CustomTexture>(image_interface));
|
||||
custom_textures.emplace_back(std::make_unique<CustomTexture>(image_interface));
|
||||
CustomTexture* const texture{custom_textures.back().get()};
|
||||
if (!ParseFilename(file, texture)) {
|
||||
continue;
|
||||
@ -292,16 +294,17 @@ Material* CustomTexManager::GetMaterial(u64 data_hash) {
|
||||
return it->second.get();
|
||||
}
|
||||
|
||||
bool CustomTexManager::Decode(Material* material, std::function<bool()>&& upload) {
|
||||
bool CustomTexManager::Decode(Material* material, AsyncFunc&& upload) {
|
||||
if (!async_custom_loading) {
|
||||
material->LoadFromDisk(flip_png_files);
|
||||
return upload();
|
||||
return upload(material);
|
||||
}
|
||||
if (material->IsUnloaded()) {
|
||||
material->state = DecodeState::Pending;
|
||||
workers->QueueWork([material, this] { material->LoadFromDisk(flip_png_files); });
|
||||
}
|
||||
async_uploads.push_back({
|
||||
std::scoped_lock lock{async_actions_mutex};
|
||||
async_actions.push_back({
|
||||
.material = material,
|
||||
.func = std::move(upload),
|
||||
});
|
||||
@ -374,6 +377,14 @@ std::vector<FileUtil::FSTEntry> CustomTexManager::GetTextures(u64 title_id) {
|
||||
FileUtil::CreateFullPath(load_path);
|
||||
}
|
||||
|
||||
const auto callback = [this](const std::string& file, Common::FileAction action) {
|
||||
OnFileAction(file, action);
|
||||
};
|
||||
|
||||
// Create a file watcher to monitor any changes to the textures directory for hot-reloading.
|
||||
file_watcher = std::make_unique<Common::FileWatcher>(load_path, callback);
|
||||
|
||||
// Retrieve all texture files.
|
||||
FileUtil::FSTEntry texture_dir;
|
||||
std::vector<FileUtil::FSTEntry> textures;
|
||||
FileUtil::ScanDirectoryTree(load_path, texture_dir, 64);
|
||||
@ -386,4 +397,24 @@ void CustomTexManager::CreateWorkers() {
|
||||
workers = std::make_unique<Common::ThreadWorker>(num_workers, "Custom textures");
|
||||
}
|
||||
|
||||
void CustomTexManager::OnFileAction(const std::string& file, Common::FileAction action) {
|
||||
const auto invalidate = [this](const Material* material) -> bool {
|
||||
for (const SurfaceParams* params : material->loaded_to) {
|
||||
system.Renderer().Rasterizer()->InvalidateRegion(params->addr, params->size);
|
||||
}
|
||||
return true;
|
||||
};
|
||||
|
||||
const std::string filename{FileUtil::GetFilename(file)};
|
||||
const auto& hashes = path_to_hash_map[filename];
|
||||
std::scoped_lock lock{async_actions_mutex};
|
||||
|
||||
for (const Hash hash : hashes) {
|
||||
async_actions.push_back({
|
||||
.material = material_map[hash].get(),
|
||||
.func = std::move(invalidate),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace VideoCore
|
||||
|
@ -12,6 +12,11 @@
|
||||
#include "video_core/custom_textures/material.h"
|
||||
#include "video_core/rasterizer_interface.h"
|
||||
|
||||
namespace Common {
|
||||
class FileWatcher;
|
||||
enum class FileAction : u8;
|
||||
} // namespace Common
|
||||
|
||||
namespace Core {
|
||||
class System;
|
||||
}
|
||||
@ -24,12 +29,10 @@ namespace VideoCore {
|
||||
|
||||
class SurfaceParams;
|
||||
|
||||
struct AsyncUpload {
|
||||
const Material* material;
|
||||
std::function<bool()> func;
|
||||
};
|
||||
|
||||
class CustomTexManager {
|
||||
using Hash = u64;
|
||||
using AsyncFunc = std::function<bool(const Material*)>;
|
||||
|
||||
public:
|
||||
explicit CustomTexManager(Core::System& system);
|
||||
~CustomTexManager();
|
||||
@ -57,7 +60,7 @@ public:
|
||||
Material* GetMaterial(u64 data_hash);
|
||||
|
||||
/// Decodes the textures in material to a consumable format and uploads it.
|
||||
bool Decode(Material* material, std::function<bool()>&& upload);
|
||||
bool Decode(Material* material, AsyncFunc&& func);
|
||||
|
||||
/// True when mipmap uploads should be skipped (legacy packs only)
|
||||
bool SkipMipmaps() const noexcept {
|
||||
@ -79,15 +82,25 @@ private:
|
||||
/// Creates the thread workers.
|
||||
void CreateWorkers();
|
||||
|
||||
/// Callback for when a custom texture file is modified.
|
||||
void OnFileAction(const std::string& file, Common::FileAction action);
|
||||
|
||||
private:
|
||||
struct AsyncAction {
|
||||
const Material* material;
|
||||
AsyncFunc func;
|
||||
};
|
||||
|
||||
Core::System& system;
|
||||
Frontend::ImageInterface& image_interface;
|
||||
std::unordered_set<u64> dumped_textures;
|
||||
std::unordered_map<u64, std::unique_ptr<Material>> material_map;
|
||||
std::unordered_map<std::string, std::vector<u64>> path_to_hash_map;
|
||||
std::unordered_set<Hash> dumped_textures;
|
||||
std::unordered_map<Hash, std::unique_ptr<Material>> material_map;
|
||||
std::unordered_map<std::string, std::vector<Hash>> path_to_hash_map;
|
||||
std::vector<std::unique_ptr<CustomTexture>> custom_textures;
|
||||
std::list<AsyncUpload> async_uploads;
|
||||
std::mutex async_actions_mutex;
|
||||
std::list<AsyncAction> async_actions;
|
||||
std::unique_ptr<Common::ThreadWorker> workers;
|
||||
std::unique_ptr<Common::FileWatcher> file_watcher;
|
||||
bool textures_loaded{false};
|
||||
bool async_custom_loading{true};
|
||||
bool skip_mipmap{false};
|
||||
|
@ -18,6 +18,8 @@ class ImageInterface;
|
||||
|
||||
namespace VideoCore {
|
||||
|
||||
class SurfaceParams;
|
||||
|
||||
enum class MapType : u32 {
|
||||
Color = 0,
|
||||
Normal = 1,
|
||||
@ -72,6 +74,7 @@ struct Material {
|
||||
u64 hash;
|
||||
CustomPixelFormat format;
|
||||
std::array<CustomTexture*, MAX_MAPS> textures;
|
||||
mutable std::vector<SurfaceParams*> loaded_to;
|
||||
std::atomic<DecodeState> state{};
|
||||
|
||||
void LoadFromDisk(bool flip_png) noexcept;
|
||||
|
@ -7,6 +7,7 @@
|
||||
#include "common/vector_math.h"
|
||||
#include "video_core/rasterizer_interface.h"
|
||||
#include "video_core/regs_texturing.h"
|
||||
#include "video_core/shader/generator/pica_fs_config.h"
|
||||
#include "video_core/shader/generator/shader_uniforms.h"
|
||||
|
||||
namespace Memory {
|
||||
@ -153,6 +154,7 @@ protected:
|
||||
Pica::Regs& regs;
|
||||
|
||||
std::vector<HardwareVertex> vertex_batch;
|
||||
Pica::Shader::UserConfig user_config{};
|
||||
bool shader_dirty = true;
|
||||
|
||||
VSUniformBlockData vs_uniform_block_data{};
|
||||
@ -166,4 +168,5 @@ protected:
|
||||
std::array<Common::Vec4f, 256> proctex_lut_data{};
|
||||
std::array<Common::Vec4f, 256> proctex_diff_lut_data{};
|
||||
};
|
||||
|
||||
} // namespace VideoCore
|
||||
|
@ -653,7 +653,6 @@ FramebufferHelper<T> RasterizerCache<T>::GetFramebufferSurfaces(bool using_color
|
||||
static_cast<u32>(std::clamp(viewport_rect.bottom, 0, framebuffer_height)),
|
||||
};
|
||||
|
||||
// get color and depth surfaces
|
||||
SurfaceParams color_params;
|
||||
color_params.is_tiled = true;
|
||||
color_params.res_scale = resolution_scale_factor;
|
||||
@ -672,14 +671,6 @@ FramebufferHelper<T> RasterizerCache<T>::GetFramebufferSurfaces(bool using_color
|
||||
auto color_vp_interval = color_params.GetSubRectInterval(viewport_clamped);
|
||||
auto depth_vp_interval = depth_params.GetSubRectInterval(viewport_clamped);
|
||||
|
||||
// Make sure that framebuffers don't overlap if both color and depth are being used
|
||||
if (using_color_fb && using_depth_fb &&
|
||||
boost::icl::length(color_vp_interval & depth_vp_interval)) {
|
||||
LOG_CRITICAL(HW_GPU, "Color and depth framebuffer memory regions overlap; "
|
||||
"overlapping framebuffers not supported!");
|
||||
using_depth_fb = false;
|
||||
}
|
||||
|
||||
Common::Rectangle<u32> color_rect{};
|
||||
SurfaceId color_id{};
|
||||
u32 color_level{};
|
||||
@ -713,11 +704,13 @@ FramebufferHelper<T> RasterizerCache<T>::GetFramebufferSurfaces(bool using_color
|
||||
|
||||
if (color_id) {
|
||||
color_level = color_surface->LevelOf(color_params.addr);
|
||||
color_surface->flags |= SurfaceFlagBits::RenderTarget;
|
||||
ValidateSurface(color_id, boost::icl::first(color_vp_interval),
|
||||
boost::icl::length(color_vp_interval));
|
||||
}
|
||||
if (depth_id) {
|
||||
depth_level = depth_surface->LevelOf(depth_params.addr);
|
||||
depth_surface->flags |= SurfaceFlagBits::RenderTarget;
|
||||
ValidateSurface(depth_id, boost::icl::first(depth_vp_interval),
|
||||
boost::icl::length(depth_vp_interval));
|
||||
}
|
||||
@ -991,7 +984,9 @@ void RasterizerCache<T>::UploadSurface(Surface& surface, SurfaceInterval interva
|
||||
DecodeTexture(load_info, load_info.addr, load_info.end, upload_data, staging.mapped,
|
||||
runtime.NeedsConversion(surface.pixel_format));
|
||||
|
||||
if (dump_textures && False(surface.flags & SurfaceFlagBits::Custom)) {
|
||||
const bool should_dump = False(surface.flags & SurfaceFlagBits::Custom) &&
|
||||
False(surface.flags & SurfaceFlagBits::RenderTarget);
|
||||
if (dump_textures && should_dump) {
|
||||
const u64 hash = ComputeHash(load_info, upload_data);
|
||||
const u32 level = surface.LevelOf(load_info.addr);
|
||||
custom_tex_manager.DumpTexture(load_info, level, upload_data, hash);
|
||||
@ -1048,7 +1043,7 @@ bool RasterizerCache<T>::UploadCustomSurface(SurfaceId surface_id, SurfaceInterv
|
||||
|
||||
surface.flags |= SurfaceFlagBits::Custom;
|
||||
|
||||
const auto upload = [this, level, surface_id, material]() -> bool {
|
||||
const auto upload = [this, level, surface_id](const Material* material) -> bool {
|
||||
ASSERT_MSG(True(slot_surfaces[surface_id].flags & SurfaceFlagBits::Custom),
|
||||
"Surface is not suitable for custom upload, aborting!");
|
||||
if (!slot_surfaces[surface_id].IsCustom()) {
|
||||
|
@ -11,7 +11,18 @@ namespace VideoCore {
|
||||
|
||||
SurfaceBase::SurfaceBase(const SurfaceParams& params) : SurfaceParams{params} {}
|
||||
|
||||
SurfaceBase::~SurfaceBase() = default;
|
||||
SurfaceBase::SurfaceBase(const SurfaceParams& params, const Material* mat)
|
||||
: SurfaceParams{params}, material{mat} {
|
||||
custom_format = material->format;
|
||||
material->loaded_to.push_back(this);
|
||||
}
|
||||
|
||||
SurfaceBase::~SurfaceBase() {
|
||||
if (!material) {
|
||||
return;
|
||||
}
|
||||
std::erase_if(material->loaded_to, [this](SurfaceParams* params) { return params == this; });
|
||||
}
|
||||
|
||||
bool SurfaceBase::CanFill(const SurfaceParams& dest_surface, SurfaceInterval fill_interval) const {
|
||||
if (type == SurfaceType::Fill && IsRegionValid(fill_interval) &&
|
||||
|
@ -15,17 +15,19 @@ using SurfaceRegions = boost::icl::interval_set<PAddr, std::less, SurfaceInterva
|
||||
struct Material;
|
||||
|
||||
enum class SurfaceFlagBits : u32 {
|
||||
Registered = 1 << 0, ///< Surface is registed in the rasterizer cache.
|
||||
Picked = 1 << 1, ///< Surface has been picked when searching for a match.
|
||||
Tracked = 1 << 2, ///< Surface is part of a texture cube and should be tracked.
|
||||
Custom = 1 << 3, ///< Surface texture has been replaced with a custom texture.
|
||||
ShadowMap = 1 << 4, ///< Surface is used during shadow rendering.
|
||||
Registered = 1 << 0, ///< Surface is registed in the rasterizer cache.
|
||||
Picked = 1 << 1, ///< Surface has been picked when searching for a match.
|
||||
Tracked = 1 << 2, ///< Surface is part of a texture cube and should be tracked.
|
||||
Custom = 1 << 3, ///< Surface texture has been replaced with a custom texture.
|
||||
ShadowMap = 1 << 4, ///< Surface is used during shadow rendering.
|
||||
RenderTarget = 1 << 5, ///< Surface was a render target.
|
||||
};
|
||||
DECLARE_ENUM_FLAG_OPERATORS(SurfaceFlagBits);
|
||||
|
||||
class SurfaceBase : public SurfaceParams {
|
||||
public:
|
||||
SurfaceBase(const SurfaceParams& params);
|
||||
explicit SurfaceBase(const SurfaceParams& params);
|
||||
explicit SurfaceBase(const SurfaceParams& params, const Material* mat);
|
||||
~SurfaceBase();
|
||||
|
||||
/// Returns true when this surface can be used to fill the fill_interval of dest_surface
|
||||
|
@ -179,8 +179,8 @@ void Driver::CheckExtensionSupport() {
|
||||
arb_texture_compression_bptc = GLAD_GL_ARB_texture_compression_bptc;
|
||||
clip_cull_distance = !is_gles || GLAD_GL_EXT_clip_cull_distance;
|
||||
ext_texture_compression_s3tc = GLAD_GL_EXT_texture_compression_s3tc;
|
||||
shader_framebuffer_fetch =
|
||||
GLAD_GL_EXT_shader_framebuffer_fetch || GLAD_GL_ARM_shader_framebuffer_fetch;
|
||||
ext_shader_framebuffer_fetch = GLAD_GL_EXT_shader_framebuffer_fetch;
|
||||
arm_shader_framebuffer_fetch = GLAD_GL_ARM_shader_framebuffer_fetch;
|
||||
blend_minmax_factor = GLAD_GL_AMD_blend_minmax_factor || GLAD_GL_NV_blend_minmax_factor;
|
||||
is_suitable = GLAD_GL_VERSION_4_3 || GLAD_GL_ES_VERSION_3_1;
|
||||
}
|
||||
|
@ -107,7 +107,15 @@ public:
|
||||
|
||||
/// Returns true if the implementation supports (EXT/ARM)_shader_framebuffer_fetch
|
||||
bool HasShaderFramebufferFetch() const {
|
||||
return shader_framebuffer_fetch;
|
||||
return ext_shader_framebuffer_fetch || arm_shader_framebuffer_fetch;
|
||||
}
|
||||
|
||||
bool HasExtFramebufferFetch() const {
|
||||
return ext_shader_framebuffer_fetch;
|
||||
}
|
||||
|
||||
bool HasArmShaderFramebufferFetch() const {
|
||||
return arm_shader_framebuffer_fetch;
|
||||
}
|
||||
|
||||
/// Returns true if the implementation supports (NV/AMD)_blend_minmax_factor
|
||||
@ -136,7 +144,8 @@ private:
|
||||
bool clip_cull_distance{};
|
||||
bool ext_texture_compression_s3tc{};
|
||||
bool arb_texture_compression_bptc{};
|
||||
bool shader_framebuffer_fetch{};
|
||||
bool arm_shader_framebuffer_fetch{};
|
||||
bool ext_shader_framebuffer_fetch{};
|
||||
bool blend_minmax_factor{};
|
||||
|
||||
std::string_view gl_version{};
|
||||
|
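Splitting the combined flag into separate EXT and ARM queries lets shader generation pick the matching GLSL directive instead of guessing. A hedged sketch of such a helper; the function itself is illustrative and not part of this change:

#include <string>

#include "video_core/renderer_opengl/gl_driver.h"

// Choose the framebuffer-fetch extension directive that matches what the driver reports.
std::string EmitFramebufferFetchExtension(const OpenGL::Driver& driver) {
    if (driver.HasExtFramebufferFetch()) {
        return "#extension GL_EXT_shader_framebuffer_fetch : enable\n";
    }
    if (driver.HasArmShaderFramebufferFetch()) {
        return "#extension GL_ARM_shader_framebuffer_fetch : enable\n";
    }
    return ""; // no fetch support: a fallback path has to be used instead
}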
@ -426,7 +426,7 @@ bool RasterizerOpenGL::Draw(bool accelerate, bool is_indexed) {
|
||||
|
||||
// Sync and bind the shader
|
||||
if (shader_dirty) {
|
||||
shader_manager.UseFragmentShader(regs, use_custom_normal);
|
||||
shader_manager.UseFragmentShader(regs, user_config);
|
||||
shader_dirty = false;
|
||||
}
|
||||
|
||||
@ -479,7 +479,7 @@ void RasterizerOpenGL::SyncTextureUnits(const Framebuffer* framebuffer) {
|
||||
|
||||
// Reset transient draw state
|
||||
state.color_buffer.texture_2d = 0;
|
||||
use_custom_normal = false;
|
||||
user_config = {};
|
||||
|
||||
const auto pica_textures = regs.texturing.GetTextures();
|
||||
for (u32 texture_index = 0; texture_index < pica_textures.size(); ++texture_index) {
|
||||
@ -577,20 +577,15 @@ void RasterizerOpenGL::BindMaterial(u32 texture_index, Surface& surface) {
|
||||
return;
|
||||
}
|
||||
|
||||
const auto bind_texture = [&](const TextureUnits::TextureUnit& unit, GLuint texture,
|
||||
GLuint sampler) {
|
||||
glActiveTexture(unit.Enum());
|
||||
glBindTexture(GL_TEXTURE_2D, texture);
|
||||
glBindSampler(unit.id, sampler);
|
||||
};
|
||||
|
||||
const GLuint sampler = state.texture_units[texture_index].sampler;
|
||||
if (surface.HasNormalMap()) {
|
||||
if (regs.lighting.disable) {
|
||||
LOG_WARNING(Render_OpenGL, "Custom normal map used but scene has no lights enabled");
|
||||
}
|
||||
bind_texture(TextureUnits::TextureNormalMap, surface.Handle(2), sampler);
|
||||
use_custom_normal = true;
|
||||
glActiveTexture(TextureUnits::TextureNormalMap.Enum());
|
||||
glBindTexture(GL_TEXTURE_2D, surface.Handle(2));
|
||||
glBindSampler(TextureUnits::TextureNormalMap.id, sampler);
|
||||
user_config.use_custom_normal.Assign(1);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -155,7 +155,6 @@ private:
|
||||
OGLTexture texture_buffer_lut_lf;
|
||||
OGLTexture texture_buffer_lut_rg;
|
||||
OGLTexture texture_buffer_lut_rgba;
|
||||
bool use_custom_normal{};
|
||||
bool emulate_minmax_blend{};
|
||||
};
|
||||
|
||||
|
@ -14,10 +14,12 @@
|
||||
#include "video_core/renderer_opengl/gl_shader_disk_cache.h"
|
||||
#include "video_core/renderer_opengl/gl_shader_manager.h"
|
||||
#include "video_core/renderer_opengl/gl_state.h"
|
||||
#include "video_core/shader/generator/shader_uniforms.h"
|
||||
#include "video_core/shader/generator/glsl_fs_shader_gen.h"
|
||||
#include "video_core/shader/generator/profile.h"
|
||||
#include "video_core/video_core.h"
|
||||
|
||||
using namespace Pica::Shader::Generator;
|
||||
using Pica::Shader::FSConfig;
|
||||
|
||||
namespace OpenGL {
|
||||
|
||||
@ -146,17 +148,20 @@ private:
|
||||
OGLShaderStage program;
|
||||
};
|
||||
|
||||
template <typename KeyConfigType, std::string (*CodeGenerator)(const KeyConfigType&, bool),
|
||||
GLenum ShaderType>
|
||||
template <typename KeyConfigType, auto CodeGenerator, GLenum ShaderType>
|
||||
class ShaderCache {
|
||||
public:
|
||||
explicit ShaderCache(bool separable) : separable(separable) {}
|
||||
std::tuple<GLuint, std::optional<std::string>> Get(const KeyConfigType& config) {
|
||||
explicit ShaderCache(bool separable_) : separable{separable_} {}
|
||||
~ShaderCache() = default;
|
||||
|
||||
template <typename... Args>
|
||||
std::tuple<GLuint, std::optional<std::string>> Get(const KeyConfigType& config,
|
||||
Args&&... args) {
|
||||
auto [iter, new_shader] = shaders.emplace(config, OGLShaderStage{separable});
|
||||
OGLShaderStage& cached_shader = iter->second;
|
||||
std::optional<std::string> result{};
|
||||
if (new_shader) {
|
||||
result = CodeGenerator(config, separable);
|
||||
result = CodeGenerator(config, args...);
|
||||
cached_shader.Create(result->c_str(), ShaderType);
|
||||
}
|
||||
return {cached_shader.GetHandle(), std::move(result)};
|
||||
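Switching the generator parameter to auto plus a parameter pack lets one cache template serve generators with different trailing arguments (bool separable for the vertex/geometry paths, const Profile& for the new fragment path). A simplified, self-contained sketch of the same pattern with toy types; every name below is illustrative:

#include <cstddef>
#include <functional>
#include <string>
#include <unordered_map>
#include <utility>

struct ToyConfig {
    int value{};
    bool operator==(const ToyConfig&) const = default;
};

namespace std {
template <>
struct hash<ToyConfig> {
    std::size_t operator()(const ToyConfig& c) const noexcept {
        return std::hash<int>{}(c.value);
    }
};
} // namespace std

std::string GenerateToyShader(const ToyConfig& config, bool separable) {
    return (separable ? "separable:" : "monolithic:") + std::to_string(config.value);
}

// Same shape as ShaderCache above: the generator's extra arguments are supplied
// by the caller of Get() and only used the first time a config is seen.
template <typename KeyConfigType, auto CodeGenerator>
class ToyShaderCache {
public:
    template <typename... Args>
    const std::string& Get(const KeyConfigType& config, Args&&... args) {
        auto [iter, is_new] = sources.try_emplace(config);
        if (is_new) {
            iter->second = CodeGenerator(config, std::forward<Args>(args)...);
        }
        return iter->second;
    }

private:
    std::unordered_map<KeyConfigType, std::string> sources;
};

// Usage: ToyShaderCache<ToyConfig, &GenerateToyShader> cache; cache.Get(ToyConfig{.value = 1}, true);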
@ -243,8 +248,7 @@ using ProgrammableVertexShaders =
|
||||
using FixedGeometryShaders =
|
||||
ShaderCache<PicaFixedGSConfig, &GLSL::GenerateFixedGeometryShader, GL_GEOMETRY_SHADER>;
|
||||
|
||||
using FragmentShaders =
|
||||
ShaderCache<PicaFSConfig, &GLSL::GenerateFragmentShader, GL_FRAGMENT_SHADER>;
|
||||
using FragmentShaders = ShaderCache<FSConfig, &GLSL::GenerateFragmentShader, GL_FRAGMENT_SHADER>;
|
||||
|
||||
class ShaderProgramManager::Impl {
|
||||
public:
|
||||
@ -252,8 +256,24 @@ public:
|
||||
: separable(separable), programmable_vertex_shaders(separable),
|
||||
trivial_vertex_shader(driver, separable), fixed_geometry_shaders(separable),
|
||||
fragment_shaders(separable), disk_cache(separable) {
|
||||
if (separable)
|
||||
if (separable) {
|
||||
pipeline.Create();
|
||||
}
|
||||
profile = Pica::Shader::Profile{
|
||||
.has_separable_shaders = separable,
|
||||
.has_clip_planes = driver.HasClipCullDistance(),
|
||||
.has_geometry_shader = true,
|
||||
.has_custom_border_color = true,
|
||||
.has_fragment_shader_interlock = false,
|
||||
.has_blend_minmax_factor = driver.HasBlendMinMaxFactor(),
|
||||
.has_minus_one_to_one_range = true,
|
||||
.has_logic_op = !driver.IsOpenGLES(),
|
||||
.has_gl_ext_framebuffer_fetch = driver.HasExtFramebufferFetch(),
|
||||
.has_gl_arm_framebuffer_fetch = driver.HasArmShaderFramebufferFetch(),
|
||||
.has_gl_nv_fragment_shader_interlock = driver.GetVendor() == Vendor::Nvidia,
|
||||
.has_gl_intel_fragment_shader_interlock = driver.GetVendor() == Vendor::Intel,
|
||||
.is_vulkan = false,
|
||||
};
|
||||
}
|
||||
|
||||
struct ShaderTuple {
|
||||
@ -283,7 +303,7 @@ public:
|
||||
"ShaderTuple layout changed!");
|
||||
|
||||
bool separable;
|
||||
|
||||
Pica::Shader::Profile profile{};
|
||||
ShaderTuple current;
|
||||
|
||||
ProgrammableVertexShaders programmable_vertex_shaders;
|
||||
@ -336,7 +356,7 @@ void ShaderProgramManager::UseTrivialVertexShader() {
|
||||
|
||||
void ShaderProgramManager::UseFixedGeometryShader(const Pica::Regs& regs) {
|
||||
PicaFixedGSConfig gs_config(regs, driver.HasClipCullDistance());
|
||||
auto [handle, _] = impl->fixed_geometry_shaders.Get(gs_config);
|
||||
auto [handle, _] = impl->fixed_geometry_shaders.Get(gs_config, impl->separable);
|
||||
impl->current.gs = handle;
|
||||
impl->current.gs_hash = gs_config.Hash();
|
||||
}
|
||||
@ -346,12 +366,12 @@ void ShaderProgramManager::UseTrivialGeometryShader() {
|
||||
impl->current.gs_hash = 0;
|
||||
}
|
||||
|
||||
void ShaderProgramManager::UseFragmentShader(const Pica::Regs& regs, bool use_normal) {
|
||||
PicaFSConfig config(regs, false, driver.IsOpenGLES(), false, driver.HasBlendMinMaxFactor(),
|
||||
use_normal);
|
||||
auto [handle, result] = impl->fragment_shaders.Get(config);
|
||||
void ShaderProgramManager::UseFragmentShader(const Pica::Regs& regs,
|
||||
const Pica::Shader::UserConfig& user) {
|
||||
const FSConfig fs_config{regs, user, impl->profile};
|
||||
auto [handle, result] = impl->fragment_shaders.Get(fs_config, impl->profile);
|
||||
impl->current.fs = handle;
|
||||
impl->current.fs_hash = config.Hash();
|
||||
impl->current.fs_hash = fs_config.Hash();
|
||||
// Save FS to the disk cache if it's a new shader
|
||||
if (result) {
|
||||
auto& disk_cache = impl->disk_cache;
|
||||
@ -470,8 +490,8 @@ void ShaderProgramManager::LoadDiskCache(const std::atomic_bool& stop_loading,
|
||||
impl->programmable_vertex_shaders.Inject(conf, decomp->second.code,
|
||||
std::move(shader));
|
||||
} else if (raw.GetProgramType() == ProgramType::FS) {
|
||||
PicaFSConfig conf(raw.GetRawShaderConfig(), false, driver.IsOpenGLES(), false,
|
||||
driver.HasBlendMinMaxFactor());
|
||||
// TODO: Support UserConfig in disk shader cache
|
||||
const FSConfig conf(raw.GetRawShaderConfig(), {}, impl->profile);
|
||||
std::scoped_lock lock(mutex);
|
||||
impl->fragment_shaders.Inject(conf, std::move(shader));
|
||||
} else {
|
||||
@ -581,14 +601,14 @@ void ShaderProgramManager::LoadDiskCache(const std::atomic_bool& stop_loading,
|
||||
std::scoped_lock lock(mutex);
|
||||
impl->programmable_vertex_shaders.Inject(conf, code, std::move(stage));
|
||||
} else if (raw.GetProgramType() == ProgramType::FS) {
|
||||
PicaFSConfig conf(raw.GetRawShaderConfig(), false, driver.IsOpenGLES(), false,
|
||||
driver.HasBlendMinMaxFactor());
|
||||
code = GLSL::GenerateFragmentShader(conf, impl->separable);
|
||||
// TODO: Support UserConfig in disk shader cache
|
||||
const FSConfig fs_config{raw.GetRawShaderConfig(), {}, impl->profile};
|
||||
code = GLSL::GenerateFragmentShader(fs_config, impl->profile);
|
||||
OGLShaderStage stage{impl->separable};
|
||||
stage.Create(code.c_str(), GL_FRAGMENT_SHADER);
|
||||
handle = stage.GetHandle();
|
||||
std::scoped_lock lock(mutex);
|
||||
impl->fragment_shaders.Inject(conf, std::move(stage));
|
||||
impl->fragment_shaders.Inject(fs_config, std::move(stage));
|
||||
} else {
|
||||
// Unsupported shader type got stored somehow so nuke the cache
|
||||
LOG_ERROR(Frontend, "failed to load raw ProgramType {}", raw.GetProgramType());
|
||||
|
@ -17,7 +17,8 @@ struct Regs;
|
||||
|
||||
namespace Pica::Shader {
|
||||
struct ShaderSetup;
|
||||
}
|
||||
union UserConfig;
|
||||
} // namespace Pica::Shader
|
||||
|
||||
namespace OpenGL {
|
||||
|
||||
@ -47,7 +48,7 @@ public:
|
||||
|
||||
void UseTrivialGeometryShader();
|
||||
|
||||
void UseFragmentShader(const Pica::Regs& config, bool use_normal);
|
||||
void UseFragmentShader(const Pica::Regs& config, const Pica::Shader::UserConfig& user);
|
||||
|
||||
void ApplyTo(OpenGLState& state);
|
||||
|
||||
|
@ -333,7 +333,7 @@ Surface::Surface(TextureRuntime& runtime_, const VideoCore::SurfaceParams& param
|
||||
|
||||
Surface::Surface(TextureRuntime& runtime, const VideoCore::SurfaceBase& surface,
|
||||
const VideoCore::Material* mat)
|
||||
: SurfaceBase{surface}, tuple{runtime.GetFormatTuple(mat->format)} {
|
||||
: SurfaceBase{surface, mat}, tuple{runtime.GetFormatTuple(mat->format)} {
|
||||
if (mat && !driver->IsCustomFormatSupported(mat->format)) {
|
||||
return;
|
||||
}
|
||||
@ -342,9 +342,6 @@ Surface::Surface(TextureRuntime& runtime, const VideoCore::SurfaceBase& surface,
|
||||
const GLenum target =
|
||||
texture_type == VideoCore::TextureType::CubeMap ? GL_TEXTURE_CUBE_MAP : GL_TEXTURE_2D;
|
||||
|
||||
custom_format = mat->format;
|
||||
material = mat;
|
||||
|
||||
textures[0] = MakeHandle(target, mat->width, mat->height, levels, tuple, DebugName(false));
|
||||
if (res_scale != 1) {
|
||||
textures[1] = MakeHandle(target, mat->width, mat->height, levels, DEFAULT_TUPLE,
|
||||
|
@ -3,10 +3,10 @@
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/file_watcher.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/microprofile.h"
|
||||
#include "common/settings.h"
|
||||
#include "common/texture.h"
|
||||
#include "core/core.h"
|
||||
#include "core/frontend/emu_window.h"
|
||||
#include "core/hw/gpu.h"
|
||||
@ -19,7 +19,6 @@
|
||||
#include "video_core/host_shaders/vulkan_present_frag_spv.h"
|
||||
#include "video_core/host_shaders/vulkan_present_interlaced_frag_spv.h"
|
||||
#include "video_core/host_shaders/vulkan_present_vert_spv.h"
|
||||
#include "vulkan/vulkan_format_traits.hpp"
|
||||
|
||||
#include <vk_mem_alloc.h>
|
||||
|
||||
|
@ -4,9 +4,9 @@
|
||||
|
||||
#include "common/thread_worker.h"
|
||||
#include "video_core/rasterizer_cache/pixel_format.h"
|
||||
#include "video_core/regs_pipeline.h"
|
||||
#include "video_core/regs_rasterizer.h"
|
||||
#include "video_core/renderer_vulkan/vk_common.h"
|
||||
#include "video_core/shader/generator/glsl_shader_gen.h"
|
||||
#include "video_core/shader/generator/spv_shader_gen.h"
|
||||
|
||||
namespace Common {
|
||||
|
||||
@ -89,6 +89,9 @@ struct DynamicState {
|
||||
u8 stencil_compare_mask;
|
||||
u8 stencil_write_mask;
|
||||
|
||||
Common::Rectangle<u32> scissor;
|
||||
Common::Rectangle<s32> viewport;
|
||||
|
||||
bool operator==(const DynamicState& other) const noexcept {
|
||||
return std::memcmp(this, &other, sizeof(DynamicState)) == 0;
|
||||
}
|
||||
|
@ -15,8 +15,12 @@
|
||||
#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
|
||||
#include "video_core/renderer_vulkan/vk_scheduler.h"
|
||||
#include "video_core/renderer_vulkan/vk_shader_util.h"
|
||||
#include "video_core/shader/generator/glsl_fs_shader_gen.h"
|
||||
#include "video_core/shader/generator/glsl_shader_gen.h"
|
||||
#include "video_core/shader/generator/spv_fs_shader_gen.h"
|
||||
|
||||
using namespace Pica::Shader::Generator;
|
||||
using Pica::Shader::FSConfig;
|
||||
|
||||
MICROPROFILE_DEFINE(Vulkan_Bind, "Vulkan", "Pipeline Bind", MP_RGB(192, 32, 32));
|
||||
|
||||
@ -86,6 +90,17 @@ PipelineCache::PipelineCache(const Instance& instance_, Scheduler& scheduler_,
|
||||
trivial_vertex_shader{
|
||||
instance, vk::ShaderStageFlagBits::eVertex,
|
||||
GLSL::GenerateTrivialVertexShader(instance.IsShaderClipDistanceSupported(), true)} {
|
||||
profile = Pica::Shader::Profile{
|
||||
.has_separable_shaders = true,
|
||||
.has_clip_planes = instance.IsShaderClipDistanceSupported(),
|
||||
.has_geometry_shader = instance.UseGeometryShaders(),
|
||||
.has_custom_border_color = instance.IsCustomBorderColorSupported(),
|
||||
.has_fragment_shader_interlock = instance.IsFragmentShaderInterlockSupported(),
|
||||
.has_blend_minmax_factor = false,
|
||||
.has_minus_one_to_one_range = false,
|
||||
.has_logic_op = !instance.NeedsLogicOpEmulation(),
|
||||
.is_vulkan = true,
|
||||
};
|
||||
BuildLayout();
|
||||
}
|
||||
|
||||
@ -206,6 +221,32 @@ bool PipelineCache::BindPipeline(const PipelineInfo& info, bool wait_built) {
|
||||
current_depth_stencil = current_info.depth_stencil,
|
||||
rasterization = info.rasterization,
|
||||
depth_stencil = info.depth_stencil](vk::CommandBuffer cmdbuf) {
|
||||
if (dynamic.viewport != current_dynamic.viewport || is_dirty) {
|
||||
const vk::Viewport vk_viewport = {
|
||||
.x = static_cast<f32>(dynamic.viewport.left),
|
||||
.y = static_cast<f32>(dynamic.viewport.top),
|
||||
.width = static_cast<f32>(dynamic.viewport.GetWidth()),
|
||||
.height = static_cast<f32>(dynamic.viewport.GetHeight()),
|
||||
.minDepth = 0.f,
|
||||
.maxDepth = 1.f,
|
||||
};
|
||||
cmdbuf.setViewport(0, vk_viewport);
|
||||
}
|
||||
|
||||
if (dynamic.scissor != current_dynamic.scissor || is_dirty) {
|
||||
const vk::Rect2D scissor = {
|
||||
.offset{
|
||||
.x = static_cast<s32>(dynamic.scissor.left),
|
||||
.y = static_cast<s32>(dynamic.scissor.bottom),
|
||||
},
|
||||
.extent{
|
||||
.width = dynamic.scissor.GetWidth(),
|
||||
.height = dynamic.scissor.GetHeight(),
|
||||
},
|
||||
};
|
||||
cmdbuf.setScissor(0, scissor);
|
||||
}
|
||||
|
||||
if (dynamic.stencil_compare_mask != current_dynamic.stencil_compare_mask || is_dirty) {
|
||||
cmdbuf.setStencilCompareMask(vk::StencilFaceFlagBits::eFrontAndBack,
|
||||
dynamic.stencil_compare_mask);
|
||||
@ -377,35 +418,30 @@ void PipelineCache::UseTrivialGeometryShader() {
|
||||
shader_hashes[ProgramType::GS] = 0;
|
||||
}
|
||||
|
||||
void PipelineCache::UseFragmentShader(const Pica::Regs& regs) {
|
||||
const PicaFSConfig config{regs, instance.IsFragmentShaderInterlockSupported(),
|
||||
instance.NeedsLogicOpEmulation(),
|
||||
!instance.IsCustomBorderColorSupported(), false};
|
||||
|
||||
const auto [it, new_shader] = fragment_shaders.try_emplace(config, instance);
|
||||
void PipelineCache::UseFragmentShader(const Pica::Regs& regs,
|
||||
const Pica::Shader::UserConfig& user) {
|
||||
const FSConfig fs_config{regs, user, profile};
|
||||
const auto [it, new_shader] = fragment_shaders.try_emplace(fs_config, instance);
|
||||
auto& shader = it->second;
|
||||
|
||||
if (new_shader) {
|
||||
const bool use_spirv = Settings::values.spirv_shader_gen.GetValue();
|
||||
const auto texture0_type = config.state.texture0_type.Value();
|
||||
const bool is_shadow = texture0_type == Pica::TexturingRegs::TextureConfig::Shadow2D ||
|
||||
texture0_type == Pica::TexturingRegs::TextureConfig::ShadowCube ||
|
||||
config.state.shadow_rendering.Value();
|
||||
if (use_spirv && !is_shadow) {
|
||||
const std::vector code = SPIRV::GenerateFragmentShader(config);
|
||||
if (use_spirv && !fs_config.UsesShadowPipeline()) {
|
||||
const std::vector code = SPIRV::GenerateFragmentShader(fs_config);
|
||||
shader.module = CompileSPV(code, instance.GetDevice());
|
||||
shader.MarkDone();
|
||||
} else {
|
||||
workers.QueueWork([config, device = instance.GetDevice(), &shader]() {
|
||||
const std::string code = GLSL::GenerateFragmentShader(config, true);
|
||||
shader.module = Compile(code, vk::ShaderStageFlagBits::eFragment, device);
|
||||
workers.QueueWork([fs_config, this, &shader]() {
|
||||
const std::string code = GLSL::GenerateFragmentShader(fs_config, profile);
|
||||
shader.module =
|
||||
Compile(code, vk::ShaderStageFlagBits::eFragment, instance.GetDevice());
|
||||
shader.MarkDone();
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
current_shaders[ProgramType::FS] = &shader;
|
||||
shader_hashes[ProgramType::FS] = config.Hash();
|
||||
shader_hashes[ProgramType::FS] = fs_config.Hash();
|
||||
}
|
||||
|
||||
void PipelineCache::BindTexture(u32 binding, vk::ImageView image_view, vk::Sampler sampler) {
|
||||
|
@ -9,13 +9,18 @@
|
||||
|
||||
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
|
||||
#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
|
||||
#include "video_core/shader/generator/glsl_shader_gen.h"
|
||||
#include "video_core/shader/generator/spv_shader_gen.h"
|
||||
#include "video_core/shader/generator/pica_fs_config.h"
|
||||
#include "video_core/shader/generator/profile.h"
|
||||
#include "video_core/shader/generator/shader_gen.h"
|
||||
|
||||
namespace Pica {
|
||||
struct Regs;
|
||||
}
|
||||
|
||||
namespace Pica::Shader {
|
||||
struct ShaderSetup;
|
||||
}
|
||||
|
||||
namespace Vulkan {
|
||||
|
||||
class Instance;
|
||||
@ -62,7 +67,7 @@ public:
|
||||
void UseTrivialGeometryShader();
|
||||
|
||||
/// Binds a fragment shader generated from PICA state
|
||||
void UseFragmentShader(const Pica::Regs& regs);
|
||||
void UseFragmentShader(const Pica::Regs& regs, const Pica::Shader::UserConfig& user);
|
||||
|
||||
/// Binds a texture to the specified binding
|
||||
void BindTexture(u32 binding, vk::ImageView image_view, vk::Sampler sampler);
|
||||
@ -98,6 +103,7 @@ private:
|
||||
RenderpassCache& renderpass_cache;
|
||||
DescriptorPool& pool;
|
||||
|
||||
Pica::Shader::Profile profile{};
|
||||
vk::UniquePipelineCache pipeline_cache;
|
||||
vk::UniquePipelineLayout pipeline_layout;
|
||||
std::size_t num_worker_threads;
|
||||
@ -118,7 +124,7 @@ private:
|
||||
std::unordered_map<Pica::Shader::Generator::PicaVSConfig, Shader*> programmable_vertex_map;
|
||||
std::unordered_map<std::string, Shader> programmable_vertex_cache;
|
||||
std::unordered_map<Pica::Shader::Generator::PicaFixedGSConfig, Shader> fixed_geometry_shaders;
|
||||
std::unordered_map<Pica::Shader::Generator::PicaFSConfig, Shader> fragment_shaders;
|
||||
std::unordered_map<Pica::Shader::FSConfig, Shader> fragment_shaders;
|
||||
Shader trivial_vertex_shader;
|
||||
};
|
||||
|
||||
|
@ -497,7 +497,7 @@ bool RasterizerVulkan::Draw(bool accelerate, bool is_indexed) {
|
||||
|
||||
// Sync and bind the shader
|
||||
if (shader_dirty) {
|
||||
pipeline_cache.UseFragmentShader(regs);
|
||||
pipeline_cache.UseFragmentShader(regs, user_config);
|
||||
shader_dirty = false;
|
||||
}
|
||||
|
||||
@ -512,30 +512,13 @@ bool RasterizerVulkan::Draw(bool accelerate, bool is_indexed) {
|
||||
|
||||
// Configure viewport and scissor
|
||||
const auto viewport = fb_helper.Viewport();
|
||||
scheduler.Record([viewport, draw_rect](vk::CommandBuffer cmdbuf) {
|
||||
const vk::Viewport vk_viewport = {
|
||||
.x = static_cast<f32>(viewport.x),
|
||||
.y = static_cast<f32>(viewport.y),
|
||||
.width = static_cast<f32>(viewport.width),
|
||||
.height = static_cast<f32>(viewport.height),
|
||||
.minDepth = 0.f,
|
||||
.maxDepth = 1.f,
|
||||
};
|
||||
|
||||
const vk::Rect2D scissor = {
|
||||
.offset{
|
||||
.x = static_cast<s32>(draw_rect.left),
|
||||
.y = static_cast<s32>(draw_rect.bottom),
|
||||
},
|
||||
.extent{
|
||||
.width = draw_rect.GetWidth(),
|
||||
.height = draw_rect.GetHeight(),
|
||||
},
|
||||
};
|
||||
|
||||
cmdbuf.setViewport(0, vk_viewport);
|
||||
cmdbuf.setScissor(0, scissor);
|
||||
});
|
||||
pipeline_info.dynamic.viewport = Common::Rectangle<s32>{
|
||||
viewport.x,
|
||||
viewport.y,
|
||||
viewport.x + viewport.width,
|
||||
viewport.y + viewport.height,
|
||||
};
|
||||
pipeline_info.dynamic.scissor = draw_rect;
|
||||
|
||||
// Draw the vertex batch
|
||||
bool succeeded = true;
|
||||
|
@ -44,11 +44,12 @@ vk::MemoryPropertyFlags MakePropertyFlags(BufferType type) {
|
||||
}
|
||||
|
||||
/// Find a memory type with the passed requirements
|
||||
std::optional<u32> FindMemoryType(const vk::PhysicalDeviceMemoryProperties& properties,
|
||||
vk::MemoryPropertyFlags wanted) {
|
||||
std::optional<u32> FindMemoryType(
|
||||
const vk::PhysicalDeviceMemoryProperties& properties, vk::MemoryPropertyFlags wanted,
|
||||
vk::MemoryPropertyFlags excluded = vk::MemoryPropertyFlagBits::eProtected) {
|
||||
for (u32 i = 0; i < properties.memoryTypeCount; ++i) {
|
||||
const auto flags = properties.memoryTypes[i].propertyFlags;
|
||||
if ((flags & wanted) == wanted) {
|
||||
if (((flags & wanted) == wanted) && (!(flags & excluded))) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
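The new excluded parameter defaults to eProtected, so callers keep their existing signature but can no longer be handed a protected memory type they could never map. A hedged usage sketch; the helper below and the physical_device handle are assumptions for illustration:

#include <optional>

// Pick host-visible, host-coherent memory for an upload buffer. Protected memory
// is still filtered out through the default argument added above.
std::optional<u32> PickUploadMemoryType(vk::PhysicalDevice physical_device) {
    const vk::PhysicalDeviceMemoryProperties properties = physical_device.getMemoryProperties();
    return FindMemoryType(properties, vk::MemoryPropertyFlagBits::eHostVisible |
                                          vk::MemoryPropertyFlagBits::eHostCoherent);
}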
@ -166,25 +167,46 @@ void StreamBuffer::CreateBuffers(u64 prefered_size) {
|
||||
static_cast<bool>(mem_type.propertyFlags & vk::MemoryPropertyFlagBits::eHostCoherent);
|
||||
|
||||
// Subtract some bytes from the preferred heap size to avoid running out of memory.
|
||||
const VkDeviceSize heap_size = memory_properties.memoryHeaps[preferred_heap].size;
|
||||
const vk::DeviceSize heap_size = memory_properties.memoryHeaps[preferred_heap].size;
|
||||
// As per DXVK's example, using `heap_size / 2`
|
||||
const VkDeviceSize allocable_size = heap_size / 2;
|
||||
const vk::DeviceSize allocable_size = heap_size / 2;
|
||||
buffer = device.createBuffer({
|
||||
.size = std::min(prefered_size, allocable_size),
|
||||
.usage = usage,
|
||||
});
|
||||
|
||||
const auto requirements = device.getBufferMemoryRequirements(buffer);
|
||||
stream_buffer_size = static_cast<u64>(requirements.size);
|
||||
const auto requirements_chain =
|
||||
device
|
||||
.getBufferMemoryRequirements2<vk::MemoryRequirements2, vk::MemoryDedicatedRequirements>(
|
||||
{.buffer = buffer});
|
||||
|
||||
const auto& requirements = requirements_chain.get<vk::MemoryRequirements2>();
|
||||
const auto& dedicated_requirements = requirements_chain.get<vk::MemoryDedicatedRequirements>();
|
||||
|
||||
stream_buffer_size = static_cast<u64>(requirements.memoryRequirements.size);
|
||||
|
||||
LOG_INFO(Render_Vulkan, "Creating {} buffer with size {} KB with flags {}",
|
||||
BufferTypeName(type), stream_buffer_size / 1024,
|
||||
vk::to_string(mem_type.propertyFlags));
|
||||
|
||||
memory = device.allocateMemory({
|
||||
.allocationSize = requirements.size,
|
||||
.memoryTypeIndex = preferred_type,
|
||||
});
|
||||
if (dedicated_requirements.prefersDedicatedAllocation) {
|
||||
vk::StructureChain<vk::MemoryAllocateInfo, vk::MemoryDedicatedAllocateInfo> alloc_chain =
|
||||
{};
|
||||
|
||||
auto& alloc_info = alloc_chain.get<vk::MemoryAllocateInfo>();
|
||||
alloc_info.allocationSize = requirements.memoryRequirements.size;
|
||||
alloc_info.memoryTypeIndex = preferred_type;
|
||||
|
||||
auto& dedicated_alloc_info = alloc_chain.get<vk::MemoryDedicatedAllocateInfo>();
|
||||
dedicated_alloc_info.buffer = buffer;
|
||||
|
||||
memory = device.allocateMemory(alloc_chain.get());
|
||||
} else {
|
||||
memory = device.allocateMemory({
|
||||
.allocationSize = requirements.memoryRequirements.size,
|
||||
.memoryTypeIndex = preferred_type,
|
||||
});
|
||||
}
|
||||
|
||||
device.bindBufferMemory(buffer, memory, 0);
|
||||
mapped = reinterpret_cast<u8*>(device.mapMemory(memory, 0, VK_WHOLE_SIZE));
|
||||
|
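For completeness, VkMemoryDedicatedRequirements reports a hard requirement alongside the preference the code above keys off. A hedged sketch of reading both; the helper is illustrative and assumes the same vulkan.hpp designated-initializer setup the surrounding code already uses:

// Returns true when the driver either prefers or requires a dedicated allocation
// for the given buffer.
bool WantsDedicatedAllocation(vk::Device device, vk::Buffer buffer) {
    const auto chain =
        device.getBufferMemoryRequirements2<vk::MemoryRequirements2,
                                            vk::MemoryDedicatedRequirements>({.buffer = buffer});
    const auto& dedicated = chain.get<vk::MemoryDedicatedRequirements>();
    return dedicated.prefersDedicatedAllocation || dedicated.requiresDedicatedAllocation;
}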
@ -750,7 +750,7 @@ Surface::Surface(TextureRuntime& runtime_, const VideoCore::SurfaceParams& param
|
||||
|
||||
Surface::Surface(TextureRuntime& runtime_, const VideoCore::SurfaceBase& surface,
|
||||
const VideoCore::Material* mat)
|
||||
: SurfaceBase{surface}, runtime{&runtime_}, instance{&runtime_.GetInstance()},
|
||||
: SurfaceBase{surface, mat}, runtime{&runtime_}, instance{&runtime_.GetInstance()},
|
||||
scheduler{&runtime_.GetScheduler()}, traits{instance->GetTraits(mat->format)} {
|
||||
if (!traits.transfer_support) {
|
||||
return;
|
||||
@ -791,9 +791,6 @@ Surface::Surface(TextureRuntime& runtime_, const VideoCore::SurfaceBase& surface
|
||||
vk::PipelineStageFlagBits::eTopOfPipe,
|
||||
vk::DependencyFlagBits::eByRegion, {}, {}, barriers);
|
||||
});
|
||||
|
||||
custom_format = mat->format;
|
||||
material = mat;
|
||||
}
|
||||
|
||||
Surface::~Surface() {
|
||||
|
1605 src/video_core/shader/generator/glsl_fs_shader_gen.cpp Normal file
File diff suppressed because it is too large
100 src/video_core/shader/generator/glsl_fs_shader_gen.h Normal file
@ -0,0 +1,100 @@
|
||||
// Copyright 2023 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "video_core/shader/generator/pica_fs_config.h"
|
||||
|
||||
namespace Pica::Shader::Generator::GLSL {
|
||||
|
||||
class FragmentModule {
|
||||
public:
|
||||
explicit FragmentModule(const FSConfig& config, const Profile& profile);
|
||||
~FragmentModule();
|
||||
|
||||
/// Emits GLSL source corresponding to the provided pica fragment configuration
|
||||
std::string Generate();
|
||||
|
||||
private:
|
||||
/// Undoes the host perspective transformation and applies the PICA one
|
||||
void WriteDepth();
|
||||
|
||||
/// Emits code to emulate the scissor rectangle
|
||||
void WriteScissor();
|
||||
|
||||
/// Writes the code to emulate fragment lighting
|
||||
void WriteLighting();
|
||||
|
||||
/// Writes the code to emulate fog
|
||||
void WriteFog();
|
||||
|
||||
/// Writes the code to emulate gas rendering
|
||||
void WriteGas();
|
||||
|
||||
/// Writes the code to emulate shadow-map rendering
|
||||
void WriteShadow();
|
||||
|
||||
/// Writes the code to emulate logic ops in the fragment shader
|
||||
void WriteLogicOp();
|
||||
|
||||
/// Writes the code to emulate PICA min/max blending factors
|
||||
void WriteBlending();
|
||||
|
||||
/// Writes the specified TEV stage source component(s)
|
||||
void AppendSource(Pica::TexturingRegs::TevStageConfig::Source source, u32 tev_index);
|
||||
|
||||
/// Writes the color components to use for the specified TEV stage color modifier
|
||||
void AppendColorModifier(Pica::TexturingRegs::TevStageConfig::ColorModifier modifier,
|
||||
Pica::TexturingRegs::TevStageConfig::Source source, u32 tev_index);
|
||||
|
||||
/// Writes the alpha component to use for the specified TEV stage alpha modifier
|
||||
void AppendAlphaModifier(Pica::TexturingRegs::TevStageConfig::AlphaModifier modifier,
|
||||
Pica::TexturingRegs::TevStageConfig::Source source, u32 tev_index);
|
||||
|
||||
/// Writes the combiner function for the color components for the specified TEV stage operation
|
||||
void AppendColorCombiner(Pica::TexturingRegs::TevStageConfig::Operation operation);
|
||||
|
||||
/// Writes the combiner function for the alpha component for the specified TEV stage operation
|
||||
void AppendAlphaCombiner(Pica::TexturingRegs::TevStageConfig::Operation operation);
|
||||
|
||||
/// Writes the if-statement condition used to evaluate alpha testing
|
||||
void WriteAlphaTestCondition(Pica::FramebufferRegs::CompareFunc func);
|
||||
|
||||
/// Writes the code to emulate the specified TEV stage
|
||||
void WriteTevStage(u32 index);
|
||||
|
||||
void AppendProcTexShiftOffset(std::string_view v, Pica::TexturingRegs::ProcTexShift mode,
|
||||
Pica::TexturingRegs::ProcTexClamp clamp_mode);
|
||||
|
||||
void AppendProcTexClamp(std::string_view var, Pica::TexturingRegs::ProcTexClamp mode);
|
||||
|
||||
void AppendProcTexCombineAndMap(Pica::TexturingRegs::ProcTexCombiner combiner,
|
||||
std::string_view offset);
|
||||
|
||||
void DefineExtensions();
|
||||
void DefineInterface();
|
||||
void DefineBindings();
|
||||
void DefineHelpers();
|
||||
void DefineLightingHelpers();
|
||||
void DefineShadowHelpers();
|
||||
void DefineProcTexSampler();
|
||||
void DefineTexUnitSampler(u32 i);
|
||||
|
||||
private:
|
||||
const FSConfig& config;
|
||||
const Profile& profile;
|
||||
std::string out;
|
||||
bool use_blend_fallback{};
|
||||
bool use_fragment_shader_interlock{};
|
||||
};
|
||||
|
||||
/**
|
||||
* Generates the GLSL fragment shader program source code for the current Pica state
|
||||
* @param config FSConfig object generated for the current Pica state, used for the shader
|
||||
* configuration (NOTE: Use state in this struct only, not the Pica registers!)
|
||||
* @returns String of the shader source code
|
||||
*/
|
||||
std::string GenerateFragmentShader(const FSConfig& config, const Profile& profile);
|
||||
|
||||
} // namespace Pica::Shader::Generator::GLSL
|
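The new header boils down to one entry point plus the FragmentModule helper behind it. A hedged sketch of calling it; fs_config and profile are assumed to have been built elsewhere from the Pica registers and the host capabilities:

#include <string>

#include "video_core/shader/generator/glsl_fs_shader_gen.h"

std::string BuildFragmentSource(const Pica::Shader::FSConfig& fs_config,
                                const Pica::Shader::Profile& profile) {
    // The returned GLSL can be fed to glShaderSource or to the GLSL path of the Vulkan backend.
    return Pica::Shader::Generator::GLSL::GenerateFragmentShader(fs_config, profile);
}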
File diff suppressed because it is too large
@ -46,12 +46,4 @@ std::string GenerateVertexShader(const Pica::Shader::ShaderSetup& setup, const P
|
||||
*/
|
||||
std::string GenerateFixedGeometryShader(const PicaFixedGSConfig& config, bool separable_shader);
|
||||
|
||||
/**
|
||||
* Generates the GLSL fragment shader program source code for the current Pica state
|
||||
* @param config ShaderCacheKey object generated for the current Pica state, used for the shader
|
||||
* configuration (NOTE: Use state in this struct only, not the Pica registers!)
|
||||
* @returns String of the shader source code
|
||||
*/
|
||||
std::string GenerateFragmentShader(const PicaFSConfig& config, bool separable_shader);
|
||||
|
||||
} // namespace Pica::Shader::Generator::GLSL
|
||||
|
193 src/video_core/shader/generator/pica_fs_config.cpp Normal file
@ -0,0 +1,193 @@
|
||||
// Copyright 2023 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "video_core/shader/generator/pica_fs_config.h"
|
||||
|
||||
namespace Pica::Shader {
|
||||
|
||||
FramebufferConfig::FramebufferConfig(const Pica::Regs& regs, const Profile& profile) {
|
||||
const auto& output_merger = regs.framebuffer.output_merger;
|
||||
scissor_test_mode.Assign(regs.rasterizer.scissor_test.mode);
|
||||
depthmap_enable.Assign(regs.rasterizer.depthmap_enable);
|
||||
shadow_rendering.Assign(regs.framebuffer.IsShadowRendering());
|
||||
alpha_test_func.Assign(output_merger.alpha_test.enable
|
||||
? output_merger.alpha_test.func.Value()
|
||||
: Pica::FramebufferRegs::CompareFunc::Always);
|
||||
|
||||
// Emulate logic op in the shader if needed and not supported.
|
||||
logic_op.Assign(Pica::FramebufferRegs::LogicOp::Copy);
|
||||
if (!profile.has_logic_op && !regs.framebuffer.output_merger.alphablend_enable) {
|
||||
logic_op.Assign(regs.framebuffer.output_merger.logic_op);
|
||||
}
|
||||
|
||||
const auto alpha_eq = output_merger.alpha_blending.blend_equation_a.Value();
|
||||
const auto rgb_eq = output_merger.alpha_blending.blend_equation_rgb.Value();
|
||||
if (!profile.has_blend_minmax_factor && output_merger.alphablend_enable) {
|
||||
if (rgb_eq == Pica::FramebufferRegs::BlendEquation::Max ||
|
||||
rgb_eq == Pica::FramebufferRegs::BlendEquation::Min) {
|
||||
rgb_blend.eq = rgb_eq;
|
||||
rgb_blend.src_factor = output_merger.alpha_blending.factor_source_rgb;
|
||||
rgb_blend.dst_factor = output_merger.alpha_blending.factor_dest_rgb;
|
||||
}
|
||||
if (alpha_eq == Pica::FramebufferRegs::BlendEquation::Max ||
|
||||
alpha_eq == Pica::FramebufferRegs::BlendEquation::Min) {
|
||||
alpha_blend.eq = alpha_eq;
|
||||
alpha_blend.src_factor = output_merger.alpha_blending.factor_source_a;
|
||||
alpha_blend.dst_factor = output_merger.alpha_blending.factor_dest_a;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
TextureConfig::TextureConfig(const Pica::TexturingRegs& regs, const Profile& profile) {
|
||||
texture0_type.Assign(regs.texture0.type);
|
||||
texture2_use_coord1.Assign(regs.main_config.texture2_use_coord1 != 0);
|
||||
combiner_buffer_input.Assign(regs.tev_combiner_buffer_input.update_mask_rgb.Value() |
|
||||
regs.tev_combiner_buffer_input.update_mask_a.Value() << 4);
|
||||
fog_mode.Assign(regs.fog_mode);
|
||||
fog_flip.Assign(regs.fog_flip != 0);
|
||||
shadow_texture_orthographic.Assign(regs.shadow.orthographic != 0);
|
||||
|
||||
// Emulate custom border color if needed and not supported.
|
||||
const auto pica_textures = regs.GetTextures();
|
||||
for (u32 tex_index = 0; tex_index < 3; tex_index++) {
|
||||
const auto& config = pica_textures[tex_index].config;
|
||||
texture_border_color[tex_index].enable_s.Assign(
|
||||
!profile.has_custom_border_color &&
|
||||
config.wrap_s == Pica::TexturingRegs::TextureConfig::WrapMode::ClampToBorder);
|
||||
texture_border_color[tex_index].enable_t.Assign(
|
||||
!profile.has_custom_border_color &&
|
||||
config.wrap_t == Pica::TexturingRegs::TextureConfig::WrapMode::ClampToBorder);
|
||||
}
|
||||
|
||||
const auto& stages = regs.GetTevStages();
|
||||
for (std::size_t i = 0; i < tev_stages.size(); i++) {
|
||||
const auto& tev_stage = stages[i];
|
||||
tev_stages[i].sources_raw = tev_stage.sources_raw;
|
||||
tev_stages[i].modifiers_raw = tev_stage.modifiers_raw;
|
||||
tev_stages[i].ops_raw = tev_stage.ops_raw;
|
||||
tev_stages[i].scales_raw = tev_stage.scales_raw;
|
||||
if (tev_stage.color_op == Pica::TexturingRegs::TevStageConfig::Operation::Dot3_RGBA) {
|
||||
tev_stages[i].sources_raw &= 0xFFF;
|
||||
tev_stages[i].modifiers_raw &= 0xFFF;
|
||||
tev_stages[i].ops_raw &= 0xF;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
LightConfig::LightConfig(const Pica::LightingRegs& regs) {
|
||||
if (regs.disable) {
|
||||
return;
|
||||
}
|
||||
|
||||
enable.Assign(1);
|
||||
src_num.Assign(regs.max_light_index + 1);
|
||||
config.Assign(regs.config0.config);
|
||||
enable_primary_alpha.Assign(regs.config0.enable_primary_alpha);
|
||||
enable_secondary_alpha.Assign(regs.config0.enable_secondary_alpha);
|
||||
bump_mode.Assign(regs.config0.bump_mode);
|
||||
bump_selector.Assign(regs.config0.bump_selector);
|
||||
bump_renorm.Assign(regs.config0.disable_bump_renorm == 0);
|
||||
clamp_highlights.Assign(regs.config0.clamp_highlights != 0);
|
||||
|
||||
enable_shadow.Assign(regs.config0.enable_shadow != 0);
|
||||
if (enable_shadow) {
|
||||
shadow_primary.Assign(regs.config0.shadow_primary != 0);
|
||||
shadow_secondary.Assign(regs.config0.shadow_secondary != 0);
|
||||
shadow_invert.Assign(regs.config0.shadow_invert != 0);
|
||||
shadow_alpha.Assign(regs.config0.shadow_alpha != 0);
|
||||
shadow_selector.Assign(regs.config0.shadow_selector);
|
||||
}
|
||||
|
||||
for (u32 light_index = 0; light_index <= regs.max_light_index; ++light_index) {
|
||||
const u32 num = regs.light_enable.GetNum(light_index);
|
||||
const auto& light = regs.light[num];
|
||||
lights[light_index].num.Assign(num);
|
||||
lights[light_index].directional.Assign(light.config.directional != 0);
|
||||
lights[light_index].two_sided_diffuse.Assign(light.config.two_sided_diffuse != 0);
|
||||
lights[light_index].geometric_factor_0.Assign(light.config.geometric_factor_0 != 0);
|
||||
lights[light_index].geometric_factor_1.Assign(light.config.geometric_factor_1 != 0);
|
||||
lights[light_index].dist_atten_enable.Assign(!regs.IsDistAttenDisabled(num));
|
||||
lights[light_index].spot_atten_enable.Assign(!regs.IsSpotAttenDisabled(num));
|
||||
lights[light_index].shadow_enable.Assign(!regs.IsShadowDisabled(num));
|
||||
}
|
||||
|
||||
lut_d0.enable.Assign(regs.config1.disable_lut_d0 == 0);
|
||||
if (lut_d0.enable) {
|
||||
lut_d0.abs_input.Assign(regs.abs_lut_input.disable_d0 == 0);
|
||||
lut_d0.type.Assign(regs.lut_input.d0.Value());
|
||||
lut_d0.scale = regs.lut_scale.GetScale(regs.lut_scale.d0);
|
||||
}
|
||||
|
||||
lut_d1.enable.Assign(regs.config1.disable_lut_d1 == 0);
|
||||
if (lut_d1.enable) {
|
||||
lut_d1.abs_input.Assign(regs.abs_lut_input.disable_d1 == 0);
|
||||
lut_d1.type.Assign(regs.lut_input.d1.Value());
|
||||
lut_d1.scale = regs.lut_scale.GetScale(regs.lut_scale.d1);
|
||||
}
|
||||
|
||||
// This is a dummy field due to lack of the corresponding register
|
||||
lut_sp.enable.Assign(1);
|
||||
lut_sp.abs_input.Assign(regs.abs_lut_input.disable_sp == 0);
|
||||
lut_sp.type.Assign(regs.lut_input.sp.Value());
|
||||
lut_sp.scale = regs.lut_scale.GetScale(regs.lut_scale.sp);
|
||||
|
||||
lut_fr.enable.Assign(regs.config1.disable_lut_fr == 0);
|
||||
if (lut_fr.enable) {
|
||||
lut_fr.abs_input.Assign(regs.abs_lut_input.disable_fr == 0);
|
||||
lut_fr.type.Assign(regs.lut_input.fr.Value());
|
||||
lut_fr.scale = regs.lut_scale.GetScale(regs.lut_scale.fr);
|
||||
}
|
||||
|
||||
lut_rr.enable.Assign(regs.config1.disable_lut_rr == 0);
|
||||
if (lut_rr.enable) {
|
||||
lut_rr.abs_input.Assign(regs.abs_lut_input.disable_rr == 0);
|
||||
lut_rr.type.Assign(regs.lut_input.rr.Value());
|
||||
lut_rr.scale = regs.lut_scale.GetScale(regs.lut_scale.rr);
|
||||
}
|
||||
|
||||
lut_rg.enable.Assign(regs.config1.disable_lut_rg == 0);
|
||||
if (lut_rg.enable) {
|
||||
lut_rg.abs_input.Assign(regs.abs_lut_input.disable_rg == 0);
|
||||
lut_rg.type.Assign(regs.lut_input.rg.Value());
|
||||
lut_rg.scale = regs.lut_scale.GetScale(regs.lut_scale.rg);
|
||||
}
|
||||
|
||||
lut_rb.enable.Assign(regs.config1.disable_lut_rb == 0);
|
||||
if (lut_rb.enable) {
|
||||
lut_rb.abs_input.Assign(regs.abs_lut_input.disable_rb == 0);
|
||||
lut_rb.type.Assign(regs.lut_input.rb.Value());
|
||||
lut_rb.scale = regs.lut_scale.GetScale(regs.lut_scale.rb);
|
||||
}
|
||||
}
|
||||
|
||||
ProcTexConfig::ProcTexConfig(const Pica::TexturingRegs& regs) {
|
||||
if (!regs.main_config.texture3_enable) {
|
||||
return;
|
||||
}
|
||||
|
||||
enable.Assign(1);
|
||||
coord.Assign(regs.main_config.texture3_coordinates);
|
||||
u_clamp.Assign(regs.proctex.u_clamp);
|
||||
v_clamp.Assign(regs.proctex.v_clamp);
|
||||
color_combiner.Assign(regs.proctex.color_combiner);
|
||||
alpha_combiner.Assign(regs.proctex.alpha_combiner);
|
||||
separate_alpha.Assign(regs.proctex.separate_alpha);
|
||||
noise_enable.Assign(regs.proctex.noise_enable);
|
||||
u_shift.Assign(regs.proctex.u_shift);
|
||||
v_shift.Assign(regs.proctex.v_shift);
|
||||
lut_width = regs.proctex_lut.width;
|
||||
lut_offset0 = regs.proctex_lut_offset.level0;
|
||||
lut_offset1 = regs.proctex_lut_offset.level1;
|
||||
lut_offset2 = regs.proctex_lut_offset.level2;
|
||||
lut_offset3 = regs.proctex_lut_offset.level3;
|
||||
lod_min = regs.proctex_lut.lod_min;
|
||||
lod_max = regs.proctex_lut.lod_max;
|
||||
lut_filter.Assign(regs.proctex_lut.filter);
|
||||
}
|
||||
|
||||
FSConfig::FSConfig(const Pica::Regs& regs, const UserConfig& user_, const Profile& profile)
|
||||
: framebuffer{regs, profile}, texture{regs.texturing, profile}, lighting{regs.lighting},
|
||||
proctex{regs.texturing}, user{user_} {}
|
||||
|
||||
} // namespace Pica::Shader
|
207 src/video_core/shader/generator/pica_fs_config.h Normal file
@ -0,0 +1,207 @@
|
||||
// Copyright 2023 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "common/hash.h"
|
||||
#include "video_core/regs.h"
|
||||
#include "video_core/shader/generator/profile.h"
|
||||
|
||||
namespace Pica::Shader {
|
||||
|
||||
struct BlendConfig {
|
||||
Pica::FramebufferRegs::BlendEquation eq;
|
||||
Pica::FramebufferRegs::BlendFactor src_factor;
|
||||
Pica::FramebufferRegs::BlendFactor dst_factor;
|
||||
};
|
||||
|
||||
struct FramebufferConfig {
|
||||
explicit FramebufferConfig(const Pica::Regs& regs, const Profile& profile);
|
||||
|
||||
union {
|
||||
u32 raw{};
|
||||
BitField<0, 3, Pica::FramebufferRegs::CompareFunc> alpha_test_func;
|
||||
BitField<3, 2, Pica::RasterizerRegs::ScissorMode> scissor_test_mode;
|
||||
BitField<5, 1, Pica::RasterizerRegs::DepthBuffering> depthmap_enable;
|
||||
BitField<6, 4, Pica::FramebufferRegs::LogicOp> logic_op;
|
||||
BitField<10, 1, u32> shadow_rendering;
|
||||
};
|
||||
BlendConfig rgb_blend{};
|
||||
BlendConfig alpha_blend{};
|
||||
};
|
||||
static_assert(std::has_unique_object_representations_v<FramebufferConfig>);
|
||||
|
||||
struct TevStageConfigRaw {
|
||||
u32 sources_raw;
|
||||
u32 modifiers_raw;
|
||||
u32 ops_raw;
|
||||
u32 scales_raw;
|
||||
operator Pica::TexturingRegs::TevStageConfig() const noexcept {
|
||||
return {
|
||||
.sources_raw = sources_raw,
|
||||
.modifiers_raw = modifiers_raw,
|
||||
.ops_raw = ops_raw,
|
||||
.const_color = 0,
|
||||
.scales_raw = scales_raw,
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
union TextureBorder {
|
||||
BitField<0, 1, u32> enable_s;
|
||||
BitField<1, 1, u32> enable_t;
|
||||
};
|
||||
|
||||
struct TextureConfig {
|
||||
explicit TextureConfig(const Pica::TexturingRegs& regs, const Profile& profile);
|
||||
|
||||
union {
|
||||
u32 raw{};
|
||||
BitField<0, 3, Pica::TexturingRegs::TextureConfig::TextureType> texture0_type;
|
||||
BitField<3, 1, u32> texture2_use_coord1;
|
||||
BitField<4, 8, u32> combiner_buffer_input;
|
||||
BitField<12, 3, Pica::TexturingRegs::FogMode> fog_mode;
|
||||
BitField<15, 1, u32> fog_flip;
|
||||
BitField<16, 1, u32> shadow_texture_orthographic;
|
||||
};
|
||||
std::array<TextureBorder, 3> texture_border_color{};
|
||||
std::array<TevStageConfigRaw, 6> tev_stages{};
|
||||
};
|
||||
static_assert(std::has_unique_object_representations_v<TextureConfig>);
|
||||
|
||||
union Light {
|
||||
u16 raw;
|
||||
BitField<0, 3, u16> num;
|
||||
BitField<3, 1, u16> directional;
|
||||
BitField<4, 1, u16> two_sided_diffuse;
|
||||
BitField<5, 1, u16> dist_atten_enable;
|
||||
BitField<6, 1, u16> spot_atten_enable;
|
||||
BitField<7, 1, u16> geometric_factor_0;
|
||||
BitField<8, 1, u16> geometric_factor_1;
|
||||
BitField<9, 1, u16> shadow_enable;
|
||||
};
|
||||
static_assert(std::has_unique_object_representations_v<Light>);
|
||||
|
||||
struct LutConfig {
|
||||
union {
|
||||
u32 raw;
|
||||
BitField<0, 1, u32> enable;
|
||||
BitField<1, 1, u32> abs_input;
|
||||
BitField<2, 3, Pica::LightingRegs::LightingLutInput> type;
|
||||
};
|
||||
f32 scale;
|
||||
};
|
||||
|
||||
struct LightConfig {
|
||||
explicit LightConfig(const Pica::LightingRegs& regs);
|
||||
|
||||
union {
|
||||
u32 raw{};
|
||||
BitField<0, 1, u32> enable;
|
||||
BitField<1, 4, u32> src_num;
|
||||
BitField<5, 2, Pica::LightingRegs::LightingBumpMode> bump_mode;
|
||||
BitField<7, 2, u32> bump_selector;
|
||||
BitField<9, 1, u32> bump_renorm;
|
||||
BitField<10, 1, u32> clamp_highlights;
|
||||
BitField<11, 4, Pica::LightingRegs::LightingConfig> config;
|
||||
BitField<15, 1, u32> enable_primary_alpha;
|
||||
BitField<16, 1, u32> enable_secondary_alpha;
|
||||
BitField<17, 1, u32> enable_shadow;
|
||||
BitField<18, 1, u32> shadow_primary;
|
||||
BitField<19, 1, u32> shadow_secondary;
|
||||
BitField<20, 1, u32> shadow_invert;
|
||||
BitField<21, 1, u32> shadow_alpha;
|
||||
BitField<22, 2, u32> shadow_selector;
|
||||
};
|
||||
LutConfig lut_d0{};
|
||||
LutConfig lut_d1{};
|
||||
LutConfig lut_sp{};
|
||||
LutConfig lut_fr{};
|
||||
LutConfig lut_rr{};
|
||||
LutConfig lut_rg{};
|
||||
LutConfig lut_rb{};
|
||||
std::array<Light, 8> lights{};
|
||||
};
|
||||
|
||||
struct ProcTexConfig {
|
||||
explicit ProcTexConfig(const Pica::TexturingRegs& regs);
|
||||
|
||||
union {
|
||||
u32 raw{};
|
||||
BitField<0, 1, u32> enable;
|
||||
BitField<1, 2, u32> coord;
|
||||
BitField<3, 3, Pica::TexturingRegs::ProcTexClamp> u_clamp;
|
||||
BitField<6, 3, Pica::TexturingRegs::ProcTexClamp> v_clamp;
|
||||
BitField<9, 4, Pica::TexturingRegs::ProcTexCombiner> color_combiner;
|
||||
BitField<13, 4, Pica::TexturingRegs::ProcTexCombiner> alpha_combiner;
|
||||
BitField<17, 3, Pica::TexturingRegs::ProcTexFilter> lut_filter;
|
||||
BitField<20, 1, u32> separate_alpha;
|
||||
BitField<21, 1, u32> noise_enable;
|
||||
BitField<22, 2, Pica::TexturingRegs::ProcTexShift> u_shift;
|
||||
BitField<24, 2, Pica::TexturingRegs::ProcTexShift> v_shift;
|
||||
};
|
||||
s32 lut_width{};
|
||||
s32 lut_offset0{};
|
||||
s32 lut_offset1{};
|
||||
s32 lut_offset2{};
|
||||
s32 lut_offset3{};
|
||||
u16 lod_min{};
|
||||
u16 lod_max{};
|
||||
};
|
||||
static_assert(std::has_unique_object_representations_v<ProcTexConfig>);
|
||||
|
||||
union UserConfig {
|
||||
u32 raw{};
|
||||
BitField<0, 1, u32> use_custom_normal;
|
||||
};
|
||||
static_assert(std::has_unique_object_representations_v<UserConfig>);
|
||||
|
||||
struct FSConfig {
|
||||
explicit FSConfig(const Pica::Regs& regs, const UserConfig& user, const Profile& profile);
|
||||
|
||||
[[nodiscard]] bool TevStageUpdatesCombinerBufferColor(u32 stage_index) const {
|
||||
return (stage_index < 4) && (texture.combiner_buffer_input & (1 << stage_index));
|
||||
}
|
||||
|
||||
[[nodiscard]] bool TevStageUpdatesCombinerBufferAlpha(u32 stage_index) const {
|
||||
return (stage_index < 4) && ((texture.combiner_buffer_input >> 4) & (1 << stage_index));
|
||||
}
|
||||
|
||||
[[nodiscard]] bool EmulateBlend() const {
|
||||
return framebuffer.rgb_blend.eq != Pica::FramebufferRegs::BlendEquation::Add ||
|
||||
framebuffer.alpha_blend.eq != Pica::FramebufferRegs::BlendEquation::Add;
|
||||
}
|
||||
|
||||
[[nodiscard]] bool UsesShadowPipeline() const {
|
||||
const auto texture0_type = texture.texture0_type.Value();
|
||||
return texture0_type == Pica::TexturingRegs::TextureConfig::Shadow2D ||
|
||||
texture0_type == Pica::TexturingRegs::TextureConfig::ShadowCube ||
|
||||
framebuffer.shadow_rendering.Value();
|
||||
}
|
||||
|
||||
bool operator==(const FSConfig& other) const noexcept {
|
||||
return std::memcmp(this, &other, sizeof(FSConfig)) == 0;
|
||||
}
|
||||
|
||||
std::size_t Hash() const noexcept {
|
||||
return Common::ComputeHash64(this, sizeof(FSConfig));
|
||||
}
|
||||
|
||||
FramebufferConfig framebuffer;
|
||||
TextureConfig texture;
|
||||
LightConfig lighting;
|
||||
ProcTexConfig proctex;
|
||||
UserConfig user;
|
||||
};
|
||||
|
||||
} // namespace Pica::Shader
|
||||
|
||||
namespace std {
|
||||
template <>
|
||||
struct hash<Pica::Shader::FSConfig> {
|
||||
std::size_t operator()(const Pica::Shader::FSConfig& k) const noexcept {
|
||||
return k.Hash();
|
||||
}
|
||||
};
|
||||
} // namespace std
|
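FSConfig is compared with memcmp and hashed over its raw bytes, which is why the bitfield-only sub-structs carry has_unique_object_representations static_asserts: a stray padding byte would make otherwise identical keys hash differently. A small self-contained sketch of the same byte-wise key pattern with a toy struct (the FNV-1a loop stands in for Common::ComputeHash64):

#include <cstddef>
#include <cstring>
#include <type_traits>

struct ToyKey {
    unsigned raw_a{};
    unsigned raw_b{};

    bool operator==(const ToyKey& other) const noexcept {
        return std::memcmp(this, &other, sizeof(ToyKey)) == 0;
    }

    std::size_t Hash() const noexcept {
        // Hash the object representation byte by byte (FNV-1a).
        std::size_t h = 14695981039346656037ull;
        const auto* bytes = reinterpret_cast<const unsigned char*>(this);
        for (std::size_t i = 0; i < sizeof(ToyKey); ++i) {
            h = (h ^ bytes[i]) * 1099511628211ull;
        }
        return h;
    }
};

// Byte-wise comparison and hashing are only safe when no padding bytes exist.
static_assert(std::has_unique_object_representations_v<ToyKey>);

// Plugging Hash() into a std::hash specialization, as done above for FSConfig,
// lets the struct key an unordered_map.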
25 src/video_core/shader/generator/profile.h Normal file
@ -0,0 +1,25 @@
|
||||
// Copyright 2023 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
namespace Pica::Shader {
|
||||
|
||||
struct Profile {
|
||||
bool has_separable_shaders{};
|
||||
bool has_clip_planes{};
|
||||
bool has_geometry_shader{};
|
||||
bool has_custom_border_color{};
|
||||
bool has_fragment_shader_interlock{};
|
||||
bool has_blend_minmax_factor{};
|
||||
bool has_minus_one_to_one_range{};
|
||||
bool has_logic_op{};
|
||||
bool has_gl_ext_framebuffer_fetch{};
|
||||
bool has_gl_arm_framebuffer_fetch{};
|
||||
bool has_gl_nv_fragment_shader_interlock{};
|
||||
bool has_gl_intel_fragment_shader_interlock{};
|
||||
bool is_vulkan{};
|
||||
};
|
||||
|
||||
} // namespace Pica::Shader
|
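Profile is the single struct both renderer backends fill to tell the shared generators what the host supports. A hedged sketch of populating and consuming it; the two functions are illustrative, not code from this change:

#include <string>

#include "video_core/shader/generator/profile.h"

// Backend side: every field defaults to false, so only supported features are switched on.
Pica::Shader::Profile MakeExampleProfile(bool is_gles) {
    return Pica::Shader::Profile{
        .has_separable_shaders = true,
        .has_clip_planes = true,
        .has_geometry_shader = true,
        .has_logic_op = !is_gles, // e.g. desktop GL has fixed-function logic ops, GLES does not
    };
}

// Generator side: emit shader-based emulation only when the host lacks the feature.
void AppendLogicOpEmulation(std::string& out, const Pica::Shader::Profile& profile) {
    if (!profile.has_logic_op) {
        out += "// ...shader-side logic op emulation would be emitted here...\n";
    }
}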
@ -9,218 +9,6 @@
|
||||
|
||||
namespace Pica::Shader::Generator {
|
||||
|
||||
PicaFSConfig::PicaFSConfig(const Pica::Regs& regs, bool has_fragment_shader_interlock,
|
||||
bool emulate_logic_op, bool emulate_custom_border_color,
|
||||
bool emulate_blend_minmax_factor, bool use_custom_normal_map) {
|
||||
state.scissor_test_mode.Assign(regs.rasterizer.scissor_test.mode);
|
||||
|
||||
state.depthmap_enable.Assign(regs.rasterizer.depthmap_enable);
|
||||
|
||||
state.alpha_test_func.Assign(regs.framebuffer.output_merger.alpha_test.enable
|
||||
? regs.framebuffer.output_merger.alpha_test.func.Value()
|
||||
: Pica::FramebufferRegs::CompareFunc::Always);
|
||||
|
||||
state.texture0_type.Assign(regs.texturing.texture0.type);
|
||||
|
||||
state.texture2_use_coord1.Assign(regs.texturing.main_config.texture2_use_coord1 != 0);
|
||||
|
||||
const auto pica_textures = regs.texturing.GetTextures();
|
||||
for (u32 tex_index = 0; tex_index < 3; tex_index++) {
|
||||
const auto config = pica_textures[tex_index].config;
|
||||
state.texture_border_color[tex_index].enable_s.Assign(
|
||||
emulate_custom_border_color &&
|
||||
config.wrap_s == Pica::TexturingRegs::TextureConfig::WrapMode::ClampToBorder);
|
||||
state.texture_border_color[tex_index].enable_t.Assign(
|
||||
emulate_custom_border_color &&
|
||||
config.wrap_t == Pica::TexturingRegs::TextureConfig::WrapMode::ClampToBorder);
|
||||
}
|
||||
|
||||
// Emulate logic op in the shader if not supported. This is mostly for mobile GPUs
|
||||
const bool needs_emulate_logic_op =
|
||||
emulate_logic_op && !regs.framebuffer.output_merger.alphablend_enable;
|
||||
|
||||
state.emulate_logic_op.Assign(needs_emulate_logic_op);
|
||||
if (needs_emulate_logic_op) {
|
||||
state.logic_op.Assign(regs.framebuffer.output_merger.logic_op);
|
||||
} else {
|
||||
state.logic_op.Assign(Pica::FramebufferRegs::LogicOp::NoOp);
|
||||
}
|
||||
|
||||
// Copy relevant tev stages fields.
|
||||
// We don't sync const_color here because of the high variance, it is a
|
||||
// shader uniform instead.
|
||||
const auto& tev_stages = regs.texturing.GetTevStages();
|
||||
DEBUG_ASSERT(state.tev_stages.size() == tev_stages.size());
|
||||
for (std::size_t i = 0; i < tev_stages.size(); i++) {
|
||||
const auto& tev_stage = tev_stages[i];
|
||||
state.tev_stages[i].sources_raw = tev_stage.sources_raw;
|
||||
state.tev_stages[i].modifiers_raw = tev_stage.modifiers_raw;
|
||||
state.tev_stages[i].ops_raw = tev_stage.ops_raw;
|
||||
state.tev_stages[i].scales_raw = tev_stage.scales_raw;
|
||||
if (tev_stage.color_op == Pica::TexturingRegs::TevStageConfig::Operation::Dot3_RGBA) {
|
||||
state.tev_stages[i].sources_raw &= 0xFFF;
|
||||
state.tev_stages[i].modifiers_raw &= 0xFFF;
|
||||
state.tev_stages[i].ops_raw &= 0xF;
|
||||
}
|
||||
}
|
||||
|
||||
state.fog_mode.Assign(regs.texturing.fog_mode);
|
||||
state.fog_flip.Assign(regs.texturing.fog_flip != 0);
|
||||
|
||||
state.combiner_buffer_input.Assign(
|
||||
regs.texturing.tev_combiner_buffer_input.update_mask_rgb.Value() |
|
||||
regs.texturing.tev_combiner_buffer_input.update_mask_a.Value() << 4);
|
||||
|
||||
// Fragment lighting
|
||||
state.lighting.enable.Assign(!regs.lighting.disable);
|
||||
if (state.lighting.enable) {
|
||||
state.lighting.src_num.Assign(regs.lighting.max_light_index + 1);
|
||||
|
||||
for (u32 light_index = 0; light_index < state.lighting.src_num; ++light_index) {
|
||||
const u32 num = regs.lighting.light_enable.GetNum(light_index);
|
||||
const auto& light = regs.lighting.light[num];
|
||||
state.lighting.light[light_index].num.Assign(num);
|
||||
state.lighting.light[light_index].directional.Assign(light.config.directional != 0);
|
||||
state.lighting.light[light_index].two_sided_diffuse.Assign(
|
||||
light.config.two_sided_diffuse != 0);
|
||||
state.lighting.light[light_index].geometric_factor_0.Assign(
|
||||
light.config.geometric_factor_0 != 0);
|
||||
state.lighting.light[light_index].geometric_factor_1.Assign(
|
||||
light.config.geometric_factor_1 != 0);
|
||||
state.lighting.light[light_index].dist_atten_enable.Assign(
|
||||
!regs.lighting.IsDistAttenDisabled(num));
|
||||
state.lighting.light[light_index].spot_atten_enable.Assign(
|
||||
!regs.lighting.IsSpotAttenDisabled(num));
|
||||
state.lighting.light[light_index].shadow_enable.Assign(
|
||||
!regs.lighting.IsShadowDisabled(num));
|
||||
}
|
||||
|
||||
state.lighting.lut_d0.enable.Assign(regs.lighting.config1.disable_lut_d0 == 0);
|
||||
if (state.lighting.lut_d0.enable) {
|
||||
state.lighting.lut_d0.abs_input.Assign(regs.lighting.abs_lut_input.disable_d0 == 0);
|
||||
state.lighting.lut_d0.type.Assign(regs.lighting.lut_input.d0.Value());
|
||||
state.lighting.lut_d0.scale =
|
||||
regs.lighting.lut_scale.GetScale(regs.lighting.lut_scale.d0);
|
||||
}
|
||||
|
||||
state.lighting.lut_d1.enable.Assign(regs.lighting.config1.disable_lut_d1 == 0);
|
||||
if (state.lighting.lut_d1.enable) {
|
||||
state.lighting.lut_d1.abs_input.Assign(regs.lighting.abs_lut_input.disable_d1 == 0);
|
||||
state.lighting.lut_d1.type.Assign(regs.lighting.lut_input.d1.Value());
|
||||
state.lighting.lut_d1.scale =
|
||||
regs.lighting.lut_scale.GetScale(regs.lighting.lut_scale.d1);
|
||||
}
|
||||
|
||||
// this is a dummy field due to lack of the corresponding register
|
||||
state.lighting.lut_sp.enable.Assign(1);
|
||||
state.lighting.lut_sp.abs_input.Assign(regs.lighting.abs_lut_input.disable_sp == 0);
|
||||
state.lighting.lut_sp.type.Assign(regs.lighting.lut_input.sp.Value());
|
||||
state.lighting.lut_sp.scale = regs.lighting.lut_scale.GetScale(regs.lighting.lut_scale.sp);
|
||||
|
||||
state.lighting.lut_fr.enable.Assign(regs.lighting.config1.disable_lut_fr == 0);
|
||||
if (state.lighting.lut_fr.enable) {
|
||||
state.lighting.lut_fr.abs_input.Assign(regs.lighting.abs_lut_input.disable_fr == 0);
|
||||
state.lighting.lut_fr.type.Assign(regs.lighting.lut_input.fr.Value());
|
||||
state.lighting.lut_fr.scale =
|
||||
regs.lighting.lut_scale.GetScale(regs.lighting.lut_scale.fr);
|
||||
}
|
||||
|
||||
state.lighting.lut_rr.enable.Assign(regs.lighting.config1.disable_lut_rr == 0);
|
||||
if (state.lighting.lut_rr.enable) {
|
||||
state.lighting.lut_rr.abs_input.Assign(regs.lighting.abs_lut_input.disable_rr == 0);
|
||||
state.lighting.lut_rr.type.Assign(regs.lighting.lut_input.rr.Value());
|
||||
state.lighting.lut_rr.scale =
|
||||
regs.lighting.lut_scale.GetScale(regs.lighting.lut_scale.rr);
|
||||
}
|
||||
|
||||
state.lighting.lut_rg.enable.Assign(regs.lighting.config1.disable_lut_rg == 0);
|
||||
if (state.lighting.lut_rg.enable) {
|
||||
state.lighting.lut_rg.abs_input.Assign(regs.lighting.abs_lut_input.disable_rg == 0);
|
||||
state.lighting.lut_rg.type.Assign(regs.lighting.lut_input.rg.Value());
|
||||
state.lighting.lut_rg.scale =
|
||||
regs.lighting.lut_scale.GetScale(regs.lighting.lut_scale.rg);
|
||||
}
|
||||
|
||||
state.lighting.lut_rb.enable.Assign(regs.lighting.config1.disable_lut_rb == 0);
|
||||
if (state.lighting.lut_rb.enable) {
|
||||
state.lighting.lut_rb.abs_input.Assign(regs.lighting.abs_lut_input.disable_rb == 0);
|
||||
state.lighting.lut_rb.type.Assign(regs.lighting.lut_input.rb.Value());
|
||||
state.lighting.lut_rb.scale =
|
||||
regs.lighting.lut_scale.GetScale(regs.lighting.lut_scale.rb);
|
||||
}
|
||||
|
||||
state.lighting.config.Assign(regs.lighting.config0.config);
|
||||
state.lighting.enable_primary_alpha.Assign(regs.lighting.config0.enable_primary_alpha);
|
||||
state.lighting.enable_secondary_alpha.Assign(regs.lighting.config0.enable_secondary_alpha);
|
||||
state.lighting.bump_mode.Assign(regs.lighting.config0.bump_mode);
|
||||
state.lighting.bump_selector.Assign(regs.lighting.config0.bump_selector);
|
||||
state.lighting.bump_renorm.Assign(regs.lighting.config0.disable_bump_renorm == 0);
|
||||
state.lighting.clamp_highlights.Assign(regs.lighting.config0.clamp_highlights != 0);
|
||||
|
||||
state.lighting.enable_shadow.Assign(regs.lighting.config0.enable_shadow != 0);
|
||||
if (state.lighting.enable_shadow) {
|
||||
state.lighting.shadow_primary.Assign(regs.lighting.config0.shadow_primary != 0);
|
||||
state.lighting.shadow_secondary.Assign(regs.lighting.config0.shadow_secondary != 0);
|
||||
state.lighting.shadow_invert.Assign(regs.lighting.config0.shadow_invert != 0);
|
||||
state.lighting.shadow_alpha.Assign(regs.lighting.config0.shadow_alpha != 0);
|
||||
state.lighting.shadow_selector.Assign(regs.lighting.config0.shadow_selector);
|
||||
}
|
||||
}
|
||||
|
||||
state.proctex.enable.Assign(regs.texturing.main_config.texture3_enable);
|
||||
if (state.proctex.enable) {
|
||||
state.proctex.coord.Assign(regs.texturing.main_config.texture3_coordinates);
|
||||
state.proctex.u_clamp.Assign(regs.texturing.proctex.u_clamp);
|
||||
state.proctex.v_clamp.Assign(regs.texturing.proctex.v_clamp);
|
||||
state.proctex.color_combiner.Assign(regs.texturing.proctex.color_combiner);
|
||||
state.proctex.alpha_combiner.Assign(regs.texturing.proctex.alpha_combiner);
|
||||
state.proctex.separate_alpha.Assign(regs.texturing.proctex.separate_alpha);
|
||||
state.proctex.noise_enable.Assign(regs.texturing.proctex.noise_enable);
|
||||
state.proctex.u_shift.Assign(regs.texturing.proctex.u_shift);
|
||||
state.proctex.v_shift.Assign(regs.texturing.proctex.v_shift);
|
||||
state.proctex.lut_width = regs.texturing.proctex_lut.width;
|
||||
state.proctex.lut_offset0 = regs.texturing.proctex_lut_offset.level0;
|
||||
state.proctex.lut_offset1 = regs.texturing.proctex_lut_offset.level1;
|
||||
state.proctex.lut_offset2 = regs.texturing.proctex_lut_offset.level2;
|
||||
state.proctex.lut_offset3 = regs.texturing.proctex_lut_offset.level3;
|
||||
state.proctex.lod_min = regs.texturing.proctex_lut.lod_min;
|
||||
state.proctex.lod_max = regs.texturing.proctex_lut.lod_max;
|
||||
state.proctex.lut_filter.Assign(regs.texturing.proctex_lut.filter);
|
||||
}
|
||||
|
||||
const auto alpha_eq = regs.framebuffer.output_merger.alpha_blending.blend_equation_a.Value();
|
||||
const auto rgb_eq = regs.framebuffer.output_merger.alpha_blending.blend_equation_rgb.Value();
|
||||
if (emulate_blend_minmax_factor && regs.framebuffer.output_merger.alphablend_enable) {
|
||||
if (rgb_eq == Pica::FramebufferRegs::BlendEquation::Max ||
|
||||
rgb_eq == Pica::FramebufferRegs::BlendEquation::Min) {
|
||||
state.rgb_blend.emulate_blending = true;
|
||||
state.rgb_blend.eq = rgb_eq;
|
||||
state.rgb_blend.src_factor =
|
||||
regs.framebuffer.output_merger.alpha_blending.factor_source_rgb;
|
||||
state.rgb_blend.dst_factor =
|
||||
regs.framebuffer.output_merger.alpha_blending.factor_dest_rgb;
|
||||
}
|
||||
if (alpha_eq == Pica::FramebufferRegs::BlendEquation::Max ||
|
||||
alpha_eq == Pica::FramebufferRegs::BlendEquation::Min) {
|
||||
state.alpha_blend.emulate_blending = true;
|
||||
state.alpha_blend.eq = alpha_eq;
|
||||
state.alpha_blend.src_factor =
|
||||
regs.framebuffer.output_merger.alpha_blending.factor_source_a;
|
||||
state.alpha_blend.dst_factor =
|
||||
regs.framebuffer.output_merger.alpha_blending.factor_dest_a;
|
||||
}
|
||||
}
|
||||
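The factors are recorded here because the fixed-function GL/Vulkan Min/Max blend operations ignore blend factors, while the PICA configuration captured above still supplies source and destination factors for those equations; when emulate_blend_minmax_factor is set, the fragment shader has to apply them itself. A minimal standalone sketch of that computation, with hypothetical names and plain floats in place of the emulator's register types:

#include <algorithm>
#include <array>
#include <cstddef>

enum class BlendEquation { Min, Max };
using Color = std::array<float, 4>;

// What the shader-side emulation has to compute: apply the blend factors first,
// then take the component-wise min/max of the weighted source and destination.
Color EmulateMinMaxBlend(const Color& src, const Color& dst, const Color& src_factor,
                         const Color& dst_factor, BlendEquation eq) {
    Color out{};
    for (std::size_t i = 0; i < 4; ++i) {
        const float s = src[i] * src_factor[i];
        const float d = dst[i] * dst_factor[i];
        out[i] = (eq == BlendEquation::Min) ? std::min(s, d) : std::max(s, d);
    }
    return out;
}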
|
||||
state.shadow_rendering.Assign(regs.framebuffer.output_merger.fragment_operation_mode ==
|
||||
Pica::FramebufferRegs::FragmentOperationMode::Shadow);
|
||||
state.shadow_texture_orthographic.Assign(regs.texturing.shadow.orthographic != 0);
|
||||
|
||||
// We only need fragment shader interlock when shadow rendering.
|
||||
state.use_fragment_shader_interlock.Assign(state.shadow_rendering &&
|
||||
has_fragment_shader_interlock);
|
||||
state.use_custom_normal_map.Assign(use_custom_normal_map);
|
||||
}
|
||||
|
||||
void PicaGSConfigState::Init(const Pica::Regs& regs, bool use_clip_planes_) {
|
||||
use_clip_planes = use_clip_planes_;
|
||||
|
||||
|
@ -28,141 +28,6 @@ enum Attributes {
|
||||
ATTRIBUTE_VIEW,
|
||||
};
|
||||
|
||||
// Doesn't include const_color because we don't sync it, see comment in BuildFromRegs()
|
||||
struct TevStageConfigRaw {
|
||||
u32 sources_raw;
|
||||
u32 modifiers_raw;
|
||||
u32 ops_raw;
|
||||
u32 scales_raw;
|
||||
explicit operator Pica::TexturingRegs::TevStageConfig() const noexcept {
|
||||
return {
|
||||
.sources_raw = sources_raw,
|
||||
.modifiers_raw = modifiers_raw,
|
||||
.ops_raw = ops_raw,
|
||||
.const_color = 0,
|
||||
.scales_raw = scales_raw,
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
struct PicaFSConfigState {
|
||||
union {
|
||||
BitField<0, 3, Pica::FramebufferRegs::CompareFunc> alpha_test_func;
|
||||
BitField<3, 2, Pica::RasterizerRegs::ScissorMode> scissor_test_mode;
|
||||
BitField<5, 3, Pica::TexturingRegs::TextureConfig::TextureType> texture0_type;
|
||||
BitField<8, 1, u32> texture2_use_coord1;
|
||||
BitField<9, 8, u32> combiner_buffer_input;
|
||||
BitField<17, 1, Pica::RasterizerRegs::DepthBuffering> depthmap_enable;
|
||||
BitField<18, 3, Pica::TexturingRegs::FogMode> fog_mode;
|
||||
BitField<21, 1, u32> fog_flip;
|
||||
BitField<22, 1, u32> emulate_logic_op;
|
||||
BitField<23, 4, Pica::FramebufferRegs::LogicOp> logic_op;
|
||||
BitField<27, 1, u32> shadow_rendering;
|
||||
BitField<28, 1, u32> shadow_texture_orthographic;
|
||||
BitField<29, 1, u32> use_fragment_shader_interlock;
|
||||
BitField<30, 1, u32> use_custom_normal_map;
|
||||
};
|
||||
|
||||
union {
|
||||
BitField<0, 1, u32> enable_s;
|
||||
BitField<1, 1, u32> enable_t;
|
||||
} texture_border_color[3];
|
||||
|
||||
std::array<TevStageConfigRaw, 6> tev_stages;
|
||||
|
||||
struct {
|
||||
union {
|
||||
BitField<0, 3, u16> num;
|
||||
BitField<3, 1, u16> directional;
|
||||
BitField<4, 1, u16> two_sided_diffuse;
|
||||
BitField<5, 1, u16> dist_atten_enable;
|
||||
BitField<6, 1, u16> spot_atten_enable;
|
||||
BitField<7, 1, u16> geometric_factor_0;
|
||||
BitField<8, 1, u16> geometric_factor_1;
|
||||
BitField<9, 1, u16> shadow_enable;
|
||||
} light[8];
|
||||
|
||||
union {
|
||||
BitField<0, 1, u32> enable;
|
||||
BitField<1, 4, u32> src_num;
|
||||
BitField<5, 2, Pica::LightingRegs::LightingBumpMode> bump_mode;
|
||||
BitField<7, 2, u32> bump_selector;
|
||||
BitField<9, 1, u32> bump_renorm;
|
||||
BitField<10, 1, u32> clamp_highlights;
|
||||
BitField<11, 4, Pica::LightingRegs::LightingConfig> config;
|
||||
BitField<15, 1, u32> enable_primary_alpha;
|
||||
BitField<16, 1, u32> enable_secondary_alpha;
|
||||
BitField<17, 1, u32> enable_shadow;
|
||||
BitField<18, 1, u32> shadow_primary;
|
||||
BitField<19, 1, u32> shadow_secondary;
|
||||
BitField<20, 1, u32> shadow_invert;
|
||||
BitField<21, 1, u32> shadow_alpha;
|
||||
BitField<22, 2, u32> shadow_selector;
|
||||
};
|
||||
|
||||
struct {
|
||||
union {
|
||||
BitField<0, 1, u32> enable;
|
||||
BitField<1, 1, u32> abs_input;
|
||||
BitField<2, 3, Pica::LightingRegs::LightingLutInput> type;
|
||||
};
|
||||
float scale;
|
||||
} lut_d0, lut_d1, lut_sp, lut_fr, lut_rr, lut_rg, lut_rb;
|
||||
} lighting;
|
||||
|
||||
struct {
|
||||
union {
|
||||
BitField<0, 1, u32> enable;
|
||||
BitField<1, 2, u32> coord;
|
||||
BitField<3, 3, Pica::TexturingRegs::ProcTexClamp> u_clamp;
|
||||
BitField<6, 3, Pica::TexturingRegs::ProcTexClamp> v_clamp;
|
||||
BitField<9, 4, Pica::TexturingRegs::ProcTexCombiner> color_combiner;
|
||||
BitField<13, 4, Pica::TexturingRegs::ProcTexCombiner> alpha_combiner;
|
||||
BitField<17, 3, Pica::TexturingRegs::ProcTexFilter> lut_filter;
|
||||
BitField<20, 1, u32> separate_alpha;
|
||||
BitField<21, 1, u32> noise_enable;
|
||||
BitField<22, 2, Pica::TexturingRegs::ProcTexShift> u_shift;
|
||||
BitField<24, 2, Pica::TexturingRegs::ProcTexShift> v_shift;
|
||||
};
|
||||
s32 lut_width;
|
||||
s32 lut_offset0;
|
||||
s32 lut_offset1;
|
||||
s32 lut_offset2;
|
||||
s32 lut_offset3;
|
||||
u8 lod_min;
|
||||
u8 lod_max;
|
||||
} proctex;
|
||||
|
||||
struct {
|
||||
bool emulate_blending;
|
||||
Pica::FramebufferRegs::BlendEquation eq;
|
||||
Pica::FramebufferRegs::BlendFactor src_factor;
|
||||
Pica::FramebufferRegs::BlendFactor dst_factor;
|
||||
} rgb_blend, alpha_blend;
|
||||
};
|
||||
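PicaFSConfigState packs every codegen-relevant register field into unions of BitFields over a few machine words, so two register states that would produce the same shader collapse onto the same key (note how TevStageConfigRaw above deliberately omits const_color for the same reason). A standalone sketch of the packing idea using plain shifts and masks; the field layout here is hypothetical and much smaller than the real struct:

#include <cstddef>
#include <cstdint>
#include <functional>

// Illustrative only: pack a few shader-relevant flags into one 32-bit word so that
// equality and hashing of the whole key are single-word operations.
struct TinyFSKey {
    std::uint32_t raw = 0;

    void SetAlphaTestFunc(std::uint32_t func) { // bits 0-2
        raw = (raw & ~0x7u) | (func & 0x7u);
    }
    void SetScissorMode(std::uint32_t mode) { // bits 3-4
        raw = (raw & ~(0x3u << 3)) | ((mode & 0x3u) << 3);
    }
    void SetFogFlip(bool flip) { // bit 5
        raw = (raw & ~(1u << 5)) | (static_cast<std::uint32_t>(flip) << 5);
    }

    bool operator==(const TinyFSKey& other) const {
        return raw == other.raw;
    }
};

namespace std {
template <>
struct hash<TinyFSKey> {
    std::size_t operator()(const TinyFSKey& key) const noexcept {
        return std::hash<std::uint32_t>{}(key.raw);
    }
};
} // namespace std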
|
||||
/**
|
||||
* This struct contains all state used to generate the GLSL fragment shader that emulates the
|
||||
* current Pica register configuration. This struct is used as a cache key for generated GLSL shader
|
||||
* programs. The functions in glsl_shader_gen.cpp should retrieve state from this struct only, not
|
||||
* by directly accessing Pica registers. This should reduce the risk of bugs in shader generation
|
||||
* where Pica state is not being captured in the shader cache key, thereby resulting in (what should
|
||||
* be) two separate shaders sharing the same key.
|
||||
*/
|
||||
struct PicaFSConfig : Common::HashableStruct<PicaFSConfigState> {
|
||||
PicaFSConfig(const Pica::Regs& regs, bool has_fragment_shader_interlock, bool emulate_logic_op,
|
||||
bool emulate_custom_border_color, bool emulate_blend_minmax_factor,
|
||||
bool use_custom_normal_map = false);
|
||||
|
||||
[[nodiscard]] bool TevStageUpdatesCombinerBufferColor(unsigned stage_index) const {
|
||||
return (stage_index < 4) && (state.combiner_buffer_input & (1 << stage_index));
|
||||
}
|
||||
|
||||
[[nodiscard]] bool TevStageUpdatesCombinerBufferAlpha(unsigned stage_index) const {
|
||||
return (stage_index < 4) && ((state.combiner_buffer_input >> 4) & (1 << stage_index));
|
||||
}
|
||||
};
|
||||
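Because PicaFSConfig wraps the packed state in Common::HashableStruct and std::hash is specialized for it further down, the whole config can serve directly as the key of a compiled-shader cache; the TevStageUpdatesCombinerBuffer* helpers above read the same packed combiner_buffer_input field, with bits 0-3 selecting which of the first four TEV stages update the combiner buffer color and bits 4-7 doing the same for alpha. A hedged sketch of the cache-lookup pattern with stand-in types (the real caches live in the renderer backends; all names here are hypothetical):

#include <cstddef>
#include <string>
#include <unordered_map>

// Stand-in for a hashable, equality-comparable shader config key.
struct FakeFSConfig {
    std::size_t packed_state; // imagine the packed PicaFSConfigState bytes here
    bool operator==(const FakeFSConfig& other) const {
        return packed_state == other.packed_state;
    }
};

struct FakeFSConfigHash {
    std::size_t operator()(const FakeFSConfig& key) const noexcept {
        return key.packed_state;
    }
};

// Generated shaders are cached by config, so an already-seen register
// combination costs one hash lookup instead of a full shader generation.
std::unordered_map<FakeFSConfig, std::string, FakeFSConfigHash> shader_cache;

const std::string& GetOrGenerate(const FakeFSConfig& key) {
    auto [it, inserted] = shader_cache.try_emplace(key);
    if (inserted) {
        it->second = "/* generate GLSL or SPIR-V for this config here */";
    }
    return it->second;
}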
|
||||
enum class AttribLoadFlags {
|
||||
Float = 1 << 0,
|
||||
Sint = 1 << 1,
|
||||
@ -238,13 +103,6 @@ struct PicaFixedGSConfig : Common::HashableStruct<PicaGSConfigState> {
|
||||
} // namespace Pica::Shader::Generator
|
||||
|
||||
namespace std {
|
||||
template <>
|
||||
struct hash<Pica::Shader::Generator::PicaFSConfig> {
|
||||
std::size_t operator()(const Pica::Shader::Generator::PicaFSConfig& k) const noexcept {
|
||||
return k.Hash();
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct hash<Pica::Shader::Generator::PicaVSConfig> {
|
||||
std::size_t operator()(const Pica::Shader::Generator::PicaVSConfig& k) const noexcept {
|
||||
|
@ -2,9 +2,9 @@
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "core/core.h"
|
||||
#include "core/telemetry_session.h"
|
||||
#include "video_core/shader/generator/spv_shader_gen.h"
|
||||
#include "video_core/shader/generator/spv_fs_shader_gen.h"
|
||||
|
||||
namespace Pica::Shader::Generator::SPIRV {
|
||||
|
||||
using Pica::FramebufferRegs;
|
||||
using Pica::LightingRegs;
|
||||
@ -12,12 +12,10 @@ using Pica::RasterizerRegs;
|
||||
using Pica::TexturingRegs;
|
||||
using TevStageConfig = TexturingRegs::TevStageConfig;
|
||||
|
||||
namespace Pica::Shader::Generator::SPIRV {
|
||||
|
||||
constexpr u32 SPIRV_VERSION_1_3 = 0x00010300;
|
||||
|
||||
FragmentModule::FragmentModule(Core::TelemetrySession& telemetry_, const PicaFSConfig& config_)
|
||||
: Sirit::Module{SPIRV_VERSION_1_3}, telemetry{telemetry_}, config{config_} {
|
||||
FragmentModule::FragmentModule(const FSConfig& config_)
|
||||
: Sirit::Module{SPIRV_VERSION_1_3}, config{config_} {
|
||||
DefineArithmeticTypes();
|
||||
DefineUniformStructs();
|
||||
DefineInterface();
|
||||
@ -37,38 +35,32 @@ void FragmentModule::Generate() {
|
||||
secondary_fragment_color = ConstF32(0.f, 0.f, 0.f, 0.f);
|
||||
|
||||
// Do not do any sort of processing if it's obvious we're not going to pass the alpha test
|
||||
if (config.state.alpha_test_func == Pica::FramebufferRegs::CompareFunc::Never) {
|
||||
if (config.framebuffer.alpha_test_func == Pica::FramebufferRegs::CompareFunc::Never) {
|
||||
OpKill();
|
||||
OpFunctionEnd();
|
||||
return;
|
||||
}
|
||||
|
||||
// Check if the fragment is outside scissor rectangle
|
||||
// Append the scissor and depth tests
|
||||
WriteDepth();
|
||||
WriteScissor();
|
||||
|
||||
// Write shader bytecode to emulate all enabled PICA lights
|
||||
if (config.state.lighting.enable) {
|
||||
WriteLighting();
|
||||
}
|
||||
WriteLighting();
|
||||
|
||||
combiner_buffer = ConstF32(0.f, 0.f, 0.f, 0.f);
|
||||
next_combiner_buffer = GetShaderDataMember(vec_ids.Get(4), ConstS32(26));
|
||||
last_tex_env_out = rounded_primary_color;
|
||||
|
||||
// Write shader bytecode to emulate PICA TEV stages
|
||||
for (std::size_t index = 0; index < config.state.tev_stages.size(); ++index) {
|
||||
WriteTevStage(static_cast<s32>(index));
|
||||
for (u32 index = 0; index < config.texture.tev_stages.size(); ++index) {
|
||||
WriteTevStage(index);
|
||||
}
|
||||
|
||||
WriteAlphaTestCondition(config.state.alpha_test_func);
|
||||
|
||||
// After perspective divide, OpenGL transform z_over_w from [-1, 1] to [near, far]. Here we use
|
||||
// default near = 0 and far = 1, and undo the transformation to get the original z_over_w, then
|
||||
// do our own transformation according to PICA specification.
|
||||
WriteDepth();
|
||||
WriteAlphaTestCondition(config.framebuffer.alpha_test_func);
|
||||
|
||||
// Emulate the fog
|
||||
switch (config.state.fog_mode) {
|
||||
switch (config.texture.fog_mode) {
|
||||
case TexturingRegs::FogMode::Fog:
|
||||
WriteFog();
|
||||
break;
|
||||
@ -80,29 +72,27 @@ void FragmentModule::Generate() {
|
||||
}
|
||||
|
||||
Id color{Byteround(last_tex_env_out, 4)};
|
||||
if (config.state.emulate_logic_op) {
|
||||
switch (config.state.logic_op) {
|
||||
case FramebufferRegs::LogicOp::Clear:
|
||||
color = ConstF32(0.f, 0.f, 0.f, 0.f);
|
||||
break;
|
||||
case FramebufferRegs::LogicOp::Set:
|
||||
color = ConstF32(1.f, 1.f, 1.f, 1.f);
|
||||
break;
|
||||
case FramebufferRegs::LogicOp::Copy:
|
||||
// Take the color output as-is
|
||||
break;
|
||||
case FramebufferRegs::LogicOp::CopyInverted:
|
||||
// out += "color = ~color;\n";
|
||||
break;
|
||||
case FramebufferRegs::LogicOp::NoOp:
|
||||
// We need to discard the color, but not necessarily the depth. This is not possible
|
||||
// with fragment shader alone, so we emulate this behavior with the color mask.
|
||||
break;
|
||||
default:
|
||||
LOG_CRITICAL(HW_GPU, "Unhandled logic_op {:x}",
|
||||
static_cast<u32>(config.state.logic_op.Value()));
|
||||
UNIMPLEMENTED();
|
||||
}
|
||||
switch (config.framebuffer.logic_op) {
|
||||
case FramebufferRegs::LogicOp::Clear:
|
||||
color = ConstF32(0.f, 0.f, 0.f, 0.f);
|
||||
break;
|
||||
case FramebufferRegs::LogicOp::Set:
|
||||
color = ConstF32(1.f, 1.f, 1.f, 1.f);
|
||||
break;
|
||||
case FramebufferRegs::LogicOp::Copy:
|
||||
// Take the color output as-is
|
||||
break;
|
||||
case FramebufferRegs::LogicOp::CopyInverted:
|
||||
// out += "color = ~color;\n";
|
||||
break;
|
||||
case FramebufferRegs::LogicOp::NoOp:
|
||||
// We need to discard the color, but not necessarily the depth. This is not possible
|
||||
// with fragment shader alone, so we emulate this behavior with the color mask.
|
||||
break;
|
||||
default:
|
||||
LOG_CRITICAL(HW_GPU, "Unhandled logic_op {:x}",
|
||||
static_cast<u32>(config.framebuffer.logic_op.Value()));
|
||||
UNIMPLEMENTED();
|
||||
}
|
||||
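With emulate_logic_op, only the logic ops that do not need the destination value can be folded into the generated shader: Clear and Set become constant colors, Copy passes the color through, CopyInverted (still commented out above) would flip every bit, and NoOp is handled with the color mask rather than in the shader. A standalone sketch of what those operations mean on an 8-bit-per-channel color (hypothetical helper, not the generator's code):

#include <array>
#include <cstdint>

enum class LogicOp { Clear, Set, Copy, CopyInverted, NoOp };
using Rgba8 = std::array<std::uint8_t, 4>;

// Logic ops are defined on the integer framebuffer values; the shader-side
// fallback can only express the ones that ignore the destination color.
Rgba8 ApplyLogicOp(LogicOp op, Rgba8 src) {
    switch (op) {
    case LogicOp::Clear:
        return {0, 0, 0, 0}; // force every bit to 0
    case LogicOp::Set:
        return {255, 255, 255, 255}; // force every bit to 1
    case LogicOp::CopyInverted:
        for (auto& channel : src) {
            channel = static_cast<std::uint8_t>(~channel); // bitwise NOT of the source
        }
        return src;
    case LogicOp::Copy:
    case LogicOp::NoOp: // NoOp keeps the destination; emulated via the color mask, not here
    default:
        return src;
    }
}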
|
||||
// Write output color
|
||||
@ -119,7 +109,7 @@ void FragmentModule::WriteDepth() {
|
||||
const Id depth_scale{GetShaderDataMember(f32_id, ConstS32(2))};
|
||||
const Id depth_offset{GetShaderDataMember(f32_id, ConstS32(3))};
|
||||
depth = OpFma(f32_id, z_over_w, depth_scale, depth_offset);
|
||||
if (config.state.depthmap_enable == Pica::RasterizerRegs::DepthBuffering::WBuffering) {
|
||||
if (config.framebuffer.depthmap_enable == Pica::RasterizerRegs::DepthBuffering::WBuffering) {
|
||||
const Id gl_frag_coord_w{
|
||||
OpLoad(f32_id, OpAccessChain(input_pointer_id, gl_frag_coord_id, ConstU32(3u)))};
|
||||
depth = OpFDiv(f32_id, depth, gl_frag_coord_w);
|
||||
@ -128,7 +118,7 @@ void FragmentModule::WriteDepth() {
|
||||
}
|
||||
|
||||
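WriteDepth applies the depth scale and offset from the uniform block to z_over_w and, when the registers select W-buffering, divides the result by the fragment's w so depth varies linearly in screen space. The same arithmetic on plain floats (hypothetical parameter names):

// depth = z_over_w * scale + offset, optionally divided by w for W-buffering;
// mirrors the OpFma / OpFDiv sequence above.
float ComputePicaDepth(float z_over_w, float frag_w, float depth_scale, float depth_offset,
                       bool w_buffering) {
    float depth = z_over_w * depth_scale + depth_offset;
    if (w_buffering) {
        depth /= frag_w; // W-buffering stores depth/w instead of depth
    }
    return depth;
}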
void FragmentModule::WriteScissor() {
|
||||
if (config.state.scissor_test_mode == RasterizerRegs::ScissorMode::Disabled) {
|
||||
if (config.framebuffer.scissor_test_mode == RasterizerRegs::ScissorMode::Disabled) {
|
||||
return;
|
||||
}
|
||||
|
||||
@ -149,7 +139,7 @@ void FragmentModule::WriteScissor() {
|
||||
const Id cond2{OpFOrdLessThan(bvec_ids.Get(2), gl_frag_coord_xy, scissor_2)};
|
||||
|
||||
Id result{OpAll(bool_id, OpCompositeConstruct(bvec_ids.Get(4), cond1, cond2))};
|
||||
if (config.state.scissor_test_mode == RasterizerRegs::ScissorMode::Include) {
|
||||
if (config.framebuffer.scissor_test_mode == RasterizerRegs::ScissorMode::Include) {
|
||||
result = OpLogicalNot(bool_id, result);
|
||||
}
|
||||
|
||||
@ -167,7 +157,7 @@ void FragmentModule::WriteScissor() {
|
||||
void FragmentModule::WriteFog() {
|
||||
// Get index into fog LUT
|
||||
Id fog_index{};
|
||||
if (config.state.fog_flip) {
|
||||
if (config.texture.fog_flip) {
|
||||
fog_index = OpFMul(f32_id, OpFSub(f32_id, ConstF32(1.f), depth), ConstF32(128.f));
|
||||
} else {
|
||||
fog_index = OpFMul(f32_id, depth, ConstF32(128.f));
|
||||
@ -201,14 +191,17 @@ void FragmentModule::WriteFog() {
|
||||
|
||||
void FragmentModule::WriteGas() {
|
||||
// TODO: Implement me
|
||||
telemetry.AddField(Common::Telemetry::FieldType::Session, "VideoCore_Pica_UseGasMode", true);
|
||||
LOG_CRITICAL(Render, "Unimplemented gas mode");
|
||||
OpKill();
|
||||
OpFunctionEnd();
|
||||
}
|
||||
|
||||
void FragmentModule::WriteLighting() {
|
||||
const auto& lighting = config.state.lighting;
|
||||
if (!config.lighting.enable) {
|
||||
return;
|
||||
}
|
||||
|
||||
const auto& lighting = config.lighting;
|
||||
|
||||
// Define lighting globals
|
||||
Id diffuse_sum{ConstF32(0.f, 0.f, 0.f, 1.f)};
|
||||
@ -363,7 +356,7 @@ void FragmentModule::WriteLighting() {
|
||||
const Id sampler_index{ConstU32(static_cast<u32>(sampler))};
|
||||
if (abs) {
|
||||
// LUT index is in the range of (0.0, 1.0)
|
||||
index = lighting.light[light_num].two_sided_diffuse
|
||||
index = lighting.lights[light_num].two_sided_diffuse
|
||||
? OpFAbs(f32_id, index)
|
||||
: OpFMax(f32_id, index, ConstF32(0.f));
|
||||
return lookup_lighting_lut_unsigned(sampler_index, index);
|
||||
@ -375,11 +368,12 @@ void FragmentModule::WriteLighting() {
|
||||
|
||||
// Write the code to emulate each enabled light
|
||||
for (u32 light_index = 0; light_index < lighting.src_num; ++light_index) {
|
||||
const auto& light_config = lighting.light[light_index];
|
||||
const auto& light_config = lighting.lights[light_index];
|
||||
|
||||
const auto GetLightMember = [&](s32 member) -> Id {
|
||||
const Id member_type = member < 6 ? vec_ids.Get(3) : f32_id;
|
||||
const Id light_num{ConstS32(static_cast<s32>(lighting.light[light_index].num.Value()))};
|
||||
const Id light_num{
|
||||
ConstS32(static_cast<s32>(lighting.lights[light_index].num.Value()))};
|
||||
return GetShaderDataMember(member_type, ConstS32(24), light_num, ConstS32(member));
|
||||
};
|
||||
|
||||
@ -595,7 +589,7 @@ void FragmentModule::WriteLighting() {
|
||||
|
||||
void FragmentModule::WriteTevStage(s32 index) {
|
||||
const TexturingRegs::TevStageConfig stage =
|
||||
static_cast<const TexturingRegs::TevStageConfig>(config.state.tev_stages[index]);
|
||||
static_cast<const TexturingRegs::TevStageConfig>(config.texture.tev_stages[index]);
|
||||
|
||||
// Detects if a TEV stage is configured to be skipped (to avoid generating unnecessary code)
|
||||
const auto is_passthrough_tev_stage = [](const TevStageConfig& stage) {
|
||||
@ -860,8 +854,6 @@ Id FragmentModule::AppendProcTexCombineAndMap(ProcTexCombiner combiner, Id u, Id
|
||||
}
|
||||
|
||||
void FragmentModule::DefineTexSampler(u32 texture_unit) {
|
||||
const PicaFSConfigState& state = config.state;
|
||||
|
||||
const Id func_type{TypeFunction(vec_ids.Get(4))};
|
||||
sample_tex_unit_func[texture_unit] =
|
||||
OpFunction(vec_ids.Get(4), spv::FunctionControlMask::MaskNone, func_type);
|
||||
@ -869,14 +861,15 @@ void FragmentModule::DefineTexSampler(u32 texture_unit) {
|
||||
|
||||
const Id zero_vec{ConstF32(0.f, 0.f, 0.f, 0.f)};
|
||||
|
||||
if (texture_unit == 0 && state.texture0_type == TexturingRegs::TextureConfig::Disabled) {
|
||||
if (texture_unit == 0 &&
|
||||
config.texture.texture0_type == TexturingRegs::TextureConfig::Disabled) {
|
||||
OpReturnValue(zero_vec);
|
||||
OpFunctionEnd();
|
||||
return;
|
||||
}
|
||||
|
||||
if (texture_unit == 3) {
|
||||
if (state.proctex.enable) {
|
||||
if (config.proctex.enable) {
|
||||
OpReturnValue(ProcTexSampler());
|
||||
} else {
|
||||
OpReturnValue(zero_vec);
|
||||
@ -888,10 +881,10 @@ void FragmentModule::DefineTexSampler(u32 texture_unit) {
|
||||
const Id border_label{OpLabel()};
|
||||
const Id not_border_label{OpLabel()};
|
||||
|
||||
u32 texcoord_num = texture_unit == 2 && state.texture2_use_coord1 ? 1 : texture_unit;
|
||||
u32 texcoord_num = texture_unit == 2 && config.texture.texture2_use_coord1 ? 1 : texture_unit;
|
||||
const Id texcoord{OpLoad(vec_ids.Get(2), texcoord_id[texcoord_num])};
|
||||
|
||||
auto& texture_border_color = state.texture_border_color[texture_unit];
|
||||
const auto& texture_border_color = config.texture.texture_border_color[texture_unit];
|
||||
if (texture_border_color.enable_s || texture_border_color.enable_t) {
|
||||
const Id texcoord_s{OpCompositeExtract(f32_id, texcoord, 0)};
|
||||
const Id texcoord_t{OpCompositeExtract(f32_id, texcoord, 1)};
|
||||
@ -960,7 +953,7 @@ void FragmentModule::DefineTexSampler(u32 texture_unit) {
|
||||
switch (texture_unit) {
|
||||
case 0:
|
||||
// Only unit 0 respects the texturing type
|
||||
switch (state.texture0_type) {
|
||||
switch (config.texture.texture0_type) {
|
||||
case Pica::TexturingRegs::TextureConfig::Texture2D:
|
||||
ret_val = sample_lod(tex0_id);
|
||||
break;
|
||||
@ -976,7 +969,8 @@ void FragmentModule::DefineTexSampler(u32 texture_unit) {
|
||||
// return "shadowTextureCube(texcoord0, texcoord0_w)";
|
||||
break;
|
||||
default:
|
||||
LOG_CRITICAL(Render, "Unhandled texture type {:x}", state.texture0_type.Value());
|
||||
LOG_CRITICAL(Render, "Unhandled texture type {:x}",
|
||||
config.texture.texture0_type.Value());
|
||||
UNIMPLEMENTED();
|
||||
ret_val = zero_vec;
|
||||
break;
|
||||
@ -999,7 +993,7 @@ void FragmentModule::DefineTexSampler(u32 texture_unit) {
|
||||
|
||||
Id FragmentModule::ProcTexSampler() {
|
||||
// Define noise tables at the beginning of the function
|
||||
if (config.state.proctex.noise_enable) {
|
||||
if (config.proctex.noise_enable) {
|
||||
noise1d_table =
|
||||
DefineVar<false>(TypeArray(i32_id, ConstU32(16u)), spv::StorageClass::Function);
|
||||
noise2d_table =
|
||||
@ -1008,8 +1002,8 @@ Id FragmentModule::ProcTexSampler() {
|
||||
lut_offsets = DefineVar<false>(TypeArray(i32_id, ConstU32(8u)), spv::StorageClass::Function);
|
||||
|
||||
Id uv{};
|
||||
if (config.state.proctex.coord < 3) {
|
||||
const Id texcoord{OpLoad(vec_ids.Get(2), texcoord_id[config.state.proctex.coord.Value()])};
|
||||
if (config.proctex.coord < 3) {
|
||||
const Id texcoord{OpLoad(vec_ids.Get(2), texcoord_id[config.proctex.coord.Value()])};
|
||||
uv = OpFAbs(vec_ids.Get(2), texcoord);
|
||||
} else {
|
||||
LOG_CRITICAL(Render, "Unexpected proctex.coord >= 3");
|
||||
@ -1027,26 +1021,24 @@ Id FragmentModule::ProcTexSampler() {
|
||||
// unlike normal texture, the bias is inside the log2
|
||||
const Id proctex_bias{GetShaderDataMember(f32_id, ConstS32(16))};
|
||||
const Id bias{
|
||||
OpFMul(f32_id, ConstF32(static_cast<f32>(config.state.proctex.lut_width)), proctex_bias)};
|
||||
OpFMul(f32_id, ConstF32(static_cast<f32>(config.proctex.lut_width)), proctex_bias)};
|
||||
const Id duv_xy{
|
||||
OpFAdd(f32_id, OpCompositeExtract(f32_id, duv, 0), OpCompositeExtract(f32_id, duv, 1))};
|
||||
|
||||
Id lod{OpLog2(f32_id, OpFMul(f32_id, OpFAbs(f32_id, bias), duv_xy))};
|
||||
lod = OpSelect(f32_id, OpFOrdEqual(bool_id, proctex_bias, ConstF32(0.f)), ConstF32(0.f), lod);
|
||||
lod = OpFClamp(f32_id, lod,
|
||||
ConstF32(std::max(0.0f, static_cast<float>(config.state.proctex.lod_min))),
|
||||
ConstF32(std::min(7.0f, static_cast<float>(config.state.proctex.lod_max))));
|
||||
lod =
|
||||
OpFClamp(f32_id, lod, ConstF32(std::max(0.0f, static_cast<float>(config.proctex.lod_min))),
|
||||
ConstF32(std::min(7.0f, static_cast<float>(config.proctex.lod_max))));
|
||||
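The procedural-texture LOD above is derived from the screen-space derivative of the coordinate: the du+dv sum is scaled by the LUT width times the register-supplied bias, log2 of that gives the level, a bias of exactly zero forces LOD 0, and the result is clamped to the configured lod_min/lod_max inside the 0-7 range. A scalar sketch of that computation (hypothetical names, plain floats):

#include <algorithm>
#include <cmath>

// LOD = log2(|lut_width * bias| * (du + dv)), forced to 0 when bias == 0,
// then clamped to [lod_min, lod_max] within the hardware's 8 levels.
float ComputeProcTexLod(float duv_x, float duv_y, float lut_width, float proctex_bias,
                        int lod_min, int lod_max) {
    const float bias = lut_width * proctex_bias;
    float lod = std::log2(std::abs(bias) * (duv_x + duv_y));
    if (proctex_bias == 0.0f) {
        lod = 0.0f;
    }
    return std::clamp(lod, std::max(0.0f, static_cast<float>(lod_min)),
                      std::min(7.0f, static_cast<float>(lod_max)));
}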
|
||||
// Get shift offset before noise generation
|
||||
const Id u_shift{AppendProcTexShiftOffset(OpCompositeExtract(f32_id, uv, 1),
|
||||
config.state.proctex.u_shift,
|
||||
config.state.proctex.u_clamp)};
|
||||
config.proctex.u_shift, config.proctex.u_clamp)};
|
||||
const Id v_shift{AppendProcTexShiftOffset(OpCompositeExtract(f32_id, uv, 0),
|
||||
config.state.proctex.v_shift,
|
||||
config.state.proctex.v_clamp)};
|
||||
config.proctex.v_shift, config.proctex.v_clamp)};
|
||||
|
||||
// Generate noise
|
||||
if (config.state.proctex.noise_enable) {
|
||||
if (config.proctex.noise_enable) {
|
||||
const Id proctex_noise_a{GetShaderDataMember(vec_ids.Get(2), ConstS32(21))};
|
||||
const Id noise_coef{ProcTexNoiseCoef(uv)};
|
||||
uv = OpFAdd(vec_ids.Get(2), uv,
|
||||
@ -1059,16 +1051,16 @@ Id FragmentModule::ProcTexSampler() {
|
||||
Id v{OpFAdd(f32_id, OpCompositeExtract(f32_id, uv, 1), v_shift)};
|
||||
|
||||
// Clamp
|
||||
u = AppendProcTexClamp(u, config.state.proctex.u_clamp);
|
||||
v = AppendProcTexClamp(v, config.state.proctex.v_clamp);
|
||||
u = AppendProcTexClamp(u, config.proctex.u_clamp);
|
||||
v = AppendProcTexClamp(v, config.proctex.v_clamp);
|
||||
|
||||
// Combine and map
|
||||
const Id proctex_color_map_offset{GetShaderDataMember(i32_id, ConstS32(12))};
|
||||
const Id lut_coord{AppendProcTexCombineAndMap(config.state.proctex.color_combiner, u, v,
|
||||
proctex_color_map_offset)};
|
||||
const Id lut_coord{
|
||||
AppendProcTexCombineAndMap(config.proctex.color_combiner, u, v, proctex_color_map_offset)};
|
||||
|
||||
Id final_color{};
|
||||
switch (config.state.proctex.lut_filter) {
|
||||
switch (config.proctex.lut_filter) {
|
||||
case ProcTexFilter::Linear:
|
||||
case ProcTexFilter::Nearest: {
|
||||
final_color = SampleProcTexColor(lut_coord, ConstS32(0));
|
||||
@ -1090,9 +1082,9 @@ Id FragmentModule::ProcTexSampler() {
|
||||
}
|
||||
}
|
||||
|
||||
if (config.state.proctex.separate_alpha) {
|
||||
if (config.proctex.separate_alpha) {
|
||||
const Id proctex_alpha_map_offset{GetShaderDataMember(i32_id, ConstS32(13))};
|
||||
const Id final_alpha{AppendProcTexCombineAndMap(config.state.proctex.alpha_combiner, u, v,
|
||||
const Id final_alpha{AppendProcTexCombineAndMap(config.proctex.alpha_combiner, u, v,
|
||||
proctex_alpha_map_offset)};
|
||||
final_color = OpCompositeInsert(vec_ids.Get(4), final_alpha, final_color, 3);
|
||||
}
|
||||
@ -1189,13 +1181,11 @@ Id FragmentModule::ProcTexNoiseCoef(Id x) {
|
||||
}
|
||||
|
||||
Id FragmentModule::SampleProcTexColor(Id lut_coord, Id level) {
|
||||
const Id lut_width{
|
||||
OpShiftRightArithmetic(i32_id, ConstS32(config.state.proctex.lut_width), level)};
|
||||
const Id lut_width{OpShiftRightArithmetic(i32_id, ConstS32(config.proctex.lut_width), level)};
|
||||
const Id lut_ptr{TypePointer(spv::StorageClass::Function, i32_id)};
|
||||
// Offsets for level 4-7 seem to be hardcoded
|
||||
InitTableS32(lut_offsets, config.state.proctex.lut_offset0, config.state.proctex.lut_offset1,
|
||||
config.state.proctex.lut_offset2, config.state.proctex.lut_offset3, 0xF0, 0xF8,
|
||||
0xFC, 0xFE);
|
||||
InitTableS32(lut_offsets, config.proctex.lut_offset0, config.proctex.lut_offset1,
|
||||
config.proctex.lut_offset2, config.proctex.lut_offset3, 0xF0, 0xF8, 0xFC, 0xFE);
|
||||
const Id lut_offset{OpLoad(i32_id, OpAccessChain(lut_ptr, lut_offsets, level))};
|
||||
// For the color lut, coord=0.0 is lut[offset] and coord=1.0 is lut[offset+width-1]
|
||||
lut_coord =
|
||||
@ -1209,7 +1199,7 @@ Id FragmentModule::SampleProcTexColor(Id lut_coord, Id level) {
|
||||
const Id proctex_lut_offset{GetShaderDataMember(i32_id, ConstS32(14))};
|
||||
const Id lut_rgba{OpImage(image_buffer_id, texture_buffer_lut_rgba)};
|
||||
|
||||
switch (config.state.proctex.lut_filter) {
|
||||
switch (config.proctex.lut_filter) {
|
||||
case ProcTexFilter::Linear:
|
||||
case ProcTexFilter::LinearMipmapLinear:
|
||||
case ProcTexFilter::LinearMipmapNearest: {
|
||||
@ -1549,9 +1539,8 @@ void FragmentModule::DefineInterface() {
|
||||
Decorate(gl_frag_depth_id, spv::Decoration::BuiltIn, spv::BuiltIn::FragDepth);
|
||||
}
|
||||
|
||||
std::vector<u32> GenerateFragmentShader(const PicaFSConfig& config) {
|
||||
auto& telemetry = Core::System::GetInstance().TelemetrySession();
|
||||
FragmentModule module{telemetry, config};
|
||||
std::vector<u32> GenerateFragmentShader(const FSConfig& config) {
|
||||
FragmentModule module{config};
|
||||
module.Generate();
|
||||
return module.Assemble();
|
||||
}
|
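After this change GenerateFragmentShader is a pure function of the FSConfig: it builds a FragmentModule, runs Generate(), and returns the assembled SPIR-V words with no telemetry or Core::System dependency. As a hedged illustration (not the renderer's actual code), a Vulkan backend could feed the returned words straight to the driver; the FSConfig construction is elided because it depends on emulator state:

#include <cstdint>
#include <vector>
#include <vulkan/vulkan.h>

// The returned words form a complete SPIR-V module; codeSize is in bytes.
VkShaderModule CreateShaderModule(VkDevice device, const std::vector<std::uint32_t>& spirv) {
    VkShaderModuleCreateInfo info{};
    info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
    info.codeSize = spirv.size() * sizeof(std::uint32_t);
    info.pCode = spirv.data();

    VkShaderModule module = VK_NULL_HANDLE;
    if (vkCreateShaderModule(device, &info, nullptr, &module) != VK_SUCCESS) {
        return VK_NULL_HANDLE; // caller decides how to report the failure
    }
    return module;
}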
@ -7,11 +7,7 @@
|
||||
#include <array>
|
||||
#include <sirit/sirit.h>
|
||||
|
||||
#include "video_core/shader/generator/shader_gen.h"
|
||||
|
||||
namespace Core {
|
||||
class TelemetrySession;
|
||||
}
|
||||
#include "video_core/shader/generator/pica_fs_config.h"
|
||||
|
||||
namespace Pica::Shader::Generator::SPIRV {
|
||||
|
||||
@ -34,7 +30,7 @@ class FragmentModule : public Sirit::Module {
|
||||
static constexpr u32 NUM_NON_PROC_TEX_UNITS = 3;
|
||||
|
||||
public:
|
||||
explicit FragmentModule(Core::TelemetrySession& telemetry, const PicaFSConfig& config);
|
||||
explicit FragmentModule(const FSConfig& config);
|
||||
~FragmentModule();
|
||||
|
||||
/// Emits SPIR-V bytecode corresponding to the provided pica fragment configuration
|
||||
@ -218,8 +214,7 @@ private:
|
||||
Id CompareShadow(Id pixel, Id z);
|
||||
|
||||
private:
|
||||
Core::TelemetrySession& telemetry;
|
||||
PicaFSConfig config;
|
||||
const FSConfig& config;
|
||||
Id void_id{};
|
||||
Id bool_id{};
|
||||
Id f32_id{};
|
||||
@ -289,6 +284,6 @@ private:
|
||||
* @param separable_shader generates shader that can be used for separate shader object
|
||||
* @returns String of the shader source code
|
||||
*/
|
||||
std::vector<u32> GenerateFragmentShader(const PicaFSConfig& config);
|
||||
std::vector<u32> GenerateFragmentShader(const FSConfig& config);
|
||||
|
||||
} // namespace Pica::Shader::Generator::SPIRV
|
@ -15,7 +15,9 @@
#include "video_core/shader/shader_interpreter.h"
#if CITRA_ARCH(x86_64)
#include "video_core/shader/shader_jit_x64.h"
#endif // CITRA_ARCH(x86_64)
#elif CITRA_ARCH(arm64)
#include "video_core/shader/shader_jit_a64.h"
#endif
#include "video_core/video_core.h"

namespace Pica::Shader {
@ -141,27 +143,29 @@ MICROPROFILE_DEFINE(GPU_Shader, "GPU", "Shader", MP_RGB(50, 50, 240));

#if CITRA_ARCH(x86_64)
static std::unique_ptr<JitX64Engine> jit_engine;
#endif // CITRA_ARCH(x86_64)
#elif CITRA_ARCH(arm64)
static std::unique_ptr<JitA64Engine> jit_engine;
#endif
static InterpreterEngine interpreter_engine;

ShaderEngine* GetEngine() {
#if CITRA_ARCH(x86_64)
#if CITRA_ARCH(x86_64) || CITRA_ARCH(arm64)
    // TODO(yuriks): Re-initialize on each change rather than being persistent
    if (VideoCore::g_shader_jit_enabled) {
        if (jit_engine == nullptr) {
            jit_engine = std::make_unique<JitX64Engine>();
            jit_engine = std::make_unique<decltype(jit_engine)::element_type>();
        }
        return jit_engine.get();
    }
#endif // CITRA_ARCH(x86_64)
#endif // CITRA_ARCH(x86_64) || CITRA_ARCH(arm64)

    return &interpreter_engine;
}

void Shutdown() {
#if CITRA_ARCH(x86_64)
#if CITRA_ARCH(x86_64) || CITRA_ARCH(arm64)
    jit_engine = nullptr;
#endif // CITRA_ARCH(x86_64)
#endif // CITRA_ARCH(x86_64) || CITRA_ARCH(arm64)
}

} // namespace Pica::Shader
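Because the hunk above declares jit_engine as either std::unique_ptr<JitX64Engine> or std::unique_ptr<JitA64Engine> depending on the architecture, the construction site now uses decltype(jit_engine)::element_type so the same line instantiates whichever engine the build selected. A small standalone sketch of that pattern with toy class names:

#include <memory>

struct EngineA {
    int Run() const { return 1; }
};

// Imagine this declaration being switched to a different engine type by
// preprocessor architecture checks, as jit_engine is above.
static std::unique_ptr<EngineA> engine;

int GetOrCreateAndRun() {
    if (engine == nullptr) {
        // element_type is whatever the unique_ptr was declared to hold, so this
        // line compiles unchanged no matter which declaration was selected.
        engine = std::make_unique<decltype(engine)::element_type>();
    }
    return engine->Run();
}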
51
src/video_core/shader/shader_jit_a64.cpp
Normal file
@ -0,0 +1,51 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/arch.h"
#if CITRA_ARCH(arm64)

#include "common/assert.h"
#include "common/microprofile.h"
#include "video_core/shader/shader.h"
#include "video_core/shader/shader_jit_a64.h"
#include "video_core/shader/shader_jit_a64_compiler.h"

namespace Pica::Shader {

JitA64Engine::JitA64Engine() = default;
JitA64Engine::~JitA64Engine() = default;

void JitA64Engine::SetupBatch(ShaderSetup& setup, unsigned int entry_point) {
    ASSERT(entry_point < MAX_PROGRAM_CODE_LENGTH);
    setup.engine_data.entry_point = entry_point;

    u64 code_hash = setup.GetProgramCodeHash();
    u64 swizzle_hash = setup.GetSwizzleDataHash();

    u64 cache_key = code_hash ^ swizzle_hash;
    auto iter = cache.find(cache_key);
    if (iter != cache.end()) {
        setup.engine_data.cached_shader = iter->second.get();
    } else {
        auto shader = std::make_unique<JitShader>();
        shader->Compile(&setup.program_code, &setup.swizzle_data);
        setup.engine_data.cached_shader = shader.get();
        cache.emplace_hint(iter, cache_key, std::move(shader));
    }
}

MICROPROFILE_DECLARE(GPU_Shader);

void JitA64Engine::Run(const ShaderSetup& setup, UnitState& state) const {
    ASSERT(setup.engine_data.cached_shader != nullptr);

    MICROPROFILE_SCOPE(GPU_Shader);

    const JitShader* shader = static_cast<const JitShader*>(setup.engine_data.cached_shader);
    shader->Run(setup, state, setup.engine_data.entry_point);
}

} // namespace Pica::Shader

#endif // CITRA_ARCH(arm64)
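SetupBatch keys the compiled-shader cache on the XOR of the program-code hash and the swizzle-data hash, compiles only on a miss, and reuses the iterator from the failed find() as an emplace hint. The same lookup-or-compile pattern as a standalone sketch (toy types; in the engine the hashes come from ShaderSetup):

#include <cstdint>
#include <memory>
#include <string>
#include <unordered_map>

struct FakeCompiledShader {
    std::string code; // stand-in for the JIT-emitted machine code
};

std::unordered_map<std::uint64_t, std::unique_ptr<FakeCompiledShader>> cache;

const FakeCompiledShader* GetOrCompile(std::uint64_t code_hash, std::uint64_t swizzle_hash) {
    const std::uint64_t cache_key = code_hash ^ swizzle_hash; // same combination as SetupBatch
    const auto iter = cache.find(cache_key);
    if (iter != cache.end()) {
        return iter->second.get();
    }
    auto shader = std::make_unique<FakeCompiledShader>();
    shader->code = "...compile program code and swizzle data here...";
    const FakeCompiledShader* result = shader.get();
    // Reuse the failed lookup as an insertion hint, as the engine does.
    cache.emplace_hint(iter, cache_key, std::move(shader));
    return result;
}

Both inputs are already 64-bit digests of the full program and swizzle data, so XOR keeps the key cheap to compute while collisions between distinct shader setups stay unlikely.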
33
src/video_core/shader/shader_jit_a64.h
Normal file
@ -0,0 +1,33 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/arch.h"
#if CITRA_ARCH(arm64)

#include <memory>
#include <unordered_map>
#include "common/common_types.h"
#include "video_core/shader/shader.h"

namespace Pica::Shader {

class JitShader;

class JitA64Engine final : public ShaderEngine {
public:
    JitA64Engine();
    ~JitA64Engine() override;

    void SetupBatch(ShaderSetup& setup, unsigned int entry_point) override;
    void Run(const ShaderSetup& setup, UnitState& state) const override;

private:
    std::unordered_map<u64, std::unique_ptr<JitShader>> cache;
};

} // namespace Pica::Shader

#endif // CITRA_ARCH(arm64)
1207
src/video_core/shader/shader_jit_a64_compiler.cpp
Normal file
File diff suppressed because it is too large

146
src/video_core/shader/shader_jit_a64_compiler.h
Normal file
@ -0,0 +1,146 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/arch.h"
#if CITRA_ARCH(arm64)

#include <array>
#include <bitset>
#include <cstddef>
#include <optional>
#include <utility>
#include <vector>
#include <nihstro/shader_bytecode.h>
#include <oaknut/code_block.hpp>
#include <oaknut/oaknut.hpp>
#include "common/common_types.h"
#include "video_core/shader/shader.h"

using nihstro::Instruction;
using nihstro::OpCode;
using nihstro::SourceRegister;
using nihstro::SwizzlePattern;

namespace Pica::Shader {

/// Memory allocated for each compiled shader
constexpr std::size_t MAX_SHADER_SIZE = MAX_PROGRAM_CODE_LENGTH * 256;

/**
 * This class implements the shader JIT compiler. It recompiles a Pica shader program into AArch64
 * code that can be executed on the host machine directly.
 */
class JitShader : private oaknut::CodeBlock, public oaknut::CodeGenerator {
public:
    JitShader();

    void Run(const ShaderSetup& setup, UnitState& state, unsigned offset) const {
        program(&setup.uniforms, &state, instruction_labels[offset].ptr<const std::byte*>());
    }

    void Compile(const std::array<u32, MAX_PROGRAM_CODE_LENGTH>* program_code,
                 const std::array<u32, MAX_SWIZZLE_DATA_LENGTH>* swizzle_data);

    void Compile_ADD(Instruction instr);
    void Compile_DP3(Instruction instr);
    void Compile_DP4(Instruction instr);
    void Compile_DPH(Instruction instr);
    void Compile_EX2(Instruction instr);
    void Compile_LG2(Instruction instr);
    void Compile_MUL(Instruction instr);
    void Compile_SGE(Instruction instr);
    void Compile_SLT(Instruction instr);
    void Compile_FLR(Instruction instr);
    void Compile_MAX(Instruction instr);
    void Compile_MIN(Instruction instr);
    void Compile_RCP(Instruction instr);
    void Compile_RSQ(Instruction instr);
    void Compile_MOVA(Instruction instr);
    void Compile_MOV(Instruction instr);
    void Compile_NOP(Instruction instr);
    void Compile_END(Instruction instr);
    void Compile_BREAKC(Instruction instr);
    void Compile_CALL(Instruction instr);
    void Compile_CALLC(Instruction instr);
    void Compile_CALLU(Instruction instr);
    void Compile_IF(Instruction instr);
    void Compile_LOOP(Instruction instr);
    void Compile_JMP(Instruction instr);
    void Compile_CMP(Instruction instr);
    void Compile_MAD(Instruction instr);
    void Compile_EMIT(Instruction instr);
    void Compile_SETE(Instruction instr);

private:
    void Compile_Block(unsigned end);
    void Compile_NextInstr();

    void Compile_SwizzleSrc(Instruction instr, unsigned src_num, SourceRegister src_reg,
                            oaknut::QReg dest);
    void Compile_DestEnable(Instruction instr, oaknut::QReg dest);

    /**
     * Compiles a `MUL src1, src2` operation, properly handling the PICA semantics when multiplying
     * zero by inf. Clobbers `src2` and `scratch`.
     */
    void Compile_SanitizedMul(oaknut::QReg src1, oaknut::QReg src2, oaknut::QReg scratch0);

    void Compile_EvaluateCondition(Instruction instr);
    void Compile_UniformCondition(Instruction instr);

    /**
     * Emits the code to conditionally return from a subroutine invoked by the `CALL` instruction.
     */
    void Compile_Return();

    std::bitset<64> PersistentCallerSavedRegs();

    /**
     * Assertion evaluated at compile-time, but only triggered if executed at runtime.
     * @param condition Condition to be evaluated.
     * @param msg Message to be logged if the assertion fails.
     */
    void Compile_Assert(bool condition, const char* msg);

    /**
     * Analyzes the entire shader program for `CALL` instructions before emitting any code,
     * identifying the locations where a return needs to be inserted.
     */
    void FindReturnOffsets();

    /**
     * Emits data and code for utility functions.
     */
    void CompilePrelude();
    oaknut::Label CompilePrelude_Log2();
    oaknut::Label CompilePrelude_Exp2();

    const std::array<u32, MAX_PROGRAM_CODE_LENGTH>* program_code = nullptr;
    const std::array<u32, MAX_SWIZZLE_DATA_LENGTH>* swizzle_data = nullptr;

    /// Mapping of Pica VS instructions to pointers in the emitted code
    std::array<oaknut::Label, MAX_PROGRAM_CODE_LENGTH> instruction_labels;

    /// Labels pointing to the end of each nested LOOP block. Used by the BREAKC instruction to
    /// break out of a loop.
    std::vector<oaknut::Label> loop_break_labels;

    /// Offsets in code where a return needs to be inserted
    std::vector<unsigned> return_offsets;

    unsigned program_counter = 0; ///< Offset of the next instruction to decode
    u8 loop_depth = 0;            ///< Depth of the (nested) loops currently compiled

    using CompiledShader = void(const void* setup, void* state, const std::byte* start_addr);
    CompiledShader* program = nullptr;

    oaknut::Label log2_subroutine;
    oaknut::Label exp2_subroutine;
};

} // namespace Pica::Shader

#endif
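The JitShader keeps the generated code's entry point as a plain function pointer (CompiledShader* program), and Run() above calls directly into the emitted AArch64 code, passing instruction_labels[offset] as the start address so execution can begin at any shader entry point. A minimal sketch of that trampoline shape with an ordinary function standing in for the JIT-emitted buffer (which in the real class lives in oaknut's CodeBlock and must be made executable):

#include <cstddef>

// Same shape as JitShader::CompiledShader: the generated code receives the
// uniform block, the mutable unit state, and the address to start executing at.
using CompiledShader = void(const void* setup, void* state, const std::byte* start_addr);

// Stand-in for code that would normally be emitted into an executable buffer.
void FakeGeneratedCode(const void* /*setup*/, void* state, const std::byte* /*start_addr*/) {
    *static_cast<int*>(state) += 1; // pretend a few shader instructions ran
}

int main() {
    CompiledShader* program = &FakeGeneratedCode;
    int unit_state = 0;
    program(nullptr, &unit_state, nullptr); // what JitShader::Run effectively does
    return unit_state == 1 ? 0 : 1;
}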