Compare commits

..

10 Commits

Author SHA1 Message Date
bac61d012c service: Stub mcu::HWC 2024-02-09 18:41:39 +02:00
2766118e33 http: Implement various missing commands (#7415) 2024-02-08 11:01:46 -08:00
06b26691ba soc: Pass accurate sockaddr length to socket functions. (#7426) 2024-02-08 11:01:38 -08:00
d41ce64f7b Add ipv6 socket support (#7418)
* Add IPV6 socket support

* Suggestions
2024-02-07 19:22:44 -08:00
1165a708d5 .tx/config: Use language mappings for android "tx pull" (#7422)
The language names we are using in the android resources differ from those on Transifex.

We need to manually specify mappings for them, so Transifex is able to place the files in the correct folders.
2024-02-07 05:41:29 -08:00
19784355f9 build: Improve support for Windows cross-compilation. (#7389)
* build: Improve support for Windows cross-compilation.

* build: Move linuxdeploy download to bundle target execution time.
2024-02-05 10:09:50 -08:00
aa6a29d7e1 AudioCore/HLE/source: Partially implement last_buffer_id (#7397)
* AudioCore/HLE/source: Partially implement last_buffer_id

shared_memory.h: fix typo

* tests\audio_core\hle\source.cpp: Add test cases to verify last_buffer_id
2024-02-05 09:54:13 -08:00
106364e01e video_core: Use source3 when GPU_PREVIOUS is used in first stage (#7411) 2024-02-05 09:53:54 -08:00
d5a1bd07f3 glsl_shader_gen: Increase z=0 epsillon (#7408) 2024-02-05 09:53:41 -08:00
8afa27718c dumpkeys: Add seeddb.bin to output files. (#7417) 2024-02-05 09:14:14 -08:00
216 changed files with 7311 additions and 8379 deletions

View File

@ -85,8 +85,6 @@ option(ENABLE_VULKAN "Enables the Vulkan renderer" ON)
option(USE_DISCORD_PRESENCE "Enables Discord Rich Presence" OFF)
CMAKE_DEPENDENT_OPTION(CITRA_ENABLE_BUNDLE_TARGET "Enable the distribution bundling target." ON "NOT ANDROID AND NOT IOS" OFF)
# Compile options
CMAKE_DEPENDENT_OPTION(COMPILE_WITH_DWARF "Add DWARF debugging information" ${IS_DEBUG_BUILD} "MINGW" OFF)
option(ENABLE_LTO "Enable link time optimization" ${DEFAULT_ENABLE_LTO})
@ -249,6 +247,26 @@ if (ENABLE_QT)
if (ENABLE_QT_TRANSLATION)
find_package(Qt6 REQUIRED COMPONENTS LinguistTools)
endif()
if (NOT DEFINED QT_TARGET_PATH)
# Determine the location of the compile target's Qt.
get_target_property(qtcore_path Qt6::Core LOCATION_Release)
string(FIND "${qtcore_path}" "/bin/" qtcore_path_bin_pos REVERSE)
string(FIND "${qtcore_path}" "/lib/" qtcore_path_lib_pos REVERSE)
if (qtcore_path_bin_pos GREATER qtcore_path_lib_pos)
string(SUBSTRING "${qtcore_path}" 0 ${qtcore_path_bin_pos} QT_TARGET_PATH)
else()
string(SUBSTRING "${qtcore_path}" 0 ${qtcore_path_lib_pos} QT_TARGET_PATH)
endif()
endif()
if (NOT DEFINED QT_HOST_PATH)
# Use the same for host Qt if none is defined.
set(QT_HOST_PATH "${QT_TARGET_PATH}")
endif()
message(STATUS "Using target Qt at ${QT_TARGET_PATH}")
message(STATUS "Using host Qt at ${QT_HOST_PATH}")
endif()
# Use system tsl::robin_map if available (otherwise we fallback to version bundled with dynarmic)
@ -424,7 +442,8 @@ else()
endif()
# Create target for outputting distributable bundles.
if (CITRA_ENABLE_BUNDLE_TARGET)
# Not supported for mobile platforms as distributables are built differently.
if (NOT ANDROID AND NOT IOS)
include(BundleTarget)
if (ENABLE_SDL2_FRONTEND)
bundle_target(citra)

View File

@ -2,37 +2,104 @@
if (BUNDLE_TARGET_EXECUTE)
# --- Bundling method logic ---
# Copy a single file, materializing symlinks as regular files so the
# resulting bundle is self-contained. Fails hard on any copy error.
function(symlink_safe_copy from to)
    if (WIN32)
        # Use cmake copy for maximum compatibility.
        set(copy_command ${CMAKE_COMMAND} -E copy "${from}" "${to}")
    else()
        # Use native copy to turn symlinks into normal files.
        set(copy_command cp -L "${from}" "${to}")
    endif()
    execute_process(COMMAND ${copy_command}
        RESULT_VARIABLE cp_result)
    if (NOT cp_result EQUAL "0")
        message(FATAL_ERROR "cp \"${from}\" \"${to}\" failed: ${cp_result}")
    endif()
endfunction()
function(bundle_qt executable_path)
if (WIN32)
# Perform standalone bundling first to copy over all used libraries, as windeployqt does not do this.
bundle_standalone("${executable_path}" "${EXECUTABLE_PATH}" "${BUNDLE_LIBRARY_PATHS}")
get_filename_component(executable_parent_dir "${executable_path}" DIRECTORY)
find_program(windeployqt_executable windeployqt6)
# Create a qt.conf file pointing to the app directory.
# This ensures Qt can find its plugins.
file(WRITE "${executable_parent_dir}/qt.conf" "[Paths]\nprefix = .")
file(WRITE "${executable_parent_dir}/qt.conf" "[Paths]\nPrefix = .")
find_program(windeployqt_executable windeployqt6 PATHS "${QT_HOST_PATH}/bin")
find_program(qtpaths_executable qtpaths6 PATHS "${QT_HOST_PATH}/bin")
# TODO: Hack around windeployqt's poor cross-compilation support by
# making a local copy with a prefix pointing to the target Qt.
if (NOT "${QT_HOST_PATH}" STREQUAL "${QT_TARGET_PATH}")
set(windeployqt_dir "${BINARY_PATH}/windeployqt_copy")
file(MAKE_DIRECTORY "${windeployqt_dir}")
symlink_safe_copy("${windeployqt_executable}" "${windeployqt_dir}/windeployqt.exe")
symlink_safe_copy("${qtpaths_executable}" "${windeployqt_dir}/qtpaths.exe")
symlink_safe_copy("${QT_HOST_PATH}/bin/Qt6Core.dll" "${windeployqt_dir}")
if (EXISTS "${QT_TARGET_PATH}/share")
# Unix-style Qt; we need to wire up the paths manually.
file(WRITE "${windeployqt_dir}/qt.conf" "\
[Paths]\n
Prefix = ${QT_TARGET_PATH}\n \
ArchData = ${QT_TARGET_PATH}/share/qt6\n \
Binaries = ${QT_TARGET_PATH}/bin\n \
Data = ${QT_TARGET_PATH}/share/qt6\n \
Documentation = ${QT_TARGET_PATH}/share/qt6/doc\n \
Headers = ${QT_TARGET_PATH}/include/qt6\n \
Libraries = ${QT_TARGET_PATH}/lib\n \
LibraryExecutables = ${QT_TARGET_PATH}/share/qt6/bin\n \
Plugins = ${QT_TARGET_PATH}/share/qt6/plugins\n \
QmlImports = ${QT_TARGET_PATH}/share/qt6/qml\n \
Translations = ${QT_TARGET_PATH}/share/qt6/translations\n \
")
else()
# Windows-style Qt; the defaults should suffice.
file(WRITE "${windeployqt_dir}/qt.conf" "[Paths]\nPrefix = ${QT_TARGET_PATH}")
endif()
set(windeployqt_executable "${windeployqt_dir}/windeployqt.exe")
set(qtpaths_executable "${windeployqt_dir}/qtpaths.exe")
endif()
message(STATUS "Executing windeployqt for executable ${executable_path}")
execute_process(COMMAND "${windeployqt_executable}" "${executable_path}"
--qtpaths "${qtpaths_executable}"
--no-compiler-runtime --no-system-d3d-compiler --no-opengl-sw --no-translations
--plugindir "${executable_parent_dir}/plugins")
--plugindir "${executable_parent_dir}/plugins"
RESULT_VARIABLE windeployqt_result)
if (NOT windeployqt_result EQUAL "0")
message(FATAL_ERROR "windeployqt failed: ${windeployqt_result}")
endif()
# Remove the FFmpeg multimedia plugin as we don't include FFmpeg.
# We want to use the Windows media plugin instead, which is also included.
file(REMOVE "${executable_parent_dir}/plugins/multimedia/ffmpegmediaplugin.dll")
elseif (APPLE)
get_filename_component(executable_name "${executable_path}" NAME_WE)
find_program(MACDEPLOYQT_EXECUTABLE macdeployqt6)
find_program(macdeployqt_executable macdeployqt6 PATHS "${QT_HOST_PATH}/bin")
message(STATUS "Executing macdeployqt for executable ${executable_path}")
message(STATUS "Executing macdeployqt at \"${macdeployqt_executable}\" for executable \"${executable_path}\"")
execute_process(
COMMAND "${MACDEPLOYQT_EXECUTABLE}"
COMMAND "${macdeployqt_executable}"
"${executable_path}"
"-executable=${executable_path}/Contents/MacOS/${executable_name}"
-always-overwrite)
-always-overwrite
RESULT_VARIABLE macdeployqt_result)
if (NOT macdeployqt_result EQUAL "0")
message(FATAL_ERROR "macdeployqt failed: ${macdeployqt_result}")
endif()
# Bundling libraries can rewrite path information and break code signatures of system libraries.
# Perform an ad-hoc re-signing on the whole app bundle to fix this.
execute_process(COMMAND codesign --deep -fs - "${executable_path}")
execute_process(COMMAND codesign --deep -fs - "${executable_path}"
RESULT_VARIABLE codesign_result)
if (NOT codesign_result EQUAL "0")
message(FATAL_ERROR "codesign failed: ${codesign_result}")
endif()
else()
message(FATAL_ERROR "Unsupported OS for Qt bundling.")
endif()
@ -44,9 +111,9 @@ if (BUNDLE_TARGET_EXECUTE)
if (enable_qt)
# Find qmake to make sure the plugin uses the right version of Qt.
find_program(QMAKE_EXECUTABLE qmake6)
find_program(qmake_executable qmake6 PATHS "${QT_HOST_PATH}/bin")
set(extra_linuxdeploy_env "QMAKE=${QMAKE_EXECUTABLE}")
set(extra_linuxdeploy_env "QMAKE=${qmake_executable}")
set(extra_linuxdeploy_args --plugin qt)
endif()
@ -59,7 +126,11 @@ if (BUNDLE_TARGET_EXECUTE)
--executable "${executable_path}"
--icon-file "${source_path}/dist/citra.svg"
--desktop-file "${source_path}/dist/${executable_name}.desktop"
--appdir "${appdir_path}")
--appdir "${appdir_path}"
RESULT_VARIABLE linuxdeploy_appdir_result)
if (NOT linuxdeploy_appdir_result EQUAL "0")
message(FATAL_ERROR "linuxdeploy failed to create AppDir: ${linuxdeploy_appdir_result}")
endif()
if (enable_qt)
set(qt_hook_file "${appdir_path}/apprun-hooks/linuxdeploy-plugin-qt-hook.sh")
@ -82,7 +153,11 @@ if (BUNDLE_TARGET_EXECUTE)
"OUTPUT=${bundle_dir}/${executable_name}.AppImage"
"${linuxdeploy_executable}"
--output appimage
--appdir "${appdir_path}")
--appdir "${appdir_path}"
RESULT_VARIABLE linuxdeploy_appimage_result)
if (NOT linuxdeploy_appimage_result EQUAL "0")
message(FATAL_ERROR "linuxdeploy failed to create AppImage: ${linuxdeploy_appimage_result}")
endif()
endfunction()
function(bundle_standalone executable_path original_executable_path bundle_library_paths)
@ -109,16 +184,23 @@ if (BUNDLE_TARGET_EXECUTE)
file(MAKE_DIRECTORY ${lib_dir})
foreach (lib_file IN LISTS resolved_deps)
message(STATUS "Bundling library ${lib_file}")
# Use native copy to turn symlinks into normal files.
execute_process(COMMAND cp -L "${lib_file}" "${lib_dir}")
symlink_safe_copy("${lib_file}" "${lib_dir}")
endforeach()
endif()
# Add libs directory to executable rpath where applicable.
if (APPLE)
execute_process(COMMAND install_name_tool -add_rpath "@loader_path/libs" "${executable_path}")
execute_process(COMMAND install_name_tool -add_rpath "@loader_path/libs" "${executable_path}"
RESULT_VARIABLE install_name_tool_result)
if (NOT install_name_tool_result EQUAL "0")
message(FATAL_ERROR "install_name_tool failed: ${install_name_tool_result}")
endif()
elseif (UNIX)
execute_process(COMMAND patchelf --set-rpath '$ORIGIN/../libs' "${executable_path}")
execute_process(COMMAND patchelf --set-rpath '$ORIGIN/../libs' "${executable_path}"
RESULT_VARIABLE patchelf_result)
if (NOT patchelf_result EQUAL "0")
message(FATAL_ERROR "patchelf failed: ${patchelf_result}")
endif()
endif()
endfunction()
@ -127,7 +209,7 @@ if (BUNDLE_TARGET_EXECUTE)
set(bundle_dir ${BINARY_PATH}/bundle)
# On Linux, always bundle an AppImage.
if (DEFINED LINUXDEPLOY)
if (CMAKE_HOST_SYSTEM_NAME STREQUAL "Linux")
if (IN_PLACE)
message(FATAL_ERROR "Cannot bundle for Linux in-place.")
endif()
@ -146,14 +228,12 @@ if (BUNDLE_TARGET_EXECUTE)
if (BUNDLE_QT)
bundle_qt("${bundled_executable_path}")
endif()
if (WIN32 OR NOT BUNDLE_QT)
else()
bundle_standalone("${bundled_executable_path}" "${EXECUTABLE_PATH}" "${BUNDLE_LIBRARY_PATHS}")
endif()
endif()
else()
# --- Bundling target creation logic ---
elseif (BUNDLE_TARGET_DOWNLOAD_LINUXDEPLOY)
# --- linuxdeploy download logic ---
# Downloads and extracts a linuxdeploy component.
function(download_linuxdeploy_component base_dir name executable_name)
@ -161,7 +241,7 @@ else()
if (NOT EXISTS "${executable_file}")
message(STATUS "Downloading ${executable_name}")
file(DOWNLOAD
"https://github.com/linuxdeploy/${name}/releases/download/continuous/${executable_name}"
"https://github.com/${name}/releases/download/continuous/${executable_name}"
"${executable_file}" SHOW_PROGRESS)
file(CHMOD "${executable_file}" PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE)
@ -170,7 +250,11 @@ else()
message(STATUS "Extracting ${executable_name}")
execute_process(
COMMAND "${executable_file}" --appimage-extract
WORKING_DIRECTORY "${base_dir}")
WORKING_DIRECTORY "${base_dir}"
RESULT_VARIABLE extract_result)
if (NOT extract_result EQUAL "0")
message(FATAL_ERROR "AppImage extract failed: ${extract_result}")
endif()
else()
message(STATUS "Copying ${executable_name}")
file(COPY "${executable_file}" DESTINATION "${base_dir}/squashfs-root/usr/bin/")
@ -178,89 +262,102 @@ else()
endif()
endfunction()
# Download plugins first so they don't overwrite linuxdeploy's AppRun file.
download_linuxdeploy_component("${LINUXDEPLOY_PATH}" "linuxdeploy/linuxdeploy-plugin-qt" "linuxdeploy-plugin-qt-${LINUXDEPLOY_ARCH}.AppImage")
download_linuxdeploy_component("${LINUXDEPLOY_PATH}" "darealshinji/linuxdeploy-plugin-checkrt" "linuxdeploy-plugin-checkrt.sh")
download_linuxdeploy_component("${LINUXDEPLOY_PATH}" "linuxdeploy/linuxdeploy" "linuxdeploy-${LINUXDEPLOY_ARCH}.AppImage")
else()
# --- Bundling target creation logic ---
# Creates the base bundle target with common files and pre-bundle steps.
function(create_base_bundle_target)
message(STATUS "Creating base bundle target")
add_custom_target(bundle)
# Stage the bundle output directories in the build tree.
add_custom_command(
TARGET bundle
COMMAND ${CMAKE_COMMAND} -E make_directory "${CMAKE_BINARY_DIR}/bundle/")
add_custom_command(
TARGET bundle
COMMAND ${CMAKE_COMMAND} -E make_directory "${CMAKE_BINARY_DIR}/bundle/dist/")
# Copy the common distributable files: icon, license, readme, scripting helpers.
add_custom_command(
TARGET bundle
COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_SOURCE_DIR}/dist/icon.png" "${CMAKE_BINARY_DIR}/bundle/dist/citra.png")
add_custom_command(
TARGET bundle
COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_SOURCE_DIR}/license.txt" "${CMAKE_BINARY_DIR}/bundle/")
add_custom_command(
TARGET bundle
COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_SOURCE_DIR}/README.md" "${CMAKE_BINARY_DIR}/bundle/")
add_custom_command(
TARGET bundle
COMMAND ${CMAKE_COMMAND} -E copy_directory "${CMAKE_SOURCE_DIR}/dist/scripting" "${CMAKE_BINARY_DIR}/bundle/scripting")
# On Linux, add a command to prepare linuxdeploy and any required plugins before any bundling occurs.
# This re-invokes this same script in script mode (-P) with
# BUNDLE_TARGET_DOWNLOAD_LINUXDEPLOY=1, so the download runs at build time
# when the bundle target executes, rather than at configure time.
if (CMAKE_HOST_SYSTEM_NAME STREQUAL "Linux")
add_custom_command(
TARGET bundle
COMMAND ${CMAKE_COMMAND}
"-DBUNDLE_TARGET_DOWNLOAD_LINUXDEPLOY=1"
"-DLINUXDEPLOY_PATH=${CMAKE_BINARY_DIR}/externals/linuxdeploy"
"-DLINUXDEPLOY_ARCH=${CMAKE_HOST_SYSTEM_PROCESSOR}"
-P "${CMAKE_SOURCE_DIR}/CMakeModules/BundleTarget.cmake"
WORKING_DIRECTORY "${CMAKE_BINARY_DIR}")
endif()
endfunction()
# Adds a target to the bundle target, packing in required libraries.
# If in_place is true, the bundling will be done in-place as part of the specified target.
function(bundle_target_internal target_name in_place)
# Create base bundle target if it does not exist.
if (NOT in_place AND NOT TARGET bundle)
message(STATUS "Creating base bundle target")
add_custom_target(bundle)
add_custom_command(
TARGET bundle
COMMAND ${CMAKE_COMMAND} -E make_directory "${CMAKE_BINARY_DIR}/bundle/")
add_custom_command(
TARGET bundle
COMMAND ${CMAKE_COMMAND} -E make_directory "${CMAKE_BINARY_DIR}/bundle/dist/")
add_custom_command(
TARGET bundle
COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_SOURCE_DIR}/dist/icon.png" "${CMAKE_BINARY_DIR}/bundle/dist/citra.png")
add_custom_command(
TARGET bundle
COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_SOURCE_DIR}/license.txt" "${CMAKE_BINARY_DIR}/bundle/")
add_custom_command(
TARGET bundle
COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_SOURCE_DIR}/README.md" "${CMAKE_BINARY_DIR}/bundle/")
add_custom_command(
TARGET bundle
COMMAND ${CMAKE_COMMAND} -E copy_directory "${CMAKE_SOURCE_DIR}/dist/scripting" "${CMAKE_BINARY_DIR}/bundle/scripting")
create_base_bundle_target()
endif()
set(BUNDLE_EXECUTABLE_PATH "$<TARGET_FILE:${target_name}>")
set(bundle_executable_path "$<TARGET_FILE:${target_name}>")
if (target_name MATCHES ".*qt")
set(BUNDLE_QT ON)
set(bundle_qt ON)
if (APPLE)
# For Qt targets on Apple, expect an app bundle.
set(BUNDLE_EXECUTABLE_PATH "$<TARGET_BUNDLE_DIR:${target_name}>")
set(bundle_executable_path "$<TARGET_BUNDLE_DIR:${target_name}>")
endif()
else()
set(BUNDLE_QT OFF)
set(bundle_qt OFF)
endif()
# Build a list of library search paths from prefix paths.
foreach(prefix_path IN LISTS CMAKE_PREFIX_PATH CMAKE_SYSTEM_PREFIX_PATH)
foreach(prefix_path IN LISTS CMAKE_FIND_ROOT_PATH CMAKE_PREFIX_PATH CMAKE_SYSTEM_PREFIX_PATH)
if (WIN32)
list(APPEND BUNDLE_LIBRARY_PATHS "${prefix_path}/bin")
list(APPEND bundle_library_paths "${prefix_path}/bin")
endif()
list(APPEND BUNDLE_LIBRARY_PATHS "${prefix_path}/lib")
list(APPEND bundle_library_paths "${prefix_path}/lib")
endforeach()
foreach(library_path IN LISTS CMAKE_SYSTEM_LIBRARY_PATH)
list(APPEND BUNDLE_LIBRARY_PATHS "${library_path}")
list(APPEND bundle_library_paths "${library_path}")
endforeach()
# On Linux, prepare linuxdeploy and any required plugins.
if (CMAKE_SYSTEM_NAME STREQUAL "Linux")
set(LINUXDEPLOY_BASE "${CMAKE_BINARY_DIR}/externals/linuxdeploy")
# Download plugins first so they don't overwrite linuxdeploy's AppRun file.
download_linuxdeploy_component("${LINUXDEPLOY_BASE}" "linuxdeploy-plugin-qt" "linuxdeploy-plugin-qt-x86_64.AppImage")
download_linuxdeploy_component("${LINUXDEPLOY_BASE}" "linuxdeploy-plugin-checkrt" "linuxdeploy-plugin-checkrt-x86_64.sh")
download_linuxdeploy_component("${LINUXDEPLOY_BASE}" "linuxdeploy" "linuxdeploy-x86_64.AppImage")
set(EXTRA_BUNDLE_ARGS "-DLINUXDEPLOY=${LINUXDEPLOY_BASE}/squashfs-root/AppRun")
endif()
if (in_place)
message(STATUS "Adding in-place bundling to ${target_name}")
set(DEST_TARGET ${target_name})
set(dest_target ${target_name})
else()
message(STATUS "Adding ${target_name} to bundle target")
set(DEST_TARGET bundle)
set(dest_target bundle)
add_dependencies(bundle ${target_name})
endif()
add_custom_command(TARGET ${DEST_TARGET} POST_BUILD
add_custom_command(TARGET ${dest_target} POST_BUILD
COMMAND ${CMAKE_COMMAND}
"-DCMAKE_PREFIX_PATH=\"${CMAKE_PREFIX_PATH}\""
"-DQT_HOST_PATH=\"${QT_HOST_PATH}\""
"-DQT_TARGET_PATH=\"${QT_TARGET_PATH}\""
"-DBUNDLE_TARGET_EXECUTE=1"
"-DTARGET=${target_name}"
"-DSOURCE_PATH=${CMAKE_SOURCE_DIR}"
"-DBINARY_PATH=${CMAKE_BINARY_DIR}"
"-DEXECUTABLE_PATH=${BUNDLE_EXECUTABLE_PATH}"
"-DBUNDLE_LIBRARY_PATHS=\"${BUNDLE_LIBRARY_PATHS}\""
"-DBUNDLE_QT=${BUNDLE_QT}"
"-DEXECUTABLE_PATH=${bundle_executable_path}"
"-DBUNDLE_LIBRARY_PATHS=\"${bundle_library_paths}\""
"-DBUNDLE_QT=${bundle_qt}"
"-DIN_PLACE=${in_place}"
${EXTRA_BUNDLE_ARGS}
"-DLINUXDEPLOY=${CMAKE_BINARY_DIR}/externals/linuxdeploy/squashfs-root/AppRun"
-P "${CMAKE_SOURCE_DIR}/CMakeModules/BundleTarget.cmake"
WORKING_DIRECTORY "${CMAKE_BINARY_DIR}")
endfunction()

View File

@ -1,21 +1,20 @@
set(CURRENT_MODULE_DIR ${CMAKE_CURRENT_LIST_DIR})
# This function downloads Qt using aqt. The path of the downloaded content will be added to the CMAKE_PREFIX_PATH.
# Params:
# target: Qt dependency to install. Specify a version number to download Qt, or "tools_(name)" for a specific build tool.
function(download_qt target)
# Determines parameters based on the host and target for downloading the right Qt binaries.
function(determine_qt_parameters target host_out type_out arch_out arch_path_out host_type_out host_arch_out host_arch_path_out)
if (target MATCHES "tools_.*")
set(DOWNLOAD_QT_TOOL ON)
set(tool ON)
else()
set(DOWNLOAD_QT_TOOL OFF)
set(tool OFF)
endif()
# Determine installation parameters for OS, architecture, and compiler
if (WIN32)
set(host "windows")
set(type "desktop")
if (NOT DOWNLOAD_QT_TOOL)
if (NOT tool)
if (MINGW)
set(arch "win64_mingw")
set(arch_path "mingw_64")
@ -28,21 +27,35 @@ function(download_qt target)
message(FATAL_ERROR "Unsupported bundled Qt architecture. Enable USE_SYSTEM_QT and provide your own.")
endif()
set(arch "win64_${arch_path}")
# In case we're cross-compiling, prepare to also fetch the correct host Qt tools.
if (CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL "AMD64")
set(host_arch_path "msvc2019_64")
elseif (CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL "ARM64")
# TODO: msvc2019_arm64 doesn't include some of the required tools for some reason,
# TODO: so until it does, just use msvc2019_64 under x86_64 emulation.
# set(host_arch_path "msvc2019_arm64")
set(host_arch_path "msvc2019_64")
endif()
set(host_arch "win64_${host_arch_path}")
else()
message(FATAL_ERROR "Unsupported bundled Qt toolchain. Enable USE_SYSTEM_QT and provide your own.")
endif()
endif()
elseif (APPLE)
set(host "mac")
if (IOS AND NOT DOWNLOAD_QT_TOOL)
set(type "desktop")
set(arch "clang_64")
set(arch_path "macos")
if (IOS AND NOT tool)
set(host_type "${type}")
set(host_arch "${arch}")
set(host_arch_path "${arch_path}")
set(type "ios")
set(arch "ios")
set(arch_path "ios")
set(host_arch_path "macos")
else()
set(type "desktop")
set(arch "clang_64")
set(arch_path "macos")
endif()
else()
set(host "linux")
@ -51,38 +64,64 @@ function(download_qt target)
set(arch_path "linux")
endif()
get_external_prefix(qt base_path)
file(MAKE_DIRECTORY "${base_path}")
set(${host_out} "${host}" PARENT_SCOPE)
set(${type_out} "${type}" PARENT_SCOPE)
set(${arch_out} "${arch}" PARENT_SCOPE)
set(${arch_path_out} "${arch_path}" PARENT_SCOPE)
if (DEFINED host_type)
set(${host_type_out} "${host_type}" PARENT_SCOPE)
else()
set(${host_type_out} "${type}" PARENT_SCOPE)
endif()
if (DEFINED host_arch)
set(${host_arch_out} "${host_arch}" PARENT_SCOPE)
else()
set(${host_arch_out} "${arch}" PARENT_SCOPE)
endif()
if (DEFINED host_arch_path)
set(${host_arch_path_out} "${host_arch_path}" PARENT_SCOPE)
else()
set(${host_arch_path_out} "${arch_path}" PARENT_SCOPE)
endif()
endfunction()
# Download Qt binaries for a specific configuration.
function(download_qt_configuration prefix_out target host type arch arch_path base_path)
if (target MATCHES "tools_.*")
set(tool ON)
else()
set(tool OFF)
endif()
set(install_args -c "${CURRENT_MODULE_DIR}/aqt_config.ini")
if (DOWNLOAD_QT_TOOL)
if (tool)
set(prefix "${base_path}/Tools")
set(install_args ${install_args} install-tool --outputdir ${base_path} ${host} desktop ${target})
else()
set(prefix "${base_path}/${target}/${arch_path}")
if (host_arch_path)
set(host_flag "--autodesktop")
set(host_prefix "${base_path}/${target}/${host_arch_path}")
endif()
set(install_args ${install_args} install-qt --outputdir ${base_path} ${host} ${type} ${target} ${arch} ${host_flag}
-m qtmultimedia --archives qttranslations qttools qtsvg qtbase)
set(install_args ${install_args} install-qt --outputdir ${base_path} ${host} ${type} ${target} ${arch}
-m qtmultimedia --archives qttranslations qttools qtsvg qtbase)
endif()
if (NOT EXISTS "${prefix}")
message(STATUS "Downloading binaries for Qt...")
message(STATUS "Downloading Qt binaries for ${target}:${host}:${type}:${arch}:${arch_path}")
set(AQT_PREBUILD_BASE_URL "https://github.com/miurahr/aqtinstall/releases/download/v3.1.9")
if (WIN32)
set(aqt_path "${base_path}/aqt.exe")
file(DOWNLOAD
${AQT_PREBUILD_BASE_URL}/aqt.exe
${aqt_path} SHOW_PROGRESS)
if (NOT EXISTS "${aqt_path}")
file(DOWNLOAD
${AQT_PREBUILD_BASE_URL}/aqt.exe
${aqt_path} SHOW_PROGRESS)
endif()
execute_process(COMMAND ${aqt_path} ${install_args}
WORKING_DIRECTORY ${base_path})
elseif (APPLE)
set(aqt_path "${base_path}/aqt-macos")
file(DOWNLOAD
${AQT_PREBUILD_BASE_URL}/aqt-macos
${aqt_path} SHOW_PROGRESS)
if (NOT EXISTS "${aqt_path}")
file(DOWNLOAD
${AQT_PREBUILD_BASE_URL}/aqt-macos
${aqt_path} SHOW_PROGRESS)
endif()
execute_process(COMMAND chmod +x ${aqt_path})
execute_process(COMMAND ${aqt_path} ${install_args}
WORKING_DIRECTORY ${base_path})
@ -96,18 +135,38 @@ function(download_qt target)
execute_process(COMMAND ${CMAKE_COMMAND} -E env PYTHONPATH=${aqt_install_path} python3 -m aqt ${install_args}
WORKING_DIRECTORY ${base_path})
endif()
message(STATUS "Downloaded Qt binaries for ${target}:${host}:${type}:${arch}:${arch_path} to ${prefix}")
endif()
message(STATUS "Using downloaded Qt binaries at ${prefix}")
set(${prefix_out} "${prefix}" PARENT_SCOPE)
endfunction()
# Add the Qt prefix path so CMake can locate it.
# This function downloads Qt using aqt.
# The path of the downloaded content will be added to the CMAKE_PREFIX_PATH.
# QT_TARGET_PATH is set to the Qt for the compile target platform.
# QT_HOST_PATH is set to a host-compatible Qt, for running tools.
# Params:
# target: Qt dependency to install. Specify a version number to download Qt, or "tools_(name)" for a specific build tool.
function(download_qt target)
determine_qt_parameters("${target}" host type arch arch_path host_type host_arch host_arch_path)
get_external_prefix(qt base_path)
file(MAKE_DIRECTORY "${base_path}")
download_qt_configuration(prefix "${target}" "${host}" "${type}" "${arch}" "${arch_path}" "${base_path}")
if (DEFINED host_arch_path AND NOT "${host_arch_path}" STREQUAL "${arch_path}")
download_qt_configuration(host_prefix "${target}" "${host}" "${host_type}" "${host_arch}" "${host_arch_path}" "${base_path}")
else()
set(host_prefix "${prefix}")
endif()
set(QT_TARGET_PATH "${prefix}" CACHE STRING "")
set(QT_HOST_PATH "${host_prefix}" CACHE STRING "")
# Add the target Qt prefix path so CMake can locate it.
list(APPEND CMAKE_PREFIX_PATH "${prefix}")
set(CMAKE_PREFIX_PATH ${CMAKE_PREFIX_PATH} PARENT_SCOPE)
if (DEFINED host_prefix)
message(STATUS "Using downloaded host Qt binaries at ${host_prefix}")
set(QT_HOST_PATH "${host_prefix}" CACHE STRING "")
endif()
endfunction()
function(download_moltenvk)

View File

@ -287,5 +287,13 @@ dumptxt -p $[OUT] "nfcSecret1Seed=$[NFC_SEED_1]"
dumptxt -p $[OUT] "nfcSecret1HmacKey=$[NFC_HMAC_KEY_1]"
dumptxt -p $[OUT] "nfcIv=$[NFC_IV]"
# Dump seeddb.bin as well
set SEEDDB_IN "0:/gm9/out/seeddb.bin"
set SEEDDB_OUT "0:/gm9/seeddb.bin"
sdump -w seeddb.bin
cp -w $[SEEDDB_IN] $[SEEDDB_OUT]
@Exit

View File

@ -6,5 +6,5 @@ Usage:
1. Copy "DumpKeys.gm9" into the "gm9/scripts/" directory on your SD card.
2. Launch GodMode9, press the HOME button, select Scripts, and select "DumpKeys" from the list of scripts that appears.
3. Wait for the script to complete and return you to the GodMode9 main menu.
4. Power off your system and copy the "gm9/aes_keys.txt" file off of your SD card into "(Citra directory)/sysdata/".
4. Power off your system and copy the "gm9/aes_keys.txt" and "gm9/seeddb.bin" files off of your SD card into "(Citra directory)/sysdata/".

View File

@ -11,3 +11,4 @@ type = QT
file_filter = ../../src/android/app/src/main/res/values-<lang>/strings.xml
source_file = ../../src/android/app/src/main/res/values/strings.xml
type = ANDROID
lang_map = es_ES:es, hu_HU:hu, ru_RU:ru, pt_BR:pt, zh_CN:zh

View File

@ -57,6 +57,12 @@ if(USE_SYSTEM_CRYPTOPP)
add_library(cryptopp INTERFACE)
target_link_libraries(cryptopp INTERFACE cryptopp::cryptopp)
else()
if (WIN32 AND NOT MSVC AND "arm64" IN_LIST ARCHITECTURE)
# TODO: CryptoPP ARM64 ASM does not seem to support Windows unless compiled with MSVC.
# TODO: See https://github.com/weidai11/cryptopp/issues/1260
set(CRYPTOPP_DISABLE_ASM ON CACHE BOOL "")
endif()
set(CRYPTOPP_BUILD_DOCUMENTATION OFF CACHE BOOL "")
set(CRYPTOPP_BUILD_TESTING OFF CACHE BOOL "")
set(CRYPTOPP_INSTALL OFF CACHE BOOL "")
@ -235,6 +241,18 @@ endif()
# DiscordRPC
if (USE_DISCORD_PRESENCE)
# rapidjson used by discord-rpc is old and doesn't correctly detect endianness for some platforms.
include(TestBigEndian)
test_big_endian(RAPIDJSON_BIG_ENDIAN)
if(RAPIDJSON_BIG_ENDIAN)
add_compile_definitions(RAPIDJSON_ENDIAN=1)
else()
add_compile_definitions(RAPIDJSON_ENDIAN=0)
endif()
# Apply a dummy CLANG_FORMAT_SUFFIX to disable discord-rpc's unnecessary automatic clang-format.
set(CLANG_FORMAT_SUFFIX "dummy")
add_subdirectory(discord-rpc EXCLUDE_FROM_ALL)
target_include_directories(discord-rpc INTERFACE ./discord-rpc/include)
endif()

View File

@ -316,7 +316,7 @@ struct SourceStatus {
u16_le sync_count; ///< Is set by the DSP to the value of SourceConfiguration::sync_count
u32_dsp buffer_position; ///< Number of samples into the current buffer
u16_le current_buffer_id; ///< Updated when a buffer finishes playing
INSERT_PADDING_DSPWORDS(1);
u16_le last_buffer_id; ///< Updated when all buffers in the queue finish playing
};
Status status[num_sources];

View File

@ -324,6 +324,7 @@ void Source::GenerateFrame() {
if (state.current_buffer.empty() && !DequeueBuffer()) {
state.enabled = false;
state.buffer_update = true;
state.last_buffer_id = state.current_buffer_id;
state.current_buffer_id = 0;
return;
}
@ -411,6 +412,7 @@ bool Source::DequeueBuffer() {
state.next_sample_number = state.current_sample_number;
state.current_buffer_physical_address = buf.physical_address;
state.current_buffer_id = buf.buffer_id;
state.last_buffer_id = 0;
state.buffer_update = buf.from_queue && !buf.has_played;
if (buf.is_looping) {
@ -432,9 +434,10 @@ SourceStatus::Status Source::GetCurrentStatus() {
ret.is_enabled = state.enabled;
ret.current_buffer_id_dirty = state.buffer_update ? 1 : 0;
state.buffer_update = false;
ret.current_buffer_id = state.current_buffer_id;
ret.buffer_position = state.current_sample_number;
ret.sync_count = state.sync_count;
ret.buffer_position = state.current_sample_number;
ret.current_buffer_id = state.current_buffer_id;
ret.last_buffer_id = state.last_buffer_id;
return ret;
}

View File

@ -143,7 +143,8 @@ private:
// buffer_id state
bool buffer_update = false;
u32 current_buffer_id = 0;
u16 last_buffer_id = 0;
u16 current_buffer_id = 0;
// Decoding state

View File

@ -6,13 +6,13 @@
#include "citra_qt/debugger/wait_tree.h"
#include "citra_qt/uisettings.h"
#include "common/assert.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_mutex.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_semaphore.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_timer.h"
#include "core/hle/kernel/event.h"
#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/semaphore.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/timer.h"
#include "core/hle/kernel/wait_object.h"
namespace {
@ -98,7 +98,7 @@ QString WaitTreeText::GetText() const {
return text;
}
WaitTreeWaitObject::WaitTreeWaitObject(const Kernel::KSynchronizationObject& o) : object(o) {}
WaitTreeWaitObject::WaitTreeWaitObject(const Kernel::WaitObject& o) : object(o) {}
bool WaitTreeExpandableItem::IsExpandable() const {
return true;
@ -106,24 +106,23 @@ bool WaitTreeExpandableItem::IsExpandable() const {
QString WaitTreeWaitObject::GetText() const {
return tr("[%1]%2 %3")
.arg(/*object.GetObjectId()*/ 0)
.arg(object.GetObjectId())
.arg(QString::fromStdString(object.GetTypeName()),
QString::fromStdString(/*object.GetName()*/ "name"));
QString::fromStdString(object.GetName()));
}
std::unique_ptr<WaitTreeWaitObject> WaitTreeWaitObject::make(
const Kernel::KSynchronizationObject& object) {
switch (object.GetTypeObj().GetClassToken()) {
case Kernel::ClassTokenType::KEvent:
return std::make_unique<WaitTreeEvent>(static_cast<const Kernel::KEvent&>(object));
case Kernel::ClassTokenType::KMutex:
return std::make_unique<WaitTreeMutex>(static_cast<const Kernel::KMutex&>(object));
case Kernel::ClassTokenType::KSemaphore:
return std::make_unique<WaitTreeSemaphore>(static_cast<const Kernel::KSemaphore&>(object));
case Kernel::ClassTokenType::KTimer:
return std::make_unique<WaitTreeTimer>(static_cast<const Kernel::KTimer&>(object));
case Kernel::ClassTokenType::KThread:
return std::make_unique<WaitTreeThread>(static_cast<const Kernel::KThread&>(object));
std::unique_ptr<WaitTreeWaitObject> WaitTreeWaitObject::make(const Kernel::WaitObject& object) {
switch (object.GetHandleType()) {
case Kernel::HandleType::Event:
return std::make_unique<WaitTreeEvent>(static_cast<const Kernel::Event&>(object));
case Kernel::HandleType::Mutex:
return std::make_unique<WaitTreeMutex>(static_cast<const Kernel::Mutex&>(object));
case Kernel::HandleType::Semaphore:
return std::make_unique<WaitTreeSemaphore>(static_cast<const Kernel::Semaphore&>(object));
case Kernel::HandleType::Timer:
return std::make_unique<WaitTreeTimer>(static_cast<const Kernel::Timer&>(object));
case Kernel::HandleType::Thread:
return std::make_unique<WaitTreeThread>(static_cast<const Kernel::Thread&>(object));
default:
return std::make_unique<WaitTreeWaitObject>(object);
}
@ -154,7 +153,7 @@ QString WaitTreeWaitObject::GetResetTypeQString(Kernel::ResetType reset_type) {
return {};
}
WaitTreeObjectList::WaitTreeObjectList(const std::vector<Kernel::KSynchronizationObject*>& list,
WaitTreeObjectList::WaitTreeObjectList(const std::vector<std::shared_ptr<Kernel::WaitObject>>& list,
bool w_all)
: object_list(list), wait_all(w_all) {}
@ -171,12 +170,12 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeObjectList::GetChildren() con
return list;
}
WaitTreeThread::WaitTreeThread(const Kernel::KThread& thread) : WaitTreeWaitObject(thread) {}
WaitTreeThread::WaitTreeThread(const Kernel::Thread& thread) : WaitTreeWaitObject(thread) {}
QString WaitTreeThread::GetText() const {
const auto& thread = static_cast<const Kernel::KThread&>(object);
const auto& thread = static_cast<const Kernel::Thread&>(object);
QString status;
switch (thread.GetStatus()) {
switch (thread.status) {
case Kernel::ThreadStatus::Running:
status = tr("running");
break;
@ -184,7 +183,7 @@ QString WaitTreeThread::GetText() const {
status = tr("ready");
break;
case Kernel::ThreadStatus::WaitArb:
status = tr("waiting for address 0x%1").arg(thread.m_wait_address, 8, 16, QLatin1Char('0'));
status = tr("waiting for address 0x%1").arg(thread.wait_address, 8, 16, QLatin1Char('0'));
break;
case Kernel::ThreadStatus::WaitSleep:
status = tr("sleeping");
@ -206,18 +205,17 @@ QString WaitTreeThread::GetText() const {
status = tr("dead");
break;
}
const auto& context = thread.GetContext();
QString pc_info = tr(" PC = 0x%1 LR = 0x%2")
.arg(context.GetProgramCounter(), 8, 16, QLatin1Char('0'))
.arg(context.GetLinkRegister(), 8, 16, QLatin1Char('0'));
.arg(thread.context.GetProgramCounter(), 8, 16, QLatin1Char('0'))
.arg(thread.context.GetLinkRegister(), 8, 16, QLatin1Char('0'));
return QStringLiteral("%1%2 (%3) ").arg(WaitTreeWaitObject::GetText(), pc_info, status);
}
QColor WaitTreeThread::GetColor() const {
const std::size_t color_index = IsDarkTheme() ? 1 : 0;
const auto& thread = static_cast<const Kernel::KThread&>(object);
switch (thread.GetStatus()) {
const auto& thread = static_cast<const Kernel::Thread&>(object);
switch (thread.status) {
case Kernel::ThreadStatus::Running:
return QColor(WaitTreeColors[0][color_index]);
case Kernel::ThreadStatus::Ready:
@ -244,10 +242,10 @@ QColor WaitTreeThread::GetColor() const {
std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
std::vector<std::unique_ptr<WaitTreeItem>> list(WaitTreeWaitObject::GetChildren());
const auto& thread = static_cast<const Kernel::KThread&>(object);
const auto& thread = static_cast<const Kernel::Thread&>(object);
QString processor;
switch (thread.m_processor_id) {
switch (thread.processor_id) {
case Kernel::ThreadProcessorId::ThreadProcessorIdDefault:
processor = tr("default");
break;
@ -261,88 +259,86 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
processor = tr("SysCore");
break;
default:
processor = tr("Unknown processor %1").arg(thread.m_processor_id);
processor = tr("Unknown processor %1").arg(thread.processor_id);
break;
}
list.push_back(
std::make_unique<WaitTreeText>(tr("object id = %1").arg(/*thread.GetObjectId()*/ 1)));
list.push_back(std::make_unique<WaitTreeText>(tr("object id = %1").arg(thread.GetObjectId())));
list.push_back(std::make_unique<WaitTreeText>(tr("processor = %1").arg(processor)));
list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadId())));
if (auto process = thread.GetOwner()) {
list.push_back(std::make_unique<WaitTreeText>(
tr("process = %1 (%2)")
.arg(QString::fromStdString(/*process->GetName()*/ ""))
.arg(process->process_id)));
if (auto process = thread.owner_process.lock()) {
list.push_back(
std::make_unique<WaitTreeText>(tr("process = %1 (%2)")
.arg(QString::fromStdString(process->GetName()))
.arg(process->process_id)));
}
list.push_back(std::make_unique<WaitTreeText>(tr("priority = %1(current) / %2(normal)")
.arg(thread.GetCurrentPriority())
.arg(thread.m_nominal_priority)));
.arg(thread.current_priority)
.arg(thread.nominal_priority)));
list.push_back(std::make_unique<WaitTreeText>(
tr("last running ticks = %1").arg(thread.m_last_running_ticks)));
tr("last running ticks = %1").arg(thread.last_running_ticks)));
if (thread.m_held_mutexes.empty()) {
if (thread.held_mutexes.empty()) {
list.push_back(std::make_unique<WaitTreeText>(tr("not holding mutex")));
} else {
list.push_back(std::make_unique<WaitTreeMutexList>(thread.m_held_mutexes));
list.push_back(std::make_unique<WaitTreeMutexList>(thread.held_mutexes));
}
if (thread.GetStatus() == Kernel::ThreadStatus::WaitSynchAny ||
thread.GetStatus() == Kernel::ThreadStatus::WaitSynchAll ||
thread.GetStatus() == Kernel::ThreadStatus::WaitHleEvent) {
list.push_back(std::make_unique<WaitTreeObjectList>(thread.m_wait_objects,
if (thread.status == Kernel::ThreadStatus::WaitSynchAny ||
thread.status == Kernel::ThreadStatus::WaitSynchAll ||
thread.status == Kernel::ThreadStatus::WaitHleEvent) {
list.push_back(std::make_unique<WaitTreeObjectList>(thread.wait_objects,
thread.IsSleepingOnWaitAll()));
}
return list;
}
WaitTreeEvent::WaitTreeEvent(const Kernel::KEvent& object) : WaitTreeWaitObject(object) {}
WaitTreeEvent::WaitTreeEvent(const Kernel::Event& object) : WaitTreeWaitObject(object) {}
std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeEvent::GetChildren() const {
std::vector<std::unique_ptr<WaitTreeItem>> list(WaitTreeWaitObject::GetChildren());
list.push_back(std::make_unique<WaitTreeText>(
tr("reset type = %1")
.arg(GetResetTypeQString(static_cast<const Kernel::KEvent&>(object).GetResetType()))));
.arg(GetResetTypeQString(static_cast<const Kernel::Event&>(object).GetResetType()))));
return list;
}
WaitTreeMutex::WaitTreeMutex(const Kernel::KMutex& object) : WaitTreeWaitObject(object) {}
WaitTreeMutex::WaitTreeMutex(const Kernel::Mutex& object) : WaitTreeWaitObject(object) {}
std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeMutex::GetChildren() const {
std::vector<std::unique_ptr<WaitTreeItem>> list(WaitTreeWaitObject::GetChildren());
const auto& mutex = static_cast<const Kernel::KMutex&>(object);
if (mutex.m_lock_count) {
list.push_back(std::make_unique<WaitTreeText>(
tr("locked %1 times by thread:").arg(mutex.m_lock_count)));
list.push_back(std::make_unique<WaitTreeThread>(*mutex.m_holding_thread));
const auto& mutex = static_cast<const Kernel::Mutex&>(object);
if (mutex.lock_count) {
list.push_back(
std::make_unique<WaitTreeText>(tr("locked %1 times by thread:").arg(mutex.lock_count)));
list.push_back(std::make_unique<WaitTreeThread>(*mutex.holding_thread));
} else {
list.push_back(std::make_unique<WaitTreeText>(tr("free")));
}
return list;
}
WaitTreeSemaphore::WaitTreeSemaphore(const Kernel::KSemaphore& object)
WaitTreeSemaphore::WaitTreeSemaphore(const Kernel::Semaphore& object)
: WaitTreeWaitObject(object) {}
std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeSemaphore::GetChildren() const {
std::vector<std::unique_ptr<WaitTreeItem>> list(WaitTreeWaitObject::GetChildren());
const auto& semaphore = static_cast<const Kernel::KSemaphore&>(object);
list.push_back(std::make_unique<WaitTreeText>(
tr("available count = %1").arg(semaphore.GetAvailableCount())));
const auto& semaphore = static_cast<const Kernel::Semaphore&>(object);
list.push_back(
std::make_unique<WaitTreeText>(tr("max count = %1").arg(semaphore.GetMaxCount())));
std::make_unique<WaitTreeText>(tr("available count = %1").arg(semaphore.available_count)));
list.push_back(std::make_unique<WaitTreeText>(tr("max count = %1").arg(semaphore.max_count)));
return list;
}
WaitTreeTimer::WaitTreeTimer(const Kernel::KTimer& object) : WaitTreeWaitObject(object) {}
WaitTreeTimer::WaitTreeTimer(const Kernel::Timer& object) : WaitTreeWaitObject(object) {}
std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeTimer::GetChildren() const {
std::vector<std::unique_ptr<WaitTreeItem>> list(WaitTreeWaitObject::GetChildren());
const auto& timer = static_cast<const Kernel::KTimer&>(object);
const auto& timer = static_cast<const Kernel::Timer&>(object);
list.push_back(std::make_unique<WaitTreeText>(
tr("reset type = %1").arg(GetResetTypeQString(timer.GetResetType()))));
@ -353,7 +349,8 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeTimer::GetChildren() const {
return list;
}
WaitTreeMutexList::WaitTreeMutexList(const boost::container::flat_set<Kernel::KMutex*>& list)
WaitTreeMutexList::WaitTreeMutexList(
const boost::container::flat_set<std::shared_ptr<Kernel::Mutex>>& list)
: mutex_list(list) {}
QString WaitTreeMutexList::GetText() const {
@ -367,7 +364,7 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeMutexList::GetChildren() cons
return list;
}
WaitTreeThreadList::WaitTreeThreadList(const std::vector<Kernel::KThread*>& list)
WaitTreeThreadList::WaitTreeThreadList(const std::vector<std::shared_ptr<Kernel::Thread>>& list)
: thread_list(list) {}
QString WaitTreeThreadList::GetText() const {

View File

@ -10,16 +10,17 @@
#include <QTreeView>
#include <boost/container/flat_set.hpp>
#include "core/core.h"
#include "core/hle/kernel/object.h"
class EmuThread;
namespace Kernel {
class KSynchronizationObject;
class KEvent;
class KMutex;
class KSemaphore;
class KThread;
class KTimer;
class WaitObject;
class Event;
class Mutex;
class Semaphore;
class Thread;
class Timer;
} // namespace Kernel
namespace Core {
@ -72,13 +73,13 @@ public:
class WaitTreeWaitObject : public WaitTreeExpandableItem {
Q_OBJECT
public:
explicit WaitTreeWaitObject(const Kernel::KSynchronizationObject& object);
static std::unique_ptr<WaitTreeWaitObject> make(const Kernel::KSynchronizationObject& object);
explicit WaitTreeWaitObject(const Kernel::WaitObject& object);
static std::unique_ptr<WaitTreeWaitObject> make(const Kernel::WaitObject& object);
QString GetText() const override;
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
protected:
const Kernel::KSynchronizationObject& object;
const Kernel::WaitObject& object;
static QString GetResetTypeQString(Kernel::ResetType reset_type);
};
@ -86,19 +87,19 @@ protected:
class WaitTreeObjectList : public WaitTreeExpandableItem {
Q_OBJECT
public:
WaitTreeObjectList(const std::vector<Kernel::KSynchronizationObject*>& list, bool wait_all);
WaitTreeObjectList(const std::vector<std::shared_ptr<Kernel::WaitObject>>& list, bool wait_all);
QString GetText() const override;
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
private:
const std::vector<Kernel::KSynchronizationObject*>& object_list;
const std::vector<std::shared_ptr<Kernel::WaitObject>>& object_list;
bool wait_all;
};
class WaitTreeThread : public WaitTreeWaitObject {
Q_OBJECT
public:
explicit WaitTreeThread(const Kernel::KThread& thread);
explicit WaitTreeThread(const Kernel::Thread& thread);
QString GetText() const override;
QColor GetColor() const override;
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
@ -107,52 +108,53 @@ public:
class WaitTreeEvent : public WaitTreeWaitObject {
Q_OBJECT
public:
explicit WaitTreeEvent(const Kernel::KEvent& object);
explicit WaitTreeEvent(const Kernel::Event& object);
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
};
class WaitTreeMutex : public WaitTreeWaitObject {
Q_OBJECT
public:
explicit WaitTreeMutex(const Kernel::KMutex& object);
explicit WaitTreeMutex(const Kernel::Mutex& object);
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
};
class WaitTreeSemaphore : public WaitTreeWaitObject {
Q_OBJECT
public:
explicit WaitTreeSemaphore(const Kernel::KSemaphore& object);
explicit WaitTreeSemaphore(const Kernel::Semaphore& object);
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
};
class WaitTreeTimer : public WaitTreeWaitObject {
Q_OBJECT
public:
explicit WaitTreeTimer(const Kernel::KTimer& object);
explicit WaitTreeTimer(const Kernel::Timer& object);
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
};
class WaitTreeMutexList : public WaitTreeExpandableItem {
Q_OBJECT
public:
explicit WaitTreeMutexList(const boost::container::flat_set<Kernel::KMutex*>& list);
explicit WaitTreeMutexList(
const boost::container::flat_set<std::shared_ptr<Kernel::Mutex>>& list);
QString GetText() const override;
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
private:
const boost::container::flat_set<Kernel::KMutex*>& mutex_list;
const boost::container::flat_set<std::shared_ptr<Kernel::Mutex>>& mutex_list;
};
class WaitTreeThreadList : public WaitTreeExpandableItem {
Q_OBJECT
public:
explicit WaitTreeThreadList(const std::vector<Kernel::KThread*>& list);
explicit WaitTreeThreadList(const std::vector<std::shared_ptr<Kernel::Thread>>& list);
QString GetText() const override;
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
private:
const std::vector<Kernel::KThread*>& thread_list;
const std::vector<std::shared_ptr<Kernel::Thread>>& thread_list;
};
class WaitTreeModel : public QAbstractItemModel {

View File

@ -88,7 +88,6 @@ add_library(citra_common STATIC
file_util.cpp
file_util.h
hash.h
intrusive_list.h
literals.h
logging/backend.cpp
logging/backend.h
@ -110,7 +109,6 @@ add_library(citra_common STATIC
microprofileui.h
param_package.cpp
param_package.h
parent_of_member.h
polyfill_thread.h
precompiled_headers.h
quaternion.h

View File

@ -21,10 +21,4 @@ template <typename T>
return static_cast<T>(value - value % size);
}
template <typename T>
requires std::is_unsigned_v<T>
[[nodiscard]] constexpr bool Is4KBAligned(T value) {
return (value & 0xFFF) == 0;
}
} // namespace Common

View File

@ -49,14 +49,6 @@ __declspec(dllimport) void __stdcall DebugBreak(void);
#define locale_t _locale_t
#endif // _MSC_VER
#define CITRA_NON_COPYABLE(cls) \
cls(const cls&) = delete; \
cls& operator=(const cls&) = delete
#define CITRA_NON_MOVEABLE(cls) \
cls(cls&&) = delete; \
cls& operator=(cls&&) = delete
#define DECLARE_ENUM_FLAG_OPERATORS(type) \
[[nodiscard]] constexpr type operator|(type a, type b) noexcept { \
using T = std::underlying_type_t<type>; \

View File

@ -1,631 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/common_funcs.h"
#include "common/parent_of_member.h"
namespace Common {
// Forward declare implementation class for Node.
namespace impl {
class IntrusiveListImpl;
}
class IntrusiveListNode {
CITRA_NON_COPYABLE(IntrusiveListNode);
private:
friend class impl::IntrusiveListImpl;
IntrusiveListNode* m_prev;
IntrusiveListNode* m_next;
public:
constexpr IntrusiveListNode() : m_prev(this), m_next(this) {}
constexpr bool IsLinked() const {
return m_next != this;
}
private:
constexpr void LinkPrev(IntrusiveListNode* node) {
// We can't link an already linked node.
ASSERT(!node->IsLinked());
this->SplicePrev(node, node);
}
constexpr void SplicePrev(IntrusiveListNode* first, IntrusiveListNode* last) {
// Splice a range into the list.
auto last_prev = last->m_prev;
first->m_prev = m_prev;
last_prev->m_next = this;
m_prev->m_next = first;
m_prev = last_prev;
}
constexpr void LinkNext(IntrusiveListNode* node) {
// We can't link an already linked node.
ASSERT(!node->IsLinked());
return this->SpliceNext(node, node);
}
constexpr void SpliceNext(IntrusiveListNode* first, IntrusiveListNode* last) {
// Splice a range into the list.
auto last_prev = last->m_prev;
first->m_prev = this;
last_prev->m_next = m_next;
m_next->m_prev = last_prev;
m_next = first;
}
constexpr void Unlink() {
this->Unlink(m_next);
}
constexpr void Unlink(IntrusiveListNode* last) {
// Unlink a node from a next node.
auto last_prev = last->m_prev;
m_prev->m_next = last;
last->m_prev = m_prev;
last_prev->m_next = this;
m_prev = last_prev;
}
constexpr IntrusiveListNode* GetPrev() {
return m_prev;
}
constexpr const IntrusiveListNode* GetPrev() const {
return m_prev;
}
constexpr IntrusiveListNode* GetNext() {
return m_next;
}
constexpr const IntrusiveListNode* GetNext() const {
return m_next;
}
};
// DEPRECATED: static_assert(std::is_literal_type<IntrusiveListNode>::value);
namespace impl {
class IntrusiveListImpl {
CITRA_NON_COPYABLE(IntrusiveListImpl);
private:
IntrusiveListNode m_root_node;
public:
template <bool Const>
class Iterator;
using value_type = IntrusiveListNode;
using size_type = size_t;
using difference_type = ptrdiff_t;
using pointer = value_type*;
using const_pointer = const value_type*;
using reference = value_type&;
using const_reference = const value_type&;
using iterator = Iterator<false>;
using const_iterator = Iterator<true>;
using reverse_iterator = std::reverse_iterator<iterator>;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
template <bool Const>
class Iterator {
public:
using iterator_category = std::bidirectional_iterator_tag;
using value_type = typename IntrusiveListImpl::value_type;
using difference_type = typename IntrusiveListImpl::difference_type;
using pointer =
std::conditional_t<Const, IntrusiveListImpl::const_pointer, IntrusiveListImpl::pointer>;
using reference = std::conditional_t<Const, IntrusiveListImpl::const_reference,
IntrusiveListImpl::reference>;
private:
pointer m_node;
public:
constexpr explicit Iterator(pointer n) : m_node(n) {}
constexpr bool operator==(const Iterator& rhs) const {
return m_node == rhs.m_node;
}
constexpr pointer operator->() const {
return m_node;
}
constexpr reference operator*() const {
return *m_node;
}
constexpr Iterator& operator++() {
m_node = m_node->m_next;
return *this;
}
constexpr Iterator& operator--() {
m_node = m_node->m_prev;
return *this;
}
constexpr Iterator operator++(int) {
const Iterator it{*this};
++(*this);
return it;
}
constexpr Iterator operator--(int) {
const Iterator it{*this};
--(*this);
return it;
}
constexpr operator Iterator<true>() const {
return Iterator<true>(m_node);
}
constexpr Iterator<false> GetNonConstIterator() const {
return Iterator<false>(const_cast<IntrusiveListImpl::pointer>(m_node));
}
};
public:
constexpr IntrusiveListImpl() : m_root_node() {}
// Iterator accessors.
constexpr iterator begin() {
return iterator(m_root_node.GetNext());
}
constexpr const_iterator begin() const {
return const_iterator(m_root_node.GetNext());
}
constexpr iterator end() {
return iterator(std::addressof(m_root_node));
}
constexpr const_iterator end() const {
return const_iterator(std::addressof(m_root_node));
}
constexpr iterator iterator_to(reference v) {
// Only allow iterator_to for values in lists.
ASSERT(v.IsLinked());
return iterator(std::addressof(v));
}
constexpr const_iterator iterator_to(const_reference v) const {
// Only allow iterator_to for values in lists.
ASSERT(v.IsLinked());
return const_iterator(std::addressof(v));
}
// Content management.
constexpr bool empty() const {
return !m_root_node.IsLinked();
}
constexpr size_type size() const {
return static_cast<size_type>(std::distance(this->begin(), this->end()));
}
constexpr reference back() {
return *m_root_node.GetPrev();
}
constexpr const_reference back() const {
return *m_root_node.GetPrev();
}
constexpr reference front() {
return *m_root_node.GetNext();
}
constexpr const_reference front() const {
return *m_root_node.GetNext();
}
constexpr void push_back(reference node) {
m_root_node.LinkPrev(std::addressof(node));
}
constexpr void push_front(reference node) {
m_root_node.LinkNext(std::addressof(node));
}
constexpr void pop_back() {
m_root_node.GetPrev()->Unlink();
}
constexpr void pop_front() {
m_root_node.GetNext()->Unlink();
}
constexpr iterator insert(const_iterator pos, reference node) {
pos.GetNonConstIterator()->LinkPrev(std::addressof(node));
return iterator(std::addressof(node));
}
constexpr void splice(const_iterator pos, IntrusiveListImpl& o) {
splice_impl(pos, o.begin(), o.end());
}
constexpr void splice(const_iterator pos, IntrusiveListImpl&, const_iterator first) {
const_iterator last(first);
std::advance(last, 1);
splice_impl(pos, first, last);
}
constexpr void splice(const_iterator pos, IntrusiveListImpl&, const_iterator first,
const_iterator last) {
splice_impl(pos, first, last);
}
constexpr iterator erase(const_iterator pos) {
if (pos == this->end()) {
return this->end();
}
iterator it(pos.GetNonConstIterator());
(it++)->Unlink();
return it;
}
constexpr void clear() {
while (!this->empty()) {
this->pop_front();
}
}
private:
constexpr void splice_impl(const_iterator _pos, const_iterator _first, const_iterator _last) {
if (_first == _last) {
return;
}
iterator pos(_pos.GetNonConstIterator());
iterator first(_first.GetNonConstIterator());
iterator last(_last.GetNonConstIterator());
first->Unlink(std::addressof(*last));
pos->SplicePrev(std::addressof(*first), std::addressof(*first));
}
};
} // namespace impl
template <class T, class Traits>
class IntrusiveList {
CITRA_NON_COPYABLE(IntrusiveList);
private:
impl::IntrusiveListImpl m_impl;
public:
template <bool Const>
class Iterator;
using value_type = T;
using size_type = size_t;
using difference_type = ptrdiff_t;
using pointer = value_type*;
using const_pointer = const value_type*;
using reference = value_type&;
using const_reference = const value_type&;
using iterator = Iterator<false>;
using const_iterator = Iterator<true>;
using reverse_iterator = std::reverse_iterator<iterator>;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
template <bool Const>
class Iterator {
public:
friend class Common::IntrusiveList<T, Traits>;
using ImplIterator =
std::conditional_t<Const, Common::impl::IntrusiveListImpl::const_iterator,
Common::impl::IntrusiveListImpl::iterator>;
using iterator_category = std::bidirectional_iterator_tag;
using value_type = typename IntrusiveList::value_type;
using difference_type = typename IntrusiveList::difference_type;
using pointer =
std::conditional_t<Const, IntrusiveList::const_pointer, IntrusiveList::pointer>;
using reference =
std::conditional_t<Const, IntrusiveList::const_reference, IntrusiveList::reference>;
private:
ImplIterator m_iterator;
private:
constexpr explicit Iterator(ImplIterator it) : m_iterator(it) {}
constexpr ImplIterator GetImplIterator() const {
return m_iterator;
}
public:
constexpr bool operator==(const Iterator& rhs) const {
return m_iterator == rhs.m_iterator;
}
constexpr pointer operator->() const {
return std::addressof(Traits::GetParent(*m_iterator));
}
constexpr reference operator*() const {
return Traits::GetParent(*m_iterator);
}
constexpr Iterator& operator++() {
++m_iterator;
return *this;
}
constexpr Iterator& operator--() {
--m_iterator;
return *this;
}
constexpr Iterator operator++(int) {
const Iterator it{*this};
++m_iterator;
return it;
}
constexpr Iterator operator--(int) {
const Iterator it{*this};
--m_iterator;
return it;
}
constexpr operator Iterator<true>() const {
return Iterator<true>(m_iterator);
}
};
private:
static constexpr IntrusiveListNode& GetNode(reference ref) {
return Traits::GetNode(ref);
}
static constexpr IntrusiveListNode const& GetNode(const_reference ref) {
return Traits::GetNode(ref);
}
static constexpr reference GetParent(IntrusiveListNode& node) {
return Traits::GetParent(node);
}
static constexpr const_reference GetParent(IntrusiveListNode const& node) {
return Traits::GetParent(node);
}
public:
constexpr IntrusiveList() : m_impl() {}
// Iterator accessors.
constexpr iterator begin() {
return iterator(m_impl.begin());
}
constexpr const_iterator begin() const {
return const_iterator(m_impl.begin());
}
constexpr iterator end() {
return iterator(m_impl.end());
}
constexpr const_iterator end() const {
return const_iterator(m_impl.end());
}
constexpr const_iterator cbegin() const {
return this->begin();
}
constexpr const_iterator cend() const {
return this->end();
}
constexpr reverse_iterator rbegin() {
return reverse_iterator(this->end());
}
constexpr const_reverse_iterator rbegin() const {
return const_reverse_iterator(this->end());
}
constexpr reverse_iterator rend() {
return reverse_iterator(this->begin());
}
constexpr const_reverse_iterator rend() const {
return const_reverse_iterator(this->begin());
}
constexpr const_reverse_iterator crbegin() const {
return this->rbegin();
}
constexpr const_reverse_iterator crend() const {
return this->rend();
}
constexpr iterator iterator_to(reference v) {
return iterator(m_impl.iterator_to(GetNode(v)));
}
constexpr const_iterator iterator_to(const_reference v) const {
return const_iterator(m_impl.iterator_to(GetNode(v)));
}
// Content management.
constexpr bool empty() const {
return m_impl.empty();
}
constexpr size_type size() const {
return m_impl.size();
}
constexpr reference back() {
return GetParent(m_impl.back());
}
constexpr const_reference back() const {
return GetParent(m_impl.back());
}
constexpr reference front() {
return GetParent(m_impl.front());
}
constexpr const_reference front() const {
return GetParent(m_impl.front());
}
constexpr void push_back(reference ref) {
m_impl.push_back(GetNode(ref));
}
constexpr void push_front(reference ref) {
m_impl.push_front(GetNode(ref));
}
constexpr void pop_back() {
m_impl.pop_back();
}
constexpr void pop_front() {
m_impl.pop_front();
}
constexpr iterator insert(const_iterator pos, reference ref) {
return iterator(m_impl.insert(pos.GetImplIterator(), GetNode(ref)));
}
constexpr void splice(const_iterator pos, IntrusiveList& o) {
m_impl.splice(pos.GetImplIterator(), o.m_impl);
}
constexpr void splice(const_iterator pos, IntrusiveList& o, const_iterator first) {
m_impl.splice(pos.GetImplIterator(), o.m_impl, first.GetImplIterator());
}
constexpr void splice(const_iterator pos, IntrusiveList& o, const_iterator first,
const_iterator last) {
m_impl.splice(pos.GetImplIterator(), o.m_impl, first.GetImplIterator(),
last.GetImplIterator());
}
constexpr iterator erase(const_iterator pos) {
return iterator(m_impl.erase(pos.GetImplIterator()));
}
constexpr void clear() {
m_impl.clear();
}
};
template <auto T, class Derived = Common::impl::GetParentType<T>>
class IntrusiveListMemberTraits;
template <class Parent, IntrusiveListNode Parent::*Member, class Derived>
class IntrusiveListMemberTraits<Member, Derived> {
public:
using ListType = IntrusiveList<Derived, IntrusiveListMemberTraits>;
private:
friend class IntrusiveList<Derived, IntrusiveListMemberTraits>;
static constexpr IntrusiveListNode& GetNode(Derived& parent) {
return parent.*Member;
}
static constexpr IntrusiveListNode const& GetNode(Derived const& parent) {
return parent.*Member;
}
static Derived& GetParent(IntrusiveListNode& node) {
return Common::GetParentReference<Member, Derived>(std::addressof(node));
}
static Derived const& GetParent(IntrusiveListNode const& node) {
return Common::GetParentReference<Member, Derived>(std::addressof(node));
}
};
template <auto T, class Derived = Common::impl::GetParentType<T>>
class IntrusiveListMemberTraitsByNonConstexprOffsetOf;
template <class Parent, IntrusiveListNode Parent::*Member, class Derived>
class IntrusiveListMemberTraitsByNonConstexprOffsetOf<Member, Derived> {
public:
using ListType = IntrusiveList<Derived, IntrusiveListMemberTraitsByNonConstexprOffsetOf>;
private:
friend class IntrusiveList<Derived, IntrusiveListMemberTraitsByNonConstexprOffsetOf>;
static constexpr IntrusiveListNode& GetNode(Derived& parent) {
return parent.*Member;
}
static constexpr IntrusiveListNode const& GetNode(Derived const& parent) {
return parent.*Member;
}
static Derived& GetParent(IntrusiveListNode& node) {
return *reinterpret_cast<Derived*>(reinterpret_cast<char*>(std::addressof(node)) -
GetOffset());
}
static Derived const& GetParent(IntrusiveListNode const& node) {
return *reinterpret_cast<const Derived*>(
reinterpret_cast<const char*>(std::addressof(node)) - GetOffset());
}
static uintptr_t GetOffset() {
return reinterpret_cast<uintptr_t>(std::addressof(reinterpret_cast<Derived*>(0)->*Member));
}
};
template <class Derived>
class IntrusiveListBaseNode : public IntrusiveListNode {};
template <class Derived>
class IntrusiveListBaseTraits {
public:
using ListType = IntrusiveList<Derived, IntrusiveListBaseTraits>;
private:
friend class IntrusiveList<Derived, IntrusiveListBaseTraits>;
static constexpr IntrusiveListNode& GetNode(Derived& parent) {
return static_cast<IntrusiveListNode&>(
static_cast<IntrusiveListBaseNode<Derived>&>(parent));
}
static constexpr IntrusiveListNode const& GetNode(Derived const& parent) {
return static_cast<const IntrusiveListNode&>(
static_cast<const IntrusiveListBaseNode<Derived>&>(parent));
}
static constexpr Derived& GetParent(IntrusiveListNode& node) {
return static_cast<Derived&>(static_cast<IntrusiveListBaseNode<Derived>&>(node));
}
static constexpr Derived const& GetParent(IntrusiveListNode const& node) {
return static_cast<const Derived&>(
static_cast<const IntrusiveListBaseNode<Derived>&>(node));
}
};
} // namespace Common

View File

@ -1,190 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <type_traits>
#include "common/assert.h"
namespace Common {
namespace detail {
template <typename T, size_t Size, size_t Align>
struct TypedStorageImpl {
alignas(Align) u8 storage_[Size];
};
} // namespace detail
template <typename T>
using TypedStorage = detail::TypedStorageImpl<T, sizeof(T), alignof(T)>;
template <typename T>
static constexpr T* GetPointer(TypedStorage<T>& ts) {
return static_cast<T*>(static_cast<void*>(std::addressof(ts.storage_)));
}
template <typename T>
static constexpr const T* GetPointer(const TypedStorage<T>& ts) {
return static_cast<const T*>(static_cast<const void*>(std::addressof(ts.storage_)));
}
namespace impl {
template <size_t MaxDepth>
struct OffsetOfUnionHolder {
template <typename ParentType, typename MemberType, size_t Offset>
union UnionImpl {
using PaddingMember = char;
static constexpr size_t GetOffset() {
return Offset;
}
#pragma pack(push, 1)
struct {
PaddingMember padding[Offset];
MemberType members[(sizeof(ParentType) / sizeof(MemberType)) + 1];
} data;
#pragma pack(pop)
UnionImpl<ParentType, MemberType, Offset + 1> next_union;
};
template <typename ParentType, typename MemberType>
union UnionImpl<ParentType, MemberType, 0> {
static constexpr size_t GetOffset() {
return 0;
}
struct {
MemberType members[(sizeof(ParentType) / sizeof(MemberType)) + 1];
} data;
UnionImpl<ParentType, MemberType, 1> next_union;
};
template <typename ParentType, typename MemberType>
union UnionImpl<ParentType, MemberType, MaxDepth> {};
};
template <typename ParentType, typename MemberType>
struct OffsetOfCalculator {
using UnionHolder =
typename OffsetOfUnionHolder<sizeof(MemberType)>::template UnionImpl<ParentType, MemberType,
0>;
union Union {
char c{};
UnionHolder first_union;
TypedStorage<ParentType> parent;
constexpr Union() : c() {}
};
static constexpr Union U = {};
static constexpr const MemberType* GetNextAddress(const MemberType* start,
const MemberType* target) {
while (start < target) {
start++;
}
return start;
}
static constexpr std::ptrdiff_t GetDifference(const MemberType* start,
const MemberType* target) {
return (target - start) * sizeof(MemberType);
}
template <typename CurUnion>
static constexpr std::ptrdiff_t OffsetOfImpl(MemberType ParentType::*member,
CurUnion& cur_union) {
constexpr size_t Offset = CurUnion::GetOffset();
const auto target = std::addressof(GetPointer(U.parent)->*member);
const auto start = std::addressof(cur_union.data.members[0]);
const auto next = GetNextAddress(start, target);
if (next != target) {
if constexpr (Offset < sizeof(MemberType) - 1) {
return OffsetOfImpl(member, cur_union.next_union);
} else {
UNREACHABLE();
}
}
return static_cast<ptrdiff_t>(static_cast<size_t>(next - start) * sizeof(MemberType) +
Offset);
}
static constexpr std::ptrdiff_t OffsetOf(MemberType ParentType::*member) {
return OffsetOfImpl(member, U.first_union);
}
};
// Trait that splits a pointer-to-member type `M P::*` into its parent class
// type (P) and member type (M). The primary template is intentionally left
// undefined; only the pointer-to-member specialization below is usable, so
// instantiating it with anything else is a compile error.
template <typename T>
struct GetMemberPointerTraits;
template <typename P, typename M>
struct GetMemberPointerTraits<M P::*> {
using Parent = P;
using Member = M;
};
// Convenience aliases over the trait, keyed on a member-pointer non-type
// template parameter rather than its type.
template <auto MemberPtr>
using GetParentType = typename GetMemberPointerTraits<decltype(MemberPtr)>::Parent;
template <auto MemberPtr>
using GetMemberType = typename GetMemberPointerTraits<decltype(MemberPtr)>::Member;
// Byte offset of the member designated by MemberPtr within RealParentType.
//
// RealParentType defaults to the class that declares the member, but callers
// may pass a derived class; the static_assert enforces exactly that
// relationship (same class, or deduced class is a base of RealParentType) so
// the offset is computed against a type that actually contains the member.
//
// Fix: dropped the stray ';' after the function body (it was an empty
// declaration that trips -Wextra-semi) and switched to the C++17 _v variable
// templates already available to this file (it uses `if constexpr` elsewhere).
template <auto MemberPtr, typename RealParentType = GetParentType<MemberPtr>>
constexpr std::ptrdiff_t OffsetOf() {
    using DeducedParentType = GetParentType<MemberPtr>;
    using MemberType = GetMemberType<MemberPtr>;
    static_assert(std::is_base_of_v<DeducedParentType, RealParentType> ||
                  std::is_same_v<RealParentType, DeducedParentType>);
    return OffsetOfCalculator<RealParentType, MemberType>::OffsetOf(MemberPtr);
}
} // namespace impl
// Recovers a reference to the object that contains `member`, given a pointer
// to that member and the member pointer identifying it.
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType& GetParentReference(impl::GetMemberType<MemberPtr>* member) {
    // Byte distance from the start of the parent object to the member.
    const std::ptrdiff_t offset = impl::OffsetOf<MemberPtr, RealParentType>();
    // Step back from the member's address to the start of the parent.
    auto* const member_bytes = static_cast<uint8_t*>(static_cast<void*>(member));
    return *static_cast<RealParentType*>(static_cast<void*>(member_bytes - offset));
}
// Const overload: recovers a const reference to the object containing `member`.
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const& GetParentReference(impl::GetMemberType<MemberPtr> const* member) {
    // Byte distance from the start of the parent object to the member.
    const std::ptrdiff_t offset = impl::OffsetOf<MemberPtr, RealParentType>();
    // Step back from the member's address to the start of the parent.
    const auto* const member_bytes = static_cast<const uint8_t*>(static_cast<const void*>(member));
    return *static_cast<const RealParentType*>(static_cast<const void*>(member_bytes - offset));
}
// Pointer-returning variant: resolves the owning object and returns its address.
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType* GetParentPointer(impl::GetMemberType<MemberPtr>* member) {
    auto& parent = GetParentReference<MemberPtr, RealParentType>(member);
    return std::addressof(parent);
}
// Const pointer-returning variant: resolves the owning object and returns its address.
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const* GetParentPointer(impl::GetMemberType<MemberPtr> const* member) {
    auto const& parent = GetParentReference<MemberPtr, RealParentType>(member);
    return std::addressof(parent);
}
// Reference-taking convenience overload; forwards to the pointer overload.
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType& GetParentReference(impl::GetMemberType<MemberPtr>& member) {
    auto* const member_ptr = std::addressof(member);
    return GetParentReference<MemberPtr, RealParentType>(member_ptr);
}
// Const reference-taking convenience overload; forwards to the pointer overload.
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const& GetParentReference(impl::GetMemberType<MemberPtr> const& member) {
    const auto* const member_ptr = std::addressof(member);
    return GetParentReference<MemberPtr, RealParentType>(member_ptr);
}
// Reference-taking pointer variant: resolves the owner and returns its address.
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType* GetParentPointer(impl::GetMemberType<MemberPtr>& member) {
    auto& parent = GetParentReference<MemberPtr, RealParentType>(member);
    return std::addressof(parent);
}
// Const reference-taking pointer variant: resolves the owner and returns its address.
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const* GetParentPointer(impl::GetMemberType<MemberPtr> const& member) {
    auto const& parent = GetParentReference<MemberPtr, RealParentType>(member);
    return std::addressof(parent);
}
} // namespace Common

View File

@ -128,70 +128,60 @@ add_library(citra_core STATIC
hle/applets/swkbd.h
hle/ipc.h
hle/ipc_helpers.h
hle/kernel/address_arbiter.cpp
hle/kernel/address_arbiter.h
hle/kernel/client_port.cpp
hle/kernel/client_port.h
hle/kernel/client_session.cpp
hle/kernel/client_session.h
hle/kernel/config_mem.cpp
hle/kernel/config_mem.h
hle/kernel/errors.h
hle/kernel/event.cpp
hle/kernel/event.h
hle/kernel/handle_table.cpp
hle/kernel/handle_table.h
hle/kernel/hle_ipc.cpp
hle/kernel/hle_ipc.h
hle/kernel/ipc.cpp
hle/kernel/ipc.h
hle/kernel/ipc_debugger/recorder.cpp
hle/kernel/ipc_debugger/recorder.h
hle/kernel/k_address_arbiter.cpp
hle/kernel/k_address_arbiter.h
hle/kernel/k_auto_object.cpp
hle/kernel/k_auto_object.h
hle/kernel/k_auto_object_container.cpp
hle/kernel/k_auto_object_container.h
hle/kernel/k_client_port.cpp
hle/kernel/k_client_port.h
hle/kernel/k_client_session.cpp
hle/kernel/k_client_session.h
hle/kernel/k_code_set.h
hle/kernel/k_event.cpp
hle/kernel/k_event.h
hle/kernel/k_handle_table.cpp
hle/kernel/k_handle_table.h
hle/kernel/k_linked_list.h
hle/kernel/k_mutex.cpp
hle/kernel/k_mutex.h
hle/kernel/k_object_name.cpp
hle/kernel/k_object_name.h
hle/kernel/k_port.cpp
hle/kernel/k_port.h
hle/kernel/k_process.cpp
hle/kernel/k_process.h
hle/kernel/k_resource_limit.cpp
hle/kernel/k_resource_limit.h
hle/kernel/k_scoped_resource_reservation.h
hle/kernel/k_semaphore.cpp
hle/kernel/k_semaphore.h
hle/kernel/k_server_port.cpp
hle/kernel/k_server_port.h
hle/kernel/k_server_session.cpp
hle/kernel/k_server_session.h
hle/kernel/k_session.cpp
hle/kernel/k_session.h
hle/kernel/k_shared_memory.cpp
hle/kernel/k_shared_memory.h
hle/kernel/k_slab_heap.h
hle/kernel/k_synchronization_object.cpp
hle/kernel/k_synchronization_object.h
hle/kernel/k_thread.cpp
hle/kernel/k_thread.h
hle/kernel/k_timer.cpp
hle/kernel/k_timer.h
hle/kernel/kernel.cpp
hle/kernel/kernel.h
hle/kernel/memory.cpp
hle/kernel/memory.h
hle/kernel/mutex.cpp
hle/kernel/mutex.h
hle/kernel/object.cpp
hle/kernel/object.h
hle/kernel/process.cpp
hle/kernel/process.h
hle/kernel/resource_limit.cpp
hle/kernel/resource_limit.h
hle/kernel/semaphore.cpp
hle/kernel/semaphore.h
hle/kernel/server_port.cpp
hle/kernel/server_port.h
hle/kernel/server_session.cpp
hle/kernel/server_session.h
hle/kernel/session.h
hle/kernel/session.cpp
hle/kernel/shared_memory.cpp
hle/kernel/shared_memory.h
hle/kernel/shared_page.cpp
hle/kernel/shared_page.h
hle/kernel/svc.cpp
hle/kernel/svc.h
hle/kernel/svc_wrapper.h
hle/kernel/thread.cpp
hle/kernel/thread.h
hle/kernel/timer.cpp
hle/kernel/timer.h
hle/kernel/vm_manager.cpp
hle/kernel/vm_manager.h
hle/kernel/wait_object.cpp
hle/kernel/wait_object.h
hle/mii.h
hle/mii.cpp
hle/result.h
@ -333,12 +323,14 @@ add_library(citra_core STATIC
hle/service/ir/ir_u.h
hle/service/ir/ir_user.cpp
hle/service/ir/ir_user.h
hle/service/kernel_helpers.cpp
hle/service/kernel_helpers.h
hle/service/ldr_ro/cro_helper.cpp
hle/service/ldr_ro/cro_helper.h
hle/service/ldr_ro/ldr_ro.cpp
hle/service/ldr_ro/ldr_ro.h
hle/service/mcu/mcu_hwc.cpp
hle/service/mcu/mcu_hwc.h
hle/service/mcu/mcu.cpp
hle/service/mcu/mcu.h
hle/service/mic/mic_u.cpp
hle/service/mic/mic_u.h
hle/service/mvd/mvd.cpp

View File

@ -292,8 +292,8 @@ void ARM_Dynarmic::SetPageTable(const std::shared_ptr<Memory::PageTable>& page_t
}
void ARM_Dynarmic::ServeBreak() {
Kernel::KThread* thread = system.Kernel().GetCurrentThreadManager().GetCurrentThread();
SaveContext(thread->GetContext());
Kernel::Thread* thread = system.Kernel().GetCurrentThreadManager().GetCurrentThread();
SaveContext(thread->context);
GDBStub::Break();
GDBStub::SendTrap(thread, 5);
}

View File

@ -609,8 +609,8 @@ void ARMul_State::ServeBreak() {
DEBUG_ASSERT(Reg[15] == last_bkpt.address);
}
Kernel::KThread* thread = system.Kernel().GetCurrentThreadManager().GetCurrentThread();
system.GetRunningCore().SaveContext(thread->GetContext());
Kernel::Thread* thread = system.Kernel().GetCurrentThreadManager().GetCurrentThread();
system.GetRunningCore().SaveContext(thread->context);
if (last_bkpt_hit || GDBStub::IsMemoryBreak() || GDBStub::GetCpuStepFlag()) {
last_bkpt_hit = false;

View File

@ -27,9 +27,9 @@
#include "core/frontend/image_interface.h"
#include "core/gdbstub/gdbstub.h"
#include "core/global.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/service/apt/applet_manager.h"
#include "core/hle/service/apt/apt.h"
#include "core/hle/service/cam/cam.h"
@ -83,9 +83,9 @@ System::ResultStatus System::RunLoop(bool tight_loop) {
}
if (GDBStub::IsServerEnabled()) {
Kernel::KThread* thread = kernel->GetCurrentThreadManager().GetCurrentThread();
Kernel::Thread* thread = kernel->GetCurrentThreadManager().GetCurrentThread();
if (thread && running_core) {
running_core->SaveContext(thread->GetContext());
running_core->SaveContext(thread->context);
}
GDBStub::HandlePacket(*this);
@ -311,8 +311,8 @@ System::ResultStatus System::Load(Frontend::EmuWindow& emu_window, const std::st
}
telemetry_session->AddInitialInfo(*app_loader);
Kernel::Process* process;
const Loader::ResultStatus load_result{app_loader->Load(std::addressof(process))};
std::shared_ptr<Kernel::Process> process;
const Loader::ResultStatus load_result{app_loader->Load(process)};
if (Loader::ResultStatus::Success != load_result) {
LOG_CRITICAL(Core, "Failed to load ROM (Error {})!", load_result);
System::Shutdown();

View File

@ -7,7 +7,7 @@
#include "common/archives.h"
#include "core/file_sys/archive_other_savedata.h"
#include "core/file_sys/errors.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/process.h"
#include "core/hle/service/fs/archive.h"
SERIALIZE_EXPORT_IMPL(FileSys::ArchiveFactory_OtherSaveDataPermitted)

View File

@ -6,7 +6,7 @@
#include "common/archives.h"
#include "core/core.h"
#include "core/file_sys/archive_savedata.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/process.h"
SERIALIZE_EXPORT_IMPL(FileSys::ArchiveFactory_SaveData)

View File

@ -11,7 +11,7 @@
#include "core/file_sys/archive_selfncch.h"
#include "core/file_sys/errors.h"
#include "core/file_sys/ivfc_archive.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/process.h"
SERIALIZE_EXPORT_IMPL(FileSys::ArchiveFactory_SelfNCCH)

View File

@ -121,7 +121,7 @@ Loader::ResultStatus FileSys::Plugin3GXLoader::Load(
if (!compatible_TID.empty() &&
std::find(compatible_TID.begin(), compatible_TID.end(),
static_cast<u32>(process.codeset.program_id)) == compatible_TID.end()) {
static_cast<u32>(process.codeset->program_id)) == compatible_TID.end()) {
LOG_ERROR(Service_PLGLDR,
"Failed to load 3GX plugin. Not compatible with loaded process: {}",
plg_context.plugin_path);
@ -291,7 +291,7 @@ void FileSys::Plugin3GXLoader::MapBootloader(Kernel::Process& process, Kernel::K
u32 exe_checksum, bool no_flash) {
u32_le game_instructions[2];
kernel.memory.ReadBlock(process, process.codeset.CodeSegment().addr, game_instructions,
kernel.memory.ReadBlock(process, process.codeset->CodeSegment().addr, game_instructions,
sizeof(u32) * 2);
std::array<u32_le, g_plugin_loader_bootloader.size() / sizeof(u32)> bootloader;
@ -307,7 +307,7 @@ void FileSys::Plugin3GXLoader::MapBootloader(Kernel::Process& process, Kernel::K
*it = game_instructions[1];
} break;
case 0xDEAD0002: {
*it = process.codeset.CodeSegment().addr;
*it = process.codeset->CodeSegment().addr;
} break;
case 0xDEAD0003: {
for (u32 i = 0;
@ -361,6 +361,6 @@ void FileSys::Plugin3GXLoader::MapBootloader(Kernel::Process& process, Kernel::K
game_instructions[0] = 0xE51FF004; // ldr pc, [pc, #-4]
game_instructions[1] = _3GX_exe_load_addr - bootloader_memory_size;
kernel.memory.WriteBlock(process, process.codeset.CodeSegment().addr, game_instructions,
kernel.memory.WriteBlock(process, process.codeset->CodeSegment().addr, game_instructions,
sizeof(u32) * 2);
}

View File

@ -25,7 +25,7 @@
#include "common/common_types.h"
#include "common/swap.h"
#include "core/file_sys/archive_backend.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/process.h"
#include "core/hle/service/plgldr/plgldr.h"
namespace Loader {

View File

@ -35,7 +35,7 @@
#include "core/core.h"
#include "core/gdbstub/gdbstub.h"
#include "core/gdbstub/hio.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/process.h"
#include "core/memory.h"
namespace GDBStub {
@ -128,7 +128,7 @@ u32 command_length;
u32 latest_signal = 0;
bool memory_break = false;
static Kernel::KThread* current_thread = nullptr;
static Kernel::Thread* current_thread = nullptr;
// Binding to a port within the reserved ports range (0-1023) requires root permissions,
// so default to a port outside of that range.
@ -159,76 +159,72 @@ BreakpointMap breakpoints_read;
BreakpointMap breakpoints_write;
} // Anonymous namespace
static Kernel::KThread* FindThreadById(int id) {
static Kernel::Thread* FindThreadById(int id) {
u32 num_cores = Core::GetNumCores();
for (u32 i = 0; i < num_cores; ++i) {
const auto& threads =
Core::System::GetInstance().Kernel().GetThreadManager(i).GetThreadList();
for (auto& thread : threads) {
if (thread->GetThreadId() == static_cast<u32>(id)) {
return thread;
return thread.get();
}
}
}
return nullptr;
}
static u32 RegRead(std::size_t id, Kernel::KThread* thread = nullptr) {
static u32 RegRead(std::size_t id, Kernel::Thread* thread = nullptr) {
if (!thread) {
return 0;
}
const auto& context = thread->GetContext();
if (id <= PC_REGISTER) {
return context.cpu_registers[id];
return thread->context.cpu_registers[id];
} else if (id == CPSR_REGISTER) {
return context.cpsr;
return thread->context.cpsr;
} else {
return 0;
}
}
static void RegWrite(std::size_t id, u32 val, Kernel::KThread* thread = nullptr) {
static void RegWrite(std::size_t id, u32 val, Kernel::Thread* thread = nullptr) {
if (!thread) {
return;
}
auto& context = thread->GetContext();
if (id <= PC_REGISTER) {
context.cpu_registers[id] = val;
thread->context.cpu_registers[id] = val;
} else if (id == CPSR_REGISTER) {
context.cpsr = val;
thread->context.cpsr = val;
}
}
static u64 FpuRead(std::size_t id, Kernel::KThread* thread = nullptr) {
static u64 FpuRead(std::size_t id, Kernel::Thread* thread = nullptr) {
if (!thread) {
return 0;
}
const auto& context = thread->GetContext();
if (id >= D0_REGISTER && id < FPSCR_REGISTER) {
u64 ret = context.fpu_registers[2 * (id - D0_REGISTER)];
ret |= static_cast<u64>(context.fpu_registers[2 * (id - D0_REGISTER) + 1]) << 32;
u64 ret = thread->context.fpu_registers[2 * (id - D0_REGISTER)];
ret |= static_cast<u64>(thread->context.fpu_registers[2 * (id - D0_REGISTER) + 1]) << 32;
return ret;
} else if (id == FPSCR_REGISTER) {
return context.fpscr;
return thread->context.fpscr;
} else {
return 0;
}
}
static void FpuWrite(std::size_t id, u64 val, Kernel::KThread* thread = nullptr) {
static void FpuWrite(std::size_t id, u64 val, Kernel::Thread* thread = nullptr) {
if (!thread) {
return;
}
auto& context = thread->GetContext();
if (id >= D0_REGISTER && id < FPSCR_REGISTER) {
context.fpu_registers[2 * (id - D0_REGISTER)] = static_cast<u32>(val);
context.fpu_registers[2 * (id - D0_REGISTER) + 1] = static_cast<u32>(val >> 32);
thread->context.fpu_registers[2 * (id - D0_REGISTER)] = static_cast<u32>(val);
thread->context.fpu_registers[2 * (id - D0_REGISTER) + 1] = static_cast<u32>(val >> 32);
} else if (id == FPSCR_REGISTER) {
context.fpscr = static_cast<u32>(val);
thread->context.fpscr = static_cast<u32>(val);
}
}
@ -610,7 +606,7 @@ static void HandleThreadAlive() {
*
* @param signal Signal to be sent to client.
*/
static void SendSignal(Kernel::KThread* thread, u32 signal, bool full = true) {
static void SendSignal(Kernel::Thread* thread, u32 signal, bool full = true) {
if (gdbserver_socket == -1) {
return;
}
@ -789,7 +785,7 @@ static void WriteRegister() {
return SendReply("E01");
}
Core::GetRunningCore().LoadContext(current_thread->GetContext());
Core::GetRunningCore().LoadContext(current_thread->context);
SendReply("OK");
}
@ -819,7 +815,7 @@ static void WriteRegisters() {
}
}
Core::GetRunningCore().LoadContext(current_thread->GetContext());
Core::GetRunningCore().LoadContext(current_thread->context);
SendReply("OK");
}
@ -894,7 +890,7 @@ void Break(bool is_memory_break) {
static void Step() {
if (command_length > 1) {
RegWrite(PC_REGISTER, GdbHexToInt(command_buffer + 1), current_thread);
Core::GetRunningCore().LoadContext(current_thread->GetContext());
Core::GetRunningCore().LoadContext(current_thread->context);
}
step_loop = true;
halt_loop = true;
@ -1270,7 +1266,7 @@ void SetCpuStepFlag(bool is_step) {
step_loop = is_step;
}
void SendTrap(Kernel::KThread* thread, int trap) {
void SendTrap(Kernel::Thread* thread, int trap) {
if (!send_trap) {
return;
}

View File

@ -8,7 +8,7 @@
#include <span>
#include "common/common_types.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/thread.h"
namespace Core {
class System;
@ -118,7 +118,7 @@ void SetCpuStepFlag(bool is_step);
* @param thread Sending thread.
* @param trap Trap no.
*/
void SendTrap(Kernel::KThread* thread, int trap);
void SendTrap(Kernel::Thread* thread, int trap);
/**
* Send reply to gdb client.

View File

@ -47,10 +47,10 @@ void Applet::SendParameter(const Service::APT::MessageParameter& parameter) {
}
}
void Applet::CloseApplet(Kernel::KAutoObject* object, const std::vector<u8>& buffer) {
void Applet::CloseApplet(std::shared_ptr<Kernel::Object> object, const std::vector<u8>& buffer) {
if (auto locked = manager.lock()) {
locked->PrepareToCloseLibraryApplet(true, false, false);
locked->CloseLibraryApplet(object, buffer);
locked->CloseLibraryApplet(std::move(object), buffer);
} else {
LOG_ERROR(Service_APT, "called after destructing applet manager");
}

View File

@ -8,10 +8,6 @@
#include "core/hle/result.h"
#include "core/hle/service/apt/applet_manager.h"
namespace Core {
class System;
}
namespace HLE::Applets {
class Applet {
@ -43,8 +39,7 @@ public:
protected:
Applet(Core::System& system, Service::APT::AppletId id, Service::APT::AppletId parent,
bool preload, std::weak_ptr<Service::APT::AppletManager> manager)
: system(system), id(id), parent(parent), preload(preload), service_context(system),
manager(std::move(manager)) {}
: system(system), id(id), parent(parent), preload(preload), manager(std::move(manager)) {}
/**
* Handles a parameter from the application.
@ -67,11 +62,11 @@ protected:
virtual Result Finalize() = 0;
Core::System& system;
Service::APT::AppletId id; ///< Id of this Applet
Service::APT::AppletId parent; ///< Id of this Applet's parent
bool preload; ///< Whether the Applet is being preloaded.
std::shared_ptr<std::vector<u8>> heap_memory; ///< Heap memory for this Applet
Service::KernelHelpers::ServiceContext service_context;
/// Whether this applet is running.
bool is_running = true;
@ -80,7 +75,7 @@ protected:
bool is_active = false;
void SendParameter(const Service::APT::MessageParameter& parameter);
void CloseApplet(Kernel::KAutoObject* object, const std::vector<u8>& buffer);
void CloseApplet(std::shared_ptr<Kernel::Object> object, const std::vector<u8>& buffer);
private:
std::weak_ptr<Service::APT::AppletManager> manager;

View File

@ -5,7 +5,6 @@
#include "common/string_util.h"
#include "core/core.h"
#include "core/hle/applets/erreula.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/service/apt/apt.h"
namespace HLE::Applets {
@ -29,7 +28,7 @@ Result ErrEula::ReceiveParameterImpl(const Service::APT::MessageParameter& param
// TODO: allocated memory never released
using Kernel::MemoryPermission;
// Create a SharedMemory that directly points to this heap block.
framebuffer_memory = service_context.CreateSharedMemoryForApplet(
framebuffer_memory = system.Kernel().CreateSharedMemoryForApplet(
0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite,
"ErrEula Memory");

View File

@ -5,10 +5,7 @@
#pragma once
#include "core/hle/applets/applet.h"
namespace Kernel {
class KSharedMemory;
}
#include "core/hle/kernel/shared_memory.h"
namespace HLE::Applets {
@ -27,7 +24,7 @@ private:
/// This SharedMemory will be created when we receive the LibAppJustStarted message.
/// It holds the framebuffer info retrieved by the application with
/// GSPGPU::ImportDisplayCaptureInfo
Kernel::KSharedMemory* framebuffer_memory;
std::shared_ptr<Kernel::SharedMemory> framebuffer_memory;
/// Parameter received by the applet on start.
std::vector<u8> startup_param;

View File

@ -11,8 +11,8 @@
#include "core/core.h"
#include "core/frontend/applets/mii_selector.h"
#include "core/hle/applets/mii_selector.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/result.h"
namespace HLE::Applets {
@ -35,7 +35,7 @@ Result MiiSelector::ReceiveParameterImpl(const Service::APT::MessageParameter& p
using Kernel::MemoryPermission;
// Create a SharedMemory that directly points to this heap block.
framebuffer_memory = service_context.CreateSharedMemoryForApplet(
framebuffer_memory = system.Kernel().CreateSharedMemoryForApplet(
0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite,
"MiiSelector Memory");

View File

@ -8,6 +8,7 @@
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/applets/applet.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/mii.h"
#include "core/hle/result.h"
#include "core/hle/service/apt/apt.h"
@ -17,10 +18,6 @@ class MiiSelector;
struct MiiSelectorConfig;
} // namespace Frontend
namespace Kernel {
class KSharedMemory;
}
namespace HLE::Applets {
struct MiiConfig {
@ -82,7 +79,7 @@ private:
/// This SharedMemory will be created when we receive the LibAppJustStarted message.
/// It holds the framebuffer info retrieved by the application with
/// GSPGPU::ImportDisplayCaptureInfo
Kernel::KSharedMemory* framebuffer_memory;
std::shared_ptr<Kernel::SharedMemory> framebuffer_memory;
MiiConfig config;

View File

@ -2,8 +2,10 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/string_util.h"
#include "core/core.h"
#include "core/hle/applets/mint.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/service/apt/apt.h"
namespace HLE::Applets {
@ -26,7 +28,7 @@ Result Mint::ReceiveParameterImpl(const Service::APT::MessageParameter& paramete
// TODO: allocated memory never released
using Kernel::MemoryPermission;
// Create a SharedMemory that directly points to this heap block.
framebuffer_memory = service_context.CreateSharedMemoryForApplet(
framebuffer_memory = system.Kernel().CreateSharedMemoryForApplet(
0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite,
"Mint Memory");

View File

@ -5,10 +5,7 @@
#pragma once
#include "core/hle/applets/applet.h"
namespace Kernel {
class KSharedMemory;
}
#include "core/hle/kernel/shared_memory.h"
namespace HLE::Applets {
@ -27,7 +24,7 @@ private:
/// This SharedMemory will be created when we receive the Request message.
/// It holds the framebuffer info retrieved by the application with
/// GSPGPU::ImportDisplayCaptureInfo
Kernel::KSharedMemory* framebuffer_memory;
std::shared_ptr<Kernel::SharedMemory> framebuffer_memory;
/// Parameter received by the applet on start.
std::vector<u8> startup_param;

View File

@ -10,9 +10,12 @@
#include "common/string_util.h"
#include "core/core.h"
#include "core/hle/applets/swkbd.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/result.h"
#include "core/hle/service/gsp/gsp.h"
#include "core/hle/service/hid/hid.h"
#include "core/memory.h"
namespace HLE::Applets {
@ -29,7 +32,7 @@ Result SoftwareKeyboard::ReceiveParameterImpl(Service::APT::MessageParameter con
using Kernel::MemoryPermission;
// Create a SharedMemory that directly points to this heap block.
framebuffer_memory = service_context.CreateSharedMemoryForApplet(
framebuffer_memory = system.Kernel().CreateSharedMemoryForApplet(
0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite,
"SoftwareKeyboard Memory");
@ -91,7 +94,7 @@ Result SoftwareKeyboard::Start(Service::APT::MessageParameter const& parameter)
"The size of the parameter (SoftwareKeyboardConfig) is wrong");
std::memcpy(&config, parameter.buffer.data(), parameter.buffer.size());
text_memory = parameter.object->DynamicCast<Kernel::KSharedMemory*>();
text_memory = std::static_pointer_cast<Kernel::SharedMemory, Kernel::Object>(parameter.object);
DrawScreenKeyboard();

View File

@ -9,6 +9,7 @@
#include "common/common_types.h"
#include "core/frontend/applets/swkbd.h"
#include "core/hle/applets/applet.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/result.h"
#include "core/hle/service/apt/apt.h"
@ -194,10 +195,10 @@ private:
/// This SharedMemory will be created when we receive the LibAppJustStarted message.
/// It holds the framebuffer info retrieved by the application with
/// GSPGPU::ImportDisplayCaptureInfo
Kernel::KSharedMemory* framebuffer_memory;
std::shared_ptr<Kernel::SharedMemory> framebuffer_memory;
/// SharedMemory where the output text will be stored
Kernel::KSharedMemory* text_memory;
std::shared_ptr<Kernel::SharedMemory> text_memory;
/// Configuration of this instance of the SoftwareKeyboard, as received from the application
SoftwareKeyboardConfig config;

View File

@ -6,6 +6,8 @@
#include "common/common_types.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/thread.h"
#include "core/memory.h"
namespace IPC {

View File

@ -87,11 +87,11 @@ public:
void PushRaw(const T& value);
// TODO : ensure that translate params are added after all regular params
template <typename... T>
void PushCopyObjects(T*... pointers);
template <typename... O>
void PushCopyObjects(std::shared_ptr<O>... pointers);
template <typename... T>
void PushMoveObjects(T*... pointers);
template <typename... O>
void PushMoveObjects(std::shared_ptr<O>... pointers);
void PushStaticBuffer(std::vector<u8> buffer, u8 buffer_id);
@ -183,14 +183,14 @@ inline void RequestBuilder::PushMoveHLEHandles(H... handles) {
Push(static_cast<u32>(handles)...);
}
template <typename... T>
inline void RequestBuilder::PushCopyObjects(T*... pointers) {
PushCopyHLEHandles(context->AddOutgoingHandle(pointers)...);
template <typename... O>
inline void RequestBuilder::PushCopyObjects(std::shared_ptr<O>... pointers) {
PushCopyHLEHandles(context->AddOutgoingHandle(std::move(pointers))...);
}
template <typename... T>
inline void RequestBuilder::PushMoveObjects(T*... pointers) {
PushMoveHLEHandles(context->AddOutgoingHandle(pointers)...);
template <typename... O>
inline void RequestBuilder::PushMoveObjects(std::shared_ptr<O>... pointers) {
PushMoveHLEHandles(context->AddOutgoingHandle(std::move(pointers))...);
}
inline void RequestBuilder::PushStaticBuffer(std::vector<u8> buffer, u8 buffer_id) {
@ -241,11 +241,11 @@ public:
}
/// Equivalent to calling `PopGenericObjects<1>()[0]`.
Kernel::KAutoObject* PopGenericObject();
std::shared_ptr<Kernel::Object> PopGenericObject();
/// Equivalent to calling `std::get<0>(PopObjects<T>())`.
template <typename T>
T* PopObject();
std::shared_ptr<T> PopObject();
/**
* Pop a descriptor containing `N` handles and resolves them to Kernel::Object pointers. If a
@ -255,7 +255,7 @@ public:
* call to read 2 single-handle descriptors.
*/
template <unsigned int N>
std::array<Kernel::KAutoObject*, N> PopGenericObjects();
std::array<std::shared_ptr<Kernel::Object>, N> PopGenericObjects();
/**
* Resolves handles to Kernel::Objects as in PopGenericsObjects(), but then also casts them to
@ -263,11 +263,11 @@ public:
* not match, null is returned instead.
*/
template <typename... T>
std::tuple<T*...> PopObjects();
std::tuple<std::shared_ptr<T>...> PopObjects();
/// Convenience wrapper around PopObjects() which assigns the handles to the passed references.
template <typename... T>
void PopObjects(T**... pointers) {
void PopObjects(std::shared_ptr<T>&... pointers) {
std::tie(pointers...) = PopObjects<T...>();
}
@ -401,20 +401,20 @@ std::array<u32, N> RequestParser::PopHLEHandles() {
return handles;
}
inline Kernel::KAutoObject* RequestParser::PopGenericObject() {
inline std::shared_ptr<Kernel::Object> RequestParser::PopGenericObject() {
auto [handle] = PopHLEHandles<1>();
return context->GetIncomingHandle(handle);
}
template <typename T>
T* RequestParser::PopObject() {
return PopGenericObject()->DynamicCast<T*>();
std::shared_ptr<T> RequestParser::PopObject() {
return Kernel::DynamicObjectCast<T>(PopGenericObject());
}
template <u32 N>
inline std::array<Kernel::KAutoObject*, N> RequestParser::PopGenericObjects() {
template <unsigned int N>
inline std::array<std::shared_ptr<Kernel::Object>, N> RequestParser::PopGenericObjects() {
std::array<u32, N> handles = PopHLEHandles<N>();
std::array<Kernel::KAutoObject*, N> pointers;
std::array<std::shared_ptr<Kernel::Object>, N> pointers;
for (int i = 0; i < N; ++i) {
pointers[i] = context->GetIncomingHandle(handles[i]);
}
@ -423,14 +423,15 @@ inline std::array<Kernel::KAutoObject*, N> RequestParser::PopGenericObjects() {
namespace detail {
template <typename... T, std::size_t... I>
std::tuple<T*...> PopObjectsHelper(std::array<Kernel::KAutoObject*, sizeof...(T)>& pointers,
std::index_sequence<I...>) {
return std::make_tuple((pointers[I]->template DynamicCast<T*>())...);
std::tuple<std::shared_ptr<T>...> PopObjectsHelper(
std::array<std::shared_ptr<Kernel::Object>, sizeof...(T)>&& pointers,
std::index_sequence<I...>) {
return std::make_tuple(Kernel::DynamicObjectCast<T>(std::move(pointers[I]))...);
}
} // namespace detail
template <typename... T>
inline std::tuple<T*...> RequestParser::PopObjects() {
inline std::tuple<std::shared_ptr<T>...> RequestParser::PopObjects() {
return detail::PopObjectsHelper<T...>(PopGenericObjects<sizeof...(T)>(),
std::index_sequence_for<T...>{});
}

View File

@ -0,0 +1,220 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/vector.hpp>
#include "common/archives.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/thread.h"
#include "core/memory.h"
SERIALIZE_EXPORT_IMPL(Kernel::AddressArbiter)
SERIALIZE_EXPORT_IMPL(Kernel::AddressArbiter::Callback)
namespace Kernel {
// Parks `thread` on `wait_address`: marks it as arbitration-waiting and
// records it so a later signal on that address can resume it.
void AddressArbiter::WaitThread(std::shared_ptr<Thread> thread, VAddr wait_address) {
    thread->status = ThreadStatus::WaitArb;
    thread->wait_address = wait_address;
    waiting_threads.push_back(std::move(thread));
}
// Wakes every thread currently waiting on `address` and returns how many
// were woken.
u64 AddressArbiter::ResumeAllThreads(VAddr address) {
    // Partition so that threads NOT waiting on this address stay at the front;
    // the tail then holds exactly the threads to wake (relative order kept).
    const auto first_match = std::stable_partition(
        waiting_threads.begin(), waiting_threads.end(), [address](const auto& thread) {
            ASSERT_MSG(thread->status == ThreadStatus::WaitArb,
                       "Inconsistent AddressArbiter state");
            return thread->wait_address != address;
        });

    // Wake each matching thread.
    const u64 woken = static_cast<u64>(std::distance(first_match, waiting_threads.end()));
    for (auto it = first_match; it != waiting_threads.end(); ++it) {
        (*it)->ResumeFromWait();
    }

    // Drop the woken threads from the wait list.
    waiting_threads.erase(first_match, waiting_threads.end());
    return woken;
}
// Wakes the single highest-priority thread waiting on `address`.
// Returns true if a thread was woken, false if none was waiting there.
bool AddressArbiter::ResumeHighestPriorityThread(VAddr address) {
// Determine which threads are waiting on this address, those should be considered for wakeup.
// stable_partition keeps the candidates' relative order, which matters for the
// tie-break behavior described below.
auto matches_start = std::stable_partition(
waiting_threads.begin(), waiting_threads.end(), [address](const auto& thread) {
ASSERT_MSG(thread->status == ThreadStatus::WaitArb,
"Inconsistent AddressArbiter state");
return thread->wait_address != address;
});
// Iterate through threads, find highest priority thread that is waiting to be arbitrated.
// Note: The real kernel will pick the first thread in the list if more than one have the
// same highest priority value. Lower priority values mean higher priority.
// (std::min_element returns the first minimal element, matching that rule.)
auto itr = std::min_element(matches_start, waiting_threads.end(),
[](const auto& lhs, const auto& rhs) {
return lhs->current_priority < rhs->current_priority;
});
// No thread is waiting on this address.
if (itr == waiting_threads.end()) {
return false;
}
// Copy the shared_ptr out before erasing its slot from the wait list.
auto thread = *itr;
thread->ResumeFromWait();
waiting_threads.erase(itr);
return true;
}
// The timeout callback is allocated eagerly and shared so the thread-timeout
// machinery can notify this arbiter (see AddressArbiter::WakeUp).
AddressArbiter::AddressArbiter(KernelSystem& kernel)
: Object(kernel), kernel(kernel), timeout_callback(std::make_shared<Callback>(*this)) {}
// Gives back the AddressArbiter slot charged against the owning resource
// limit, if one was ever attached.
AddressArbiter::~AddressArbiter() {
    if (!resource_limit) {
        return;
    }
    resource_limit->Release(ResourceLimitType::AddressArbiter, 1);
}
std::shared_ptr<AddressArbiter> KernelSystem::CreateAddressArbiter(std::string name) {
auto address_arbiter = std::make_shared<AddressArbiter>(*this);
address_arbiter->name = std::move(name);
return address_arbiter;
}
// Wakeup callback installed on threads performing a timed arbitration wait.
// It forwards timeout wakeups back to the owning arbiter, which removes the
// thread from its wait list.
class AddressArbiter::Callback : public WakeupCallback {
public:
    explicit Callback(AddressArbiter& _parent) : parent(_parent) {}

    // Owning arbiter. Held by reference; lifetime is managed through
    // save/load_construct_data when (de)serializing.
    AddressArbiter& parent;

    void WakeUp(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
                std::shared_ptr<WaitObject> object) override {
        parent.WakeUp(reason, std::move(thread), std::move(object));
    }

private:
    template <class Archive>
    void serialize(Archive& ar, const unsigned int) {
        // Only base-class state is serialized here; `parent` is reconstructed
        // separately via boost's save/load_construct_data hooks.
        ar& boost::serialization::base_object<WakeupCallback>(*this);
    }
    friend class boost::serialization::access;
};
// Handles a timeout wakeup delivered by the timed-wait callback: the thread is
// no longer waiting on this arbiter, so drop it from the wait list.
// Fix: removed the stray trailing semicolon after the function body, which
// formed a useless empty declaration at namespace scope.
void AddressArbiter::WakeUp(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
                            std::shared_ptr<WaitObject> object) {
    // Timed arbitration waits are the only users of this callback.
    ASSERT(reason == ThreadWakeupReason::Timeout);
    // Remove the newly-awakened thread from the Arbiter's waiting list.
    waiting_threads.erase(std::remove(waiting_threads.begin(), waiting_threads.end(), thread),
                          waiting_threads.end());
}
// Implements svcArbitrateAddress for this arbiter. Depending on `type`, this
// either wakes threads waiting on `address`, or puts `thread` to sleep on it,
// optionally decrementing the guest value at `address` and/or arming a timeout.
Result AddressArbiter::ArbitrateAddress(std::shared_ptr<Thread> thread, ArbitrationType type,
                                        VAddr address, s32 value, u64 nanoseconds) {
    switch (type) {
    // Signal thread(s) waiting for arbitrate address...
    case ArbitrationType::Signal: {
        u64 num_threads{};

        // Negative value means resume all threads
        if (value < 0) {
            num_threads = ResumeAllThreads(address);
        } else {
            // Resume first N threads (each successful resume contributes 1)
            for (s32 i = 0; i < value; i++) {
                num_threads += ResumeHighestPriorityThread(address);
            }
        }

        // Prevents lag from low priority threads that spam svcArbitrateAddress and wake no threads
        // The tick count is taken directly from official HOS kernel. The priority value is one less
        // than official kernel as the affected FMV threads dont meet the priority threshold of 50.
        // TODO: Revisit this when scheduler is rewritten and adjust if there isn't a problem there.
        if (num_threads == 0 && thread->current_priority >= 49) {
            kernel.current_cpu->GetTimer().AddTicks(1614u);
        }
        break;
    }
    // Wait current thread (acquire the arbiter)...
    case ArbitrationType::WaitIfLessThan:
        if ((s32)kernel.memory.Read32(address) < value) {
            WaitThread(std::move(thread), address);
        }
        break;
    case ArbitrationType::WaitIfLessThanWithTimeout:
        if ((s32)kernel.memory.Read32(address) < value) {
            // Install the timeout callback so WakeUp() can clean up the wait
            // list if the timer fires before the address is signaled.
            thread->wakeup_callback = timeout_callback;
            thread->WakeAfterDelay(nanoseconds);
            WaitThread(std::move(thread), address);
        }
        break;
    case ArbitrationType::DecrementAndWaitIfLessThan: {
        s32 memory_value = kernel.memory.Read32(address);
        if (memory_value < value) {
            // Only change the memory value if the thread should wait
            kernel.memory.Write32(address, (s32)memory_value - 1);
            WaitThread(std::move(thread), address);
        }
        break;
    }
    case ArbitrationType::DecrementAndWaitIfLessThanWithTimeout: {
        s32 memory_value = kernel.memory.Read32(address);
        if (memory_value < value) {
            // Only change the memory value if the thread should wait
            kernel.memory.Write32(address, (s32)memory_value - 1);
            thread->wakeup_callback = timeout_callback;
            thread->WakeAfterDelay(nanoseconds);
            WaitThread(std::move(thread), address);
        }
        break;
    }
    default:
        LOG_ERROR(Kernel, "unknown type={}", type);
        return ResultInvalidEnumValueFnd;
    }

    // The calls that use a timeout seem to always return a Timeout error even if they did not put
    // the thread to sleep
    if (type == ArbitrationType::WaitIfLessThanWithTimeout ||
        type == ArbitrationType::DecrementAndWaitIfLessThanWithTimeout) {
        return ResultTimeout;
    }

    return ResultSuccess;
}
template <class Archive>
void AddressArbiter::serialize(Archive& ar, const unsigned int) {
    // Field order must stay stable for save-state compatibility.
    ar& boost::serialization::base_object<Object>(*this);
    ar& name;
    ar& waiting_threads;
    ar& timeout_callback;
    ar& resource_limit;
}
SERIALIZE_IMPL(AddressArbiter)
} // namespace Kernel
namespace boost::serialization {

// AddressArbiter::Callback holds a reference to its parent arbiter and thus
// cannot be default-constructed on load; persist the parent explicitly and
// rebuild the callback from it.
template <class Archive>
void save_construct_data(Archive& ar, const Kernel::AddressArbiter::Callback* t,
                         const unsigned int) {
    ar << Kernel::SharedFrom(&t->parent);
}

template <class Archive>
void load_construct_data(Archive& ar, Kernel::AddressArbiter::Callback* t, const unsigned int) {
    std::shared_ptr<Kernel::AddressArbiter> parent;
    ar >> parent;
    // Placement-new into the storage boost allocated for the callback.
    ::new (t) Kernel::AddressArbiter::Callback(*parent);
}

} // namespace boost::serialization

View File

@ -0,0 +1,88 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <vector>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/result.h"
// Address arbiters are an underlying kernel synchronization object that can be created/used via
// supervisor calls (SVCs). They function as sort of a global lock. Typically, games/other CTR
// applications use them as an underlying mechanism to implement thread-safe barriers, events, and
// semaphores.
namespace Kernel {
class Thread;
class ResourceLimit;
// Arbitration operations accepted by svcArbitrateAddress.
enum class ArbitrationType : u32 {
    Signal,                     // Wake waiter(s) on the address
    WaitIfLessThan,             // Wait while *address < value
    DecrementAndWaitIfLessThan, // As above, decrementing *address when waiting
    WaitIfLessThanWithTimeout,  // As WaitIfLessThan, with a wakeup timeout
    DecrementAndWaitIfLessThanWithTimeout, // Decrement variant with a timeout
};
// Kernel arbiter object: threads block on a guest virtual address and are
// woken by signals issued by other threads (see ArbitrationType).
class AddressArbiter final : public Object, public WakeupCallback {
public:
    explicit AddressArbiter(KernelSystem& kernel);
    ~AddressArbiter() override;

    std::string GetTypeName() const override {
        return "Arbiter";
    }
    std::string GetName() const override {
        return name;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::AddressArbiter;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    /// Resource limit this arbiter was counted against; released on destruction.
    std::shared_ptr<ResourceLimit> resource_limit;

    std::string name; ///< Name of address arbiter object (optional)

    /// Performs arbitration operation `type` on `address` on behalf of `thread`.
    Result ArbitrateAddress(std::shared_ptr<Thread> thread, ArbitrationType type, VAddr address,
                            s32 value, u64 nanoseconds);

    class Callback;

private:
    KernelSystem& kernel;

    /// Puts the thread to wait on the specified arbitration address under this address arbiter.
    void WaitThread(std::shared_ptr<Thread> thread, VAddr wait_address);

    /// Resume all threads found to be waiting on the address under this address arbiter
    u64 ResumeAllThreads(VAddr address);

    /// Resume one thread found to be waiting on the address under this address arbiter.
    /// Returns true if a thread was resumed.
    bool ResumeHighestPriorityThread(VAddr address);

    /// Threads waiting for the address arbiter to be signaled.
    std::vector<std::shared_ptr<Thread>> waiting_threads;

    /// Callback installed on threads performing a timed wait (see WakeUp).
    std::shared_ptr<Callback> timeout_callback;

    /// Timeout handler: removes a timed-out thread from `waiting_threads`.
    void WakeUp(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
                std::shared_ptr<WaitObject> object) override;

    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const unsigned int);
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::AddressArbiter)
BOOST_CLASS_EXPORT_KEY(Kernel::AddressArbiter::Callback)
CONSTRUCT_KERNEL_OBJECT(Kernel::AddressArbiter)

View File

@ -0,0 +1,63 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/global.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/kernel/server_session.h"
SERIALIZE_EXPORT_IMPL(Kernel::ClientPort)
namespace Kernel {
// Ports are constructed by the kernel; their state is wired up afterwards.
ClientPort::ClientPort(KernelSystem& kernel) : Object(kernel), kernel(kernel) {}
ClientPort::~ClientPort() = default;

// Opens a new session on this port. Fails with ResultMaxConnectionsReached
// once `max_sessions` sessions are already active.
Result ClientPort::Connect(std::shared_ptr<ClientSession>* out_client_session) {
    // Note: Threads do not wait for the server endpoint to call
    // AcceptSession before returning from this call.
    R_UNLESS(active_sessions < max_sessions, ResultMaxConnectionsReached);
    active_sessions++;

    // Create a new session pair, let the created sessions inherit the parent port's HLE handler.
    auto [server, client] = kernel.CreateSessionPair(server_port->GetName(), SharedFrom(this));

    // HLE-handled ports hand the server endpoint to the handler immediately;
    // otherwise it is queued until the guest accepts it.
    if (server_port->hle_handler) {
        server_port->hle_handler->ClientConnected(server);
    } else {
        server_port->pending_sessions.push_back(server);
    }

    // Wake the threads waiting on the ServerPort
    server_port->WakeupAllWaitingThreads();

    *out_client_session = client;
    return ResultSuccess;
}
// Called when a session created from this port is closed; keeps the active
// session count in sync so new connections can be accepted again.
void ClientPort::ConnectionClosed() {
    ASSERT(active_sessions > 0);
    --active_sessions;
}

template <class Archive>
void ClientPort::serialize(Archive& ar, const unsigned int) {
    // Field order must stay stable for save-state compatibility.
    ar& boost::serialization::base_object<Object>(*this);
    ar& server_port;
    ar& max_sessions;
    ar& active_sessions;
    ar& name;
}
SERIALIZE_IMPL(ClientPort)
} // namespace Kernel

View File

@ -0,0 +1,73 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/result.h"
namespace Kernel {
class ClientSession;
// Client endpoint of a kernel port. Sessions to a service are opened through
// this object; the paired ServerPort receives them.
class ClientPort final : public Object {
public:
    explicit ClientPort(KernelSystem& kernel);
    ~ClientPort() override;

    friend class ServerPort;

    std::string GetTypeName() const override {
        return "ClientPort";
    }
    std::string GetName() const override {
        return name;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::ClientPort;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    std::shared_ptr<ServerPort> GetServerPort() const {
        return server_port;
    }

    /**
     * Creates a new Session pair, adds the created ServerSession to the associated ServerPort's
     * list of pending sessions, and signals the ServerPort, causing any threads
     * waiting on it to awake.
     * @returns ClientSession The client endpoint of the created Session pair, or error code.
     */
    Result Connect(std::shared_ptr<ClientSession>* out_client_session);

    /**
     * Signifies that a previously active connection has been closed,
     * decreasing the total number of active connections to this port.
     */
    void ConnectionClosed();

private:
    KernelSystem& kernel;
    std::shared_ptr<ServerPort> server_port; ///< ServerPort associated with this client port.
    u32 max_sessions = 0;    ///< Maximum number of simultaneous sessions the port can have
    u32 active_sessions = 0; ///< Number of currently open sessions to this port
    std::string name;        ///< Name of client port (optional)

    friend class KernelSystem;

private:
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const unsigned int);
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::ClientPort)
CONSTRUCT_KERNEL_OBJECT(Kernel::ClientPort)

View File

@ -0,0 +1,67 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
#include "core/hle/kernel/thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::ClientSession)
namespace Kernel {
ClientSession::ClientSession(KernelSystem& kernel) : Object(kernel) {}

ClientSession::~ClientSession() {
    // This destructor will be called automatically when the last ClientSession handle is closed by
    // the emulated application.

    // Local references to ServerSession and SessionRequestHandler are necessary to guarantee they
    // will be kept alive until after ClientDisconnected() returns.
    std::shared_ptr<ServerSession> server = SharedFrom(parent->server);
    if (server) {
        std::shared_ptr<SessionRequestHandler> hle_handler = server->hle_handler;
        if (hle_handler)
            hle_handler->ClientDisconnected(server);

        // Clean up the list of client threads with pending requests, they are unneeded now that the
        // client endpoint is closed.
        server->pending_requesting_threads.clear();
        server->currently_handling = nullptr;
    }

    parent->client = nullptr;

    if (server) {
        // Notify any threads waiting on the ServerSession that the endpoint has been closed. Note
        // that this call has to happen after `Session::client` has been set to nullptr to let the
        // ServerSession know that the client endpoint has been closed.
        server->WakeupAllWaitingThreads();
    }
}
// Forwards an IPC sync request from `thread` to the server endpoint.
// Returns ResultSessionClosed if the server side has already gone away.
Result ClientSession::SendSyncRequest(std::shared_ptr<Thread> thread) {
    // Keep ServerSession alive until we're done working with it.
    std::shared_ptr<ServerSession> server = SharedFrom(parent->server);
    R_UNLESS(server, ResultSessionClosed);

    // Signal the server session that new data is available
    return server->HandleSyncRequest(std::move(thread));
}

template <class Archive>
void ClientSession::serialize(Archive& ar, const unsigned int) {
    // Field order must stay stable for save-state compatibility.
    ar& boost::serialization::base_object<Object>(*this);
    ar& name;
    ar& parent;
}
SERIALIZE_IMPL(ClientSession)
} // namespace Kernel

View File

@ -0,0 +1,60 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/result.h"
namespace Kernel {
class Session;
class Thread;
// Client endpoint of an IPC session; requests sent through it are delivered
// to the paired server endpoint via the parent Session.
class ClientSession final : public Object {
public:
    explicit ClientSession(KernelSystem& kernel);
    ~ClientSession() override;

    friend class KernelSystem;

    std::string GetTypeName() const override {
        return "ClientSession";
    }
    std::string GetName() const override {
        return name;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::ClientSession;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    /**
     * Sends an SyncRequest from the current emulated thread.
     * @param thread Thread that initiated the request.
     * @return Result of the operation.
     */
    Result SendSyncRequest(std::shared_ptr<Thread> thread);

    std::string name; ///< Name of client session (optional)

    /// The parent session, which links to the server endpoint.
    std::shared_ptr<Session> parent;

private:
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const unsigned int);
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::ClientSession)
CONSTRUCT_KERNEL_OBJECT(Kernel::ClientSession)

View File

@ -16,7 +16,6 @@ enum {
OutOfEvents = 15,
OutOfTimers = 16,
OutOfHandles = 19,
ProcessNotFound = 24,
SessionClosedByRemote = 26,
PortNameTooLong = 30,
WrongLockingThread = 31,
@ -110,8 +109,5 @@ constexpr Result ResultTimeout(ErrorDescription::Timeout, ErrorModule::OS,
constexpr Result ResultNoPendingSessions(ErrCodes::NoPendingSessions, ErrorModule::OS,
ErrorSummary::WouldBlock,
ErrorLevel::Permanent); // 0xD8401823
constexpr Result ResultProcessNotFound(ErrCodes::ProcessNotFound, ErrorModule::OS,
ErrorSummary::WrongArgument,
ErrorLevel::Permanent); // 0xD9001818
} // namespace Kernel

View File

@ -0,0 +1,73 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/string.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/event.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::Event)
namespace Kernel {
Event::Event(KernelSystem& kernel) : WaitObject(kernel) {}

Event::~Event() {
    // Give the event count back to the owning resource limit, if one was set.
    if (resource_limit) {
        resource_limit->Release(ResourceLimitType::Event, 1);
    }
}
// Allocates a new, initially non-signaled event with the given reset behavior
// and (optional) debug name.
std::shared_ptr<Event> KernelSystem::CreateEvent(ResetType reset_type, std::string name) {
    std::shared_ptr<Event> new_event = std::make_shared<Event>(*this);
    new_event->signaled = false;
    new_event->reset_type = reset_type;
    new_event->name = std::move(name);
    return new_event;
}
// An event is "available" to waiters only while it is signaled.
bool Event::ShouldWait(const Thread* thread) const {
    return !signaled;
}

void Event::Acquire(Thread* thread) {
    ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
    // OneShot events auto-clear as soon as a single waiter acquires them.
    if (reset_type == ResetType::OneShot) {
        signaled = false;
    }
}

// Marks the event signaled and wakes every thread currently waiting on it.
void Event::Signal() {
    signaled = true;
    WakeupAllWaitingThreads();
}

// Returns the event to the non-signaled state without waking anyone.
void Event::Clear() {
    signaled = false;
}

void Event::WakeupAllWaitingThreads() {
    WaitObject::WakeupAllWaitingThreads();
    // Pulse events stay signaled only for the duration of the wakeup.
    if (reset_type == ResetType::Pulse) {
        signaled = false;
    }
}
template <class Archive>
void Event::serialize(Archive& ar, const unsigned int) {
    // Field order must stay stable for save-state compatibility.
    ar& boost::serialization::base_object<WaitObject>(*this);
    ar& reset_type;
    ar& signaled;
    ar& name;
    ar& resource_limit;
}
SERIALIZE_IMPL(Event)
} // namespace Kernel

View File

@ -0,0 +1,64 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <boost/serialization/export.hpp>
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/wait_object.h"
namespace Kernel {
// Kernel event synchronization object. Threads wait until another thread
// signals it; reset behavior after a wakeup depends on the ResetType.
class Event final : public WaitObject {
public:
    explicit Event(KernelSystem& kernel);
    ~Event() override;

    std::string GetTypeName() const override {
        return "Event";
    }
    std::string GetName() const override {
        return name;
    }
    void SetName(const std::string& name_) {
        name = name_;
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::Event;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    ResetType GetResetType() const {
        return reset_type;
    }

    bool ShouldWait(const Thread* thread) const override;
    void Acquire(Thread* thread) override;

    void WakeupAllWaitingThreads() override;

    /// Sets the event signaled and wakes all waiting threads.
    void Signal();
    /// Returns the event to the non-signaled state.
    void Clear();

    /// Resource limit this event was counted against; released on destruction.
    std::shared_ptr<ResourceLimit> resource_limit;

private:
    ResetType reset_type; ///< Current ResetType
    bool signaled;        ///< Whether the event has already been signaled
    std::string name;     ///< Name of event (optional)

    friend class KernelSystem;

    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const unsigned int);
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::Event)
CONSTRUCT_KERNEL_OBJECT(Kernel::Event)

View File

@ -0,0 +1,111 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <utility>
#include <boost/serialization/array.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::HandleTable)
namespace Kernel {
namespace {
// A handle packs its table slot into the upper bits (31:15) and a generation
// counter into the lower 15 bits (14:0).
constexpr u16 GetSlot(Handle handle) {
    return handle >> 15;
}

constexpr u16 GetGeneration(Handle handle) {
    return handle & 0x7FFF;
}
} // Anonymous namespace

HandleTable::HandleTable(KernelSystem& kernel) : kernel(kernel) {
    // CTR-OS never uses generation 0, so start counting at 1.
    next_generation = 1;
    Clear();
}

HandleTable::~HandleTable() = default;
// Allocates a handle for `obj` by popping a slot off the free list.
// Fails with ResultOutOfHandles when all slots are in use.
Result HandleTable::Create(Handle* out_handle, std::shared_ptr<Object> obj) {
    DEBUG_ASSERT(obj != nullptr);

    u16 slot = next_free_slot;
    R_UNLESS(slot < generations.size(), ResultOutOfHandles);
    // For free slots, `generations` stores the index of the next free slot.
    next_free_slot = generations[slot];

    u16 generation = next_generation++;

    // Overflow count so it fits in the 15 bits dedicated to the generation in the handle.
    // CTR-OS doesn't use generation 0, so skip straight to 1.
    if (next_generation >= (1 << 15)) {
        next_generation = 1;
    }

    generations[slot] = generation;
    objects[slot] = std::move(obj);

    *out_handle = generation | (slot << 15);
    return ResultSuccess;
}
// Creates a second handle referring to the same object as `handle`
// (pseudo-handles are resolved by GetGeneric).
Result HandleTable::Duplicate(Handle* out_handle, Handle handle) {
    auto object = GetGeneric(handle);
    R_UNLESS(object, ResultInvalidHandle);
    return Create(out_handle, std::move(object));
}
// Releases the object referenced by `handle` and returns its slot to the
// free list. Invalid handles yield ResultInvalidHandle.
Result HandleTable::Close(Handle handle) {
    R_UNLESS(IsValid(handle), ResultInvalidHandle);

    // Drop the object reference and push the slot onto the free list.
    const u16 freed_slot = GetSlot(handle);
    objects[freed_slot] = nullptr;
    generations[freed_slot] = next_free_slot;
    next_free_slot = freed_slot;
    return ResultSuccess;
}
// A handle is valid when its slot is in range, occupied, and the generation
// stamped into the handle matches the slot's current generation.
bool HandleTable::IsValid(Handle handle) const {
    const u16 slot = GetSlot(handle);
    if (slot >= MAX_COUNT) {
        return false;
    }
    return objects[slot] != nullptr && generations[slot] == GetGeneration(handle);
}
// Looks up `handle`, resolving the CurrentThread/CurrentProcess pseudo-handles
// first. Returns nullptr for invalid handles.
std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const {
    if (handle == CurrentThread) {
        return SharedFrom(kernel.GetCurrentThreadManager().GetCurrentThread());
    } else if (handle == CurrentProcess) {
        return kernel.GetCurrentProcess();
    }

    if (!IsValid(handle)) {
        return nullptr;
    }
    return objects[GetSlot(handle)];
}
void HandleTable::Clear() {
for (u16 i = 0; i < MAX_COUNT; ++i) {
generations[i] = i + 1;
objects[i] = nullptr;
}
next_free_slot = 0;
}
template <class Archive>
void HandleTable::serialize(Archive& ar, const unsigned int) {
    // Field order must stay stable for save-state compatibility.
    ar& objects;
    ar& generations;
    ar& next_generation;
    ar& next_free_slot;
}
SERIALIZE_IMPL(HandleTable)
} // namespace Kernel

View File

@ -0,0 +1,129 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <cstddef>
#include <memory>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/result.h"
namespace Kernel {
enum KernelHandle : Handle {
CurrentThread = 0xFFFF8000,
CurrentProcess = 0xFFFF8001,
};
/**
* This class allows the creation of Handles, which are references to objects that can be tested
* for validity and looked up. Here they are used to pass references to kernel objects to/from the
* emulated process. it has been designed so that it follows the same handle format and has
* approximately the same restrictions as the handle manager in the CTR-OS.
*
* Handles contain two sub-fields: a slot index (bits 31:15) and a generation value (bits 14:0).
* The slot index is used to index into the arrays in this class to access the data corresponding
* to the Handle.
*
* To prevent accidental use of a freed Handle whose slot has already been reused, a global counter
* is kept and incremented every time a Handle is created. This is the Handle's "generation". The
* value of the counter is stored into the Handle as well as in the handle table (in the
* "generations" array). When looking up a handle, the Handle's generation must match with the
* value stored on the class, otherwise the Handle is considered invalid.
*
* To find free slots when allocating a Handle without needing to scan the entire object array, the
* generations field of unallocated slots is re-purposed as a linked list of indices to free slots.
* When a Handle is created, an index is popped off the list and used for the new Handle. When it
* is destroyed, it is again pushed onto the list to be re-used by the next allocation. It is
* likely that this allocation strategy differs from the one used in CTR-OS, but this hasn't been
* verified and isn't likely to cause any problems.
*/
class HandleTable final : NonCopyable {
public:
    explicit HandleTable(KernelSystem& kernel);
    ~HandleTable();

    /**
     * Allocates a handle for the given object.
     * @return The created Handle or one of the following errors:
     * - `ResultOutOfHandles`: the maximum number of handles has been exceeded.
     */
    Result Create(Handle* out_handle, std::shared_ptr<Object> obj);

    /**
     * Returns a new handle that points to the same object as the passed in handle.
     * @return The duplicated Handle or one of the following errors:
     * - `ResultInvalidHandle`: an invalid handle was passed in.
     * - Any errors returned by `Create()`.
     */
    Result Duplicate(Handle* out_handle, Handle handle);

    /**
     * Closes a handle, removing it from the table and decreasing the object's ref-count.
     * @return `ResultSuccess` or one of the following errors:
     * - `ResultInvalidHandle`: an invalid handle was passed in.
     */
    Result Close(Handle handle);

    /// Checks if a handle is valid and points to an existing object.
    bool IsValid(Handle handle) const;

    /**
     * Looks up a handle.
     * @return Pointer to the looked-up object, or `nullptr` if the handle is not valid.
     */
    std::shared_ptr<Object> GetGeneric(Handle handle) const;

    /**
     * Looks up a handle while verifying its type.
     * @return Pointer to the looked-up object, or `nullptr` if the handle is not valid or its
     * type differs from the requested one.
     */
    template <class T>
    std::shared_ptr<T> Get(Handle handle) const {
        return DynamicObjectCast<T>(GetGeneric(handle));
    }

    /// Closes all handles held in this table.
    void Clear();

private:
    /**
     * This is the maximum limit of handles allowed per process in CTR-OS. It can be further
     * reduced by ExHeader values, but this is not emulated here.
     */
    static const std::size_t MAX_COUNT = 4096;

    /// Stores the Object referenced by the handle or null if the slot is empty.
    std::array<std::shared_ptr<Object>, MAX_COUNT> objects;

    /**
     * The value of `next_generation` when the handle was created, used to check for validity. For
     * empty slots, contains the index of the next free slot in the list.
     */
    std::array<u16, MAX_COUNT> generations;

    /**
     * Global counter of the number of created handles. Stored in `generations` when a handle is
     * created, and wraps around to 1 when it hits 0x8000.
     */
    u16 next_generation;

    /// Head of the free slots linked list.
    u16 next_free_slot;

    /// Used to resolve the CurrentThread/CurrentProcess pseudo-handles.
    KernelSystem& kernel;

    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const unsigned int);
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::HandleTable)
CONSTRUCT_KERNEL_OBJECT(Kernel::HandleTable)

View File

@ -12,12 +12,12 @@
#include "common/assert.h"
#include "common/common_types.h"
#include "core/core.h"
#include "core/hle/kernel/event.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/ipc_debugger/recorder.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
SERIALIZE_EXPORT_IMPL(Kernel::SessionRequestHandler)
SERIALIZE_EXPORT_IMPL(Kernel::SessionRequestHandler::SessionDataBase)
@ -33,13 +33,15 @@ public:
ThreadCallback(std::shared_ptr<HLERequestContext> context_,
std::shared_ptr<HLERequestContext::WakeupCallback> callback_)
: callback(std::move(callback_)), context(std::move(context_)) {}
void WakeUp(ThreadWakeupReason reason, KThread* thread, KSynchronizationObject* object) {
ASSERT(thread->m_status == ThreadStatus::WaitHleEvent);
void WakeUp(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
std::shared_ptr<WaitObject> object) {
ASSERT(thread->status == ThreadStatus::WaitHleEvent);
if (callback) {
callback->WakeUp(thread, *context, reason);
}
Process* process = thread->GetOwner();
auto process = thread->owner_process.lock();
ASSERT(process);
// We must copy the entire command buffer *plus* the entire static buffers area, since
// the translation might need to read from it in order to retrieve the StaticBuffer
@ -68,16 +70,16 @@ private:
friend class boost::serialization::access;
};
SessionRequestHandler::SessionInfo::SessionInfo(KServerSession* session_,
SessionRequestHandler::SessionInfo::SessionInfo(std::shared_ptr<ServerSession> session,
std::unique_ptr<SessionDataBase> data)
: session(session_), data(std::move(data)) {}
: session(std::move(session)), data(std::move(data)) {}
void SessionRequestHandler::ClientConnected(KServerSession* server_session) {
void SessionRequestHandler::ClientConnected(std::shared_ptr<ServerSession> server_session) {
server_session->SetHleHandler(shared_from_this());
connected_sessions.emplace_back(server_session, MakeSessionData());
connected_sessions.emplace_back(std::move(server_session), MakeSessionData());
}
void SessionRequestHandler::ClientDisconnected(KServerSession* server_session) {
void SessionRequestHandler::ClientDisconnected(std::shared_ptr<ServerSession> server_session) {
server_session->SetHleHandler(nullptr);
connected_sessions.erase(
std::remove_if(connected_sessions.begin(), connected_sessions.end(),
@ -102,46 +104,40 @@ void SessionRequestHandler::SessionInfo::serialize(Archive& ar, const unsigned i
}
SERIALIZE_IMPL(SessionRequestHandler::SessionInfo)
KEvent* HLERequestContext::SleepClientThread(const std::string& reason,
std::chrono::nanoseconds timeout,
std::shared_ptr<WakeupCallback> callback) {
std::shared_ptr<Event> HLERequestContext::SleepClientThread(
const std::string& reason, std::chrono::nanoseconds timeout,
std::shared_ptr<WakeupCallback> callback) {
// Put the client thread to sleep until the wait event is signaled or the timeout expires.
thread->m_wakeup_callback = std::make_shared<ThreadCallback>(shared_from_this(), callback);
thread->wakeup_callback = std::make_shared<ThreadCallback>(shared_from_this(), callback);
// Create pause event.
auto* event = KEvent::Create(kernel);
event->Initialize(nullptr, ResetType::OneShot);
event->SetName("HLE Pause Event: " + reason);
KEvent::Register(kernel, event);
// Add the event to the list of objects the thread is waiting for.
thread->m_status = ThreadStatus::WaitHleEvent;
thread->m_wait_objects = {event};
auto event = kernel.CreateEvent(Kernel::ResetType::OneShot, "HLE Pause Event: " + reason);
thread->status = ThreadStatus::WaitHleEvent;
thread->wait_objects = {event};
event->AddWaitingThread(thread);
if (timeout.count() > 0) {
if (timeout.count() > 0)
thread->WakeAfterDelay(timeout.count());
}
return event;
}
HLERequestContext::HLERequestContext() : kernel(Core::Global<KernelSystem>()) {}
HLERequestContext::HLERequestContext(KernelSystem& kernel, KServerSession* session, KThread* thread)
: kernel(kernel), session(session), thread(thread) {
HLERequestContext::HLERequestContext(KernelSystem& kernel, std::shared_ptr<ServerSession> session,
std::shared_ptr<Thread> thread)
: kernel(kernel), session(std::move(session)), thread(thread) {
cmd_buf[0] = 0;
}
HLERequestContext::~HLERequestContext() = default;
KAutoObject* HLERequestContext::GetIncomingHandle(u32 id_from_cmdbuf) const {
std::shared_ptr<Object> HLERequestContext::GetIncomingHandle(u32 id_from_cmdbuf) const {
ASSERT(id_from_cmdbuf < request_handles.size());
return request_handles[id_from_cmdbuf];
}
u32 HLERequestContext::AddOutgoingHandle(KAutoObject* object) {
request_handles.push_back(object);
u32 HLERequestContext::AddOutgoingHandle(std::shared_ptr<Object> object) {
request_handles.push_back(std::move(object));
return static_cast<u32>(request_handles.size() - 1);
}
@ -158,7 +154,8 @@ void HLERequestContext::AddStaticBuffer(u8 buffer_id, std::vector<u8> data) {
}
Result HLERequestContext::PopulateFromIncomingCommandBuffer(const u32_le* src_cmdbuf,
Process* src_process) {
std::shared_ptr<Process> src_process_) {
auto& src_process = *src_process_;
IPC::Header header{src_cmdbuf[0]};
std::size_t untranslated_size = 1u + header.normal_params_size;
@ -182,32 +179,25 @@ Result HLERequestContext::PopulateFromIncomingCommandBuffer(const u32_le* src_cm
switch (IPC::GetDescriptorType(descriptor)) {
case IPC::DescriptorType::CopyHandle:
case IPC::DescriptorType::MoveHandle: {
const u32 num_handles = IPC::HandleNumberFromDesc(descriptor);
auto& src_handle_table = src_process->handle_table;
u32 num_handles = IPC::HandleNumberFromDesc(descriptor);
ASSERT(i + num_handles <= command_size); // TODO(yuriks): Return error
for (u32 j = 0; j < num_handles; ++j) {
const Handle handle = src_cmdbuf[i];
if (!handle) {
cmd_buf[i++] = AddOutgoingHandle(nullptr);
continue;
Handle handle = src_cmdbuf[i];
std::shared_ptr<Object> object = nullptr;
if (handle != 0) {
object = src_process.handle_table.GetGeneric(handle);
ASSERT(object != nullptr); // TODO(yuriks): Return error
if (descriptor == IPC::DescriptorType::MoveHandle) {
src_process.handle_table.Close(handle);
}
}
// Get object from the handle table.
KScopedAutoObject object =
src_handle_table.GetObjectForIpcWithoutPseudoHandle(handle);
ASSERT(object.IsNotNull());
// If we are moving, remove the old handle.
if (descriptor == IPC::DescriptorType::MoveHandle) {
src_handle_table.Remove(handle);
}
cmd_buf[i++] = AddOutgoingHandle(object.GetPointerUnsafe());
cmd_buf[i++] = AddOutgoingHandle(std::move(object));
}
break;
}
case IPC::DescriptorType::CallingPid: {
cmd_buf[i++] = src_process->process_id;
cmd_buf[i++] = src_process.process_id;
break;
}
case IPC::DescriptorType::StaticBuffer: {
@ -216,7 +206,7 @@ Result HLERequestContext::PopulateFromIncomingCommandBuffer(const u32_le* src_cm
// Copy the input buffer into our own vector and store it.
std::vector<u8> data(buffer_info.size);
kernel.memory.ReadBlock(*src_process, source_address, data.data(), data.size());
kernel.memory.ReadBlock(src_process, source_address, data.data(), data.size());
AddStaticBuffer(buffer_info.buffer_id, std::move(data));
cmd_buf[i++] = source_address;
@ -224,7 +214,7 @@ Result HLERequestContext::PopulateFromIncomingCommandBuffer(const u32_le* src_cm
}
case IPC::DescriptorType::MappedBuffer: {
u32 next_id = static_cast<u32>(request_mapped_buffers.size());
request_mapped_buffers.emplace_back(kernel.memory, src_process, descriptor,
request_mapped_buffers.emplace_back(kernel.memory, src_process_, descriptor,
src_cmdbuf[i], next_id);
cmd_buf[i++] = next_id;
break;
@ -269,13 +259,14 @@ Result HLERequestContext::WriteToOutgoingCommandBuffer(u32_le* dst_cmdbuf,
case IPC::DescriptorType::CopyHandle:
case IPC::DescriptorType::MoveHandle: {
// HLE services don't use handles, so we treat both CopyHandle and MoveHandle equally
const u32 num_handles = IPC::HandleNumberFromDesc(descriptor);
u32 num_handles = IPC::HandleNumberFromDesc(descriptor);
ASSERT(i + num_handles <= command_size);
for (u32 j = 0; j < num_handles; ++j) {
KAutoObject* object = GetIncomingHandle(cmd_buf[i]);
std::shared_ptr<Object> object = GetIncomingHandle(cmd_buf[i]);
Handle handle = 0;
if (object != nullptr) {
dst_process.handle_table.Add(std::addressof(handle), object);
// TODO(yuriks): Figure out the proper error handling for if this fails
R_ASSERT(dst_process.handle_table.Create(std::addressof(handle), object));
}
dst_cmdbuf[i++] = handle;
}
@ -336,7 +327,7 @@ void HLERequestContext::serialize(Archive& ar, const unsigned int) {
ar& cmd_buf;
ar& session;
ar& thread;
// ar& request_handles;
ar& request_handles;
ar& static_buffers;
ar& request_mapped_buffers;
}
@ -344,8 +335,8 @@ SERIALIZE_IMPL(HLERequestContext)
MappedBuffer::MappedBuffer() : memory(&Core::Global<Core::System>().Memory()) {}
MappedBuffer::MappedBuffer(Memory::MemorySystem& memory, Process* process, u32 descriptor,
VAddr address, u32 id)
MappedBuffer::MappedBuffer(Memory::MemorySystem& memory, std::shared_ptr<Process> process,
u32 descriptor, VAddr address, u32 id)
: memory(&memory), id(id), address(address), process(std::move(process)) {
IPC::MappedBufferDescInfo desc{descriptor};
size = desc.size;

View File

@ -17,8 +17,8 @@
#include "common/serialization/boost_small_vector.hpp"
#include "common/swap.h"
#include "core/hle/ipc.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_session.h"
namespace Service {
class ServiceFrameworkBase;
@ -32,8 +32,8 @@ namespace Kernel {
class HandleTable;
class Process;
class KThread;
class KEvent;
class Thread;
class Event;
class HLERequestContext;
class KernelSystem;
@ -58,14 +58,14 @@ public:
* associated ServerSession alive for the duration of the connection.
* @param server_session Owning pointer to the ServerSession associated with the connection.
*/
virtual void ClientConnected(KServerSession* server_session);
virtual void ClientConnected(std::shared_ptr<ServerSession> server_session);
/**
* Signals that a client has just disconnected from this HLE handler and releases the
* associated ServerSession.
* @param server_session ServerSession associated with the connection.
*/
virtual void ClientDisconnected(KServerSession* server_session);
virtual void ClientDisconnected(std::shared_ptr<ServerSession> server_session);
/// Empty placeholder structure for services with no per-session data. The session data classes
/// in each service must inherit from this.
@ -79,9 +79,9 @@ public:
};
struct SessionInfo {
SessionInfo(KServerSession* session, std::unique_ptr<SessionDataBase> data);
SessionInfo(std::shared_ptr<ServerSession> session, std::unique_ptr<SessionDataBase> data);
KServerSession* session;
std::shared_ptr<ServerSession> session;
std::unique_ptr<SessionDataBase> data;
private:
@ -97,7 +97,7 @@ protected:
/// Returns the session data associated with the server session.
template <typename T>
T* GetSessionData(KServerSession* session) {
T* GetSessionData(std::shared_ptr<ServerSession> session) {
static_assert(std::is_base_of<SessionDataBase, T>(),
"T is not a subclass of SessionDataBase");
auto itr = std::find_if(connected_sessions.begin(), connected_sessions.end(),
@ -120,8 +120,8 @@ private:
class MappedBuffer {
public:
MappedBuffer(Memory::MemorySystem& memory, Process* process, u32 descriptor, VAddr address,
u32 id);
MappedBuffer(Memory::MemorySystem& memory, std::shared_ptr<Process> process, u32 descriptor,
VAddr address, u32 id);
// interface for service
void Read(void* dest_buffer, std::size_t offset, std::size_t size);
@ -144,7 +144,7 @@ private:
Memory::MemorySystem* memory;
u32 id;
VAddr address;
Process* process;
std::shared_ptr<Process> process;
u32 size;
IPC::MappedBufferPermissions perms;
@ -192,7 +192,8 @@ private:
*/
class HLERequestContext : public std::enable_shared_from_this<HLERequestContext> {
public:
explicit HLERequestContext(KernelSystem& kernel, KServerSession* session, KThread* thread);
HLERequestContext(KernelSystem& kernel, std::shared_ptr<ServerSession> session,
std::shared_ptr<Thread> thread);
~HLERequestContext();
/// Returns a pointer to the IPC command buffer for this request.
@ -209,21 +210,21 @@ public:
* Returns the session through which this request was made. This can be used as a map key to
* access per-client data on services.
*/
KServerSession* Session() const {
std::shared_ptr<ServerSession> Session() const {
return session;
}
/**
* Returns the client thread that made the service request.
*/
KThread* ClientThread() const {
std::shared_ptr<Thread> ClientThread() const {
return thread;
}
class WakeupCallback {
public:
virtual ~WakeupCallback() = default;
virtual void WakeUp(KThread* thread, HLERequestContext& context,
virtual void WakeUp(std::shared_ptr<Thread> thread, HLERequestContext& context,
ThreadWakeupReason reason) = 0;
private:
@ -243,8 +244,9 @@ public:
* was called.
* @returns Event that when signaled will resume the thread and call the callback function.
*/
KEvent* SleepClientThread(const std::string& reason, std::chrono::nanoseconds timeout,
std::shared_ptr<WakeupCallback> callback);
std::shared_ptr<Event> SleepClientThread(const std::string& reason,
std::chrono::nanoseconds timeout,
std::shared_ptr<WakeupCallback> callback);
private:
template <typename ResultFunctor>
@ -255,7 +257,7 @@ private:
future = std::move(fut);
}
void WakeUp(Kernel::KThread* thread, Kernel::HLERequestContext& ctx,
void WakeUp(std::shared_ptr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx,
Kernel::ThreadWakeupReason reason) {
functor(ctx);
}
@ -320,13 +322,13 @@ public:
* Resolves a object id from the request command buffer into a pointer to an object. See the
* "HLE handle protocol" section in the class documentation for more details.
*/
KAutoObject* GetIncomingHandle(u32 id_from_cmdbuf) const;
std::shared_ptr<Object> GetIncomingHandle(u32 id_from_cmdbuf) const;
/**
* Adds an outgoing object to the response, returning the id which should be used to reference
* it. See the "HLE handle protocol" section in the class documentation for more details.
*/
u32 AddOutgoingHandle(KAutoObject* object);
u32 AddOutgoingHandle(std::shared_ptr<Object> object);
/**
* Discards all Objects from the context, invalidating all ids. This may be called after reading
@ -354,8 +356,8 @@ public:
MappedBuffer& GetMappedBuffer(u32 id_from_cmdbuf);
/// Populates this context with data from the requesting process/thread.
Result PopulateFromIncomingCommandBuffer(const u32_le* src_cmdbuf, Process* src_process);
Result PopulateFromIncomingCommandBuffer(const u32_le* src_cmdbuf,
std::shared_ptr<Process> src_process);
/// Writes data from this context back to the requesting process/thread.
Result WriteToOutgoingCommandBuffer(u32_le* dst_cmdbuf, Process& dst_process) const;
@ -368,10 +370,10 @@ public:
private:
KernelSystem& kernel;
std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;
KServerSession* session;
KThread* thread;
std::shared_ptr<ServerSession> session;
std::shared_ptr<Thread> thread;
// TODO(yuriks): Check common usage of this and optimize size accordingly
boost::container::small_vector<KAutoObject*, 8> request_handles;
boost::container::small_vector<std::shared_ptr<Object>, 8> request_handles;
// The static buffers will be created when the IPC request is translated.
std::array<std::vector<u8>, IPC::MAX_STATIC_BUFFERS> static_buffers;
// The mapped buffers will be created when the IPC request is translated

View File

@ -9,13 +9,13 @@
#include "common/memory_ref.h"
#include "core/core.h"
#include "core/hle/ipc.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/ipc.h"
#include "core/hle/kernel/ipc_debugger/recorder.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
#include "core/memory.h"
SERIALIZE_EXPORT_IMPL(Kernel::MappedBufferContext)
@ -23,11 +23,12 @@ SERIALIZE_EXPORT_IMPL(Kernel::MappedBufferContext)
namespace Kernel {
Result TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySystem& memory,
KThread* src_thread, KThread* dst_thread, VAddr src_address,
std::shared_ptr<Thread> src_thread,
std::shared_ptr<Thread> dst_thread, VAddr src_address,
VAddr dst_address,
std::vector<MappedBufferContext>& mapped_buffer_context, bool reply) {
auto src_process = src_thread->GetOwner();
auto dst_process = dst_thread->GetOwner();
auto src_process = src_thread->owner_process.lock();
auto dst_process = dst_thread->owner_process.lock();
ASSERT(src_process && dst_process);
IPC::Header header;
@ -68,34 +69,30 @@ Result TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySystem
for (u32 j = 0; j < num_handles; ++j) {
Handle handle = cmd_buf[i];
std::shared_ptr<Object> object = nullptr;
// Perform pseudo-handle detection here because by the time this function is called,
// the current thread and process are no longer the ones which created this IPC
// request, but the ones that are handling it.
KScopedAutoObject object = [&]() -> KScopedAutoObject<KAutoObject> {
if (handle == CurrentThread) {
return src_thread;
} else if (handle == CurrentProcess) {
return src_process;
} else if (handle != 0) {
auto obj = src_process->handle_table.GetObject(handle);
if (descriptor == IPC::DescriptorType::MoveHandle) {
src_process->handle_table.Remove(handle);
}
return obj;
if (handle == CurrentThread) {
object = src_thread;
} else if (handle == CurrentProcess) {
object = src_process;
} else if (handle != 0) {
object = src_process->handle_table.GetGeneric(handle);
if (descriptor == IPC::DescriptorType::MoveHandle) {
src_process->handle_table.Close(handle);
}
return nullptr;
}();
}
if (object.IsNull()) {
if (object == nullptr) {
// Note: The real kernel sets invalid translated handles to 0 in the target
// command buffer.
cmd_buf[i++] = 0;
continue;
}
Handle dst_handle = 0;
dst_process->handle_table.Add(&dst_handle, object.GetPointerUnsafe());
cmd_buf[i++] = dst_handle;
R_ASSERT(dst_process->handle_table.Create(std::addressof(cmd_buf[i++]),
std::move(object)));
}
break;
}

View File

@ -4,11 +4,12 @@
#pragma once
#include <memory>
#include <vector>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "common/memory_ref.h"
#include "core/hle/ipc.h"
#include "core/hle/kernel/thread.h"
namespace Memory {
class MemorySystem;
@ -17,7 +18,6 @@ class MemorySystem;
namespace Kernel {
class KernelSystem;
class KThread;
struct MappedBufferContext {
IPC::MappedBufferPermissions permissions;
@ -35,7 +35,8 @@ private:
/// Performs IPC command buffer translation from one process to another.
Result TranslateCommandBuffer(KernelSystem& system, Memory::MemorySystem& memory,
KThread* src_thread, KThread* dst_thread, VAddr src_address,
std::shared_ptr<Thread> src_thread,
std::shared_ptr<Thread> dst_thread, VAddr src_address,
VAddr dst_address,
std::vector<MappedBufferContext>& mapped_buffer_context, bool reply);
} // namespace Kernel

View File

@ -4,80 +4,73 @@
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/scope_exit.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/ipc_debugger/recorder.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/service/service.h"
namespace IPCDebugger {
namespace {
ObjectInfo GetObjectInfo(const Kernel::KAutoObject* object) {
ObjectInfo GetObjectInfo(const Kernel::Object* object) {
if (object == nullptr) {
return {};
}
return {object->GetTypeName(), /*object->GetName()*/ "KAutoObject",
/*static_cast<int>(object->GetObjectId())*/ 1};
return {object->GetTypeName(), object->GetName(), static_cast<int>(object->GetObjectId())};
}
ObjectInfo GetObjectInfo(const Kernel::KThread* thread) {
ObjectInfo GetObjectInfo(const Kernel::Thread* thread) {
if (thread == nullptr) {
return {};
}
return {thread->GetTypeName(), /*thread->GetName()*/ "KThread",
/*static_cast<int>(object->GetObjectId())*/ 1};
return {thread->GetTypeName(), thread->GetName(), static_cast<int>(thread->GetThreadId())};
}
ObjectInfo GetObjectInfo(const Kernel::Process* process) {
if (process == nullptr) {
return {};
}
return {process->GetTypeName(), /*process->GetName()*/ "KProcess",
static_cast<int>(process->process_id)};
return {process->GetTypeName(), process->GetName(), static_cast<int>(process->process_id)};
}
} // Anonymous namespace
} // namespace
Recorder::Recorder() = default;
Recorder::~Recorder() = default;
bool Recorder::IsEnabled() const {
return enabled.load(std::memory_order_relaxed);
}
void Recorder::RegisterRequest(const Kernel::KClientSession* client_session,
const Kernel::KThread* client_thread) {
void Recorder::RegisterRequest(const std::shared_ptr<Kernel::ClientSession>& client_session,
const std::shared_ptr<Kernel::Thread>& client_thread) {
const u32 thread_id = client_thread->GetThreadId();
const RequestRecord record = {
.id = ++record_count,
.status = RequestStatus::Sent,
.client_process = GetObjectInfo(client_thread->GetOwner()),
.client_thread = GetObjectInfo(client_thread),
.client_session = GetObjectInfo(client_session),
.client_port = GetObjectInfo(client_session->GetParent()->GetParent()),
.server_process = {},
.server_thread = {},
.server_session = GetObjectInfo(&client_session->GetParent()->GetServerSession()),
};
record_map.insert_or_assign(thread_id, std::make_unique<RequestRecord>(record));
client_session_map.insert_or_assign(thread_id, client_session);
InvokeCallbacks(record);
if (auto owner_process = client_thread->owner_process.lock()) {
RequestRecord record = {/* id */ ++record_count,
/* status */ RequestStatus::Sent,
/* client_process */ GetObjectInfo(owner_process.get()),
/* client_thread */ GetObjectInfo(client_thread.get()),
/* client_session */ GetObjectInfo(client_session.get()),
/* client_port */ GetObjectInfo(client_session->parent->port.get()),
/* server_process */ {},
/* server_thread */ {},
/* server_session */ GetObjectInfo(client_session->parent->server)};
record_map.insert_or_assign(thread_id, std::make_unique<RequestRecord>(record));
client_session_map.insert_or_assign(thread_id, client_session);
InvokeCallbacks(record);
}
}
void Recorder::SetRequestInfo(const Kernel::KThread* client_thread,
void Recorder::SetRequestInfo(const std::shared_ptr<Kernel::Thread>& client_thread,
std::vector<u32> untranslated_cmdbuf,
std::vector<u32> translated_cmdbuf,
const Kernel::KThread* server_thread) {
const std::shared_ptr<Kernel::Thread>& server_thread) {
const u32 thread_id = client_thread->GetThreadId();
if (!record_map.count(thread_id)) {
// This is possible when the recorder is enabled after application started
@ -91,34 +84,30 @@ void Recorder::SetRequestInfo(const Kernel::KThread* client_thread,
record.translated_request_cmdbuf = std::move(translated_cmdbuf);
if (server_thread) {
record.server_process = GetObjectInfo(server_thread->GetOwner());
record.server_thread = GetObjectInfo(server_thread);
if (auto owner_process = server_thread->owner_process.lock()) {
record.server_process = GetObjectInfo(owner_process.get());
}
record.server_thread = GetObjectInfo(server_thread.get());
} else {
record.is_hle = true;
}
// Function name
ASSERT_MSG(client_session_map.count(thread_id), "Client session is missing");
const auto client_session = client_session_map[thread_id];
const auto& client_session = client_session_map[thread_id];
if (client_session->parent->port &&
client_session->parent->port->GetServerPort()->hle_handler) {
SCOPE_EXIT({
client_session_map.erase(thread_id);
InvokeCallbacks(record);
});
auto port = client_session->GetParent()->GetParent();
if (!port) {
return;
}
auto hle_handler = port->GetParent()->GetServerPort().GetHleHandler();
if (hle_handler) {
record.function_name = std::dynamic_pointer_cast<Service::ServiceFrameworkBase>(hle_handler)
record.function_name = std::dynamic_pointer_cast<Service::ServiceFrameworkBase>(
client_session->parent->port->GetServerPort()->hle_handler)
->GetFunctionName({record.untranslated_request_cmdbuf[0]});
}
client_session_map.erase(thread_id);
InvokeCallbacks(record);
}
void Recorder::SetReplyInfo(const Kernel::KThread* client_thread,
void Recorder::SetReplyInfo(const std::shared_ptr<Kernel::Thread>& client_thread,
std::vector<u32> untranslated_cmdbuf,
std::vector<u32> translated_cmdbuf) {
const u32 thread_id = client_thread->GetThreadId();
@ -140,7 +129,7 @@ void Recorder::SetReplyInfo(const Kernel::KThread* client_thread,
record_map.erase(thread_id);
}
void Recorder::SetHLEUnimplemented(const Kernel::KThread* client_thread) {
void Recorder::SetHLEUnimplemented(const std::shared_ptr<Kernel::Thread>& client_thread) {
const u32 thread_id = client_thread->GetThreadId();
if (!record_map.count(thread_id)) {
// This is possible when the recorder is enabled after application started

View File

@ -15,9 +15,8 @@
#include "common/common_types.h"
namespace Kernel {
class KClientSession;
class KThread;
enum class ClassTokenType : u32;
class ClientSession;
class Thread;
} // namespace Kernel
namespace IPCDebugger {
@ -28,7 +27,7 @@ namespace IPCDebugger {
struct ObjectInfo {
std::string type;
std::string name;
int id;
int id = -1;
};
/**
@ -81,28 +80,28 @@ public:
/**
* Registers a request into the recorder. The request is then assoicated with the client thread.
*/
void RegisterRequest(const Kernel::KClientSession* client_session,
const Kernel::KThread* client_thread);
void RegisterRequest(const std::shared_ptr<Kernel::ClientSession>& client_session,
const std::shared_ptr<Kernel::Thread>& client_thread);
/**
* Sets the request information of the request record associated with the client thread.
* When the server thread is empty, the request will be considered HLE.
*/
void SetRequestInfo(const Kernel::KThread* client_thread, std::vector<u32> untranslated_cmdbuf,
std::vector<u32> translated_cmdbuf,
const Kernel::KThread* server_thread = nullptr);
void SetRequestInfo(const std::shared_ptr<Kernel::Thread>& client_thread,
std::vector<u32> untranslated_cmdbuf, std::vector<u32> translated_cmdbuf,
const std::shared_ptr<Kernel::Thread>& server_thread = {});
/**
* Sets the reply information of the request record assoicated with the client thread.
* The request is then unlinked from the client thread.
*/
void SetReplyInfo(const Kernel::KThread* client_thread, std::vector<u32> untranslated_cmdbuf,
std::vector<u32> translated_cmdbuf);
void SetReplyInfo(const std::shared_ptr<Kernel::Thread>& client_thread,
std::vector<u32> untranslated_cmdbuf, std::vector<u32> translated_cmdbuf);
/**
* Set the status of a record to HLEUnimplemented.
*/
void SetHLEUnimplemented(const Kernel::KThread* client_thread);
void SetHLEUnimplemented(const std::shared_ptr<Kernel::Thread>& client_thread);
/**
* Set the status of the debugger (enabled/disabled).
@ -119,7 +118,7 @@ private:
int record_count{};
// Temporary client session map for function name handling
std::unordered_map<u32, const Kernel::KClientSession*> client_session_map;
std::unordered_map<u32, std::shared_ptr<Kernel::ClientSession>> client_session_map;
std::atomic_bool enabled{false};

View File

@ -1,226 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/vector.hpp>
#include "common/archives.h"
#include "common/logging/log.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_auto_object_container.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/memory.h"
namespace Kernel {
class KAddressArbiter::Callback : public WakeupCallback {
public:
explicit Callback(KAddressArbiter* _parent) : parent(_parent) {}
KAddressArbiter* parent;
void WakeUp(ThreadWakeupReason reason, KThread* thread,
KSynchronizationObject* object) override {
parent->WakeUp(reason, thread, object);
}
private:
template <class Archive>
void serialize(Archive& ar, const unsigned int) {
ar& boost::serialization::base_object<WakeupCallback>(*this);
}
friend class boost::serialization::access;
};
KAddressArbiter::KAddressArbiter(KernelSystem& kernel)
: KAutoObjectWithSlabHeapAndContainer{kernel},
m_timeout_callback(std::make_shared<Callback>(this)) {}
KAddressArbiter::~KAddressArbiter() = default;
void KAddressArbiter::Initialize(Process* owner) {
m_owner = owner;
m_owner->Open();
}
void KAddressArbiter::PostDestroy(uintptr_t arg) {
Process* owner = reinterpret_cast<Process*>(arg);
if (owner != nullptr) {
owner->ReleaseResource(ResourceLimitType::AddressArbiter, 1);
owner->Close();
}
}
void KAddressArbiter::WaitThread(KThread* thread, VAddr wait_address) {
thread->m_wait_address = wait_address;
thread->m_status = ThreadStatus::WaitArb;
m_waiting_threads.emplace_back(thread);
}
u64 KAddressArbiter::ResumeAllThreads(VAddr address) {
// Determine which threads are waiting on this address, those should be woken up.
auto itr = std::stable_partition(m_waiting_threads.begin(), m_waiting_threads.end(),
[address](KThread* thread) {
ASSERT_MSG(thread->GetStatus() == ThreadStatus::WaitArb,
"Inconsistent AddressArbiter state");
return thread->m_wait_address != address;
});
// Wake up all the found threads
const u64 num_threads = std::distance(itr, m_waiting_threads.end());
std::for_each(itr, m_waiting_threads.end(), [](KThread* thread) { thread->ResumeFromWait(); });
// Remove the woken up threads from the wait list.
m_waiting_threads.erase(itr, m_waiting_threads.end());
return num_threads;
}
bool KAddressArbiter::ResumeHighestPriorityThread(VAddr address) {
// Determine which threads are waiting on this address, those should be considered for wakeup.
auto matches_start = std::stable_partition(
m_waiting_threads.begin(), m_waiting_threads.end(), [address](KThread* thread) {
ASSERT_MSG(thread->GetStatus() == ThreadStatus::WaitArb,
"Inconsistent AddressArbiter state");
return thread->m_wait_address != address;
});
// Iterate through threads, find highest priority thread that is waiting to be arbitrated.
// Note: The real kernel will pick the first thread in the list if more than one have the
// same highest priority value. Lower priority values mean higher priority.
auto itr =
std::min_element(matches_start, m_waiting_threads.end(), [](KThread* lhs, KThread* rhs) {
return lhs->GetCurrentPriority() < rhs->GetCurrentPriority();
});
if (itr == m_waiting_threads.end()) {
return false;
}
auto thread = *itr;
thread->ResumeFromWait();
m_waiting_threads.erase(itr);
return true;
}
void KAddressArbiter::WakeUp(ThreadWakeupReason reason, KThread* thread,
KSynchronizationObject* object) {
ASSERT(reason == ThreadWakeupReason::Timeout);
// Remove the newly-awakened thread from the Arbiter's waiting list.
m_waiting_threads.erase(std::remove(m_waiting_threads.begin(), m_waiting_threads.end(), thread),
m_waiting_threads.end());
};
Result KAddressArbiter::ArbitrateAddress(KThread* thread, ArbitrationType type, VAddr address,
s32 value, u64 nanoseconds) {
switch (type) {
// Signal thread(s) waiting for arbitrate address...
case ArbitrationType::Signal: {
u64 num_threads{};
// Negative value means resume all threads
if (value < 0) {
num_threads = ResumeAllThreads(address);
} else {
// Resume first N threads
for (s32 i = 0; i < value; i++) {
num_threads += ResumeHighestPriorityThread(address);
}
}
// Prevents lag from low priority threads that spam svcArbitrateAddress and wake no threads
// The tick count is taken directly from official HOS kernel. The priority value is one less
// than official kernel as the affected FMV threads dont meet the priority threshold of 50.
// TODO: Revisit this when scheduler is rewritten and adjust if there isn't a problem there.
auto* core = m_kernel.current_cpu;
if (num_threads == 0 && core->GetID() == 0 && thread->GetCurrentPriority() >= 49) {
core->GetTimer().AddTicks(1614u);
}
break;
}
// Wait current thread (acquire the arbiter)...
case ArbitrationType::WaitIfLessThan:
if ((s32)m_kernel.memory.Read32(address) < value) {
WaitThread(thread, address);
}
break;
case ArbitrationType::WaitIfLessThanWithTimeout:
if ((s32)m_kernel.memory.Read32(address) < value) {
thread->SetWakeupCallback(m_timeout_callback);
thread->WakeAfterDelay(nanoseconds);
WaitThread(thread, address);
}
break;
case ArbitrationType::DecrementAndWaitIfLessThan: {
s32 memory_value = m_kernel.memory.Read32(address);
if (memory_value < value) {
// Only change the memory value if the thread should wait
m_kernel.memory.Write32(address, (s32)memory_value - 1);
WaitThread(thread, address);
}
break;
}
case ArbitrationType::DecrementAndWaitIfLessThanWithTimeout: {
s32 memory_value = m_kernel.memory.Read32(address);
if (memory_value < value) {
// Only change the memory value if the thread should wait
m_kernel.memory.Write32(address, (s32)memory_value - 1);
thread->SetWakeupCallback(m_timeout_callback);
thread->WakeAfterDelay(nanoseconds);
WaitThread(thread, address);
}
break;
}
default:
LOG_ERROR(Kernel, "unknown type={}", type);
return ResultInvalidEnumValueFnd;
}
// The calls that use a timeout seem to always return a Timeout error even if they did not put
// the thread to sleep
if (type == ArbitrationType::WaitIfLessThanWithTimeout ||
type == ArbitrationType::DecrementAndWaitIfLessThanWithTimeout) {
return ResultTimeout;
}
return ResultSuccess;
}
template <class Archive>
void KAddressArbiter::serialize(Archive& ar, const unsigned int file_version) {
ar& boost::serialization::base_object<KAutoObject>(*this);
ar& m_name;
ar& m_waiting_threads;
// ar& m_timeout_callback;
}
SERIALIZE_IMPL(KAddressArbiter)
} // namespace Kernel
namespace boost::serialization {
template <class Archive>
void save_construct_data(Archive& ar, const Kernel::KAddressArbiter::Callback* t,
const unsigned int) {
ar << t->parent;
}
template <class Archive>
void load_construct_data(Archive& ar, Kernel::KAddressArbiter::Callback* t, const unsigned int) {
Kernel::KAddressArbiter* parent;
ar >> parent;
::new (t) Kernel::KAddressArbiter::Callback(parent);
}
} // namespace boost::serialization
SERIALIZE_EXPORT_IMPL(Kernel::KAddressArbiter)
SERIALIZE_EXPORT_IMPL(Kernel::KAddressArbiter::Callback)

View File

@ -1,75 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <vector>
#include <boost/serialization/export.hpp>
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Kernel {
class KThread;
enum class ArbitrationType : u32 {
Signal,
WaitIfLessThan,
DecrementAndWaitIfLessThan,
WaitIfLessThanWithTimeout,
DecrementAndWaitIfLessThanWithTimeout,
};
class KAddressArbiter final : public KAutoObjectWithSlabHeapAndContainer<KAddressArbiter>,
public WakeupCallback {
KERNEL_AUTOOBJECT_TRAITS(KAddressArbiter, KAutoObject);
public:
explicit KAddressArbiter(KernelSystem& kernel);
~KAddressArbiter() override;
void Initialize(Process* owner);
uintptr_t GetPostDestroyArgument() const override {
return reinterpret_cast<uintptr_t>(m_owner);
}
static void PostDestroy(uintptr_t arg);
Process* GetOwner() const override {
return m_owner;
}
Result ArbitrateAddress(KThread* thread, ArbitrationType type, VAddr address, s32 value,
u64 nanoseconds);
private:
void WaitThread(KThread* thread, VAddr wait_address);
u64 ResumeAllThreads(VAddr address);
bool ResumeHighestPriorityThread(VAddr address);
void WakeUp(ThreadWakeupReason reason, KThread* thread,
KSynchronizationObject* object) override;
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version);
public:
Process* m_owner{};
std::string m_name{};
std::vector<KThread*> m_waiting_threads;
class Callback;
std::shared_ptr<Callback> m_timeout_callback;
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KAddressArbiter)
BOOST_CLASS_EXPORT_KEY(Kernel::KAddressArbiter::Callback)
CONSTRUCT_KERNEL_OBJECT(Kernel::KAddressArbiter)

View File

@ -1,32 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/archives.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/kernel.h"
namespace Kernel {
KAutoObject* KAutoObject::Create(KAutoObject* obj) {
obj->m_ref_count = 1;
return obj;
}
void KAutoObject::RegisterWithKernel() {
m_kernel.RegisterKernelObject(this);
}
void KAutoObject::UnregisterWithKernel(KernelSystem& kernel, KAutoObject* self) {
kernel.UnregisterKernelObject(self);
}
template <class Archive>
void KAutoObject::serialize(Archive& ar, const unsigned int) {
ar& m_name;
// ar& m_ref_count;
}
SERIALIZE_IMPL(KAutoObject)
} // namespace Kernel

View File

@ -1,305 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <atomic>
#include <boost/serialization/access.hpp>
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/global.h"
namespace Kernel {
class KernelSystem;
class Process;
using Handle = u32;
constexpr u32 DefaultStackSize = 0x4000;
enum class ClassTokenType : u32 {
KAutoObject = 0,
KSynchronizationObject = 1,
KSemaphore = 27,
KEvent = 31,
KTimer = 53,
KMutex = 57,
Debug = 77,
KServerPort = 85,
DmaObject = 89,
KClientPort = 101,
CodeSet = 104,
KSession = 112,
KThread = 141,
KServerSession = 149,
KAddressArbiter = 152,
KClientSession = 165,
KPort = 168,
KSharedMemory = 176,
Process = 197,
KResourceLimit = 200,
};
DECLARE_ENUM_FLAG_OPERATORS(ClassTokenType)
// Injects the type machinery used by the KAutoObject RTTI scheme into CLASS:
// a static type name, its ClassToken, non-copy/non-move, a BaseClass alias,
// and GetTypeObj()/GetTypeName() with the given virtual ATTRIBUTE (`const`
// for the root class, `const override` everywhere else). The final private
// operator!= declaration is intentionally left without a body so that the
// semicolon at the macro's use site completes it.
// (Comments cannot be placed inside the macro: `//` would swallow the `\`
// line continuations.)
#define KERNEL_AUTOOBJECT_TRAITS_IMPL(CLASS, BASE_CLASS, ATTRIBUTE)                                \
private:                                                                                           \
    static constexpr inline const char* const TypeName = #CLASS;                                   \
    static constexpr inline auto ClassToken = ClassTokenType::CLASS;                               \
                                                                                                   \
public:                                                                                            \
    CITRA_NON_COPYABLE(CLASS);                                                                     \
    CITRA_NON_MOVEABLE(CLASS);                                                                     \
                                                                                                   \
    using BaseClass = BASE_CLASS;                                                                  \
    static constexpr TypeObj GetStaticTypeObj() { return TypeObj(TypeName, ClassToken); }          \
    static constexpr const char* GetStaticTypeName() { return TypeName; }                          \
    virtual TypeObj GetTypeObj() ATTRIBUTE { return GetStaticTypeObj(); }                          \
    virtual const char* GetTypeName() ATTRIBUTE { return GetStaticTypeName(); }                    \
                                                                                                   \
private:                                                                                           \
    constexpr bool operator!=(const TypeObj& rhs)

// Convenience wrapper for every class other than KAutoObject itself.
#define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS)                                                \
    KERNEL_AUTOOBJECT_TRAITS_IMPL(CLASS, BASE_CLASS, const override)
// Base class of all reference-counted kernel objects. Provides intrusive
// ref-counting (Open/Close), a lightweight RTTI scheme (TypeObj + class
// tokens), and automatic registration with the owning KernelSystem.
class KAutoObject {
protected:
    // Describes a concrete object type: a human-readable name plus the class
    // token used for the derived-from test.
    class TypeObj {
    public:
        constexpr explicit TypeObj(const char* n, ClassTokenType tok)
            : m_name(n), m_class_token(tok) {}
        constexpr const char* GetName() const {
            return m_name;
        }
        constexpr ClassTokenType GetClassToken() const {
            return m_class_token;
        }
        constexpr bool operator==(const TypeObj& rhs) const {
            return this->GetClassToken() == rhs.GetClassToken();
        }
        constexpr bool operator!=(const TypeObj& rhs) const {
            return this->GetClassToken() != rhs.GetClassToken();
        }
        // Token subsumption: true when rhs's token bits are contained in ours.
        constexpr bool IsDerivedFrom(const TypeObj& rhs) const {
            return (this->GetClassToken() | rhs.GetClassToken()) == this->GetClassToken();
        }

    private:
        const char* m_name;
        ClassTokenType m_class_token;
    };

private:
    // Root of the trait hierarchy; plain `const` (not override) since there
    // is no base virtual yet.
    KERNEL_AUTOOBJECT_TRAITS_IMPL(KAutoObject, KAutoObject, const);

public:
    explicit KAutoObject(KernelSystem& kernel) : m_kernel(kernel) {
        RegisterWithKernel();
    }
    virtual ~KAutoObject() = default;

    static KAutoObject* Create(KAutoObject* ptr);

    // Destroy is responsible for destroying the auto object's resources when ref_count hits zero.
    virtual void Destroy() {
        UNIMPLEMENTED();
    }

    // Finalize is responsible for cleaning up resource, but does not destroy the object.
    virtual void Finalize() {}

    // Overridden by owned object types; base objects have no owning process.
    virtual Process* GetOwner() const {
        return nullptr;
    }

    u32 GetReferenceCount() const {
        return m_ref_count.load();
    }

    bool IsDerivedFrom(const TypeObj& rhs) const {
        return this->GetTypeObj().IsDerivedFrom(rhs);
    }

    bool IsDerivedFrom(const KAutoObject& rhs) const {
        return this->IsDerivedFrom(rhs.GetTypeObj());
    }

    // Token-based downcast; returns nullptr when the runtime type does not
    // derive from Derived. Does NOT open a new reference.
    template <typename Derived>
    Derived DynamicCast() {
        static_assert(std::is_pointer_v<Derived>);
        using DerivedType = std::remove_pointer_t<Derived>;

        if (this->IsDerivedFrom(DerivedType::GetStaticTypeObj())) {
            return static_cast<Derived>(this);
        } else {
            return nullptr;
        }
    }

    template <typename Derived>
    const Derived DynamicCast() const {
        static_assert(std::is_pointer_v<Derived>);
        using DerivedType = std::remove_pointer_t<Derived>;

        if (this->IsDerivedFrom(DerivedType::GetStaticTypeObj())) {
            return static_cast<Derived>(this);
        } else {
            return nullptr;
        }
    }

    // Tries to take a new reference. Fails (returns false) if the count has
    // already reached zero, i.e. the object is being destroyed.
    bool Open() {
        // Atomically increment the reference count, only if it's positive.
        u32 cur_ref_count = m_ref_count.load(std::memory_order_acquire);
        do {
            if (cur_ref_count == 0) {
                return false;
            }
            // Guards against u32 wrap-around of the reference count.
            ASSERT(cur_ref_count < cur_ref_count + 1);
        } while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count + 1,
                                                    std::memory_order_relaxed));
        return true;
    }

    // Drops a reference; destroys and unregisters the object when the count
    // reaches zero.
    void Close() {
        // Atomically decrement the reference count, not allowing it to become negative.
        u32 cur_ref_count = m_ref_count.load(std::memory_order_acquire);
        do {
            ASSERT(cur_ref_count > 0);
        } while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count - 1,
                                                    std::memory_order_acq_rel));

        // If ref count hits zero, destroy the object.
        // (cur_ref_count still holds the pre-decrement value here.)
        if (cur_ref_count - 1 == 0) {
            // Copy the kernel reference first — presumably because Destroy()
            // may release this object's storage (TODO confirm).
            KernelSystem& kernel = m_kernel;
            this->Destroy();
            KAutoObject::UnregisterWithKernel(kernel, this);
        }
    }

private:
    void RegisterWithKernel();
    static void UnregisterWithKernel(KernelSystem& kernel, KAutoObject* self);

    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const unsigned int);

protected:
    KernelSystem& m_kernel;
    std::string m_name{};

private:
    std::atomic<u32> m_ref_count{};
};
// RAII holder for a KAutoObject-derived pointer: Open()s on acquisition and
// Close()s on destruction. Move-only; supports implicit up/downcast moves
// between related KScopedAutoObject instantiations.
template <typename T>
class KScopedAutoObject {
public:
    CITRA_NON_COPYABLE(KScopedAutoObject);

    constexpr KScopedAutoObject() = default;

    // Takes a reference on `o` (if non-null) for the lifetime of this scope.
    constexpr KScopedAutoObject(T* o) : m_obj(o) {
        if (m_obj != nullptr) {
            m_obj->Open();
        }
    }

    ~KScopedAutoObject() {
        if (m_obj != nullptr) {
            m_obj->Close();
        }
        m_obj = nullptr;
    }

    // Move-converting constructor between related types. Upcasts transfer the
    // pointer directly; downcasts go through DynamicCast and drop (Close) the
    // reference when the runtime type does not match.
    template <typename U>
        requires(std::derived_from<T, U> || std::derived_from<U, T>)
    constexpr KScopedAutoObject(KScopedAutoObject<U>&& rhs) {
        if constexpr (std::derived_from<U, T>) {
            // Upcast.
            m_obj = rhs.m_obj;
            rhs.m_obj = nullptr;
        } else {
            // Downcast.
            T* derived = nullptr;
            if (rhs.m_obj != nullptr) {
                derived = rhs.m_obj->template DynamicCast<T*>();
                if (derived == nullptr) {
                    rhs.m_obj->Close();
                }
            }

            m_obj = derived;
            rhs.m_obj = nullptr;
        }
    }

    // Move assignment via swap; the old pointer is Closed when rhs dies.
    constexpr KScopedAutoObject<T>& operator=(KScopedAutoObject<T>&& rhs) {
        rhs.Swap(*this);
        return *this;
    }

    constexpr T* operator->() {
        return m_obj;
    }
    constexpr T& operator*() {
        return *m_obj;
    }

    // Replaces the held pointer (Opens `o`, Closes the previous object).
    constexpr void Reset(T* o) {
        KScopedAutoObject(o).Swap(*this);
    }

    // Raw access without affecting the reference count.
    constexpr T* GetPointerUnsafe() {
        return m_obj;
    }

    constexpr T* GetPointerUnsafe() const {
        return m_obj;
    }

    // Gives up ownership without Closing; the caller inherits the reference.
    constexpr T* ReleasePointerUnsafe() {
        T* ret = m_obj;
        m_obj = nullptr;
        return ret;
    }

    constexpr bool IsNull() const {
        return m_obj == nullptr;
    }
    constexpr bool IsNotNull() const {
        return m_obj != nullptr;
    }

private:
    template <typename U>
    friend class KScopedAutoObject;

private:
    T* m_obj{};

private:
    constexpr void Swap(KScopedAutoObject& rhs) noexcept {
        std::swap(m_obj, rhs.m_obj);
    }
};
} // namespace Kernel
// Teaches boost::serialization how to reconstruct kernel objects on load:
// placement-news T with the global KernelSystem, since these types have no
// default constructor. (No comments inside the macro — `//` would swallow
// the `\` continuations.)
#define CONSTRUCT_KERNEL_OBJECT(T)                                                                 \
    namespace boost::serialization {                                                               \
    template <class Archive>                                                                       \
    void load_construct_data(Archive& ar, T* t, const unsigned int file_version) {                 \
        ::new (t) T(Core::Global<Kernel::KernelSystem>());                                         \
    }                                                                                              \
    }

View File

@ -1,31 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include "core/hle/kernel/k_auto_object_container.h"
namespace Kernel {
// Appends obj to the container's linked list.
// NOTE(review): the lock is commented out here and in the other members;
// callers appear to rely on external synchronization — confirm.
void KAutoObjectWithListContainer::Register(KAutoObject* obj) {
    // KScopedLightMutex lk{m_mutex};
    m_object_list.push_back(*obj);
}
// Removes the first list entry whose address matches obj, if present.
void KAutoObjectWithListContainer::Unregister(KAutoObject* obj) {
    // KScopedLightMutex lk{m_mutex};
    const auto match = std::find_if(m_object_list.begin(), m_object_list.end(),
                                    [obj](auto& node) { return std::addressof(node) == obj; });
    if (match != m_object_list.end()) {
        m_object_list.erase(match);
    }
}
// Counts how many registered objects report `owner` as their owning process.
size_t KAutoObjectWithListContainer::GetOwnedCount(Process* owner) {
    // KScopedLightMutex lk{m_mutex};
    size_t owned = 0;
    for (const auto& obj : m_object_list) {
        if (obj.GetOwner() == owner) {
            ++owned;
        }
    }
    return owned;
}
} // namespace Kernel

View File

@ -1,37 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/common_funcs.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_linked_list.h"
namespace Kernel {
class KernelSystem;
class Process;
// Intrusive registry of live kernel objects, used to enumerate and count
// objects (e.g. per-process resource accounting via GetOwnedCount).
class KAutoObjectWithListContainer {
public:
    CITRA_NON_COPYABLE(KAutoObjectWithListContainer);
    CITRA_NON_MOVEABLE(KAutoObjectWithListContainer);

    using ListType = KLinkedList<KAutoObject>;

    KAutoObjectWithListContainer(KernelSystem& kernel) : m_object_list(kernel) {}

    // No setup/teardown needed currently; kept for interface symmetry.
    void Initialize() {}
    void Finalize() {}

    void Register(KAutoObject* obj);
    void Unregister(KAutoObject* obj);
    // Number of registered objects whose GetOwner() equals `owner`.
    size_t GetOwnedCount(Process* owner);

private:
    // KLightMutex m_mutex;
    ListType m_object_list;
};
} // namespace Kernel

View File

@ -1,79 +0,0 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/export.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/kernel/k_session.h"
SERIALIZE_EXPORT_IMPL(Kernel::KClientPort)
namespace Kernel {
KClientPort::KClientPort(KernelSystem& kernel) : KAutoObject(kernel) {}
KClientPort::~KClientPort() = default;

// Two-phase init: binds this client endpoint to its owning KPort, records
// the session cap, and derives the debug name from the port name.
void KClientPort::Initialize(KPort* parent, s32 max_sessions, std::string name) {
    // Set member variables.
    m_parent = parent;
    m_max_sessions = max_sessions;
    m_name = name + "_Client";
}
// Opens a new session on this port, enforcing the max-session cap. On
// success *out receives the client endpoint of the new session. The session
// is handed either to the port's HLE handler (emulated services) or queued
// on the server port for a guest server to accept.
Result KClientPort::CreateSession(KClientSession** out) {
    R_UNLESS(m_active_sessions < m_max_sessions, ResultMaxConnectionsReached);
    m_active_sessions++;

    // Allocate a new session.
    KSession* session = KSession::Create(m_kernel);

    // Initialize the session.
    session->Initialize(this);

    // Register the session.
    KSession::Register(m_kernel, session);

    // Let the created sessions inherit the parent port's HLE handler.
    auto* server = &m_parent->GetServerPort();
    auto hle_handler = server->GetHleHandler();
    if (hle_handler) {
        hle_handler->ClientConnected(&session->GetServerSession());
    } else {
        server->EnqueueSession(&session->GetServerSession());
    }

    // Wake the threads waiting on the ServerPort
    m_parent->GetServerPort().WakeupAllWaitingThreads();

    // We succeeded, so set the output.
    *out = std::addressof(session->GetClientSession());
    return ResultSuccess;
}
// Called when a session created by this port goes away; frees one slot
// toward the max-session cap.
void KClientPort::ConnectionClosed() {
    ASSERT(m_active_sessions > 0);
    --m_active_sessions;
}

// Savestate serialization. The parent KPort link is not serialized here
// (left commented out); presumably restored elsewhere on load — confirm.
template <class Archive>
void KClientPort::serialize(Archive& ar, const u32 file_version) {
    ar& boost::serialization::base_object<KAutoObject>(*this);
    // ar& m_parent;
    ar& m_max_sessions;
    ar& m_active_sessions;
    ar& m_name;
}
SERIALIZE_IMPL(KClientPort)
} // namespace Kernel

View File

@ -1,52 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string>
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/result.h"
namespace Kernel {
class KClientSession;
// Client-side endpoint of a KPort. Services/guests call CreateSession() on
// it to open new IPC sessions, bounded by m_max_sessions.
class KClientPort final : public KAutoObject {
    KERNEL_AUTOOBJECT_TRAITS(KClientPort, KAutoObject);

public:
    explicit KClientPort(KernelSystem& kernel);
    ~KClientPort() override;

    void Initialize(KPort* parent, s32 max_sessions, std::string name);

    const KPort* GetParent() const {
        return m_parent;
    }
    KPort* GetParent() {
        return m_parent;
    }

    // Creates a new session (see .cpp); fails with ResultMaxConnectionsReached
    // once m_active_sessions hits m_max_sessions.
    Result CreateSession(KClientSession** out);

    // Decrements the active-session count when a session is torn down.
    void ConnectionClosed();

private:
    KPort* m_parent{};
    u32 m_max_sessions{};
    u32 m_active_sessions{};
    std::string m_name;

    friend class KernelSystem;

private:
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const u32 file_version);
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KClientPort)
CONSTRUCT_KERNEL_OBJECT(Kernel::KClientPort)

View File

@ -1,42 +0,0 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/string.hpp>
#include "common/archives.h"
#include "core/hle/kernel/k_auto_object_container.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::KClientSession)
namespace Kernel {
KClientSession::KClientSession(KernelSystem& kernel) : KAutoObject(kernel) {}
KClientSession::~KClientSession() = default;

// When the client endpoint's last reference drops, notify the parent session
// and release our reference on it.
void KClientSession::Destroy() {
    m_parent->OnClientClosed();
    m_parent->Close();
}

// No client-side cleanup is currently needed when the server half closes.
void KClientSession::OnServerClosed() {}

// Forwards a sync request from `thread` to the server half of the session.
Result KClientSession::SendSyncRequest(KThread* thread) {
    // Signal the server session that new data is available
    return m_parent->GetServerSession().HandleSyncRequest(thread);
}

// Savestate serialization; the parent link is left commented out.
template <class Archive>
void KClientSession::serialize(Archive& ar, const u32 file_version) {
    ar& boost::serialization::base_object<KAutoObject>(*this);
    // ar& m_parent;
}
SERIALIZE_IMPL(KClientSession)
} // namespace Kernel

View File

@ -1,50 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <boost/serialization/export.hpp>
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/result.h"
namespace Kernel {
class KSession;
class KThread;
// Client-side endpoint of a KSession; the handle guests use to send IPC
// sync requests, which are forwarded to the paired server session.
class KClientSession final : public KAutoObject {
    KERNEL_AUTOOBJECT_TRAITS(KClientSession, KAutoObject);

public:
    explicit KClientSession(KernelSystem& kernel);
    ~KClientSession() override;

    void Initialize(KSession* parent) {
        // Set member variables.
        m_parent = parent;
    }

    void Destroy() override;

    KSession* GetParent() const {
        return m_parent;
    }

    // Dispatches a sync request issued by `thread` to the server half.
    Result SendSyncRequest(KThread* thread);

    // Notification hook invoked when the server endpoint closes.
    void OnServerClosed();

private:
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const u32 file_version);

private:
    KSession* m_parent{};
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KClientSession)
CONSTRUCT_KERNEL_OBJECT(Kernel::KClientSession)

View File

@ -1,76 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <vector>
#include <boost/serialization/vector.hpp>
namespace Kernel {
// Holds the code/rodata/data segments of a loaded program image, the backing
// memory for all of them, and the image's entrypoint metadata.
class CodeSet {
public:
    CodeSet() = default;
    ~CodeSet() = default;

    // One program segment: byte offset into `memory`, the virtual address it
    // maps to, and its size in bytes.
    struct Segment {
        std::size_t offset = 0;
        VAddr addr = 0;
        u32 size = 0;

    private:
        friend class boost::serialization::access;
        template <class Archive>
        void serialize(Archive& ar, const u32 file_version) {
            ar& offset;
            ar& addr;
            ar& size;
        }
    };

    // Fixed segment layout: [0] = code, [1] = rodata, [2] = data.
    Segment& CodeSegment() {
        return segments[0];
    }
    const Segment& CodeSegment() const {
        return segments[0];
    }
    Segment& RODataSegment() {
        return segments[1];
    }
    const Segment& RODataSegment() const {
        return segments[1];
    }
    Segment& DataSegment() {
        return segments[2];
    }
    const Segment& DataSegment() const {
        return segments[2];
    }

    // Backing bytes for all three segments.
    std::vector<u8> memory;
    std::array<Segment, 3> segments;
    // Zero-initialized: previously these scalars were left indeterminate,
    // so reading or serializing a CodeSet before the loader filled them in
    // produced garbage values.
    VAddr entrypoint = 0;
    u64 program_id = 0;
    std::string name;

private:
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const u32 file_version) {
        ar& memory;
        ar& segments;
        ar& entrypoint;
        ar& program_id;
        ar& name;
    }
};
} // namespace Kernel

View File

@ -1,78 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
SERIALIZE_EXPORT_IMPL(Kernel::KEvent)
namespace Kernel {
KEvent::KEvent(KernelSystem& kernel) : KAutoObjectWithSlabHeapAndContainer(kernel) {}
KEvent::~KEvent() = default;

// Binds the event to its owner (taking a reference on it) and records the
// reset behavior (OneShot / Sticky / Pulse).
void KEvent::Initialize(Process* owner, ResetType reset_type) {
    // Open a reference to the owner process.
    if (owner) {
        owner->Open();
        m_owner = owner;
    }

    // Set member variables.
    m_reset_type = reset_type;
}

// Runs after the event is destroyed: returns the event quota to the owner's
// resource limit and drops the reference taken in Initialize().
void KEvent::PostDestroy(uintptr_t arg) {
    Process* owner = reinterpret_cast<Process*>(arg);
    if (owner != nullptr) {
        owner->ReleaseResource(ResourceLimitType::Event, 1);
        owner->Close();
    }
}

// A thread must wait unless the event is currently signaled.
bool KEvent::ShouldWait(const KThread* thread) const {
    return !m_signaled;
}

// Consumes the signal for one-shot events; sticky/pulse events are handled
// elsewhere (Clear / WakeupAllWaitingThreads respectively).
void KEvent::Acquire(KThread* thread) {
    ASSERT_MSG(!ShouldWait(thread), "object unavailable!");

    if (m_reset_type == ResetType::OneShot) {
        m_signaled = false;
    }
}

void KEvent::Signal() {
    m_signaled = true;
    this->WakeupAllWaitingThreads();
}

void KEvent::Clear() {
    m_signaled = false;
}

// Pulse events auto-clear immediately after waking their waiters.
void KEvent::WakeupAllWaitingThreads() {
    KSynchronizationObject::WakeupAllWaitingThreads();

    if (m_reset_type == ResetType::Pulse) {
        m_signaled = false;
    }
}

template <class Archive>
void KEvent::serialize(Archive& ar, const u32 file_version) {
    ar& boost::serialization::base_object<KSynchronizationObject>(*this);
    ar& m_owner;
    ar& m_reset_type;
    ar& m_signaled;
}
SERIALIZE_IMPL(KEvent)
} // namespace Kernel

View File

@ -1,74 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <boost/serialization/export.hpp>
#include "core/global.h"
#include "core/hle/kernel/k_auto_object_container.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Kernel {
enum class ResetType : u32 {
OneShot,
Sticky,
Pulse,
};
class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KSynchronizationObject> {
KERNEL_AUTOOBJECT_TRAITS(KEvent, KSynchronizationObject);
public:
explicit KEvent(KernelSystem& kernel);
~KEvent() override;
std::string GetName() const {
return m_name;
}
void SetName(const std::string& name) {
m_name = name;
}
void Initialize(Process* owner, ResetType reset_type);
uintptr_t GetPostDestroyArgument() const override {
return reinterpret_cast<uintptr_t>(m_owner);
}
static void PostDestroy(uintptr_t arg);
Process* GetOwner() const override {
return m_owner;
}
ResetType GetResetType() const {
return m_reset_type;
}
bool ShouldWait(const KThread* thread) const override;
void Acquire(KThread* thread) override;
void WakeupAllWaitingThreads() override;
void Signal();
void Clear();
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version);
private:
Process* m_owner{};
ResetType m_reset_type{};
bool m_signaled{};
std::string m_name;
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KEvent)
CONSTRUCT_KERNEL_OBJECT(Kernel::KEvent)

View File

@ -1,106 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/array.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include "common/archives.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_process.h"
namespace Kernel {
// Drops the table's reference on every object that still occupies a slot.
Result KHandleTable::Finalize() {
    // Close and free all entries.
    for (size_t i = 0; i < m_table_size; i++) {
        KAutoObject* entry = m_objects[i];
        if (entry != nullptr) {
            entry->Close();
        }
    }

    return ResultSuccess;
}
// Removes `handle` from the table and drops the table's reference on the
// object. Returns false for pseudo-handles, malformed handles, or handles
// not currently valid in this table.
bool KHandleTable::Remove(Handle handle) {
    // Don't allow removal of a pseudo-handle.
    if (handle == KernelHandle::CurrentProcess || handle == KernelHandle::CurrentThread)
        [[unlikely]] {
        return false;
    }

    // Handles must not have reserved bits set.
    const auto handle_pack = HandlePack(handle);
    if (handle_pack.reserved != 0) [[unlikely]] {
        return false;
    }

    // Find the object and free the entry.
    // (The slot is released before Close() so the handle is invalid by the
    // time the object may be destroyed.)
    KAutoObject* obj = nullptr;
    {
        // KScopedLightMutex lk{m_mutex};
        if (this->IsValidHandle(handle)) [[likely]] {
            const auto index = handle_pack.index;
            obj = m_objects[index];
            this->FreeEntry(index);
        } else {
            return false;
        }
    }

    // Close the object.
    obj->Close();
    return true;
}
// Inserts `obj` into the table, opening a reference on it, and returns the
// newly encoded handle (slot index + linear id) through out_handle.
// Fails with ResultOutOfHandles when the table is full.
Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
    // KScopedLightMutex lk{m_mutex};

    // Never exceed our capacity.
    R_UNLESS(m_count < m_table_size, ResultOutOfHandles);

    // Allocate entry, set output handle.
    const auto linear_id = this->AllocateLinearId();
    const auto index = this->AllocateEntry();
    m_entry_infos[index].linear_id = linear_id;
    m_objects[index] = obj;
    obj->Open();
    *out_handle = EncodeHandle(static_cast<u16>(index), linear_id);
    return ResultSuccess;
}
// Resolves a handle for IPC translation: the two pseudo-handles map to the
// calling thread / its owning process, anything else is looked up normally.
KScopedAutoObject<KAutoObject> KHandleTable::GetObjectForIpc(Handle handle,
                                                             KThread* cur_thread) const {
    ASSERT(cur_thread != nullptr);

    // Handle pseudo-handles.
    if (handle == KernelHandle::CurrentThread) {
        return cur_thread;
    }
    if (handle == KernelHandle::CurrentProcess) {
        auto* owner_process = cur_thread->GetOwner();
        ASSERT(owner_process != nullptr);
        return owner_process;
    }

    return this->GetObjectForIpcWithoutPseudoHandle(handle);
}
// Savestate serialization of the table bookkeeping. The entry-info and
// object arrays themselves are left commented out; presumably the handles
// are re-registered elsewhere on load — confirm.
template <class Archive>
void KHandleTable::serialize(Archive& ar, const u32 file_version) {
    // ar& m_entry_infos;
    // ar& m_objects;
    ar& m_free_head_index;
    ar& m_table_size;
    ar& m_next_id;
    ar& m_max_count;
    ar& m_next_linear_id;
    ar& m_count;
}
SERIALIZE_IMPL(KHandleTable)
} // namespace Kernel

View File

@ -1,286 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <vector>
#include "common/common_types.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/result.h"
namespace Kernel {
// Pseudo-handles: sentinel values that always refer to the calling thread /
// its process without occupying a handle-table slot.
enum KernelHandle : Handle {
    CurrentThread = 0xFFFF8000,
    CurrentProcess = 0xFFFF8001,
};
// Per-process handle table. A handle packs a 15-bit slot index with a 15-bit
// "linear id" generation counter so stale handles to reused slots are
// rejected. Free slots form a singly linked list threaded through the
// EntryInfo union.
class KHandleTable {
    CITRA_NON_COPYABLE(KHandleTable);
    CITRA_NON_MOVEABLE(KHandleTable);

public:
    static constexpr size_t MaxTableSize = 1024;

public:
    explicit KHandleTable(KernelSystem& kernel) : m_kernel(kernel) {}

    // Sizes the table (size <= 0 selects MaxTableSize) and links every slot
    // into the free list, leaving the last index as the free-list head.
    Result Initialize(s32 size) {
        // KScopedLightMutex lk{m_mutex};

        // Initialize all fields.
        m_max_count = 0;
        m_table_size = static_cast<s16>((size <= 0) ? MaxTableSize : size);
        m_next_linear_id = MinLinearId;
        m_count = 0;
        m_free_head_index = -1;

        // Create the arrays
        m_objects.resize(m_table_size);
        m_entry_infos.resize(m_table_size);

        // Free all entries.
        for (s32 i = 0; i < static_cast<s32>(m_table_size); ++i) {
            m_objects[i] = nullptr;
            m_entry_infos[i].next_free_index = static_cast<s16>(i - 1);
            m_free_head_index = i;
        }

        return ResultSuccess;
    }

    size_t GetTableSize() const {
        return m_table_size;
    }
    size_t GetCount() const {
        return m_count;
    }
    // High-water mark of simultaneously live handles.
    size_t GetMaxCount() const {
        return m_max_count;
    }

    Result Finalize();
    bool Remove(Handle handle);
    Result Add(Handle* out_handle, KAutoObject* obj);

    // Looks up a handle, ignoring pseudo-handles, and downcasts to T.
    // Returns a scoped (Open'd) pointer, or null on failure.
    template <typename T = KAutoObject>
    KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
        // KScopedLightMutex lk{m_mutex};

        if constexpr (std::is_same_v<T, KAutoObject>) {
            return this->GetObjectImpl(handle);
        } else {
            if (auto* obj = this->GetObjectImpl(handle); obj != nullptr) [[likely]] {
                return obj->DynamicCast<T*>();
            } else {
                return nullptr;
            }
        }
    }

    // Like GetObjectWithoutPseudoHandle, but first resolves the pseudo-
    // handles when T is a base of Process / KThread respectively.
    template <typename T = KAutoObject>
    KScopedAutoObject<T> GetObject(Handle handle) const {
        // Handle pseudo-handles.
        if constexpr (std::derived_from<Process, T>) {
            if (handle == KernelHandle::CurrentProcess) {
                auto* const cur_process = m_kernel.GetCurrentProcess();
                ASSERT(cur_process != nullptr);
                return cur_process;
            }
        } else if constexpr (std::derived_from<KThread, T>) {
            if (handle == KernelHandle::CurrentThread) {
                auto* const cur_thread = m_kernel.GetCurrentThreadManager().GetCurrentThread();
                ASSERT(cur_thread != nullptr);
                return cur_thread;
            }
        }

        return this->template GetObjectWithoutPseudoHandle<T>(handle);
    }

    KScopedAutoObject<KAutoObject> GetObjectForIpcWithoutPseudoHandle(Handle handle) const {
        return this->GetObjectImpl(handle);
    }

    KScopedAutoObject<KAutoObject> GetObjectForIpc(Handle handle, KThread* cur_thread) const;

    // Converts an array of handles to T* all-or-nothing: on any lookup or
    // cast failure, every reference opened so far is closed and false is
    // returned. On success, out[] holds Open'd pointers the caller owns.
    template <typename T>
    bool GetMultipleObjects(T** out, const Handle* handles, size_t num_handles) const {
        // Try to convert and open all the handles.
        size_t num_opened;
        {
            // KScopedLightMutex lk{m_mutex};
            for (num_opened = 0; num_opened < num_handles; num_opened++) {
                // Get the current handle.
                const auto cur_handle = handles[num_opened];

                // Get the object for the current handle.
                KAutoObject* cur_object = this->GetObjectImpl(cur_handle);
                if (cur_object == nullptr) [[unlikely]] {
                    break;
                }

                // Cast the current object to the desired type.
                T* cur_t = cur_object->DynamicCast<T*>();
                if (cur_t == nullptr) [[unlikely]] {
                    break;
                }

                // Open a reference to the current object.
                cur_t->Open();
                out[num_opened] = cur_t;
            }
        }

        // If we converted every object, succeed.
        if (num_opened == num_handles) [[likely]] {
            return true;
        }

        // If we didn't convert entry object, close the ones we opened.
        for (size_t i = 0; i < num_opened; i++) {
            out[i]->Close();
        }

        return false;
    }

private:
    // Pops a slot off the free list; updates count and high-water mark.
    s32 AllocateEntry() {
        ASSERT(m_count < m_table_size);

        const auto index = m_free_head_index;

        m_free_head_index = m_entry_infos[index].GetNextFreeIndex();
        m_max_count = std::max(m_max_count, ++m_count);

        return index;
    }

    // Pushes a slot back onto the free list.
    void FreeEntry(s32 index) {
        ASSERT(m_count > 0);

        m_objects[index] = nullptr;
        m_entry_infos[index].next_free_index = static_cast<s16>(m_free_head_index);

        m_free_head_index = index;
        --m_count;
    }

    // Generation counter; wraps within [MinLinearId, MaxLinearId], never 0.
    u16 AllocateLinearId() {
        const u16 id = m_next_linear_id++;
        if (m_next_linear_id > MaxLinearId) {
            m_next_linear_id = MinLinearId;
        }
        return id;
    }

    // Validates index bounds, occupancy, and that the handle's linear id
    // matches the slot's current generation.
    bool IsValidHandle(Handle handle) const {
        // Unpack the handle.
        const auto handle_pack = HandlePack(handle);
        const auto raw_value = handle_pack.raw;
        const auto index = handle_pack.index;
        const auto linear_id = handle_pack.linear_id;
        const auto reserved = handle_pack.reserved;
        ASSERT(reserved == 0);

        // Validate our indexing information.
        if (raw_value == 0) [[unlikely]] {
            return false;
        }
        if (linear_id == 0) [[unlikely]] {
            return false;
        }
        if (index >= m_table_size) [[unlikely]] {
            return false;
        }

        // Check that there's an object, and our serial id is correct.
        if (m_objects[index] == nullptr) [[unlikely]] {
            return false;
        }
        if (m_entry_infos[index].GetLinearId() != linear_id) [[unlikely]] {
            return false;
        }

        return true;
    }

    KAutoObject* GetObjectImpl(Handle handle) const {
        // Handles must not have reserved bits set.
        const auto handle_pack = HandlePack(handle);
        if (handle_pack.reserved != 0) [[unlikely]] {
            return nullptr;
        }

        if (this->IsValidHandle(handle)) [[likely]] {
            return m_objects[handle_pack.index];
        } else {
            return nullptr;
        }
    }

    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const u32 file_version);

private:
    // Bit layout of a handle: [0..14] slot index, [15..29] linear id,
    // [30..31] reserved (must be zero).
    union HandlePack {
        constexpr HandlePack() = default;
        constexpr HandlePack(Handle handle) : raw{static_cast<u32>(handle)} {}

        u32 raw{};
        BitField<0, 15, u32> index;
        BitField<15, 15, u32> linear_id;
        BitField<30, 2, u32> reserved;
    };

    static constexpr Handle EncodeHandle(u16 index, u16 linear_id) {
        HandlePack handle{};
        handle.index.Assign(index);
        handle.linear_id.Assign(linear_id);
        handle.reserved.Assign(0);
        return handle.raw;
    }

private:
    static constexpr u16 MinLinearId = 1;
    static constexpr u16 MaxLinearId = 0x7FFF;

    // Per-slot bookkeeping: the generation id while occupied, or the next
    // free-list index while free.
    union EntryInfo {
        u16 linear_id;
        s16 next_free_index;

        constexpr u16 GetLinearId() const {
            return linear_id;
        }
        constexpr s32 GetNextFreeIndex() const {
            return next_free_index;
        }

        friend class boost::serialization::access;
        template <class Archive>
        void serialize(Archive& ar, const u32 file_version) {
            ar& linear_id;
            ar& next_free_index;
        }
    };

private:
    KernelSystem& m_kernel;
    std::vector<EntryInfo> m_entry_infos{};
    std::vector<KAutoObject*> m_objects{};
    s32 m_free_head_index{};
    u16 m_table_size{};
    // NOTE(review): m_next_id is serialized but never read or written by any
    // code visible here — possibly vestigial; confirm before removing.
    u16 m_next_id{};
    u16 m_max_count{};
    u16 m_next_linear_id{};
    u16 m_count{};
    // KLightMutex mutex;
};
} // namespace Kernel

View File

@ -1,237 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/intrusive_list.h"
#include "core/hle/kernel/k_slab_heap.h"
namespace Kernel {
class KernelSystem;
// Slab-allocated intrusive list node that stores a type-erased pointer to
// the listed item; KLinkedList<T> casts it back to T*.
class KLinkedListNode : public Common::IntrusiveListBaseNode<KLinkedListNode>,
                        public KSlabAllocated<KLinkedListNode> {
public:
    explicit KLinkedListNode(KernelSystem&) {}
    KLinkedListNode() = default;

    void Initialize(void* it) {
        m_item = it;
    }

    void* GetItem() const {
        return m_item;
    }

private:
    void* m_item = nullptr;
};
// Doubly linked list of T built on slab-allocated KLinkedListNodes. Unlike
// a purely intrusive list, the listed objects need no embedded hook: each
// node stores a pointer to its item. The list does NOT own the items; it
// only owns the nodes (freed back to the slab on erase).
template <typename T>
class KLinkedList : private Common::IntrusiveListBaseTraits<KLinkedListNode>::ListType {
private:
    using BaseList = Common::IntrusiveListBaseTraits<KLinkedListNode>::ListType;

public:
    template <bool Const>
    class Iterator;

    using value_type = T;
    using size_type = size_t;
    using difference_type = ptrdiff_t;
    using pointer = value_type*;
    using const_pointer = const value_type*;
    using reference = value_type&;
    using const_reference = const value_type&;
    using iterator = Iterator<false>;
    using const_iterator = Iterator<true>;
    using reverse_iterator = std::reverse_iterator<iterator>;
    using const_reverse_iterator = std::reverse_iterator<const_iterator>;

    // Bidirectional iterator adapting the node iterator: dereferencing
    // yields the stored T, not the node.
    template <bool Const>
    class Iterator {
    private:
        using BaseIterator = BaseList::iterator;
        friend class KLinkedList;

    public:
        using iterator_category = std::bidirectional_iterator_tag;
        using value_type = typename KLinkedList::value_type;
        using difference_type = typename KLinkedList::difference_type;
        using pointer = std::conditional_t<Const, KLinkedList::const_pointer, KLinkedList::pointer>;
        using reference =
            std::conditional_t<Const, KLinkedList::const_reference, KLinkedList::reference>;

    public:
        explicit Iterator(BaseIterator it) : m_base_it(it) {}

        pointer GetItem() const {
            return static_cast<pointer>(m_base_it->GetItem());
        }

        bool operator==(const Iterator& rhs) const {
            return m_base_it == rhs.m_base_it;
        }

        bool operator!=(const Iterator& rhs) const {
            return !(*this == rhs);
        }

        pointer operator->() const {
            return this->GetItem();
        }

        reference operator*() const {
            return *this->GetItem();
        }

        Iterator& operator++() {
            ++m_base_it;
            return *this;
        }

        Iterator& operator--() {
            --m_base_it;
            return *this;
        }

        Iterator operator++(int) {
            const Iterator it{*this};
            ++(*this);
            return it;
        }

        Iterator operator--(int) {
            const Iterator it{*this};
            --(*this);
            return it;
        }

        // Implicit mutable -> const iterator conversion.
        operator Iterator<true>() const {
            return Iterator<true>(m_base_it);
        }

    private:
        BaseIterator m_base_it;
    };

public:
    constexpr KLinkedList(KernelSystem& kernel_) : BaseList(), kernel{kernel_} {}

    ~KLinkedList() {
        // Erase all elements.
        for (auto it = begin(); it != end(); it = erase(it)) {
        }

        // Ensure we succeeded.
        ASSERT(this->empty());
    }

    // Iterator accessors.
    iterator begin() {
        return iterator(BaseList::begin());
    }

    const_iterator begin() const {
        return const_iterator(BaseList::begin());
    }

    iterator end() {
        return iterator(BaseList::end());
    }

    const_iterator end() const {
        return const_iterator(BaseList::end());
    }

    const_iterator cbegin() const {
        return this->begin();
    }

    const_iterator cend() const {
        return this->end();
    }

    reverse_iterator rbegin() {
        return reverse_iterator(this->end());
    }

    const_reverse_iterator rbegin() const {
        return const_reverse_iterator(this->end());
    }

    reverse_iterator rend() {
        return reverse_iterator(this->begin());
    }

    const_reverse_iterator rend() const {
        return const_reverse_iterator(this->begin());
    }

    const_reverse_iterator crbegin() const {
        return this->rbegin();
    }

    const_reverse_iterator crend() const {
        return this->rend();
    }

    // Content management.
    using BaseList::empty;
    using BaseList::size;

    reference back() {
        return *(--this->end());
    }

    const_reference back() const {
        return *(--this->end());
    }

    reference front() {
        return *this->begin();
    }

    const_reference front() const {
        return *this->begin();
    }

    // Allocates a node from the slab, points it at `ref`, and links it in
    // before `pos`.
    iterator insert(const_iterator pos, reference ref) {
        KLinkedListNode* new_node = KLinkedListNode::Allocate(kernel);
        ASSERT(new_node != nullptr);
        new_node->Initialize(std::addressof(ref));
        return iterator(BaseList::insert(pos.m_base_it, *new_node));
    }

    void push_back(reference ref) {
        this->insert(this->end(), ref);
    }

    void push_front(reference ref) {
        this->insert(this->begin(), ref);
    }

    void pop_back() {
        this->erase(--this->end());
    }

    void pop_front() {
        this->erase(this->begin());
    }

    // Unlinks the node and returns it to the slab; the item itself is not
    // touched.
    iterator erase(const iterator pos) {
        KLinkedListNode* freed_node = std::addressof(*pos.m_base_it);
        iterator ret = iterator(BaseList::erase(pos.m_base_it));
        KLinkedListNode::Free(kernel, freed_node);
        return ret;
    }

private:
    KernelSystem& kernel;
};
} // namespace Kernel

View File

@ -1,150 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_mutex.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
SERIALIZE_EXPORT_IMPL(Kernel::KMutex)
namespace Kernel {
// Force-releases every mutex still held by `thread` (e.g. on thread exit):
// resets each mutex's lock state, wakes its waiters, then clears the
// thread's held-mutex set.
void ReleaseThreadMutexes(KThread* thread) {
    for (KMutex* mtx : thread->m_held_mutexes) {
        mtx->m_lock_count = 0;
        mtx->m_holding_thread = nullptr;
        mtx->WakeupAllWaitingThreads();
    }
    thread->m_held_mutexes.clear();
}
KMutex::KMutex(KernelSystem& kernel) : KAutoObjectWithSlabHeapAndContainer(kernel) {}
KMutex::~KMutex() = default;
// Initializes the mutex, optionally owned by `owner` (a reference to the
// owner is opened and released later in PostDestroy) and optionally locked
// by the current thread.
void KMutex::Initialize(Process* owner, bool initial_locked) {
    // Open a reference to the owner process.
    if (owner) {
        owner->Open();
        m_owner = owner;
    }

    // Set default priority
    m_priority = ThreadPrioLowest;

    // Acquire mutex with current thread if initialized as locked
    if (initial_locked) {
        KThread* thread = m_kernel.GetCurrentThreadManager().GetCurrentThread();
        this->Acquire(thread);
    }
}
// Called after destruction with the owner pointer smuggled through `arg`
// (see GetPostDestroyArgument): returns the mutex resource to the owner's
// resource limit and drops the reference opened in Initialize().
void KMutex::PostDestroy(uintptr_t arg) {
    Process* owner = reinterpret_cast<Process*>(arg);
    if (owner != nullptr) {
        owner->ReleaseResource(ResourceLimitType::Mutex, 1);
        owner->Close();
    }
}
// A thread must wait only while the mutex is locked by a different thread;
// the current holder may re-enter (Acquire bumps the recursion count).
bool KMutex::ShouldWait(const KThread* thread) const {
    if (m_lock_count <= 0) {
        return false;
    }
    return thread != m_holding_thread;
}
// Acquires the mutex for `thread`. Must only be called when ShouldWait()
// is false. Recursive acquisition by the holder just increments the lock
// count; the first acquisition records the holder and triggers priority
// bookkeeping plus a reschedule.
void KMutex::Acquire(KThread* thread) {
    ASSERT_MSG(!ShouldWait(thread), "object unavailable!");

    // Actually "acquire" the mutex only if we don't already have it
    if (m_lock_count == 0) {
        m_priority = thread->m_current_priority;
        thread->m_held_mutexes.insert(this);
        m_holding_thread = thread;
        thread->UpdatePriority();
        m_kernel.PrepareReschedule();
    }

    m_lock_count++;
}
// Releases one recursion level of the mutex on behalf of `thread`.
// Fails if the caller is not the holding thread, or if the lock count is
// already non-positive. Only when the count reaches zero is the mutex
// actually handed off: the holder's priority is recomputed and all waiters
// are woken.
Result KMutex::Release(KThread* thread) {
    // We can only release the mutex if it's held by the calling thread.
    if (thread != m_holding_thread) {
        if (m_holding_thread) {
            LOG_ERROR(
                Kernel,
                "Tried to release a mutex (owned by thread id {}) from a different thread id {}",
                m_holding_thread->m_thread_id, thread->m_thread_id);
        }
        return Result(ErrCodes::WrongLockingThread, ErrorModule::Kernel,
                      ErrorSummary::InvalidArgument, ErrorLevel::Permanent);
    }

    // Note: It should not be possible for the situation where the mutex has a holding thread with a
    // zero lock count to occur. The real kernel still checks for this, so we do too.
    if (m_lock_count <= 0) {
        return Result(ErrorDescription::InvalidResultValue, ErrorModule::Kernel,
                      ErrorSummary::InvalidState, ErrorLevel::Permanent);
    }

    m_lock_count--;

    // Yield to the next thread only if we've fully released the mutex
    if (m_lock_count == 0) {
        m_holding_thread->m_held_mutexes.erase(this);
        m_holding_thread->UpdatePriority();
        m_holding_thread = nullptr;
        WakeupAllWaitingThreads();
        m_kernel.PrepareReschedule();
    }

    return ResultSuccess;
}
// Registers `thread` as a waiter and re-evaluates the mutex priority so
// priority inheritance can propagate to the holder.
void KMutex::AddWaitingThread(KThread* thread) {
    KSynchronizationObject::AddWaitingThread(thread);
    thread->m_pending_mutexes.insert(this);
    this->UpdatePriority();
}
// Unregisters `thread` as a waiter and re-evaluates the mutex priority,
// since the departing waiter may have been the best-priority one.
void KMutex::RemoveWaitingThread(KThread* thread) {
    KSynchronizationObject::RemoveWaitingThread(thread);
    thread->m_pending_mutexes.erase(this);
    this->UpdatePriority();
}
// Recomputes the effective mutex priority as the best (numerically lowest)
// priority among all waiting threads, and propagates a change to the
// holding thread (priority inheritance). No-op when nothing holds the mutex.
void KMutex::UpdatePriority() {
    if (!m_holding_thread) {
        return;
    }

    u32 best_priority = ThreadPrioLowest;
    for (const KThread* waiter : GetWaitingThreads()) {
        if (waiter->m_current_priority < best_priority) {
            best_priority = waiter->m_current_priority;
        }
    }

    if (best_priority != m_priority) {
        m_priority = best_priority;
        m_holding_thread->UpdatePriority();
    }
}
// Boost.Serialization support for save states. NOTE(review): m_owner is not
// serialized here — presumably restored elsewhere; verify against the
// save-state loading path.
template <class Archive>
void KMutex::serialize(Archive& ar, const u32 file_version) {
    ar& boost::serialization::base_object<KSynchronizationObject>(*this);
    ar& m_lock_count;
    ar& m_priority;
    ar& m_holding_thread;
}
SERIALIZE_IMPL(KMutex)
} // namespace Kernel

View File

@ -1,81 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <boost/serialization/export.hpp>
#include "core/global.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Kernel {
class KThread;
/**
 * Kernel mutex synchronization object. Supports recursive locking by the
 * holding thread (via a lock count) and priority inheritance: the mutex
 * tracks the best priority among its waiters and elevates the holder.
 */
class KMutex final : public KAutoObjectWithSlabHeapAndContainer<KMutex, KSynchronizationObject> {
    KERNEL_AUTOOBJECT_TRAITS(KMutex, KSynchronizationObject);

public:
    explicit KMutex(KernelSystem& kernel);
    ~KMutex() override;

    /// Sets up the mutex; optionally owned by `owner` and locked by the current thread.
    void Initialize(Process* owner, bool initial_locked);

    /// Passes the owner process to PostDestroy so it can release resources.
    uintptr_t GetPostDestroyArgument() const override {
        return reinterpret_cast<uintptr_t>(m_owner);
    }

    /// Returns the mutex resource to the owner's resource limit and closes it.
    static void PostDestroy(uintptr_t arg);

    Process* GetOwner() const override {
        return m_owner;
    }

    /// Effective (inherited) priority of the mutex.
    u32 GetPriority() const {
        return m_priority;
    }

    bool ShouldWait(const KThread* thread) const override;
    void Acquire(KThread* thread) override;

    void AddWaitingThread(KThread* thread) override;
    void RemoveWaitingThread(KThread* thread) override;

    /**
     * Elevate the mutex priority to the best priority
     * among the priorities of all its waiting threads.
     */
    void UpdatePriority();

    /**
     * Attempts to release the mutex from the specified thread.
     * @param thread Thread that wants to release the mutex.
     * @returns The result code of the operation.
     */
    Result Release(KThread* thread);

private:
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const u32 file_version);

public:
    Process* m_owner{};         // Owning process (referenced while alive), may be null.
    int m_lock_count{};         // Recursion depth; 0 means unlocked.
    u32 m_priority{};           // Inherited priority from the best waiter.
    KThread* m_holding_thread{}; // Current holder, null when unlocked.
};
/**
* Releases all the mutexes held by the specified thread
* @param thread Thread that is holding the mutexes
*/
void ReleaseThreadMutexes(KThread* thread);
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KMutex)
CONSTRUCT_KERNEL_OBJECT(Kernel::KMutex)

View File

@ -1,103 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/hle/kernel/k_object_name.h"
namespace Kernel {
// Global registry container; the kernel reference is currently unused.
KObjectNameGlobalData::KObjectNameGlobalData(KernelSystem& kernel) {}
KObjectNameGlobalData::~KObjectNameGlobalData() = default;
// Binds this name entry to `obj`, copying up to NameLengthMax-1 characters
// of `name` (always NUL-terminated) and opening a reference to the object.
void KObjectName::Initialize(KAutoObject* obj, const char* name) {
    // Set member variables.
    m_object = obj;
    std::strncpy(m_name.data(), name, sizeof(m_name) - 1);
    m_name[sizeof(m_name) - 1] = '\x00';

    // Open a reference to the object we hold.
    m_object->Open();
}
// True when `name` equals the stored name (bounded to the fixed buffer size).
bool KObjectName::MatchesName(const char* name) const {
    const int difference = std::strncmp(m_name.data(), name, sizeof(m_name));
    return difference == 0;
}
// Registers `obj` in the global name registry under `name`.
// Allocation failure yields the kernel's out-of-resource error; registering
// a name that already exists is treated as unreachable (the real kernel
// does not check for duplicates).
Result KObjectName::NewFromName(KernelSystem& kernel, KAutoObject* obj, const char* name) {
    // Create a new object name.
    KObjectName* new_name = KObjectName::Allocate(kernel);
    R_UNLESS(new_name != nullptr, Result{0xD86007F3});

    // Initialize the new name.
    new_name->Initialize(obj, name);

    // Check if there's an existing name.
    {
        // Get the global data.
        KObjectNameGlobalData& gd{kernel.ObjectNameGlobalData()};

        // Ensure we have exclusive access to the global list.
        // KScopedLightMutex lk{gd.GetObjectListLock()};

        // If the object doesn't exist, put it into the list.
        KScopedAutoObject existing_object = FindImpl(kernel, name);
        if (existing_object.IsNull()) {
            gd.GetObjectList().push_back(*new_name);
            return ResultSuccess;
        }
    }

    // The object already exists, the kernel does not check for this.
    UNREACHABLE();
}
// Removes the registry entry that maps `compare_name` to exactly `obj`,
// closing the reference the entry held and freeing the slab node.
// Returns ResultNotFound when no such pairing is registered.
Result KObjectName::Delete(KernelSystem& kernel, KAutoObject* obj, const char* compare_name) {
    // Get the global data.
    KObjectNameGlobalData& gd{kernel.ObjectNameGlobalData()};

    // Ensure we have exclusive access to the global list.
    // KScopedLightMutex lk{gd.GetObjectListLock()};

    // Find a matching entry in the list, and delete it.
    for (auto& name : gd.GetObjectList()) {
        if (name.MatchesName(compare_name) && obj == name.GetObject()) {
            // We found a match, clean up its resources.
            obj->Close();
            gd.GetObjectList().erase(gd.GetObjectList().iterator_to(name));
            KObjectName::Free(kernel, std::addressof(name));
            return ResultSuccess;
        }
    }

    // We didn't find the object in the list.
    return ResultNotFound;
}
// Public lookup wrapper; locking is currently disabled (single-threaded HLE),
// so it forwards straight to FindImpl.
KScopedAutoObject<KAutoObject> KObjectName::Find(KernelSystem& kernel, const char* name) {
    // Get the global data.
    // KObjectNameGlobalData& gd{kernel.ObjectNameGlobalData()};

    // Ensure we have exclusive access to the global list.
    // KScopedLightMutex lk{gd.GetObjectListLock()};

    return FindImpl(kernel, name);
}
// Walks the global name registry and returns the first object registered
// under `compare_name`, or a null scoped object when nothing matches.
KScopedAutoObject<KAutoObject> KObjectName::FindImpl(KernelSystem& kernel,
                                                     const char* compare_name) {
    KObjectNameGlobalData& global_data{kernel.ObjectNameGlobalData()};

    for (const auto& entry : global_data.GetObjectList()) {
        if (entry.MatchesName(compare_name)) {
            return entry.GetObject();
        }
    }

    // No registration under this name.
    return nullptr;
}
} // namespace Kernel

View File

@ -1,82 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include "common/intrusive_list.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_slab_heap.h"
namespace Kernel {
class KObjectNameGlobalData;
/**
 * Registry entry associating a fixed-length name with a kernel object.
 * Entries are slab-allocated and linked into a single global list kept in
 * KObjectNameGlobalData; each entry holds one reference to its object.
 */
class KObjectName : public KSlabAllocated<KObjectName>,
                    public Common::IntrusiveListBaseNode<KObjectName> {
public:
    explicit KObjectName(KernelSystem&) {}
    virtual ~KObjectName() = default;

    /// Maximum stored name length, including the NUL terminator.
    static constexpr size_t NameLengthMax = 12;
    using List = Common::IntrusiveListBaseTraits<KObjectName>::ListType;

    /// Registers `obj` under `name` in the global registry.
    static Result NewFromName(KernelSystem& kernel, KAutoObject* obj, const char* name);
    /// Removes the (`name`, `obj`) pairing from the registry.
    static Result Delete(KernelSystem& kernel, KAutoObject* obj, const char* name);

    /// Looks up an object by name; returns a null scoped object when absent.
    static KScopedAutoObject<KAutoObject> Find(KernelSystem& kernel, const char* name);

    /// Typed delete: only removes the entry once the server side is closed.
    template <typename Derived>
    static Result Delete(KernelSystem& kernel, const char* name) {
        // Find the object.
        KScopedAutoObject obj = Find(kernel, name);
        R_UNLESS(obj.IsNotNull(), ResultNotFound);

        // Cast the object to the desired type.
        Derived* derived = obj->DynamicCast<Derived*>();
        R_UNLESS(derived != nullptr, ResultNotFound);

        // Check that the object is closed.
        R_UNLESS(derived->IsServerClosed(), ResultInvalidAddressState);

        return Delete(kernel, obj.GetPointerUnsafe(), name);
    }

    // NOTE(review): this overload returns the untyped result without a
    // DynamicCast — relies on KScopedAutoObject's converting constructor.
    template <typename Derived>
        requires(std::derived_from<Derived, KAutoObject>)
    static KScopedAutoObject<Derived> Find(KernelSystem& kernel, const char* name) {
        return Find(kernel, name);
    }

private:
    static KScopedAutoObject<KAutoObject> FindImpl(KernelSystem& kernel, const char* name);

    void Initialize(KAutoObject* obj, const char* name);

    bool MatchesName(const char* name) const;

    KAutoObject* GetObject() const {
        return m_object;
    }

private:
    std::array<char, NameLengthMax> m_name{}; // NUL-terminated stored name.
    KAutoObject* m_object{};                  // Referenced object, never null once initialized.
};
/// Holder for the process-wide object-name registry list.
/// The guarding mutex is currently commented out (single-threaded HLE).
class KObjectNameGlobalData {
public:
    explicit KObjectNameGlobalData(KernelSystem& kernel);
    ~KObjectNameGlobalData();

    /// Access to the global, unsynchronized name list.
    KObjectName::List& GetObjectList() {
        return m_object_list;
    }

private:
    // KMutex m_mutex;
    KObjectName::List m_object_list;
};
} // namespace Kernel

View File

@ -1,25 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/hle/kernel/k_port.h"
namespace Kernel {
// The server/client endpoints are embedded members, constructed alongside
// the port and fully wired in Initialize().
KPort::KPort(KernelSystem& kernel)
    : KAutoObjectWithSlabHeapAndContainer{kernel}, m_server{kernel}, m_client{kernel} {}
KPort::~KPort() = default;
// Initializes the port pair: opens a self-reference and creates/initializes
// the embedded server and client endpoints, both pointing back at this port.
void KPort::Initialize(s32 max_sessions, std::string name) {
    // Open a new reference count to the initialized port.
    this->Open();

    // Create and initialize our server/client pair.
    KAutoObject::Create(std::addressof(m_server));
    KAutoObject::Create(std::addressof(m_client));
    m_server.Initialize(this, name);
    m_client.Initialize(this, max_sessions, name);
}
} // namespace Kernel

View File

@ -1,52 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/common_types.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Kernel {
class KServerSession;
/**
 * A kernel IPC port: a paired server endpoint (accepts sessions) and client
 * endpoint (connects sessions), owned together as a single kernel object.
 */
class KPort final : public KAutoObjectWithSlabHeapAndContainer<KPort> {
    KERNEL_AUTOOBJECT_TRAITS(KPort, KAutoObject);

public:
    explicit KPort(KernelSystem& kernel);
    ~KPort() override;

    static void PostDestroy(uintptr_t arg) {}

    /// Wires up the embedded server/client endpoints with the given session cap.
    void Initialize(s32 max_sessions, std::string name);
    void OnClientClosed();
    void OnServerClosed();

    bool IsServerClosed() const;

    /// Hands a freshly created session to the server endpoint.
    Result EnqueueSession(KServerSession* session);

    KClientPort& GetClientPort() {
        return m_client;
    }
    KServerPort& GetServerPort() {
        return m_server;
    }
    const KClientPort& GetClientPort() const {
        return m_client;
    }
    const KServerPort& GetServerPort() const {
        return m_server;
    }

private:
    KServerPort m_server;
    KClientPort m_client;
};
} // namespace Kernel

View File

@ -1,50 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/common_types.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
namespace Kernel {
/**
 * RAII helper that reserves `amount` of a resource against a resource limit.
 * If the reservation is not committed before destruction, it is released
 * automatically; a null limit always "succeeds" without reserving anything.
 */
class KScopedResourceReservation {
public:
    explicit KScopedResourceReservation(KResourceLimit* l, ResourceLimitType type, s32 amount = 1)
        : m_limit(l), m_amount(amount), m_type(type) {
        if (m_limit) {
            m_succeeded = m_limit->Reserve(m_type, m_amount);
        } else {
            // No limit to charge against; treat as a successful reservation.
            m_succeeded = true;
        }
    }

    explicit KScopedResourceReservation(const Process* p, ResourceLimitType type, s32 amount = 1)
        : KScopedResourceReservation(p->resource_limit, type, amount) {}

    ~KScopedResourceReservation() noexcept {
        if (m_limit && m_succeeded) {
            // Resource was not committed, release the reservation.
            m_limit->Release(m_type, m_amount);
        }
    }

    /// Commit the resource reservation, destruction of this object does not release the resource
    void Commit() {
        // Clearing the limit pointer disarms the destructor's release.
        m_limit = nullptr;
    }

    bool Succeeded() const {
        return m_succeeded;
    }

private:
    KResourceLimit* m_limit{};
    s32 m_amount{};
    ResourceLimitType m_type{};
    bool m_succeeded{};
};
} // namespace Kernel

View File

@ -1,78 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/export.hpp>
#include <boost/serialization/string.hpp>
#include "common/archives.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_auto_object_container.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_semaphore.h"
#include "core/hle/kernel/kernel.h"
SERIALIZE_EXPORT_IMPL(Kernel::KSemaphore)
namespace Kernel {
// Construction only wires up the slab-heap base; real setup happens in Initialize().
KSemaphore::KSemaphore(KernelSystem& kernel) : KAutoObjectWithSlabHeapAndContainer(kernel) {}
KSemaphore::~KSemaphore() = default;
// Initializes the semaphore with its initial and maximum counts; opens a
// reference to `owner` that is dropped later in PostDestroy.
void KSemaphore::Initialize(Process* owner, s32 initial_count, s32 max_count, std::string name) {
    // Open a reference to the owner process.
    if (owner) {
        owner->Open();
        m_owner = owner;
    }

    // Set member variables
    m_available_count = initial_count;
    m_max_count = max_count;
    m_name = name;
}
// Called after destruction with the owner pointer passed via `arg`:
// returns the semaphore resource to the owner and drops its reference.
void KSemaphore::PostDestroy(uintptr_t arg) {
    Process* owner = reinterpret_cast<Process*>(arg);
    if (owner != nullptr) {
        owner->ReleaseResource(ResourceLimitType::Semaphore, 1);
        owner->Close();
    }
}
// A thread must wait while no count is available; the thread argument is
// irrelevant for semaphores.
bool KSemaphore::ShouldWait(const KThread* thread) const {
    return !(m_available_count > 0);
}
// Consumes one count on behalf of `thread`.
// NOTE(review): unlike KMutex::Acquire, this silently no-ops instead of
// asserting when no count is available — confirm this asymmetry is intended.
void KSemaphore::Acquire(KThread* thread) {
    if (m_available_count <= 0) {
        return;
    }
    --m_available_count;
}
// Adds `release_count` to the available count, rejecting releases that would
// exceed the maximum. Writes the pre-release count to `out_count` and wakes
// all waiting threads.
Result KSemaphore::Release(s32* out_count, s32 release_count) {
    R_UNLESS(release_count + m_available_count <= m_max_count, ResultOutOfRangeKernel);

    // Update available count.
    const s32 previous_count = m_available_count;
    m_available_count += release_count;

    // Wakeup waiting threads and return.
    this->WakeupAllWaitingThreads();
    *out_count = previous_count;
    return ResultSuccess;
}
// Boost.Serialization support for save states. NOTE(review): m_name and
// m_owner are not serialized — verify they are reconstructed on load.
template <class Archive>
void KSemaphore::serialize(Archive& ar, const u32 file_version) {
    ar& boost::serialization::base_object<KSynchronizationObject>(*this);
    ar& m_max_count;
    ar& m_available_count;
}
SERIALIZE_IMPL(KSemaphore)
} // namespace Kernel

View File

@ -1,67 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/global.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Kernel {
class ResourceLimit;
/**
 * Kernel counting semaphore: threads wait while the available count is zero;
 * Release adds counts (bounded by the maximum) and wakes waiters.
 */
class KSemaphore final
    : public KAutoObjectWithSlabHeapAndContainer<KSemaphore, KSynchronizationObject> {
    KERNEL_AUTOOBJECT_TRAITS(KSemaphore, KSynchronizationObject);

public:
    explicit KSemaphore(KernelSystem& kernel);
    ~KSemaphore() override;

    /// Sets initial/max counts and takes a reference to the owner process.
    void Initialize(Process* owner, s32 initial_count, s32 max_count, std::string name);

    /// Passes the owner process to PostDestroy for resource cleanup.
    uintptr_t GetPostDestroyArgument() const override {
        return reinterpret_cast<uintptr_t>(m_owner);
    }

    static void PostDestroy(uintptr_t arg);

    Process* GetOwner() const override {
        return m_owner;
    }

    bool ShouldWait(const KThread* thread) const override;
    void Acquire(KThread* thread) override;

    s32 GetAvailableCount() const {
        return m_available_count;
    }

    s32 GetMaxCount() const {
        return m_max_count;
    }

    /// Releases `release_count` counts; `out_count` receives the prior count.
    Result Release(s32* out_count, s32 release_count);

private:
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const u32 file_version);

private:
    Process* m_owner{};       // Owning process (referenced), may be null.
    s32 m_max_count{};        // Upper bound for m_available_count.
    s32 m_available_count{};  // Currently acquirable count.
    std::string m_name;       // Debug/registry name (not serialized).
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KSemaphore)
CONSTRUCT_KERNEL_OBJECT(Kernel::KSemaphore)

View File

@ -1,68 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/vector.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::KServerPort)
namespace Kernel {
// Endpoint is embedded in KPort; real setup happens in Initialize().
KServerPort::KServerPort(KernelSystem& kernel) : KSynchronizationObject(kernel) {}
KServerPort::~KServerPort() = default;
// Binds this endpoint to its parent port and derives a debug name from it.
void KServerPort::Initialize(KPort* parent, std::string name) {
    m_parent = parent;
    m_name = name + "_Server";
}
// The server endpoint shares its parent's lifetime; dropping the parent
// reference here may destroy the whole port pair.
void KServerPort::Destroy() {
    // Close our reference to our parent.
    m_parent->Close();
}
// Pops and returns the most recently enqueued pending session (LIFO order —
// back of the vector), or nullptr when none is pending.
KServerSession* KServerPort::AcceptSession() {
    // Take the last session in the list, if any.
    if (m_pending_sessions.empty()) {
        return nullptr;
    }
    KServerSession* session = m_pending_sessions.back();
    m_pending_sessions.pop_back();
    return session;
}
// Queues a newly created session for a later AcceptSession() call.
void KServerPort::EnqueueSession(KServerSession* session) {
    // Add the session to our queue.
    m_pending_sessions.push_back(session);
}
// A thread waiting on a server port sleeps until a session is pending.
// (Idiom fix: use empty() instead of size() == 0.)
bool KServerPort::ShouldWait(const KThread* thread) const {
    // If there are no pending sessions, we wait until a new one is added.
    return m_pending_sessions.empty();
}
// Acquiring a server port has no side effects; the pending session is
// consumed separately via AcceptSession().
void KServerPort::Acquire(KThread* thread) {
    ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
}
// Boost.Serialization support. NOTE(review): m_parent and m_hle_handler are
// intentionally not serialized here — verify they are re-wired on load.
template <class Archive>
void KServerPort::serialize(Archive& ar, const u32 file_version) {
    ar& boost::serialization::base_object<KSynchronizationObject>(*this);
    ar& m_name;
    ar& m_pending_sessions;
    // ar& m_hle_handler;
}
SERIALIZE_IMPL(KServerPort)
} // namespace Kernel

View File

@ -1,61 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include <boost/serialization/export.hpp>
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_synchronization_object.h"
namespace Kernel {
class KClientPort;
class KServerSession;
class KPort;
class SessionRequestHandler;
/**
 * Server-side endpoint of a KPort. Waitable: signaled while sessions are
 * pending acceptance. May carry an HLE handler that services requests
 * in-emulator instead of forwarding them to a guest server.
 */
class KServerPort final : public KSynchronizationObject {
    KERNEL_AUTOOBJECT_TRAITS(KServerPort, KSynchronizationObject);

public:
    explicit KServerPort(KernelSystem& kernel);
    ~KServerPort() override;

    /// Binds to the parent port and names this endpoint "<name>_Server".
    void Initialize(KPort* parent, std::string name);
    void Destroy() override;

    /// Queues a session for later acceptance.
    void EnqueueSession(KServerSession* session);
    /// Pops the most recently queued session, or nullptr when none pending.
    KServerSession* AcceptSession();

    bool ShouldWait(const KThread* thread) const override;
    void Acquire(KThread* thread) override;

    void SetHleHandler(std::shared_ptr<SessionRequestHandler> hle_handler_) {
        m_hle_handler = std::move(hle_handler_);
    }

    std::shared_ptr<SessionRequestHandler> GetHleHandler() {
        return m_hle_handler;
    }

private:
    KPort* m_parent{};                                   // Owning port pair.
    std::string m_name;                                  // Debug name.
    std::vector<KServerSession*> m_pending_sessions;     // Sessions awaiting accept.
    std::shared_ptr<SessionRequestHandler> m_hle_handler; // Optional HLE service.

private:
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const unsigned int file_version);
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KServerPort)
CONSTRUCT_KERNEL_OBJECT(Kernel::KServerPort)

View File

@ -1,144 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/vector.hpp>
#include "common/archives.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::KServerSession)
namespace Kernel {
// Endpoint is embedded in KSession; wiring happens in Initialize().
KServerSession::KServerSession(KernelSystem& kernel) : KSynchronizationObject(kernel) {}
KServerSession::~KServerSession() = default;
// Informs the parent session that the server side went away, then drops the
// reference on the parent (which may destroy the whole session pair).
void KServerSession::Destroy() {
    m_parent->OnServerClosed();
    m_parent->Close();
}
// A server session is signaled (no wait) once the peer has closed, or when a
// request is pending and nothing is currently being handled.
bool KServerSession::ShouldWait(const KThread* thread) const {
    // Closed sessions should never wait, an error will be returned from svcReplyAndReceive.
    const auto state = m_parent->GetState();
    if (state != KSessionState::Normal) {
        return false;
    }

    // Wait if we have no pending requests, or if we're currently handling a request.
    return pending_requesting_threads.empty() || currently_handling != nullptr;
}
// Begins handling the most recent pending request: pops the requesting
// thread off the stack into currently_handling. No-op on closed sessions.
void KServerSession::Acquire(KThread* thread) {
    ASSERT_MSG(!ShouldWait(thread), "object unavailable!");

    // If the client endpoint was closed, don't do anything. This KServerSession is now useless and
    // will linger until its last handle is closed by the running application.
    const auto state = m_parent->GetState();
    if (state != KSessionState::Normal) {
        return;
    }

    // We are now handling a request, pop it from the stack.
    ASSERT(!pending_requesting_threads.empty());
    currently_handling = pending_requesting_threads.back();
    pending_requesting_threads.pop_back();
}
// Reacts to the client endpoint closing: notifies the HLE handler, drops all
// outstanding requests, and wakes threads waiting on this session so they
// can observe the closure.
void KServerSession::OnClientClosed() {
    // Notify HLE handler that client session has been disconnected.
    if (hle_handler) {
        hle_handler->ClientDisconnected(this);
    }

    // Clean up the list of client threads with pending requests, they are unneeded now that the
    // client endpoint is closed.
    pending_requesting_threads.clear();
    currently_handling = nullptr;

    // Wake threads only after the state above is cleared, so woken waiters
    // observe the closed-session state.
    this->WakeupAllWaitingThreads();
}
// Handles an incoming sync request from `thread` (the client caller).
// HLE path: the command buffer is read from guest memory, dispatched to the
// HLE handler, and written back immediately if the handler left the thread
// running (otherwise the write-back is deferred to the wakeup callback).
// In both paths the caller is put to sleep; for HLE a short artificial delay
// simulates IPC overhead, for LLE the thread is queued for the guest server.
Result KServerSession::HandleSyncRequest(KThread* thread) {
    // The KServerSession received a sync request, this means that there's new data available
    // from its ClientSession, so wake up any threads that may be waiting on a svcReplyAndReceive or
    // similar.

    // If this KServerSession has an associated HLE handler, forward the request to it.
    if (hle_handler != nullptr) {
        std::array<u32_le, IPC::COMMAND_BUFFER_LENGTH + 2 * IPC::MAX_STATIC_BUFFERS> cmd_buf;
        auto current_process = thread->GetOwner();
        ASSERT(current_process);
        m_kernel.memory.ReadBlock(*current_process, thread->GetCommandBufferAddress(),
                                  cmd_buf.data(), cmd_buf.size() * sizeof(u32));

        auto context = std::make_shared<Kernel::HLERequestContext>(m_kernel, this, thread);
        context->PopulateFromIncomingCommandBuffer(cmd_buf.data(), current_process);

        hle_handler->HandleSyncRequest(*context);

        ASSERT(thread->m_status == Kernel::ThreadStatus::Running ||
               thread->m_status == Kernel::ThreadStatus::WaitHleEvent);

        // Only write the response immediately if the thread is still running. If the HLE handler
        // put the thread to sleep then the writing of the command buffer will be deferred to the
        // wakeup callback.
        if (thread->m_status == Kernel::ThreadStatus::Running) {
            context->WriteToOutgoingCommandBuffer(cmd_buf.data(), *current_process);
            m_kernel.memory.WriteBlock(*current_process, thread->GetCommandBufferAddress(),
                                       cmd_buf.data(), cmd_buf.size() * sizeof(u32));
        }
    }

    if (thread->m_status == ThreadStatus::Running) {
        // Put the thread to sleep until the server replies, it will be awoken in
        // svcReplyAndReceive for LLE servers.
        thread->m_status = ThreadStatus::WaitIPC;

        if (hle_handler != nullptr) {
            // For HLE services, we put the request threads to sleep for a short duration to
            // simulate IPC overhead, but only if the HLE handler didn't put the thread to sleep for
            // other reasons like an async callback. The IPC overhead is needed to prevent
            // starvation when a thread only does sync requests to HLE services while a
            // lower-priority thread is waiting to run.

            // This delay was approximated in a homebrew application by measuring the average time
            // it takes for svcSendSyncRequest to return when performing the SetLcdForceBlack IPC
            // request to the GSP:GPU service in a n3DS with firmware 11.6. The measured values have
            // a high variance and vary between models.
            static constexpr u64 IPCDelayNanoseconds = 39000;
            thread->WakeAfterDelay(IPCDelayNanoseconds);
        } else {
            // Add the thread to the list of threads that have issued a sync request with this
            // server.
            pending_requesting_threads.push_back(std::move(thread));
        }
    }

    // If this KServerSession does not have an HLE implementation,
    // just wake up the threads waiting on it.
    this->WakeupAllWaitingThreads();
    return ResultSuccess;
}
// Boost.Serialization support. NOTE(review): m_parent is intentionally
// skipped here — verify the parent link is re-established on load.
template <class Archive>
void KServerSession::serialize(Archive& ar, const u32 file_version) {
    ar& boost::serialization::base_object<KSynchronizationObject>(*this);
    ar& m_name;
    // ar& m_parent;
    ar& hle_handler;
    ar& pending_requesting_threads;
    ar& currently_handling;
    ar& mapped_buffer_context;
}
SERIALIZE_IMPL(KServerSession)
} // namespace Kernel

View File

@ -1,80 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/ipc.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/result.h"
namespace Kernel {
class ClientSession;
class ClientPort;
class KSession;
class SessionRequestHandler;
class KThread;
/**
 * Server-side endpoint of a KSession. Waitable: signaled when a client
 * request is pending and nothing is currently being handled (or when the
 * peer has closed). May carry an HLE handler that services requests
 * in-emulator.
 *
 * Fix: value-initialize `currently_handling` — it was the only raw-pointer
 * member left uninitialized, which is undefined behavior to read before
 * Acquire()/OnClientClosed() first assign it.
 */
class KServerSession final : public KSynchronizationObject {
    KERNEL_AUTOOBJECT_TRAITS(KServerSession, KSynchronizationObject);

public:
    ~KServerSession() override;
    explicit KServerSession(KernelSystem& kernel);

    void Destroy() override;

    /// Binds this endpoint to its parent session pair.
    void Initialize(KSession* parent) {
        m_parent = parent;
    }

    KSession* GetParent() const {
        return m_parent;
    }

    /// Thread whose request is currently being handled, or nullptr.
    KThread* GetCurrent() {
        return currently_handling;
    }

    std::vector<MappedBufferContext>& GetMappedBufferContext() {
        return mapped_buffer_context;
    }

    void SetHleHandler(std::shared_ptr<SessionRequestHandler>&& hle_handler_) {
        hle_handler = std::move(hle_handler_);
    }

    std::shared_ptr<SessionRequestHandler>& GetHleHandler() {
        return hle_handler;
    }

    /// Called by the parent when the client endpoint closes.
    void OnClientClosed();

    /// Dispatches a client's sync request (HLE or queued for LLE).
    Result HandleSyncRequest(KThread* thread);

    bool ShouldWait(const KThread* thread) const override;
    void Acquire(KThread* thread) override;

private:
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const u32 file_version);

public:
    std::string m_name;                                    // Debug name.
    KSession* m_parent{};                                  // Owning session pair.
    std::shared_ptr<SessionRequestHandler> hle_handler;    // Optional HLE service.
    std::vector<KThread*> pending_requesting_threads;      // Requests awaiting handling (LIFO).
    KThread* currently_handling{};                         // Request being handled, or null.
    std::vector<MappedBufferContext> mapped_buffer_context; // IPC mapped-buffer bookkeeping.
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KServerSession)
CONSTRUCT_KERNEL_OBJECT(Kernel::KServerSession)

View File

@ -1,63 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_session.h"
namespace Kernel {
// Server/client endpoints are embedded members; full wiring happens in Initialize().
KSession::KSession(KernelSystem& kernel)
    : KAutoObjectWithSlabHeapAndContainer{kernel}, m_server{kernel}, m_client{kernel} {}
KSession::~KSession() = default;
// Initializes the session pair, optionally bound to the client port it was
// created through (a reference to the port is held until Finalize()).
void KSession::Initialize(KClientPort* client_port) {
    // Increment reference count.
    // Because reference count is one on creation, this will result
    // in a reference count of two. Thus, when both server and client are closed
    // this object will be destroyed.
    this->Open();

    // Create our sub sessions.
    KAutoObject::Create(std::addressof(m_server));
    KAutoObject::Create(std::addressof(m_client));

    // Initialize our sub sessions.
    m_state = KSessionState::Normal;
    m_server.Initialize(this);
    m_client.Initialize(this);

    // Set our port.
    m_port = client_port;
    if (m_port != nullptr) {
        m_port->Open();
    }

    // Mark initialized.
    m_initialized = true;
}
// Notifies the originating port that this connection is gone and releases
// the reference taken in Initialize().
void KSession::Finalize() {
    if (m_port != nullptr) {
        m_port->ConnectionClosed();
        m_port->Close();
    }
}
// Transitions Normal -> ServerClosed and informs the client endpoint.
// Only fires on the first close; later calls are no-ops.
void KSession::OnServerClosed() {
    if (m_state == KSessionState::Normal) {
        m_state = KSessionState::ServerClosed;
        m_client.OnServerClosed();
    }
}
// Transitions Normal -> ClientClosed and informs the server endpoint.
// Only fires on the first close; later calls are no-ops.
void KSession::OnClientClosed() {
    if (m_state == KSessionState::Normal) {
        m_state = KSessionState::ClientClosed;
        m_server.OnClientClosed();
    }
}
} // namespace Kernel

View File

@ -1,76 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string>
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Kernel {
class KClientPort;
// Lifecycle of a session pair; once one endpoint closes, the state records
// which side went first (there is no transition back to Normal).
enum class KSessionState : u8 {
    Invalid = 0,      // Not yet initialized.
    Normal = 1,       // Both endpoints open.
    ClientClosed = 2, // Client endpoint closed first.
    ServerClosed = 3, // Server endpoint closed first.
};
/**
 * A kernel IPC session: a paired client endpoint (sends requests) and server
 * endpoint (receives them), created through a KClientPort. The pair is
 * destroyed once both endpoints have been closed.
 */
class KSession final : public KAutoObjectWithSlabHeapAndContainer<KSession> {
    KERNEL_AUTOOBJECT_TRAITS(KSession, KAutoObject);

public:
    explicit KSession(KernelSystem& kernel);
    ~KSession() override;

    /// Wires up the endpoint pair; `port` may be null for portless sessions.
    void Initialize(KClientPort* port);
    void Finalize() override;

    bool IsInitialized() const override {
        return m_initialized;
    }

    static void PostDestroy(uintptr_t arg) {}

    void OnServerClosed();
    void OnClientClosed();

    KSessionState GetState() const {
        return m_state;
    }

    KClientSession& GetClientSession() {
        return m_client;
    }

    KServerSession& GetServerSession() {
        return m_server;
    }

    const KClientSession& GetClientSession() const {
        return m_client;
    }

    const KServerSession& GetServerSession() const {
        return m_server;
    }

    /// Port this session was created through; null for portless sessions.
    KClientPort* GetParent() {
        return m_port;
    }

private:
    KServerSession m_server;   // Embedded server endpoint.
    KClientSession m_client;   // Embedded client endpoint.
    KClientPort* m_port{};     // Originating port (referenced), may be null.
    KSessionState m_state{};   // Invalid until Initialize() runs.
    bool m_initialized{};
};
} // namespace Kernel

View File

@ -1,238 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include "common/archives.h"
#include "common/logging/log.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/memory.h"
#include "core/memory.h"
SERIALIZE_EXPORT_IMPL(Kernel::KSharedMemory)
namespace Kernel {
// Construction only wires up the slab-heap base; allocation happens in Initialize().
KSharedMemory::KSharedMemory(KernelSystem& kernel) : KAutoObjectWithSlabHeapAndContainer(kernel) {}
KSharedMemory::~KSharedMemory() = default;
// Initializes the shared memory block.
// address == 0: backing memory is linear-heap-allocated from `region`,
// zero-filled, and charged to the owner's memory usage.
// address != 0: the memory must already be mapped in the owner process; its
// state is transitioned Private -> Locked and its backing blocks captured.
Result KSharedMemory::Initialize(Process* owner, u32 size, MemoryPermission permissions,
                                 MemoryPermission other_permissions, VAddr address,
                                 MemoryRegion region) {
    // Open a reference to our owner process.
    if (owner) {
        owner->Open();
        m_owner = owner;
    }

    // Set member variables.
    m_base_address = address;
    m_size = size;
    m_permissions = permissions;
    m_other_permissions = other_permissions;

    // Allocate the shared memory block.
    if (address == 0) {
        // We need to allocate a block from the Linear Heap ourselves.
        // We'll manually allocate some memory from the linear heap in the specified region.
        auto memory_region = m_kernel.GetMemoryRegion(region);
        auto offset = memory_region->LinearAllocate(size);
        ASSERT_MSG(offset, "Not enough space in region to allocate shared memory!");

        // Store the backing blocks of allocated memory.
        auto& memory = m_kernel.memory;
        std::fill(memory.GetFCRAMPointer(*offset), memory.GetFCRAMPointer(*offset + size), 0);
        m_backing_blocks = {{memory.GetFCRAMRef(*offset), size}};
        m_holding_memory += MemoryRegionInfo::Interval(*offset, *offset + size);
        m_linear_heap_phys_offset = *offset;

        // Increase the amount of used linear heap memory for the owner process.
        if (m_owner) {
            m_owner->memory_used += size;
        }
    } else {
        // The memory is already available and mapped in the owner process.
        ASSERT(m_owner);
        auto& vm_manager = m_owner->vm_manager;
        R_TRY(vm_manager.ChangeMemoryState(address, size, MemoryState::Private,
                                           VMAPermission::ReadWrite, MemoryState::Locked,
                                           KSharedMemory::ConvertPermissions(permissions)));

        // Should succeed after verifying memory state above.
        auto backing_blocks = vm_manager.GetBackingBlocksForRange(address, size);
        ASSERT(backing_blocks.Succeeded());
        m_backing_blocks = std::move(backing_blocks).Unwrap();
    }

    return ResultSuccess;
}
// Initializes an applet shared memory block backed by the SYSTEM heap region.
// The block is mapped at HEAP_VADDR + offset and its backing memory is zeroed.
// Unlike Initialize(), this variant has no owner process and cannot fail
// (allocation failure asserts).
void KSharedMemory::InitializeForApplet(u32 offset, u32 size, MemoryPermission permissions,
MemoryPermission other_permissions) {
// Allocate memory in heap
auto memory_region = m_kernel.GetMemoryRegion(MemoryRegion::SYSTEM);
auto backing_blocks = memory_region->HeapAllocate(size);
ASSERT_MSG(!backing_blocks.empty(), "Not enough space in region to allocate shared memory!");
// Set member variables
m_holding_memory = backing_blocks;
m_base_address = Memory::HEAP_VADDR + offset;
m_size = size;
m_permissions = permissions;
m_other_permissions = other_permissions;
// Initialize backing blocks
// Heap allocation may be discontiguous, so record and zero each interval.
auto& memory = m_kernel.memory;
for (const auto& interval : backing_blocks) {
const VAddr addr = interval.lower();
const VAddr end = interval.upper();
m_backing_blocks.emplace_back(memory.GetFCRAMRef(addr), end - addr);
std::fill(memory.GetFCRAMPointer(addr), memory.GetFCRAMPointer(end), 0);
}
}
// Releases the block's backing memory and, for owner-backed blocks, restores
// the owner's mapping from Locked back to Private.
// NOTE(review): holding memory is always freed back to the SYSTEM region, but
// Initialize() can allocate it from an arbitrary `region` — confirm callers
// only ever pass regions whose Free() is compatible here, or free from the
// originating region instead.
void KSharedMemory::Finalize() {
auto memory_region = m_kernel.GetMemoryRegion(MemoryRegion::SYSTEM);
for (const auto& interval : m_holding_memory) {
memory_region->Free(interval.lower(), interval.upper() - interval.lower());
}
if (m_owner) {
if (m_base_address != 0) {
// Owner-provided backing: unlock the range for the owner again.
m_owner->vm_manager.ChangeMemoryState(m_base_address, m_size, MemoryState::Locked,
VMAPermission::None, MemoryState::Private,
VMAPermission::ReadWrite);
} else {
// Kernel-allocated backing: return the linear heap accounting.
m_owner->memory_used -= m_size;
}
}
}
// Called after destruction: returns the SharedMemory resource count to the
// owner process (passed as an opaque pointer) and drops our reference to it.
void KSharedMemory::PostDestroy(uintptr_t arg) {
    auto* const owner_process = reinterpret_cast<Process*>(arg);
    if (owner_process == nullptr) {
        return;
    }
    owner_process->ReleaseResource(ResourceLimitType::SharedMemory, 1);
    owner_process->Close();
}
// Maps this shared memory block into `target_process` at `address` with the
// requested permissions, after validating them against what the creator
// allowed. address == 0 with kernel-allocated backing maps at the block's
// fixed linear-heap address. Returns ResultSuccess or a validation error.
Result KSharedMemory::Map(Process& target_process, VAddr address, MemoryPermission permissions,
MemoryPermission other_permissions) {
// The owner process uses m_permissions; every other process uses m_other_permissions.
const MemoryPermission own_other_permissions =
&target_process == m_owner ? m_permissions : m_other_permissions;
// Automatically allocated memory blocks can only be mapped with other_permissions = DontCare
R_UNLESS(m_base_address != 0 || other_permissions == MemoryPermission::DontCare,
ResultInvalidCombination);
// Heap-backed memory blocks can not be mapped with other_permissions = DontCare
R_UNLESS(m_base_address == 0 || other_permissions != MemoryPermission::DontCare,
ResultInvalidCombination);
// Error out if the requested permissions don't match what the creator process allows.
if (static_cast<u32>(permissions) & ~static_cast<u32>(own_other_permissions)) {
LOG_ERROR(Kernel, "cannot map address={:#08X}, permissions don't match", address);
return ResultInvalidCombination;
}
// Error out if the provided permissions are not compatible with what the creator process needs.
if (other_permissions != MemoryPermission::DontCare &&
static_cast<u32>(m_permissions) & ~static_cast<u32>(other_permissions)) {
LOG_ERROR(Kernel, "cannot map address={:#08X}, permissions don't match", address);
return ResultWrongPermission;
}
// TODO(Subv): Check for the Shared Device Mem flag in the creator process.
/*if (was_created_with_shared_device_mem && address != 0) {
return Result(ErrorDescription::InvalidCombination, ErrorModule::OS,
ErrorSummary::InvalidArgument, ErrorLevel::Usage);
}*/
// TODO(Subv): The same process that created a SharedMemory object
// can not map it in its own address space unless it was created with addr=0, result 0xD900182C.
if (address != 0) {
// Explicit addresses must fall inside [HEAP_VADDR, SHARED_MEMORY_VADDR_END).
if (address < Memory::HEAP_VADDR || address + m_size >= Memory::SHARED_MEMORY_VADDR_END) {
LOG_ERROR(Kernel, "cannot map address={:#08X}, invalid address", address);
return ResultInvalidAddress;
}
}
VAddr target_address = address;
if (m_base_address == 0 && target_address == 0) {
// Calculate the address at which to map the memory block.
// Note: even on new firmware versions, the target address is still in the old linear heap
// region. This exception is made to keep the shared font compatibility. See
// APT:GetSharedFont for detail.
target_address = m_linear_heap_phys_offset + Memory::LINEAR_HEAP_VADDR;
}
{
// The whole target range must be a single free VMA.
auto vma = target_process.vm_manager.FindVMA(target_address);
if (vma->second.type != VMAType::Free ||
vma->second.base + vma->second.size < target_address + m_size) {
LOG_ERROR(Kernel, "cannot map address={:#08X}, mapping to already allocated memory",
address);
return ResultInvalidAddressState;
}
}
// Map the memory block into the target process
// Each backing block is mapped consecutively starting at target_address.
VAddr interval_target = target_address;
for (const auto& interval : m_backing_blocks) {
auto vma = target_process.vm_manager.MapBackingMemory(interval_target, interval.first,
interval.second, MemoryState::Shared);
ASSERT(vma.Succeeded());
target_process.vm_manager.Reprotect(vma.Unwrap(), ConvertPermissions(permissions));
interval_target += interval.second;
}
return ResultSuccess;
}
// Unmaps m_size bytes of this block from `target_process` starting at `address`.
// No check is made that the range actually belongs to this SharedMemory.
Result KSharedMemory::Unmap(Process& target_process, VAddr address) {
// TODO(Subv): Verify what happens if the application tries to unmap an address that is not
// mapped to a SharedMemory.
return target_process.vm_manager.UnmapRange(address, m_size);
}
// Converts a MemoryPermission to a VMAPermission by masking it down to the
// Read/Write/Execute bits; the numeric values of those bits coincide.
VMAPermission KSharedMemory::ConvertPermissions(MemoryPermission permission) {
    // Fixed: dropped the stray ';' that followed the function body
    // (flagged by -Wextra-semi); also made the mask const.
    const u32 masked_permissions =
        static_cast<u32>(permission) & static_cast<u32>(MemoryPermission::ReadWriteExecute);
    return static_cast<VMAPermission>(masked_permissions);
}
// Returns a host pointer `offset` bytes into the first backing block.
// Only safe when the block is contiguous (exactly one backing block); a
// discontinuous block logs a warning and may return a pointer that does not
// cover the full range.
u8* KSharedMemory::GetPointer(u32 offset) {
if (m_backing_blocks.size() != 1) {
LOG_WARNING(Kernel, "Unsafe GetPointer on discontinuous SharedMemory");
}
return m_backing_blocks[0].first + offset;
}
// Const overload of GetPointer(); same contiguity caveat applies.
const u8* KSharedMemory::GetPointer(u32 offset) const {
if (m_backing_blocks.size() != 1) {
LOG_WARNING(Kernel, "Unsafe GetPointer on discontinuous SharedMemory");
}
return m_backing_blocks[0].first + offset;
}
// Boost.Serialization hook for save states.
// NOTE(review): m_backing_blocks is deliberately commented out and therefore
// NOT serialized — confirm it is reconstructed after load, otherwise
// GetPointer() dereferences an empty vector on a restored state.
template <class Archive>
void KSharedMemory::serialize(Archive& ar, const u32 file_version) {
ar& boost::serialization::base_object<KAutoObject>(*this);
ar& m_linear_heap_phys_offset;
// ar& m_backing_blocks;
ar& m_size;
ar& m_permissions;
ar& m_other_permissions;
ar& m_owner;
ar& m_base_address;
ar& m_holding_memory;
}
SERIALIZE_IMPL(KSharedMemory)
} // namespace Kernel

View File

@ -1,234 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
// NOTE(review): `#pragma clang optimize off` disables optimizations for every
// translation unit that includes this header when building with clang. This
// looks like leftover debugging residue — remove before shipping.
#pragma clang optimize off
#include <atomic>
#include "common/assert.h"
#include "common/atomic_ops.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/kernel/kernel.h"
namespace Kernel {
namespace impl {
// Intrusive free-list allocator: freed objects are linked through a Node
// overlaid on their own storage, so the allocator needs no bookkeeping memory.
// NOTE(review): m_head is std::atomic, but Allocate()/Free() perform a
// separate load and store rather than a CAS loop, so concurrent callers can
// race — confirm all callers are externally serialized.
class KSlabHeapImpl {
CITRA_NON_COPYABLE(KSlabHeapImpl);
CITRA_NON_MOVEABLE(KSlabHeapImpl);
public:
// Free-list link stored inside each free object's storage.
struct Node {
Node* next{};
};
public:
constexpr KSlabHeapImpl() = default;
// Sanity-checks that the heap has not been initialized twice.
void Initialize() {
ASSERT(m_head == nullptr);
}
Node* GetHead() const {
return m_head;
}
// Pops the head of the free list; returns nullptr when exhausted.
void* Allocate() {
Node* ret = m_head;
if (ret != nullptr) [[likely]] {
m_head = ret->next;
}
return ret;
}
// Pushes an object back onto the free list. No ownership check here;
// callers (KSlabHeapBase::Free) validate the pointer first.
void Free(void* obj) {
Node* node = static_cast<Node*>(obj);
node->next = m_head;
m_head = node;
}
private:
std::atomic<Node*> m_head{};
};
} // namespace impl
// Non-template slab heap core: carves a caller-provided memory range into
// fixed-size objects and tracks the range bounds for ownership checks.
class KSlabHeapBase : protected impl::KSlabHeapImpl {
CITRA_NON_COPYABLE(KSlabHeapBase);
CITRA_NON_MOVEABLE(KSlabHeapBase);
private:
size_t m_obj_size{};
// Highest address ever handed out (high-water mark).
uintptr_t m_peak{};
// [m_start, m_end) bounds of the managed memory range.
uintptr_t m_start{};
uintptr_t m_end{};
private:
// Raises m_peak to cover `obj` via a CAS loop so concurrent allocations
// cannot lose an update.
// NOTE(review): nothing in this file calls UpdatePeakImpl — presumably it
// was invoked from Allocate() under a debug configuration; confirm.
void UpdatePeakImpl(uintptr_t obj) {
const uintptr_t alloc_peak = obj + this->GetObjectSize();
uintptr_t cur_peak = m_peak;
do {
if (alloc_peak <= cur_peak) {
break;
}
} while (
!Common::AtomicCompareAndSwap(std::addressof(m_peak), alloc_peak, cur_peak, cur_peak));
}
public:
constexpr KSlabHeapBase() = default;
// True if `address` lies inside the managed range.
bool Contains(uintptr_t address) const {
return m_start <= address && address < m_end;
}
void Initialize(size_t obj_size, void* memory, size_t memory_size) {
// Ensure we don't initialize a slab using null memory.
ASSERT(memory != nullptr);
// Set our object size.
m_obj_size = obj_size;
// Initialize the base allocator.
KSlabHeapImpl::Initialize();
// Set our tracking variables.
const size_t num_obj = (memory_size / obj_size);
m_start = reinterpret_cast<uintptr_t>(memory);
m_end = m_start + num_obj * obj_size;
m_peak = m_start;
// Free the objects.
// Freed back-to-front so the free list hands out ascending addresses.
u8* cur = reinterpret_cast<u8*>(m_end);
for (size_t i = 0; i < num_obj; i++) {
cur -= obj_size;
KSlabHeapImpl::Free(cur);
}
}
// Capacity in objects (despite the name, not bytes).
size_t GetSlabHeapSize() const {
return (m_end - m_start) / this->GetObjectSize();
}
size_t GetObjectSize() const {
return m_obj_size;
}
// Returns raw storage for one object, or nullptr when exhausted.
void* Allocate() {
void* obj = KSlabHeapImpl::Allocate();
return obj;
}
void Free(void* obj) {
// Don't allow freeing an object that wasn't allocated from this heap.
const bool contained = this->Contains(reinterpret_cast<uintptr_t>(obj));
ASSERT(contained);
KSlabHeapImpl::Free(obj);
}
size_t GetObjectIndex(const void* obj) const {
return (reinterpret_cast<uintptr_t>(obj) - m_start) / this->GetObjectSize();
}
size_t GetPeakIndex() const {
return this->GetObjectIndex(reinterpret_cast<const void*>(m_peak));
}
uintptr_t GetSlabHeapAddress() const {
return m_start;
}
size_t GetNumRemaining() const {
// Only calculate the number of remaining objects under debug configuration.
return 0;
}
};
// Typed slab heap: fixes the object size to sizeof(T) and constructs/returns
// T* instead of raw storage. Note that Free() does NOT run ~T(); destruction
// is the caller's responsibility.
template <typename T>
class KSlabHeap final : public KSlabHeapBase {
private:
using BaseHeap = KSlabHeapBase;
public:
constexpr KSlabHeap() = default;
void Initialize(void* memory, size_t memory_size) {
BaseHeap::Initialize(sizeof(T), memory, memory_size);
}
// Default-constructs a T in slab storage; nullptr when the slab is full.
T* Allocate() {
T* obj = static_cast<T*>(BaseHeap::Allocate());
if (obj != nullptr) [[likely]] {
std::construct_at(obj);
}
return obj;
}
// Constructs a T with the kernel reference; nullptr when the slab is full.
T* Allocate(KernelSystem& kernel) {
T* obj = static_cast<T*>(BaseHeap::Allocate());
if (obj != nullptr) [[likely]] {
std::construct_at(obj, kernel);
}
return obj;
}
void Free(T* obj) {
BaseHeap::Free(obj);
}
size_t GetObjectIndex(const T* obj) const {
return BaseHeap::GetObjectIndex(obj);
}
};
// CRTP mixin giving Derived static helpers that forward to the kernel's
// per-type slab heap (kernel.SlabHeap<Derived>()).
template <class Derived>
class KSlabAllocated {
public:
    constexpr KSlabAllocated() = default;

    // Index of this object within its slab heap.
    size_t GetSlabIndex(KernelSystem& kernel) const {
        // Fixed: KSlabHeap exposes GetObjectIndex(); the previous call to a
        // non-existent GetIndex() member would fail to compile as soon as
        // this template was instantiated.
        return kernel.SlabHeap<Derived>().GetObjectIndex(static_cast<const Derived*>(this));
    }

public:
    // Initializes the slab heap that backs all Derived allocations.
    static void InitializeSlabHeap(KernelSystem& kernel, void* memory, size_t memory_size) {
        kernel.SlabHeap<Derived>().Initialize(memory, memory_size);
    }

    // Allocates and constructs a Derived; nullptr when the slab is exhausted.
    static Derived* Allocate(KernelSystem& kernel) {
        return kernel.SlabHeap<Derived>().Allocate(kernel);
    }

    // Returns a Derived object's storage to the slab (no destructor call).
    static void Free(KernelSystem& kernel, Derived* obj) {
        kernel.SlabHeap<Derived>().Free(obj);
    }

    static size_t GetObjectSize(KernelSystem& kernel) {
        return kernel.SlabHeap<Derived>().GetObjectSize();
    }

    static size_t GetSlabHeapSize(KernelSystem& kernel) {
        return kernel.SlabHeap<Derived>().GetSlabHeapSize();
    }

    static size_t GetPeakIndex(KernelSystem& kernel) {
        return kernel.SlabHeap<Derived>().GetPeakIndex();
    }

    static uintptr_t GetSlabHeapAddress(KernelSystem& kernel) {
        return kernel.SlabHeap<Derived>().GetSlabHeapAddress();
    }

    static size_t GetNumRemaining(KernelSystem& kernel) {
        return kernel.SlabHeap<Derived>().GetNumRemaining();
    }
};
} // namespace Kernel

View File

@ -1,117 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <utility>
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/vector.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
namespace Kernel {
KSynchronizationObject::KSynchronizationObject(KernelSystem& kernel) : KAutoObject(kernel) {}
KSynchronizationObject::~KSynchronizationObject() = default;
// Registers `thread` as a waiter on this object, ignoring duplicates.
void KSynchronizationObject::AddWaitingThread(KThread* thread) {
    const bool already_waiting =
        std::ranges::find(waiting_threads, thread) != waiting_threads.end();
    if (!already_waiting) {
        waiting_threads.push_back(thread);
    }
}
// Removes `thread` from the waiter list if present. A thread that passed
// multiple handles to the same object may be removed more than once, so a
// missing entry is not an error.
void KSynchronizationObject::RemoveWaitingThread(KThread* thread) {
    if (auto it = std::ranges::find(waiting_threads, thread); it != waiting_threads.end()) {
        waiting_threads.erase(it);
    }
}
// Scans the waiter list for the highest-priority thread that could acquire
// this object right now. Lower numeric priority means higher priority, so the
// candidate threshold starts one past ThreadPrioLowest (accept anything).
// Returns nullptr when no waiter is ready.
KThread* KSynchronizationObject::GetHighestPriorityReadyThread() const {
KThread* candidate = nullptr;
u32 candidate_priority = ThreadPrioLowest + 1;
for (auto* thread : waiting_threads) {
// The list of waiting threads must not contain threads that are not waiting to be awakened.
ASSERT_MSG(thread->GetStatus() == ThreadStatus::WaitSynchAny ||
thread->GetStatus() == ThreadStatus::WaitSynchAll ||
thread->GetStatus() == ThreadStatus::WaitHleEvent,
"Inconsistent thread statuses in waiting_threads");
// Skip threads that are lower priority than the current candidate, or
// that would still block on this object.
if (thread->GetCurrentPriority() >= candidate_priority || ShouldWait(thread)) {
continue;
}
// A thread is ready to run if it's either in ThreadStatus::WaitSynchAny or
// in ThreadStatus::WaitSynchAll and the rest of the objects it is waiting on are ready.
bool ready_to_run = true;
if (thread->GetStatus() == ThreadStatus::WaitSynchAll) {
ready_to_run =
std::ranges::none_of(thread->m_wait_objects, [thread](const auto* object) {
return object->ShouldWait(thread);
});
}
if (ready_to_run) {
candidate = thread;
candidate_priority = thread->GetCurrentPriority();
}
}
return candidate;
}
// Repeatedly wakes the highest-priority ready waiter until none remain.
// Each woken thread acquires this object (or, for wait-all, every object it
// waits on), runs its wakeup callback, is detached from all wait lists, and
// is resumed. Finally notifies the HLE callback, if one is registered.
void KSynchronizationObject::WakeupAllWaitingThreads() {
while (auto thread = GetHighestPriorityReadyThread()) {
if (!thread->IsSleepingOnWaitAll()) {
Acquire(thread);
} else {
// Wait-all: every object the thread waits on is ready; acquire them all.
for (auto& object : thread->m_wait_objects) {
object->Acquire(thread);
}
}
// Invoke the wakeup callback before clearing the wait objects
if (thread->m_wakeup_callback) {
thread->m_wakeup_callback->WakeUp(ThreadWakeupReason::Signal, thread, this);
}
for (auto& object : thread->m_wait_objects) {
object->RemoveWaitingThread(thread);
}
thread->m_wait_objects.clear();
thread->ResumeFromWait();
}
if (hle_notifier) {
hle_notifier();
}
}
// Read-only view of the threads currently waiting on this object.
const std::vector<KThread*>& KSynchronizationObject::GetWaitingThreads() const {
return waiting_threads;
}
// Installs the callback invoked at the end of WakeupAllWaitingThreads().
void KSynchronizationObject::SetHLENotifier(std::function<void()> callback) {
hle_notifier = std::move(callback);
}
// Boost.Serialization hook for save states.
template <class Archive>
void KSynchronizationObject::serialize(Archive& ar, const unsigned int file_version) {
ar& boost::serialization::base_object<KAutoObject>(*this);
ar& waiting_threads;
// NB: hle_notifier *not* serialized since it's a callback!
// Fortunately it's only used in one place (DSP) so we can reconstruct it there
}
SERIALIZE_IMPL(KSynchronizationObject)
} // namespace Kernel

View File

@ -1,452 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <boost/serialization/string.hpp>
#include <boost/serialization/unordered_map.hpp>
#include <boost/serialization/vector.hpp>
#include <boost/serialization/weak_ptr.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/serialization/boost_flat_set.h"
#include "core/arm/arm_interface.h"
#include "core/arm/skyeye_common/armstate.h"
#include "core/core_timing.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_mutex.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/result.h"
#include "core/memory.h"
SERIALIZE_EXPORT_IMPL(Kernel::KThread)
SERIALIZE_EXPORT_IMPL(Kernel::WakeupCallback)
namespace Kernel {
// Boost.Serialization hook for the per-core thread manager state.
template <class Archive>
void ThreadManager::serialize(Archive& ar, const unsigned int) {
ar& current_thread;
ar& ready_queue;
ar& wakeup_callback_table;
ar& thread_list;
}
SERIALIZE_IMPL(ThreadManager)
// WakeupCallback carries no data; serialized only so derived callbacks can be.
template <class Archive>
void WakeupCallback::serialize(Archive& ar, const unsigned int) {}
SERIALIZE_IMPL(WakeupCallback)
// Registers this core's wakeup timing event; the event's user data is the
// id of the thread to wake.
ThreadManager::ThreadManager(Kernel::KernelSystem& kernel, u32 core_id) : kernel(kernel) {
thread_wakeup_event_type = kernel.timing.RegisterEvent(
"ThreadWakeupCallback_" + std::to_string(core_id),
[this](u64 thread_id, s64 cycle_late) { ThreadWakeupCallback(thread_id, cycle_late); });
}
// Stops every remaining thread on shutdown.
ThreadManager::~ThreadManager() {
for (auto& t : thread_list) {
t->Stop();
}
}
// Performs a context switch on this core: saves the outgoing thread's CPU
// state (re-queueing it if it was preempted while Running), then loads the
// incoming thread's state, switching the current process if it differs.
// Passing nullptr idles the core without resetting the current process.
void ThreadManager::SwitchContext(KThread* new_thread) {
auto& timing = kernel.timing;
KThread* previous_thread = GetCurrentThread();
Process* previous_process = nullptr;
// Save context for previous thread
if (previous_thread) {
previous_process = previous_thread->GetOwner();
previous_thread->m_last_running_ticks = cpu->GetTimer().GetTicks();
cpu->SaveContext(previous_thread->m_context);
if (previous_thread->m_status == ThreadStatus::Running) {
// This is only the case when a reschedule is triggered without the current thread
// yielding execution (i.e. an event triggered, system core time-sliced, etc)
ready_queue.push_front(previous_thread->m_current_priority, previous_thread);
previous_thread->m_status = ThreadStatus::Ready;
}
}
// Load context of new thread
if (new_thread) {
ASSERT_MSG(new_thread->m_status == ThreadStatus::Ready,
"Thread must be ready to become running.");
// Cancel any outstanding wakeup events for this thread
timing.UnscheduleEvent(thread_wakeup_event_type, new_thread->m_thread_id);
current_thread = new_thread;
ready_queue.remove(new_thread->m_current_priority, new_thread);
new_thread->m_status = ThreadStatus::Running;
ASSERT(current_thread->GetOwner());
if (previous_process != current_thread->GetOwner()) {
kernel.SetCurrentProcessForCPU(current_thread->GetOwner(), cpu->GetID());
}
cpu->LoadContext(new_thread->m_context);
// CP15 user read-only thread register points at the thread's TLS.
cpu->SetCP15Register(CP15_THREAD_URO, new_thread->GetTLSAddress());
} else {
current_thread = nullptr;
// Note: We do not reset the current process and current page table when idling because
// technically we haven't changed processes, our threads are just paused.
}
}
// Picks the next thread to run. If the current thread is still Running it is
// only displaced by a strictly higher-priority ready thread; otherwise the
// first schedulable ready thread is taken. Threads whose m_can_schedule flag
// is clear are parked aside and re-queued before returning.
KThread* ThreadManager::PopNextReadyThread() {
KThread* next = nullptr;
KThread* thread = GetCurrentThread();
if (thread && thread->m_status == ThreadStatus::Running) {
do {
// We have to do better than the current thread.
// This call returns null when that's not possible.
next = ready_queue.pop_first_better(thread->m_current_priority);
if (!next) {
// Otherwise just keep going with the current thread
next = thread;
break;
} else if (!next->m_can_schedule)
unscheduled_ready_queue.push_back(next);
} while (!next->m_can_schedule);
} else {
do {
next = ready_queue.pop_first();
if (next && !next->m_can_schedule)
unscheduled_ready_queue.push_back(next);
} while (next && !next->m_can_schedule);
}
// Put any temporarily unscheduled threads back into the ready queue.
while (!unscheduled_ready_queue.empty()) {
auto t = std::move(unscheduled_ready_queue.back());
ready_queue.push_back(t->m_current_priority, t);
unscheduled_ready_queue.pop_back();
}
return next;
}
// Marks the current thread as sleeping; the caller is responsible for
// scheduling a wakeup (see KThread::WakeAfterDelay) and rescheduling.
void ThreadManager::WaitCurrentThread_Sleep() {
KThread* thread = GetCurrentThread();
thread->m_status = ThreadStatus::WaitSleep;
}
// Stops the current thread, drops it from the thread list, and requests a
// reschedule so another thread is picked.
void ThreadManager::ExitCurrentThread() {
current_thread->Stop();
std::erase(thread_list, current_thread);
kernel.PrepareReschedule();
}
// Forcefully terminates every thread belonging to `process` on this core,
// terminating the currently running thread last (via ExitCurrentThread) so
// iteration is not invalidated mid-walk.
void ThreadManager::TerminateProcessThreads(Process* process) {
auto iter = thread_list.begin();
while (iter != thread_list.end()) {
auto& thread = *iter;
// Skip threads of other processes, and the current thread (handled below).
if (thread == current_thread || thread->GetOwner() != process) {
iter++;
continue;
}
if (thread->m_status != ThreadStatus::WaitSynchAny &&
thread->m_status != ThreadStatus::WaitSynchAll) {
// TODO: How does the real kernel handle non-waiting threads?
LOG_WARNING(Kernel, "Terminating non-waiting thread {}", thread->m_thread_id);
}
thread->Stop();
iter = thread_list.erase(iter);
}
// Kill the current thread last, if applicable.
if (current_thread != nullptr && current_thread->GetOwner() == process) {
ExitCurrentThread();
}
}
// Timing-event handler fired when a thread's wait times out: runs the wakeup
// callback with Timeout, detaches the thread from all wait lists, and resumes it.
// NOTE(review): wakeup_callback_table.at() throws std::out_of_range when the id
// is absent entirely; the nullptr check below only covers a present-but-null
// entry — confirm stale events are always unscheduled before table erasure.
void ThreadManager::ThreadWakeupCallback(u64 thread_id, s64 cycles_late) {
KThread* thread = wakeup_callback_table.at(thread_id);
if (thread == nullptr) {
LOG_CRITICAL(Kernel, "Callback fired for invalid thread {:08X}", thread_id);
return;
}
if (thread->m_status == ThreadStatus::WaitSynchAny ||
thread->m_status == ThreadStatus::WaitSynchAll ||
thread->m_status == ThreadStatus::WaitArb ||
thread->m_status == ThreadStatus::WaitHleEvent) {
// Invoke the wakeup callback before clearing the wait objects
if (thread->m_wakeup_callback) {
thread->m_wakeup_callback->WakeUp(ThreadWakeupReason::Timeout, thread, nullptr);
}
// Remove the thread from each of its waiting objects' waitlists
for (KSynchronizationObject* object : thread->m_wait_objects) {
object->RemoveWaitingThread(thread);
}
thread->m_wait_objects.clear();
}
thread->ResumeFromWait();
}
// True when at least one thread is queued ready to run on this core.
bool ThreadManager::HaveReadyThreads() {
return ready_queue.get_first() != nullptr;
}
void ThreadManager::Reschedule() {
KThread* cur = GetCurrentThread();
KThread* next = PopNextReadyThread();
if (cur && next) {
LOG_TRACE(Kernel, "context switch {} -> {}", cur->GetObjectId(), next->GetObjectId());
} else if (cur) {
LOG_TRACE(Kernel, "context switch {} -> idle", cur->GetObjectId());
} else if (next) {
LOG_TRACE(Kernel, "context switch idle -> {}", next->GetObjectId());
} else {
LOG_TRACE(Kernel, "context switch idle -> idle, do nothing");
return;
}
SwitchContext(next);
}
KThread::KThread(KernelSystem& kernel) : KAutoObjectWithSlabHeapAndContainer(kernel) {}
KThread::~KThread() = default;
// Tears a thread down: cancels its pending wakeup, removes it from scheduler
// structures, wakes its joiners, detaches it from objects it waited on,
// releases held mutexes, and frees its TLS slot.
void KThread::Stop() {
// Cancel any outstanding wakeup events for this thread
auto& timing = m_kernel.timing;
timing.UnscheduleEvent(m_manager->thread_wakeup_event_type, m_thread_id);
m_manager->wakeup_callback_table.erase(m_thread_id);
// Clean up thread from ready queue
// This is only needed when the thread is termintated forcefully (SVC TerminateProcess)
if (m_status == ThreadStatus::Ready) {
m_manager->ready_queue.remove(m_current_priority, this);
}
// Wake all threads waiting on this thread.
m_status = ThreadStatus::Dead;
this->WakeupAllWaitingThreads();
// Clean up any dangling references in objects that this thread was waiting for
for (KSynchronizationObject* object : m_wait_objects) {
object->RemoveWaitingThread(this);
}
m_wait_objects.clear();
// Release all the mutexes that this thread holds
ReleaseThreadMutexes(this);
// Mark the TLS slot in the thread's page as free.
// TLS addresses encode page and slot within the TLS area.
const u32 tls_page = (m_tls_address - Memory::TLS_AREA_VADDR) / Memory::CITRA_PAGE_SIZE;
const u32 tls_slot = ((m_tls_address - Memory::TLS_AREA_VADDR) % Memory::CITRA_PAGE_SIZE) /
Memory::TLS_ENTRY_SIZE;
m_owner->tls_slots[tls_page].reset(tls_slot);
}
// Schedules this thread's timeout wakeup `nanoseconds` from now.
// nanoseconds == -1 means "wait forever": no event is scheduled.
// thread_safe_mode pins the event to the thread's own core and queues it
// thread-safely; otherwise the event is scheduled on the calling core.
void KThread::WakeAfterDelay(s64 nanoseconds, bool thread_safe_mode) {
// Don't schedule a wakeup if the thread wants to wait forever
if (nanoseconds == -1) {
return;
}
auto& timing = m_kernel.timing;
const size_t core = thread_safe_mode ? m_core_id : std::numeric_limits<std::size_t>::max();
timing.ScheduleEvent(nsToCycles(nanoseconds), m_manager->thread_wakeup_event_type, m_thread_id,
core, thread_safe_mode);
}
// Transitions a waiting/dormant thread back to Ready and requests a
// reschedule. Redundant wakeups of an already-Ready thread are ignored;
// resuming Running or Dead threads is a logic error (debug-asserted).
void KThread::ResumeFromWait() {
ASSERT_MSG(m_wait_objects.empty(), "Thread is waking up while waiting for objects");
switch (m_status) {
case ThreadStatus::WaitSynchAll:
case ThreadStatus::WaitSynchAny:
case ThreadStatus::WaitHleEvent:
case ThreadStatus::WaitArb:
case ThreadStatus::WaitSleep:
case ThreadStatus::WaitIPC:
case ThreadStatus::Dormant:
break;
case ThreadStatus::Ready:
// The thread's wakeup callback must have already been cleared when the thread was first
// awoken.
ASSERT(m_wakeup_callback == nullptr);
// If the thread is waiting on multiple wait objects, it might be awoken more than once
// before actually resuming. We can ignore subsequent wakeups if the thread status has
// already been set to ThreadStatus::Ready.
return;
case ThreadStatus::Running:
DEBUG_ASSERT_MSG(false, "Thread with object id {} has already resumed.", GetObjectId());
return;
case ThreadStatus::Dead:
// This should never happen, as threads must complete before being stopped.
DEBUG_ASSERT_MSG(false, "Thread with object id {} cannot be resumed because it's DEAD.",
GetObjectId());
return;
}
// Mark as ready and reschedule.
m_wakeup_callback = nullptr;
m_manager->ready_queue.push_back(m_current_priority, this);
m_status = ThreadStatus::Ready;
m_kernel.PrepareReschedule();
}
/**
 * Resets a thread context, making it ready to be scheduled and run by the CPU
 * @param context Thread context to reset
 * @param stack_top Address of the top of the stack
 * @param entry_point Address of entry point for execution
 * @param arg User argument for thread
 */
static void ResetThreadContext(Core::ARM_Interface::ThreadContext& context, u32 stack_top,
u32 entry_point, u32 arg) {
// AAPCS: first argument goes in r0.
context.cpu_registers[0] = arg;
context.SetProgramCounter(entry_point);
context.SetStackPointer(stack_top);
context.cpsr = USER32MODE | ((entry_point & 1) << 5); // Usermode and THUMB mode
// Default VFP state (default-NaN, flush-to-zero, round-to-zero, IXC set).
context.fpscr = FPSCR_DEFAULT_NAN | FPSCR_FLUSH_TO_ZERO | FPSCR_ROUND_TOZERO | FPSCR_IXC;
}
// Initializes a freshly allocated thread: validates priority/core, references
// the owner, registers with the target core's ThreadManager, allocates TLS,
// resets the CPU context, and queues the thread as Ready.
// Returns ResultOutOfRange / ResultOutOfRangeKernel on bad arguments, or the
// TLS allocation error.
Result KThread::Initialize(std::string name, VAddr entry_point, u32 priority, u32 arg,
s32 processor_id, VAddr stack_top, Process* owner_process) {
R_UNLESS(priority <= ThreadPrioLowest, ResultOutOfRange);
R_UNLESS(processor_id <= ThreadProcessorIdMax, ResultOutOfRangeKernel);
// Open a reference to our owner process
m_owner = owner_process;
m_owner->Open();
// Set last running ticks.
auto& timing = m_kernel.timing;
m_last_running_ticks = timing.GetTimer(processor_id)->GetTicks();
// Set member variables.
m_thread_id = m_kernel.NewThreadId();
m_status = ThreadStatus::Ready;
m_entry_point = entry_point;
m_stack_top = stack_top;
m_nominal_priority = m_current_priority = priority;
m_processor_id = processor_id;
m_wait_objects.clear();
m_wait_address = 0;
m_name = std::move(name);
// Register thread in the thread manager.
auto& thread_manager = m_kernel.GetThreadManager(processor_id);
m_manager = std::addressof(thread_manager);
m_manager->thread_list.push_back(this);
m_manager->ready_queue.prepare(priority);
m_manager->wakeup_callback_table[m_thread_id] = this;
// Allocate the thread local region.
R_TRY(m_owner->AllocateThreadLocalStorage(std::addressof(m_tls_address)));
// Reset the thread context.
ResetThreadContext(m_context, stack_top, entry_point, arg);
// Mark thread as ready and return
m_manager->ready_queue.push_back(m_current_priority, this);
return ResultSuccess;
}
// Sets both nominal and current priority, relocating the thread inside the
// ready queue when it is currently Ready.
void KThread::SetPriority(u32 priority) {
ASSERT_MSG(priority <= ThreadPrioLowest && priority >= ThreadPrioHighest,
"Invalid priority value.");
// If thread was ready, adjust queues
if (m_status == ThreadStatus::Ready) {
m_manager->ready_queue.move(this, m_current_priority, priority);
} else {
m_manager->ready_queue.prepare(priority);
}
// Set the priority
m_nominal_priority = m_current_priority = priority;
}
// Recomputes the effective priority from held mutexes (priority inheritance):
// the best (numerically lowest) priority among the nominal priority and all
// held-mutex priorities wins.
void KThread::UpdatePriority() {
u32 best_priority = m_nominal_priority;
for (KMutex* mutex : m_held_mutexes) {
if (mutex->GetPriority() < best_priority) {
best_priority = mutex->GetPriority();
}
}
this->BoostPriority(best_priority);
}
// Applies a temporary (inherited) priority without touching the nominal one.
void KThread::BoostPriority(u32 priority) {
// If thread was ready, adjust queues
if (m_status == ThreadStatus::Ready) {
m_manager->ready_queue.move(this, m_current_priority, priority);
} else {
m_manager->ready_queue.prepare(priority);
}
m_current_priority = priority;
}
// Called after destruction: returns the Thread resource count to the owner
// process (passed as an opaque pointer) and drops our reference to it.
void KThread::PostDestroy(uintptr_t arg) {
    auto* const owner_process = reinterpret_cast<Process*>(arg);
    if (owner_process == nullptr) {
        return;
    }
    owner_process->ReleaseResource(ResourceLimitType::Thread, 1);
    owner_process->Close();
}
// Writes the syscall result into the thread's saved r0 (the return register).
void KThread::SetWaitSynchronizationResult(Result result) {
m_context.cpu_registers[0] = result.raw;
}
// Writes the signaled-object index into the thread's saved r1 (second output).
void KThread::SetWaitSynchronizationOutput(s32 output) {
m_context.cpu_registers[1] = output;
}
// Returns the index of `object` in the wait list. The reverse search means
// the LAST occurrence wins when the same object was passed multiple times.
s32 KThread::GetWaitObjectIndex(const KSynchronizationObject* object) const {
ASSERT_MSG(!m_wait_objects.empty(), "Thread is not waiting for anything");
const auto match = std::find(m_wait_objects.rbegin(), m_wait_objects.rend(), object);
return static_cast<s32>(std::distance(match, m_wait_objects.rend()) - 1);
}
// Address of this thread's IPC command buffer, located 0x80 bytes into its TLS.
VAddr KThread::GetCommandBufferAddress() const {
// Offset from the start of TLS at which the IPC command buffer begins.
constexpr u32 command_header_offset = 0x80;
return GetTLSAddress() + command_header_offset;
}
// Boost.Serialization hook for save states; serializes scheduler-visible
// thread state on top of the KSynchronizationObject base.
template <class Archive>
void KThread::serialize(Archive& ar, const unsigned int file_version) {
ar& boost::serialization::base_object<KSynchronizationObject>(*this);
ar& m_context;
ar& m_thread_id;
ar& m_status;
ar& m_entry_point;
ar& m_stack_top;
ar& m_nominal_priority;
ar& m_current_priority;
ar& m_last_running_ticks;
ar& m_processor_id;
ar& m_tls_address;
ar& m_held_mutexes;
ar& m_pending_mutexes;
ar& m_owner;
ar& m_wait_objects;
ar& m_wait_address;
ar& m_name;
ar& m_wakeup_callback;
}
SERIALIZE_IMPL(KThread)
} // namespace Kernel

View File

@ -1,122 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/archives.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_timer.h"
SERIALIZE_EXPORT_IMPL(Kernel::KTimer)
namespace Kernel {
// Binds the timer to the kernel's shared TimerManager.
KTimer::KTimer(KernelSystem& kernel)
: KAutoObjectWithSlabHeapAndContainer(kernel), m_timer_manager(kernel.GetTimerManager()) {}
KTimer::~KTimer() = default;
// Sets up the timer: references the owner process, records the reset
// behavior, and registers a unique callback id with the TimerManager.
void KTimer::Initialize(Process* owner, ResetType reset_type) {
// Open a reference to the owner process.
owner->Open();
// Set member variables.
m_owner = owner;
m_reset_type = reset_type;
// Register to TimerManager
m_callback_id = m_timer_manager.GetNextCallbackId();
m_timer_manager.Register(m_callback_id, this);
}
// Tears the timer down: cancels any pending expiry event, then removes the
// callback-table entry so TimerCallback() can no longer reach this object.
void KTimer::Finalize() {
this->Cancel();
m_timer_manager.Unregister(m_callback_id);
}
// Called after destruction: gives the Timer resource count back to the owner
// process (passed as an opaque pointer) and drops the reference taken in
// Initialize(). Unlike other PostDestroy hooks, the owner is assumed valid.
void KTimer::PostDestroy(uintptr_t arg) {
    auto* const owner_process = reinterpret_cast<Process*>(arg);
    owner_process->ReleaseResource(ResourceLimitType::Timer, 1);
    owner_process->Close();
}
// A thread must wait until the timer has been signaled.
bool KTimer::ShouldWait(const KThread* thread) const {
return !m_signaled;
}
// Consumes the signal on acquisition; OneShot timers auto-reset here, while
// Sticky timers stay signaled (Pulse resets in WakeupAllWaitingThreads).
void KTimer::Acquire(KThread* thread) {
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
if (m_reset_type == ResetType::OneShot) {
m_signaled = false;
}
}
// Arms the timer: fires once after `initial` ns, then every `interval` ns
// (interval == 0 means one-shot). initial == 0 signals immediately instead of
// scheduling. Any previously armed expiry is cancelled first.
void KTimer::Set(s64 initial, s64 interval) {
// Ensure we get rid of any previous scheduled event
this->Cancel();
// Set member variables
m_initial_delay = initial;
m_interval_delay = interval;
if (initial == 0) {
// Immediately invoke the callback
this->Signal(0);
} else {
auto& timing = m_kernel.timing;
timing.ScheduleEvent(nsToCycles(initial), m_timer_manager.GetEventType(), m_callback_id);
}
}
// Unschedules this timer's pending expiry event, if any.
void KTimer::Cancel() {
auto& timing = m_kernel.timing;
timing.UnscheduleEvent(m_timer_manager.GetEventType(), m_callback_id);
}
// Clears the signaled state without affecting the scheduled expiry.
void KTimer::Clear() {
m_signaled = false;
}
// Wakes all waiters, then auto-clears the signal for Pulse-type timers so a
// pulse only releases the threads waiting at signal time.
void KTimer::WakeupAllWaitingThreads() {
KSynchronizationObject::WakeupAllWaitingThreads();
if (m_reset_type == ResetType::Pulse) {
m_signaled = false;
}
}
// Expiry handler: marks the timer signaled, wakes waiters, and — for periodic
// timers — reschedules the next expiry, compensating for how late this one fired.
void KTimer::Signal(s64 cycles_late) {
LOG_TRACE(Kernel, "Timer {} fired", GetObjectId());
m_signaled = true;
// Resume all waiting threads
this->WakeupAllWaitingThreads();
// Reschedule the timer with the interval delay
if (m_interval_delay != 0) {
auto& timing = m_kernel.timing;
const s64 cycles_into_future = nsToCycles(m_interval_delay) - cycles_late;
timing.ScheduleEvent(cycles_into_future, m_timer_manager.GetEventType(), m_callback_id);
}
}
// Core-timing callback: resolves `callback_id` back to its KTimer and fires
// it. `cycles_late` is forwarded so periodic timers can stay on schedule.
void TimerManager::TimerCallback(u64 callback_id, s64 cycles_late) {
    // Look the timer up without throwing: the previous
    // `m_timer_callback_table.at(callback_id)` raised std::out_of_range for an
    // unknown id, so the ASSERT below could never report the invalid timer.
    const auto it = m_timer_callback_table.find(callback_id);
    ASSERT_MSG(it != m_timer_callback_table.end() && it->second != nullptr,
               "Callback fired for invalid timer {:016x}", callback_id);
    it->second->Signal(cycles_late);
}
// Registers one core-timing event type shared by every KTimer; individual
// timers are distinguished by the u64 userdata (their callback id).
TimerManager::TimerManager(Core::Timing& timing) : m_timing(timing) {
    m_timer_callback_event_type =
        timing.RegisterEvent("TimerCallback", [this](u64 thread_id, s64 cycle_late) {
            // NOTE(review): despite the parameter name, `thread_id` carries the
            // timer callback id (KTimer::Set schedules with m_callback_id).
            this->TimerCallback(thread_id, cycle_late);
        });
}
TimerManager::~TimerManager() = default;
} // namespace Kernel

View File

@ -1,133 +0,0 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <boost/serialization/string.hpp>
#include <boost/serialization/unordered_map.hpp>
#include "common/common_types.h"
#include "core/core_timing.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Core {
class Timing;
}
namespace Kernel {
class KTimer;
/// Owns the single core-timing event type shared by all KTimer objects and
/// maps per-timer callback ids back to their KTimer when an event fires.
class TimerManager {
public:
    explicit TimerManager(Core::Timing& timing);
    ~TimerManager();

    /// Returns a fresh, unique callback id for a newly created timer.
    u64 GetNextCallbackId() {
        // BUG FIX: this previously read `return +m_next_timer_callback_id;`
        // (unary plus), which never advanced the counter — every timer got the
        // same id and Register() silently overwrote earlier entries.
        return ++m_next_timer_callback_id;
    }

    /// Shared timing event type; the scheduled event's userdata is the id.
    Core::TimingEventType* GetEventType() {
        return m_timer_callback_event_type;
    }

    /// Makes `timer` reachable from the timing callback via `callback_id`.
    void Register(u64 callback_id, KTimer* timer) {
        m_timer_callback_table[callback_id] = timer;
    }

    /// Drops the table entry; called when a timer is finalized.
    void Unregister(u64 callback_id) {
        m_timer_callback_table.erase(callback_id);
    }

private:
    /// Invoked by the core timing subsystem when a scheduled timer expires.
    void TimerCallback(u64 callback_id, s64 cycles_late);

private:
    [[maybe_unused]] Core::Timing& m_timing;
    Core::TimingEventType* m_timer_callback_event_type{};
    u64 m_next_timer_callback_id{};
    std::unordered_map<u64, KTimer*> m_timer_callback_table;

    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const u32 file_version) {
        ar& m_next_timer_callback_id;
        ar& m_timer_callback_table;
    }
};
class ResourceLimit;
enum class ResetType : u32;
/// Kernel timer synchronization object: fires after an initial delay and
/// optionally re-fires on an interval, releasing waiting threads each time.
/// Reset behavior (OneShot/Sticky/Pulse) controls when the signal is cleared.
class KTimer final : public KAutoObjectWithSlabHeapAndContainer<KTimer, KSynchronizationObject> {
    KERNEL_AUTOOBJECT_TRAITS(KTimer, KSynchronizationObject);
public:
    explicit KTimer(KernelSystem& kernel);
    ~KTimer() override;
    /// Binds the timer to its owner process and registers it with TimerManager.
    void Initialize(Process* owner, ResetType reset_type);
    void Finalize() override;
    /// The owner pointer is forwarded to PostDestroy() for resource release.
    uintptr_t GetPostDestroyArgument() const override {
        return reinterpret_cast<uintptr_t>(m_owner);
    }
    static void PostDestroy(uintptr_t arg);
    Process* GetOwner() const override {
        return m_owner;
    }
    ResetType GetResetType() const {
        return m_reset_type;
    }
    /// Delay in nanoseconds before the first expiration (0 fires immediately).
    u64 GetInitialDelay() const {
        return m_initial_delay;
    }
    /// Re-fire period in nanoseconds (0 = expire only once).
    u64 GetIntervalDelay() const {
        return m_interval_delay;
    }
    /// (Re)arms the timer, cancelling any previously scheduled expiration.
    void Set(s64 initial, s64 interval);
    /// Marks the timer signaled and wakes waiters; `cycles_late` keeps
    /// periodic timers from drifting.
    void Signal(s64 cycles_late);
    /// Unschedules any pending expiration event.
    void Cancel();
    /// Clears the signaled state only.
    void Clear();
    void WakeupAllWaitingThreads() override;
    bool ShouldWait(const KThread* thread) const override;
    void Acquire(KThread* thread) override;
private:
    TimerManager& m_timer_manager;
    Process* m_owner{};
    ResetType m_reset_type{};
    u64 m_initial_delay{};
    u64 m_interval_delay{};
    bool m_signaled{};
    // Id used to find this timer from the shared core-timing callback.
    u64 m_callback_id{};
    friend class KernelSystem;
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& ar, const u32 file_version) {
        ar& boost::serialization::base_object<KSynchronizationObject>(*this);
        ar& m_owner;
        ar& m_reset_type;
        ar& m_initial_delay;
        ar& m_interval_delay;
        ar& m_signaled;
        ar& m_callback_id;
    }
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KTimer)
CONSTRUCT_KERNEL_OBJECT(Kernel::KTimer)

View File

@ -6,27 +6,18 @@
#include <boost/serialization/unordered_map.hpp>
#include <boost/serialization/vector.hpp>
#include "common/archives.h"
#include "common/serialization/atomic.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/config_mem.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/ipc_debugger/recorder.h"
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_auto_object_container.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_linked_list.h"
#include "core/hle/kernel/k_mutex.h"
#include "core/hle/kernel/k_object_name.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_semaphore.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_slab_heap.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_timer.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/shared_page.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/timer.h"
SERIALIZE_EXPORT_IMPL(Kernel::New3dsHwCapabilities)
@ -40,9 +31,6 @@ KernelSystem::KernelSystem(Memory::MemorySystem& memory, Core::Timing& timing,
: memory(memory), timing(timing),
prepare_reschedule_callback(std::move(prepare_reschedule_callback)), memory_mode(memory_mode),
n3ds_hw_caps(n3ds_hw_caps) {
global_object_list_container = std::make_unique<KAutoObjectWithListContainer>(*this);
object_name_global_data = std::make_unique<KObjectNameGlobalData>(*this);
slab_heap_container = std::make_unique<SlabHeapContainer>();
std::generate(memory_regions.begin(), memory_regions.end(),
[] { return std::make_shared<MemoryRegionInfo>(); });
MemoryInit(memory_mode, n3ds_hw_caps.memory_mode, override_init_time);
@ -75,16 +63,16 @@ u32 KernelSystem::GenerateObjectID() {
return next_object_id++;
}
Process* KernelSystem::GetCurrentProcess() const {
std::shared_ptr<Process> KernelSystem::GetCurrentProcess() const {
return current_process;
}
void KernelSystem::SetCurrentProcess(Process* process) {
void KernelSystem::SetCurrentProcess(std::shared_ptr<Process> process) {
current_process = process;
SetCurrentMemoryPageTable(process->vm_manager.page_table);
}
void KernelSystem::SetCurrentProcessForCPU(Process* process, u32 core_id) {
void KernelSystem::SetCurrentProcessForCPU(std::shared_ptr<Process> process, u32 core_id) {
if (current_cpu->GetID() == core_id) {
current_process = process;
SetCurrentMemoryPageTable(process->vm_manager.page_table);
@ -163,12 +151,12 @@ const IPCDebugger::Recorder& KernelSystem::GetIPCRecorder() const {
return *ipc_recorder;
}
u32 KernelSystem::NewThreadId() {
return next_thread_id++;
void KernelSystem::AddNamedPort(std::string name, std::shared_ptr<ClientPort> port) {
named_ports.emplace(std::move(name), std::move(port));
}
u32 KernelSystem::NewProcessId() {
return ++next_process_id;
u32 KernelSystem::NewThreadId() {
return next_thread_id++;
}
void KernelSystem::ResetThreadIDs() {
@ -178,10 +166,11 @@ void KernelSystem::ResetThreadIDs() {
template <class Archive>
void KernelSystem::serialize(Archive& ar, const unsigned int) {
ar& memory_regions;
ar& named_ports;
// current_cpu set externally
// NB: subsystem references and prepare_reschedule_callback are constant
ar&* resource_limits.get();
// ar& next_object_id;
ar& next_object_id;
ar&* timer_manager.get();
ar& next_process_id;
ar& process_list;
@ -208,136 +197,6 @@ void KernelSystem::serialize(Archive& ar, const unsigned int) {
}
}
}
// Tracks a live kernel object globally; per the header, this exists purely to
// detect leaked objects after emulation shutdown.
void KernelSystem::RegisterKernelObject(KAutoObject* object) {
    registered_objects.insert(object);
}
// Removes an object registered via RegisterKernelObject once it is destroyed
// during the current emulation session.
void KernelSystem::UnregisterKernelObject(KAutoObject* object) {
    registered_objects.erase(object);
}
// Constexpr counts.
// Per-type slab capacities carved out of one SlabHeapTotalSize buffer below.
// NOTE(review): the counts presumably mirror the real kernel's per-type object
// limits — confirm against hardware documentation. Commented-out entries are
// object types not yet backed by slab heaps.
constexpr size_t SlabHeapTotalSize = 0x450000;
constexpr size_t SlabCountKProcess = 47;
constexpr size_t SlabCountKThread = 300;
constexpr size_t SlabCountKEvent = 315;
constexpr size_t SlabCountKMutex = 85;
constexpr size_t SlabCountKSemaphore = 83;
constexpr size_t SlabCountKTimer = 60;
constexpr size_t SlabCountKPort = 153;
constexpr size_t SlabCountKSharedMemory = 63;
constexpr size_t SlabCountKSession = 345;
constexpr size_t SlabCountKAddressArbiter = 51;
constexpr size_t SlabCountKObjectName = 7;
constexpr size_t SlabCountKResourceLimit = 5;
// constexpr size_t SlabCountKDebug = 3;
constexpr size_t SlabCountKLinkedListNode = 4273;
// constexpr size_t SlabCountKBlockInfo = 601;
// constexpr size_t SlabCountKMemoryBlock = 1723;
// Carves a single contiguous byte buffer into fixed-capacity slab heaps, one
// per kernel object type. Each sub-heap consumes `count * sizeof(T)` bytes and
// `memory` is advanced past it before the next Initialize() call, so the
// initialization order below determines the layout.
struct KernelSystem::SlabHeapContainer {
    SlabHeapContainer() {
        // TODO: Allocate slab heap on FCRAM
        storage.resize(SlabHeapTotalSize);
        u8* memory = storage.data();
        event.Initialize(memory, SlabCountKEvent * sizeof(KEvent));
        memory += SlabCountKEvent * sizeof(KEvent);
        mutex.Initialize(memory, SlabCountKMutex * sizeof(KMutex));
        memory += SlabCountKMutex * sizeof(KMutex);
        semaphore.Initialize(memory, SlabCountKSemaphore * sizeof(KSemaphore));
        memory += SlabCountKSemaphore * sizeof(KSemaphore);
        timer.Initialize(memory, SlabCountKTimer * sizeof(KTimer));
        memory += SlabCountKTimer * sizeof(KTimer);
        process.Initialize(memory, SlabCountKProcess * sizeof(Process));
        memory += SlabCountKProcess * sizeof(Process);
        thread.Initialize(memory, SlabCountKThread * sizeof(KThread));
        memory += SlabCountKThread * sizeof(KThread);
        port.Initialize(memory, SlabCountKPort * sizeof(KPort));
        memory += SlabCountKPort * sizeof(KPort);
        shared_memory.Initialize(memory, SlabCountKSharedMemory * sizeof(KSharedMemory));
        memory += SlabCountKSharedMemory * sizeof(KSharedMemory);
        session.Initialize(memory, SlabCountKSession * sizeof(KSession));
        memory += SlabCountKSession * sizeof(KSession);
        resource_limit.Initialize(memory, SlabCountKResourceLimit * sizeof(KResourceLimit));
        memory += SlabCountKResourceLimit * sizeof(KResourceLimit);
        address_arbiter.Initialize(memory, SlabCountKAddressArbiter * sizeof(KAddressArbiter));
        memory += SlabCountKAddressArbiter * sizeof(KAddressArbiter);
        linked_list_node.Initialize(memory, SlabCountKLinkedListNode * sizeof(KLinkedListNode));
        memory += SlabCountKLinkedListNode * sizeof(KLinkedListNode);
        object_name.Initialize(memory, SlabCountKObjectName * sizeof(KObjectName));
        memory += SlabCountKObjectName * sizeof(KObjectName);
        // Robustness: the per-type counts above are maintained by hand, so
        // guard against the sub-heaps outgrowing the backing buffer.
        ASSERT_MSG(memory <= storage.data() + SlabHeapTotalSize,
                   "slab heap sub-allocations exceed SlabHeapTotalSize");
    }
    // Backing storage for every slab heap below.
    std::vector<u8> storage;
    KSlabHeap<KEvent> event;
    KSlabHeap<KMutex> mutex;
    KSlabHeap<KSemaphore> semaphore;
    KSlabHeap<KTimer> timer;
    KSlabHeap<Process> process;
    KSlabHeap<KThread> thread;
    KSlabHeap<KPort> port;
    KSlabHeap<KSharedMemory> shared_memory;
    KSlabHeap<KSession> session;
    KSlabHeap<KResourceLimit> resource_limit;
    KSlabHeap<KAddressArbiter> address_arbiter;
    KSlabHeap<KLinkedListNode> linked_list_node;
    KSlabHeap<KObjectName> object_name;
};
// Compile-time dispatch from a kernel object type to its dedicated slab heap.
// Exactly one branch survives instantiation; an unsupported type reaches
// UNREACHABLE().
template <typename T>
KSlabHeap<T>& KernelSystem::SlabHeap() {
    if constexpr (std::is_same_v<T, Process>) {
        return slab_heap_container->process;
    } else if constexpr (std::is_same_v<T, KThread>) {
        return slab_heap_container->thread;
    } else if constexpr (std::is_same_v<T, KEvent>) {
        return slab_heap_container->event;
    } else if constexpr (std::is_same_v<T, KMutex>) {
        return slab_heap_container->mutex;
    } else if constexpr (std::is_same_v<T, KSemaphore>) {
        return slab_heap_container->semaphore;
    } else if constexpr (std::is_same_v<T, KTimer>) {
        return slab_heap_container->timer;
    } else if constexpr (std::is_same_v<T, KPort>) {
        return slab_heap_container->port;
    } else if constexpr (std::is_same_v<T, KSharedMemory>) {
        return slab_heap_container->shared_memory;
    } else if constexpr (std::is_same_v<T, KSession>) {
        return slab_heap_container->session;
    } else if constexpr (std::is_same_v<T, KResourceLimit>) {
        return slab_heap_container->resource_limit;
    } else if constexpr (std::is_same_v<T, KAddressArbiter>) {
        return slab_heap_container->address_arbiter;
    } else if constexpr (std::is_same_v<T, KLinkedListNode>) {
        return slab_heap_container->linked_list_node;
    } else if constexpr (std::is_same_v<T, KObjectName>) {
        return slab_heap_container->object_name;
    }
    UNREACHABLE();
}
// Accessor for the global container of list-tracked auto objects
// (created in the KernelSystem constructor).
KAutoObjectWithListContainer& KernelSystem::ObjectListContainer() {
    return *global_object_list_container;
}
// Accessor for KObjectName's kernel-wide bookkeeping state
// (created in the KernelSystem constructor).
KObjectNameGlobalData& KernelSystem::ObjectNameGlobalData() {
    return *object_name_global_data;
}
// Explicit instantiations of SlabHeap<T> for every supported kernel object
// type, so the template definition can remain in this translation unit.
template KSlabHeap<KEvent>& KernelSystem::SlabHeap();
template KSlabHeap<KPort>& KernelSystem::SlabHeap();
template KSlabHeap<Process>& KernelSystem::SlabHeap();
template KSlabHeap<KResourceLimit>& KernelSystem::SlabHeap();
template KSlabHeap<KSession>& KernelSystem::SlabHeap();
template KSlabHeap<KSharedMemory>& KernelSystem::SlabHeap();
template KSlabHeap<KThread>& KernelSystem::SlabHeap();
template KSlabHeap<KObjectName>& KernelSystem::SlabHeap();
template KSlabHeap<KAddressArbiter>& KernelSystem::SlabHeap();
template KSlabHeap<KSemaphore>& KernelSystem::SlabHeap();
template KSlabHeap<KMutex>& KernelSystem::SlabHeap();
template KSlabHeap<KLinkedListNode>& KernelSystem::SlabHeap();
template KSlabHeap<KTimer>& KernelSystem::SlabHeap();
SERIALIZE_IMPL(KernelSystem)
template <class Archive>

View File

@ -9,11 +9,14 @@
#include <functional>
#include <memory>
#include <mutex>
#include <unordered_set>
#include <span>
#include <string>
#include <unordered_map>
#include <vector>
#include "common/bit_field.h"
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/memory.h"
#include "core/hle/result.h"
#include "core/memory.h"
namespace ConfigMem {
@ -39,18 +42,30 @@ class Recorder;
namespace Kernel {
class AddressArbiter;
class Event;
class Mutex;
class CodeSet;
class Process;
class KThread;
class Thread;
class Semaphore;
class Timer;
class ClientPort;
class ServerPort;
class ClientSession;
class ServerSession;
class ResourceLimitList;
class SharedMemory;
class ThreadManager;
class TimerManager;
class VMManager;
struct AddressMapping;
class KAutoObject;
class KObjectName;
class KObjectNameGlobalData;
enum class ResetType {
OneShot,
Sticky,
Pulse,
};
/// Permissions for mapped shared memory blocks
enum class MemoryPermission : u32 {
@ -64,7 +79,6 @@ enum class MemoryPermission : u32 {
ReadWriteExecute = (Read | Write | Execute),
DontCare = (1u << 28)
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryPermission)
enum class MemoryRegion : u16 {
APPLICATION = 1,
@ -115,10 +129,6 @@ private:
friend class boost::serialization::access;
};
template <typename T>
class KSlabHeap;
class KAutoObjectWithListContainer;
class KernelSystem {
public:
explicit KernelSystem(Memory::MemorySystem& memory, Core::Timing& timing,
@ -127,44 +137,142 @@ public:
u64 override_init_time = 0);
~KernelSystem();
using PortPair = std::pair<std::shared_ptr<ServerPort>, std::shared_ptr<ClientPort>>;
using SessionPair = std::pair<std::shared_ptr<ServerSession>, std::shared_ptr<ClientSession>>;
/**
* Creates an address arbiter.
*
* @param name Optional name used for debugging.
* @returns The created AddressArbiter.
*/
std::shared_ptr<AddressArbiter> CreateAddressArbiter(std::string name = "Unknown");
/**
* Creates an event
* @param reset_type ResetType describing how to create event
* @param name Optional name of event
*/
std::shared_ptr<Event> CreateEvent(ResetType reset_type, std::string name = "Unknown");
/**
* Creates a mutex.
* @param initial_locked Specifies if the mutex should be locked initially
* @param name Optional name of mutex
* @return Pointer to new Mutex object
*/
std::shared_ptr<Mutex> CreateMutex(bool initial_locked, std::string name = "Unknown");
std::shared_ptr<CodeSet> CreateCodeSet(std::string name, u64 program_id);
std::shared_ptr<Process> CreateProcess(std::shared_ptr<CodeSet> code_set);
/**
* Terminates a process, killing its threads and removing it from the process list.
* @param process Process to terminate.
*/
void TerminateProcess(Process* process);
void TerminateProcess(std::shared_ptr<Process> process);
/**
* Creates and returns a new thread. The new thread is immediately scheduled
* @param name The friendly name desired for the thread
* @param entry_point The address at which the thread should start execution
* @param priority The thread's priority
* @param arg User data to pass to the thread
* @param processor_id The ID(s) of the processors on which the thread is desired to be run
* @param stack_top The address of the thread's stack top
* @param owner_process The parent process for the thread
* @param make_ready If the thread should be put in the ready queue
* @return A shared pointer to the newly created thread
*/
ResultVal<std::shared_ptr<Thread>> CreateThread(std::string name, VAddr entry_point,
u32 priority, u32 arg, s32 processor_id,
VAddr stack_top,
std::shared_ptr<Process> owner_process,
bool make_ready = true);
/**
* Creates a semaphore.
* @param initial_count Number of slots reserved for other threads
* @param max_count Maximum number of slots the semaphore can have
* @param name Optional name of semaphore
* @return The created semaphore
*/
ResultVal<std::shared_ptr<Semaphore>> CreateSemaphore(s32 initial_count, s32 max_count,
std::string name = "Unknown");
/**
* Creates a timer
* @param reset_type ResetType describing how to create the timer
* @param name Optional name of timer
* @return The created Timer
*/
std::shared_ptr<Timer> CreateTimer(ResetType reset_type, std::string name = "Unknown");
/**
* Creates a pair of ServerPort and an associated ClientPort.
*
* @param max_sessions Maximum number of sessions to the port
* @param name Optional name of the ports
* @return The created port tuple
*/
PortPair CreatePortPair(u32 max_sessions, std::string name = "UnknownPort");
/**
* Creates a pair of ServerSession and an associated ClientSession.
* @param name Optional name of the ports.
* @param client_port Optional The ClientPort that spawned this session.
* @return The created session tuple
*/
SessionPair CreateSessionPair(const std::string& name = "Unknown",
std::shared_ptr<ClientPort> client_port = nullptr);
ResourceLimitList& ResourceLimit();
const ResourceLimitList& ResourceLimit() const;
/**
* Creates a shared memory object.
* @param owner_process Process that created this shared memory object.
* @param size Size of the memory block. Must be page-aligned.
* @param permissions Permission restrictions applied to the process which created the block.
* @param other_permissions Permission restrictions applied to other processes mapping the
* block.
* @param address The address from which to map the Shared Memory.
* @param region If the address is 0, the shared memory will be allocated in this region of the
* linear heap.
* @param name Optional object name, used for debugging purposes.
*/
ResultVal<std::shared_ptr<SharedMemory>> CreateSharedMemory(
std::shared_ptr<Process> owner_process, u32 size, MemoryPermission permissions,
MemoryPermission other_permissions, VAddr address = 0,
MemoryRegion region = MemoryRegion::BASE, std::string name = "Unknown");
/**
* Creates a shared memory object from a block of memory managed by an HLE applet.
* @param offset The offset into the heap block that the SharedMemory will map.
* @param size Size of the memory block. Must be page-aligned.
* @param permissions Permission restrictions applied to the process which created the block.
* @param other_permissions Permission restrictions applied to other processes mapping the
* block.
* @param name Optional object name, used for debugging purposes.
*/
std::shared_ptr<SharedMemory> CreateSharedMemoryForApplet(u32 offset, u32 size,
MemoryPermission permissions,
MemoryPermission other_permissions,
std::string name = "Unknown Applet");
u32 GenerateObjectID();
/// Gets the slab heap for the specified kernel object type.
template <typename T>
KSlabHeap<T>& SlabHeap();
KAutoObjectWithListContainer& ObjectListContainer();
/// Gets global data for KObjectName.
KObjectNameGlobalData& ObjectNameGlobalData();
/// Registers all kernel objects with the global emulation state, this is purely for tracking
/// leaks after emulation has been shutdown.
void RegisterKernelObject(KAutoObject* object);
/// Unregisters a kernel object previously registered with RegisterKernelObject when it was
/// destroyed during the current emulation session.
void UnregisterKernelObject(KAutoObject* object);
/// Retrieves a process from the current list of processes.
Process* GetProcessById(u32 process_id) const;
std::shared_ptr<Process> GetProcessById(u32 process_id) const;
const std::vector<Process*>& GetProcessList() const {
std::span<const std::shared_ptr<Process>> GetProcessList() const {
return process_list;
}
Process* GetCurrentProcess() const;
void SetCurrentProcess(Process* process);
void SetCurrentProcessForCPU(Process* process, u32 core_id);
std::shared_ptr<Process> GetCurrentProcess() const;
void SetCurrentProcess(std::shared_ptr<Process> process);
void SetCurrentProcessForCPU(std::shared_ptr<Process> process, u32 core_id);
void SetCurrentMemoryPageTable(std::shared_ptr<Memory::PageTable> page_table);
@ -197,12 +305,14 @@ public:
std::array<std::shared_ptr<MemoryRegionInfo>, 3> memory_regions{};
/// Adds a port to the named port table
void AddNamedPort(std::string name, std::shared_ptr<ClientPort> port);
void PrepareReschedule() {
prepare_reschedule_callback();
}
u32 NewThreadId();
u32 NewProcessId();
void ResetThreadIDs();
@ -218,15 +328,15 @@ public:
return hle_lock;
}
/// Map of named ports managed by the kernel, which can be retrieved using the ConnectToPort
std::unordered_map<std::string, std::shared_ptr<ClientPort>> named_ports;
Core::ARM_Interface* current_cpu = nullptr;
Memory::MemorySystem& memory;
Core::Timing& timing;
// Lists all processes that exist in the current session.
std::vector<Process*> process_list;
/// Sleep main thread of the first ever launched non-sysmodule process.
void SetAppMainThreadExtendedSleep(bool requires_sleep) {
main_thread_extended_sleep = requires_sleep;
@ -257,8 +367,11 @@ private:
// reserved for low-level services
u32 next_process_id = 10;
Process* current_process{};
std::vector<Process*> stored_processes;
// Lists all processes that exist in the current session.
std::vector<std::shared_ptr<Process>> process_list;
std::shared_ptr<Process> current_process;
std::vector<std::shared_ptr<Process>> stored_processes;
std::vector<std::unique_ptr<ThreadManager>> thread_managers;
@ -272,16 +385,6 @@ private:
MemoryMode memory_mode;
New3dsHwCapabilities n3ds_hw_caps;
/// Helper to encapsulate all slab heaps in a single heap allocated container
struct SlabHeapContainer;
std::unique_ptr<SlabHeapContainer> slab_heap_container;
std::unique_ptr<KObjectNameGlobalData> object_name_global_data;
std::unique_ptr<KAutoObjectWithListContainer> global_object_list_container;
std::unordered_set<KAutoObject*> registered_objects;
/*
* Synchronizes access to the internal HLE kernel structures, it is acquired when a guest
* application thread performs a syscall. It should be acquired by any host threads that read or

View File

@ -14,8 +14,8 @@
#include "common/settings.h"
#include "core/core.h"
#include "core/hle/kernel/config_mem.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/memory.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/shared_page.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/hle/result.h"

Some files were not shown because too many files have changed in this diff Show More