Compare commits


1 Commit

SHA1 Message Date
d02cb52b76 kernel: Migrate to KAutoObject 2024-02-04 02:40:03 +02:00
216 changed files with 8380 additions and 7312 deletions

View File

@@ -85,6 +85,8 @@ option(ENABLE_VULKAN "Enables the Vulkan renderer" ON)
option(USE_DISCORD_PRESENCE "Enables Discord Rich Presence" OFF)
CMAKE_DEPENDENT_OPTION(CITRA_ENABLE_BUNDLE_TARGET "Enable the distribution bundling target." ON "NOT ANDROID AND NOT IOS" OFF)
# Compile options
CMAKE_DEPENDENT_OPTION(COMPILE_WITH_DWARF "Add DWARF debugging information" ${IS_DEBUG_BUILD} "MINGW" OFF)
option(ENABLE_LTO "Enable link time optimization" ${DEFAULT_ENABLE_LTO})
@@ -247,26 +249,6 @@ if (ENABLE_QT)
if (ENABLE_QT_TRANSLATION)
find_package(Qt6 REQUIRED COMPONENTS LinguistTools)
endif()
if (NOT DEFINED QT_TARGET_PATH)
# Determine the location of the compile target's Qt.
get_target_property(qtcore_path Qt6::Core LOCATION_Release)
string(FIND "${qtcore_path}" "/bin/" qtcore_path_bin_pos REVERSE)
string(FIND "${qtcore_path}" "/lib/" qtcore_path_lib_pos REVERSE)
if (qtcore_path_bin_pos GREATER qtcore_path_lib_pos)
string(SUBSTRING "${qtcore_path}" 0 ${qtcore_path_bin_pos} QT_TARGET_PATH)
else()
string(SUBSTRING "${qtcore_path}" 0 ${qtcore_path_lib_pos} QT_TARGET_PATH)
endif()
endif()
if (NOT DEFINED QT_HOST_PATH)
# Use the same for host Qt if none is defined.
set(QT_HOST_PATH "${QT_TARGET_PATH}")
endif()
message(STATUS "Using target Qt at ${QT_TARGET_PATH}")
message(STATUS "Using host Qt at ${QT_HOST_PATH}")
endif()
# Use system tsl::robin_map if available (otherwise we fallback to version bundled with dynarmic)
@@ -442,8 +424,7 @@ else()
endif()
# Create target for outputting distributable bundles.
# Not supported for mobile platforms as distributables are built differently.
if (NOT ANDROID AND NOT IOS)
if (CITRA_ENABLE_BUNDLE_TARGET)
include(BundleTarget)
if (ENABLE_SDL2_FRONTEND)
bundle_target(citra)

View File

@@ -2,104 +2,37 @@
if (BUNDLE_TARGET_EXECUTE)
# --- Bundling method logic ---
function(symlink_safe_copy from to)
if (WIN32)
# Use cmake copy for maximum compatibility.
execute_process(COMMAND ${CMAKE_COMMAND} -E copy "${from}" "${to}"
RESULT_VARIABLE cp_result)
else()
# Use native copy to turn symlinks into normal files.
execute_process(COMMAND cp -L "${from}" "${to}"
RESULT_VARIABLE cp_result)
endif()
if (NOT cp_result EQUAL "0")
message(FATAL_ERROR "cp \"${from}\" \"${to}\" failed: ${cp_result}")
endif()
endfunction()
function(bundle_qt executable_path)
if (WIN32)
# Perform standalone bundling first to copy over all used libraries, as windeployqt does not do this.
bundle_standalone("${executable_path}" "${EXECUTABLE_PATH}" "${BUNDLE_LIBRARY_PATHS}")
get_filename_component(executable_parent_dir "${executable_path}" DIRECTORY)
find_program(windeployqt_executable windeployqt6)
# Create a qt.conf file pointing to the app directory.
# This ensures Qt can find its plugins.
file(WRITE "${executable_parent_dir}/qt.conf" "[Paths]\nPrefix = .")
find_program(windeployqt_executable windeployqt6 PATHS "${QT_HOST_PATH}/bin")
find_program(qtpaths_executable qtpaths6 PATHS "${QT_HOST_PATH}/bin")
# TODO: Hack around windeployqt's poor cross-compilation support by
# TODO: making a local copy with a prefix pointing to the target Qt.
if (NOT "${QT_HOST_PATH}" STREQUAL "${QT_TARGET_PATH}")
set(windeployqt_dir "${BINARY_PATH}/windeployqt_copy")
file(MAKE_DIRECTORY "${windeployqt_dir}")
symlink_safe_copy("${windeployqt_executable}" "${windeployqt_dir}/windeployqt.exe")
symlink_safe_copy("${qtpaths_executable}" "${windeployqt_dir}/qtpaths.exe")
symlink_safe_copy("${QT_HOST_PATH}/bin/Qt6Core.dll" "${windeployqt_dir}")
if (EXISTS "${QT_TARGET_PATH}/share")
# Unix-style Qt; we need to wire up the paths manually.
file(WRITE "${windeployqt_dir}/qt.conf" "\
[Paths]\n
Prefix = ${QT_TARGET_PATH}\n \
ArchData = ${QT_TARGET_PATH}/share/qt6\n \
Binaries = ${QT_TARGET_PATH}/bin\n \
Data = ${QT_TARGET_PATH}/share/qt6\n \
Documentation = ${QT_TARGET_PATH}/share/qt6/doc\n \
Headers = ${QT_TARGET_PATH}/include/qt6\n \
Libraries = ${QT_TARGET_PATH}/lib\n \
LibraryExecutables = ${QT_TARGET_PATH}/share/qt6/bin\n \
Plugins = ${QT_TARGET_PATH}/share/qt6/plugins\n \
QmlImports = ${QT_TARGET_PATH}/share/qt6/qml\n \
Translations = ${QT_TARGET_PATH}/share/qt6/translations\n \
")
else()
# Windows-style Qt; the defaults should suffice.
file(WRITE "${windeployqt_dir}/qt.conf" "[Paths]\nPrefix = ${QT_TARGET_PATH}")
endif()
set(windeployqt_executable "${windeployqt_dir}/windeployqt.exe")
set(qtpaths_executable "${windeployqt_dir}/qtpaths.exe")
endif()
file(WRITE "${executable_parent_dir}/qt.conf" "[Paths]\nprefix = .")
message(STATUS "Executing windeployqt for executable ${executable_path}")
execute_process(COMMAND "${windeployqt_executable}" "${executable_path}"
--qtpaths "${qtpaths_executable}"
--no-compiler-runtime --no-system-d3d-compiler --no-opengl-sw --no-translations
--plugindir "${executable_parent_dir}/plugins"
RESULT_VARIABLE windeployqt_result)
if (NOT windeployqt_result EQUAL "0")
message(FATAL_ERROR "windeployqt failed: ${windeployqt_result}")
endif()
--plugindir "${executable_parent_dir}/plugins")
# Remove the FFmpeg multimedia plugin as we don't include FFmpeg.
# We want to use the Windows media plugin instead, which is also included.
file(REMOVE "${executable_parent_dir}/plugins/multimedia/ffmpegmediaplugin.dll")
elseif (APPLE)
get_filename_component(executable_name "${executable_path}" NAME_WE)
find_program(macdeployqt_executable macdeployqt6 PATHS "${QT_HOST_PATH}/bin")
find_program(MACDEPLOYQT_EXECUTABLE macdeployqt6)
message(STATUS "Executing macdeployqt at \"${macdeployqt_executable}\" for executable \"${executable_path}\"")
message(STATUS "Executing macdeployqt for executable ${executable_path}")
execute_process(
COMMAND "${macdeployqt_executable}"
COMMAND "${MACDEPLOYQT_EXECUTABLE}"
"${executable_path}"
"-executable=${executable_path}/Contents/MacOS/${executable_name}"
-always-overwrite
RESULT_VARIABLE macdeployqt_result)
if (NOT macdeployqt_result EQUAL "0")
message(FATAL_ERROR "macdeployqt failed: ${macdeployqt_result}")
endif()
-always-overwrite)
# Bundling libraries can rewrite path information and break code signatures of system libraries.
# Perform an ad-hoc re-signing on the whole app bundle to fix this.
execute_process(COMMAND codesign --deep -fs - "${executable_path}"
RESULT_VARIABLE codesign_result)
if (NOT codesign_result EQUAL "0")
message(FATAL_ERROR "codesign failed: ${codesign_result}")
endif()
execute_process(COMMAND codesign --deep -fs - "${executable_path}")
else()
message(FATAL_ERROR "Unsupported OS for Qt bundling.")
endif()
@@ -111,9 +44,9 @@ if (BUNDLE_TARGET_EXECUTE)
if (enable_qt)
# Find qmake to make sure the plugin uses the right version of Qt.
find_program(qmake_executable qmake6 PATHS "${QT_HOST_PATH}/bin")
find_program(QMAKE_EXECUTABLE qmake6)
set(extra_linuxdeploy_env "QMAKE=${qmake_executable}")
set(extra_linuxdeploy_env "QMAKE=${QMAKE_EXECUTABLE}")
set(extra_linuxdeploy_args --plugin qt)
endif()
@@ -126,11 +59,7 @@ if (BUNDLE_TARGET_EXECUTE)
--executable "${executable_path}"
--icon-file "${source_path}/dist/citra.svg"
--desktop-file "${source_path}/dist/${executable_name}.desktop"
--appdir "${appdir_path}"
RESULT_VARIABLE linuxdeploy_appdir_result)
if (NOT linuxdeploy_appdir_result EQUAL "0")
message(FATAL_ERROR "linuxdeploy failed to create AppDir: ${linuxdeploy_appdir_result}")
endif()
--appdir "${appdir_path}")
if (enable_qt)
set(qt_hook_file "${appdir_path}/apprun-hooks/linuxdeploy-plugin-qt-hook.sh")
@@ -153,11 +82,7 @@ if (BUNDLE_TARGET_EXECUTE)
"OUTPUT=${bundle_dir}/${executable_name}.AppImage"
"${linuxdeploy_executable}"
--output appimage
--appdir "${appdir_path}"
RESULT_VARIABLE linuxdeploy_appimage_result)
if (NOT linuxdeploy_appimage_result EQUAL "0")
message(FATAL_ERROR "linuxdeploy failed to create AppImage: ${linuxdeploy_appimage_result}")
endif()
--appdir "${appdir_path}")
endfunction()
function(bundle_standalone executable_path original_executable_path bundle_library_paths)
@@ -184,23 +109,16 @@ if (BUNDLE_TARGET_EXECUTE)
file(MAKE_DIRECTORY ${lib_dir})
foreach (lib_file IN LISTS resolved_deps)
message(STATUS "Bundling library ${lib_file}")
symlink_safe_copy("${lib_file}" "${lib_dir}")
# Use native copy to turn symlinks into normal files.
execute_process(COMMAND cp -L "${lib_file}" "${lib_dir}")
endforeach()
endif()
# Add libs directory to executable rpath where applicable.
if (APPLE)
execute_process(COMMAND install_name_tool -add_rpath "@loader_path/libs" "${executable_path}"
RESULT_VARIABLE install_name_tool_result)
if (NOT install_name_tool_result EQUAL "0")
message(FATAL_ERROR "install_name_tool failed: ${install_name_tool_result}")
endif()
execute_process(COMMAND install_name_tool -add_rpath "@loader_path/libs" "${executable_path}")
elseif (UNIX)
execute_process(COMMAND patchelf --set-rpath '$ORIGIN/../libs' "${executable_path}"
RESULT_VARIABLE patchelf_result)
if (NOT patchelf_result EQUAL "0")
message(FATAL_ERROR "patchelf failed: ${patchelf_result}")
endif()
execute_process(COMMAND patchelf --set-rpath '$ORIGIN/../libs' "${executable_path}")
endif()
endfunction()
@@ -209,7 +127,7 @@ if (BUNDLE_TARGET_EXECUTE)
set(bundle_dir ${BINARY_PATH}/bundle)
# On Linux, always bundle an AppImage.
if (CMAKE_HOST_SYSTEM_NAME STREQUAL "Linux")
if (DEFINED LINUXDEPLOY)
if (IN_PLACE)
message(FATAL_ERROR "Cannot bundle for Linux in-place.")
endif()
@@ -228,12 +146,14 @@ if (BUNDLE_TARGET_EXECUTE)
if (BUNDLE_QT)
bundle_qt("${bundled_executable_path}")
else()
endif()
if (WIN32 OR NOT BUNDLE_QT)
bundle_standalone("${bundled_executable_path}" "${EXECUTABLE_PATH}" "${BUNDLE_LIBRARY_PATHS}")
endif()
endif()
elseif (BUNDLE_TARGET_DOWNLOAD_LINUXDEPLOY)
# --- linuxdeploy download logic ---
else()
# --- Bundling target creation logic ---
# Downloads and extracts a linuxdeploy component.
function(download_linuxdeploy_component base_dir name executable_name)
@@ -241,7 +161,7 @@ elseif (BUNDLE_TARGET_DOWNLOAD_LINUXDEPLOY)
if (NOT EXISTS "${executable_file}")
message(STATUS "Downloading ${executable_name}")
file(DOWNLOAD
"https://github.com/${name}/releases/download/continuous/${executable_name}"
"https://github.com/linuxdeploy/${name}/releases/download/continuous/${executable_name}"
"${executable_file}" SHOW_PROGRESS)
file(CHMOD "${executable_file}" PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE)
@@ -250,11 +170,7 @@ elseif (BUNDLE_TARGET_DOWNLOAD_LINUXDEPLOY)
message(STATUS "Extracting ${executable_name}")
execute_process(
COMMAND "${executable_file}" --appimage-extract
WORKING_DIRECTORY "${base_dir}"
RESULT_VARIABLE extract_result)
if (NOT extract_result EQUAL "0")
message(FATAL_ERROR "AppImage extract failed: ${extract_result}")
endif()
WORKING_DIRECTORY "${base_dir}")
else()
message(STATUS "Copying ${executable_name}")
file(COPY "${executable_file}" DESTINATION "${base_dir}/squashfs-root/usr/bin/")
@@ -262,102 +178,89 @@ elseif (BUNDLE_TARGET_DOWNLOAD_LINUXDEPLOY)
endif()
endfunction()
# Download plugins first so they don't overwrite linuxdeploy's AppRun file.
download_linuxdeploy_component("${LINUXDEPLOY_PATH}" "linuxdeploy/linuxdeploy-plugin-qt" "linuxdeploy-plugin-qt-${LINUXDEPLOY_ARCH}.AppImage")
download_linuxdeploy_component("${LINUXDEPLOY_PATH}" "darealshinji/linuxdeploy-plugin-checkrt" "linuxdeploy-plugin-checkrt.sh")
download_linuxdeploy_component("${LINUXDEPLOY_PATH}" "linuxdeploy/linuxdeploy" "linuxdeploy-${LINUXDEPLOY_ARCH}.AppImage")
else()
# --- Bundling target creation logic ---
# Creates the base bundle target with common files and pre-bundle steps.
function(create_base_bundle_target)
message(STATUS "Creating base bundle target")
add_custom_target(bundle)
add_custom_command(
TARGET bundle
COMMAND ${CMAKE_COMMAND} -E make_directory "${CMAKE_BINARY_DIR}/bundle/")
add_custom_command(
TARGET bundle
COMMAND ${CMAKE_COMMAND} -E make_directory "${CMAKE_BINARY_DIR}/bundle/dist/")
add_custom_command(
TARGET bundle
COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_SOURCE_DIR}/dist/icon.png" "${CMAKE_BINARY_DIR}/bundle/dist/citra.png")
add_custom_command(
TARGET bundle
COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_SOURCE_DIR}/license.txt" "${CMAKE_BINARY_DIR}/bundle/")
add_custom_command(
TARGET bundle
COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_SOURCE_DIR}/README.md" "${CMAKE_BINARY_DIR}/bundle/")
add_custom_command(
TARGET bundle
COMMAND ${CMAKE_COMMAND} -E copy_directory "${CMAKE_SOURCE_DIR}/dist/scripting" "${CMAKE_BINARY_DIR}/bundle/scripting")
# On Linux, add a command to prepare linuxdeploy and any required plugins before any bundling occurs.
if (CMAKE_HOST_SYSTEM_NAME STREQUAL "Linux")
add_custom_command(
TARGET bundle
COMMAND ${CMAKE_COMMAND}
"-DBUNDLE_TARGET_DOWNLOAD_LINUXDEPLOY=1"
"-DLINUXDEPLOY_PATH=${CMAKE_BINARY_DIR}/externals/linuxdeploy"
"-DLINUXDEPLOY_ARCH=${CMAKE_HOST_SYSTEM_PROCESSOR}"
-P "${CMAKE_SOURCE_DIR}/CMakeModules/BundleTarget.cmake"
WORKING_DIRECTORY "${CMAKE_BINARY_DIR}")
endif()
endfunction()
# Adds a target to the bundle target, packing in required libraries.
# If in_place is true, the bundling will be done in-place as part of the specified target.
function(bundle_target_internal target_name in_place)
# Create base bundle target if it does not exist.
if (NOT in_place AND NOT TARGET bundle)
create_base_bundle_target()
message(STATUS "Creating base bundle target")
add_custom_target(bundle)
add_custom_command(
TARGET bundle
COMMAND ${CMAKE_COMMAND} -E make_directory "${CMAKE_BINARY_DIR}/bundle/")
add_custom_command(
TARGET bundle
COMMAND ${CMAKE_COMMAND} -E make_directory "${CMAKE_BINARY_DIR}/bundle/dist/")
add_custom_command(
TARGET bundle
COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_SOURCE_DIR}/dist/icon.png" "${CMAKE_BINARY_DIR}/bundle/dist/citra.png")
add_custom_command(
TARGET bundle
COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_SOURCE_DIR}/license.txt" "${CMAKE_BINARY_DIR}/bundle/")
add_custom_command(
TARGET bundle
COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_SOURCE_DIR}/README.md" "${CMAKE_BINARY_DIR}/bundle/")
add_custom_command(
TARGET bundle
COMMAND ${CMAKE_COMMAND} -E copy_directory "${CMAKE_SOURCE_DIR}/dist/scripting" "${CMAKE_BINARY_DIR}/bundle/scripting")
endif()
set(bundle_executable_path "$<TARGET_FILE:${target_name}>")
set(BUNDLE_EXECUTABLE_PATH "$<TARGET_FILE:${target_name}>")
if (target_name MATCHES ".*qt")
set(bundle_qt ON)
set(BUNDLE_QT ON)
if (APPLE)
# For Qt targets on Apple, expect an app bundle.
set(bundle_executable_path "$<TARGET_BUNDLE_DIR:${target_name}>")
set(BUNDLE_EXECUTABLE_PATH "$<TARGET_BUNDLE_DIR:${target_name}>")
endif()
else()
set(bundle_qt OFF)
set(BUNDLE_QT OFF)
endif()
# Build a list of library search paths from prefix paths.
foreach(prefix_path IN LISTS CMAKE_FIND_ROOT_PATH CMAKE_PREFIX_PATH CMAKE_SYSTEM_PREFIX_PATH)
foreach(prefix_path IN LISTS CMAKE_PREFIX_PATH CMAKE_SYSTEM_PREFIX_PATH)
if (WIN32)
list(APPEND bundle_library_paths "${prefix_path}/bin")
list(APPEND BUNDLE_LIBRARY_PATHS "${prefix_path}/bin")
endif()
list(APPEND bundle_library_paths "${prefix_path}/lib")
list(APPEND BUNDLE_LIBRARY_PATHS "${prefix_path}/lib")
endforeach()
foreach(library_path IN LISTS CMAKE_SYSTEM_LIBRARY_PATH)
list(APPEND bundle_library_paths "${library_path}")
list(APPEND BUNDLE_LIBRARY_PATHS "${library_path}")
endforeach()
# On Linux, prepare linuxdeploy and any required plugins.
if (CMAKE_SYSTEM_NAME STREQUAL "Linux")
set(LINUXDEPLOY_BASE "${CMAKE_BINARY_DIR}/externals/linuxdeploy")
# Download plugins first so they don't overwrite linuxdeploy's AppRun file.
download_linuxdeploy_component("${LINUXDEPLOY_BASE}" "linuxdeploy-plugin-qt" "linuxdeploy-plugin-qt-x86_64.AppImage")
download_linuxdeploy_component("${LINUXDEPLOY_BASE}" "linuxdeploy-plugin-checkrt" "linuxdeploy-plugin-checkrt-x86_64.sh")
download_linuxdeploy_component("${LINUXDEPLOY_BASE}" "linuxdeploy" "linuxdeploy-x86_64.AppImage")
set(EXTRA_BUNDLE_ARGS "-DLINUXDEPLOY=${LINUXDEPLOY_BASE}/squashfs-root/AppRun")
endif()
if (in_place)
message(STATUS "Adding in-place bundling to ${target_name}")
set(dest_target ${target_name})
set(DEST_TARGET ${target_name})
else()
message(STATUS "Adding ${target_name} to bundle target")
set(dest_target bundle)
set(DEST_TARGET bundle)
add_dependencies(bundle ${target_name})
endif()
add_custom_command(TARGET ${dest_target} POST_BUILD
add_custom_command(TARGET ${DEST_TARGET} POST_BUILD
COMMAND ${CMAKE_COMMAND}
"-DQT_HOST_PATH=\"${QT_HOST_PATH}\""
"-DQT_TARGET_PATH=\"${QT_TARGET_PATH}\""
"-DCMAKE_PREFIX_PATH=\"${CMAKE_PREFIX_PATH}\""
"-DBUNDLE_TARGET_EXECUTE=1"
"-DTARGET=${target_name}"
"-DSOURCE_PATH=${CMAKE_SOURCE_DIR}"
"-DBINARY_PATH=${CMAKE_BINARY_DIR}"
"-DEXECUTABLE_PATH=${bundle_executable_path}"
"-DBUNDLE_LIBRARY_PATHS=\"${bundle_library_paths}\""
"-DBUNDLE_QT=${bundle_qt}"
"-DEXECUTABLE_PATH=${BUNDLE_EXECUTABLE_PATH}"
"-DBUNDLE_LIBRARY_PATHS=\"${BUNDLE_LIBRARY_PATHS}\""
"-DBUNDLE_QT=${BUNDLE_QT}"
"-DIN_PLACE=${in_place}"
"-DLINUXDEPLOY=${CMAKE_BINARY_DIR}/externals/linuxdeploy/squashfs-root/AppRun"
${EXTRA_BUNDLE_ARGS}
-P "${CMAKE_SOURCE_DIR}/CMakeModules/BundleTarget.cmake"
WORKING_DIRECTORY "${CMAKE_BINARY_DIR}")
endfunction()

View File

@@ -1,20 +1,21 @@
set(CURRENT_MODULE_DIR ${CMAKE_CURRENT_LIST_DIR})
# Determines parameters based on the host and target for downloading the right Qt binaries.
function(determine_qt_parameters target host_out type_out arch_out arch_path_out host_type_out host_arch_out host_arch_path_out)
# This function downloads Qt using aqt. The path of the downloaded content will be added to the CMAKE_PREFIX_PATH.
# Params:
# target: Qt dependency to install. Specify a version number to download Qt, or "tools_(name)" for a specific build tool.
function(download_qt target)
if (target MATCHES "tools_.*")
set(tool ON)
set(DOWNLOAD_QT_TOOL ON)
else()
set(tool OFF)
set(DOWNLOAD_QT_TOOL OFF)
endif()
# Determine installation parameters for OS, architecture, and compiler
if (WIN32)
set(host "windows")
set(type "desktop")
if (NOT tool)
if (NOT DOWNLOAD_QT_TOOL)
if (MINGW)
set(arch "win64_mingw")
set(arch_path "mingw_64")
@@ -27,35 +28,21 @@ function(determine_qt_parameters target host_out type_out arch_out arch_path_out
message(FATAL_ERROR "Unsupported bundled Qt architecture. Enable USE_SYSTEM_QT and provide your own.")
endif()
set(arch "win64_${arch_path}")
# In case we're cross-compiling, prepare to also fetch the correct host Qt tools.
if (CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL "AMD64")
set(host_arch_path "msvc2019_64")
elseif (CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL "ARM64")
# TODO: msvc2019_arm64 doesn't include some of the required tools for some reason,
# TODO: so until it does, just use msvc2019_64 under x86_64 emulation.
# set(host_arch_path "msvc2019_arm64")
set(host_arch_path "msvc2019_64")
endif()
set(host_arch "win64_${host_arch_path}")
else()
message(FATAL_ERROR "Unsupported bundled Qt toolchain. Enable USE_SYSTEM_QT and provide your own.")
endif()
endif()
elseif (APPLE)
set(host "mac")
set(type "desktop")
set(arch "clang_64")
set(arch_path "macos")
if (IOS AND NOT tool)
set(host_type "${type}")
set(host_arch "${arch}")
set(host_arch_path "${arch_path}")
if (IOS AND NOT DOWNLOAD_QT_TOOL)
set(type "ios")
set(arch "ios")
set(arch_path "ios")
set(host_arch_path "macos")
else()
set(type "desktop")
set(arch "clang_64")
set(arch_path "macos")
endif()
else()
set(host "linux")
@@ -64,64 +51,38 @@ function(determine_qt_parameters target host_out type_out arch_out arch_path_out
set(arch_path "linux")
endif()
set(${host_out} "${host}" PARENT_SCOPE)
set(${type_out} "${type}" PARENT_SCOPE)
set(${arch_out} "${arch}" PARENT_SCOPE)
set(${arch_path_out} "${arch_path}" PARENT_SCOPE)
if (DEFINED host_type)
set(${host_type_out} "${host_type}" PARENT_SCOPE)
else()
set(${host_type_out} "${type}" PARENT_SCOPE)
endif()
if (DEFINED host_arch)
set(${host_arch_out} "${host_arch}" PARENT_SCOPE)
else()
set(${host_arch_out} "${arch}" PARENT_SCOPE)
endif()
if (DEFINED host_arch_path)
set(${host_arch_path_out} "${host_arch_path}" PARENT_SCOPE)
else()
set(${host_arch_path_out} "${arch_path}" PARENT_SCOPE)
endif()
endfunction()
# Download Qt binaries for a specific configuration.
function(download_qt_configuration prefix_out target host type arch arch_path base_path)
if (target MATCHES "tools_.*")
set(tool ON)
else()
set(tool OFF)
endif()
get_external_prefix(qt base_path)
file(MAKE_DIRECTORY "${base_path}")
set(install_args -c "${CURRENT_MODULE_DIR}/aqt_config.ini")
if (tool)
if (DOWNLOAD_QT_TOOL)
set(prefix "${base_path}/Tools")
set(install_args ${install_args} install-tool --outputdir ${base_path} ${host} desktop ${target})
else()
set(prefix "${base_path}/${target}/${arch_path}")
set(install_args ${install_args} install-qt --outputdir ${base_path} ${host} ${type} ${target} ${arch}
-m qtmultimedia --archives qttranslations qttools qtsvg qtbase)
if (host_arch_path)
set(host_flag "--autodesktop")
set(host_prefix "${base_path}/${target}/${host_arch_path}")
endif()
set(install_args ${install_args} install-qt --outputdir ${base_path} ${host} ${type} ${target} ${arch} ${host_flag}
-m qtmultimedia --archives qttranslations qttools qtsvg qtbase)
endif()
if (NOT EXISTS "${prefix}")
message(STATUS "Downloading Qt binaries for ${target}:${host}:${type}:${arch}:${arch_path}")
message(STATUS "Downloading binaries for Qt...")
set(AQT_PREBUILD_BASE_URL "https://github.com/miurahr/aqtinstall/releases/download/v3.1.9")
if (WIN32)
set(aqt_path "${base_path}/aqt.exe")
if (NOT EXISTS "${aqt_path}")
file(DOWNLOAD
${AQT_PREBUILD_BASE_URL}/aqt.exe
${aqt_path} SHOW_PROGRESS)
endif()
file(DOWNLOAD
${AQT_PREBUILD_BASE_URL}/aqt.exe
${aqt_path} SHOW_PROGRESS)
execute_process(COMMAND ${aqt_path} ${install_args}
WORKING_DIRECTORY ${base_path})
elseif (APPLE)
set(aqt_path "${base_path}/aqt-macos")
if (NOT EXISTS "${aqt_path}")
file(DOWNLOAD
${AQT_PREBUILD_BASE_URL}/aqt-macos
${aqt_path} SHOW_PROGRESS)
endif()
file(DOWNLOAD
${AQT_PREBUILD_BASE_URL}/aqt-macos
${aqt_path} SHOW_PROGRESS)
execute_process(COMMAND chmod +x ${aqt_path})
execute_process(COMMAND ${aqt_path} ${install_args}
WORKING_DIRECTORY ${base_path})
@@ -135,38 +96,18 @@ function(download_qt_configuration prefix_out target host type arch arch_path ba
execute_process(COMMAND ${CMAKE_COMMAND} -E env PYTHONPATH=${aqt_install_path} python3 -m aqt ${install_args}
WORKING_DIRECTORY ${base_path})
endif()
message(STATUS "Downloaded Qt binaries for ${target}:${host}:${type}:${arch}:${arch_path} to ${prefix}")
endif()
set(${prefix_out} "${prefix}" PARENT_SCOPE)
endfunction()
message(STATUS "Using downloaded Qt binaries at ${prefix}")
# This function downloads Qt using aqt.
# The path of the downloaded content will be added to the CMAKE_PREFIX_PATH.
# QT_TARGET_PATH is set to the Qt for the compile target platform.
# QT_HOST_PATH is set to a host-compatible Qt, for running tools.
# Params:
# target: Qt dependency to install. Specify a version number to download Qt, or "tools_(name)" for a specific build tool.
function(download_qt target)
determine_qt_parameters("${target}" host type arch arch_path host_type host_arch host_arch_path)
get_external_prefix(qt base_path)
file(MAKE_DIRECTORY "${base_path}")
download_qt_configuration(prefix "${target}" "${host}" "${type}" "${arch}" "${arch_path}" "${base_path}")
if (DEFINED host_arch_path AND NOT "${host_arch_path}" STREQUAL "${arch_path}")
download_qt_configuration(host_prefix "${target}" "${host}" "${host_type}" "${host_arch}" "${host_arch_path}" "${base_path}")
else()
set(host_prefix "${prefix}")
endif()
set(QT_TARGET_PATH "${prefix}" CACHE STRING "")
set(QT_HOST_PATH "${host_prefix}" CACHE STRING "")
# Add the target Qt prefix path so CMake can locate it.
# Add the Qt prefix path so CMake can locate it.
list(APPEND CMAKE_PREFIX_PATH "${prefix}")
set(CMAKE_PREFIX_PATH ${CMAKE_PREFIX_PATH} PARENT_SCOPE)
if (DEFINED host_prefix)
message(STATUS "Using downloaded host Qt binaries at ${host_prefix}")
set(QT_HOST_PATH "${host_prefix}" CACHE STRING "")
endif()
endfunction()
function(download_moltenvk)

View File

@@ -287,13 +287,5 @@ dumptxt -p $[OUT] "nfcSecret1Seed=$[NFC_SEED_1]"
dumptxt -p $[OUT] "nfcSecret1HmacKey=$[NFC_HMAC_KEY_1]"
dumptxt -p $[OUT] "nfcIv=$[NFC_IV]"
# Dump seeddb.bin as well
set SEEDDB_IN "0:/gm9/out/seeddb.bin"
set SEEDDB_OUT "0:/gm9/seeddb.bin"
sdump -w seeddb.bin
cp -w $[SEEDDB_IN] $[SEEDDB_OUT]
@Exit

View File

@@ -6,5 +6,5 @@ Usage:
1. Copy "DumpKeys.gm9" into the "gm9/scripts/" directory on your SD card.
2. Launch GodMode9, press the HOME button, select Scripts, and select "DumpKeys" from the list of scripts that appears.
3. Wait for the script to complete and return you to the GodMode9 main menu.
4. Power off your system and copy the "gm9/aes_keys.txt" and "gm9/seeddb.bin" files off of your SD card into "(Citra directory)/sysdata/".
4. Power off your system and copy the "gm9/aes_keys.txt" file off of your SD card into "(Citra directory)/sysdata/".

View File

@@ -11,4 +11,3 @@ type = QT
file_filter = ../../src/android/app/src/main/res/values-<lang>/strings.xml
source_file = ../../src/android/app/src/main/res/values/strings.xml
type = ANDROID
lang_map = es_ES:es, hu_HU:hu, ru_RU:ru, pt_BR:pt, zh_CN:zh

View File

@@ -57,12 +57,6 @@ if(USE_SYSTEM_CRYPTOPP)
add_library(cryptopp INTERFACE)
target_link_libraries(cryptopp INTERFACE cryptopp::cryptopp)
else()
if (WIN32 AND NOT MSVC AND "arm64" IN_LIST ARCHITECTURE)
# TODO: CryptoPP ARM64 ASM does not seem to support Windows unless compiled with MSVC.
# TODO: See https://github.com/weidai11/cryptopp/issues/1260
set(CRYPTOPP_DISABLE_ASM ON CACHE BOOL "")
endif()
set(CRYPTOPP_BUILD_DOCUMENTATION OFF CACHE BOOL "")
set(CRYPTOPP_BUILD_TESTING OFF CACHE BOOL "")
set(CRYPTOPP_INSTALL OFF CACHE BOOL "")
@@ -241,18 +235,6 @@ endif()
# DiscordRPC
if (USE_DISCORD_PRESENCE)
# rapidjson used by discord-rpc is old and doesn't correctly detect endianness for some platforms.
include(TestBigEndian)
test_big_endian(RAPIDJSON_BIG_ENDIAN)
if(RAPIDJSON_BIG_ENDIAN)
add_compile_definitions(RAPIDJSON_ENDIAN=1)
else()
add_compile_definitions(RAPIDJSON_ENDIAN=0)
endif()
# Apply a dummy CLANG_FORMAT_SUFFIX to disable discord-rpc's unnecessary automatic clang-format.
set(CLANG_FORMAT_SUFFIX "dummy")
add_subdirectory(discord-rpc EXCLUDE_FROM_ALL)
target_include_directories(discord-rpc INTERFACE ./discord-rpc/include)
endif()

View File

@@ -316,7 +316,7 @@ struct SourceStatus {
u16_le sync_count; ///< Is set by the DSP to the value of SourceConfiguration::sync_count
u32_dsp buffer_position; ///< Number of samples into the current buffer
u16_le current_buffer_id; ///< Updated when a buffer finishes playing
u16_le last_buffer_id; ///< Updated when all buffers in the queue finish playing
INSERT_PADDING_DSPWORDS(1);
};
Status status[num_sources];

View File

@@ -324,7 +324,6 @@ void Source::GenerateFrame() {
if (state.current_buffer.empty() && !DequeueBuffer()) {
state.enabled = false;
state.buffer_update = true;
state.last_buffer_id = state.current_buffer_id;
state.current_buffer_id = 0;
return;
}
@@ -412,7 +411,6 @@ bool Source::DequeueBuffer() {
state.next_sample_number = state.current_sample_number;
state.current_buffer_physical_address = buf.physical_address;
state.current_buffer_id = buf.buffer_id;
state.last_buffer_id = 0;
state.buffer_update = buf.from_queue && !buf.has_played;
if (buf.is_looping) {
@@ -434,10 +432,9 @@ SourceStatus::Status Source::GetCurrentStatus() {
ret.is_enabled = state.enabled;
ret.current_buffer_id_dirty = state.buffer_update ? 1 : 0;
state.buffer_update = false;
ret.sync_count = state.sync_count;
ret.buffer_position = state.current_sample_number;
ret.current_buffer_id = state.current_buffer_id;
ret.last_buffer_id = state.last_buffer_id;
ret.buffer_position = state.current_sample_number;
ret.sync_count = state.sync_count;
return ret;
}

View File

@@ -143,8 +143,7 @@ private:
// buffer_id state
bool buffer_update = false;
u16 last_buffer_id = 0;
u16 current_buffer_id = 0;
u32 current_buffer_id = 0;
// Decoding state

View File

@@ -6,13 +6,13 @@
#include "citra_qt/debugger/wait_tree.h"
#include "citra_qt/uisettings.h"
#include "common/assert.h"
#include "core/hle/kernel/event.h"
#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/semaphore.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/timer.h"
#include "core/hle/kernel/wait_object.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_mutex.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_semaphore.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_timer.h"
namespace {
@@ -98,7 +98,7 @@ QString WaitTreeText::GetText() const {
return text;
}
WaitTreeWaitObject::WaitTreeWaitObject(const Kernel::WaitObject& o) : object(o) {}
WaitTreeWaitObject::WaitTreeWaitObject(const Kernel::KSynchronizationObject& o) : object(o) {}
bool WaitTreeExpandableItem::IsExpandable() const {
return true;
@@ -106,23 +106,24 @@ bool WaitTreeExpandableItem::IsExpandable() const {
QString WaitTreeWaitObject::GetText() const {
return tr("[%1]%2 %3")
.arg(object.GetObjectId())
.arg(/*object.GetObjectId()*/ 0)
.arg(QString::fromStdString(object.GetTypeName()),
QString::fromStdString(object.GetName()));
QString::fromStdString(/*object.GetName()*/ "name"));
}
std::unique_ptr<WaitTreeWaitObject> WaitTreeWaitObject::make(const Kernel::WaitObject& object) {
switch (object.GetHandleType()) {
case Kernel::HandleType::Event:
return std::make_unique<WaitTreeEvent>(static_cast<const Kernel::Event&>(object));
case Kernel::HandleType::Mutex:
return std::make_unique<WaitTreeMutex>(static_cast<const Kernel::Mutex&>(object));
case Kernel::HandleType::Semaphore:
return std::make_unique<WaitTreeSemaphore>(static_cast<const Kernel::Semaphore&>(object));
case Kernel::HandleType::Timer:
return std::make_unique<WaitTreeTimer>(static_cast<const Kernel::Timer&>(object));
case Kernel::HandleType::Thread:
return std::make_unique<WaitTreeThread>(static_cast<const Kernel::Thread&>(object));
std::unique_ptr<WaitTreeWaitObject> WaitTreeWaitObject::make(
const Kernel::KSynchronizationObject& object) {
switch (object.GetTypeObj().GetClassToken()) {
case Kernel::ClassTokenType::KEvent:
return std::make_unique<WaitTreeEvent>(static_cast<const Kernel::KEvent&>(object));
case Kernel::ClassTokenType::KMutex:
return std::make_unique<WaitTreeMutex>(static_cast<const Kernel::KMutex&>(object));
case Kernel::ClassTokenType::KSemaphore:
return std::make_unique<WaitTreeSemaphore>(static_cast<const Kernel::KSemaphore&>(object));
case Kernel::ClassTokenType::KTimer:
return std::make_unique<WaitTreeTimer>(static_cast<const Kernel::KTimer&>(object));
case Kernel::ClassTokenType::KThread:
return std::make_unique<WaitTreeThread>(static_cast<const Kernel::KThread&>(object));
default:
return std::make_unique<WaitTreeWaitObject>(object);
}
@@ -153,7 +154,7 @@ QString WaitTreeWaitObject::GetResetTypeQString(Kernel::ResetType reset_type) {
return {};
}
WaitTreeObjectList::WaitTreeObjectList(const std::vector<std::shared_ptr<Kernel::WaitObject>>& list,
WaitTreeObjectList::WaitTreeObjectList(const std::vector<Kernel::KSynchronizationObject*>& list,
bool w_all)
: object_list(list), wait_all(w_all) {}
@@ -170,12 +171,12 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeObjectList::GetChildren() con
return list;
}
WaitTreeThread::WaitTreeThread(const Kernel::Thread& thread) : WaitTreeWaitObject(thread) {}
WaitTreeThread::WaitTreeThread(const Kernel::KThread& thread) : WaitTreeWaitObject(thread) {}
QString WaitTreeThread::GetText() const {
const auto& thread = static_cast<const Kernel::Thread&>(object);
const auto& thread = static_cast<const Kernel::KThread&>(object);
QString status;
switch (thread.status) {
switch (thread.GetStatus()) {
case Kernel::ThreadStatus::Running:
status = tr("running");
break;
@@ -183,7 +184,7 @@ QString WaitTreeThread::GetText() const {
status = tr("ready");
break;
case Kernel::ThreadStatus::WaitArb:
status = tr("waiting for address 0x%1").arg(thread.wait_address, 8, 16, QLatin1Char('0'));
status = tr("waiting for address 0x%1").arg(thread.m_wait_address, 8, 16, QLatin1Char('0'));
break;
case Kernel::ThreadStatus::WaitSleep:
status = tr("sleeping");
@@ -205,17 +206,18 @@ QString WaitTreeThread::GetText() const {
status = tr("dead");
break;
}
const auto& context = thread.GetContext();
QString pc_info = tr(" PC = 0x%1 LR = 0x%2")
.arg(thread.context.GetProgramCounter(), 8, 16, QLatin1Char('0'))
.arg(thread.context.GetLinkRegister(), 8, 16, QLatin1Char('0'));
.arg(context.GetProgramCounter(), 8, 16, QLatin1Char('0'))
.arg(context.GetLinkRegister(), 8, 16, QLatin1Char('0'));
return QStringLiteral("%1%2 (%3) ").arg(WaitTreeWaitObject::GetText(), pc_info, status);
}
QColor WaitTreeThread::GetColor() const {
const std::size_t color_index = IsDarkTheme() ? 1 : 0;
const auto& thread = static_cast<const Kernel::Thread&>(object);
switch (thread.status) {
const auto& thread = static_cast<const Kernel::KThread&>(object);
switch (thread.GetStatus()) {
case Kernel::ThreadStatus::Running:
return QColor(WaitTreeColors[0][color_index]);
case Kernel::ThreadStatus::Ready:
@@ -242,10 +244,10 @@ QColor WaitTreeThread::GetColor() const {
std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
std::vector<std::unique_ptr<WaitTreeItem>> list(WaitTreeWaitObject::GetChildren());
const auto& thread = static_cast<const Kernel::Thread&>(object);
const auto& thread = static_cast<const Kernel::KThread&>(object);
QString processor;
switch (thread.processor_id) {
switch (thread.m_processor_id) {
case Kernel::ThreadProcessorId::ThreadProcessorIdDefault:
processor = tr("default");
break;
@@ -259,86 +261,88 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
processor = tr("SysCore");
break;
default:
processor = tr("Unknown processor %1").arg(thread.processor_id);
processor = tr("Unknown processor %1").arg(thread.m_processor_id);
break;
}
list.push_back(std::make_unique<WaitTreeText>(tr("object id = %1").arg(thread.GetObjectId())));
list.push_back(
std::make_unique<WaitTreeText>(tr("object id = %1").arg(/*thread.GetObjectId()*/ 1)));
list.push_back(std::make_unique<WaitTreeText>(tr("processor = %1").arg(processor)));
list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadId())));
if (auto process = thread.owner_process.lock()) {
list.push_back(
std::make_unique<WaitTreeText>(tr("process = %1 (%2)")
.arg(QString::fromStdString(process->GetName()))
.arg(process->process_id)));
if (auto process = thread.GetOwner()) {
list.push_back(std::make_unique<WaitTreeText>(
tr("process = %1 (%2)")
.arg(QString::fromStdString(/*process->GetName()*/ ""))
.arg(process->process_id)));
}
list.push_back(std::make_unique<WaitTreeText>(tr("priority = %1(current) / %2(normal)")
.arg(thread.current_priority)
.arg(thread.nominal_priority)));
.arg(thread.GetCurrentPriority())
.arg(thread.m_nominal_priority)));
list.push_back(std::make_unique<WaitTreeText>(
tr("last running ticks = %1").arg(thread.last_running_ticks)));
tr("last running ticks = %1").arg(thread.m_last_running_ticks)));
if (thread.held_mutexes.empty()) {
if (thread.m_held_mutexes.empty()) {
list.push_back(std::make_unique<WaitTreeText>(tr("not holding mutex")));
} else {
list.push_back(std::make_unique<WaitTreeMutexList>(thread.held_mutexes));
list.push_back(std::make_unique<WaitTreeMutexList>(thread.m_held_mutexes));
}
if (thread.status == Kernel::ThreadStatus::WaitSynchAny ||
thread.status == Kernel::ThreadStatus::WaitSynchAll ||
thread.status == Kernel::ThreadStatus::WaitHleEvent) {
list.push_back(std::make_unique<WaitTreeObjectList>(thread.wait_objects,
if (thread.GetStatus() == Kernel::ThreadStatus::WaitSynchAny ||
thread.GetStatus() == Kernel::ThreadStatus::WaitSynchAll ||
thread.GetStatus() == Kernel::ThreadStatus::WaitHleEvent) {
list.push_back(std::make_unique<WaitTreeObjectList>(thread.m_wait_objects,
thread.IsSleepingOnWaitAll()));
}
return list;
}
WaitTreeEvent::WaitTreeEvent(const Kernel::Event& object) : WaitTreeWaitObject(object) {}
WaitTreeEvent::WaitTreeEvent(const Kernel::KEvent& object) : WaitTreeWaitObject(object) {}
std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeEvent::GetChildren() const {
std::vector<std::unique_ptr<WaitTreeItem>> list(WaitTreeWaitObject::GetChildren());
list.push_back(std::make_unique<WaitTreeText>(
tr("reset type = %1")
.arg(GetResetTypeQString(static_cast<const Kernel::Event&>(object).GetResetType()))));
.arg(GetResetTypeQString(static_cast<const Kernel::KEvent&>(object).GetResetType()))));
return list;
}
WaitTreeMutex::WaitTreeMutex(const Kernel::Mutex& object) : WaitTreeWaitObject(object) {}
WaitTreeMutex::WaitTreeMutex(const Kernel::KMutex& object) : WaitTreeWaitObject(object) {}
std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeMutex::GetChildren() const {
std::vector<std::unique_ptr<WaitTreeItem>> list(WaitTreeWaitObject::GetChildren());
const auto& mutex = static_cast<const Kernel::Mutex&>(object);
if (mutex.lock_count) {
list.push_back(
std::make_unique<WaitTreeText>(tr("locked %1 times by thread:").arg(mutex.lock_count)));
list.push_back(std::make_unique<WaitTreeThread>(*mutex.holding_thread));
const auto& mutex = static_cast<const Kernel::KMutex&>(object);
if (mutex.m_lock_count) {
list.push_back(std::make_unique<WaitTreeText>(
tr("locked %1 times by thread:").arg(mutex.m_lock_count)));
list.push_back(std::make_unique<WaitTreeThread>(*mutex.m_holding_thread));
} else {
list.push_back(std::make_unique<WaitTreeText>(tr("free")));
}
return list;
}
WaitTreeSemaphore::WaitTreeSemaphore(const Kernel::Semaphore& object)
WaitTreeSemaphore::WaitTreeSemaphore(const Kernel::KSemaphore& object)
: WaitTreeWaitObject(object) {}
std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeSemaphore::GetChildren() const {
std::vector<std::unique_ptr<WaitTreeItem>> list(WaitTreeWaitObject::GetChildren());
const auto& semaphore = static_cast<const Kernel::Semaphore&>(object);
const auto& semaphore = static_cast<const Kernel::KSemaphore&>(object);
list.push_back(std::make_unique<WaitTreeText>(
tr("available count = %1").arg(semaphore.GetAvailableCount())));
list.push_back(
std::make_unique<WaitTreeText>(tr("available count = %1").arg(semaphore.available_count)));
list.push_back(std::make_unique<WaitTreeText>(tr("max count = %1").arg(semaphore.max_count)));
std::make_unique<WaitTreeText>(tr("max count = %1").arg(semaphore.GetMaxCount())));
return list;
}
WaitTreeTimer::WaitTreeTimer(const Kernel::Timer& object) : WaitTreeWaitObject(object) {}
WaitTreeTimer::WaitTreeTimer(const Kernel::KTimer& object) : WaitTreeWaitObject(object) {}
std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeTimer::GetChildren() const {
std::vector<std::unique_ptr<WaitTreeItem>> list(WaitTreeWaitObject::GetChildren());
const auto& timer = static_cast<const Kernel::Timer&>(object);
const auto& timer = static_cast<const Kernel::KTimer&>(object);
list.push_back(std::make_unique<WaitTreeText>(
tr("reset type = %1").arg(GetResetTypeQString(timer.GetResetType()))));
@@ -349,8 +353,7 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeTimer::GetChildren() const {
return list;
}
WaitTreeMutexList::WaitTreeMutexList(
const boost::container::flat_set<std::shared_ptr<Kernel::Mutex>>& list)
WaitTreeMutexList::WaitTreeMutexList(const boost::container::flat_set<Kernel::KMutex*>& list)
: mutex_list(list) {}
QString WaitTreeMutexList::GetText() const {
@@ -364,7 +367,7 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeMutexList::GetChildren() cons
return list;
}
WaitTreeThreadList::WaitTreeThreadList(const std::vector<std::shared_ptr<Kernel::Thread>>& list)
WaitTreeThreadList::WaitTreeThreadList(const std::vector<Kernel::KThread*>& list)
: thread_list(list) {}
QString WaitTreeThreadList::GetText() const {

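The dispatch above replaces the old Kernel::HandleType switch with KAutoObject class tokens. Below is a simplified, self-contained sketch of that pattern, not Citra's actual kernel code: the stand-in types use a virtual accessor for brevity, whereas the real KSynchronizationObject derives from KAutoObject and exposes its token through GetTypeObj().GetClassToken(). All names here are hypothetical.

#include <iostream>
#include <memory>

// Hypothetical, simplified stand-ins for the kernel types used above.
enum class ClassToken { KEvent, KMutex, KThread };

class KSynchronizationObject {
public:
    virtual ~KSynchronizationObject() = default;
    virtual ClassToken GetClassToken() const = 0;
};

class KEvent : public KSynchronizationObject {
public:
    ClassToken GetClassToken() const override { return ClassToken::KEvent; }
};

class KMutex : public KSynchronizationObject {
public:
    ClassToken GetClassToken() const override { return ClassToken::KMutex; }
};

// Minimal wrapper hierarchy mirroring the wait-tree items.
struct WaitTreeWaitObject {
    explicit WaitTreeWaitObject(const KSynchronizationObject& o) : object(o) {}
    virtual ~WaitTreeWaitObject() = default;
    virtual const char* Name() const { return "WaitTreeWaitObject"; }
    const KSynchronizationObject& object;
};
struct WaitTreeEvent : WaitTreeWaitObject {
    explicit WaitTreeEvent(const KEvent& e) : WaitTreeWaitObject(e) {}
    const char* Name() const override { return "WaitTreeEvent"; }
};
struct WaitTreeMutex : WaitTreeWaitObject {
    explicit WaitTreeMutex(const KMutex& m) : WaitTreeWaitObject(m) {}
    const char* Name() const override { return "WaitTreeMutex"; }
};

// Token-based factory: switch on the class token and downcast, as make() does above.
std::unique_ptr<WaitTreeWaitObject> Make(const KSynchronizationObject& object) {
    switch (object.GetClassToken()) {
    case ClassToken::KEvent:
        return std::make_unique<WaitTreeEvent>(static_cast<const KEvent&>(object));
    case ClassToken::KMutex:
        return std::make_unique<WaitTreeMutex>(static_cast<const KMutex&>(object));
    default:
        return std::make_unique<WaitTreeWaitObject>(object);
    }
}

int main() {
    KEvent event;
    KMutex mutex;
    std::cout << Make(event)->Name() << '\n'; // WaitTreeEvent
    std::cout << Make(mutex)->Name() << '\n'; // WaitTreeMutex
}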
View File

@@ -10,17 +10,16 @@
#include <QTreeView>
#include <boost/container/flat_set.hpp>
#include "core/core.h"
#include "core/hle/kernel/object.h"
class EmuThread;
namespace Kernel {
class WaitObject;
class Event;
class Mutex;
class Semaphore;
class Thread;
class Timer;
class KSynchronizationObject;
class KEvent;
class KMutex;
class KSemaphore;
class KThread;
class KTimer;
} // namespace Kernel
namespace Core {
@@ -73,13 +72,13 @@ public:
class WaitTreeWaitObject : public WaitTreeExpandableItem {
Q_OBJECT
public:
explicit WaitTreeWaitObject(const Kernel::WaitObject& object);
static std::unique_ptr<WaitTreeWaitObject> make(const Kernel::WaitObject& object);
explicit WaitTreeWaitObject(const Kernel::KSynchronizationObject& object);
static std::unique_ptr<WaitTreeWaitObject> make(const Kernel::KSynchronizationObject& object);
QString GetText() const override;
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
protected:
const Kernel::WaitObject& object;
const Kernel::KSynchronizationObject& object;
static QString GetResetTypeQString(Kernel::ResetType reset_type);
};
@@ -87,19 +86,19 @@ protected:
class WaitTreeObjectList : public WaitTreeExpandableItem {
Q_OBJECT
public:
WaitTreeObjectList(const std::vector<std::shared_ptr<Kernel::WaitObject>>& list, bool wait_all);
WaitTreeObjectList(const std::vector<Kernel::KSynchronizationObject*>& list, bool wait_all);
QString GetText() const override;
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
private:
const std::vector<std::shared_ptr<Kernel::WaitObject>>& object_list;
const std::vector<Kernel::KSynchronizationObject*>& object_list;
bool wait_all;
};
class WaitTreeThread : public WaitTreeWaitObject {
Q_OBJECT
public:
explicit WaitTreeThread(const Kernel::Thread& thread);
explicit WaitTreeThread(const Kernel::KThread& thread);
QString GetText() const override;
QColor GetColor() const override;
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
@@ -108,53 +107,52 @@ public:
class WaitTreeEvent : public WaitTreeWaitObject {
Q_OBJECT
public:
explicit WaitTreeEvent(const Kernel::Event& object);
explicit WaitTreeEvent(const Kernel::KEvent& object);
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
};
class WaitTreeMutex : public WaitTreeWaitObject {
Q_OBJECT
public:
explicit WaitTreeMutex(const Kernel::Mutex& object);
explicit WaitTreeMutex(const Kernel::KMutex& object);
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
};
class WaitTreeSemaphore : public WaitTreeWaitObject {
Q_OBJECT
public:
explicit WaitTreeSemaphore(const Kernel::Semaphore& object);
explicit WaitTreeSemaphore(const Kernel::KSemaphore& object);
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
};
class WaitTreeTimer : public WaitTreeWaitObject {
Q_OBJECT
public:
explicit WaitTreeTimer(const Kernel::Timer& object);
explicit WaitTreeTimer(const Kernel::KTimer& object);
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
};
class WaitTreeMutexList : public WaitTreeExpandableItem {
Q_OBJECT
public:
explicit WaitTreeMutexList(
const boost::container::flat_set<std::shared_ptr<Kernel::Mutex>>& list);
explicit WaitTreeMutexList(const boost::container::flat_set<Kernel::KMutex*>& list);
QString GetText() const override;
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
private:
const boost::container::flat_set<std::shared_ptr<Kernel::Mutex>>& mutex_list;
const boost::container::flat_set<Kernel::KMutex*>& mutex_list;
};
class WaitTreeThreadList : public WaitTreeExpandableItem {
Q_OBJECT
public:
explicit WaitTreeThreadList(const std::vector<std::shared_ptr<Kernel::Thread>>& list);
explicit WaitTreeThreadList(const std::vector<Kernel::KThread*>& list);
QString GetText() const override;
std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
private:
const std::vector<std::shared_ptr<Kernel::Thread>>& thread_list;
const std::vector<Kernel::KThread*>& thread_list;
};
class WaitTreeModel : public QAbstractItemModel {

View File

@@ -88,6 +88,7 @@ add_library(citra_common STATIC
file_util.cpp
file_util.h
hash.h
intrusive_list.h
literals.h
logging/backend.cpp
logging/backend.h
@@ -109,6 +110,7 @@ add_library(citra_common STATIC
microprofileui.h
param_package.cpp
param_package.h
parent_of_member.h
polyfill_thread.h
precompiled_headers.h
quaternion.h

View File

@@ -21,4 +21,10 @@ template <typename T>
return static_cast<T>(value - value % size);
}
template <typename T>
requires std::is_unsigned_v<T>
[[nodiscard]] constexpr bool Is4KBAligned(T value) {
return (value & 0xFFF) == 0;
}
} // namespace Common
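A brief sketch of how the new Is4KBAligned helper might be used. The helper body is copied from the hunk above; the CanMapRegion caller and the use of std::uint32_t instead of the project's own integer aliases are assumptions.

#include <cstdint>
#include <type_traits>

namespace Common {
// Copied from the new alignment.h hunk above.
template <typename T>
    requires std::is_unsigned_v<T>
[[nodiscard]] constexpr bool Is4KBAligned(T value) {
    return (value & 0xFFF) == 0;
}
} // namespace Common

// Compile-time checks of the intended behaviour: 4 KB == 0x1000.
static_assert(Common::Is4KBAligned(0x1000u));
static_assert(Common::Is4KBAligned(0u));
static_assert(!Common::Is4KBAligned(0x1234u));

// A hypothetical caller, e.g. validating an address/size pair before mapping memory.
constexpr bool CanMapRegion(std::uint32_t addr, std::uint32_t size) {
    return Common::Is4KBAligned(addr) && Common::Is4KBAligned(size);
}

static_assert(CanMapRegion(0x08000000, 0x2000));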

View File

@@ -49,6 +49,14 @@ __declspec(dllimport) void __stdcall DebugBreak(void);
#define locale_t _locale_t
#endif // _MSC_VER
#define CITRA_NON_COPYABLE(cls) \
cls(const cls&) = delete; \
cls& operator=(const cls&) = delete
#define CITRA_NON_MOVEABLE(cls) \
cls(cls&&) = delete; \
cls& operator=(cls&&) = delete
#define DECLARE_ENUM_FLAG_OPERATORS(type) \
[[nodiscard]] constexpr type operator|(type a, type b) noexcept { \
using T = std::underlying_type_t<type>; \

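The hunk above is cut off partway through DECLARE_ENUM_FLAG_OPERATORS, but the two new deletion macros are complete. A minimal sketch of how they might be applied follows; the KernelHandle class is hypothetical.

#include <type_traits>

// Copied from the common_funcs.h hunk above.
#define CITRA_NON_COPYABLE(cls) \
    cls(const cls&) = delete; \
    cls& operator=(const cls&) = delete

#define CITRA_NON_MOVEABLE(cls) \
    cls(cls&&) = delete; \
    cls& operator=(cls&&) = delete

// A hypothetical resource-owning type that must never be copied or moved.
class KernelHandle {
    CITRA_NON_COPYABLE(KernelHandle);
    CITRA_NON_MOVEABLE(KernelHandle);

public:
    KernelHandle() = default;
};

static_assert(!std::is_copy_constructible_v<KernelHandle>);
static_assert(!std::is_copy_assignable_v<KernelHandle>);
static_assert(!std::is_move_constructible_v<KernelHandle>);
static_assert(!std::is_move_assignable_v<KernelHandle>);

Because the macros leave the trailing semicolon to the caller, the usage site reads like an ordinary declaration inside the class.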
src/common/intrusive_list.h (new file, 631 lines)
View File

@@ -0,0 +1,631 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/common_funcs.h"
#include "common/parent_of_member.h"
namespace Common {
// Forward declare implementation class for Node.
namespace impl {
class IntrusiveListImpl;
}
class IntrusiveListNode {
CITRA_NON_COPYABLE(IntrusiveListNode);
private:
friend class impl::IntrusiveListImpl;
IntrusiveListNode* m_prev;
IntrusiveListNode* m_next;
public:
constexpr IntrusiveListNode() : m_prev(this), m_next(this) {}
constexpr bool IsLinked() const {
return m_next != this;
}
private:
constexpr void LinkPrev(IntrusiveListNode* node) {
// We can't link an already linked node.
ASSERT(!node->IsLinked());
this->SplicePrev(node, node);
}
constexpr void SplicePrev(IntrusiveListNode* first, IntrusiveListNode* last) {
// Splice a range into the list.
auto last_prev = last->m_prev;
first->m_prev = m_prev;
last_prev->m_next = this;
m_prev->m_next = first;
m_prev = last_prev;
}
constexpr void LinkNext(IntrusiveListNode* node) {
// We can't link an already linked node.
ASSERT(!node->IsLinked());
return this->SpliceNext(node, node);
}
constexpr void SpliceNext(IntrusiveListNode* first, IntrusiveListNode* last) {
// Splice a range into the list.
auto last_prev = last->m_prev;
first->m_prev = this;
last_prev->m_next = m_next;
m_next->m_prev = last_prev;
m_next = first;
}
constexpr void Unlink() {
this->Unlink(m_next);
}
constexpr void Unlink(IntrusiveListNode* last) {
// Unlink a node from a next node.
auto last_prev = last->m_prev;
m_prev->m_next = last;
last->m_prev = m_prev;
last_prev->m_next = this;
m_prev = last_prev;
}
constexpr IntrusiveListNode* GetPrev() {
return m_prev;
}
constexpr const IntrusiveListNode* GetPrev() const {
return m_prev;
}
constexpr IntrusiveListNode* GetNext() {
return m_next;
}
constexpr const IntrusiveListNode* GetNext() const {
return m_next;
}
};
// DEPRECATED: static_assert(std::is_literal_type<IntrusiveListNode>::value);
namespace impl {
class IntrusiveListImpl {
CITRA_NON_COPYABLE(IntrusiveListImpl);
private:
IntrusiveListNode m_root_node;
public:
template <bool Const>
class Iterator;
using value_type = IntrusiveListNode;
using size_type = size_t;
using difference_type = ptrdiff_t;
using pointer = value_type*;
using const_pointer = const value_type*;
using reference = value_type&;
using const_reference = const value_type&;
using iterator = Iterator<false>;
using const_iterator = Iterator<true>;
using reverse_iterator = std::reverse_iterator<iterator>;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
template <bool Const>
class Iterator {
public:
using iterator_category = std::bidirectional_iterator_tag;
using value_type = typename IntrusiveListImpl::value_type;
using difference_type = typename IntrusiveListImpl::difference_type;
using pointer =
std::conditional_t<Const, IntrusiveListImpl::const_pointer, IntrusiveListImpl::pointer>;
using reference = std::conditional_t<Const, IntrusiveListImpl::const_reference,
IntrusiveListImpl::reference>;
private:
pointer m_node;
public:
constexpr explicit Iterator(pointer n) : m_node(n) {}
constexpr bool operator==(const Iterator& rhs) const {
return m_node == rhs.m_node;
}
constexpr pointer operator->() const {
return m_node;
}
constexpr reference operator*() const {
return *m_node;
}
constexpr Iterator& operator++() {
m_node = m_node->m_next;
return *this;
}
constexpr Iterator& operator--() {
m_node = m_node->m_prev;
return *this;
}
constexpr Iterator operator++(int) {
const Iterator it{*this};
++(*this);
return it;
}
constexpr Iterator operator--(int) {
const Iterator it{*this};
--(*this);
return it;
}
constexpr operator Iterator<true>() const {
return Iterator<true>(m_node);
}
constexpr Iterator<false> GetNonConstIterator() const {
return Iterator<false>(const_cast<IntrusiveListImpl::pointer>(m_node));
}
};
public:
constexpr IntrusiveListImpl() : m_root_node() {}
// Iterator accessors.
constexpr iterator begin() {
return iterator(m_root_node.GetNext());
}
constexpr const_iterator begin() const {
return const_iterator(m_root_node.GetNext());
}
constexpr iterator end() {
return iterator(std::addressof(m_root_node));
}
constexpr const_iterator end() const {
return const_iterator(std::addressof(m_root_node));
}
constexpr iterator iterator_to(reference v) {
// Only allow iterator_to for values in lists.
ASSERT(v.IsLinked());
return iterator(std::addressof(v));
}
constexpr const_iterator iterator_to(const_reference v) const {
// Only allow iterator_to for values in lists.
ASSERT(v.IsLinked());
return const_iterator(std::addressof(v));
}
// Content management.
constexpr bool empty() const {
return !m_root_node.IsLinked();
}
constexpr size_type size() const {
return static_cast<size_type>(std::distance(this->begin(), this->end()));
}
constexpr reference back() {
return *m_root_node.GetPrev();
}
constexpr const_reference back() const {
return *m_root_node.GetPrev();
}
constexpr reference front() {
return *m_root_node.GetNext();
}
constexpr const_reference front() const {
return *m_root_node.GetNext();
}
constexpr void push_back(reference node) {
m_root_node.LinkPrev(std::addressof(node));
}
constexpr void push_front(reference node) {
m_root_node.LinkNext(std::addressof(node));
}
constexpr void pop_back() {
m_root_node.GetPrev()->Unlink();
}
constexpr void pop_front() {
m_root_node.GetNext()->Unlink();
}
constexpr iterator insert(const_iterator pos, reference node) {
pos.GetNonConstIterator()->LinkPrev(std::addressof(node));
return iterator(std::addressof(node));
}
constexpr void splice(const_iterator pos, IntrusiveListImpl& o) {
splice_impl(pos, o.begin(), o.end());
}
constexpr void splice(const_iterator pos, IntrusiveListImpl&, const_iterator first) {
const_iterator last(first);
std::advance(last, 1);
splice_impl(pos, first, last);
}
constexpr void splice(const_iterator pos, IntrusiveListImpl&, const_iterator first,
const_iterator last) {
splice_impl(pos, first, last);
}
constexpr iterator erase(const_iterator pos) {
if (pos == this->end()) {
return this->end();
}
iterator it(pos.GetNonConstIterator());
(it++)->Unlink();
return it;
}
constexpr void clear() {
while (!this->empty()) {
this->pop_front();
}
}
private:
constexpr void splice_impl(const_iterator _pos, const_iterator _first, const_iterator _last) {
if (_first == _last) {
return;
}
iterator pos(_pos.GetNonConstIterator());
iterator first(_first.GetNonConstIterator());
iterator last(_last.GetNonConstIterator());
first->Unlink(std::addressof(*last));
pos->SplicePrev(std::addressof(*first), std::addressof(*first));
}
};
} // namespace impl
template <class T, class Traits>
class IntrusiveList {
CITRA_NON_COPYABLE(IntrusiveList);
private:
impl::IntrusiveListImpl m_impl;
public:
template <bool Const>
class Iterator;
using value_type = T;
using size_type = size_t;
using difference_type = ptrdiff_t;
using pointer = value_type*;
using const_pointer = const value_type*;
using reference = value_type&;
using const_reference = const value_type&;
using iterator = Iterator<false>;
using const_iterator = Iterator<true>;
using reverse_iterator = std::reverse_iterator<iterator>;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
template <bool Const>
class Iterator {
public:
friend class Common::IntrusiveList<T, Traits>;
using ImplIterator =
std::conditional_t<Const, Common::impl::IntrusiveListImpl::const_iterator,
Common::impl::IntrusiveListImpl::iterator>;
using iterator_category = std::bidirectional_iterator_tag;
using value_type = typename IntrusiveList::value_type;
using difference_type = typename IntrusiveList::difference_type;
using pointer =
std::conditional_t<Const, IntrusiveList::const_pointer, IntrusiveList::pointer>;
using reference =
std::conditional_t<Const, IntrusiveList::const_reference, IntrusiveList::reference>;
private:
ImplIterator m_iterator;
private:
constexpr explicit Iterator(ImplIterator it) : m_iterator(it) {}
constexpr ImplIterator GetImplIterator() const {
return m_iterator;
}
public:
constexpr bool operator==(const Iterator& rhs) const {
return m_iterator == rhs.m_iterator;
}
constexpr pointer operator->() const {
return std::addressof(Traits::GetParent(*m_iterator));
}
constexpr reference operator*() const {
return Traits::GetParent(*m_iterator);
}
constexpr Iterator& operator++() {
++m_iterator;
return *this;
}
constexpr Iterator& operator--() {
--m_iterator;
return *this;
}
constexpr Iterator operator++(int) {
const Iterator it{*this};
++m_iterator;
return it;
}
constexpr Iterator operator--(int) {
const Iterator it{*this};
--m_iterator;
return it;
}
constexpr operator Iterator<true>() const {
return Iterator<true>(m_iterator);
}
};
private:
static constexpr IntrusiveListNode& GetNode(reference ref) {
return Traits::GetNode(ref);
}
static constexpr IntrusiveListNode const& GetNode(const_reference ref) {
return Traits::GetNode(ref);
}
static constexpr reference GetParent(IntrusiveListNode& node) {
return Traits::GetParent(node);
}
static constexpr const_reference GetParent(IntrusiveListNode const& node) {
return Traits::GetParent(node);
}
public:
constexpr IntrusiveList() : m_impl() {}
// Iterator accessors.
constexpr iterator begin() {
return iterator(m_impl.begin());
}
constexpr const_iterator begin() const {
return const_iterator(m_impl.begin());
}
constexpr iterator end() {
return iterator(m_impl.end());
}
constexpr const_iterator end() const {
return const_iterator(m_impl.end());
}
constexpr const_iterator cbegin() const {
return this->begin();
}
constexpr const_iterator cend() const {
return this->end();
}
constexpr reverse_iterator rbegin() {
return reverse_iterator(this->end());
}
constexpr const_reverse_iterator rbegin() const {
return const_reverse_iterator(this->end());
}
constexpr reverse_iterator rend() {
return reverse_iterator(this->begin());
}
constexpr const_reverse_iterator rend() const {
return const_reverse_iterator(this->begin());
}
constexpr const_reverse_iterator crbegin() const {
return this->rbegin();
}
constexpr const_reverse_iterator crend() const {
return this->rend();
}
constexpr iterator iterator_to(reference v) {
return iterator(m_impl.iterator_to(GetNode(v)));
}
constexpr const_iterator iterator_to(const_reference v) const {
return const_iterator(m_impl.iterator_to(GetNode(v)));
}
// Content management.
constexpr bool empty() const {
return m_impl.empty();
}
constexpr size_type size() const {
return m_impl.size();
}
constexpr reference back() {
return GetParent(m_impl.back());
}
constexpr const_reference back() const {
return GetParent(m_impl.back());
}
constexpr reference front() {
return GetParent(m_impl.front());
}
constexpr const_reference front() const {
return GetParent(m_impl.front());
}
constexpr void push_back(reference ref) {
m_impl.push_back(GetNode(ref));
}
constexpr void push_front(reference ref) {
m_impl.push_front(GetNode(ref));
}
constexpr void pop_back() {
m_impl.pop_back();
}
constexpr void pop_front() {
m_impl.pop_front();
}
constexpr iterator insert(const_iterator pos, reference ref) {
return iterator(m_impl.insert(pos.GetImplIterator(), GetNode(ref)));
}
constexpr void splice(const_iterator pos, IntrusiveList& o) {
m_impl.splice(pos.GetImplIterator(), o.m_impl);
}
constexpr void splice(const_iterator pos, IntrusiveList& o, const_iterator first) {
m_impl.splice(pos.GetImplIterator(), o.m_impl, first.GetImplIterator());
}
constexpr void splice(const_iterator pos, IntrusiveList& o, const_iterator first,
const_iterator last) {
m_impl.splice(pos.GetImplIterator(), o.m_impl, first.GetImplIterator(),
last.GetImplIterator());
}
constexpr iterator erase(const_iterator pos) {
return iterator(m_impl.erase(pos.GetImplIterator()));
}
constexpr void clear() {
m_impl.clear();
}
};
template <auto T, class Derived = Common::impl::GetParentType<T>>
class IntrusiveListMemberTraits;
template <class Parent, IntrusiveListNode Parent::*Member, class Derived>
class IntrusiveListMemberTraits<Member, Derived> {
public:
using ListType = IntrusiveList<Derived, IntrusiveListMemberTraits>;
private:
friend class IntrusiveList<Derived, IntrusiveListMemberTraits>;
static constexpr IntrusiveListNode& GetNode(Derived& parent) {
return parent.*Member;
}
static constexpr IntrusiveListNode const& GetNode(Derived const& parent) {
return parent.*Member;
}
static Derived& GetParent(IntrusiveListNode& node) {
return Common::GetParentReference<Member, Derived>(std::addressof(node));
}
static Derived const& GetParent(IntrusiveListNode const& node) {
return Common::GetParentReference<Member, Derived>(std::addressof(node));
}
};
template <auto T, class Derived = Common::impl::GetParentType<T>>
class IntrusiveListMemberTraitsByNonConstexprOffsetOf;
template <class Parent, IntrusiveListNode Parent::*Member, class Derived>
class IntrusiveListMemberTraitsByNonConstexprOffsetOf<Member, Derived> {
public:
using ListType = IntrusiveList<Derived, IntrusiveListMemberTraitsByNonConstexprOffsetOf>;
private:
friend class IntrusiveList<Derived, IntrusiveListMemberTraitsByNonConstexprOffsetOf>;
static constexpr IntrusiveListNode& GetNode(Derived& parent) {
return parent.*Member;
}
static constexpr IntrusiveListNode const& GetNode(Derived const& parent) {
return parent.*Member;
}
static Derived& GetParent(IntrusiveListNode& node) {
return *reinterpret_cast<Derived*>(reinterpret_cast<char*>(std::addressof(node)) -
GetOffset());
}
static Derived const& GetParent(IntrusiveListNode const& node) {
return *reinterpret_cast<const Derived*>(
reinterpret_cast<const char*>(std::addressof(node)) - GetOffset());
}
static uintptr_t GetOffset() {
return reinterpret_cast<uintptr_t>(std::addressof(reinterpret_cast<Derived*>(0)->*Member));
}
};
template <class Derived>
class IntrusiveListBaseNode : public IntrusiveListNode {};
template <class Derived>
class IntrusiveListBaseTraits {
public:
using ListType = IntrusiveList<Derived, IntrusiveListBaseTraits>;
private:
friend class IntrusiveList<Derived, IntrusiveListBaseTraits>;
static constexpr IntrusiveListNode& GetNode(Derived& parent) {
return static_cast<IntrusiveListNode&>(
static_cast<IntrusiveListBaseNode<Derived>&>(parent));
}
static constexpr IntrusiveListNode const& GetNode(Derived const& parent) {
return static_cast<const IntrusiveListNode&>(
static_cast<const IntrusiveListBaseNode<Derived>&>(parent));
}
static constexpr Derived& GetParent(IntrusiveListNode& node) {
return static_cast<Derived&>(static_cast<IntrusiveListBaseNode<Derived>&>(node));
}
static constexpr Derived const& GetParent(IntrusiveListNode const& node) {
return static_cast<const Derived&>(
static_cast<const IntrusiveListBaseNode<Derived>&>(node));
}
};
} // namespace Common
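A minimal usage sketch of the intrusive list shown above (not part of the diff). The include path "common/intrusive_list.h" and the Job type are assumptions for illustration; IntrusiveListBaseNode and IntrusiveListBaseTraits are the types defined in the code above.

#include "common/intrusive_list.h" // assumed install path for the header above

struct Job : Common::IntrusiveListBaseNode<Job> {
    int id{};
    explicit Job(int job_id) : id(job_id) {}
};

using JobList = Common::IntrusiveListBaseTraits<Job>::ListType;

void RunJobs() {
    Job first{1};
    Job second{2};

    JobList list;
    // The list does not own or copy elements; it links the node embedded in
    // each Job, so the objects must outlive their list membership.
    list.push_back(first);
    list.push_back(second);

    for (Job& job : list) {
        // Visits elements in insertion order: 1, then 2.
        (void)job.id;
    }

    list.clear();
}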

View File

@ -0,0 +1,190 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <type_traits>
#include "common/assert.h"
namespace Common {
namespace detail {
template <typename T, size_t Size, size_t Align>
struct TypedStorageImpl {
alignas(Align) u8 storage_[Size];
};
} // namespace detail
template <typename T>
using TypedStorage = detail::TypedStorageImpl<T, sizeof(T), alignof(T)>;
template <typename T>
static constexpr T* GetPointer(TypedStorage<T>& ts) {
return static_cast<T*>(static_cast<void*>(std::addressof(ts.storage_)));
}
template <typename T>
static constexpr const T* GetPointer(const TypedStorage<T>& ts) {
return static_cast<const T*>(static_cast<const void*>(std::addressof(ts.storage_)));
}
namespace impl {
template <size_t MaxDepth>
struct OffsetOfUnionHolder {
template <typename ParentType, typename MemberType, size_t Offset>
union UnionImpl {
using PaddingMember = char;
static constexpr size_t GetOffset() {
return Offset;
}
#pragma pack(push, 1)
struct {
PaddingMember padding[Offset];
MemberType members[(sizeof(ParentType) / sizeof(MemberType)) + 1];
} data;
#pragma pack(pop)
UnionImpl<ParentType, MemberType, Offset + 1> next_union;
};
template <typename ParentType, typename MemberType>
union UnionImpl<ParentType, MemberType, 0> {
static constexpr size_t GetOffset() {
return 0;
}
struct {
MemberType members[(sizeof(ParentType) / sizeof(MemberType)) + 1];
} data;
UnionImpl<ParentType, MemberType, 1> next_union;
};
template <typename ParentType, typename MemberType>
union UnionImpl<ParentType, MemberType, MaxDepth> {};
};
template <typename ParentType, typename MemberType>
struct OffsetOfCalculator {
using UnionHolder =
typename OffsetOfUnionHolder<sizeof(MemberType)>::template UnionImpl<ParentType, MemberType,
0>;
union Union {
char c{};
UnionHolder first_union;
TypedStorage<ParentType> parent;
constexpr Union() : c() {}
};
static constexpr Union U = {};
static constexpr const MemberType* GetNextAddress(const MemberType* start,
const MemberType* target) {
while (start < target) {
start++;
}
return start;
}
static constexpr std::ptrdiff_t GetDifference(const MemberType* start,
const MemberType* target) {
return (target - start) * sizeof(MemberType);
}
template <typename CurUnion>
static constexpr std::ptrdiff_t OffsetOfImpl(MemberType ParentType::*member,
CurUnion& cur_union) {
constexpr size_t Offset = CurUnion::GetOffset();
const auto target = std::addressof(GetPointer(U.parent)->*member);
const auto start = std::addressof(cur_union.data.members[0]);
const auto next = GetNextAddress(start, target);
if (next != target) {
if constexpr (Offset < sizeof(MemberType) - 1) {
return OffsetOfImpl(member, cur_union.next_union);
} else {
UNREACHABLE();
}
}
return static_cast<ptrdiff_t>(static_cast<size_t>(next - start) * sizeof(MemberType) +
Offset);
}
static constexpr std::ptrdiff_t OffsetOf(MemberType ParentType::*member) {
return OffsetOfImpl(member, U.first_union);
}
};
template <typename T>
struct GetMemberPointerTraits;
template <typename P, typename M>
struct GetMemberPointerTraits<M P::*> {
using Parent = P;
using Member = M;
};
template <auto MemberPtr>
using GetParentType = typename GetMemberPointerTraits<decltype(MemberPtr)>::Parent;
template <auto MemberPtr>
using GetMemberType = typename GetMemberPointerTraits<decltype(MemberPtr)>::Member;
template <auto MemberPtr, typename RealParentType = GetParentType<MemberPtr>>
constexpr std::ptrdiff_t OffsetOf() {
using DeducedParentType = GetParentType<MemberPtr>;
using MemberType = GetMemberType<MemberPtr>;
static_assert(std::is_base_of<DeducedParentType, RealParentType>::value ||
std::is_same<RealParentType, DeducedParentType>::value);
return OffsetOfCalculator<RealParentType, MemberType>::OffsetOf(MemberPtr);
};
} // namespace impl
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType& GetParentReference(impl::GetMemberType<MemberPtr>* member) {
std::ptrdiff_t Offset = impl::OffsetOf<MemberPtr, RealParentType>();
return *static_cast<RealParentType*>(
static_cast<void*>(static_cast<uint8_t*>(static_cast<void*>(member)) - Offset));
}
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const& GetParentReference(impl::GetMemberType<MemberPtr> const* member) {
std::ptrdiff_t Offset = impl::OffsetOf<MemberPtr, RealParentType>();
return *static_cast<const RealParentType*>(static_cast<const void*>(
static_cast<const uint8_t*>(static_cast<const void*>(member)) - Offset));
}
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType* GetParentPointer(impl::GetMemberType<MemberPtr>* member) {
return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
}
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const* GetParentPointer(impl::GetMemberType<MemberPtr> const* member) {
return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
}
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType& GetParentReference(impl::GetMemberType<MemberPtr>& member) {
return GetParentReference<MemberPtr, RealParentType>(std::addressof(member));
}
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const& GetParentReference(impl::GetMemberType<MemberPtr> const& member) {
return GetParentReference<MemberPtr, RealParentType>(std::addressof(member));
}
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType* GetParentPointer(impl::GetMemberType<MemberPtr>& member) {
return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
}
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const* GetParentPointer(impl::GetMemberType<MemberPtr> const& member) {
return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
}
} // namespace Common
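A short sketch (not part of the diff) of how the parent-of-member helpers above are meant to be used; the include path and the Owner type are assumptions, while GetParentReference is the helper defined above. This is the same mechanism IntrusiveListMemberTraits relies on to map a node back to its containing object.

#include "common/parent_of_member.h" // assumed install path for the header above

struct Owner {
    int tag{};
    int payload{};
};

void Example() {
    Owner owner{7, 21};

    // Given only a pointer to the 'payload' member, recover the enclosing Owner.
    int* member = &owner.payload;
    Owner& recovered = Common::GetParentReference<&Owner::payload>(member);

    // 'recovered' aliases 'owner', so the rest of the object is reachable again.
    (void)recovered.tag;
}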

View File

@ -128,60 +128,70 @@ add_library(citra_core STATIC
hle/applets/swkbd.h
hle/ipc.h
hle/ipc_helpers.h
hle/kernel/address_arbiter.cpp
hle/kernel/address_arbiter.h
hle/kernel/client_port.cpp
hle/kernel/client_port.h
hle/kernel/client_session.cpp
hle/kernel/client_session.h
hle/kernel/config_mem.cpp
hle/kernel/config_mem.h
hle/kernel/errors.h
hle/kernel/event.cpp
hle/kernel/event.h
hle/kernel/handle_table.cpp
hle/kernel/handle_table.h
hle/kernel/hle_ipc.cpp
hle/kernel/hle_ipc.h
hle/kernel/ipc.cpp
hle/kernel/ipc.h
hle/kernel/ipc_debugger/recorder.cpp
hle/kernel/ipc_debugger/recorder.h
hle/kernel/k_address_arbiter.cpp
hle/kernel/k_address_arbiter.h
hle/kernel/k_auto_object.cpp
hle/kernel/k_auto_object.h
hle/kernel/k_auto_object_container.cpp
hle/kernel/k_auto_object_container.h
hle/kernel/k_client_port.cpp
hle/kernel/k_client_port.h
hle/kernel/k_client_session.cpp
hle/kernel/k_client_session.h
hle/kernel/k_code_set.h
hle/kernel/k_event.cpp
hle/kernel/k_event.h
hle/kernel/k_handle_table.cpp
hle/kernel/k_handle_table.h
hle/kernel/k_linked_list.h
hle/kernel/k_mutex.cpp
hle/kernel/k_mutex.h
hle/kernel/k_object_name.cpp
hle/kernel/k_object_name.h
hle/kernel/k_port.cpp
hle/kernel/k_port.h
hle/kernel/k_process.cpp
hle/kernel/k_process.h
hle/kernel/k_resource_limit.cpp
hle/kernel/k_resource_limit.h
hle/kernel/k_scoped_resource_reservation.h
hle/kernel/k_semaphore.cpp
hle/kernel/k_semaphore.h
hle/kernel/k_server_port.cpp
hle/kernel/k_server_port.h
hle/kernel/k_server_session.cpp
hle/kernel/k_server_session.h
hle/kernel/k_session.cpp
hle/kernel/k_session.h
hle/kernel/k_shared_memory.cpp
hle/kernel/k_shared_memory.h
hle/kernel/k_slab_heap.h
hle/kernel/k_synchronization_object.cpp
hle/kernel/k_synchronization_object.h
hle/kernel/k_thread.cpp
hle/kernel/k_thread.h
hle/kernel/k_timer.cpp
hle/kernel/k_timer.h
hle/kernel/kernel.cpp
hle/kernel/kernel.h
hle/kernel/memory.cpp
hle/kernel/memory.h
hle/kernel/mutex.cpp
hle/kernel/mutex.h
hle/kernel/object.cpp
hle/kernel/object.h
hle/kernel/process.cpp
hle/kernel/process.h
hle/kernel/resource_limit.cpp
hle/kernel/resource_limit.h
hle/kernel/semaphore.cpp
hle/kernel/semaphore.h
hle/kernel/server_port.cpp
hle/kernel/server_port.h
hle/kernel/server_session.cpp
hle/kernel/server_session.h
hle/kernel/session.h
hle/kernel/session.cpp
hle/kernel/shared_memory.cpp
hle/kernel/shared_memory.h
hle/kernel/shared_page.cpp
hle/kernel/shared_page.h
hle/kernel/svc.cpp
hle/kernel/svc.h
hle/kernel/svc_wrapper.h
hle/kernel/thread.cpp
hle/kernel/thread.h
hle/kernel/timer.cpp
hle/kernel/timer.h
hle/kernel/vm_manager.cpp
hle/kernel/vm_manager.h
hle/kernel/wait_object.cpp
hle/kernel/wait_object.h
hle/mii.h
hle/mii.cpp
hle/result.h
@ -323,14 +333,12 @@ add_library(citra_core STATIC
hle/service/ir/ir_u.h
hle/service/ir/ir_user.cpp
hle/service/ir/ir_user.h
hle/service/kernel_helpers.cpp
hle/service/kernel_helpers.h
hle/service/ldr_ro/cro_helper.cpp
hle/service/ldr_ro/cro_helper.h
hle/service/ldr_ro/ldr_ro.cpp
hle/service/ldr_ro/ldr_ro.h
hle/service/mcu/mcu_hwc.cpp
hle/service/mcu/mcu_hwc.h
hle/service/mcu/mcu.cpp
hle/service/mcu/mcu.h
hle/service/mic/mic_u.cpp
hle/service/mic/mic_u.h
hle/service/mvd/mvd.cpp

View File

@ -292,8 +292,8 @@ void ARM_Dynarmic::SetPageTable(const std::shared_ptr<Memory::PageTable>& page_t
}
void ARM_Dynarmic::ServeBreak() {
Kernel::Thread* thread = system.Kernel().GetCurrentThreadManager().GetCurrentThread();
SaveContext(thread->context);
Kernel::KThread* thread = system.Kernel().GetCurrentThreadManager().GetCurrentThread();
SaveContext(thread->GetContext());
GDBStub::Break();
GDBStub::SendTrap(thread, 5);
}

View File

@ -609,8 +609,8 @@ void ARMul_State::ServeBreak() {
DEBUG_ASSERT(Reg[15] == last_bkpt.address);
}
Kernel::Thread* thread = system.Kernel().GetCurrentThreadManager().GetCurrentThread();
system.GetRunningCore().SaveContext(thread->context);
Kernel::KThread* thread = system.Kernel().GetCurrentThreadManager().GetCurrentThread();
system.GetRunningCore().SaveContext(thread->GetContext());
if (last_bkpt_hit || GDBStub::IsMemoryBreak() || GDBStub::GetCpuStepFlag()) {
last_bkpt_hit = false;

View File

@ -27,9 +27,9 @@
#include "core/frontend/image_interface.h"
#include "core/gdbstub/gdbstub.h"
#include "core/global.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/service/apt/applet_manager.h"
#include "core/hle/service/apt/apt.h"
#include "core/hle/service/cam/cam.h"
@ -83,9 +83,9 @@ System::ResultStatus System::RunLoop(bool tight_loop) {
}
if (GDBStub::IsServerEnabled()) {
Kernel::Thread* thread = kernel->GetCurrentThreadManager().GetCurrentThread();
Kernel::KThread* thread = kernel->GetCurrentThreadManager().GetCurrentThread();
if (thread && running_core) {
running_core->SaveContext(thread->context);
running_core->SaveContext(thread->GetContext());
}
GDBStub::HandlePacket(*this);
@ -311,8 +311,8 @@ System::ResultStatus System::Load(Frontend::EmuWindow& emu_window, const std::st
}
telemetry_session->AddInitialInfo(*app_loader);
std::shared_ptr<Kernel::Process> process;
const Loader::ResultStatus load_result{app_loader->Load(process)};
Kernel::Process* process;
const Loader::ResultStatus load_result{app_loader->Load(std::addressof(process))};
if (Loader::ResultStatus::Success != load_result) {
LOG_CRITICAL(Core, "Failed to load ROM (Error {})!", load_result);
System::Shutdown();

View File

@ -7,7 +7,7 @@
#include "common/archives.h"
#include "core/file_sys/archive_other_savedata.h"
#include "core/file_sys/errors.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/service/fs/archive.h"
SERIALIZE_EXPORT_IMPL(FileSys::ArchiveFactory_OtherSaveDataPermitted)

View File

@ -6,7 +6,7 @@
#include "common/archives.h"
#include "core/core.h"
#include "core/file_sys/archive_savedata.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/k_process.h"
SERIALIZE_EXPORT_IMPL(FileSys::ArchiveFactory_SaveData)

View File

@ -11,7 +11,7 @@
#include "core/file_sys/archive_selfncch.h"
#include "core/file_sys/errors.h"
#include "core/file_sys/ivfc_archive.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/k_process.h"
SERIALIZE_EXPORT_IMPL(FileSys::ArchiveFactory_SelfNCCH)

View File

@ -121,7 +121,7 @@ Loader::ResultStatus FileSys::Plugin3GXLoader::Load(
if (!compatible_TID.empty() &&
std::find(compatible_TID.begin(), compatible_TID.end(),
static_cast<u32>(process.codeset->program_id)) == compatible_TID.end()) {
static_cast<u32>(process.codeset.program_id)) == compatible_TID.end()) {
LOG_ERROR(Service_PLGLDR,
"Failed to load 3GX plugin. Not compatible with loaded process: {}",
plg_context.plugin_path);
@ -291,7 +291,7 @@ void FileSys::Plugin3GXLoader::MapBootloader(Kernel::Process& process, Kernel::K
u32 exe_checksum, bool no_flash) {
u32_le game_instructions[2];
kernel.memory.ReadBlock(process, process.codeset->CodeSegment().addr, game_instructions,
kernel.memory.ReadBlock(process, process.codeset.CodeSegment().addr, game_instructions,
sizeof(u32) * 2);
std::array<u32_le, g_plugin_loader_bootloader.size() / sizeof(u32)> bootloader;
@ -307,7 +307,7 @@ void FileSys::Plugin3GXLoader::MapBootloader(Kernel::Process& process, Kernel::K
*it = game_instructions[1];
} break;
case 0xDEAD0002: {
*it = process.codeset->CodeSegment().addr;
*it = process.codeset.CodeSegment().addr;
} break;
case 0xDEAD0003: {
for (u32 i = 0;
@ -361,6 +361,6 @@ void FileSys::Plugin3GXLoader::MapBootloader(Kernel::Process& process, Kernel::K
game_instructions[0] = 0xE51FF004; // ldr pc, [pc, #-4]
game_instructions[1] = _3GX_exe_load_addr - bootloader_memory_size;
kernel.memory.WriteBlock(process, process.codeset->CodeSegment().addr, game_instructions,
kernel.memory.WriteBlock(process, process.codeset.CodeSegment().addr, game_instructions,
sizeof(u32) * 2);
}

View File

@ -25,7 +25,7 @@
#include "common/common_types.h"
#include "common/swap.h"
#include "core/file_sys/archive_backend.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/service/plgldr/plgldr.h"
namespace Loader {

View File

@ -35,7 +35,7 @@
#include "core/core.h"
#include "core/gdbstub/gdbstub.h"
#include "core/gdbstub/hio.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/k_process.h"
#include "core/memory.h"
namespace GDBStub {
@ -128,7 +128,7 @@ u32 command_length;
u32 latest_signal = 0;
bool memory_break = false;
static Kernel::Thread* current_thread = nullptr;
static Kernel::KThread* current_thread = nullptr;
// Binding to a port within the reserved ports range (0-1023) requires root permissions,
// so default to a port outside of that range.
@ -159,72 +159,76 @@ BreakpointMap breakpoints_read;
BreakpointMap breakpoints_write;
} // Anonymous namespace
static Kernel::Thread* FindThreadById(int id) {
static Kernel::KThread* FindThreadById(int id) {
u32 num_cores = Core::GetNumCores();
for (u32 i = 0; i < num_cores; ++i) {
const auto& threads =
Core::System::GetInstance().Kernel().GetThreadManager(i).GetThreadList();
for (auto& thread : threads) {
if (thread->GetThreadId() == static_cast<u32>(id)) {
return thread.get();
return thread;
}
}
}
return nullptr;
}
static u32 RegRead(std::size_t id, Kernel::Thread* thread = nullptr) {
static u32 RegRead(std::size_t id, Kernel::KThread* thread = nullptr) {
if (!thread) {
return 0;
}
const auto& context = thread->GetContext();
if (id <= PC_REGISTER) {
return thread->context.cpu_registers[id];
return context.cpu_registers[id];
} else if (id == CPSR_REGISTER) {
return thread->context.cpsr;
return context.cpsr;
} else {
return 0;
}
}
static void RegWrite(std::size_t id, u32 val, Kernel::Thread* thread = nullptr) {
static void RegWrite(std::size_t id, u32 val, Kernel::KThread* thread = nullptr) {
if (!thread) {
return;
}
auto& context = thread->GetContext();
if (id <= PC_REGISTER) {
thread->context.cpu_registers[id] = val;
context.cpu_registers[id] = val;
} else if (id == CPSR_REGISTER) {
thread->context.cpsr = val;
context.cpsr = val;
}
}
static u64 FpuRead(std::size_t id, Kernel::Thread* thread = nullptr) {
static u64 FpuRead(std::size_t id, Kernel::KThread* thread = nullptr) {
if (!thread) {
return 0;
}
const auto& context = thread->GetContext();
if (id >= D0_REGISTER && id < FPSCR_REGISTER) {
u64 ret = thread->context.fpu_registers[2 * (id - D0_REGISTER)];
ret |= static_cast<u64>(thread->context.fpu_registers[2 * (id - D0_REGISTER) + 1]) << 32;
u64 ret = context.fpu_registers[2 * (id - D0_REGISTER)];
ret |= static_cast<u64>(context.fpu_registers[2 * (id - D0_REGISTER) + 1]) << 32;
return ret;
} else if (id == FPSCR_REGISTER) {
return thread->context.fpscr;
return context.fpscr;
} else {
return 0;
}
}
static void FpuWrite(std::size_t id, u64 val, Kernel::Thread* thread = nullptr) {
static void FpuWrite(std::size_t id, u64 val, Kernel::KThread* thread = nullptr) {
if (!thread) {
return;
}
auto& context = thread->GetContext();
if (id >= D0_REGISTER && id < FPSCR_REGISTER) {
thread->context.fpu_registers[2 * (id - D0_REGISTER)] = static_cast<u32>(val);
thread->context.fpu_registers[2 * (id - D0_REGISTER) + 1] = static_cast<u32>(val >> 32);
context.fpu_registers[2 * (id - D0_REGISTER)] = static_cast<u32>(val);
context.fpu_registers[2 * (id - D0_REGISTER) + 1] = static_cast<u32>(val >> 32);
} else if (id == FPSCR_REGISTER) {
thread->context.fpscr = static_cast<u32>(val);
context.fpscr = static_cast<u32>(val);
}
}
@ -606,7 +610,7 @@ static void HandleThreadAlive() {
*
* @param signal Signal to be sent to client.
*/
static void SendSignal(Kernel::Thread* thread, u32 signal, bool full = true) {
static void SendSignal(Kernel::KThread* thread, u32 signal, bool full = true) {
if (gdbserver_socket == -1) {
return;
}
@ -785,7 +789,7 @@ static void WriteRegister() {
return SendReply("E01");
}
Core::GetRunningCore().LoadContext(current_thread->context);
Core::GetRunningCore().LoadContext(current_thread->GetContext());
SendReply("OK");
}
@ -815,7 +819,7 @@ static void WriteRegisters() {
}
}
Core::GetRunningCore().LoadContext(current_thread->context);
Core::GetRunningCore().LoadContext(current_thread->GetContext());
SendReply("OK");
}
@ -890,7 +894,7 @@ void Break(bool is_memory_break) {
static void Step() {
if (command_length > 1) {
RegWrite(PC_REGISTER, GdbHexToInt(command_buffer + 1), current_thread);
Core::GetRunningCore().LoadContext(current_thread->context);
Core::GetRunningCore().LoadContext(current_thread->GetContext());
}
step_loop = true;
halt_loop = true;
@ -1266,7 +1270,7 @@ void SetCpuStepFlag(bool is_step) {
step_loop = is_step;
}
void SendTrap(Kernel::Thread* thread, int trap) {
void SendTrap(Kernel::KThread* thread, int trap) {
if (!send_trap) {
return;
}

View File

@ -8,7 +8,7 @@
#include <span>
#include "common/common_types.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/k_thread.h"
namespace Core {
class System;
@ -118,7 +118,7 @@ void SetCpuStepFlag(bool is_step);
* @param thread Sending thread.
* @param trap Trap no.
*/
void SendTrap(Kernel::Thread* thread, int trap);
void SendTrap(Kernel::KThread* thread, int trap);
/**
* Send reply to gdb client.

View File

@ -47,10 +47,10 @@ void Applet::SendParameter(const Service::APT::MessageParameter& parameter) {
}
}
void Applet::CloseApplet(std::shared_ptr<Kernel::Object> object, const std::vector<u8>& buffer) {
void Applet::CloseApplet(Kernel::KAutoObject* object, const std::vector<u8>& buffer) {
if (auto locked = manager.lock()) {
locked->PrepareToCloseLibraryApplet(true, false, false);
locked->CloseLibraryApplet(std::move(object), buffer);
locked->CloseLibraryApplet(object, buffer);
} else {
LOG_ERROR(Service_APT, "called after destructing applet manager");
}

View File

@ -8,6 +8,10 @@
#include "core/hle/result.h"
#include "core/hle/service/apt/applet_manager.h"
namespace Core {
class System;
}
namespace HLE::Applets {
class Applet {
@ -39,7 +43,8 @@ public:
protected:
Applet(Core::System& system, Service::APT::AppletId id, Service::APT::AppletId parent,
bool preload, std::weak_ptr<Service::APT::AppletManager> manager)
: system(system), id(id), parent(parent), preload(preload), manager(std::move(manager)) {}
: system(system), id(id), parent(parent), preload(preload), service_context(system),
manager(std::move(manager)) {}
/**
* Handles a parameter from the application.
@ -62,11 +67,11 @@ protected:
virtual Result Finalize() = 0;
Core::System& system;
Service::APT::AppletId id; ///< Id of this Applet
Service::APT::AppletId parent; ///< Id of this Applet's parent
bool preload; ///< Whether the Applet is being preloaded.
std::shared_ptr<std::vector<u8>> heap_memory; ///< Heap memory for this Applet
Service::KernelHelpers::ServiceContext service_context;
/// Whether this applet is running.
bool is_running = true;
@ -75,7 +80,7 @@ protected:
bool is_active = false;
void SendParameter(const Service::APT::MessageParameter& parameter);
void CloseApplet(std::shared_ptr<Kernel::Object> object, const std::vector<u8>& buffer);
void CloseApplet(Kernel::KAutoObject* object, const std::vector<u8>& buffer);
private:
std::weak_ptr<Service::APT::AppletManager> manager;

View File

@ -5,6 +5,7 @@
#include "common/string_util.h"
#include "core/core.h"
#include "core/hle/applets/erreula.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/service/apt/apt.h"
namespace HLE::Applets {
@ -28,7 +29,7 @@ Result ErrEula::ReceiveParameterImpl(const Service::APT::MessageParameter& param
// TODO: allocated memory never released
using Kernel::MemoryPermission;
// Create a SharedMemory that directly points to this heap block.
framebuffer_memory = system.Kernel().CreateSharedMemoryForApplet(
framebuffer_memory = service_context.CreateSharedMemoryForApplet(
0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite,
"ErrEula Memory");

View File

@ -5,7 +5,10 @@
#pragma once
#include "core/hle/applets/applet.h"
#include "core/hle/kernel/shared_memory.h"
namespace Kernel {
class KSharedMemory;
}
namespace HLE::Applets {
@ -24,7 +27,7 @@ private:
/// This SharedMemory will be created when we receive the LibAppJustStarted message.
/// It holds the framebuffer info retrieved by the application with
/// GSPGPU::ImportDisplayCaptureInfo
std::shared_ptr<Kernel::SharedMemory> framebuffer_memory;
Kernel::KSharedMemory* framebuffer_memory;
/// Parameter received by the applet on start.
std::vector<u8> startup_param;

View File

@ -11,8 +11,8 @@
#include "core/core.h"
#include "core/frontend/applets/mii_selector.h"
#include "core/hle/applets/mii_selector.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/result.h"
namespace HLE::Applets {
@ -35,7 +35,7 @@ Result MiiSelector::ReceiveParameterImpl(const Service::APT::MessageParameter& p
using Kernel::MemoryPermission;
// Create a SharedMemory that directly points to this heap block.
framebuffer_memory = system.Kernel().CreateSharedMemoryForApplet(
framebuffer_memory = service_context.CreateSharedMemoryForApplet(
0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite,
"MiiSelector Memory");

View File

@ -8,7 +8,6 @@
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/applets/applet.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/mii.h"
#include "core/hle/result.h"
#include "core/hle/service/apt/apt.h"
@ -18,6 +17,10 @@ class MiiSelector;
struct MiiSelectorConfig;
} // namespace Frontend
namespace Kernel {
class KSharedMemory;
}
namespace HLE::Applets {
struct MiiConfig {
@ -79,7 +82,7 @@ private:
/// This SharedMemory will be created when we receive the LibAppJustStarted message.
/// It holds the framebuffer info retrieved by the application with
/// GSPGPU::ImportDisplayCaptureInfo
std::shared_ptr<Kernel::SharedMemory> framebuffer_memory;
Kernel::KSharedMemory* framebuffer_memory;
MiiConfig config;

View File

@ -2,10 +2,8 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/string_util.h"
#include "core/core.h"
#include "core/hle/applets/mint.h"
#include "core/hle/service/apt/apt.h"
#include "core/hle/kernel/k_shared_memory.h"
namespace HLE::Applets {
@ -28,7 +26,7 @@ Result Mint::ReceiveParameterImpl(const Service::APT::MessageParameter& paramete
// TODO: allocated memory never released
using Kernel::MemoryPermission;
// Create a SharedMemory that directly points to this heap block.
framebuffer_memory = system.Kernel().CreateSharedMemoryForApplet(
framebuffer_memory = service_context.CreateSharedMemoryForApplet(
0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite,
"Mint Memory");

View File

@ -5,7 +5,10 @@
#pragma once
#include "core/hle/applets/applet.h"
#include "core/hle/kernel/shared_memory.h"
namespace Kernel {
class KSharedMemory;
}
namespace HLE::Applets {
@ -24,7 +27,7 @@ private:
/// This SharedMemory will be created when we receive the Request message.
/// It holds the framebuffer info retrieved by the application with
/// GSPGPU::ImportDisplayCaptureInfo
std::shared_ptr<Kernel::SharedMemory> framebuffer_memory;
Kernel::KSharedMemory* framebuffer_memory;
/// Parameter received by the applet on start.
std::vector<u8> startup_param;

View File

@ -10,12 +10,9 @@
#include "common/string_util.h"
#include "core/core.h"
#include "core/hle/applets/swkbd.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/result.h"
#include "core/hle/service/gsp/gsp.h"
#include "core/hle/service/hid/hid.h"
#include "core/memory.h"
namespace HLE::Applets {
@ -32,7 +29,7 @@ Result SoftwareKeyboard::ReceiveParameterImpl(Service::APT::MessageParameter con
using Kernel::MemoryPermission;
// Create a SharedMemory that directly points to this heap block.
framebuffer_memory = system.Kernel().CreateSharedMemoryForApplet(
framebuffer_memory = service_context.CreateSharedMemoryForApplet(
0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite,
"SoftwareKeyboard Memory");
@ -94,7 +91,7 @@ Result SoftwareKeyboard::Start(Service::APT::MessageParameter const& parameter)
"The size of the parameter (SoftwareKeyboardConfig) is wrong");
std::memcpy(&config, parameter.buffer.data(), parameter.buffer.size());
text_memory = std::static_pointer_cast<Kernel::SharedMemory, Kernel::Object>(parameter.object);
text_memory = parameter.object->DynamicCast<Kernel::KSharedMemory*>();
DrawScreenKeyboard();

View File

@ -9,7 +9,6 @@
#include "common/common_types.h"
#include "core/frontend/applets/swkbd.h"
#include "core/hle/applets/applet.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/result.h"
#include "core/hle/service/apt/apt.h"
@ -195,10 +194,10 @@ private:
/// This SharedMemory will be created when we receive the LibAppJustStarted message.
/// It holds the framebuffer info retrieved by the application with
/// GSPGPU::ImportDisplayCaptureInfo
std::shared_ptr<Kernel::SharedMemory> framebuffer_memory;
Kernel::KSharedMemory* framebuffer_memory;
/// SharedMemory where the output text will be stored
std::shared_ptr<Kernel::SharedMemory> text_memory;
Kernel::KSharedMemory* text_memory;
/// Configuration of this instance of the SoftwareKeyboard, as received from the application
SoftwareKeyboardConfig config;

View File

@ -6,8 +6,6 @@
#include "common/common_types.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/thread.h"
#include "core/memory.h"
namespace IPC {

View File

@ -87,11 +87,11 @@ public:
void PushRaw(const T& value);
// TODO : ensure that translate params are added after all regular params
template <typename... O>
void PushCopyObjects(std::shared_ptr<O>... pointers);
template <typename... T>
void PushCopyObjects(T*... pointers);
template <typename... O>
void PushMoveObjects(std::shared_ptr<O>... pointers);
template <typename... T>
void PushMoveObjects(T*... pointers);
void PushStaticBuffer(std::vector<u8> buffer, u8 buffer_id);
@ -183,14 +183,14 @@ inline void RequestBuilder::PushMoveHLEHandles(H... handles) {
Push(static_cast<u32>(handles)...);
}
template <typename... O>
inline void RequestBuilder::PushCopyObjects(std::shared_ptr<O>... pointers) {
PushCopyHLEHandles(context->AddOutgoingHandle(std::move(pointers))...);
template <typename... T>
inline void RequestBuilder::PushCopyObjects(T*... pointers) {
PushCopyHLEHandles(context->AddOutgoingHandle(pointers)...);
}
template <typename... O>
inline void RequestBuilder::PushMoveObjects(std::shared_ptr<O>... pointers) {
PushMoveHLEHandles(context->AddOutgoingHandle(std::move(pointers))...);
template <typename... T>
inline void RequestBuilder::PushMoveObjects(T*... pointers) {
PushMoveHLEHandles(context->AddOutgoingHandle(pointers)...);
}
inline void RequestBuilder::PushStaticBuffer(std::vector<u8> buffer, u8 buffer_id) {
@ -241,11 +241,11 @@ public:
}
/// Equivalent to calling `PopGenericObjects<1>()[0]`.
std::shared_ptr<Kernel::Object> PopGenericObject();
Kernel::KAutoObject* PopGenericObject();
/// Equivalent to calling `std::get<0>(PopObjects<T>())`.
template <typename T>
std::shared_ptr<T> PopObject();
T* PopObject();
/**
* Pop a descriptor containing `N` handles and resolves them to Kernel::Object pointers. If a
@ -255,7 +255,7 @@ public:
* call to read 2 single-handle descriptors.
*/
template <unsigned int N>
std::array<std::shared_ptr<Kernel::Object>, N> PopGenericObjects();
std::array<Kernel::KAutoObject*, N> PopGenericObjects();
/**
* Resolves handles to Kernel::Objects as in PopGenericsObjects(), but then also casts them to
@ -263,11 +263,11 @@ public:
* not match, null is returned instead.
*/
template <typename... T>
std::tuple<std::shared_ptr<T>...> PopObjects();
std::tuple<T*...> PopObjects();
/// Convenience wrapper around PopObjects() which assigns the handles to the passed references.
template <typename... T>
void PopObjects(std::shared_ptr<T>&... pointers) {
void PopObjects(T**... pointers) {
std::tie(pointers...) = PopObjects<T...>();
}
@ -401,20 +401,20 @@ std::array<u32, N> RequestParser::PopHLEHandles() {
return handles;
}
inline std::shared_ptr<Kernel::Object> RequestParser::PopGenericObject() {
inline Kernel::KAutoObject* RequestParser::PopGenericObject() {
auto [handle] = PopHLEHandles<1>();
return context->GetIncomingHandle(handle);
}
template <typename T>
std::shared_ptr<T> RequestParser::PopObject() {
return Kernel::DynamicObjectCast<T>(PopGenericObject());
T* RequestParser::PopObject() {
return PopGenericObject()->DynamicCast<T*>();
}
template <unsigned int N>
inline std::array<std::shared_ptr<Kernel::Object>, N> RequestParser::PopGenericObjects() {
template <u32 N>
inline std::array<Kernel::KAutoObject*, N> RequestParser::PopGenericObjects() {
std::array<u32, N> handles = PopHLEHandles<N>();
std::array<std::shared_ptr<Kernel::Object>, N> pointers;
std::array<Kernel::KAutoObject*, N> pointers;
for (int i = 0; i < N; ++i) {
pointers[i] = context->GetIncomingHandle(handles[i]);
}
@ -423,15 +423,14 @@ inline std::array<std::shared_ptr<Kernel::Object>, N> RequestParser::PopGenericO
namespace detail {
template <typename... T, std::size_t... I>
std::tuple<std::shared_ptr<T>...> PopObjectsHelper(
std::array<std::shared_ptr<Kernel::Object>, sizeof...(T)>&& pointers,
std::index_sequence<I...>) {
return std::make_tuple(Kernel::DynamicObjectCast<T>(std::move(pointers[I]))...);
std::tuple<T*...> PopObjectsHelper(std::array<Kernel::KAutoObject*, sizeof...(T)>& pointers,
std::index_sequence<I...>) {
return std::make_tuple((pointers[I]->template DynamicCast<T*>())...);
}
} // namespace detail
template <typename... T>
inline std::tuple<std::shared_ptr<T>...> RequestParser::PopObjects() {
inline std::tuple<T*...> RequestParser::PopObjects() {
return detail::PopObjectsHelper<T...>(PopGenericObjects<sizeof...(T)>(),
std::index_sequence_for<T...>{});
}
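To illustrate the new raw-pointer object flow, a hypothetical service handler might look like the sketch below (not part of the diff). The service name, command and word counts are invented; PopObject, PushCopyObjects and the builder calls follow the signatures above, and DynamicCast is assumed to yield nullptr on a type mismatch, mirroring the old DynamicObjectCast.

// Hypothetical handler, for illustration only.
void ExampleService::ImportSharedMemory(Kernel::HLERequestContext& ctx) {
    IPC::RequestParser rp(ctx);

    // Assumed to be nullptr when the handle is not shared memory.
    Kernel::KSharedMemory* shmem = rp.PopObject<Kernel::KSharedMemory>();
    if (!shmem) {
        IPC::RequestBuilder rb = rp.MakeBuilder(1, 0);
        rb.Push(Kernel::ResultInvalidHandle);
        return;
    }

    // ... map or otherwise use shmem ...

    IPC::RequestBuilder rb = rp.MakeBuilder(1, 2);
    rb.Push(ResultSuccess);
    rb.PushCopyObjects(shmem); // translated back to the client as a copy handle
}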

View File

@ -1,220 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/vector.hpp>
#include "common/archives.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/thread.h"
#include "core/memory.h"
SERIALIZE_EXPORT_IMPL(Kernel::AddressArbiter)
SERIALIZE_EXPORT_IMPL(Kernel::AddressArbiter::Callback)
namespace Kernel {
void AddressArbiter::WaitThread(std::shared_ptr<Thread> thread, VAddr wait_address) {
thread->wait_address = wait_address;
thread->status = ThreadStatus::WaitArb;
waiting_threads.emplace_back(std::move(thread));
}
u64 AddressArbiter::ResumeAllThreads(VAddr address) {
// Determine which threads are waiting on this address, those should be woken up.
auto itr = std::stable_partition(waiting_threads.begin(), waiting_threads.end(),
[address](const auto& thread) {
ASSERT_MSG(thread->status == ThreadStatus::WaitArb,
"Inconsistent AddressArbiter state");
return thread->wait_address != address;
});
// Wake up all the found threads
const u64 num_threads = std::distance(itr, waiting_threads.end());
std::for_each(itr, waiting_threads.end(), [](auto& thread) { thread->ResumeFromWait(); });
// Remove the woken up threads from the wait list.
waiting_threads.erase(itr, waiting_threads.end());
return num_threads;
}
bool AddressArbiter::ResumeHighestPriorityThread(VAddr address) {
// Determine which threads are waiting on this address, those should be considered for wakeup.
auto matches_start = std::stable_partition(
waiting_threads.begin(), waiting_threads.end(), [address](const auto& thread) {
ASSERT_MSG(thread->status == ThreadStatus::WaitArb,
"Inconsistent AddressArbiter state");
return thread->wait_address != address;
});
// Iterate through threads, find highest priority thread that is waiting to be arbitrated.
// Note: The real kernel will pick the first thread in the list if more than one have the
// same highest priority value. Lower priority values mean higher priority.
auto itr = std::min_element(matches_start, waiting_threads.end(),
[](const auto& lhs, const auto& rhs) {
return lhs->current_priority < rhs->current_priority;
});
if (itr == waiting_threads.end()) {
return false;
}
auto thread = *itr;
thread->ResumeFromWait();
waiting_threads.erase(itr);
return true;
}
AddressArbiter::AddressArbiter(KernelSystem& kernel)
: Object(kernel), kernel(kernel), timeout_callback(std::make_shared<Callback>(*this)) {}
AddressArbiter::~AddressArbiter() {
if (resource_limit) {
resource_limit->Release(ResourceLimitType::AddressArbiter, 1);
}
}
std::shared_ptr<AddressArbiter> KernelSystem::CreateAddressArbiter(std::string name) {
auto address_arbiter = std::make_shared<AddressArbiter>(*this);
address_arbiter->name = std::move(name);
return address_arbiter;
}
class AddressArbiter::Callback : public WakeupCallback {
public:
explicit Callback(AddressArbiter& _parent) : parent(_parent) {}
AddressArbiter& parent;
void WakeUp(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
std::shared_ptr<WaitObject> object) override {
parent.WakeUp(reason, std::move(thread), std::move(object));
}
private:
template <class Archive>
void serialize(Archive& ar, const unsigned int) {
ar& boost::serialization::base_object<WakeupCallback>(*this);
}
friend class boost::serialization::access;
};
void AddressArbiter::WakeUp(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
std::shared_ptr<WaitObject> object) {
ASSERT(reason == ThreadWakeupReason::Timeout);
// Remove the newly-awakened thread from the Arbiter's waiting list.
waiting_threads.erase(std::remove(waiting_threads.begin(), waiting_threads.end(), thread),
waiting_threads.end());
};
Result AddressArbiter::ArbitrateAddress(std::shared_ptr<Thread> thread, ArbitrationType type,
VAddr address, s32 value, u64 nanoseconds) {
switch (type) {
// Signal thread(s) waiting for arbitrate address...
case ArbitrationType::Signal: {
u64 num_threads{};
// Negative value means resume all threads
if (value < 0) {
num_threads = ResumeAllThreads(address);
} else {
// Resume first N threads
for (s32 i = 0; i < value; i++) {
num_threads += ResumeHighestPriorityThread(address);
}
}
// Prevents lag from low priority threads that spam svcArbitrateAddress and wake no threads
// The tick count is taken directly from official HOS kernel. The priority value is one less
// than official kernel as the affected FMV threads dont meet the priority threshold of 50.
// TODO: Revisit this when scheduler is rewritten and adjust if there isn't a problem there.
if (num_threads == 0 && thread->current_priority >= 49) {
kernel.current_cpu->GetTimer().AddTicks(1614u);
}
break;
}
// Wait current thread (acquire the arbiter)...
case ArbitrationType::WaitIfLessThan:
if ((s32)kernel.memory.Read32(address) < value) {
WaitThread(std::move(thread), address);
}
break;
case ArbitrationType::WaitIfLessThanWithTimeout:
if ((s32)kernel.memory.Read32(address) < value) {
thread->wakeup_callback = timeout_callback;
thread->WakeAfterDelay(nanoseconds);
WaitThread(std::move(thread), address);
}
break;
case ArbitrationType::DecrementAndWaitIfLessThan: {
s32 memory_value = kernel.memory.Read32(address);
if (memory_value < value) {
// Only change the memory value if the thread should wait
kernel.memory.Write32(address, (s32)memory_value - 1);
WaitThread(std::move(thread), address);
}
break;
}
case ArbitrationType::DecrementAndWaitIfLessThanWithTimeout: {
s32 memory_value = kernel.memory.Read32(address);
if (memory_value < value) {
// Only change the memory value if the thread should wait
kernel.memory.Write32(address, (s32)memory_value - 1);
thread->wakeup_callback = timeout_callback;
thread->WakeAfterDelay(nanoseconds);
WaitThread(std::move(thread), address);
}
break;
}
default:
LOG_ERROR(Kernel, "unknown type={}", type);
return ResultInvalidEnumValueFnd;
}
// The calls that use a timeout seem to always return a Timeout error even if they did not put
// the thread to sleep
if (type == ArbitrationType::WaitIfLessThanWithTimeout ||
type == ArbitrationType::DecrementAndWaitIfLessThanWithTimeout) {
return ResultTimeout;
}
return ResultSuccess;
}
template <class Archive>
void AddressArbiter::serialize(Archive& ar, const unsigned int) {
ar& boost::serialization::base_object<Object>(*this);
ar& name;
ar& waiting_threads;
ar& timeout_callback;
ar& resource_limit;
}
SERIALIZE_IMPL(AddressArbiter)
} // namespace Kernel
namespace boost::serialization {
template <class Archive>
void save_construct_data(Archive& ar, const Kernel::AddressArbiter::Callback* t,
const unsigned int) {
ar << Kernel::SharedFrom(&t->parent);
}
template <class Archive>
void load_construct_data(Archive& ar, Kernel::AddressArbiter::Callback* t, const unsigned int) {
std::shared_ptr<Kernel::AddressArbiter> parent;
ar >> parent;
::new (t) Kernel::AddressArbiter::Callback(*parent);
}
} // namespace boost::serialization

View File

@ -1,88 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <vector>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/result.h"
// Address arbiters are an underlying kernel synchronization object that can be created/used via
// supervisor calls (SVCs). They function as sort of a global lock. Typically, games/other CTR
// applications use them as an underlying mechanism to implement thread-safe barriers, events, and
// semaphores.
namespace Kernel {
class Thread;
class ResourceLimit;
enum class ArbitrationType : u32 {
Signal,
WaitIfLessThan,
DecrementAndWaitIfLessThan,
WaitIfLessThanWithTimeout,
DecrementAndWaitIfLessThanWithTimeout,
};
class AddressArbiter final : public Object, public WakeupCallback {
public:
explicit AddressArbiter(KernelSystem& kernel);
~AddressArbiter() override;
std::string GetTypeName() const override {
return "Arbiter";
}
std::string GetName() const override {
return name;
}
static constexpr HandleType HANDLE_TYPE = HandleType::AddressArbiter;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
std::shared_ptr<ResourceLimit> resource_limit;
std::string name; ///< Name of address arbiter object (optional)
Result ArbitrateAddress(std::shared_ptr<Thread> thread, ArbitrationType type, VAddr address,
s32 value, u64 nanoseconds);
class Callback;
private:
KernelSystem& kernel;
/// Puts the thread to wait on the specified arbitration address under this address arbiter.
void WaitThread(std::shared_ptr<Thread> thread, VAddr wait_address);
/// Resume all threads found to be waiting on the address under this address arbiter
u64 ResumeAllThreads(VAddr address);
/// Resume one thread found to be waiting on the address under this address arbiter and return
/// the resumed thread.
bool ResumeHighestPriorityThread(VAddr address);
/// Threads waiting for the address arbiter to be signaled.
std::vector<std::shared_ptr<Thread>> waiting_threads;
std::shared_ptr<Callback> timeout_callback;
void WakeUp(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
std::shared_ptr<WaitObject> object) override;
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int);
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::AddressArbiter)
BOOST_CLASS_EXPORT_KEY(Kernel::AddressArbiter::Callback)
CONSTRUCT_KERNEL_OBJECT(Kernel::AddressArbiter)
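Distilled from the removed implementation above, the wait-style arbitration types reduce to the following standalone decision (conceptual sketch, not part of the diff; names are illustrative).

#include <cstdint>

enum class ArbitrationType {
    Signal,
    WaitIfLessThan,
    DecrementAndWaitIfLessThan,
    WaitIfLessThanWithTimeout,
    DecrementAndWaitIfLessThanWithTimeout,
};

// Returns true when the calling thread should block on the address.
bool ShouldWait(ArbitrationType type, std::int32_t& memory_value, std::int32_t value) {
    switch (type) {
    case ArbitrationType::WaitIfLessThan:
    case ArbitrationType::WaitIfLessThanWithTimeout:
        return memory_value < value;
    case ArbitrationType::DecrementAndWaitIfLessThan:
    case ArbitrationType::DecrementAndWaitIfLessThanWithTimeout:
        if (memory_value < value) {
            --memory_value; // only decremented when the thread actually waits
            return true;
        }
        return false;
    default:
        return false; // Signal wakes waiters instead of blocking
    }
}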

View File

@ -1,63 +0,0 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/global.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/kernel/server_session.h"
SERIALIZE_EXPORT_IMPL(Kernel::ClientPort)
namespace Kernel {
ClientPort::ClientPort(KernelSystem& kernel) : Object(kernel), kernel(kernel) {}
ClientPort::~ClientPort() = default;
Result ClientPort::Connect(std::shared_ptr<ClientSession>* out_client_session) {
// Note: Threads do not wait for the server endpoint to call
// AcceptSession before returning from this call.
R_UNLESS(active_sessions < max_sessions, ResultMaxConnectionsReached);
active_sessions++;
// Create a new session pair, let the created sessions inherit the parent port's HLE handler.
auto [server, client] = kernel.CreateSessionPair(server_port->GetName(), SharedFrom(this));
if (server_port->hle_handler) {
server_port->hle_handler->ClientConnected(server);
} else {
server_port->pending_sessions.push_back(server);
}
// Wake the threads waiting on the ServerPort
server_port->WakeupAllWaitingThreads();
*out_client_session = client;
return ResultSuccess;
}
void ClientPort::ConnectionClosed() {
ASSERT(active_sessions > 0);
--active_sessions;
}
template <class Archive>
void ClientPort::serialize(Archive& ar, const unsigned int) {
ar& boost::serialization::base_object<Object>(*this);
ar& server_port;
ar& max_sessions;
ar& active_sessions;
ar& name;
}
SERIALIZE_IMPL(ClientPort)
} // namespace Kernel

View File

@ -1,73 +0,0 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/result.h"
namespace Kernel {
class ClientSession;
class ClientPort final : public Object {
public:
explicit ClientPort(KernelSystem& kernel);
~ClientPort() override;
friend class ServerPort;
std::string GetTypeName() const override {
return "ClientPort";
}
std::string GetName() const override {
return name;
}
static constexpr HandleType HANDLE_TYPE = HandleType::ClientPort;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
std::shared_ptr<ServerPort> GetServerPort() const {
return server_port;
}
/**
* Creates a new Session pair, adds the created ServerSession to the associated ServerPort's
* list of pending sessions, and signals the ServerPort, causing any threads
* waiting on it to awake.
* @returns ClientSession The client endpoint of the created Session pair, or error code.
*/
Result Connect(std::shared_ptr<ClientSession>* out_client_session);
/**
* Signifies that a previously active connection has been closed,
* decreasing the total number of active connections to this port.
*/
void ConnectionClosed();
private:
KernelSystem& kernel;
std::shared_ptr<ServerPort> server_port; ///< ServerPort associated with this client port.
u32 max_sessions = 0; ///< Maximum number of simultaneous sessions the port can have
u32 active_sessions = 0; ///< Number of currently open sessions to this port
std::string name; ///< Name of client port (optional)
friend class KernelSystem;
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int);
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::ClientPort)
CONSTRUCT_KERNEL_OBJECT(Kernel::ClientPort)

View File

@ -1,67 +0,0 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
#include "core/hle/kernel/thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::ClientSession)
namespace Kernel {
ClientSession::ClientSession(KernelSystem& kernel) : Object(kernel) {}
ClientSession::~ClientSession() {
// This destructor will be called automatically when the last ClientSession handle is closed by
// the emulated application.
// Local references to ServerSession and SessionRequestHandler are necessary to guarantee they
// will be kept alive until after ClientDisconnected() returns.
std::shared_ptr<ServerSession> server = SharedFrom(parent->server);
if (server) {
std::shared_ptr<SessionRequestHandler> hle_handler = server->hle_handler;
if (hle_handler)
hle_handler->ClientDisconnected(server);
// Clean up the list of client threads with pending requests, they are unneeded now that the
// client endpoint is closed.
server->pending_requesting_threads.clear();
server->currently_handling = nullptr;
}
parent->client = nullptr;
if (server) {
// Notify any threads waiting on the ServerSession that the endpoint has been closed. Note
// that this call has to happen after `Session::client` has been set to nullptr to let the
// ServerSession know that the client endpoint has been closed.
server->WakeupAllWaitingThreads();
}
}
Result ClientSession::SendSyncRequest(std::shared_ptr<Thread> thread) {
// Keep ServerSession alive until we're done working with it.
std::shared_ptr<ServerSession> server = SharedFrom(parent->server);
R_UNLESS(server, ResultSessionClosed);
// Signal the server session that new data is available
return server->HandleSyncRequest(std::move(thread));
}
template <class Archive>
void ClientSession::serialize(Archive& ar, const unsigned int) {
ar& boost::serialization::base_object<Object>(*this);
ar& name;
ar& parent;
}
SERIALIZE_IMPL(ClientSession)
} // namespace Kernel

View File

@ -1,60 +0,0 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/result.h"
namespace Kernel {
class Session;
class Thread;
class ClientSession final : public Object {
public:
explicit ClientSession(KernelSystem& kernel);
~ClientSession() override;
friend class KernelSystem;
std::string GetTypeName() const override {
return "ClientSession";
}
std::string GetName() const override {
return name;
}
static constexpr HandleType HANDLE_TYPE = HandleType::ClientSession;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
/**
* Sends an SyncRequest from the current emulated thread.
* @param thread Thread that initiated the request.
* @return Result of the operation.
*/
Result SendSyncRequest(std::shared_ptr<Thread> thread);
std::string name; ///< Name of client port (optional)
/// The parent session, which links to the server endpoint.
std::shared_ptr<Session> parent;
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int);
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::ClientSession)
CONSTRUCT_KERNEL_OBJECT(Kernel::ClientSession)

View File

@ -16,6 +16,7 @@ enum {
OutOfEvents = 15,
OutOfTimers = 16,
OutOfHandles = 19,
ProcessNotFound = 24,
SessionClosedByRemote = 26,
PortNameTooLong = 30,
WrongLockingThread = 31,
@ -109,5 +110,8 @@ constexpr Result ResultTimeout(ErrorDescription::Timeout, ErrorModule::OS,
constexpr Result ResultNoPendingSessions(ErrCodes::NoPendingSessions, ErrorModule::OS,
ErrorSummary::WouldBlock,
ErrorLevel::Permanent); // 0xD8401823
constexpr Result ResultProcessNotFound(ErrCodes::ProcessNotFound, ErrorModule::OS,
ErrorSummary::WrongArgument,
ErrorLevel::Permanent); // 0xD9001818
} // namespace Kernel

View File

@ -1,73 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/string.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/event.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::Event)
namespace Kernel {
Event::Event(KernelSystem& kernel) : WaitObject(kernel) {}
Event::~Event() {
if (resource_limit) {
resource_limit->Release(ResourceLimitType::Event, 1);
}
}
std::shared_ptr<Event> KernelSystem::CreateEvent(ResetType reset_type, std::string name) {
auto event = std::make_shared<Event>(*this);
event->signaled = false;
event->reset_type = reset_type;
event->name = std::move(name);
return event;
}
bool Event::ShouldWait(const Thread* thread) const {
return !signaled;
}
void Event::Acquire(Thread* thread) {
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
if (reset_type == ResetType::OneShot) {
signaled = false;
}
}
void Event::Signal() {
signaled = true;
WakeupAllWaitingThreads();
}
void Event::Clear() {
signaled = false;
}
void Event::WakeupAllWaitingThreads() {
WaitObject::WakeupAllWaitingThreads();
if (reset_type == ResetType::Pulse) {
signaled = false;
}
}
template <class Archive>
void Event::serialize(Archive& ar, const unsigned int) {
ar& boost::serialization::base_object<WaitObject>(*this);
ar& reset_type;
ar& signaled;
ar& name;
ar& resource_limit;
}
SERIALIZE_IMPL(Event)
} // namespace Kernel

View File

@ -1,64 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <boost/serialization/export.hpp>
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/wait_object.h"
namespace Kernel {
class Event final : public WaitObject {
public:
explicit Event(KernelSystem& kernel);
~Event() override;
std::string GetTypeName() const override {
return "Event";
}
std::string GetName() const override {
return name;
}
void SetName(const std::string& name_) {
name = name_;
}
static constexpr HandleType HANDLE_TYPE = HandleType::Event;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
ResetType GetResetType() const {
return reset_type;
}
bool ShouldWait(const Thread* thread) const override;
void Acquire(Thread* thread) override;
void WakeupAllWaitingThreads() override;
void Signal();
void Clear();
std::shared_ptr<ResourceLimit> resource_limit;
private:
ResetType reset_type; ///< Current ResetType
bool signaled; ///< Whether the event has already been signaled
std::string name; ///< Name of event (optional)
friend class KernelSystem;
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int);
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::Event)
CONSTRUCT_KERNEL_OBJECT(Kernel::Event)
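A standalone sketch (not part of the diff) of the reset behaviour visible in the removed Event code above; only the two reset types exercised there are modelled, and the type names are illustrative.

#include <cassert>

enum class ResetType { OneShot, Pulse }; // subset of the kernel enum, for illustration

struct MiniEvent {
    ResetType reset_type;
    bool signaled = false;

    void Signal() { signaled = true; /* then wake all waiters */ }
    void Clear() { signaled = false; }

    // Mirrors Event::Acquire: a OneShot event consumes the signal on acquire.
    void Acquire() {
        if (reset_type == ResetType::OneShot) {
            signaled = false;
        }
    }

    // Mirrors Event::WakeupAllWaitingThreads: a Pulse event clears right after
    // waking everyone, so late waiters block again.
    void AfterWakeupAll() {
        if (reset_type == ResetType::Pulse) {
            signaled = false;
        }
    }
};

int main() {
    MiniEvent oneshot{ResetType::OneShot};
    oneshot.Signal();
    oneshot.Acquire();
    assert(!oneshot.signaled); // consumed by the first acquire

    MiniEvent pulse{ResetType::Pulse};
    pulse.Signal();
    pulse.AfterWakeupAll();
    assert(!pulse.signaled); // cleared immediately after the wakeup pass
    return 0;
}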

View File

@ -1,111 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <utility>
#include <boost/serialization/array.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::HandleTable)
namespace Kernel {
namespace {
constexpr u16 GetSlot(Handle handle) {
return handle >> 15;
}
constexpr u16 GetGeneration(Handle handle) {
return handle & 0x7FFF;
}
} // Anonymous namespace
HandleTable::HandleTable(KernelSystem& kernel) : kernel(kernel) {
next_generation = 1;
Clear();
}
HandleTable::~HandleTable() = default;
Result HandleTable::Create(Handle* out_handle, std::shared_ptr<Object> obj) {
DEBUG_ASSERT(obj != nullptr);
u16 slot = next_free_slot;
R_UNLESS(slot < generations.size(), ResultOutOfHandles);
next_free_slot = generations[slot];
u16 generation = next_generation++;
// Wrap the counter so it fits in the 15 bits dedicated to the generation in the handle.
// CTR-OS doesn't use generation 0, so skip straight to 1.
if (next_generation >= (1 << 15)) {
next_generation = 1;
}
generations[slot] = generation;
objects[slot] = std::move(obj);
*out_handle = generation | (slot << 15);
return ResultSuccess;
}
Result HandleTable::Duplicate(Handle* out_handle, Handle handle) {
std::shared_ptr<Object> object = GetGeneric(handle);
R_UNLESS(object, ResultInvalidHandle);
return Create(out_handle, std::move(object));
}
Result HandleTable::Close(Handle handle) {
R_UNLESS(IsValid(handle), ResultInvalidHandle);
const u16 slot = GetSlot(handle);
objects[slot] = nullptr;
generations[slot] = next_free_slot;
next_free_slot = slot;
return ResultSuccess;
}
bool HandleTable::IsValid(Handle handle) const {
const u16 slot = GetSlot(handle);
const u16 generation = GetGeneration(handle);
return slot < MAX_COUNT && objects[slot] != nullptr && generations[slot] == generation;
}
std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const {
if (handle == CurrentThread) {
return SharedFrom(kernel.GetCurrentThreadManager().GetCurrentThread());
} else if (handle == CurrentProcess) {
return kernel.GetCurrentProcess();
}
if (!IsValid(handle)) {
return nullptr;
}
return objects[GetSlot(handle)];
}
void HandleTable::Clear() {
for (u16 i = 0; i < MAX_COUNT; ++i) {
generations[i] = i + 1;
objects[i] = nullptr;
}
next_free_slot = 0;
}
template <class Archive>
void HandleTable::serialize(Archive& ar, const unsigned int) {
ar& objects;
ar& generations;
ar& next_generation;
ar& next_free_slot;
}
SERIALIZE_IMPL(HandleTable)
} // namespace Kernel

View File

@ -1,129 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <cstddef>
#include <memory>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/result.h"
namespace Kernel {
enum KernelHandle : Handle {
CurrentThread = 0xFFFF8000,
CurrentProcess = 0xFFFF8001,
};
/**
* This class allows the creation of Handles, which are references to objects that can be tested
* for validity and looked up. Here they are used to pass references to kernel objects to/from the
* emulated process. It has been designed so that it follows the same handle format and has
* approximately the same restrictions as the handle manager in the CTR-OS.
*
* Handles contain two sub-fields: a slot index (bits 31:15) and a generation value (bits 14:0).
* The slot index is used to index into the arrays in this class to access the data corresponding
* to the Handle.
*
* To prevent accidental use of a freed Handle whose slot has already been reused, a global counter
* is kept and incremented every time a Handle is created. This is the Handle's "generation". The
* value of the counter is stored into the Handle as well as in the handle table (in the
* "generations" array). When looking up a handle, the Handle's generation must match with the
* value stored on the class, otherwise the Handle is considered invalid.
*
* To find free slots when allocating a Handle without needing to scan the entire object array, the
* generations field of unallocated slots is re-purposed as a linked list of indices to free slots.
* When a Handle is created, an index is popped off the list and used for the new Handle. When it
* is destroyed, it is again pushed onto the list to be re-used by the next allocation. It is
* likely that this allocation strategy differs from the one used in CTR-OS, but this hasn't been
* verified and isn't likely to cause any problems.
*/
class HandleTable final : NonCopyable {
public:
explicit HandleTable(KernelSystem& kernel);
~HandleTable();
/**
* Allocates a handle for the given object.
* @return The created Handle or one of the following errors:
* - `ResultOutOfHandles`: the maximum number of handles has been exceeded.
*/
Result Create(Handle* out_handle, std::shared_ptr<Object> obj);
/**
* Returns a new handle that points to the same object as the passed in handle.
* @return The duplicated Handle or one of the following errors:
* - `ResultInvalidHandle`: an invalid handle was passed in.
* - Any errors returned by `Create()`.
*/
Result Duplicate(Handle* out_handle, Handle handle);
/**
* Closes a handle, removing it from the table and decreasing the object's ref-count.
* @return `ResultSuccess` or one of the following errors:
* - `ResultInvalidHandle`: an invalid handle was passed in.
*/
Result Close(Handle handle);
/// Checks if a handle is valid and points to an existing object.
bool IsValid(Handle handle) const;
/**
* Looks up a handle.
* @return Pointer to the looked-up object, or `nullptr` if the handle is not valid.
*/
std::shared_ptr<Object> GetGeneric(Handle handle) const;
/**
* Looks up a handle while verifying its type.
* @return Pointer to the looked-up object, or `nullptr` if the handle is not valid or its
* type differs from the requested one.
*/
template <class T>
std::shared_ptr<T> Get(Handle handle) const {
return DynamicObjectCast<T>(GetGeneric(handle));
}
/// Closes all handles held in this table.
void Clear();
private:
/**
* This is the maximum limit of handles allowed per process in CTR-OS. It can be further
* reduced by ExHeader values, but this is not emulated here.
*/
static const std::size_t MAX_COUNT = 4096;
/// Stores the Object referenced by the handle or null if the slot is empty.
std::array<std::shared_ptr<Object>, MAX_COUNT> objects;
/**
* The value of `next_generation` when the handle was created, used to check for validity. For
* empty slots, contains the index of the next free slot in the list.
*/
std::array<u16, MAX_COUNT> generations;
/**
* Global counter of the number of created handles. Stored in `generations` when a handle is
* created, and wraps around to 1 when it hits 0x8000.
*/
u16 next_generation;
/// Head of the free slots linked list.
u16 next_free_slot;
KernelSystem& kernel;
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int);
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::HandleTable)
CONSTRUCT_KERNEL_OBJECT(Kernel::HandleTable)
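As a quick illustration of the handle layout documented above (slot index in bits 31:15, generation in bits 14:0), the packing can be written out as a pair of helpers. These are hypothetical; HandleTable performs the same arithmetic inline in Create() and the anonymous GetSlot()/GetGeneration() helpers.

#include <cstdint>

// Hypothetical helpers mirroring HandleTable's handle packing.
constexpr std::uint32_t MakeHandle(std::uint16_t slot, std::uint16_t generation) {
    return static_cast<std::uint32_t>(generation & 0x7FFF) |
           (static_cast<std::uint32_t>(slot) << 15);
}
constexpr std::uint16_t SlotOf(std::uint32_t handle) {
    return static_cast<std::uint16_t>(handle >> 15);
}
constexpr std::uint16_t GenerationOf(std::uint32_t handle) {
    return static_cast<std::uint16_t>(handle & 0x7FFF);
}
static_assert(SlotOf(MakeHandle(3, 42)) == 3 && GenerationOf(MakeHandle(3, 42)) == 42);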

View File

@ -12,12 +12,12 @@
#include "common/assert.h"
#include "common/common_types.h"
#include "core/core.h"
#include "core/hle/kernel/event.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/ipc_debugger/recorder.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
SERIALIZE_EXPORT_IMPL(Kernel::SessionRequestHandler)
SERIALIZE_EXPORT_IMPL(Kernel::SessionRequestHandler::SessionDataBase)
@ -33,15 +33,13 @@ public:
ThreadCallback(std::shared_ptr<HLERequestContext> context_,
std::shared_ptr<HLERequestContext::WakeupCallback> callback_)
: callback(std::move(callback_)), context(std::move(context_)) {}
void WakeUp(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
std::shared_ptr<WaitObject> object) {
ASSERT(thread->status == ThreadStatus::WaitHleEvent);
void WakeUp(ThreadWakeupReason reason, KThread* thread, KSynchronizationObject* object) {
ASSERT(thread->m_status == ThreadStatus::WaitHleEvent);
if (callback) {
callback->WakeUp(thread, *context, reason);
}
auto process = thread->owner_process.lock();
ASSERT(process);
Process* process = thread->GetOwner();
// We must copy the entire command buffer *plus* the entire static buffers area, since
// the translation might need to read from it in order to retrieve the StaticBuffer
@ -70,16 +68,16 @@ private:
friend class boost::serialization::access;
};
SessionRequestHandler::SessionInfo::SessionInfo(std::shared_ptr<ServerSession> session,
SessionRequestHandler::SessionInfo::SessionInfo(KServerSession* session_,
std::unique_ptr<SessionDataBase> data)
: session(std::move(session)), data(std::move(data)) {}
: session(session_), data(std::move(data)) {}
void SessionRequestHandler::ClientConnected(std::shared_ptr<ServerSession> server_session) {
void SessionRequestHandler::ClientConnected(KServerSession* server_session) {
server_session->SetHleHandler(shared_from_this());
connected_sessions.emplace_back(std::move(server_session), MakeSessionData());
connected_sessions.emplace_back(server_session, MakeSessionData());
}
void SessionRequestHandler::ClientDisconnected(std::shared_ptr<ServerSession> server_session) {
void SessionRequestHandler::ClientDisconnected(KServerSession* server_session) {
server_session->SetHleHandler(nullptr);
connected_sessions.erase(
std::remove_if(connected_sessions.begin(), connected_sessions.end(),
@ -104,40 +102,46 @@ void SessionRequestHandler::SessionInfo::serialize(Archive& ar, const unsigned i
}
SERIALIZE_IMPL(SessionRequestHandler::SessionInfo)
std::shared_ptr<Event> HLERequestContext::SleepClientThread(
const std::string& reason, std::chrono::nanoseconds timeout,
std::shared_ptr<WakeupCallback> callback) {
KEvent* HLERequestContext::SleepClientThread(const std::string& reason,
std::chrono::nanoseconds timeout,
std::shared_ptr<WakeupCallback> callback) {
// Put the client thread to sleep until the wait event is signaled or the timeout expires.
thread->wakeup_callback = std::make_shared<ThreadCallback>(shared_from_this(), callback);
thread->m_wakeup_callback = std::make_shared<ThreadCallback>(shared_from_this(), callback);
auto event = kernel.CreateEvent(Kernel::ResetType::OneShot, "HLE Pause Event: " + reason);
thread->status = ThreadStatus::WaitHleEvent;
thread->wait_objects = {event};
// Create pause event.
auto* event = KEvent::Create(kernel);
event->Initialize(nullptr, ResetType::OneShot);
event->SetName("HLE Pause Event: " + reason);
KEvent::Register(kernel, event);
// Add the event to the list of objects the thread is waiting for.
thread->m_status = ThreadStatus::WaitHleEvent;
thread->m_wait_objects = {event};
event->AddWaitingThread(thread);
if (timeout.count() > 0)
if (timeout.count() > 0) {
thread->WakeAfterDelay(timeout.count());
}
return event;
}
HLERequestContext::HLERequestContext() : kernel(Core::Global<KernelSystem>()) {}
HLERequestContext::HLERequestContext(KernelSystem& kernel, std::shared_ptr<ServerSession> session,
std::shared_ptr<Thread> thread)
: kernel(kernel), session(std::move(session)), thread(thread) {
HLERequestContext::HLERequestContext(KernelSystem& kernel, KServerSession* session, KThread* thread)
: kernel(kernel), session(session), thread(thread) {
cmd_buf[0] = 0;
}
HLERequestContext::~HLERequestContext() = default;
std::shared_ptr<Object> HLERequestContext::GetIncomingHandle(u32 id_from_cmdbuf) const {
KAutoObject* HLERequestContext::GetIncomingHandle(u32 id_from_cmdbuf) const {
ASSERT(id_from_cmdbuf < request_handles.size());
return request_handles[id_from_cmdbuf];
}
u32 HLERequestContext::AddOutgoingHandle(std::shared_ptr<Object> object) {
request_handles.push_back(std::move(object));
u32 HLERequestContext::AddOutgoingHandle(KAutoObject* object) {
request_handles.push_back(object);
return static_cast<u32>(request_handles.size() - 1);
}
@ -154,8 +158,7 @@ void HLERequestContext::AddStaticBuffer(u8 buffer_id, std::vector<u8> data) {
}
Result HLERequestContext::PopulateFromIncomingCommandBuffer(const u32_le* src_cmdbuf,
std::shared_ptr<Process> src_process_) {
auto& src_process = *src_process_;
Process* src_process) {
IPC::Header header{src_cmdbuf[0]};
std::size_t untranslated_size = 1u + header.normal_params_size;
@ -179,25 +182,32 @@ Result HLERequestContext::PopulateFromIncomingCommandBuffer(const u32_le* src_cm
switch (IPC::GetDescriptorType(descriptor)) {
case IPC::DescriptorType::CopyHandle:
case IPC::DescriptorType::MoveHandle: {
u32 num_handles = IPC::HandleNumberFromDesc(descriptor);
const u32 num_handles = IPC::HandleNumberFromDesc(descriptor);
auto& src_handle_table = src_process->handle_table;
ASSERT(i + num_handles <= command_size); // TODO(yuriks): Return error
for (u32 j = 0; j < num_handles; ++j) {
Handle handle = src_cmdbuf[i];
std::shared_ptr<Object> object = nullptr;
if (handle != 0) {
object = src_process.handle_table.GetGeneric(handle);
ASSERT(object != nullptr); // TODO(yuriks): Return error
if (descriptor == IPC::DescriptorType::MoveHandle) {
src_process.handle_table.Close(handle);
}
const Handle handle = src_cmdbuf[i];
if (!handle) {
cmd_buf[i++] = AddOutgoingHandle(nullptr);
continue;
}
cmd_buf[i++] = AddOutgoingHandle(std::move(object));
// Get object from the handle table.
KScopedAutoObject object =
src_handle_table.GetObjectForIpcWithoutPseudoHandle(handle);
ASSERT(object.IsNotNull());
// If we are moving, remove the old handle.
if (descriptor == IPC::DescriptorType::MoveHandle) {
src_handle_table.Remove(handle);
}
cmd_buf[i++] = AddOutgoingHandle(object.GetPointerUnsafe());
}
break;
}
case IPC::DescriptorType::CallingPid: {
cmd_buf[i++] = src_process.process_id;
cmd_buf[i++] = src_process->process_id;
break;
}
case IPC::DescriptorType::StaticBuffer: {
@ -206,7 +216,7 @@ Result HLERequestContext::PopulateFromIncomingCommandBuffer(const u32_le* src_cm
// Copy the input buffer into our own vector and store it.
std::vector<u8> data(buffer_info.size);
kernel.memory.ReadBlock(src_process, source_address, data.data(), data.size());
kernel.memory.ReadBlock(*src_process, source_address, data.data(), data.size());
AddStaticBuffer(buffer_info.buffer_id, std::move(data));
cmd_buf[i++] = source_address;
@ -214,7 +224,7 @@ Result HLERequestContext::PopulateFromIncomingCommandBuffer(const u32_le* src_cm
}
case IPC::DescriptorType::MappedBuffer: {
u32 next_id = static_cast<u32>(request_mapped_buffers.size());
request_mapped_buffers.emplace_back(kernel.memory, src_process_, descriptor,
request_mapped_buffers.emplace_back(kernel.memory, src_process, descriptor,
src_cmdbuf[i], next_id);
cmd_buf[i++] = next_id;
break;
@ -259,14 +269,13 @@ Result HLERequestContext::WriteToOutgoingCommandBuffer(u32_le* dst_cmdbuf,
case IPC::DescriptorType::CopyHandle:
case IPC::DescriptorType::MoveHandle: {
// HLE services don't use handles, so we treat both CopyHandle and MoveHandle equally
u32 num_handles = IPC::HandleNumberFromDesc(descriptor);
const u32 num_handles = IPC::HandleNumberFromDesc(descriptor);
ASSERT(i + num_handles <= command_size);
for (u32 j = 0; j < num_handles; ++j) {
std::shared_ptr<Object> object = GetIncomingHandle(cmd_buf[i]);
KAutoObject* object = GetIncomingHandle(cmd_buf[i]);
Handle handle = 0;
if (object != nullptr) {
// TODO(yuriks): Figure out the proper error handling for if this fails
R_ASSERT(dst_process.handle_table.Create(std::addressof(handle), object));
dst_process.handle_table.Add(std::addressof(handle), object);
}
dst_cmdbuf[i++] = handle;
}
@ -327,7 +336,7 @@ void HLERequestContext::serialize(Archive& ar, const unsigned int) {
ar& cmd_buf;
ar& session;
ar& thread;
ar& request_handles;
// ar& request_handles;
ar& static_buffers;
ar& request_mapped_buffers;
}
@ -335,8 +344,8 @@ SERIALIZE_IMPL(HLERequestContext)
MappedBuffer::MappedBuffer() : memory(&Core::Global<Core::System>().Memory()) {}
MappedBuffer::MappedBuffer(Memory::MemorySystem& memory, std::shared_ptr<Process> process,
u32 descriptor, VAddr address, u32 id)
MappedBuffer::MappedBuffer(Memory::MemorySystem& memory, Process* process, u32 descriptor,
VAddr address, u32 id)
: memory(&memory), id(id), address(address), process(std::move(process)) {
IPC::MappedBufferDescInfo desc{descriptor};
size = desc.size;
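Since SleepClientThread now hands back a raw KEvent* instead of a shared_ptr, an HLE handler that defers its reply would look roughly like the sketch below. DemoWakeup and DelayedReply are hypothetical names, std::chrono::milliseconds(500) is an arbitrary timeout, and KEvent is assumed to expose a Signal() equivalent to the old Event::Signal().

#include <chrono>
#include <memory>
#include "core/hle/kernel/hle_ipc.h"

// Hypothetical callback: builds the deferred response once the pause event is
// signaled or the timeout fires.
class DemoWakeup final : public Kernel::HLERequestContext::WakeupCallback {
public:
    void WakeUp(Kernel::KThread* thread, Kernel::HLERequestContext& ctx,
                Kernel::ThreadWakeupReason reason) override {
        // `reason` distinguishes a real signal from ThreadWakeupReason::Timeout.
    }
};

// Hypothetical service command handler.
void DelayedReply(Kernel::HLERequestContext& ctx) {
    Kernel::KEvent* pause_event = ctx.SleepClientThread(
        "demo_srv::DelayedReply", std::chrono::milliseconds(500),
        std::make_shared<DemoWakeup>());
    // Keep pause_event around and signal it when the asynchronous work completes;
    // otherwise the thread resumes through the timeout path.
    (void)pause_event;
}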

View File

@ -17,8 +17,8 @@
#include "common/serialization/boost_small_vector.hpp"
#include "common/swap.h"
#include "core/hle/ipc.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_thread.h"
namespace Service {
class ServiceFrameworkBase;
@ -32,8 +32,8 @@ namespace Kernel {
class HandleTable;
class Process;
class Thread;
class Event;
class KThread;
class KEvent;
class HLERequestContext;
class KernelSystem;
@ -58,14 +58,14 @@ public:
* associated ServerSession alive for the duration of the connection.
* @param server_session Owning pointer to the ServerSession associated with the connection.
*/
virtual void ClientConnected(std::shared_ptr<ServerSession> server_session);
virtual void ClientConnected(KServerSession* server_session);
/**
* Signals that a client has just disconnected from this HLE handler and releases the
* associated ServerSession.
* @param server_session ServerSession associated with the connection.
*/
virtual void ClientDisconnected(std::shared_ptr<ServerSession> server_session);
virtual void ClientDisconnected(KServerSession* server_session);
/// Empty placeholder structure for services with no per-session data. The session data classes
/// in each service must inherit from this.
@ -79,9 +79,9 @@ public:
};
struct SessionInfo {
SessionInfo(std::shared_ptr<ServerSession> session, std::unique_ptr<SessionDataBase> data);
SessionInfo(KServerSession* session, std::unique_ptr<SessionDataBase> data);
std::shared_ptr<ServerSession> session;
KServerSession* session;
std::unique_ptr<SessionDataBase> data;
private:
@ -97,7 +97,7 @@ protected:
/// Returns the session data associated with the server session.
template <typename T>
T* GetSessionData(std::shared_ptr<ServerSession> session) {
T* GetSessionData(KServerSession* session) {
static_assert(std::is_base_of<SessionDataBase, T>(),
"T is not a subclass of SessionDataBase");
auto itr = std::find_if(connected_sessions.begin(), connected_sessions.end(),
@ -120,8 +120,8 @@ private:
class MappedBuffer {
public:
MappedBuffer(Memory::MemorySystem& memory, std::shared_ptr<Process> process, u32 descriptor,
VAddr address, u32 id);
MappedBuffer(Memory::MemorySystem& memory, Process* process, u32 descriptor, VAddr address,
u32 id);
// interface for service
void Read(void* dest_buffer, std::size_t offset, std::size_t size);
@ -144,7 +144,7 @@ private:
Memory::MemorySystem* memory;
u32 id;
VAddr address;
std::shared_ptr<Process> process;
Process* process;
u32 size;
IPC::MappedBufferPermissions perms;
@ -192,8 +192,7 @@ private:
*/
class HLERequestContext : public std::enable_shared_from_this<HLERequestContext> {
public:
HLERequestContext(KernelSystem& kernel, std::shared_ptr<ServerSession> session,
std::shared_ptr<Thread> thread);
explicit HLERequestContext(KernelSystem& kernel, KServerSession* session, KThread* thread);
~HLERequestContext();
/// Returns a pointer to the IPC command buffer for this request.
@ -210,21 +209,21 @@ public:
* Returns the session through which this request was made. This can be used as a map key to
* access per-client data on services.
*/
std::shared_ptr<ServerSession> Session() const {
KServerSession* Session() const {
return session;
}
/**
* Returns the client thread that made the service request.
*/
std::shared_ptr<Thread> ClientThread() const {
KThread* ClientThread() const {
return thread;
}
class WakeupCallback {
public:
virtual ~WakeupCallback() = default;
virtual void WakeUp(std::shared_ptr<Thread> thread, HLERequestContext& context,
virtual void WakeUp(KThread* thread, HLERequestContext& context,
ThreadWakeupReason reason) = 0;
private:
@ -244,9 +243,8 @@ public:
* was called.
* @returns Event that when signaled will resume the thread and call the callback function.
*/
std::shared_ptr<Event> SleepClientThread(const std::string& reason,
std::chrono::nanoseconds timeout,
std::shared_ptr<WakeupCallback> callback);
KEvent* SleepClientThread(const std::string& reason, std::chrono::nanoseconds timeout,
std::shared_ptr<WakeupCallback> callback);
private:
template <typename ResultFunctor>
@ -257,7 +255,7 @@ private:
future = std::move(fut);
}
void WakeUp(std::shared_ptr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx,
void WakeUp(Kernel::KThread* thread, Kernel::HLERequestContext& ctx,
Kernel::ThreadWakeupReason reason) {
functor(ctx);
}
@ -322,13 +320,13 @@ public:
* Resolves an object id from the request command buffer into a pointer to an object. See the
* "HLE handle protocol" section in the class documentation for more details.
*/
std::shared_ptr<Object> GetIncomingHandle(u32 id_from_cmdbuf) const;
KAutoObject* GetIncomingHandle(u32 id_from_cmdbuf) const;
/**
* Adds an outgoing object to the response, returning the id which should be used to reference
* it. See the "HLE handle protocol" section in the class documentation for more details.
*/
u32 AddOutgoingHandle(std::shared_ptr<Object> object);
u32 AddOutgoingHandle(KAutoObject* object);
/**
* Discards all Objects from the context, invalidating all ids. This may be called after reading
@ -356,8 +354,8 @@ public:
MappedBuffer& GetMappedBuffer(u32 id_from_cmdbuf);
/// Populates this context with data from the requesting process/thread.
Result PopulateFromIncomingCommandBuffer(const u32_le* src_cmdbuf,
std::shared_ptr<Process> src_process);
Result PopulateFromIncomingCommandBuffer(const u32_le* src_cmdbuf, Process* src_process);
/// Writes data from this context back to the requesting process/thread.
Result WriteToOutgoingCommandBuffer(u32_le* dst_cmdbuf, Process& dst_process) const;
@ -370,10 +368,10 @@ public:
private:
KernelSystem& kernel;
std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;
std::shared_ptr<ServerSession> session;
std::shared_ptr<Thread> thread;
KServerSession* session;
KThread* thread;
// TODO(yuriks): Check common usage of this and optimize size accordingly
boost::container::small_vector<std::shared_ptr<Object>, 8> request_handles;
boost::container::small_vector<KAutoObject*, 8> request_handles;
// The static buffers will be created when the IPC request is translated.
std::array<std::vector<u8>, IPC::MAX_STATIC_BUFFERS> static_buffers;
// The mapped buffers will be created when the IPC request is translated

View File

@ -9,13 +9,13 @@
#include "common/memory_ref.h"
#include "core/core.h"
#include "core/hle/ipc.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/ipc.h"
#include "core/hle/kernel/ipc_debugger/recorder.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
#include "core/memory.h"
SERIALIZE_EXPORT_IMPL(Kernel::MappedBufferContext)
@ -23,12 +23,11 @@ SERIALIZE_EXPORT_IMPL(Kernel::MappedBufferContext)
namespace Kernel {
Result TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySystem& memory,
std::shared_ptr<Thread> src_thread,
std::shared_ptr<Thread> dst_thread, VAddr src_address,
KThread* src_thread, KThread* dst_thread, VAddr src_address,
VAddr dst_address,
std::vector<MappedBufferContext>& mapped_buffer_context, bool reply) {
auto src_process = src_thread->owner_process.lock();
auto dst_process = dst_thread->owner_process.lock();
auto src_process = src_thread->GetOwner();
auto dst_process = dst_thread->GetOwner();
ASSERT(src_process && dst_process);
IPC::Header header;
@ -69,30 +68,34 @@ Result TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySystem
for (u32 j = 0; j < num_handles; ++j) {
Handle handle = cmd_buf[i];
std::shared_ptr<Object> object = nullptr;
// Perform pseudo-handle detection here because by the time this function is called,
// the current thread and process are no longer the ones which created this IPC
// request, but the ones that are handling it.
if (handle == CurrentThread) {
object = src_thread;
} else if (handle == CurrentProcess) {
object = src_process;
} else if (handle != 0) {
object = src_process->handle_table.GetGeneric(handle);
if (descriptor == IPC::DescriptorType::MoveHandle) {
src_process->handle_table.Close(handle);
KScopedAutoObject object = [&]() -> KScopedAutoObject<KAutoObject> {
if (handle == CurrentThread) {
return src_thread;
} else if (handle == CurrentProcess) {
return src_process;
} else if (handle != 0) {
auto obj = src_process->handle_table.GetObject(handle);
if (descriptor == IPC::DescriptorType::MoveHandle) {
src_process->handle_table.Remove(handle);
}
return obj;
}
}
return nullptr;
}();
if (object == nullptr) {
if (object.IsNull()) {
// Note: The real kernel sets invalid translated handles to 0 in the target
// command buffer.
cmd_buf[i++] = 0;
continue;
}
R_ASSERT(dst_process->handle_table.Create(std::addressof(cmd_buf[i++]),
std::move(object)));
Handle dst_handle = 0;
dst_process->handle_table.Add(&dst_handle, object.GetPointerUnsafe());
cmd_buf[i++] = dst_handle;
}
break;
}

View File

@ -4,12 +4,11 @@
#pragma once
#include <memory>
#include <vector>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "common/memory_ref.h"
#include "core/hle/ipc.h"
#include "core/hle/kernel/thread.h"
namespace Memory {
class MemorySystem;
@ -18,6 +17,7 @@ class MemorySystem;
namespace Kernel {
class KernelSystem;
class KThread;
struct MappedBufferContext {
IPC::MappedBufferPermissions permissions;
@ -35,8 +35,7 @@ private:
/// Performs IPC command buffer translation from one process to another.
Result TranslateCommandBuffer(KernelSystem& system, Memory::MemorySystem& memory,
std::shared_ptr<Thread> src_thread,
std::shared_ptr<Thread> dst_thread, VAddr src_address,
KThread* src_thread, KThread* dst_thread, VAddr src_address,
VAddr dst_address,
std::vector<MappedBufferContext>& mapped_buffer_context, bool reply);
} // namespace Kernel

View File

@ -4,73 +4,80 @@
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
#include "common/scope_exit.h"
#include "core/hle/kernel/ipc_debugger/recorder.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/service/service.h"
namespace IPCDebugger {
namespace {
ObjectInfo GetObjectInfo(const Kernel::Object* object) {
ObjectInfo GetObjectInfo(const Kernel::KAutoObject* object) {
if (object == nullptr) {
return {};
}
return {object->GetTypeName(), object->GetName(), static_cast<int>(object->GetObjectId())};
return {object->GetTypeName(), /*object->GetName()*/ "KAutoObject",
/*static_cast<int>(object->GetObjectId())*/ 1};
}
ObjectInfo GetObjectInfo(const Kernel::Thread* thread) {
ObjectInfo GetObjectInfo(const Kernel::KThread* thread) {
if (thread == nullptr) {
return {};
}
return {thread->GetTypeName(), thread->GetName(), static_cast<int>(thread->GetThreadId())};
return {thread->GetTypeName(), /*thread->GetName()*/ "KThread",
/*static_cast<int>(object->GetObjectId())*/ 1};
}
ObjectInfo GetObjectInfo(const Kernel::Process* process) {
if (process == nullptr) {
return {};
}
return {process->GetTypeName(), process->GetName(), static_cast<int>(process->process_id)};
return {process->GetTypeName(), /*process->GetName()*/ "KProcess",
static_cast<int>(process->process_id)};
}
} // namespace
} // Anonymous namespace
Recorder::Recorder() = default;
Recorder::~Recorder() = default;
bool Recorder::IsEnabled() const {
return enabled.load(std::memory_order_relaxed);
}
void Recorder::RegisterRequest(const std::shared_ptr<Kernel::ClientSession>& client_session,
const std::shared_ptr<Kernel::Thread>& client_thread) {
void Recorder::RegisterRequest(const Kernel::KClientSession* client_session,
const Kernel::KThread* client_thread) {
const u32 thread_id = client_thread->GetThreadId();
const RequestRecord record = {
.id = ++record_count,
.status = RequestStatus::Sent,
.client_process = GetObjectInfo(client_thread->GetOwner()),
.client_thread = GetObjectInfo(client_thread),
.client_session = GetObjectInfo(client_session),
.client_port = GetObjectInfo(client_session->GetParent()->GetParent()),
.server_process = {},
.server_thread = {},
.server_session = GetObjectInfo(&client_session->GetParent()->GetServerSession()),
};
if (auto owner_process = client_thread->owner_process.lock()) {
RequestRecord record = {/* id */ ++record_count,
/* status */ RequestStatus::Sent,
/* client_process */ GetObjectInfo(owner_process.get()),
/* client_thread */ GetObjectInfo(client_thread.get()),
/* client_session */ GetObjectInfo(client_session.get()),
/* client_port */ GetObjectInfo(client_session->parent->port.get()),
/* server_process */ {},
/* server_thread */ {},
/* server_session */ GetObjectInfo(client_session->parent->server)};
record_map.insert_or_assign(thread_id, std::make_unique<RequestRecord>(record));
client_session_map.insert_or_assign(thread_id, client_session);
InvokeCallbacks(record);
}
record_map.insert_or_assign(thread_id, std::make_unique<RequestRecord>(record));
client_session_map.insert_or_assign(thread_id, client_session);
InvokeCallbacks(record);
}
void Recorder::SetRequestInfo(const std::shared_ptr<Kernel::Thread>& client_thread,
void Recorder::SetRequestInfo(const Kernel::KThread* client_thread,
std::vector<u32> untranslated_cmdbuf,
std::vector<u32> translated_cmdbuf,
const std::shared_ptr<Kernel::Thread>& server_thread) {
const Kernel::KThread* server_thread) {
const u32 thread_id = client_thread->GetThreadId();
if (!record_map.count(thread_id)) {
// This is possible when the recorder is enabled after application started
@ -84,30 +91,34 @@ void Recorder::SetRequestInfo(const std::shared_ptr<Kernel::Thread>& client_thre
record.translated_request_cmdbuf = std::move(translated_cmdbuf);
if (server_thread) {
if (auto owner_process = server_thread->owner_process.lock()) {
record.server_process = GetObjectInfo(owner_process.get());
}
record.server_thread = GetObjectInfo(server_thread.get());
record.server_process = GetObjectInfo(server_thread->GetOwner());
record.server_thread = GetObjectInfo(server_thread);
} else {
record.is_hle = true;
}
// Function name
ASSERT_MSG(client_session_map.count(thread_id), "Client session is missing");
const auto& client_session = client_session_map[thread_id];
if (client_session->parent->port &&
client_session->parent->port->GetServerPort()->hle_handler) {
const auto client_session = client_session_map[thread_id];
record.function_name = std::dynamic_pointer_cast<Service::ServiceFrameworkBase>(
client_session->parent->port->GetServerPort()->hle_handler)
SCOPE_EXIT({
client_session_map.erase(thread_id);
InvokeCallbacks(record);
});
auto port = client_session->GetParent()->GetParent();
if (!port) {
return;
}
auto hle_handler = port->GetParent()->GetServerPort().GetHleHandler();
if (hle_handler) {
record.function_name = std::dynamic_pointer_cast<Service::ServiceFrameworkBase>(hle_handler)
->GetFunctionName({record.untranslated_request_cmdbuf[0]});
}
client_session_map.erase(thread_id);
InvokeCallbacks(record);
}
void Recorder::SetReplyInfo(const std::shared_ptr<Kernel::Thread>& client_thread,
void Recorder::SetReplyInfo(const Kernel::KThread* client_thread,
std::vector<u32> untranslated_cmdbuf,
std::vector<u32> translated_cmdbuf) {
const u32 thread_id = client_thread->GetThreadId();
@ -129,7 +140,7 @@ void Recorder::SetReplyInfo(const std::shared_ptr<Kernel::Thread>& client_thread
record_map.erase(thread_id);
}
void Recorder::SetHLEUnimplemented(const std::shared_ptr<Kernel::Thread>& client_thread) {
void Recorder::SetHLEUnimplemented(const Kernel::KThread* client_thread) {
const u32 thread_id = client_thread->GetThreadId();
if (!record_map.count(thread_id)) {
// This is possible when the recorder is enabled after the application has started

View File

@ -15,8 +15,9 @@
#include "common/common_types.h"
namespace Kernel {
class ClientSession;
class Thread;
class KClientSession;
class KThread;
enum class ClassTokenType : u32;
} // namespace Kernel
namespace IPCDebugger {
@ -27,7 +28,7 @@ namespace IPCDebugger {
struct ObjectInfo {
std::string type;
std::string name;
int id = -1;
int id;
};
/**
@ -80,28 +81,28 @@ public:
/**
* Registers a request with the recorder. The request is then associated with the client thread.
*/
void RegisterRequest(const std::shared_ptr<Kernel::ClientSession>& client_session,
const std::shared_ptr<Kernel::Thread>& client_thread);
void RegisterRequest(const Kernel::KClientSession* client_session,
const Kernel::KThread* client_thread);
/**
* Sets the request information of the request record associated with the client thread.
* When the server thread is empty, the request will be considered HLE.
*/
void SetRequestInfo(const std::shared_ptr<Kernel::Thread>& client_thread,
std::vector<u32> untranslated_cmdbuf, std::vector<u32> translated_cmdbuf,
const std::shared_ptr<Kernel::Thread>& server_thread = {});
void SetRequestInfo(const Kernel::KThread* client_thread, std::vector<u32> untranslated_cmdbuf,
std::vector<u32> translated_cmdbuf,
const Kernel::KThread* server_thread = nullptr);
/**
* Sets the reply information of the request record associated with the client thread.
* The request is then unlinked from the client thread.
*/
void SetReplyInfo(const std::shared_ptr<Kernel::Thread>& client_thread,
std::vector<u32> untranslated_cmdbuf, std::vector<u32> translated_cmdbuf);
void SetReplyInfo(const Kernel::KThread* client_thread, std::vector<u32> untranslated_cmdbuf,
std::vector<u32> translated_cmdbuf);
/**
* Set the status of a record to HLEUnimplemented.
*/
void SetHLEUnimplemented(const std::shared_ptr<Kernel::Thread>& client_thread);
void SetHLEUnimplemented(const Kernel::KThread* client_thread);
/**
* Set the status of the debugger (enabled/disabled).
@ -118,7 +119,7 @@ private:
int record_count{};
// Temporary client session map for function name handling
std::unordered_map<u32, std::shared_ptr<Kernel::ClientSession>> client_session_map;
std::unordered_map<u32, const Kernel::KClientSession*> client_session_map;
std::atomic_bool enabled{false};
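Taken together, the three calls above describe one request's lifecycle under the new raw-pointer API. A hedged sketch of the call order; DemoRecord, the placeholder command words, and all objects passed in are hypothetical.

#include <vector>
#include "core/hle/kernel/ipc_debugger/recorder.h"

// Hypothetical driver showing the per-request call order.
void DemoRecord(IPCDebugger::Recorder& recorder, const Kernel::KClientSession* session,
                const Kernel::KThread* client, const Kernel::KThread* server) {
    if (!recorder.IsEnabled()) {
        return;
    }
    // Creates the record with status Sent and links it to the client thread.
    recorder.RegisterRequest(session, client);
    // Attaches the request command buffers (untranslated, translated); a null
    // server thread would mark the request as HLE instead.
    recorder.SetRequestInfo(client, {0x00010042}, {0x00010042}, server);
    // Stores the reply buffers and unlinks the record from the client thread.
    recorder.SetReplyInfo(client, {0x00010040}, {0x00010040});
}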

View File

@ -0,0 +1,226 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/vector.hpp>
#include "common/archives.h"
#include "common/logging/log.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_auto_object_container.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/memory.h"
namespace Kernel {
class KAddressArbiter::Callback : public WakeupCallback {
public:
explicit Callback(KAddressArbiter* _parent) : parent(_parent) {}
KAddressArbiter* parent;
void WakeUp(ThreadWakeupReason reason, KThread* thread,
KSynchronizationObject* object) override {
parent->WakeUp(reason, thread, object);
}
private:
template <class Archive>
void serialize(Archive& ar, const unsigned int) {
ar& boost::serialization::base_object<WakeupCallback>(*this);
}
friend class boost::serialization::access;
};
KAddressArbiter::KAddressArbiter(KernelSystem& kernel)
: KAutoObjectWithSlabHeapAndContainer{kernel},
m_timeout_callback(std::make_shared<Callback>(this)) {}
KAddressArbiter::~KAddressArbiter() = default;
void KAddressArbiter::Initialize(Process* owner) {
m_owner = owner;
m_owner->Open();
}
void KAddressArbiter::PostDestroy(uintptr_t arg) {
Process* owner = reinterpret_cast<Process*>(arg);
if (owner != nullptr) {
owner->ReleaseResource(ResourceLimitType::AddressArbiter, 1);
owner->Close();
}
}
void KAddressArbiter::WaitThread(KThread* thread, VAddr wait_address) {
thread->m_wait_address = wait_address;
thread->m_status = ThreadStatus::WaitArb;
m_waiting_threads.emplace_back(thread);
}
u64 KAddressArbiter::ResumeAllThreads(VAddr address) {
// Determine which threads are waiting on this address, those should be woken up.
auto itr = std::stable_partition(m_waiting_threads.begin(), m_waiting_threads.end(),
[address](KThread* thread) {
ASSERT_MSG(thread->GetStatus() == ThreadStatus::WaitArb,
"Inconsistent AddressArbiter state");
return thread->m_wait_address != address;
});
// Wake up all the found threads
const u64 num_threads = std::distance(itr, m_waiting_threads.end());
std::for_each(itr, m_waiting_threads.end(), [](KThread* thread) { thread->ResumeFromWait(); });
// Remove the woken up threads from the wait list.
m_waiting_threads.erase(itr, m_waiting_threads.end());
return num_threads;
}
bool KAddressArbiter::ResumeHighestPriorityThread(VAddr address) {
// Determine which threads are waiting on this address, those should be considered for wakeup.
auto matches_start = std::stable_partition(
m_waiting_threads.begin(), m_waiting_threads.end(), [address](KThread* thread) {
ASSERT_MSG(thread->GetStatus() == ThreadStatus::WaitArb,
"Inconsistent AddressArbiter state");
return thread->m_wait_address != address;
});
// Iterate through threads, find highest priority thread that is waiting to be arbitrated.
// Note: The real kernel will pick the first thread in the list if more than one has the
// same highest priority value. Lower priority values mean higher priority.
auto itr =
std::min_element(matches_start, m_waiting_threads.end(), [](KThread* lhs, KThread* rhs) {
return lhs->GetCurrentPriority() < rhs->GetCurrentPriority();
});
if (itr == m_waiting_threads.end()) {
return false;
}
auto thread = *itr;
thread->ResumeFromWait();
m_waiting_threads.erase(itr);
return true;
}
void KAddressArbiter::WakeUp(ThreadWakeupReason reason, KThread* thread,
KSynchronizationObject* object) {
ASSERT(reason == ThreadWakeupReason::Timeout);
// Remove the newly-awakened thread from the Arbiter's waiting list.
m_waiting_threads.erase(std::remove(m_waiting_threads.begin(), m_waiting_threads.end(), thread),
m_waiting_threads.end());
};
Result KAddressArbiter::ArbitrateAddress(KThread* thread, ArbitrationType type, VAddr address,
s32 value, u64 nanoseconds) {
switch (type) {
// Signal thread(s) waiting for arbitrate address...
case ArbitrationType::Signal: {
u64 num_threads{};
// Negative value means resume all threads
if (value < 0) {
num_threads = ResumeAllThreads(address);
} else {
// Resume first N threads
for (s32 i = 0; i < value; i++) {
num_threads += ResumeHighestPriorityThread(address);
}
}
// Prevents lag from low priority threads that spam svcArbitrateAddress and wake no threads
// The tick count is taken directly from the official HOS kernel. The priority value is one less
// than the official kernel's, as the affected FMV threads don't meet the priority threshold of 50.
// TODO: Revisit this when scheduler is rewritten and adjust if there isn't a problem there.
auto* core = m_kernel.current_cpu;
if (num_threads == 0 && core->GetID() == 0 && thread->GetCurrentPriority() >= 49) {
core->GetTimer().AddTicks(1614u);
}
break;
}
// Wait current thread (acquire the arbiter)...
case ArbitrationType::WaitIfLessThan:
if ((s32)m_kernel.memory.Read32(address) < value) {
WaitThread(thread, address);
}
break;
case ArbitrationType::WaitIfLessThanWithTimeout:
if ((s32)m_kernel.memory.Read32(address) < value) {
thread->SetWakeupCallback(m_timeout_callback);
thread->WakeAfterDelay(nanoseconds);
WaitThread(thread, address);
}
break;
case ArbitrationType::DecrementAndWaitIfLessThan: {
s32 memory_value = m_kernel.memory.Read32(address);
if (memory_value < value) {
// Only change the memory value if the thread should wait
m_kernel.memory.Write32(address, (s32)memory_value - 1);
WaitThread(thread, address);
}
break;
}
case ArbitrationType::DecrementAndWaitIfLessThanWithTimeout: {
s32 memory_value = m_kernel.memory.Read32(address);
if (memory_value < value) {
// Only change the memory value if the thread should wait
m_kernel.memory.Write32(address, (s32)memory_value - 1);
thread->SetWakeupCallback(m_timeout_callback);
thread->WakeAfterDelay(nanoseconds);
WaitThread(thread, address);
}
break;
}
default:
LOG_ERROR(Kernel, "unknown type={}", type);
return ResultInvalidEnumValueFnd;
}
// The calls that use a timeout seem to always return a Timeout error even if they did not put
// the thread to sleep
if (type == ArbitrationType::WaitIfLessThanWithTimeout ||
type == ArbitrationType::DecrementAndWaitIfLessThanWithTimeout) {
return ResultTimeout;
}
return ResultSuccess;
}
template <class Archive>
void KAddressArbiter::serialize(Archive& ar, const unsigned int file_version) {
ar& boost::serialization::base_object<KAutoObject>(*this);
ar& m_name;
ar& m_waiting_threads;
// ar& m_timeout_callback;
}
SERIALIZE_IMPL(KAddressArbiter)
} // namespace Kernel
namespace boost::serialization {
template <class Archive>
void save_construct_data(Archive& ar, const Kernel::KAddressArbiter::Callback* t,
const unsigned int) {
ar << t->parent;
}
template <class Archive>
void load_construct_data(Archive& ar, Kernel::KAddressArbiter::Callback* t, const unsigned int) {
Kernel::KAddressArbiter* parent;
ar >> parent;
::new (t) Kernel::KAddressArbiter::Callback(parent);
}
} // namespace boost::serialization
SERIALIZE_EXPORT_IMPL(Kernel::KAddressArbiter)
SERIALIZE_EXPORT_IMPL(Kernel::KAddressArbiter::Callback)

View File

@ -0,0 +1,75 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <vector>
#include <boost/serialization/export.hpp>
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Kernel {
class KThread;
enum class ArbitrationType : u32 {
Signal,
WaitIfLessThan,
DecrementAndWaitIfLessThan,
WaitIfLessThanWithTimeout,
DecrementAndWaitIfLessThanWithTimeout,
};
class KAddressArbiter final : public KAutoObjectWithSlabHeapAndContainer<KAddressArbiter>,
public WakeupCallback {
KERNEL_AUTOOBJECT_TRAITS(KAddressArbiter, KAutoObject);
public:
explicit KAddressArbiter(KernelSystem& kernel);
~KAddressArbiter() override;
void Initialize(Process* owner);
uintptr_t GetPostDestroyArgument() const override {
return reinterpret_cast<uintptr_t>(m_owner);
}
static void PostDestroy(uintptr_t arg);
Process* GetOwner() const override {
return m_owner;
}
Result ArbitrateAddress(KThread* thread, ArbitrationType type, VAddr address, s32 value,
u64 nanoseconds);
private:
void WaitThread(KThread* thread, VAddr wait_address);
u64 ResumeAllThreads(VAddr address);
bool ResumeHighestPriorityThread(VAddr address);
void WakeUp(ThreadWakeupReason reason, KThread* thread,
KSynchronizationObject* object) override;
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version);
public:
Process* m_owner{};
std::string m_name{};
std::vector<KThread*> m_waiting_threads;
class Callback;
std::shared_ptr<Callback> m_timeout_callback;
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KAddressArbiter)
BOOST_CLASS_EXPORT_KEY(Kernel::KAddressArbiter::Callback)
CONSTRUCT_KERNEL_OBJECT(Kernel::KAddressArbiter)
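A hedged sketch of how a caller might resolve an arbiter handle and forward an arbitration request. DemoArbitrate is a hypothetical wrapper (not the emulator's SVC handler), and it assumes Process exposes its KHandleTable as handle_table with GetObject() returning a KScopedAutoObject, as the IPC translation changes earlier in this diff suggest.

#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/result.h"

// Hypothetical wrapper around KAddressArbiter::ArbitrateAddress().
Result DemoArbitrate(Kernel::Process& process, Kernel::KThread* current_thread,
                     Kernel::Handle arbiter_handle, VAddr address, s32 value, u64 nanoseconds) {
    // The converting constructor performs the downcast and closes the reference
    // if the object behind the handle is not a KAddressArbiter.
    Kernel::KScopedAutoObject<Kernel::KAddressArbiter> arbiter =
        process.handle_table.GetObject(arbiter_handle);
    R_UNLESS(arbiter.IsNotNull(), Kernel::ResultInvalidHandle);
    // WaitIfLessThanWithTimeout may put the thread to sleep and reports ResultTimeout.
    return arbiter->ArbitrateAddress(current_thread,
                                     Kernel::ArbitrationType::WaitIfLessThanWithTimeout,
                                     address, value, nanoseconds);
}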

View File

@ -0,0 +1,32 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/archives.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/kernel.h"
namespace Kernel {
KAutoObject* KAutoObject::Create(KAutoObject* obj) {
obj->m_ref_count = 1;
return obj;
}
void KAutoObject::RegisterWithKernel() {
m_kernel.RegisterKernelObject(this);
}
void KAutoObject::UnregisterWithKernel(KernelSystem& kernel, KAutoObject* self) {
kernel.UnregisterKernelObject(self);
}
template <class Archive>
void KAutoObject::serialize(Archive& ar, const unsigned int) {
ar& m_name;
// ar& m_ref_count;
}
SERIALIZE_IMPL(KAutoObject)
} // namespace Kernel

View File

@ -0,0 +1,305 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <atomic>
#include <boost/serialization/access.hpp>
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/global.h"
namespace Kernel {
class KernelSystem;
class Process;
using Handle = u32;
constexpr u32 DefaultStackSize = 0x4000;
enum class ClassTokenType : u32 {
KAutoObject = 0,
KSynchronizationObject = 1,
KSemaphore = 27,
KEvent = 31,
KTimer = 53,
KMutex = 57,
Debug = 77,
KServerPort = 85,
DmaObject = 89,
KClientPort = 101,
CodeSet = 104,
KSession = 112,
KThread = 141,
KServerSession = 149,
KAddressArbiter = 152,
KClientSession = 165,
KPort = 168,
KSharedMemory = 176,
Process = 197,
KResourceLimit = 200,
};
DECLARE_ENUM_FLAG_OPERATORS(ClassTokenType)
#define KERNEL_AUTOOBJECT_TRAITS_IMPL(CLASS, BASE_CLASS, ATTRIBUTE) \
private: \
static constexpr inline const char* const TypeName = #CLASS; \
static constexpr inline auto ClassToken = ClassTokenType::CLASS; \
\
public: \
CITRA_NON_COPYABLE(CLASS); \
CITRA_NON_MOVEABLE(CLASS); \
\
using BaseClass = BASE_CLASS; \
static constexpr TypeObj GetStaticTypeObj() { return TypeObj(TypeName, ClassToken); } \
static constexpr const char* GetStaticTypeName() { return TypeName; } \
virtual TypeObj GetTypeObj() ATTRIBUTE { return GetStaticTypeObj(); } \
virtual const char* GetTypeName() ATTRIBUTE { return GetStaticTypeName(); } \
\
private: \
constexpr bool operator!=(const TypeObj& rhs)
#define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \
KERNEL_AUTOOBJECT_TRAITS_IMPL(CLASS, BASE_CLASS, const override)
class KAutoObject {
protected:
class TypeObj {
public:
constexpr explicit TypeObj(const char* n, ClassTokenType tok)
: m_name(n), m_class_token(tok) {}
constexpr const char* GetName() const {
return m_name;
}
constexpr ClassTokenType GetClassToken() const {
return m_class_token;
}
constexpr bool operator==(const TypeObj& rhs) const {
return this->GetClassToken() == rhs.GetClassToken();
}
constexpr bool operator!=(const TypeObj& rhs) const {
return this->GetClassToken() != rhs.GetClassToken();
}
constexpr bool IsDerivedFrom(const TypeObj& rhs) const {
return (this->GetClassToken() | rhs.GetClassToken()) == this->GetClassToken();
}
private:
const char* m_name;
ClassTokenType m_class_token;
};
private:
KERNEL_AUTOOBJECT_TRAITS_IMPL(KAutoObject, KAutoObject, const);
public:
explicit KAutoObject(KernelSystem& kernel) : m_kernel(kernel) {
RegisterWithKernel();
}
virtual ~KAutoObject() = default;
static KAutoObject* Create(KAutoObject* ptr);
// Destroy is responsible for destroying the auto object's resources when ref_count hits zero.
virtual void Destroy() {
UNIMPLEMENTED();
}
// Finalize is responsible for cleaning up resources, but does not destroy the object.
virtual void Finalize() {}
virtual Process* GetOwner() const {
return nullptr;
}
u32 GetReferenceCount() const {
return m_ref_count.load();
}
bool IsDerivedFrom(const TypeObj& rhs) const {
return this->GetTypeObj().IsDerivedFrom(rhs);
}
bool IsDerivedFrom(const KAutoObject& rhs) const {
return this->IsDerivedFrom(rhs.GetTypeObj());
}
template <typename Derived>
Derived DynamicCast() {
static_assert(std::is_pointer_v<Derived>);
using DerivedType = std::remove_pointer_t<Derived>;
if (this->IsDerivedFrom(DerivedType::GetStaticTypeObj())) {
return static_cast<Derived>(this);
} else {
return nullptr;
}
}
template <typename Derived>
const Derived DynamicCast() const {
static_assert(std::is_pointer_v<Derived>);
using DerivedType = std::remove_pointer_t<Derived>;
if (this->IsDerivedFrom(DerivedType::GetStaticTypeObj())) {
return static_cast<Derived>(this);
} else {
return nullptr;
}
}
bool Open() {
// Atomically increment the reference count, only if it's positive.
u32 cur_ref_count = m_ref_count.load(std::memory_order_acquire);
do {
if (cur_ref_count == 0) {
return false;
}
ASSERT(cur_ref_count < cur_ref_count + 1);
} while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count + 1,
std::memory_order_relaxed));
return true;
}
void Close() {
// Atomically decrement the reference count, not allowing it to become negative.
u32 cur_ref_count = m_ref_count.load(std::memory_order_acquire);
do {
ASSERT(cur_ref_count > 0);
} while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count - 1,
std::memory_order_acq_rel));
// If ref count hits zero, destroy the object.
if (cur_ref_count - 1 == 0) {
KernelSystem& kernel = m_kernel;
this->Destroy();
KAutoObject::UnregisterWithKernel(kernel, this);
}
}
private:
void RegisterWithKernel();
static void UnregisterWithKernel(KernelSystem& kernel, KAutoObject* self);
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int);
protected:
KernelSystem& m_kernel;
std::string m_name{};
private:
std::atomic<u32> m_ref_count{};
};
template <typename T>
class KScopedAutoObject {
public:
CITRA_NON_COPYABLE(KScopedAutoObject);
constexpr KScopedAutoObject() = default;
constexpr KScopedAutoObject(T* o) : m_obj(o) {
if (m_obj != nullptr) {
m_obj->Open();
}
}
~KScopedAutoObject() {
if (m_obj != nullptr) {
m_obj->Close();
}
m_obj = nullptr;
}
template <typename U>
requires(std::derived_from<T, U> || std::derived_from<U, T>)
constexpr KScopedAutoObject(KScopedAutoObject<U>&& rhs) {
if constexpr (std::derived_from<U, T>) {
// Upcast.
m_obj = rhs.m_obj;
rhs.m_obj = nullptr;
} else {
// Downcast.
T* derived = nullptr;
if (rhs.m_obj != nullptr) {
derived = rhs.m_obj->template DynamicCast<T*>();
if (derived == nullptr) {
rhs.m_obj->Close();
}
}
m_obj = derived;
rhs.m_obj = nullptr;
}
}
constexpr KScopedAutoObject<T>& operator=(KScopedAutoObject<T>&& rhs) {
rhs.Swap(*this);
return *this;
}
constexpr T* operator->() {
return m_obj;
}
constexpr T& operator*() {
return *m_obj;
}
constexpr void Reset(T* o) {
KScopedAutoObject(o).Swap(*this);
}
constexpr T* GetPointerUnsafe() {
return m_obj;
}
constexpr T* GetPointerUnsafe() const {
return m_obj;
}
constexpr T* ReleasePointerUnsafe() {
T* ret = m_obj;
m_obj = nullptr;
return ret;
}
constexpr bool IsNull() const {
return m_obj == nullptr;
}
constexpr bool IsNotNull() const {
return m_obj != nullptr;
}
private:
template <typename U>
friend class KScopedAutoObject;
private:
T* m_obj{};
private:
constexpr void Swap(KScopedAutoObject& rhs) noexcept {
std::swap(m_obj, rhs.m_obj);
}
};
} // namespace Kernel
#define CONSTRUCT_KERNEL_OBJECT(T) \
namespace boost::serialization { \
template <class Archive> \
void load_construct_data(Archive& ar, T* t, const unsigned int file_version) { \
::new (t) T(Core::Global<Kernel::KernelSystem>()); \
} \
}
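A minimal walkthrough of the reference-counting contract above, reusing the KEvent Create/Initialize/Register pattern that appears in the hle_ipc changes earlier in this diff. DemoRefCount is hypothetical, and the asserted counts simply restate what Create(), Open() and Close() do.

#include "common/assert.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_event.h"

// Hypothetical walkthrough of Open()/Close() and the scoped wrapper.
void DemoRefCount(Kernel::KernelSystem& kernel) {
    auto* event = Kernel::KEvent::Create(kernel);                 // KAutoObject::Create() sets the count to 1
    event->Initialize(nullptr, Kernel::ResetType::OneShot);
    Kernel::KEvent::Register(kernel, event);
    {
        Kernel::KScopedAutoObject<Kernel::KAutoObject> scoped{event}; // Open(): 1 -> 2
        ASSERT(scoped->GetReferenceCount() == 2);
        // Token-based downcast: succeeds because the object's class token covers KEvent's.
        ASSERT(scoped->DynamicCast<Kernel::KEvent*>() != nullptr);
    }                                                             // Close(): 2 -> 1
    event->Close();                                               // 1 -> 0: Destroy() runs and the object is unregistered
}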

View File

@ -0,0 +1,31 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include "core/hle/kernel/k_auto_object_container.h"
namespace Kernel {
void KAutoObjectWithListContainer::Register(KAutoObject* obj) {
// KScopedLightMutex lk{m_mutex};
m_object_list.push_back(*obj);
}
void KAutoObjectWithListContainer::Unregister(KAutoObject* obj) {
// KScopedLightMutex lk{m_mutex};
for (auto it = m_object_list.begin(); it != m_object_list.end(); it++) {
if (std::addressof(*it) == obj) {
m_object_list.erase(it);
return;
}
}
}
size_t KAutoObjectWithListContainer::GetOwnedCount(Process* owner) {
// KScopedLightMutex lk{m_mutex};
return std::count_if(m_object_list.begin(), m_object_list.end(),
[&](const auto& obj) { return obj.GetOwner() == owner; });
}
} // namespace Kernel

View File

@ -0,0 +1,37 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/common_funcs.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_linked_list.h"
namespace Kernel {
class KernelSystem;
class Process;
class KAutoObjectWithListContainer {
public:
CITRA_NON_COPYABLE(KAutoObjectWithListContainer);
CITRA_NON_MOVEABLE(KAutoObjectWithListContainer);
using ListType = KLinkedList<KAutoObject>;
KAutoObjectWithListContainer(KernelSystem& kernel) : m_object_list(kernel) {}
void Initialize() {}
void Finalize() {}
void Register(KAutoObject* obj);
void Unregister(KAutoObject* obj);
size_t GetOwnedCount(Process* owner);
private:
// KLightMutex m_mutex;
ListType m_object_list;
};
} // namespace Kernel

View File

@ -0,0 +1,79 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/export.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/kernel/k_session.h"
SERIALIZE_EXPORT_IMPL(Kernel::KClientPort)
namespace Kernel {
KClientPort::KClientPort(KernelSystem& kernel) : KAutoObject(kernel) {}
KClientPort::~KClientPort() = default;
void KClientPort::Initialize(KPort* parent, s32 max_sessions, std::string name) {
// Set member variables.
m_parent = parent;
m_max_sessions = max_sessions;
m_name = name + "_Client";
}
Result KClientPort::CreateSession(KClientSession** out) {
R_UNLESS(m_active_sessions < m_max_sessions, ResultMaxConnectionsReached);
m_active_sessions++;
// Allocate a new session.
KSession* session = KSession::Create(m_kernel);
// Initialize the session.
session->Initialize(this);
// Register the session.
KSession::Register(m_kernel, session);
// Let the created sessions inherit the parent port's HLE handler.
auto* server = &m_parent->GetServerPort();
auto hle_handler = server->GetHleHandler();
if (hle_handler) {
hle_handler->ClientConnected(&session->GetServerSession());
} else {
server->EnqueueSession(&session->GetServerSession());
}
// Wake the threads waiting on the ServerPort
m_parent->GetServerPort().WakeupAllWaitingThreads();
// We succeeded, so set the output.
*out = std::addressof(session->GetClientSession());
return ResultSuccess;
}
void KClientPort::ConnectionClosed() {
ASSERT(m_active_sessions > 0);
--m_active_sessions;
}
template <class Archive>
void KClientPort::serialize(Archive& ar, const u32 file_version) {
ar& boost::serialization::base_object<KAutoObject>(*this);
// ar& m_parent;
ar& m_max_sessions;
ar& m_active_sessions;
ar& m_name;
}
SERIALIZE_IMPL(KClientPort)
} // namespace Kernel
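A hedged sketch of the connection flow driven by CreateSession(). DemoConnect is a hypothetical helper; handle-table registration and session lifetime management are deliberately omitted, and R_ASSERT is used only to keep the sketch short where a real caller would propagate ResultMaxConnectionsReached.

#include <memory>
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_client_session.h"

// Hypothetical: connect to a port and issue one synchronous request.
Result DemoConnect(Kernel::KClientPort* port, Kernel::KThread* current_thread) {
    Kernel::KClientSession* session{};
    R_ASSERT(port->CreateSession(std::addressof(session)));
    // Forwards to KServerSession::HandleSyncRequest() via the parent KSession.
    return session->SendSyncRequest(current_thread);
}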

View File

@ -0,0 +1,52 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string>
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/result.h"
namespace Kernel {
class KClientSession;
class KClientPort final : public KAutoObject {
KERNEL_AUTOOBJECT_TRAITS(KClientPort, KAutoObject);
public:
explicit KClientPort(KernelSystem& kernel);
~KClientPort() override;
void Initialize(KPort* parent, s32 max_sessions, std::string name);
const KPort* GetParent() const {
return m_parent;
}
KPort* GetParent() {
return m_parent;
}
Result CreateSession(KClientSession** out);
void ConnectionClosed();
private:
KPort* m_parent{};
u32 m_max_sessions{};
u32 m_active_sessions{};
std::string m_name;
friend class KernelSystem;
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version);
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KClientPort)
CONSTRUCT_KERNEL_OBJECT(Kernel::KClientPort)

View File

@ -0,0 +1,42 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/string.hpp>
#include "common/archives.h"
#include "core/hle/kernel/k_auto_object_container.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::KClientSession)
namespace Kernel {
KClientSession::KClientSession(KernelSystem& kernel) : KAutoObject(kernel) {}
KClientSession::~KClientSession() = default;
void KClientSession::Destroy() {
m_parent->OnClientClosed();
m_parent->Close();
}
void KClientSession::OnServerClosed() {}
Result KClientSession::SendSyncRequest(KThread* thread) {
// Signal the server session that new data is available
return m_parent->GetServerSession().HandleSyncRequest(thread);
}
template <class Archive>
void KClientSession::serialize(Archive& ar, const u32 file_version) {
ar& boost::serialization::base_object<KAutoObject>(*this);
// ar& m_parent;
}
SERIALIZE_IMPL(KClientSession)
} // namespace Kernel

View File

@ -0,0 +1,50 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <boost/serialization/export.hpp>
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/result.h"
namespace Kernel {
class KSession;
class KThread;
class KClientSession final : public KAutoObject {
KERNEL_AUTOOBJECT_TRAITS(KClientSession, KAutoObject);
public:
explicit KClientSession(KernelSystem& kernel);
~KClientSession() override;
void Initialize(KSession* parent) {
// Set member variables.
m_parent = parent;
}
void Destroy() override;
KSession* GetParent() const {
return m_parent;
}
Result SendSyncRequest(KThread* thread);
void OnServerClosed();
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version);
private:
KSession* m_parent{};
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KClientSession)
CONSTRUCT_KERNEL_OBJECT(Kernel::KClientSession)

View File

@ -0,0 +1,76 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <vector>
#include <boost/serialization/vector.hpp>
namespace Kernel {
class CodeSet {
public:
CodeSet() = default;
~CodeSet() = default;
struct Segment {
std::size_t offset = 0;
VAddr addr = 0;
u32 size = 0;
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version) {
ar& offset;
ar& addr;
ar& size;
}
};
Segment& CodeSegment() {
return segments[0];
}
const Segment& CodeSegment() const {
return segments[0];
}
Segment& RODataSegment() {
return segments[1];
}
const Segment& RODataSegment() const {
return segments[1];
}
Segment& DataSegment() {
return segments[2];
}
const Segment& DataSegment() const {
return segments[2];
}
std::vector<u8> memory;
std::array<Segment, 3> segments;
VAddr entrypoint;
u64 program_id;
std::string name;
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version) {
ar& memory;
ar& segments;
ar& entrypoint;
ar& program_id;
ar& name;
}
};
} // namespace Kernel
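
A hedged sketch of how a loader might populate the struct above (not part of the commit; program_image, text_size, ro_size, data_size and title_id are hypothetical loader-side values, and 0x00100000 is the usual 3DS code load address):

void FillCodeSet(Kernel::CodeSet& codeset, std::vector<u8> program_image, u32 text_size,
                 u32 ro_size, u32 data_size, u64 title_id) {
    codeset.memory = std::move(program_image);

    codeset.CodeSegment().offset = 0;
    codeset.CodeSegment().addr = 0x00100000;
    codeset.CodeSegment().size = text_size;

    codeset.RODataSegment().offset = text_size;
    codeset.RODataSegment().addr = 0x00100000 + text_size;
    codeset.RODataSegment().size = ro_size;

    codeset.DataSegment().offset = text_size + ro_size;
    codeset.DataSegment().addr = 0x00100000 + text_size + ro_size;
    codeset.DataSegment().size = data_size;

    codeset.entrypoint = codeset.CodeSegment().addr;
    codeset.program_id = title_id;
    codeset.name = "example";
}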

View File

@ -0,0 +1,78 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
SERIALIZE_EXPORT_IMPL(Kernel::KEvent)
namespace Kernel {
KEvent::KEvent(KernelSystem& kernel) : KAutoObjectWithSlabHeapAndContainer(kernel) {}
KEvent::~KEvent() = default;
void KEvent::Initialize(Process* owner, ResetType reset_type) {
// Open a reference to the owner process.
if (owner) {
owner->Open();
m_owner = owner;
}
// Set member variables.
m_reset_type = reset_type;
}
void KEvent::PostDestroy(uintptr_t arg) {
Process* owner = reinterpret_cast<Process*>(arg);
if (owner != nullptr) {
owner->ReleaseResource(ResourceLimitType::Event, 1);
owner->Close();
}
}
bool KEvent::ShouldWait(const KThread* thread) const {
return !m_signaled;
}
void KEvent::Acquire(KThread* thread) {
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
if (m_reset_type == ResetType::OneShot) {
m_signaled = false;
}
}
void KEvent::Signal() {
m_signaled = true;
this->WakeupAllWaitingThreads();
}
void KEvent::Clear() {
m_signaled = false;
}
void KEvent::WakeupAllWaitingThreads() {
KSynchronizationObject::WakeupAllWaitingThreads();
if (m_reset_type == ResetType::Pulse) {
m_signaled = false;
}
}
template <class Archive>
void KEvent::serialize(Archive& ar, const u32 file_version) {
ar& boost::serialization::base_object<KSynchronizationObject>(*this);
ar& m_owner;
ar& m_reset_type;
ar& m_signaled;
}
SERIALIZE_IMPL(KEvent)
} // namespace Kernel
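
A hedged usage sketch (not part of the commit) following the Create/Initialize/Register pattern this commit uses for other slab-allocated objects; KEvent::Create and KEvent::Register are assumed to exist via the slab helper base, and kernel is the surrounding KernelSystem&. Reference management is omitted.

void EventExample(Kernel::KernelSystem& kernel) {
    using namespace Kernel;

    KEvent* event = KEvent::Create(kernel);
    event->Initialize(nullptr, ResetType::Sticky); // no owner process in this sketch
    KEvent::Register(kernel, event);

    event->Signal(); // wakes all waiters; a Sticky event stays signaled until Clear()
    event->Clear();
}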

View File

@ -0,0 +1,74 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <boost/serialization/export.hpp>
#include "core/global.h"
#include "core/hle/kernel/k_auto_object_container.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Kernel {
enum class ResetType : u32 {
OneShot,
Sticky,
Pulse,
};
class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KSynchronizationObject> {
KERNEL_AUTOOBJECT_TRAITS(KEvent, KSynchronizationObject);
public:
explicit KEvent(KernelSystem& kernel);
~KEvent() override;
std::string GetName() const {
return m_name;
}
void SetName(const std::string& name) {
m_name = name;
}
void Initialize(Process* owner, ResetType reset_type);
uintptr_t GetPostDestroyArgument() const override {
return reinterpret_cast<uintptr_t>(m_owner);
}
static void PostDestroy(uintptr_t arg);
Process* GetOwner() const override {
return m_owner;
}
ResetType GetResetType() const {
return m_reset_type;
}
bool ShouldWait(const KThread* thread) const override;
void Acquire(KThread* thread) override;
void WakeupAllWaitingThreads() override;
void Signal();
void Clear();
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version);
private:
Process* m_owner{};
ResetType m_reset_type{};
bool m_signaled{};
std::string m_name;
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KEvent)
CONSTRUCT_KERNEL_OBJECT(Kernel::KEvent)

View File

@ -0,0 +1,106 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/array.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include "common/archives.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_process.h"
namespace Kernel {
Result KHandleTable::Finalize() {
// Close and free all entries.
for (size_t i = 0; i < m_table_size; i++) {
if (KAutoObject* obj = m_objects[i]; obj != nullptr) {
obj->Close();
}
}
return ResultSuccess;
}
bool KHandleTable::Remove(Handle handle) {
// Don't allow removal of a pseudo-handle.
if (handle == KernelHandle::CurrentProcess || handle == KernelHandle::CurrentThread)
[[unlikely]] {
return false;
}
// Handles must not have reserved bits set.
const auto handle_pack = HandlePack(handle);
if (handle_pack.reserved != 0) [[unlikely]] {
return false;
}
// Find the object and free the entry.
KAutoObject* obj = nullptr;
{
// KScopedLightMutex lk{m_mutex};
if (this->IsValidHandle(handle)) [[likely]] {
const auto index = handle_pack.index;
obj = m_objects[index];
this->FreeEntry(index);
} else {
return false;
}
}
// Close the object.
obj->Close();
return true;
}
Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
// KScopedLightMutex lk{m_mutex};
// Never exceed our capacity.
R_UNLESS(m_count < m_table_size, ResultOutOfHandles);
// Allocate entry, set output handle.
const auto linear_id = this->AllocateLinearId();
const auto index = this->AllocateEntry();
m_entry_infos[index].linear_id = linear_id;
m_objects[index] = obj;
obj->Open();
*out_handle = EncodeHandle(static_cast<u16>(index), linear_id);
return ResultSuccess;
}
KScopedAutoObject<KAutoObject> KHandleTable::GetObjectForIpc(Handle handle,
KThread* cur_thread) const {
// Handle pseudo-handles.
ASSERT(cur_thread != nullptr);
if (handle == KernelHandle::CurrentProcess) {
auto* cur_process = cur_thread->GetOwner();
ASSERT(cur_process != nullptr);
return cur_process;
}
if (handle == KernelHandle::CurrentThread) {
return cur_thread;
}
return this->GetObjectForIpcWithoutPseudoHandle(handle);
}
template <class Archive>
void KHandleTable::serialize(Archive& ar, const u32 file_version) {
// ar& m_entry_infos;
// ar& m_objects;
ar& m_free_head_index;
ar& m_table_size;
ar& m_next_id;
ar& m_max_count;
ar& m_next_linear_id;
ar& m_count;
}
SERIALIZE_IMPL(KHandleTable)
} // namespace Kernel

View File

@ -0,0 +1,286 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <algorithm>
#include <vector>
#include "common/bit_field.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/result.h"
namespace Kernel {
enum KernelHandle : Handle {
CurrentThread = 0xFFFF8000,
CurrentProcess = 0xFFFF8001,
};
class KHandleTable {
CITRA_NON_COPYABLE(KHandleTable);
CITRA_NON_MOVEABLE(KHandleTable);
public:
static constexpr size_t MaxTableSize = 1024;
public:
explicit KHandleTable(KernelSystem& kernel) : m_kernel(kernel) {}
Result Initialize(s32 size) {
// KScopedLightMutex lk{m_mutex};
// Initialize all fields.
m_max_count = 0;
m_table_size = static_cast<s16>((size <= 0) ? MaxTableSize : size);
m_next_linear_id = MinLinearId;
m_count = 0;
m_free_head_index = -1;
// Create the arrays
m_objects.resize(m_table_size);
m_entry_infos.resize(m_table_size);
// Free all entries.
for (s32 i = 0; i < static_cast<s32>(m_table_size); ++i) {
m_objects[i] = nullptr;
m_entry_infos[i].next_free_index = static_cast<s16>(i - 1);
m_free_head_index = i;
}
return ResultSuccess;
}
size_t GetTableSize() const {
return m_table_size;
}
size_t GetCount() const {
return m_count;
}
size_t GetMaxCount() const {
return m_max_count;
}
Result Finalize();
bool Remove(Handle handle);
Result Add(Handle* out_handle, KAutoObject* obj);
template <typename T = KAutoObject>
KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
// KScopedLightMutex lk{m_mutex};
if constexpr (std::is_same_v<T, KAutoObject>) {
return this->GetObjectImpl(handle);
} else {
if (auto* obj = this->GetObjectImpl(handle); obj != nullptr) [[likely]] {
return obj->DynamicCast<T*>();
} else {
return nullptr;
}
}
}
template <typename T = KAutoObject>
KScopedAutoObject<T> GetObject(Handle handle) const {
// Handle pseudo-handles.
if constexpr (std::derived_from<Process, T>) {
if (handle == KernelHandle::CurrentProcess) {
auto* const cur_process = m_kernel.GetCurrentProcess();
ASSERT(cur_process != nullptr);
return cur_process;
}
} else if constexpr (std::derived_from<KThread, T>) {
if (handle == KernelHandle::CurrentThread) {
auto* const cur_thread = m_kernel.GetCurrentThreadManager().GetCurrentThread();
ASSERT(cur_thread != nullptr);
return cur_thread;
}
}
return this->template GetObjectWithoutPseudoHandle<T>(handle);
}
KScopedAutoObject<KAutoObject> GetObjectForIpcWithoutPseudoHandle(Handle handle) const {
return this->GetObjectImpl(handle);
}
KScopedAutoObject<KAutoObject> GetObjectForIpc(Handle handle, KThread* cur_thread) const;
template <typename T>
bool GetMultipleObjects(T** out, const Handle* handles, size_t num_handles) const {
// Try to convert and open all the handles.
size_t num_opened;
{
// KScopedLightMutex lk{m_mutex};
for (num_opened = 0; num_opened < num_handles; num_opened++) {
// Get the current handle.
const auto cur_handle = handles[num_opened];
// Get the object for the current handle.
KAutoObject* cur_object = this->GetObjectImpl(cur_handle);
if (cur_object == nullptr) [[unlikely]] {
break;
}
// Cast the current object to the desired type.
T* cur_t = cur_object->DynamicCast<T*>();
if (cur_t == nullptr) [[unlikely]] {
break;
}
// Open a reference to the current object.
cur_t->Open();
out[num_opened] = cur_t;
}
}
// If we converted every object, succeed.
if (num_opened == num_handles) [[likely]] {
return true;
}
// If we didn't convert every object, close the ones we opened.
for (size_t i = 0; i < num_opened; i++) {
out[i]->Close();
}
return false;
}
private:
s32 AllocateEntry() {
ASSERT(m_count < m_table_size);
const auto index = m_free_head_index;
m_free_head_index = m_entry_infos[index].GetNextFreeIndex();
m_max_count = std::max(m_max_count, ++m_count);
return index;
}
void FreeEntry(s32 index) {
ASSERT(m_count > 0);
m_objects[index] = nullptr;
m_entry_infos[index].next_free_index = static_cast<s16>(m_free_head_index);
m_free_head_index = index;
--m_count;
}
u16 AllocateLinearId() {
const u16 id = m_next_linear_id++;
if (m_next_linear_id > MaxLinearId) {
m_next_linear_id = MinLinearId;
}
return id;
}
bool IsValidHandle(Handle handle) const {
// Unpack the handle.
const auto handle_pack = HandlePack(handle);
const auto raw_value = handle_pack.raw;
const auto index = handle_pack.index;
const auto linear_id = handle_pack.linear_id;
const auto reserved = handle_pack.reserved;
ASSERT(reserved == 0);
// Validate our indexing information.
if (raw_value == 0) [[unlikely]] {
return false;
}
if (linear_id == 0) [[unlikely]] {
return false;
}
if (index >= m_table_size) [[unlikely]] {
return false;
}
// Check that there's an object, and our serial id is correct.
if (m_objects[index] == nullptr) [[unlikely]] {
return false;
}
if (m_entry_infos[index].GetLinearId() != linear_id) [[unlikely]] {
return false;
}
return true;
}
KAutoObject* GetObjectImpl(Handle handle) const {
// Handles must not have reserved bits set.
const auto handle_pack = HandlePack(handle);
if (handle_pack.reserved != 0) [[unlikely]] {
return nullptr;
}
if (this->IsValidHandle(handle)) [[likely]] {
return m_objects[handle_pack.index];
} else {
return nullptr;
}
}
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version);
private:
union HandlePack {
constexpr HandlePack() = default;
constexpr HandlePack(Handle handle) : raw{static_cast<u32>(handle)} {}
u32 raw{};
BitField<0, 15, u32> index;
BitField<15, 15, u32> linear_id;
BitField<30, 2, u32> reserved;
};
static constexpr Handle EncodeHandle(u16 index, u16 linear_id) {
HandlePack handle{};
handle.index.Assign(index);
handle.linear_id.Assign(linear_id);
handle.reserved.Assign(0);
return handle.raw;
}
private:
static constexpr u16 MinLinearId = 1;
static constexpr u16 MaxLinearId = 0x7FFF;
union EntryInfo {
u16 linear_id;
s16 next_free_index;
constexpr u16 GetLinearId() const {
return linear_id;
}
constexpr s32 GetNextFreeIndex() const {
return next_free_index;
}
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version) {
ar& linear_id;
ar& next_free_index;
}
};
private:
KernelSystem& m_kernel;
std::vector<EntryInfo> m_entry_infos{};
std::vector<KAutoObject*> m_objects{};
s32 m_free_head_index{};
u16 m_table_size{};
u16 m_next_id{};
u16 m_max_count{};
u16 m_next_linear_id{};
u16 m_count{};
// KLightMutex mutex;
};
} // namespace Kernel
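
A minimal, self-contained sketch (not part of the commit) of the handle layout that HandlePack/EncodeHandle above describe: 15 bits of table index, 15 bits of linear id, and 2 reserved bits. Plain shifts stand in for BitField.

#include <cassert>
#include <cstdint>

int main() {
    const std::uint16_t index = 3;     // slot in m_objects
    const std::uint16_t linear_id = 7; // generation counter in [MinLinearId, MaxLinearId]
    const std::uint32_t handle = (static_cast<std::uint32_t>(linear_id) << 15) | index;

    assert((handle & 0x7FFF) == index);             // bits 0..14
    assert(((handle >> 15) & 0x7FFF) == linear_id); // bits 15..29
    assert((handle >> 30) == 0);                    // reserved bits 30..31 stay clear
    return 0;
}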

View File

@ -0,0 +1,237 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/intrusive_list.h"
#include "core/hle/kernel/k_slab_heap.h"
namespace Kernel {
class KernelSystem;
class KLinkedListNode : public Common::IntrusiveListBaseNode<KLinkedListNode>,
public KSlabAllocated<KLinkedListNode> {
public:
explicit KLinkedListNode(KernelSystem&) {}
KLinkedListNode() = default;
void Initialize(void* it) {
m_item = it;
}
void* GetItem() const {
return m_item;
}
private:
void* m_item = nullptr;
};
template <typename T>
class KLinkedList : private Common::IntrusiveListBaseTraits<KLinkedListNode>::ListType {
private:
using BaseList = Common::IntrusiveListBaseTraits<KLinkedListNode>::ListType;
public:
template <bool Const>
class Iterator;
using value_type = T;
using size_type = size_t;
using difference_type = ptrdiff_t;
using pointer = value_type*;
using const_pointer = const value_type*;
using reference = value_type&;
using const_reference = const value_type&;
using iterator = Iterator<false>;
using const_iterator = Iterator<true>;
using reverse_iterator = std::reverse_iterator<iterator>;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
template <bool Const>
class Iterator {
private:
using BaseIterator = BaseList::iterator;
friend class KLinkedList;
public:
using iterator_category = std::bidirectional_iterator_tag;
using value_type = typename KLinkedList::value_type;
using difference_type = typename KLinkedList::difference_type;
using pointer = std::conditional_t<Const, KLinkedList::const_pointer, KLinkedList::pointer>;
using reference =
std::conditional_t<Const, KLinkedList::const_reference, KLinkedList::reference>;
public:
explicit Iterator(BaseIterator it) : m_base_it(it) {}
pointer GetItem() const {
return static_cast<pointer>(m_base_it->GetItem());
}
bool operator==(const Iterator& rhs) const {
return m_base_it == rhs.m_base_it;
}
bool operator!=(const Iterator& rhs) const {
return !(*this == rhs);
}
pointer operator->() const {
return this->GetItem();
}
reference operator*() const {
return *this->GetItem();
}
Iterator& operator++() {
++m_base_it;
return *this;
}
Iterator& operator--() {
--m_base_it;
return *this;
}
Iterator operator++(int) {
const Iterator it{*this};
++(*this);
return it;
}
Iterator operator--(int) {
const Iterator it{*this};
--(*this);
return it;
}
operator Iterator<true>() const {
return Iterator<true>(m_base_it);
}
private:
BaseIterator m_base_it;
};
public:
constexpr KLinkedList(KernelSystem& kernel_) : BaseList(), kernel{kernel_} {}
~KLinkedList() {
// Erase all elements.
for (auto it = begin(); it != end(); it = erase(it)) {
}
// Ensure we succeeded.
ASSERT(this->empty());
}
// Iterator accessors.
iterator begin() {
return iterator(BaseList::begin());
}
const_iterator begin() const {
return const_iterator(BaseList::begin());
}
iterator end() {
return iterator(BaseList::end());
}
const_iterator end() const {
return const_iterator(BaseList::end());
}
const_iterator cbegin() const {
return this->begin();
}
const_iterator cend() const {
return this->end();
}
reverse_iterator rbegin() {
return reverse_iterator(this->end());
}
const_reverse_iterator rbegin() const {
return const_reverse_iterator(this->end());
}
reverse_iterator rend() {
return reverse_iterator(this->begin());
}
const_reverse_iterator rend() const {
return const_reverse_iterator(this->begin());
}
const_reverse_iterator crbegin() const {
return this->rbegin();
}
const_reverse_iterator crend() const {
return this->rend();
}
// Content management.
using BaseList::empty;
using BaseList::size;
reference back() {
return *(--this->end());
}
const_reference back() const {
return *(--this->end());
}
reference front() {
return *this->begin();
}
const_reference front() const {
return *this->begin();
}
iterator insert(const_iterator pos, reference ref) {
KLinkedListNode* new_node = KLinkedListNode::Allocate(kernel);
ASSERT(new_node != nullptr);
new_node->Initialize(std::addressof(ref));
return iterator(BaseList::insert(pos.m_base_it, *new_node));
}
void push_back(reference ref) {
this->insert(this->end(), ref);
}
void push_front(reference ref) {
this->insert(this->begin(), ref);
}
void pop_back() {
this->erase(--this->end());
}
void pop_front() {
this->erase(this->begin());
}
iterator erase(const iterator pos) {
KLinkedListNode* freed_node = std::addressof(*pos.m_base_it);
iterator ret = iterator(BaseList::erase(pos.m_base_it));
KLinkedListNode::Free(kernel, freed_node);
return ret;
}
private:
KernelSystem& kernel;
};
} // namespace Kernel
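
A hedged usage sketch for the container (not compilable on its own: it assumes the surrounding emulator provides a KernelSystem& named kernel, and MyItem is a made-up element type). The list nodes only point at the items, so the items must outlive the list.

struct MyItem {
    int value;
};

int LinkedListExample(Kernel::KernelSystem& kernel) {
    MyItem a{1};
    MyItem b{2};

    Kernel::KLinkedList<MyItem> list{kernel};
    list.push_back(a);  // list: a
    list.push_front(b); // list: b, a

    int sum = 0;
    for (const MyItem& item : list) {
        sum += item.value; // visits b, then a
    }
    return sum; // 3; the list destructor frees the nodes, not the items
}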

View File

@ -0,0 +1,150 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_mutex.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
SERIALIZE_EXPORT_IMPL(Kernel::KMutex)
namespace Kernel {
void ReleaseThreadMutexes(KThread* thread) {
for (KMutex* mtx : thread->m_held_mutexes) {
mtx->m_lock_count = 0;
mtx->m_holding_thread = nullptr;
mtx->WakeupAllWaitingThreads();
}
thread->m_held_mutexes.clear();
}
KMutex::KMutex(KernelSystem& kernel) : KAutoObjectWithSlabHeapAndContainer(kernel) {}
KMutex::~KMutex() = default;
void KMutex::Initialize(Process* owner, bool initial_locked) {
// Open a reference to the owner process.
if (owner) {
owner->Open();
m_owner = owner;
}
// Set default priority
m_priority = ThreadPrioLowest;
// Acquire mutex with current thread if initialized as locked
if (initial_locked) {
KThread* thread = m_kernel.GetCurrentThreadManager().GetCurrentThread();
this->Acquire(thread);
}
}
void KMutex::PostDestroy(uintptr_t arg) {
Process* owner = reinterpret_cast<Process*>(arg);
if (owner != nullptr) {
owner->ReleaseResource(ResourceLimitType::Mutex, 1);
owner->Close();
}
}
bool KMutex::ShouldWait(const KThread* thread) const {
return m_lock_count > 0 && thread != m_holding_thread;
}
void KMutex::Acquire(KThread* thread) {
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
// Actually "acquire" the mutex only if we don't already have it
if (m_lock_count == 0) {
m_priority = thread->m_current_priority;
thread->m_held_mutexes.insert(this);
m_holding_thread = thread;
thread->UpdatePriority();
m_kernel.PrepareReschedule();
}
m_lock_count++;
}
Result KMutex::Release(KThread* thread) {
// We can only release the mutex if it's held by the calling thread.
if (thread != m_holding_thread) {
if (m_holding_thread) {
LOG_ERROR(
Kernel,
"Tried to release a mutex (owned by thread id {}) from a different thread id {}",
m_holding_thread->m_thread_id, thread->m_thread_id);
}
return Result(ErrCodes::WrongLockingThread, ErrorModule::Kernel,
ErrorSummary::InvalidArgument, ErrorLevel::Permanent);
}
// Note: A mutex that has a holding thread should never have a zero lock count,
// but the real kernel still checks for this, so we do too.
if (m_lock_count <= 0) {
return Result(ErrorDescription::InvalidResultValue, ErrorModule::Kernel,
ErrorSummary::InvalidState, ErrorLevel::Permanent);
}
m_lock_count--;
// Yield to the next thread only if we've fully released the mutex
if (m_lock_count == 0) {
m_holding_thread->m_held_mutexes.erase(this);
m_holding_thread->UpdatePriority();
m_holding_thread = nullptr;
WakeupAllWaitingThreads();
m_kernel.PrepareReschedule();
}
return ResultSuccess;
}
void KMutex::AddWaitingThread(KThread* thread) {
KSynchronizationObject::AddWaitingThread(thread);
thread->m_pending_mutexes.insert(this);
this->UpdatePriority();
}
void KMutex::RemoveWaitingThread(KThread* thread) {
KSynchronizationObject::RemoveWaitingThread(thread);
thread->m_pending_mutexes.erase(this);
this->UpdatePriority();
}
void KMutex::UpdatePriority() {
if (!m_holding_thread) {
return;
}
u32 best_priority = ThreadPrioLowest;
for (const KThread* waiter : GetWaitingThreads()) {
if (waiter->m_current_priority < best_priority) {
best_priority = waiter->m_current_priority;
}
}
if (best_priority != m_priority) {
m_priority = best_priority;
m_holding_thread->UpdatePriority();
}
}
template <class Archive>
void KMutex::serialize(Archive& ar, const u32 file_version) {
ar& boost::serialization::base_object<KSynchronizationObject>(*this);
ar& m_lock_count;
ar& m_priority;
ar& m_holding_thread;
}
SERIALIZE_IMPL(KMutex)
} // namespace Kernel
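
For reference, UpdatePriority above implements priority inheritance with the 3DS convention that a numerically lower value means a higher priority. A minimal, self-contained sketch of that rule (not part of the commit; 63 is assumed to match ThreadPrioLowest):

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
    constexpr unsigned ThreadPrioLowest = 63; // assumed value of the kernel constant
    const std::vector<unsigned> waiter_priorities{48, 24, 30};

    unsigned best_priority = ThreadPrioLowest;
    for (const unsigned prio : waiter_priorities) {
        best_priority = std::min(best_priority, prio);
    }
    std::printf("inherited mutex priority: %u\n", best_priority); // prints 24
    return 0;
}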

View File

@ -0,0 +1,81 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <boost/serialization/export.hpp>
#include "core/global.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Kernel {
class KThread;
class KMutex final : public KAutoObjectWithSlabHeapAndContainer<KMutex, KSynchronizationObject> {
KERNEL_AUTOOBJECT_TRAITS(KMutex, KSynchronizationObject);
public:
explicit KMutex(KernelSystem& kernel);
~KMutex() override;
void Initialize(Process* owner, bool initial_locked);
uintptr_t GetPostDestroyArgument() const override {
return reinterpret_cast<uintptr_t>(m_owner);
}
static void PostDestroy(uintptr_t arg);
Process* GetOwner() const override {
return m_owner;
}
u32 GetPriority() const {
return m_priority;
}
bool ShouldWait(const KThread* thread) const override;
void Acquire(KThread* thread) override;
void AddWaitingThread(KThread* thread) override;
void RemoveWaitingThread(KThread* thread) override;
/**
* Elevate the mutex priority to the best priority
* among the priorities of all its waiting threads.
*/
void UpdatePriority();
/**
* Attempts to release the mutex from the specified thread.
* @param thread Thread that wants to release the mutex.
* @returns The result code of the operation.
*/
Result Release(KThread* thread);
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version);
public:
Process* m_owner{};
int m_lock_count{};
u32 m_priority{};
KThread* m_holding_thread{};
};
/**
* Releases all the mutexes held by the specified thread
* @param thread Thread that is holding the mutexes
*/
void ReleaseThreadMutexes(KThread* thread);
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KMutex)
CONSTRUCT_KERNEL_OBJECT(Kernel::KMutex)

View File

@ -0,0 +1,103 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/hle/kernel/k_object_name.h"
namespace Kernel {
KObjectNameGlobalData::KObjectNameGlobalData(KernelSystem& kernel) {}
KObjectNameGlobalData::~KObjectNameGlobalData() = default;
void KObjectName::Initialize(KAutoObject* obj, const char* name) {
// Set member variables.
m_object = obj;
std::strncpy(m_name.data(), name, sizeof(m_name) - 1);
m_name[sizeof(m_name) - 1] = '\x00';
// Open a reference to the object we hold.
m_object->Open();
}
bool KObjectName::MatchesName(const char* name) const {
return std::strncmp(m_name.data(), name, sizeof(m_name)) == 0;
}
Result KObjectName::NewFromName(KernelSystem& kernel, KAutoObject* obj, const char* name) {
// Create a new object name.
KObjectName* new_name = KObjectName::Allocate(kernel);
R_UNLESS(new_name != nullptr, Result{0xD86007F3});
// Initialize the new name.
new_name->Initialize(obj, name);
// Check if there's an existing name.
{
// Get the global data.
KObjectNameGlobalData& gd{kernel.ObjectNameGlobalData()};
// Ensure we have exclusive access to the global list.
// KScopedLightMutex lk{gd.GetObjectListLock()};
// If the object doesn't exist, put it into the list.
KScopedAutoObject existing_object = FindImpl(kernel, name);
if (existing_object.IsNull()) {
gd.GetObjectList().push_back(*new_name);
return ResultSuccess;
}
}
// The object already exists; the real kernel does not check for this case.
UNREACHABLE();
}
Result KObjectName::Delete(KernelSystem& kernel, KAutoObject* obj, const char* compare_name) {
// Get the global data.
KObjectNameGlobalData& gd{kernel.ObjectNameGlobalData()};
// Ensure we have exclusive access to the global list.
// KScopedLightMutex lk{gd.GetObjectListLock()};
// Find a matching entry in the list, and delete it.
for (auto& name : gd.GetObjectList()) {
if (name.MatchesName(compare_name) && obj == name.GetObject()) {
// We found a match, clean up its resources.
obj->Close();
gd.GetObjectList().erase(gd.GetObjectList().iterator_to(name));
KObjectName::Free(kernel, std::addressof(name));
return ResultSuccess;
}
}
// We didn't find the object in the list.
return ResultNotFound;
}
KScopedAutoObject<KAutoObject> KObjectName::Find(KernelSystem& kernel, const char* name) {
// Get the global data.
// KObjectNameGlobalData& gd{kernel.ObjectNameGlobalData()};
// Ensure we have exclusive access to the global list.
// KScopedLightMutex lk{gd.GetObjectListLock()};
return FindImpl(kernel, name);
}
KScopedAutoObject<KAutoObject> KObjectName::FindImpl(KernelSystem& kernel,
const char* compare_name) {
// Get the global data.
KObjectNameGlobalData& gd{kernel.ObjectNameGlobalData()};
// Try to find a matching object in the global list.
for (const auto& name : gd.GetObjectList()) {
if (name.MatchesName(compare_name)) {
return name.GetObject();
}
}
// There's no matching entry in the list.
return nullptr;
}
} // namespace Kernel
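
A hedged usage sketch (not part of the commit); kernel and obj are assumed context, and "my:Port" is just an example name within the 11-character limit.

void ObjectNameExample(Kernel::KernelSystem& kernel, Kernel::KAutoObject* obj) {
    using namespace Kernel;

    // Publish the object under a global name.
    const Result result = KObjectName::NewFromName(kernel, obj, "my:Port");
    ASSERT(result.IsSuccess());

    // Later, anyone holding the kernel can look it up again.
    auto found = KObjectName::Find(kernel, "my:Port");
    ASSERT(found.IsNotNull());
}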

View File

@ -0,0 +1,82 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include "common/intrusive_list.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_slab_heap.h"
namespace Kernel {
class KObjectNameGlobalData;
class KObjectName : public KSlabAllocated<KObjectName>,
public Common::IntrusiveListBaseNode<KObjectName> {
public:
explicit KObjectName(KernelSystem&) {}
virtual ~KObjectName() = default;
static constexpr size_t NameLengthMax = 12;
using List = Common::IntrusiveListBaseTraits<KObjectName>::ListType;
static Result NewFromName(KernelSystem& kernel, KAutoObject* obj, const char* name);
static Result Delete(KernelSystem& kernel, KAutoObject* obj, const char* name);
static KScopedAutoObject<KAutoObject> Find(KernelSystem& kernel, const char* name);
template <typename Derived>
static Result Delete(KernelSystem& kernel, const char* name) {
// Find the object.
KScopedAutoObject obj = Find(kernel, name);
R_UNLESS(obj.IsNotNull(), ResultNotFound);
// Cast the object to the desired type.
Derived* derived = obj->DynamicCast<Derived*>();
R_UNLESS(derived != nullptr, ResultNotFound);
// Check that the object is closed.
R_UNLESS(derived->IsServerClosed(), ResultInvalidAddressState);
return Delete(kernel, obj.GetPointerUnsafe(), name);
}
template <typename Derived>
requires(std::derived_from<Derived, KAutoObject>)
static KScopedAutoObject<Derived> Find(KernelSystem& kernel, const char* name) {
return Find(kernel, name);
}
private:
static KScopedAutoObject<KAutoObject> FindImpl(KernelSystem& kernel, const char* name);
void Initialize(KAutoObject* obj, const char* name);
bool MatchesName(const char* name) const;
KAutoObject* GetObject() const {
return m_object;
}
private:
std::array<char, NameLengthMax> m_name{};
KAutoObject* m_object{};
};
class KObjectNameGlobalData {
public:
explicit KObjectNameGlobalData(KernelSystem& kernel);
~KObjectNameGlobalData();
KObjectName::List& GetObjectList() {
return m_object_list;
}
private:
// KMutex m_mutex;
KObjectName::List m_object_list;
};
} // namespace Kernel

View File

@ -0,0 +1,25 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/hle/kernel/k_port.h"
namespace Kernel {
KPort::KPort(KernelSystem& kernel)
: KAutoObjectWithSlabHeapAndContainer{kernel}, m_server{kernel}, m_client{kernel} {}
KPort::~KPort() = default;
void KPort::Initialize(s32 max_sessions, std::string name) {
// Open a new reference count to the initialized port.
this->Open();
// Create and initialize our server/client pair.
KAutoObject::Create(std::addressof(m_server));
KAutoObject::Create(std::addressof(m_client));
m_server.Initialize(this, name);
m_client.Initialize(this, max_sessions, name);
}
} // namespace Kernel
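
A hedged usage sketch (not part of the commit) following the Create/Initialize/Register pattern used elsewhere in this commit; KPort::Create and KPort::Register are assumed to exist via the slab helper base, kernel is assumed context, and "srv:pm" is only an example name.

void PortExample(Kernel::KernelSystem& kernel) {
    using namespace Kernel;

    KPort* port = KPort::Create(kernel);
    port->Initialize(/*max_sessions=*/1, "srv:pm");
    KPort::Register(kernel, port);

    // A client connects through the client endpoint; the new session is either
    // handed to the HLE handler or queued on the server port (see KClientPort above).
    KClientSession* client_session = nullptr;
    const Result result = port->GetClientPort().CreateSession(std::addressof(client_session));
    ASSERT(result.IsSuccess());
}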

View File

@ -0,0 +1,52 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/common_types.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Kernel {
class KServerSession;
class KPort final : public KAutoObjectWithSlabHeapAndContainer<KPort> {
KERNEL_AUTOOBJECT_TRAITS(KPort, KAutoObject);
public:
explicit KPort(KernelSystem& kernel);
~KPort() override;
static void PostDestroy(uintptr_t arg) {}
void Initialize(s32 max_sessions, std::string name);
void OnClientClosed();
void OnServerClosed();
bool IsServerClosed() const;
Result EnqueueSession(KServerSession* session);
KClientPort& GetClientPort() {
return m_client;
}
KServerPort& GetServerPort() {
return m_server;
}
const KClientPort& GetClientPort() const {
return m_client;
}
const KServerPort& GetServerPort() const {
return m_server;
}
private:
KServerPort m_server;
KClientPort m_client;
};
} // namespace Kernel

View File

@ -12,18 +12,18 @@
#include <boost/serialization/vector.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/logging/log.h"
#include "common/serialization/boost_vector.hpp"
#include "common/scope_exit.h"
#include "core/core.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_auto_object_container.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/memory.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/hle/service/plgldr/plgldr.h"
#include "core/loader/loader.h"
#include "core/memory.h"
SERIALIZE_EXPORT_IMPL(Kernel::AddressMapping)
@ -44,14 +44,13 @@ SERIALIZE_IMPL(AddressMapping)
template <class Archive>
void Process::serialize(Archive& ar, const unsigned int) {
ar& boost::serialization::base_object<Object>(*this);
ar& boost::serialization::base_object<KAutoObject>(*this);
ar& handle_table;
ar& codeset; // TODO: Replace with apploader reference
ar& resource_limit;
ar& svc_access_mask;
ar& handle_table_size;
ar&(boost::container::vector<AddressMapping, boost::container::dtl::static_storage_allocator<
AddressMapping, 8, 0, true>>&)address_mappings;
// ar& address_mappings;
ar& flags.raw;
ar& no_thread_restrictions;
ar& kernel_version;
@ -68,52 +67,7 @@ void Process::serialize(Archive& ar, const unsigned int) {
}
SERIALIZE_IMPL(Process)
std::shared_ptr<CodeSet> KernelSystem::CreateCodeSet(std::string name, u64 program_id) {
auto codeset{std::make_shared<CodeSet>(*this)};
codeset->name = std::move(name);
codeset->program_id = program_id;
return codeset;
}
CodeSet::CodeSet(KernelSystem& kernel) : Object(kernel) {}
CodeSet::~CodeSet() {}
template <class Archive>
void CodeSet::serialize(Archive& ar, const unsigned int) {
ar& boost::serialization::base_object<Object>(*this);
ar& memory;
ar& segments;
ar& entrypoint;
ar& name;
ar& program_id;
}
SERIALIZE_IMPL(CodeSet)
template <class Archive>
void CodeSet::Segment::serialize(Archive& ar, const unsigned int) {
ar& offset;
ar& addr;
ar& size;
}
SERIALIZE_IMPL(CodeSet::Segment)
std::shared_ptr<Process> KernelSystem::CreateProcess(std::shared_ptr<CodeSet> code_set) {
auto process{std::make_shared<Process>(*this)};
process->codeset = std::move(code_set);
process->flags.raw = 0;
process->flags.memory_region.Assign(MemoryRegion::APPLICATION);
process->status = ProcessStatus::Created;
process->process_id = ++next_process_id;
process->creation_time_ticks = timing.GetTicks();
process_list.push_back(process);
return process;
}
void KernelSystem::TerminateProcess(std::shared_ptr<Process> process) {
void KernelSystem::TerminateProcess(Process* process) {
LOG_INFO(Kernel_SVC, "Process {} exiting", process->process_id);
ASSERT_MSG(process->status == ProcessStatus::Running, "Process has already exited");
@ -198,6 +152,8 @@ void Process::ParseKernelCaps(const u32* kernel_caps, std::size_t len) {
LOG_ERROR(Loader, "Unhandled kernel caps descriptor: 0x{:08X}", descriptor);
}
}
handle_table.Initialize(handle_table_size);
}
void Process::Set3dsxKernelCaps() {
@ -219,25 +175,20 @@ void Process::Set3dsxKernelCaps() {
void Process::Run(s32 main_thread_priority, u32 stack_size) {
memory_region = kernel.GetMemoryRegion(flags.memory_region);
// Ensure we can reserve a thread. Real kernel returns 0xC860180C if this fails.
if (!resource_limit->Reserve(ResourceLimitType::Thread, 1)) {
return;
}
VAddr out_addr{};
auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions,
MemoryState memory_state) {
HeapAllocate(std::addressof(out_addr), segment.addr, segment.size, permissions,
memory_state, true);
kernel.memory.WriteBlock(*this, segment.addr, codeset->memory.data() + segment.offset,
kernel.memory.WriteBlock(*this, segment.addr, codeset.memory.data() + segment.offset,
segment.size);
};
// Map CodeSet segments
MapSegment(codeset->CodeSegment(), VMAPermission::ReadExecute, MemoryState::Code);
MapSegment(codeset->RODataSegment(), VMAPermission::Read, MemoryState::Code);
MapSegment(codeset->DataSegment(), VMAPermission::ReadWrite, MemoryState::Private);
MapSegment(codeset.CodeSegment(), VMAPermission::ReadExecute, MemoryState::Code);
MapSegment(codeset.RODataSegment(), VMAPermission::Read, MemoryState::Code);
MapSegment(codeset.DataSegment(), VMAPermission::ReadWrite, MemoryState::Private);
// Allocate and map stack
HeapAllocate(std::addressof(out_addr), Memory::HEAP_VADDR_END - stack_size, stack_size,
@ -255,9 +206,23 @@ void Process::Run(s32 main_thread_priority, u32 stack_size) {
}
status = ProcessStatus::Running;
vm_manager.LogLayout(Common::Log::Level::Debug);
Kernel::SetupMainThread(kernel, codeset->entrypoint, main_thread_priority, SharedFrom(this));
// Place a tentative reservation of a thread for this process.
KScopedResourceReservation thread_reservation(this, ResourceLimitType::Thread);
ASSERT(thread_reservation.Succeeded());
// Create a new thread for the process.
KThread* main_thread = KThread::Create(m_kernel);
ASSERT(main_thread != nullptr);
// Initialize the thread.
main_thread->Initialize("", codeset.entrypoint, main_thread_priority, 0, ideal_processor,
Memory::HEAP_VADDR_END, this);
// Register the thread, and commit our reservation.
KThread::Register(m_kernel, main_thread);
thread_reservation.Commit();
}
void Process::Exit() {
@ -425,7 +390,7 @@ Result Process::LinearFree(VAddr target, u32 size) {
return ResultSuccess;
}
ResultVal<VAddr> Process::AllocateThreadLocalStorage() {
Result Process::AllocateThreadLocalStorage(VAddr* out_tls_addr) {
std::size_t tls_page;
std::size_t tls_slot;
bool needs_allocation = true;
@ -492,7 +457,8 @@ ResultVal<VAddr> Process::AllocateThreadLocalStorage() {
static_cast<VAddr>(tls_slot) * Memory::TLS_ENTRY_SIZE;
kernel.memory.ZeroBlock(*this, tls_address, Memory::TLS_ENTRY_SIZE);
return tls_address;
*out_tls_addr = tls_address;
return ResultSuccess;
}
Result Process::Map(VAddr target, VAddr source, u32 size, VMAPermission perms, bool privileged) {
@ -590,6 +556,11 @@ Result Process::Unmap(VAddr target, VAddr source, u32 size, VMAPermission perms,
return ResultSuccess;
}
void Process::ReleaseResource(ResourceLimitType type, s32 amount) {
ASSERT(resource_limit);
resource_limit->Release(type, amount);
}
void Process::FreeAllMemory() {
if (memory_region == nullptr || resource_limit == nullptr) {
return;
@ -627,30 +598,35 @@ void Process::FreeAllMemory() {
}
Kernel::Process::Process(KernelSystem& kernel)
: Object(kernel), handle_table(kernel), vm_manager(kernel.memory, *this), kernel(kernel) {
: KAutoObjectWithSlabHeapAndContainer(kernel), handle_table(kernel),
vm_manager(kernel.memory, *this), kernel(kernel) {
kernel.memory.RegisterPageTable(vm_manager.page_table);
}
Kernel::Process::~Process() {
LOG_INFO(Kernel, "Cleaning up process {}", process_id);
// Release all objects this process owns first so that their potential destructor can do clean
// up with this process before further destruction.
// TODO(wwylele): explicitly destroy or invalidate objects this process owns (threads, shared
// memory etc.) even if they are still referenced by other processes.
handle_table.Clear();
Kernel::Process::~Process() = default;
void Process::Initialize() {
flags.memory_region.Assign(MemoryRegion::APPLICATION);
status = ProcessStatus::Created;
process_id = m_kernel.NewProcessId();
creation_time_ticks = m_kernel.timing.GetTicks();
m_kernel.process_list.push_back(this);
handle_table.Initialize(handle_table_size);
}
void Process::Finalize() {
handle_table.Finalize();
FreeAllMemory();
kernel.memory.UnregisterPageTable(vm_manager.page_table);
}
std::shared_ptr<Process> KernelSystem::GetProcessById(u32 process_id) const {
auto itr = std::find_if(
process_list.begin(), process_list.end(),
[&](const std::shared_ptr<Process>& process) { return process->process_id == process_id; });
Process* KernelSystem::GetProcessById(u32 process_id) const {
auto it = std::ranges::find_if(
process_list, [&](const auto process) { return process->process_id == process_id; });
if (itr == process_list.end())
if (it == process_list.end()) {
return nullptr;
return *itr;
}
return *it;
}
} // namespace Kernel

View File

@ -4,18 +4,17 @@
#pragma once
#include <array>
#include <bitset>
#include <cstddef>
#include <memory>
#include <string>
#include <vector>
#include <boost/container/static_vector.hpp>
#include <boost/serialization/export.hpp>
#include "common/bit_field.h"
#include "common/common_types.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/k_code_set.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/vm_manager.h"
namespace Kernel {
@ -51,110 +50,37 @@ union ProcessFlags {
BitField<12, 1, u16> loaded_high; ///< Application loaded high (not at 0x00100000).
};
enum class ProcessStatus { Created, Running, Exited };
class ResourceLimit;
struct MemoryRegionInfo;
class CodeSet final : public Object {
public:
explicit CodeSet(KernelSystem& kernel);
~CodeSet() override;
struct Segment {
std::size_t offset = 0;
VAddr addr = 0;
u32 size = 0;
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int);
};
std::string GetTypeName() const override {
return "CodeSet";
}
std::string GetName() const override {
return name;
}
static constexpr HandleType HANDLE_TYPE = HandleType::CodeSet;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
Segment& CodeSegment() {
return segments[0];
}
const Segment& CodeSegment() const {
return segments[0];
}
Segment& RODataSegment() {
return segments[1];
}
const Segment& RODataSegment() const {
return segments[1];
}
Segment& DataSegment() {
return segments[2];
}
const Segment& DataSegment() const {
return segments[2];
}
std::vector<u8> memory;
std::array<Segment, 3> segments;
VAddr entrypoint;
/// Name of the process
std::string name;
/// Title ID corresponding to the process
u64 program_id;
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int);
enum class ProcessStatus {
Created,
Running,
Exited,
};
class Process final : public Object {
class KResourceLimit;
enum class ResourceLimitType : u32;
struct MemoryRegionInfo;
class Process final : public KAutoObjectWithSlabHeapAndContainer<Process> {
KERNEL_AUTOOBJECT_TRAITS(Process, KAutoObject);
public:
explicit Process(Kernel::KernelSystem& kernel);
~Process() override;
std::string GetTypeName() const override {
return "Process";
}
std::string GetName() const override {
return codeset->name;
}
KHandleTable handle_table;
static constexpr HandleType HANDLE_TYPE = HandleType::Process;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
HandleTable handle_table;
std::shared_ptr<CodeSet> codeset;
CodeSet codeset{};
/// Resource limit descriptor for this process
std::shared_ptr<ResourceLimit> resource_limit;
KResourceLimit* resource_limit{};
/// The process may only call SVCs which have the corresponding bit set.
std::bitset<0x80> svc_access_mask;
/// Maximum size of the handle table for the process.
unsigned int handle_table_size = 0x200;
u32 handle_table_size = 0x200;
/// Special memory ranges mapped into this processes address space. This is used to give
/// processes access to specific I/O regions and device memory.
boost::container::static_vector<AddressMapping, 8> address_mappings;
ProcessFlags flags;
ProcessFlags flags{};
bool no_thread_restrictions = false;
/// Kernel compatibility version for this process
u16 kernel_version = 0;
@ -169,6 +95,12 @@ public:
// Creation time in ticks of the process.
u64 creation_time_ticks;
void Initialize();
static void PostDestroy(uintptr_t arg) {}
void Finalize() override;
/**
* Parses a list of kernel capability descriptors (as found in the ExHeader) and applies them
* to this process.
@ -190,9 +122,6 @@ public:
*/
void Exit();
///////////////////////////////////////////////////////////////////////////////////////////////
// Memory Management
VMManager vm_manager;
u32 memory_used = 0;
@ -220,12 +149,14 @@ public:
Result LinearAllocate(VAddr* out_addr, VAddr target, u32 size, VMAPermission perms);
Result LinearFree(VAddr target, u32 size);
ResultVal<VAddr> AllocateThreadLocalStorage();
Result AllocateThreadLocalStorage(VAddr* out_tls);
Result Map(VAddr target, VAddr source, u32 size, VMAPermission perms, bool privileged = false);
Result Unmap(VAddr target, VAddr source, u32 size, VMAPermission perms,
bool privileged = false);
void ReleaseResource(ResourceLimitType type, s32 amount);
private:
void FreeAllMemory();
@ -238,9 +169,5 @@ private:
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::AddressMapping)
BOOST_CLASS_EXPORT_KEY(Kernel::CodeSet)
BOOST_CLASS_EXPORT_KEY(Kernel::CodeSet::Segment)
BOOST_CLASS_EXPORT_KEY(Kernel::Process)
CONSTRUCT_KERNEL_OBJECT(Kernel::CodeSet)
CONSTRUCT_KERNEL_OBJECT(Kernel::Process)

View File

@ -1,4 +1,4 @@
// Copyright 2015 Citra Emulator Project
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
@ -9,40 +9,36 @@
#include "common/archives.h"
#include "common/assert.h"
#include "common/settings.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/k_auto_object_container.h"
#include "core/hle/kernel/k_resource_limit.h"
SERIALIZE_EXPORT_IMPL(Kernel::ResourceLimit)
SERIALIZE_EXPORT_IMPL(Kernel::KResourceLimit)
SERIALIZE_EXPORT_IMPL(Kernel::ResourceLimitList)
namespace Kernel {
ResourceLimit::ResourceLimit(KernelSystem& kernel) : Object(kernel) {}
KResourceLimit::KResourceLimit(KernelSystem& kernel)
: KAutoObjectWithSlabHeapAndContainer(kernel) {}
ResourceLimit::~ResourceLimit() = default;
KResourceLimit::~KResourceLimit() = default;
std::shared_ptr<ResourceLimit> ResourceLimit::Create(KernelSystem& kernel, std::string name) {
auto resource_limit = std::make_shared<ResourceLimit>(kernel);
resource_limit->m_name = std::move(name);
return resource_limit;
}
s32 ResourceLimit::GetCurrentValue(ResourceLimitType type) const {
const auto index = static_cast<std::size_t>(type);
s32 KResourceLimit::GetCurrentValue(ResourceLimitType type) const {
const auto index = static_cast<size_t>(type);
return m_current_values[index];
}
s32 ResourceLimit::GetLimitValue(ResourceLimitType type) const {
const auto index = static_cast<std::size_t>(type);
s32 KResourceLimit::GetLimitValue(ResourceLimitType type) const {
const auto index = static_cast<size_t>(type);
return m_limit_values[index];
}
void ResourceLimit::SetLimitValue(ResourceLimitType type, s32 value) {
const auto index = static_cast<std::size_t>(type);
void KResourceLimit::SetLimitValue(ResourceLimitType type, s32 value) {
const auto index = static_cast<size_t>(type);
m_limit_values[index] = value;
}
bool ResourceLimit::Reserve(ResourceLimitType type, s32 amount) {
const auto index = static_cast<std::size_t>(type);
bool KResourceLimit::Reserve(ResourceLimitType type, s32 amount) {
const auto index = static_cast<size_t>(type);
const s32 limit = m_limit_values[index];
const s32 new_value = m_current_values[index] + amount;
if (new_value > limit) {
@ -54,8 +50,8 @@ bool ResourceLimit::Reserve(ResourceLimitType type, s32 amount) {
return true;
}
bool ResourceLimit::Release(ResourceLimitType type, s32 amount) {
const auto index = static_cast<std::size_t>(type);
bool KResourceLimit::Release(ResourceLimitType type, s32 amount) {
const auto index = static_cast<size_t>(type);
const s32 value = m_current_values[index];
if (amount > value) {
LOG_ERROR(Kernel, "Amount {} exceeds current value {} for resource type {}", amount, value,
@ -67,13 +63,13 @@ bool ResourceLimit::Release(ResourceLimitType type, s32 amount) {
}
template <class Archive>
void ResourceLimit::serialize(Archive& ar, const unsigned int) {
ar& boost::serialization::base_object<Object>(*this);
void KResourceLimit::serialize(Archive& ar, const unsigned int) {
ar& boost::serialization::base_object<KAutoObject>(*this);
ar& m_name;
ar& m_limit_values;
ar& m_current_values;
}
SERIALIZE_IMPL(ResourceLimit)
SERIALIZE_IMPL(KResourceLimit)
ResourceLimitList::ResourceLimitList(KernelSystem& kernel) {
// PM makes APPMEMALLOC always match app RESLIMIT_COMMIT.
@ -81,8 +77,15 @@ ResourceLimitList::ResourceLimitList(KernelSystem& kernel) {
const bool is_new_3ds = Settings::values.is_new_3ds.GetValue();
const auto& appmemalloc = kernel.GetMemoryRegion(MemoryRegion::APPLICATION);
const auto CreateLimit = [&](std::string name) {
KResourceLimit* limit = KResourceLimit::Create(kernel);
limit->Initialize(name);
KResourceLimit::Register(kernel, limit);
return limit;
};
// Create the Application resource limit
auto resource_limit = ResourceLimit::Create(kernel, "Applications");
auto resource_limit = CreateLimit("Applications");
resource_limit->SetLimitValue(ResourceLimitType::Priority, 0x18);
resource_limit->SetLimitValue(ResourceLimitType::Commit, appmemalloc->size);
resource_limit->SetLimitValue(ResourceLimitType::Thread, 0x20);
@ -96,7 +99,7 @@ ResourceLimitList::ResourceLimitList(KernelSystem& kernel) {
resource_limits[static_cast<u8>(ResourceLimitCategory::Application)] = resource_limit;
// Create the SysApplet resource limit
resource_limit = ResourceLimit::Create(kernel, "System Applets");
resource_limit = CreateLimit("System Applets");
resource_limit->SetLimitValue(ResourceLimitType::Priority, 0x4);
resource_limit->SetLimitValue(ResourceLimitType::Commit, is_new_3ds ? 0x5E06000 : 0x2606000);
resource_limit->SetLimitValue(ResourceLimitType::Thread, is_new_3ds ? 0x1D : 0xE);
@ -110,7 +113,7 @@ ResourceLimitList::ResourceLimitList(KernelSystem& kernel) {
resource_limits[static_cast<u8>(ResourceLimitCategory::SysApplet)] = resource_limit;
// Create the LibApplet resource limit
resource_limit = ResourceLimit::Create(kernel, "Library Applets");
resource_limit = CreateLimit("Library Applets");
resource_limit->SetLimitValue(ResourceLimitType::Priority, 0x4);
resource_limit->SetLimitValue(ResourceLimitType::Commit, 0x602000);
resource_limit->SetLimitValue(ResourceLimitType::Thread, 0xE);
@ -124,7 +127,7 @@ ResourceLimitList::ResourceLimitList(KernelSystem& kernel) {
resource_limits[static_cast<u8>(ResourceLimitCategory::LibApplet)] = resource_limit;
// Create the Other resource limit
resource_limit = ResourceLimit::Create(kernel, "Others");
resource_limit = CreateLimit("Others");
resource_limit->SetLimitValue(ResourceLimitType::Priority, 0x4);
resource_limit->SetLimitValue(ResourceLimitType::Commit, is_new_3ds ? 0x2182000 : 0x1682000);
resource_limit->SetLimitValue(ResourceLimitType::Thread, is_new_3ds ? 0xE1 : 0xCA);
@ -140,7 +143,7 @@ ResourceLimitList::ResourceLimitList(KernelSystem& kernel) {
ResourceLimitList::~ResourceLimitList() = default;
std::shared_ptr<ResourceLimit> ResourceLimitList::GetForCategory(ResourceLimitCategory category) {
KResourceLimit* ResourceLimitList::GetForCategory(ResourceLimitCategory category) {
switch (category) {
case ResourceLimitCategory::Application:
case ResourceLimitCategory::SysApplet:

View File

@ -1,4 +1,4 @@
// Copyright 2015 Citra Emulator Project
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
@ -8,7 +8,8 @@
#include <memory>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/global.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Kernel {
@ -33,28 +34,14 @@ enum class ResourceLimitType : u32 {
Max = 10,
};
class ResourceLimit final : public Object {
class KResourceLimit final : public KAutoObjectWithSlabHeapAndContainer<KResourceLimit> {
KERNEL_AUTOOBJECT_TRAITS(KResourceLimit, KAutoObject);
public:
explicit ResourceLimit(KernelSystem& kernel);
~ResourceLimit() override;
explicit KResourceLimit(KernelSystem& kernel);
~KResourceLimit() override;
/**
* Creates a resource limit object.
*/
static std::shared_ptr<ResourceLimit> Create(KernelSystem& kernel,
std::string name = "Unknown");
std::string GetTypeName() const override {
return "ResourceLimit";
}
std::string GetName() const override {
return m_name;
}
static constexpr HandleType HANDLE_TYPE = HandleType::ResourceLimit;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
void Initialize(std::string name) {
m_name = std::move(name);
}
s32 GetCurrentValue(ResourceLimitType type) const;
s32 GetLimitValue(ResourceLimitType type) const;
@ -64,16 +51,18 @@ public:
bool Reserve(ResourceLimitType type, s32 amount);
bool Release(ResourceLimitType type, s32 amount);
static void PostDestroy(uintptr_t arg) {}
private:
using ResourceArray = std::array<s32, static_cast<std::size_t>(ResourceLimitType::Max)>;
ResourceArray m_limit_values{};
ResourceArray m_current_values{};
std::string m_name;
std::string m_name{};
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int);
void serialize(Archive& ar, const unsigned int file_version);
};
class ResourceLimitList {
@ -86,10 +75,10 @@ public:
* @param category The resource limit category
* @returns The resource limit associated with the category
*/
std::shared_ptr<ResourceLimit> GetForCategory(ResourceLimitCategory category);
KResourceLimit* GetForCategory(ResourceLimitCategory category);
private:
std::array<std::shared_ptr<ResourceLimit>, 4> resource_limits;
std::array<KResourceLimit*, 4> resource_limits;
friend class boost::serialization::access;
template <class Archive>
@ -98,7 +87,7 @@ private:
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::ResourceLimit)
BOOST_CLASS_EXPORT_KEY(Kernel::KResourceLimit)
BOOST_CLASS_EXPORT_KEY(Kernel::ResourceLimitList)
CONSTRUCT_KERNEL_OBJECT(Kernel::ResourceLimit)
CONSTRUCT_KERNEL_OBJECT(Kernel::KResourceLimit)
CONSTRUCT_KERNEL_OBJECT(Kernel::ResourceLimitList)

View File

@ -0,0 +1,50 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/common_types.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
namespace Kernel {
class KScopedResourceReservation {
public:
explicit KScopedResourceReservation(KResourceLimit* l, ResourceLimitType type, s32 amount = 1)
: m_limit(l), m_amount(amount), m_type(type) {
if (m_limit) {
m_succeeded = m_limit->Reserve(m_type, m_amount);
} else {
m_succeeded = true;
}
}
explicit KScopedResourceReservation(const Process* p, ResourceLimitType type, s32 amount = 1)
: KScopedResourceReservation(p->resource_limit, type, amount) {}
~KScopedResourceReservation() noexcept {
if (m_limit && m_succeeded) {
// Resource was not committed, release the reservation.
m_limit->Release(m_type, m_amount);
}
}
/// Commit the resource reservation; once committed, destruction of this object does not release the resource
void Commit() {
m_limit = nullptr;
}
bool Succeeded() const {
return m_succeeded;
}
private:
KResourceLimit* m_limit{};
s32 m_amount{};
ResourceLimitType m_type{};
bool m_succeeded{};
};
} // namespace Kernel
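
For context, Process::Run earlier in this diff uses the same reserve-then-commit pattern for threads. A hedged sketch of the general shape (not part of the commit; current_process and kernel are assumed context, and KEvent is only an illustrative stand-in):

void ReservationExample(Kernel::KernelSystem& kernel, Kernel::Process* current_process) {
    using namespace Kernel;

    // Tentatively reserve one event against the process' resource limit.
    KScopedResourceReservation event_reservation(current_process, ResourceLimitType::Event);
    ASSERT(event_reservation.Succeeded());

    // Create and register the object the reservation pays for.
    KEvent* event = KEvent::Create(kernel);
    event->Initialize(current_process, ResetType::OneShot);
    KEvent::Register(kernel, event);

    // Commit so the destructor no longer releases the reserved count.
    event_reservation.Commit();
}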

View File

@ -0,0 +1,78 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/export.hpp>
#include <boost/serialization/string.hpp>
#include "common/archives.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_auto_object_container.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_semaphore.h"
#include "core/hle/kernel/kernel.h"
SERIALIZE_EXPORT_IMPL(Kernel::KSemaphore)
namespace Kernel {
KSemaphore::KSemaphore(KernelSystem& kernel) : KAutoObjectWithSlabHeapAndContainer(kernel) {}
KSemaphore::~KSemaphore() = default;
void KSemaphore::Initialize(Process* owner, s32 initial_count, s32 max_count, std::string name) {
// Open a reference to the owner process.
if (owner) {
owner->Open();
m_owner = owner;
}
// Set member variables
m_available_count = initial_count;
m_max_count = max_count;
m_name = name;
}
void KSemaphore::PostDestroy(uintptr_t arg) {
Process* owner = reinterpret_cast<Process*>(arg);
if (owner != nullptr) {
owner->ReleaseResource(ResourceLimitType::Semaphore, 1);
owner->Close();
}
}
bool KSemaphore::ShouldWait(const KThread* thread) const {
return m_available_count <= 0;
}
void KSemaphore::Acquire(KThread* thread) {
if (m_available_count <= 0) {
return;
}
--m_available_count;
}
Result KSemaphore::Release(s32* out_count, s32 release_count) {
R_UNLESS(release_count + m_available_count <= m_max_count, ResultOutOfRangeKernel);
// Update available count.
const s32 previous_count = m_available_count;
m_available_count += release_count;
// Wakeup waiting threads and return.
this->WakeupAllWaitingThreads();
*out_count = previous_count;
return ResultSuccess;
}
template <class Archive>
void KSemaphore::serialize(Archive& ar, const u32 file_version) {
ar& boost::serialization::base_object<KSynchronizationObject>(*this);
ar& m_max_count;
ar& m_available_count;
}
SERIALIZE_IMPL(KSemaphore)
} // namespace Kernel
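As a quick sanity check of the Release() bounds logic above, the following self-contained sketch mirrors the arithmetic; the Sem struct is invented for illustration and does not use the kernel types.

#include <cstdint>
#include <optional>

// Toy model of KSemaphore::Release: returns the previous count on success, or
// std::nullopt when the release would push the count past the maximum
// (the case that yields ResultOutOfRangeKernel above).
struct Sem {
    int32_t available;
    int32_t max;
    std::optional<int32_t> Release(int32_t release_count) {
        if (release_count + available > max) {
            return std::nullopt;
        }
        const int32_t previous = available;
        available += release_count;
        return previous;
    }
};

// Example: Sem{2, 4}.Release(3) fails, while Sem{2, 4}.Release(2) returns 2 and leaves available == 4.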

View File

@ -0,0 +1,67 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/global.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Kernel {
class ResourceLimit;
class KSemaphore final
: public KAutoObjectWithSlabHeapAndContainer<KSemaphore, KSynchronizationObject> {
KERNEL_AUTOOBJECT_TRAITS(KSemaphore, KSynchronizationObject);
public:
explicit KSemaphore(KernelSystem& kernel);
~KSemaphore() override;
void Initialize(Process* owner, s32 initial_count, s32 max_count, std::string name);
uintptr_t GetPostDestroyArgument() const override {
return reinterpret_cast<uintptr_t>(m_owner);
}
static void PostDestroy(uintptr_t arg);
Process* GetOwner() const override {
return m_owner;
}
bool ShouldWait(const KThread* thread) const override;
void Acquire(KThread* thread) override;
s32 GetAvailableCount() const {
return m_available_count;
}
s32 GetMaxCount() const {
return m_max_count;
}
Result Release(s32* out_count, s32 release_count);
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version);
private:
Process* m_owner{};
s32 m_max_count{};
s32 m_available_count{};
std::string m_name;
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KSemaphore)
CONSTRUCT_KERNEL_OBJECT(Kernel::KSemaphore)

View File

@ -0,0 +1,68 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/vector.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::KServerPort)
namespace Kernel {
KServerPort::KServerPort(KernelSystem& kernel) : KSynchronizationObject(kernel) {}
KServerPort::~KServerPort() = default;
void KServerPort::Initialize(KPort* parent, std::string name) {
m_parent = parent;
m_name = name + "_Server";
}
void KServerPort::Destroy() {
// Close our reference to our parent.
m_parent->Close();
}
KServerSession* KServerPort::AcceptSession() {
// Return the most recently enqueued pending session, if any.
if (m_pending_sessions.empty()) {
return nullptr;
}
KServerSession* session = m_pending_sessions.back();
m_pending_sessions.pop_back();
return session;
}
void KServerPort::EnqueueSession(KServerSession* session) {
// Add the session to our queue.
m_pending_sessions.push_back(session);
}
bool KServerPort::ShouldWait(const KThread* thread) const {
// If there are no pending sessions, we wait until a new one is added.
return m_pending_sessions.size() == 0;
}
void KServerPort::Acquire(KThread* thread) {
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
}
template <class Archive>
void KServerPort::serialize(Archive& ar, const u32 file_version) {
ar& boost::serialization::base_object<KSynchronizationObject>(*this);
ar& m_name;
ar& m_pending_sessions;
// ar& m_hle_handler;
}
SERIALIZE_IMPL(KServerPort)
} // namespace Kernel
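Because AcceptSession() pops from the back of the same vector that EnqueueSession() pushes to, pending sessions are handed out in last-in-first-out order. The toy snippet below, with plain integers standing in for KServerSession pointers, just illustrates that ordering:

#include <cassert>
#include <vector>

int main() {
    std::vector<int> pending; // stand-in for m_pending_sessions
    pending.push_back(1);     // EnqueueSession(session1)
    pending.push_back(2);     // EnqueueSession(session2)

    int accepted = pending.back(); // AcceptSession()
    pending.pop_back();
    assert(accepted == 2); // the most recently enqueued session is returned first
}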

View File

@ -0,0 +1,61 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include <boost/serialization/export.hpp>
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_synchronization_object.h"
namespace Kernel {
class KClientPort;
class KServerSession;
class KPort;
class SessionRequestHandler;
class KServerPort final : public KSynchronizationObject {
KERNEL_AUTOOBJECT_TRAITS(KServerPort, KSynchronizationObject);
public:
explicit KServerPort(KernelSystem& kernel);
~KServerPort() override;
void Initialize(KPort* parent, std::string name);
void Destroy() override;
void EnqueueSession(KServerSession* session);
KServerSession* AcceptSession();
bool ShouldWait(const KThread* thread) const override;
void Acquire(KThread* thread) override;
void SetHleHandler(std::shared_ptr<SessionRequestHandler> hle_handler_) {
m_hle_handler = std::move(hle_handler_);
}
std::shared_ptr<SessionRequestHandler> GetHleHandler() {
return m_hle_handler;
}
private:
KPort* m_parent{};
std::string m_name;
std::vector<KServerSession*> m_pending_sessions;
std::shared_ptr<SessionRequestHandler> m_hle_handler;
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const unsigned int file_version);
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KServerPort)
CONSTRUCT_KERNEL_OBJECT(Kernel::KServerPort)

View File

@ -0,0 +1,144 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/vector.hpp>
#include "common/archives.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_thread.h"
SERIALIZE_EXPORT_IMPL(Kernel::KServerSession)
namespace Kernel {
KServerSession::KServerSession(KernelSystem& kernel) : KSynchronizationObject(kernel) {}
KServerSession::~KServerSession() = default;
void KServerSession::Destroy() {
m_parent->OnServerClosed();
m_parent->Close();
}
bool KServerSession::ShouldWait(const KThread* thread) const {
// Closed sessions should never wait; an error will be returned from svcReplyAndReceive.
const auto state = m_parent->GetState();
if (state != KSessionState::Normal) {
return false;
}
// Wait if we have no pending requests, or if we're currently handling a request.
return pending_requesting_threads.empty() || currently_handling != nullptr;
}
void KServerSession::Acquire(KThread* thread) {
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
// If the client endpoint was closed, don't do anything. This KServerSession is now useless and
// will linger until its last handle is closed by the running application.
const auto state = m_parent->GetState();
if (state != KSessionState::Normal) {
return;
}
// We are now handling a request, pop it from the stack.
ASSERT(!pending_requesting_threads.empty());
currently_handling = pending_requesting_threads.back();
pending_requesting_threads.pop_back();
}
void KServerSession::OnClientClosed() {
// Notify HLE handler that client session has been disconnected.
if (hle_handler) {
hle_handler->ClientDisconnected(this);
}
// Clean up the list of client threads with pending requests, they are unneeded now that the
// client endpoint is closed.
pending_requesting_threads.clear();
currently_handling = nullptr;
// Notify any threads waiting on the KServerSession that the endpoint has been closed. Note
// that this call has to happen after the parent KSession state has been updated, so that the
// KServerSession knows the client endpoint has been closed.
this->WakeupAllWaitingThreads();
}
Result KServerSession::HandleSyncRequest(KThread* thread) {
// The KServerSession received a sync request; this means there is new data available from its
// KClientSession, so wake up any threads that may be waiting on svcReplyAndReceive or
// similar.
// If this KServerSession has an associated HLE handler, forward the request to it.
if (hle_handler != nullptr) {
std::array<u32_le, IPC::COMMAND_BUFFER_LENGTH + 2 * IPC::MAX_STATIC_BUFFERS> cmd_buf;
auto current_process = thread->GetOwner();
ASSERT(current_process);
m_kernel.memory.ReadBlock(*current_process, thread->GetCommandBufferAddress(),
cmd_buf.data(), cmd_buf.size() * sizeof(u32));
auto context = std::make_shared<Kernel::HLERequestContext>(m_kernel, this, thread);
context->PopulateFromIncomingCommandBuffer(cmd_buf.data(), current_process);
hle_handler->HandleSyncRequest(*context);
ASSERT(thread->m_status == Kernel::ThreadStatus::Running ||
thread->m_status == Kernel::ThreadStatus::WaitHleEvent);
// Only write the response immediately if the thread is still running. If the HLE handler
// put the thread to sleep then the writing of the command buffer will be deferred to the
// wakeup callback.
if (thread->m_status == Kernel::ThreadStatus::Running) {
context->WriteToOutgoingCommandBuffer(cmd_buf.data(), *current_process);
m_kernel.memory.WriteBlock(*current_process, thread->GetCommandBufferAddress(),
cmd_buf.data(), cmd_buf.size() * sizeof(u32));
}
}
if (thread->m_status == ThreadStatus::Running) {
// Put the thread to sleep until the server replies, it will be awoken in
// svcReplyAndReceive for LLE servers.
thread->m_status = ThreadStatus::WaitIPC;
if (hle_handler != nullptr) {
// For HLE services, we put the request threads to sleep for a short duration to
// simulate IPC overhead, but only if the HLE handler didn't put the thread to sleep for
// other reasons like an async callback. The IPC overhead is needed to prevent
// starvation when a thread only does sync requests to HLE services while a
// lower-priority thread is waiting to run.
// This delay was approximated in a homebrew application by measuring the average time
// it takes for svcSendSyncRequest to return when performing the SetLcdForceBlack IPC
// request to the GSP:GPU service in a n3DS with firmware 11.6. The measured values have
// a high variance and vary between models.
static constexpr u64 IPCDelayNanoseconds = 39000;
thread->WakeAfterDelay(IPCDelayNanoseconds);
} else {
// Add the thread to the list of threads that have issued a sync request with this
// server.
pending_requesting_threads.push_back(std::move(thread));
}
}
// If this KServerSession does not have an HLE implementation,
// just wake up the threads waiting on it.
this->WakeupAllWaitingThreads();
return ResultSuccess;
}
template <class Archive>
void KServerSession::serialize(Archive& ar, const u32 file_version) {
ar& boost::serialization::base_object<KSynchronizationObject>(*this);
ar& m_name;
// ar& m_parent;
ar& hle_handler;
ar& pending_requesting_threads;
ar& currently_handling;
ar& mapped_buffer_context;
}
SERIALIZE_IMPL(KServerSession)
} // namespace Kernel

View File

@ -0,0 +1,80 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/ipc.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/result.h"
namespace Kernel {
class ClientSession;
class ClientPort;
class KSession;
class SessionRequestHandler;
class KThread;
class KServerSession final : public KSynchronizationObject {
KERNEL_AUTOOBJECT_TRAITS(KServerSession, KSynchronizationObject);
public:
~KServerSession() override;
explicit KServerSession(KernelSystem& kernel);
void Destroy() override;
void Initialize(KSession* parent) {
m_parent = parent;
}
KSession* GetParent() const {
return m_parent;
}
KThread* GetCurrent() {
return currently_handling;
}
std::vector<MappedBufferContext>& GetMappedBufferContext() {
return mapped_buffer_context;
}
void SetHleHandler(std::shared_ptr<SessionRequestHandler>&& hle_handler_) {
hle_handler = std::move(hle_handler_);
}
std::shared_ptr<SessionRequestHandler>& GetHleHandler() {
return hle_handler;
}
void OnClientClosed();
Result HandleSyncRequest(KThread* thread);
bool ShouldWait(const KThread* thread) const override;
void Acquire(KThread* thread) override;
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version);
public:
std::string m_name;
KSession* m_parent{};
std::shared_ptr<SessionRequestHandler> hle_handler;
std::vector<KThread*> pending_requesting_threads;
KThread* currently_handling{};
std::vector<MappedBufferContext> mapped_buffer_context;
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::KServerSession)
CONSTRUCT_KERNEL_OBJECT(Kernel::KServerSession)

View File

@ -0,0 +1,63 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_session.h"
namespace Kernel {
KSession::KSession(KernelSystem& kernel)
: KAutoObjectWithSlabHeapAndContainer{kernel}, m_server{kernel}, m_client{kernel} {}
KSession::~KSession() = default;
void KSession::Initialize(KClientPort* client_port) {
// Increment reference count.
// Because reference count is one on creation, this will result
// in a reference count of two. Thus, when both server and client are closed
// this object will be destroyed.
this->Open();
// Create our sub sessions.
KAutoObject::Create(std::addressof(m_server));
KAutoObject::Create(std::addressof(m_client));
// Initialize our sub sessions.
m_state = KSessionState::Normal;
m_server.Initialize(this);
m_client.Initialize(this);
// Set our port.
m_port = client_port;
if (m_port != nullptr) {
m_port->Open();
}
// Mark initialized.
m_initialized = true;
}
void KSession::Finalize() {
if (m_port != nullptr) {
m_port->ConnectionClosed();
m_port->Close();
}
}
void KSession::OnServerClosed() {
if (m_state == KSessionState::Normal) {
m_state = KSessionState::ServerClosed;
m_client.OnServerClosed();
}
}
void KSession::OnClientClosed() {
if (m_state == KSessionState::Normal) {
m_state = KSessionState::ClientClosed;
m_server.OnClientClosed();
}
}
} // namespace Kernel
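The reference-count reasoning in KSession::Initialize above can be traced with a small standalone model; the ToyRefCounted struct is invented for illustration and only mimics the open/close arithmetic, not the real KAutoObject machinery.

#include <cassert>

// Toy model of the session lifetime: one reference on creation, a second one
// opened in Initialize, destruction only once both endpoints have closed.
struct ToyRefCounted {
    int refcount = 1;
    bool destroyed = false;
    void Open() { ++refcount; }
    void Close() {
        if (--refcount == 0) {
            destroyed = true; // stands in for Destroy()/Finalize()
        }
    }
};

int main() {
    ToyRefCounted session;
    session.Open();             // KSession::Initialize opens the second reference
    session.Close();            // client endpoint closed
    assert(!session.destroyed); // still alive for the server side
    session.Close();            // server endpoint closed
    assert(session.destroyed);  // only now is the session destroyed
}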

View File

@ -0,0 +1,76 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string>
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Kernel {
class KClientPort;
enum class KSessionState : u8 {
Invalid = 0,
Normal = 1,
ClientClosed = 2,
ServerClosed = 3,
};
class KSession final : public KAutoObjectWithSlabHeapAndContainer<KSession> {
KERNEL_AUTOOBJECT_TRAITS(KSession, KAutoObject);
public:
explicit KSession(KernelSystem& kernel);
~KSession() override;
void Initialize(KClientPort* port);
void Finalize() override;
bool IsInitialized() const override {
return m_initialized;
}
static void PostDestroy(uintptr_t arg) {}
void OnServerClosed();
void OnClientClosed();
KSessionState GetState() const {
return m_state;
}
KClientSession& GetClientSession() {
return m_client;
}
KServerSession& GetServerSession() {
return m_server;
}
const KClientSession& GetClientSession() const {
return m_client;
}
const KServerSession& GetServerSession() const {
return m_server;
}
KClientPort* GetParent() {
return m_port;
}
private:
KServerSession m_server;
KClientSession m_client;
KClientPort* m_port{};
KSessionState m_state{};
bool m_initialized{};
};
} // namespace Kernel

View File

@ -0,0 +1,238 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <boost/serialization/base_object.hpp>
#include "common/archives.h"
#include "common/logging/log.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/memory.h"
#include "core/memory.h"
SERIALIZE_EXPORT_IMPL(Kernel::KSharedMemory)
namespace Kernel {
KSharedMemory::KSharedMemory(KernelSystem& kernel) : KAutoObjectWithSlabHeapAndContainer(kernel) {}
KSharedMemory::~KSharedMemory() = default;
Result KSharedMemory::Initialize(Process* owner, u32 size, MemoryPermission permissions,
MemoryPermission other_permissions, VAddr address,
MemoryRegion region) {
// Open a reference to our owner process.
if (owner) {
owner->Open();
m_owner = owner;
}
// Set member variables.
m_base_address = address;
m_size = size;
m_permissions = permissions;
m_other_permissions = other_permissions;
// Allocate the shared memory block.
if (address == 0) {
// We need to allocate a block from the Linear Heap ourselves.
// We'll manually allocate some memory from the linear heap in the specified region.
auto memory_region = m_kernel.GetMemoryRegion(region);
auto offset = memory_region->LinearAllocate(size);
ASSERT_MSG(offset, "Not enough space in region to allocate shared memory!");
// Store the backing blocks of allocated memory.
auto& memory = m_kernel.memory;
std::fill(memory.GetFCRAMPointer(*offset), memory.GetFCRAMPointer(*offset + size), 0);
m_backing_blocks = {{memory.GetFCRAMRef(*offset), size}};
m_holding_memory += MemoryRegionInfo::Interval(*offset, *offset + size);
m_linear_heap_phys_offset = *offset;
// Increase the amount of used linear heap memory for the owner process.
if (m_owner) {
m_owner->memory_used += size;
}
} else {
// The memory is already available and mapped in the owner process.
ASSERT(m_owner);
auto& vm_manager = m_owner->vm_manager;
R_TRY(vm_manager.ChangeMemoryState(address, size, MemoryState::Private,
VMAPermission::ReadWrite, MemoryState::Locked,
KSharedMemory::ConvertPermissions(permissions)));
// Should succeed after verifying memory state above.
auto backing_blocks = vm_manager.GetBackingBlocksForRange(address, size);
ASSERT(backing_blocks.Succeeded());
m_backing_blocks = std::move(backing_blocks).Unwrap();
}
return ResultSuccess;
}
void KSharedMemory::InitializeForApplet(u32 offset, u32 size, MemoryPermission permissions,
MemoryPermission other_permissions) {
// Allocate memory in heap
auto memory_region = m_kernel.GetMemoryRegion(MemoryRegion::SYSTEM);
auto backing_blocks = memory_region->HeapAllocate(size);
ASSERT_MSG(!backing_blocks.empty(), "Not enough space in region to allocate shared memory!");
// Set member variables
m_holding_memory = backing_blocks;
m_base_address = Memory::HEAP_VADDR + offset;
m_size = size;
m_permissions = permissions;
m_other_permissions = other_permissions;
// Initialize backing blocks
auto& memory = m_kernel.memory;
for (const auto& interval : backing_blocks) {
const VAddr addr = interval.lower();
const VAddr end = interval.upper();
m_backing_blocks.emplace_back(memory.GetFCRAMRef(addr), end - addr);
std::fill(memory.GetFCRAMPointer(addr), memory.GetFCRAMPointer(end), 0);
}
}
void KSharedMemory::Finalize() {
auto memory_region = m_kernel.GetMemoryRegion(MemoryRegion::SYSTEM);
for (const auto& interval : m_holding_memory) {
memory_region->Free(interval.lower(), interval.upper() - interval.lower());
}
if (m_owner) {
if (m_base_address != 0) {
m_owner->vm_manager.ChangeMemoryState(m_base_address, m_size, MemoryState::Locked,
VMAPermission::None, MemoryState::Private,
VMAPermission::ReadWrite);
} else {
m_owner->memory_used -= m_size;
}
}
}
void KSharedMemory::PostDestroy(uintptr_t arg) {
Process* owner = reinterpret_cast<Process*>(arg);
if (owner != nullptr) {
owner->ReleaseResource(ResourceLimitType::SharedMemory, 1);
owner->Close();
}
}
Result KSharedMemory::Map(Process& target_process, VAddr address, MemoryPermission permissions,
MemoryPermission other_permissions) {
const MemoryPermission own_other_permissions =
&target_process == m_owner ? m_permissions : m_other_permissions;
// Automatically allocated memory blocks can only be mapped with other_permissions = DontCare
R_UNLESS(m_base_address != 0 || other_permissions == MemoryPermission::DontCare,
ResultInvalidCombination);
// Heap-backed memory blocks cannot be mapped with other_permissions = DontCare
R_UNLESS(m_base_address == 0 || other_permissions != MemoryPermission::DontCare,
ResultInvalidCombination);
// Error out if the requested permissions don't match what the creator process allows.
if (static_cast<u32>(permissions) & ~static_cast<u32>(own_other_permissions)) {
LOG_ERROR(Kernel, "cannot map address={:#08X}, permissions don't match", address);
return ResultInvalidCombination;
}
// Error out if the provided permissions are not compatible with what the creator process needs.
if (other_permissions != MemoryPermission::DontCare &&
static_cast<u32>(m_permissions) & ~static_cast<u32>(other_permissions)) {
LOG_ERROR(Kernel, "cannot map address={:#08X}, permissions don't match", address);
return ResultWrongPermission;
}
// TODO(Subv): Check for the Shared Device Mem flag in the creator process.
/*if (was_created_with_shared_device_mem && address != 0) {
return Result(ErrorDescription::InvalidCombination, ErrorModule::OS,
ErrorSummary::InvalidArgument, ErrorLevel::Usage);
}*/
// TODO(Subv): The same process that created a SharedMemory object
// can not map it in its own address space unless it was created with addr=0, result 0xD900182C.
if (address != 0) {
if (address < Memory::HEAP_VADDR || address + m_size >= Memory::SHARED_MEMORY_VADDR_END) {
LOG_ERROR(Kernel, "cannot map address={:#08X}, invalid address", address);
return ResultInvalidAddress;
}
}
VAddr target_address = address;
if (m_base_address == 0 && target_address == 0) {
// Calculate the address at which to map the memory block.
// Note: even on new firmware versions, the target address is still in the old linear heap
// region. This exception is made to keep the shared font compatibility. See
// APT:GetSharedFont for detail.
target_address = m_linear_heap_phys_offset + Memory::LINEAR_HEAP_VADDR;
}
{
auto vma = target_process.vm_manager.FindVMA(target_address);
if (vma->second.type != VMAType::Free ||
vma->second.base + vma->second.size < target_address + m_size) {
LOG_ERROR(Kernel, "cannot map address={:#08X}, mapping to already allocated memory",
address);
return ResultInvalidAddressState;
}
}
// Map the memory block into the target process
VAddr interval_target = target_address;
for (const auto& interval : m_backing_blocks) {
auto vma = target_process.vm_manager.MapBackingMemory(interval_target, interval.first,
interval.second, MemoryState::Shared);
ASSERT(vma.Succeeded());
target_process.vm_manager.Reprotect(vma.Unwrap(), ConvertPermissions(permissions));
interval_target += interval.second;
}
return ResultSuccess;
}
Result KSharedMemory::Unmap(Process& target_process, VAddr address) {
// TODO(Subv): Verify what happens if the application tries to unmap an address that is not
// mapped to a SharedMemory.
return target_process.vm_manager.UnmapRange(address, m_size);
}
VMAPermission KSharedMemory::ConvertPermissions(MemoryPermission permission) {
u32 masked_permissions =
static_cast<u32>(permission) & static_cast<u32>(MemoryPermission::ReadWriteExecute);
return static_cast<VMAPermission>(masked_permissions);
};
u8* KSharedMemory::GetPointer(u32 offset) {
if (m_backing_blocks.size() != 1) {
LOG_WARNING(Kernel, "Unsafe GetPointer on discontinuous SharedMemory");
}
return m_backing_blocks[0].first + offset;
}
const u8* KSharedMemory::GetPointer(u32 offset) const {
if (m_backing_blocks.size() != 1) {
LOG_WARNING(Kernel, "Unsafe GetPointer on discontinuous SharedMemory");
}
return m_backing_blocks[0].first + offset;
}
template <class Archive>
void KSharedMemory::serialize(Archive& ar, const u32 file_version) {
ar& boost::serialization::base_object<KAutoObject>(*this);
ar& m_linear_heap_phys_offset;
// ar& m_backing_blocks;
ar& m_size;
ar& m_permissions;
ar& m_other_permissions;
ar& m_owner;
ar& m_base_address;
ar& m_holding_memory;
}
SERIALIZE_IMPL(KSharedMemory)
} // namespace Kernel
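The permission checks in KSharedMemory::Map() above are plain bit arithmetic. The minimal sketch below illustrates the "requested & ~allowed" test; the flag values mirror the usual 3DS MemoryPermission encoding, which is an assumption made only for this example.

#include <cstdint>

constexpr uint32_t Read = 1, Write = 2, ReadWrite = Read | Write;

// Any requested bit that is not in the allowed mask makes the mapping fail.
constexpr bool PermissionsAllowed(uint32_t requested, uint32_t allowed) {
    return (requested & ~allowed) == 0;
}

static_assert(PermissionsAllowed(Read, ReadWrite));  // asking for less than allowed is fine
static_assert(!PermissionsAllowed(ReadWrite, Read)); // asking for Write when only Read is allowed fails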

View File

@ -1,50 +1,54 @@
// Copyright 2014 Citra Emulator Project
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string>
#include <utility>
#include <boost/serialization/export.hpp>
#include "common/common_types.h"
#include "common/memory_ref.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
#include "core/global.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Kernel {
class SharedMemory final : public Object {
enum class VMAPermission : u8;
class KSharedMemory final : public KAutoObjectWithSlabHeapAndContainer<KSharedMemory> {
KERNEL_AUTOOBJECT_TRAITS(KSharedMemory, KAutoObject);
public:
explicit SharedMemory(KernelSystem& kernel);
~SharedMemory() override;
explicit KSharedMemory(KernelSystem& kernel);
~KSharedMemory() override;
std::string GetTypeName() const override {
return "SharedMemory";
}
std::string GetName() const override {
return name;
}
void SetName(std::string name_) {
name = std::move(name_);
Result Initialize(Process* owner, u32 size, MemoryPermission permissions,
MemoryPermission other_permissions, VAddr address, MemoryRegion region);
void InitializeForApplet(u32 offset, u32 size, MemoryPermission permissions,
MemoryPermission other_permissions);
void Finalize() override;
uintptr_t GetPostDestroyArgument() const override {
return reinterpret_cast<uintptr_t>(m_owner);
}
static constexpr HandleType HANDLE_TYPE = HandleType::SharedMemory;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
static void PostDestroy(uintptr_t arg);
Process* GetOwner() const override {
return m_owner;
}
/// Gets the size of the underlying memory block in bytes.
u64 GetSize() const {
return size;
return m_size;
}
/// Gets the linear heap physical offset
u64 GetLinearHeapPhysicalOffset() const {
return linear_heap_phys_offset;
return m_linear_heap_phys_offset;
}
void SetName(std::string&& name_) {}
/**
* Converts the specified MemoryPermission into the equivalent VMAPermission.
* @param permission The MemoryPermission to convert.
@ -84,37 +88,22 @@ public:
const u8* GetPointer(u32 offset = 0) const;
private:
/// Offset in FCRAM of the shared memory block in the linear heap if no address was specified
/// during creation.
PAddr linear_heap_phys_offset = 0;
/// Backing memory for this shared memory block.
std::vector<std::pair<MemoryRef, u32>> backing_blocks;
/// Size of the memory block. Page-aligned.
u32 size = 0;
/// Region of memory this block exists in.
std::shared_ptr<MemoryRegionInfo> memory_region = nullptr;
/// Permission restrictions applied to the process which created the block.
MemoryPermission permissions{};
/// Permission restrictions applied to other processes mapping the block.
MemoryPermission other_permissions{};
/// Process that created this shared memory block.
std::weak_ptr<Process> owner_process;
/// Address of shared memory block in the owner process if specified.
VAddr base_address = 0;
/// Name of shared memory object.
std::string name;
MemoryRegionInfo::IntervalSet holding_memory;
friend class KernelSystem;
KernelSystem& kernel;
template <class Archive>
void serialize(Archive& ar, const unsigned int);
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& ar, const u32 file_version);
private:
Process* m_owner{};
PAddr m_linear_heap_phys_offset{};
VAddr m_base_address{};
u32 m_size{};
MemoryPermission m_permissions{};
MemoryPermission m_other_permissions{};
std::vector<std::pair<MemoryRef, u32>> m_backing_blocks;
MemoryRegionInfo::IntervalSet m_holding_memory;
};
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::SharedMemory)
CONSTRUCT_KERNEL_OBJECT(Kernel::SharedMemory)
BOOST_CLASS_EXPORT_KEY(Kernel::KSharedMemory)
CONSTRUCT_KERNEL_OBJECT(Kernel::KSharedMemory)

View File

@ -0,0 +1,234 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <atomic>
#include "common/assert.h"
#include "common/atomic_ops.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/kernel/kernel.h"
namespace Kernel {
namespace impl {
class KSlabHeapImpl {
CITRA_NON_COPYABLE(KSlabHeapImpl);
CITRA_NON_MOVEABLE(KSlabHeapImpl);
public:
struct Node {
Node* next{};
};
public:
constexpr KSlabHeapImpl() = default;
void Initialize() {
ASSERT(m_head == nullptr);
}
Node* GetHead() const {
return m_head;
}
void* Allocate() {
Node* ret = m_head;
if (ret != nullptr) [[likely]] {
m_head = ret->next;
}
return ret;
}
void Free(void* obj) {
Node* node = static_cast<Node*>(obj);
node->next = m_head;
m_head = node;
}
private:
std::atomic<Node*> m_head{};
};
} // namespace impl
class KSlabHeapBase : protected impl::KSlabHeapImpl {
CITRA_NON_COPYABLE(KSlabHeapBase);
CITRA_NON_MOVEABLE(KSlabHeapBase);
private:
size_t m_obj_size{};
uintptr_t m_peak{};
uintptr_t m_start{};
uintptr_t m_end{};
private:
void UpdatePeakImpl(uintptr_t obj) {
const uintptr_t alloc_peak = obj + this->GetObjectSize();
uintptr_t cur_peak = m_peak;
do {
if (alloc_peak <= cur_peak) {
break;
}
} while (
!Common::AtomicCompareAndSwap(std::addressof(m_peak), alloc_peak, cur_peak, cur_peak));
}
public:
constexpr KSlabHeapBase() = default;
bool Contains(uintptr_t address) const {
return m_start <= address && address < m_end;
}
void Initialize(size_t obj_size, void* memory, size_t memory_size) {
// Ensure we don't initialize a slab using null memory.
ASSERT(memory != nullptr);
// Set our object size.
m_obj_size = obj_size;
// Initialize the base allocator.
KSlabHeapImpl::Initialize();
// Set our tracking variables.
const size_t num_obj = (memory_size / obj_size);
m_start = reinterpret_cast<uintptr_t>(memory);
m_end = m_start + num_obj * obj_size;
m_peak = m_start;
// Free the objects.
u8* cur = reinterpret_cast<u8*>(m_end);
for (size_t i = 0; i < num_obj; i++) {
cur -= obj_size;
KSlabHeapImpl::Free(cur);
}
}
size_t GetSlabHeapSize() const {
return (m_end - m_start) / this->GetObjectSize();
}
size_t GetObjectSize() const {
return m_obj_size;
}
void* Allocate() {
void* obj = KSlabHeapImpl::Allocate();
return obj;
}
void Free(void* obj) {
// Don't allow freeing an object that wasn't allocated from this heap.
const bool contained = this->Contains(reinterpret_cast<uintptr_t>(obj));
ASSERT(contained);
KSlabHeapImpl::Free(obj);
}
size_t GetObjectIndex(const void* obj) const {
return (reinterpret_cast<uintptr_t>(obj) - m_start) / this->GetObjectSize();
}
size_t GetPeakIndex() const {
return this->GetObjectIndex(reinterpret_cast<const void*>(m_peak));
}
uintptr_t GetSlabHeapAddress() const {
return m_start;
}
size_t GetNumRemaining() const {
// Only calculate the number of remaining objects under debug configuration.
return 0;
}
};
template <typename T>
class KSlabHeap final : public KSlabHeapBase {
private:
using BaseHeap = KSlabHeapBase;
public:
constexpr KSlabHeap() = default;
void Initialize(void* memory, size_t memory_size) {
BaseHeap::Initialize(sizeof(T), memory, memory_size);
}
T* Allocate() {
T* obj = static_cast<T*>(BaseHeap::Allocate());
if (obj != nullptr) [[likely]] {
std::construct_at(obj);
}
return obj;
}
T* Allocate(KernelSystem& kernel) {
T* obj = static_cast<T*>(BaseHeap::Allocate());
if (obj != nullptr) [[likely]] {
std::construct_at(obj, kernel);
}
return obj;
}
void Free(T* obj) {
BaseHeap::Free(obj);
}
size_t GetObjectIndex(const T* obj) const {
return BaseHeap::GetObjectIndex(obj);
}
};
template <class Derived>
class KSlabAllocated {
public:
constexpr KSlabAllocated() = default;
size_t GetSlabIndex(KernelSystem& kernel) const {
return kernel.SlabHeap<Derived>().GetObjectIndex(static_cast<const Derived*>(this));
}
public:
static void InitializeSlabHeap(KernelSystem& kernel, void* memory, size_t memory_size) {
kernel.SlabHeap<Derived>().Initialize(memory, memory_size);
}
static Derived* Allocate(KernelSystem& kernel) {
return kernel.SlabHeap<Derived>().Allocate(kernel);
}
static void Free(KernelSystem& kernel, Derived* obj) {
kernel.SlabHeap<Derived>().Free(obj);
}
static size_t GetObjectSize(KernelSystem& kernel) {
return kernel.SlabHeap<Derived>().GetObjectSize();
}
static size_t GetSlabHeapSize(KernelSystem& kernel) {
return kernel.SlabHeap<Derived>().GetSlabHeapSize();
}
static size_t GetPeakIndex(KernelSystem& kernel) {
return kernel.SlabHeap<Derived>().GetPeakIndex();
}
static uintptr_t GetSlabHeapAddress(KernelSystem& kernel) {
return kernel.SlabHeap<Derived>().GetSlabHeapAddress();
}
static size_t GetNumRemaining(KernelSystem& kernel) {
return kernel.SlabHeap<Derived>().GetNumRemaining();
}
};
} // namespace Kernel
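A minimal usage sketch of the slab heap defined above: the Dummy type, the static backing buffer, and the header path are assumptions for illustration; only the KSlabHeap interface itself comes from this diff.

#include "core/hle/kernel/k_slab_heap.h" // assumed location of the header above

struct Dummy {
    u32 value{};
};

void SlabHeapExample() {
    static Kernel::KSlabHeap<Dummy> heap;
    alignas(Dummy) static u8 backing[sizeof(Dummy) * 64];

    // Carve the backing buffer into 64 free slots on the intrusive free list.
    heap.Initialize(backing, sizeof(backing));

    // Allocate() pops a slot off the free list and constructs the object in place.
    Dummy* obj = heap.Allocate();
    if (obj != nullptr) {
        obj->value = 42;
        // Free() pushes the slot back onto the free list; it does not run the destructor.
        heap.Free(obj);
    }
}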

View File

@ -0,0 +1,117 @@
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <utility>
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/vector.hpp>
#include "common/archives.h"
#include "common/assert.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
namespace Kernel {
KSynchronizationObject::KSynchronizationObject(KernelSystem& kernel) : KAutoObject(kernel) {}
KSynchronizationObject::~KSynchronizationObject() = default;
void KSynchronizationObject::AddWaitingThread(KThread* thread) {
auto it = std::ranges::find(waiting_threads, thread);
if (it == waiting_threads.end()) {
waiting_threads.push_back(thread);
}
}
void KSynchronizationObject::RemoveWaitingThread(KThread* thread) {
// If a thread passed multiple handles to the same object,
// the kernel might attempt to remove the thread from the object's
// waiting threads list multiple times.
auto it = std::ranges::find(waiting_threads, thread);
if (it != waiting_threads.end()) {
waiting_threads.erase(it);
}
}
KThread* KSynchronizationObject::GetHighestPriorityReadyThread() const {
KThread* candidate = nullptr;
u32 candidate_priority = ThreadPrioLowest + 1;
for (auto* thread : waiting_threads) {
// The list of waiting threads must not contain threads that are not waiting to be awakened.
ASSERT_MSG(thread->GetStatus() == ThreadStatus::WaitSynchAny ||
thread->GetStatus() == ThreadStatus::WaitSynchAll ||
thread->GetStatus() == ThreadStatus::WaitHleEvent,
"Inconsistent thread statuses in waiting_threads");
if (thread->GetCurrentPriority() >= candidate_priority || ShouldWait(thread)) {
continue;
}
// A thread is ready to run if it's either in ThreadStatus::WaitSynchAny or
// in ThreadStatus::WaitSynchAll and the rest of the objects it is waiting on are ready.
bool ready_to_run = true;
if (thread->GetStatus() == ThreadStatus::WaitSynchAll) {
ready_to_run =
std::ranges::none_of(thread->m_wait_objects, [thread](const auto* object) {
return object->ShouldWait(thread);
});
}
if (ready_to_run) {
candidate = thread;
candidate_priority = thread->GetCurrentPriority();
}
}
return candidate;
}
void KSynchronizationObject::WakeupAllWaitingThreads() {
while (auto thread = GetHighestPriorityReadyThread()) {
if (!thread->IsSleepingOnWaitAll()) {
Acquire(thread);
} else {
for (auto& object : thread->m_wait_objects) {
object->Acquire(thread);
}
}
// Invoke the wakeup callback before clearing the wait objects
if (thread->m_wakeup_callback) {
thread->m_wakeup_callback->WakeUp(ThreadWakeupReason::Signal, thread, this);
}
for (auto& object : thread->m_wait_objects) {
object->RemoveWaitingThread(thread);
}
thread->m_wait_objects.clear();
thread->ResumeFromWait();
}
if (hle_notifier) {
hle_notifier();
}
}
const std::vector<KThread*>& KSynchronizationObject::GetWaitingThreads() const {
return waiting_threads;
}
void KSynchronizationObject::SetHLENotifier(std::function<void()> callback) {
hle_notifier = std::move(callback);
}
template <class Archive>
void KSynchronizationObject::serialize(Archive& ar, const unsigned int file_version) {
ar& boost::serialization::base_object<KAutoObject>(*this);
ar& waiting_threads;
// NB: hle_notifier *not* serialized since it's a callback!
// Fortunately it's only used in one place (DSP) so we can reconstruct it there
}
SERIALIZE_IMPL(KSynchronizationObject)
} // namespace Kernel
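The selection rule in GetHighestPriorityReadyThread() — lower numeric priority value wins, and only waiters that would not block are considered — reduces to the standalone sketch below; the Waiter struct is invented for illustration.

#include <cstdint>
#include <vector>

struct Waiter {
    uint32_t priority; // lower value == higher priority, as on the 3DS
    bool ready;        // i.e. !ShouldWait(thread), and all other objects ready for WaitSynchAll
};

// Returns the index of the best candidate, or -1 if every waiter must keep waiting.
// The initial bound mirrors `candidate_priority = ThreadPrioLowest + 1` in the code above.
int PickHighestPriorityReady(const std::vector<Waiter>& waiters, uint32_t lowest_priority = 63) {
    int best = -1;
    uint32_t best_priority = lowest_priority + 1;
    for (int i = 0; i < static_cast<int>(waiters.size()); ++i) {
        if (!waiters[i].ready || waiters[i].priority >= best_priority) {
            continue;
        }
        best = i;
        best_priority = waiters[i].priority;
    }
    return best;
}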

View File

@ -1,45 +1,48 @@
// Copyright 2014 Citra Emulator Project
// Copyright 2023 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <functional>
#include <memory>
#include <span>
#include <vector>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include <boost/serialization/access.hpp>
#include <boost/serialization/export.hpp>
#include "core/hle/kernel/k_auto_object.h"
namespace Kernel {
class Thread;
class KThread;
class KSynchronizationObject : public KAutoObject {
KERNEL_AUTOOBJECT_TRAITS(KSynchronizationObject, KAutoObject);
/// Class that represents a Kernel object that a thread can be waiting on
class WaitObject : public Object {
public:
using Object::Object;
explicit KSynchronizationObject(KernelSystem& kernel);
~KSynchronizationObject();
/**
* Check if the specified thread should wait until the object is available
* @param thread The thread about which we're deciding.
* @return True if the current thread should wait due to this object being unavailable
*/
virtual bool ShouldWait(const Thread* thread) const = 0;
virtual bool ShouldWait(const KThread* thread) const = 0;
/// Acquire/lock the object for the specified thread if it is available
virtual void Acquire(Thread* thread) = 0;
virtual void Acquire(KThread* thread) = 0;
/**
* Add a thread to wait on this object
* @param thread Pointer to thread to add
*/
virtual void AddWaitingThread(std::shared_ptr<Thread> thread);
virtual void AddWaitingThread(KThread* thread);
/**
* Removes a thread from waiting on this object (e.g. if it was resumed already)
* @param thread Pointer to thread to remove
*/
virtual void RemoveWaitingThread(Thread* thread);
virtual void RemoveWaitingThread(KThread* thread);
/**
* Wake up all threads waiting on this object that can be awoken, in priority order,
@ -48,17 +51,17 @@ public:
virtual void WakeupAllWaitingThreads();
/// Obtains the highest priority thread that is ready to run from this object's waiting list.
std::shared_ptr<Thread> GetHighestPriorityReadyThread() const;
KThread* GetHighestPriorityReadyThread() const;
/// Get a const reference to the waiting threads list for debug use
const std::vector<std::shared_ptr<Thread>>& GetWaitingThreads() const;
const std::vector<KThread*>& GetWaitingThreads() const;
/// Sets a callback which is called when the object becomes available
void SetHLENotifier(std::function<void()> callback);
private:
/// Threads waiting for this object to become available
std::vector<std::shared_ptr<Thread>> waiting_threads;
std::vector<KThread*> waiting_threads;
/// Function to call when this object becomes available
std::function<void()> hle_notifier;
@ -69,15 +72,6 @@ private:
void serialize(Archive& ar, const unsigned int);
};
// Specialization of DynamicObjectCast for WaitObjects
template <>
inline std::shared_ptr<WaitObject> DynamicObjectCast<WaitObject>(std::shared_ptr<Object> object) {
if (object != nullptr && object->IsWaitable()) {
return std::static_pointer_cast<WaitObject>(object);
}
return nullptr;
}
} // namespace Kernel
BOOST_CLASS_EXPORT_KEY(Kernel::WaitObject)
BOOST_CLASS_EXPORT_KEY(Kernel::KSynchronizationObject)

Some files were not shown because too many files have changed in this diff.