diff --git a/.github/workflows/pr-auto-manager.yml b/.github/workflows/pr-auto-manager.yml
index d95ff8e73..cf247ff9d 100644
--- a/.github/workflows/pr-auto-manager.yml
+++ b/.github/workflows/pr-auto-manager.yml
@@ -1,6 +1,7 @@
 name: 🔀 Pull Request Manager
 
 on:
+  workflow_dispatch: # Allow manually triggering this workflow
   pull_request_target:
     types: [opened, synchronize, reopened, edited, labeled, unlabeled, closed]
   pull_request_review_comment:
@@ -17,6 +18,12 @@ jobs:
     # Only needs to run when code is changed
     if: github.event.action == 'opened' || github.event.action == 'synchronize'
 
+    # Override permissions; the linter likely needs write access to issues
+    permissions:
+      contents: read
+      issues: write
+      pull-requests: write
+
     steps:
       - name: Checkout Repository # Checkout
diff --git a/public/script.js b/public/script.js
index 0752d5600..15ec77c97 100644
--- a/public/script.js
+++ b/public/script.js
@@ -2069,8 +2069,9 @@ export function messageFormatting(mes, ch_name, isSystem, isUser, messageId, san
     }
 
     // Prompt bias replacement should be applied on the raw message
-    if (!power_user.show_user_prompt_bias && ch_name && !isUser && !isSystem) {
-        mes = mes.replaceAll(substituteParams(power_user.user_prompt_bias), '');
+    const replacedPromptBias = power_user.user_prompt_bias && substituteParams(power_user.user_prompt_bias);
+    if (!power_user.show_user_prompt_bias && ch_name && !isUser && !isSystem && replacedPromptBias && mes.startsWith(replacedPromptBias)) {
+        mes = mes.slice(replacedPromptBias.length);
     }
 
     if (!isSystem) {
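
Note on the script.js hunk above: the old code used replaceAll, which stripped the substituted prompt bias from anywhere in the rendered message; the new guard removes it only when the message actually starts with it. A minimal standalone sketch of the difference (the bias string and message below are made-up stand-ins, not values from the codebase):

    const bias = '{{Write at length.}}'; // stand-in for substituteParams(power_user.user_prompt_bias)
    const mes = '{{Write at length.}} Hi! {{Write at length.}}';

    // Old behavior: every occurrence vanished, even mid-message.
    console.log(mes.replaceAll(bias, ''));                            // " Hi! "

    // New behavior: only a leading bias is sliced off.
    console.log(mes.startsWith(bias) ? mes.slice(bias.length) : mes); // " Hi! {{Write at length.}}"
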
diff --git a/public/scripts/custom-request.js b/public/scripts/custom-request.js
index d6fccf1a9..250e74f11 100644
--- a/public/scripts/custom-request.js
+++ b/public/scripts/custom-request.js
@@ -3,10 +3,13 @@ import { extractMessageFromData, getGenerateUrl, getRequestHeaders } from '../sc
 import { getTextGenServer } from './textgen-settings.js';
 import { extractReasoningFromData } from './reasoning.js';
 import { formatInstructModeChat, formatInstructModePrompt, names_behavior_types } from './instruct-mode.js';
+import { getStreamingReply, tryParseStreamingError } from './openai.js';
+import EventSourceStream from './sse-stream.js';
 
 // #region Type Definitions
 /**
  * @typedef {Object} TextCompletionRequestBase
+ * @property {boolean?} [stream=false] - Whether to stream the response
  * @property {number} max_tokens - Maximum number of tokens to generate
  * @property {string} [model] - Optional model name
  * @property {string} api_type - Type of API to use
@@ -17,6 +20,7 @@ import { formatInstructModeChat, formatInstructModePrompt, names_behavior_types
 
 /**
  * @typedef {Object} TextCompletionPayloadBase
+ * @property {boolean?} [stream=false] - Whether to stream the response
  * @property {string} prompt - The text prompt for completion
  * @property {number} max_tokens - Maximum number of tokens to generate
  * @property {number} max_new_tokens - Alias for max_tokens
@@ -36,6 +40,7 @@ import { formatInstructModeChat, formatInstructModePrompt, names_behavior_types
 
 /**
  * @typedef {Object} ChatCompletionPayloadBase
+ * @property {boolean?} [stream=false] - Whether to stream the response
  * @property {ChatCompletionMessage[]} messages - Array of chat messages
  * @property {string} [model] - Optional model name to use for completion
  * @property {string} chat_completion_source - Source provider for chat completion
@@ -52,10 +57,19 @@ import { formatInstructModeChat, formatInstructModePrompt, names_behavior_types
  * @property {string} reasoning - Extracted reasoning.
  */
 
+/**
+ * @typedef {Object} StreamResponse
+ * @property {string} text - Generated text.
+ * @property {string[]} swipes - Generated swipes
+ * @property {Object} state - Generated state
+ * @property {string?} [state.reasoning] - Generated reasoning
+ * @property {string?} [state.image] - Generated image
+ */
+
 // #endregion
 
 /**
- * Creates & sends a text completion request. Streaming is not supported.
+ * Creates & sends a text completion request.
  */
 export class TextCompletionService {
     static TYPE = 'textgenerationwebui';
@@ -64,9 +78,10 @@ export class TextCompletionService {
      * @param {Record<string, any> & TextCompletionRequestBase & {prompt: string}} custom
      * @returns {TextCompletionPayload}
      */
-    static createRequestData({ prompt, max_tokens, model, api_type, api_server, temperature, min_p, ...props }) {
+    static createRequestData({ stream = false, prompt, max_tokens, model, api_type, api_server, temperature, min_p, ...props }) {
         const payload = {
             ...props,
+            stream,
             prompt,
             max_tokens,
             max_new_tokens: max_tokens,
@@ -75,7 +90,6 @@ export class TextCompletionService {
             api_server: api_server ?? getTextGenServer(api_type),
             temperature,
             min_p,
-            stream: false,
         };
 
         // Remove undefined values to avoid API errors
@@ -92,34 +106,81 @@ export class TextCompletionService {
      * Sends a text completion request to the specified server
      * @param {TextCompletionPayload} data Request data
      * @param {boolean?} extractData Extract message from the response. Default true
-     * @returns {Promise<ExtractedData>} Extracted data or the raw response
+     * @param {AbortSignal?} signal Abort signal
+     * @returns {Promise<ExtractedData | (() => AsyncGenerator<StreamResponse>)>} If not streaming, returns extracted data; if streaming, returns a function that creates an AsyncGenerator
      * @throws {Error}
      */
-    static async sendRequest(data, extractData = true) {
-        const response = await fetch(getGenerateUrl(this.TYPE), {
+    static async sendRequest(data, extractData = true, signal = null) {
+        if (!data.stream) {
+            const response = await fetch(getGenerateUrl(this.TYPE), {
+                method: 'POST',
+                headers: getRequestHeaders(),
+                cache: 'no-cache',
+                body: JSON.stringify(data),
+                signal: signal ?? new AbortController().signal,
+            });
+
+            const json = await response.json();
+            if (!response.ok || json.error) {
+                throw json;
+            }
+
+            if (!extractData) {
+                return json;
+            }
+
+            return {
+                content: extractMessageFromData(json, this.TYPE),
+                reasoning: extractReasoningFromData(json, {
+                    mainApi: this.TYPE,
+                    textGenType: data.api_type,
+                    ignoreShowThoughts: true,
+                }),
+            };
+        }
+
+        const response = await fetch('/api/backends/text-completions/generate', {
             method: 'POST',
             headers: getRequestHeaders(),
             cache: 'no-cache',
             body: JSON.stringify(data),
-            signal: new AbortController().signal,
+            signal: signal ?? new AbortController().signal,
         });
 
-        const json = await response.json();
-        if (!response.ok || json.error) {
-            throw json;
+        if (!response.ok) {
+            const text = await response.text();
+            tryParseStreamingError(response, text, { quiet: true });
+
+            throw new Error(`Got response status ${response.status}`);
         }
 
-        if (!extractData) {
-            return json;
-        }
+        const eventStream = new EventSourceStream();
+        response.body.pipeThrough(eventStream);
+        const reader = eventStream.readable.getReader();
+        return async function* streamData() {
+            let text = '';
+            const swipes = [];
+            const state = { reasoning: '' };
+            while (true) {
+                const { done, value } = await reader.read();
+                if (done) return;
+                if (value.data === '[DONE]') return;
 
-        return {
-            content: extractMessageFromData(json, this.TYPE),
-            reasoning: extractReasoningFromData(json, {
-                mainApi: this.TYPE,
-                textGenType: data.api_type,
-                ignoreShowThoughts: true,
-            }),
+                tryParseStreamingError(response, value.data, { quiet: true });
+
+                let data = JSON.parse(value.data);
+
+                if (data?.choices?.[0]?.index > 0) {
+                    const swipeIndex = data.choices[0].index - 1;
+                    swipes[swipeIndex] = (swipes[swipeIndex] || '') + data.choices[0].text;
+                } else {
+                    const newText = data?.choices?.[0]?.text || data?.content || '';
+                    text += newText;
+                    state.reasoning += data?.choices?.[0]?.reasoning ?? '';
+                }
+
+                yield { text, swipes, state };
+            }
         };
     }
 
@@ -130,13 +191,15 @@ export class TextCompletionService {
      * @param {string?} [options.presetName] - Name of the preset to use for generation settings
      * @param {string?} [options.instructName] - Name of instruct preset for message formatting
      * @param {boolean} extractData - Whether to extract structured data from response
-     * @returns {Promise<ExtractedData>} Extracted data or the raw response
+     * @param {AbortSignal?} [signal] - Abort signal
+     * @returns {Promise<ExtractedData | (() => AsyncGenerator<StreamResponse>)>} If not streaming, returns extracted data; if streaming, returns a function that creates an AsyncGenerator
      * @throws {Error}
      */
     static async processRequest(
         custom,
         options = {},
         extractData = true,
+        signal = null,
     ) {
         const { presetName, instructName } = options;
         let requestData = { ...custom };
@@ -220,7 +283,7 @@ export class TextCompletionService {
 
         // @ts-ignore
         const data = this.createRequestData(requestData);
 
-        return await this.sendRequest(data, extractData);
+        return await this.sendRequest(data, extractData, signal);
     }
 
     /**
@@ -256,7 +319,7 @@ export class TextCompletionService {
 }
 
 /**
- * Creates & sends a chat completion request. Streaming is not supported.
+ * Creates & sends a chat completion request.
  */
 export class ChatCompletionService {
     static TYPE = 'openai';
@@ -265,16 +328,16 @@ export class ChatCompletionService {
      * @param {ChatCompletionPayload} custom
      * @returns {ChatCompletionPayload}
      */
-    static createRequestData({ messages, model, chat_completion_source, max_tokens, temperature, custom_url, ...props }) {
+    static createRequestData({ stream = false, messages, model, chat_completion_source, max_tokens, temperature, custom_url, ...props }) {
         const payload = {
             ...props,
+            stream,
             messages,
             model,
             chat_completion_source,
             max_tokens,
             temperature,
             custom_url,
-            stream: false,
         };
 
         // Remove undefined values to avoid API errors
@@ -291,34 +354,74 @@ export class ChatCompletionService {
      * Sends a chat completion request
      * @param {ChatCompletionPayload} data Request data
     * @param {boolean?} extractData Extract message from the response. Default true
-     * @returns {Promise<ExtractedData>} Extracted data or the raw response
+     * @param {AbortSignal?} signal Abort signal
+     * @returns {Promise<ExtractedData | (() => AsyncGenerator<StreamResponse>)>} If not streaming, returns extracted data; if streaming, returns a function that creates an AsyncGenerator
      * @throws {Error}
      */
-    static async sendRequest(data, extractData = true) {
+    static async sendRequest(data, extractData = true, signal = null) {
         const response = await fetch('/api/backends/chat-completions/generate', {
             method: 'POST',
             headers: getRequestHeaders(),
             cache: 'no-cache',
             body: JSON.stringify(data),
-            signal: new AbortController().signal,
+            signal: signal ?? new AbortController().signal,
         });
 
-        const json = await response.json();
-        if (!response.ok || json.error) {
-            throw json;
+        if (!data.stream) {
+            const json = await response.json();
+            if (!response.ok || json.error) {
+                throw json;
+            }
+
+            if (!extractData) {
+                return json;
+            }
+
+            return {
+                content: extractMessageFromData(json, this.TYPE),
+                reasoning: extractReasoningFromData(json, {
+                    mainApi: this.TYPE,
+                    textGenType: data.chat_completion_source,
+                    ignoreShowThoughts: true,
+                }),
+            };
         }
 
-        if (!extractData) {
-            return json;
+        if (!response.ok) {
+            const text = await response.text();
+            tryParseStreamingError(response, text, { quiet: true });
+
+            throw new Error(`Got response status ${response.status}`);
         }
 
-        return {
-            content: extractMessageFromData(json, this.TYPE),
-            reasoning: extractReasoningFromData(json, {
-                mainApi: this.TYPE,
-                textGenType: data.chat_completion_source,
-                ignoreShowThoughts: true,
-            }),
+        const eventStream = new EventSourceStream();
+        response.body.pipeThrough(eventStream);
+        const reader = eventStream.readable.getReader();
+        return async function* streamData() {
+            let text = '';
+            const swipes = [];
+            const state = { reasoning: '', image: '' };
+            while (true) {
+                const { done, value } = await reader.read();
+                if (done) return;
+                const rawData = value.data;
+                if (rawData === '[DONE]') return;
+                tryParseStreamingError(response, rawData, { quiet: true });
+                const parsed = JSON.parse(rawData);
+
+                const reply = getStreamingReply(parsed, state, {
+                    chatCompletionSource: data.chat_completion_source,
+                    overrideShowThoughts: true,
+                });
+                if (Array.isArray(parsed?.choices) && parsed?.choices?.[0]?.index > 0) {
+                    const swipeIndex = parsed.choices[0].index - 1;
+                    swipes[swipeIndex] = (swipes[swipeIndex] || '') + reply;
+                } else {
+                    text += reply;
+                }
+
+                yield { text, swipes: swipes, state };
+            }
         };
     }
 
@@ -327,11 +430,12 @@ export class ChatCompletionService {
      * @param {ChatCompletionPayload} custom
      * @param {Object} options - Configuration options
      * @param {string?} [options.presetName] - Name of the preset to use for generation settings
-     * @param {boolean} extractData - Whether to extract structured data from response
-     * @returns {Promise<ExtractedData>} Extracted data or the raw response
+     * @param {boolean} [extractData=true] - Whether to extract structured data from response
+     * @param {AbortSignal?} [signal] - Abort signal
+     * @returns {Promise<ExtractedData | (() => AsyncGenerator<StreamResponse>)>} If not streaming, returns extracted data; if streaming, returns a function that creates an AsyncGenerator
      * @throws {Error}
      */
-    static async processRequest(custom, options, extractData = true) {
+    static async processRequest(custom, options, extractData = true, signal = null) {
         const { presetName } = options;
         let requestData = { ...custom };
 
@@ -354,7 +458,7 @@ export class ChatCompletionService {
 
         const data = this.createRequestData(requestData);
 
+        return await this.sendRequest(data, extractData, signal);
     }
 
     /**
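
Usage sketch for the new streaming path (hedged: the model name, prompt, and source value are placeholders, not part of this diff; ChatCompletionService is assumed to be imported from custom-request.js). With stream: true, the promise resolves to a generator factory instead of ExtractedData:

    const controller = new AbortController();
    const result = await ChatCompletionService.processRequest({
        stream: true,
        messages: [{ role: 'user', content: 'Hello!' }],
        max_tokens: 200,
        model: 'some-model',              // placeholder
        chat_completion_source: 'openai', // any supported source
    }, {}, true, controller.signal);

    if (typeof result === 'function') {
        // Streaming: iterate the AsyncGenerator; text/swipes/state are cumulative.
        for await (const { text, state } of result()) {
            console.log(text, state.reasoning);
        }
    } else {
        // Non-streaming: ExtractedData with content and reasoning.
        console.log(result.content);
    }
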
diff --git a/public/scripts/extensions/shared.js b/public/scripts/extensions/shared.js
index d5549c40a..144ac3d6b 100644
--- a/public/scripts/extensions/shared.js
+++ b/public/scripts/extensions/shared.js
@@ -276,10 +276,12 @@ export async function getWebLlmContextSize() {
 }
 
 /**
- * It uses the profiles to send a generate request to the API. Doesn't support streaming.
+ * It uses the profiles to send a generate request to the API.
 */
 export class ConnectionManagerRequestService {
     static defaultSendRequestParams = {
+        stream: false,
+        signal: null,
         extractData: true,
         includePreset: true,
         includeInstruct: true,
@@ -296,11 +298,11 @@ export class ConnectionManagerRequestService {
      * @param {string} profileId
      * @param {string | (import('../custom-request.js').ChatCompletionMessage & {ignoreInstruct?: boolean})[]} prompt
      * @param {number} maxTokens
-     * @param {{extractData?: boolean, includePreset?: boolean, includeInstruct?: boolean}} custom - default values are true
-     * @returns {Promise<import('../custom-request.js').ExtractedData>} Extracted data or the raw response
+     * @param {{stream?: boolean, signal?: AbortSignal, extractData?: boolean, includePreset?: boolean, includeInstruct?: boolean}} custom - default values are true
+     * @returns {Promise<import('../custom-request.js').ExtractedData | (() => AsyncGenerator<import('../custom-request.js').StreamResponse>)>} If not streaming, returns extracted data; if streaming, returns a function that creates an AsyncGenerator
      */
     static async sendRequest(profileId, prompt, maxTokens, custom = this.defaultSendRequestParams) {
-        const { extractData, includePreset, includeInstruct } = { ...this.defaultSendRequestParams, ...custom };
+        const { stream, signal, extractData, includePreset, includeInstruct } = { ...this.defaultSendRequestParams, ...custom };
 
         const context = SillyTavern.getContext();
         if (context.extensionSettings.disabledExtensions.includes('connection-manager')) {
@@ -319,6 +321,7 @@ export class ConnectionManagerRequestService {
                 const messages = Array.isArray(prompt) ? prompt : [{ role: 'user', content: prompt }];
                 return await context.ChatCompletionService.processRequest({
+                    stream,
                     messages,
                     max_tokens: maxTokens,
                     model: profile.model,
@@ -326,7 +329,7 @@ export class ConnectionManagerRequestService {
                     custom_url: profile['api-url'],
                 }, {
                     presetName: includePreset ? profile.preset : undefined,
-                }, extractData);
+                }, extractData, signal);
             }
             case 'textgenerationwebui': {
                 if (!selectedApiMap.type) {
@@ -334,6 +337,7 @@ export class ConnectionManagerRequestService {
                 }
 
                 return await context.TextCompletionService.processRequest({
+                    stream,
                     prompt,
                     max_tokens: maxTokens,
                     model: profile.model,
@@ -342,7 +346,7 @@ export class ConnectionManagerRequestService {
                 }, {
                     instructName: includeInstruct ? profile.instruct : undefined,
                     presetName: includePreset ? profile.preset : undefined,
-                }, extractData);
+                }, extractData, signal);
             }
             default: {
                 throw new Error(`Unknown API type ${selectedApiMap.selected}`);
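
The same shape surfaces to extensions through ConnectionManagerRequestService. A sketch of a streaming call with cancellation (the profile id is hypothetical; stream and signal default to false/null as declared above):

    const controller = new AbortController();
    const result = await ConnectionManagerRequestService.sendRequest(
        'my-profile-id', // hypothetical connection profile
        'Write a haiku.',
        150,
        { stream: true, signal: controller.signal },
    );

    if (typeof result === 'function') {
        for await (const chunk of result()) {
            console.log(chunk.text);
            if (chunk.text.length > 2000) {
                // Aborts the underlying fetch; the pending read rejects with an AbortError.
                controller.abort();
            }
        }
    }
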
diff --git a/public/scripts/group-chats.js b/public/scripts/group-chats.js
index 337f3c247..91cf0a88d 100644
--- a/public/scripts/group-chats.js
+++ b/public/scripts/group-chats.js
@@ -882,7 +882,7 @@ async function generateGroupWrapper(by_auto_mode, type = null, params = {}) {
             activatedMembers = activateListOrder(enabledMembers);
         }
         else if (activationStrategy === group_activation_strategy.POOLED) {
-            activatedMembers = activatePooledOrder(enabledMembers, lastMessage);
+            activatedMembers = activatePooledOrder(enabledMembers, lastMessage, isUserInput);
         }
         else if (activationStrategy === group_activation_strategy.MANUAL && !isUserInput) {
             activatedMembers = shuffle(enabledMembers).slice(0, 1).map(x => characters.findIndex(y => y.avatar === x)).filter(x => x !== -1);
@@ -1030,16 +1030,17 @@ function activateListOrder(members) {
  * Activate group members based on the last message.
  * @param {string[]} members List of member avatars
  * @param {Object} lastMessage Last message
+ * @param {boolean} isUserInput Whether the user has input text
  * @returns {number[]} List of character ids
  */
-function activatePooledOrder(members, lastMessage) {
+function activatePooledOrder(members, lastMessage, isUserInput) {
     /** @type {string} */
     let activatedMember = null;
     /** @type {string[]} */
     const spokenSinceUser = [];
 
     for (const message of chat.slice().reverse()) {
-        if (message.is_user) {
+        if (message.is_user || isUserInput) {
             break;
         }
diff --git a/public/scripts/openai.js b/public/scripts/openai.js
index 36e224bf9..695db39a0 100644
--- a/public/scripts/openai.js
+++ b/public/scripts/openai.js
@@ -1444,8 +1444,10 @@ export async function prepareOpenAIMessages({
  * Handles errors during streaming requests.
  * @param {Response} response
  * @param {string} decoded - response text or decoded stream data
+ * @param {object} [options]
+ * @param {boolean?} [options.quiet=false] Suppress toast messages
  */
-function tryParseStreamingError(response, decoded) {
+export function tryParseStreamingError(response, decoded, { quiet = false } = {}) {
     try {
         const data = JSON.parse(decoded);
 
         if (!data) {
             return;
         }
 
-        checkQuotaError(data);
-        checkModerationError(data);
+        checkQuotaError(data, { quiet });
+        checkModerationError(data, { quiet });
 
         // these do not throw correctly (equiv to Error("[object Object]"))
         // if trying to fix "[object Object]" displayed to users, start here
         if (data.error) {
-            toastr.error(data.error.message || response.statusText, 'Chat Completion API');
+            !quiet && toastr.error(data.error.message || response.statusText, 'Chat Completion API');
             throw new Error(data);
         }
 
         if (data.message) {
-            toastr.error(data.message, 'Chat Completion API');
+            !quiet && toastr.error(data.message, 'Chat Completion API');
             throw new Error(data);
         }
     }
@@ -1477,16 +1479,18 @@ function tryParseStreamingError(response, decoded) {
 
 /**
  * Checks if the response contains a quota error and displays a popup if it does.
  * @param data
+ * @param {object} [options]
+ * @param {boolean?} [options.quiet=false] Suppress toast messages
  * @returns {void}
  * @throws {object} - response JSON
  */
-function checkQuotaError(data) {
+function checkQuotaError(data, { quiet = false } = {}) {
     if (!data) {
         return;
     }
 
     if (data.quota_error) {
-        renderTemplateAsync('quotaError').then((html) => Popup.show.text('Quota Error', html));
+        !quiet && renderTemplateAsync('quotaError').then((html) => Popup.show.text('Quota Error', html));
 
         // this does not throw correctly (equiv to Error("[object Object]"))
         // if trying to fix "[object Object]" displayed to users, start here
@@ -1494,9 +1498,14 @@ function checkQuotaError(data) {
     }
 }
 
-function checkModerationError(data) {
+/**
+ * @param {any} data
+ * @param {object} [options]
+ * @param {boolean?} [options.quiet=false] Suppress toast messages
+ */
+function checkModerationError(data, { quiet = false } = {}) {
     const moderationError = data?.error?.message?.includes('requires moderation');
-    if (moderationError) {
+    if (moderationError && !quiet) {
         const moderationReason = `Reasons: ${data?.error?.metadata?.reasons?.join(', ') ?? '(N/A)'}`;
         const flaggedText = data?.error?.metadata?.flagged_input ?? '(N/A)';
         toastr.info(flaggedText, moderationReason, { timeOut: 10000 });
@@ -2255,37 +2264,43 @@ async function sendOpenAIRequest(type, messages, signal) {
  * Extracts the reply from the response data from a chat completions-like source
  * @param {object} data Response data from the chat completions-like source
  * @param {object} state Additional state to keep track of
+ * @param {object} [options] Additional options
+ * @param {string?} [options.chatCompletionSource] Chat completion source
+ * @param {boolean?} [options.overrideShowThoughts] Override show thoughts
  * @returns {string} The reply extracted from the response data
  */
-function getStreamingReply(data, state) {
-    if (oai_settings.chat_completion_source === chat_completion_sources.CLAUDE) {
-        if (oai_settings.show_thoughts) {
+export function getStreamingReply(data, state, { chatCompletionSource = null, overrideShowThoughts = null } = {}) {
+    const chat_completion_source = chatCompletionSource ?? oai_settings.chat_completion_source;
+    const show_thoughts = overrideShowThoughts ?? oai_settings.show_thoughts;
+
+    if (chat_completion_source === chat_completion_sources.CLAUDE) {
+        if (show_thoughts) {
             state.reasoning += data?.delta?.thinking || '';
         }
         return data?.delta?.text || '';
-    } else if (oai_settings.chat_completion_source === chat_completion_sources.MAKERSUITE) {
+    } else if (chat_completion_source === chat_completion_sources.MAKERSUITE) {
         const inlineData = data?.candidates?.[0]?.content?.parts?.find(x => x.inlineData)?.inlineData;
         if (inlineData) {
             state.image = `data:${inlineData.mimeType};base64,${inlineData.data}`;
         }
-        if (oai_settings.show_thoughts) {
+        if (show_thoughts) {
             state.reasoning += (data?.candidates?.[0]?.content?.parts?.filter(x => x.thought)?.map(x => x.text)?.[0] || '');
         }
         return data?.candidates?.[0]?.content?.parts?.filter(x => !x.thought)?.map(x => x.text)?.[0] || '';
-    } else if (oai_settings.chat_completion_source === chat_completion_sources.COHERE) {
+    } else if (chat_completion_source === chat_completion_sources.COHERE) {
         return data?.delta?.message?.content?.text || data?.delta?.message?.tool_plan || '';
-    } else if (oai_settings.chat_completion_source === chat_completion_sources.DEEPSEEK) {
-        if (oai_settings.show_thoughts) {
+    } else if (chat_completion_source === chat_completion_sources.DEEPSEEK) {
+        if (show_thoughts) {
             state.reasoning += (data.choices?.filter(x => x?.delta?.reasoning_content)?.[0]?.delta?.reasoning_content || '');
         }
         return data.choices?.[0]?.delta?.content || '';
-    } else if (oai_settings.chat_completion_source === chat_completion_sources.OPENROUTER) {
-        if (oai_settings.show_thoughts) {
+    } else if (chat_completion_source === chat_completion_sources.OPENROUTER) {
+        if (show_thoughts) {
             state.reasoning += (data.choices?.filter(x => x?.delta?.reasoning)?.[0]?.delta?.reasoning || '');
         }
         return data.choices?.[0]?.delta?.content ?? data.choices?.[0]?.message?.content ?? data.choices?.[0]?.text ?? '';
-    } else if (oai_settings.chat_completion_source === chat_completion_sources.CUSTOM) {
-        if (oai_settings.show_thoughts) {
+    } else if (chat_completion_source === chat_completion_sources.CUSTOM) {
+        if (show_thoughts) {
             state.reasoning += data.choices?.filter(x => x?.delta?.reasoning_content)?.[0]?.delta?.reasoning_content ?? data.choices?.filter(x => x?.delta?.reasoning)?.[0]?.delta?.reasoning ?? '';
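
Because getStreamingReply no longer reads only oai_settings, callers outside the normal generation flow (like the custom-request streaming loop above) can pin the source and force thought collection. A hedged sketch; the helper and its input are hypothetical, with chunks standing in for already-parsed SSE payloads:

    // Hypothetical helper built on the newly exported function.
    function collectClaudeReply(chunks) {
        let text = '';
        const state = { reasoning: '', image: '' };
        for (const chunk of chunks) {
            text += getStreamingReply(chunk, state, {
                chatCompletionSource: chat_completion_sources.CLAUDE,
                overrideShowThoughts: true, // fill state.reasoning regardless of the UI toggle
            });
        }
        return { text, state };
    }
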
diff --git a/public/scripts/slash-commands.js b/public/scripts/slash-commands.js
index 096da917a..969348ae5 100644
--- a/public/scripts/slash-commands.js
+++ b/public/scripts/slash-commands.js
@@ -69,7 +69,7 @@ import { SlashCommand } from './slash-commands/SlashCommand.js';
 import { SlashCommandAbortController } from './slash-commands/SlashCommandAbortController.js';
 import { SlashCommandNamedArgumentAssignment } from './slash-commands/SlashCommandNamedArgumentAssignment.js';
 import { SlashCommandEnumValue, enumTypes } from './slash-commands/SlashCommandEnumValue.js';
-import { POPUP_TYPE, Popup, callGenericPopup } from './popup.js';
+import { POPUP_RESULT, POPUP_TYPE, Popup, callGenericPopup } from './popup.js';
 import { commonEnumProviders, enumIcons } from './slash-commands/SlashCommandCommonEnumsProvider.js';
 import { SlashCommandBreakController } from './slash-commands/SlashCommandBreakController.js';
 import { SlashCommandExecutionError } from './slash-commands/SlashCommandExecutionError.js';
@@ -77,6 +77,7 @@ import { slashCommandReturnHelper } from './slash-commands/SlashCommandReturnHel
 import { accountStorage } from './util/AccountStorage.js';
 import { SlashCommandDebugController } from './slash-commands/SlashCommandDebugController.js';
 import { SlashCommandScope } from './slash-commands/SlashCommandScope.js';
+import { t } from './i18n.js';
 
 export {
     executeSlashCommands,
     executeSlashCommandsWithOptions,
     getSlashCommandsHelp,
     registerSlashCommand,
 };
@@ -1556,16 +1557,28 @@ export function initDefaultSlashCommands() {
     SlashCommandParser.addCommandObject(SlashCommand.fromProps({
         name: 'buttons',
         callback: buttonsCallback,
-        returns: 'clicked button label',
+        returns: 'clicked button label (or array of labels if multiple is enabled)',
         namedArgumentList: [
-            new SlashCommandNamedArgument(
-                'labels', 'button labels', [ARGUMENT_TYPE.LIST], true,
-            ),
+            SlashCommandNamedArgument.fromProps({
+                name: 'labels',
+                description: 'button labels',
+                typeList: [ARGUMENT_TYPE.LIST],
+                isRequired: true,
+            }),
+            SlashCommandNamedArgument.fromProps({
+                name: 'multiple',
+                description: 'if enabled, multiple buttons can be clicked/toggled, and all clicked buttons are returned as an array',
+                typeList: [ARGUMENT_TYPE.BOOLEAN],
+                enumList: commonEnumProviders.boolean('trueFalse')(),
+                defaultValue: 'false',
+            }),
         ],
         unnamedArgumentList: [
-            new SlashCommandArgument(
-                'text', [ARGUMENT_TYPE.STRING], true,
-            ),
+            SlashCommandArgument.fromProps({
+                description: 'text',
+                typeList: [ARGUMENT_TYPE.STRING],
+                isRequired: true,
+            }),
         ],
         helpString: `
@@ -2377,6 +2390,18 @@ async function trimTokensCallback(arg, value) {
     }
 }
 
+/**
+ * Displays a popup with buttons based on provided labels and handles button interactions.
+ *
+ * @param {object} args - Named arguments for the command
+ * @param {string} args.labels - JSON string of an array of button labels
+ * @param {string} [args.multiple=false] - Flag indicating if multiple buttons can be toggled
+ * @param {string} text - The text content to be displayed within the popup
+ *
+ * @returns {Promise<string>} - A promise that resolves to a string of the selected button labels.
+ *      If 'multiple' is true, returns a JSON string array of labels.
+ *      If 'multiple' is false, returns a single label string.
+ */
 async function buttonsCallback(args, text) {
     try {
         /** @type {string[]} */
@@ -2387,6 +2412,10 @@ async function buttonsCallback(args, text) {
             return '';
         }
 
+        /** @type {Set<number>} */
+        const multipleToggledState = new Set();
+        const multiple = isTrueBoolean(args?.multiple);
+
         // Map custom buttons to results. Start at 2 because 1 and 0 are reserved for ok and cancel
         const resultToButtonMap = new Map(buttons.map((button, index) => [index + 2, button]));
@@ -2404,11 +2433,24 @@ async function buttonsCallback(args, text) {
 
             for (const [result, button] of resultToButtonMap) {
                 const buttonElement = document.createElement('div');
-                buttonElement.classList.add('menu_button', 'result-control', 'wide100p');
-                buttonElement.dataset.result = String(result);
-                buttonElement.addEventListener('click', async () => {
-                    await popup.complete(result);
-                });
+                buttonElement.classList.add('menu_button', 'wide100p');
+
+                if (multiple) {
+                    buttonElement.classList.add('toggleable');
+                    buttonElement.dataset.toggleValue = String(result);
+                    buttonElement.addEventListener('click', async () => {
+                        buttonElement.classList.toggle('toggled');
+                        if (buttonElement.classList.contains('toggled')) {
+                            multipleToggledState.add(result);
+                        } else {
+                            multipleToggledState.delete(result);
+                        }
+                    });
+                } else {
+                    buttonElement.classList.add('result-control');
+                    buttonElement.dataset.result = String(result);
+                }
+
                 buttonElement.innerText = button;
                 buttonContainer.appendChild(buttonElement);
             }
@@ -2424,10 +2466,19 @@ async function buttonsCallback(args, text) {
             popupContainer.style.flexDirection = 'column';
             popupContainer.style.maxHeight = '80vh'; // Limit the overall height of the popup
 
-            popup = new Popup(popupContainer, POPUP_TYPE.TEXT, '', { okButton: 'Cancel', allowVerticalScrolling: true });
+            popup = new Popup(popupContainer, POPUP_TYPE.TEXT, '', { okButton: multiple ? t`Ok` : t`Cancel`, allowVerticalScrolling: true });
             popup.show()
-                .then((result => resolve(typeof result === 'number' ? resultToButtonMap.get(result) ?? '' : '')))
+                .then((result => resolve(getResult(result))))
                 .catch(() => resolve(''));
+
+            /** @returns {string} @param {string|number|boolean} result */
+            function getResult(result) {
+                if (multiple) {
+                    const array = result === POPUP_RESULT.AFFIRMATIVE ? Array.from(multipleToggledState).map(r => resultToButtonMap.get(r) ?? '') : [];
+                    return JSON.stringify(array);
+                }
+                return typeof result === 'number' ? resultToButtonMap.get(result) ?? '' : '';
+            }
         });
     } catch {
         return '';
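
With multiple=true, pressing Ok resolves the pipe to a JSON array string of the toggled labels (and "[]" when the popup is dismissed), e.g. /buttons multiple=true labels=["Red","Green","Blue"] Pick colors might return ["Red","Blue"]. A sketch of consuming that from script code (executeSlashCommandsWithOptions is exported by this file, as seen in the import hunk above; the import path depends on the caller):

    import { executeSlashCommandsWithOptions } from '../scripts/slash-commands.js';

    const result = await executeSlashCommandsWithOptions(
        '/buttons multiple=true labels=["Red","Green","Blue"] Pick some colors',
    );
    const picked = JSON.parse(result.pipe); // e.g. ["Red", "Blue"]; [] if nothing was toggled
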
diff --git a/public/style.css b/public/style.css
index 7fa72d91b..13a1267ab 100644
--- a/public/style.css
+++ b/public/style.css
@@ -2911,6 +2911,35 @@ select option:not(:checked) {
     pointer-events: none;
 }
 
+.menu_button.toggleable {
+    padding-left: 20px;
+}
+
+.menu_button.toggleable.toggled {
+    border-color: var(--active);
+}
+
+.menu_button.toggleable:not(.toggled) {
+    filter: brightness(80%);
+}
+
+.menu_button.toggleable::before {
+    font-family: "Font Awesome 6 Free";
+    margin-left: 10px;
+    position: absolute;
+    left: 0;
+}
+
+.menu_button.toggleable.toggled::before {
+    content: "\f00c";
+    color: var(--active);
+}
+
+.menu_button.toggleable:not(.toggled)::before {
+    content: "\f00d";
+    color: var(--fullred);
+}
+
 .fav_on {
     color: var(--golden) !important;
 }
@@ -2921,7 +2950,7 @@ select option:not(:checked) {
 }
 
 .menu_button.togglable:not(.toggleEnabled) {
-    color: red;
+    color: var(--fullred);
 }
 
 .displayBlock {