From f53e051cbf66434c7e111801c1b81f2c42c92dc7 Mon Sep 17 00:00:00 2001
From: valadaptive
Date: Sat, 23 Dec 2023 05:24:31 -0500
Subject: [PATCH 01/22] Lift precondition check out of processCommands

Instead of passing type and dryRun into processCommands, do the check in
Generate, the only function that calls it. This makes the logic clearer.
---
 public/script.js | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/public/script.js b/public/script.js
index 23be30980..933fbf0ff 100644
--- a/public/script.js
+++ b/public/script.js
@@ -2354,11 +2354,7 @@ export async function generateQuietPrompt(quiet_prompt, quietToLoud, skipWIAN, q
     return generateFinished;
 }
 
-async function processCommands(message, type, dryRun) {
-    if (dryRun || type == 'regenerate' || type == 'swipe' || type == 'quiet') {
-        return null;
-    }
-
+async function processCommands(message) {
     const previousText = String($('#send_textarea').val());
     const result = await executeSlashCommands(message);
 
@@ -2946,12 +2942,14 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
 
     let message_already_generated = isImpersonate ? `${name1}: ` : `${name2}: `;
 
-    const interruptedByCommand = await processCommands($('#send_textarea').val(), type, dryRun);
+    if (!(dryRun || type == 'regenerate' || type == 'swipe' || type == 'quiet')) {
+        const interruptedByCommand = await processCommands($('#send_textarea').val());
 
-    if (interruptedByCommand) {
-        //$("#send_textarea").val('').trigger('input');
-        unblockGeneration();
-        return Promise.resolve();
+        if (interruptedByCommand) {
+            //$("#send_textarea").val('').trigger('input');
+            unblockGeneration();
+            return Promise.resolve();
+        }
     }
 
     if (main_api == 'kobold' && kai_settings.streaming_kobold && !kai_flags.can_use_streaming) {

From d2f86323683fb33195bf34a3aa2da37dfee4f637 Mon Sep 17 00:00:00 2001
From: valadaptive
Date: Sat, 23 Dec 2023 05:55:44 -0500
Subject: [PATCH 02/22] Remove populateLegacyTokenCounts

Unused and the documentation says it should probably be removed
---
 public/scripts/PromptManager.js | 28 ----------------------------
 1 file changed, 28 deletions(-)

diff --git a/public/scripts/PromptManager.js b/public/scripts/PromptManager.js
index 094fe49cc..4e8fb84a8 100644
--- a/public/scripts/PromptManager.js
+++ b/public/scripts/PromptManager.js
@@ -1293,34 +1293,6 @@ class PromptManager {
         this.log('Updated token usage with ' + this.tokenUsage);
     }
 
-    /**
-     * Populates legacy token counts
-     *
-     * @deprecated This might serve no purpose and should be evaluated for removal
-     *
-     * @param {MessageCollection} messages
-     */
-    populateLegacyTokenCounts(messages) {
-        // Update general token counts
-        const chatHistory = messages.getItemByIdentifier('chatHistory');
-        const startChat = chatHistory?.getCollection()[0]?.getTokens() || 0;
-        const continueNudge = chatHistory?.getCollection().find(message => message.identifier === 'continueNudge')?.getTokens() || 0;
-
-        this.tokenHandler.counts = {
-            ...this.tokenHandler.counts,
-            ...{
-                'start_chat': startChat,
-                'prompt': 0,
-                'bias': this.tokenHandler.counts.bias ?? 0,
-                'nudge': continueNudge,
-                'jailbreak': this.tokenHandler.counts.jailbreak ?? 0,
-                'impersonate': 0,
-                'examples': this.tokenHandler.counts.dialogueExamples ?? 0,
-                'conversation': this.tokenHandler.counts.chatHistory ?? 0,
-            },
-        };
-    }
-
     /**
      * Empties, then re-assembles the container containing the prompt list.
     */

From 0d3505c44b91783ea675d7ec6e06c86d1ae333be Mon Sep 17 00:00:00 2001
From: valadaptive
Date: Sat, 23 Dec 2023 05:58:41 -0500
Subject: [PATCH 03/22] Remove OAI_BEFORE_CHATCOMPLETION

Not used in any internal code or extensions I can find.
---
 public/script.js         | 1 -
 public/scripts/openai.js | 3 ---
 2 files changed, 4 deletions(-)

diff --git a/public/script.js b/public/script.js
index 933fbf0ff..06f362b82 100644
--- a/public/script.js
+++ b/public/script.js
@@ -320,7 +320,6 @@ export const event_types = {
     SETTINGS_LOADED_AFTER: 'settings_loaded_after',
     CHATCOMPLETION_SOURCE_CHANGED: 'chatcompletion_source_changed',
     CHATCOMPLETION_MODEL_CHANGED: 'chatcompletion_model_changed',
-    OAI_BEFORE_CHATCOMPLETION: 'oai_before_chatcompletion',
     OAI_PRESET_CHANGED_BEFORE: 'oai_preset_changed_before',
     OAI_PRESET_CHANGED_AFTER: 'oai_preset_changed_after',
     WORLDINFO_SETTINGS_UPDATED: 'worldinfo_settings_updated',

diff --git a/public/scripts/openai.js b/public/scripts/openai.js
index ffb4a9efe..7126c380f 100644
--- a/public/scripts/openai.js
+++ b/public/scripts/openai.js
@@ -1031,9 +1031,6 @@ function preparePromptsForChatCompletion({ Scenario, charPersonality, name2, wor
         prompts.set(jbReplacement, prompts.index('jailbreak'));
     }
 
-    // Allow subscribers to manipulate the prompts object
-    eventSource.emit(event_types.OAI_BEFORE_CHATCOMPLETION, prompts);
-
     return prompts;
 }

From 4fc2f15448fac2e6e8c15546958a9dcc39932bd7 Mon Sep 17 00:00:00 2001
From: valadaptive
Date: Sat, 23 Dec 2023 07:54:44 -0500
Subject: [PATCH 04/22] Reformat Generate() group logic

The first two conditions in the group if/else blocks are the same, so we
can combine them.
---
 public/script.js | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/public/script.js b/public/script.js
index 06f362b82..78454a016 100644
--- a/public/script.js
+++ b/public/script.js
@@ -2978,10 +2978,12 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
         chat_metadata['tainted'] = true;
     }
 
-    if (selected_group && !is_group_generating && !dryRun) {
-        // Returns the promise that generateGroupWrapper returns; resolves when generation is done
-        return generateGroupWrapper(false, type, { quiet_prompt, force_chid, signal: abortController.signal, quietImage, maxLoops });
-    } else if (selected_group && !is_group_generating && dryRun) {
+    if (selected_group && !is_group_generating) {
+        if (!dryRun) {
+            // Returns the promise that generateGroupWrapper returns; resolves when generation is done
+            return generateGroupWrapper(false, type, { quiet_prompt, force_chid, signal: abortController.signal, quietImage, maxLoops });
+        }
+
         const characterIndexMap = new Map(characters.map((char, index) => [char.avatar, index]));
         const group = groups.find((x) => x.id === selected_group);

From 1029ad90a2a858afffba56ce333ebfec17ee4f2c Mon Sep 17 00:00:00 2001
From: valadaptive
Date: Mon, 25 Dec 2023 03:19:08 -0500
Subject: [PATCH 05/22] Extract "not in a chat" check into guard clause

This lets us remove a layer of indentation, and reveal the error handling
logic that was previously hidden below a really long block of code.
---
 public/script.js | 1874 +++++++++++++++++++++++-----------------------
 1 file changed, 939 insertions(+), 935 deletions(-)

diff --git a/public/script.js b/public/script.js
index 78454a016..4cdd68979 100644
--- a/public/script.js
+++ b/public/script.js
@@ -3015,946 +3015,950 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
         quiet_prompt = main_api == 'novel' && !quietToLoud ?
adjustNovelInstructionPrompt(quiet_prompt) : quiet_prompt; } - if (true === dryRun || - (online_status != 'no_connection' && this_chid != undefined && this_chid !== 'invalid-safety-id')) { - let textareaText; - if (type !== 'regenerate' && type !== 'swipe' && type !== 'quiet' && !isImpersonate && !dryRun) { - is_send_press = true; - textareaText = String($('#send_textarea').val()); - $('#send_textarea').val('').trigger('input'); - } else { - textareaText = ''; - if (chat.length && chat[chat.length - 1]['is_user']) { - //do nothing? why does this check exist? - } - else if (type !== 'quiet' && type !== 'swipe' && !isImpersonate && !dryRun && chat.length) { - chat.length = chat.length - 1; - count_view_mes -= 1; - $('#chat').children().last().hide(250, function () { - $(this).remove(); - }); - await eventSource.emit(event_types.MESSAGE_DELETED, chat.length); - } - } + const isChatValid = online_status != 'no_connection' && this_chid != undefined && this_chid !== 'invalid-safety-id'; - if (!type && !textareaText && power_user.continue_on_send && !selected_group && chat.length && !chat[chat.length - 1]['is_user'] && !chat[chat.length - 1]['is_system']) { - type = 'continue'; - } - - const isContinue = type == 'continue'; - - // Rewrite the generation timer to account for the time passed for all the continuations. - if (isContinue && chat.length) { - const prevFinished = chat[chat.length - 1]['gen_finished']; - const prevStarted = chat[chat.length - 1]['gen_started']; - - if (prevFinished && prevStarted) { - const timePassed = prevFinished - prevStarted; - generation_started = new Date(Date.now() - timePassed); - chat[chat.length - 1]['gen_started'] = generation_started; - } - } - - if (!dryRun) { - deactivateSendButtons(); - } - - let { messageBias, promptBias, isUserPromptBias } = getBiasStrings(textareaText, type); - - //********************************* - //PRE FORMATING STRING - //********************************* - - //for normal messages sent from user.. - if ((textareaText != '' || hasPendingFileAttachment()) && !automatic_trigger && type !== 'quiet' && !dryRun) { - // If user message contains no text other than bias - send as a system message - if (messageBias && !removeMacros(textareaText)) { - sendSystemMessage(system_message_types.GENERIC, ' ', { bias: messageBias }); - } - else { - await sendMessageAsUser(textareaText, messageBias); - } - } - else if (textareaText == '' && !automatic_trigger && !dryRun && type === undefined && main_api == 'openai' && oai_settings.send_if_empty.trim().length > 0) { - // Use send_if_empty if set and the user message is empty. Only when sending messages normally - await sendMessageAsUser(oai_settings.send_if_empty.trim(), messageBias); - } - - let { - description, - personality, - persona, - scenario, - mesExamples, - system, - jailbreak, - } = getCharacterCardFields(); - - if (isInstruct) { - system = power_user.prefer_character_prompt && system ? 
system : baseChatReplace(power_user.instruct.system_prompt, name1, name2); - system = formatInstructModeSystemPrompt(substituteParams(system, name1, name2, power_user.instruct.system_prompt)); - } - - // Depth prompt (character-specific A/N) - removeDepthPrompts(); - const groupDepthPrompts = getGroupDepthPrompts(selected_group, Number(this_chid)); - - if (selected_group && Array.isArray(groupDepthPrompts) && groupDepthPrompts.length > 0) { - groupDepthPrompts.forEach((value, index) => { - setExtensionPrompt('DEPTH_PROMPT_' + index, value.text, extension_prompt_types.IN_CHAT, value.depth, extension_settings.note.allowWIScan); - }); - } else { - const depthPromptText = baseChatReplace(characters[this_chid].data?.extensions?.depth_prompt?.prompt?.trim(), name1, name2) || ''; - const depthPromptDepth = characters[this_chid].data?.extensions?.depth_prompt?.depth ?? depth_prompt_depth_default; - setExtensionPrompt('DEPTH_PROMPT', depthPromptText, extension_prompt_types.IN_CHAT, depthPromptDepth, extension_settings.note.allowWIScan); - } - - // Parse example messages - if (!mesExamples.startsWith('')) { - mesExamples = '\n' + mesExamples.trim(); - } - if (mesExamples.replace(//gi, '').trim().length === 0) { - mesExamples = ''; - } - if (mesExamples && isInstruct) { - mesExamples = formatInstructModeExamples(mesExamples, name1, name2); - } - - const exampleSeparator = power_user.context.example_separator ? `${substituteParams(power_user.context.example_separator)}\n` : ''; - const blockHeading = main_api === 'openai' ? '\n' : exampleSeparator; - let mesExamplesArray = mesExamples.split(//gi).slice(1).map(block => `${blockHeading}${block.trim()}\n`); - - // First message in fresh 1-on-1 chat reacts to user/character settings changes - if (chat.length) { - chat[0].mes = substituteParams(chat[0].mes); - } - - // Collect messages with usable content - let coreChat = chat.filter(x => !x.is_system); - if (type === 'swipe') { - coreChat.pop(); - } - - coreChat = await Promise.all(coreChat.map(async (chatItem, index) => { - let message = chatItem.mes; - let regexType = chatItem.is_user ? 
regex_placement.USER_INPUT : regex_placement.AI_OUTPUT; - let options = { isPrompt: true }; - - let regexedMessage = getRegexedString(message, regexType, options); - regexedMessage = await appendFileContent(chatItem, regexedMessage); - - return { - ...chatItem, - mes: regexedMessage, - index, - }; - })); - - // Determine token limit - let this_max_context = getMaxContextSize(); - - if (!dryRun && type !== 'quiet') { - console.debug('Running extension interceptors'); - const aborted = await runGenerationInterceptors(coreChat, this_max_context); - - if (aborted) { - console.debug('Generation aborted by extension interceptors'); - unblockGeneration(); - return Promise.resolve(); - } - } else { - console.debug('Skipping extension interceptors for dry run'); - } - - console.log(`Core/all messages: ${coreChat.length}/${chat.length}`); - - // kingbri MARK: - Make sure the prompt bias isn't the same as the user bias - if ((promptBias && !isUserPromptBias) || power_user.always_force_name2 || main_api == 'novel') { - force_name2 = true; - } - - if (isImpersonate) { - force_name2 = false; - } - - ////////////////////////////////// - - let chat2 = []; - let continue_mag = ''; - for (let i = coreChat.length - 1, j = 0; i >= 0; i--, j++) { - // For OpenAI it's only used in WI - if (main_api == 'openai' && (!world_info || world_info.length === 0)) { - console.debug('No WI, skipping chat2 for OAI'); - break; - } - - chat2[i] = formatMessageHistoryItem(coreChat[j], isInstruct, false); - - if (j === 0 && isInstruct) { - // Reformat with the first output sequence (if any) - chat2[i] = formatMessageHistoryItem(coreChat[j], isInstruct, force_output_sequence.FIRST); - } - - // Do not suffix the message for continuation - if (i === 0 && isContinue) { - if (isInstruct) { - // Reformat with the last output sequence (if any) - chat2[i] = formatMessageHistoryItem(coreChat[j], isInstruct, force_output_sequence.LAST); - } - - chat2[i] = chat2[i].slice(0, chat2[i].lastIndexOf(coreChat[j].mes) + coreChat[j].mes.length); - continue_mag = coreChat[j].mes; - } - } - - // Adjust token limit for Horde - let adjustedParams; - if (main_api == 'koboldhorde' && (horde_settings.auto_adjust_context_length || horde_settings.auto_adjust_response_length)) { - try { - adjustedParams = await adjustHordeGenerationParams(max_context, amount_gen); - } - catch { - unblockGeneration(); - return Promise.resolve(); - } - if (horde_settings.auto_adjust_context_length) { - this_max_context = (adjustedParams.maxContextLength - adjustedParams.maxLength); - } - } - - // Extension added strings - // Set non-WI AN - setFloatingPrompt(); - // Add WI to prompt (and also inject WI to AN value via hijack) - - let { worldInfoString, worldInfoBefore, worldInfoAfter, worldInfoDepth } = await getWorldInfoPrompt(chat2, this_max_context); - - if (skipWIAN !== true) { - console.log('skipWIAN not active, adding WIAN'); - // Add all depth WI entries to prompt - flushWIDepthInjections(); - if (Array.isArray(worldInfoDepth)) { - worldInfoDepth.forEach((e) => { - const joinedEntries = e.entries.join('\n'); - setExtensionPrompt(`customDepthWI-${e.depth}`, joinedEntries, extension_prompt_types.IN_CHAT, e.depth); - }); - } - } else { - console.log('skipping WIAN'); - } - - // Add persona description to prompt - addPersonaDescriptionExtensionPrompt(); - // Call combined AN into Generate - let allAnchors = getAllExtensionPrompts(); - const beforeScenarioAnchor = getExtensionPrompt(extension_prompt_types.BEFORE_PROMPT).trimStart(); - const afterScenarioAnchor = 
getExtensionPrompt(extension_prompt_types.IN_PROMPT); - let zeroDepthAnchor = getExtensionPrompt(extension_prompt_types.IN_CHAT, 0, ' '); - - const storyStringParams = { - description: description, - personality: personality, - persona: persona, - scenario: scenario, - system: isInstruct ? system : '', - char: name2, - user: name1, - wiBefore: worldInfoBefore, - wiAfter: worldInfoAfter, - loreBefore: worldInfoBefore, - loreAfter: worldInfoAfter, - mesExamples: mesExamplesArray.join(''), - }; - - const storyString = renderStoryString(storyStringParams); - - // Story string rendered, safe to remove - if (power_user.strip_examples) { - mesExamplesArray = []; - } - - let oaiMessages = []; - let oaiMessageExamples = []; - - if (main_api === 'openai') { - message_already_generated = ''; - oaiMessages = setOpenAIMessages(coreChat); - oaiMessageExamples = setOpenAIMessageExamples(mesExamplesArray); - } - - // hack for regeneration of the first message - if (chat2.length == 0) { - chat2.push(''); - } - - let examplesString = ''; - let chatString = ''; - let cyclePrompt = ''; - - function getMessagesTokenCount() { - const encodeString = [ - storyString, - examplesString, - chatString, - allAnchors, - quiet_prompt, - cyclePrompt, - ].join('').replace(/\r/gm, ''); - return getTokenCount(encodeString, power_user.token_padding); - } - - // Force pinned examples into the context - let pinExmString; - if (power_user.pin_examples) { - pinExmString = examplesString = mesExamplesArray.join(''); - } - - // Only add the chat in context if past the greeting message - if (isContinue && (chat2.length > 1 || main_api === 'openai')) { - cyclePrompt = chat2.shift(); - } - - // Collect enough messages to fill the context - let arrMes = []; - let tokenCount = getMessagesTokenCount(); - for (let item of chat2) { - // not needed for OAI prompting - if (main_api == 'openai') { - break; - } - - tokenCount += getTokenCount(item.replace(/\r/gm, '')); - chatString = item + chatString; - if (tokenCount < this_max_context) { - arrMes[arrMes.length] = item; - } else { - break; - } - - // Prevent UI thread lock on tokenization - await delay(1); - } - - if (main_api !== 'openai') { - setInContextMessages(arrMes.length, type); - } - - // Estimate how many unpinned example messages fit in the context - tokenCount = getMessagesTokenCount(); - let count_exm_add = 0; - if (!power_user.pin_examples) { - for (let example of mesExamplesArray) { - tokenCount += getTokenCount(example.replace(/\r/gm, '')); - examplesString += example; - if (tokenCount < this_max_context) { - count_exm_add++; - } else { - break; - } - await delay(1); - } - } - - let mesSend = []; - console.debug('calling runGenerate'); - - if (isContinue) { - // Coping mechanism for OAI spacing - const isForceInstruct = isOpenRouterWithInstruct(); - if (main_api === 'openai' && !isForceInstruct && !cyclePrompt.endsWith(' ')) { - cyclePrompt += ' '; - continue_mag += ' '; - } - message_already_generated = continue_mag; - } - - const originalType = type; - - if (!dryRun) { - is_send_press = true; - } - - generatedPromptCache += cyclePrompt; - if (generatedPromptCache.length == 0 || type === 'continue') { - console.debug('generating prompt'); - chatString = ''; - arrMes = arrMes.reverse(); - arrMes.forEach(function (item, i, arr) {// For added anchors and others - // OAI doesn't need all of this - if (main_api === 'openai') { - return; - } - - // Cohee: I'm not even sure what this is for anymore - if (i === arrMes.length - 1 && type !== 'continue') { - item = 
item.replace(/\n?$/, ''); - } - - mesSend[mesSend.length] = { message: item, extensionPrompts: [] }; - }); - } - - let mesExmString = ''; - - function setPromptString() { - if (main_api == 'openai') { - return; - } - - console.debug('--setting Prompt string'); - mesExmString = pinExmString ?? mesExamplesArray.slice(0, count_exm_add).join(''); - - if (mesSend.length) { - mesSend[mesSend.length - 1].message = modifyLastPromptLine(mesSend[mesSend.length - 1].message); - } - } - - function modifyLastPromptLine(lastMesString) { - //#########QUIET PROMPT STUFF PT2############## - - // Add quiet generation prompt at depth 0 - if (quiet_prompt && quiet_prompt.length) { - - // here name1 is forced for all quiet prompts..why? - const name = name1; - //checks if we are in instruct, if so, formats the chat as such, otherwise just adds the quiet prompt - const quietAppend = isInstruct ? formatInstructModeChat(name, quiet_prompt, false, true, '', name1, name2, false) : `\n${quiet_prompt}`; - - //This begins to fix quietPrompts (particularly /sysgen) for instruct - //previously instruct input sequence was being appended to the last chat message w/o '\n' - //and no output sequence was added after the input's content. - //TODO: respect output_sequence vs last_output_sequence settings - //TODO: decide how to prompt this to clarify who is talking 'Narrator', 'System', etc. - if (isInstruct) { - lastMesString += '\n' + quietAppend; // + power_user.instruct.output_sequence + '\n'; - } else { - lastMesString += quietAppend; - } - - - // Ross: bailing out early prevents quiet prompts from respecting other instruct prompt toggles - // for sysgen, SD, and summary this is desireable as it prevents the AI from responding as char.. - // but for idle prompting, we want the flexibility of the other prompt toggles, and to respect them as per settings in the extension - // need a detection for what the quiet prompt is being asked for... - - // Bail out early? - if (quietToLoud !== true) { - return lastMesString; - } - } - - - // Get instruct mode line - if (isInstruct && !isContinue) { - const name = isImpersonate ? 
name1 : name2; - lastMesString += formatInstructModePrompt(name, isImpersonate, promptBias, name1, name2); - } - - // Get non-instruct impersonation line - if (!isInstruct && isImpersonate && !isContinue) { - const name = name1; - if (!lastMesString.endsWith('\n')) { - lastMesString += '\n'; - } - lastMesString += name + ':'; - } - - // Add character's name - // Force name append on continue (if not continuing on user message) - if (!isInstruct && force_name2) { - if (!lastMesString.endsWith('\n')) { - lastMesString += '\n'; - } - if (!isContinue || !(chat[chat.length - 1]?.is_user)) { - lastMesString += `${name2}:`; - } - } - - return lastMesString; - } - - // Clean up the already generated prompt for seamless addition - function cleanupPromptCache(promptCache) { - // Remove the first occurrance of character's name - if (promptCache.trimStart().startsWith(`${name2}:`)) { - promptCache = promptCache.replace(`${name2}:`, '').trimStart(); - } - - // Remove the first occurrance of prompt bias - if (promptCache.trimStart().startsWith(promptBias)) { - promptCache = promptCache.replace(promptBias, ''); - } - - // Add a space if prompt cache doesn't start with one - if (!/^\s/.test(promptCache) && !isInstruct && !isContinue) { - promptCache = ' ' + promptCache; - } - - return promptCache; - } - - function checkPromptSize() { - console.debug('---checking Prompt size'); - setPromptString(); - const prompt = [ - storyString, - mesExmString, - mesSend.join(''), - generatedPromptCache, - allAnchors, - quiet_prompt, - ].join('').replace(/\r/gm, ''); - let thisPromptContextSize = getTokenCount(prompt, power_user.token_padding); - - if (thisPromptContextSize > this_max_context) { //if the prepared prompt is larger than the max context size... - if (count_exm_add > 0) { // ..and we have example mesages.. - count_exm_add--; // remove the example messages... - checkPromptSize(); // and try agin... - } else if (mesSend.length > 0) { // if the chat history is longer than 0 - mesSend.shift(); // remove the first (oldest) chat entry.. - checkPromptSize(); // and check size again.. - } else { - //end - console.debug(`---mesSend.length = ${mesSend.length}`); - } - } - } - - if (generatedPromptCache.length > 0 && main_api !== 'openai') { - console.debug('---Generated Prompt Cache length: ' + generatedPromptCache.length); - checkPromptSize(); - } else { - console.debug('---calling setPromptString ' + generatedPromptCache.length); - setPromptString(); - } - - // Fetches the combined prompt for both negative and positive prompts - const cfgGuidanceScale = getGuidanceScale(); - - // For prompt bit itemization - let mesSendString = ''; - - function getCombinedPrompt(isNegative) { - // Only return if the guidance scale doesn't exist or the value is 1 - // Also don't return if constructing the neutral prompt - if (isNegative && (!cfgGuidanceScale || cfgGuidanceScale?.value === 1)) { - return; - } - - // OAI has its own prompt manager. 
No need to do anything here - if (main_api === 'openai') { - return ''; - } - - // Deep clone - let finalMesSend = structuredClone(mesSend); - - // TODO: Rewrite getExtensionPrompt to not require multiple for loops - // Set all extension prompts where insertion depth > mesSend length - if (finalMesSend.length) { - for (let upperDepth = MAX_INJECTION_DEPTH; upperDepth >= finalMesSend.length; upperDepth--) { - const upperAnchor = getExtensionPrompt(extension_prompt_types.IN_CHAT, upperDepth); - if (upperAnchor && upperAnchor.length) { - finalMesSend[0].extensionPrompts.push(upperAnchor); - } - } - } - - finalMesSend.forEach((mesItem, index) => { - if (index === 0) { - return; - } - - const anchorDepth = Math.abs(index - finalMesSend.length); - // NOTE: Depth injected here! - const extensionAnchor = getExtensionPrompt(extension_prompt_types.IN_CHAT, anchorDepth); - - if (anchorDepth >= 0 && extensionAnchor && extensionAnchor.length) { - mesItem.extensionPrompts.push(extensionAnchor); - } - }); - - // TODO: Move zero-depth anchor append to work like CFG and bias appends - if (zeroDepthAnchor?.length && !isContinue) { - console.debug(/\s/.test(finalMesSend[finalMesSend.length - 1].message.slice(-1))); - finalMesSend[finalMesSend.length - 1].message += - /\s/.test(finalMesSend[finalMesSend.length - 1].message.slice(-1)) - ? zeroDepthAnchor - : `${zeroDepthAnchor}`; - } - - let cfgPrompt = {}; - if (cfgGuidanceScale && cfgGuidanceScale?.value !== 1) { - cfgPrompt = getCfgPrompt(cfgGuidanceScale, isNegative); - } - - if (cfgPrompt && cfgPrompt?.value) { - if (cfgPrompt?.depth === 0) { - finalMesSend[finalMesSend.length - 1].message += - /\s/.test(finalMesSend[finalMesSend.length - 1].message.slice(-1)) - ? cfgPrompt.value - : ` ${cfgPrompt.value}`; - } else { - // TODO: Make all extension prompts use an array/splice method - const lengthDiff = mesSend.length - cfgPrompt.depth; - const cfgDepth = lengthDiff >= 0 ? lengthDiff : 0; - finalMesSend[cfgDepth].extensionPrompts.push(`${cfgPrompt.value}\n`); - } - } - - // Add prompt bias after everything else - // Always run with continue - if (!isInstruct && !isImpersonate) { - if (promptBias.trim().length !== 0) { - finalMesSend[finalMesSend.length - 1].message += - /\s/.test(finalMesSend[finalMesSend.length - 1].message.slice(-1)) - ? promptBias.trimStart() - : ` ${promptBias.trimStart()}`; - } - } - - // Prune from prompt cache if it exists - if (generatedPromptCache.length !== 0) { - generatedPromptCache = cleanupPromptCache(generatedPromptCache); - } - - // Flattens the multiple prompt objects to a string. 
- const combine = () => { - // Right now, everything is suffixed with a newline - mesSendString = finalMesSend.map((e) => `${e.extensionPrompts.join('')}${e.message}`).join(''); - - // add a custom dingus (if defined) - mesSendString = addChatsSeparator(mesSendString); - - // add chat preamble - mesSendString = addChatsPreamble(mesSendString); - - let combinedPrompt = beforeScenarioAnchor + - storyString + - afterScenarioAnchor + - mesExmString + - mesSendString + - generatedPromptCache; - - combinedPrompt = combinedPrompt.replace(/\r/gm, ''); - - if (power_user.collapse_newlines) { - combinedPrompt = collapseNewlines(combinedPrompt); - } - - return combinedPrompt; - }; - - let data = { - api: main_api, - combinedPrompt: null, - description, - personality, - persona, - scenario, - char: name2, - user: name1, - beforeScenarioAnchor, - afterScenarioAnchor, - mesExmString, - finalMesSend, - generatedPromptCache, - main: system, - jailbreak, - naiPreamble: nai_settings.preamble, - }; - - // Before returning the combined prompt, give available context related information to all subscribers. - eventSource.emitAndWait(event_types.GENERATE_BEFORE_COMBINE_PROMPTS, data); - - // If one or multiple subscribers return a value, forfeit the responsibillity of flattening the context. - return !data.combinedPrompt ? combine() : data.combinedPrompt; - } - - // Get the negative prompt first since it has the unmodified mesSend array - let negativePrompt = main_api == 'textgenerationwebui' ? getCombinedPrompt(true) : undefined; - let finalPrompt = getCombinedPrompt(false); - - // Include the entire guidance scale object - const cfgValues = cfgGuidanceScale && cfgGuidanceScale?.value !== 1 ? ({ guidanceScale: cfgGuidanceScale, negativePrompt: negativePrompt }) : null; - - let maxLength = Number(amount_gen); // how many tokens the AI will be requested to generate - let thisPromptBits = []; - - // TODO: Make this a switch - if (main_api == 'koboldhorde' && horde_settings.auto_adjust_response_length) { - maxLength = Math.min(maxLength, adjustedParams.maxLength); - maxLength = Math.max(maxLength, MIN_LENGTH); // prevent validation errors - } - - let generate_data; - if (main_api == 'koboldhorde' || main_api == 'kobold') { - generate_data = { - prompt: finalPrompt, - gui_settings: true, - max_length: maxLength, - max_context_length: max_context, - }; - - if (preset_settings != 'gui') { - const isHorde = main_api == 'koboldhorde'; - const presetSettings = koboldai_settings[koboldai_setting_names[preset_settings]]; - const maxContext = (adjustedParams && horde_settings.auto_adjust_context_length) ? 
adjustedParams.maxContextLength : max_context; - generate_data = getKoboldGenerationData(finalPrompt, presetSettings, maxLength, maxContext, isHorde, type); - } - } - else if (main_api == 'textgenerationwebui') { - generate_data = getTextGenGenerationData(finalPrompt, maxLength, isImpersonate, isContinue, cfgValues, type); - } - else if (main_api == 'novel') { - const presetSettings = novelai_settings[novelai_setting_names[nai_settings.preset_settings_novel]]; - generate_data = getNovelGenerationData(finalPrompt, presetSettings, maxLength, isImpersonate, isContinue, cfgValues, type); - } - else if (main_api == 'openai') { - let [prompt, counts] = await prepareOpenAIMessages({ - name2: name2, - charDescription: description, - charPersonality: personality, - Scenario: scenario, - worldInfoBefore: worldInfoBefore, - worldInfoAfter: worldInfoAfter, - extensionPrompts: extension_prompts, - bias: promptBias, - type: type, - quietPrompt: quiet_prompt, - quietImage: quietImage, - cyclePrompt: cyclePrompt, - systemPromptOverride: system, - jailbreakPromptOverride: jailbreak, - personaDescription: persona, - messages: oaiMessages, - messageExamples: oaiMessageExamples, - }, dryRun); - generate_data = { prompt: prompt }; - - // counts will return false if the user has not enabled the token breakdown feature - if (counts) { - parseTokenCounts(counts, thisPromptBits); - } - - if (!dryRun) { - setInContextMessages(openai_messages_count, type); - } - } - - async function finishGenerating() { - if (dryRun) return { error: 'dryRun' }; - - if (power_user.console_log_prompts) { - console.log(generate_data.prompt); - } - - console.debug('rungenerate calling API'); - - showStopButton(); - - //set array object for prompt token itemization of this message - let currentArrayEntry = Number(thisPromptBits.length - 1); - let additionalPromptStuff = { - ...thisPromptBits[currentArrayEntry], - rawPrompt: generate_data.prompt || generate_data.input, - mesId: getNextMessageId(type), - allAnchors: allAnchors, - summarizeString: (extension_prompts['1_memory']?.value || ''), - authorsNoteString: (extension_prompts['2_floating_prompt']?.value || ''), - smartContextString: (extension_prompts['chromadb']?.value || ''), - worldInfoString: worldInfoString, - storyString: storyString, - beforeScenarioAnchor: beforeScenarioAnchor, - afterScenarioAnchor: afterScenarioAnchor, - examplesString: examplesString, - mesSendString: mesSendString, - generatedPromptCache: generatedPromptCache, - promptBias: promptBias, - finalPrompt: finalPrompt, - charDescription: description, - charPersonality: personality, - scenarioText: scenario, - this_max_context: this_max_context, - padding: power_user.token_padding, - main_api: main_api, - instruction: isInstruct ? substituteParams(power_user.prefer_character_prompt && system ? system : power_user.instruct.system_prompt) : '', - userPersona: (power_user.persona_description || ''), - }; - - thisPromptBits = additionalPromptStuff; - - //console.log(thisPromptBits); - const itemizedIndex = itemizedPrompts.findIndex((item) => item.mesId === thisPromptBits['mesId']); - - if (itemizedIndex !== -1) { - itemizedPrompts[itemizedIndex] = thisPromptBits; - } - else { - itemizedPrompts.push(thisPromptBits); - } - - console.debug(`pushed prompt bits to itemizedPrompts array. 
Length is now: ${itemizedPrompts.length}`); - - if (isStreamingEnabled() && type !== 'quiet') { - streamingProcessor = new StreamingProcessor(type, force_name2, generation_started, message_already_generated); - if (isContinue) { - // Save reply does add cycle text to the prompt, so it's not needed here - streamingProcessor.firstMessageText = ''; - } - - streamingProcessor.generator = await sendStreamingRequest(type, generate_data); - - hideSwipeButtons(); - let getMessage = await streamingProcessor.generate(); - let messageChunk = cleanUpMessage(getMessage, isImpersonate, isContinue, false); - - if (isContinue) { - getMessage = continue_mag + getMessage; - } - - if (streamingProcessor && !streamingProcessor.isStopped && streamingProcessor.isFinished) { - await streamingProcessor.onFinishStreaming(streamingProcessor.messageId, getMessage); - streamingProcessor = null; - triggerAutoContinue(messageChunk, isImpersonate); - } - } else { - return await sendGenerationRequest(type, generate_data); - } - } - - return finishGenerating().then(onSuccess, onError); - - async function onSuccess(data) { - if (!data) return; - let messageChunk = ''; - - if (data.error == 'dryRun') { - generatedPromptCache = ''; - return; - } - - if (!data.error) { - //const getData = await response.json(); - let getMessage = extractMessageFromData(data); - let title = extractTitleFromData(data); - kobold_horde_model = title; - - const swipes = extractMultiSwipes(data, type); - - messageChunk = cleanUpMessage(getMessage, isImpersonate, isContinue, false); - - if (isContinue) { - getMessage = continue_mag + getMessage; - } - - //Formating - const displayIncomplete = type === 'quiet' && !quietToLoud; - getMessage = cleanUpMessage(getMessage, isImpersonate, isContinue, displayIncomplete); - - if (getMessage.length > 0) { - if (isImpersonate) { - $('#send_textarea').val(getMessage).trigger('input'); - generatedPromptCache = ''; - await eventSource.emit(event_types.IMPERSONATE_READY, getMessage); - } - else if (type == 'quiet') { - unblockGeneration(); - return getMessage; - } - else { - // Without streaming we'll be having a full message on continuation. Treat it as a last chunk. - if (originalType !== 'continue') { - ({ type, getMessage } = await saveReply(type, getMessage, false, title, swipes)); - } - else { - ({ type, getMessage } = await saveReply('appendFinal', getMessage, false, title, swipes)); - } - } - - if (type !== 'quiet') { - playMessageSound(); - } - } else { - // If maxLoops is not passed in (e.g. first time generating), set it to MAX_GENERATION_LOOPS - maxLoops ??= MAX_GENERATION_LOOPS; - - if (maxLoops === 0) { - if (type !== 'quiet') { - throwCircuitBreakerError(); - } - throw new Error('Generate circuit breaker interruption'); - } - - // regenerate with character speech reenforced - // to make sure we leave on swipe type while also adding the name2 appendage - await delay(1000); - // The first await is for waiting for the generate to start. 
The second one is waiting for it to finish - const result = await await Generate(type, { automatic_trigger, force_name2: true, quiet_prompt, skipWIAN, force_chid, maxLoops: maxLoops - 1 }); - return result; - } - - if (power_user.auto_swipe) { - console.debug('checking for autoswipeblacklist on non-streaming message'); - function containsBlacklistedWords(getMessage, blacklist, threshold) { - console.debug('checking blacklisted words'); - const regex = new RegExp(`\\b(${blacklist.join('|')})\\b`, 'gi'); - const matches = getMessage.match(regex) || []; - return matches.length >= threshold; - } - - const generatedTextFiltered = (getMessage) => { - if (power_user.auto_swipe_blacklist_threshold) { - if (containsBlacklistedWords(getMessage, power_user.auto_swipe_blacklist, power_user.auto_swipe_blacklist_threshold)) { - console.debug('Generated text has blacklisted words'); - return true; - } - } - - return false; - }; - if (generatedTextFiltered(getMessage)) { - console.debug('swiping right automatically'); - is_send_press = false; - swipe_right(); - // TODO: do we want to resolve after an auto-swipe? - return; - } - } - } else { - generatedPromptCache = ''; - - if (data?.response) { - toastr.error(data.response, 'API Error'); - } - throw data?.response; - } - - console.debug('/api/chats/save called by /Generate'); - await saveChatConditional(); - unblockGeneration(); - streamingProcessor = null; - - if (type !== 'quiet') { - triggerAutoContinue(messageChunk, isImpersonate); - } - } - - function onError(exception) { - if (typeof exception?.error?.message === 'string') { - toastr.error(exception.error.message, 'Error', { timeOut: 10000, extendedTimeOut: 20000 }); - } - - unblockGeneration(); - console.log(exception); - streamingProcessor = null; - throw exception; - } - } else { //generate's primary loop ends, after this is error handling for no-connection or safety-id + // We can't do anything because we're not in a chat right now. (Unless it's a dry run, in which case we need to + // assemble the prompt so we can count its tokens regardless of whether a chat is active.) + if (!dryRun && !isChatValid) { if (this_chid === undefined || this_chid === 'invalid-safety-id') { toastr.warning('Сharacter is not selected'); } is_send_press = false; + return Promise.resolve(); + } + + let textareaText; + if (type !== 'regenerate' && type !== 'swipe' && type !== 'quiet' && !isImpersonate && !dryRun) { + is_send_press = true; + textareaText = String($('#send_textarea').val()); + $('#send_textarea').val('').trigger('input'); + } else { + textareaText = ''; + if (chat.length && chat[chat.length - 1]['is_user']) { + //do nothing? why does this check exist? + } + else if (type !== 'quiet' && type !== 'swipe' && !isImpersonate && !dryRun && chat.length) { + chat.length = chat.length - 1; + count_view_mes -= 1; + $('#chat').children().last().hide(250, function () { + $(this).remove(); + }); + await eventSource.emit(event_types.MESSAGE_DELETED, chat.length); + } + } + + if (!type && !textareaText && power_user.continue_on_send && !selected_group && chat.length && !chat[chat.length - 1]['is_user'] && !chat[chat.length - 1]['is_system']) { + type = 'continue'; + } + + const isContinue = type == 'continue'; + + // Rewrite the generation timer to account for the time passed for all the continuations. 
+ if (isContinue && chat.length) { + const prevFinished = chat[chat.length - 1]['gen_finished']; + const prevStarted = chat[chat.length - 1]['gen_started']; + + if (prevFinished && prevStarted) { + const timePassed = prevFinished - prevStarted; + generation_started = new Date(Date.now() - timePassed); + chat[chat.length - 1]['gen_started'] = generation_started; + } + } + + if (!dryRun) { + deactivateSendButtons(); + } + + let { messageBias, promptBias, isUserPromptBias } = getBiasStrings(textareaText, type); + + //********************************* + //PRE FORMATING STRING + //********************************* + + //for normal messages sent from user.. + if ((textareaText != '' || hasPendingFileAttachment()) && !automatic_trigger && type !== 'quiet' && !dryRun) { + // If user message contains no text other than bias - send as a system message + if (messageBias && !removeMacros(textareaText)) { + sendSystemMessage(system_message_types.GENERIC, ' ', { bias: messageBias }); + } + else { + await sendMessageAsUser(textareaText, messageBias); + } + } + else if (textareaText == '' && !automatic_trigger && !dryRun && type === undefined && main_api == 'openai' && oai_settings.send_if_empty.trim().length > 0) { + // Use send_if_empty if set and the user message is empty. Only when sending messages normally + await sendMessageAsUser(oai_settings.send_if_empty.trim(), messageBias); + } + + let { + description, + personality, + persona, + scenario, + mesExamples, + system, + jailbreak, + } = getCharacterCardFields(); + + if (isInstruct) { + system = power_user.prefer_character_prompt && system ? system : baseChatReplace(power_user.instruct.system_prompt, name1, name2); + system = formatInstructModeSystemPrompt(substituteParams(system, name1, name2, power_user.instruct.system_prompt)); + } + + // Depth prompt (character-specific A/N) + removeDepthPrompts(); + const groupDepthPrompts = getGroupDepthPrompts(selected_group, Number(this_chid)); + + if (selected_group && Array.isArray(groupDepthPrompts) && groupDepthPrompts.length > 0) { + groupDepthPrompts.forEach((value, index) => { + setExtensionPrompt('DEPTH_PROMPT_' + index, value.text, extension_prompt_types.IN_CHAT, value.depth, extension_settings.note.allowWIScan); + }); + } else { + const depthPromptText = baseChatReplace(characters[this_chid].data?.extensions?.depth_prompt?.prompt?.trim(), name1, name2) || ''; + const depthPromptDepth = characters[this_chid].data?.extensions?.depth_prompt?.depth ?? depth_prompt_depth_default; + setExtensionPrompt('DEPTH_PROMPT', depthPromptText, extension_prompt_types.IN_CHAT, depthPromptDepth, extension_settings.note.allowWIScan); + } + + // Parse example messages + if (!mesExamples.startsWith('')) { + mesExamples = '\n' + mesExamples.trim(); + } + if (mesExamples.replace(//gi, '').trim().length === 0) { + mesExamples = ''; + } + if (mesExamples && isInstruct) { + mesExamples = formatInstructModeExamples(mesExamples, name1, name2); + } + + const exampleSeparator = power_user.context.example_separator ? `${substituteParams(power_user.context.example_separator)}\n` : ''; + const blockHeading = main_api === 'openai' ? 
'\n' : exampleSeparator; + let mesExamplesArray = mesExamples.split(//gi).slice(1).map(block => `${blockHeading}${block.trim()}\n`); + + // First message in fresh 1-on-1 chat reacts to user/character settings changes + if (chat.length) { + chat[0].mes = substituteParams(chat[0].mes); + } + + // Collect messages with usable content + let coreChat = chat.filter(x => !x.is_system); + if (type === 'swipe') { + coreChat.pop(); + } + + coreChat = await Promise.all(coreChat.map(async (chatItem, index) => { + let message = chatItem.mes; + let regexType = chatItem.is_user ? regex_placement.USER_INPUT : regex_placement.AI_OUTPUT; + let options = { isPrompt: true }; + + let regexedMessage = getRegexedString(message, regexType, options); + regexedMessage = await appendFileContent(chatItem, regexedMessage); + + return { + ...chatItem, + mes: regexedMessage, + index, + }; + })); + + // Determine token limit + let this_max_context = getMaxContextSize(); + + if (!dryRun && type !== 'quiet') { + console.debug('Running extension interceptors'); + const aborted = await runGenerationInterceptors(coreChat, this_max_context); + + if (aborted) { + console.debug('Generation aborted by extension interceptors'); + unblockGeneration(); + return Promise.resolve(); + } + } else { + console.debug('Skipping extension interceptors for dry run'); + } + + console.log(`Core/all messages: ${coreChat.length}/${chat.length}`); + + // kingbri MARK: - Make sure the prompt bias isn't the same as the user bias + if ((promptBias && !isUserPromptBias) || power_user.always_force_name2 || main_api == 'novel') { + force_name2 = true; + } + + if (isImpersonate) { + force_name2 = false; + } + + ////////////////////////////////// + + let chat2 = []; + let continue_mag = ''; + for (let i = coreChat.length - 1, j = 0; i >= 0; i--, j++) { + // For OpenAI it's only used in WI + if (main_api == 'openai' && (!world_info || world_info.length === 0)) { + console.debug('No WI, skipping chat2 for OAI'); + break; + } + + chat2[i] = formatMessageHistoryItem(coreChat[j], isInstruct, false); + + if (j === 0 && isInstruct) { + // Reformat with the first output sequence (if any) + chat2[i] = formatMessageHistoryItem(coreChat[j], isInstruct, force_output_sequence.FIRST); + } + + // Do not suffix the message for continuation + if (i === 0 && isContinue) { + if (isInstruct) { + // Reformat with the last output sequence (if any) + chat2[i] = formatMessageHistoryItem(coreChat[j], isInstruct, force_output_sequence.LAST); + } + + chat2[i] = chat2[i].slice(0, chat2[i].lastIndexOf(coreChat[j].mes) + coreChat[j].mes.length); + continue_mag = coreChat[j].mes; + } + } + + // Adjust token limit for Horde + let adjustedParams; + if (main_api == 'koboldhorde' && (horde_settings.auto_adjust_context_length || horde_settings.auto_adjust_response_length)) { + try { + adjustedParams = await adjustHordeGenerationParams(max_context, amount_gen); + } + catch { + unblockGeneration(); + return Promise.resolve(); + } + if (horde_settings.auto_adjust_context_length) { + this_max_context = (adjustedParams.maxContextLength - adjustedParams.maxLength); + } + } + + // Extension added strings + // Set non-WI AN + setFloatingPrompt(); + // Add WI to prompt (and also inject WI to AN value via hijack) + + let { worldInfoString, worldInfoBefore, worldInfoAfter, worldInfoDepth } = await getWorldInfoPrompt(chat2, this_max_context); + + if (skipWIAN !== true) { + console.log('skipWIAN not active, adding WIAN'); + // Add all depth WI entries to prompt + flushWIDepthInjections(); + if 
(Array.isArray(worldInfoDepth)) { + worldInfoDepth.forEach((e) => { + const joinedEntries = e.entries.join('\n'); + setExtensionPrompt(`customDepthWI-${e.depth}`, joinedEntries, extension_prompt_types.IN_CHAT, e.depth); + }); + } + } else { + console.log('skipping WIAN'); + } + + // Add persona description to prompt + addPersonaDescriptionExtensionPrompt(); + // Call combined AN into Generate + let allAnchors = getAllExtensionPrompts(); + const beforeScenarioAnchor = getExtensionPrompt(extension_prompt_types.BEFORE_PROMPT).trimStart(); + const afterScenarioAnchor = getExtensionPrompt(extension_prompt_types.IN_PROMPT); + let zeroDepthAnchor = getExtensionPrompt(extension_prompt_types.IN_CHAT, 0, ' '); + + const storyStringParams = { + description: description, + personality: personality, + persona: persona, + scenario: scenario, + system: isInstruct ? system : '', + char: name2, + user: name1, + wiBefore: worldInfoBefore, + wiAfter: worldInfoAfter, + loreBefore: worldInfoBefore, + loreAfter: worldInfoAfter, + mesExamples: mesExamplesArray.join(''), + }; + + const storyString = renderStoryString(storyStringParams); + + // Story string rendered, safe to remove + if (power_user.strip_examples) { + mesExamplesArray = []; + } + + let oaiMessages = []; + let oaiMessageExamples = []; + + if (main_api === 'openai') { + message_already_generated = ''; + oaiMessages = setOpenAIMessages(coreChat); + oaiMessageExamples = setOpenAIMessageExamples(mesExamplesArray); + } + + // hack for regeneration of the first message + if (chat2.length == 0) { + chat2.push(''); + } + + let examplesString = ''; + let chatString = ''; + let cyclePrompt = ''; + + function getMessagesTokenCount() { + const encodeString = [ + storyString, + examplesString, + chatString, + allAnchors, + quiet_prompt, + cyclePrompt, + ].join('').replace(/\r/gm, ''); + return getTokenCount(encodeString, power_user.token_padding); + } + + // Force pinned examples into the context + let pinExmString; + if (power_user.pin_examples) { + pinExmString = examplesString = mesExamplesArray.join(''); + } + + // Only add the chat in context if past the greeting message + if (isContinue && (chat2.length > 1 || main_api === 'openai')) { + cyclePrompt = chat2.shift(); + } + + // Collect enough messages to fill the context + let arrMes = []; + let tokenCount = getMessagesTokenCount(); + for (let item of chat2) { + // not needed for OAI prompting + if (main_api == 'openai') { + break; + } + + tokenCount += getTokenCount(item.replace(/\r/gm, '')); + chatString = item + chatString; + if (tokenCount < this_max_context) { + arrMes[arrMes.length] = item; + } else { + break; + } + + // Prevent UI thread lock on tokenization + await delay(1); + } + + if (main_api !== 'openai') { + setInContextMessages(arrMes.length, type); + } + + // Estimate how many unpinned example messages fit in the context + tokenCount = getMessagesTokenCount(); + let count_exm_add = 0; + if (!power_user.pin_examples) { + for (let example of mesExamplesArray) { + tokenCount += getTokenCount(example.replace(/\r/gm, '')); + examplesString += example; + if (tokenCount < this_max_context) { + count_exm_add++; + } else { + break; + } + await delay(1); + } + } + + let mesSend = []; + console.debug('calling runGenerate'); + + if (isContinue) { + // Coping mechanism for OAI spacing + const isForceInstruct = isOpenRouterWithInstruct(); + if (main_api === 'openai' && !isForceInstruct && !cyclePrompt.endsWith(' ')) { + cyclePrompt += ' '; + continue_mag += ' '; + } + message_already_generated = 
continue_mag; + } + + const originalType = type; + + if (!dryRun) { + is_send_press = true; + } + + generatedPromptCache += cyclePrompt; + if (generatedPromptCache.length == 0 || type === 'continue') { + console.debug('generating prompt'); + chatString = ''; + arrMes = arrMes.reverse(); + arrMes.forEach(function (item, i, arr) {// For added anchors and others + // OAI doesn't need all of this + if (main_api === 'openai') { + return; + } + + // Cohee: I'm not even sure what this is for anymore + if (i === arrMes.length - 1 && type !== 'continue') { + item = item.replace(/\n?$/, ''); + } + + mesSend[mesSend.length] = { message: item, extensionPrompts: [] }; + }); + } + + let mesExmString = ''; + + function setPromptString() { + if (main_api == 'openai') { + return; + } + + console.debug('--setting Prompt string'); + mesExmString = pinExmString ?? mesExamplesArray.slice(0, count_exm_add).join(''); + + if (mesSend.length) { + mesSend[mesSend.length - 1].message = modifyLastPromptLine(mesSend[mesSend.length - 1].message); + } + } + + function modifyLastPromptLine(lastMesString) { + //#########QUIET PROMPT STUFF PT2############## + + // Add quiet generation prompt at depth 0 + if (quiet_prompt && quiet_prompt.length) { + + // here name1 is forced for all quiet prompts..why? + const name = name1; + //checks if we are in instruct, if so, formats the chat as such, otherwise just adds the quiet prompt + const quietAppend = isInstruct ? formatInstructModeChat(name, quiet_prompt, false, true, '', name1, name2, false) : `\n${quiet_prompt}`; + + //This begins to fix quietPrompts (particularly /sysgen) for instruct + //previously instruct input sequence was being appended to the last chat message w/o '\n' + //and no output sequence was added after the input's content. + //TODO: respect output_sequence vs last_output_sequence settings + //TODO: decide how to prompt this to clarify who is talking 'Narrator', 'System', etc. + if (isInstruct) { + lastMesString += '\n' + quietAppend; // + power_user.instruct.output_sequence + '\n'; + } else { + lastMesString += quietAppend; + } + + + // Ross: bailing out early prevents quiet prompts from respecting other instruct prompt toggles + // for sysgen, SD, and summary this is desireable as it prevents the AI from responding as char.. + // but for idle prompting, we want the flexibility of the other prompt toggles, and to respect them as per settings in the extension + // need a detection for what the quiet prompt is being asked for... + + // Bail out early? + if (quietToLoud !== true) { + return lastMesString; + } + } + + + // Get instruct mode line + if (isInstruct && !isContinue) { + const name = isImpersonate ? 
name1 : name2; + lastMesString += formatInstructModePrompt(name, isImpersonate, promptBias, name1, name2); + } + + // Get non-instruct impersonation line + if (!isInstruct && isImpersonate && !isContinue) { + const name = name1; + if (!lastMesString.endsWith('\n')) { + lastMesString += '\n'; + } + lastMesString += name + ':'; + } + + // Add character's name + // Force name append on continue (if not continuing on user message) + if (!isInstruct && force_name2) { + if (!lastMesString.endsWith('\n')) { + lastMesString += '\n'; + } + if (!isContinue || !(chat[chat.length - 1]?.is_user)) { + lastMesString += `${name2}:`; + } + } + + return lastMesString; + } + + // Clean up the already generated prompt for seamless addition + function cleanupPromptCache(promptCache) { + // Remove the first occurrance of character's name + if (promptCache.trimStart().startsWith(`${name2}:`)) { + promptCache = promptCache.replace(`${name2}:`, '').trimStart(); + } + + // Remove the first occurrance of prompt bias + if (promptCache.trimStart().startsWith(promptBias)) { + promptCache = promptCache.replace(promptBias, ''); + } + + // Add a space if prompt cache doesn't start with one + if (!/^\s/.test(promptCache) && !isInstruct && !isContinue) { + promptCache = ' ' + promptCache; + } + + return promptCache; + } + + function checkPromptSize() { + console.debug('---checking Prompt size'); + setPromptString(); + const prompt = [ + storyString, + mesExmString, + mesSend.join(''), + generatedPromptCache, + allAnchors, + quiet_prompt, + ].join('').replace(/\r/gm, ''); + let thisPromptContextSize = getTokenCount(prompt, power_user.token_padding); + + if (thisPromptContextSize > this_max_context) { //if the prepared prompt is larger than the max context size... + if (count_exm_add > 0) { // ..and we have example mesages.. + count_exm_add--; // remove the example messages... + checkPromptSize(); // and try agin... + } else if (mesSend.length > 0) { // if the chat history is longer than 0 + mesSend.shift(); // remove the first (oldest) chat entry.. + checkPromptSize(); // and check size again.. + } else { + //end + console.debug(`---mesSend.length = ${mesSend.length}`); + } + } + } + + if (generatedPromptCache.length > 0 && main_api !== 'openai') { + console.debug('---Generated Prompt Cache length: ' + generatedPromptCache.length); + checkPromptSize(); + } else { + console.debug('---calling setPromptString ' + generatedPromptCache.length); + setPromptString(); + } + + // Fetches the combined prompt for both negative and positive prompts + const cfgGuidanceScale = getGuidanceScale(); + + // For prompt bit itemization + let mesSendString = ''; + + function getCombinedPrompt(isNegative) { + // Only return if the guidance scale doesn't exist or the value is 1 + // Also don't return if constructing the neutral prompt + if (isNegative && (!cfgGuidanceScale || cfgGuidanceScale?.value === 1)) { + return; + } + + // OAI has its own prompt manager. 
No need to do anything here + if (main_api === 'openai') { + return ''; + } + + // Deep clone + let finalMesSend = structuredClone(mesSend); + + // TODO: Rewrite getExtensionPrompt to not require multiple for loops + // Set all extension prompts where insertion depth > mesSend length + if (finalMesSend.length) { + for (let upperDepth = MAX_INJECTION_DEPTH; upperDepth >= finalMesSend.length; upperDepth--) { + const upperAnchor = getExtensionPrompt(extension_prompt_types.IN_CHAT, upperDepth); + if (upperAnchor && upperAnchor.length) { + finalMesSend[0].extensionPrompts.push(upperAnchor); + } + } + } + + finalMesSend.forEach((mesItem, index) => { + if (index === 0) { + return; + } + + const anchorDepth = Math.abs(index - finalMesSend.length); + // NOTE: Depth injected here! + const extensionAnchor = getExtensionPrompt(extension_prompt_types.IN_CHAT, anchorDepth); + + if (anchorDepth >= 0 && extensionAnchor && extensionAnchor.length) { + mesItem.extensionPrompts.push(extensionAnchor); + } + }); + + // TODO: Move zero-depth anchor append to work like CFG and bias appends + if (zeroDepthAnchor?.length && !isContinue) { + console.debug(/\s/.test(finalMesSend[finalMesSend.length - 1].message.slice(-1))); + finalMesSend[finalMesSend.length - 1].message += + /\s/.test(finalMesSend[finalMesSend.length - 1].message.slice(-1)) + ? zeroDepthAnchor + : `${zeroDepthAnchor}`; + } + + let cfgPrompt = {}; + if (cfgGuidanceScale && cfgGuidanceScale?.value !== 1) { + cfgPrompt = getCfgPrompt(cfgGuidanceScale, isNegative); + } + + if (cfgPrompt && cfgPrompt?.value) { + if (cfgPrompt?.depth === 0) { + finalMesSend[finalMesSend.length - 1].message += + /\s/.test(finalMesSend[finalMesSend.length - 1].message.slice(-1)) + ? cfgPrompt.value + : ` ${cfgPrompt.value}`; + } else { + // TODO: Make all extension prompts use an array/splice method + const lengthDiff = mesSend.length - cfgPrompt.depth; + const cfgDepth = lengthDiff >= 0 ? lengthDiff : 0; + finalMesSend[cfgDepth].extensionPrompts.push(`${cfgPrompt.value}\n`); + } + } + + // Add prompt bias after everything else + // Always run with continue + if (!isInstruct && !isImpersonate) { + if (promptBias.trim().length !== 0) { + finalMesSend[finalMesSend.length - 1].message += + /\s/.test(finalMesSend[finalMesSend.length - 1].message.slice(-1)) + ? promptBias.trimStart() + : ` ${promptBias.trimStart()}`; + } + } + + // Prune from prompt cache if it exists + if (generatedPromptCache.length !== 0) { + generatedPromptCache = cleanupPromptCache(generatedPromptCache); + } + + // Flattens the multiple prompt objects to a string. 
+        const combine = () => {
+            // Right now, everything is suffixed with a newline
+            mesSendString = finalMesSend.map((e) => `${e.extensionPrompts.join('')}${e.message}`).join('');
+
+            // add a custom dingus (if defined)
+            mesSendString = addChatsSeparator(mesSendString);
+
+            // add chat preamble
+            mesSendString = addChatsPreamble(mesSendString);
+
+            let combinedPrompt = beforeScenarioAnchor +
+                storyString +
+                afterScenarioAnchor +
+                mesExmString +
+                mesSendString +
+                generatedPromptCache;
+
+            combinedPrompt = combinedPrompt.replace(/\r/gm, '');
+
+            if (power_user.collapse_newlines) {
+                combinedPrompt = collapseNewlines(combinedPrompt);
+            }
+
+            return combinedPrompt;
+        };
+
+        let data = {
+            api: main_api,
+            combinedPrompt: null,
+            description,
+            personality,
+            persona,
+            scenario,
+            char: name2,
+            user: name1,
+            beforeScenarioAnchor,
+            afterScenarioAnchor,
+            mesExmString,
+            finalMesSend,
+            generatedPromptCache,
+            main: system,
+            jailbreak,
+            naiPreamble: nai_settings.preamble,
+        };
+
+        // Before returning the combined prompt, give available context related information to all subscribers.
+        eventSource.emitAndWait(event_types.GENERATE_BEFORE_COMBINE_PROMPTS, data);
+
+        // If one or multiple subscribers return a value, forfeit the responsibility of flattening the context.
+        return !data.combinedPrompt ? combine() : data.combinedPrompt;
+    }
+
+    // Get the negative prompt first since it has the unmodified mesSend array
+    let negativePrompt = main_api == 'textgenerationwebui' ? getCombinedPrompt(true) : undefined;
+    let finalPrompt = getCombinedPrompt(false);
+
+    // Include the entire guidance scale object
+    const cfgValues = cfgGuidanceScale && cfgGuidanceScale?.value !== 1 ? ({ guidanceScale: cfgGuidanceScale, negativePrompt: negativePrompt }) : null;
+
+    let maxLength = Number(amount_gen); // how many tokens the AI will be requested to generate
+    let thisPromptBits = [];
+
+    // TODO: Make this a switch
+    if (main_api == 'koboldhorde' && horde_settings.auto_adjust_response_length) {
+        maxLength = Math.min(maxLength, adjustedParams.maxLength);
+        maxLength = Math.max(maxLength, MIN_LENGTH); // prevent validation errors
+    }
+
+    let generate_data;
+    if (main_api == 'koboldhorde' || main_api == 'kobold') {
+        generate_data = {
+            prompt: finalPrompt,
+            gui_settings: true,
+            max_length: maxLength,
+            max_context_length: max_context,
+        };
+
+        if (preset_settings != 'gui') {
+            const isHorde = main_api == 'koboldhorde';
+            const presetSettings = koboldai_settings[koboldai_setting_names[preset_settings]];
+            const maxContext = (adjustedParams && horde_settings.auto_adjust_context_length) ? 
adjustedParams.maxContextLength : max_context; + generate_data = getKoboldGenerationData(finalPrompt, presetSettings, maxLength, maxContext, isHorde, type); + } + } + else if (main_api == 'textgenerationwebui') { + generate_data = getTextGenGenerationData(finalPrompt, maxLength, isImpersonate, isContinue, cfgValues, type); + } + else if (main_api == 'novel') { + const presetSettings = novelai_settings[novelai_setting_names[nai_settings.preset_settings_novel]]; + generate_data = getNovelGenerationData(finalPrompt, presetSettings, maxLength, isImpersonate, isContinue, cfgValues, type); + } + else if (main_api == 'openai') { + let [prompt, counts] = await prepareOpenAIMessages({ + name2: name2, + charDescription: description, + charPersonality: personality, + Scenario: scenario, + worldInfoBefore: worldInfoBefore, + worldInfoAfter: worldInfoAfter, + extensionPrompts: extension_prompts, + bias: promptBias, + type: type, + quietPrompt: quiet_prompt, + quietImage: quietImage, + cyclePrompt: cyclePrompt, + systemPromptOverride: system, + jailbreakPromptOverride: jailbreak, + personaDescription: persona, + messages: oaiMessages, + messageExamples: oaiMessageExamples, + }, dryRun); + generate_data = { prompt: prompt }; + + // counts will return false if the user has not enabled the token breakdown feature + if (counts) { + parseTokenCounts(counts, thisPromptBits); + } + + if (!dryRun) { + setInContextMessages(openai_messages_count, type); + } + } + + async function finishGenerating() { + if (dryRun) return { error: 'dryRun' }; + + if (power_user.console_log_prompts) { + console.log(generate_data.prompt); + } + + console.debug('rungenerate calling API'); + + showStopButton(); + + //set array object for prompt token itemization of this message + let currentArrayEntry = Number(thisPromptBits.length - 1); + let additionalPromptStuff = { + ...thisPromptBits[currentArrayEntry], + rawPrompt: generate_data.prompt || generate_data.input, + mesId: getNextMessageId(type), + allAnchors: allAnchors, + summarizeString: (extension_prompts['1_memory']?.value || ''), + authorsNoteString: (extension_prompts['2_floating_prompt']?.value || ''), + smartContextString: (extension_prompts['chromadb']?.value || ''), + worldInfoString: worldInfoString, + storyString: storyString, + beforeScenarioAnchor: beforeScenarioAnchor, + afterScenarioAnchor: afterScenarioAnchor, + examplesString: examplesString, + mesSendString: mesSendString, + generatedPromptCache: generatedPromptCache, + promptBias: promptBias, + finalPrompt: finalPrompt, + charDescription: description, + charPersonality: personality, + scenarioText: scenario, + this_max_context: this_max_context, + padding: power_user.token_padding, + main_api: main_api, + instruction: isInstruct ? substituteParams(power_user.prefer_character_prompt && system ? system : power_user.instruct.system_prompt) : '', + userPersona: (power_user.persona_description || ''), + }; + + thisPromptBits = additionalPromptStuff; + + //console.log(thisPromptBits); + const itemizedIndex = itemizedPrompts.findIndex((item) => item.mesId === thisPromptBits['mesId']); + + if (itemizedIndex !== -1) { + itemizedPrompts[itemizedIndex] = thisPromptBits; + } + else { + itemizedPrompts.push(thisPromptBits); + } + + console.debug(`pushed prompt bits to itemizedPrompts array. 
Length is now: ${itemizedPrompts.length}`);
+
+        if (isStreamingEnabled() && type !== 'quiet') {
+            streamingProcessor = new StreamingProcessor(type, force_name2, generation_started, message_already_generated);
+            if (isContinue) {
+                // Save reply does add cycle text to the prompt, so it's not needed here
+                streamingProcessor.firstMessageText = '';
+            }
+
+            streamingProcessor.generator = await sendStreamingRequest(type, generate_data);
+
+            hideSwipeButtons();
+            let getMessage = await streamingProcessor.generate();
+            let messageChunk = cleanUpMessage(getMessage, isImpersonate, isContinue, false);
+
+            if (isContinue) {
+                getMessage = continue_mag + getMessage;
+            }
+
+            if (streamingProcessor && !streamingProcessor.isStopped && streamingProcessor.isFinished) {
+                await streamingProcessor.onFinishStreaming(streamingProcessor.messageId, getMessage);
+                streamingProcessor = null;
+                triggerAutoContinue(messageChunk, isImpersonate);
+            }
+        } else {
+            return await sendGenerationRequest(type, generate_data);
+        }
+    }
+
+    return finishGenerating().then(onSuccess, onError);
+
+    async function onSuccess(data) {
+        if (!data) return;
+        let messageChunk = '';
+
+        if (data.error == 'dryRun') {
+            generatedPromptCache = '';
+            return;
+        }
+
+        if (!data.error) {
+            //const getData = await response.json();
+            let getMessage = extractMessageFromData(data);
+            let title = extractTitleFromData(data);
+            kobold_horde_model = title;
+
+            const swipes = extractMultiSwipes(data, type);
+
+            messageChunk = cleanUpMessage(getMessage, isImpersonate, isContinue, false);
+
+            if (isContinue) {
+                getMessage = continue_mag + getMessage;
+            }
+
+            //Formatting
+            const displayIncomplete = type === 'quiet' && !quietToLoud;
+            getMessage = cleanUpMessage(getMessage, isImpersonate, isContinue, displayIncomplete);
+
+            if (getMessage.length > 0) {
+                if (isImpersonate) {
+                    $('#send_textarea').val(getMessage).trigger('input');
+                    generatedPromptCache = '';
+                    await eventSource.emit(event_types.IMPERSONATE_READY, getMessage);
+                }
+                else if (type == 'quiet') {
+                    unblockGeneration();
+                    return getMessage;
+                }
+                else {
+                    // Without streaming we'll be having a full message on continuation. Treat it as a last chunk.
+                    if (originalType !== 'continue') {
+                        ({ type, getMessage } = await saveReply(type, getMessage, false, title, swipes));
+                    }
+                    else {
+                        ({ type, getMessage } = await saveReply('appendFinal', getMessage, false, title, swipes));
+                    }
+                }
+
+                if (type !== 'quiet') {
+                    playMessageSound();
+                }
+            } else {
+                // If maxLoops is not passed in (e.g. first time generating), set it to MAX_GENERATION_LOOPS
+                maxLoops ??= MAX_GENERATION_LOOPS;
+
+                if (maxLoops === 0) {
+                    if (type !== 'quiet') {
+                        throwCircuitBreakerError();
+                    }
+                    throw new Error('Generate circuit breaker interruption');
+                }
+
+                // regenerate with character speech reinforced
+                // to make sure we leave on swipe type while also adding the name2 appendage
+                await delay(1000);
+                // The first await is for waiting for the generate to start. 
The second one is waiting for it to finish + const result = await await Generate(type, { automatic_trigger, force_name2: true, quiet_prompt, skipWIAN, force_chid, maxLoops: maxLoops - 1 }); + return result; + } + + if (power_user.auto_swipe) { + console.debug('checking for autoswipeblacklist on non-streaming message'); + function containsBlacklistedWords(getMessage, blacklist, threshold) { + console.debug('checking blacklisted words'); + const regex = new RegExp(`\\b(${blacklist.join('|')})\\b`, 'gi'); + const matches = getMessage.match(regex) || []; + return matches.length >= threshold; + } + + const generatedTextFiltered = (getMessage) => { + if (power_user.auto_swipe_blacklist_threshold) { + if (containsBlacklistedWords(getMessage, power_user.auto_swipe_blacklist, power_user.auto_swipe_blacklist_threshold)) { + console.debug('Generated text has blacklisted words'); + return true; + } + } + + return false; + }; + if (generatedTextFiltered(getMessage)) { + console.debug('swiping right automatically'); + is_send_press = false; + swipe_right(); + // TODO: do we want to resolve after an auto-swipe? + return; + } + } + } else { + generatedPromptCache = ''; + + if (data?.response) { + toastr.error(data.response, 'API Error'); + } + throw data?.response; + } + + console.debug('/api/chats/save called by /Generate'); + await saveChatConditional(); + unblockGeneration(); + streamingProcessor = null; + + if (type !== 'quiet') { + triggerAutoContinue(messageChunk, isImpersonate); + } + } + + function onError(exception) { + if (typeof exception?.error?.message === 'string') { + toastr.error(exception.error.message, 'Error', { timeOut: 10000, extendedTimeOut: 20000 }); + } + + unblockGeneration(); + console.log(exception); + streamingProcessor = null; + throw exception; } } From 789954975464290a34a36c43bb19da9e36c227dd Mon Sep 17 00:00:00 2001 From: valadaptive Date: Mon, 25 Dec 2023 03:29:14 -0500 Subject: [PATCH 06/22] Make "send message from chat box" into a function Right now all it does is handle returning if there's already a message being generated, but I'll extend it with more logic that I want to move out of Generate(). --- public/script.js | 15 +++++++++------ public/scripts/RossAscends-mods.js | 6 +++--- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/public/script.js b/public/script.js index 4cdd68979..ebe0654b5 100644 --- a/public/script.js +++ b/public/script.js @@ -1472,6 +1472,14 @@ export async function reloadCurrentChat() { showSwipeButtons(); } +/** + * Send the message currently typed into the chat box. 
+ */ +export function sendTextareaMessage() { + if (is_send_press) return; + Generate(); +} + function messageFormatting(mes, ch_name, isSystem, isUser) { if (!mes) { return ''; @@ -7971,12 +7979,7 @@ jQuery(async function () { }); $('#send_but').on('click', function () { - if (is_send_press == false) { - // This prevents from running /trigger command with a send button - // But send on Enter doesn't set is_send_press (it is done by the Generate itself) - // is_send_press = true; - Generate(); - } + sendTextareaMessage(); }); //menu buttons setup diff --git a/public/scripts/RossAscends-mods.js b/public/scripts/RossAscends-mods.js index 7a0c648f3..46e76e97f 100644 --- a/public/scripts/RossAscends-mods.js +++ b/public/scripts/RossAscends-mods.js @@ -1,5 +1,4 @@ import { - Generate, characters, online_status, main_api, @@ -18,6 +17,7 @@ import { menu_type, substituteParams, callPopup, + sendTextareaMessage, } from '../script.js'; import { @@ -954,9 +954,9 @@ export function initRossMods() { //Enter to send when send_textarea in focus if ($(':focus').attr('id') === 'send_textarea') { const sendOnEnter = shouldSendOnEnter(); - if (!event.shiftKey && !event.ctrlKey && !event.altKey && event.key == 'Enter' && is_send_press == false && sendOnEnter) { + if (!event.shiftKey && !event.ctrlKey && !event.altKey && event.key == 'Enter' && sendOnEnter) { event.preventDefault(); - Generate(); + sendTextareaMessage(); } } if ($(':focus').attr('id') === 'dialogue_popup_input' && !isMobile()) { From 3c0207f6cbcac0812ba137e27fb6e3792c2e63d7 Mon Sep 17 00:00:00 2001 From: valadaptive Date: Mon, 25 Dec 2023 03:32:26 -0500 Subject: [PATCH 07/22] Move "continue on send" logic out of Generate() --- public/script.js | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/public/script.js b/public/script.js index ebe0654b5..58609f800 100644 --- a/public/script.js +++ b/public/script.js @@ -1477,7 +1477,22 @@ export async function reloadCurrentChat() { */ export function sendTextareaMessage() { if (is_send_press) return; - Generate(); + + let generateType; + // "Continue on send" is activated when the user hits "send" (or presses enter) on an empty chat box, and the last + // message was sent from a character (not the user or the system). + const textareaText = String($('#send_textarea').val()); + if (power_user.continue_on_send && + !textareaText && + !selected_group && + chat.length && + !chat[chat.length - 1]['is_user'] && + !chat[chat.length - 1]['is_system'] + ) { + generateType = 'continue'; + } + + Generate(generateType); } function messageFormatting(mes, ch_name, isSystem, isUser) { @@ -3055,10 +3070,6 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu } } - if (!type && !textareaText && power_user.continue_on_send && !selected_group && chat.length && !chat[chat.length - 1]['is_user'] && !chat[chat.length - 1]['is_system']) { - type = 'continue'; - } - const isContinue = type == 'continue'; // Rewrite the generation timer to account for the time passed for all the continuations. From 0f8a16325b66d36505a68ce6922aec6f0bd09cb9 Mon Sep 17 00:00:00 2001 From: valadaptive Date: Mon, 25 Dec 2023 03:45:34 -0500 Subject: [PATCH 08/22] Extract dryRun early return from finishGenerating This means we only have to handle it in one place rather than two. 
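Concretely, the dry-run guard now sits at the top level of Generate(),
before finishGenerating() is defined:

    if (dryRun) {
        generatedPromptCache = '';
        return Promise.resolve();
    }

so onSuccess() no longer has to recognize a synthetic { error: 'dryRun' }
result.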
---
 public/script.js | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/public/script.js b/public/script.js
index 58609f800..eb6dd72e9 100644
--- a/public/script.js
+++ b/public/script.js
@@ -3771,9 +3771,12 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
         }
     }

-    async function finishGenerating() {
-        if (dryRun) return { error: 'dryRun' };
+    if (dryRun) {
+        generatedPromptCache = '';
+        return Promise.resolve();
+    }

+    async function finishGenerating() {
         if (power_user.console_log_prompts) {
             console.log(generate_data.prompt);
         }
@@ -3858,11 +3861,6 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
         if (!data) return;
         let messageChunk = '';

-        if (data.error == 'dryRun') {
-            generatedPromptCache = '';
-            return;
-        }
-

From 77b02a8d4bd7149e6181015dcc5bbe8cda91963a Mon Sep 17 00:00:00 2001
From: valadaptive
Date: Tue, 26 Dec 2023 12:41:35 -0500
Subject: [PATCH 09/22] Extract data.error check

---
 public/script.js | 176 +++++++++++++++++++++++------------------------
 1 file changed, 88 insertions(+), 88 deletions(-)

diff --git a/public/script.js b/public/script.js
index eb6dd72e9..3ff06988a 100644
--- a/public/script.js
+++ b/public/script.js
@@ -3861,94 +3861,7 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
         if (!data) return;
         let messageChunk = '';

-        if (!data.error) {
-            //const getData = await response.json();
-            let getMessage = extractMessageFromData(data);
-            let title = extractTitleFromData(data);
-            kobold_horde_model = title;
-
-            const swipes = extractMultiSwipes(data, type);
-
-            messageChunk = cleanUpMessage(getMessage, isImpersonate, isContinue, false);
-
-            if (isContinue) {
-                getMessage = continue_mag + getMessage;
-            }
-
-            //Formatting
-            const displayIncomplete = type === 'quiet' && !quietToLoud;
-            getMessage = cleanUpMessage(getMessage, isImpersonate, isContinue, displayIncomplete);
-
-            if (getMessage.length > 0) {
-                if (isImpersonate) {
-                    $('#send_textarea').val(getMessage).trigger('input');
-                    generatedPromptCache = '';
-                    await eventSource.emit(event_types.IMPERSONATE_READY, getMessage);
-                }
-                else if (type == 'quiet') {
-                    unblockGeneration();
-                    return getMessage;
-                }
-                else {
-                    // Without streaming we'll be having a full message on continuation. Treat it as a last chunk.
-                    if (originalType !== 'continue') {
-                        ({ type, getMessage } = await saveReply(type, getMessage, false, title, swipes));
-                    }
-                    else {
-                        ({ type, getMessage } = await saveReply('appendFinal', getMessage, false, title, swipes));
-                    }
-                }
-
-                if (type !== 'quiet') {
-                    playMessageSound();
-                }
-            } else {
-                // If maxLoops is not passed in (e.g. first time generating), set it to MAX_GENERATION_LOOPS
-                maxLoops ??= MAX_GENERATION_LOOPS;
-
-                if (maxLoops === 0) {
-                    if (type !== 'quiet') {
-                        throwCircuitBreakerError();
-                    }
-                    throw new Error('Generate circuit breaker interruption');
-                }
-
-                // regenerate with character speech reinforced
-                // to make sure we leave on swipe type while also adding the name2 appendage
-                await delay(1000);
-                // The first await is for waiting for the generate to start. 
The second one is waiting for it to finish
-                const result = await await Generate(type, { automatic_trigger, force_name2: true, quiet_prompt, skipWIAN, force_chid, maxLoops: maxLoops - 1 });
-                return result;
-            }
-
-            if (power_user.auto_swipe) {
-                console.debug('checking for autoswipeblacklist on non-streaming message');
-                function containsBlacklistedWords(getMessage, blacklist, threshold) {
-                    console.debug('checking blacklisted words');
-                    const regex = new RegExp(`\\b(${blacklist.join('|')})\\b`, 'gi');
-                    const matches = getMessage.match(regex) || [];
-                    return matches.length >= threshold;
-                }
-
-                const generatedTextFiltered = (getMessage) => {
-                    if (power_user.auto_swipe_blacklist_threshold) {
-                        if (containsBlacklistedWords(getMessage, power_user.auto_swipe_blacklist, power_user.auto_swipe_blacklist_threshold)) {
-                            console.debug('Generated text has blacklisted words');
-                            return true;
-                        }
-                    }
-
-                    return false;
-                };
-                if (generatedTextFiltered(getMessage)) {
-                    console.debug('swiping right automatically');
-                    is_send_press = false;
-                    swipe_right();
-                    // TODO: do we want to resolve after an auto-swipe?
-                    return;
-                }
-            }
-        } else {
+        if (data.error) {
             generatedPromptCache = '';

             if (data?.response) {
@@ -3957,6 +3870,93 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
             throw data?.response;
         }

+        //const getData = await response.json();
+        let getMessage = extractMessageFromData(data);
+        let title = extractTitleFromData(data);
+        kobold_horde_model = title;
+
+        const swipes = extractMultiSwipes(data, type);
+
+        messageChunk = cleanUpMessage(getMessage, isImpersonate, isContinue, false);
+
+        if (isContinue) {
+            getMessage = continue_mag + getMessage;
+        }
+
+        //Formatting
+        const displayIncomplete = type === 'quiet' && !quietToLoud;
+        getMessage = cleanUpMessage(getMessage, isImpersonate, isContinue, displayIncomplete);
+
+        if (getMessage.length > 0) {
+            if (isImpersonate) {
+                $('#send_textarea').val(getMessage).trigger('input');
+                generatedPromptCache = '';
+                await eventSource.emit(event_types.IMPERSONATE_READY, getMessage);
+            }
+            else if (type == 'quiet') {
+                unblockGeneration();
+                return getMessage;
+            }
+            else {
+                // Without streaming we'll be having a full message on continuation. Treat it as a last chunk.
+                if (originalType !== 'continue') {
+                    ({ type, getMessage } = await saveReply(type, getMessage, false, title, swipes));
+                }
+                else {
+                    ({ type, getMessage } = await saveReply('appendFinal', getMessage, false, title, swipes));
+                }
+            }
+
+            if (type !== 'quiet') {
+                playMessageSound();
+            }
+        } else {
+            // If maxLoops is not passed in (e.g. first time generating), set it to MAX_GENERATION_LOOPS
+            maxLoops ??= MAX_GENERATION_LOOPS;
+
+            if (maxLoops === 0) {
+                if (type !== 'quiet') {
+                    throwCircuitBreakerError();
+                }
+                throw new Error('Generate circuit breaker interruption');
+            }
+
+            // regenerate with character speech reinforced
+            // to make sure we leave on swipe type while also adding the name2 appendage
+            await delay(1000);
+            // The first await is for waiting for the generate to start. 
The second one is waiting for it to finish + const result = await await Generate(type, { automatic_trigger, force_name2: true, quiet_prompt, skipWIAN, force_chid, maxLoops: maxLoops - 1 }); + return result; + } + + if (power_user.auto_swipe) { + console.debug('checking for autoswipeblacklist on non-streaming message'); + function containsBlacklistedWords(getMessage, blacklist, threshold) { + console.debug('checking blacklisted words'); + const regex = new RegExp(`\\b(${blacklist.join('|')})\\b`, 'gi'); + const matches = getMessage.match(regex) || []; + return matches.length >= threshold; + } + + const generatedTextFiltered = (getMessage) => { + if (power_user.auto_swipe_blacklist_threshold) { + if (containsBlacklistedWords(getMessage, power_user.auto_swipe_blacklist, power_user.auto_swipe_blacklist_threshold)) { + console.debug('Generated text has blacklisted words'); + return true; + } + } + + return false; + }; + if (generatedTextFiltered(getMessage)) { + console.debug('swiping right automatically'); + is_send_press = false; + swipe_right(); + // TODO: do we want to resolve after an auto-swipe? + return; + } + } + console.debug('/api/chats/save called by /Generate'); await saveChatConditional(); unblockGeneration(); From f9745091f56ee235e1ae42ea80162abfe82df3fd Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Wed, 27 Dec 2023 11:04:26 +0200 Subject: [PATCH 10/22] Update readme.md --- .github/readme.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/readme.md b/.github/readme.md index 5ac31ed28..8727cb69c 100644 --- a/.github/readme.md +++ b/.github/readme.md @@ -85,7 +85,7 @@ SillyTavern has extensibility support, with some additional AI modules hosted vi * Stable Diffusion image generation (5 chat-related presets plus 'free mode') * Text-to-speech for AI response messages (via ElevenLabs, Silero, or the OS's System TTS) -A full list of included extensions and tutorials on how to use them can be found in the [Docs](https://docs.sillytavern.app/extras/extensions/). +A full list of included extensions and tutorials on how to use them can be found in the [Docs](https://docs.sillytavern.app/). 
## UI/CSS/Quality of Life tweaks by RossAscends From 6508a2d92474017aa21f5ac363effd5566995523 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Wed, 27 Dec 2023 11:04:59 +0200 Subject: [PATCH 11/22] Update readme-zh_cn.md --- .github/readme-zh_cn.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/readme-zh_cn.md b/.github/readme-zh_cn.md index efb77c517..18802cc93 100644 --- a/.github/readme-zh_cn.md +++ b/.github/readme-zh_cn.md @@ -82,7 +82,7 @@ SillyTavern 支持扩展服务,一些额外的人工智能模块可通过 [Sil * 文本图像生成(5 预设,以及 "自由模式") * 聊天信息的文字转语音(通过 ElevenLabs、Silero 或操作系统的语音生成) -扩展服务的完整功能介绍和使用教程,请参阅 [Docs](https://docs.sillytavern.app/extras/extensions/)。 +扩展服务的完整功能介绍和使用教程,请参阅 [Docs](https://docs.sillytavern.app/)。 ## 界面/CSS/性能,由 RossAscends 调整并优化 From 1bd8ef6054df00d1499ce3b46e5386b99cdc14eb Mon Sep 17 00:00:00 2001 From: Ikko Eltociear Ashimine Date: Thu, 28 Dec 2023 15:23:03 +0900 Subject: [PATCH 12/22] Add Japanese README --- .github/readme-ja_jp.md | 314 ++++++++++++++++++++++++++++++++++++++++ .github/readme-zh_cn.md | 2 +- .github/readme.md | 2 +- 3 files changed, 316 insertions(+), 2 deletions(-) create mode 100644 .github/readme-ja_jp.md diff --git a/.github/readme-ja_jp.md b/.github/readme-ja_jp.md new file mode 100644 index 000000000..59451dd91 --- /dev/null +++ b/.github/readme-ja_jp.md @@ -0,0 +1,314 @@ +[English](readme.md) | [中文](readme-zh_cn.md) | 日本語 + +![SillyTavern-Banner](https://github.com/SillyTavern/SillyTavern/assets/18619528/c2be4c3f-aada-4f64-87a3-ae35a68b61a4) + +モバイルフレンドリーなレイアウト、マルチAPI(KoboldAI/CPP、Horde、NovelAI、Ooba、OpenAI、OpenRouter、Claude、Scale)、VN ライクな Waifu モード、Stable Diffusion、TTS、WorldInfo(伝承本)、カスタマイズ可能な UI、自動翻訳、あなたにとって必要とする以上のプロンプトオプション+サードパーティの拡張機能をインストールする機能。 + +[TavernAI](https://github.com/TavernAI/TavernAI) 1.2.8 のフォークに基づいています + +## 重要ニュース! + +1. 私たちは[ドキュメント website](https://docs.sillytavern.app/) を作成し、ほとんどの質問にお答えしています。 + +2. アップデートしたらに拡張機能を見失った?リリースバージョン 1.10.6 以降、これまで内蔵されていた拡張機能のほとんどがダウンロード可能なアドオンに変更されました。ダウンロードは、拡張機能パネル(トップバーのスタックドブロックアイコン)にある内蔵の "Download Extensions and Assets" メニューから行えます。 + +### Cohee、RossAscends、SillyTavern コミュニティがお届けします + +### SillyTavern または TavernAI とは何ですか? + +SillyTavern は、あなたのコンピュータ(および Android スマホ)にインストールできるユーザーインターフェイスで、テキスト生成 AI と対話したり、あなたやコミュニティが作成したキャラクターとチャットやロールプレイをすることができます。 + +SillyTavern は TavernAI 1.2.8 のフォークで、より活発な開発が行われており、多くの主要な機能が追加されています。現時点では、これらは完全に独立したプログラムと考えることができます。 + +### ブランチ + +SillyTavern は、すべてのユーザーにスムーズな体験を保証するために、2 つのブランチシステムを使用して開発されています。 + +* release -🌟 **ほとんどのユーザーにお勧め。** これは最も安定した推奨ブランチで、メジャーリリースがプッシュされた時のみ更新されます。大半のユーザーに適しています。 +* staging - ⚠️ **カジュアルな使用にはお勧めしない。** このブランチには最新の機能がありますが、いつ壊れるかわからないので注意してください。パワーユーザーとマニア向けです。 + +git CLI の使い方に慣れていなかったり、ブランチが何なのかわからなかったりしても、心配はいりません!リリースブランチが常に望ましい選択肢となります。 + +### Tavern 以外に何が必要ですか? + +Tavern は単なるユーザーインターフェイスなので、それだけでは役に立ちません。ロールプレイキャラクターとして機能する AI システムのバックエンドにアクセスする必要があります。様々なバックエンドがサポートされています: OpenAPI API (GPT)、KoboldAI (ローカルまたは Google Colab 上で動作)、その他。詳しくは [FAQ](https://docs.sillytavern.app/usage/faq/) をご覧ください。 + +### Tavern を実行するには、強力な PC が必要ですか? 
+ +Tavern は単なるユーザーインターフェイスであり、必要なハードウェアはごくわずかです。パワフルである必要があるのは、AI システムのバックエンドです。 + +## モバイルサポート + +> **注** + +> **このフォークは Termux を使って Android スマホでネイティブに実行できます。ArroganceComplex#2659 のガイドを参照してください:** + + + +## ご質問やご提案 + +### コミュニティ Discord サーバーを開設しました + +サポートを受け、お気に入りのキャラクターやプロンプトを共有する: + +### [参加](https://discord.gg/RZdyAEUPvj) + +*** + +開発者と直接連絡を取る: + +* Discord: cohee または rossascends +* Reddit: /u/RossAscends または /u/sillylossy +* [GitHub issue を投稿](https://github.com/SillyTavern/SillyTavern/issues) + +## このバージョンには以下が含まれる + +* 大幅に修正された TavernAI 1.2.8 (コードの 50% 以上が書き換えまたは最適化されています) +* スワイプ +* グループチャット: キャラクター同士が会話できるマルチボットルーム +* チャットチェックポイント / ブランチ +* 高度なKoboldAI / TextGen生成設定と、コミュニティが作成した多くのプリセット +* ワールド情報サポート: 豊富な伝承を作成したり、キャラクターカードにトークンを保存したりできます +* [OpenRouter](https://openrouter.ai) 各種 API(Claude、GPT-4/3.5 など)の接続 +* [Oobabooga's TextGen WebUI](https://github.com/oobabooga/text-generation-webui) API 接続 +* [AI Horde](https://horde.koboldai.net/) 接続 +* プロンプト生成フォーマットの調整 + +## 拡張機能 + +SillyTavern は拡張性をサポートしており、[SillyTavern Extras API](https://github.com/SillyTavern/SillyTavern-extras) を介していくつかの追加AIモジュールをホストしています + +* 作者ノート/キャラクターバイアス +* キャラクターの感情表現(スプライト) +* チャット履歴の自動サマリー +* チャットに画像を送り、AI が内容を解釈する +* Stable Diffusion 画像生成 (5 つのチャット関連プリセットと 'free mode') +* AI 応答メッセージの音声合成(ElevenLabs、Silero、または OS のシステム TTS 経由) + +含まれている拡張機能の完全なリストとその使い方のチュートリアルは [Docs](https://docs.sillytavern.app/) にあります。 + +## RossAscends による UI/CSS/クオリティオブライフの調整 + +* iOS 用に最適化されたモバイル UI で、ホーム画面へのショートカット保存とフルスクリーンモードでの起動をサポート。 +* ホットキー + * Up = チャットの最後のメッセージを編集する + * Ctrl+Up = チャットで最後のユーザーメッセージを編集する + * Left = 左スワイプ + * Right = 右スワイプ (注: チャットバーに何か入力されている場合、スワイプホットキーが無効になります) + * Ctrl+Left = ローカルに保存された変数を見る(ブラウザのコンソールウィンドウにて) + * Enter (チャットバー選択時) = AI にメッセージを送る + * Ctrl+Enter = 最後の AI 応答を再生成する + +* ユーザー名の変更と文字の削除でページが更新されなくなりました。 + +* ページロード時に API に自動的に接続するかどうかを切り替えます。 +* ページの読み込み時に、最近見た文字を自動的に読み込むかどうかを切り替えます。 +* より良いトークンカウンター - 保存されていないキャラクターに対して機能し、永続的なトークンと一時的なトークンの両方を表示する。 + +* より良い過去のチャット + * 新しいチャットのファイル名は、"(文字) - (作成日)" という読みやすい形式で保存されます + * チャットのプレビューが 40 文字から 300 文字に増加。 + * 文字リストの並べ替えに複数のオプション(名前順、作成日順、チャットサイズ順)があります。 + +* デフォルトでは、左右の設定パネルはクリックすると閉じます。 +* ナビパネルのロックをクリックすると、パネルが開いたままになり、この設定はセッションをまたいで記憶されます。 +* ナビパネルの開閉状態もセッションをまたいで保存されます。 + +* カスタマイズ可能なチャット UI: + * 新しいメッセージが届いたときにサウンドを再生する + * 丸型、長方形のアバタースタイルの切り替え + * デスクトップのチャットウィンドウを広くする + * オプションの半透明ガラス風パネル + * 'メインテキスト'、'引用テキスト'、'斜体テキスト'のページカラーをカスタマイズ可能。 + * カスタマイズ可能な UI 背景色とぼかし量 + +## インストール + +*注: このソフトウェアはローカルにインストールすることを目的としており、colab や他のクラウドノートブックサービス上では十分にテストされていません。* + +> **警告** + +> WINDOWS が管理しているフォルダ(Program Files、System32 など)にはインストールしないでください + +> START.BAT を管理者権限で実行しないでください + +### Windows + +Git 経由でのインストール(更新を容易にするため推奨) + +きれいな写真付きのわかりやすいガイド: + + + 1. [NodeJS](https://nodejs.org/en) をインストールする(最新の LTS 版を推奨) + 2. [GitHub Desktop](https://central.github.com/deployments/desktop/desktop/latest/win32) をインストールする + 3. Windows エクスプローラーを開く (`Win+E`) + 4. Windows によって制御または監視されていないフォルダを参照または作成する。(例: C:\MySpecialFolder\) + 5. 上部のアドレスバーをクリックし、`cmd` と入力して Enter キーを押し、そのフォルダーの中にコマンドプロンプトを開きます。 + 6. 黒いボックス(コマンドプロンプト)がポップアップしたら、そこに以下のいずれかを入力し、Enter を押します: + +* Release ブランチの場合: `git clone https://github.com/SillyTavern/SillyTavern -b release` +* Staging ブランチの場合: `git clone https://github.com/SillyTavern/SillyTavern -b staging` + + 7. すべてをクローンしたら、`Start.bat` をダブルクリックして、NodeJS に要件をインストールさせる。 + 8. サーバーが起動し、SillyTavern がブラウザにポップアップ表示されます。 + +ZIP ダウンロードによるインストール(推奨しない) + + 1. [NodeJS](https://nodejs.org/en) をインストールする(最新の LTS 版を推奨) + 2. 
GitHub のリポジトリから zip をダウンロードする。(`ソースコード(zip)` は [Releases](https://github.com/SillyTavern/SillyTavern/releases/latest) から入手) + 3. お好きなフォルダに解凍してください + 4. `Start.bat` をダブルクリックまたはコマンドラインで実行する。 + 5. サーバーがあなたのためにすべてを準備したら、ブラウザのタブを開きます。 + +### Linux + + 1. `node -v` を実行して、Node.js v18 以上(最新の [LTS バージョン](https://nodejs.org/en/download/) を推奨)がインストールされていることを確認してください。 +または、[Node Version Manager](https://github.com/nvm-sh/nvm#installing-and-updating) スクリプトを使用して、迅速かつ簡単に Node のインストールを管理します。 + 2. `start.sh` スクリプトを実行する。 + 3. お楽しみください。 + +## API キー管理 + +SillyTavern は API キーをサーバーディレクトリの `secrets.json` ファイルに保存します。 + +デフォルトでは、入力後にページをリロードしても、フロントエンドには表示されません。 + +API ブロックのボタンをクリックして、キーを閲覧できるようにする: + +1. ファイル `config.yaml` で `allowKeysExposure` の値を `true` に設定する。 +2. SillyTavern サーバを再起動します。 + +## リモート接続 + +SillyTavern をスマホで使用しながら、同じ Wifi ネットワーク上で ST サーバーを PC で実行したい場合に使用します。 + +しかし、これはどこからでもリモート接続を許可するために使用することができます。 + +**重要: SillyTavern はシングルユーザーのプログラムなので、ログインすれば誰でもすべてのキャラクターとチャットを見ることができ、UI 内で設定を変更することができます。** + +### 1. ホワイトリスト IP の管理 + +* SillyTavern のベースインストールフォルダ内に `whitelist.txt` という新しいテキストファイルを作成します。 +* テキストエディタでこのファイルを開き、接続を許可したい IP のリストを追加します。 + +*個々の IP とワイルドカード IP 範囲の両方が受け入れられる。例:* + +```txt +192.168.0.1 +192.168.0.20 +``` + +または + +```txt +192.168.0.* +``` + +(上記のワイルドカード IP 範囲は、ローカルネットワーク上のどのデバイスでも) + +CIDR マスクも受け付ける(例:10.0.0.0/24)。 + +* `whitelist.txt` ファイルを保存する。 +* TAI サーバーを再起動する。 + +これでファイルに指定された IP を持つデバイスが接続できるようになる。 + +*注: `config.yaml` にも `whitelist` 配列があり、同じように使うことができるが、`whitelist.txt` が存在する場合、この配列は無視される。* + +### 2. ST ホストマシンの IP の取得 + +ホワイトリストの設定後、ST ホストデバイスの IP が必要になります。 + +ST ホストデバイスが同じ無線 LAN ネットワーク上にある場合、ST ホストの内部無線 LAN IP を使用します: + +* Windows の場合: ウィンドウズボタン > 検索バーに `cmd.exe` と入力 > コンソールに `ipconfig` と入力して Enter > `IPv4` のリストを探す。 + +同じネットワーク上にいない状態で、ホストしているSTに接続したい場合は、STホスト機器のパブリックIPが必要です。 + +* ST ホストデバイスを使用中に、[このページ](https://whatismyipaddress.com/)にアクセスし、`IPv4` を探してください。これはリモートデバイスからの接続に使用するものです。 + +### 3. リモートデバイスを ST ホストマシンに接続します。 + +最終的に使用する IP が何であれ、その IP アドレスとポート番号をリモートデバイスのウェブブラウザに入力します。 + +同じ無線 LAN ネットワーク上の ST ホストの典型的なアドレスは以下のようになります: + +`http://192.168.0.5:8000` + +http:// を使用し、https:// は使用しないでください + +### ST をすべての IP に開放する + +これはお勧めしませんが、`config.yaml` を開き、`whitelistMode` を `false` に変更してください。 + +SillyTavern のベースインストールフォルダにある `whitelist.txt` が存在する場合は削除(または名前の変更)する必要があります。 + +これは通常安全ではないので、これを行う際にはユーザー名とパスワードを設定する必要があります。 + +ユーザー名とパスワードは `config.yaml` で設定します。 + +ST サーバを再起動すると、ユーザ名とパスワードさえ知っていれば、IP に関係なくどのデバイスでも ST サーバに接続できるようになる。 + +### まだ接続できませんか? + +* `config.yaml` で見つかったポートに対して、インバウンド/アウトバウンドのファイアウォールルールを作成します。これをルーターのポートフォワーディングと間違えないでください。そうしないと、誰かがあなたのチャットログを見つける可能性があり、それはマジで止めましょう。 +* 設定 > ネットワークとインターネット > イーサネットで、プライベートネットワークのプロファイルタイプを有効にします。そうしないと、前述のファイアウォールルールを使っても接続できません。 + +## パフォーマンスに問題がありますか? + +ユーザー設定パネルでブラー効果なし(高速 UI)モードを有効にしてみてください。 + +## このプロジェクトが好きです!どうすればコントリビュートできますか? + +### やるべきこと + +1. プルリクエストを送る +2. 確立されたテンプレートを使って機能提案と課題レポートを送る +3. 何か質問する前に、readme ファイルや組み込みのドキュメントを読んでください + +### やらないべきこと + +1. 金銭の寄付を申し出る +2. 何の脈絡もなくバグ報告を送る +3. すでに何度も回答されている質問をする + +## 古い背景画像はどこにありますか? 
+ +100% オリジナルコンテンツのみのポリシーに移行しているため、古い背景画像はこのリポジトリから削除されました。 + +アーカイブはこちら: + + + +## スクリーンショット + +image +image + +## ライセンスとクレジット + +**このプログラムは有用であることを願って配布されていますが、いかなる保証もありません; +また、商品性または特定目的への適合性についての黙示の保証もありません。 +詳細は GNU Affero General Public License をご覧ください。** + +* Humi によるTAI Base: 不明ライセンス +* Cohee の修正と派生コード: AGPL v3 +* RossAscends の追加: AGPL v3 +* CncAnon の TavernAITurbo 改造の一部: 不明ライセンス +* kingbri のさまざまなコミットと提案 () +* city_unit の拡張機能と様々な QoL 機能 () +* StefanDanielSchwarz のさまざまなコミットとバグ報告 () +* PepperTaco の作品にインスパイアされた Waifu モード () +* ピグマリオン大学の皆さん、素晴らしいテスターとしてクールな機能を提案してくれてありがとう! +* TextGen のプリセットをコンパイルしてくれた obabooga に感謝 +* KAI Lite の KoboldAI プリセット: +* Google による Noto Sans フォント(OFLライセンス) +* Font Awesome によるアイコンテーマ (アイコン: CC BY 4.0、フォント: SIL OFL 1.1、コード: MIT License) +* ZeldaFan0225 による AI Horde クライアントライブラリ: +* AlpinDale による Linux 起動スクリプト +* FAQ を提供してくれた paniphons に感謝 +* 10K ディスコード・ユーザー記念背景 by @kallmeflocc +* デフォルトコンテンツ(キャラクターと伝承書)の提供: @OtisAlejandro、@RossAscends、@kallmeflocc +* @doloroushyeonse による韓国語翻訳 +* k_euler_a による Horde のサポート +* [@XXpE3](https://github.com/XXpE3) による中国語翻訳、中国語 ISSUES の連絡先は @XXpE3 diff --git a/.github/readme-zh_cn.md b/.github/readme-zh_cn.md index 18802cc93..bb90cd940 100644 --- a/.github/readme-zh_cn.md +++ b/.github/readme-zh_cn.md @@ -1,4 +1,4 @@ -[English](readme.md) | 中文 +[English](readme.md) | 中文 | [日本語](readme-ja_jp.md) ![image](https://github.com/SillyTavern/SillyTavern/assets/18619528/c2be4c3f-aada-4f64-87a3-ae35a68b61a4) diff --git a/.github/readme.md b/.github/readme.md index 8727cb69c..1c6dbdf4c 100644 --- a/.github/readme.md +++ b/.github/readme.md @@ -1,4 +1,4 @@ -English | [中文](readme-zh_cn.md) +English | [中文](readme-zh_cn.md) | [日本語](readme-ja_jp.md) ![SillyTavern-Banner](https://github.com/SillyTavern/SillyTavern/assets/18619528/c2be4c3f-aada-4f64-87a3-ae35a68b61a4) From a2aa8ba6a0d8a6ecf9fabc6bdb93465498511cdc Mon Sep 17 00:00:00 2001 From: LenAnderson Date: Sat, 30 Dec 2023 11:35:10 +0000 Subject: [PATCH 13/22] add export and slash command for last set expressions --- public/scripts/extensions/expressions/index.js | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/public/scripts/extensions/expressions/index.js b/public/scripts/extensions/expressions/index.js index 3e457bcc0..5293bbb64 100644 --- a/public/scripts/extensions/expressions/index.js +++ b/public/scripts/extensions/expressions/index.js @@ -49,6 +49,7 @@ let lastMessage = null; let spriteCache = {}; let inApiCall = false; let lastServerResponseTime = 0; +export let lastExpression = {}; function isVisualNovelMode() { return Boolean(!isMobile() && power_user.waifuMode && getContext().groupId); @@ -692,6 +693,7 @@ function getFolderNameByMessage(message) { } async function sendExpressionCall(name, expression, force, vnMode) { + lastExpression[name.split('/')[0]] = expression; if (!vnMode) { vnMode = isVisualNovelMode(); } @@ -1476,6 +1478,7 @@ function setExpressionOverrideHtml(forceClear = false) { // character changed removeExpression(); spriteCache = {}; + lastExpression = {}; //clear expression let imgElement = document.getElementById('expression-image'); @@ -1501,4 +1504,5 @@ function setExpressionOverrideHtml(forceClear = false) { eventSource.on(event_types.GROUP_UPDATED, updateVisualNovelModeDebounced); registerSlashCommand('sprite', setSpriteSlashCommand, ['emote'], '(spriteId) – force sets the sprite for the current character', true, true); registerSlashCommand('spriteoverride', setSpriteSetCommand, ['costume'], '(optional folder) – sets an override sprite folder for the current 
character. If the name starts with a slash or a backslash, selects a sub-folder in the character-named folder. Empty value to reset to default.', true, true); + registerSlashCommand('lastsprite', (_, value)=>lastExpression[value.trim()] ?? '', [], '(charName) – Returns the last set sprite / expression for the named character.', true, true); })(); From 520fa99a0004f9f13d9590994918ffa8696ac371 Mon Sep 17 00:00:00 2001 From: LenAnderson Date: Sat, 30 Dec 2023 11:42:27 +0000 Subject: [PATCH 14/22] don't trim trailing whitespace in markdown files --- .editorconfig | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.editorconfig b/.editorconfig index 2a13caebf..c170118f9 100644 --- a/.editorconfig +++ b/.editorconfig @@ -8,4 +8,7 @@ trim_trailing_whitespace = true [*.{js, conf, json}] charset = utf-8 indent_style = space -indent_size = 4 \ No newline at end of file +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false From a2e4dc2950c52f8bc45c89cfbcbff016de7483d8 Mon Sep 17 00:00:00 2001 From: Cohee <18619528+Cohee1207@users.noreply.github.com> Date: Sun, 31 Dec 2023 04:00:04 +0200 Subject: [PATCH 15/22] Add chunking of vector storage messages --- public/scripts/extensions/vectors/index.js | 76 ++++++++++++++++++- .../scripts/extensions/vectors/manifest.json | 2 +- .../scripts/extensions/vectors/settings.html | 24 ++++-- public/scripts/extensions/vectors/style.css | 4 + 4 files changed, 96 insertions(+), 10 deletions(-) create mode 100644 public/scripts/extensions/vectors/style.css diff --git a/public/scripts/extensions/vectors/index.js b/public/scripts/extensions/vectors/index.js index 9e8777333..c9b28270a 100644 --- a/public/scripts/extensions/vectors/index.js +++ b/public/scripts/extensions/vectors/index.js @@ -1,6 +1,6 @@ import { eventSource, event_types, extension_prompt_types, getCurrentChatId, getRequestHeaders, is_send_press, saveSettingsDebounced, setExtensionPrompt, substituteParams } from '../../../script.js'; import { ModuleWorkerWrapper, extension_settings, getContext, renderExtensionTemplate } from '../../extensions.js'; -import { collapseNewlines, power_user, ui_mode } from '../../power-user.js'; +import { collapseNewlines } from '../../power-user.js'; import { SECRET_KEYS, secret_state } from '../../secrets.js'; import { debounce, getStringHash as calculateHash, waitUntilCondition, onlyUnique, splitRecursive } from '../../utils.js'; @@ -21,6 +21,7 @@ const settings = { protect: 5, insert: 3, query: 2, + message_chunk_size: 400, // For files enabled_files: false, @@ -87,6 +88,29 @@ async function onVectorizeAllClick() { let syncBlocked = false; +/** + * Splits messages into chunks before inserting them into the vector index. 
+ * @param {object[]} items Array of vector items
+ * @returns {object[]} Array of vector items (possibly chunked)
+ */
+function splitByChunks(items) {
+    if (settings.message_chunk_size <= 0) {
+        return items;
+    }
+
+    const chunkedItems = [];
+
+    for (const item of items) {
+        const chunks = splitRecursive(item.text, settings.message_chunk_size);
+        for (const chunk of chunks) {
+            const chunkedItem = { ...item, text: chunk };
+            chunkedItems.push(chunkedItem);
+        }
+    }
+
+    return chunkedItems;
+}
+
 async function synchronizeChat(batchSize = 5) {
     if (!settings.enabled_chats) {
         return -1;
@@ -116,8 +140,9 @@ async function synchronizeChat(batchSize = 5) {
     const deletedHashes = hashesInCollection.filter(x => !hashedMessages.some(y => y.hash === x));

     if (newVectorItems.length > 0) {
+        const chunkedBatch = splitByChunks(newVectorItems.slice(0, batchSize));
         console.log(`Vectors: Found ${newVectorItems.length} new items. Processing ${batchSize}...`);
-        await insertVectorItems(chatId, newVectorItems.slice(0, batchSize));
+        await insertVectorItems(chatId, chunkedBatch);
     }

     if (deletedHashes.length > 0) {
@@ -492,6 +517,43 @@ function toggleSettings() {
     $('#vectors_chats_settings').toggle(!!settings.enabled_chats);
 }

+async function onPurgeClick() {
+    const chatId = getCurrentChatId();
+    if (!chatId) {
+        toastr.info('No chat selected', 'Purge aborted');
+        return;
+    }
+    await purgeVectorIndex(chatId);
+    toastr.success('Vector index purged', 'Purge successful');
+}
+
+async function onViewStatsClick() {
+    const chatId = getCurrentChatId();
+    if (!chatId) {
+        toastr.info('No chat selected');
+        return;
+    }
+
+    const hashesInCollection = await getSavedHashes(chatId);
+    const totalHashes = hashesInCollection.length;
+    const uniqueHashes = hashesInCollection.filter(onlyUnique).length;
+
+    toastr.info(`Total hashes: ${totalHashes}<br>
+        Unique hashes: ${uniqueHashes}<br><br>
+ I'll mark collected messages with a green circle.`, + `Stats for chat ${chatId}`, + { timeOut: 10000, escapeHtml: false }); + + const chat = getContext().chat; + for (const message of chat) { + if (hashesInCollection.includes(getStringHash(message.mes))) { + const messageElement = $(`.mes[mesid="${chat.indexOf(message)}"]`); + messageElement.addClass('vectorized'); + } + } + +} + jQuery(async () => { if (!extension_settings.vectors) { extension_settings.vectors = settings; @@ -554,9 +616,9 @@ jQuery(async () => { Object.assign(extension_settings.vectors, settings); saveSettingsDebounced(); }); - $('#vectors_advanced_settings').toggleClass('displayNone', power_user.ui_mode === ui_mode.SIMPLE); - $('#vectors_vectorize_all').on('click', onVectorizeAllClick); + $('#vectors_purge').on('click', onPurgeClick); + $('#vectors_view_stats').on('click', onViewStatsClick); $('#vectors_size_threshold').val(settings.size_threshold).on('input', () => { settings.size_threshold = Number($('#vectors_size_threshold').val()); @@ -582,6 +644,12 @@ jQuery(async () => { saveSettingsDebounced(); }); + $('#vectors_message_chunk_size').val(settings.message_chunk_size).on('input', () => { + settings.message_chunk_size = Number($('#vectors_message_chunk_size').val()); + Object.assign(extension_settings.vectors, settings); + saveSettingsDebounced(); + }); + toggleSettings(); eventSource.on(event_types.MESSAGE_DELETED, onChatEvent); eventSource.on(event_types.MESSAGE_EDITED, onChatEvent); diff --git a/public/scripts/extensions/vectors/manifest.json b/public/scripts/extensions/vectors/manifest.json index 7f84c2147..48b40a173 100644 --- a/public/scripts/extensions/vectors/manifest.json +++ b/public/scripts/extensions/vectors/manifest.json @@ -5,7 +5,7 @@ "optional": [], "generate_interceptor": "vectors_rearrangeChat", "js": "index.js", - "css": "", + "css": "style.css", "author": "Cohee#1207", "version": "1.0.0", "homePage": "https://github.com/SillyTavern/SillyTavern" diff --git a/public/scripts/extensions/vectors/settings.html b/public/scripts/extensions/vectors/settings.html index b1d74c83d..626084d58 100644 --- a/public/scripts/extensions/vectors/settings.html +++ b/public/scripts/extensions/vectors/settings.html @@ -75,7 +75,7 @@
[settings.html markup for this hunk was lost in extraction]
@@ -97,17 +97,23 @@
[settings.html markup for this hunk was lost in extraction]
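For reference, a rough sketch of the chunking behavior this patch introduces
(the item fields and literal values below are illustrative stand-ins, not
taken verbatim from the extension):

```js
// Each vector item keeps its metadata; only its text is split.
const item = { hash: 42, index: 0, text: 'a very long chat message...' };

// With settings.message_chunk_size = 400, splitByChunks([item]) returns
// one item per piece produced by splitRecursive(item.text, 400), e.g.:
// [{ hash: 42, index: 0, text: 'first piece...' },
//  { hash: 42, index: 0, text: 'second piece...' }]
const chunked = splitByChunks([item]);
```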