diff --git a/public/css/select2-overrides.css b/public/css/select2-overrides.css
index 36d0e43a9..f3cc9356a 100644
--- a/public/css/select2-overrides.css
+++ b/public/css/select2-overrides.css
@@ -121,7 +121,7 @@
 }
 
 /* Add the custom checkbox */
-.select2-results__option:before {
+.select2-results__option::before {
     content: '';
     display: inline-block;
     position: absolute;
@@ -141,11 +141,19 @@
 }
 
 /* Add the custom checkbox checkmark */
-.select2-results__option--selected.select2-results__option:before {
+.select2-results__option--selected.select2-results__option::before {
     content: '\2713';
     font-weight: bold;
     color: var(--SmartThemeBodyColor);
     background-color: var(--SmartThemeBlurTintColor);
     text-align: center;
     line-height: 14px;
-}
\ No newline at end of file
+}
+
+.select2-results__option.select2-results__message {
+    background-color: inherit;
+}
+
+.select2-results__option.select2-results__message::before {
+    display: none;
+}
diff --git a/public/index.html b/public/index.html
index df628f01d..3fd5e0408 100644
--- a/public/index.html
+++ b/public/index.html
@@ -3847,7 +3847,7 @@
diff --git a/public/script.js b/public/script.js
index 3ca460c01..d38432a54 100644
--- a/public/script.js
+++ b/public/script.js
@@ -145,7 +145,7 @@ import {
     Stopwatch,
 } from './scripts/utils.js';
 
-import { ModuleWorkerWrapper, doDailyExtensionUpdatesCheck, extension_settings, getContext, loadExtensionSettings, processExtensionHelpers, registerExtensionHelper, renderExtensionTemplate, runGenerationInterceptors, saveMetadataDebounced } from './scripts/extensions.js';
+import { ModuleWorkerWrapper, doDailyExtensionUpdatesCheck, extension_settings, getContext, loadExtensionSettings, renderExtensionTemplate, runGenerationInterceptors, saveMetadataDebounced } from './scripts/extensions.js';
 import { COMMENT_NAME_DEFAULT, executeSlashCommands, getSlashCommandsHelp, processChatSlashCommands, registerSlashCommand } from './scripts/slash-commands.js';
 import {
     tag_map,
@@ -338,8 +338,6 @@ export const event_types = {
 
 export const eventSource = new EventEmitter();
 
-eventSource.on(event_types.MESSAGE_RECEIVED, processExtensionHelpers);
-eventSource.on(event_types.MESSAGE_SENT, processExtensionHelpers);
 eventSource.on(event_types.CHAT_CHANGED, processChatSlashCommands);
 
 const characterGroupOverlay = new BulkEditOverlay();
@@ -701,8 +699,6 @@ export let user_avatar = 'you.png';
 
 export var amount_gen = 80; //default max length of AI generated responses
 var max_context = 2048;
 
-var message_already_generated = '';
-
 var swipes = true;
 let extension_prompts = {};
@@ -1478,13 +1474,11 @@ export async function reloadCurrentChat() {
 }
 
 function messageFormatting(mes, ch_name, isSystem, isUser) {
-    if (mes) {
-        mesForShowdownParse = mes;
+    if (!mes) {
+        return '';
     }
 
-    if (!mes) {
-        mes = '';
-    }
+    mesForShowdownParse = mes;
 
     // Force isSystem = false on comment messages so they get formatted properly
     if (ch_name === COMMENT_NAME_DEFAULT && isSystem && !isUser) {
@@ -1544,7 +1538,6 @@
     mes = mes.replaceAll('\\begin{align*}', '$$');
     mes = mes.replaceAll('\\end{align*}', '$$');
     mes = converter.makeHtml(mes);
-    mes = replaceBiasMarkup(mes);
 
     mes = mes.replace(/<code(.*)>[\s\S]*?<\/code>/g, function (match) {
         // Firefox creates extra newlines from
<br>s in code blocks, so we replace them before converting newlines to <br>
s. @@ -2600,6 +2593,21 @@ function hideStopButton() { } class StreamingProcessor { + constructor(type, force_name2, timeStarted, messageAlreadyGenerated) { + this.result = ''; + this.messageId = -1; + this.type = type; + this.force_name2 = force_name2; + this.isStopped = false; + this.isFinished = false; + this.generator = this.nullStreamingGeneration; + this.abortController = new AbortController(); + this.firstMessageText = '...'; + this.timeStarted = timeStarted; + this.messageAlreadyGenerated = messageAlreadyGenerated; + this.swipes = []; + } + showMessageButtons(messageId) { if (messageId == -1) { return; @@ -2635,32 +2643,16 @@ class StreamingProcessor { return messageId; } - removePrefix(text) { - const name1Marker = `${name1}: `; - const name2Marker = `${name2}: `; - - if (text) { - if (text.startsWith(name1Marker)) { - text = text.replace(name1Marker, ''); - } - if (text.startsWith(name2Marker)) { - text = text.replace(name2Marker, ''); - } - } - return text; - } - onProgressStreaming(messageId, text, isFinal) { const isImpersonate = this.type == 'impersonate'; const isContinue = this.type == 'continue'; if (!isImpersonate && !isContinue && Array.isArray(this.swipes) && this.swipes.length > 0) { for (let i = 0; i < this.swipes.length; i++) { - this.swipes[i] = cleanUpMessage(this.removePrefix(this.swipes[i]), false, false, true, this.stoppingStrings); + this.swipes[i] = cleanUpMessage(this.swipes[i], false, false, true, this.stoppingStrings); } } - text = this.removePrefix(text); let processedText = cleanUpMessage(text, isImpersonate, isContinue, !isFinal, this.stoppingStrings); // Predict unbalanced asterisks / quotes during streaming @@ -2786,6 +2778,9 @@ class StreamingProcessor { } onErrorStreaming() { + this.abortController.abort(); + this.isStopped = true; + this.hideMessageButtons(this.messageId); $('#send_textarea').removeAttr('disabled'); is_send_press = false; @@ -2811,20 +2806,6 @@ class StreamingProcessor { throw new Error('Generation function for streaming is not hooked up'); } - constructor(type, force_name2, timeStarted) { - this.result = ''; - this.messageId = -1; - this.type = type; - this.force_name2 = force_name2; - this.isStopped = false; - this.isFinished = false; - this.generator = this.nullStreamingGeneration; - this.abortController = new AbortController(); - this.firstMessageText = '...'; - this.timeStarted = timeStarted; - this.swipes = []; - } - async generate() { if (this.messageId == -1) { this.messageId = await this.onStartStreaming(this.firstMessageText); @@ -2844,13 +2825,12 @@ class StreamingProcessor { for await (const { text, swipes } of this.generator()) { timestamps.push(Date.now()); if (this.isStopped) { - this.onStopStreaming(); return; } this.result = text; this.swipes = swipes; - await sw.tick(() => this.onProgressStreaming(this.messageId, message_already_generated + text)); + await sw.tick(() => this.onProgressStreaming(this.messageId, this.messageAlreadyGenerated + text)); } const seconds = (timestamps[timestamps.length - 1] - timestamps[0]) / 1000; console.warn(`Stream stats: ${timestamps.length} tokens, ${seconds.toFixed(2)} seconds, rate: ${Number(timestamps.length / seconds).toFixed(2)} TPS`); @@ -2858,7 +2838,6 @@ class StreamingProcessor { catch (err) { console.error(err); this.onErrorStreaming(); - this.isStopped = true; return; } @@ -2965,7 +2944,7 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu const isInstruct = power_user.instruct.enabled && main_api !== 'openai'; const isImpersonate = type == 
'impersonate'; - message_already_generated = isImpersonate ? `${name1}: ` : `${name2}: `; + let message_already_generated = isImpersonate ? `${name1}: ` : `${name2}: `; const interruptedByCommand = await processCommands($('#send_textarea').val(), type, dryRun); @@ -3088,7 +3067,7 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu //for normal messages sent from user.. if ((textareaText != '' || hasPendingFileAttachment()) && !automatic_trigger && type !== 'quiet' && !dryRun) { // If user message contains no text other than bias - send as a system message - if (messageBias && replaceBiasMarkup(textareaText).trim().length === 0) { + if (messageBias && !removeMacros(textareaText)) { sendSystemMessage(system_message_types.GENERIC, ' ', { bias: messageBias }); } else { @@ -3378,10 +3357,6 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu let mesSend = []; console.debug('calling runGenerate'); - if (!dryRun) { - streamingProcessor = isStreamingEnabled() && type !== 'quiet' ? new StreamingProcessor(type, force_name2, generation_started) : false; - } - if (isContinue) { // Coping mechanism for OAI spacing const isForceInstruct = isOpenRouterWithInstruct(); @@ -3389,652 +3364,598 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu cyclePrompt += ' '; continue_mag += ' '; } - - // Save reply does add cycle text to the prompt, so it's not needed here - streamingProcessor && (streamingProcessor.firstMessageText = ''); message_already_generated = continue_mag; } const originalType = type; - return runGenerate(cyclePrompt); - async function runGenerate(cycleGenerationPrompt = '') { - if (!dryRun) { - is_send_press = true; - } + if (!dryRun) { + is_send_press = true; + } - generatedPromptCache += cycleGenerationPrompt; - if (generatedPromptCache.length == 0 || type === 'continue') { - console.debug('generating prompt'); - chatString = ''; - arrMes = arrMes.reverse(); - arrMes.forEach(function (item, i, arr) {// For added anchors and others - // OAI doesn't need all of this - if (main_api === 'openai') { - return; - } - - // Cohee: I'm not even sure what this is for anymore - if (i === arrMes.length - 1 && type !== 'continue') { - item = item.replace(/\n?$/, ''); - } - - mesSend[mesSend.length] = { message: item, extensionPrompts: [] }; - }); - } - - let mesExmString = ''; - - function setPromptString() { - if (main_api == 'openai') { - return; - } - - console.debug('--setting Prompt string'); - mesExmString = pinExmString ?? mesExamplesArray.slice(0, count_exm_add).join(''); - - if (mesSend.length) { - mesSend[mesSend.length - 1].message = modifyLastPromptLine(mesSend[mesSend.length - 1].message); - } - } - - function modifyLastPromptLine(lastMesString) { - //#########QUIET PROMPT STUFF PT2############## - - // Add quiet generation prompt at depth 0 - if (quiet_prompt && quiet_prompt.length) { - - // here name1 is forced for all quiet prompts..why? - const name = name1; - //checks if we are in instruct, if so, formats the chat as such, otherwise just adds the quiet prompt - const quietAppend = isInstruct ? formatInstructModeChat(name, quiet_prompt, false, true, '', name1, name2, false) : `\n${quiet_prompt}`; - - //This begins to fix quietPrompts (particularly /sysgen) for instruct - //previously instruct input sequence was being appended to the last chat message w/o '\n' - //and no output sequence was added after the input's content. 
- //TODO: respect output_sequence vs last_output_sequence settings - //TODO: decide how to prompt this to clarify who is talking 'Narrator', 'System', etc. - if (isInstruct) { - lastMesString += '\n' + quietAppend; // + power_user.instruct.output_sequence + '\n'; - } else { - lastMesString += quietAppend; - } - - - // Ross: bailing out early prevents quiet prompts from respecting other instruct prompt toggles - // for sysgen, SD, and summary this is desireable as it prevents the AI from responding as char.. - // but for idle prompting, we want the flexibility of the other prompt toggles, and to respect them as per settings in the extension - // need a detection for what the quiet prompt is being asked for... - - // Bail out early? - if (quietToLoud !== true) { - return lastMesString; - } - } - - - // Get instruct mode line - if (isInstruct && !isContinue) { - const name = isImpersonate ? name1 : name2; - lastMesString += formatInstructModePrompt(name, isImpersonate, promptBias, name1, name2); - } - - // Get non-instruct impersonation line - if (!isInstruct && isImpersonate && !isContinue) { - const name = name1; - if (!lastMesString.endsWith('\n')) { - lastMesString += '\n'; - } - lastMesString += name + ':'; - } - - // Add character's name - // Force name append on continue (if not continuing on user message) - if (!isInstruct && force_name2) { - if (!lastMesString.endsWith('\n')) { - lastMesString += '\n'; - } - if (!isContinue || !(chat[chat.length - 1]?.is_user)) { - lastMesString += `${name2}:`; - } - } - - return lastMesString; - } - - // Clean up the already generated prompt for seamless addition - function cleanupPromptCache(promptCache) { - // Remove the first occurrance of character's name - if (promptCache.trimStart().startsWith(`${name2}:`)) { - promptCache = promptCache.replace(`${name2}:`, '').trimStart(); - } - - // Remove the first occurrance of prompt bias - if (promptCache.trimStart().startsWith(promptBias)) { - promptCache = promptCache.replace(promptBias, ''); - } - - // Add a space if prompt cache doesn't start with one - if (!/^\s/.test(promptCache) && !isInstruct && !isContinue) { - promptCache = ' ' + promptCache; - } - - return promptCache; - } - - function checkPromptSize() { - console.debug('---checking Prompt size'); - setPromptString(); - const prompt = [ - storyString, - mesExmString, - mesSend.join(''), - generatedPromptCache, - allAnchors, - quiet_prompt, - ].join('').replace(/\r/gm, ''); - let thisPromptContextSize = getTokenCount(prompt, power_user.token_padding); - - if (thisPromptContextSize > this_max_context) { //if the prepared prompt is larger than the max context size... - if (count_exm_add > 0) { // ..and we have example mesages.. - count_exm_add--; // remove the example messages... - checkPromptSize(); // and try agin... - } else if (mesSend.length > 0) { // if the chat history is longer than 0 - mesSend.shift(); // remove the first (oldest) chat entry.. - checkPromptSize(); // and check size again.. 
- } else { - //end - console.debug(`---mesSend.length = ${mesSend.length}`); - } - } - } - - if (generatedPromptCache.length > 0 && main_api !== 'openai') { - console.debug('---Generated Prompt Cache length: ' + generatedPromptCache.length); - checkPromptSize(); - } else { - console.debug('---calling setPromptString ' + generatedPromptCache.length); - setPromptString(); - } - - // Fetches the combined prompt for both negative and positive prompts - const cfgGuidanceScale = getGuidanceScale(); - - // For prompt bit itemization - let mesSendString = ''; - - function getCombinedPrompt(isNegative) { - // Only return if the guidance scale doesn't exist or the value is 1 - // Also don't return if constructing the neutral prompt - if (isNegative && (!cfgGuidanceScale || cfgGuidanceScale?.value === 1)) { - return; - } - - // OAI has its own prompt manager. No need to do anything here + generatedPromptCache += cyclePrompt; + if (generatedPromptCache.length == 0 || type === 'continue') { + console.debug('generating prompt'); + chatString = ''; + arrMes = arrMes.reverse(); + arrMes.forEach(function (item, i, arr) {// For added anchors and others + // OAI doesn't need all of this if (main_api === 'openai') { - return ''; + return; } - // Deep clone - let finalMesSend = structuredClone(mesSend); - - // TODO: Rewrite getExtensionPrompt to not require multiple for loops - // Set all extension prompts where insertion depth > mesSend length - if (finalMesSend.length) { - for (let upperDepth = MAX_INJECTION_DEPTH; upperDepth >= finalMesSend.length; upperDepth--) { - const upperAnchor = getExtensionPrompt(extension_prompt_types.IN_CHAT, upperDepth); - if (upperAnchor && upperAnchor.length) { - finalMesSend[0].extensionPrompts.push(upperAnchor); - } - } + // Cohee: I'm not even sure what this is for anymore + if (i === arrMes.length - 1 && type !== 'continue') { + item = item.replace(/\n?$/, ''); } - finalMesSend.forEach((mesItem, index) => { - if (index === 0) { - return; - } + mesSend[mesSend.length] = { message: item, extensionPrompts: [] }; + }); + } - const anchorDepth = Math.abs(index - finalMesSend.length); - // NOTE: Depth injected here! - const extensionAnchor = getExtensionPrompt(extension_prompt_types.IN_CHAT, anchorDepth); + let mesExmString = ''; - if (anchorDepth >= 0 && extensionAnchor && extensionAnchor.length) { - mesItem.extensionPrompts.push(extensionAnchor); - } - }); - - // TODO: Move zero-depth anchor append to work like CFG and bias appends - if (zeroDepthAnchor?.length && !isContinue) { - console.debug(/\s/.test(finalMesSend[finalMesSend.length - 1].message.slice(-1))); - finalMesSend[finalMesSend.length - 1].message += - /\s/.test(finalMesSend[finalMesSend.length - 1].message.slice(-1)) - ? zeroDepthAnchor - : `${zeroDepthAnchor}`; - } - - let cfgPrompt = {}; - if (cfgGuidanceScale && cfgGuidanceScale?.value !== 1) { - cfgPrompt = getCfgPrompt(cfgGuidanceScale, isNegative); - } - - if (cfgPrompt && cfgPrompt?.value) { - if (cfgPrompt?.depth === 0) { - finalMesSend[finalMesSend.length - 1].message += - /\s/.test(finalMesSend[finalMesSend.length - 1].message.slice(-1)) - ? cfgPrompt.value - : ` ${cfgPrompt.value}`; - } else { - // TODO: Make all extension prompts use an array/splice method - const lengthDiff = mesSend.length - cfgPrompt.depth; - const cfgDepth = lengthDiff >= 0 ? 
lengthDiff : 0; - finalMesSend[cfgDepth].extensionPrompts.push(`${cfgPrompt.value}\n`); - } - } - - // Add prompt bias after everything else - // Always run with continue - if (!isInstruct && !isImpersonate) { - if (promptBias.trim().length !== 0) { - finalMesSend[finalMesSend.length - 1].message += - /\s/.test(finalMesSend[finalMesSend.length - 1].message.slice(-1)) - ? promptBias.trimStart() - : ` ${promptBias.trimStart()}`; - } - } - - // Prune from prompt cache if it exists - if (generatedPromptCache.length !== 0) { - generatedPromptCache = cleanupPromptCache(generatedPromptCache); - } - - // Flattens the multiple prompt objects to a string. - const combine = () => { - // Right now, everything is suffixed with a newline - mesSendString = finalMesSend.map((e) => `${e.extensionPrompts.join('')}${e.message}`).join(''); - - // add a custom dingus (if defined) - mesSendString = addChatsSeparator(mesSendString); - - // add chat preamble - mesSendString = addChatsPreamble(mesSendString); - - let combinedPrompt = beforeScenarioAnchor + - storyString + - afterScenarioAnchor + - mesExmString + - mesSendString + - generatedPromptCache; - - combinedPrompt = combinedPrompt.replace(/\r/gm, ''); - - if (power_user.collapse_newlines) { - combinedPrompt = collapseNewlines(combinedPrompt); - } - - return combinedPrompt; - }; - - let data = { - api: main_api, - combinedPrompt: null, - description, - personality, - persona, - scenario, - char: name2, - user: name1, - beforeScenarioAnchor, - afterScenarioAnchor, - mesExmString, - finalMesSend, - generatedPromptCache, - main: system, - jailbreak, - naiPreamble: nai_settings.preamble, - }; - - // Before returning the combined prompt, give available context related information to all subscribers. - eventSource.emitAndWait(event_types.GENERATE_BEFORE_COMBINE_PROMPTS, data); - - // If one or multiple subscribers return a value, forfeit the responsibillity of flattening the context. - return !data.combinedPrompt ? combine() : data.combinedPrompt; + function setPromptString() { + if (main_api == 'openai') { + return; } - // Get the negative prompt first since it has the unmodified mesSend array - let negativePrompt = main_api == 'textgenerationwebui' ? getCombinedPrompt(true) : undefined; - let finalPrompt = getCombinedPrompt(false); + console.debug('--setting Prompt string'); + mesExmString = pinExmString ?? mesExamplesArray.slice(0, count_exm_add).join(''); - // Include the entire guidance scale object - const cfgValues = cfgGuidanceScale && cfgGuidanceScale?.value !== 1 ? 
({ guidanceScale: cfgGuidanceScale, negativePrompt: negativePrompt }) : null; - - let maxLength = Number(amount_gen); // how many tokens the AI will be requested to generate - let thisPromptBits = []; - - // TODO: Make this a switch - if (main_api == 'koboldhorde' && horde_settings.auto_adjust_response_length) { - maxLength = Math.min(maxLength, adjustedParams.maxLength); - maxLength = Math.max(maxLength, MIN_LENGTH); // prevent validation errors + if (mesSend.length) { + mesSend[mesSend.length - 1].message = modifyLastPromptLine(mesSend[mesSend.length - 1].message); } + } - let generate_data; - if (main_api == 'koboldhorde' || main_api == 'kobold') { - generate_data = { - prompt: finalPrompt, - gui_settings: true, - max_length: maxLength, - max_context_length: max_context, - }; + function modifyLastPromptLine(lastMesString) { + //#########QUIET PROMPT STUFF PT2############## - if (preset_settings != 'gui') { - const isHorde = main_api == 'koboldhorde'; - const presetSettings = koboldai_settings[koboldai_setting_names[preset_settings]]; - const maxContext = (adjustedParams && horde_settings.auto_adjust_context_length) ? adjustedParams.maxContextLength : max_context; - generate_data = getKoboldGenerationData(finalPrompt, presetSettings, maxLength, maxContext, isHorde, type); - } - } - else if (main_api == 'textgenerationwebui') { - generate_data = getTextGenGenerationData(finalPrompt, maxLength, isImpersonate, isContinue, cfgValues, type); - } - else if (main_api == 'novel') { - const presetSettings = novelai_settings[novelai_setting_names[nai_settings.preset_settings_novel]]; - generate_data = getNovelGenerationData(finalPrompt, presetSettings, maxLength, isImpersonate, isContinue, cfgValues, type); - } - else if (main_api == 'openai') { - let [prompt, counts] = await prepareOpenAIMessages({ - name2: name2, - charDescription: description, - charPersonality: personality, - Scenario: scenario, - worldInfoBefore: worldInfoBefore, - worldInfoAfter: worldInfoAfter, - extensionPrompts: extension_prompts, - bias: promptBias, - type: type, - quietPrompt: quiet_prompt, - quietImage: quietImage, - cyclePrompt: cyclePrompt, - systemPromptOverride: system, - jailbreakPromptOverride: jailbreak, - personaDescription: persona, - messages: oaiMessages, - messageExamples: oaiMessageExamples, - }, dryRun); - generate_data = { prompt: prompt }; + // Add quiet generation prompt at depth 0 + if (quiet_prompt && quiet_prompt.length) { - // counts will return false if the user has not enabled the token breakdown feature - if (counts) { - parseTokenCounts(counts, thisPromptBits); + // here name1 is forced for all quiet prompts..why? + const name = name1; + //checks if we are in instruct, if so, formats the chat as such, otherwise just adds the quiet prompt + const quietAppend = isInstruct ? formatInstructModeChat(name, quiet_prompt, false, true, '', name1, name2, false) : `\n${quiet_prompt}`; + + //This begins to fix quietPrompts (particularly /sysgen) for instruct + //previously instruct input sequence was being appended to the last chat message w/o '\n' + //and no output sequence was added after the input's content. + //TODO: respect output_sequence vs last_output_sequence settings + //TODO: decide how to prompt this to clarify who is talking 'Narrator', 'System', etc. 
+ if (isInstruct) { + lastMesString += '\n' + quietAppend; // + power_user.instruct.output_sequence + '\n'; + } else { + lastMesString += quietAppend; } - if (!dryRun) { - setInContextMessages(openai_messages_count, type); + + // Ross: bailing out early prevents quiet prompts from respecting other instruct prompt toggles + // for sysgen, SD, and summary this is desireable as it prevents the AI from responding as char.. + // but for idle prompting, we want the flexibility of the other prompt toggles, and to respect them as per settings in the extension + // need a detection for what the quiet prompt is being asked for... + + // Bail out early? + if (quietToLoud !== true) { + return lastMesString; } } - return new Promise(async (resolve, reject) => { - if (true === dryRun) return onSuccess({ error: 'dryRun' }); - if (power_user.console_log_prompts) { - console.log(generate_data.prompt); + // Get instruct mode line + if (isInstruct && !isContinue) { + const name = isImpersonate ? name1 : name2; + lastMesString += formatInstructModePrompt(name, isImpersonate, promptBias, name1, name2); + } + + // Get non-instruct impersonation line + if (!isInstruct && isImpersonate && !isContinue) { + const name = name1; + if (!lastMesString.endsWith('\n')) { + lastMesString += '\n'; } + lastMesString += name + ':'; + } - let generate_url = getGenerateUrl(main_api); - console.debug('rungenerate calling API'); - - showStopButton(); - - //set array object for prompt token itemization of this message - let currentArrayEntry = Number(thisPromptBits.length - 1); - let additionalPromptStuff = { - ...thisPromptBits[currentArrayEntry], - rawPrompt: generate_data.prompt || generate_data.input, - mesId: getNextMessageId(type), - allAnchors: allAnchors, - summarizeString: (extension_prompts['1_memory']?.value || ''), - authorsNoteString: (extension_prompts['2_floating_prompt']?.value || ''), - smartContextString: (extension_prompts['chromadb']?.value || ''), - worldInfoString: worldInfoString, - storyString: storyString, - beforeScenarioAnchor: beforeScenarioAnchor, - afterScenarioAnchor: afterScenarioAnchor, - examplesString: examplesString, - mesSendString: mesSendString, - generatedPromptCache: generatedPromptCache, - promptBias: promptBias, - finalPrompt: finalPrompt, - charDescription: description, - charPersonality: personality, - scenarioText: scenario, - this_max_context: this_max_context, - padding: power_user.token_padding, - main_api: main_api, - instruction: isInstruct ? substituteParams(power_user.prefer_character_prompt && system ? system : power_user.instruct.system_prompt) : '', - userPersona: (power_user.persona_description || ''), - }; - - thisPromptBits = additionalPromptStuff; - - //console.log(thisPromptBits); - const itemizedIndex = itemizedPrompts.findIndex((item) => item.mesId === thisPromptBits['mesId']); - - if (itemizedIndex !== -1) { - itemizedPrompts[itemizedIndex] = thisPromptBits; + // Add character's name + // Force name append on continue (if not continuing on user message) + if (!isInstruct && force_name2) { + if (!lastMesString.endsWith('\n')) { + lastMesString += '\n'; } - else { - itemizedPrompts.push(thisPromptBits); + if (!isContinue || !(chat[chat.length - 1]?.is_user)) { + lastMesString += `${name2}:`; } + } - console.debug(`pushed prompt bits to itemizedPrompts array. 
Length is now: ${itemizedPrompts.length}`); - /** @type {Promise} */ - let streamingGeneratorPromise = Promise.resolve(); + return lastMesString; + } - if (main_api == 'openai') { - if (isStreamingEnabled() && type !== 'quiet') { - streamingGeneratorPromise = sendOpenAIRequest(type, generate_data.prompt, streamingProcessor.abortController.signal); - } - else { - sendOpenAIRequest(type, generate_data.prompt, abortController.signal).then(onSuccess).catch(onError); + // Clean up the already generated prompt for seamless addition + function cleanupPromptCache(promptCache) { + // Remove the first occurrance of character's name + if (promptCache.trimStart().startsWith(`${name2}:`)) { + promptCache = promptCache.replace(`${name2}:`, '').trimStart(); + } + + // Remove the first occurrance of prompt bias + if (promptCache.trimStart().startsWith(promptBias)) { + promptCache = promptCache.replace(promptBias, ''); + } + + // Add a space if prompt cache doesn't start with one + if (!/^\s/.test(promptCache) && !isInstruct && !isContinue) { + promptCache = ' ' + promptCache; + } + + return promptCache; + } + + function checkPromptSize() { + console.debug('---checking Prompt size'); + setPromptString(); + const prompt = [ + storyString, + mesExmString, + mesSend.join(''), + generatedPromptCache, + allAnchors, + quiet_prompt, + ].join('').replace(/\r/gm, ''); + let thisPromptContextSize = getTokenCount(prompt, power_user.token_padding); + + if (thisPromptContextSize > this_max_context) { //if the prepared prompt is larger than the max context size... + if (count_exm_add > 0) { // ..and we have example mesages.. + count_exm_add--; // remove the example messages... + checkPromptSize(); // and try agin... + } else if (mesSend.length > 0) { // if the chat history is longer than 0 + mesSend.shift(); // remove the first (oldest) chat entry.. + checkPromptSize(); // and check size again.. + } else { + //end + console.debug(`---mesSend.length = ${mesSend.length}`); + } + } + } + + if (generatedPromptCache.length > 0 && main_api !== 'openai') { + console.debug('---Generated Prompt Cache length: ' + generatedPromptCache.length); + checkPromptSize(); + } else { + console.debug('---calling setPromptString ' + generatedPromptCache.length); + setPromptString(); + } + + // Fetches the combined prompt for both negative and positive prompts + const cfgGuidanceScale = getGuidanceScale(); + + // For prompt bit itemization + let mesSendString = ''; + + function getCombinedPrompt(isNegative) { + // Only return if the guidance scale doesn't exist or the value is 1 + // Also don't return if constructing the neutral prompt + if (isNegative && (!cfgGuidanceScale || cfgGuidanceScale?.value === 1)) { + return; + } + + // OAI has its own prompt manager. 
No need to do anything here + if (main_api === 'openai') { + return ''; + } + + // Deep clone + let finalMesSend = structuredClone(mesSend); + + // TODO: Rewrite getExtensionPrompt to not require multiple for loops + // Set all extension prompts where insertion depth > mesSend length + if (finalMesSend.length) { + for (let upperDepth = MAX_INJECTION_DEPTH; upperDepth >= finalMesSend.length; upperDepth--) { + const upperAnchor = getExtensionPrompt(extension_prompt_types.IN_CHAT, upperDepth); + if (upperAnchor && upperAnchor.length) { + finalMesSend[0].extensionPrompts.push(upperAnchor); } } - else if (main_api == 'koboldhorde') { - generateHorde(finalPrompt, generate_data, abortController.signal, true).then(onSuccess).catch(onError); - } - else if (main_api == 'textgenerationwebui' && isStreamingEnabled() && type !== 'quiet') { - streamingGeneratorPromise = generateTextGenWithStreaming(generate_data, streamingProcessor.abortController.signal); - } - else if (main_api == 'novel' && isStreamingEnabled() && type !== 'quiet') { - streamingGeneratorPromise = generateNovelWithStreaming(generate_data, streamingProcessor.abortController.signal); - } - else if (main_api == 'kobold' && isStreamingEnabled() && type !== 'quiet') { - streamingGeneratorPromise = generateKoboldWithStreaming(generate_data, streamingProcessor.abortController.signal); - } - else { - try { - const response = await fetch(generate_url, { - method: 'POST', - headers: getRequestHeaders(), - cache: 'no-cache', - body: JSON.stringify(generate_data), - signal: abortController.signal, - }); + } - if (!response.ok) { - const error = await response.json(); - throw error; - } - - const data = await response.json(); - onSuccess(data); - } catch (error) { - onError(error); - } + finalMesSend.forEach((mesItem, index) => { + if (index === 0) { + return; } - if (isStreamingEnabled() && type !== 'quiet') { - try { - const streamingGenerator = await streamingGeneratorPromise; - streamingProcessor.generator = streamingGenerator; - hideSwipeButtons(); - let getMessage = await streamingProcessor.generate(); - let messageChunk = cleanUpMessage(getMessage, isImpersonate, isContinue, false); + const anchorDepth = Math.abs(index - finalMesSend.length); + // NOTE: Depth injected here! 
+ const extensionAnchor = getExtensionPrompt(extension_prompt_types.IN_CHAT, anchorDepth); - if (isContinue) { - getMessage = continue_mag + getMessage; - } - - if (streamingProcessor && !streamingProcessor.isStopped && streamingProcessor.isFinished) { - await streamingProcessor.onFinishStreaming(streamingProcessor.messageId, getMessage); - streamingProcessor = null; - triggerAutoContinue(messageChunk, isImpersonate); - } - resolve(); - } catch (err) { - onError(err); - } - - } - - async function onSuccess(data) { - let messageChunk = ''; - - if (data.error == 'dryRun') { - generatedPromptCache = ''; - resolve(); - return; - } - - if (!data.error) { - //const getData = await response.json(); - let getMessage = extractMessageFromData(data); - let title = extractTitleFromData(data); - kobold_horde_model = title; - - const swipes = extractMultiSwipes(data, type); - - messageChunk = cleanUpMessage(getMessage, isImpersonate, isContinue, false); - - if (isContinue) { - getMessage = continue_mag + getMessage; - } - - //Formating - const displayIncomplete = type === 'quiet' && !quietToLoud; - getMessage = cleanUpMessage(getMessage, isImpersonate, isContinue, displayIncomplete); - - if (getMessage.length > 0) { - if (isImpersonate) { - $('#send_textarea').val(getMessage).trigger('input'); - generatedPromptCache = ''; - await eventSource.emit(event_types.IMPERSONATE_READY, getMessage); - } - else if (type == 'quiet') { - resolve(getMessage); - } - else { - // Without streaming we'll be having a full message on continuation. Treat it as a last chunk. - if (originalType !== 'continue') { - ({ type, getMessage } = await saveReply(type, getMessage, false, title, swipes)); - } - else { - ({ type, getMessage } = await saveReply('appendFinal', getMessage, false, title, swipes)); - } - } - - if (type !== 'quiet') { - playMessageSound(); - } - } else { - // If maxLoops is not passed in (e.g. first time generating), set it to MAX_GENERATION_LOOPS - maxLoops ??= MAX_GENERATION_LOOPS; - - if (maxLoops === 0) { - reject(new Error('Generate circuit breaker interruption')); - if (type !== 'quiet') { - throwCircuitBreakerError(); - } - return; - } - - // regenerate with character speech reenforced - // to make sure we leave on swipe type while also adding the name2 appendage - delay(1000).then(async () => { - // The first await is for waiting for the generate to start. The second one is waiting for it to finish - const result = await await Generate(type, { automatic_trigger, force_name2: true, quiet_prompt, skipWIAN, force_chid, maxLoops: maxLoops - 1 }); - resolve(result); - }); - return; - } - - if (power_user.auto_swipe) { - console.debug('checking for autoswipeblacklist on non-streaming message'); - function containsBlacklistedWords(getMessage, blacklist, threshold) { - console.debug('checking blacklisted words'); - const regex = new RegExp(`\\b(${blacklist.join('|')})\\b`, 'gi'); - const matches = getMessage.match(regex) || []; - return matches.length >= threshold; - } - - const generatedTextFiltered = (getMessage) => { - if (power_user.auto_swipe_blacklist_threshold) { - if (containsBlacklistedWords(getMessage, power_user.auto_swipe_blacklist, power_user.auto_swipe_blacklist_threshold)) { - console.debug('Generated text has blacklisted words'); - return true; - } - } - - return false; - }; - if (generatedTextFiltered(getMessage)) { - console.debug('swiping right automatically'); - is_send_press = false; - swipe_right(); - // TODO: do we want to resolve after an auto-swipe? 
- resolve(); - return; - } - } - } else { - generatedPromptCache = ''; - - if (data?.response) { - toastr.error(data.response, 'API Error'); - } - reject(data.response); - } - - console.debug('/api/chats/save called by /Generate'); - await saveChatConditional(); - unblockGeneration(); - streamingProcessor = null; - - if (type !== 'quiet') { - triggerAutoContinue(messageChunk, isImpersonate); - } - resolve(); - } - - function onError(exception) { - if (typeof exception?.error?.message === 'string') { - toastr.error(exception.error.message, 'Error', { timeOut: 10000, extendedTimeOut: 20000 }); - } - - reject(exception); - unblockGeneration(); - console.log(exception); - streamingProcessor = null; + if (anchorDepth >= 0 && extensionAnchor && extensionAnchor.length) { + mesItem.extensionPrompts.push(extensionAnchor); } }); - } //rungenerate ends + // TODO: Move zero-depth anchor append to work like CFG and bias appends + if (zeroDepthAnchor?.length && !isContinue) { + console.debug(/\s/.test(finalMesSend[finalMesSend.length - 1].message.slice(-1))); + finalMesSend[finalMesSend.length - 1].message += + /\s/.test(finalMesSend[finalMesSend.length - 1].message.slice(-1)) + ? zeroDepthAnchor + : `${zeroDepthAnchor}`; + } + + let cfgPrompt = {}; + if (cfgGuidanceScale && cfgGuidanceScale?.value !== 1) { + cfgPrompt = getCfgPrompt(cfgGuidanceScale, isNegative); + } + + if (cfgPrompt && cfgPrompt?.value) { + if (cfgPrompt?.depth === 0) { + finalMesSend[finalMesSend.length - 1].message += + /\s/.test(finalMesSend[finalMesSend.length - 1].message.slice(-1)) + ? cfgPrompt.value + : ` ${cfgPrompt.value}`; + } else { + // TODO: Make all extension prompts use an array/splice method + const lengthDiff = mesSend.length - cfgPrompt.depth; + const cfgDepth = lengthDiff >= 0 ? lengthDiff : 0; + finalMesSend[cfgDepth].extensionPrompts.push(`${cfgPrompt.value}\n`); + } + } + + // Add prompt bias after everything else + // Always run with continue + if (!isInstruct && !isImpersonate) { + if (promptBias.trim().length !== 0) { + finalMesSend[finalMesSend.length - 1].message += + /\s/.test(finalMesSend[finalMesSend.length - 1].message.slice(-1)) + ? promptBias.trimStart() + : ` ${promptBias.trimStart()}`; + } + } + + // Prune from prompt cache if it exists + if (generatedPromptCache.length !== 0) { + generatedPromptCache = cleanupPromptCache(generatedPromptCache); + } + + // Flattens the multiple prompt objects to a string. + const combine = () => { + // Right now, everything is suffixed with a newline + mesSendString = finalMesSend.map((e) => `${e.extensionPrompts.join('')}${e.message}`).join(''); + + // add a custom dingus (if defined) + mesSendString = addChatsSeparator(mesSendString); + + // add chat preamble + mesSendString = addChatsPreamble(mesSendString); + + let combinedPrompt = beforeScenarioAnchor + + storyString + + afterScenarioAnchor + + mesExmString + + mesSendString + + generatedPromptCache; + + combinedPrompt = combinedPrompt.replace(/\r/gm, ''); + + if (power_user.collapse_newlines) { + combinedPrompt = collapseNewlines(combinedPrompt); + } + + return combinedPrompt; + }; + + let data = { + api: main_api, + combinedPrompt: null, + description, + personality, + persona, + scenario, + char: name2, + user: name1, + beforeScenarioAnchor, + afterScenarioAnchor, + mesExmString, + finalMesSend, + generatedPromptCache, + main: system, + jailbreak, + naiPreamble: nai_settings.preamble, + }; + + // Before returning the combined prompt, give available context related information to all subscribers. 
+ eventSource.emitAndWait(event_types.GENERATE_BEFORE_COMBINE_PROMPTS, data); + + // If one or multiple subscribers return a value, forfeit the responsibillity of flattening the context. + return !data.combinedPrompt ? combine() : data.combinedPrompt; + } + + // Get the negative prompt first since it has the unmodified mesSend array + let negativePrompt = main_api == 'textgenerationwebui' ? getCombinedPrompt(true) : undefined; + let finalPrompt = getCombinedPrompt(false); + + // Include the entire guidance scale object + const cfgValues = cfgGuidanceScale && cfgGuidanceScale?.value !== 1 ? ({ guidanceScale: cfgGuidanceScale, negativePrompt: negativePrompt }) : null; + + let maxLength = Number(amount_gen); // how many tokens the AI will be requested to generate + let thisPromptBits = []; + + // TODO: Make this a switch + if (main_api == 'koboldhorde' && horde_settings.auto_adjust_response_length) { + maxLength = Math.min(maxLength, adjustedParams.maxLength); + maxLength = Math.max(maxLength, MIN_LENGTH); // prevent validation errors + } + + let generate_data; + if (main_api == 'koboldhorde' || main_api == 'kobold') { + generate_data = { + prompt: finalPrompt, + gui_settings: true, + max_length: maxLength, + max_context_length: max_context, + }; + + if (preset_settings != 'gui') { + const isHorde = main_api == 'koboldhorde'; + const presetSettings = koboldai_settings[koboldai_setting_names[preset_settings]]; + const maxContext = (adjustedParams && horde_settings.auto_adjust_context_length) ? adjustedParams.maxContextLength : max_context; + generate_data = getKoboldGenerationData(finalPrompt, presetSettings, maxLength, maxContext, isHorde, type); + } + } + else if (main_api == 'textgenerationwebui') { + generate_data = getTextGenGenerationData(finalPrompt, maxLength, isImpersonate, isContinue, cfgValues, type); + } + else if (main_api == 'novel') { + const presetSettings = novelai_settings[novelai_setting_names[nai_settings.preset_settings_novel]]; + generate_data = getNovelGenerationData(finalPrompt, presetSettings, maxLength, isImpersonate, isContinue, cfgValues, type); + } + else if (main_api == 'openai') { + let [prompt, counts] = await prepareOpenAIMessages({ + name2: name2, + charDescription: description, + charPersonality: personality, + Scenario: scenario, + worldInfoBefore: worldInfoBefore, + worldInfoAfter: worldInfoAfter, + extensionPrompts: extension_prompts, + bias: promptBias, + type: type, + quietPrompt: quiet_prompt, + quietImage: quietImage, + cyclePrompt: cyclePrompt, + systemPromptOverride: system, + jailbreakPromptOverride: jailbreak, + personaDescription: persona, + messages: oaiMessages, + messageExamples: oaiMessageExamples, + }, dryRun); + generate_data = { prompt: prompt }; + + // counts will return false if the user has not enabled the token breakdown feature + if (counts) { + parseTokenCounts(counts, thisPromptBits); + } + + if (!dryRun) { + setInContextMessages(openai_messages_count, type); + } + } + + async function finishGenerating() { + if (dryRun) return { error: 'dryRun' }; + + if (power_user.console_log_prompts) { + console.log(generate_data.prompt); + } + + console.debug('rungenerate calling API'); + + showStopButton(); + + //set array object for prompt token itemization of this message + let currentArrayEntry = Number(thisPromptBits.length - 1); + let additionalPromptStuff = { + ...thisPromptBits[currentArrayEntry], + rawPrompt: generate_data.prompt || generate_data.input, + mesId: getNextMessageId(type), + allAnchors: allAnchors, + summarizeString: 
(extension_prompts['1_memory']?.value || ''), + authorsNoteString: (extension_prompts['2_floating_prompt']?.value || ''), + smartContextString: (extension_prompts['chromadb']?.value || ''), + worldInfoString: worldInfoString, + storyString: storyString, + beforeScenarioAnchor: beforeScenarioAnchor, + afterScenarioAnchor: afterScenarioAnchor, + examplesString: examplesString, + mesSendString: mesSendString, + generatedPromptCache: generatedPromptCache, + promptBias: promptBias, + finalPrompt: finalPrompt, + charDescription: description, + charPersonality: personality, + scenarioText: scenario, + this_max_context: this_max_context, + padding: power_user.token_padding, + main_api: main_api, + instruction: isInstruct ? substituteParams(power_user.prefer_character_prompt && system ? system : power_user.instruct.system_prompt) : '', + userPersona: (power_user.persona_description || ''), + }; + + thisPromptBits = additionalPromptStuff; + + //console.log(thisPromptBits); + const itemizedIndex = itemizedPrompts.findIndex((item) => item.mesId === thisPromptBits['mesId']); + + if (itemizedIndex !== -1) { + itemizedPrompts[itemizedIndex] = thisPromptBits; + } + else { + itemizedPrompts.push(thisPromptBits); + } + + console.debug(`pushed prompt bits to itemizedPrompts array. Length is now: ${itemizedPrompts.length}`); + + if (isStreamingEnabled() && type !== 'quiet') { + streamingProcessor = new StreamingProcessor(type, force_name2, generation_started, message_already_generated); + if (isContinue) { + // Save reply does add cycle text to the prompt, so it's not needed here + streamingProcessor.firstMessageText = ''; + } + + streamingProcessor.generator = await sendStreamingRequest(type, generate_data); + + hideSwipeButtons(); + let getMessage = await streamingProcessor.generate(); + let messageChunk = cleanUpMessage(getMessage, isImpersonate, isContinue, false); + + if (isContinue) { + getMessage = continue_mag + getMessage; + } + + if (streamingProcessor && !streamingProcessor.isStopped && streamingProcessor.isFinished) { + await streamingProcessor.onFinishStreaming(streamingProcessor.messageId, getMessage); + streamingProcessor = null; + triggerAutoContinue(messageChunk, isImpersonate); + } + } else { + return await sendGenerationRequest(type, generate_data); + } + } + + return finishGenerating().then(onSuccess, onError); + + async function onSuccess(data) { + if (!data) return; + let messageChunk = ''; + + if (data.error == 'dryRun') { + generatedPromptCache = ''; + return; + } + + if (!data.error) { + //const getData = await response.json(); + let getMessage = extractMessageFromData(data); + let title = extractTitleFromData(data); + kobold_horde_model = title; + + const swipes = extractMultiSwipes(data, type); + + messageChunk = cleanUpMessage(getMessage, isImpersonate, isContinue, false); + + if (isContinue) { + getMessage = continue_mag + getMessage; + } + + //Formating + const displayIncomplete = type === 'quiet' && !quietToLoud; + getMessage = cleanUpMessage(getMessage, isImpersonate, isContinue, displayIncomplete); + + if (getMessage.length > 0) { + if (isImpersonate) { + $('#send_textarea').val(getMessage).trigger('input'); + generatedPromptCache = ''; + await eventSource.emit(event_types.IMPERSONATE_READY, getMessage); + } + else if (type == 'quiet') { + unblockGeneration(); + return getMessage; + } + else { + // Without streaming we'll be having a full message on continuation. Treat it as a last chunk. 
+ if (originalType !== 'continue') { + ({ type, getMessage } = await saveReply(type, getMessage, false, title, swipes)); + } + else { + ({ type, getMessage } = await saveReply('appendFinal', getMessage, false, title, swipes)); + } + } + + if (type !== 'quiet') { + playMessageSound(); + } + } else { + // If maxLoops is not passed in (e.g. first time generating), set it to MAX_GENERATION_LOOPS + maxLoops ??= MAX_GENERATION_LOOPS; + + if (maxLoops === 0) { + if (type !== 'quiet') { + throwCircuitBreakerError(); + } + throw new Error('Generate circuit breaker interruption'); + } + + // regenerate with character speech reenforced + // to make sure we leave on swipe type while also adding the name2 appendage + await delay(1000); + // The first await is for waiting for the generate to start. The second one is waiting for it to finish + const result = await await Generate(type, { automatic_trigger, force_name2: true, quiet_prompt, skipWIAN, force_chid, maxLoops: maxLoops - 1 }); + return result; + } + + if (power_user.auto_swipe) { + console.debug('checking for autoswipeblacklist on non-streaming message'); + function containsBlacklistedWords(getMessage, blacklist, threshold) { + console.debug('checking blacklisted words'); + const regex = new RegExp(`\\b(${blacklist.join('|')})\\b`, 'gi'); + const matches = getMessage.match(regex) || []; + return matches.length >= threshold; + } + + const generatedTextFiltered = (getMessage) => { + if (power_user.auto_swipe_blacklist_threshold) { + if (containsBlacklistedWords(getMessage, power_user.auto_swipe_blacklist, power_user.auto_swipe_blacklist_threshold)) { + console.debug('Generated text has blacklisted words'); + return true; + } + } + + return false; + }; + if (generatedTextFiltered(getMessage)) { + console.debug('swiping right automatically'); + is_send_press = false; + swipe_right(); + // TODO: do we want to resolve after an auto-swipe? + return; + } + } + } else { + generatedPromptCache = ''; + + if (data?.response) { + toastr.error(data.response, 'API Error'); + } + throw data?.response; + } + + console.debug('/api/chats/save called by /Generate'); + await saveChatConditional(); + unblockGeneration(); + streamingProcessor = null; + + if (type !== 'quiet') { + triggerAutoContinue(messageChunk, isImpersonate); + } + } + + function onError(exception) { + if (typeof exception?.error?.message === 'string') { + toastr.error(exception.error.message, 'Error', { timeOut: 10000, extendedTimeOut: 20000 }); + } + + unblockGeneration(); + console.log(exception); + streamingProcessor = null; + throw exception; + } } else { //generate's primary loop ends, after this is error handling for no-connection or safety-id if (this_chid === undefined || this_chid === 'invalid-safety-id') { toastr.warning('Сharacter is not selected'); } is_send_press = false; } - - //console.log('generate ending'); -} //generate ends +} function flushWIDepthInjections() { //prevent custom depth WI entries (which have unique random key names) from duplicating @@ -4165,13 +4086,16 @@ function formatMessageHistoryItem(chatItem, isInstruct, forceOutputSequence) { textResult = formatInstructModeChat(itemName, chatItem.mes, chatItem.is_user, isNarratorType, chatItem.force_avatar, name1, name2, forceOutputSequence); } - textResult = replaceBiasMarkup(textResult); - return textResult; } -export function replaceBiasMarkup(str) { - return (str ?? '').replace(/\{\{[\s\S]*?\}\}/gm, ''); +/** + * Removes all {{macros}} from a string. + * @param {string} str String to remove macros from. 
+ * @returns {string} String with macros removed. + */ +export function removeMacros(str) { + return (str ?? '').replace(/\{\{[\s\S]*?\}\}/gm, '').trim(); } /** @@ -4481,22 +4405,82 @@ function setInContextMessages(lastmsg, type) { } } -function getGenerateUrl(api) { - let generate_url = ''; - if (api == 'kobold') { - generate_url = '/api/backends/kobold/generate'; - } else if (api == 'textgenerationwebui') { - generate_url = '/api/backends/text-completions/generate'; - } else if (api == 'novel') { - generate_url = '/api/novelai/generate'; +/** + * Sends a non-streaming request to the API. + * @param {string} type Generation type + * @param {object} data Generation data + * @returns {Promise} Response data from the API + */ +async function sendGenerationRequest(type, data) { + if (main_api === 'openai') { + return await sendOpenAIRequest(type, data.prompt, abortController.signal); + } + + if (main_api === 'koboldhorde') { + return await generateHorde(data.prompt, data, abortController.signal, true); + } + + const response = await fetch(getGenerateUrl(main_api), { + method: 'POST', + headers: getRequestHeaders(), + cache: 'no-cache', + body: JSON.stringify(data), + signal: abortController.signal, + }); + + if (!response.ok) { + const error = await response.json(); + throw error; + } + + const responseData = await response.json(); + return responseData; +} + +/** + * Sends a streaming request to the API. + * @param {string} type Generation type + * @param {object} data Generation data + * @returns {Promise} Streaming generator + */ +async function sendStreamingRequest(type, data) { + switch (main_api) { + case 'openai': + return await sendOpenAIRequest(type, data.prompt, streamingProcessor.abortController.signal); + case 'textgenerationwebui': + return await generateTextGenWithStreaming(data, streamingProcessor.abortController.signal); + case 'novel': + return await generateNovelWithStreaming(data, streamingProcessor.abortController.signal); + case 'kobold': + return await generateKoboldWithStreaming(data, streamingProcessor.abortController.signal); + default: + throw new Error('Streaming is enabled, but the current API does not support streaming.'); + } +} + +/** + * Gets the generation endpoint URL for the specified API. + * @param {string} api API name + * @returns {string} Generation URL + */ +function getGenerateUrl(api) { + switch (api) { + case 'kobold': + return '/api/backends/kobold/generate'; + case 'koboldhorde': + return '/api/backends/koboldhorde/generate'; + case 'textgenerationwebui': + return '/api/backends/text-completions/generate'; + case 'novel': + return '/api/novelai/generate'; + default: + throw new Error(`Unknown API: ${api}`); } - return generate_url; } function throwCircuitBreakerError() { callPopup(`Could not extract reply in ${MAX_GENERATION_LOOPS} attempts. Try generating again`, 'text'); unblockGeneration(); - throw new Error('Generate circuit breaker interruption'); } function extractTitleFromData(data) { @@ -7178,7 +7162,10 @@ window['SillyTavern'].getContext = function () { saveReply, registerSlashCommand: registerSlashCommand, executeSlashCommands: executeSlashCommands, - registerHelper: registerExtensionHelper, + /** + * @deprecated Handlebars for extensions are no longer supported. + */ + registerHelper: () => { }, registedDebugFunction: registerDebugFunction, renderExtensionTemplate: renderExtensionTemplate, callPopup: callPopup, @@ -7198,7 +7185,7 @@ function swipe_left() { // when we swipe left..but no generation. 
} if (isStreamingEnabled() && streamingProcessor) { - streamingProcessor.isStopped = true; + streamingProcessor.onStopStreaming(); } const swipe_duration = 120; @@ -7662,6 +7649,10 @@ export async function processDroppedFiles(files) { const allowedMimeTypes = [ 'application/json', 'image/png', + 'application/yaml', + 'application/x-yaml', + 'text/yaml', + 'text/x-yaml', ]; for (const file of files) { @@ -7675,10 +7666,7 @@ export async function processDroppedFiles(files) { async function importCharacter(file) { const ext = file.name.match(/\.(\w+)$/); - if ( - !ext || - (ext[1].toLowerCase() != 'json' && ext[1].toLowerCase() != 'png') - ) { + if (!ext || !(['json', 'png', 'yaml', 'yml'].includes(ext[1].toLowerCase()))) { return; } @@ -9327,8 +9315,6 @@ jQuery(async function () { $(document).on('click', '.mes_stop', function () { if (streamingProcessor) { - streamingProcessor.abortController.abort(); - streamingProcessor.isStopped = true; streamingProcessor.onStopStreaming(); streamingProcessor = null; } @@ -9583,7 +9569,7 @@ jQuery(async function () { cancelTtsPlay(); if (streamingProcessor) { console.log('Page reloaded. Aborting streaming...'); - streamingProcessor.abortController.abort(); + streamingProcessor.onStopStreaming(); } }); diff --git a/public/scripts/extensions.js b/public/scripts/extensions.js index 3ce240060..120791842 100644 --- a/public/scripts/extensions.js +++ b/public/scripts/extensions.js @@ -47,8 +47,6 @@ export function saveMetadataDebounced() { }, 1000); } -export const extensionsHandlebars = Handlebars.create(); - /** * Provides an ability for extensions to render HTML templates. * Templates sanitation and localization is forced. @@ -61,40 +59,6 @@ export function renderExtensionTemplate(extensionName, templateId, templateData return renderTemplate(`scripts/extensions/${extensionName}/${templateId}.html`, templateData, sanitize, localize, true); } -/** - * Registers a Handlebars helper for use in extensions. - * @param {string} name Handlebars helper name - * @param {function} helper Handlebars helper function - */ -export function registerExtensionHelper(name, helper) { - extensionsHandlebars.registerHelper(name, helper); -} - -/** - * Applies handlebars extension helpers to a message. - * @param {number} messageId Message index in the chat. 
- */ -export function processExtensionHelpers(messageId) { - const context = getContext(); - const message = context.chat[messageId]; - - if (!message?.mes || typeof message.mes !== 'string') { - return; - } - - // Don't waste time if there are no mustaches - if (!substituteParams(message.mes).includes('{{')) { - return; - } - - try { - const template = extensionsHandlebars.compile(substituteParams(message.mes), { noEscape: true }); - message.mes = template({}); - } catch { - // Ignore - } -} - // Disables parallel updates class ModuleWorkerWrapper { constructor(callback) { diff --git a/public/scripts/extensions/translate/index.js b/public/scripts/extensions/translate/index.js index d8e14b3d7..bf2370f1d 100644 --- a/public/scripts/extensions/translate/index.js +++ b/public/scripts/extensions/translate/index.js @@ -12,6 +12,7 @@ import { } from '../../../script.js'; import { extension_settings, getContext } from '../../extensions.js'; import { secret_state, writeSecret } from '../../secrets.js'; +import { splitRecursive } from '../../utils.js'; export const autoModeOptions = { NONE: 'none', @@ -315,6 +316,28 @@ async function translateProviderBing(text, lang) { throw new Error(response.statusText); } +/** + * Splits text into chunks and translates each chunk separately + * @param {string} text Text to translate + * @param {string} lang Target language code + * @param {(text: string, lang: string) => Promise} translateFn Function to translate a single chunk (must return a Promise) + * @param {number} chunkSize Maximum chunk size + * @returns {Promise} Translated text + */ +async function chunkedTranslate(text, lang, translateFn, chunkSize = 5000) { + if (text.length <= chunkSize) { + return await translateFn(text, lang); + } + + const chunks = splitRecursive(text, chunkSize); + + let result = ''; + for (const chunk of chunks) { + result += await translateFn(chunk, lang); + } + return result; +} + /** * Translates text using the selected translation provider * @param {string} text Text to translate @@ -331,15 +354,15 @@ async function translate(text, lang) { case 'libre': return await translateProviderLibre(text, lang); case 'google': - return await translateProviderGoogle(text, lang); + return await chunkedTranslate(text, lang, translateProviderGoogle, 5000); case 'deepl': return await translateProviderDeepl(text, lang); case 'deeplx': - return await translateProviderDeepLX(text, lang); + return await chunkedTranslate(text, lang, translateProviderDeepLX, 1500); case 'oneringtranslator': return await translateProviderOneRing(text, lang); case 'bing': - return await translateProviderBing(text, lang); + return await chunkedTranslate(text, lang, translateProviderBing, 1000); default: console.error('Unknown translation provider', extension_settings.translate.provider); return text; diff --git a/public/scripts/openai.js b/public/scripts/openai.js index c49d31259..f18f9e75e 100644 --- a/public/scripts/openai.js +++ b/public/scripts/openai.js @@ -21,7 +21,6 @@ import { MAX_INJECTION_DEPTH, name1, name2, - replaceBiasMarkup, replaceItemizedPromptText, resultCheckStatus, saveSettingsDebounced, @@ -443,8 +442,6 @@ function setOpenAIMessages(chat) { content = `${chat[j].name}: ${content}`; } } - content = replaceBiasMarkup(content); - // remove caret return (waste of tokens) content = content.replace(/\r/gm, ''); diff --git a/public/scripts/slash-commands.js b/public/scripts/slash-commands.js index 97b24b1c7..cb66726ed 100644 --- a/public/scripts/slash-commands.js +++ b/public/scripts/slash-commands.js 
@@ -20,7 +20,7 @@ import { main_api, name1, reloadCurrentChat, - replaceBiasMarkup, + removeMacros, saveChatConditional, sendMessageAsUser, sendSystemMessage, @@ -1260,7 +1260,7 @@ export async function sendMessageAs(args, text) { // Messages that do nothing but set bias will be hidden from the context const bias = extractMessageBias(mesText); - const isSystem = replaceBiasMarkup(mesText).trim().length === 0; + const isSystem = bias && !removeMacros(mesText).length; const character = characters.find(x => x.name === name); let force_avatar, original_avatar; @@ -1313,7 +1313,7 @@ export async function sendNarratorMessage(args, text) { const name = chat_metadata[NARRATOR_NAME_KEY] || NARRATOR_NAME_DEFAULT; // Messages that do nothing but set bias will be hidden from the context const bias = extractMessageBias(text); - const isSystem = replaceBiasMarkup(text).trim().length === 0; + const isSystem = bias && !removeMacros(text).length; const message = { name: name, diff --git a/public/style.css b/public/style.css index 2f2e6d244..864ac23f5 100644 --- a/public/style.css +++ b/public/style.css @@ -637,6 +637,9 @@ hr { order: 2; padding-right: 2px; place-self: center; + cursor: pointer; + transition: 0.3s; + opacity: 0.7; } #options_button { diff --git a/src/endpoints/backends/text-completions.js b/src/endpoints/backends/text-completions.js index 614b41557..f1ef4004d 100644 --- a/src/endpoints/backends/text-completions.js +++ b/src/endpoints/backends/text-completions.js @@ -5,7 +5,7 @@ const Readable = require('stream').Readable; const { jsonParser } = require('../../express-common'); const { TEXTGEN_TYPES, TOGETHERAI_KEYS, OLLAMA_KEYS } = require('../../constants'); -const { forwardFetchResponse } = require('../../util'); +const { forwardFetchResponse, trimV1 } = require('../../util'); const { setAdditionalHeaders } = require('../../additional-headers'); const router = express.Router(); @@ -57,6 +57,26 @@ async function parseOllamaStream(jsonStream, request, response) { } } +/** + * Abort KoboldCpp generation request. 
+ * @param {string} url Server base URL
+ * @returns {Promise<void>} Promise resolving when the abort request completes
+ */
+async function abortKoboldCppRequest(url) {
+    try {
+        console.log('Aborting Kobold generation...');
+        const abortResponse = await fetch(`${url}/api/extra/abort`, {
+            method: 'POST',
+        });
+
+        if (!abortResponse.ok) {
+            console.log('Error sending abort request to Kobold:', abortResponse.status, abortResponse.statusText);
+        }
+    } catch (error) {
+        console.log(error);
+    }
+}
+
 //************** Ooba/OpenAI text completions API
 router.post('/status', jsonParser, async function (request, response) {
     if (!request.body) return response.sendStatus(400);
@@ -67,9 +87,7 @@ router.post('/status', jsonParser, async function (request, response) {
     }

     console.log('Trying to connect to API:', request.body);
-
-    // Convert to string + remove trailing slash + /v1 suffix
-    const baseUrl = String(request.body.api_server).replace(/\/$/, '').replace(/\/v1$/, '');
+    const baseUrl = trimV1(request.body.api_server);

     const args = {
         headers: { 'Content-Type': 'application/json' },
@@ -195,12 +213,15 @@ router.post('/generate', jsonParser, async function (request, response) {
         const controller = new AbortController();
         request.socket.removeAllListeners('close');
-        request.socket.on('close', function () {
+        request.socket.on('close', async function () {
+            if (request.body.api_type === TEXTGEN_TYPES.KOBOLDCPP && !response.writableEnded) {
+                await abortKoboldCppRequest(trimV1(baseUrl));
+            }
+
             controller.abort();
         });

-        // Convert to string + remove trailing slash + /v1 suffix
-        let url = String(baseUrl).replace(/\/$/, '').replace(/\/v1$/, '');
+        let url = trimV1(baseUrl);

         if (request.body.legacy_api) {
             url += '/v1/generate';
@@ -337,8 +358,7 @@ ollama.post('/caption-image', jsonParser, async function (request, response) {
         }

         console.log('Ollama caption request:', request.body);
-        // Convert to string + remove trailing slash + /v1 suffix
-        const baseUrl = String(request.body.server_url).replace(/\/$/, '').replace(/\/v1$/, '');
+        const baseUrl = trimV1(request.body.server_url);

         const fetchResponse = await fetch(`${baseUrl}/api/generate`, {
             method: 'POST',
@@ -383,8 +403,7 @@ llamacpp.post('/caption-image', jsonParser, async function (request, response) {
         }

         console.log('LlamaCpp caption request:', request.body);
-        // Convert to string + remove trailing slash + /v1 suffix
-        const baseUrl = String(request.body.server_url).replace(/\/$/, '').replace(/\/v1$/, '');
+        const baseUrl = trimV1(request.body.server_url);

         const fetchResponse = await fetch(`${baseUrl}/completion`, {
             method: 'POST',
diff --git a/src/endpoints/characters.js b/src/endpoints/characters.js
index 9e0956c04..ac8d8187d 100644
--- a/src/endpoints/characters.js
+++ b/src/endpoints/characters.js
@@ -4,6 +4,7 @@ const readline = require('readline');
 const express = require('express');
 const sanitize = require('sanitize-filename');
 const writeFileAtomicSync = require('write-file-atomic').sync;
+const yaml = require('yaml');
 const _ = require('lodash');

 const encode = require('png-chunks-encode');
@@ -19,6 +20,7 @@ const characterCardParser = require('../character-card-parser.js');
 const { readWorldInfoFile } = require('./worldinfo');
 const { invalidateThumbnail } = require('./thumbnails');
 const { importRisuSprites } = require('./sprites');
+const defaultAvatarPath = './public/img/ai4.png';

 let characters = {};

@@ -394,6 +396,36 @@ function convertWorldInfoToCharacterBook(name, entries) {
     return result;
 }

+/**
+ * Import a character from a YAML file.
+ * @param {string} uploadPath Path to the uploaded file + * @param {import('express').Response} response Express response object + */ +function importFromYaml(uploadPath, response) { + const fileText = fs.readFileSync(uploadPath, 'utf8'); + fs.rmSync(uploadPath); + const yamlData = yaml.parse(fileText); + console.log('importing from yaml'); + yamlData.name = sanitize(yamlData.name); + const fileName = getPngName(yamlData.name); + let char = convertToV2({ + 'name': yamlData.name, + 'description': yamlData.context ?? '', + 'first_mes': yamlData.greeting ?? '', + 'create_date': humanizedISO8601DateTime(), + 'chat': `${yamlData.name} - ${humanizedISO8601DateTime()}`, + 'personality': '', + 'creatorcomment': '', + 'avatar': 'none', + 'mes_example': '', + 'scenario': '', + 'talkativeness': 0.5, + 'creator': '', + 'tags': '', + }); + charaWrite(defaultAvatarPath, JSON.stringify(char), fileName, response, { file_name: fileName }); +} + const router = express.Router(); router.post('/create', urlencodedParser, async function (request, response) { @@ -760,144 +792,147 @@ function getPngName(file) { } router.post('/import', urlencodedParser, async function (request, response) { - - if (!request.body || request.file === undefined) return response.sendStatus(400); + if (!request.body || !request.file) return response.sendStatus(400); let png_name = ''; let filedata = request.file; let uploadPath = path.join(UPLOADS_PATH, filedata.filename); - var format = request.body.file_type; - const defaultAvatarPath = './public/img/ai4.png'; - //console.log(format); - if (filedata) { - if (format == 'json') { - fs.readFile(uploadPath, 'utf8', async (err, data) => { - fs.unlinkSync(uploadPath); + let format = request.body.file_type; - if (err) { - console.log(err); - response.send({ error: true }); - } + if (format == 'yaml' || format == 'yml') { + try { + importFromYaml(uploadPath, response); + } catch (err) { + console.log(err); + response.send({ error: true }); + } + } else if (format == 'json') { + fs.readFile(uploadPath, 'utf8', async (err, data) => { + fs.unlinkSync(uploadPath); - let jsonData = JSON.parse(data); - - if (jsonData.spec !== undefined) { - console.log('importing from v2 json'); - importRisuSprites(jsonData); - unsetFavFlag(jsonData); - jsonData = readFromV2(jsonData); - jsonData['create_date'] = humanizedISO8601DateTime(); - png_name = getPngName(jsonData.data?.name || jsonData.name); - let char = JSON.stringify(jsonData); - charaWrite(defaultAvatarPath, char, png_name, response, { file_name: png_name }); - } else if (jsonData.name !== undefined) { - console.log('importing from v1 json'); - jsonData.name = sanitize(jsonData.name); - if (jsonData.creator_notes) { - jsonData.creator_notes = jsonData.creator_notes.replace('Creator\'s notes go here.', ''); - } - png_name = getPngName(jsonData.name); - let char = { - 'name': jsonData.name, - 'description': jsonData.description ?? '', - 'creatorcomment': jsonData.creatorcomment ?? jsonData.creator_notes ?? '', - 'personality': jsonData.personality ?? '', - 'first_mes': jsonData.first_mes ?? '', - 'avatar': 'none', - 'chat': jsonData.name + ' - ' + humanizedISO8601DateTime(), - 'mes_example': jsonData.mes_example ?? '', - 'scenario': jsonData.scenario ?? '', - 'create_date': humanizedISO8601DateTime(), - 'talkativeness': jsonData.talkativeness ?? 0.5, - 'creator': jsonData.creator ?? '', - 'tags': jsonData.tags ?? 
'', - }; - char = convertToV2(char); - let charJSON = JSON.stringify(char); - charaWrite(defaultAvatarPath, charJSON, png_name, response, { file_name: png_name }); - } else if (jsonData.char_name !== undefined) {//json Pygmalion notepad - console.log('importing from gradio json'); - jsonData.char_name = sanitize(jsonData.char_name); - if (jsonData.creator_notes) { - jsonData.creator_notes = jsonData.creator_notes.replace('Creator\'s notes go here.', ''); - } - png_name = getPngName(jsonData.char_name); - let char = { - 'name': jsonData.char_name, - 'description': jsonData.char_persona ?? '', - 'creatorcomment': jsonData.creatorcomment ?? jsonData.creator_notes ?? '', - 'personality': '', - 'first_mes': jsonData.char_greeting ?? '', - 'avatar': 'none', - 'chat': jsonData.name + ' - ' + humanizedISO8601DateTime(), - 'mes_example': jsonData.example_dialogue ?? '', - 'scenario': jsonData.world_scenario ?? '', - 'create_date': humanizedISO8601DateTime(), - 'talkativeness': jsonData.talkativeness ?? 0.5, - 'creator': jsonData.creator ?? '', - 'tags': jsonData.tags ?? '', - }; - char = convertToV2(char); - let charJSON = JSON.stringify(char); - charaWrite(defaultAvatarPath, charJSON, png_name, response, { file_name: png_name }); - } else { - console.log('Incorrect character format .json'); - response.send({ error: true }); - } - }); - } else { - try { - var img_data = await charaRead(uploadPath, format); - if (img_data === undefined) throw new Error('Failed to read character data'); - - let jsonData = JSON.parse(img_data); - - jsonData.name = sanitize(jsonData.data?.name || jsonData.name); - png_name = getPngName(jsonData.name); - - if (jsonData.spec !== undefined) { - console.log('Found a v2 character file.'); - importRisuSprites(jsonData); - unsetFavFlag(jsonData); - jsonData = readFromV2(jsonData); - jsonData['create_date'] = humanizedISO8601DateTime(); - const char = JSON.stringify(jsonData); - await charaWrite(uploadPath, char, png_name, response, { file_name: png_name }); - fs.unlinkSync(uploadPath); - } else if (jsonData.name !== undefined) { - console.log('Found a v1 character file.'); - - if (jsonData.creator_notes) { - jsonData.creator_notes = jsonData.creator_notes.replace('Creator\'s notes go here.', ''); - } - - let char = { - 'name': jsonData.name, - 'description': jsonData.description ?? '', - 'creatorcomment': jsonData.creatorcomment ?? jsonData.creator_notes ?? '', - 'personality': jsonData.personality ?? '', - 'first_mes': jsonData.first_mes ?? '', - 'avatar': 'none', - 'chat': jsonData.name + ' - ' + humanizedISO8601DateTime(), - 'mes_example': jsonData.mes_example ?? '', - 'scenario': jsonData.scenario ?? '', - 'create_date': humanizedISO8601DateTime(), - 'talkativeness': jsonData.talkativeness ?? 0.5, - 'creator': jsonData.creator ?? '', - 'tags': jsonData.tags ?? 
'', - }; - char = convertToV2(char); - const charJSON = JSON.stringify(char); - await charaWrite(uploadPath, charJSON, png_name, response, { file_name: png_name }); - fs.unlinkSync(uploadPath); - } else { - console.log('Unknown character card format'); - response.send({ error: true }); - } - } catch (err) { + if (err) { console.log(err); response.send({ error: true }); } + + let jsonData = JSON.parse(data); + + if (jsonData.spec !== undefined) { + console.log('importing from v2 json'); + importRisuSprites(jsonData); + unsetFavFlag(jsonData); + jsonData = readFromV2(jsonData); + jsonData['create_date'] = humanizedISO8601DateTime(); + png_name = getPngName(jsonData.data?.name || jsonData.name); + let char = JSON.stringify(jsonData); + charaWrite(defaultAvatarPath, char, png_name, response, { file_name: png_name }); + } else if (jsonData.name !== undefined) { + console.log('importing from v1 json'); + jsonData.name = sanitize(jsonData.name); + if (jsonData.creator_notes) { + jsonData.creator_notes = jsonData.creator_notes.replace('Creator\'s notes go here.', ''); + } + png_name = getPngName(jsonData.name); + let char = { + 'name': jsonData.name, + 'description': jsonData.description ?? '', + 'creatorcomment': jsonData.creatorcomment ?? jsonData.creator_notes ?? '', + 'personality': jsonData.personality ?? '', + 'first_mes': jsonData.first_mes ?? '', + 'avatar': 'none', + 'chat': jsonData.name + ' - ' + humanizedISO8601DateTime(), + 'mes_example': jsonData.mes_example ?? '', + 'scenario': jsonData.scenario ?? '', + 'create_date': humanizedISO8601DateTime(), + 'talkativeness': jsonData.talkativeness ?? 0.5, + 'creator': jsonData.creator ?? '', + 'tags': jsonData.tags ?? '', + }; + char = convertToV2(char); + let charJSON = JSON.stringify(char); + charaWrite(defaultAvatarPath, charJSON, png_name, response, { file_name: png_name }); + } else if (jsonData.char_name !== undefined) {//json Pygmalion notepad + console.log('importing from gradio json'); + jsonData.char_name = sanitize(jsonData.char_name); + if (jsonData.creator_notes) { + jsonData.creator_notes = jsonData.creator_notes.replace('Creator\'s notes go here.', ''); + } + png_name = getPngName(jsonData.char_name); + let char = { + 'name': jsonData.char_name, + 'description': jsonData.char_persona ?? '', + 'creatorcomment': jsonData.creatorcomment ?? jsonData.creator_notes ?? '', + 'personality': '', + 'first_mes': jsonData.char_greeting ?? '', + 'avatar': 'none', + 'chat': jsonData.name + ' - ' + humanizedISO8601DateTime(), + 'mes_example': jsonData.example_dialogue ?? '', + 'scenario': jsonData.world_scenario ?? '', + 'create_date': humanizedISO8601DateTime(), + 'talkativeness': jsonData.talkativeness ?? 0.5, + 'creator': jsonData.creator ?? '', + 'tags': jsonData.tags ?? 
'', + }; + char = convertToV2(char); + let charJSON = JSON.stringify(char); + charaWrite(defaultAvatarPath, charJSON, png_name, response, { file_name: png_name }); + } else { + console.log('Incorrect character format .json'); + response.send({ error: true }); + } + }); + } else { + try { + var img_data = await charaRead(uploadPath, format); + if (img_data === undefined) throw new Error('Failed to read character data'); + + let jsonData = JSON.parse(img_data); + + jsonData.name = sanitize(jsonData.data?.name || jsonData.name); + png_name = getPngName(jsonData.name); + + if (jsonData.spec !== undefined) { + console.log('Found a v2 character file.'); + importRisuSprites(jsonData); + unsetFavFlag(jsonData); + jsonData = readFromV2(jsonData); + jsonData['create_date'] = humanizedISO8601DateTime(); + const char = JSON.stringify(jsonData); + await charaWrite(uploadPath, char, png_name, response, { file_name: png_name }); + fs.unlinkSync(uploadPath); + } else if (jsonData.name !== undefined) { + console.log('Found a v1 character file.'); + + if (jsonData.creator_notes) { + jsonData.creator_notes = jsonData.creator_notes.replace('Creator\'s notes go here.', ''); + } + + let char = { + 'name': jsonData.name, + 'description': jsonData.description ?? '', + 'creatorcomment': jsonData.creatorcomment ?? jsonData.creator_notes ?? '', + 'personality': jsonData.personality ?? '', + 'first_mes': jsonData.first_mes ?? '', + 'avatar': 'none', + 'chat': jsonData.name + ' - ' + humanizedISO8601DateTime(), + 'mes_example': jsonData.mes_example ?? '', + 'scenario': jsonData.scenario ?? '', + 'create_date': humanizedISO8601DateTime(), + 'talkativeness': jsonData.talkativeness ?? 0.5, + 'creator': jsonData.creator ?? '', + 'tags': jsonData.tags ?? '', + }; + char = convertToV2(char); + const charJSON = JSON.stringify(char); + await charaWrite(uploadPath, charJSON, png_name, response, { file_name: png_name }); + fs.unlinkSync(uploadPath); + } else { + console.log('Unknown character card format'); + response.send({ error: true }); + } + } catch (err) { + console.log(err); + response.send({ error: true }); } } }); diff --git a/src/endpoints/horde.js b/src/endpoints/horde.js index b8d3227cc..e26b74436 100644 --- a/src/endpoints/horde.js +++ b/src/endpoints/horde.js @@ -36,20 +36,12 @@ function sanitizeHordeImagePrompt(prompt) { prompt = prompt.replace(/\b(boy)\b/gmi, 'man'); prompt = prompt.replace(/\b(girls)\b/gmi, 'women'); prompt = prompt.replace(/\b(boys)\b/gmi, 'men'); - //always remove these high risk words from prompt, as they add little value to image gen while increasing the risk the prompt gets flagged prompt = prompt.replace(/\b(under.age|under.aged|underage|underaged|loli|pedo|pedophile|(\w+).year.old|(\w+).years.old|minor|prepubescent|minors|shota)\b/gmi, ''); - - //if nsfw is detected, do not remove it but apply additional precautions - let isNsfw = 
prompt.match(/\b(cock|ahegao|hentai|uncensored|lewd|cocks|deepthroat|deepthroating|dick|dicks|cumshot|lesbian|fuck|fucked|fucking|sperm|naked|nipples|tits|boobs|breasts|boob|breast|topless|ass|butt|fingering|masturbate|masturbating|bitch|blowjob|pussy|piss|asshole|dildo|dildos|vibrator|erection|foreskin|handjob|nude|penis|porn|vibrator|virgin|vagina|vulva|threesome|orgy|bdsm|hickey|condom|testicles|anal|bareback|bukkake|creampie|stripper|strap-on|missionary|clitoris|clit|clitty|cowgirl|fleshlight|sex|buttplug|milf|oral|sucking|bondage|orgasm|scissoring|railed|slut|sluts|slutty|cumming|cunt|faggot|sissy|anal|anus|cum|semen|scat|nsfw|xxx|explicit|erotic|horny|aroused|jizz|moan|rape|raped|raping|throbbing|humping)\b/gmi); - - if (isNsfw) { - //replace risky subject nouns with person - prompt = prompt.replace(/\b(youngster|infant|baby|toddler|child|teen|kid|kiddie|kiddo|teenager|student|preteen|pre.teen)\b/gmi, 'person'); - - //remove risky adjectives and related words - prompt = prompt.replace(/\b(young|younger|youthful|youth|small|smaller|smallest|girly|boyish|lil|tiny|teenaged|lit[tl]le|school.aged|school|highschool|kindergarten|teens|children|kids)\b/gmi, ''); - } + //replace risky subject nouns with person + prompt = prompt.replace(/\b(youngster|infant|baby|toddler|child|teen|kid|kiddie|kiddo|teenager|student|preteen|pre.teen)\b/gmi, 'person'); + //remove risky adjectives and related words + prompt = prompt.replace(/\b(young|younger|youthful|youth|small|smaller|smallest|girly|boyish|lil|tiny|teenaged|lit[tl]le|school.aged|school|highschool|kindergarten|teens|children|kids)\b/gmi, ''); return prompt; } diff --git a/src/util.js b/src/util.js index 0779b88f6..ef5da97ff 100644 --- a/src/util.js +++ b/src/util.js @@ -458,6 +458,15 @@ function excludeKeysByYaml(obj, yamlString) { } } +/** + * Removes trailing slash and /v1 from a string. + * @param {string} str Input string + * @returns {string} Trimmed string + */ +function trimV1(str) { + return String(str ?? '').replace(/\/$/, '').replace(/\/v1$/, ''); +} + module.exports = { getConfig, getConfigValue, @@ -481,4 +490,5 @@ module.exports = { getHexString, mergeObjectWithYaml, excludeKeysByYaml, + trimV1, };
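
A few usage notes on the helpers introduced by this patch follow. The snippets are illustrative sketches, not part of the diff.

chunkedTranslate() in public/scripts/extensions/translate/index.js is module-private, so the sketch below assumes it is in scope; stubProvider is a hypothetical stand-in for a real provider such as translateProviderGoogle. It shows the call pattern: input at or under the chunk size passes through in a single call, longer input is split via splitRecursive() and translated chunk by chunk, with the partial results concatenated in order.

// Hypothetical stand-in provider: tags each chunk instead of translating it.
async function stubProvider(text, lang) {
    return `[${lang}]${text}`;
}

async function demo() {
    // Short input: one provider call, no splitting.
    console.log(await chunkedTranslate('hello', 'de', stubProvider, 5000)); // '[de]hello'

    // Long input: split into chunks of at most 5000 characters.
    const longText = 'lorem ipsum '.repeat(1000); // ~12000 characters
    const result = await chunkedTranslate(longText, 'de', stubProvider, 5000);
    const calls = (result.match(/\[de\]/g) || []).length;
    console.log(`provider called ${calls} times`); // one tag per chunk
}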
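For the YAML import path in src/endpoints/characters.js, importFromYaml() reads exactly three fields from the uploaded file: name (sanitized, and also used for the chat name and the PNG file name), context (becomes description), and greeting (becomes first_mes). Every other card field starts empty, talkativeness defaults to 0.5, and the card is written onto the stock ./public/img/ai4.png avatar. A minimal card it would accept (the character name is invented for illustration):

// Smallest YAML card the importer understands (illustrative):
const sampleYamlCard = `
name: Forest Spirit
context: A gentle spirit who heals lost travelers.
greeting: "*looks up from her herbs* Oh! You startled me."
`;

const yaml = require('yaml');
const card = yaml.parse(sampleYamlCard);
// -> { name: 'Forest Spirit', context: '...', greeting: '...' }
// importFromYaml() maps: name -> name, context -> description, greeting -> first_mes.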
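On the src/endpoints/horde.js change: the subject-noun and risky-adjective replacements that previously ran only when the NSFW regex matched now run on every image prompt. The effect, using only the replacements visible in the hunk above:

// 'boy' -> 'man' via the gendered-noun swaps; 'young' and 'little' are
// stripped by the risky-adjective filter (leaving doubled spaces behind):
sanitizeHordeImagePrompt('a young boy with his little dog');
// -> 'a  man with his  dog'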
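Finally, trimV1() in src/util.js replaces the four copies of the inline "remove trailing slash + /v1 suffix" normalization. Its behavior follows directly from the two chained regexes: the trailing slash is stripped first, then a trailing /v1 segment.

const { trimV1 } = require('./src/util'); // path relative to the repo root

trimV1('http://127.0.0.1:5000/v1/'); // 'http://127.0.0.1:5000'
trimV1('http://127.0.0.1:5000/v1');  // 'http://127.0.0.1:5000'
trimV1('http://127.0.0.1:5000/');    // 'http://127.0.0.1:5000'
trimV1(undefined);                   // '' (String(str ?? '') guards null/undefined)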