diff --git a/public/css/select2-overrides.css b/public/css/select2-overrides.css
index 36d0e43a9..f3cc9356a 100644
--- a/public/css/select2-overrides.css
+++ b/public/css/select2-overrides.css
@@ -121,7 +121,7 @@
 }
 
 /* Add the custom checkbox */
-.select2-results__option:before {
+.select2-results__option::before {
     content: '';
     display: inline-block;
     position: absolute;
@@ -141,11 +141,19 @@
 }
 
 /* Add the custom checkbox checkmark */
-.select2-results__option--selected.select2-results__option:before {
+.select2-results__option--selected.select2-results__option::before {
     content: '\2713';
     font-weight: bold;
     color: var(--SmartThemeBodyColor);
     background-color: var(--SmartThemeBlurTintColor);
     text-align: center;
     line-height: 14px;
-}
\ No newline at end of file
+}
+
+.select2-results__option.select2-results__message {
+    background-color: inherit;
+}
+
+.select2-results__option.select2-results__message::before {
+    display: none;
+}
diff --git a/public/index.html b/public/index.html
index df628f01d..3fd5e0408 100644
--- a/public/index.html
+++ b/public/index.html
@@ -3847,7 +3847,7 @@
diff --git a/public/script.js b/public/script.js
index 3ca460c01..d38432a54 100644
--- a/public/script.js
+++ b/public/script.js
@@ -145,7 +145,7 @@ import {
     Stopwatch,
 } from './scripts/utils.js';
-import { ModuleWorkerWrapper, doDailyExtensionUpdatesCheck, extension_settings, getContext, loadExtensionSettings, processExtensionHelpers, registerExtensionHelper, renderExtensionTemplate, runGenerationInterceptors, saveMetadataDebounced } from './scripts/extensions.js';
+import { ModuleWorkerWrapper, doDailyExtensionUpdatesCheck, extension_settings, getContext, loadExtensionSettings, renderExtensionTemplate, runGenerationInterceptors, saveMetadataDebounced } from './scripts/extensions.js';
 import { COMMENT_NAME_DEFAULT, executeSlashCommands, getSlashCommandsHelp, processChatSlashCommands, registerSlashCommand } from './scripts/slash-commands.js';
 import {
     tag_map,
@@ -338,8 +338,6 @@ export const event_types = {
 
 export const eventSource = new EventEmitter();
 
-eventSource.on(event_types.MESSAGE_RECEIVED, processExtensionHelpers);
-eventSource.on(event_types.MESSAGE_SENT, processExtensionHelpers);
 eventSource.on(event_types.CHAT_CHANGED, processChatSlashCommands);
 
 const characterGroupOverlay = new BulkEditOverlay();
@@ -701,8 +699,6 @@ export let user_avatar = 'you.png';
 export var amount_gen = 80; //default max length of AI generated responses
 var max_context = 2048;
 
-var message_already_generated = '';
-
 var swipes = true;
 
 let extension_prompts = {};
@@ -1478,13 +1474,11 @@ export async function reloadCurrentChat() {
 }
 
 function messageFormatting(mes, ch_name, isSystem, isUser) {
-    if (mes) {
-        mesForShowdownParse = mes;
+    if (!mes) {
+        return '';
     }
 
-    if (!mes) {
-        mes = '';
-    }
+    mesForShowdownParse = mes;
 
     // Force isSystem = false on comment messages so they get formatted properly
     if (ch_name === COMMENT_NAME_DEFAULT && isSystem && !isUser) {
@@ -1544,7 +1538,6 @@ function messageFormatting(mes, ch_name, isSystem, isUser) {
         mes = mes.replaceAll('\\begin{align*}', '$$');
         mes = mes.replaceAll('\\end{align*}', '$$');
         mes = converter.makeHtml(mes);
-        mes = replaceBiasMarkup(mes);
 
         mes = mes.replace(/<code(.*)>[\s\S]*?<\/code>/g, function (match) {
             // Firefox creates extra newlines from <br>s in code blocks, so we replace them before converting newlines to <br>s.
@@ -2600,6 +2593,21 @@ function hideStopButton() {
}
class StreamingProcessor {
+ constructor(type, force_name2, timeStarted, messageAlreadyGenerated) {
+ this.result = '';
+ this.messageId = -1;
+ this.type = type;
+ this.force_name2 = force_name2;
+ this.isStopped = false;
+ this.isFinished = false;
+ this.generator = this.nullStreamingGeneration;
+ this.abortController = new AbortController();
+ this.firstMessageText = '...';
+ this.timeStarted = timeStarted;
+ this.messageAlreadyGenerated = messageAlreadyGenerated;
+ this.swipes = [];
+ }
+
showMessageButtons(messageId) {
if (messageId == -1) {
return;
@@ -2635,32 +2643,16 @@ class StreamingProcessor {
return messageId;
}
- removePrefix(text) {
- const name1Marker = `${name1}: `;
- const name2Marker = `${name2}: `;
-
- if (text) {
- if (text.startsWith(name1Marker)) {
- text = text.replace(name1Marker, '');
- }
- if (text.startsWith(name2Marker)) {
- text = text.replace(name2Marker, '');
- }
- }
- return text;
- }
-
onProgressStreaming(messageId, text, isFinal) {
const isImpersonate = this.type == 'impersonate';
const isContinue = this.type == 'continue';
if (!isImpersonate && !isContinue && Array.isArray(this.swipes) && this.swipes.length > 0) {
for (let i = 0; i < this.swipes.length; i++) {
- this.swipes[i] = cleanUpMessage(this.removePrefix(this.swipes[i]), false, false, true, this.stoppingStrings);
+ this.swipes[i] = cleanUpMessage(this.swipes[i], false, false, true, this.stoppingStrings);
}
}
- text = this.removePrefix(text);
let processedText = cleanUpMessage(text, isImpersonate, isContinue, !isFinal, this.stoppingStrings);
// Predict unbalanced asterisks / quotes during streaming
@@ -2786,6 +2778,9 @@ class StreamingProcessor {
}
onErrorStreaming() {
+ this.abortController.abort();
+ this.isStopped = true;
+
this.hideMessageButtons(this.messageId);
$('#send_textarea').removeAttr('disabled');
is_send_press = false;
@@ -2811,20 +2806,6 @@ class StreamingProcessor {
throw new Error('Generation function for streaming is not hooked up');
}
- constructor(type, force_name2, timeStarted) {
- this.result = '';
- this.messageId = -1;
- this.type = type;
- this.force_name2 = force_name2;
- this.isStopped = false;
- this.isFinished = false;
- this.generator = this.nullStreamingGeneration;
- this.abortController = new AbortController();
- this.firstMessageText = '...';
- this.timeStarted = timeStarted;
- this.swipes = [];
- }
-
async generate() {
if (this.messageId == -1) {
this.messageId = await this.onStartStreaming(this.firstMessageText);
@@ -2844,13 +2825,12 @@ class StreamingProcessor {
for await (const { text, swipes } of this.generator()) {
timestamps.push(Date.now());
if (this.isStopped) {
- this.onStopStreaming();
return;
}
this.result = text;
this.swipes = swipes;
- await sw.tick(() => this.onProgressStreaming(this.messageId, message_already_generated + text));
+ await sw.tick(() => this.onProgressStreaming(this.messageId, this.messageAlreadyGenerated + text));
}
const seconds = (timestamps[timestamps.length - 1] - timestamps[0]) / 1000;
console.warn(`Stream stats: ${timestamps.length} tokens, ${seconds.toFixed(2)} seconds, rate: ${Number(timestamps.length / seconds).toFixed(2)} TPS`);
@@ -2858,7 +2838,6 @@ class StreamingProcessor {
catch (err) {
console.error(err);
this.onErrorStreaming();
- this.isStopped = true;
return;
}
@@ -2965,7 +2944,7 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
const isInstruct = power_user.instruct.enabled && main_api !== 'openai';
const isImpersonate = type == 'impersonate';
- message_already_generated = isImpersonate ? `${name1}: ` : `${name2}: `;
+ let message_already_generated = isImpersonate ? `${name1}: ` : `${name2}: `;
const interruptedByCommand = await processCommands($('#send_textarea').val(), type, dryRun);
@@ -3088,7 +3067,7 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
//for normal messages sent from user..
if ((textareaText != '' || hasPendingFileAttachment()) && !automatic_trigger && type !== 'quiet' && !dryRun) {
// If user message contains no text other than bias - send as a system message
- if (messageBias && replaceBiasMarkup(textareaText).trim().length === 0) {
+ if (messageBias && !removeMacros(textareaText)) {
sendSystemMessage(system_message_types.GENERIC, ' ', { bias: messageBias });
}
else {
@@ -3378,10 +3357,6 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
let mesSend = [];
console.debug('calling runGenerate');
- if (!dryRun) {
- streamingProcessor = isStreamingEnabled() && type !== 'quiet' ? new StreamingProcessor(type, force_name2, generation_started) : false;
- }
-
if (isContinue) {
// Coping mechanism for OAI spacing
const isForceInstruct = isOpenRouterWithInstruct();
@@ -3389,652 +3364,598 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
cyclePrompt += ' ';
continue_mag += ' ';
}
-
- // Save reply does add cycle text to the prompt, so it's not needed here
- streamingProcessor && (streamingProcessor.firstMessageText = '');
message_already_generated = continue_mag;
}
const originalType = type;
- return runGenerate(cyclePrompt);
- async function runGenerate(cycleGenerationPrompt = '') {
- if (!dryRun) {
- is_send_press = true;
- }
+ if (!dryRun) {
+ is_send_press = true;
+ }
- generatedPromptCache += cycleGenerationPrompt;
- if (generatedPromptCache.length == 0 || type === 'continue') {
- console.debug('generating prompt');
- chatString = '';
- arrMes = arrMes.reverse();
- arrMes.forEach(function (item, i, arr) {// For added anchors and others
- // OAI doesn't need all of this
- if (main_api === 'openai') {
- return;
- }
-
- // Cohee: I'm not even sure what this is for anymore
- if (i === arrMes.length - 1 && type !== 'continue') {
- item = item.replace(/\n?$/, '');
- }
-
- mesSend[mesSend.length] = { message: item, extensionPrompts: [] };
- });
- }
-
- let mesExmString = '';
-
- function setPromptString() {
- if (main_api == 'openai') {
- return;
- }
-
- console.debug('--setting Prompt string');
- mesExmString = pinExmString ?? mesExamplesArray.slice(0, count_exm_add).join('');
-
- if (mesSend.length) {
- mesSend[mesSend.length - 1].message = modifyLastPromptLine(mesSend[mesSend.length - 1].message);
- }
- }
-
- function modifyLastPromptLine(lastMesString) {
- //#########QUIET PROMPT STUFF PT2##############
-
- // Add quiet generation prompt at depth 0
- if (quiet_prompt && quiet_prompt.length) {
-
- // here name1 is forced for all quiet prompts..why?
- const name = name1;
- //checks if we are in instruct, if so, formats the chat as such, otherwise just adds the quiet prompt
- const quietAppend = isInstruct ? formatInstructModeChat(name, quiet_prompt, false, true, '', name1, name2, false) : `\n${quiet_prompt}`;
-
- //This begins to fix quietPrompts (particularly /sysgen) for instruct
- //previously instruct input sequence was being appended to the last chat message w/o '\n'
- //and no output sequence was added after the input's content.
- //TODO: respect output_sequence vs last_output_sequence settings
- //TODO: decide how to prompt this to clarify who is talking 'Narrator', 'System', etc.
- if (isInstruct) {
- lastMesString += '\n' + quietAppend; // + power_user.instruct.output_sequence + '\n';
- } else {
- lastMesString += quietAppend;
- }
-
-
- // Ross: bailing out early prevents quiet prompts from respecting other instruct prompt toggles
- // for sysgen, SD, and summary this is desireable as it prevents the AI from responding as char..
- // but for idle prompting, we want the flexibility of the other prompt toggles, and to respect them as per settings in the extension
- // need a detection for what the quiet prompt is being asked for...
-
- // Bail out early?
- if (quietToLoud !== true) {
- return lastMesString;
- }
- }
-
-
- // Get instruct mode line
- if (isInstruct && !isContinue) {
- const name = isImpersonate ? name1 : name2;
- lastMesString += formatInstructModePrompt(name, isImpersonate, promptBias, name1, name2);
- }
-
- // Get non-instruct impersonation line
- if (!isInstruct && isImpersonate && !isContinue) {
- const name = name1;
- if (!lastMesString.endsWith('\n')) {
- lastMesString += '\n';
- }
- lastMesString += name + ':';
- }
-
- // Add character's name
- // Force name append on continue (if not continuing on user message)
- if (!isInstruct && force_name2) {
- if (!lastMesString.endsWith('\n')) {
- lastMesString += '\n';
- }
- if (!isContinue || !(chat[chat.length - 1]?.is_user)) {
- lastMesString += `${name2}:`;
- }
- }
-
- return lastMesString;
- }
-
- // Clean up the already generated prompt for seamless addition
- function cleanupPromptCache(promptCache) {
- // Remove the first occurrance of character's name
- if (promptCache.trimStart().startsWith(`${name2}:`)) {
- promptCache = promptCache.replace(`${name2}:`, '').trimStart();
- }
-
- // Remove the first occurrance of prompt bias
- if (promptCache.trimStart().startsWith(promptBias)) {
- promptCache = promptCache.replace(promptBias, '');
- }
-
- // Add a space if prompt cache doesn't start with one
- if (!/^\s/.test(promptCache) && !isInstruct && !isContinue) {
- promptCache = ' ' + promptCache;
- }
-
- return promptCache;
- }
-
- function checkPromptSize() {
- console.debug('---checking Prompt size');
- setPromptString();
- const prompt = [
- storyString,
- mesExmString,
- mesSend.join(''),
- generatedPromptCache,
- allAnchors,
- quiet_prompt,
- ].join('').replace(/\r/gm, '');
- let thisPromptContextSize = getTokenCount(prompt, power_user.token_padding);
-
- if (thisPromptContextSize > this_max_context) { //if the prepared prompt is larger than the max context size...
- if (count_exm_add > 0) { // ..and we have example mesages..
- count_exm_add--; // remove the example messages...
- checkPromptSize(); // and try agin...
- } else if (mesSend.length > 0) { // if the chat history is longer than 0
- mesSend.shift(); // remove the first (oldest) chat entry..
- checkPromptSize(); // and check size again..
- } else {
- //end
- console.debug(`---mesSend.length = ${mesSend.length}`);
- }
- }
- }
-
- if (generatedPromptCache.length > 0 && main_api !== 'openai') {
- console.debug('---Generated Prompt Cache length: ' + generatedPromptCache.length);
- checkPromptSize();
- } else {
- console.debug('---calling setPromptString ' + generatedPromptCache.length);
- setPromptString();
- }
-
- // Fetches the combined prompt for both negative and positive prompts
- const cfgGuidanceScale = getGuidanceScale();
-
- // For prompt bit itemization
- let mesSendString = '';
-
- function getCombinedPrompt(isNegative) {
- // Only return if the guidance scale doesn't exist or the value is 1
- // Also don't return if constructing the neutral prompt
- if (isNegative && (!cfgGuidanceScale || cfgGuidanceScale?.value === 1)) {
- return;
- }
-
- // OAI has its own prompt manager. No need to do anything here
+ generatedPromptCache += cyclePrompt;
+ if (generatedPromptCache.length == 0 || type === 'continue') {
+ console.debug('generating prompt');
+ chatString = '';
+ arrMes = arrMes.reverse();
+ arrMes.forEach(function (item, i, arr) {// For added anchors and others
+ // OAI doesn't need all of this
if (main_api === 'openai') {
- return '';
+ return;
}
- // Deep clone
- let finalMesSend = structuredClone(mesSend);
-
- // TODO: Rewrite getExtensionPrompt to not require multiple for loops
- // Set all extension prompts where insertion depth > mesSend length
- if (finalMesSend.length) {
- for (let upperDepth = MAX_INJECTION_DEPTH; upperDepth >= finalMesSend.length; upperDepth--) {
- const upperAnchor = getExtensionPrompt(extension_prompt_types.IN_CHAT, upperDepth);
- if (upperAnchor && upperAnchor.length) {
- finalMesSend[0].extensionPrompts.push(upperAnchor);
- }
- }
+ // Cohee: I'm not even sure what this is for anymore
+ if (i === arrMes.length - 1 && type !== 'continue') {
+ item = item.replace(/\n?$/, '');
}
- finalMesSend.forEach((mesItem, index) => {
- if (index === 0) {
- return;
- }
+ mesSend[mesSend.length] = { message: item, extensionPrompts: [] };
+ });
+ }
- const anchorDepth = Math.abs(index - finalMesSend.length);
- // NOTE: Depth injected here!
- const extensionAnchor = getExtensionPrompt(extension_prompt_types.IN_CHAT, anchorDepth);
+ let mesExmString = '';
- if (anchorDepth >= 0 && extensionAnchor && extensionAnchor.length) {
- mesItem.extensionPrompts.push(extensionAnchor);
- }
- });
-
- // TODO: Move zero-depth anchor append to work like CFG and bias appends
- if (zeroDepthAnchor?.length && !isContinue) {
- console.debug(/\s/.test(finalMesSend[finalMesSend.length - 1].message.slice(-1)));
- finalMesSend[finalMesSend.length - 1].message +=
- /\s/.test(finalMesSend[finalMesSend.length - 1].message.slice(-1))
- ? zeroDepthAnchor
- : `${zeroDepthAnchor}`;
- }
-
- let cfgPrompt = {};
- if (cfgGuidanceScale && cfgGuidanceScale?.value !== 1) {
- cfgPrompt = getCfgPrompt(cfgGuidanceScale, isNegative);
- }
-
- if (cfgPrompt && cfgPrompt?.value) {
- if (cfgPrompt?.depth === 0) {
- finalMesSend[finalMesSend.length - 1].message +=
- /\s/.test(finalMesSend[finalMesSend.length - 1].message.slice(-1))
- ? cfgPrompt.value
- : ` ${cfgPrompt.value}`;
- } else {
- // TODO: Make all extension prompts use an array/splice method
- const lengthDiff = mesSend.length - cfgPrompt.depth;
- const cfgDepth = lengthDiff >= 0 ? lengthDiff : 0;
- finalMesSend[cfgDepth].extensionPrompts.push(`${cfgPrompt.value}\n`);
- }
- }
-
- // Add prompt bias after everything else
- // Always run with continue
- if (!isInstruct && !isImpersonate) {
- if (promptBias.trim().length !== 0) {
- finalMesSend[finalMesSend.length - 1].message +=
- /\s/.test(finalMesSend[finalMesSend.length - 1].message.slice(-1))
- ? promptBias.trimStart()
- : ` ${promptBias.trimStart()}`;
- }
- }
-
- // Prune from prompt cache if it exists
- if (generatedPromptCache.length !== 0) {
- generatedPromptCache = cleanupPromptCache(generatedPromptCache);
- }
-
- // Flattens the multiple prompt objects to a string.
- const combine = () => {
- // Right now, everything is suffixed with a newline
- mesSendString = finalMesSend.map((e) => `${e.extensionPrompts.join('')}${e.message}`).join('');
-
- // add a custom dingus (if defined)
- mesSendString = addChatsSeparator(mesSendString);
-
- // add chat preamble
- mesSendString = addChatsPreamble(mesSendString);
-
- let combinedPrompt = beforeScenarioAnchor +
- storyString +
- afterScenarioAnchor +
- mesExmString +
- mesSendString +
- generatedPromptCache;
-
- combinedPrompt = combinedPrompt.replace(/\r/gm, '');
-
- if (power_user.collapse_newlines) {
- combinedPrompt = collapseNewlines(combinedPrompt);
- }
-
- return combinedPrompt;
- };
-
- let data = {
- api: main_api,
- combinedPrompt: null,
- description,
- personality,
- persona,
- scenario,
- char: name2,
- user: name1,
- beforeScenarioAnchor,
- afterScenarioAnchor,
- mesExmString,
- finalMesSend,
- generatedPromptCache,
- main: system,
- jailbreak,
- naiPreamble: nai_settings.preamble,
- };
-
- // Before returning the combined prompt, give available context related information to all subscribers.
- eventSource.emitAndWait(event_types.GENERATE_BEFORE_COMBINE_PROMPTS, data);
-
- // If one or multiple subscribers return a value, forfeit the responsibillity of flattening the context.
- return !data.combinedPrompt ? combine() : data.combinedPrompt;
+ function setPromptString() {
+ if (main_api == 'openai') {
+ return;
}
- // Get the negative prompt first since it has the unmodified mesSend array
- let negativePrompt = main_api == 'textgenerationwebui' ? getCombinedPrompt(true) : undefined;
- let finalPrompt = getCombinedPrompt(false);
+ console.debug('--setting Prompt string');
+ mesExmString = pinExmString ?? mesExamplesArray.slice(0, count_exm_add).join('');
- // Include the entire guidance scale object
- const cfgValues = cfgGuidanceScale && cfgGuidanceScale?.value !== 1 ? ({ guidanceScale: cfgGuidanceScale, negativePrompt: negativePrompt }) : null;
-
- let maxLength = Number(amount_gen); // how many tokens the AI will be requested to generate
- let thisPromptBits = [];
-
- // TODO: Make this a switch
- if (main_api == 'koboldhorde' && horde_settings.auto_adjust_response_length) {
- maxLength = Math.min(maxLength, adjustedParams.maxLength);
- maxLength = Math.max(maxLength, MIN_LENGTH); // prevent validation errors
+ if (mesSend.length) {
+ mesSend[mesSend.length - 1].message = modifyLastPromptLine(mesSend[mesSend.length - 1].message);
}
+ }
- let generate_data;
- if (main_api == 'koboldhorde' || main_api == 'kobold') {
- generate_data = {
- prompt: finalPrompt,
- gui_settings: true,
- max_length: maxLength,
- max_context_length: max_context,
- };
+ function modifyLastPromptLine(lastMesString) {
+ //#########QUIET PROMPT STUFF PT2##############
- if (preset_settings != 'gui') {
- const isHorde = main_api == 'koboldhorde';
- const presetSettings = koboldai_settings[koboldai_setting_names[preset_settings]];
- const maxContext = (adjustedParams && horde_settings.auto_adjust_context_length) ? adjustedParams.maxContextLength : max_context;
- generate_data = getKoboldGenerationData(finalPrompt, presetSettings, maxLength, maxContext, isHorde, type);
- }
- }
- else if (main_api == 'textgenerationwebui') {
- generate_data = getTextGenGenerationData(finalPrompt, maxLength, isImpersonate, isContinue, cfgValues, type);
- }
- else if (main_api == 'novel') {
- const presetSettings = novelai_settings[novelai_setting_names[nai_settings.preset_settings_novel]];
- generate_data = getNovelGenerationData(finalPrompt, presetSettings, maxLength, isImpersonate, isContinue, cfgValues, type);
- }
- else if (main_api == 'openai') {
- let [prompt, counts] = await prepareOpenAIMessages({
- name2: name2,
- charDescription: description,
- charPersonality: personality,
- Scenario: scenario,
- worldInfoBefore: worldInfoBefore,
- worldInfoAfter: worldInfoAfter,
- extensionPrompts: extension_prompts,
- bias: promptBias,
- type: type,
- quietPrompt: quiet_prompt,
- quietImage: quietImage,
- cyclePrompt: cyclePrompt,
- systemPromptOverride: system,
- jailbreakPromptOverride: jailbreak,
- personaDescription: persona,
- messages: oaiMessages,
- messageExamples: oaiMessageExamples,
- }, dryRun);
- generate_data = { prompt: prompt };
+ // Add quiet generation prompt at depth 0
+ if (quiet_prompt && quiet_prompt.length) {
- // counts will return false if the user has not enabled the token breakdown feature
- if (counts) {
- parseTokenCounts(counts, thisPromptBits);
+ // here name1 is forced for all quiet prompts..why?
+ const name = name1;
+ //checks if we are in instruct, if so, formats the chat as such, otherwise just adds the quiet prompt
+ const quietAppend = isInstruct ? formatInstructModeChat(name, quiet_prompt, false, true, '', name1, name2, false) : `\n${quiet_prompt}`;
+
+ //This begins to fix quietPrompts (particularly /sysgen) for instruct
+ //previously instruct input sequence was being appended to the last chat message w/o '\n'
+ //and no output sequence was added after the input's content.
+ //TODO: respect output_sequence vs last_output_sequence settings
+ //TODO: decide how to prompt this to clarify who is talking 'Narrator', 'System', etc.
+ if (isInstruct) {
+ lastMesString += '\n' + quietAppend; // + power_user.instruct.output_sequence + '\n';
+ } else {
+ lastMesString += quietAppend;
}
- if (!dryRun) {
- setInContextMessages(openai_messages_count, type);
+
+ // Ross: bailing out early prevents quiet prompts from respecting other instruct prompt toggles
+ // for sysgen, SD, and summary this is desireable as it prevents the AI from responding as char..
+ // but for idle prompting, we want the flexibility of the other prompt toggles, and to respect them as per settings in the extension
+ // need a detection for what the quiet prompt is being asked for...
+
+ // Bail out early?
+ if (quietToLoud !== true) {
+ return lastMesString;
}
}
- return new Promise(async (resolve, reject) => {
- if (true === dryRun) return onSuccess({ error: 'dryRun' });
- if (power_user.console_log_prompts) {
- console.log(generate_data.prompt);
+ // Get instruct mode line
+ if (isInstruct && !isContinue) {
+ const name = isImpersonate ? name1 : name2;
+ lastMesString += formatInstructModePrompt(name, isImpersonate, promptBias, name1, name2);
+ }
+
+ // Get non-instruct impersonation line
+ if (!isInstruct && isImpersonate && !isContinue) {
+ const name = name1;
+ if (!lastMesString.endsWith('\n')) {
+ lastMesString += '\n';
}
+ lastMesString += name + ':';
+ }
- let generate_url = getGenerateUrl(main_api);
- console.debug('rungenerate calling API');
-
- showStopButton();
-
- //set array object for prompt token itemization of this message
- let currentArrayEntry = Number(thisPromptBits.length - 1);
- let additionalPromptStuff = {
- ...thisPromptBits[currentArrayEntry],
- rawPrompt: generate_data.prompt || generate_data.input,
- mesId: getNextMessageId(type),
- allAnchors: allAnchors,
- summarizeString: (extension_prompts['1_memory']?.value || ''),
- authorsNoteString: (extension_prompts['2_floating_prompt']?.value || ''),
- smartContextString: (extension_prompts['chromadb']?.value || ''),
- worldInfoString: worldInfoString,
- storyString: storyString,
- beforeScenarioAnchor: beforeScenarioAnchor,
- afterScenarioAnchor: afterScenarioAnchor,
- examplesString: examplesString,
- mesSendString: mesSendString,
- generatedPromptCache: generatedPromptCache,
- promptBias: promptBias,
- finalPrompt: finalPrompt,
- charDescription: description,
- charPersonality: personality,
- scenarioText: scenario,
- this_max_context: this_max_context,
- padding: power_user.token_padding,
- main_api: main_api,
- instruction: isInstruct ? substituteParams(power_user.prefer_character_prompt && system ? system : power_user.instruct.system_prompt) : '',
- userPersona: (power_user.persona_description || ''),
- };
-
- thisPromptBits = additionalPromptStuff;
-
- //console.log(thisPromptBits);
- const itemizedIndex = itemizedPrompts.findIndex((item) => item.mesId === thisPromptBits['mesId']);
-
- if (itemizedIndex !== -1) {
- itemizedPrompts[itemizedIndex] = thisPromptBits;
+ // Add character's name
+ // Force name append on continue (if not continuing on user message)
+ if (!isInstruct && force_name2) {
+ if (!lastMesString.endsWith('\n')) {
+ lastMesString += '\n';
}
- else {
- itemizedPrompts.push(thisPromptBits);
+ if (!isContinue || !(chat[chat.length - 1]?.is_user)) {
+ lastMesString += `${name2}:`;
}
+ }
- console.debug(`pushed prompt bits to itemizedPrompts array. Length is now: ${itemizedPrompts.length}`);
- /** @type {Promise} */
- let streamingGeneratorPromise = Promise.resolve();
+ return lastMesString;
+ }
- if (main_api == 'openai') {
- if (isStreamingEnabled() && type !== 'quiet') {
- streamingGeneratorPromise = sendOpenAIRequest(type, generate_data.prompt, streamingProcessor.abortController.signal);
- }
- else {
- sendOpenAIRequest(type, generate_data.prompt, abortController.signal).then(onSuccess).catch(onError);
+ // Clean up the already generated prompt for seamless addition
+ function cleanupPromptCache(promptCache) {
+ // Remove the first occurrance of character's name
+ if (promptCache.trimStart().startsWith(`${name2}:`)) {
+ promptCache = promptCache.replace(`${name2}:`, '').trimStart();
+ }
+
+ // Remove the first occurrance of prompt bias
+ if (promptCache.trimStart().startsWith(promptBias)) {
+ promptCache = promptCache.replace(promptBias, '');
+ }
+
+ // Add a space if prompt cache doesn't start with one
+ if (!/^\s/.test(promptCache) && !isInstruct && !isContinue) {
+ promptCache = ' ' + promptCache;
+ }
+
+ return promptCache;
+ }
+
+ function checkPromptSize() {
+ console.debug('---checking Prompt size');
+ setPromptString();
+ const prompt = [
+ storyString,
+ mesExmString,
+ mesSend.join(''),
+ generatedPromptCache,
+ allAnchors,
+ quiet_prompt,
+ ].join('').replace(/\r/gm, '');
+ let thisPromptContextSize = getTokenCount(prompt, power_user.token_padding);
+
+ if (thisPromptContextSize > this_max_context) { //if the prepared prompt is larger than the max context size...
+ if (count_exm_add > 0) { // ..and we have example mesages..
+ count_exm_add--; // remove the example messages...
+ checkPromptSize(); // and try agin...
+ } else if (mesSend.length > 0) { // if the chat history is longer than 0
+ mesSend.shift(); // remove the first (oldest) chat entry..
+ checkPromptSize(); // and check size again..
+ } else {
+ //end
+ console.debug(`---mesSend.length = ${mesSend.length}`);
+ }
+ }
+ }
+
+ if (generatedPromptCache.length > 0 && main_api !== 'openai') {
+ console.debug('---Generated Prompt Cache length: ' + generatedPromptCache.length);
+ checkPromptSize();
+ } else {
+ console.debug('---calling setPromptString ' + generatedPromptCache.length);
+ setPromptString();
+ }
+
+ // Fetches the combined prompt for both negative and positive prompts
+ const cfgGuidanceScale = getGuidanceScale();
+
+ // For prompt bit itemization
+ let mesSendString = '';
+
+ function getCombinedPrompt(isNegative) {
+ // Only return if the guidance scale doesn't exist or the value is 1
+ // Also don't return if constructing the neutral prompt
+ if (isNegative && (!cfgGuidanceScale || cfgGuidanceScale?.value === 1)) {
+ return;
+ }
+
+ // OAI has its own prompt manager. No need to do anything here
+ if (main_api === 'openai') {
+ return '';
+ }
+
+ // Deep clone
+ let finalMesSend = structuredClone(mesSend);
+
+ // TODO: Rewrite getExtensionPrompt to not require multiple for loops
+ // Set all extension prompts where insertion depth > mesSend length
+ if (finalMesSend.length) {
+ for (let upperDepth = MAX_INJECTION_DEPTH; upperDepth >= finalMesSend.length; upperDepth--) {
+ const upperAnchor = getExtensionPrompt(extension_prompt_types.IN_CHAT, upperDepth);
+ if (upperAnchor && upperAnchor.length) {
+ finalMesSend[0].extensionPrompts.push(upperAnchor);
}
}
- else if (main_api == 'koboldhorde') {
- generateHorde(finalPrompt, generate_data, abortController.signal, true).then(onSuccess).catch(onError);
- }
- else if (main_api == 'textgenerationwebui' && isStreamingEnabled() && type !== 'quiet') {
- streamingGeneratorPromise = generateTextGenWithStreaming(generate_data, streamingProcessor.abortController.signal);
- }
- else if (main_api == 'novel' && isStreamingEnabled() && type !== 'quiet') {
- streamingGeneratorPromise = generateNovelWithStreaming(generate_data, streamingProcessor.abortController.signal);
- }
- else if (main_api == 'kobold' && isStreamingEnabled() && type !== 'quiet') {
- streamingGeneratorPromise = generateKoboldWithStreaming(generate_data, streamingProcessor.abortController.signal);
- }
- else {
- try {
- const response = await fetch(generate_url, {
- method: 'POST',
- headers: getRequestHeaders(),
- cache: 'no-cache',
- body: JSON.stringify(generate_data),
- signal: abortController.signal,
- });
+ }
- if (!response.ok) {
- const error = await response.json();
- throw error;
- }
-
- const data = await response.json();
- onSuccess(data);
- } catch (error) {
- onError(error);
- }
+ finalMesSend.forEach((mesItem, index) => {
+ if (index === 0) {
+ return;
}
- if (isStreamingEnabled() && type !== 'quiet') {
- try {
- const streamingGenerator = await streamingGeneratorPromise;
- streamingProcessor.generator = streamingGenerator;
- hideSwipeButtons();
- let getMessage = await streamingProcessor.generate();
- let messageChunk = cleanUpMessage(getMessage, isImpersonate, isContinue, false);
+ const anchorDepth = Math.abs(index - finalMesSend.length);
+ // NOTE: Depth injected here!
+ const extensionAnchor = getExtensionPrompt(extension_prompt_types.IN_CHAT, anchorDepth);
- if (isContinue) {
- getMessage = continue_mag + getMessage;
- }
-
- if (streamingProcessor && !streamingProcessor.isStopped && streamingProcessor.isFinished) {
- await streamingProcessor.onFinishStreaming(streamingProcessor.messageId, getMessage);
- streamingProcessor = null;
- triggerAutoContinue(messageChunk, isImpersonate);
- }
- resolve();
- } catch (err) {
- onError(err);
- }
-
- }
-
- async function onSuccess(data) {
- let messageChunk = '';
-
- if (data.error == 'dryRun') {
- generatedPromptCache = '';
- resolve();
- return;
- }
-
- if (!data.error) {
- //const getData = await response.json();
- let getMessage = extractMessageFromData(data);
- let title = extractTitleFromData(data);
- kobold_horde_model = title;
-
- const swipes = extractMultiSwipes(data, type);
-
- messageChunk = cleanUpMessage(getMessage, isImpersonate, isContinue, false);
-
- if (isContinue) {
- getMessage = continue_mag + getMessage;
- }
-
- //Formating
- const displayIncomplete = type === 'quiet' && !quietToLoud;
- getMessage = cleanUpMessage(getMessage, isImpersonate, isContinue, displayIncomplete);
-
- if (getMessage.length > 0) {
- if (isImpersonate) {
- $('#send_textarea').val(getMessage).trigger('input');
- generatedPromptCache = '';
- await eventSource.emit(event_types.IMPERSONATE_READY, getMessage);
- }
- else if (type == 'quiet') {
- resolve(getMessage);
- }
- else {
- // Without streaming we'll be having a full message on continuation. Treat it as a last chunk.
- if (originalType !== 'continue') {
- ({ type, getMessage } = await saveReply(type, getMessage, false, title, swipes));
- }
- else {
- ({ type, getMessage } = await saveReply('appendFinal', getMessage, false, title, swipes));
- }
- }
-
- if (type !== 'quiet') {
- playMessageSound();
- }
- } else {
- // If maxLoops is not passed in (e.g. first time generating), set it to MAX_GENERATION_LOOPS
- maxLoops ??= MAX_GENERATION_LOOPS;
-
- if (maxLoops === 0) {
- reject(new Error('Generate circuit breaker interruption'));
- if (type !== 'quiet') {
- throwCircuitBreakerError();
- }
- return;
- }
-
- // regenerate with character speech reenforced
- // to make sure we leave on swipe type while also adding the name2 appendage
- delay(1000).then(async () => {
- // The first await is for waiting for the generate to start. The second one is waiting for it to finish
- const result = await await Generate(type, { automatic_trigger, force_name2: true, quiet_prompt, skipWIAN, force_chid, maxLoops: maxLoops - 1 });
- resolve(result);
- });
- return;
- }
-
- if (power_user.auto_swipe) {
- console.debug('checking for autoswipeblacklist on non-streaming message');
- function containsBlacklistedWords(getMessage, blacklist, threshold) {
- console.debug('checking blacklisted words');
- const regex = new RegExp(`\\b(${blacklist.join('|')})\\b`, 'gi');
- const matches = getMessage.match(regex) || [];
- return matches.length >= threshold;
- }
-
- const generatedTextFiltered = (getMessage) => {
- if (power_user.auto_swipe_blacklist_threshold) {
- if (containsBlacklistedWords(getMessage, power_user.auto_swipe_blacklist, power_user.auto_swipe_blacklist_threshold)) {
- console.debug('Generated text has blacklisted words');
- return true;
- }
- }
-
- return false;
- };
- if (generatedTextFiltered(getMessage)) {
- console.debug('swiping right automatically');
- is_send_press = false;
- swipe_right();
- // TODO: do we want to resolve after an auto-swipe?
- resolve();
- return;
- }
- }
- } else {
- generatedPromptCache = '';
-
- if (data?.response) {
- toastr.error(data.response, 'API Error');
- }
- reject(data.response);
- }
-
- console.debug('/api/chats/save called by /Generate');
- await saveChatConditional();
- unblockGeneration();
- streamingProcessor = null;
-
- if (type !== 'quiet') {
- triggerAutoContinue(messageChunk, isImpersonate);
- }
- resolve();
- }
-
- function onError(exception) {
- if (typeof exception?.error?.message === 'string') {
- toastr.error(exception.error.message, 'Error', { timeOut: 10000, extendedTimeOut: 20000 });
- }
-
- reject(exception);
- unblockGeneration();
- console.log(exception);
- streamingProcessor = null;
+ if (anchorDepth >= 0 && extensionAnchor && extensionAnchor.length) {
+ mesItem.extensionPrompts.push(extensionAnchor);
}
});
- } //rungenerate ends
+ // TODO: Move zero-depth anchor append to work like CFG and bias appends
+ if (zeroDepthAnchor?.length && !isContinue) {
+ console.debug(/\s/.test(finalMesSend[finalMesSend.length - 1].message.slice(-1)));
+ finalMesSend[finalMesSend.length - 1].message +=
+ /\s/.test(finalMesSend[finalMesSend.length - 1].message.slice(-1))
+ ? zeroDepthAnchor
+ : `${zeroDepthAnchor}`;
+ }
+
+ let cfgPrompt = {};
+ if (cfgGuidanceScale && cfgGuidanceScale?.value !== 1) {
+ cfgPrompt = getCfgPrompt(cfgGuidanceScale, isNegative);
+ }
+
+ if (cfgPrompt && cfgPrompt?.value) {
+ if (cfgPrompt?.depth === 0) {
+ finalMesSend[finalMesSend.length - 1].message +=
+ /\s/.test(finalMesSend[finalMesSend.length - 1].message.slice(-1))
+ ? cfgPrompt.value
+ : ` ${cfgPrompt.value}`;
+ } else {
+ // TODO: Make all extension prompts use an array/splice method
+ const lengthDiff = mesSend.length - cfgPrompt.depth;
+ const cfgDepth = lengthDiff >= 0 ? lengthDiff : 0;
+ finalMesSend[cfgDepth].extensionPrompts.push(`${cfgPrompt.value}\n`);
+ }
+ }
+
+ // Add prompt bias after everything else
+ // Always run with continue
+ if (!isInstruct && !isImpersonate) {
+ if (promptBias.trim().length !== 0) {
+ finalMesSend[finalMesSend.length - 1].message +=
+ /\s/.test(finalMesSend[finalMesSend.length - 1].message.slice(-1))
+ ? promptBias.trimStart()
+ : ` ${promptBias.trimStart()}`;
+ }
+ }
+
+ // Prune from prompt cache if it exists
+ if (generatedPromptCache.length !== 0) {
+ generatedPromptCache = cleanupPromptCache(generatedPromptCache);
+ }
+
+ // Flattens the multiple prompt objects to a string.
+ const combine = () => {
+ // Right now, everything is suffixed with a newline
+ mesSendString = finalMesSend.map((e) => `${e.extensionPrompts.join('')}${e.message}`).join('');
+
+ // add a custom dingus (if defined)
+ mesSendString = addChatsSeparator(mesSendString);
+
+ // add chat preamble
+ mesSendString = addChatsPreamble(mesSendString);
+
+ let combinedPrompt = beforeScenarioAnchor +
+ storyString +
+ afterScenarioAnchor +
+ mesExmString +
+ mesSendString +
+ generatedPromptCache;
+
+ combinedPrompt = combinedPrompt.replace(/\r/gm, '');
+
+ if (power_user.collapse_newlines) {
+ combinedPrompt = collapseNewlines(combinedPrompt);
+ }
+
+ return combinedPrompt;
+ };
+
+ let data = {
+ api: main_api,
+ combinedPrompt: null,
+ description,
+ personality,
+ persona,
+ scenario,
+ char: name2,
+ user: name1,
+ beforeScenarioAnchor,
+ afterScenarioAnchor,
+ mesExmString,
+ finalMesSend,
+ generatedPromptCache,
+ main: system,
+ jailbreak,
+ naiPreamble: nai_settings.preamble,
+ };
+
+ // Before returning the combined prompt, give available context related information to all subscribers.
+ eventSource.emitAndWait(event_types.GENERATE_BEFORE_COMBINE_PROMPTS, data);
+
+ // If one or multiple subscribers return a value, forfeit the responsibillity of flattening the context.
+ return !data.combinedPrompt ? combine() : data.combinedPrompt;
+ }
+
+ // Get the negative prompt first since it has the unmodified mesSend array
+ let negativePrompt = main_api == 'textgenerationwebui' ? getCombinedPrompt(true) : undefined;
+ let finalPrompt = getCombinedPrompt(false);
+
+ // Include the entire guidance scale object
+ const cfgValues = cfgGuidanceScale && cfgGuidanceScale?.value !== 1 ? ({ guidanceScale: cfgGuidanceScale, negativePrompt: negativePrompt }) : null;
+
+ let maxLength = Number(amount_gen); // how many tokens the AI will be requested to generate
+ let thisPromptBits = [];
+
+ // TODO: Make this a switch
+ if (main_api == 'koboldhorde' && horde_settings.auto_adjust_response_length) {
+ maxLength = Math.min(maxLength, adjustedParams.maxLength);
+ maxLength = Math.max(maxLength, MIN_LENGTH); // prevent validation errors
+ }
+
+ let generate_data;
+ if (main_api == 'koboldhorde' || main_api == 'kobold') {
+ generate_data = {
+ prompt: finalPrompt,
+ gui_settings: true,
+ max_length: maxLength,
+ max_context_length: max_context,
+ };
+
+ if (preset_settings != 'gui') {
+ const isHorde = main_api == 'koboldhorde';
+ const presetSettings = koboldai_settings[koboldai_setting_names[preset_settings]];
+ const maxContext = (adjustedParams && horde_settings.auto_adjust_context_length) ? adjustedParams.maxContextLength : max_context;
+ generate_data = getKoboldGenerationData(finalPrompt, presetSettings, maxLength, maxContext, isHorde, type);
+ }
+ }
+ else if (main_api == 'textgenerationwebui') {
+ generate_data = getTextGenGenerationData(finalPrompt, maxLength, isImpersonate, isContinue, cfgValues, type);
+ }
+ else if (main_api == 'novel') {
+ const presetSettings = novelai_settings[novelai_setting_names[nai_settings.preset_settings_novel]];
+ generate_data = getNovelGenerationData(finalPrompt, presetSettings, maxLength, isImpersonate, isContinue, cfgValues, type);
+ }
+ else if (main_api == 'openai') {
+ let [prompt, counts] = await prepareOpenAIMessages({
+ name2: name2,
+ charDescription: description,
+ charPersonality: personality,
+ Scenario: scenario,
+ worldInfoBefore: worldInfoBefore,
+ worldInfoAfter: worldInfoAfter,
+ extensionPrompts: extension_prompts,
+ bias: promptBias,
+ type: type,
+ quietPrompt: quiet_prompt,
+ quietImage: quietImage,
+ cyclePrompt: cyclePrompt,
+ systemPromptOverride: system,
+ jailbreakPromptOverride: jailbreak,
+ personaDescription: persona,
+ messages: oaiMessages,
+ messageExamples: oaiMessageExamples,
+ }, dryRun);
+ generate_data = { prompt: prompt };
+
+ // counts will return false if the user has not enabled the token breakdown feature
+ if (counts) {
+ parseTokenCounts(counts, thisPromptBits);
+ }
+
+ if (!dryRun) {
+ setInContextMessages(openai_messages_count, type);
+ }
+ }
+
+ async function finishGenerating() {
+ if (dryRun) return { error: 'dryRun' };
+
+ if (power_user.console_log_prompts) {
+ console.log(generate_data.prompt);
+ }
+
+ console.debug('rungenerate calling API');
+
+ showStopButton();
+
+ //set array object for prompt token itemization of this message
+ let currentArrayEntry = Number(thisPromptBits.length - 1);
+ let additionalPromptStuff = {
+ ...thisPromptBits[currentArrayEntry],
+ rawPrompt: generate_data.prompt || generate_data.input,
+ mesId: getNextMessageId(type),
+ allAnchors: allAnchors,
+ summarizeString: (extension_prompts['1_memory']?.value || ''),
+ authorsNoteString: (extension_prompts['2_floating_prompt']?.value || ''),
+ smartContextString: (extension_prompts['chromadb']?.value || ''),
+ worldInfoString: worldInfoString,
+ storyString: storyString,
+ beforeScenarioAnchor: beforeScenarioAnchor,
+ afterScenarioAnchor: afterScenarioAnchor,
+ examplesString: examplesString,
+ mesSendString: mesSendString,
+ generatedPromptCache: generatedPromptCache,
+ promptBias: promptBias,
+ finalPrompt: finalPrompt,
+ charDescription: description,
+ charPersonality: personality,
+ scenarioText: scenario,
+ this_max_context: this_max_context,
+ padding: power_user.token_padding,
+ main_api: main_api,
+ instruction: isInstruct ? substituteParams(power_user.prefer_character_prompt && system ? system : power_user.instruct.system_prompt) : '',
+ userPersona: (power_user.persona_description || ''),
+ };
+
+ thisPromptBits = additionalPromptStuff;
+
+ //console.log(thisPromptBits);
+ const itemizedIndex = itemizedPrompts.findIndex((item) => item.mesId === thisPromptBits['mesId']);
+
+ if (itemizedIndex !== -1) {
+ itemizedPrompts[itemizedIndex] = thisPromptBits;
+ }
+ else {
+ itemizedPrompts.push(thisPromptBits);
+ }
+
+ console.debug(`pushed prompt bits to itemizedPrompts array. Length is now: ${itemizedPrompts.length}`);
+
+ if (isStreamingEnabled() && type !== 'quiet') {
+ streamingProcessor = new StreamingProcessor(type, force_name2, generation_started, message_already_generated);
+ if (isContinue) {
+ // Save reply does add cycle text to the prompt, so it's not needed here
+ streamingProcessor.firstMessageText = '';
+ }
+
+ streamingProcessor.generator = await sendStreamingRequest(type, generate_data);
+
+ hideSwipeButtons();
+ let getMessage = await streamingProcessor.generate();
+ let messageChunk = cleanUpMessage(getMessage, isImpersonate, isContinue, false);
+
+ if (isContinue) {
+ getMessage = continue_mag + getMessage;
+ }
+
+ if (streamingProcessor && !streamingProcessor.isStopped && streamingProcessor.isFinished) {
+ await streamingProcessor.onFinishStreaming(streamingProcessor.messageId, getMessage);
+ streamingProcessor = null;
+ triggerAutoContinue(messageChunk, isImpersonate);
+ }
+ } else {
+ return await sendGenerationRequest(type, generate_data);
+ }
+ }
+
+ return finishGenerating().then(onSuccess, onError);
+
+ async function onSuccess(data) {
+ if (!data) return;
+ let messageChunk = '';
+
+ if (data.error == 'dryRun') {
+ generatedPromptCache = '';
+ return;
+ }
+
+ if (!data.error) {
+ //const getData = await response.json();
+ let getMessage = extractMessageFromData(data);
+ let title = extractTitleFromData(data);
+ kobold_horde_model = title;
+
+ const swipes = extractMultiSwipes(data, type);
+
+ messageChunk = cleanUpMessage(getMessage, isImpersonate, isContinue, false);
+
+ if (isContinue) {
+ getMessage = continue_mag + getMessage;
+ }
+
+ //Formating
+ const displayIncomplete = type === 'quiet' && !quietToLoud;
+ getMessage = cleanUpMessage(getMessage, isImpersonate, isContinue, displayIncomplete);
+
+ if (getMessage.length > 0) {
+ if (isImpersonate) {
+ $('#send_textarea').val(getMessage).trigger('input');
+ generatedPromptCache = '';
+ await eventSource.emit(event_types.IMPERSONATE_READY, getMessage);
+ }
+ else if (type == 'quiet') {
+ unblockGeneration();
+ return getMessage;
+ }
+ else {
+ // Without streaming we'll be having a full message on continuation. Treat it as a last chunk.
+ if (originalType !== 'continue') {
+ ({ type, getMessage } = await saveReply(type, getMessage, false, title, swipes));
+ }
+ else {
+ ({ type, getMessage } = await saveReply('appendFinal', getMessage, false, title, swipes));
+ }
+ }
+
+ if (type !== 'quiet') {
+ playMessageSound();
+ }
+ } else {
+ // If maxLoops is not passed in (e.g. first time generating), set it to MAX_GENERATION_LOOPS
+ maxLoops ??= MAX_GENERATION_LOOPS;
+
+ if (maxLoops === 0) {
+ if (type !== 'quiet') {
+ throwCircuitBreakerError();
+ }
+ throw new Error('Generate circuit breaker interruption');
+ }
+
+ // regenerate with character speech reenforced
+ // to make sure we leave on swipe type while also adding the name2 appendage
+ await delay(1000);
+ // The first await is for waiting for the generate to start. The second one is waiting for it to finish
+ const result = await await Generate(type, { automatic_trigger, force_name2: true, quiet_prompt, skipWIAN, force_chid, maxLoops: maxLoops - 1 });
+ return result;
+ }
+
+ if (power_user.auto_swipe) {
+ console.debug('checking for autoswipeblacklist on non-streaming message');
+ function containsBlacklistedWords(getMessage, blacklist, threshold) {
+ console.debug('checking blacklisted words');
+ const regex = new RegExp(`\\b(${blacklist.join('|')})\\b`, 'gi');
+ const matches = getMessage.match(regex) || [];
+ return matches.length >= threshold;
+ }
+
+ const generatedTextFiltered = (getMessage) => {
+ if (power_user.auto_swipe_blacklist_threshold) {
+ if (containsBlacklistedWords(getMessage, power_user.auto_swipe_blacklist, power_user.auto_swipe_blacklist_threshold)) {
+ console.debug('Generated text has blacklisted words');
+ return true;
+ }
+ }
+
+ return false;
+ };
+ if (generatedTextFiltered(getMessage)) {
+ console.debug('swiping right automatically');
+ is_send_press = false;
+ swipe_right();
+ // TODO: do we want to resolve after an auto-swipe?
+ return;
+ }
+ }
+ } else {
+ generatedPromptCache = '';
+
+ if (data?.response) {
+ toastr.error(data.response, 'API Error');
+ }
+ throw data?.response;
+ }
+
+ console.debug('/api/chats/save called by /Generate');
+ await saveChatConditional();
+ unblockGeneration();
+ streamingProcessor = null;
+
+ if (type !== 'quiet') {
+ triggerAutoContinue(messageChunk, isImpersonate);
+ }
+ }
+
+ function onError(exception) {
+ if (typeof exception?.error?.message === 'string') {
+ toastr.error(exception.error.message, 'Error', { timeOut: 10000, extendedTimeOut: 20000 });
+ }
+
+ unblockGeneration();
+ console.log(exception);
+ streamingProcessor = null;
+ throw exception;
+ }
} else { //generate's primary loop ends, after this is error handling for no-connection or safety-id
if (this_chid === undefined || this_chid === 'invalid-safety-id') {
toastr.warning('Сharacter is not selected');
}
is_send_press = false;
}
-
- //console.log('generate ending');
-} //generate ends
+}
function flushWIDepthInjections() {
//prevent custom depth WI entries (which have unique random key names) from duplicating
@@ -4165,13 +4086,16 @@ function formatMessageHistoryItem(chatItem, isInstruct, forceOutputSequence) {
textResult = formatInstructModeChat(itemName, chatItem.mes, chatItem.is_user, isNarratorType, chatItem.force_avatar, name1, name2, forceOutputSequence);
}
- textResult = replaceBiasMarkup(textResult);
-
return textResult;
}
-export function replaceBiasMarkup(str) {
- return (str ?? '').replace(/\{\{[\s\S]*?\}\}/gm, '');
+/**
+ * Removes all {{macros}} from a string.
+ * @param {string} str String to remove macros from.
+ * @returns {string} String with macros removed.
+ */
+export function removeMacros(str) {
+ return (str ?? '').replace(/\{\{[\s\S]*?\}\}/gm, '').trim();
}
/**
@@ -4481,22 +4405,82 @@ function setInContextMessages(lastmsg, type) {
}
}
-function getGenerateUrl(api) {
- let generate_url = '';
- if (api == 'kobold') {
- generate_url = '/api/backends/kobold/generate';
- } else if (api == 'textgenerationwebui') {
- generate_url = '/api/backends/text-completions/generate';
- } else if (api == 'novel') {
- generate_url = '/api/novelai/generate';
+/**
+ * Sends a non-streaming request to the API.
+ * @param {string} type Generation type
+ * @param {object} data Generation data
+ * @returns {Promise