diff --git a/public/script.js b/public/script.js index 677918eb5..439c9799e 100644 --- a/public/script.js +++ b/public/script.js @@ -1788,7 +1788,7 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject, if (main_api === 'openai') { message_already_generated = ''; // OpenAI doesn't have multigen - setOpenAIMessages(coreChat, quiet_prompt); + setOpenAIMessages(coreChat); setOpenAIMessageExamples(mesExamplesArray); } @@ -1827,27 +1827,8 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject, break; } - let charName = selected_group ? coreChat[j].name : name2; - let this_mes_ch_name = ''; - if (coreChat[j]['is_user']) { - this_mes_ch_name = coreChat[j]['name']; - } else { - this_mes_ch_name = charName; - } - if (coreChat[j]['is_name'] || selected_group) { - chat2[i] = this_mes_ch_name + ': ' + coreChat[j]['mes'] + '\n'; - } else { - chat2[i] = coreChat[j]['mes'] + '\n'; - } - - if (isInstruct) { - chat2[i] = formatInstructModeChat(this_mes_ch_name, coreChat[j]['mes'], coreChat[j]['is_user']); - } - - // replace bias markup - chat2[i] = (chat2[i] ?? '').replace(/{{(\*?.*\*?)}}/g, ''); + chat2[i] = formatMessageHistoryItem(coreChat[j], isInstruct); } - //chat2 = chat2.reverse(); // Determine token limit let this_max_context = getMaxContextSize(); @@ -1867,8 +1848,6 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject, } } - console.log(); - // Extension added strings const allAnchors = getAllExtensionPrompts(); const afterScenarioAnchor = getExtensionPrompt(extension_prompt_types.AFTER_SCENARIO); @@ -2025,7 +2004,7 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject, // Add quiet generation prompt at depth 0 if (isBottom && quiet_prompt && quiet_prompt.length) { const name = is_pygmalion ? 'You' : name1; - const quietAppend = isInstruct ? formatInstructModeChat(name, quiet_prompt, true) : `\n${name}: ${quiet_prompt}`; + const quietAppend = isInstruct ? 
function formatMessageHistoryItem(chatItem, isInstruct) {
    // True for messages injected via the /sys narrator command — they render
    // as plain system text with no speaker name prepended.
    const narrator = chatItem?.extra?.type === system_message_types.NARRATOR;
    // In group chats every message keeps its own author; otherwise the bot is name2.
    const botName = selected_group ? chatItem.name : name2;
    const speaker = chatItem.is_user ? chatItem.name : botName;

    let formatted;
    if (isInstruct) {
        // Instruct mode applies its own input/output sequences and wrapping.
        formatted = formatInstructModeChat(speaker, chatItem.mes, chatItem.is_user, narrator);
    } else if ((chatItem.is_name || selected_group) && !narrator) {
        formatted = `${speaker}: ${chatItem.mes}\n`;
    } else {
        formatted = `${chatItem.mes}\n`;
    }

    // Strip bias markup such as {{*text*}}.
    // NOTE(review): the greedy `.*` will also strip text BETWEEN two markers on
    // one line; kept as-is for consistency with setOpenAIMessages — confirm intent.
    return (formatted ?? '').replace(/{{(\*?.*\*?)}}/g, '');
}
for (let i = 0; i < 100; i++) { const anchor = getExtensionPrompt(extension_prompt_types.IN_CHAT, i); @@ -189,10 +196,6 @@ function setOpenAIMessages(chat, quietPrompt) { openai_msgs.splice(i, 0, { "role": 'system', 'content': anchor.trim() }) } } - - if (quietPrompt) { - openai_msgs.splice(0, 0, { role: 'system', content: quietPrompt }); - } } function setOpenAIMessageExamples(mesExamplesArray) { @@ -277,7 +280,7 @@ function formatWorldInfo(value) { return `[Details of the fictional world the RP is set in:\n${value}]\n`; } -async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldInfoAfter, extensionPrompt, bias, type) { +async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldInfoAfter, extensionPrompt, bias, type, quietPrompt) { const isImpersonate = type == "impersonate"; let this_max_context = oai_settings.openai_max_context; let nsfw_toggle_prompt = ""; @@ -355,6 +358,12 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI await delay(1); } + if (quietPrompt) { + const quietPromptMessage = { role: 'system', content: quietPrompt }; + total_count += handler_instance.count([quietPromptMessage], true, 'quiet'); + openai_msgs.push(quietPromptMessage); + } + if (isImpersonate) { const impersonateMessage = { "role": "system", "content": substituteParams(oai_settings.impersonation_prompt) }; openai_msgs.push(impersonateMessage); @@ -372,8 +381,6 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI // get the current example block with multiple user/bot messages let example_block = element; // add the first message from the user to tell the model that it's a new dialogue - // TODO: instead of role user content use role system name example_user - // message from the user so the model doesn't confuse the context (maybe, I just think that this should be done) if (example_block.length != 0) { examples_tosend.push(new_chat_msg); } diff --git 
a/public/scripts/power-user.js b/public/scripts/power-user.js index 2e448e684..ba2fabb4c 100644 --- a/public/scripts/power-user.js +++ b/public/scripts/power-user.js @@ -638,9 +638,9 @@ function loadInstructMode() { }); } -export function formatInstructModeChat(name, mes, isUser) { - const includeNames = power_user.instruct.names || !!selected_group; - const sequence = isUser ? power_user.instruct.input_sequence : power_user.instruct.output_sequence; +export function formatInstructModeChat(name, mes, isUser, isNarrator) { + const includeNames = isNarrator ? false : power_user.instruct.names || !!selected_group; + const sequence = (isUser || isNarrator) ? power_user.instruct.input_sequence : power_user.instruct.output_sequence; const separator = power_user.instruct.wrap ? '\n' : ''; const textArray = includeNames ? [sequence, `${name}: ${mes}`, separator] : [sequence, mes, separator]; const text = textArray.filter(x => x).join(separator); diff --git a/public/scripts/slash-commands.js b/public/scripts/slash-commands.js index 421c7f658..b3168c92d 100644 --- a/public/scripts/slash-commands.js +++ b/public/scripts/slash-commands.js @@ -1,6 +1,8 @@ import { addOneMessage, chat, + chat_metadata, + saveChatConditional, sendSystemMessage, system_avatar, system_message_types @@ -79,15 +81,25 @@ const getSlashCommandsHelp = parser.getHelpString.bind(parser); parser.addCommand('help', helpCommandCallback, ['?'], ' – displays this help message', true, true); parser.addCommand('bg', setBackgroundCallback, ['background'], '(filename) – sets a background according to filename, partial names allowed, will set the first one alphebetically if multiple files begin with the provided argument string', false, true); -parser.addCommand('sys', sendNarratorMessage, [], ' - sends message as a narrator character', false, true); +parser.addCommand('sys', sendNarratorMessage, [], ' – sends message as a system narrator', false, true); +parser.addCommand('sysname', setNarratorName, [], '(name) – 
function setNarratorName(_, text) {
    // Persist the narrator display name for this chat. An empty or omitted
    // argument deliberately falls back to the default ("System"), per the
    // /sysname help text ("Leave empty to reset") — hence || rather than ??.
    const newName = text || NARRATOR_NAME_DEFAULT;
    chat_metadata[NARRATOR_NAME_KEY] = newName;
    saveChatConditional();
}