Simplify example message handling

maver 2023-06-11 17:27:28 +02:00
parent 77e183f5dd
commit 05f7e5677d
1 changed file with 7 additions and 65 deletions

@@ -425,11 +425,14 @@ async function prepareOpenAIMessages({ name2, charDescription, charPersonality,
     if (bias && bias.trim().length) chatCompletion.add(biasMessage);
 
     // Handle chat examples
-    // ToDo: Update dialogueExamples prompt with only the token count that's actually sent.
-    const exampleMessages = prepareExampleMessages(openai_msgs, openai_msgs_example, power_user.pin_examples);
-    if (exampleMessages.length) {
+    if (openai_msgs_example.length) {
+        const exampleMessagesFlattened = openai_msgs_example.reduce((messages, prompts) => {
+            messages.push(prompts[0]);
+            return messages;
+        }, []);
+
         chatCompletion.replace('newExampleChat', newChatMessage)
-        chatCompletion.replace('dialogueExamples', exampleMessages);
+        chatCompletion.replace('dialogueExamples', exampleMessagesFlattened);
     }
 
     // Handle quiet prompt
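
In isolation, the added reduce keeps only the first message of each example block. Below is a minimal standalone sketch of that step; the sample data is invented for illustration, and only the reduce itself comes from the commit:

// Each entry of openai_msgs_example is assumed to be an array of chat
// messages; the reduce keeps just the first message of every block.
const openai_msgs_example = [
    [{ role: 'system', content: 'example A, message 1' }, { role: 'system', content: 'example A, message 2' }],
    [{ role: 'system', content: 'example B, message 1' }],
];

const exampleMessagesFlattened = openai_msgs_example.reduce((messages, prompts) => {
    messages.push(prompts[0]);
    return messages;
}, []);

console.log(exampleMessagesFlattened.map(m => m.content));
// => [ 'example A, message 1', 'example B, message 1' ]

The same result could be written as openai_msgs_example.map(prompts => prompts[0]). Note the behavior change: the removed prepareExampleMessages pushed whole example blocks, while this keeps one message per block.
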
@@ -478,67 +481,6 @@ function getGroupMembers(activeGroup)
         await delay(1);
     }
 
-    if (quietPrompt) {
-        const quietPromptMessage = { role: 'system', content: quietPrompt };
-        total_count += handler_instance.count([quietPromptMessage], true, 'quiet');
-        openai_msgs.push(quietPromptMessage);
-    }
-
-    if (isImpersonate) {
-        const impersonateMessage = { "role": "system", "content": substituteParams(oai_settings.impersonation_prompt) };
-        openai_msgs.push(impersonateMessage);
-
-        total_count += handler_instance.count([impersonateMessage], true, 'impersonate');
-        await delay(1);
-    }
-
-    if (type == 'continue') {
-        const continueNudge = { "role": "system", "content": stringFormat('[Continue the following message. Do not include ANY parts of the original message. Use capitalization and punctuation as if your reply is a part of the original message:\n\n{0}]', cyclePrompt || '') };
-        openai_msgs.push(continueNudge);
-
-        total_count += handler_instance.count([continueNudge], true, 'continue');
-        await delay(1);
-    }
-
-function prepareExampleMessages(messages, exampleMessages, includeAll = false, ) {
-    // The user wants to always have all example messages in the context
-    let examples_tosend = [];
-    let total_count = 0;
-    const new_chat_msg = {role: 'system', content: '[Start new chat]'}
-
-    if (includeAll) {
-        // first we send *all* example messages
-        // we don't check their token size since if it's bigger than the context, the user is fucked anyway
-        // and should've have selected that option (maybe have some warning idk, too hard to add)
-        for (const element of exampleMessages) {
-            // get the current example block with multiple user/bot messages
-            let example_block = element;
-            for (const example of example_block) {
-                // add all the messages from the example
-                examples_tosend.push(example);
-            }
-        }
-    } else {
-        // each example block contains multiple user/bot messages
-        for (let example_block of exampleMessages) {
-            if (example_block.length == 0) {
-                continue;
-            }
-
-            // add the block only if there is enough space for all its messages
-            const example_count = countTokens(example_block)
-            if ((total_count + example_count) < (oai_settings.openai_max_context - oai_settings.openai_max_tokens)) {
-                examples_tosend.push(...example_block)
-            } else {
-                // early break since more examples probably won't fit anyway
-                break;
-            }
-        }
-    }
-
-    return examples_tosend;
-}
-
 function tryParseStreamingError(response, decoded) {
     try {
         const data = JSON.parse(decoded);
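
For contrast, the token-budget selection that the removed prepareExampleMessages performed when pin_examples was off amounts to the sketch below. Names follow the deleted code, but the compact body is mine. One detail worth flagging: the deleted loop never incremented total_count, so its budget check only ever measured a single block against the limit; the sketch accumulates the count, which appears to be the intent.

// Keep whole example blocks, in order, while they still fit in the budget
// (context size minus the tokens reserved for the response).
// countTokens is passed in so the sketch stays self-contained.
function selectExampleBlocks(exampleBlocks, maxContext, maxTokens, countTokens) {
    const selected = [];
    let total = 0;
    for (const block of exampleBlocks) {
        if (block.length === 0) continue;
        const cost = countTokens(block);
        // later blocks probably won't fit either, so stop at the first miss
        if (total + cost >= maxContext - maxTokens) break;
        selected.push(...block);
        total += cost;
    }
    return selected;
}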