From aae934a84937a7219ca79f79a8562301e8b66ea2 Mon Sep 17 00:00:00 2001
From: Carsten Kragelund
Date: Sat, 28 Sep 2024 22:36:56 +0000
Subject: [PATCH] fix: move prefill continuation handling to populateChatHistory

---
 public/scripts/openai.js | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/public/scripts/openai.js b/public/scripts/openai.js
index df798132b..5a45b3fbe 100644
--- a/public/scripts/openai.js
+++ b/public/scripts/openai.js
@@ -724,6 +724,12 @@ async function populateChatHistory(messages, prompts, chatCompletion, type = nul
 
         if (chatCompletion.canAfford(chatMessage)) {
             if (type === 'continue' && oai_settings.continue_prefill && chatPrompt === firstNonInjected) {
+                // In case we are using continue_prefill and the latest message is an assistant message, we want to prepend the user's assistant prefill to the message
+                if (chatPrompt.role === 'assistant') {
+                    const collection = new MessageCollection('continuePrefill', new Message(chatMessage.role, substituteParams(oai_settings.assistant_prefill + '\n\n') + chatMessage.content, chatMessage.identifier));
+                    chatCompletion.add(collection, -1);
+                    continue;
+                }
                 const collection = new MessageCollection('continuePrefill', chatMessage);
                 chatCompletion.add(collection, -1);
                 continue;
@@ -1771,7 +1777,7 @@ async function sendOpenAIRequest(type, messages, signal) {
         generate_data['stop'] = getCustomStoppingStrings(); // Claude shouldn't have limits on stop strings.
         generate_data['human_sysprompt_message'] = substituteParams(oai_settings.human_sysprompt_message);
         // Don't add a prefill on quiet gens (summarization)
-        if (!isQuiet) {
+        if (!isQuiet && !isContinue) {
             generate_data['assistant_prefill'] = isImpersonate ? substituteParams(oai_settings.assistant_impersonation) : substituteParams(oai_settings.assistant_prefill);
         }
     }
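Note (not part of the patch): a minimal sketch of what the first hunk does, assuming a plain-object message shape. buildContinueMessage is a hypothetical helper used only for illustration; the real code builds a Message, wraps it in a MessageCollection, runs the prefill through substituteParams, and adds it to the ChatCompletion as shown in the hunk above.

    // Hypothetical stand-in for the patched branch in populateChatHistory:
    // prepend the assistant prefill only when the message being continued
    // has the assistant role; otherwise pass the message through unchanged.
    function buildContinueMessage(lastMessage, assistantPrefill) {
        if (lastMessage.role !== 'assistant') {
            return { ...lastMessage };
        }
        return {
            ...lastMessage,
            // Same concatenation order as the patch: prefill, blank line, original content
            content: assistantPrefill + '\n\n' + lastMessage.content,
        };
    }

    // Example: continuing an assistant reply with a prefill of "Sure,"
    const continued = buildContinueMessage(
        { role: 'assistant', content: 'The dragon circled the tower once more.' },
        'Sure,',
    );
    console.log(continued.content); // "Sure,\n\nThe dragon circled the tower once more."

Combined with the second hunk, which stops sending generate_data['assistant_prefill'] when isContinue is set, the prefill should appear in the request exactly once, inside the reconstructed chat history, rather than also being sent as Claude's separate prefill field.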