Atomic treatment of message examples blocks in OpenAI

This commit is contained in:
Grzegorz Gidel
2023-05-01 17:21:32 +02:00
parent 3f37de699a
commit 2f55b11afe

View File

@@ -395,23 +395,22 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
     console.log(total_count);
-    for (const example of openai_msgs_example) {
-        // get the current example block with multiple user/bot messages
-        let example_block = example;
-        for (let k = 0; k < example_block.length; k++) {
-            if (example_block.length == 0) { continue; }
-            let example_count = countTokens(example_block[k], true);
-            // add all the messages from the example
-            if ((total_count + example_count + start_chat_count) < (this_max_context - oai_settings.openai_max_tokens)) {
-                if (k == 0) {
-                    examples_tosend.push(new_chat_msg);
-                    total_count += start_chat_count;
-                }
-                examples_tosend.push(example_block[k]);
-                total_count += example_count;
-            }
-            else { break; }
-        }
-    }
+    // each example block contains multiple user/bot messages
+    for (let example_block of openai_msgs_example) {
+        if (example_block.length == 0) { continue; }
+
+        // include the heading
+        example_block = [new_chat_msg, ...example_block];
+
+        // add the block only if there is enough space for all its messages
+        const example_count = countTokens(example_block, true);
+        if ((total_count + example_count) < (this_max_context - oai_settings.openai_max_tokens)) {
+            examples_tosend.push(...example_block)
+            total_count += example_count;
+        }
+        else {
+            // early break since more examples probably won't fit anyway
+            break;
+        }
+    }