Mirror of https://github.com/SillyTavern/SillyTavern.git (synced 2025-02-10 17:10:45 +01:00)
Never use token handler for openai
For now.
parent 0513d9c8c0
commit 5a6340165c
@@ -421,12 +421,9 @@ async function prepareOpenAIMessages({ systemPrompt, name2, storyString, worldIn
         const openai_msgs_tosend = chatCompletion.getChat();
         openai_messages_count = openai_msgs_tosend.filter(x => x.role === "user" || x.role === "assistant").length;
 
-        console.log("We're sending this:")
         console.log(openai_msgs_tosend);
 
-        // Integrate const handler_instance = new TokenHandler(countTokens);
-
-        return openai_msgs_tosend;
+        return [openai_msgs_tosend, false];
     });
 }
 
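For context, the calls this commit deletes (new TokenHandler(countTokens), handler_instance.count(item, true, 'conversation'), handler_instance.uncount(item_count, 'conversation')) imply a small stateful counter. The sketch below is reconstructed from those call shapes alone, not taken from the project; the per-bucket bookkeeping is an assumption about how count and uncount pair up.

// Minimal sketch of the TokenHandler interface implied by the removed calls.
// Only the call shapes come from the diff; everything else is assumed.
class TokenHandler {
    constructor(countTokenFn) {
        this.countTokenFn = countTokenFn; // e.g. the module's countTokens
        this.counts = {};                 // running token totals per bucket
    }

    // Called as count(item, true, 'conversation'): measure an item and,
    // when the second argument is true, accumulate it under the bucket.
    count(item, accumulate, bucket) {
        const tokens = this.countTokenFn(item);
        if (accumulate) {
            this.counts[bucket] = (this.counts[bucket] ?? 0) + tokens;
        }
        return tokens;
    }

    // Called as uncount(item_count, 'conversation') just before an early
    // break, rolling back a count that was accumulated but not used.
    uncount(tokens, bucket) {
        this.counts[bucket] = (this.counts[bucket] ?? 0) - tokens;
    }
}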
@@ -504,26 +501,24 @@ function prepareExampleMessages(messages, exampleMessages, includeAll = false, )
         // go from newest message to oldest, because we want to delete the older ones from the context
         for (let j = messages.length - 1; j >= 0; j--) {
             let item = messages[j];
-            let item_count = handler_instance.count(item, true, 'conversation');
+            let item_count = countTokens(item);
             // If we have enough space for this message, also account for the max assistant reply size
             if ((total_count + item_count) < (oai_settings.openai_max_context - oai_settings.openai_max_tokens)) {
                 messages.push(item);
             } else {
                 // early break since if we still have more messages, they just won't fit anyway
-                handler_instance.uncount(item_count, 'conversation');
                 break;
             }
         }
     } else {
         for (let j = messages.length - 1; j >= 0; j--) {
             let item = messages[j];
-            let item_count = handler_instance.count(item, true, 'conversation');
+            let item_count = countTokens(item);
             // If we have enough space for this message, also account for the max assistant reply size
             if ((total_count + item_count) < (oai_settings.openai_max_context - oai_settings.openai_max_tokens)) {
                 messages.push(item);
             } else {
                 // early break since if we still have more messages, they just won't fit anyway
-                handler_instance.uncount(item_count, 'conversation');
                 break;
             }
         }
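With the handler gone, the loop above counts each message directly with countTokens and stops as soon as the running total would spill past the context budget (openai_max_context minus the openai_max_tokens reserved for the reply). Below is a self-contained sketch of that logic; the stand-in token estimator and the inline total_count accumulation are illustration only, since the real countTokens and the total_count bookkeeping live outside the lines shown in this hunk.

// Stand-alone sketch of the trimming loop: newest message to oldest,
// keep what fits, break at the first message that does not.
const oai_settings = { openai_max_context: 4096, openai_max_tokens: 300 };
const countTokens = (item) => Math.ceil(JSON.stringify(item).length / 4); // crude stand-in

function trimToBudget(history) {
    const kept = [];
    let total_count = 0;
    for (let j = history.length - 1; j >= 0; j--) {
        const item = history[j];
        const item_count = countTokens(item);
        // reserve room for the assistant reply before admitting a message
        if ((total_count + item_count) < (oai_settings.openai_max_context - oai_settings.openai_max_tokens)) {
            kept.push(item);
            total_count += item_count;
        } else {
            break; // older messages won't fit either
        }
    }
    return kept;
}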
@@ -538,12 +533,11 @@ function prepareExampleMessages(messages, exampleMessages, includeAll = false, )
         example_block = [new_chat_msg, ...example_block];
 
         // add the block only if there is enough space for all its messages
-        const example_count = handler_instance.count(example_block, true, 'examples');
+        const example_count = countTokens(example_block)
         if ((total_count + example_count) < (oai_settings.openai_max_context - oai_settings.openai_max_tokens)) {
             examples_tosend.push(...example_block)
         } else {
             // early break since more examples probably won't fit anyway
-            handler_instance.uncount(example_count, 'examples');
             break;
         }
     }
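The example-message hunk uses a coarser, all-or-nothing rule than the per-message trimming: the whole block, including its new_chat_msg header, is counted at once and either sent complete or skipped. A sketch of that rule, where the blocks/budget parameters are hypothetical stand-ins (budget corresponds to oai_settings.openai_max_context - oai_settings.openai_max_tokens in the file):

// Sketch: admit example blocks whole, never partially.
function pickExampleBlocks(blocks, new_chat_msg, budget, countTokens) {
    const examples_tosend = [];
    let total_count = 0;
    for (const block of blocks) {
        const example_block = [new_chat_msg, ...block];
        const example_count = countTokens(example_block);
        if ((total_count + example_count) < budget) {
            examples_tosend.push(...example_block);
            total_count += example_count;
        } else {
            break; // more examples probably won't fit anyway
        }
    }
    return examples_tosend;
}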