Async OAI tokenization

Author: SillyLossy
Date:   2023-03-28 17:44:28 +03:00
Parent: 2c4876ac39
Commit: ae7e21d6c5

2 changed files with 33 additions and 31 deletions
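Summary: countTokens no longer issues a blocking request (async: false); it now returns a Promise and resolves the token count inside the AJAX success callback. The asynchrony propagates to its callers: prepareOpenAIMessages becomes async and awaits every token count, and the generation code awaits runGenerate and prepareOpenAIMessages in turn.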

File 1 of 2

@@ -1330,13 +1330,13 @@ async function Generate(type, automatic_trigger, force_name2) {//encode("dsfs").
             storyString += !disable_scenario_formatting ? `Circumstances and context of the dialogue: ${Scenario}\n` : `${Scenario}\n`;
         }
         console.log('calling runGenerate');
-        runGenerate();
+        await runGenerate();
         return;
     }
     i++;
 }

-function runGenerate(cycleGenerationPromt = '') {
+async function runGenerate(cycleGenerationPromt = '') {
     is_send_press = true;
     generatedPromtCache += cycleGenerationPromt;
@@ -1611,7 +1611,7 @@ async function Generate(type, automatic_trigger, force_name2) {//encode("dsfs").
     if (main_api == 'openai') {
-        let prompt = prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldInfoAfter, extension_prompt, promptBias);
+        let prompt = await prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldInfoAfter, extension_prompt, promptBias);
         sendOpenAIRequest(prompt).then(onSuccess).catch(onError);
     }
     else {
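Note: these two hunks propagate the asynchrony up the call chain. The generation loop awaits runGenerate, runGenerate awaits prepareOpenAIMessages, and prepareOpenAIMessages (second file below) awaits countTokens. A minimal sketch of the pattern; every name and body here is a simplified stand-in, not the project's real signatures:

// Sketch only: simplified stand-ins for the real functions.
async function countTokensSketch(messages) {
    // Stands in for the POST to /tokenize_openai; pretend ~4 chars per token.
    return Math.ceil(JSON.stringify(messages).length / 4);
}

async function prepareOpenAIMessagesSketch(messages) {
    // The token count is awaited instead of blocking the main thread.
    const total_count = await countTokensSketch(messages);
    return { messages, total_count };
}

async function runGenerateSketch() {
    const prompt = await prepareOpenAIMessagesSketch([{ role: 'user', content: 'Hello' }]);
    console.log(`prompt tokens: ${prompt.total_count}`);
}

// The caller awaits too, exactly as in the first hunk.
runGenerateSketch();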

File 2 of 2

@@ -210,7 +210,7 @@ function formatWorldInfo(value) {
     return `[Details of the fictional world the RP set in:\n${value}\n]`;
 }

-function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldInfoAfter, extensionPrompt, bias) {
+async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldInfoAfter, extensionPrompt, bias) {
     let this_max_context = oai_settings.openai_max_context;
     let nsfw_toggle_prompt = "";
     let enhance_definitions_prompt = "";
@@ -243,13 +243,13 @@ function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldInfoAft
     // todo: static value, maybe include in the initial context calculation
     let new_chat_msg = { "role": "system", "content": "[Start a new chat]" };
-    let start_chat_count = countTokens([new_chat_msg]);
-    let total_count = countTokens([prompt_msg], true) + start_chat_count;
+    let start_chat_count = await countTokens([new_chat_msg]);
+    let total_count = await countTokens([prompt_msg], true) + start_chat_count;

     if (bias && bias.trim().length) {
         let bias_msg = { "role": "system", "content": bias.trim() };
         openai_msgs.push(bias_msg);
-        total_count += countTokens([bias_msg], true);
+        total_count += await countTokens([bias_msg], true);
     }

     if (selected_group) {
@@ -261,12 +261,12 @@ function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldInfoAft
         openai_msgs.push(group_nudge);

         // add a group nudge count
-        let group_nudge_count = countTokens([group_nudge], true);
+        let group_nudge_count = await countTokens([group_nudge], true);
         total_count += group_nudge_count;

         // recount tokens for new start message
         total_count -= start_chat_count
-        start_chat_count = countTokens([new_chat_msg]);
+        start_chat_count = await countTokens([new_chat_msg]);
         total_count += start_chat_count;
     }
@@ -289,11 +289,11 @@ function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldInfoAft
             examples_tosend.push(example);
         }
     }
-    total_count += countTokens(examples_tosend);
+    total_count += await countTokens(examples_tosend);
     // go from newest message to oldest, because we want to delete the older ones from the context
     for (let j = openai_msgs.length - 1; j >= 0; j--) {
         let item = openai_msgs[j];
-        let item_count = countTokens(item);
+        let item_count = await countTokens(item);
         // If we have enough space for this message, also account for the max assistant reply size
         if ((total_count + item_count) < (this_max_context - oai_settings.openai_max_tokens)) {
             openai_msgs_tosend.push(item);
@@ -307,7 +307,7 @@ function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldInfoAft
     } else {
         for (let j = openai_msgs.length - 1; j >= 0; j--) {
             let item = openai_msgs[j];
-            let item_count = countTokens(item);
+            let item_count = await countTokens(item);
             // If we have enough space for this message, also account for the max assistant reply size
             if ((total_count + item_count) < (this_max_context - oai_settings.openai_max_tokens)) {
                 openai_msgs_tosend.push(item);
@@ -327,7 +327,7 @@ function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldInfoAft
         for (let k = 0; k < example_block.length; k++) {
             if (example_block.length == 0) { continue; }
-            let example_count = countTokens(example_block[k]);
+            let example_count = await countTokens(example_block[k]);
             // add all the messages from the example
             if ((total_count + example_count + start_chat_count) < (this_max_context - oai_settings.openai_max_tokens)) {
                 if (k == 0) {
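Note: the trimming loops in the last three hunks share one budget check. An item is kept only while total_count plus its own count stays below this_max_context minus oai_settings.openai_max_tokens, so room for the assistant reply is reserved up front. A tiny illustration with made-up numbers:

// Made-up numbers; the real values come from oai_settings.
const this_max_context = 4096;  // model context size
const openai_max_tokens = 400;  // reserved for the assistant reply
let total_count = 3600;         // prompt built so far
const item_count = 120;         // next message to consider

// Same condition as the loops above.
const fits = (total_count + item_count) < (this_max_context - openai_max_tokens);
console.log(fits); // false: 3720 >= 3696, so this message would be dropped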
@@ -435,24 +435,26 @@ function onStream(e, resolve, reject, last_view_mes) {
     }
 }

-function countTokens(messages, full = false) {
-    if (!Array.isArray(messages)) {
-        messages = [messages];
-    }
-    let token_count = -1;
-    jQuery.ajax({
-        async: false,
-        type: 'POST', //
-        url: `/tokenize_openai?model=${oai_settings.openai_model}`,
-        data: JSON.stringify(messages),
-        dataType: "json",
-        contentType: "application/json",
-        success: function (data) {
-            token_count = data.token_count;
-        }
-    });
-    if (!full) token_count -= 2;
-    return token_count;
+async function countTokens(messages, full = false) {
+    return new Promise((resolve) => {
+        if (!Array.isArray(messages)) {
+            messages = [messages];
+        }
+        let token_count = -1;
+        jQuery.ajax({
+            async: true,
+            type: 'POST', //
+            url: `/tokenize_openai?model=${oai_settings.openai_model}`,
+            data: JSON.stringify(messages),
+            dataType: "json",
+            contentType: "application/json",
+            success: function (data) {
+                token_count = data.token_count;
+                if (!full) token_count -= 2;
+                resolve(token_count);
+            }
+        });
+    });
 }

 function loadOpenAISettings(data, settings) {
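Note: the new countTokens wraps a jQuery.ajax call (now with async: true) in a hand-rolled Promise. The same behavior could be written with fetch; this is a sketch, not part of the commit, assuming the /tokenize_openai endpoint and its { token_count } response shape stay exactly as above:

// Sketch of a fetch-based equivalent (assumes the same endpoint and response).
async function countTokensViaFetch(messages, full = false) {
    if (!Array.isArray(messages)) {
        messages = [messages];
    }
    const response = await fetch(`/tokenize_openai?model=${oai_settings.openai_model}`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify(messages),
    });
    const data = await response.json();
    // Same adjustment as the committed code: subtract the 2 priming tokens
    // unless a full count was requested.
    return full ? data.token_count : data.token_count - 2;
}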