diff --git a/public/index.html b/public/index.html
index 78422fd89..e74b768d6 100644
--- a/public/index.html
+++ b/public/index.html
@@ -2431,7 +2431,7 @@
diff --git a/public/script.js b/public/script.js
index a80630f79..e3a1fad3d 100644
--- a/public/script.js
+++ b/public/script.js
@@ -125,6 +125,7 @@ import {
secret_state,
writeSecret
} from "./scripts/secrets.js";
+import uniqolor from "./scripts/uniqolor.js";
//exporting functions and vars for mods
export {
@@ -204,6 +205,7 @@ let converter;
reloadMarkdownProcessor();
// array for prompt token calculations
+console.log('initializing Prompt Itemization Array on Startup');
let itemizedPrompts = [];
/* let bg_menu_toggle = false; */
@@ -1129,28 +1131,34 @@ function addOneMessage(mes, { type = "normal", insertAfter = null, scroll = true
if (isSystem) {
newMessage.find(".mes_edit").hide();
- newMessage.find(".mes_prompt").hide(); //dont'd need prompt display for sys messages
+ newMessage.find(".mes_prompt").hide(); //don't need prompt button for sys
}
- // don't need prompt butons for user messages
+ // don't need prompt button for user
if (params.isUser === true) {
newMessage.find(".mes_prompt").hide();
+ console.log(`hiding prompt for user mesID ${params.mesId}`);
}
//shows or hides the Prompt display button
let mesIdToFind = Number(newMessage.attr('mesId'));
if (itemizedPrompts.length !== 0) {
+ console.log(`itemizedPrompts.length = ${itemizedPrompts.length}`);
for (var i = 0; i < itemizedPrompts.length; i++) {
if (itemizedPrompts[i].mesId === mesIdToFind) {
newMessage.find(".mes_prompt").show();
+ console.log(`showing prompt for mesID ${params.mesId} from ${params.characterName}`);
} else {
- console.log('no cache found for mesID, hiding prompt button and continuing search');
+ console.log(`no cache obj for mesID ${mesIdToFind}, hiding prompt button and continuing search`);
newMessage.find(".mes_prompt").hide();
+ console.log(itemizedPrompts);
}
}
- } else { //hide all when prompt cache is empty
+ } else if (params.isUser !== true) { //hide all when prompt cache is empty
+ console.log('saw empty prompt cache, hiding all prompt buttons');
$(".mes_prompt").hide();
- }
+ console.log(itemizedPrompts);
+ } else { console.log('skipping prompt data for User Message'); }
newMessage.find('.avatar img').on('error', function () {
$(this).hide();
@@ -1594,6 +1602,7 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
const isImpersonate = type == "impersonate";
const isInstruct = power_user.instruct.enabled;
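+ // Start the already-generated message buffer with the speaker's name prefix for this run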
+ message_already_generated = isImpersonate ? `${name1}: ` : `${name2}: `;
// Name for the multigen prefix
const magName = isImpersonate ? (is_pygmalion ? 'You' : name1) : name2;
@@ -2123,32 +2132,7 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
generatedPromtCache +
promptBias;
- //set array object for prompt token itemization of this message
- let thisPromptBits = {
- mesId: count_view_mes,
- worldInfoBefore: worldInfoBefore,
- allAnchors: allAnchors,
- summarizeString: (extension_prompts['1_memory']?.value || ''),
- authorsNoteString: (extension_prompts['2_floating_prompt']?.value || ''),
- worldInfoString: worldInfoString,
- storyString: storyString,
- worldInfoAfter: worldInfoAfter,
- afterScenarioAnchor: afterScenarioAnchor,
- examplesString: examplesString,
- mesSendString: mesSendString,
- generatedPromtCache: generatedPromtCache,
- promptBias: promptBias,
- finalPromt: finalPromt,
- charDescription: charDescription,
- charPersonality: charPersonality,
- scenarioText: scenarioText,
- promptBias: promptBias,
- storyString: storyString,
- this_max_context: this_max_context,
- padding: power_user.token_padding
- }
- itemizedPrompts.push(thisPromptBits);
if (zeroDepthAnchor && zeroDepthAnchor.length) {
if (!isMultigenEnabled() || tokens_already_generated == 0) {
@@ -2167,6 +2151,11 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
}
}
+ // Add quiet generation prompt at depth 0
+ if (quiet_prompt && quiet_prompt.length) {
+ finalPromt += `\n${quiet_prompt}`;
+ }
+
finalPromt = finalPromt.replace(/\r/gm, '');
if (power_user.collapse_newlines) {
@@ -2202,6 +2191,8 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
}
}
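+ // Holds the OpenAI per-category token counts for this run (when the breakdown is enabled); later merged into the itemizedPrompts entry for this message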
+ let thisPromptBits = [];
+
if (main_api == 'kobold' && horde_settings.use_horde && horde_settings.auto_adjust_response_length) {
this_amount_gen = Math.min(this_amount_gen, adjustedParams.maxLength);
this_amount_gen = Math.max(this_amount_gen, MIN_AMOUNT_GEN); // prevent validation errors
@@ -2237,7 +2228,50 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
console.log('rungenerate calling API');
if (main_api == 'openai') {
- let prompt = await prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldInfoAfter, afterScenarioAnchor, promptBias, type);
+ let [prompt, counts] = await prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldInfoAfter, afterScenarioAnchor, promptBias, type);
+
+
+ // counts will return false if the user has not enabled the token breakdown feature
+ if (counts) {
+
+ //$('#token_breakdown').css('display', 'flex');
+ const breakdown_bar = $('#token_breakdown div:first-child');
+ breakdown_bar.empty();
+
+ const total = Object.values(counts).reduce((acc, val) => acc + val, 0);
+
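+ // Object.entries(counts) preserves TokenHandler's insertion order, so indices 0-7 map to start_chat, prompt, bias, nudge, jailbreak, impersonate, examples, conversation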
+ thisPromptBits.push({
+ oaiStartTokens: Object.entries(counts)[0][1],
+ oaiPromptTokens: Object.entries(counts)[1][1],
+ oaiBiasTokens: Object.entries(counts)[2][1],
+ oaiNudgeTokens: Object.entries(counts)[3][1],
+ oaiJailbreakTokens: Object.entries(counts)[4][1],
+ oaiImpersonateTokens: Object.entries(counts)[5][1],
+ oaiExamplesTokens: Object.entries(counts)[6][1],
+ oaiConversationTokens: Object.entries(counts)[7][1],
+ oaiTotalTokens: total,
+ })
+
+
+ console.log(`added OAI prompt bits to array`);
+
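+ // Draw one proportional, color-coded segment per non-zero category in the breakdown bar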
+ Object.entries(counts).forEach(([type, value]) => {
+ if (value === 0) {
+ return;
+ }
+ const percent_value = (value / total) * 100;
+ const color = uniqolor(type, { saturation: 50, lightness: 75, }).color;
+ const bar = document.createElement('div');
+ bar.style.width = `${percent_value}%`;
+ bar.classList.add('token_breakdown_segment');
+ bar.style.backgroundColor = color + 'AA';
+ bar.style.borderColor = color + 'FF';
+ bar.innerText = value;
+ bar.title = `${type}: ${percent_value.toFixed(2)}%`;
+ breakdown_bar.append(bar);
+ });
+ }
+
setInContextMessages(openai_messages_count, type);
if (isStreamingEnabled() && type !== 'quiet') {
@@ -2277,6 +2311,41 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
}); //end of "if not data error"
}
+ //set array object for prompt token itemization of this message
+ let currentArrayEntry = Number(thisPromptBits.length - 1);
+ let additionalPromptStuff = {
+ ...thisPromptBits[currentArrayEntry],
+ mesId: Number(count_view_mes),
+ worldInfoBefore: worldInfoBefore,
+ allAnchors: allAnchors,
+ summarizeString: (extension_prompts['1_memory']?.value || ''),
+ authorsNoteString: (extension_prompts['2_floating_prompt']?.value || ''),
+ worldInfoString: worldInfoString,
+ storyString: storyString,
+ worldInfoAfter: worldInfoAfter,
+ afterScenarioAnchor: afterScenarioAnchor,
+ examplesString: examplesString,
+ mesSendString: mesSendString,
+ generatedPromtCache: generatedPromtCache,
+ promptBias: promptBias,
+ finalPromt: finalPromt,
+ charDescription: charDescription,
+ charPersonality: charPersonality,
+ scenarioText: scenarioText,
+ this_max_context: this_max_context,
+ padding: power_user.token_padding,
+ main_api: main_api,
+ };
+
+ thisPromptBits = additionalPromptStuff;
+
+ //console.log(thisPromptBits);
+
+ itemizedPrompts.push(thisPromptBits);
+ //console.log(`pushed prompt bits to itemizedPrompts array. Length is now: ${itemizedPrompts.length}`);
+
if (isStreamingEnabled() && type !== 'quiet') {
hideSwipeButtons();
let getMessage = await streamingProcessor.generate();
@@ -2285,7 +2354,7 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
// I wasn't able to get multigen working with real streaming
// consistently without screwing the interim prompting
if (isMultigenEnabled()) {
- tokens_already_generated += this_amount_gen;
+ tokens_already_generated += this_amount_gen; // add new gen amt to any prev gen counter..
message_already_generated += getMessage;
promptBias = '';
if (!streamingProcessor.isStopped && shouldContinueMultigen(getMessage, isImpersonate)) {
@@ -2432,8 +2501,9 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
} //generate ends
function promptItemize(itemizedPrompts, requestedMesId) {
- let incomingMesId = Number(requestedMesId);
- let thisPromptSet = undefined;
+ var incomingMesId = Number(requestedMesId);
+ console.log(`looking for MesId ${incomingMesId}`);
+ var thisPromptSet = undefined;
for (var i = 0; i < itemizedPrompts.length; i++) {
if (itemizedPrompts[i].mesId === incomingMesId) {
@@ -2447,44 +2517,183 @@ function promptItemize(itemizedPrompts, requestedMesId) {
return null;
}
- let finalPromptTokens = getTokenCount(itemizedPrompts[thisPromptSet].finalPromt);
- let allAnchorsTokens = getTokenCount(itemizedPrompts[thisPromptSet].allAnchors);
- let summarizeStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].summarizeString);
- let authorsNoteStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].authorsNoteString);
- let afterScenarioAnchorTokens = getTokenCount(itemizedPrompts[thisPromptSet].afterScenarioAnchor);
- let zeroDepthAnchorTokens = getTokenCount(itemizedPrompts[thisPromptSet].afterScenarioAnchor);
- let worldInfoStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].worldInfoString);
- let storyStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].storyString);
- let examplesStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].examplesString);
- let charPersonalityTokens = getTokenCount(itemizedPrompts[thisPromptSet].charPersonality);
- let charDescriptionTokens = getTokenCount(itemizedPrompts[thisPromptSet].charDescription);
- let scenarioTextTokens = getTokenCount(itemizedPrompts[thisPromptSet].scenarioText);
- let promptBiasTokens = getTokenCount(itemizedPrompts[thisPromptSet].promptBias);
- let mesSendStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].mesSendString)
- let ActualChatHistoryTokens = mesSendStringTokens - allAnchorsTokens + power_user.token_padding;
- let thisPrompt_max_context = itemizedPrompts[thisPromptSet].this_max_context;
- let thisPrompt_padding = itemizedPrompts[thisPromptSet].padding;
+ //these happen regardless of API
+ var charPersonalityTokens = getTokenCount(itemizedPrompts[thisPromptSet].charPersonality);
+ var charDescriptionTokens = getTokenCount(itemizedPrompts[thisPromptSet].charDescription);
+ var scenarioTextTokens = getTokenCount(itemizedPrompts[thisPromptSet].scenarioText);
+ var allAnchorsTokens = getTokenCount(itemizedPrompts[thisPromptSet].allAnchors);
+ var summarizeStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].summarizeString);
+ var authorsNoteStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].authorsNoteString);
+ var afterScenarioAnchorTokens = getTokenCount(itemizedPrompts[thisPromptSet].afterScenarioAnchor);
+ var zeroDepthAnchorTokens = getTokenCount(itemizedPrompts[thisPromptSet].afterScenarioAnchor);
+ var worldInfoStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].worldInfoString);
+ var thisPrompt_max_context = itemizedPrompts[thisPromptSet].this_max_context;
+ var thisPrompt_padding = itemizedPrompts[thisPromptSet].padding;
+ console.log(`"${itemizedPrompts[thisPromptSet].promptBias}"`);
+ var promptBiasTokens = getTokenCount(itemizedPrompts[thisPromptSet].promptBias);
+ var this_main_api = itemizedPrompts[thisPromptSet].main_api;
- let totalTokensInPrompt =
- storyStringTokens + //chardefs total
- worldInfoStringTokens +
- ActualChatHistoryTokens + //chat history
- allAnchorsTokens + // AN and/or legacy anchors
- //afterScenarioAnchorTokens + //only counts if AN is set to 'after scenario'
- //zeroDepthAnchorTokens + //same as above, even if AN not on 0 depth
- promptBiasTokens + //{{}}
- - thisPrompt_padding; //not sure this way of calculating is correct, but the math results in same value as 'finalPromt'
+ if (this_main_api == 'openai') {
+ //for OAI API
+ //console.log('-- Counting OAI Tokens');
+ var finalPromptTokens = itemizedPrompts[thisPromptSet].oaiTotalTokens;
+ var oaiStartTokens = itemizedPrompts[thisPromptSet].oaiStartTokens;
+ var oaiPromptTokens = itemizedPrompts[thisPromptSet].oaiPromptTokens;
+ var ActualChatHistoryTokens = itemizedPrompts[thisPromptSet].oaiConversationTokens;
+ var examplesStringTokens = itemizedPrompts[thisPromptSet].oaiExamplesTokens;
+ var oaiBiasTokens = itemizedPrompts[thisPromptSet].oaiBiasTokens;
+ var oaiJailbreakTokens = itemizedPrompts[thisPromptSet].oaiJailbreakTokens;
+ var oaiNudgeTokens = itemizedPrompts[thisPromptSet].oaiNudgeTokens;
+ var oaiImpersonateTokens = itemizedPrompts[thisPromptSet].oaiImpersonateTokens;
- let storyStringTokensPercentage = ((storyStringTokens / (totalTokensInPrompt + thisPrompt_padding)) * 100).toFixed(2);
- let ActualChatHistoryTokensPercentage = ((ActualChatHistoryTokens / (totalTokensInPrompt + thisPrompt_padding)) * 100).toFixed(2);
- let promptBiasTokensPercentage = ((promptBiasTokens / (totalTokensInPrompt + thisPrompt_padding)) * 100).toFixed(2);
- let worldInfoStringTokensPercentage = ((worldInfoStringTokens / (totalTokensInPrompt + thisPrompt_padding)) * 100).toFixed(2);
- let allAnchorsTokensPercentage = ((allAnchorsTokens / (totalTokensInPrompt + thisPrompt_padding)) * 100).toFixed(2);
- let selectedTokenizer = $("#tokenizer").find(':selected').text();
- callPopup(
- `
+
+ } else {
+ //for non-OAI APIs
+ //console.log('-- Counting non-OAI Tokens');
+ var finalPromptTokens = getTokenCount(itemizedPrompts[thisPromptSet].finalPromt);
+ var storyStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].storyString);
+ var examplesStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].examplesString);
+ var mesSendStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].mesSendString);
+ var ActualChatHistoryTokens = mesSendStringTokens - allAnchorsTokens + power_user.token_padding;
+
+ var totalTokensInPrompt =
+ storyStringTokens + //chardefs total
+ worldInfoStringTokens +
+ ActualChatHistoryTokens + //chat history
+ allAnchorsTokens + // AN and/or legacy anchors
+ //afterScenarioAnchorTokens + //only counts if AN is set to 'after scenario'
+ //zeroDepthAnchorTokens + //same as above, even if AN not on 0 depth
+ promptBiasTokens; //{{}}
+ //- thisPrompt_padding; //not sure this way of calculating is correct, but the math results in same value as 'finalPromt'
+ }
+
+ if (this_main_api == 'openai') {
+ //console.log('-- applying % on OAI tokens');
+ var oaiStartTokensPercentage = ((oaiStartTokens / (finalPromptTokens)) * 100).toFixed(2);
+ var storyStringTokensPercentage = ((oaiPromptTokens / (finalPromptTokens)) * 100).toFixed(2);
+ var ActualChatHistoryTokensPercentage = ((ActualChatHistoryTokens / (finalPromptTokens)) * 100).toFixed(2);
+ var promptBiasTokensPercentage = ((oaiBiasTokens / (finalPromptTokens)) * 100).toFixed(2);
+ var worldInfoStringTokensPercentage = ((worldInfoStringTokens / (finalPromptTokens)) * 100).toFixed(2);
+ var allAnchorsTokensPercentage = ((allAnchorsTokens / (finalPromptTokens)) * 100).toFixed(2);
+ var selectedTokenizer = $("#tokenizer").find(':selected').text();
+
+ } else {
+ //console.log('-- applying % on non-OAI tokens');
+ var storyStringTokensPercentage = ((storyStringTokens / (totalTokensInPrompt)) * 100).toFixed(2);
+ var ActualChatHistoryTokensPercentage = ((ActualChatHistoryTokens / (totalTokensInPrompt)) * 100).toFixed(2);
+ var promptBiasTokensPercentage = ((promptBiasTokens / (totalTokensInPrompt)) * 100).toFixed(2);
+ var worldInfoStringTokensPercentage = ((worldInfoStringTokens / (totalTokensInPrompt)) * 100).toFixed(2);
+ var allAnchorsTokensPercentage = ((allAnchorsTokens / (totalTokensInPrompt)) * 100).toFixed(2);
+ var selectedTokenizer = $("#tokenizer").find(':selected').text();
+ }
+
+ if (this_main_api == 'openai') {
+ //console.log('-- calling popup for OAI tokens');
+ callPopup(
+ `
Prompt Itemization
Tokenizer: ${selectedTokenizer}
+ API Used: ${this_main_api}
+
+ Only the white numbers really matter. All numbers are estimates.
+ Grey color items may not have been included in the context due to certain prompt format settings.
+
+ Chat Startup: ${oaiStartTokens}
+ Prompt Tokens: ${oaiPromptTokens}
+ -- Description: ${charDescriptionTokens}
+ -- Personality: ${charPersonalityTokens}
+ -- Scenario: ${scenarioTextTokens}
+ -- Examples: ${examplesStringTokens}
+ World Info: ${worldInfoStringTokens}
+ Chat History: ${ActualChatHistoryTokens}
+ Extensions: ${allAnchorsTokens}
+ -- Summarize: ${summarizeStringTokens}
+ -- Author's Note: ${authorsNoteStringTokens}
+ {{}} Bias: ${oaiBiasTokens}
+ Total Tokens in Prompt: ${finalPromptTokens}
+ Max Context: ${thisPrompt_max_context}
+ - Padding: ${thisPrompt_padding}
+ Actual Max Context Allowed: ${thisPrompt_max_context - thisPrompt_padding}
+
+ `, 'text'
+ );
+
+ } else {
+ //console.log('-- calling popup for non-OAI tokens');
+ callPopup(
+ `
+ Prompt Itemization
+ Tokenizer: ${selectedTokenizer}
+ API Used: ${this_main_api}
Only the white numbers really matter. All numbers are estimates.
Grey color items may not have been included in the context due to certain prompt format settings.
@@ -2569,7 +2778,8 @@ function promptItemize(itemizedPrompts, requestedMesId) {
`, 'text'
- );
+ );
+ }
}
function setInContextMessages(lastmsg, type) {
@@ -3295,8 +3505,10 @@ function changeMainAPI() {
// Hide common settings for OpenAI
if (selectedVal == "openai") {
$("#common-gen-settings-block").css("display", "none");
+ $("#token_breakdown").css("display", "flex");
} else {
$("#common-gen-settings-block").css("display", "block");
+ $("#token_breakdown").css("display", "none");
}
// Hide amount gen for poe
if (selectedVal == "poe") {
diff --git a/public/scripts/openai.js b/public/scripts/openai.js
index e8053b44a..88ad3d25f 100644
--- a/public/scripts/openai.js
+++ b/public/scripts/openai.js
@@ -101,6 +101,7 @@ const default_settings = {
openai_model: 'gpt-3.5-turbo',
jailbreak_system: false,
reverse_proxy: '',
+ oai_breakdown: false,
};
const oai_settings = {
@@ -125,6 +126,7 @@ const oai_settings = {
openai_model: 'gpt-3.5-turbo',
jailbreak_system: false,
reverse_proxy: '',
+ oai_breakdown: false,
};
let openai_setting_names;
@@ -317,16 +319,18 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
let openai_msgs_tosend = [];
// todo: static value, maybe include in the initial context calculation
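+ // Count tokens per prompt category so an optional breakdown can be returned alongside the messages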
+ const handler_instance = new TokenHandler(countTokens);
+
let new_chat_msg = { "role": "system", "content": "[Start a new chat]" };
- let start_chat_count = countTokens([new_chat_msg], true);
+ let start_chat_count = handler_instance.count([new_chat_msg], true, 'start_chat');
await delay(1);
- let total_count = countTokens([prompt_msg], true) + start_chat_count;
+ let total_count = handler_instance.count([prompt_msg], true, 'prompt') + start_chat_count;
await delay(1);
if (bias && bias.trim().length) {
let bias_msg = { "role": "system", "content": bias.trim() };
openai_msgs.push(bias_msg);
- total_count += countTokens([bias_msg], true);
+ total_count += handler_instance.count([bias_msg], true, 'bias');
await delay(1);
}
@@ -343,13 +347,14 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
openai_msgs.push(group_nudge);
// add a group nudge count
- let group_nudge_count = countTokens([group_nudge], true);
+ let group_nudge_count = handler_instance.count([group_nudge], true, 'nudge');
await delay(1);
total_count += group_nudge_count;
// recount tokens for new start message
total_count -= start_chat_count
- start_chat_count = countTokens([new_chat_msg], true);
+ handler_instance.uncount(start_chat_count, 'start_chat');
+ start_chat_count = handler_instance.count([new_chat_msg], true, 'start_chat');
await delay(1);
total_count += start_chat_count;
}
@@ -358,7 +363,7 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
const jailbreakMessage = { "role": "system", "content": substituteParams(oai_settings.jailbreak_prompt) };
openai_msgs.push(jailbreakMessage);
- total_count += countTokens([jailbreakMessage], true);
+ total_count += handler_instance.count([jailbreakMessage], true, 'jailbreak');
await delay(1);
}
@@ -366,7 +371,7 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
const impersonateMessage = { "role": "system", "content": substituteParams(oai_settings.impersonation_prompt) };
openai_msgs.push(impersonateMessage);
- total_count += countTokens([impersonateMessage], true);
+ total_count += handler_instance.count([impersonateMessage], true, 'impersonate');
await delay(1);
}
@@ -389,12 +394,12 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
examples_tosend.push(example);
}
}
- total_count += countTokens(examples_tosend, true);
+ total_count += handler_instance.count(examples_tosend, true, 'examples');
await delay(1);
// go from newest message to oldest, because we want to delete the older ones from the context
for (let j = openai_msgs.length - 1; j >= 0; j--) {
let item = openai_msgs[j];
- let item_count = countTokens(item, true);
+ let item_count = handler_instance.count(item, true, 'conversation');
await delay(1);
// If we have enough space for this message, also account for the max assistant reply size
if ((total_count + item_count) < (this_max_context - oai_settings.openai_max_tokens)) {
@@ -403,13 +408,14 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
}
else {
// early break since if we still have more messages, they just won't fit anyway
+ handler_instance.uncount(item_count, 'conversation');
break;
}
}
} else {
for (let j = openai_msgs.length - 1; j >= 0; j--) {
let item = openai_msgs[j];
- let item_count = countTokens(item, true);
+ let item_count = handler_instance.count(item, true, 'conversation');
await delay(1);
// If we have enough space for this message, also account for the max assistant reply size
if ((total_count + item_count) < (this_max_context - oai_settings.openai_max_tokens)) {
@@ -418,11 +424,12 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
}
else {
// early break since if we still have more messages, they just won't fit anyway
+ handler_instance.uncount(item_count, 'conversation');
break;
}
}
- console.log(total_count);
+ //console.log(total_count);
// each example block contains multiple user/bot messages
for (let example_block of openai_msgs_example) {
@@ -432,7 +439,7 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
example_block = [new_chat_msg, ...example_block];
// add the block only if there is enough space for all its messages
- const example_count = countTokens(example_block, true);
+ const example_count = handler_instance.count(example_block, true, 'examples');
await delay(1);
if ((total_count + example_count) < (this_max_context - oai_settings.openai_max_tokens)) {
examples_tosend.push(...example_block)
@@ -440,6 +447,7 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
}
else {
// early break since more examples probably won't fit anyway
+ handler_instance.uncount(example_count, 'examples');
break;
}
}
@@ -451,10 +459,14 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
openai_msgs_tosend.reverse();
openai_msgs_tosend = [prompt_msg, ...examples_tosend, new_chat_msg, ...openai_msgs_tosend]
- console.log("We're sending this:")
- console.log(openai_msgs_tosend);
- console.log(`Calculated the total context to be ${total_count} tokens`);
- return openai_msgs_tosend;
+ //console.log("We're sending this:")
+ //console.log(openai_msgs_tosend);
+ //console.log(`Calculated the total context to be ${total_count} tokens`);
+ handler_instance.log();
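+ // Return the assembled messages plus per-category counts, or false when the token breakdown is disabled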
+ return [
+ openai_msgs_tosend,
+ oai_settings.oai_breakdown ? handler_instance.counts : false,
+ ];
}
function getSystemPrompt(nsfw_toggle_prompt, enhance_definitions_prompt, wiBefore, storyString, wiAfter, extensionPrompt, isImpersonate) {
@@ -616,9 +628,42 @@ async function calculateLogitBias() {
}
}
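+// Tallies prompt token counts per category (start_chat, prompt, bias, nudge, jailbreak, impersonate, examples, conversation) for the token breakdown feature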
+class TokenHandler {
+ constructor(countTokenFn) {
+ this.countTokenFn = countTokenFn;
+ this.counts = {
+ 'start_chat': 0,
+ 'prompt': 0,
+ 'bias': 0,
+ 'nudge': 0,
+ 'jailbreak': 0,
+ 'impersonate': 0,
+ 'examples': 0,
+ 'conversation': 0,
+ };
+ }
+
+ uncount(value, type) {
+ this.counts[type] -= value;
+ }
+
+ count(messages, full, type) {
+ console.log(messages);
+ const token_count = this.countTokenFn(messages, full);
+ this.counts[type] += token_count;
+
+ return token_count;
+ }
+
+ log() {
+ const total = Object.values(this.counts).reduce((a, b) => a + b);
+ console.table({ ...this.counts, 'total': total });
+ }
+}
+
function countTokens(messages, full = false) {
let chatId = 'undefined';
-
+
try {
if (selected_group) {
chatId = groups.find(x => x.id == selected_group)?.chat_id;
@@ -705,6 +750,7 @@ function loadOpenAISettings(data, settings) {
if (settings.nsfw_first !== undefined) oai_settings.nsfw_first = !!settings.nsfw_first;
if (settings.openai_model !== undefined) oai_settings.openai_model = settings.openai_model;
if (settings.jailbreak_system !== undefined) oai_settings.jailbreak_system = !!settings.jailbreak_system;
+ if (settings.oai_breakdown !== undefined) oai_settings.oai_breakdown = !!settings.oai_breakdown;
$('#stream_toggle').prop('checked', oai_settings.stream_openai);
@@ -720,6 +766,7 @@ function loadOpenAISettings(data, settings) {
$('#wrap_in_quotes').prop('checked', oai_settings.wrap_in_quotes);
$('#nsfw_first').prop('checked', oai_settings.nsfw_first);
$('#jailbreak_system').prop('checked', oai_settings.jailbreak_system);
+ $('#oai_breakdown').prop('checked', oai_settings.oai_breakdown);
if (settings.main_prompt !== undefined) oai_settings.main_prompt = settings.main_prompt;
if (settings.nsfw_prompt !== undefined) oai_settings.nsfw_prompt = settings.nsfw_prompt;
@@ -839,6 +886,7 @@ async function saveOpenAIPreset(name, settings) {
jailbreak_system: settings.jailbreak_system,
impersonation_prompt: settings.impersonation_prompt,
bias_preset_selected: settings.bias_preset_selected,
+ oai_breakdown: settings.oai_breakdown,
};
const savePresetSettings = await fetch(`/savepreset_openai?name=${name}`, {
@@ -1046,7 +1094,7 @@ async function onDeletePresetClick() {
const response = await fetch('/deletepreset_openai', {
method: 'POST',
headers: getRequestHeaders(),
- body: JSON.stringify({name: nameToDelete}),
+ body: JSON.stringify({ name: nameToDelete }),
});
if (!response.ok) {
@@ -1097,6 +1145,7 @@ function onSettingsPresetChange() {
wrap_in_quotes: ['#wrap_in_quotes', 'wrap_in_quotes', true],
nsfw_first: ['#nsfw_first', 'nsfw_first', true],
jailbreak_system: ['#jailbreak_system', 'jailbreak_system', true],
+ oai_breakdown: ['#oai_breakdown', 'oai_breakdown', true],
main_prompt: ['#main_prompt_textarea', 'main_prompt', false],
nsfw_prompt: ['#nsfw_prompt_textarea', 'nsfw_prompt', false],
jailbreak_prompt: ['#jailbreak_prompt_textarea', 'jailbreak_prompt', false],
@@ -1163,7 +1212,7 @@ function onReverseProxyInput() {
async function onConnectButtonClick(e) {
e.stopPropagation();
const api_key_openai = $('#api_key_openai').val().trim();
-
+
if (api_key_openai.length) {
await writeSecret(SECRET_KEYS.OPENAI, api_key_openai);
}
@@ -1269,6 +1318,16 @@ $(document).ready(function () {
saveSettingsDebounced();
});
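+ // Toggle the token breakdown bar and persist the setting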
+ $("#oai_breakdown").on('change', function () {
+ oai_settings.oai_breakdown = !!$(this).prop("checked");
+ if (!oai_settings.oai_breakdown) {
+ $("#token_breakdown").css('display', 'none');
+ } else {
+ $("#token_breakdown").css('display', 'flex');
+ }
+ saveSettingsDebounced();
+ });
+
// auto-select a preset based on character/group name
$(document).on("click", ".character_select", function () {
const chid = $(this).attr('chid');
@@ -1322,18 +1381,18 @@ $(document).ready(function () {
saveSettingsDebounced();
});
- $("#api_button_openai").on('click', onConnectButtonClick);
- $("#openai_reverse_proxy").on('input', onReverseProxyInput);
- $("#model_openai_select").on('change', onModelChange);
- $("#settings_perset_openai").on('change', onSettingsPresetChange);
- $("#new_oai_preset").on('click', onNewPresetClick);
- $("#delete_oai_preset").on('click', onDeletePresetClick);
- $("#openai_api_usage").on('click', showApiKeyUsage);
- $('#openai_logit_bias_preset').on('change', onLogitBiasPresetChange);
- $('#openai_logit_bias_new_preset').on('click', createNewLogitBiasPreset);
- $('#openai_logit_bias_new_entry').on('click', createNewLogitBiasEntry);
- $('#openai_logit_bias_import_file').on('input', onLogitBiasPresetImportFileChange);
- $('#openai_logit_bias_import_preset').on('click', onLogitBiasPresetImportClick);
- $('#openai_logit_bias_export_preset').on('click', onLogitBiasPresetExportClick);
- $('#openai_logit_bias_delete_preset').on('click', onLogitBiasPresetDeleteClick);
+ $("#api_button_openai").on("click", onConnectButtonClick);
+ $("#openai_reverse_proxy").on("input", onReverseProxyInput);
+ $("#model_openai_select").on("change", onModelChange);
+ $("#settings_perset_openai").on("change", onSettingsPresetChange);
+ $("#new_oai_preset").on("click", onNewPresetClick);
+ $("#delete_oai_preset").on("click", onDeletePresetClick);
+ $("#openai_api_usage").on("click", showApiKeyUsage);
+ $("#openai_logit_bias_preset").on("change", onLogitBiasPresetChange);
+ $("#openai_logit_bias_new_preset").on("click", createNewLogitBiasPreset);
+ $("#openai_logit_bias_new_entry").on("click", createNewLogitBiasEntry);
+ $("#openai_logit_bias_import_file").on("input", onLogitBiasPresetImportFileChange);
+ $("#openai_logit_bias_import_preset").on("click", onLogitBiasPresetImportClick);
+ $("#openai_logit_bias_export_preset").on("click", onLogitBiasPresetExportClick);
+ $("#openai_logit_bias_delete_preset").on("click", onLogitBiasPresetDeleteClick);
});