#2085 Switch to async token counters

commit 306bff0a92
parent 1c4bad35b2
Author: Cohee
Date: 2024-04-13 21:33:19 +03:00
7 changed files with 75 additions and 78 deletions
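Every hunk below follows the same mechanical pattern: the synchronous `getTokenCount` is replaced by `getTokenCountAsync`, each call site gains an `await`, and each enclosing function becomes `async`. A minimal usage sketch, assuming `getTokenCountAsync` keeps the old `(text, padding)` signature and resolves to a plain number (the tokenizers.js internals are not part of this diff):

```js
import { getTokenCountAsync } from './scripts/tokenizers.js';

// Before: const tokens = getTokenCount(text, padding); // blocking
// After: callers must be async and await the resolved count.
async function measure(text, padding = 0) {
    const tokens = await getTokenCountAsync(text, padding);
    return tokens; // a number, same as the old synchronous result
}
```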

public/script.js

@@ -203,7 +203,7 @@ import {
selectContextPreset,
} from './scripts/instruct-mode.js';
import { applyLocale, initLocales } from './scripts/i18n.js';
-import { getFriendlyTokenizerName, getTokenCount, getTokenizerModel, initTokenizers, saveTokenCache } from './scripts/tokenizers.js';
+import { getFriendlyTokenizerName, getTokenCount, getTokenCountAsync, getTokenizerModel, initTokenizers, saveTokenCache } from './scripts/tokenizers.js';
import { createPersona, initPersonas, selectCurrentPersona, setPersonaDescription, updatePersonaNameIfExists } from './scripts/personas.js';
import { getBackgrounds, initBackgrounds, loadBackgroundSettings, background_settings } from './scripts/backgrounds.js';
import { hideLoader, showLoader } from './scripts/loader.js';
@@ -3469,7 +3469,7 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
let chatString = '';
let cyclePrompt = '';
-function getMessagesTokenCount() {
+async function getMessagesTokenCount() {
const encodeString = [
beforeScenarioAnchor,
storyString,
@@ -3480,7 +3480,7 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
cyclePrompt,
userAlignmentMessage,
].join('').replace(/\r/gm, '');
-return getTokenCount(encodeString, power_user.token_padding);
+return getTokenCountAsync(encodeString, power_user.token_padding);
}
// Force pinned examples into the context
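The `await` at the accumulation sites above and below is load-bearing: `+=` with an un-awaited Promise silently corrupts the running total, so the context-fitting comparisons that follow would misbehave. A standalone demonstration (the counter stub is hypothetical):

```js
// Hypothetical stand-in for the real async counter; resolves to a number.
const getTokenCountAsync = async (text) => Math.ceil(text.length / 4);

(async () => {
    let wrong = 0;
    wrong += getTokenCountAsync('hello');       // "+" coerces the Promise,
    console.log(wrong);                         // -> the string "0[object Promise]"

    let right = 0;
    right += await getTokenCountAsync('hello'); // awaited first,
    console.log(right);                         // -> 2
})();
```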
@@ -3496,7 +3496,7 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
// Collect enough messages to fill the context
let arrMes = new Array(chat2.length);
-let tokenCount = getMessagesTokenCount();
+let tokenCount = await getMessagesTokenCount();
let lastAddedIndex = -1;
// Pre-allocate all injections first.
@@ -3508,7 +3508,7 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
continue;
}
-tokenCount += getTokenCount(item.replace(/\r/gm, ''));
+tokenCount += await getTokenCountAsync(item.replace(/\r/gm, ''));
chatString = item + chatString;
if (tokenCount < this_max_context) {
arrMes[index] = item;
@@ -3538,7 +3538,7 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
continue;
}
-tokenCount += getTokenCount(item.replace(/\r/gm, ''));
+tokenCount += await getTokenCountAsync(item.replace(/\r/gm, ''));
chatString = item + chatString;
if (tokenCount < this_max_context) {
arrMes[i] = item;
@@ -3554,7 +3554,7 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
// Add user alignment message if last message is not a user message
const stoppedAtUser = userMessageIndices.includes(lastAddedIndex);
if (addUserAlignment && !stoppedAtUser) {
-tokenCount += getTokenCount(userAlignmentMessage.replace(/\r/gm, ''));
+tokenCount += await getTokenCountAsync(userAlignmentMessage.replace(/\r/gm, ''));
chatString = userAlignmentMessage + chatString;
arrMes.push(userAlignmentMessage);
injectedIndices.push(arrMes.length - 1);
@@ -3580,11 +3580,11 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
}
// Estimate how many unpinned example messages fit in the context
-tokenCount = getMessagesTokenCount();
+tokenCount = await getMessagesTokenCount();
let count_exm_add = 0;
if (!power_user.pin_examples) {
for (let example of mesExamplesArray) {
-tokenCount += getTokenCount(example.replace(/\r/gm, ''));
+tokenCount += await getTokenCountAsync(example.replace(/\r/gm, ''));
examplesString += example;
if (tokenCount < this_max_context) {
count_exm_add++;
@@ -3739,7 +3739,7 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
return promptCache;
}
-function checkPromptSize() {
+async function checkPromptSize() {
console.debug('---checking Prompt size');
setPromptString();
const prompt = [
@@ -3752,15 +3752,15 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
generatedPromptCache,
quiet_prompt,
].join('').replace(/\r/gm, '');
-let thisPromptContextSize = getTokenCount(prompt, power_user.token_padding);
+let thisPromptContextSize = await getTokenCountAsync(prompt, power_user.token_padding);
if (thisPromptContextSize > this_max_context) { //if the prepared prompt is larger than the max context size...
if (count_exm_add > 0) { // ..and we have example messages..
count_exm_add--; // remove the example messages...
-checkPromptSize(); // and try again...
+await checkPromptSize(); // and try again...
} else if (mesSend.length > 0) { // if the chat history is longer than 0
mesSend.shift(); // remove the first (oldest) chat entry..
-checkPromptSize(); // and check size again..
+await checkPromptSize(); // and check size again..
} else {
//end
console.debug(`---mesSend.length = ${mesSend.length}`);
@@ -3770,7 +3770,7 @@ async function Generate(type, { automatic_trigger, force_name2, quiet_prompt, qu
if (generatedPromptCache.length > 0 && main_api !== 'openai') {
console.debug('---Generated Prompt Cache length: ' + generatedPromptCache.length);
-checkPromptSize();
+await checkPromptSize();
} else {
console.debug('---calling setPromptString ' + generatedPromptCache.length);
setPromptString();
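`checkPromptSize` trims the prompt by recursion, so each recursive call now has to be awaited as well; otherwise the caller would proceed before trimming finished. The shape of the logic, condensed into a sketch with hypothetical names:

```js
import { getTokenCountAsync } from './scripts/tokenizers.js';

// Condensed sketch of the trimming recursion after this change.
async function checkPromptSizeSketch(parts, maxContext, padding) {
    const size = await getTokenCountAsync(parts.join(''), padding);
    if (size > maxContext && parts.length > 0) {
        parts.shift();                                           // drop the oldest entry...
        await checkPromptSizeSketch(parts, maxContext, padding); // ...and re-check
    }
}
```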
@@ -4433,7 +4433,7 @@ export async function sendMessageAsUser(messageText, messageBias, insertAt = nul
};
if (power_user.message_token_count_enabled) {
-message.extra.token_count = getTokenCount(message.mes, 0);
+message.extra.token_count = await getTokenCountAsync(message.mes, 0);
}
// Lock user avatar to a persona.
@@ -4596,21 +4596,21 @@ async function promptItemize(itemizedPrompts, requestedMesId) {
}
const params = {
-charDescriptionTokens: getTokenCount(itemizedPrompts[thisPromptSet].charDescription),
-charPersonalityTokens: getTokenCount(itemizedPrompts[thisPromptSet].charPersonality),
-scenarioTextTokens: getTokenCount(itemizedPrompts[thisPromptSet].scenarioText),
-userPersonaStringTokens: getTokenCount(itemizedPrompts[thisPromptSet].userPersona),
-worldInfoStringTokens: getTokenCount(itemizedPrompts[thisPromptSet].worldInfoString),
-allAnchorsTokens: getTokenCount(itemizedPrompts[thisPromptSet].allAnchors),
-summarizeStringTokens: getTokenCount(itemizedPrompts[thisPromptSet].summarizeString),
-authorsNoteStringTokens: getTokenCount(itemizedPrompts[thisPromptSet].authorsNoteString),
-smartContextStringTokens: getTokenCount(itemizedPrompts[thisPromptSet].smartContextString),
-beforeScenarioAnchorTokens: getTokenCount(itemizedPrompts[thisPromptSet].beforeScenarioAnchor),
-afterScenarioAnchorTokens: getTokenCount(itemizedPrompts[thisPromptSet].afterScenarioAnchor),
-zeroDepthAnchorTokens: getTokenCount(itemizedPrompts[thisPromptSet].zeroDepthAnchor), // TODO: unused
+charDescriptionTokens: await getTokenCountAsync(itemizedPrompts[thisPromptSet].charDescription),
+charPersonalityTokens: await getTokenCountAsync(itemizedPrompts[thisPromptSet].charPersonality),
+scenarioTextTokens: await getTokenCountAsync(itemizedPrompts[thisPromptSet].scenarioText),
+userPersonaStringTokens: await getTokenCountAsync(itemizedPrompts[thisPromptSet].userPersona),
+worldInfoStringTokens: await getTokenCountAsync(itemizedPrompts[thisPromptSet].worldInfoString),
+allAnchorsTokens: await getTokenCountAsync(itemizedPrompts[thisPromptSet].allAnchors),
+summarizeStringTokens: await getTokenCountAsync(itemizedPrompts[thisPromptSet].summarizeString),
+authorsNoteStringTokens: await getTokenCountAsync(itemizedPrompts[thisPromptSet].authorsNoteString),
+smartContextStringTokens: await getTokenCountAsync(itemizedPrompts[thisPromptSet].smartContextString),
+beforeScenarioAnchorTokens: await getTokenCountAsync(itemizedPrompts[thisPromptSet].beforeScenarioAnchor),
+afterScenarioAnchorTokens: await getTokenCountAsync(itemizedPrompts[thisPromptSet].afterScenarioAnchor),
+zeroDepthAnchorTokens: await getTokenCountAsync(itemizedPrompts[thisPromptSet].zeroDepthAnchor), // TODO: unused
thisPrompt_padding: itemizedPrompts[thisPromptSet].padding,
this_main_api: itemizedPrompts[thisPromptSet].main_api,
-chatInjects: getTokenCount(itemizedPrompts[thisPromptSet].chatInjects),
+chatInjects: await getTokenCountAsync(itemizedPrompts[thisPromptSet].chatInjects),
};
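All thirteen fields of `params` are now awaited one after another, which means up to thirteen sequential tokenizer round-trips per itemized prompt. A `Promise.all` batch would run them concurrently; a sketch of that alternative (not what this commit does):

```js
// Alternative, not used by the commit: count several fields concurrently.
const p = itemizedPrompts[thisPromptSet];
const [charDescriptionTokens, charPersonalityTokens, scenarioTextTokens] =
    await Promise.all([
        getTokenCountAsync(p.charDescription),
        getTokenCountAsync(p.charPersonality),
        getTokenCountAsync(p.scenarioText),
    ]);
```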
if (params.chatInjects) {
@@ -4664,13 +4664,13 @@ async function promptItemize(itemizedPrompts, requestedMesId) {
} else {
//for non-OAI APIs
//console.log('-- Counting non-OAI Tokens');
-params.finalPromptTokens = getTokenCount(itemizedPrompts[thisPromptSet].finalPrompt);
-params.storyStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].storyString) - params.worldInfoStringTokens;
-params.examplesStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].examplesString);
-params.mesSendStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].mesSendString);
+params.finalPromptTokens = await getTokenCountAsync(itemizedPrompts[thisPromptSet].finalPrompt);
+params.storyStringTokens = await getTokenCountAsync(itemizedPrompts[thisPromptSet].storyString) - params.worldInfoStringTokens;
+params.examplesStringTokens = await getTokenCountAsync(itemizedPrompts[thisPromptSet].examplesString);
+params.mesSendStringTokens = await getTokenCountAsync(itemizedPrompts[thisPromptSet].mesSendString);
params.ActualChatHistoryTokens = params.mesSendStringTokens - (params.allAnchorsTokens - (params.beforeScenarioAnchorTokens + params.afterScenarioAnchorTokens)) + power_user.token_padding;
-params.instructionTokens = getTokenCount(itemizedPrompts[thisPromptSet].instruction);
-params.promptBiasTokens = getTokenCount(itemizedPrompts[thisPromptSet].promptBias);
+params.instructionTokens = await getTokenCountAsync(itemizedPrompts[thisPromptSet].instruction);
+params.promptBiasTokens = await getTokenCountAsync(itemizedPrompts[thisPromptSet].promptBias);
params.totalTokensInPrompt =
params.storyStringTokens + //chardefs total
@@ -5073,7 +5073,7 @@ async function saveReply(type, getMessage, fromStreaming, title, swipes) {
chat[chat.length - 1]['extra']['api'] = getGeneratingApi();
chat[chat.length - 1]['extra']['model'] = getGeneratingModel();
if (power_user.message_token_count_enabled) {
-chat[chat.length - 1]['extra']['token_count'] = getTokenCount(chat[chat.length - 1]['mes'], 0);
+chat[chat.length - 1]['extra']['token_count'] = await getTokenCountAsync(chat[chat.length - 1]['mes'], 0);
}
const chat_id = (chat.length - 1);
await eventSource.emit(event_types.MESSAGE_RECEIVED, chat_id);
@@ -5093,7 +5093,7 @@ async function saveReply(type, getMessage, fromStreaming, title, swipes) {
chat[chat.length - 1]['extra']['api'] = getGeneratingApi();
chat[chat.length - 1]['extra']['model'] = getGeneratingModel();
if (power_user.message_token_count_enabled) {
-chat[chat.length - 1]['extra']['token_count'] = getTokenCount(chat[chat.length - 1]['mes'], 0);
+chat[chat.length - 1]['extra']['token_count'] = await getTokenCountAsync(chat[chat.length - 1]['mes'], 0);
}
const chat_id = (chat.length - 1);
await eventSource.emit(event_types.MESSAGE_RECEIVED, chat_id);
@@ -5110,7 +5110,7 @@ async function saveReply(type, getMessage, fromStreaming, title, swipes) {
chat[chat.length - 1]['extra']['api'] = getGeneratingApi();
chat[chat.length - 1]['extra']['model'] = getGeneratingModel();
if (power_user.message_token_count_enabled) {
-chat[chat.length - 1]['extra']['token_count'] = getTokenCount(chat[chat.length - 1]['mes'], 0);
+chat[chat.length - 1]['extra']['token_count'] = await getTokenCountAsync(chat[chat.length - 1]['mes'], 0);
}
const chat_id = (chat.length - 1);
await eventSource.emit(event_types.MESSAGE_RECEIVED, chat_id);
@@ -5135,7 +5135,7 @@ async function saveReply(type, getMessage, fromStreaming, title, swipes) {
chat[chat.length - 1]['gen_finished'] = generationFinished;
if (power_user.message_token_count_enabled) {
-chat[chat.length - 1]['extra']['token_count'] = getTokenCount(chat[chat.length - 1]['mes'], 0);
+chat[chat.length - 1]['extra']['token_count'] = await getTokenCountAsync(chat[chat.length - 1]['mes'], 0);
}
if (selected_group) {
@@ -7853,7 +7853,7 @@ function swipe_left() { // when we swipe left..but no generation.
duration: swipe_duration,
easing: animation_easing,
queue: false,
-complete: function () {
+complete: async function () {
const is_animation_scroll = ($('#chat').scrollTop() >= ($('#chat').prop('scrollHeight') - $('#chat').outerHeight()) - 10);
//console.log('on left swipe click calling addOneMessage');
addOneMessage(chat[chat.length - 1], { type: 'swipe' });
@@ -7864,7 +7864,7 @@ function swipe_left() { // when we swipe left..but no generation.
}
const swipeMessage = $('#chat').find(`[mesid="${chat.length - 1}"]`);
-const tokenCount = getTokenCount(chat[chat.length - 1].mes, 0);
+const tokenCount = await getTokenCountAsync(chat[chat.length - 1].mes, 0);
chat[chat.length - 1]['extra']['token_count'] = tokenCount;
swipeMessage.find('.tokenCounterDisplay').text(`${tokenCount}t`);
}
@@ -8029,7 +8029,7 @@ const swipe_right = () => {
duration: swipe_duration,
easing: animation_easing,
queue: false,
-complete: function () {
+complete: async function () {
/*if (!selected_group) {
var typingIndicator = $("#typing_indicator_template .typing_indicator").clone();
typingIndicator.find(".typing_indicator_name").text(characters[this_chid].name);
@@ -8055,7 +8055,7 @@ const swipe_right = () => {
chat[chat.length - 1].extra = {};
}
-const tokenCount = getTokenCount(chat[chat.length - 1].mes, 0);
+const tokenCount = await getTokenCountAsync(chat[chat.length - 1].mes, 0);
chat[chat.length - 1]['extra']['token_count'] = tokenCount;
swipeMessage.find('.tokenCounterDisplay').text(`${tokenCount}t`);
}
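Note that jQuery's `animate()` does not await a `complete` callback; marking it `async` just makes the returned Promise fire-and-forget. That is harmless in both swipe handlers, since the callback only refreshes the token counter display, but any rejection inside it would go unhandled. The pattern in isolation (the `.mes` selector here is illustrative; `.tokenCounterDisplay` and `#chat` are from the diff):

```js
import { getTokenCountAsync } from './scripts/tokenizers.js';

$('#chat .mes').last().animate({ opacity: 1 }, {
    duration: 200,
    queue: false,
    complete: async function () {
        // jQuery invokes this and discards the Promise it returns.
        const tokenCount = await getTokenCountAsync($(this).text(), 0);
        $(this).find('.tokenCounterDisplay').text(`${tokenCount}t`);
    },
});
```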
@@ -8565,7 +8565,7 @@ function addDebugFunctions() {
message.extra = {};
}
-message.extra.token_count = getTokenCount(message.mes, 0);
+message.extra.token_count = await getTokenCountAsync(message.mes, 0);
}
await saveChatConditional();

public/scripts/authors-note.js

@@ -11,7 +11,7 @@ import { selected_group } from './group-chats.js';
import { extension_settings, getContext, saveMetadataDebounced } from './extensions.js';
import { registerSlashCommand } from './slash-commands.js';
import { getCharaFilename, debounce, delay } from './utils.js';
-import { getTokenCount } from './tokenizers.js';
+import { getTokenCountAsync } from './tokenizers.js';
export { MODULE_NAME as NOTE_MODULE_NAME };
const MODULE_NAME = '2_floating_prompt'; // <= Deliberate, for sorting lower than memory
@@ -84,9 +84,9 @@ function updateSettings() {
setFloatingPrompt();
}
-const setMainPromptTokenCounterDebounced = debounce((value) => $('#extension_floating_prompt_token_counter').text(getTokenCount(value)), 1000);
-const setCharaPromptTokenCounterDebounced = debounce((value) => $('#extension_floating_chara_token_counter').text(getTokenCount(value)), 1000);
-const setDefaultPromptTokenCounterDebounced = debounce((value) => $('#extension_floating_default_token_counter').text(getTokenCount(value)), 1000);
+const setMainPromptTokenCounterDebounced = debounce(async (value) => $('#extension_floating_prompt_token_counter').text(await getTokenCountAsync(value)), 1000);
+const setCharaPromptTokenCounterDebounced = debounce(async (value) => $('#extension_floating_chara_token_counter').text(await getTokenCountAsync(value)), 1000);
+const setDefaultPromptTokenCounterDebounced = debounce(async (value) => $('#extension_floating_default_token_counter').text(await getTokenCountAsync(value)), 1000);
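Passing an `async` callback to `debounce` works because the wrapper only schedules the call and discards whatever the callback returns, Promise or not. A minimal debounce sketch illustrating this (the project's actual `debounce` from utils.js is not shown in the diff):

```js
import { getTokenCountAsync } from './tokenizers.js';

// Minimal debounce; an async fn is fine because its Promise is dropped.
function debounceSketch(fn, ms) {
    let timer;
    return function (...args) {
        clearTimeout(timer);
        timer = setTimeout(() => fn.apply(this, args), ms);
    };
}

const updateCounter = debounceSketch(async (value) => {
    $('#extension_floating_prompt_token_counter').text(await getTokenCountAsync(value));
}, 1000);
```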
async function onExtensionFloatingPromptInput() {
chat_metadata[metadata_keys.prompt] = $(this).val();
@@ -394,7 +394,7 @@ function onANMenuItemClick() {
}
}
-function onChatChanged() {
+async function onChatChanged() {
loadSettings();
setFloatingPrompt();
const context = getContext();
@@ -402,7 +402,7 @@ function onChatChanged() {
// Disable the chara note if in a group
$('#extension_floating_chara').prop('disabled', context.groupId ? true : false);
-const tokenCounter1 = chat_metadata[metadata_keys.prompt] ? getTokenCount(chat_metadata[metadata_keys.prompt]) : 0;
+const tokenCounter1 = chat_metadata[metadata_keys.prompt] ? await getTokenCountAsync(chat_metadata[metadata_keys.prompt]) : 0;
$('#extension_floating_prompt_token_counter').text(tokenCounter1);
let tokenCounter2;
@@ -410,15 +410,13 @@ function onChatChanged() {
const charaNote = extension_settings.note.chara.find((e) => e.name === getCharaFilename());
if (charaNote) {
-tokenCounter2 = getTokenCount(charaNote.prompt);
+tokenCounter2 = await getTokenCountAsync(charaNote.prompt);
}
}
-if (tokenCounter2) {
-$('#extension_floating_chara_token_counter').text(tokenCounter2);
-}
+$('#extension_floating_chara_token_counter').text(tokenCounter2 || 0);
-const tokenCounter3 = extension_settings.note.default ? getTokenCount(extension_settings.note.default) : 0;
+const tokenCounter3 = extension_settings.note.default ? await getTokenCountAsync(extension_settings.note.default) : 0;
$('#extension_floating_default_token_counter').text(tokenCounter3);
}
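The `tokenCounter2 || 0` rewrite also changes behavior slightly: the old guard skipped the update when the count was falsy, which could leave a stale number on screen after the note was cleared; the new line always writes, falling back to 0.

```js
// Old: only updated when truthy, so a cleared note kept its stale count.
// if (tokenCounter2) {
//     $('#extension_floating_chara_token_counter').text(tokenCounter2);
// }
// New: always write, defaulting to 0.
$('#extension_floating_chara_token_counter').text(tokenCounter2 || 0);
```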

public/scripts/extensions/memory/index.js

@@ -19,7 +19,7 @@ import { is_group_generating, selected_group } from '../../group-chats.js';
import { registerSlashCommand } from '../../slash-commands.js';
import { loadMovingUIState } from '../../power-user.js';
import { dragElement } from '../../RossAscends-mods.js';
-import { getTextTokens, getTokenCount, tokenizers } from '../../tokenizers.js';
+import { getTextTokens, getTokenCountAsync, tokenizers } from '../../tokenizers.js';
export { MODULE_NAME };
const MODULE_NAME = '1_memory';
@@ -129,7 +129,7 @@ async function onPromptForceWordsAutoClick() {
const allMessages = chat.filter(m => !m.is_system && m.mes).map(m => m.mes);
const messagesWordCount = allMessages.map(m => extractAllWords(m)).flat().length;
const averageMessageWordCount = messagesWordCount / allMessages.length;
-const tokensPerWord = getTokenCount(allMessages.join('\n')) / messagesWordCount;
+const tokensPerWord = await getTokenCountAsync(allMessages.join('\n')) / messagesWordCount;
const wordsPerToken = 1 / tokensPerWord;
const maxPromptLengthWords = Math.round(maxPromptLength * wordsPerToken);
// How many words should pass so that messages will start be dropped out of context;
@@ -166,11 +166,11 @@ async function onPromptIntervalAutoClick() {
const chat = context.chat;
const allMessages = chat.filter(m => !m.is_system && m.mes).map(m => m.mes);
const messagesWordCount = allMessages.map(m => extractAllWords(m)).flat().length;
-const messagesTokenCount = getTokenCount(allMessages.join('\n'));
+const messagesTokenCount = await getTokenCountAsync(allMessages.join('\n'));
const tokensPerWord = messagesTokenCount / messagesWordCount;
const averageMessageTokenCount = messagesTokenCount / allMessages.length;
const targetSummaryTokens = Math.round(extension_settings.memory.promptWords * tokensPerWord);
-const promptTokens = getTokenCount(extension_settings.memory.prompt);
+const promptTokens = await getTokenCountAsync(extension_settings.memory.prompt);
const promptAllowance = maxPromptLength - promptTokens - targetSummaryTokens;
const maxMessagesPerSummary = extension_settings.memory.maxMessagesPerRequest || 0;
const averageMessagesPerPrompt = Math.floor(promptAllowance / averageMessageTokenCount);
@@ -603,8 +603,7 @@ async function getRawSummaryPrompt(context, prompt) {
const entry = `${message.name}:\n${message.mes}`;
chatBuffer.push(entry);
-const tokens = getTokenCount(getMemoryString(true), PADDING);
-await delay(1);
+const tokens = await getTokenCountAsync(getMemoryString(true), PADDING);
if (tokens > PROMPT_SIZE) {
chatBuffer.pop();
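This hunk also removes an `await delay(1)` that appears to have existed only to yield control back to the UI between heavy synchronous tokenizer calls; with an async counter, the `await` on `getTokenCountAsync` itself returns to the event loop while the count resolves. The two lines of the diff, juxtaposed as an annotated excerpt:

```js
// Before: block the thread tokenizing, then sleep 1 ms so the UI can breathe.
// const tokens = getTokenCount(getMemoryString(true), PADDING);
// await delay(1);

// After: the await itself yields until the count resolves.
// const tokens = await getTokenCountAsync(getMemoryString(true), PADDING);
```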

public/scripts/extensions/token-counter/index.js

@@ -1,7 +1,7 @@
import { callPopup, main_api } from '../../../script.js';
import { getContext } from '../../extensions.js';
import { registerSlashCommand } from '../../slash-commands.js';
-import { getFriendlyTokenizerName, getTextTokens, getTokenCount, tokenizers } from '../../tokenizers.js';
+import { getFriendlyTokenizerName, getTextTokens, getTokenCountAsync, tokenizers } from '../../tokenizers.js';
import { resetScrollHeight, debounce } from '../../utils.js';
function rgb2hex(rgb) {
@@ -38,7 +38,7 @@ async function doTokenCounter() {
</div>`;
const dialog = $(html);
-const countDebounced = debounce(() => {
+const countDebounced = debounce(async () => {
const text = String($('#token_counter_textarea').val());
const ids = main_api == 'openai' ? getTextTokens(tokenizers.OPENAI, text) : getTextTokens(tokenizerId, text);
@@ -50,8 +50,7 @@ async function doTokenCounter() {
drawChunks(Object.getOwnPropertyDescriptor(ids, 'chunks').value, ids);
}
} else {
-const context = getContext();
-const count = context.getTokenCount(text);
+const count = await getTokenCountAsync(text);
$('#token_counter_ids').text('—');
$('#token_counter_result').text(count);
$('#tokenized_chunks_display').text('—');
@@ -109,7 +108,7 @@ function drawChunks(chunks, ids) {
}
}
-function doCount() {
+async function doCount() {
// get all of the messages in the chat
const context = getContext();
const messages = context.chat.filter(x => x.mes && !x.is_system).map(x => x.mes);
@@ -120,7 +119,8 @@ function doCount() {
console.debug('All messages:', allMessages);
//toastr success with the token count of the chat
-toastr.success(`Token count: ${getTokenCount(allMessages)}`);
+const count = await getTokenCountAsync(allMessages);
+toastr.success(`Token count: ${count}`);
}
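The toast had to be split into two statements because a template literal stringifies an un-awaited Promise instead of the count:

```js
import { getTokenCountAsync } from '../../tokenizers.js';

async function doCountSketch(allMessages) {
    // Un-awaited: the literal renders the Promise object itself.
    toastr.success(`Token count: ${getTokenCountAsync(allMessages)}`);
    // -> "Token count: [object Promise]"

    // Awaited first, as in the diff above:
    const count = await getTokenCountAsync(allMessages);
    toastr.success(`Token count: ${count}`);
}
```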
jQuery(() => {

public/scripts/personas.js

@@ -17,7 +17,7 @@ import {
user_avatar,
} from '../script.js';
import { persona_description_positions, power_user } from './power-user.js';
-import { getTokenCount } from './tokenizers.js';
+import { getTokenCountAsync } from './tokenizers.js';
import { debounce, delay, download, parseJsonFile } from './utils.js';
const GRID_STORAGE_KEY = 'Personas_GridView';
@@ -171,9 +171,9 @@ export async function convertCharacterToPersona(characterId = null) {
/**
* Counts the number of tokens in a persona description.
*/
-const countPersonaDescriptionTokens = debounce(() => {
+const countPersonaDescriptionTokens = debounce(async () => {
const description = String($('#persona_description').val());
-const count = getTokenCount(description);
+const count = await getTokenCountAsync(description);
$('#persona_description_token_count').text(String(count));
}, 1000);

public/scripts/slash-commands.js

@@ -46,7 +46,7 @@ import { chat_completion_sources, oai_settings } from './openai.js';
import { autoSelectPersona } from './personas.js';
import { addEphemeralStoppingString, chat_styles, flushEphemeralStoppingStrings, power_user } from './power-user.js';
import { textgen_types, textgenerationwebui_settings } from './textgen-settings.js';
-import { decodeTextTokens, getFriendlyTokenizerName, getTextTokens, getTokenCount } from './tokenizers.js';
+import { decodeTextTokens, getFriendlyTokenizerName, getTextTokens, getTokenCountAsync } from './tokenizers.js';
import { delay, isFalseBoolean, isTrueBoolean, stringToRange, trimToEndSentence, trimToStartSentence, waitUntilCondition } from './utils.js';
import { registerVariableCommands, resolveVariable } from './variables.js';
import { background_settings } from './backgrounds.js';
@@ -249,7 +249,7 @@ parser.addCommand('trimend', trimEndCallback, [], '<span class="monospace">(text
parser.addCommand('inject', injectCallback, [], '<span class="monospace">id=injectId (position=before/after/chat depth=number scan=true/false role=system/user/assistant [text])</span> injects a text into the LLM prompt for the current chat. Requires a unique injection ID. Positions: "before" main prompt, "after" main prompt, in-"chat" (default: after). Depth: injection depth for the prompt (default: 4). Role: role for in-chat injections (default: system). Scan: include injection content into World Info scans (default: false).', true, true);
parser.addCommand('listinjects', listInjectsCallback, [], ' lists all script injections for the current chat.', true, true);
parser.addCommand('flushinjects', flushInjectsCallback, [], ' removes all script injections for the current chat.', true, true);
-parser.addCommand('tokens', (_, text) => getTokenCount(text), [], '<span class="monospace">(text)</span> counts the number of tokens in the text.', true, true);
+parser.addCommand('tokens', (_, text) => getTokenCountAsync(text), [], '<span class="monospace">(text)</span> counts the number of tokens in the text.', true, true);
parser.addCommand('model', modelCallback, [], '<span class="monospace">(model name)</span> sets the model for the current API. Gets the current model name if no argument is provided.', true, true);
registerVariableCommands();
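The `/tokens` callback now returns a Promise rather than a number. That is only sound if the slash-command runner resolves callback return values; presumably it awaits them, along the lines of this sketch (hypothetical runner, not the actual slash-commands.js internals):

```js
import { getTokenCountAsync } from './tokenizers.js';

// Hypothetical runner: awaiting works for sync and async callbacks alike,
// because `await` on a non-Promise value is a no-op.
async function runCommand(callback, args, text) {
    return await callback(args, text);
}

runCommand((_, text) => getTokenCountAsync(text), {}, 'count me')
    .then((count) => console.log(count));
```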
@@ -388,7 +388,7 @@ function trimEndCallback(_, value) {
return trimToEndSentence(value);
}
-function trimTokensCallback(arg, value) {
+async function trimTokensCallback(arg, value) {
if (!value) {
console.warn('WARN: No argument provided for /trimtokens command');
return '';
@@ -406,7 +406,7 @@ function trimTokensCallback(arg, value) {
}
const direction = arg.direction || 'end';
-const tokenCount = getTokenCount(value);
+const tokenCount = await getTokenCountAsync(value);
// Token count is less than the limit, do nothing
if (tokenCount <= limit) {

public/scripts/world-info.js

@@ -5,7 +5,7 @@ import { NOTE_MODULE_NAME, metadata_keys, shouldWIAddPrompt } from './authors-no
import { registerSlashCommand } from './slash-commands.js';
import { isMobile } from './RossAscends-mods.js';
import { FILTER_TYPES, FilterHelper } from './filters.js';
-import { getTokenCount } from './tokenizers.js';
+import { getTokenCountAsync } from './tokenizers.js';
import { power_user } from './power-user.js';
import { getTagKeyForEntity } from './tags.js';
import { resolveVariable } from './variables.js';
@@ -1189,8 +1189,8 @@ function getWorldEntry(name, data, entry) {
// content
const counter = template.find('.world_entry_form_token_counter');
-const countTokensDebounced = debounce(function (counter, value) {
-const numberOfTokens = getTokenCount(value);
+const countTokensDebounced = debounce(async function (counter, value) {
+const numberOfTokens = await getTokenCountAsync(value);
$(counter).text(numberOfTokens);
}, 1000);
@@ -2177,7 +2177,7 @@ async function checkWorldInfo(chat, maxContext) {
const newEntries = [...activatedNow]
.sort((a, b) => sortedEntries.indexOf(a) - sortedEntries.indexOf(b));
let newContent = '';
-const textToScanTokens = getTokenCount(allActivatedText);
+const textToScanTokens = await getTokenCountAsync(allActivatedText);
const probabilityChecksBefore = failedProbabilityChecks.size;
filterByInclusionGroups(newEntries, allActivatedEntries);
@@ -2194,7 +2194,7 @@ async function checkWorldInfo(chat, maxContext) {
newContent += `${substituteParams(entry.content)}\n`;
-if (textToScanTokens + getTokenCount(newContent) >= budget) {
+if ((textToScanTokens + (await getTokenCountAsync(newContent))) >= budget) {
console.debug('WI budget reached, stopping');
if (world_info_overflow_alert) {
console.log('Alerting');
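The world info budget check now awaits a fresh count of the accumulated `newContent` for every activated entry, so the loop is correct but strictly sequential: one tokenizer round-trip per entry. The loop's shape, condensed into a sketch with hypothetical names:

```js
import { getTokenCountAsync } from './tokenizers.js';

// Condensed sketch of the budget loop after this change.
async function fitEntriesSketch(entries, budget, textToScanTokens) {
    let newContent = '';
    for (const entry of entries) {
        newContent += `${entry.content}\n`;
        if (textToScanTokens + (await getTokenCountAsync(newContent)) >= budget) {
            console.debug('WI budget reached, stopping');
            break;
        }
    }
    return newContent;
}
```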