Mirror of https://github.com/SillyTavern/SillyTavern.git, synced 2025-06-05 21:59:27 +02:00
Add types for SillyTavern.getContext
public/scripts/extensions.js
@@ -5,6 +5,7 @@ import { showLoader } from './loader.js';
 import { POPUP_RESULT, POPUP_TYPE, Popup, callGenericPopup } from './popup.js';
 import { renderTemplate, renderTemplateAsync } from './templates.js';
 import { isSubsetOf, setValueByPath } from './utils.js';
+import { getContext } from './st-context.js';
 export {
     getContext,
     getApiUrl,
@@ -174,7 +175,6 @@ const extension_settings = {
 let modules = [];
 let activeExtensions = new Set();
 
-const getContext = () => window['SillyTavern'].getContext();
 const getApiUrl = () => extension_settings.apiUrl;
 let connectedToApi = false;
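Note that extensions.js keeps exporting getContext, so existing extension code keeps working whether it goes through the module export or the window global that the removed wrapper used. A small sketch of both call sites; the import path and logged fields are illustrative, not part of this commit:

    // Hypothetical third-party extension code.
    // Option 1: the global accessor, as the removed local wrapper did.
    const ctx = window['SillyTavern'].getContext();
    // Option 2: the re-export from extensions.js (path depends on where the extension lives).
    // import { getContext } from '../../extensions.js';
    // const ctx = getContext();
    console.log(ctx.name1, ctx.mainApi);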
public/scripts/st-context.js (new file, 170 lines)
@@ -0,0 +1,170 @@
import {
    activateSendButtons,
    addOneMessage,
    callPopup,
    characters,
    chat,
    chat_metadata,
    create_save,
    deactivateSendButtons,
    event_types,
    eventSource,
    extension_prompts,
    Generate,
    generateQuietPrompt,
    getCurrentChatId,
    getRequestHeaders,
    getThumbnailUrl,
    main_api,
    max_context,
    menu_type,
    messageFormatting,
    name1,
    name2,
    online_status,
    openCharacterChat,
    reloadCurrentChat,
    renameChat,
    saveChatConditional,
    saveMetadata,
    saveReply,
    saveSettingsDebounced,
    selectCharacterById,
    sendGenerationRequest,
    sendStreamingRequest,
    sendSystemMessage,
    setExtensionPrompt,
    stopGeneration,
    streamingProcessor,
    substituteParams,
    substituteParamsExtended,
    this_chid,
    updateChatMetadata,
} from '../script.js';
import {
    extension_settings,
    ModuleWorkerWrapper,
    renderExtensionTemplate,
    renderExtensionTemplateAsync,
    writeExtensionField,
} from './extensions.js';
import { groups, openGroupChat, selected_group } from './group-chats.js';
import { t, translate } from './i18n.js';
import { hideLoader, showLoader } from './loader.js';
import { MacrosParser } from './macros.js';
import { oai_settings } from './openai.js';
import { callGenericPopup, Popup, POPUP_RESULT, POPUP_TYPE } from './popup.js';
import { registerDebugFunction } from './power-user.js';
import { isMobile, shouldSendOnEnter } from './RossAscends-mods.js';
import { ScraperManager } from './scrapers.js';
import { executeSlashCommands, executeSlashCommandsWithOptions, registerSlashCommand } from './slash-commands.js';
import { SlashCommand } from './slash-commands/SlashCommand.js';
import { ARGUMENT_TYPE, SlashCommandArgument, SlashCommandNamedArgument } from './slash-commands/SlashCommandArgument.js';
import { SlashCommandParser } from './slash-commands/SlashCommandParser.js';
import { tag_map, tags } from './tags.js';
import { textgenerationwebui_settings } from './textgen-settings.js';
import { getTokenCount, getTokenCountAsync, getTokenizerModel } from './tokenizers.js';
import { ToolManager } from './tool-calling.js';
import { timestampToMoment } from './utils.js';

export function getContext() {
    return {
        chatCompletionSettings: oai_settings,
        textCompletionSettings: textgenerationwebui_settings,
        chat,
        characters,
        groups,
        name1,
        name2,
        characterId: this_chid,
        groupId: selected_group,
        chatId: selected_group
            ? groups.find(x => x.id == selected_group)?.chat_id
            : (this_chid && characters[this_chid] && characters[this_chid].chat),
        getCurrentChatId,
        getRequestHeaders,
        reloadCurrentChat,
        renameChat,
        saveSettingsDebounced,
        onlineStatus: online_status,
        maxContext: Number(max_context),
        chatMetadata: chat_metadata,
        streamingProcessor,
        eventSource,
        eventTypes: event_types,
        addOneMessage,
        generate: Generate,
        sendStreamingRequest,
        sendGenerationRequest,
        stopGeneration,
        /** @deprecated Use getTokenCountAsync instead */
        getTokenCount,
        getTokenCountAsync,
        extensionPrompts: extension_prompts,
        setExtensionPrompt: setExtensionPrompt,
        updateChatMetadata: updateChatMetadata,
        saveChat: saveChatConditional,
        openCharacterChat,
        openGroupChat,
        saveMetadata,
        sendSystemMessage,
        activateSendButtons,
        deactivateSendButtons,
        saveReply,
        substituteParams,
        substituteParamsExtended,
        SlashCommandParser,
        SlashCommand,
        SlashCommandArgument,
        SlashCommandNamedArgument,
        ARGUMENT_TYPE,
        executeSlashCommandsWithOptions,
        /** @deprecated Use SlashCommandParser.addCommandObject() instead */
        registerSlashCommand,
        /** @deprecated Use executeSlashCommandsWithOptions instead */
        executeSlashCommands,
        timestampToMoment,
        /** @deprecated Handlebars for extensions are no longer supported. */
        registerHelper: () => { },
        registerMacro: MacrosParser.registerMacro.bind(MacrosParser),
        unregisterMacro: MacrosParser.unregisterMacro.bind(MacrosParser),
        registerFunctionTool: ToolManager.registerFunctionTool.bind(ToolManager),
        unregisterFunctionTool: ToolManager.unregisterFunctionTool.bind(ToolManager),
        isToolCallingSupported: ToolManager.isToolCallingSupported.bind(ToolManager),
        canPerformToolCalls: ToolManager.canPerformToolCalls.bind(ToolManager),
        registerDebugFunction: registerDebugFunction,
        /** @deprecated Use renderExtensionTemplateAsync instead. */
        renderExtensionTemplate: renderExtensionTemplate,
        renderExtensionTemplateAsync: renderExtensionTemplateAsync,
        registerDataBankScraper: ScraperManager.registerDataBankScraper,
        /** @deprecated Use callGenericPopup or Popup instead. */
        callPopup,
        callGenericPopup,
        showLoader,
        hideLoader,
        mainApi: main_api,
        extensionSettings: extension_settings,
        ModuleWorkerWrapper,
        getTokenizerModel,
        generateQuietPrompt,
        writeExtensionField,
        getThumbnailUrl,
        selectCharacterById,
        messageFormatting,
        shouldSendOnEnter,
        isMobile,
        t,
        translate,
        tags,
        tagMap: tag_map,
        menuType: menu_type,
        createCharacterData: create_save,
        /** @deprecated Legacy snake-case naming, compatibility with old extensions */
        event_types: event_types,
        Popup: Popup,
        POPUP_TYPE,
        POPUP_RESULT,
    };
}

export default getContext;
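The object above is what an extension receives from SillyTavern.getContext(). A hedged usage sketch follows (the macro name, event constant, handler arguments, and settings key are illustrative; exact signatures live in the modules imported at the top of st-context.js):

    // Inside an extension: grab the shared context once.
    const context = SillyTavern.getContext();

    // Register a custom macro through the bound MacrosParser helper.
    context.registerMacro('myExtensionTime', () => new Date().toLocaleTimeString());

    // React to chat events through the shared event bus.
    context.eventSource.on(context.eventTypes.MESSAGE_RECEIVED, (messageId) => {
        console.debug('message received', messageId, 'chat length', context.chat.length);
    });

    // Persist extension settings with the debounced saver.
    context.extensionSettings.myExtension = { enabled: true };
    context.saveSettingsDebounced();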
public/scripts/textgen-settings.js
@@ -18,13 +18,6 @@ import { getCurrentDreamGenModelTokenizer, getCurrentOpenRouterModelTokenizer }
 import { ENCODE_TOKENIZERS, TEXTGEN_TOKENIZERS, getTextTokens, tokenizers } from './tokenizers.js';
 import { getSortableDelay, onlyUnique } from './utils.js';
 
-export {
-    settings as textgenerationwebui_settings,
-    loadTextGenSettings,
-    generateTextGenWithStreaming,
-    formatTextGenURL,
-};
-
 export const textgen_types = {
     OOBA: 'ooba',
     MANCER: 'mancer',
@@ -197,6 +190,10 @@ const settings = {
     featherless_model: '',
 };
 
+export {
+    settings as textgenerationwebui_settings,
+};
+
 export let textgenerationwebui_banned_in_macros = [];
 
 export let textgenerationwebui_presets = [];
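The grouped export block from the top of textgen-settings.js is split up rather than dropped: the settings alias moves below the settings object (above), and the three functions become inline export declarations in the hunks that follow. Consumers are unaffected because they import the public names, e.g.:

    // Unchanged consumer side, as seen in tokenizers.js later in this diff.
    import { textgenerationwebui_settings as textgen_settings } from './textgen-settings.js';
    console.log(textgen_settings.type); // currently selected Text Completion backend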
@@ -327,7 +324,7 @@ async function selectPreset(name) {
     saveSettingsDebounced();
 }
 
-function formatTextGenURL(value) {
+export function formatTextGenURL(value) {
     try {
         const noFormatTypes = [MANCER, TOGETHERAI, INFERMATICAI, DREAMGEN, OPENROUTER];
         if (noFormatTypes.includes(settings.type)) {
@@ -465,7 +462,7 @@ function calculateLogitBias() {
     return result;
 }
 
-function loadTextGenSettings(data, loadedSettings) {
+export function loadTextGenSettings(data, loadedSettings) {
     textgenerationwebui_presets = convertPresets(data.textgenerationwebui_presets);
     textgenerationwebui_preset_names = data.textgenerationwebui_preset_names ?? [];
     Object.assign(settings, loadedSettings.textgenerationwebui_settings ?? {});
@@ -889,7 +886,7 @@ function setSettingByName(setting, value, trigger) {
  * @returns {Promise<(function(): AsyncGenerator<{swipes: [], text: string, toolCalls: [], logprobs: {token: string, topLogprobs: Candidate[]}|null}, void, *>)|*>}
  * @throws {Error} - If the response status is not OK, or from within the generator
  */
-async function generateTextGenWithStreaming(generate_data, signal) {
+export async function generateTextGenWithStreaming(generate_data, signal) {
     generate_data.stream = true;
 
     const response = await fetch('/api/backends/text-completions/generate', {
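Only the inline export is new here, but the JSDoc above documents the streaming shape: the function resolves to a factory for an AsyncGenerator of partial results. A hedged consumption sketch (the payload and abort handling are illustrative, not taken from this diff):

    const abort = new AbortController();
    const generate_data = { prompt: 'Hello', max_new_tokens: 64 }; // illustrative payload
    const generator = await generateTextGenWithStreaming(generate_data, abort.signal);
    let latest = null;
    for await (const chunk of generator()) {
        latest = chunk; // { swipes, text, toolCalls, logprobs } per the JSDoc
    }
    console.log(latest?.text);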
public/scripts/tokenizers.js
@@ -8,8 +8,6 @@ import { kai_flags } from './kai-settings.js';
 import { textgen_types, textgenerationwebui_settings as textgen_settings, getTextGenServer, getTextGenModel } from './textgen-settings.js';
 import { getCurrentDreamGenModelTokenizer, getCurrentOpenRouterModelTokenizer, openRouterModels } from './textgen-models.js';
 
-const { OOBA, TABBY, KOBOLDCPP, VLLM, APHRODITE, LLAMACPP, OPENROUTER, DREAMGEN } = textgen_types;
-
 export const CHARACTERS_PER_TOKEN_RATIO = 3.35;
 export const TOKENIZER_WARNING_KEY = 'tokenizationWarningShown';
 export const TOKENIZER_SUPPORTED_KEY = 'tokenizationSupported';
@@ -52,8 +50,12 @@ export const ENCODE_TOKENIZERS = [
     //tokenizers.NERD2,
 ];
 
-// A list of Text Completion sources that support remote tokenization.
-export const TEXTGEN_TOKENIZERS = [OOBA, TABBY, KOBOLDCPP, LLAMACPP, VLLM, APHRODITE];
+/**
+ * A list of Text Completion sources that support remote tokenization.
+ * Populated in initTokenizers due to circular dependencies.
+ * @type {string[]}
+ */
+export const TEXTGEN_TOKENIZERS = [];
 
 const TOKENIZER_URLS = {
     [tokenizers.GPT2]: {
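Because TEXTGEN_TOKENIZERS now starts empty and is only filled by initTokenizers() (last hunk of this diff), membership checks are only meaningful at call time, after initialization. The existing call sites already read it lazily, e.g.:

    // Evaluated when the function runs, i.e. after initTokenizers() has populated the array.
    const isTokenizerSupported = TEXTGEN_TOKENIZERS.includes(textgen_settings.type);
    // A top-level check at import time would always see an empty array.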
@@ -287,7 +289,7 @@ export function getTokenizerBestMatch(forApi) {
     const hasTokenizerError = sessionStorage.getItem(TOKENIZER_WARNING_KEY);
     const hasValidEndpoint = sessionStorage.getItem(TOKENIZER_SUPPORTED_KEY);
     const isConnected = online_status !== 'no_connection';
-    const isTokenizerSupported = TEXTGEN_TOKENIZERS.includes(textgen_settings.type) && (textgen_settings.type !== OOBA || hasValidEndpoint);
+    const isTokenizerSupported = TEXTGEN_TOKENIZERS.includes(textgen_settings.type) && (textgen_settings.type !== textgen_types.OOBA || hasValidEndpoint);
 
     if (!hasTokenizerError && isConnected) {
         if (forApi === 'kobold' && kai_flags.can_use_tokenization) {
@@ -297,10 +299,10 @@ export function getTokenizerBestMatch(forApi) {
         if (forApi === 'textgenerationwebui' && isTokenizerSupported) {
             return tokenizers.API_TEXTGENERATIONWEBUI;
         }
-        if (forApi === 'textgenerationwebui' && textgen_settings.type === OPENROUTER) {
+        if (forApi === 'textgenerationwebui' && textgen_settings.type === textgen_types.OPENROUTER) {
             return getCurrentOpenRouterModelTokenizer();
         }
-        if (forApi === 'textgenerationwebui' && textgen_settings.type === DREAMGEN) {
+        if (forApi === 'textgenerationwebui' && textgen_settings.type === textgen_types.DREAMGEN) {
             return getCurrentDreamGenModelTokenizer();
         }
     }
@@ -576,7 +578,7 @@ export function getTokenizerModel() {
 
     // And for OpenRouter (if not a site model, then it's impossible to determine the tokenizer)
     if (main_api == 'openai' && oai_settings.chat_completion_source == chat_completion_sources.OPENROUTER && oai_settings.openrouter_model ||
-        main_api == 'textgenerationwebui' && textgen_settings.type === OPENROUTER && textgen_settings.openrouter_model) {
+        main_api == 'textgenerationwebui' && textgen_settings.type === textgen_types.OPENROUTER && textgen_settings.openrouter_model) {
         const model = main_api == 'openai'
             ? model_list.find(x => x.id === oai_settings.openrouter_model)
             : openRouterModels.find(x => x.id === textgen_settings.openrouter_model);
@@ -652,7 +654,7 @@ export function getTokenizerModel() {
         return oai_settings.custom_model;
     }
 
-    if (oai_settings.chat_completion_source === chat_completion_sources.PERPLEXITY) {
+    if (oai_settings.chat_completion_source === chat_completion_sources.PERPLEXITY) {
         if (oai_settings.perplexity_model.includes('llama-3') || oai_settings.perplexity_model.includes('llama3')) {
             return llama3Tokenizer;
         }
@@ -680,7 +682,7 @@ export function getTokenizerModel() {
         return yiTokenizer;
     }
 
-    if (oai_settings.chat_completion_source === chat_completion_sources.BLOCKENTROPY) {
+    if (oai_settings.chat_completion_source === chat_completion_sources.BLOCKENTROPY) {
         if (oai_settings.blockentropy_model.includes('llama3')) {
             return llama3Tokenizer;
         }
@@ -1121,6 +1123,14 @@ export function decodeTextTokens(tokenizerType, ids) {
 }
 
 export async function initTokenizers() {
+    TEXTGEN_TOKENIZERS.push(
+        textgen_types.OOBA,
+        textgen_types.TABBY,
+        textgen_types.KOBOLDCPP,
+        textgen_types.LLAMACPP,
+        textgen_types.VLLM,
+        textgen_types.APHRODITE,
+    );
     await loadTokenCache();
     registerDebugFunction('resetTokenCache', 'Reset token cache', 'Purges the calculated token counts. Use this if you want to force a full re-tokenization of all chats or suspect the token counts are wrong.', resetTokenCache);
 }
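The empty-array-plus-init pattern is a common way to break an import cycle: textgen-settings.js imports from tokenizers.js (first hunk of that file above), and tokenizers.js imports textgen_types back from textgen-settings.js, so the constant cannot be built from textgen_types at module-evaluation time. A minimal standalone sketch of the idea; the module and function names are illustrative, not SillyTavern files:

    // a.js
    export const SUPPORTED = [];        // exported empty to avoid an import-time cycle
    export function initA(values) {
        SUPPORTED.push(...values);      // filled explicitly once all modules are loaded
    }

    // b.js (runs during application startup, before SUPPORTED is consulted)
    import { SUPPORTED, initA } from './a.js';
    initA(['x', 'y']);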