diff --git a/public/scripts/tokenizers.js b/public/scripts/tokenizers.js
index 8430c36ab..61d4d0041 100644
--- a/public/scripts/tokenizers.js
+++ b/public/scripts/tokenizers.js
@@ -4,7 +4,7 @@ import { chat_completion_sources, model_list, oai_settings } from "./openai.js";
 import { groups, selected_group } from "./group-chats.js";
 import { getStringHash } from "./utils.js";
 import { kai_flags } from "./kai-settings.js";
-import { isMancer, textgenerationwebui_settings } from "./textgen-settings.js";
+import { isMancer, isTabby, textgenerationwebui_settings } from "./textgen-settings.js";
 
 export const CHARACTERS_PER_TOKEN_RATIO = 3.35;
 const TOKENIZER_WARNING_KEY = 'tokenizationWarningShown';
@@ -369,6 +369,7 @@ function getRemoteTokenizationParams(str) {
         api: main_api,
         url: getAPIServerUrl(),
         legacy_api: main_api === 'textgenerationwebui' && textgenerationwebui_settings.legacy_api && !isMancer(),
+        use_tabby: isTabby()
     };
 }
diff --git a/server.js b/server.js
index 08ce7e641..e05a63b95 100644
--- a/server.js
+++ b/server.js
@@ -3424,7 +3424,7 @@ app.post("/tokenize_via_api", jsonParser, async function (request, response) {
         url += '/v1/token-count';
         args.body = JSON.stringify({ "prompt": text });
     } else {
-        url += '/v1/internal/encode';
+        url += request.body.use_tabby ? '/v1/token/encode' : '/v1/internal/encode';
         args.body = JSON.stringify({ "text": text });
     }
 
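
For context, a minimal sketch of the end-to-end routing these two hunks produce: the client now reports whether the text-generation backend is TabbyAPI via the use_tabby flag, and the /tokenize_via_api handler picks the matching tokenization endpoint. The standalone helper below (buildTokenizeRequest) and its shape are illustrative assumptions for review purposes, not code from this PR; only the URL and payload selection mirror the server.js hunk above.

    // Sketch only: mirrors the endpoint selection added in server.js.
    // Assumes baseUrl points at the text-generation backend's API root.
    function buildTokenizeRequest(baseUrl, text, { legacy_api = false, use_tabby = false } = {}) {
        if (legacy_api) {
            // Legacy text-generation-webui API: token counting via /v1/token-count.
            return { url: baseUrl + '/v1/token-count', body: JSON.stringify({ prompt: text }) };
        }
        // TabbyAPI exposes /v1/token/encode; the current ooba API uses /v1/internal/encode.
        const endpoint = use_tabby ? '/v1/token/encode' : '/v1/internal/encode';
        return { url: baseUrl + endpoint, body: JSON.stringify({ text: text }) };
    }

    // Example: a Tabby backend is routed to /v1/token/encode.
    console.log(buildTokenizeRequest('http://127.0.0.1:5000', 'Hello', { use_tabby: true }).url);

Mancer and legacy-API checks stay as they were; use_tabby only changes which non-legacy encode endpoint the request hits.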