#228 Don't use a selected tokenizer for parallel prompt building of OAI prompts

This commit is contained in:
Cohee1207
2023-05-06 15:30:15 +03:00
parent 8baf6d22b5
commit 7f718e09be
2 changed files with 20 additions and 1 deletions

View File

@ -73,6 +73,7 @@ import {
oai_settings,
is_get_status_openai,
openai_messages_count,
getTokenCountOpenAI,
} from "./scripts/openai.js";
import {
@ -368,7 +369,20 @@ $(document).ajaxError(function myErrorHandler(_, xhr) {
});
function getTokenCount(str, padding = 0) {
switch (power_user.tokenizer) {
let tokenizerType = power_user.tokenizer;
if (main_api === 'openai') {
// For main prompt building
if (padding == power_user.token_padding) {
tokenizerType = tokenizers.NONE;
// For extensions and WI
} else {
return getTokenCountOpenAI(str);
}
}
switch (tokenizerType) {
case tokenizers.NONE:
return Math.ceil(str.length / CHARACTERS_PER_TOKEN_RATIO) + padding;
case tokenizers.GPT3:

View File

@ -127,6 +127,11 @@ const oai_settings = {
let openai_setting_names;
let openai_settings;
/**
 * Counts tokens for a plain text string using the OpenAI tokenizer path.
 * The text is wrapped in a system-role chat message because `countTokens`
 * operates on message objects rather than raw strings.
 * @param {string} text - Raw text to measure.
 * @returns {number} Token count reported by `countTokens`.
 */
export function getTokenCountOpenAI(text) {
    // NOTE(review): second argument is a flag passed through to countTokens;
    // its exact semantics (presumably "full/padded count") live with that helper.
    return countTokens({ role: 'system', content: text }, true);
}
function validateReverseProxy() {
if (!oai_settings.reverse_proxy) {
return;