Actually use getCurrentDreamGenModelTokenizer

Cohee 2024-03-08 08:40:03 +02:00
parent 7639efb0fb
commit 2cdfda9d69
2 changed files with 10 additions and 3 deletions


@@ -14,7 +14,7 @@ import { BIAS_CACHE, createNewLogitBiasEntry, displayLogitBias, getLogitBiasList
 import { power_user, registerDebugFunction } from './power-user.js';
 import EventSourceStream from './sse-stream.js';
-import { getCurrentOpenRouterModelTokenizer } from './textgen-models.js';
+import { getCurrentDreamGenModelTokenizer, getCurrentOpenRouterModelTokenizer } from './textgen-models.js';
 import { SENTENCEPIECE_TOKENIZERS, TEXTGEN_TOKENIZERS, getTextTokens, tokenizers } from './tokenizers.js';
 import { getSortableDelay, onlyUnique } from './utils.js';
@@ -319,6 +319,10 @@ function getTokenizerForTokenIds() {
         return getCurrentOpenRouterModelTokenizer();
     }
 
+    if (settings.type === DREAMGEN) {
+        return getCurrentDreamGenModelTokenizer();
+    }
+
     return tokenizers.LLAMA;
 }
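The body of getCurrentDreamGenModelTokenizer is not part of this diff; it is only imported and called here. As a rough sketch of what such a helper could look like, modeled on the OpenRouter equivalent above — the dreamGenModels list and the model-prefix-to-tokenizer mapping are illustrative assumptions, not taken from this commit:

// Illustrative sketch (not from this commit): resolve the tokenizer for the
// currently selected DreamGen model, mirroring getCurrentOpenRouterModelTokenizer.
// dreamGenModels and the prefix checks below are assumed names/values.
export function getCurrentDreamGenModelTokenizer() {
    const modelId = textgen_settings.dreamgen_model;
    const model = dreamGenModels.find(x => x.id === modelId);
    if (!model) {
        // Safe fallback when no model is selected or the list isn't loaded yet.
        return tokenizers.LLAMA;
    }
    // Hypothetical mapping: pick the tokenizer family from the model id prefix.
    if (model.id.startsWith('opus-v1-sm')) {
        return tokenizers.MISTRAL;
    }
    if (model.id.startsWith('opus-v1-lg')) {
        return tokenizers.YI;
    }
    return tokenizers.LLAMA;
}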


@@ -5,9 +5,9 @@ import { groups, selected_group } from './group-chats.js';
 import { getStringHash } from './utils.js';
 import { kai_flags } from './kai-settings.js';
 import { textgen_types, textgenerationwebui_settings as textgen_settings, getTextGenServer } from './textgen-settings.js';
-import { getCurrentOpenRouterModelTokenizer, openRouterModels } from './textgen-models.js';
+import { getCurrentDreamGenModelTokenizer, getCurrentOpenRouterModelTokenizer, openRouterModels } from './textgen-models.js';
 
-const { OOBA, TABBY, KOBOLDCPP, APHRODITE, LLAMACPP, OPENROUTER } = textgen_types;
+const { OOBA, TABBY, KOBOLDCPP, APHRODITE, LLAMACPP, OPENROUTER, DREAMGEN } = textgen_types;
 
 export const CHARACTERS_PER_TOKEN_RATIO = 3.35;
 const TOKENIZER_WARNING_KEY = 'tokenizationWarningShown';
@@ -206,6 +206,9 @@ export function getTokenizerBestMatch(forApi) {
         if (forApi === 'textgenerationwebui' && textgen_settings.type === OPENROUTER) {
             return getCurrentOpenRouterModelTokenizer();
         }
+        if (forApi === 'textgenerationwebui' && textgen_settings.type === DREAMGEN) {
+            return getCurrentDreamGenModelTokenizer();
+        }
     }
 
     return tokenizers.LLAMA;
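Taken together, the two hunks cover both tokenizer-selection paths: getTokenizerForTokenIds() in the first file and getTokenizerBestMatch() here, so a DreamGen backend now resolves to a model-specific tokenizer instead of the generic Llama fallback. A minimal usage sketch, assuming getTextTokens keeps its (tokenizerType, text) call shape:

// Minimal sketch: with the DREAMGEN branch in place, the best-match helper
// returns the DreamGen model's tokenizer rather than tokenizers.LLAMA.
const tokenizerId = getTokenizerBestMatch('textgenerationwebui');
const tokenIds = getTextTokens(tokenizerId, 'Hello, world!');
console.log(tokenIds.length); // token count under the matched tokenizer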