Fix Qwen and Command tokenizers not being used for logit bias
parent 0b0bd27321
commit 0207794a2b
@@ -15,7 +15,7 @@ import { BIAS_CACHE, createNewLogitBiasEntry, displayLogitBias, getLogitBiasList
 import { power_user, registerDebugFunction } from './power-user.js';
 import { getEventSourceStream } from './sse-stream.js';
 import { getCurrentDreamGenModelTokenizer, getCurrentOpenRouterModelTokenizer } from './textgen-models.js';
-import { SENTENCEPIECE_TOKENIZERS, TEXTGEN_TOKENIZERS, getTextTokens, tokenizers } from './tokenizers.js';
+import { ENCODE_TOKENIZERS, TEXTGEN_TOKENIZERS, getTextTokens, tokenizers } from './tokenizers.js';
 import { getSortableDelay, onlyUnique } from './utils.js';
 
 export {
@@ -353,7 +353,7 @@ function getTokenizerForTokenIds() {
         return tokenizers.API_CURRENT;
     }
 
-    if (SENTENCEPIECE_TOKENIZERS.includes(power_user.tokenizer)) {
+    if (ENCODE_TOKENIZERS.includes(power_user.tokenizer)) {
        return power_user.tokenizer;
     }
 
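For context, a minimal sketch of what this resolution change means for logit bias, assuming the getTextTokens(tokenizerType, text) helper exported from './tokenizers.js' returns an array of token ids; the getBiasTokenIds wrapper and the LLAMA fallback are illustrative, not the exact code in this file:

// Sketch only: resolve token ids for a logit bias phrase with the user's selected tokenizer.
import { ENCODE_TOKENIZERS, getTextTokens, tokenizers } from './tokenizers.js';
import { power_user } from './power-user.js';

function getBiasTokenIds(text) {
    // Before this commit, QWEN2 and COMMAND_R were absent from the list
    // (then named SENTENCEPIECE_TOKENIZERS), so the user's tokenizer choice
    // was skipped here and a default tokenizer was used instead.
    const tokenizerId = ENCODE_TOKENIZERS.includes(power_user.tokenizer)
        ? power_user.tokenizer
        : tokenizers.LLAMA; // assumed fallback, for illustration only
    return getTextTokens(tokenizerId, text);
}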
@@ -33,18 +33,22 @@ export const tokenizers = {
     BEST_MATCH: 99,
 };
 
-export const SENTENCEPIECE_TOKENIZERS = [
+// A list of local tokenizers that support encoding and decoding token ids.
+export const ENCODE_TOKENIZERS = [
     tokenizers.LLAMA,
     tokenizers.MISTRAL,
     tokenizers.YI,
     tokenizers.LLAMA3,
     tokenizers.GEMMA,
     tokenizers.JAMBA,
+    tokenizers.QWEN2,
+    tokenizers.COMMAND_R,
     // uncomment when NovelAI releases Kayra and Clio weights, lol
     //tokenizers.NERD,
     //tokenizers.NERD2,
 ];
 
+// A list of Text Completion sources that support remote tokenization.
 export const TEXTGEN_TOKENIZERS = [OOBA, TABBY, KOBOLDCPP, LLAMACPP, VLLM, APHRODITE];
 
 const TOKENIZER_URLS = {
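As a usage note, a minimal sketch of the membership check this list is intended for; the supportsLocalTokenIds helper is hypothetical and named here only for illustration:

// Sketch only: QWEN2 and COMMAND_R are now flagged as tokenizers that can
// encode and decode token ids locally, so logit bias can use them directly.
import { ENCODE_TOKENIZERS, tokenizers } from './tokenizers.js';

function supportsLocalTokenIds(tokenizerId) {
    return ENCODE_TOKENIZERS.includes(tokenizerId);
}

console.log(supportsLocalTokenIds(tokenizers.QWEN2));     // true after this commit
console.log(supportsLocalTokenIds(tokenizers.COMMAND_R)); // true after this commit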