Add extra padding for 0301 turbo model.
parent 5b954a3584
commit 02dca52948
@@ -288,7 +288,8 @@ function registerEndpoints(app, jsonParser) {
         if (!req.body) return res.sendStatus(400);
 
         let num_tokens = 0;
-        const model = getTokenizerModel(String(req.query.model || ''));
+        const queryModel = String(req.query.model || '');
+        const model = getTokenizerModel(queryModel);
 
         if (model == 'claude') {
             num_tokens = countClaudeTokens(claude_tokenizer, req.body);
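Note on the first hunk: getTokenizerModel normalizes the requested model name down to a tokenizer family, so the snapshot suffix is lost in the result; hoisting the raw queryModel keeps it available for the '-0301' check added below. A hypothetical sketch of that normalization, assuming typical family names (the real rules live elsewhere in this file):

// Hypothetical sketch of the normalization getTokenizerModel is assumed to perform.
// Family names and the fallback are illustrative, not this repo's exact rules; the
// point is only that the '-0301' suffix does not survive the mapping.
function getTokenizerModelSketch(requestModel) {
    if (requestModel.includes('claude')) return 'claude';
    if (requestModel.includes('gpt-4')) return 'gpt-4';
    return 'gpt-3.5-turbo'; // assumed fallback family
}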
@@ -316,6 +317,12 @@ function registerEndpoints(app, jsonParser) {
         }
         num_tokens += tokensPadding;
+
+        // NB: Since 2023-10-14, the GPT-3.5 Turbo 0301 model shoves in 7 extra tokens to every message.
+        // More details: https://community.openai.com/t/gpt-3-5-turbo-0301-showing-different-behavior-suddenly/431326/14
+        if (queryModel.endsWith('-0301')) {
+            num_tokens += 7;
+        }
 
         // not needed for cached tokenizers
         //tokenizer.free();
 
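Taken together, a minimal standalone sketch of the counting path this commit patches, assuming the handler follows OpenAI's commonly cited per-message counting recipe. The tokensPerMessage and tokensPadding values are assumptions, and encode stands in for a tiktoken-style encoder; only the final '-0301' branch is verbatim from the diff:

// Minimal sketch of chat token counting with the 0301 correction.
// encode(text) -> token array is an assumed interface (e.g. a tiktoken encoder);
// the constants follow OpenAI's commonly cited counting recipe, not this repo's code.
function countChatTokensSketch(messages, queryModel, encode) {
    const tokensPerMessage = 4; // assumed per-message overhead for turbo models
    const tokensPadding = 3;    // assumed reply-priming overhead

    let num_tokens = 0;
    for (const msg of messages) {
        num_tokens += tokensPerMessage;
        for (const value of Object.values(msg)) {
            num_tokens += encode(String(value)).length;
        }
    }
    num_tokens += tokensPadding;

    // The change from this commit: since 2023-10-14 the 0301 snapshot
    // reportedly adds 7 extra tokens to every request.
    if (queryModel.endsWith('-0301')) {
        num_tokens += 7;
    }

    return num_tokens;
}

For a quick smoke test, a toy whitespace "encoder" works: countChatTokensSketch([{ role: 'user', content: 'hello there' }], 'gpt-3.5-turbo-0301', (s) => s.split(/\s+/)) gives 4 + 1 + 2 + 3 + 7 = 17 under these assumed constants.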