diff --git a/public/index.html b/public/index.html
index f2efb2b5d..2e0ddf536 100644
--- a/public/index.html
+++ b/public/index.html
@@ -126,6 +126,7 @@
+
diff --git a/public/scripts/openai.js b/public/scripts/openai.js
index 7fd17d158..d636ca36a 100644
--- a/public/scripts/openai.js
+++ b/public/scripts/openai.js
@@ -1039,7 +1039,7 @@ $(document).ready(function () {
         const value = $(this).val();
         oai_settings.openai_model = value;
 
-        if (value == 'gpt-4') {
+        if (value == 'gpt-4' || value == 'gpt-4-0314') {
             $('#openai_max_context').attr('max', gpt4_max);
         }
         else if (value == 'gpt-4-32k') {
diff --git a/server.js b/server.js
index 9237e5fd3..8a343ba20 100644
--- a/server.js
+++ b/server.js
@@ -2159,7 +2159,7 @@ app.post("/openai_bias", jsonParser, async function (request, response) {
     let result = {};
 
-    const tokenizer = tiktoken.encoding_for_model(request.query.model);
+    const tokenizer = tiktoken.encoding_for_model(request.query.model === 'gpt-4-0314' ? 'gpt-4' : request.query.model);
 
     for (const entry of request.body) {
         if (!entry || !entry.text) {
@@ -2305,7 +2305,7 @@ app.post("/tokenize_openai", jsonParser, function (request, response_tokenize_op
     const tokensPerMessage = request.query.model.includes('gpt-4') ? 3 : 4;
     const tokensPadding = 3;
 
-    const tokenizer = tiktoken.encoding_for_model(request.query.model);
+    const tokenizer = tiktoken.encoding_for_model(request.query.model === 'gpt-4-0314' ? 'gpt-4' : request.query.model);
 
     let num_tokens = 0;
     for (const msg of request.body) {
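For context, both `server.js` hunks apply the same workaround: the bundled `tiktoken` build does not recognize the dated snapshot name `gpt-4-0314`, so the model is normalized to its `gpt-4` base before requesting an encoder. Since both names resolve to the same `cl100k_base` encoding, token counts are unaffected. A minimal standalone sketch of the pattern, assuming the same `tiktoken` package used in `server.js` (`getTokenizer` is a hypothetical helper, not code from this diff):

```js
const { encoding_for_model } = require('tiktoken');

// Normalize snapshot names the installed tiktoken build does not recognize
// to a base model it does. 'gpt-4-0314' is a dated snapshot of 'gpt-4' and
// shares its cl100k_base encoding, so the fallback changes nothing about
// the resulting token counts.
function getTokenizer(model) {
    const normalized = model === 'gpt-4-0314' ? 'gpt-4' : model;
    return encoding_for_model(normalized);
}

// Usage, mirroring how the endpoints above count tokens:
const tokenizer = getTokenizer('gpt-4-0314');
const ids = tokenizer.encode('Hello world');
console.log(ids.length);
tokenizer.free(); // the WASM-backed encoder must be freed explicitly
```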