Mirror of https://github.com/SillyTavern/SillyTavern.git
Merge pull request #208 from bf62963/dev
@@ -126,6 +126,7 @@
                 <option value="gpt-3.5-turbo">gpt-3.5-turbo</option>
                 <option value="gpt-3.5-turbo-0301">gpt-3.5-turbo-0301</option>
                 <option value="gpt-4">gpt-4</option>
+                <option value="gpt-4-0314">gpt-4-0314</option>
                 <option value="gpt-4-32k">gpt-4-32k</option>
             </select>
         </div>
@@ -1039,7 +1039,7 @@ $(document).ready(function () {
         const value = $(this).val();
         oai_settings.openai_model = value;

-        if (value == 'gpt-4') {
+        if (value == 'gpt-4' || value == 'gpt-4-0314') {
            $('#openai_max_context').attr('max', gpt4_max);
         }
         else if (value == 'gpt-4-32k') {
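Note on the change above: the dated 'gpt-4-0314' snapshot shares the base gpt-4 context window, so it has to take the same branch that raises the context slider's maximum to gpt4_max. If more snapshots get added, a lookup table may read better than chained equality checks; a minimal sketch of that alternative, where the map, the helper name maxContextFor, and the numeric ceilings are all illustrative assumptions rather than code from this repository:

    // Illustrative: map each model id to its assumed context ceiling.
    const MAX_CONTEXT_BY_MODEL = {
        'gpt-4': 8191,
        'gpt-4-0314': 8191,
        'gpt-4-32k': 32767,
    };

    // Fall back to a conservative default for unknown models.
    function maxContextFor(model) {
        return MAX_CONTEXT_BY_MODEL[model] ?? 4095;
    }

    $('#openai_max_context').attr('max', maxContextFor(value));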
@@ -2159,7 +2159,7 @@ app.post("/openai_bias", jsonParser, async function (request, response) {

     let result = {};

-    const tokenizer = tiktoken.encoding_for_model(request.query.model);
+    const tokenizer = tiktoken.encoding_for_model(request.query.model === 'gpt-4-0314' ? 'gpt-4' : request.query.model);

     for (const entry of request.body) {
         if (!entry || !entry.text) {
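The rewritten tokenizer line works around the fact that, at the time of this change, tiktoken's encoding_for_model() did not recognize the dated 'gpt-4-0314' snapshot, so the name is normalized to plain 'gpt-4' before the lookup (both resolve to the same cl100k_base encoding). A minimal standalone sketch of the same idea, assuming the @dqbd/tiktoken package; the helper name getTokenizerForModel is illustrative, not part of the codebase:

    const tiktoken = require('@dqbd/tiktoken');

    // Normalize dated snapshots the tokenizer library may not know about
    // to their base model before asking for an encoding. The returned
    // encoder is WASM-backed, so callers should free() it when done.
    function getTokenizerForModel(model) {
        const normalized = model === 'gpt-4-0314' ? 'gpt-4' : model;
        return tiktoken.encoding_for_model(normalized);
    }

    const tokenizer = getTokenizerForModel('gpt-4-0314'); // same encoding as 'gpt-4'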
@@ -2305,7 +2305,7 @@ app.post("/tokenize_openai", jsonParser, function (request, response_tokenize_openai) {
     const tokensPerMessage = request.query.model.includes('gpt-4') ? 3 : 4;
     const tokensPadding = 3;

-    const tokenizer = tiktoken.encoding_for_model(request.query.model);
+    const tokenizer = tiktoken.encoding_for_model(request.query.model === 'gpt-4-0314' ? 'gpt-4' : request.query.model);

     let num_tokens = 0;
     for (const msg of request.body) {
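For context on the counters above: tokensPerMessage and tokensPadding follow OpenAI's published chat token-counting recipe, in which every message carries a fixed per-message overhead (3 tokens for gpt-4-family models, 4 for the early gpt-3.5-turbo-0301 message format) and the conversation is padded with 3 tokens that prime the assistant's reply. A rough sketch of how a loop like the one shown presumably accumulates num_tokens, written as an illustration rather than a copy of this file:

    // Illustrative: count prompt tokens for an array of chat messages,
    // where each message is an object such as { role: 'user', content: '...' }.
    function countChatTokens(tokenizer, messages, tokensPerMessage, tokensPadding) {
        let numTokens = tokensPadding;
        for (const msg of messages) {
            numTokens += tokensPerMessage;
            for (const value of Object.values(msg)) {
                if (typeof value === 'string') {
                    numTokens += tokenizer.encode(value).length;
                }
            }
        }
        return numTokens;
    }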