Add OpenAI o1

Cohee 2024-09-13 19:44:12 +03:00
parent 854541f4ba
commit 6d79cc015a
6 changed files with 48 additions and 10 deletions

package-lock.json (generated)

@@ -46,7 +46,7 @@
"sanitize-filename": "^1.6.3",
"sillytavern-transformers": "2.14.6",
"simple-git": "^3.19.1",
"tiktoken": "^1.0.15",
"tiktoken": "^1.0.16",
"vectra": "^0.2.2",
"wavefile": "^11.0.0",
"write-file-atomic": "^5.0.1",
@@ -5751,9 +5751,10 @@
"license": "MIT"
},
"node_modules/tiktoken": {
"version": "1.0.15",
"resolved": "https://registry.npmjs.org/tiktoken/-/tiktoken-1.0.15.tgz",
"integrity": "sha512-sCsrq/vMWUSEW29CJLNmPvWxlVp7yh2tlkAjpJltIKqp5CKf98ZNpdeHRmAlPVFlGEbswDc6SmI8vz64W/qErw=="
"version": "1.0.16",
"resolved": "https://registry.npmjs.org/tiktoken/-/tiktoken-1.0.16.tgz",
"integrity": "sha512-hRcORIGF2YlAgWx3nzrGJOrKSJwLoc81HpXmMQk89632XAgURc7IeV2FgQ2iXo9z/J96fCvpsHg2kWoHcbj9fg==",
"license": "MIT"
},
"node_modules/timm": {
"version": "1.7.1",

package.json

@@ -36,7 +36,7 @@
"sanitize-filename": "^1.6.3",
"sillytavern-transformers": "2.14.6",
"simple-git": "^3.19.1",
"tiktoken": "^1.0.15",
"tiktoken": "^1.0.16",
"vectra": "^0.2.2",
"wavefile": "^11.0.0",
"write-file-atomic": "^5.0.1",

public/index.html

@@ -383,7 +383,7 @@
Max Response Length (tokens)
</div>
<div class="wide100p">
<input type="number" id="openai_max_tokens" name="openai_max_tokens" class="text_pole" min="1" max="16384">
<input type="number" id="openai_max_tokens" name="openai_max_tokens" class="text_pole" min="1" max="65536 ">
</div>
</div>
<div class="range-block" data-source="openai,custom">
@@ -2611,6 +2611,10 @@
<option value="gpt-4-0125-preview">gpt-4-0125-preview (2024)</option>
<option value="gpt-4-1106-preview">gpt-4-1106-preview (2023)</option>
</optgroup>
<optgroup label="o1">
<option value="o1-preview">o1-preview</option>
<option value="o1-mini">o1-mini</option>
</optgroup>
<optgroup label="Other">
<option value="text-davinci-003">text-davinci-003</option>
<option value="text-davinci-002">text-davinci-002</option>

public/script.js

@@ -2862,7 +2862,12 @@ export function getCharacterCardFields() {
export function isStreamingEnabled() {
const noStreamSources = [chat_completion_sources.SCALE];
return ((main_api == 'openai' && oai_settings.stream_openai && !noStreamSources.includes(oai_settings.chat_completion_source) && !(oai_settings.chat_completion_source == chat_completion_sources.MAKERSUITE && oai_settings.google_model.includes('bison')))
return (
(main_api == 'openai' &&
oai_settings.stream_openai &&
!noStreamSources.includes(oai_settings.chat_completion_source) &&
!(oai_settings.chat_completion_source == chat_completion_sources.OPENAI && oai_settings.openai_model.startsWith('o1-')) &&
!(oai_settings.chat_completion_source == chat_completion_sources.MAKERSUITE && oai_settings.google_model.includes('bison')))
|| (main_api == 'kobold' && kai_settings.streaming_kobold && kai_flags.can_use_streaming)
|| (main_api == 'novel' && nai_settings.streaming_novel)
|| (main_api == 'textgenerationwebui' && textgen_settings.streaming));
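
In plainer terms, the reworked return expression treats streaming as unavailable whenever the OpenAI source is paired with an o1-family model. A minimal sketch of that new condition in isolation (the function and argument names below are illustrative, not part of the commit):

function openAiStreamingAllowed(source, model) {
    // Any model id beginning with 'o1-' opts out of streaming for the OpenAI source.
    const isO1 = typeof model === 'string' && model.startsWith('o1-');
    return !(source === 'openai' && isO1);
}

// openAiStreamingAllowed('openai', 'o1-preview'); // false
// openAiStreamingAllowed('openai', 'gpt-4o');     // true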

public/scripts/openai.js

@@ -1797,7 +1797,7 @@ async function sendOpenAIRequest(type, messages, signal) {
const isQuiet = type === 'quiet';
const isImpersonate = type === 'impersonate';
const isContinue = type === 'continue';
const stream = oai_settings.stream_openai && !isQuiet && !isScale && !(isGoogle && oai_settings.google_model.includes('bison'));
const stream = oai_settings.stream_openai && !isQuiet && !isScale && !(isGoogle && oai_settings.google_model.includes('bison')) && !(isOAI && oai_settings.openai_model.startsWith('o1-'));
const useLogprobs = !!power_user.request_token_probabilities;
const canMultiSwipe = oai_settings.n > 1 && !isContinue && !isImpersonate && !isQuiet && (isOAI || isCustom);
@@ -1960,12 +1960,33 @@ async function sendOpenAIRequest(type, messages, signal) {
generate_data['seed'] = oai_settings.seed;
}
await eventSource.emit(event_types.CHAT_COMPLETION_SETTINGS_READY, generate_data);
if (isFunctionCallingSupported() && !stream) {
await registerFunctionTools(type, generate_data);
}
if (isOAI && oai_settings.openai_model.startsWith('o1-')) {
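// The o1 API (as of this change) does not accept a system role or the sampling,
// streaming, and tool parameters deleted below, so system messages are demoted to user first.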
generate_data.messages.forEach((msg) => {
if (msg.role === 'system') {
msg.role = 'user';
}
});
delete generate_data.stream;
delete generate_data.logprobs;
delete generate_data.top_logprobs;
delete generate_data.n;
delete generate_data.temperature;
delete generate_data.top_p;
delete generate_data.frequency_penalty;
delete generate_data.presence_penalty;
delete generate_data.tools;
delete generate_data.tool_choice;
// IDK if it supports it and I have no way to test it
// delete generate_data.logit_bias;
// delete generate_data.stop;
}
await eventSource.emit(event_types.CHAT_COMPLETION_SETTINGS_READY, generate_data);
const generate_url = '/api/backends/chat-completions/generate';
const response = await fetch(generate_url, {
method: 'POST',
@@ -3905,6 +3926,9 @@ function getMaxContextOpenAI(value) {
if (oai_settings.max_context_unlocked) {
return unlocked_max;
}
else if (value.startsWith('o1-')) {
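// o1-preview and o1-mini expose a 128k-token context window, like the gpt-4o family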
return max_128k;
}
else if (value.includes('chatgpt-4o-latest') || value.includes('gpt-4-turbo') || value.includes('gpt-4o') || value.includes('gpt-4-1106') || value.includes('gpt-4-0125') || value.includes('gpt-4-vision')) {
return max_128k;
}
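
Taken together, these adjustments aim to produce a much leaner request body for o1 models. A rough sketch of the resulting payload, assuming the standard OpenAI-compatible chat-completions endpoint (all field values are illustrative, not taken from the commit):

const o1RequestBody = {
    model: 'o1-preview',
    // system entries have already been re-labelled as user messages
    messages: [
        { role: 'user', content: 'You are a helpful roleplay assistant.' },
        { role: 'user', content: 'Continue the story.' },
    ],
    max_tokens: 4096,
    // stream, logprobs, top_logprobs, n, temperature, top_p, frequency_penalty,
    // presence_penalty, tools and tool_choice have all been deleted above
};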

src/endpoints/tokenizers.js

@@ -350,6 +350,10 @@ function getWebTokenizersChunks(tokenizer, ids) {
* @returns {string} Tokenizer model to use
*/
function getTokenizerModel(requestModel) {
if (requestModel.includes('o1-preview') || requestModel.includes('o1-mini')) {
return 'gpt-4o';
}
if (requestModel.includes('gpt-4o')) {
return 'gpt-4o';
}