Merge branch 'dev' of https://github.com/SillyLossy/TavernAI into dev
@@ -101,6 +101,7 @@ const default_settings = {
     openai_model: 'gpt-3.5-turbo',
     jailbreak_system: false,
     reverse_proxy: '',
+    oai_breakdown: false,
 };
 
 const oai_settings = {
@@ -125,6 +126,7 @@ const oai_settings = {
     openai_model: 'gpt-3.5-turbo',
     jailbreak_system: false,
     reverse_proxy: '',
+    oai_breakdown: false,
 };
 
 let openai_setting_names;
@@ -305,16 +307,18 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
     let openai_msgs_tosend = [];
 
+    // todo: static value, maybe include in the initial context calculation
+    const handler_instance = new TokenHandler(countTokens);
 
     let new_chat_msg = { "role": "system", "content": "[Start a new chat]" };
-    let start_chat_count = countTokens([new_chat_msg], true);
+    let start_chat_count = handler_instance.count([new_chat_msg], true, 'start_chat');
     await delay(1);
-    let total_count = countTokens([prompt_msg], true) + start_chat_count;
+    let total_count = handler_instance.count([prompt_msg], true, 'prompt') + start_chat_count;
     await delay(1);
 
     if (bias && bias.trim().length) {
         let bias_msg = { "role": "system", "content": bias.trim() };
         openai_msgs.push(bias_msg);
-        total_count += countTokens([bias_msg], true);
+        total_count += handler_instance.count([bias_msg], true, 'bias');
         await delay(1);
     }
 
@@ -331,13 +335,14 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
         openai_msgs.push(group_nudge);
 
         // add a group nudge count
-        let group_nudge_count = countTokens([group_nudge], true);
+        let group_nudge_count = handler_instance.count([group_nudge], true, 'nudge');
         await delay(1);
         total_count += group_nudge_count;
 
         // recount tokens for new start message
         total_count -= start_chat_count
-        start_chat_count = countTokens([new_chat_msg], true);
+        handler_instance.uncount(start_chat_count, 'start_chat');
+        start_chat_count = handler_instance.count([new_chat_msg], true);
         await delay(1);
         total_count += start_chat_count;
     }
@@ -346,7 +351,7 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
         const jailbreakMessage = { "role": "system", "content": substituteParams(oai_settings.jailbreak_prompt) };
         openai_msgs.push(jailbreakMessage);
 
-        total_count += countTokens([jailbreakMessage], true);
+        total_count += handler_instance.count([jailbreakMessage], true, 'jailbreak');
         await delay(1);
     }
 
@@ -354,7 +359,7 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
         const impersonateMessage = { "role": "system", "content": substituteParams(oai_settings.impersonation_prompt) };
         openai_msgs.push(impersonateMessage);
 
-        total_count += countTokens([impersonateMessage], true);
+        total_count += handler_instance.count([impersonateMessage], true, 'impersonate');
         await delay(1);
     }
 
@@ -377,12 +382,12 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
                     examples_tosend.push(example);
                 }
             }
-            total_count += countTokens(examples_tosend, true);
+            total_count += handler_instance.count(examples_tosend, true, 'examples');
            await delay(1);
            // go from newest message to oldest, because we want to delete the older ones from the context
            for (let j = openai_msgs.length - 1; j >= 0; j--) {
                let item = openai_msgs[j];
-                let item_count = countTokens(item, true);
+                let item_count = handler_instance.count(item, true, 'conversation');
                await delay(1);
                // If we have enough space for this message, also account for the max assistant reply size
                if ((total_count + item_count) < (this_max_context - oai_settings.openai_max_tokens)) {
@@ -391,13 +396,14 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
                }
                else {
                    // early break since if we still have more messages, they just won't fit anyway
+                    handler_instance.uncount(item_count, 'conversation');
                    break;
                }
            }
        } else {
            for (let j = openai_msgs.length - 1; j >= 0; j--) {
                let item = openai_msgs[j];
-                let item_count = countTokens(item, true);
+                let item_count = handler_instance.count(item, true, 'conversation');
                await delay(1);
                // If we have enough space for this message, also account for the max assistant reply size
                if ((total_count + item_count) < (this_max_context - oai_settings.openai_max_tokens)) {
@@ -406,11 +412,12 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
                }
                else {
                    // early break since if we still have more messages, they just won't fit anyway
+                    handler_instance.uncount(item_count, 'conversation');
                    break;
                }
            }
 
-            console.log(total_count);
+            //console.log(total_count);
 
            // each example block contains multiple user/bot messages
            for (let example_block of openai_msgs_example) {
@@ -420,7 +427,7 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
                example_block = [new_chat_msg, ...example_block];
 
                // add the block only if there is enough space for all its messages
-                const example_count = countTokens(example_block, true);
+                const example_count = handler_instance.count(example_block, true, 'examples');
                await delay(1);
                if ((total_count + example_count) < (this_max_context - oai_settings.openai_max_tokens)) {
                    examples_tosend.push(...example_block)
@@ -428,6 +435,7 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
                }
                else {
                    // early break since more examples probably won't fit anyway
+                    handler_instance.uncount(example_count, 'examples');
                    break;
                }
            }
@@ -439,10 +447,14 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
     openai_msgs_tosend.reverse();
     openai_msgs_tosend = [prompt_msg, ...examples_tosend, new_chat_msg, ...openai_msgs_tosend]
 
-    console.log("We're sending this:")
-    console.log(openai_msgs_tosend);
-    console.log(`Calculated the total context to be ${total_count} tokens`);
-    return openai_msgs_tosend;
+    //console.log("We're sending this:")
+    //console.log(openai_msgs_tosend);
+    //console.log(`Calculated the total context to be ${total_count} tokens`);
+    handler_instance.log();
+    return [
+        openai_msgs_tosend,
+        oai_settings.oai_breakdown ? handler_instance.counts : false,
+    ];
 }
 
 function getSystemPrompt(nsfw_toggle_prompt, enhance_definitions_prompt, wiBefore, storyString, wiAfter, extensionPrompt, isImpersonate) {
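With the hunk above, prepareOpenAIMessages stops returning a bare message array and instead returns a two-element tuple: the prepared messages, plus the per-category token counts when the breakdown setting is on (or false when it is off). A minimal sketch of how a call site might unpack this; the destructuring is illustrative, since the actual callers are outside this diff:

// Illustrative call site (not part of this diff): unpack the new tuple.
const [messages, breakdown] = await prepareOpenAIMessages(/* ...same arguments as before... */);
if (breakdown) {
    // breakdown mirrors handler_instance.counts, e.g. { prompt: 1200, conversation: 950, ... }
    console.table(breakdown);
}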
@@ -604,9 +616,42 @@ async function calculateLogitBias() {
     }
 }
 
+class TokenHandler {
+    constructor(countTokenFn) {
+        this.countTokenFn = countTokenFn;
+        this.counts = {
+            'start_chat': 0,
+            'prompt': 0,
+            'bias': 0,
+            'nudge': 0,
+            'jailbreak': 0,
+            'impersonate': 0,
+            'examples': 0,
+            'conversation': 0,
+        };
+    }
+
+    uncount(value, type) {
+        this.counts[type] -= value;
+    }
+
+    count(messages, full, type) {
+        //console.log(messages);
+        const token_count = this.countTokenFn(messages, full);
+        this.counts[type] += token_count;
+
+        return token_count;
+    }
+
+    log() {
+        const total = Object.values(this.counts).reduce((a, b) => a + b);
+        console.table({ ...this.counts, 'total': total });
+    }
+}
+
 function countTokens(messages, full = false) {
     let chatId = 'undefined';
 
     try {
         if (selected_group) {
             chatId = groups.find(x => x.id == selected_group)?.chat_id;
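TokenHandler is a thin tally around a counting function: count() delegates to that function, accrues the result under a named bucket, and returns it so the existing total_count arithmetic keeps working, while uncount() backs a value out when a message is later dropped from the context. A self-contained sketch of that flow, using a stub counter in place of the real countTokens defined in the hunk above:

// Stub counter for illustration only; the real countTokens queries the tokenizer.
const stubCount = (messages, full) => messages.length * 10;
const handler = new TokenHandler(stubCount);

const msg = { "role": "system", "content": "[Start a new chat]" };
const n = handler.count([msg], true, 'start_chat'); // returns 10; counts.start_chat === 10
handler.uncount(n, 'start_chat');                   // counts.start_chat back to 0
handler.log();                                      // prints the per-bucket table plus a total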
@@ -693,6 +738,7 @@ function loadOpenAISettings(data, settings) {
     if (settings.nsfw_first !== undefined) oai_settings.nsfw_first = !!settings.nsfw_first;
     if (settings.openai_model !== undefined) oai_settings.openai_model = settings.openai_model;
     if (settings.jailbreak_system !== undefined) oai_settings.jailbreak_system = !!settings.jailbreak_system;
+    if (settings.oai_breakdown !== undefined) oai_settings.oai_breakdown = !!settings.oai_breakdown;
 
     $('#stream_toggle').prop('checked', oai_settings.stream_openai);
 
@@ -708,6 +754,7 @@ function loadOpenAISettings(data, settings) {
     $('#wrap_in_quotes').prop('checked', oai_settings.wrap_in_quotes);
     $('#nsfw_first').prop('checked', oai_settings.nsfw_first);
     $('#jailbreak_system').prop('checked', oai_settings.jailbreak_system);
+    $('#oai_breakdown').prop('checked', oai_settings.oai_breakdown);
 
     if (settings.main_prompt !== undefined) oai_settings.main_prompt = settings.main_prompt;
     if (settings.nsfw_prompt !== undefined) oai_settings.nsfw_prompt = settings.nsfw_prompt;
@@ -827,6 +874,7 @@ async function saveOpenAIPreset(name, settings) {
         jailbreak_system: settings.jailbreak_system,
         impersonation_prompt: settings.impersonation_prompt,
         bias_preset_selected: settings.bias_preset_selected,
+        oai_breakdown: settings.oai_breakdown,
     };
 
     const savePresetSettings = await fetch(`/savepreset_openai?name=${name}`, {
@@ -1034,7 +1082,7 @@ async function onDeletePresetClick() {
     const response = await fetch('/deletepreset_openai', {
         method: 'POST',
         headers: getRequestHeaders(),
-        body: JSON.stringify({name: nameToDelete}),
+        body: JSON.stringify({ name: nameToDelete }),
     });
 
     if (!response.ok) {
@@ -1085,6 +1133,7 @@ function onSettingsPresetChange() {
         wrap_in_quotes: ['#wrap_in_quotes', 'wrap_in_quotes', true],
         nsfw_first: ['#nsfw_first', 'nsfw_first', true],
         jailbreak_system: ['#jailbreak_system', 'jailbreak_system', true],
+        oai_breakdown: ['#oai_breakdown', 'oai_breakdown', true],
         main_prompt: ['#main_prompt_textarea', 'main_prompt', false],
         nsfw_prompt: ['#nsfw_prompt_textarea', 'nsfw_prompt', false],
         jailbreak_prompt: ['#jailbreak_prompt_textarea', 'jailbreak_prompt', false],
@@ -1151,7 +1200,7 @@ function onReverseProxyInput() {
 async function onConnectButtonClick(e) {
     e.stopPropagation();
     const api_key_openai = $('#api_key_openai').val().trim();
-
+
     if (api_key_openai.length) {
         await writeSecret(SECRET_KEYS.OPENAI, api_key_openai);
     }
@@ -1257,6 +1306,16 @@ $(document).ready(function () {
         saveSettingsDebounced();
     });
 
+    $("#oai_breakdown").on('change', function () {
+        oai_settings.oai_breakdown = !!$(this).prop("checked");
+        if (!oai_settings.oai_breakdown) {
+            $("#token_breakdown").css('display', 'none');
+        } else {
+            $("#token_breakdown").css('display', 'flex');
+        }
+        saveSettingsDebounced();
+    });
+
     // auto-select a preset based on character/group name
     $(document).on("click", ".character_select", function () {
         const chid = $(this).attr('chid');
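The change handler above only persists the flag and shows or hides a #token_breakdown container; the markup and the code that fill that container are presumably added elsewhere in this change set, not in this file. Purely as a sketch of how the breakdown counts could be rendered into it (everything here beyond the #token_breakdown id is invented for illustration):

// Hypothetical renderer, not from this commit.
function renderTokenBreakdown(counts) {
    const $container = $('#token_breakdown').empty();
    for (const [type, tokens] of Object.entries(counts)) {
        $container.append($('<div>').text(`${type}: ${tokens}`));
    }
}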
@@ -1310,18 +1369,18 @@ $(document).ready(function () {
         saveSettingsDebounced();
     });
 
-    $("#api_button_openai").on('click', onConnectButtonClick);
-    $("#openai_reverse_proxy").on('input', onReverseProxyInput);
-    $("#model_openai_select").on('change', onModelChange);
-    $("#settings_perset_openai").on('change', onSettingsPresetChange);
-    $("#new_oai_preset").on('click', onNewPresetClick);
-    $("#delete_oai_preset").on('click', onDeletePresetClick);
-    $("#openai_api_usage").on('click', showApiKeyUsage);
-    $('#openai_logit_bias_preset').on('change', onLogitBiasPresetChange);
-    $('#openai_logit_bias_new_preset').on('click', createNewLogitBiasPreset);
-    $('#openai_logit_bias_new_entry').on('click', createNewLogitBiasEntry);
-    $('#openai_logit_bias_import_file').on('input', onLogitBiasPresetImportFileChange);
-    $('#openai_logit_bias_import_preset').on('click', onLogitBiasPresetImportClick);
-    $('#openai_logit_bias_export_preset').on('click', onLogitBiasPresetExportClick);
-    $('#openai_logit_bias_delete_preset').on('click', onLogitBiasPresetDeleteClick);
+    $("#api_button_openai").on("click", onConnectButtonClick);
+    $("#openai_reverse_proxy").on("input", onReverseProxyInput);
+    $("#model_openai_select").on("change", onModelChange);
+    $("#settings_perset_openai").on("change", onSettingsPresetChange);
+    $("#new_oai_preset").on("click", onNewPresetClick);
+    $("#delete_oai_preset").on("click", onDeletePresetClick);
+    $("#openai_api_usage").on("click", showApiKeyUsage);
+    $("#openai_logit_bias_preset").on("change", onLogitBiasPresetChange);
+    $("#openai_logit_bias_new_preset").on("click", createNewLogitBiasPreset);
+    $("#openai_logit_bias_new_entry").on("click", createNewLogitBiasEntry);
+    $("#openai_logit_bias_import_file").on("input", onLogitBiasPresetImportFileChange);
+    $("#openai_logit_bias_import_preset").on("click", onLogitBiasPresetImportClick);
+    $("#openai_logit_bias_export_preset").on("click", onLogitBiasPresetExportClick);
+    $("#openai_logit_bias_delete_preset").on("click", onLogitBiasPresetDeleteClick);
 });