Mirror of https://github.com/SillyTavern/SillyTavern.git (synced 2025-06-05 21:59:27 +02:00)

Commit: OAI token itemization WIP (integrate PR299)
@@ -2431,7 +2431,7 @@
 <textarea id="send_textarea" placeholder="Not connected to API!" name="text"></textarea>
 <div id="send_but_sheld">
     <div id="loading_mes">
-        <div alt="" class="fa-solid fa-hourglass-half"></div>
+        <div title="Loading" class="fa-solid fa-hourglass-half"></div>
     </div>
     <div id="send_but" class="fa-solid fa-feather-pointed" title="Send a message"></div>
 </div>

public/script.js (350 changed lines)
@@ -125,6 +125,7 @@ import {
     secret_state,
     writeSecret
 } from "./scripts/secrets.js";
+import uniqolor from "./scripts/uniqolor.js";
 
 //exporting functions and vars for mods
 export {
@@ -204,6 +205,7 @@ let converter;
 reloadMarkdownProcessor();
 
 // array for prompt token calculations
+console.log('initializing Prompt Itemization Array on Startup');
 let itemizedPrompts = [];
 
 /* let bg_menu_toggle = false; */
@@ -1129,28 +1131,34 @@ function addOneMessage(mes, { type = "normal", insertAfter = null, scroll = true
 
     if (isSystem) {
         newMessage.find(".mes_edit").hide();
-        newMessage.find(".mes_prompt").hide(); //dont'd need prompt display for sys messages
+        newMessage.find(".mes_prompt").hide(); //don't need prompt button for sys
     }
 
-    // don't need prompt butons for user messages
+    // don't need prompt button for user
     if (params.isUser === true) {
         newMessage.find(".mes_prompt").hide();
+        console.log(`hiding prompt for user mesID ${params.mesId}`);
     }
 
     //shows or hides the Prompt display button
     let mesIdToFind = Number(newMessage.attr('mesId'));
     if (itemizedPrompts.length !== 0) {
+        console.log(`itemizedPrompt.length = ${itemizedPrompts.length}`)
         for (var i = 0; i < itemizedPrompts.length; i++) {
             if (itemizedPrompts[i].mesId === mesIdToFind) {
                 newMessage.find(".mes_prompt").show();
+                console.log(`showing prompt for mesID ${params.mesId} from ${params.characterName}`);
             } else {
-                console.log('no cache found for mesID, hiding prompt button and continuing search');
+                console.log(`no cache obj for mesID ${mesIdToFind}, hiding prompt button and continuing search`);
                 newMessage.find(".mes_prompt").hide();
+                console.log(itemizedPrompts);
             }
         }
-    } else { //hide all when prompt cache is empty
+    } else if (params.isUser !== true) { //hide all when prompt cache is empty
+        console.log('saw empty prompt cache, hiding all prompt buttons');
         $(".mes_prompt").hide();
-    }
+        console.log(itemizedPrompts);
+    } else { console.log('skipping prompt data for User Message'); }
 
     newMessage.find('.avatar img').on('error', function () {
         $(this).hide();
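
Note: the hunk above gates each message's prompt button on whether the itemizedPrompts cache holds an entry for its mesId. A minimal sketch of that membership test, decoupled from jQuery (the helper name hasItemizedPrompt is illustrative, not part of the codebase):

    // Does the itemization cache hold an entry for this message?
    // Array.prototype.some() also avoids the loop's show-then-hide churn,
    // where a non-matching entry visited after the match hides the button again.
    function hasItemizedPrompt(itemizedPrompts, mesId) {
        return itemizedPrompts.some((entry) => entry.mesId === Number(mesId));
    }

    const cache = [{ mesId: 3 }, { mesId: 5 }];
    console.log(hasItemizedPrompt(cache, '3')); // true (attr() yields strings, hence Number())
    console.log(hasItemizedPrompt(cache, 7));   // false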
@@ -1594,6 +1602,7 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
     const isImpersonate = type == "impersonate";
     const isInstruct = power_user.instruct.enabled;
 
+    message_already_generated = isImpersonate ? `${name1}: ` : `${name2}: `;
     // Name for the multigen prefix
     const magName = isImpersonate ? (is_pygmalion ? 'You' : name1) : name2;
 
@@ -2123,32 +2132,7 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
         generatedPromtCache +
         promptBias;
 
-    //set array object for prompt token itemization of this message
-    let thisPromptBits = {
-        mesId: count_view_mes,
-        worldInfoBefore: worldInfoBefore,
-        allAnchors: allAnchors,
-        summarizeString: (extension_prompts['1_memory']?.value || ''),
-        authorsNoteString: (extension_prompts['2_floating_prompt']?.value || ''),
-        worldInfoString: worldInfoString,
-        storyString: storyString,
-        worldInfoAfter: worldInfoAfter,
-        afterScenarioAnchor: afterScenarioAnchor,
-        examplesString: examplesString,
-        mesSendString: mesSendString,
-        generatedPromtCache: generatedPromtCache,
-        promptBias: promptBias,
-        finalPromt: finalPromt,
-        charDescription: charDescription,
-        charPersonality: charPersonality,
-        scenarioText: scenarioText,
-        promptBias: promptBias,
-        storyString: storyString,
-        this_max_context: this_max_context,
-        padding: power_user.token_padding
-    }
-
-    itemizedPrompts.push(thisPromptBits);
 
     if (zeroDepthAnchor && zeroDepthAnchor.length) {
         if (!isMultigenEnabled() || tokens_already_generated == 0) {
@@ -2167,6 +2151,11 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
         }
     }
 
+    // Add quiet generation prompt at depth 0
+    if (quiet_prompt && quiet_prompt.length) {
+        finalPromt += `\n${quiet_prompt}`;
+    }
+
     finalPromt = finalPromt.replace(/\r/gm, '');
 
     if (power_user.collapse_newlines) {
@@ -2202,6 +2191,8 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
         }
     }
 
+    let thisPromptBits = [];
+
     if (main_api == 'kobold' && horde_settings.use_horde && horde_settings.auto_adjust_response_length) {
         this_amount_gen = Math.min(this_amount_gen, adjustedParams.maxLength);
         this_amount_gen = Math.max(this_amount_gen, MIN_AMOUNT_GEN); // prevent validation errors
@@ -2237,7 +2228,50 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
             console.log('rungenerate calling API');
 
             if (main_api == 'openai') {
-                let prompt = await prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldInfoAfter, afterScenarioAnchor, promptBias, type);
+                let [prompt, counts] = await prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldInfoAfter, afterScenarioAnchor, promptBias, type);
+
+                // counts will return false if the user has not enabled the token breakdown feature
+                if (counts) {
+
+                    //$('#token_breakdown').css('display', 'flex');
+                    const breakdown_bar = $('#token_breakdown div:first-child');
+                    breakdown_bar.empty();
+
+                    const total = Object.values(counts).reduce((acc, val) => acc + val, 0);
+
+                    thisPromptBits.push({
+                        oaiStartTokens: Object.entries(counts)[0][1],
+                        oaiPromptTokens: Object.entries(counts)[1][1],
+                        oaiBiasTokens: Object.entries(counts)[2][1],
+                        oaiNudgeTokens: Object.entries(counts)[3][1],
+                        oaiJailbreakTokens: Object.entries(counts)[4][1],
+                        oaiImpersonateTokens: Object.entries(counts)[5][1],
+                        oaiExamplesTokens: Object.entries(counts)[6][1],
+                        oaiConversationTokens: Object.entries(counts)[7][1],
+                        oaiTotalTokens: total,
+                    })
+
+                    console.log(`added OAI prompt bits to array`);
+
+                    Object.entries(counts).forEach(([type, value]) => {
+                        if (value === 0) {
+                            return;
+                        }
+                        const percent_value = (value / total) * 100;
+                        const color = uniqolor(type, { saturation: 50, lightness: 75, }).color;
+                        const bar = document.createElement('div');
+                        bar.style.width = `${percent_value}%`;
+                        bar.classList.add('token_breakdown_segment');
+                        bar.style.backgroundColor = color + 'AA';
+                        bar.style.borderColor = color + 'FF';
+                        bar.innerText = value;
+                        bar.title = `${type}: ${percent_value.toFixed(2)}%`;
+                        breakdown_bar.append(bar);
+                    });
+                }
 
                 setInContextMessages(openai_messages_count, type);
 
                 if (isStreamingEnabled() && type !== 'quiet') {
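
Note: the segment widths above are plain proportions of the per-category totals. A worked example with made-up counts, in the same shape the TokenHandler below produces:

    const counts = { start_chat: 10, prompt: 90, bias: 0, conversation: 300 };
    const total = Object.values(counts).reduce((acc, val) => acc + val, 0); // 400

    for (const [type, value] of Object.entries(counts)) {
        if (value === 0) continue; // zero-width segments are skipped, as above
        console.log(`${type}: ${((value / total) * 100).toFixed(2)}%`);
    }
    // start_chat: 2.50%  prompt: 22.50%  conversation: 75.00%

One caveat worth noting: the `Object.entries(counts)[0][1]` positional indexing ties the pushed oai* field names to the key order of the counts object, so a rename or reorder in TokenHandler would silently shuffle the itemization.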
@@ -2277,6 +2311,41 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
                     }); //end of "if not data error"
                 }
 
+                //set array object for prompt token itemization of this message
+                let currentArrayEntry = Number(thisPromptBits.length - 1);
+                let additionalPromptStuff = {
+                    ...thisPromptBits[currentArrayEntry],
+                    mesId: Number(count_view_mes),
+                    worldInfoBefore: worldInfoBefore,
+                    allAnchors: allAnchors,
+                    summarizeString: (extension_prompts['1_memory']?.value || ''),
+                    authorsNoteString: (extension_prompts['2_floating_prompt']?.value || ''),
+                    worldInfoString: worldInfoString,
+                    storyString: storyString,
+                    worldInfoAfter: worldInfoAfter,
+                    afterScenarioAnchor: afterScenarioAnchor,
+                    examplesString: examplesString,
+                    mesSendString: mesSendString,
+                    generatedPromtCache: generatedPromtCache,
+                    promptBias: promptBias,
+                    finalPromt: finalPromt,
+                    charDescription: charDescription,
+                    charPersonality: charPersonality,
+                    scenarioText: scenarioText,
+                    this_max_context: this_max_context,
+                    padding: power_user.token_padding,
+                    main_api: main_api,
+                };
+
+                thisPromptBits = additionalPromptStuff;
+
+                //console.log(thisPromptBits);
+
+                itemizedPrompts.push(thisPromptBits);
+                //console.log(`pushed prompt bits to itemizedPrompts array. Length is now: ${itemizedPrompts.length}`);
+
                 if (isStreamingEnabled() && type !== 'quiet') {
                     hideSwipeButtons();
                     let getMessage = await streamingProcessor.generate();
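
Note: this block replaces the itemization object deleted from earlier in Generate (the -2123 hunk), so the record is now built after the final prompt is assembled and folds in any OAI-only counts pushed into thisPromptBits. A small sketch of the spread-merge (values illustrative):

    let thisPromptBits = [{ oaiTotalTokens: 1200, oaiPromptTokens: 400 }];

    const additionalPromptStuff = {
        ...thisPromptBits[Number(thisPromptBits.length - 1)], // last pushed entry, if any
        mesId: 7,
        main_api: 'openai',
    };

    thisPromptBits = additionalPromptStuff; // array variable reassigned to one flat record
    console.log(thisPromptBits.oaiTotalTokens, thisPromptBits.mesId); // 1200 7

For non-OAI APIs the array is empty, so the spread sees undefined; spreading undefined into an object literal is a no-op, and the cached record simply lacks the oai* fields.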
||||||
@ -2285,7 +2354,7 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
|
|||||||
// I wasn't able to get multigen working with real streaming
|
// I wasn't able to get multigen working with real streaming
|
||||||
// consistently without screwing the interim prompting
|
// consistently without screwing the interim prompting
|
||||||
if (isMultigenEnabled()) {
|
if (isMultigenEnabled()) {
|
||||||
tokens_already_generated += this_amount_gen;
|
tokens_already_generated += this_amount_gen; // add new gen amt to any prev gen counter..
|
||||||
message_already_generated += getMessage;
|
message_already_generated += getMessage;
|
||||||
promptBias = '';
|
promptBias = '';
|
||||||
if (!streamingProcessor.isStopped && shouldContinueMultigen(getMessage, isImpersonate)) {
|
if (!streamingProcessor.isStopped && shouldContinueMultigen(getMessage, isImpersonate)) {
|
||||||
@@ -2432,8 +2501,9 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
 } //generate ends
 
 function promptItemize(itemizedPrompts, requestedMesId) {
-    let incomingMesId = Number(requestedMesId);
-    let thisPromptSet = undefined;
+    var incomingMesId = Number(requestedMesId);
+    console.log(`looking for MesId ${incomingMesId}`);
+    var thisPromptSet = undefined;
 
     for (var i = 0; i < itemizedPrompts.length; i++) {
         if (itemizedPrompts[i].mesId === incomingMesId) {
@@ -2447,44 +2517,183 @@ function promptItemize(itemizedPrompts, requestedMesId) {
         return null;
     }
 
-    let finalPromptTokens = getTokenCount(itemizedPrompts[thisPromptSet].finalPromt);
-    let allAnchorsTokens = getTokenCount(itemizedPrompts[thisPromptSet].allAnchors);
-    let summarizeStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].summarizeString);
-    let authorsNoteStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].authorsNoteString);
-    let afterScenarioAnchorTokens = getTokenCount(itemizedPrompts[thisPromptSet].afterScenarioAnchor);
-    let zeroDepthAnchorTokens = getTokenCount(itemizedPrompts[thisPromptSet].afterScenarioAnchor);
-    let worldInfoStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].worldInfoString);
-    let storyStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].storyString);
-    let examplesStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].examplesString);
-    let charPersonalityTokens = getTokenCount(itemizedPrompts[thisPromptSet].charPersonality);
-    let charDescriptionTokens = getTokenCount(itemizedPrompts[thisPromptSet].charDescription);
-    let scenarioTextTokens = getTokenCount(itemizedPrompts[thisPromptSet].scenarioText);
-    let promptBiasTokens = getTokenCount(itemizedPrompts[thisPromptSet].promptBias);
-    let mesSendStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].mesSendString)
-    let ActualChatHistoryTokens = mesSendStringTokens - allAnchorsTokens + power_user.token_padding;
-    let thisPrompt_max_context = itemizedPrompts[thisPromptSet].this_max_context;
-    let thisPrompt_padding = itemizedPrompts[thisPromptSet].padding;
-
-    let totalTokensInPrompt =
-        storyStringTokens + //chardefs total
-        worldInfoStringTokens +
-        ActualChatHistoryTokens + //chat history
-        allAnchorsTokens + // AN and/or legacy anchors
-        //afterScenarioAnchorTokens + //only counts if AN is set to 'after scenario'
-        //zeroDepthAnchorTokens + //same as above, even if AN not on 0 depth
-        promptBiasTokens + //{{}}
-        - thisPrompt_padding; //not sure this way of calculating is correct, but the math results in same value as 'finalPromt'
-
-    let storyStringTokensPercentage = ((storyStringTokens / (totalTokensInPrompt + thisPrompt_padding)) * 100).toFixed(2);
-    let ActualChatHistoryTokensPercentage = ((ActualChatHistoryTokens / (totalTokensInPrompt + thisPrompt_padding)) * 100).toFixed(2);
-    let promptBiasTokensPercentage = ((promptBiasTokens / (totalTokensInPrompt + thisPrompt_padding)) * 100).toFixed(2);
-    let worldInfoStringTokensPercentage = ((worldInfoStringTokens / (totalTokensInPrompt + thisPrompt_padding)) * 100).toFixed(2);
-    let allAnchorsTokensPercentage = ((allAnchorsTokens / (totalTokensInPrompt + thisPrompt_padding)) * 100).toFixed(2);
-    let selectedTokenizer = $("#tokenizer").find(':selected').text();
-    callPopup(
-        `
+    //these happen regardless of API
+    var charPersonalityTokens = getTokenCount(itemizedPrompts[thisPromptSet].charPersonality);
+    var charDescriptionTokens = getTokenCount(itemizedPrompts[thisPromptSet].charDescription);
+    var scenarioTextTokens = getTokenCount(itemizedPrompts[thisPromptSet].scenarioText);
+    var allAnchorsTokens = getTokenCount(itemizedPrompts[thisPromptSet].allAnchors);
+    var summarizeStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].summarizeString);
+    var authorsNoteStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].authorsNoteString);
+    var afterScenarioAnchorTokens = getTokenCount(itemizedPrompts[thisPromptSet].afterScenarioAnchor);
+    var zeroDepthAnchorTokens = getTokenCount(itemizedPrompts[thisPromptSet].afterScenarioAnchor);
+    var worldInfoStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].worldInfoString);
+    var thisPrompt_max_context = itemizedPrompts[thisPromptSet].this_max_context;
+    var thisPrompt_padding = itemizedPrompts[thisPromptSet].padding;
+    console.log(`"${itemizedPrompts[thisPromptSet].promptBias}"`);
+    var promptBiasTokens = getTokenCount(itemizedPrompts[thisPromptSet].promptBias);
+    var this_main_api = itemizedPrompts[thisPromptSet].main_api;
+
+
+    if (this_main_api == 'openai') {
+        //for OAI API
+        //console.log('-- Counting OAI Tokens');
+        var finalPromptTokens = itemizedPrompts[thisPromptSet].oaiTotalTokens;
+        var oaiStartTokens = itemizedPrompts[thisPromptSet].oaiStartTokens;
+        var oaiPromptTokens = itemizedPrompts[thisPromptSet].oaiPromptTokens;
+        var ActualChatHistoryTokens = itemizedPrompts[thisPromptSet].oaiConversationTokens;
+        var examplesStringTokens = itemizedPrompts[thisPromptSet].oaiExamplesTokens;
+        var oaiBiasTokens = itemizedPrompts[thisPromptSet].oaiBiasTokens;
+        var oaiJailbreakTokens = itemizedPrompts[thisPromptSet].oaiJailbreakTokens;
+        var oaiStartTokens = itemizedPrompts[thisPromptSet].oaiStartTokens;
+        var oaiNudgeTokens = itemizedPrompts[thisPromptSet].oaiNudgeTokens;
+        var oaiImpersonateTokens = itemizedPrompts[thisPromptSet].oaiImpersonateTokens;
+
+    } else {
+        //for non-OAI APIs
+        //console.log('-- Counting non-OAI Tokens');
+        var finalPromptTokens = getTokenCount(itemizedPrompts[thisPromptSet].finalPromt);
+        var storyStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].storyString);
+        var examplesStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].examplesString);
+        var mesSendStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].mesSendString)
+        var ActualChatHistoryTokens = mesSendStringTokens - allAnchorsTokens + power_user.token_padding;
+
+        var totalTokensInPrompt =
+            storyStringTokens + //chardefs total
+            worldInfoStringTokens +
+            ActualChatHistoryTokens + //chat history
+            allAnchorsTokens + // AN and/or legacy anchors
+            //afterScenarioAnchorTokens + //only counts if AN is set to 'after scenario'
+            //zeroDepthAnchorTokens + //same as above, even if AN not on 0 depth
+            promptBiasTokens; //{{}}
+        //- thisPrompt_padding; //not sure this way of calculating is correct, but the math results in same value as 'finalPromt'
+    }
+
+    if (this_main_api == 'openai') {
+        //console.log('-- applying % on OAI tokens');
+        var oaiStartTokensPercentage = ((oaiStartTokens / (finalPromptTokens)) * 100).toFixed(2);
+        var storyStringTokensPercentage = ((oaiPromptTokens / (finalPromptTokens)) * 100).toFixed(2);
+        var ActualChatHistoryTokensPercentage = ((ActualChatHistoryTokens / (finalPromptTokens)) * 100).toFixed(2);
+        var promptBiasTokensPercentage = ((oaiBiasTokens / (finalPromptTokens)) * 100).toFixed(2);
+        var worldInfoStringTokensPercentage = ((worldInfoStringTokens / (finalPromptTokens)) * 100).toFixed(2);
+        var allAnchorsTokensPercentage = ((allAnchorsTokens / (finalPromptTokens)) * 100).toFixed(2);
+        var selectedTokenizer = $("#tokenizer").find(':selected').text();
+
+    } else {
+        //console.log('-- applying % on non-OAI tokens');
+        var storyStringTokensPercentage = ((storyStringTokens / (totalTokensInPrompt)) * 100).toFixed(2);
+        var ActualChatHistoryTokensPercentage = ((ActualChatHistoryTokens / (totalTokensInPrompt)) * 100).toFixed(2);
+        var promptBiasTokensPercentage = ((promptBiasTokens / (totalTokensInPrompt)) * 100).toFixed(2);
+        var worldInfoStringTokensPercentage = ((worldInfoStringTokens / (totalTokensInPrompt)) * 100).toFixed(2);
+        var allAnchorsTokensPercentage = ((allAnchorsTokens / (totalTokensInPrompt)) * 100).toFixed(2);
+        var selectedTokenizer = $("#tokenizer").find(':selected').text();
+    }
+
+    if (this_main_api == 'openai') {
+        //console.log('-- calling popup for OAI tokens');
+        callPopup(
+            `
     <h3>Prompt Itemization</h3>
     Tokenizer: ${selectedTokenizer}<br>
+    API Used: ${this_main_api}<br>
+    <span class="tokenItemizingSubclass">
+        Only the white numbers really matter. All numbers are estimates.
+        Grey color items may not have been included in the context due to certain prompt format settings.
+    </span>
+    <hr class="sysHR">
+    <div class="justifyLeft">
+        <div class="flex-container">
+            <div class="flex-container flex1 flexFlowColumns flexNoGap wide50p tokenGraph">
+                <div class="wide100p" style="background-color: grey; height: ${oaiStartTokensPercentage}%;"></div>
+                <div class="wide100p" style="background-color: indianred; height: ${storyStringTokensPercentage}%;"></div>
+                <div class="wide100p" style="background-color: gold; height: ${worldInfoStringTokensPercentage}%;"></div>
+                <div class="wide100p" style="background-color: palegreen; height: ${ActualChatHistoryTokensPercentage}%;"></div>
+                <div class="wide100p" style="background-color: cornflowerblue; height: ${allAnchorsTokensPercentage}%;"></div>
+                <div class="wide100p" style="background-color: mediumpurple; height: ${promptBiasTokensPercentage}%;"></div>
+            </div>
+            <div class="flex-container wide50p">
+                <div class="wide100p flex-container flexNoGap flexFlowColumn">
+                    <div class="flex-container wide100p">
+                        <div class="flex1" style="color: grey;">Chat Startup:</div>
+                        <div class=""> ${oaiStartTokens}</div>
+                    </div>
+                </div>
+                <div class="wide100p flex-container flexNoGap flexFlowColumn">
+                    <div class="flex-container wide100p">
+                        <div class="flex1" style="color: indianred;">Prompt Tokens:</div>
+                        <div class=""> ${oaiPromptTokens}</div>
+                    </div>
+                    <div class="flex-container ">
+                        <div class=" flex1 tokenItemizingSubclass">-- Description: </div>
+                        <div class="tokenItemizingSubclass">${charDescriptionTokens}</div>
+                    </div>
+                    <div class="flex-container ">
+                        <div class=" flex1 tokenItemizingSubclass">-- Personality:</div>
+                        <div class="tokenItemizingSubclass"> ${charPersonalityTokens}</div>
+                    </div>
+                    <div class="flex-container ">
+                        <div class=" flex1 tokenItemizingSubclass">-- Scenario: </div>
+                        <div class="tokenItemizingSubclass">${scenarioTextTokens}</div>
+                    </div>
+                    <div class="flex-container ">
+                        <div class=" flex1 tokenItemizingSubclass">-- Examples:</div>
+                        <div class="tokenItemizingSubclass"> ${examplesStringTokens}</div>
+                    </div>
+                </div>
+                <div class="wide100p flex-container">
+                    <div class="flex1" style="color: gold;">World Info:</div>
+                    <div class="">${worldInfoStringTokens}</div>
+                </div>
+                <div class="wide100p flex-container">
+                    <div class="flex1" style="color: palegreen;">Chat History:</div>
+                    <div class=""> ${ActualChatHistoryTokens}</div>
+                </div>
+                <div class="wide100p flex-container flexNoGap flexFlowColumn">
+                    <div class="wide100p flex-container">
+                        <div class="flex1" style="color: cornflowerblue;">Extensions:</div>
+                        <div class="">${allAnchorsTokens}</div>
+                    </div>
+                    <div class="flex-container ">
+                        <div class=" flex1 tokenItemizingSubclass">-- Summarize: </div>
+                        <div class="tokenItemizingSubclass">${summarizeStringTokens}</div>
+                    </div>
+                    <div class="flex-container ">
+                        <div class=" flex1 tokenItemizingSubclass">-- Author's Note:</div>
+                        <div class="tokenItemizingSubclass"> ${authorsNoteStringTokens}</div>
+                    </div>
+                </div>
+                <div class="wide100p flex-container">
+                    <div class="flex1" style="color: mediumpurple;">{{}} Bias:</div><div class="">${oaiBiasTokens}</div>
+                </div>
+            </div>
+
+        </div>
+        <hr class="sysHR">
+        <div class="wide100p flex-container flexFlowColumns">
+            <div class="flex-container wide100p">
+                <div class="flex1">Total Tokens in Prompt:</div><div class=""> ${finalPromptTokens}</div>
+            </div>
+            <div class="flex-container wide100p">
+                <div class="flex1">Max Context:</div><div class="">${thisPrompt_max_context}</div>
+            </div>
+            <div class="flex-container wide100p">
+                <div class="flex1">- Padding:</div><div class=""> ${thisPrompt_padding}</div>
+            </div>
+            <div class="flex-container wide100p">
+                <div class="flex1">Actual Max Context Allowed:</div><div class="">${thisPrompt_max_context - thisPrompt_padding}</div>
+            </div>
+        </div>
+    </div>
+    <hr class="sysHR">
+    `, 'text'
+        );
+
+    } else {
+        //console.log('-- calling popup for non-OAI tokens');
+        callPopup(
+            `
+    <h3>Prompt Itemization</h3>
+    Tokenizer: ${selectedTokenizer}<br>
+    API Used: ${this_main_api}<br>
     <span class="tokenItemizingSubclass">
     Only the white numbers really matter. All numbers are estimates.
     Grey color items may not have been included in the context due to certain prompt format settings.
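
Note: the branches above rely on `var` hoisting, which is why the commit switches the declarations from `let` to `var`: a `var` declared inside if/else is function-scoped, so names like finalPromptTokens assigned in one branch remain visible to the percentage and popup code afterwards, while a `let` would be confined to its block. A minimal demonstration of the scoping rule being leaned on:

    function demo(flag) {
        if (flag) {
            var hoisted = 'oai';
        } else {
            var hoisted = 'other';
        }
        return hoisted; // still in scope here; with `let` this would throw
    }
    console.log(demo(true)); // "oai"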
@@ -2569,7 +2778,8 @@ function promptItemize(itemizedPrompts, requestedMesId) {
     </div>
     <hr class="sysHR">
     `, 'text'
     );
+    }
 }
 
 function setInContextMessages(lastmsg, type) {
@@ -3295,8 +3505,10 @@ function changeMainAPI() {
     // Hide common settings for OpenAI
     if (selectedVal == "openai") {
         $("#common-gen-settings-block").css("display", "none");
+        $("#token_breakdown").css("display", "flex");
     } else {
         $("#common-gen-settings-block").css("display", "block");
+        $("#token_breakdown").css("display", "none");
     }
     // Hide amount gen for poe
     if (selectedVal == "poe") {

@@ -101,6 +101,7 @@ const default_settings = {
     openai_model: 'gpt-3.5-turbo',
     jailbreak_system: false,
     reverse_proxy: '',
+    oai_breakdown: false,
 };
 
 const oai_settings = {
@@ -125,6 +126,7 @@ const oai_settings = {
     openai_model: 'gpt-3.5-turbo',
     jailbreak_system: false,
     reverse_proxy: '',
+    oai_breakdown: false,
 };
 
 let openai_setting_names;
@@ -317,16 +319,18 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
     let openai_msgs_tosend = [];
 
     // todo: static value, maybe include in the initial context calculation
+    const handler_instance = new TokenHandler(countTokens);
+
     let new_chat_msg = { "role": "system", "content": "[Start a new chat]" };
-    let start_chat_count = countTokens([new_chat_msg], true);
+    let start_chat_count = handler_instance.count([new_chat_msg], true, 'start_chat');
     await delay(1);
-    let total_count = countTokens([prompt_msg], true) + start_chat_count;
+    let total_count = handler_instance.count([prompt_msg], true, 'prompt') + start_chat_count;
     await delay(1);
 
     if (bias && bias.trim().length) {
         let bias_msg = { "role": "system", "content": bias.trim() };
         openai_msgs.push(bias_msg);
-        total_count += countTokens([bias_msg], true);
+        total_count += handler_instance.count([bias_msg], true, 'bias');
         await delay(1);
     }
 
@@ -343,13 +347,14 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
         openai_msgs.push(group_nudge);
 
         // add a group nudge count
-        let group_nudge_count = countTokens([group_nudge], true);
+        let group_nudge_count = handler_instance.count([group_nudge], true, 'nudge');
         await delay(1);
         total_count += group_nudge_count;
 
         // recount tokens for new start message
         total_count -= start_chat_count
-        start_chat_count = countTokens([new_chat_msg], true);
+        handler_instance.uncount(start_chat_count, 'start_chat');
+        start_chat_count = handler_instance.count([new_chat_msg], true);
         await delay(1);
         total_count += start_chat_count;
     }
@@ -358,7 +363,7 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
         const jailbreakMessage = { "role": "system", "content": substituteParams(oai_settings.jailbreak_prompt) };
         openai_msgs.push(jailbreakMessage);
 
-        total_count += countTokens([jailbreakMessage], true);
+        total_count += handler_instance.count([jailbreakMessage], true, 'jailbreak');
         await delay(1);
     }
 
@@ -366,7 +371,7 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
         const impersonateMessage = { "role": "system", "content": substituteParams(oai_settings.impersonation_prompt) };
         openai_msgs.push(impersonateMessage);
 
-        total_count += countTokens([impersonateMessage], true);
+        total_count += handler_instance.count([impersonateMessage], true, 'impersonate');
         await delay(1);
     }
 
@@ -389,12 +394,12 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
                 examples_tosend.push(example);
             }
         }
-        total_count += countTokens(examples_tosend, true);
+        total_count += handler_instance.count(examples_tosend, true, 'examples');
         await delay(1);
         // go from newest message to oldest, because we want to delete the older ones from the context
         for (let j = openai_msgs.length - 1; j >= 0; j--) {
             let item = openai_msgs[j];
-            let item_count = countTokens(item, true);
+            let item_count = handler_instance.count(item, true, 'conversation');
             await delay(1);
             // If we have enough space for this message, also account for the max assistant reply size
             if ((total_count + item_count) < (this_max_context - oai_settings.openai_max_tokens)) {
@@ -403,13 +408,14 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
             }
             else {
                 // early break since if we still have more messages, they just won't fit anyway
+                handler_instance.uncount(item_count, 'conversation');
                 break;
             }
         }
     } else {
         for (let j = openai_msgs.length - 1; j >= 0; j--) {
             let item = openai_msgs[j];
-            let item_count = countTokens(item, true);
+            let item_count = handler_instance.count(item, true, 'conversation');
             await delay(1);
             // If we have enough space for this message, also account for the max assistant reply size
             if ((total_count + item_count) < (this_max_context - oai_settings.openai_max_tokens)) {
@@ -418,11 +424,12 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
             }
             else {
                 // early break since if we still have more messages, they just won't fit anyway
+                handler_instance.uncount(item_count, 'conversation');
                 break;
             }
         }
     }
 
-    console.log(total_count);
+    //console.log(total_count);
 
     // each example block contains multiple user/bot messages
     for (let example_block of openai_msgs_example) {
@@ -432,7 +439,7 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
         example_block = [new_chat_msg, ...example_block];
 
         // add the block only if there is enough space for all its messages
-        const example_count = countTokens(example_block, true);
+        const example_count = handler_instance.count(example_block, true, 'examples');
         await delay(1);
         if ((total_count + example_count) < (this_max_context - oai_settings.openai_max_tokens)) {
             examples_tosend.push(...example_block)
@@ -440,6 +447,7 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
         }
         else {
             // early break since more examples probably won't fit anyway
+            handler_instance.uncount(example_count, 'examples');
             break;
         }
     }
@@ -451,10 +459,14 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
     openai_msgs_tosend.reverse();
     openai_msgs_tosend = [prompt_msg, ...examples_tosend, new_chat_msg, ...openai_msgs_tosend]
 
-    console.log("We're sending this:")
-    console.log(openai_msgs_tosend);
-    console.log(`Calculated the total context to be ${total_count} tokens`);
-    return openai_msgs_tosend;
+    //console.log("We're sending this:")
+    //console.log(openai_msgs_tosend);
+    //console.log(`Calculated the total context to be ${total_count} tokens`);
+    handler_instance.log();
+    return [
+        openai_msgs_tosend,
+        oai_settings.oai_breakdown ? handler_instance.counts : false,
+    ];
 }
 
 function getSystemPrompt(nsfw_toggle_prompt, enhance_definitions_prompt, wiBefore, storyString, wiAfter, extensionPrompt, isImpersonate) {
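
Note: prepareOpenAIMessages now returns a [messages, counts] pair, with counts false unless the breakdown toggle is on; the script.js hunk earlier destructures it as `let [prompt, counts] = ...`. A shape sketch with a stub standing in for the real function (names and values illustrative):

    async function prepareOpenAIMessagesStub(breakdownEnabled) {
        const openai_msgs_tosend = [{ role: 'system', content: '[Start a new chat]' }];
        const counts = { start_chat: 5, prompt: 120, conversation: 300 };
        return [openai_msgs_tosend, breakdownEnabled ? counts : false];
    }

    // counts is false when the user has the breakdown feature off:
    prepareOpenAIMessagesStub(false).then(([prompt, counts]) => {
        console.log(Array.isArray(prompt), counts); // true false
    });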
@@ -616,9 +628,42 @@ async function calculateLogitBias() {
     }
 }
 
+class TokenHandler {
+    constructor(countTokenFn) {
+        this.countTokenFn = countTokenFn;
+        this.counts = {
+            'start_chat': 0,
+            'prompt': 0,
+            'bias': 0,
+            'nudge': 0,
+            'jailbreak': 0,
+            'impersonate': 0,
+            'examples': 0,
+            'conversation': 0,
+        };
+    }
+
+    uncount(value, type) {
+        this.counts[type] -= value;
+    }
+
+    count(messages, full, type) {
+        console.log(messages);
+        const token_count = this.countTokenFn(messages, full);
+        this.counts[type] += token_count;
+
+        return token_count;
+    }
+
+    log() {
+        const total = Object.values(this.counts).reduce((a, b) => a + b);
+        console.table({ ...this.counts, 'total': total });
+    }
+}
+
 function countTokens(messages, full = false) {
     let chatId = 'undefined';
 
     try {
         if (selected_group) {
             chatId = groups.find(x => x.id == selected_group)?.chat_id;
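
Note: a usage sketch for the TokenHandler above, with a stub counting function (one token per word) standing in for the real countTokens; the stub and values are illustrative only:

    // Stub: accepts a single message or an array, like countTokens does.
    const stubCount = (messages, full) =>
        [messages].flat().reduce((n, m) => n + m.content.split(' ').length, 0);

    const handler = new TokenHandler(stubCount);
    handler.count([{ role: 'system', content: 'Start a new chat' }], true, 'start_chat');
    const c = handler.count([{ role: 'user', content: 'two words' }], true, 'conversation');
    handler.uncount(c, 'conversation'); // message dropped from context: roll its count back
    handler.log(); // console.table of per-category counts plus total

The uncount() hook is what keeps the breakdown honest: the trimming loops count each candidate message before knowing whether it fits, so the count is reverted when the loop breaks.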
@@ -705,6 +750,7 @@ function loadOpenAISettings(data, settings) {
     if (settings.nsfw_first !== undefined) oai_settings.nsfw_first = !!settings.nsfw_first;
     if (settings.openai_model !== undefined) oai_settings.openai_model = settings.openai_model;
     if (settings.jailbreak_system !== undefined) oai_settings.jailbreak_system = !!settings.jailbreak_system;
+    if (settings.oai_breakdown !== undefined) oai_settings.oai_breakdown = !!settings.oai_breakdown;
 
     $('#stream_toggle').prop('checked', oai_settings.stream_openai);
 
@@ -720,6 +766,7 @@ function loadOpenAISettings(data, settings) {
     $('#wrap_in_quotes').prop('checked', oai_settings.wrap_in_quotes);
     $('#nsfw_first').prop('checked', oai_settings.nsfw_first);
     $('#jailbreak_system').prop('checked', oai_settings.jailbreak_system);
+    $('#oai_breakdown').prop('checked', oai_settings.oai_breakdown);
 
     if (settings.main_prompt !== undefined) oai_settings.main_prompt = settings.main_prompt;
     if (settings.nsfw_prompt !== undefined) oai_settings.nsfw_prompt = settings.nsfw_prompt;
@@ -839,6 +886,7 @@ async function saveOpenAIPreset(name, settings) {
         jailbreak_system: settings.jailbreak_system,
         impersonation_prompt: settings.impersonation_prompt,
         bias_preset_selected: settings.bias_preset_selected,
+        oai_breakdown: settings.oai_breakdown,
     };
 
     const savePresetSettings = await fetch(`/savepreset_openai?name=${name}`, {
@@ -1046,7 +1094,7 @@ async function onDeletePresetClick() {
     const response = await fetch('/deletepreset_openai', {
         method: 'POST',
         headers: getRequestHeaders(),
-        body: JSON.stringify({name: nameToDelete}),
+        body: JSON.stringify({ name: nameToDelete }),
     });
 
     if (!response.ok) {
@@ -1097,6 +1145,7 @@ function onSettingsPresetChange() {
         wrap_in_quotes: ['#wrap_in_quotes', 'wrap_in_quotes', true],
         nsfw_first: ['#nsfw_first', 'nsfw_first', true],
         jailbreak_system: ['#jailbreak_system', 'jailbreak_system', true],
+        oai_breakdown: ['#oai_breakdown', 'oai_breakdown', true],
         main_prompt: ['#main_prompt_textarea', 'main_prompt', false],
         nsfw_prompt: ['#nsfw_prompt_textarea', 'nsfw_prompt', false],
         jailbreak_prompt: ['#jailbreak_prompt_textarea', 'jailbreak_prompt', false],
@@ -1163,7 +1212,7 @@ function onReverseProxyInput() {
 async function onConnectButtonClick(e) {
     e.stopPropagation();
     const api_key_openai = $('#api_key_openai').val().trim();
 
     if (api_key_openai.length) {
         await writeSecret(SECRET_KEYS.OPENAI, api_key_openai);
     }
@@ -1269,6 +1318,16 @@ $(document).ready(function () {
         saveSettingsDebounced();
     });
 
+    $("#oai_breakdown").on('change', function () {
+        oai_settings.oai_breakdown = !!$(this).prop("checked");
+        if (!oai_settings.oai_breakdown) {
+            $("#token_breakdown").css('display', 'none');
+        } else {
+            $("#token_breakdown").css('display', 'flex');
+        }
+        saveSettingsDebounced();
+    });
+
     // auto-select a preset based on character/group name
     $(document).on("click", ".character_select", function () {
         const chid = $(this).attr('chid');
@@ -1322,18 +1381,18 @@ $(document).ready(function () {
         saveSettingsDebounced();
     });
 
-    $("#api_button_openai").on('click', onConnectButtonClick);
-    $("#openai_reverse_proxy").on('input', onReverseProxyInput);
-    $("#model_openai_select").on('change', onModelChange);
-    $("#settings_perset_openai").on('change', onSettingsPresetChange);
-    $("#new_oai_preset").on('click', onNewPresetClick);
-    $("#delete_oai_preset").on('click', onDeletePresetClick);
-    $("#openai_api_usage").on('click', showApiKeyUsage);
-    $('#openai_logit_bias_preset').on('change', onLogitBiasPresetChange);
-    $('#openai_logit_bias_new_preset').on('click', createNewLogitBiasPreset);
-    $('#openai_logit_bias_new_entry').on('click', createNewLogitBiasEntry);
-    $('#openai_logit_bias_import_file').on('input', onLogitBiasPresetImportFileChange);
-    $('#openai_logit_bias_import_preset').on('click', onLogitBiasPresetImportClick);
-    $('#openai_logit_bias_export_preset').on('click', onLogitBiasPresetExportClick);
-    $('#openai_logit_bias_delete_preset').on('click', onLogitBiasPresetDeleteClick);
+    $("#api_button_openai").on("click", onConnectButtonClick);
+    $("#openai_reverse_proxy").on("input", onReverseProxyInput);
+    $("#model_openai_select").on("change", onModelChange);
+    $("#settings_perset_openai").on("change", onSettingsPresetChange);
+    $("#new_oai_preset").on("click", onNewPresetClick);
+    $("#delete_oai_preset").on("click", onDeletePresetClick);
+    $("#openai_api_usage").on("click", showApiKeyUsage);
+    $("#openai_logit_bias_preset").on("change", onLogitBiasPresetChange);
+    $("#openai_logit_bias_new_preset").on("click", createNewLogitBiasPreset);
+    $("#openai_logit_bias_new_entry").on("click", createNewLogitBiasEntry);
+    $("#openai_logit_bias_import_file").on("input", onLogitBiasPresetImportFileChange);
+    $("#openai_logit_bias_import_preset").on("click", onLogitBiasPresetImportClick);
+    $("#openai_logit_bias_export_preset").on("click", onLogitBiasPresetExportClick);
+    $("#openai_logit_bias_delete_preset").on("click", onLogitBiasPresetDeleteClick);
 });