Commit by SillyLossy on 2023-05-14 19:47:34 +03:00
7 changed files with 733 additions and 108 deletions

package-lock.json (generated)

@@ -34,6 +34,7 @@
"png-chunks-extract": "^1.0.0",
"rimraf": "^3.0.2",
"sanitize-filename": "^1.6.3",
"uniqolor": "^1.1.0",
"webp-converter": "2.3.2",
"ws": "^8.13.0",
"yargs": "^17.7.1"
@@ -1935,6 +1936,11 @@
"version": "0.0.6",
"license": "MIT"
},
"node_modules/uniqolor": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/uniqolor/-/uniqolor-1.1.0.tgz",
"integrity": "sha512-j2XyokF24fsj+L5u6fbu4rM3RQc6VWJuAngYM2k0ZdG3yiVxt0smLkps2GmQIYqK8VkELGdM9vFU/HfOkK/zoQ=="
},
"node_modules/unpipe": {
"version": "1.0.0",
"license": "MIT",

package.json

@@ -25,6 +25,7 @@
"png-chunks-extract": "^1.0.0",
"rimraf": "^3.0.2",
"sanitize-filename": "^1.6.3",
"uniqolor": "^1.1.0",
"webp-converter": "2.3.2",
"ws": "^8.13.0",
"yargs": "^17.7.1"

public/index.html

@@ -366,6 +366,15 @@
<input id="openai_reverse_proxy" type="text" class="text_pole" placeholder="https://api.openai.com/v1" maxlength="100" />
</div>
</div>
<div class="range-block">
<label for="oai_breakdown" class="checkbox_label widthFreeExpand">
<input id="oai_breakdown" type="checkbox" />
Token Breakdown
</label>
<div class="toggle-description justifyLeft">
Display a breakdown of the tokens used in the request.
</div>
</div>
<div class="range-block">
<div class="range-block-title">
Context Size (tokens)
@@ -1176,7 +1185,7 @@
<div id="advanced-formatting-button" class="drawer">
<div class="drawer-toggle">
<div class="drawer-icon fa-solid fa-font closedIcon" title="AI Reponse Formatting"></div>
<div class="drawer-icon fa-solid fa-font closedIcon" title="AI Response Formatting"></div>
</div>
<div class="drawer-content">
<h3>Advanced Formatting
@@ -2011,7 +2020,7 @@
</div>
<div>
<h3 id="character_popup_text_h3"></h3> - Advanced Defininitions
<h3 id="character_popup_text_h3"></h3> - Advanced Definitions
</div>
</div>
@@ -2036,7 +2045,7 @@
<div id="talkativeness_div">
<h4>Talkativeness</h4>
<h5>How often the chracter speaks in &nbsp;<span class="warning">group chats!</span>
<h5>How often the character speaks in &nbsp;<span class="warning">group chats!</span>
</h5>
<input id="talkativeness_slider" name="talkativeness" type="range" min="0" max="1" step="0.05" value="0.5" form="form_create">
<div id="talkativeness_hint">
@@ -2411,6 +2420,11 @@
<div id="chat">
</div>
<div id="form_sheld">
<div id="token_breakdown" style="display:none;">
<div>
<!-- Token Breakdown Goes Here -->
</div>
</div>
<div id="dialogue_del_mes">
<div id="dialogue_del_mes_ok" class="menu_button">Delete</div>
<div id="dialogue_del_mes_cancel" class="menu_button">Cancel</div>
@@ -2420,7 +2434,7 @@
<textarea id="send_textarea" placeholder="Not connected to API!" name="text"></textarea>
<div id="send_but_sheld">
<div id="loading_mes">
<div alt="" class="fa-solid fa-hourglass-half"></div>
<div title="Loading" class="fa-solid fa-hourglass-half"></div>
</div>
<div id="send_but" class="fa-solid fa-feather-pointed" title="Send a message"></div>
</div>

public/script.js

@@ -125,6 +125,7 @@ import {
secret_state,
writeSecret
} from "./scripts/secrets.js";
import uniqolor from "./scripts/uniqolor.js";
//exporting functions and vars for mods
export {
@@ -204,6 +205,7 @@ let converter;
reloadMarkdownProcessor();
// array for prompt token calculations
console.log('initializing Prompt Itemization Array on Startup');
let itemizedPrompts = [];
/* let bg_menu_toggle = false; */
@@ -1125,28 +1127,34 @@ function addOneMessage(mes, { type = "normal", insertAfter = null, scroll = true
if (isSystem) {
newMessage.find(".mes_edit").hide();
newMessage.find(".mes_prompt").hide(); //dont'd need prompt display for sys messages
newMessage.find(".mes_prompt").hide(); //don't need prompt button for sys
}
// don't need prompt butons for user messages
// don't need prompt button for user
if (params.isUser === true) {
newMessage.find(".mes_prompt").hide();
console.log(`hiding prompt for user mesID ${params.mesId}`);
}
//shows or hides the Prompt display button
let mesIdToFind = Number(newMessage.attr('mesId'));
if (itemizedPrompts.length !== 0) {
console.log(`itemizedPrompt.length = ${itemizedPrompts.length}`)
for (var i = 0; i < itemizedPrompts.length; i++) {
if (itemizedPrompts[i].mesId === mesIdToFind) {
newMessage.find(".mes_prompt").show();
console.log(`showing prompt for mesID ${params.mesId} from ${params.characterName}`);
} else {
console.log('no cache found for mesID, hiding prompt button and continuing search');
console.log(`no cache obj for mesID ${mesIdToFind}, hiding prompt button and continuing search`);
newMessage.find(".mes_prompt").hide();
console.log(itemizedPrompts);
}
}
} else { //hide all when prompt cache is empty
} else if (params.isUser !== true) { //hide all when prompt cache is empty
console.log('saw empty prompt cache, hiding all prompt buttons');
$(".mes_prompt").hide();
}
//console.log(itemizedPrompts);
} else { console.log('skipping prompt data for User Message'); }
newMessage.find('.avatar img').on('error', function () {
$(this).hide();
@@ -1590,6 +1598,7 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
const isImpersonate = type == "impersonate";
const isInstruct = power_user.instruct.enabled;
message_already_generated = isImpersonate ? `${name1}: ` : `${name2}: `;
// Name for the multigen prefix
const magName = isImpersonate ? (is_pygmalion ? 'You' : name1) : name2;
@@ -1630,10 +1639,10 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
// Set empty promise resolution functions
if (typeof resolve !== 'function') {
resolve = () => {};
resolve = () => { };
}
if (typeof reject !== 'function') {
reject = () => {};
reject = () => { };
}
if (selected_group && !is_group_generating) {
@@ -2083,32 +2092,7 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
generatedPromtCache +
promptBias;
//set array object for prompt token itemization of this message
let thisPromptBits = {
mesId: count_view_mes,
worldInfoBefore: worldInfoBefore,
allAnchors: allAnchors,
summarizeString: (extension_prompts['1_memory']?.value || ''),
authorsNoteString: (extension_prompts['2_floating_prompt']?.value || ''),
worldInfoString: worldInfoString,
storyString: storyString,
worldInfoAfter: worldInfoAfter,
afterScenarioAnchor: afterScenarioAnchor,
examplesString: examplesString,
mesSendString: mesSendString,
generatedPromtCache: generatedPromtCache,
promptBias: promptBias,
finalPromt: finalPromt,
charDescription: charDescription,
charPersonality: charPersonality,
scenarioText: scenarioText,
promptBias: promptBias,
storyString: storyString,
this_max_context: this_max_context,
padding: power_user.token_padding
}
itemizedPrompts.push(thisPromptBits);
if (zeroDepthAnchor && zeroDepthAnchor.length) {
if (!isMultigenEnabled() || tokens_already_generated == 0) {
@@ -2127,6 +2111,11 @@
}
}
// Add quiet generation prompt at depth 0
if (quiet_prompt && quiet_prompt.length) {
finalPromt += `\n${quiet_prompt}`;
}
finalPromt = finalPromt.replace(/\r/gm, '');
if (power_user.collapse_newlines) {
@@ -2162,6 +2151,8 @@
}
}
let thisPromptBits = [];
if (main_api == 'kobold' && horde_settings.use_horde && horde_settings.auto_adjust_response_length) {
this_amount_gen = Math.min(this_amount_gen, adjustedParams.maxLength);
this_amount_gen = Math.max(this_amount_gen, MIN_AMOUNT_GEN); // prevent validation errors
@@ -2197,7 +2188,51 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
console.log('rungenerate calling API');
if (main_api == 'openai') {
let prompt = await prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldInfoAfter, afterScenarioAnchor, promptBias, type);
let [prompt, counts] = await prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldInfoAfter, afterScenarioAnchor, promptBias, type);
// counts will return false if the user has not enabled the token breakdown feature
if (counts) {
//$('#token_breakdown').css('display', 'flex');
const breakdown_bar = $('#token_breakdown div:first-child');
breakdown_bar.empty();
const total = Object.values(counts).reduce((acc, val) => acc + val, 0);
console.log(`oai start tokens: ${Object.entries(counts)[0][1]}`);
thisPromptBits.push({
oaiStartTokens: Object.entries(counts)[0][1],
oaiPromptTokens: Object.entries(counts)[1][1],
oaiBiasTokens: Object.entries(counts)[2][1],
oaiNudgeTokens: Object.entries(counts)[3][1],
oaiJailbreakTokens: Object.entries(counts)[4][1],
oaiImpersonateTokens: Object.entries(counts)[5][1],
oaiExamplesTokens: Object.entries(counts)[6][1],
oaiConversationTokens: Object.entries(counts)[7][1],
oaiTotalTokens: total,
})
console.log(`added OAI prompt bits to array`);
Object.entries(counts).forEach(([type, value]) => {
if (value === 0) {
return;
}
const percent_value = (value / total) * 100;
const color = uniqolor(type, { saturation: 50, lightness: 75, }).color;
const bar = document.createElement('div');
bar.style.width = `${percent_value}%`;
bar.classList.add('token_breakdown_segment');
bar.style.backgroundColor = color + 'AA';
bar.style.borderColor = color + 'FF';
bar.innerText = value;
bar.title = `${type}: ${percent_value.toFixed(2)}%`;
breakdown_bar.append(bar);
});
}
setInContextMessages(openai_messages_count, type);
if (isStreamingEnabled() && type !== 'quiet') {
@@ -2237,10 +2272,62 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
}); //end of "if not data error"
}
//set array object for prompt token itemization of this message
let currentArrayEntry = Number(thisPromptBits.length - 1);
let additionalPromptStuff = {
...thisPromptBits[currentArrayEntry],
mesId: Number(count_view_mes),
worldInfoBefore: worldInfoBefore,
allAnchors: allAnchors,
summarizeString: (extension_prompts['1_memory']?.value || ''),
authorsNoteString: (extension_prompts['2_floating_prompt']?.value || ''),
worldInfoString: worldInfoString,
storyString: storyString,
worldInfoAfter: worldInfoAfter,
afterScenarioAnchor: afterScenarioAnchor,
examplesString: examplesString,
mesSendString: mesSendString,
generatedPromtCache: generatedPromtCache,
promptBias: promptBias,
finalPromt: finalPromt,
charDescription: charDescription,
charPersonality: charPersonality,
scenarioText: scenarioText,
this_max_context: this_max_context,
padding: power_user.token_padding,
main_api: main_api,
};
thisPromptBits = additionalPromptStuff;
console.log(thisPromptBits);
itemizedPrompts.push(thisPromptBits);
//console.log(`pushed prompt bits to itemizedPrompts array. Length is now: ${itemizedPrompts.length}`);
if (isStreamingEnabled() && type !== 'quiet') {
hideSwipeButtons();
let getMessage = await streamingProcessor.generate();
// Cohee: Basically a dead-end code... (disabled by isStreamingEnabled)
// I wasn't able to get multigen working with real streaming
// consistently without screwing the interim prompting
if (isMultigenEnabled()) {
tokens_already_generated += this_amount_gen; // add new gen amt to any prev gen counter..
message_already_generated += getMessage;
promptBias = '';
if (!streamingProcessor.isStopped && shouldContinueMultigen(getMessage, isImpersonate)) {
streamingProcessor.isFinished = false;
runGenerate(getMessage);
console.log('returning to make generate again');
return;
}
getMessage = message_already_generated;
}
if (streamingProcessor && !streamingProcessor.isStopped && streamingProcessor.isFinished) {
streamingProcessor.onFinishStreaming(streamingProcessor.messageId, getMessage);
streamingProcessor = null;
@@ -2376,8 +2463,9 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
} //generate ends
function promptItemize(itemizedPrompts, requestedMesId) {
let incomingMesId = Number(requestedMesId);
let thisPromptSet = undefined;
var incomingMesId = Number(requestedMesId);
console.log(`looking for MesId ${incomingMesId}`);
var thisPromptSet = undefined;
for (var i = 0; i < itemizedPrompts.length; i++) {
if (itemizedPrompts[i].mesId === incomingMesId) {
@@ -2391,44 +2479,183 @@ function promptItemize(itemizedPrompts, requestedMesId) {
return null;
}
let finalPromptTokens = getTokenCount(itemizedPrompts[thisPromptSet].finalPromt);
let allAnchorsTokens = getTokenCount(itemizedPrompts[thisPromptSet].allAnchors);
let summarizeStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].summarizeString);
let authorsNoteStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].authorsNoteString);
let afterScenarioAnchorTokens = getTokenCount(itemizedPrompts[thisPromptSet].afterScenarioAnchor);
let zeroDepthAnchorTokens = getTokenCount(itemizedPrompts[thisPromptSet].afterScenarioAnchor);
let worldInfoStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].worldInfoString);
let storyStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].storyString);
let examplesStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].examplesString);
let charPersonalityTokens = getTokenCount(itemizedPrompts[thisPromptSet].charPersonality);
let charDescriptionTokens = getTokenCount(itemizedPrompts[thisPromptSet].charDescription);
let scenarioTextTokens = getTokenCount(itemizedPrompts[thisPromptSet].scenarioText);
let promptBiasTokens = getTokenCount(itemizedPrompts[thisPromptSet].promptBias);
let mesSendStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].mesSendString)
let ActualChatHistoryTokens = mesSendStringTokens - allAnchorsTokens + power_user.token_padding;
let thisPrompt_max_context = itemizedPrompts[thisPromptSet].this_max_context;
let thisPrompt_padding = itemizedPrompts[thisPromptSet].padding;
//these happen regardless of API
var charPersonalityTokens = getTokenCount(itemizedPrompts[thisPromptSet].charPersonality);
var charDescriptionTokens = getTokenCount(itemizedPrompts[thisPromptSet].charDescription);
var scenarioTextTokens = getTokenCount(itemizedPrompts[thisPromptSet].scenarioText);
var allAnchorsTokens = getTokenCount(itemizedPrompts[thisPromptSet].allAnchors);
var summarizeStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].summarizeString);
var authorsNoteStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].authorsNoteString);
var afterScenarioAnchorTokens = getTokenCount(itemizedPrompts[thisPromptSet].afterScenarioAnchor);
var zeroDepthAnchorTokens = getTokenCount(itemizedPrompts[thisPromptSet].afterScenarioAnchor);
var worldInfoStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].worldInfoString);
var thisPrompt_max_context = itemizedPrompts[thisPromptSet].this_max_context;
var thisPrompt_padding = itemizedPrompts[thisPromptSet].padding;
var promptBiasTokens = getTokenCount(itemizedPrompts[thisPromptSet].promptBias);
var this_main_api = itemizedPrompts[thisPromptSet].main_api;
let totalTokensInPrompt =
storyStringTokens + //chardefs total
worldInfoStringTokens +
ActualChatHistoryTokens + //chat history
allAnchorsTokens + // AN and/or legacy anchors
//afterScenarioAnchorTokens + //only counts if AN is set to 'after scenario'
//zeroDepthAnchorTokens + //same as above, even if AN not on 0 depth
promptBiasTokens + //{{}}
- thisPrompt_padding; //not sure this way of calculating is correct, but the math results in same value as 'finalPromt'
if (this_main_api == 'openai') {
//for OAI API
//console.log('-- Counting OAI Tokens');
var finalPromptTokens = itemizedPrompts[thisPromptSet].oaiTotalTokens;
var oaiStartTokens = itemizedPrompts[thisPromptSet].oaiStartTokens;
console.log(oaiStartTokens);
var oaiPromptTokens = itemizedPrompts[thisPromptSet].oaiPromptTokens;
var ActualChatHistoryTokens = itemizedPrompts[thisPromptSet].oaiConversationTokens;
var examplesStringTokens = itemizedPrompts[thisPromptSet].oaiExamplesTokens;
var oaiBiasTokens = itemizedPrompts[thisPromptSet].oaiBiasTokens;
var oaiJailbreakTokens = itemizedPrompts[thisPromptSet].oaiJailbreakTokens;
var oaiNudgeTokens = itemizedPrompts[thisPromptSet].oaiNudgeTokens;
var oaiImpersonateTokens = itemizedPrompts[thisPromptSet].oaiImpersonateTokens;
let storyStringTokensPercentage = ((storyStringTokens / (totalTokensInPrompt + thisPrompt_padding)) * 100).toFixed(2);
let ActualChatHistoryTokensPercentage = ((ActualChatHistoryTokens / (totalTokensInPrompt + thisPrompt_padding)) * 100).toFixed(2);
let promptBiasTokensPercentage = ((promptBiasTokens / (totalTokensInPrompt + thisPrompt_padding)) * 100).toFixed(2);
let worldInfoStringTokensPercentage = ((worldInfoStringTokens / (totalTokensInPrompt + thisPrompt_padding)) * 100).toFixed(2);
let allAnchorsTokensPercentage = ((allAnchorsTokens / (totalTokensInPrompt + thisPrompt_padding)) * 100).toFixed(2);
let selectedTokenizer = $("#tokenizer").find(':selected').text();
callPopup(
`
} else {
//for non-OAI APIs
//console.log('-- Counting non-OAI Tokens');
var finalPromptTokens = getTokenCount(itemizedPrompts[thisPromptSet].finalPromt);
var storyStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].storyString);
var examplesStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].examplesString);
var mesSendStringTokens = getTokenCount(itemizedPrompts[thisPromptSet].mesSendString)
var ActualChatHistoryTokens = mesSendStringTokens - allAnchorsTokens + power_user.token_padding;
var totalTokensInPrompt =
storyStringTokens + //chardefs total
worldInfoStringTokens +
ActualChatHistoryTokens + //chat history
allAnchorsTokens + // AN and/or legacy anchors
//afterScenarioAnchorTokens + //only counts if AN is set to 'after scenario'
//zeroDepthAnchorTokens + //same as above, even if AN not on 0 depth
promptBiasTokens; //{{}}
//- thisPrompt_padding; //not sure this way of calculating is correct, but the math results in same value as 'finalPromt'
}
if (this_main_api == 'openai') {
//console.log('-- applying % on OAI tokens');
var oaiStartTokensPercentage = ((oaiStartTokens / (finalPromptTokens)) * 100).toFixed(2);
console.log(oaiStartTokensPercentage);
var storyStringTokensPercentage = ((oaiPromptTokens / (finalPromptTokens)) * 100).toFixed(2);
var ActualChatHistoryTokensPercentage = ((ActualChatHistoryTokens / (finalPromptTokens)) * 100).toFixed(2);
var promptBiasTokensPercentage = ((oaiBiasTokens / (finalPromptTokens)) * 100).toFixed(2);
var worldInfoStringTokensPercentage = ((worldInfoStringTokens / (finalPromptTokens)) * 100).toFixed(2);
var allAnchorsTokensPercentage = ((allAnchorsTokens / (finalPromptTokens)) * 100).toFixed(2);
var selectedTokenizer = $("#tokenizer").find(':selected').text();
} else {
//console.log('-- applying % on non-OAI tokens');
var storyStringTokensPercentage = ((storyStringTokens / (totalTokensInPrompt)) * 100).toFixed(2);
var ActualChatHistoryTokensPercentage = ((ActualChatHistoryTokens / (totalTokensInPrompt)) * 100).toFixed(2);
var promptBiasTokensPercentage = ((promptBiasTokens / (totalTokensInPrompt)) * 100).toFixed(2);
var worldInfoStringTokensPercentage = ((worldInfoStringTokens / (totalTokensInPrompt)) * 100).toFixed(2);
var allAnchorsTokensPercentage = ((allAnchorsTokens / (totalTokensInPrompt)) * 100).toFixed(2);
var selectedTokenizer = $("#tokenizer").find(':selected').text();
}
if (this_main_api == 'openai') {
//console.log('-- calling popup for OAI tokens');
callPopup(
`
<h3>Prompt Itemization</h3>
Tokenizer: ${selectedTokenizer}<br>
API Used: ${this_main_api}<br>
<span class="tokenItemizingSubclass">
Only the white numbers really matter. All numbers are estimates.
Grey color items may not have been included in the context due to certain prompt format settings.
</span>
<hr class="sysHR">
<div class="justifyLeft">
<div class="flex-container">
<div class="flex-container flex1 flexFlowColumns flexNoGap wide50p tokenGraph">
<div class="wide100p" style="background-color: grey; height: ${oaiStartTokensPercentage}%;"></div>
<div class="wide100p" style="background-color: indianred; height: ${storyStringTokensPercentage}%;"></div>
<div class="wide100p" style="background-color: gold; height: ${worldInfoStringTokensPercentage}%;"></div>
<div class="wide100p" style="background-color: palegreen; height: ${ActualChatHistoryTokensPercentage}%;"></div>
<div class="wide100p" style="background-color: cornflowerblue; height: ${allAnchorsTokensPercentage}%;"></div>
<div class="wide100p" style="background-color: mediumpurple; height: ${promptBiasTokensPercentage}%;"></div>
</div>
<div class="flex-container wide50p">
<div class="wide100p flex-container flexNoGap flexFlowColumn">
<div class="flex-container wide100p">
<div class="flex1" style="color: grey;">Chat Startup:</div>
<div class=""> ${oaiStartTokens}</div>
</div>
</div>
<div class="wide100p flex-container flexNoGap flexFlowColumn">
<div class="flex-container wide100p">
<div class="flex1" style="color: indianred;">Prompt Tokens:</div>
<div class=""> ${oaiPromptTokens}</div>
</div>
<div class="flex-container ">
<div class=" flex1 tokenItemizingSubclass">-- Description: </div>
<div class="tokenItemizingSubclass">${charDescriptionTokens}</div>
</div>
<div class="flex-container ">
<div class=" flex1 tokenItemizingSubclass">-- Personality:</div>
<div class="tokenItemizingSubclass"> ${charPersonalityTokens}</div>
</div>
<div class="flex-container ">
<div class=" flex1 tokenItemizingSubclass">-- Scenario: </div>
<div class="tokenItemizingSubclass">${scenarioTextTokens}</div>
</div>
<div class="flex-container ">
<div class=" flex1 tokenItemizingSubclass">-- Examples:</div>
<div class="tokenItemizingSubclass"> ${examplesStringTokens}</div>
</div>
</div>
<div class="wide100p flex-container">
<div class="flex1" style="color: gold;">World Info:</div>
<div class="">${worldInfoStringTokens}</div>
</div>
<div class="wide100p flex-container">
<div class="flex1" style="color: palegreen;">Chat History:</div>
<div class=""> ${ActualChatHistoryTokens}</div>
</div>
<div class="wide100p flex-container flexNoGap flexFlowColumn">
<div class="wide100p flex-container">
<div class="flex1" style="color: cornflowerblue;">Extensions:</div>
<div class="">${allAnchorsTokens}</div>
</div>
<div class="flex-container ">
<div class=" flex1 tokenItemizingSubclass">-- Summarize: </div>
<div class="tokenItemizingSubclass">${summarizeStringTokens}</div>
</div>
<div class="flex-container ">
<div class=" flex1 tokenItemizingSubclass">-- Author's Note:</div>
<div class="tokenItemizingSubclass"> ${authorsNoteStringTokens}</div>
</div>
</div>
<div class="wide100p flex-container">
<div class="flex1" style="color: mediumpurple;">{{}} Bias:</div><div class="">${oaiBiasTokens}</div>
</div>
</div>
</div>
<hr class="sysHR">
<div class="wide100p flex-container flexFlowColumns">
<div class="flex-container wide100p">
<div class="flex1">Total Tokens in Prompt:</div><div class=""> ${finalPromptTokens}</div>
</div>
<div class="flex-container wide100p">
<div class="flex1">Max Context:</div><div class="">${thisPrompt_max_context}</div>
</div>
<div class="flex-container wide100p">
<div class="flex1">- Padding:</div><div class=""> ${thisPrompt_padding}</div>
</div>
<div class="flex-container wide100p">
<div class="flex1">Actual Max Context Allowed:</div><div class="">${thisPrompt_max_context - thisPrompt_padding}</div>
</div>
</div>
</div>
<hr class="sysHR">
`, 'text'
);
} else {
//console.log('-- calling popup for non-OAI tokens');
callPopup(
`
<h3>Prompt Itemization</h3>
Tokenizer: ${selectedTokenizer}<br>
API Used: ${this_main_api}<br>
<span class="tokenItemizingSubclass">
Only the white numbers really matter. All numbers are estimates.
Grey color items may not have been included in the context due to certain prompt format settings.
@@ -2513,7 +2740,8 @@ function promptItemize(itemizedPrompts, requestedMesId) {
</div>
<hr class="sysHR">
`, 'text'
);
);
}
}
function setInContextMessages(lastmsg, type) {
@@ -2830,7 +3058,7 @@ function saveReply(type, getMessage, this_mes_is_name, title) {
} else {
item['swipe_id'] = 0;
item['swipes'] = [];
item['swipes'][0] = chat[chat.length - 1]['mes'];
item['swipes'][0] = chat[chat.length - 1]['mes'];
}
return { type, getMessage };
@@ -3243,8 +3471,10 @@ function changeMainAPI() {
// Hide common settings for OpenAI
if (selectedVal == "openai") {
$("#common-gen-settings-block").css("display", "none");
//$("#token_breakdown").css("display", "flex");
} else {
$("#common-gen-settings-block").css("display", "block");
//$("#token_breakdown").css("display", "none");
}
// Hide amount gen for poe
if (selectedVal == "poe") {

public/scripts/openai.js

@@ -101,6 +101,7 @@ const default_settings = {
openai_model: 'gpt-3.5-turbo',
jailbreak_system: false,
reverse_proxy: '',
oai_breakdown: false,
};
const oai_settings = {
@@ -125,6 +126,7 @@ const oai_settings = {
openai_model: 'gpt-3.5-turbo',
jailbreak_system: false,
reverse_proxy: '',
oai_breakdown: false,
};
let openai_setting_names;
@@ -305,16 +307,18 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
let openai_msgs_tosend = [];
// todo: static value, maybe include in the initial context calculation
const handler_instance = new TokenHandler(countTokens);
let new_chat_msg = { "role": "system", "content": "[Start a new chat]" };
let start_chat_count = countTokens([new_chat_msg], true);
let start_chat_count = handler_instance.count([new_chat_msg], true, 'start_chat');
await delay(1);
let total_count = countTokens([prompt_msg], true) + start_chat_count;
let total_count = handler_instance.count([prompt_msg], true, 'prompt') + start_chat_count;
await delay(1);
if (bias && bias.trim().length) {
let bias_msg = { "role": "system", "content": bias.trim() };
openai_msgs.push(bias_msg);
total_count += countTokens([bias_msg], true);
total_count += handler_instance.count([bias_msg], true, 'bias');
await delay(1);
}
@@ -331,13 +335,14 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
openai_msgs.push(group_nudge);
// add a group nudge count
let group_nudge_count = countTokens([group_nudge], true);
let group_nudge_count = handler_instance.count([group_nudge], true, 'nudge');
await delay(1);
total_count += group_nudge_count;
// recount tokens for new start message
total_count -= start_chat_count
start_chat_count = countTokens([new_chat_msg], true);
handler_instance.uncount(start_chat_count, 'start_chat');
start_chat_count = handler_instance.count([new_chat_msg], true);
await delay(1);
total_count += start_chat_count;
}
@@ -346,7 +351,7 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
const jailbreakMessage = { "role": "system", "content": substituteParams(oai_settings.jailbreak_prompt) };
openai_msgs.push(jailbreakMessage);
total_count += countTokens([jailbreakMessage], true);
total_count += handler_instance.count([jailbreakMessage], true, 'jailbreak');
await delay(1);
}
@@ -354,7 +359,7 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
const impersonateMessage = { "role": "system", "content": substituteParams(oai_settings.impersonation_prompt) };
openai_msgs.push(impersonateMessage);
total_count += countTokens([impersonateMessage], true);
total_count += handler_instance.count([impersonateMessage], true, 'impersonate');
await delay(1);
}
@@ -377,12 +382,12 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
examples_tosend.push(example);
}
}
total_count += countTokens(examples_tosend, true);
total_count += handler_instance.count(examples_tosend, true, 'examples');
await delay(1);
// go from newest message to oldest, because we want to delete the older ones from the context
for (let j = openai_msgs.length - 1; j >= 0; j--) {
let item = openai_msgs[j];
let item_count = countTokens(item, true);
let item_count = handler_instance.count(item, true, 'conversation');
await delay(1);
// If we have enough space for this message, also account for the max assistant reply size
if ((total_count + item_count) < (this_max_context - oai_settings.openai_max_tokens)) {
@@ -391,13 +396,14 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
}
else {
// early break since if we still have more messages, they just won't fit anyway
handler_instance.uncount(item_count, 'conversation');
break;
}
}
} else {
for (let j = openai_msgs.length - 1; j >= 0; j--) {
let item = openai_msgs[j];
let item_count = countTokens(item, true);
let item_count = handler_instance.count(item, true, 'conversation');
await delay(1);
// If we have enough space for this message, also account for the max assistant reply size
if ((total_count + item_count) < (this_max_context - oai_settings.openai_max_tokens)) {
@@ -406,11 +412,12 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
}
else {
// early break since if we still have more messages, they just won't fit anyway
handler_instance.uncount(item_count, 'conversation');
break;
}
}
console.log(total_count);
//console.log(total_count);
// each example block contains multiple user/bot messages
for (let example_block of openai_msgs_example) {
@@ -420,7 +427,7 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
example_block = [new_chat_msg, ...example_block];
// add the block only if there is enough space for all its messages
const example_count = countTokens(example_block, true);
const example_count = handler_instance.count(example_block, true, 'examples');
await delay(1);
if ((total_count + example_count) < (this_max_context - oai_settings.openai_max_tokens)) {
examples_tosend.push(...example_block)
@@ -428,6 +435,7 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
}
else {
// early break since more examples probably won't fit anyway
handler_instance.uncount(example_count, 'examples');
break;
}
}
@@ -439,10 +447,14 @@ async function prepareOpenAIMessages(name2, storyString, worldInfoBefore, worldI
openai_msgs_tosend.reverse();
openai_msgs_tosend = [prompt_msg, ...examples_tosend, new_chat_msg, ...openai_msgs_tosend]
console.log("We're sending this:")
console.log(openai_msgs_tosend);
console.log(`Calculated the total context to be ${total_count} tokens`);
return openai_msgs_tosend;
//console.log("We're sending this:")
//console.log(openai_msgs_tosend);
//console.log(`Calculated the total context to be ${total_count} tokens`);
handler_instance.log();
return [
openai_msgs_tosend,
oai_settings.oai_breakdown ? handler_instance.counts : false,
];
}
function getSystemPrompt(nsfw_toggle_prompt, enhance_definitions_prompt, wiBefore, storyString, wiAfter, extensionPrompt, isImpersonate) {
@@ -604,9 +616,42 @@ async function calculateLogitBias() {
}
}
class TokenHandler {
constructor(countTokenFn) {
this.countTokenFn = countTokenFn;
this.counts = {
'start_chat': 0,
'prompt': 0,
'bias': 0,
'nudge': 0,
'jailbreak': 0,
'impersonate': 0,
'examples': 0,
'conversation': 0,
};
}
uncount(value, type) {
this.counts[type] -= value;
}
count(messages, full, type) {
//console.log(messages);
const token_count = this.countTokenFn(messages, full);
this.counts[type] += token_count;
return token_count;
}
log() {
const total = Object.values(this.counts).reduce((a, b) => a + b);
console.table({ ...this.counts, 'total': total });
}
}
function countTokens(messages, full = false) {
let chatId = 'undefined';
try {
if (selected_group) {
chatId = groups.find(x => x.id == selected_group)?.chat_id;
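A minimal usage sketch of the TokenHandler added above, for reference (class and bucket names are taken from this diff; countTokens is the helper shown right above, whose (messages, full) shape is assumed from its call sites):

    // Sketch only: exercises the TokenHandler API added in this commit.
    const handler = new TokenHandler(countTokens);
    const msg = { "role": "system", "content": "[Start a new chat]" };
    // count() tallies the tokens into the named bucket and returns the count
    const n = handler.count([msg], true, 'start_chat');
    // uncount() backs a value out, e.g. when a message is dropped from context
    handler.uncount(n, 'start_chat');
    // log() prints a console.table of every bucket plus a computed total
    handler.log();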
@@ -693,6 +738,7 @@ function loadOpenAISettings(data, settings) {
if (settings.nsfw_first !== undefined) oai_settings.nsfw_first = !!settings.nsfw_first;
if (settings.openai_model !== undefined) oai_settings.openai_model = settings.openai_model;
if (settings.jailbreak_system !== undefined) oai_settings.jailbreak_system = !!settings.jailbreak_system;
if (settings.oai_breakdown !== undefined) oai_settings.oai_breakdown = !!settings.oai_breakdown;
$('#stream_toggle').prop('checked', oai_settings.stream_openai);
@@ -708,6 +754,7 @@ function loadOpenAISettings(data, settings) {
$('#wrap_in_quotes').prop('checked', oai_settings.wrap_in_quotes);
$('#nsfw_first').prop('checked', oai_settings.nsfw_first);
$('#jailbreak_system').prop('checked', oai_settings.jailbreak_system);
$('#oai_breakdown').prop('checked', oai_settings.oai_breakdown);
if (settings.main_prompt !== undefined) oai_settings.main_prompt = settings.main_prompt;
if (settings.nsfw_prompt !== undefined) oai_settings.nsfw_prompt = settings.nsfw_prompt;
@@ -827,6 +874,7 @@ async function saveOpenAIPreset(name, settings) {
jailbreak_system: settings.jailbreak_system,
impersonation_prompt: settings.impersonation_prompt,
bias_preset_selected: settings.bias_preset_selected,
oai_breakdown: settings.oai_breakdown,
};
const savePresetSettings = await fetch(`/savepreset_openai?name=${name}`, {
@@ -1034,7 +1082,7 @@ async function onDeletePresetClick() {
const response = await fetch('/deletepreset_openai', {
method: 'POST',
headers: getRequestHeaders(),
body: JSON.stringify({name: nameToDelete}),
body: JSON.stringify({ name: nameToDelete }),
});
if (!response.ok) {
@@ -1085,6 +1133,7 @@ function onSettingsPresetChange() {
wrap_in_quotes: ['#wrap_in_quotes', 'wrap_in_quotes', true],
nsfw_first: ['#nsfw_first', 'nsfw_first', true],
jailbreak_system: ['#jailbreak_system', 'jailbreak_system', true],
oai_breakdown: ['#oai_breakdown', 'oai_breakdown', true],
main_prompt: ['#main_prompt_textarea', 'main_prompt', false],
nsfw_prompt: ['#nsfw_prompt_textarea', 'nsfw_prompt', false],
jailbreak_prompt: ['#jailbreak_prompt_textarea', 'jailbreak_prompt', false],
@@ -1151,7 +1200,7 @@ function onReverseProxyInput() {
async function onConnectButtonClick(e) {
e.stopPropagation();
const api_key_openai = $('#api_key_openai').val().trim();
if (api_key_openai.length) {
await writeSecret(SECRET_KEYS.OPENAI, api_key_openai);
}
@@ -1257,6 +1306,16 @@ $(document).ready(function () {
saveSettingsDebounced();
});
$("#oai_breakdown").on('change', function () {
oai_settings.oai_breakdown = !!$(this).prop("checked");
if (!oai_settings.oai_breakdown) {
$("#token_breakdown").css('display', 'none');
} else {
$("#token_breakdown").css('display', 'flex');
}
saveSettingsDebounced();
});
// auto-select a preset based on character/group name
$(document).on("click", ".character_select", function () {
const chid = $(this).attr('chid');
@@ -1310,18 +1369,18 @@ $(document).ready(function () {
saveSettingsDebounced();
});
$("#api_button_openai").on('click', onConnectButtonClick);
$("#openai_reverse_proxy").on('input', onReverseProxyInput);
$("#model_openai_select").on('change', onModelChange);
$("#settings_perset_openai").on('change', onSettingsPresetChange);
$("#new_oai_preset").on('click', onNewPresetClick);
$("#delete_oai_preset").on('click', onDeletePresetClick);
$("#openai_api_usage").on('click', showApiKeyUsage);
$('#openai_logit_bias_preset').on('change', onLogitBiasPresetChange);
$('#openai_logit_bias_new_preset').on('click', createNewLogitBiasPreset);
$('#openai_logit_bias_new_entry').on('click', createNewLogitBiasEntry);
$('#openai_logit_bias_import_file').on('input', onLogitBiasPresetImportFileChange);
$('#openai_logit_bias_import_preset').on('click', onLogitBiasPresetImportClick);
$('#openai_logit_bias_export_preset').on('click', onLogitBiasPresetExportClick);
$('#openai_logit_bias_delete_preset').on('click', onLogitBiasPresetDeleteClick);
$("#api_button_openai").on("click", onConnectButtonClick);
$("#openai_reverse_proxy").on("input", onReverseProxyInput);
$("#model_openai_select").on("change", onModelChange);
$("#settings_perset_openai").on("change", onSettingsPresetChange);
$("#new_oai_preset").on("click", onNewPresetClick);
$("#delete_oai_preset").on("click", onDeletePresetClick);
$("#openai_api_usage").on("click", showApiKeyUsage);
$("#openai_logit_bias_preset").on("change", onLogitBiasPresetChange);
$("#openai_logit_bias_new_preset").on("click", createNewLogitBiasPreset);
$("#openai_logit_bias_new_entry").on("click", createNewLogitBiasEntry);
$("#openai_logit_bias_import_file").on("input", onLogitBiasPresetImportFileChange);
$("#openai_logit_bias_import_preset").on("click", onLogitBiasPresetImportClick);
$("#openai_logit_bias_export_preset").on("click", onLogitBiasPresetExportClick);
$("#openai_logit_bias_delete_preset").on("click", onLogitBiasPresetDeleteClick);
});

public/scripts/uniqolor.js (new file)

@@ -0,0 +1,303 @@
const SATURATION_BOUND = [0, 100];
const LIGHTNESS_BOUND = [0, 100];
const pad2 = str => `${str.length === 1 ? '0' : ''}${str}`;
const clamp = (num, min, max) => Math.max(Math.min(num, max), min);
const random = (min, max) => Math.floor(Math.random() * ((max - min) + 1)) + min;
const randomExclude = (min, max, exclude) => {
const r = random(min, max);
for (let i = 0; i < exclude?.length; i++) {
const value = exclude[i];
if (value?.length === 2 && r >= value[0] && r <= value[1]) {
return randomExclude(min, max, exclude);
}
}
return r;
};
/**
* Generate hashCode
* @param {string} str
* @return {number}
*/
const hashCode = str => {
const len = str.length;
let hash = 0;
for (let i = 0; i < len; i++) {
hash = ((hash << 5) - hash) + str.charCodeAt(i);
hash &= hash; // Convert to 32bit integer
}
return hash;
};
/**
* Clamps `num` within the inclusive `range` bounds
* @param {number} num
* @param {number|Array} range
* @return {number}
*/
const boundHashCode = (num, range) => {
if (typeof range === 'number') {
return range;
}
return (num % Math.abs(range[1] - range[0])) + range[0];
};
/**
* Sanitizing the `range`
* @param {number|Array} range
* @param {Array} bound
* @return {number|Array}
*/
const sanitizeRange = (range, bound) => {
if (typeof range === 'number') {
return clamp(Math.abs(range), ...bound);
}
if (range.length === 1 || range[0] === range[1]) {
return clamp(Math.abs(range[0]), ...bound);
}
return [
Math.abs(clamp(range[0], ...bound)),
clamp(Math.abs(range[1]), ...bound),
];
};
/**
* @param {number} p
* @param {number} q
* @param {number} t
* @return {number}
*/
const hueToRgb = (p, q, t) => {
if (t < 0) {
t += 1;
} else if (t > 1) {
t -= 1;
}
if (t < 1 / 6) {
return p + ((q - p) * 6 * t);
}
if (t < 1 / 2) {
return q;
}
if (t < 2 / 3) {
return p + ((q - p) * ((2 / 3) - t) * 6);
}
return p;
};
/**
* Converts an HSL color to RGB
* @param {number} h Hue
* @param {number} s Saturation
* @param {number} l Lightness
* @return {Array}
*/
const hslToRgb = (h, s, l) => {
let r;
let g;
let b;
h /= 360;
s /= 100;
l /= 100;
if (s === 0) {
// achromatic
r = g = b = l;
} else {
const q = l < 0.5
? l * (1 + s)
: (l + s) - (l * s);
const p = (2 * l) - q;
r = hueToRgb(p, q, h + (1 / 3));
g = hueToRgb(p, q, h);
b = hueToRgb(p, q, h - (1 / 3));
}
return [
Math.round(r * 255),
Math.round(g * 255),
Math.round(b * 255),
];
};
/**
* Determines whether the RGB color is light or not
* http://www.w3.org/TR/AERT#color-contrast
* @param {number} r Red
* @param {number} g Green
* @param {number} b Blue
* @param {number} differencePoint
* @return {boolean}
*/
const rgbIsLight = (r, g, b, differencePoint) => ((r * 299) + (g * 587) + (b * 114)) / 1000 >= differencePoint; // eslint-disable-line max-len
/**
* Converts an HSL color to string format
* @param {number} h Hue
* @param {number} s Saturation
* @param {number} l Lightness
* @return {string}
*/
const hslToString = (h, s, l) => `hsl(${h}, ${s}%, ${l}%)`;
/**
* Converts RGB color to string format
* @param {number} r Red
* @param {number} g Green
* @param {number} b Blue
* @param {string} format Color format
* @return {string}
*/
const rgbFormat = (r, g, b, format) => {
switch (format) {
case 'rgb':
return `rgb(${r}, ${g}, ${b})`;
case 'hex':
default:
return `#${pad2(r.toString(16))}${pad2(g.toString(16))}${pad2(b.toString(16))}`;
}
};
/**
* Generate unique color from `value`
* @param {string|number} value
* @param {Object} [options={}]
* @param {string} [options.format='hex']
* The color format, it can be one of `hex`, `rgb` or `hsl`
* @param {number|Array} [options.saturation=[50, 55]]
* Determines the color saturation, it can be a number or a range between 0 and 100
* @param {number|Array} [options.lightness=[50, 60]]
* Determines the color lightness, it can be a number or a range between 0 and 100
* @param {number} [options.differencePoint=130]
* Determines the color brightness difference point. We use it to obtain the `isLight` value
* in the output, it can be a number between 0 and 255
* @return {Object}
* @example
*
* ```js
* uniqolor('Hello world!')
* // { color: "#5cc653", isLight: true }
*
* uniqolor('Hello world!', { format: 'rgb' })
* // { color: "rgb(92, 198, 83)", isLight: true }
*
* uniqolor('Hello world!', {
* saturation: 30,
* lightness: [70, 80],
* })
* // { color: "#afd2ac", isLight: true }
*
* uniqolor('Hello world!', {
* saturation: 30,
* lightness: [70, 80],
* differencePoint: 200,
* })
* // { color: "#afd2ac", isLight: false }
* ```
*/
const uniqolor = (value, {
format = 'hex',
saturation = [50, 55],
lightness = [50, 60],
differencePoint = 130,
} = {}) => {
const hash = Math.abs(hashCode(String(value)));
const h = boundHashCode(hash, [0, 360]);
const s = boundHashCode(hash, sanitizeRange(saturation, SATURATION_BOUND));
const l = boundHashCode(hash, sanitizeRange(lightness, LIGHTNESS_BOUND));
const [r, g, b] = hslToRgb(h, s, l);
return {
color: format === 'hsl'
? hslToString(h, s, l)
: rgbFormat(r, g, b, format),
isLight: rgbIsLight(r, g, b, differencePoint),
};
};
/**
* Generate random color
* @param {Object} [options={}]
* @param {string} [options.format='hex']
* The color format, it can be one of `hex`, `rgb` or `hsl`
* @param {number|Array} [options.saturation=[50, 55]]
* Determines the color saturation, it can be a number or a range between 0 and 100
* @param {number|Array} [options.lightness=[50, 60]]
* Determines the color lightness, it can be a number or a range between 0 and 100
* @param {number} [options.differencePoint=130]
* Determines the color brightness difference point. We use it to obtain the `isLight` value
* in the output, it can be a number between 0 and 255
* @param {Array} [options.excludeHue]
* Exclude certain hue ranges. For example to exclude red color range: `[[0, 20], [325, 359]]`
* @return {Object}
* @example
*
* ```js
* // Generate random color
* uniqolor.random()
* // { color: "#644cc8", isLight: false }
*
* // Generate a random color with HSL format
* uniqolor.random({ format: 'hsl' })
* // { color: "hsl(89, 55%, 60%)", isLight: true }
*
* // Generate a random color in specific saturation and lightness
* uniqolor.random({
* saturation: 80,
* lightness: [70, 80],
* })
* // { color: "#c7b9da", isLight: true }
*
* // Generate a random color but exclude red color range
* uniqolor.random({
* excludeHue: [[0, 20], [325, 359]],
* })
* // {color: '#53caab', isLight: true}
* ```
*/
uniqolor.random = ({
format = 'hex',
saturation = [50, 55],
lightness = [50, 60],
differencePoint = 130,
excludeHue,
} = {}) => {
saturation = sanitizeRange(saturation, SATURATION_BOUND);
lightness = sanitizeRange(lightness, LIGHTNESS_BOUND);
const h = excludeHue ? randomExclude(0, 359, excludeHue) : random(0, 359);
const s = typeof saturation === 'number'
? saturation
: random(...saturation);
const l = typeof lightness === 'number'
? lightness
: random(...lightness);
const [r, g, b] = hslToRgb(h, s, l);
return {
color: format === 'hsl'
? hslToString(h, s, l)
: rgbFormat(r, g, b, format),
isLight: rgbIsLight(r, g, b, differencePoint),
};
};
export default uniqolor;
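The token breakdown bar relies on this module being deterministic: the hue is derived from hashCode(value), not from Math.random() (only uniqolor.random() is random), so a given segment label maps to the same color on every render. A small sketch, assuming the module path used by the import in script.js:

    import uniqolor from './scripts/uniqolor.js';

    // Same input and options produce the same color on every call,
    // which keeps segment colors stable across re-renders.
    const a = uniqolor('conversation', { saturation: 50, lightness: 75 });
    const b = uniqolor('conversation', { saturation: 50, lightness: 75 });
    console.log(a.color === b.color); // true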

public/style.css

@@ -385,6 +385,18 @@ code {
justify-content: center;
}
#token_breakdown div {
display: flex;
width: 100%;
justify-content: center;
}
.token_breakdown_segment {
min-width: 40px !important;
border: solid 2px;
border-radius: 5px;
}
#loading_mes {
display: none;