Mirror of https://github.com/SillyTavern/SillyTavern.git (synced 2025-06-05 21:59:27 +02:00)

Clean-up Generate function for better readability
public/script.js: +295 −295
@@ -1700,20 +1700,7 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,

     //for normal messages sent from user..
     if (textareaText != "" && !automatic_trigger && type !== 'quiet') {
-        chat[chat.length] = {};
-        chat[chat.length - 1]['name'] = name1;
-        chat[chat.length - 1]['is_user'] = true;
-        chat[chat.length - 1]['is_name'] = true;
-        chat[chat.length - 1]['send_date'] = humanizedDateTime();
-        chat[chat.length - 1]['mes'] = textareaText;
-        chat[chat.length - 1]['extra'] = {};
-
-        if (messageBias) {
-            console.log('checking bias');
-            chat[chat.length - 1]['extra']['bias'] = messageBias;
-        }
-        //console.log('Generate calls addOneMessage');
-        addOneMessage(chat[chat.length - 1]);
+        sendMessageAsUser(textareaText, messageBias);
     }
     ////////////////////////////////////
     const scenarioText = chat_metadata['scenario'] || characters[this_chid].scenario;
@@ -1812,28 +1799,7 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
     //chat2 = chat2.reverse();

     // Determine token limit
-    let this_max_context = 1487;
-    if (main_api == 'kobold' || main_api == 'textgenerationwebui') {
-        this_max_context = (max_context - amount_gen);
-    }
-    if (main_api == 'novel') {
-        if (novel_tier === 1) {
-            this_max_context = 1024;
-        } else {
-            this_max_context = 2048 - 60;//fix for fat tokens
-            if (nai_settings.model_novel == 'krake-v2') {
-                this_max_context -= 160;
-            }
-        }
-    }
-    if (main_api == 'openai') {
-        this_max_context = oai_settings.openai_max_context;
-    }
-    if (main_api == 'poe') {
-        this_max_context = Number(max_context);
-    }
-
-
+    let this_max_context = getMaxContextSize();

     // Adjust token limit for Horde
     let adjustedParams;
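
The extracted getMaxContextSize() keeps the same per-API budget arithmetic: for kobold/textgenerationwebui the prompt budget is the configured context minus the requested response length, and the NovelAI branch subtracts fixed reserves instead. A quick check of that arithmetic, with illustrative values standing in for the real settings (2048 and 80 are assumptions, not taken from the commit):

    // Hypothetical stand-ins for the max_context / amount_gen settings.
    const max_context = 2048;
    const amount_gen = 80;
    console.log(max_context - amount_gen); // 1968 tokens left for the prompt
    // NovelAI branch (non-tier-1): 2048 - 60 = 1988, and 160 fewer for krake-v2.
    console.log(2048 - 60);       // 1988
    console.log(2048 - 60 - 160); // 1828
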
@@ -2072,22 +2038,8 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
     }

     // add a custom dingus (if defined)
-    if (power_user.custom_chat_separator && power_user.custom_chat_separator.length) {
-        mesSendString = power_user.custom_chat_separator + '\n' + mesSendString;
-    }
-    // if chat start formatting is disabled
-    else if (power_user.disable_start_formatting) {
-        mesSendString = mesSendString;
-    }
-    // add non-pygma dingus
-    else if (!is_pygmalion) {
-        mesSendString = '\nThen the roleplay chat between ' + name1 + ' and ' + name2 + ' begins.\n' + mesSendString;
-    }
-    // add pygma <START>
-    else {
-        mesSendString = '<START>\n' + mesSendString;
-        //mesSendString = mesSendString; //This edit simply removes the first "<START>" that is prepended to all context prompts
-    }
+    mesSendString = adjustChatsSeparator(mesSendString);
     let finalPromt =
         worldInfoBefore +
         storyString +
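
adjustChatsSeparator picks exactly one of four prefixes for the chat block. A self-contained sketch of the same decision (the options object is a hypothetical stand-in for the power_user and is_pygmalion globals the real function reads):

    function sketchSeparator(mes, { customSeparator, disableStartFormatting, isPygmalion, name1, name2 }) {
        if (customSeparator && customSeparator.length) return customSeparator + '\n' + mes;
        if (disableStartFormatting) return mes; // leave the string untouched
        if (!isPygmalion) return '\nThen the roleplay chat between ' + name1 + ' and ' + name2 + ' begins.\n' + mes;
        return '<START>\n' + mes; // pygmalion-style models expect <START>
    }
    console.log(sketchSeparator('Hi!', { isPygmalion: true })); // "<START>\nHi!"
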
@@ -2098,22 +2050,9 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
         generatedPromtCache +
         promptBias;

-
-
     if (zeroDepthAnchor && zeroDepthAnchor.length) {
         if (!isMultigenEnabled() || tokens_already_generated == 0) {
-            const trimBothEnds = !force_name2 && !is_pygmalion;
-            let trimmedPrompt = (trimBothEnds ? zeroDepthAnchor.trim() : zeroDepthAnchor.trimEnd());
-
-            if (trimBothEnds && !finalPromt.endsWith('\n')) {
-                finalPromt += '\n';
-            }
-
-            finalPromt += trimmedPrompt;
-
-            if (force_name2 || is_pygmalion) {
-                finalPromt += ' ';
-            }
+            finalPromt = appendZeroDepthAnchor(force_name2, zeroDepthAnchor, finalPromt);
         }
     }

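
appendZeroDepthAnchor centralizes the whitespace rules around the anchor: trim both ends and glue with a newline for plain prompts, or trim only the tail and append a trailing space for name-forced/pygmalion prompts. A sketch of those two cases, with trimBothEnds passed in directly instead of derived from the force_name2/is_pygmalion globals:

    function sketchAppendAnchor(prompt, anchor, trimBothEnds) {
        const trimmed = trimBothEnds ? anchor.trim() : anchor.trimEnd();
        if (trimBothEnds && !prompt.endsWith('\n')) prompt += '\n';
        prompt += trimmed;
        if (!trimBothEnds) prompt += ' '; // name-forced / pygmalion prompts get a trailing space
        return prompt;
    }
    console.log(JSON.stringify(sketchAppendAnchor('You:', ' anchor ', false))); // "You: anchor "
    console.log(JSON.stringify(sketchAppendAnchor('text', ' anchor ', true)));  // "text\nanchor"
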
@@ -2132,29 +2071,7 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,

     if (isMultigenEnabled() && type !== 'quiet') {
         // if nothing has been generated yet..
-        if (tokens_already_generated === 0) {
-            // if the max gen setting is > 50...(
-            if (parseInt(amount_gen) >= power_user.multigen_first_chunk) {
-                // then only try to make 50 this cycle..
-                this_amount_gen = power_user.multigen_first_chunk;
-            }
-            else {
-                // otherwise, make as much as the max amount request.
-                this_amount_gen = parseInt(amount_gen);
-            }
-        }
-        // if we already received some generated text...
-        else {
-            // if the remaining tokens to be made is less than next potential cycle count
-            if (parseInt(amount_gen) - tokens_already_generated < power_user.multigen_next_chunks) {
-                // subtract already generated amount from the desired max gen amount
-                this_amount_gen = parseInt(amount_gen) - tokens_already_generated;
-            }
-            else {
-                // otherwise make the standard cycle amount (first 50, and 30 after that)
-                this_amount_gen = power_user.multigen_next_chunks;
-            }
-        }
+        this_amount_gen = getMultigenAmount();
     }

     let thisPromptBits = [];
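
getMultigenAmount() schedules one chunk per cycle: a first chunk, then fixed-size follow-ups, then whatever remains. Assuming the defaults implied by the source comment "(first 50, and 30 after that)", a 120-token request is produced as 50 + 30 + 30 + 10. A standalone sketch of that schedule (power_user settings replaced by parameters):

    function nextChunk(amountGen, alreadyGenerated, first = 50, next = 30) {
        if (alreadyGenerated === 0) {
            return amountGen >= first ? first : amountGen;
        }
        const remaining = amountGen - alreadyGenerated;
        return remaining < next ? remaining : next;
    }
    let done = 0;
    const chunks = [];
    while (done < 120) {
        const chunk = nextChunk(120, done);
        chunks.push(chunk);
        done += chunk;
    }
    console.log(chunks.join(' + ')); // "50 + 30 + 30 + 10"
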
@@ -2193,40 +2110,7 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,

         // counts will return false if the user has not enabled the token breakdown feature
         if (counts) {
-            //$('#token_breakdown').css('display', 'flex');
-            const breakdown_bar = $('#token_breakdown div:first-child');
-            breakdown_bar.empty();
-
-            const total = Object.values(counts).filter(x => !Number.isNaN(x)).reduce((acc, val) => acc + val, 0);
-
-            thisPromptBits.push({
-                oaiStartTokens: Object.entries(counts)[0][1],
-                oaiPromptTokens: Object.entries(counts)[1][1],
-                oaiBiasTokens: Object.entries(counts)[2][1],
-                oaiNudgeTokens: Object.entries(counts)[3][1],
-                oaiJailbreakTokens: Object.entries(counts)[4][1],
-                oaiImpersonateTokens: Object.entries(counts)[5][1],
-                oaiExamplesTokens: Object.entries(counts)[6][1],
-                oaiConversationTokens: Object.entries(counts)[7][1],
-                oaiTotalTokens: total,
-            })
-
-            Object.entries(counts).forEach(([type, value]) => {
-                if (value === 0) {
-                    return;
-                }
-                const percent_value = (value / total) * 100;
-                const color = uniqolor(type, { saturation: 50, lightness: 75, }).color;
-                const bar = document.createElement('div');
-                bar.style.width = `${percent_value}%`;
-                bar.classList.add('token_breakdown_segment');
-                bar.style.backgroundColor = color + 'AA';
-                bar.style.borderColor = color + 'FF';
-                bar.innerText = value;
-                bar.title = `${type}: ${percent_value.toFixed(2)}%`;
-                breakdown_bar.append(bar);
-            });
+            parseTokenCounts(counts, thisPromptBits);
         }

         setInContextMessages(openai_messages_count, type);
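
Each segment of the token breakdown bar is sized as a plain percentage of the summed counts. With illustrative numbers (a 512-token slice of 2048 counted tokens, not values from the commit), the segment math works out as:

    const value = 512, total = 2048;
    const percent_value = (value / total) * 100;
    console.log(`${percent_value}%`);            // "25%"    -> CSS width of the segment
    console.log(`${percent_value.toFixed(2)}%`); // "25.00%" -> tooltip text
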
@@ -2314,29 +2198,10 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
     itemizedPrompts.push(thisPromptBits);
     //console.log(`pushed prompt bits to itemizedPrompts array. Length is now: ${itemizedPrompts.length}`);

     if (isStreamingEnabled() && type !== 'quiet') {
         hideSwipeButtons();
         let getMessage = await streamingProcessor.generate();
-
-        // Cohee: Basically a dead-end code... (disabled by isStreamingEnabled)
-        // I wasn't able to get multigen working with real streaming
-        // consistently without screwing the interim prompting
-        if (isMultigenEnabled()) {
-            tokens_already_generated += this_amount_gen; // add new gen amt to any prev gen counter..
-            message_already_generated += getMessage;
-            promptBias = '';
-            if (!streamingProcessor.isStopped && shouldContinueMultigen(getMessage, isImpersonate)) {
-                streamingProcessor.isFinished = false;
-                runGenerate(getMessage);
-                console.log('returning to make generate again');
-                return;
-            }
-
-            getMessage = message_already_generated;
-        }
-
         if (streamingProcessor && !streamingProcessor.isStopped && streamingProcessor.isFinished) {
             streamingProcessor.onFinishStreaming(streamingProcessor.messageId, getMessage);
             streamingProcessor = null;
@@ -2468,6 +2333,152 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
     //console.log('generate ending');
 } //generate ends

+function sendMessageAsUser(textareaText, messageBias) {
+    chat[chat.length] = {};
+    chat[chat.length - 1]['name'] = name1;
+    chat[chat.length - 1]['is_user'] = true;
+    chat[chat.length - 1]['is_name'] = true;
+    chat[chat.length - 1]['send_date'] = humanizedDateTime();
+    chat[chat.length - 1]['mes'] = textareaText;
+    chat[chat.length - 1]['extra'] = {};
+
+    if (messageBias) {
+        console.log('checking bias');
+        chat[chat.length - 1]['extra']['bias'] = messageBias;
+    }
+    //console.log('Generate calls addOneMessage');
+    addOneMessage(chat[chat.length - 1]);
+}
+
+function getMaxContextSize() {
+    let this_max_context = 1487;
+    if (main_api == 'kobold' || main_api == 'textgenerationwebui') {
+        this_max_context = (max_context - amount_gen);
+    }
+    if (main_api == 'novel') {
+        if (novel_tier === 1) {
+            this_max_context = 1024;
+        } else {
+            this_max_context = 2048 - 60; //fix for fat tokens
+            if (nai_settings.model_novel == 'krake-v2') {
+                this_max_context -= 160;
+            }
+        }
+    }
+    if (main_api == 'openai') {
+        this_max_context = oai_settings.openai_max_context;
+    }
+    if (main_api == 'poe') {
+        this_max_context = Number(max_context);
+    }
+    return this_max_context;
+}
+
+function parseTokenCounts(counts, thisPromptBits) {
+    const breakdown_bar = $('#token_breakdown div:first-child');
+    breakdown_bar.empty();
+
+    const total = Object.values(counts).filter(x => !Number.isNaN(x)).reduce((acc, val) => acc + val, 0);
+
+    thisPromptBits.push({
+        oaiStartTokens: Object.entries(counts)[0][1],
+        oaiPromptTokens: Object.entries(counts)[1][1],
+        oaiBiasTokens: Object.entries(counts)[2][1],
+        oaiNudgeTokens: Object.entries(counts)[3][1],
+        oaiJailbreakTokens: Object.entries(counts)[4][1],
+        oaiImpersonateTokens: Object.entries(counts)[5][1],
+        oaiExamplesTokens: Object.entries(counts)[6][1],
+        oaiConversationTokens: Object.entries(counts)[7][1],
+        oaiTotalTokens: total,
+    });
+
+    Object.entries(counts).forEach(([type, value]) => {
+        if (value === 0) {
+            return;
+        }
+        const percent_value = (value / total) * 100;
+        const color = uniqolor(type, { saturation: 50, lightness: 75, }).color;
+        const bar = document.createElement('div');
+        bar.style.width = `${percent_value}%`;
+        bar.classList.add('token_breakdown_segment');
+        bar.style.backgroundColor = color + 'AA';
+        bar.style.borderColor = color + 'FF';
+        bar.innerText = value;
+        bar.title = `${type}: ${percent_value.toFixed(2)}%`;
+        breakdown_bar.append(bar);
+    });
+}
+
+function adjustChatsSeparator(mesSendString) {
+    if (power_user.custom_chat_separator && power_user.custom_chat_separator.length) {
+        mesSendString = power_user.custom_chat_separator + '\n' + mesSendString;
+    }
+    // if chat start formatting is disabled
+    else if (power_user.disable_start_formatting) {
+        mesSendString = mesSendString;
+    }
+    // add non-pygma dingus
+    else if (!is_pygmalion) {
+        mesSendString = '\nThen the roleplay chat between ' + name1 + ' and ' + name2 + ' begins.\n' + mesSendString;
+    }
+    // add pygma <START>
+    else {
+        mesSendString = '<START>\n' + mesSendString;
+        //mesSendString = mesSendString; //This edit simply removes the first "<START>" that is prepended to all context prompts
+    }
+
+    return mesSendString;
+}
+
+function appendZeroDepthAnchor(force_name2, zeroDepthAnchor, finalPromt) {
+    const trimBothEnds = !force_name2 && !is_pygmalion;
+    let trimmedPrompt = (trimBothEnds ? zeroDepthAnchor.trim() : zeroDepthAnchor.trimEnd());
+
+    if (trimBothEnds && !finalPromt.endsWith('\n')) {
+        finalPromt += '\n';
+    }
+
+    finalPromt += trimmedPrompt;
+
+    if (force_name2 || is_pygmalion) {
+        finalPromt += ' ';
+    }
+
+    return finalPromt;
+}
+
+function getMultigenAmount() {
+    let this_amount_gen = parseInt(amount_gen);
+
+    if (tokens_already_generated === 0) {
+        // if the max gen setting is > 50...(
+        if (parseInt(amount_gen) >= power_user.multigen_first_chunk) {
+            // then only try to make 50 this cycle..
+            this_amount_gen = power_user.multigen_first_chunk;
+        }
+        else {
+            // otherwise, make as much as the max amount request.
+            this_amount_gen = parseInt(amount_gen);
+        }
+    }
+    // if we already received some generated text...
+    else {
+        // if the remaining tokens to be made is less than next potential cycle count
+        if (parseInt(amount_gen) - tokens_already_generated < power_user.multigen_next_chunks) {
+            // subtract already generated amount from the desired max gen amount
+            this_amount_gen = parseInt(amount_gen) - tokens_already_generated;
+        }
+        else {
+            // otherwise make the standard cycle amount (first 50, and 30 after that)
+            this_amount_gen = power_user.multigen_next_chunks;
+        }
+    }
+    return this_amount_gen;
+}
+
 function promptItemize(itemizedPrompts, requestedMesId) {
     var incomingMesId = Number(requestedMesId);
     console.log(`looking for MesId ${incomingMesId}`);
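
The new helpers take few parameters because they close over module globals (chat, name1, power_user, and so on) just as the inlined code did. For example, the message object sendMessageAsUser appends has a fixed shape; a standalone sketch with hypothetical stand-ins for those globals and for humanizedDateTime/addOneMessage:

    const chat = [];
    function sketchSendMessageAsUser(text, bias) {
        chat.push({
            name: 'User',                            // name1 in the real code
            is_user: true,
            is_name: true,
            send_date: new Date().toLocaleString(),  // humanizedDateTime() stand-in
            mes: text,
            extra: bias ? { bias } : {},
        });
        return chat[chat.length - 1];                // the real code passes this to addOneMessage
    }
    console.log(sketchSendMessageAsUser('Hello!', null));
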