Mirror of https://github.com/SillyTavern/SillyTavern.git (synced 2025-06-05 21:59:27 +02:00)
Merge branch 'main' into dev
@@ -95,6 +95,7 @@
 "%cd /\n",
 "!git clone https://github.com/Cohee1207/SillyTavern-extras\n",
 "%cd /SillyTavern-extras\n",
 "!git clone https://github.com/Cohee1207/tts_samples\n",
 "!npm install -g localtunnel\n",
 "!pip install -r requirements-complete.txt\n",
+"!pip install tensorflow==2.11\n",
@@ -1393,7 +1393,7 @@ function getExtensionPrompt(position = 0, depth = undefined, separator = "\n") {
 
 function baseChatReplace(value, name1, name2) {
     if (value !== undefined && value.length > 0) {
-        value = substituteParams(value, is_pygmalion ? "You:" : name1, name2);
+        value = substituteParams(value, is_pygmalion ? "You" : name1, name2);
 
         if (power_user.collapse_newlines) {
             value = collapseNewlines(value);
@@ -1410,9 +1410,10 @@ function appendToStoryString(value, prefix) {
 }
 
 function isStreamingEnabled() {
-    return (main_api == 'openai' && oai_settings.stream_openai)
+    return ((main_api == 'openai' && oai_settings.stream_openai)
         || (main_api == 'poe' && poe_settings.streaming)
-        || (main_api == 'textgenerationwebui' && textgenerationwebui_settings.streaming);
+        || (main_api == 'textgenerationwebui' && textgenerationwebui_settings.streaming))
+        && !isMultigenEnabled(); // Multigen has a quasi-streaming mode which breaks the real streaming
 }
 
 class StreamingProcessor {
@@ -1592,7 +1593,15 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
 
     const isImpersonate = type == "impersonate";
-    message_already_generated = isImpersonate ? `${name1}: ` : `${name2}: `;
+    const isInstruct = power_user.instruct.enabled;
+
+    // Name for the multigen prefix
+    const magName = isImpersonate ? (is_pygmalion ? 'You' : name1) : name2;
+
+    if (isInstruct) {
+        message_already_generated = formatInstructModePrompt(magName, isImpersonate);
+    } else {
+        message_already_generated = `${magName}: `;
+    }
 
     const interruptedByCommand = processCommands($("#send_textarea").val(), type);
 
@@ -2018,6 +2027,13 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
             const isBottom = j === mesSend.length - 1;
             mesSendString += mesSend[j];
 
+            // Add quiet generation prompt at depth 0
+            if (isBottom && quiet_prompt && quiet_prompt.length) {
+                const name = is_pygmalion ? 'You' : name1;
+                const quietAppend = isInstruct ? formatInstructModeChat(name, quiet_prompt, true) : `\n${name}: ${quiet_prompt}`;
+                mesSendString += quietAppend;
+            }
+
             if (isInstruct && isBottom && tokens_already_generated === 0) {
                 const name = isImpersonate ? (is_pygmalion ? 'You' : name1) : name2;
                 mesSendString += formatInstructModePrompt(name, isImpersonate);
@@ -2151,11 +2167,6 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
             }
         }
 
-        // Add quiet generation prompt at depth 0
-        if (quiet_prompt && quiet_prompt.length) {
-            finalPromt += `\n${quiet_prompt}`;
-        }
-
         finalPromt = finalPromt.replace(/\r/gm, '');
 
         if (power_user.collapse_newlines) {
@@ -2270,11 +2281,14 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
             hideSwipeButtons();
             let getMessage = await streamingProcessor.generate();
 
+            // Cohee: Basically a dead-end code... (disabled by isStreamingEnabled)
+            // I wasn't able to get multigen working with real streaming
+            // consistently without screwing the interim prompting
             if (isMultigenEnabled()) {
-                tokens_already_generated += this_amount_gen; // add new gen amt to any prev gen counter..
+                tokens_already_generated += this_amount_gen;
                 message_already_generated += getMessage;
                 promptBias = '';
-                if (!streamingProcessor.isStopped && shouldContinueMultigen(getMessage)) {
+                if (!streamingProcessor.isStopped && shouldContinueMultigen(getMessage, isImpersonate)) {
                     streamingProcessor.isFinished = false;
                     runGenerate(getMessage);
                     console.log('returning to make generate again');
@@ -2306,6 +2320,8 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
 
             let this_mes_is_name;
             ({ this_mes_is_name, getMessage } = extractNameFromMessage(getMessage, force_name2, isImpersonate));
+
+            if (!isImpersonate) {
             if (tokens_already_generated == 0) {
                 console.log("New message");
                 ({ type, getMessage } = saveReply(type, getMessage, this_mes_is_name, title));
@@ -2314,8 +2330,13 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
                 console.log("Should append message");
                 ({ type, getMessage } = saveReply('append', getMessage, this_mes_is_name, title));
             }
+            } else {
+                let chunk = cleanUpMessage(message_already_generated, true);
+                let extract = extractNameFromMessage(chunk, force_name2, isImpersonate);
+                $('#send_textarea').val(extract.getMessage).trigger('input');
+            }
 
-            if (shouldContinueMultigen(getMessage)) {
+            if (shouldContinueMultigen(getMessage, isImpersonate)) {
                 hideSwipeButtons();
                 tokens_already_generated += this_amount_gen; // add new gen amt to any prev gen counter..
                 getMessage = message_already_generated;
@@ -2335,6 +2356,7 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
             if (getMessage.length > 0) {
                 if (isImpersonate) {
                     $('#send_textarea').val(getMessage).trigger('input');
+                    generatedPromtCache = "";
                 }
                 else if (type == 'quiet') {
                     resolve(getMessage);
@@ -2624,12 +2646,24 @@ function getGenerateUrl() {
     return generate_url;
 }
 
-function shouldContinueMultigen(getMessage) {
-    const nameString = is_pygmalion ? 'You:' : `${name1}:`;
-    return message_already_generated.indexOf(nameString) === -1 && //if there is no 'You:' in the response msg
-        message_already_generated.indexOf('<|endoftext|>') === -1 && //if there is no <endoftext> stamp in the response msg
-        tokens_already_generated < parseInt(amount_gen) && //if the gen'd msg is less than the max response length..
-        getMessage.length > 0; //if we actually have gen'd text at all...
+function shouldContinueMultigen(getMessage, isImpersonate) {
+    if (power_user.instruct.enabled && power_user.instruct.stop_sequence) {
+        if (message_already_generated.indexOf(power_user.instruct.stop_sequence) !== -1) {
+            return false;
+        }
+    }
+
+    // stopping name string
+    const nameString = isImpersonate ? `${name2}:` : (is_pygmalion ? 'You:' : `${name1}:`);
+    // if there is no 'You:' in the response msg
+    const doesNotContainName = message_already_generated.indexOf(nameString) === -1;
+    // if there is no <endoftext> stamp in the response msg
+    const isNotEndOfText = message_already_generated.indexOf('<|endoftext|>') === -1;
+    // if the gen'd msg is less than the max response length
+    const notReachedMax = tokens_already_generated < parseInt(amount_gen);
+    // if we actually have gen'd text at all
+    const msgHasText = getMessage.length > 0;
+    return doesNotContainName && isNotEndOfText && notReachedMax && msgHasText;
 }
 
 function extractNameFromMessage(getMessage, force_name2, isImpersonate) {
@@ -2745,6 +2779,12 @@ function cleanUpMessage(getMessage, isImpersonate) {
             getMessage = getMessage.substring(0, getMessage.indexOf(power_user.instruct.stop_sequence));
         }
     }
+    if (power_user.instruct.enabled && power_user.instruct.input_sequence && isImpersonate) {
+        getMessage = getMessage.replaceAll(power_user.instruct.input_sequence, '');
+    }
+    if (power_user.instruct.enabled && power_user.instruct.output_sequence && !isImpersonate) {
+        getMessage = getMessage.replaceAll(power_user.instruct.output_sequence, '');
+    }
     // clean-up group message from excessive generations
     if (selected_group) {
         getMessage = cleanGroupMessage(getMessage);
@@ -237,9 +237,17 @@ function getQuietPrompt(mode, trigger) {
 function processReply(str) {
     str = str.replaceAll('"', '')
     str = str.replaceAll('“', '')
-    str = str.replaceAll('\n', ' ')
+    str = str.replaceAll('\n', ', ')
     str = str.replace(/[^a-zA-Z0-9,:]+/g, ' ') // Replace everything except alphanumerics, commas and colons with spaces
     str = str.replace(/\s+/g, ' '); // Collapse multiple whitespaces into one
     str = str.trim();
+
+    str = str
+        .split(',') // list split by commas
+        .map(x => x.trim()) // trim each entry
+        .filter(x => x) // remove empty entries
+        .join(', '); // join it back with proper spacing
+
     return str;
 }
 
@@ -259,7 +267,7 @@ async function generatePicture(_, trigger) {
     const prompt = processReply(await new Promise(
         async function promptPromise(resolve, reject) {
             try {
-                await context.generate('quiet', { resolve, reject, quiet_prompt });
+                await context.generate('quiet', { resolve, reject, quiet_prompt, force_name2: true, });
             }
             catch {
                 reject();
@@ -269,6 +277,8 @@ async function generatePicture(_, trigger) {
     context.deactivateSendButtons();
     hideSwipeButtons();
 
+    console.log('Processed Stable Diffusion prompt:', prompt);
+
     const url = new URL(getApiUrl());
     url.pathname = '/api/image';
     const result = await fetch(url, {
@@ -295,7 +305,7 @@ async function generatePicture(_, trigger) {
             sendMessage(prompt, base64Image);
         }
     } catch (err) {
-        console.error(err);
+        console.trace(err);
         throw new Error('SD prompt text generation failed.')
     }
     finally {