Merge branch 'SillyTavern:staging' into staging
commit 125dc7d596
@@ -2081,18 +2081,26 @@
<div class="flex-container">
<div class="flex1">
<label for="instruct_input_sequence">
<span data-i18n="Input Sequence">Input Sequence</span>
<small data-i18n="Input Sequence">Input Sequence</small>
</label>
<div>
<textarea id="instruct_input_sequence" class="text_pole textarea_compact" type="text" maxlength="500" rows="1"></textarea>
<textarea id="instruct_input_sequence" class="text_pole textarea_compact" maxlength="500" rows="1"></textarea>
</div>
</div>
<div class="flex1">
<label for="instruct_output_sequence">
<span data-i18n="Output Sequence">Output Sequence</span>
<small data-i18n="Output Sequence">Output Sequence</small>
</label>
<div>
<textarea id="instruct_output_sequence" class="text_pole wide100p textarea_compact" type="text" maxlength="500" rows="1"></textarea>
<textarea id="instruct_output_sequence" class="text_pole wide100p textarea_compact" maxlength="500" rows="1"></textarea>
</div>
</div>
<div class="flex1">
<label for="instruct_last_output_sequence">
<small data-i18n="Last Sequence">Last Sequence</small>
</label>
<div>
<textarea id="instruct_last_output_sequence" class="text_pole wide100p textarea_compact" maxlength="500" rows="1"></textarea>
</div>
</div>
</div>

@@ -2102,7 +2110,7 @@
<small data-i18n="System Sequence">System Sequence</small>
</label>
<div>
<textarea id="instruct_system_sequence" class="text_pole textarea_compact" type="text" maxlength="500" rows="1"></textarea>
<textarea id="instruct_system_sequence" class="text_pole textarea_compact" maxlength="500" rows="1"></textarea>
</div>
</div>
<div class="flex1">

@@ -2110,7 +2118,7 @@
<small data-i18n="Stop Sequence">Stop Sequence</small>
</label>
<div>
<textarea id="instruct_stop_sequence" class="text_pole wide100p textarea_compact" type="text" maxlength="500" rows="1"></textarea>
<textarea id="instruct_stop_sequence" class="text_pole wide100p textarea_compact" maxlength="500" rows="1"></textarea>
</div>
</div>
<div class="flex1">

@@ -2118,7 +2126,7 @@
<small data-i18n="Separator">Separator</small>
</label>
<div>
<textarea id="instruct_separator_sequence" class="text_pole wide100p textarea_compact" type="text" maxlength="500" rows="1"></textarea>
<textarea id="instruct_separator_sequence" class="text_pole wide100p textarea_compact" maxlength="500" rows="1"></textarea>
</div>
</div>
</div>

@@ -2134,7 +2142,6 @@
</div>
</div>
<div name="ContextFormatting" class="flex1">
<h4><span data-i18n="Context Formatting">Context Formatting</span></h4>
<div>
<h4><span data-i18n="Tokenizer">Tokenizer</span>
<a href="https://docs.sillytavern.app/usage/core-concepts/advancedformatting/#tokenizer" class="notes-link" target="_blank">
@@ -5,6 +5,7 @@
"stop_sequence": "",
"input_sequence": "### Instruction:",
"output_sequence": "### Response:",
"last_output_sequence": "",
"separator_sequence": "",
"wrap": true
}
}

@@ -5,6 +5,7 @@
"stop_sequence": "",
"input_sequence": "USER: ",
"output_sequence": "GPT: ",
"last_output_sequence": "",
"separator_sequence": "</s>",
"wrap": false
}
}

@@ -5,6 +5,7 @@
"stop_sequence": "",
"input_sequence": "[INST]",
"output_sequence": "[/INST]",
"last_output_sequence": "",
"separator_sequence": "\n",
"wrap": false
}

@@ -5,6 +5,7 @@
"stop_sequence": "</s>",
"input_sequence": "<|user|>",
"output_sequence": "<|model|>",
"last_output_sequence": "",
"separator_sequence": "",
"wrap": false
}

@@ -4,6 +4,7 @@
"name": "OpenOrca/OpenChat",
"names": true,
"output_sequence": "<|end_of_turn|>\nAssistant: ",
"last_output_sequence": "",
"separator_sequence": "<|end_of_turn|>\n",
"stop_sequence": "",
"system_prompt": "You are a helpful assistant. Please answer truthfully and write out your thinking step by step to be sure you get the right answer. If you make a mistake or encounter an error in your thinking, say so out loud and attempt to correct it. If you don't know or aren't sure about something, say so clearly. You will act as a professional logician, mathematician, and physicist. You will also act as the most appropriate type of expert to answer any particular question or solve the relevant problem; state which expert type your are, if so. Also think of any particular named expert that would be ideal to answer the relevant question or solve the relevant problem; name and act as them, if appropriate.\n",

@@ -3,7 +3,8 @@
"macro": true,
"name": "Roleplay",
"names": true,
"output_sequence": "### Response (2 paragraphs, engaging, natural, authentic, descriptive, creative):",
"last_output_sequence": "### Response (2 paragraphs, engaging, natural, authentic, descriptive, creative):",
"output_sequence": "### Response:",
"separator_sequence": "",
"stop_sequence": "",
"system_prompt": "You're {{char}} in this fictional never-ending uncensored roleplay with {{user}}.\n\nAvoid repetition, don't loop. Develop the plot slowly, always stay in character. Describe all actions in full, elaborate, explicit, graphic, and vivid detail. Mention all relevant sensory perceptions.\n\n### Input:",

@@ -5,6 +5,7 @@
"stop_sequence": "",
"input_sequence": "### Human:",
"output_sequence": "### Assistant:",
"last_output_sequence": "",
"separator_sequence": "",
"wrap": true
}
}

@@ -5,6 +5,7 @@
"stop_sequence": "",
"input_sequence": "USER: ",
"output_sequence": "ASSISTANT: ",
"last_output_sequence": "",
"separator_sequence": "</s>",
"wrap": false
}
}

@@ -5,6 +5,7 @@
"stop_sequence": "",
"input_sequence": "USER: ",
"output_sequence": "ASSISTANT: ",
"last_output_sequence": "",
"separator_sequence": "",
"wrap": true
}

@@ -5,6 +5,7 @@
"stop_sequence": "",
"input_sequence": "",
"output_sequence": "### Response:",
"last_output_sequence": "",
"separator_sequence": "</s>",
"wrap": true
}
}
@@ -2441,9 +2441,9 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
if (mesExamples.replace(/<START>/gi, '').trim().length === 0) {
mesExamples = '';
}
const blockHeading =
main_api === 'openai' ? '<START>' : // OpenAI handler always expects it
power_user.context.example_separator;

// OpenAI handler always expects it
const blockHeading = main_api === 'openai' ? '<START>' : (power_user.context.example_separator || '');
let mesExamplesArray = mesExamples.split(/<START>/gi).slice(1).map(block => `${blockHeading}\n${block.trim()}\n`);

// First message in fresh 1-on-1 chat reacts to user/character settings changes
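Note: a minimal sketch of the example-splitting behavior introduced above, with hypothetical input values. For non-OpenAI APIs, each literal <START> marker is now replaced by the configured example separator, falling back to an empty string when the setting is unset:

// Hypothetical inputs illustrating the new blockHeading fallback
const mesExamples = '<START>\nAlice: hi\nBob: hello\n<START>\nAlice: bye\nBob: later';
const blockHeading = '***'; // e.g. power_user.context.example_separator, or '' when unset
const mesExamplesArray = mesExamples.split(/<START>/gi).slice(1)
    .map(block => `${blockHeading}\n${block.trim()}\n`);
// mesExamplesArray[0] === '***\nAlice: hi\nBob: hello\n'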
@@ -8,6 +8,11 @@ const MODULE_NAME = 'backgrounds';
const METADATA_KEY = 'custom_background';
const UPDATE_INTERVAL = 1000;

function forceSetBackground(background) {
saveBackgroundMetadata(background);
setCustomBackground();
}

async function moduleWorker() {
if (hasCustomBackground()) {
$('#unlock_background').show();

@@ -167,4 +172,5 @@ $(document).ready(function () {
registerSlashCommand('lockbg', onLockBackgroundClick, ['bglock'], " – locks a background for the currently selected chat", true, true);
registerSlashCommand('unlockbg', onUnlockBackgroundClick, ['bgunlock'], ' – unlocks a background for the currently selected chat', true, true);
registerSlashCommand('autobg', autoBackgroundCommand, ['bgauto'], ' – automatically changes the background based on the chat context using the AI request prompt', true, true);
window['forceSetBackground'] = forceSetBackground;
});
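Note: the window['forceSetBackground'] assignment above exposes the function as a global hook for other extensions. A minimal consumer sketch follows; the Stable Diffusion extension later in this commit uses this same pattern, and imgUrl here is a hypothetical value:

const imgUrl = 'url(data:image/png;base64,...)'; // hypothetical generated image
if ('forceSetBackground' in window) {
    window.forceSetBackground(imgUrl); // saves to chat metadata, then applies the background
} else {
    $('#bg_custom').css('background-image', imgUrl); // apply only, nothing is persisted
}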
@@ -40,6 +40,7 @@ const generationMode = {
NOW: 4,
FACE: 5,
FREE: 6,
BACKGROUND: 7,
}

const modeLabels = {

@@ -49,6 +50,7 @@ const modeLabels = {
[generationMode.SCENARIO]: 'Scenario ("The Whole Story")',
[generationMode.NOW]: 'Last Message',
[generationMode.RAW_LAST]: 'Raw Last Message',
[generationMode.BACKGROUND]: 'Background',
}

const triggerWords = {

@@ -58,6 +60,7 @@ const triggerWords = {
[generationMode.RAW_LAST]: ['raw_last'],
[generationMode.NOW]: ['last'],
[generationMode.FACE]: ['face'],
[generationMode.BACKGROUND]: ['background'],
}

const promptTemplates = {

@@ -94,6 +97,7 @@ const promptTemplates = {
'(location),(character list by gender),(primary action), (relative character position) POV, (character 1's description and actions), (character 2's description and actions)']`,

[generationMode.RAW_LAST]: "[Pause your roleplay and provide ONLY the last chat message string back to me verbatim. Do not write anything after the string. Do not roleplay at all in your response. Do not continue the roleplay story.]",
[generationMode.BACKGROUND]: "[Pause your roleplay and provide a detailed description of {{char}}'s surroundings in the form of a comma-delimited list of keywords and phrases. The list must include all of the following items in this order: location, time of day, weather, lighting, and any other relevant details. Do not include descriptions of characters and non-visual qualities such as names, personality, movements, scents, mental traits, or anything which could not be seen in a still photograph. Do not write in full sentences. Prefix your description with the phrase 'background,'. Ignore the rest of the story when crafting this description. Do not roleplay as {{user}} when writing this description, and do not attempt to continue the story.]",
}

const helpString = [

@@ -105,6 +109,7 @@ const helpString = [
`<li>${m(j(triggerWords[generationMode.SCENARIO]))} – visual recap of the whole chat scenario</li>`,
`<li>${m(j(triggerWords[generationMode.NOW]))} – visual recap of the last chat message</li>`,
`<li>${m(j(triggerWords[generationMode.RAW_LAST]))} – visual recap of the last chat message with no summary</li>`,
`<li>${m(j(triggerWords[generationMode.BACKGROUND]))} – generate a background for this chat based on the chat's context</li>`,
'</ul>',
`Anything else would trigger a "free mode" to make SD generate whatever you prompted.<Br>
example: '/sd apple tree' would generate a picture of an apple tree.`,

@@ -159,6 +164,13 @@ async function loadSettings() {
extension_settings.sd.prompts = promptTemplates;
}

// Insert missing templates
for (const [key, value] of Object.entries(promptTemplates)) {
if (extension_settings.sd.prompts[key] === undefined) {
extension_settings.sd.prompts[key] = value;
}
}

if (extension_settings.sd.character_prompts === undefined) {
extension_settings.sd.character_prompts = {};
}
@@ -554,9 +566,35 @@ async function generatePicture(_, trigger, message, callback) {
const context = getContext();

const prevSDHeight = extension_settings.sd.height;
if (generationType == generationMode.FACE) {
const prevSDWidth = extension_settings.sd.width;
const aspectRatio = extension_settings.sd.width / extension_settings.sd.height;

// Face images are always portrait (pun intended)
if (generationType == generationMode.FACE && aspectRatio >= 1) {
// Round to nearest multiple of 64
extension_settings.sd.height = Math.round(extension_settings.sd.height * 1.5 / 64) * 64;
extension_settings.sd.height = Math.round(extension_settings.sd.width * 1.5 / 64) * 64;
}

// Background images are always landscape
if (generationType == generationMode.BACKGROUND && aspectRatio <= 1) {
// Round to nearest multiple of 64
extension_settings.sd.width = Math.round(extension_settings.sd.height * 1.8 / 64) * 64;
const callbackOriginal = callback;
callback = function (prompt, base64Image) {
const imgUrl = `url(${base64Image})`;
if ('forceSetBackground' in window) {
forceSetBackground(imgUrl);
} else {
toastr.info('Background image will not be preserved.', '"Chat backgrounds" extension is disabled.');
$('#bg_custom').css('background-image', imgUrl);
}

if (typeof callbackOriginal === 'function') {
callbackOriginal(prompt, base64Image);
} else {
sendMessage(prompt, base64Image);
}
}
}

try {
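Note: a worked example of the dimension rounding above, assuming a hypothetical 512x512 base resolution (aspectRatio = 1, so both branches are eligible for their respective modes):

// FACE: portrait, height derived from the width per the updated line above
Math.round(512 * 1.5 / 64) * 64; // 768 -> 512x768
// BACKGROUND: landscape, width derived from the height
Math.round(512 * 1.8 / 64) * 64; // 896 -> 896x512 (512 * 1.8 / 64 = 14.4, rounds to 14)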
@@ -566,13 +604,14 @@ async function generatePicture(_, trigger, message, callback) {
context.deactivateSendButtons();
hideSwipeButtons();

await sendGenerationRequest(prompt, callback);
await sendGenerationRequest(generationType, prompt, callback);
} catch (err) {
console.trace(err);
throw new Error('SD prompt text generation failed.')
}
finally {
extension_settings.sd.height = prevSDHeight;
extension_settings.sd.width = prevSDWidth;
context.activateSendButtons();
showSwipeButtons();
}
@@ -605,16 +644,20 @@ async function generatePrompt(quiet_prompt) {
return processReply(reply);
}

async function sendGenerationRequest(prompt, callback) {
async function sendGenerationRequest(generationType, prompt, callback) {
const prefix = generationType !== generationMode.BACKGROUND
? combinePrefixes(extension_settings.sd.prompt_prefix, getCharacterPrefix())
: extension_settings.sd.prompt_prefix;

if (extension_settings.sd.horde) {
await generateHordeImage(prompt, callback);
await generateHordeImage(prompt, prefix, callback);
} else {
await generateExtrasImage(prompt, callback);
await generateExtrasImage(prompt, prefix, callback);
}
}

async function generateExtrasImage(prompt, callback) {
console.log(extension_settings.sd);
async function generateExtrasImage(prompt, prefix, callback) {
console.debug(extension_settings.sd);
const url = new URL(getApiUrl());
url.pathname = '/api/image';
const result = await doExtrasFetch(url, {
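Note: with the new signature, the prompt prefix is chosen per generation type; illustrative call sites (names as in the diff, behavior as described by the ternary above, rationale is an assumption):

// Ordinary generations combine the global prefix with the character prefix...
await sendGenerationRequest(generationMode.FACE, prompt, callback);
// ...while background generations use only the global prompt_prefix,
// presumably so character appearance tags do not leak into scenery prompts.
await sendGenerationRequest(generationMode.BACKGROUND, prompt, callback);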
@@ -627,7 +670,7 @@ async function generateExtrasImage(prompt, callback) {
scale: extension_settings.sd.scale,
width: extension_settings.sd.width,
height: extension_settings.sd.height,
prompt_prefix: combinePrefixes(extension_settings.sd.prompt_prefix, getCharacterPrefix()),
prompt_prefix: prefix,
negative_prompt: extension_settings.sd.negative_prompt,
restore_faces: !!extension_settings.sd.restore_faces,
enable_hr: !!extension_settings.sd.enable_hr,

@@ -644,7 +687,7 @@ async function generateExtrasImage(prompt, callback) {
}
}

async function generateHordeImage(prompt, callback) {
async function generateHordeImage(prompt, prefix, callback) {
const result = await fetch('/horde_generateimage', {
method: 'POST',
headers: getRequestHeaders(),

@@ -655,7 +698,7 @@ async function generateHordeImage(prompt, callback) {
scale: extension_settings.sd.scale,
width: extension_settings.sd.width,
height: extension_settings.sd.height,
prompt_prefix: combinePrefixes(extension_settings.sd.prompt_prefix, getCharacterPrefix()),
prompt_prefix: prefix,
negative_prompt: extension_settings.sd.negative_prompt,
model: extension_settings.sd.model,
nsfw: extension_settings.sd.horde_nsfw,
@@ -680,6 +723,7 @@ async function sendMessage(prompt, image) {
name: context.groupId ? systemUserName : context.name2,
is_system: context.groupId ? true : false,
is_user: false,
is_system: true,
is_name: true,
send_date: timestampToMoment(Date.now()).format('LL LT'),
mes: context.groupId ? p(messageText) : messageText,

@@ -715,6 +759,7 @@ function addSDGenButtons() {
<li class="list-group-item" id="sd_world" data-value="world">The Whole Story</li>
<li class="list-group-item" id="sd_last" data-value="last">The Last Message</li>
<li class="list-group-item" id="sd_raw_last" data-value="raw_last">Raw Last Message</li>
<li class="list-group-item" id="sd_background" data-value="background">Background</li>
</ul>
</div>`;
@@ -797,7 +842,7 @@ async function sdMessageButton(e) {
message.extra.title = prompt;

console.log('Regenerating an image, using existing prompt:', prompt);
await sendGenerationRequest(prompt, saveGeneratedImage);
await sendGenerationRequest(generationMode.FREE, prompt, saveGeneratedImage);
}
else {
console.log("doing /sd raw last");
@@ -828,36 +873,22 @@ async function sdMessageButton(e) {
};

$("#sd_dropdown [id]").on("click", function () {
var id = $(this).attr("id");
if (id == "sd_you") {
console.log("doing /sd you");
generatePicture('sd', 'you');
}
const id = $(this).attr("id");
const idParamMap = {
"sd_you": "you",
"sd_face": "face",
"sd_me": "me",
"sd_world": "scene",
"sd_last": "last",
"sd_raw_last": "raw_last",
"sd_background": "background"
};

else if (id == "sd_face") {
console.log("doing /sd face");
generatePicture('sd', 'face');
const param = idParamMap[id];

}

else if (id == "sd_me") {
console.log("doing /sd me");
generatePicture('sd', 'me');
}

else if (id == "sd_world") {
console.log("doing /sd scene");
generatePicture('sd', 'scene');
}

else if (id == "sd_last") {
console.log("doing /sd last");
generatePicture('sd', 'last');
}

else if (id == "sd_raw_last") {
console.log("doing /sd raw last");
generatePicture('sd', 'raw_last');
if (param) {
console.log("doing /sd " + param)
generatePicture('sd', param);
}
});
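Note: the else-if chain is replaced by a lookup table; a minimal sketch of the resulting dispatch (values come straight from idParamMap above, and unknown ids simply fall through without a generation):

const param = idParamMap['sd_background']; // 'background'
if (param) {
    generatePicture('sd', param); // equivalent to typing the /sd background command
}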
@@ -176,6 +176,7 @@ let power_user = {
stop_sequence: '',
input_sequence: '### Instruction:',
output_sequence: '### Response:',
last_output_sequence: '',
preset: 'Alpaca',
separator_sequence: '',
macro: false,

@@ -942,6 +943,7 @@ function loadInstructMode() {
{ id: "instruct_names", property: "names", isCheckbox: true },
{ id: "instruct_macro", property: "macro", isCheckbox: true },
{ id: "instruct_names_force_groups", property: "names_force_groups", isCheckbox: true },
{ id: "instruct_last_output_sequence", property: "last_output_sequence", isCheckbox: false },
];

if (power_user.instruct.names_force_groups === undefined) {
@@ -1087,7 +1089,8 @@ export function formatInstructStoryString(story, systemPrompt) {

export function formatInstructModePrompt(name, isImpersonate, promptBias, name1, name2) {
const includeNames = power_user.instruct.names || (!!selected_group && power_user.instruct.names_force_groups);
let sequence = isImpersonate ? power_user.instruct.input_sequence : power_user.instruct.output_sequence;
const getOutputSequence = () => power_user.instruct.last_output_sequence || power_user.instruct.output_sequence;
let sequence = isImpersonate ? power_user.instruct.input_sequence : getOutputSequence();

if (power_user.instruct.macro) {
sequence = substituteParams(sequence, name1, name2);
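Note: a minimal sketch of the fallback introduced above, using hypothetical preset values; an empty last_output_sequence keeps the old behavior, while a non-empty one overrides the sequence used for the final output turn:

const instruct = { output_sequence: '### Response:', last_output_sequence: '' };
const getOutputSequence = () => instruct.last_output_sequence || instruct.output_sequence;
getOutputSequence(); // '### Response:' (empty string is falsy, so it falls back)
instruct.last_output_sequence = '### Response (final):'; // hypothetical override
getOutputSequence(); // '### Response (final):'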
@@ -604,7 +604,8 @@ app.post("/generate_textgenerationwebui", jsonParser, async function (request, r

websocket.on('open', async function () {
console.log('WebSocket opened');
websocket.send(JSON.stringify(request.body));
const combined_args = Object.assign(request.body.use_mancer ? get_mancer_headers() : {}, request.body);
websocket.send(JSON.stringify(combined_args));
});

websocket.on('close', (code, buffer) => {
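Note: a minimal sketch of the Object.assign merge above, with hypothetical header and body values; because request.body is the later source, its keys win on any collision with the Mancer headers:

const combined_args = Object.assign(
    { 'X-API-KEY': 'mancer-key' },            // hypothetical get_mancer_headers() result
    { prompt: 'Hello', max_new_tokens: 200 }, // request.body
);
// -> { 'X-API-KEY': 'mancer-key', prompt: 'Hello', max_new_tokens: 200 }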