mirror of https://github.com/SillyTavern/SillyTavern.git
synced 2025-06-05 21:59:27 +02:00

Merge branch 'staging' of https://github.com/joenunezb/SillyTavern into optimize/improve-search
@@ -65,6 +65,11 @@ const parse_derivation = derivation => (typeof derivation === 'string') ? {
 } : derivation;
 
 export async function deriveTemplatesFromChatTemplate(chat_template, hash) {
+    if (chat_template.trim() === '') {
+        console.log('Missing chat template.');
+        return null;
+    }
+
     if (hash in hash_derivations) {
         return parse_derivation(hash_derivations[hash]);
     }
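The added guard treats a whitespace-only chat template the same as an empty one: it logs and returns null before consulting the hash_derivations table. A standalone sketch of the guard's behavior (the helper name is illustrative):

    const isMissingTemplate = (chatTemplate) => chatTemplate.trim() === '';
    isMissingTemplate('');              // true
    isMissingTemplate('   \n\t');       // true — whitespace-only counts as missing
    isMissingTemplate('{{messages}}');  // false — proceeds to the hash lookup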
@@ -2373,6 +2373,7 @@ function ensureSelectionExists(setting, selector) {
  * @param {string} [message] Chat message
  * @param {function} [callback] Callback function
  * @returns {Promise<string|undefined>} Image path
+ * @throws {Error} If the prompt or image generation fails
  */
 async function generatePicture(initiator, args, trigger, message, callback) {
     if (!trigger || trigger.trim().length === 0) {
@@ -2391,7 +2392,7 @@ async function generatePicture(initiator, args, trigger, message, callback) {
     trigger = trigger.trim();
     const generationType = getGenerationType(trigger);
     const generationTypeKey = Object.keys(generationMode).find(key => generationMode[key] === generationType);
-    console.log(`Generation mode ${generationTypeKey} triggered with "${trigger}"`);
+    console.log(`Image generation mode ${generationTypeKey} triggered with "${trigger}"`);
 
     const quietPrompt = getQuietPrompt(generationType, trigger);
     const context = getContext();
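`generationTypeKey` recovers the enum key from its value by scanning `generationMode`'s keys, purely for logging. A standalone sketch of that reverse lookup (the keys and values here are illustrative, not the real `generationMode` table):

    const generationMode = { CHARACTER: 0, USER: 1, SCENARIO: 2 };
    const generationType = 1;
    const key = Object.keys(generationMode).find(k => generationMode[k] === generationType);
    // key === 'USER'; `find` yields undefined when no entry matches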
@@ -2428,6 +2429,8 @@ async function generatePicture(initiator, args, trigger, message, callback) {
 
     try {
+        const combineNegatives = (prefix) => { negativePromptPrefix = combinePrefixes(negativePromptPrefix, prefix); };
+
         // generate the text prompt for the image
         const prompt = await getPrompt(generationType, message, trigger, quietPrompt, combineNegatives);
         console.log('Processed image prompt:', prompt);
 
@@ -2438,11 +2441,16 @@ async function generatePicture(initiator, args, trigger, message, callback) {
             args._abortController.addEventListener('abort', stopListener);
         }
+
         // generate the image
         imagePath = await sendGenerationRequest(generationType, prompt, negativePromptPrefix, characterName, callback, initiator, abortController.signal);
     } catch (err) {
         console.trace(err);
-        toastr.error('SD prompt text generation failed. Reason: ' + err, 'Image Generation');
-        throw new Error('SD prompt text generation failed. Reason: ' + err);
+        // errors here are most likely due to text generation failure
+        // sendGenerationRequest mostly deals with its own errors
+        const reason = err.error?.message || err.message || 'Unknown error';
+        const errorText = 'SD prompt text generation failed. ' + reason;
+        toastr.error(errorText, 'Image Generation');
+        throw new Error(errorText);
     }
     finally {
         $(stopButton).hide();
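The new catch block derives a readable reason instead of string-concatenating the raw error object. A standalone sketch of the fallback chain (the error shapes are illustrative):

    const reasonOf = (err) => err.error?.message || err.message || 'Unknown error';
    reasonOf({ error: { message: 'quota exceeded' } }); // 'quota exceeded' — API-style error body
    reasonOf(new Error('fetch failed'));                // 'fetch failed'   — native Error
    reasonOf({});                                       // 'Unknown error'  — anything else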
@@ -2513,7 +2521,7 @@ function restoreOriginalDimensions(savedParams) {
  */
 async function getPrompt(generationType, message, trigger, quietPrompt, combineNegatives) {
     let prompt;
-
+    console.log('getPrompt: Generation mode', generationType, 'triggered with', trigger);
     switch (generationType) {
         case generationMode.RAW_LAST:
             prompt = message || getRawLastMessage();
@@ -2729,7 +2737,7 @@ async function sendGenerationRequest(generationType, prompt, additionalNegativeP
             throw new Error('Endpoint did not return image data.');
         }
     } catch (err) {
-        console.error(err);
+        console.error('Image generation request error: ', err);
         toastr.error('Image generation failed. Please try again.' + '\n\n' + String(err), 'Image Generation');
         return;
     }
@@ -181,6 +181,14 @@ function setContextSizePreview() {
     }
 }
 
+/** Generates text using the Horde API.
+ * @param {string} prompt
+ * @param params
+ * @param signal
+ * @param reportProgress
+ * @returns {Promise<{text: *, workerName: string}>}
+ * @throws {Error}
+ */
 async function generateHorde(prompt, params, signal, reportProgress) {
     validateHordeModel();
     delete params.prompt;
@@ -99,7 +99,6 @@ const default_wi_format = '{0}';
 const default_new_chat_prompt = '[Start a new Chat]';
 const default_new_group_chat_prompt = '[Start a new group chat. Group members: {{group}}]';
 const default_new_example_chat_prompt = '[Example Chat]';
-const default_claude_human_sysprompt_message = 'Let\'s get started. Please generate your response based on the information and instructions provided above.';
 const default_continue_nudge_prompt = '[Continue the following message. Do not include ANY parts of the original message. Use capitalization and punctuation as if your reply is a part of the original message: {{lastChatMessage}}]';
 const default_bias = 'Default (none)';
 const default_personality_format = '[{{char}}\'s personality: {{personality}}]';
@@ -276,7 +275,6 @@ const default_settings = {
     proxy_password: '',
     assistant_prefill: '',
     assistant_impersonation: '',
-    human_sysprompt_message: default_claude_human_sysprompt_message,
     claude_use_sysprompt: false,
     use_makersuite_sysprompt: true,
     use_alt_scale: false,
@@ -353,7 +351,6 @@ const oai_settings = {
     proxy_password: '',
     assistant_prefill: '',
     assistant_impersonation: '',
-    human_sysprompt_message: default_claude_human_sysprompt_message,
     claude_use_sysprompt: false,
     use_makersuite_sysprompt: true,
     use_alt_scale: false,
@@ -1313,6 +1310,11 @@ export async function prepareOpenAIMessages({
     return [chat, promptManager.tokenHandler.counts];
 }
 
+/**
+ * Handles errors during streaming requests.
+ * @param {Response} response
+ * @param {string} decoded - response text or decoded stream data
+ */
 function tryParseStreamingError(response, decoded) {
     try {
         const data = JSON.parse(decoded);
@@ -1324,6 +1326,9 @@ function tryParseStreamingError(response, decoded) {
         checkQuotaError(data);
         checkModerationError(data);
 
+        // these do not throw correctly (equiv to Error("[object Object]"))
+        // if trying to fix "[object Object]" displayed to users, start here
+
         if (data.error) {
             toastr.error(data.error.message || response.statusText, 'Chat Completion API');
             throw new Error(data);
@@ -1339,15 +1344,22 @@ function tryParseStreamingError(response, decoded) {
     }
 }
 
-async function checkQuotaError(data) {
-    const errorText = await renderTemplateAsync('quotaError');
+/**
+ * Checks if the response contains a quota error and displays a popup if it does.
+ * @param data
+ * @returns {void}
+ * @throws {object} - response JSON
+ */
+function checkQuotaError(data) {
+    if (!data) {
+        return;
+    }
 
     if (data.quota_error) {
-        callPopup(errorText, 'text');
+        renderTemplateAsync('quotaError').then((html) => Popup.show.text('Quota Error', html));
+
+        // this does not throw correctly (equiv to Error("[object Object]"))
+        // if trying to fix "[object Object]" displayed to users, start here
         throw new Error(data);
     }
 }
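The in-code comments flag why `throw new Error(data)` is wrong: the Error constructor stringifies its argument, so a plain object surfaces to users as "[object Object]". A minimal demonstration:

    const data = { quota_error: true, error: { message: 'Quota exceeded' } };
    new Error(data).message;               // '[object Object]'
    new Error(data.error.message).message; // 'Quota exceeded' — one possible fix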
@@ -1766,6 +1778,15 @@ async function sendAltScaleRequest(messages, logit_bias, signal, type) {
     return data.output;
 }
 
+/**
+ * Send a chat completion request to backend
+ * @param {string} type (impersonate, quiet, continue, etc)
+ * @param {Array} messages
+ * @param {AbortSignal?} signal
+ * @returns {Promise<unknown>}
+ * @throws {Error}
+ */
+
 async function sendOpenAIRequest(type, messages, signal) {
     // Provide default abort signal
     if (!signal) {
@@ -1868,7 +1889,6 @@ async function sendOpenAIRequest(type, messages, signal) {
         generate_data['top_k'] = Number(oai_settings.top_k_openai);
         generate_data['claude_use_sysprompt'] = oai_settings.claude_use_sysprompt;
         generate_data['stop'] = getCustomStoppingStrings(); // Claude shouldn't have limits on stop strings.
-        generate_data['human_sysprompt_message'] = substituteParams(oai_settings.human_sysprompt_message);
         // Don't add a prefill on quiet gens (summarization) and when using continue prefill.
         if (!isQuiet && !(isContinue && oai_settings.continue_prefill)) {
             generate_data['assistant_prefill'] = isImpersonate ? substituteParams(oai_settings.assistant_impersonation) : substituteParams(oai_settings.assistant_prefill);
@@ -2028,12 +2048,13 @@ async function sendOpenAIRequest(type, messages, signal) {
     else {
         const data = await response.json();
 
-        await checkQuotaError(data);
+        checkQuotaError(data);
         checkModerationError(data);
 
         if (data.error) {
-            toastr.error(data.error.message || response.statusText, t`API returned an error`);
-            throw new Error(data);
+            const message = data.error.message || response.statusText || t`Unknown error`;
+            toastr.error(message, t`API returned an error`);
+            throw new Error(message);
         }
 
         if (type !== 'quiet') {
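Since `checkQuotaError` is no longer async, the call site drops its `await`: the popup template renders fire-and-forget while the quota throw happens synchronously. A sketch of that ordering (the thrown message is simplified here; the actual code still throws the data object):

    function check(data) {
        if (!data?.quota_error) return;
        renderTemplateAsync('quotaError').then((html) => Popup.show.text('Quota Error', html)); // not awaited
        throw new Error('Quota exceeded'); // thrown immediately, before the popup resolves
    }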
@@ -3005,7 +3026,6 @@ function loadOpenAISettings(data, settings) {
     oai_settings.proxy_password = settings.proxy_password ?? default_settings.proxy_password;
     oai_settings.assistant_prefill = settings.assistant_prefill ?? default_settings.assistant_prefill;
     oai_settings.assistant_impersonation = settings.assistant_impersonation ?? default_settings.assistant_impersonation;
-    oai_settings.human_sysprompt_message = settings.human_sysprompt_message ?? default_settings.human_sysprompt_message;
     oai_settings.image_inlining = settings.image_inlining ?? default_settings.image_inlining;
     oai_settings.inline_image_quality = settings.inline_image_quality ?? default_settings.inline_image_quality;
     oai_settings.bypass_status_check = settings.bypass_status_check ?? default_settings.bypass_status_check;
@@ -3045,7 +3065,6 @@ function loadOpenAISettings(data, settings) {
     $('#openai_proxy_password').val(oai_settings.proxy_password);
     $('#claude_assistant_prefill').val(oai_settings.assistant_prefill);
     $('#claude_assistant_impersonation').val(oai_settings.assistant_impersonation);
-    $('#claude_human_sysprompt_textarea').val(oai_settings.human_sysprompt_message);
     $('#openai_image_inlining').prop('checked', oai_settings.image_inlining);
     $('#openai_bypass_status_check').prop('checked', oai_settings.bypass_status_check);
 
@@ -3375,7 +3394,6 @@ async function saveOpenAIPreset(name, settings, triggerUi = true) {
         show_external_models: settings.show_external_models,
         assistant_prefill: settings.assistant_prefill,
         assistant_impersonation: settings.assistant_impersonation,
-        human_sysprompt_message: settings.human_sysprompt_message,
         claude_use_sysprompt: settings.claude_use_sysprompt,
         use_makersuite_sysprompt: settings.use_makersuite_sysprompt,
         use_alt_scale: settings.use_alt_scale,
@@ -3800,7 +3818,6 @@ function onSettingsPresetChange() {
         proxy_password: ['#openai_proxy_password', 'proxy_password', false],
         assistant_prefill: ['#claude_assistant_prefill', 'assistant_prefill', false],
         assistant_impersonation: ['#claude_assistant_impersonation', 'assistant_impersonation', false],
-        human_sysprompt_message: ['#claude_human_sysprompt_textarea', 'human_sysprompt_message', false],
         claude_use_sysprompt: ['#claude_use_sysprompt', 'claude_use_sysprompt', true],
         use_makersuite_sysprompt: ['#use_makersuite_sysprompt', 'use_makersuite_sysprompt', true],
         use_alt_scale: ['#use_alt_scale', 'use_alt_scale', true],
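The surviving `??` assignments in loadOpenAISettings matter for values like empty strings: nullish coalescing only falls back on null/undefined, so a deliberately blank setting is preserved. With illustrative values:

    const settings = { assistant_prefill: '' };
    const default_settings = { assistant_prefill: 'some default' };
    settings.assistant_prefill ?? default_settings.assistant_prefill; // '' — kept, unlike with ||
    settings.missing_key ?? 'fallback';                               // 'fallback'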
@@ -4652,10 +4669,6 @@ function toggleChatCompletionForms() {
         const validSources = $(this).data('source').split(',');
         $(this).toggle(validSources.includes(oai_settings.chat_completion_source));
     });
-
-    if (chat_completion_sources.CLAUDE == oai_settings.chat_completion_source) {
-        $('#claude_human_sysprompt_message_block').toggle(oai_settings.claude_use_sysprompt);
-    }
 }
 
 async function testApiConnection() {
@@ -5011,7 +5024,6 @@ export function initOpenAI() {
 
     $('#claude_use_sysprompt').on('change', function () {
        oai_settings.claude_use_sysprompt = !!$('#claude_use_sysprompt').prop('checked');
-        $('#claude_human_sysprompt_message_block').toggle(oai_settings.claude_use_sysprompt);
         saveSettingsDebounced();
     });
 
@@ -5088,12 +5100,6 @@ export function initOpenAI() {
         saveSettingsDebounced();
     });
 
-    $('#claude_human_sysprompt_message_restore').on('click', function () {
-        oai_settings.human_sysprompt_message = default_claude_human_sysprompt_message;
-        $('#claude_human_sysprompt_textarea').val(oai_settings.human_sysprompt_message);
-        saveSettingsDebounced();
-    });
-
     $('#newgroupchat_prompt_restore').on('click', function () {
         oai_settings.new_group_chat_prompt = default_new_group_chat_prompt;
         $('#newgroupchat_prompt_textarea').val(oai_settings.new_group_chat_prompt);
@@ -5185,11 +5191,6 @@ export function initOpenAI() {
         saveSettingsDebounced();
     });
 
-    $('#claude_human_sysprompt_textarea').on('input', function () {
-        oai_settings.human_sysprompt_message = String($('#claude_human_sysprompt_textarea').val());
-        saveSettingsDebounced();
-    });
-
     $('#openrouter_use_fallback').on('input', function () {
         oai_settings.openrouter_use_fallback = !!$(this).prop('checked');
         saveSettingsDebounced();
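The removed handlers followed the same bind-update-save pattern as their surviving neighbors: each control updates its `oai_settings` field and schedules a debounced write. A generic sketch of the pattern (the selector and field are illustrative):

    $('#some_checkbox').on('change', function () {
        oai_settings.some_flag = !!$(this).prop('checked'); // coerce to a real boolean
        saveSettingsDebounced();                            // batches writes to the settings file
    });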
@@ -658,6 +658,10 @@ async function CreateZenSliders(elmnt) {
         numSteps = 50;
         decimals = 1;
     }
+    if (sliderID == 'nsigma') {
+        numSteps = 50;
+        decimals = 1;
+    }
     //customize steps
     if (sliderID == 'mirostat_mode_textgenerationwebui' ||
         sliderID == 'mirostat_mode_kobold') {
@@ -702,6 +706,7 @@ async function CreateZenSliders(elmnt) {
         sliderID == 'penalty_alpha_textgenerationwebui' ||
         sliderID == 'length_penalty_textgenerationwebui' ||
         sliderID == 'epsilon_cutoff_textgenerationwebui' ||
+        sliderID == 'nsigma' ||
         sliderID == 'rep_pen_range' ||
         sliderID == 'eta_cutoff_textgenerationwebui' ||
         sliderID == 'top_a_textgenerationwebui' ||
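The new `nsigma` branch duplicates the step/decimal values of the branch directly above it; the two could be collapsed into one condition. A sketch, assuming the preceding branch keys on some other slider ID (not visible in this hunk):

    if (sliderID == 'some_other_slider' || sliderID == 'nsigma') { // first ID is a placeholder
        numSteps = 50;
        decimals = 1;
    }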
@@ -1,5 +1,4 @@
-import { escapeRegex } from '../utils.js';
 import { SlashCommand } from './SlashCommand.js';
 import { SlashCommandParser } from './SlashCommandParser.js';
 
 export class SlashCommandBrowser {
@@ -30,7 +29,7 @@ export class SlashCommandBrowser {
             this.details?.remove();
             this.details = null;
             let query = inp.value.trim();
-            if (query.slice(-1) == '"' && !/(?:^|\s+)"/.test(query)) {
+            if (query.slice(-1) === '"' && !/(?:^|\s+)"/.test(query)) {
                 query = `"${query}`;
             }
             let fuzzyList = [];
@@ -59,7 +58,7 @@ export class SlashCommandBrowser {
                     cmd.helpString,
                 ];
                 const find = ()=>targets.find(t=>(fuzzyList.find(f=>f.test(t)) ?? quotedList.find(q=>t.includes(q))) !== undefined) !== undefined;
-                if (fuzzyList.length + quotedList.length == 0 || find()) {
+                if (fuzzyList.length + quotedList.length === 0 || find()) {
                     this.itemMap[cmd.name].classList.remove('isFiltered');
                 } else {
                     this.itemMap[cmd.name].classList.add('isFiltered');
@@ -78,7 +77,7 @@ export class SlashCommandBrowser {
         list.classList.add('autoComplete');
         this.cmdList = Object
             .keys(SlashCommandParser.commands)
-            .filter(key => SlashCommandParser.commands[key].name == key) // exclude aliases
+            .filter(key => SlashCommandParser.commands[key].name === key) // exclude aliases
             .sort((a, b) => a.toLowerCase().localeCompare(b.toLowerCase()))
             .map(key => SlashCommandParser.commands[key])
         ;
@@ -97,7 +96,7 @@ export class SlashCommandBrowser {
                     }
                 }
             }
-            if (this.details != details) {
+            if (this.details !== details) {
                 Array.from(list.querySelectorAll('.selected')).forEach(it=>it.classList.remove('selected'));
                 item.classList.add('selected');
                 this.details?.remove();
@@ -124,7 +123,7 @@ export class SlashCommandBrowser {
         parent.append(this.dom);
 
         this.mo = new MutationObserver(muts=>{
-            if (muts.find(mut=>Array.from(mut.removedNodes).find(it=>it == this.dom || it.contains(this.dom)))) {
+            if (muts.find(mut=>Array.from(mut.removedNodes).find(it=>it === this.dom || it.contains(this.dom)))) {
                 this.mo.disconnect();
                 window.removeEventListener('keydown', boundHandler);
             }
@@ -136,7 +135,7 @@ export class SlashCommandBrowser {
     }
 
     handleKeyDown(evt) {
-        if (!evt.shiftKey && !evt.altKey && evt.ctrlKey && evt.key.toLowerCase() == 'f') {
+        if (!evt.shiftKey && !evt.altKey && evt.ctrlKey && evt.key.toLowerCase() === 'f') {
            if (!this.dom.closest('body')) return;
             if (this.dom.closest('.mes') && !this.dom.closest('.last_mes')) return;
             evt.preventDefault();
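All the SlashCommandBrowser changes swap loose `==`/`!=` for strict `===`/`!==`, which skips type coercion. For same-type operands the result is identical, so these are safe refactors; the difference only shows across types:

    0 == '';            // true  — '' coerces to 0
    0 === '';           // false
    null == undefined;  // true
    null === undefined; // false
    'f' == 'f';         // true either way for same-type operands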
@@ -193,6 +193,7 @@ const settings = {
     openrouter_allow_fallbacks: true,
     xtc_threshold: 0.1,
     xtc_probability: 0,
+    nsigma: 0.0,
     featherless_model: '',
 };
 
@@ -265,6 +266,7 @@ export const setting_names = [
     'openrouter_allow_fallbacks',
     'xtc_threshold',
     'xtc_probability',
+    'nsigma',
 ];
 
 const DYNATEMP_BLOCK = document.getElementById('dynatemp_block_ooba');
@@ -880,6 +882,13 @@ function setSettingByName(setting, value, trigger) {
     }
 }
 
+/**
+ * Sends a streaming request for textgenerationwebui.
+ * @param generate_data
+ * @param signal
+ * @returns {Promise<(function(): AsyncGenerator<{swipes: [], text: string, toolCalls: [], logprobs: {token: string, topLogprobs: Candidate[]}|null}, void, *>)|*>}
+ * @throws {Error} - If the response status is not OK, or from within the generator
+ */
 async function generateTextGenWithStreaming(generate_data, signal) {
     generate_data.stream = true;
 
@@ -995,6 +1004,7 @@ export function parseTabbyLogprobs(data) {
  * @param {Response} response - Response from the server.
  * @param {string} decoded - Decoded response body.
  * @returns {void} Nothing.
+ * @throws {Error} If the response contains an error message, throws Error with the message.
  */
 function tryParseStreamingError(response, decoded) {
     let data = {};
@@ -1178,6 +1188,7 @@ export function getTextGenGenerationData(finalPrompt, maxTokens, isImpersonate,
         'sampler_order': settings.type === textgen_types.KOBOLDCPP ? settings.sampler_order : undefined,
         'xtc_threshold': settings.xtc_threshold,
         'xtc_probability': settings.xtc_probability,
+        'nsigma': settings.nsigma,
     };
     const nonAphroditeParams = {
         'rep_pen': settings.rep_pen,
@@ -1245,7 +1256,9 @@ export function getTextGenGenerationData(finalPrompt, maxTokens, isImpersonate,
         'dynatemp_exponent': dynatemp ? settings.dynatemp_exponent : undefined,
         'xtc_threshold': settings.xtc_threshold,
         'xtc_probability': settings.xtc_probability,
+        'nsigma': settings.nsigma,
         'custom_token_bans': toIntArray(banned_tokens),
         'no_repeat_ngram_size': settings.no_repeat_ngram_size,
     };
 
     if (settings.type === OPENROUTER) {
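Taken together, the textgen-settings hunks show the three places a new sampler knob such as `nsigma` has to be wired in this file: a default value, a registered setting name, and an entry in each backend's request payload. A condensed sketch:

    const settings = { nsigma: 0.0 };                 // 1. default value
    const setting_names = ['nsigma'];                 // 2. name bound to the UI and persisted
    const payload = { 'nsigma': settings.nsigma };    // 3. sent with each generation request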