Merge pull request #3099 from ceruleandeep/fix/connRefusedErrMsg

Fix/conn refused err msg
This commit is contained in:
Cohee 2024-11-24 02:32:57 +02:00 committed by GitHub
commit 0afbd95d09
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
7 changed files with 103 additions and 32 deletions

View File

@ -2705,8 +2705,7 @@ export async function generateQuietPrompt(quiet_prompt, quietToLoud, skipWIAN, q
quietName: quietName,
};
originalResponseLength = responseLengthCustomized ? saveResponseLength(main_api, responseLength) : -1;
const generateFinished = await Generate('quiet', options);
return generateFinished;
return await Generate('quiet', options);
} finally {
if (responseLengthCustomized) {
restoreResponseLength(main_api, originalResponseLength);
@ -3361,9 +3360,9 @@ export async function generateRaw(prompt, api, instructOverride, quietToLoud, sy
let data = {};
if (api == 'koboldhorde') {
if (api === 'koboldhorde') {
data = await generateHorde(prompt, generateData, abortController.signal, false);
} else if (api == 'openai') {
} else if (api === 'openai') {
data = await sendOpenAIRequest('quiet', generateData, abortController.signal);
} else {
const generateUrl = getGenerateUrl(api);
@ -3376,13 +3375,15 @@ export async function generateRaw(prompt, api, instructOverride, quietToLoud, sy
});
if (!response.ok) {
const error = await response.json();
throw error;
throw await response.json();
}
data = await response.json();
}
// should only happen for text completions
// other frontend paths do not return data if calling the backend fails,
// they throw things instead
if (data.error) {
throw new Error(data.response);
}
@ -4434,6 +4435,11 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro
return Promise.resolve();
}
/**
* Saves itemized prompt bits and calls streaming or non-streaming generation API.
* @returns {Promise<void|*|Awaited<*>|String|{fromStream}|string|undefined|Object>}
* @throws {Error|object} Error with message text, or Error with response JSON (OAI/Horde), or the actual response JSON (novel|textgenerationwebui|kobold)
*/
async function finishGenerating() {
if (power_user.console_log_prompts) {
console.log(generate_data.prompt);
@ -4545,6 +4551,12 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro
return finishGenerating().then(onSuccess, onError);
/**
* Handles the successful response from the generation API.
* @param {object} data - response data from the generation API
* @returns {Promise<String|{fromStream}|*|string|void|Awaited<*>|undefined>}
* @throws {Error} Throws an error if the response data contains an error message
*/
async function onSuccess(data) {
if (!data) return;
@ -4554,6 +4566,7 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro
let messageChunk = '';
// if an error was returned in data (textgenwebui), show it and throw it
if (data.error) {
unblockGeneration(type);
generatedPromptCache = '';
@ -4668,9 +4681,15 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro
return Object.defineProperty(new String(getMessage), 'messageChunk', { value: messageChunk });
}
/**
* Exception handler for finishGenerating
* @param {Error|object} exception Error or response JSON
* @throws {Error|object} Re-throws the exception
*/
function onError(exception) {
// if the response JSON was thrown (novel|textgenerationwebui|kobold), show the error message
if (typeof exception?.error?.message === 'string') {
toastr.error(exception.error.message, t`Error`, { timeOut: 10000, extendedTimeOut: 20000 });
toastr.error(exception.error.message, t`Text generation error`, { timeOut: 10000, extendedTimeOut: 20000 });
}
generatedPromptCache = '';
@ -5338,6 +5357,7 @@ function setInContextMessages(lastmsg, type) {
* @param {string} type Generation type
* @param {object} data Generation data
* @returns {Promise<object>} Response data from the API
* @throws {Error|object}
*/
export async function sendGenerationRequest(type, data) {
if (main_api === 'openai') {
@ -5357,12 +5377,10 @@ export async function sendGenerationRequest(type, data) {
});
if (!response.ok) {
const error = await response.json();
throw error;
throw await response.json();
}
const responseData = await response.json();
return responseData;
return await response.json();
}
/**
@ -5394,6 +5412,7 @@ export async function sendStreamingRequest(type, data) {
* Gets the generation endpoint URL for the specified API.
* @param {string} api API name
* @returns {string} Generation URL
* @throws {Error} If the API is unknown
*/
function getGenerateUrl(api) {
switch (api) {

View File

@ -2373,6 +2373,7 @@ function ensureSelectionExists(setting, selector) {
* @param {string} [message] Chat message
* @param {function} [callback] Callback function
* @returns {Promise<string|undefined>} Image path
* @throws {Error} If the prompt or image generation fails
*/
async function generatePicture(initiator, args, trigger, message, callback) {
if (!trigger || trigger.trim().length === 0) {
@ -2391,7 +2392,7 @@ async function generatePicture(initiator, args, trigger, message, callback) {
trigger = trigger.trim();
const generationType = getGenerationType(trigger);
const generationTypeKey = Object.keys(generationMode).find(key => generationMode[key] === generationType);
console.log(`Generation mode ${generationTypeKey} triggered with "${trigger}"`);
console.log(`Image generation mode ${generationTypeKey} triggered with "${trigger}"`);
const quietPrompt = getQuietPrompt(generationType, trigger);
const context = getContext();
@ -2428,6 +2429,8 @@ async function generatePicture(initiator, args, trigger, message, callback) {
try {
const combineNegatives = (prefix) => { negativePromptPrefix = combinePrefixes(negativePromptPrefix, prefix); };
// generate the text prompt for the image
const prompt = await getPrompt(generationType, message, trigger, quietPrompt, combineNegatives);
console.log('Processed image prompt:', prompt);
@ -2438,11 +2441,16 @@ async function generatePicture(initiator, args, trigger, message, callback) {
args._abortController.addEventListener('abort', stopListener);
}
// generate the image
imagePath = await sendGenerationRequest(generationType, prompt, negativePromptPrefix, characterName, callback, initiator, abortController.signal);
} catch (err) {
console.trace(err);
toastr.error('SD prompt text generation failed. Reason: ' + err, 'Image Generation');
throw new Error('SD prompt text generation failed. Reason: ' + err);
// errors here are most likely due to text generation failure
// sendGenerationRequest mostly deals with its own errors
const reason = err.error?.message || err.message || 'Unknown error';
const errorText = 'SD prompt text generation failed. ' + reason;
toastr.error(errorText, 'Image Generation');
throw new Error(errorText);
}
finally {
$(stopButton).hide();
@ -2513,7 +2521,7 @@ function restoreOriginalDimensions(savedParams) {
*/
async function getPrompt(generationType, message, trigger, quietPrompt, combineNegatives) {
let prompt;
console.log('getPrompt: Generation mode', generationType, 'triggered with', trigger);
switch (generationType) {
case generationMode.RAW_LAST:
prompt = message || getRawLastMessage();
@ -2729,7 +2737,7 @@ async function sendGenerationRequest(generationType, prompt, additionalNegativeP
throw new Error('Endpoint did not return image data.');
}
} catch (err) {
console.error(err);
console.error('Image generation request error: ', err);
toastr.error('Image generation failed. Please try again.' + '\n\n' + String(err), 'Image Generation');
return;
}

View File

@ -181,6 +181,14 @@ function setContextSizePreview() {
}
}
/** Generates text using the Horde API.
* @param {string} prompt
* @param {object} params - generation parameters (the `prompt` key is stripped before sending)
* @param {AbortSignal} signal
* @param {boolean} reportProgress
* @returns {Promise<{text: *, workerName: string}>}
* @throws {Error}
*/
async function generateHorde(prompt, params, signal, reportProgress) {
validateHordeModel();
delete params.prompt;

View File

@ -1313,6 +1313,11 @@ export async function prepareOpenAIMessages({
return [chat, promptManager.tokenHandler.counts];
}
/**
* Handles errors during streaming requests.
* @param {Response} response
* @param {string} decoded - response text or decoded stream data
*/
function tryParseStreamingError(response, decoded) {
try {
const data = JSON.parse(decoded);
@ -1324,6 +1329,9 @@ function tryParseStreamingError(response, decoded) {
checkQuotaError(data);
checkModerationError(data);
// these do not throw correctly (equiv to Error("[object Object]"))
// if trying to fix "[object Object]" displayed to users, start here
if (data.error) {
toastr.error(data.error.message || response.statusText, 'Chat Completion API');
throw new Error(data);
@ -1339,15 +1347,22 @@ function tryParseStreamingError(response, decoded) {
}
}
async function checkQuotaError(data) {
const errorText = await renderTemplateAsync('quotaError');
/**
 * Checks whether the backend response JSON reports a quota error; if it does,
 * shows an explanatory popup and aborts the caller by throwing.
 * @param {object} [data] - Parsed response JSON from the backend (may be null/undefined)
 * @returns {void}
 * @throws {object} - response JSON
 */
function checkQuotaError(data) {
    // No payload or no quota flag — nothing to report.
    if (!data?.quota_error) {
        return;
    }
    // Fire-and-forget: render the quota-error template, then display it.
    renderTemplateAsync('quotaError').then((html) => Popup.show.text('Quota Error', html));
    // this does not throw correctly (equiv to Error("[object Object]"))
    // if trying to fix "[object Object]" displayed to users, start here
    throw new Error(data);
}
@ -1766,6 +1781,15 @@ async function sendAltScaleRequest(messages, logit_bias, signal, type) {
return data.output;
}
/**
* Send a chat completion request to backend
* @param {string} type (impersonate, quiet, continue, etc)
* @param {Array} messages
* @param {AbortSignal?} signal
* @returns {Promise<unknown>}
* @throws {Error}
*/
async function sendOpenAIRequest(type, messages, signal) {
// Provide default abort signal
if (!signal) {
@ -2028,12 +2052,13 @@ async function sendOpenAIRequest(type, messages, signal) {
else {
const data = await response.json();
await checkQuotaError(data);
checkQuotaError(data);
checkModerationError(data);
if (data.error) {
toastr.error(data.error.message || response.statusText, t`API returned an error`);
throw new Error(data);
const message = data.error.message || response.statusText || t`Unknown error`;
toastr.error(message, t`API returned an error`);
throw new Error(message);
}
if (type !== 'quiet') {

View File

@ -1,5 +1,4 @@
import { escapeRegex } from '../utils.js';
import { SlashCommand } from './SlashCommand.js';
import { SlashCommandParser } from './SlashCommandParser.js';
export class SlashCommandBrowser {
@ -30,7 +29,7 @@ export class SlashCommandBrowser {
this.details?.remove();
this.details = null;
let query = inp.value.trim();
if (query.slice(-1) == '"' && !/(?:^|\s+)"/.test(query)) {
if (query.slice(-1) === '"' && !/(?:^|\s+)"/.test(query)) {
query = `"${query}`;
}
let fuzzyList = [];
@ -59,7 +58,7 @@ export class SlashCommandBrowser {
cmd.helpString,
];
const find = ()=>targets.find(t=>(fuzzyList.find(f=>f.test(t)) ?? quotedList.find(q=>t.includes(q))) !== undefined) !== undefined;
if (fuzzyList.length + quotedList.length == 0 || find()) {
if (fuzzyList.length + quotedList.length === 0 || find()) {
this.itemMap[cmd.name].classList.remove('isFiltered');
} else {
this.itemMap[cmd.name].classList.add('isFiltered');
@ -78,7 +77,7 @@ export class SlashCommandBrowser {
list.classList.add('autoComplete');
this.cmdList = Object
.keys(SlashCommandParser.commands)
.filter(key => SlashCommandParser.commands[key].name == key) // exclude aliases
.filter(key => SlashCommandParser.commands[key].name === key) // exclude aliases
.sort((a, b) => a.toLowerCase().localeCompare(b.toLowerCase()))
.map(key => SlashCommandParser.commands[key])
;
@ -97,7 +96,7 @@ export class SlashCommandBrowser {
}
}
}
if (this.details != details) {
if (this.details !== details) {
Array.from(list.querySelectorAll('.selected')).forEach(it=>it.classList.remove('selected'));
item.classList.add('selected');
this.details?.remove();
@ -124,7 +123,7 @@ export class SlashCommandBrowser {
parent.append(this.dom);
this.mo = new MutationObserver(muts=>{
if (muts.find(mut=>Array.from(mut.removedNodes).find(it=>it == this.dom || it.contains(this.dom)))) {
if (muts.find(mut=>Array.from(mut.removedNodes).find(it=>it === this.dom || it.contains(this.dom)))) {
this.mo.disconnect();
window.removeEventListener('keydown', boundHandler);
}
@ -136,7 +135,7 @@ export class SlashCommandBrowser {
}
handleKeyDown(evt) {
if (!evt.shiftKey && !evt.altKey && evt.ctrlKey && evt.key.toLowerCase() == 'f') {
if (!evt.shiftKey && !evt.altKey && evt.ctrlKey && evt.key.toLowerCase() === 'f') {
if (!this.dom.closest('body')) return;
if (this.dom.closest('.mes') && !this.dom.closest('.last_mes')) return;
evt.preventDefault();

View File

@ -880,6 +880,13 @@ function setSettingByName(setting, value, trigger) {
}
}
/**
* Sends a streaming request for textgenerationwebui.
* @param {object} generate_data - generation payload (the `stream` flag is forced on)
* @param {AbortSignal} signal
* @returns {Promise<(function(): AsyncGenerator<{swipes: [], text: string, toolCalls: [], logprobs: {token: string, topLogprobs: Candidate[]}|null}, void, *>)|*>}
* @throws {Error} - If the response status is not OK, or from within the generator
*/
async function generateTextGenWithStreaming(generate_data, signal) {
generate_data.stream = true;
@ -995,6 +1002,7 @@ export function parseTabbyLogprobs(data) {
* @param {Response} response - Response from the server.
* @param {string} decoded - Decoded response body.
* @returns {void} Nothing.
* @throws {Error} If the response contains an error message, throws Error with the message.
*/
function tryParseStreamingError(response, decoded) {
let data = {};

View File

@ -1051,8 +1051,12 @@ router.post('/generate', jsonParser, function (request, response) {
}
} catch (error) {
console.log('Generation failed', error);
const message = error.code === 'ECONNREFUSED'
? `Connection refused: ${error.message}`
: error.message || 'Unknown error occurred';
if (!response.headersSent) {
response.send({ error: true });
response.status(502).send({ error: { message, ...error } });
} else {
response.end();
}
@ -1068,7 +1072,7 @@ router.post('/generate', jsonParser, function (request, response) {
const message = errorResponse.statusText || 'Unknown error occurred';
const quota_error = errorResponse.status === 429 && errorData?.error?.type === 'insufficient_quota';
console.log(message, responseText);
console.log('Chat completion request error: ', message, responseText);
if (!response.headersSent) {
response.send({ error: { message }, quota_error: quota_error });