diff --git a/public/script.js b/public/script.js
index f655d6fe1..73ccde163 100644
--- a/public/script.js
+++ b/public/script.js
@@ -2672,8 +2672,7 @@ export async function generateQuietPrompt(quiet_prompt, quietToLoud, skipWIAN, q
             quietName: quietName,
         };
         originalResponseLength = responseLengthCustomized ? saveResponseLength(main_api, responseLength) : -1;
-        const generateFinished = await Generate('quiet', options);
-        return generateFinished;
+        return await Generate('quiet', options);
     } finally {
         if (responseLengthCustomized) {
             restoreResponseLength(main_api, originalResponseLength);
@@ -3328,9 +3327,9 @@ export async function generateRaw(prompt, api, instructOverride, quietToLoud, sy
 
     let data = {};
 
-    if (api == 'koboldhorde') {
+    if (api === 'koboldhorde') {
         data = await generateHorde(prompt, generateData, abortController.signal, false);
-    } else if (api == 'openai') {
+    } else if (api === 'openai') {
         data = await sendOpenAIRequest('quiet', generateData, abortController.signal);
     } else {
         const generateUrl = getGenerateUrl(api);
@@ -3343,13 +3342,15 @@ export async function generateRaw(prompt, api, instructOverride, quietToLoud, sy
         });
 
         if (!response.ok) {
-            const error = await response.json();
-            throw error;
+            throw await response.json();
         }
 
         data = await response.json();
     }
 
+    // should only happen for text completions
+    // other frontend paths do not return data if calling the backend fails,
+    // they throw things instead
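+    // illustrative only (hypothetical caller code, mirroring the onError check in Generate below):
+    //   try { await generateRaw(prompt, api); }
+    //   catch (err) { const msg = err instanceof Error ? err.message : err?.error?.message; }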
     if (data.error) {
         throw new Error(data.response);
     }
@@ -4401,6 +4402,11 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro
         return Promise.resolve();
     }
 
+    /**
+     * Saves itemized prompt bits and calls streaming or non-streaming generation API.
+     * @returns {Promise<void|*|Awaited<*>|String|{fromStream}|string|undefined|Object>}
+     * @throws {Error|object} Error with message text, or Error with response JSON (OAI/Horde), or the actual response JSON (novel|textgenerationwebui|kobold)
+     */
     async function finishGenerating() {
         if (power_user.console_log_prompts) {
             console.log(generate_data.prompt);
@@ -4512,6 +4518,12 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro
 
     return finishGenerating().then(onSuccess, onError);
 
+    /**
+     * Handles the successful response from the generation API.
+     * @param data
+     * @returns {Promise<void|*|Awaited<*>|undefined>}
+     * @throws {Error} Throws an error if the response data contains an error message
+     */
     async function onSuccess(data) {
         if (!data) return;
 
@@ -4521,6 +4533,7 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro
 
         let messageChunk = '';
 
+        // if an error was returned in data (textgenwebui), show it and throw it
        if (data.error) {
            unblockGeneration(type);
            generatedPromptCache = '';
@@ -4635,9 +4648,15 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro
         return Object.defineProperty(new String(getMessage), 'messageChunk', { value: messageChunk });
     }
 
+    /**
+     * Exception handler for finishGenerating
+     * @param {Error|object} exception Error or response JSON
+     * @throws {Error|object} Re-throws the exception
+     */
     function onError(exception) {
+        // if the response JSON was thrown (novel|textgenerationwebui|kobold), show the error message
         if (typeof exception?.error?.message === 'string') {
-            toastr.error(exception.error.message, t`Error`, { timeOut: 10000, extendedTimeOut: 20000 });
+            toastr.error(exception.error.message, t`Text generation error`, { timeOut: 10000, extendedTimeOut: 20000 });
         }
 
         generatedPromptCache = '';
@@ -5305,6 +5324,7 @@ function setInContextMessages(lastmsg, type) {
  * @param {string} type Generation type
  * @param {object} data Generation data
  * @returns {Promise} Response data from the API
+ * @throws {Error|object}
  */
 export async function sendGenerationRequest(type, data) {
     if (main_api === 'openai') {
@@ -5324,12 +5344,10 @@ export async function sendGenerationRequest(type, data) {
     });
 
     if (!response.ok) {
-        const error = await response.json();
-        throw error;
+        throw await response.json();
     }
 
-    const responseData = await response.json();
-    return responseData;
+    return await response.json();
 }
 
 /**
@@ -5361,6 +5379,7 @@ export async function sendStreamingRequest(type, data) {
  * Gets the generation endpoint URL for the specified API.
  * @param {string} api API name
  * @returns {string} Generation URL
+ * @throws {Error} If the API is unknown
  */
 function getGenerateUrl(api) {
     switch (api) {
diff --git a/public/scripts/extensions/stable-diffusion/index.js b/public/scripts/extensions/stable-diffusion/index.js
index 409f38a84..2c6c00f58 100644
--- a/public/scripts/extensions/stable-diffusion/index.js
+++ b/public/scripts/extensions/stable-diffusion/index.js
@@ -2362,6 +2362,7 @@ function ensureSelectionExists(setting, selector) {
  * @param {string} [message] Chat message
  * @param {function} [callback] Callback function
  * @returns {Promise} Image path
+ * @throws {Error} If the prompt or image generation fails
  */
 async function generatePicture(initiator, args, trigger, message, callback) {
     if (!trigger || trigger.trim().length === 0) {
@@ -2379,7 +2380,7 @@ async function generatePicture(initiator, args, trigger, message, callback) {
 
     trigger = trigger.trim();
     const generationType = getGenerationType(trigger);
-    console.log('Generation mode', generationType, 'triggered with', trigger);
+    console.log('Image generation mode', generationType, 'triggered with', trigger);
     const quietPrompt = getQuietPrompt(generationType, trigger);
     const context = getContext();
 
@@ -2387,7 +2388,7 @@ async function generatePicture(initiator, args, trigger, message, callback) {
         ? context.groups[Object.keys(context.groups).filter(x => context.groups[x].id === context.groupId)[0]]?.id?.toString()
         : context.characters[context.characterId]?.name;
 
-    if (generationType == generationMode.BACKGROUND) {
+    if (generationType === generationMode.BACKGROUND) {
         const callbackOriginal = callback;
         callback = async function (prompt, imagePath, generationType, _negativePromptPrefix, _initiator, prefixedPrompt) {
             const imgUrl = `url("${encodeURI(imagePath)}")`;
@@ -2415,6 +2416,8 @@ async function generatePicture(initiator, args, trigger, message, callback) {
 
     try {
         const combineNegatives = (prefix) => { negativePromptPrefix = combinePrefixes(negativePromptPrefix, prefix); };
+
+        // generate the text prompt for the image
         const prompt = await getPrompt(generationType, message, trigger, quietPrompt, combineNegatives);
         console.log('Processed image prompt:', prompt);
 
@@ -2425,6 +2428,7 @@ async function generatePicture(initiator, args, trigger, message, callback) {
             args._abortController.addEventListener('abort', stopListener);
         }
 
+        // generate the image
         imagePath = await sendGenerationRequest(generationType, prompt, negativePromptPrefix, characterName, callback, initiator, abortController.signal);
     } catch (err) {
         console.trace(err);
@@ -2500,7 +2504,7 @@ function restoreOriginalDimensions(savedParams) {
  */
 async function getPrompt(generationType, message, trigger, quietPrompt, combineNegatives) {
     let prompt;
-
+    console.log('getPrompt: Generation mode', generationType, 'triggered with', trigger);
     switch (generationType) {
         case generationMode.RAW_LAST:
             prompt = message || getRawLastMessage();
@@ -2718,7 +2722,7 @@ async function sendGenerationRequest(generationType, prompt, additionalNegativeP
             throw new Error('Endpoint did not return image data.');
         }
     } catch (err) {
-        console.error(err);
+        console.error('Image generation request error:', err);
         toastr.error('Image generation failed. Please try again.' + '\n\n' + String(err), 'Image Generation');
         return;
     }
diff --git a/public/scripts/horde.js b/public/scripts/horde.js
index 1e0141fb0..0b5d94bea 100644
--- a/public/scripts/horde.js
+++ b/public/scripts/horde.js
@@ -181,6 +181,14 @@ function setContextSizePreview() {
     }
 }
 
+/** Generates text using the Horde API.
+ * @param {string} prompt
+ * @param {object} params
+ * @param {AbortSignal} signal
+ * @param {boolean} reportProgress
+ * @returns {Promise<{text: *, workerName: string}>}
+ * @throws {Error}
+ */
 async function generateHorde(prompt, params, signal, reportProgress) {
     validateHordeModel();
     delete params.prompt;
diff --git a/public/scripts/openai.js b/public/scripts/openai.js
index 2f1392414..8e76373b2 100644
--- a/public/scripts/openai.js
+++ b/public/scripts/openai.js
@@ -1312,6 +1312,11 @@ export async function prepareOpenAIMessages({
     return [chat, promptManager.tokenHandler.counts];
 }
 
+/**
+ * Handles errors during streaming requests.
+ * @param {Response} response
+ * @param {string} decoded - response text or decoded stream data
+ */
 function tryParseStreamingError(response, decoded) {
     try {
         const data = JSON.parse(decoded);
@@ -1320,9 +1325,12 @@ function tryParseStreamingError(response, decoded) {
             return;
         }
 
-        checkQuotaError(data);
+        void checkQuotaError(data);
         checkModerationError(data);
 
+        // these do not throw correctly (equiv to Error("[object Object]"))
+        // if trying to fix "[object Object]" displayed to users, start here
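+        // a possible fix (sketch, not applied in this patch): throw new Error(data.error?.message ?? JSON.stringify(data))
+        // would surface the real message instead of "[object Object]"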
+
         if (data.error) {
             toastr.error(data.error.message || response.statusText, 'Chat Completion API');
             throw new Error(data);
         }
@@ -1338,6 +1346,12 @@ function tryParseStreamingError(response, decoded) {
     }
 }
 
+/**
+ * Checks if the response contains a quota error and displays a popup if it does.
+ * @param data
+ * @returns {Promise}
+ * @throws {object} - response JSON
+ */
 async function checkQuotaError(data) {
     const errorText = await renderTemplateAsync('quotaError');
 
@@ -1347,6 +1361,9 @@ async function checkQuotaError(data) {
 
     if (data.quota_error) {
         callPopup(errorText, 'text');
+
+        // this does not throw correctly (equiv to Error("[object Object]"))
+        // if trying to fix "[object Object]" displayed to users, start here
         throw new Error(data);
     }
 }
@@ -1765,6 +1782,15 @@ async function sendAltScaleRequest(messages, logit_bias, signal, type) {
     return data.output;
 }
 
+/**
+ * Sends a chat completion request to the backend.
+ * @param {string} type (impersonate, quiet, continue, etc.)
+ * @param {Array} messages
+ * @param {AbortSignal?} signal
+ * @returns {Promise}
+ * @throws {Error}
+ */
+
 async function sendOpenAIRequest(type, messages, signal) {
     // Provide default abort signal
     if (!signal) {
diff --git a/public/scripts/textgen-settings.js b/public/scripts/textgen-settings.js
index 67e058f08..7b3017efb 100644
--- a/public/scripts/textgen-settings.js
+++ b/public/scripts/textgen-settings.js
@@ -880,6 +880,13 @@ function setSettingByName(setting, value, trigger) {
     }
 }
 
+/**
+ * Sends a streaming request for textgenerationwebui.
+ * @param {object} generate_data
+ * @param {AbortSignal} signal
+ * @returns {Promise<(function(): AsyncGenerator<{swipes: [], text: string, toolCalls: [], logprobs: {token: string, topLogprobs: Candidate[]}|null}, void, *>)|*>}
+ * @throws {Error} - If the response status is not OK, or from within the generator
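+ * @example
+ * // usage sketch (hypothetical caller): the resolved value is invoked to obtain the generator
+ * const generator = await generateTextGenWithStreaming(generate_data, signal);
+ * for await (const { text } of generator()) {
+ *     console.log(text);
+ * }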
+ */
 async function generateTextGenWithStreaming(generate_data, signal) {
     generate_data.stream = true;
 
@@ -995,6 +1002,7 @@ export function parseTabbyLogprobs(data) {
  * @param {Response} response - Response from the server.
  * @param {string} decoded - Decoded response body.
  * @returns {void} Nothing.
+ * @throws {Error} If the response contains an error message, throws Error with the message.
  */
 function tryParseStreamingError(response, decoded) {
     let data = {};