Linting and commenting
parent 669ba2fd36
commit 0383ea52e9

@@ -2672,8 +2672,7 @@ export async function generateQuietPrompt(quiet_prompt, quietToLoud, skipWIAN, q
             quietName: quietName,
         };
         originalResponseLength = responseLengthCustomized ? saveResponseLength(main_api, responseLength) : -1;
-        const generateFinished = await Generate('quiet', options);
-        return generateFinished;
+        return await Generate('quiet', options);
     } finally {
         if (responseLengthCustomized) {
             restoreResponseLength(main_api, originalResponseLength);
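
Note: the hunk above folds a temporary variable into a direct `return await Generate('quiet', options);` inside a try/finally. Keeping the `await` matters there: the `finally` block that restores the response length only runs after the awaited call settles. A minimal standalone sketch of that behavior (not code from this repository):

// Minimal sketch: `return await promise` vs `return promise` inside try/finally.
// `delay` stands in for an arbitrary async call such as Generate().
const delay = (ms, value) => new Promise((resolve) => setTimeout(() => resolve(value), ms));

async function withAwait() {
    try {
        return await delay(50, 'done');
    } finally {
        // Runs only after the awaited promise has settled.
        console.log('finally (withAwait)');
    }
}

async function withoutAwait() {
    try {
        return delay(50, 'done');
    } finally {
        // Runs immediately, before the returned promise settles.
        console.log('finally (withoutAwait)');
    }
}

withAwait().then((value) => console.log('withAwait resolved:', value));
withoutAwait().then((value) => console.log('withoutAwait resolved:', value));
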
@@ -3328,9 +3327,9 @@ export async function generateRaw(prompt, api, instructOverride, quietToLoud, sy

     let data = {};

-    if (api == 'koboldhorde') {
+    if (api === 'koboldhorde') {
         data = await generateHorde(prompt, generateData, abortController.signal, false);
-    } else if (api == 'openai') {
+    } else if (api === 'openai') {
         data = await sendOpenAIRequest('quiet', generateData, abortController.signal);
     } else {
         const generateUrl = getGenerateUrl(api);
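
Note: the `==` to `===` switches in this hunk (and in the image-generation hunks further down) are standard lint fixes; strict equality skips implicit type coercion. A quick standalone illustration:

// Loose vs strict equality: `==` coerces operand types, `===` does not.
console.log(0 == '');             // true  (both coerce to 0)
console.log(0 === '');            // false
console.log('1' == 1);            // true  (string coerced to number)
console.log('1' === 1);           // false
console.log(null == undefined);   // true
console.log(null === undefined);  // false

// For comparisons like `api === 'koboldhorde'` the outcome is the same either way,
// but `===` states the intent explicitly and satisfies eqeqeq-style lint rules.
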
@@ -3343,13 +3342,15 @@ export async function generateRaw(prompt, api, instructOverride, quietToLoud, sy
         });

         if (!response.ok) {
-            const error = await response.json();
-            throw error;
+            throw await response.json();
         }

         data = await response.json();
     }

+    // should only happen for text completions
+    // other frontend paths do not return data if calling the backend fails,
+    // they throw things instead
     if (data.error) {
         throw new Error(data.response);
     }
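
Note: the new comments make the inconsistency explicit: the fetch-based path throws the parsed response JSON (a plain object), while other paths throw an `Error`. A hypothetical caller (names are illustrative, not from this codebase) has to account for both shapes:

// Hypothetical caller handling both failure shapes: a real Error (with .message)
// or a thrown response JSON object from a fetch-based backend.
async function exampleCall(doGenerate) {
    try {
        return await doGenerate();
    } catch (err) {
        if (err instanceof Error) {
            console.error('Error:', err.message);
        } else if (typeof err?.error?.message === 'string') {
            console.error('Backend error:', err.error.message);
        } else {
            console.error('Unknown failure shape:', err);
        }
        throw err; // re-throw so upstream handlers still see the failure
    }
}
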
@@ -4401,6 +4402,11 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro
         return Promise.resolve();
     }

+    /**
+     * Saves itemized prompt bits and calls streaming or non-streaming generation API.
+     * @returns {Promise<void|*|Awaited<*>|String|{fromStream}|string|undefined|Object>}
+     * @throws {Error|object} Error with message text, or Error with response JSON (OAI/Horde), or the actual response JSON (novel|textgenerationwebui|kobold)
+     */
     async function finishGenerating() {
         if (power_user.console_log_prompts) {
             console.log(generate_data.prompt);
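
Note: most of the remaining hunks only add JSDoc blocks like the one above. Beyond documentation, such annotations let editors and `tsc --checkJs` type-check plain JavaScript files. A small illustrative example (not project code):

// @ts-check  (placed at the top of a .js file, JSDoc types become compiler-checked)

/**
 * @param {string} prompt
 * @param {number} maxLength
 * @returns {string}
 * @throws {Error} If the prompt is empty
 */
function truncatePrompt(prompt, maxLength) {
    if (!prompt) {
        throw new Error('Prompt must not be empty');
    }
    return prompt.slice(0, maxLength);
}

truncatePrompt('hello world', 5);  // ok
// truncatePrompt(42, 'x');        // editor/tsc error: argument types do not match
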
@@ -4512,6 +4518,12 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro

     return finishGenerating().then(onSuccess, onError);

+    /**
+     * Handles the successful response from the generation API.
+     * @param data
+     * @returns {Promise<String|{fromStream}|*|string|string|void|Awaited<*>|undefined>}
+     * @throws {Error} Throws an error if the response data contains an error message
+     */
     async function onSuccess(data) {
         if (!data) return;

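Note: the newly documented handlers are wired up as `.then(onSuccess, onError)`. That two-argument form behaves differently from `.then(onSuccess).catch(onError)`: it does not route errors thrown inside `onSuccess` to `onError`. A standalone sketch of the difference:

// Two-argument .then(): the rejection handler only sees rejections of the original promise.
Promise.resolve('data')
    .then(
        () => { throw new Error('thrown inside onSuccess'); },
        (err) => console.log('onError (two-arg):', err.message), // not called
    )
    .catch((err) => console.log('outer catch:', err.message));   // called

// .then().catch(): the rejection handler also sees errors thrown inside onSuccess.
Promise.resolve('data')
    .then(() => { throw new Error('thrown inside onSuccess'); })
    .catch((err) => console.log('onError (.catch):', err.message)); // called
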
@@ -4521,6 +4533,7 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro

         let messageChunk = '';

+        // if an error was returned in data (textgenwebui), show it and throw it
         if (data.error) {
             unblockGeneration(type);
             generatedPromptCache = '';
@@ -4635,9 +4648,15 @@ export async function Generate(type, { automatic_trigger, force_name2, quiet_pro
         return Object.defineProperty(new String(getMessage), 'messageChunk', { value: messageChunk });
     }

+    /**
+     * Exception handler for finishGenerating
+     * @param {Error|object} exception Error or response JSON
+     * @throws {Error|object} Re-throws the exception
+     */
     function onError(exception) {
+        // if the response JSON was thrown (novel|textgenerationwebui|kobold), show the error message
        if (typeof exception?.error?.message === 'string') {
-            toastr.error(exception.error.message, t`Error`, { timeOut: 10000, extendedTimeOut: 20000 });
+            toastr.error(exception.error.message, t`Text generation error`, { timeOut: 10000, extendedTimeOut: 20000 });
         }

         generatedPromptCache = '';
@@ -5305,6 +5324,7 @@ function setInContextMessages(lastmsg, type) {
  * @param {string} type Generation type
  * @param {object} data Generation data
  * @returns {Promise<object>} Response data from the API
+ * @throws {Error|object}
  */
 export async function sendGenerationRequest(type, data) {
     if (main_api === 'openai') {
@@ -5324,12 +5344,10 @@ export async function sendGenerationRequest(type, data) {
     });

     if (!response.ok) {
-        const error = await response.json();
-        throw error;
+        throw await response.json();
     }

-    const responseData = await response.json();
-    return responseData;
+    return await response.json();
 }

 /**
@@ -5361,6 +5379,7 @@ export async function sendStreamingRequest(type, data) {
  * Gets the generation endpoint URL for the specified API.
  * @param {string} api API name
  * @returns {string} Generation URL
+ * @throws {Error} If the API is unknown
  */
 function getGenerateUrl(api) {
     switch (api) {
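
Note: the new `@throws {Error} If the API is unknown` tag on `getGenerateUrl` documents the usual shape of such a lookup: a `switch` whose `default` branch throws. A hypothetical sketch of that pattern (the route strings are placeholders, not the application's real endpoints):

// Hypothetical lookup illustrating the documented "throws on unknown API" behavior.
function getEndpointUrlExample(api) {
    switch (api) {
        case 'kobold':
            return '/api/example/kobold/generate';
        case 'textgenerationwebui':
            return '/api/example/textgen/generate';
        case 'novel':
            return '/api/example/novelai/generate';
        default:
            throw new Error(`Unknown API: ${api}`);
    }
}

console.log(getEndpointUrlExample('novel'));   // '/api/example/novelai/generate'
// getEndpointUrlExample('nonsense');          // throws Error: Unknown API: nonsense
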
@@ -2362,6 +2362,7 @@ function ensureSelectionExists(setting, selector) {
  * @param {string} [message] Chat message
  * @param {function} [callback] Callback function
  * @returns {Promise<string>} Image path
+ * @throws {Error} If the prompt or image generation fails
  */
 async function generatePicture(initiator, args, trigger, message, callback) {
     if (!trigger || trigger.trim().length === 0) {
@@ -2379,7 +2380,7 @@ async function generatePicture(initiator, args, trigger, message, callback) {

     trigger = trigger.trim();
     const generationType = getGenerationType(trigger);
-    console.log('Generation mode', generationType, 'triggered with', trigger);
+    console.log('Image generation mode', generationType, 'triggered with', trigger);
     const quietPrompt = getQuietPrompt(generationType, trigger);
     const context = getContext();

@@ -2387,7 +2388,7 @@ async function generatePicture(initiator, args, trigger, message, callback) {
         ? context.groups[Object.keys(context.groups).filter(x => context.groups[x].id === context.groupId)[0]]?.id?.toString()
         : context.characters[context.characterId]?.name;

-    if (generationType == generationMode.BACKGROUND) {
+    if (generationType === generationMode.BACKGROUND) {
         const callbackOriginal = callback;
         callback = async function (prompt, imagePath, generationType, _negativePromptPrefix, _initiator, prefixedPrompt) {
             const imgUrl = `url("${encodeURI(imagePath)}")`;
@@ -2415,6 +2416,8 @@ async function generatePicture(initiator, args, trigger, message, callback) {

     try {
         const combineNegatives = (prefix) => { negativePromptPrefix = combinePrefixes(negativePromptPrefix, prefix); };
+
+        // generate the text prompt for the image
         const prompt = await getPrompt(generationType, message, trigger, quietPrompt, combineNegatives);
         console.log('Processed image prompt:', prompt);

@@ -2425,6 +2428,7 @@ async function generatePicture(initiator, args, trigger, message, callback) {
             args._abortController.addEventListener('abort', stopListener);
         }

+        // generate the image
         imagePath = await sendGenerationRequest(generationType, prompt, negativePromptPrefix, characterName, callback, initiator, abortController.signal);
     } catch (err) {
         console.trace(err);
@@ -2500,7 +2504,7 @@ function restoreOriginalDimensions(savedParams) {
  */
 async function getPrompt(generationType, message, trigger, quietPrompt, combineNegatives) {
     let prompt;
-
+    console.log('getPrompt: Generation mode', generationType, 'triggered with', trigger);
     switch (generationType) {
         case generationMode.RAW_LAST:
             prompt = message || getRawLastMessage();
@@ -2718,7 +2722,7 @@ async function sendGenerationRequest(generationType, prompt, additionalNegativeP
             throw new Error('Endpoint did not return image data.');
         }
     } catch (err) {
-        console.error(err);
+        console.error('Image generation request error: ', err);
         toastr.error('Image generation failed. Please try again.' + '\n\n' + String(err), 'Image Generation');
         return;
     }
@@ -181,6 +181,14 @@ function setContextSizePreview() {
     }
 }

+/** Generates text using the Horde API.
+ * @param {string} prompt
+ * @param params
+ * @param signal
+ * @param reportProgress
+ * @returns {Promise<{text: *, workerName: string}>}
+ * @throws {Error}
+ */
 async function generateHorde(prompt, params, signal, reportProgress) {
     validateHordeModel();
     delete params.prompt;
@@ -1312,6 +1312,11 @@ export async function prepareOpenAIMessages({
     return [chat, promptManager.tokenHandler.counts];
 }

+/**
+ * Handles errors during streaming requests.
+ * @param {Response} response
+ * @param {string} decoded - response text or decoded stream data
+ */
 function tryParseStreamingError(response, decoded) {
     try {
         const data = JSON.parse(decoded);
@@ -1320,9 +1325,12 @@ function tryParseStreamingError(response, decoded) {
             return;
         }

-        checkQuotaError(data);
+        void checkQuotaError(data);
         checkModerationError(data);

+        // these do not throw correctly (equiv to Error("[object Object]"))
+        // if trying to fix "[object Object]" displayed to users, start here
+
         if (data.error) {
             toastr.error(data.error.message || response.statusText, 'Chat Completion API');
             throw new Error(data);
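
Note: prefixing the call with `void` marks the promise returned by `checkQuotaError` as intentionally not awaited, which is what rules like `@typescript-eslint/no-floating-promises` look for. A minimal standalone illustration:

// `void expr` evaluates the expression and discards the result, signalling
// a deliberate fire-and-forget for a promise we choose not to await.
async function notifyUser(message) {
    console.log('notifying:', message);
}

function handleEvent() {
    void notifyUser('something happened'); // intentionally not awaited
    // execution continues synchronously without waiting for the notification
}

handleEvent();
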
@@ -1338,6 +1346,12 @@ function tryParseStreamingError(response, decoded) {
     }
 }

+/**
+ * Checks if the response contains a quota error and displays a popup if it does.
+ * @param data
+ * @returns {Promise<void>}
+ * @throws {object} - response JSON
+ */
 async function checkQuotaError(data) {
     const errorText = await renderTemplateAsync('quotaError');

@@ -1347,6 +1361,9 @@ async function checkQuotaError(data) {

     if (data.quota_error) {
         callPopup(errorText, 'text');
+
+        // this does not throw correctly (equiv to Error("[object Object]"))
+        // if trying to fix "[object Object]" displayed to users, start here
         throw new Error(data);
     }
 }
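
Note: the repeated comment points at a real pitfall: `new Error(data)` with an object argument stringifies it, so the user-facing message becomes "[object Object]". A standalone sketch of the symptom and one possible remedy (illustrative only, not the project's chosen fix):

// Symptom: Error's first argument is coerced to a string.
const payload = { error: { message: 'You exceeded your current quota' } };
const bad = new Error(payload);
console.log(bad.message); // "[object Object]"

// One possible remedy: extract a readable message and keep the original
// payload on the standard `cause` option (ES2022).
const good = new Error(payload.error?.message ?? JSON.stringify(payload), { cause: payload });
console.log(good.message);           // "You exceeded your current quota"
console.log(good.cause === payload); // true
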
@@ -1765,6 +1782,15 @@ async function sendAltScaleRequest(messages, logit_bias, signal, type) {
     return data.output;
 }

+/**
+ * Send a chat completion request to backend
+ * @param {string} type (impersonate, quiet, continue, etc)
+ * @param {Array} messages
+ * @param {AbortSignal?} signal
+ * @returns {Promise<unknown>}
+ * @throws {Error}
+ */
+
 async function sendOpenAIRequest(type, messages, signal) {
     // Provide default abort signal
     if (!signal) {
@@ -880,6 +880,13 @@ function setSettingByName(setting, value, trigger) {
     }
 }

+/**
+ * Sends a streaming request for textgenerationwebui.
+ * @param generate_data
+ * @param signal
+ * @returns {Promise<(function(): AsyncGenerator<{swipes: [], text: string, toolCalls: [], logprobs: {token: string, topLogprobs: Candidate[]}|null}, void, *>)|*>}
+ * @throws {Error} - If the response status is not OK, or from within the generator
+ */
 async function generateTextGenWithStreaming(generate_data, signal) {
     generate_data.stream = true;

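Note: the long `@returns` annotation above describes a function that resolves to an async generator yielding accumulating chunks (`text`, `swipes`, `toolCalls`, `logprobs`). A simplified, hypothetical sketch of that shape and how a consumer iterates it (not the real implementation):

// Hypothetical streaming shape: a function that resolves to an async generator
// yielding a growing `text` field, mirroring the annotated return type.
async function fakeStreamingRequest() {
    const chunks = ['Hel', 'lo, ', 'world!'];
    return async function* generator() {
        let text = '';
        for (const chunk of chunks) {
            text += chunk;
            yield { text, swipes: [], toolCalls: [], logprobs: null };
        }
    };
}

// Consumption: await the function, then iterate the generator it returns.
(async () => {
    const generator = await fakeStreamingRequest();
    for await (const state of generator()) {
        console.log('partial text:', state.text);
    }
})();
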
@@ -995,6 +1002,7 @@ export function parseTabbyLogprobs(data) {
  * @param {Response} response - Response from the server.
  * @param {string} decoded - Decoded response body.
  * @returns {void} Nothing.
+ * @throws {Error} If the response contains an error message, throws Error with the message.
  */
 function tryParseStreamingError(response, decoded) {
     let data = {};