diff --git a/public/script.js b/public/script.js
index 79fb6a917..26ee43a97 100644
--- a/public/script.js
+++ b/public/script.js
@@ -2730,6 +2730,10 @@ class StreamingProcessor {
         this.onErrorStreaming();
     }
 
+    hook(generatorFn) {
+        this.generator = generatorFn;
+    }
+
     *nullStreamingGeneration() {
         throw new Error('Generation function for streaming is not hooked up');
     }
@@ -3722,10 +3726,14 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
     }
 
     console.debug(`pushed prompt bits to itemizedPrompts array. Length is now: ${itemizedPrompts.length}`);
 
+    /** @type {Promise} */
+    let streamingHookPromise = Promise.resolve();
     if (main_api == 'openai') {
         if (isStreamingEnabled() && type !== 'quiet') {
-            streamingProcessor.generator = await sendOpenAIRequest(type, generate_data.prompt, streamingProcessor.abortController.signal);
+            streamingHookPromise = sendOpenAIRequest(type, generate_data.prompt, streamingProcessor.abortController.signal)
+                .then(fn => streamingProcessor.hook(fn))
+                .catch(onError);
         }
         else {
             sendOpenAIRequest(type, generate_data.prompt, abortController.signal).then(onSuccess).catch(onError);
@@ -3735,13 +3743,19 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
         generateHorde(finalPrompt, generate_data, abortController.signal, true).then(onSuccess).catch(onError);
     }
     else if (main_api == 'textgenerationwebui' && isStreamingEnabled() && type !== 'quiet') {
-        streamingProcessor.generator = await generateTextGenWithStreaming(generate_data, streamingProcessor.abortController.signal);
+        streamingHookPromise = generateTextGenWithStreaming(generate_data, streamingProcessor.abortController.signal)
+            .then(fn => streamingProcessor.hook(fn))
+            .catch(onError);
     }
     else if (main_api == 'novel' && isStreamingEnabled() && type !== 'quiet') {
-        streamingProcessor.generator = await generateNovelWithStreaming(generate_data, streamingProcessor.abortController.signal);
+        streamingHookPromise = generateNovelWithStreaming(generate_data, streamingProcessor.abortController.signal)
+            .then(fn => streamingProcessor.hook(fn))
+            .catch(onError);
     }
     else if (main_api == 'kobold' && isStreamingEnabled() && type !== 'quiet') {
-        streamingProcessor.generator = await generateKoboldWithStreaming(generate_data, streamingProcessor.abortController.signal);
+        streamingHookPromise = generateKoboldWithStreaming(generate_data, streamingProcessor.abortController.signal)
+            .then(fn => streamingProcessor.hook(fn))
+            .catch(onError);
     }
     else {
         try {
@@ -3767,6 +3781,7 @@ async function Generate(type, { automatic_trigger, force_name2, resolve, reject,
 
     if (isStreamingEnabled() && type !== 'quiet') {
         hideSwipeButtons();
+        await streamingHookPromise;
         let getMessage = await streamingProcessor.generate();
         let messageChunk = cleanUpMessage(getMessage, isImpersonate, isContinue, false);
 
diff --git a/public/scripts/kai-settings.js b/public/scripts/kai-settings.js
index 0b6c9972e..9183a8b0d 100644
--- a/public/scripts/kai-settings.js
+++ b/public/scripts/kai-settings.js
@@ -163,7 +163,7 @@ function tryParseStreamingError(response, decoded) {
     }
 
     if (data.error) {
-        toastr.error(data.error.message || response.statusText, 'API returned an error');
+        toastr.error(data.error.message || response.statusText, 'KoboldAI API');
         throw new Error(data);
     }
 }
@@ -180,7 +180,7 @@ export async function generateKoboldWithStreaming(generate_data, signal) {
         signal: signal,
     });
     if (!response.ok) {
-        tryParseStreamingError(response, await response.body.text());
+        tryParseStreamingError(response, await response.text());
         throw new Error(`Got response status ${response.status}`);
     }
     const eventStream = new EventSourceStream();
diff --git a/public/scripts/nai-settings.js b/public/scripts/nai-settings.js
index fe5996cde..e0feaaaf4 100644
--- a/public/scripts/nai-settings.js
+++ b/public/scripts/nai-settings.js
@@ -672,8 +672,8 @@ function tryParseStreamingError(response, decoded) {
         return;
     }
 
-    if (data.error) {
-        toastr.error(data.error.message || response.statusText, 'API returned an error');
+    if (data.message || data.error) {
+        toastr.error(data.message || data.error?.message || response.statusText, 'NovelAI API');
         throw new Error(data);
     }
 }
@@ -692,7 +692,7 @@ export async function generateNovelWithStreaming(generate_data, signal) {
         signal: signal,
    });
     if (!response.ok) {
-        tryParseStreamingError(response, await response.body.text());
+        tryParseStreamingError(response, await response.text());
         throw new Error(`Got response status ${response.status}`);
     }
     const eventStream = new EventSourceStream();
diff --git a/public/scripts/openai.js b/public/scripts/openai.js
index 5de617283..12cfcd92a 100644
--- a/public/scripts/openai.js
+++ b/public/scripts/openai.js
@@ -1123,7 +1123,7 @@ function tryParseStreamingError(response, decoded) {
     checkQuotaError(data);
 
     if (data.error) {
-        toastr.error(data.error.message || response.statusText, 'API returned an error');
+        toastr.error(data.error.message || response.statusText, 'Chat Completion API');
         throw new Error(data);
     }
 }
@@ -1564,7 +1564,7 @@ async function sendOpenAIRequest(type, messages, signal) {
     });
 
     if (!response.ok) {
-        tryParseStreamingError(response, await response.body.text());
+        tryParseStreamingError(response, await response.text());
         throw new Error(`Got response status ${response.status}`);
     }
 
diff --git a/public/scripts/textgen-settings.js b/public/scripts/textgen-settings.js
index e6ed6deaa..9c938f701 100644
--- a/public/scripts/textgen-settings.js
+++ b/public/scripts/textgen-settings.js
@@ -478,7 +478,7 @@ async function generateTextGenWithStreaming(generate_data, signal) {
     });
 
     if (!response.ok) {
-        tryParseStreamingError(response, await response.body.text());
+        tryParseStreamingError(response, await response.text());
         throw new Error(`Got response status ${response.status}`);
     }
 
@@ -512,14 +512,15 @@ async function generateTextGenWithStreaming(generate_data, signal) {
 
 /**
  * Parses errors in streaming responses and displays them in toastr.
- * @param {string} response - Response from the server.
+ * @param {Response} response - Response from the server.
+ * @param {string} decoded - Decoded response body.
  * @returns {void} Nothing.
  */
-function tryParseStreamingError(response) {
+function tryParseStreamingError(response, decoded) {
     let data = {};
 
     try {
-        data = JSON.parse(response);
+        data = JSON.parse(decoded);
     } catch {
         // No JSON. Do nothing.
     }
@@ -527,7 +528,7 @@ async function generateTextGenWithStreaming(generate_data, signal) {
     const message = data?.error?.message || data?.message;
 
     if (message) {
-        toastr.error(message, 'API Error');
+        toastr.error(message, 'Text Completion API');
         throw new Error(message);
     }
 }
diff --git a/server.js b/server.js
index 589bf481c..bf6edb6bd 100644
--- a/server.js
+++ b/server.js
@@ -1618,16 +1618,17 @@ app.post('/generate_openai', jsonParser, function (request, response_generate_op
     try {
         const fetchResponse = await fetch(endpointUrl, config);
 
+        if (request.body.stream) {
+            console.log('Streaming request in progress');
+            forwardFetchResponse(fetchResponse, response_generate_openai);
+            return;
+        }
+
         if (fetchResponse.ok) {
-            if (request.body.stream) {
-                console.log('Streaming request in progress');
-                forwardFetchResponse(fetchResponse, response_generate_openai);
-            } else {
-                let json = await fetchResponse.json();
-                response_generate_openai.send(json);
-                console.log(json);
-                console.log(json?.choices[0]?.message);
-            }
+            let json = await fetchResponse.json();
+            response_generate_openai.send(json);
+            console.log(json);
+            console.log(json?.choices[0]?.message);
         } else if (fetchResponse.status === 429 && retries > 0) {
             console.log(`Out of quota, retrying in ${Math.round(timeout / 1000)}s`);
             setTimeout(() => {