Properly fall back when w.ai model doesn't support streaming

This commit is contained in:
SillyLossy
2023-05-27 21:42:28 +03:00
parent a415deb8fa
commit be64b3469f

View File

@ -610,7 +610,10 @@ async function sendOpenAIRequest(type, openai_msgs_tosend, signal) {
try {
if (stream) {
generatePromise.then(() => { finished = true; }).catch(handleWindowError);
generatePromise.then((res) => {
content = res[0]?.message?.content;
finished = true;
}).catch(handleWindowError);
return windowStreamingFunction;
} else {
const result = await generatePromise;