Mirror of https://github.com/SillyTavern/SillyTavern.git, synced 2025-06-05 21:59:27 +02:00
Properly fall back when the w.ai model doesn't support streaming
@@ -610,7 +610,10 @@ async function sendOpenAIRequest(type, openai_msgs_tosend, signal) {
     try {
         if (stream) {
-            generatePromise.then(() => { finished = true; }).catch(handleWindowError);
+            generatePromise.then((res) => {
+                content = res[0]?.message?.content;
+                finished = true;
+            }).catch(handleWindowError);
             return windowStreamingFunction;
         } else {
             const result = await generatePromise;
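
The change makes the non-streaming resolution path carry the generated text: when the model cannot stream, no partial chunks ever arrive, so the resolved promise value is the only place the completion appears, and the .then handler now copies it into `content` before setting `finished`. Below is a minimal sketch of the surrounding flow under stated assumptions, not SillyTavern's actual code: it reuses the names visible in the diff (content, finished, generatePromise, windowStreamingFunction, handleWindowError, openai_msgs_tosend) and assumes a window.ai-style generateText(input, options) call with an onStreamResult callback; the wrapper name sendViaWindowAi, the polling interval, and the generator body are hypothetical.

// Sketch only: assumes the window.ai extension API shape; names not present
// in the diff are hypothetical.
async function sendViaWindowAi(openai_msgs_tosend, stream) {
    let content = '';
    let finished = false;

    function handleWindowError(err) {
        console.error(err);
        finished = true; // let the generator exit instead of spinning forever
    }

    // generateText() resolves with an array of outputs and, for models that
    // support streaming, also invokes onStreamResult with partial chunks.
    const generatePromise = window.ai.generateText(
        { messages: openai_msgs_tosend },
        {
            onStreamResult: (res, err) => {
                if (err) return handleWindowError(err);
                content += res?.message?.content ?? '';
            },
        },
    );

    // Generator handed to the caller in streaming mode: it keeps yielding
    // whatever text has accumulated so far and stops once the request settles.
    async function* windowStreamingFunction() {
        while (!finished) {
            await new Promise((resolve) => setTimeout(resolve, 100));
            yield content;
        }
        yield content; // final yield also delivers the non-streamed fallback text
    }

    if (stream) {
        // The fallback from this commit: if the model never streams, the
        // resolved value is the only place the completion appears, so copy it
        // into `content` before flagging completion.
        generatePromise.then((res) => {
            content = res[0]?.message?.content;
            finished = true;
        }).catch(handleWindowError);

        return windowStreamingFunction;
    } else {
        const result = await generatePromise;
        return result[0]?.message?.content;
    }
}

The point of the design is that the same windowStreamingFunction is returned whether or not the model streams; the consumer polls `content` until `finished`, so a model that never streamed still surfaces its output through the final yield.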