Streaming: Fix streaming not being cleaned up before commentator speaks

super duper critical
This commit is contained in:
somebody
2023-07-24 10:57:24 -05:00
parent 30495cf8d8
commit fc7fa991d5

View File

@@ -3280,9 +3280,6 @@ def actionsubmit(data, actionmode=0, force_submit=False, force_prompt_gen=False,
     if(koboldai_vars.aibusy):
         return
-
-    # Open up token stream
-    emit("stream_tokens", True, broadcast=True, room="UI_2")
     while(True):
         set_aibusy(1)
         koboldai_vars.actions.clear_unused_options()
@@ -3474,8 +3471,6 @@ def actionsubmit(data, actionmode=0, force_submit=False, force_prompt_gen=False,
             set_aibusy(0)
             emit('from_server', {'cmd': 'scrolldown', 'data': ''}, broadcast=True, room="UI_1")
             break
-
-    # Clean up token stream
-    emit("stream_tokens", None, broadcast=True, room="UI_2")

 def apiactionsubmit_generate(txt, minimum, maximum):
     koboldai_vars.generated_tkns = 0
@@ -3903,7 +3898,10 @@ class HordeException(Exception):
 # Send text to generator and deal with output
 #==================================================================#

 def generate(txt, minimum, maximum, found_entries=None):
+    # Open up token stream
+    emit("stream_tokens", True, broadcast=True, room="UI_2")
+
     koboldai_vars.generated_tkns = 0

     if(found_entries is None):
@@ -3940,7 +3938,10 @@ def generate(txt, minimum, maximum, found_entries=None):
             emit('from_server', {'cmd': 'errmsg', 'data': 'Error occurred during generator call; please check console.'}, broadcast=True, room="UI_1")
             logger.error(traceback.format_exc().replace("\033", ""))
             socketio.emit("error", str(e), broadcast=True, room="UI_2")
         set_aibusy(0)
+
+        # Clean up token stream
+        emit("stream_tokens", None, broadcast=True, room="UI_2")
         return

     for i in range(koboldai_vars.numseqs):
@@ -3972,7 +3973,10 @@ def generate(txt, minimum, maximum, found_entries=None):
     del genout
     gc.collect()
     torch.cuda.empty_cache()
+
+    # Clean up token stream
+    emit("stream_tokens", None, broadcast=True, room="UI_2")

     maybe_review_story()
     set_aibusy(0)