From 386477e59c98b0ef4bd5ef0c20ba87692ccea5fc Mon Sep 17 00:00:00 2001
From: somebody
Date: Sat, 17 Sep 2022 20:47:44 -0500
Subject: [PATCH] Fix token streaming

---
 aiserver.py | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/aiserver.py b/aiserver.py
index e1a7ac12..08a07183 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -2009,11 +2009,8 @@ def patch_transformers():
 
             if not koboldai_vars.output_streaming:
                 return False
-
-            print([utils.decodenewlines(tokenizer.decode(x[-1])) for x in input_ids])
             koboldai_vars.actions.stream_tokens([utils.decodenewlines(tokenizer.decode(x[-1])) for x in input_ids])
 
-
             return False
 
     # Sets up dynamic world info scanner
@@ -4744,7 +4741,6 @@ def calcsubmit(txt):
             "TPUMeshTransformerGPTNeoX"
         ):
             legacy_generate(subtxt, min, max)
-            # generate(subtxt, min, max, found_entries=found_entries)
         elif koboldai_vars.model == "Colab":
             sendtocolab(utils.decodenewlines(tokenizer.decode(subtxt)), min, max)
         elif koboldai_vars.model == "API":
@@ -4813,11 +4809,10 @@ def calcsubmit(txt):
         ikrequest(subtxt)
 
 def legacy_generate(text: Union[str, list], min: int, max: int):
-    # Architected after oairequest
-
     koboldai_vars.lastctx = text
 
-    outputs = raw_generate(
+    outputs = tpool.execute(
+        raw_generate,
         text,
         max_length=koboldai_vars.genamt,
         do_streaming=True