From 271e4ed06bf6b807d3394a5ccf7bbca8e515e343 Mon Sep 17 00:00:00 2001 From: Henk Date: Wed, 11 Jan 2023 21:33:25 +0100 Subject: [PATCH] Chat Mode Improvements This commit decouples single line mode from chat mode; well-behaved models no longer need it, since we stop at the You:. There are scenarios, however, where this change potentially breaks chat mode completely or makes models more frustrating to use. Users who experience this can enable Single Line mode in the formatting menu to restore the old behavior. I have also allowed token streaming again, since the issues with it have already been resolved. --- aiserver.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/aiserver.py b/aiserver.py index 6f30c1f0..99696559 100644 --- a/aiserver.py +++ b/aiserver.py @@ -2474,9 +2474,6 @@ def patch_transformers(): if not koboldai_vars.output_streaming: return False - - if koboldai_vars.chatmode: - return False data = [applyoutputformatting(utils.decodenewlines(tokenizer.decode(x[-1])), no_sentence_trimming=True, no_single_line=True) for x in input_ids] koboldai_vars.actions.stream_tokens(data) @@ -6507,7 +6504,7 @@ def applyoutputformatting(txt, no_sentence_trimming=False, no_single_line=False) if(koboldai_vars.frmtrmspch): txt = utils.removespecialchars(txt, koboldai_vars) # Single Line Mode - if((koboldai_vars.singleline or koboldai_vars.chatmode) and not no_single_line): + if(koboldai_vars.singleline and not no_single_line): txt = utils.singlelineprocessing(txt, koboldai_vars) for sub in koboldai_vars.substitutions: