mirror of
https://github.com/KoboldAI/KoboldAI-Client.git
synced 2025-06-05 21:59:24 +02:00
Chat Mode Improvements
This commit decouples single line mode; well-behaved models no longer need it since we stop at the You:. There are scenarios, however, where this potentially breaks chat mode completely or makes models more frustrating to use. Users who experience this can enable Single Line mode in the formatting menu to restore the old behavior. I have also allowed token streaming again, since the issues with it have already been resolved.
This commit is contained in:
@@ -2474,9 +2474,6 @@ def patch_transformers():
|
||||
|
||||
if not koboldai_vars.output_streaming:
|
||||
return False
|
||||
|
||||
if koboldai_vars.chatmode:
|
||||
return False
|
||||
|
||||
data = [applyoutputformatting(utils.decodenewlines(tokenizer.decode(x[-1])), no_sentence_trimming=True, no_single_line=True) for x in input_ids]
|
||||
koboldai_vars.actions.stream_tokens(data)
|
||||
@@ -6507,7 +6504,7 @@ def applyoutputformatting(txt, no_sentence_trimming=False, no_single_line=False)
|
||||
if(koboldai_vars.frmtrmspch):
|
||||
txt = utils.removespecialchars(txt, koboldai_vars)
|
||||
# Single Line Mode
|
||||
if((koboldai_vars.singleline or koboldai_vars.chatmode) and not no_single_line):
|
||||
if(koboldai_vars.singleline and not no_single_line):
|
||||
txt = utils.singlelineprocessing(txt, koboldai_vars)
|
||||
|
||||
for sub in koboldai_vars.substitutions:
|
||||
|
Reference in New Issue
Block a user