From 836ae9fda7f925bfa4b3df1ecbbed31441475903 Mon Sep 17 00:00:00 2001
From: somebody
Date: Sun, 25 Sep 2022 22:07:38 -0500
Subject: [PATCH] Fix generation bug with prompt shaving

Messed up any generations not coming from core_generate
---
 aiserver.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/aiserver.py b/aiserver.py
index 6170209e..0e4f3bc1 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -5027,7 +5027,7 @@ def core_generate(text: list, min: int, max: int, found_entries: set):
 
         genout = result.encoded
 
-        already_generated += len(genout[0]) - 1
+        already_generated += len(genout[0])
 
         try:
             assert already_generated <= koboldai_vars.genamt
@@ -5111,7 +5111,7 @@ class GenerationResult:
 
         # Shave prompt off of encoded response when needed (HF). Decoded does
         # not return prompt.
         if output_includes_prompt:
-            self.encoded = out_batches[:, len(prompt) - 1:]
+            self.encoded = out_batches[:, len(prompt):]
         else:
             self.encoded = out_batches
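
For context, both hunks remove the same off-by-one offset. When a HuggingFace-style generate call returns sequences that still include the prompt, the prompt must be sliced off at exactly len(prompt); slicing at len(prompt) - 1 leaves the final prompt token in the "generated" output. The sketch below reproduces the behavior with plain Python lists standing in for the real out_batches tensors; the token IDs and variable names are illustrative, not KoboldAI's actual data.

# Minimal sketch of the off-by-one this patch fixes. Token values and
# names here are hypothetical stand-ins for the real tensors.
prompt = [101, 7592, 2088]          # 3 prompt token IDs
new_tokens = [2003, 2307, 102]      # 3 freshly generated token IDs
out_batch = prompt + new_tokens     # output that "includes the prompt"

# Old shave: starts one token early, so the last prompt token leaks through.
buggy = out_batch[len(prompt) - 1:]
assert buggy == [2088, 2003, 2307, 102]   # 4 tokens, one is really prompt

# Fixed shave: removes exactly the prompt, leaving only generated tokens.
fixed = out_batch[len(prompt):]
assert fixed == new_tokens

# With the shave exact, the companion hunk counts the full batch length
# rather than len(...) - 1 when tracking how many tokens were generated.
already_generated = 0
already_generated += len(fixed)
assert already_generated == 3

Reading the two hunks together suggests the old offsets canceled each other out inside core_generate (the shave left one stray prompt token, and the counter subtracted one to compensate), which is consistent with the commit message: only callers consuming GenerationResult.encoded outside core_generate saw the stray token, and the fix makes both the slice and the count exact.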