Fix generation bug with prompt shaving

The off-by-one prompt shave messed up any generations not coming from core_generate.
commit 836ae9fda7
parent 8faad0811f
Author: somebody
Date: 2022-09-25 22:07:38 -05:00


@@ -5027,7 +5027,7 @@ def core_generate(text: list, min: int, max: int, found_entries: set):
         genout = result.encoded
-        already_generated += len(genout[0]) - 1
+        already_generated += len(genout[0])
         try:
             assert already_generated <= koboldai_vars.genamt
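
For context on this hunk: the old "- 1" compensated for the old prompt shave in GenerationResult (next hunk), which left one prompt token at the front of the encoded output. A minimal sketch of that coupling, using hypothetical token counts rather than real model output (nothing below is KoboldAI code):

    # Hypothetical counts showing why the two off-by-ones had to change together.
    prompt_len, new_tokens = 3, 5

    # Old shave kept 1 prompt token, so genout[0] held new_tokens + 1 entries;
    # subtracting 1 here corrected the count of newly generated tokens.
    old_genout_len = new_tokens + 1
    assert old_genout_len - 1 == new_tokens

    # With the shave fixed, genout[0] holds only new tokens: count directly.
    fixed_genout_len = new_tokens
    assert fixed_genout_len == new_tokens
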
@@ -5111,7 +5111,7 @@ class GenerationResult:
         # Shave prompt off of encoded response when needed (HF). Decoded does
         # not return prompt.
         if output_includes_prompt:
-            self.encoded = out_batches[:, len(prompt) - 1:]
+            self.encoded = out_batches[:, len(prompt):]
         else:
             self.encoded = out_batches
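
The fix in isolation: with output_includes_prompt set, an HF-style output row is the prompt tokens followed by the new tokens, and slicing from len(prompt) - 1 left the last prompt token in the response. A runnable sketch with made-up token ids, using plain lists to stand in for rows of out_batches:

    prompt = [101, 7592, 2088]         # 3 hypothetical prompt token ids
    generated = [1996, 4248, 102]      # 3 hypothetical newly generated ids
    row = prompt + generated           # output row that includes the prompt

    old_shave = row[len(prompt) - 1:]  # [2088, 1996, 4248, 102] -- leaks 2088
    new_shave = row[len(prompt):]      # [1996, 4248, 102] -- prompt fully gone

    assert old_shave[0] == prompt[-1]  # old slice kept the last prompt token
    assert new_shave == generated

core_generate masked the leak by subtracting one from already_generated, so only generation paths outside core_generate saw the stray prompt token; removing both off-by-ones fixes every path.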