Mirror of https://github.com/KoboldAI/KoboldAI-Client.git
Fix generation bug with prompt shaving
The previous prompt-shaving logic messed up any generations not coming from core_generate.
@@ -5027,7 +5027,7 @@ def core_generate(text: list, min: int, max: int, found_entries: set):
 
         genout = result.encoded
 
-        already_generated += len(genout[0]) - 1
+        already_generated += len(genout[0])
 
         try:
             assert already_generated <= koboldai_vars.genamt
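Taken together with the slice change in the second hunk, the counter fix is a plain off-by-one. Below is a minimal sketch of the arithmetic, using plain lists in place of token-ID tensors; the values and the standalone variable names are illustrative, not taken from the repository.

# Sketch of the counter fix, assuming (as the second hunk arranges) that
# genout now holds only newly generated token IDs, prompt shaved off.
genout = [[200, 201, 202]]   # batch of one sequence, 3 new tokens
genamt = 3                   # requested token budget (koboldai_vars.genamt)

already_generated = 0
already_generated += len(genout[0])   # fixed: counts all 3 generated tokens
assert already_generated <= genamt    # holds exactly at the limit

# The old "len(genout[0]) - 1" only balanced out while the slice in
# GenerationResult left one prompt token in genout; once that slice is
# corrected, the "- 1" would undercount on every pass through the loop.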
@@ -5111,7 +5111,7 @@ class GenerationResult:
         # Shave prompt off of encoded response when needed (HF). Decoded does
         # not return prompt.
         if output_includes_prompt:
-            self.encoded = out_batches[:, len(prompt) - 1:]
+            self.encoded = out_batches[:, len(prompt):]
         else:
             self.encoded = out_batches
 
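To see what the slice change does, here is a minimal sketch with a small torch tensor. The token IDs are made up for illustration; the premise, which the shaved branch relies on, is that HF decoder-only generation returns the prompt followed by the continuation.

import torch

prompt = [5, 6, 7]                               # 3 prompt token IDs
out_batches = torch.tensor([[5, 6, 7, 42, 43]])  # prompt + 2 generated tokens

old = out_batches[:, len(prompt) - 1:]  # old slice: keeps last prompt token
new = out_batches[:, len(prompt):]      # fixed slice: generated tokens only

assert old.tolist() == [[7, 42, 43]]    # the leading 7 is prompt leakage
assert new.tolist() == [[42, 43]]       # exactly the generated tokens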
|