Mirror of https://github.com/KoboldAI/KoboldAI-Client.git
Fix for bug that crashes generation when using soft prompts
@@ -515,6 +515,7 @@ class koboldai_vars(object):
        else:
            tokens = []
            for item in context:
                if item['type'] != 'soft_prompt':
                    tokens.extend([x[0] for x in item['tokens']])

        if send_context:
@@ -524,6 +525,9 @@ class koboldai_vars(object):
        logger.debug("Token Budget: {}. Used Tokens: {}".format(token_budget, used_tokens))
        if return_text:
            return "".join([x['text'] for x in context])
        print(self.sp_length)
        print(context)
        print(tokens)
        return tokens, used_tokens, used_tokens+self.genamt, set(used_world_info)

    def is_model_torch(self) -> bool:
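For context, below is a minimal, self-contained Python sketch of the token-assembly logic the first hunk guards. It is an illustration under stated assumptions, not KoboldAI's actual code: the helper name collect_tokens is invented, the token IDs are made up, and context entries are assumed to be dicts shaped like those in the diff, with a 'type' key and a 'tokens' list of (token_id, text) pairs. The crash presumably arose because soft-prompt entries do not carry ordinary vocabulary token IDs, so the added check skips them when flattening the context into a token list.

# A minimal sketch of the patched logic; assumptions noted above.

def collect_tokens(context):
    """Flatten context entries into a flat list of token IDs,
    skipping 'soft_prompt' entries (mirrors the guard added
    in the first hunk of the diff)."""
    tokens = []
    for item in context:
        if item['type'] != 'soft_prompt':
            # Each element of item['tokens'] is assumed to be a
            # (token_id, text) pair; keep only the IDs.
            tokens.extend([x[0] for x in item['tokens']])
    return tokens

# Usage: the soft-prompt entry contributes nothing to the ID list.
# All IDs here are fabricated for illustration; 50257+ stands in for
# placeholder IDs that fall outside a real tokenizer vocabulary.
context = [
    {'type': 'soft_prompt', 'tokens': [(50257, ''), (50258, '')]},
    {'type': 'prompt', 'tokens': [(15496, 'Hello'), (995, ' world')]},
]
print(collect_tokens(context))  # -> [15496, 995]

The design point appears to be that the returned token list and the token budget should count only real vocabulary tokens, with the soft prompt's length tracked separately (note the self.sp_length debug print added in the second hunk).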