escaping gen/prompt logs so that they stay on one line

This commit is contained in:
Divided by Zer0 2022-09-12 11:49:59 +02:00
parent 3ed39f9863
commit 68aaef9090

View File

@ -4280,9 +4280,7 @@ def apiactionsubmit_generate(txt, minimum, maximum):
if not vars.quiet: if not vars.quiet:
logger.debug(f"Prompt Min:{minimum}, Max:{maximum}") logger.debug(f"Prompt Min:{minimum}, Max:{maximum}")
split_prompt = utils.decodenewlines(tokenizer.decode(txt)).split('\n') logger.prompt(utils.decodenewlines(tokenizer.decode(txt)).encode("unicode_escape").decode("utf-8"))
for line in split_prompt:
logger.prompt(line)
# Clear CUDA cache if using GPU # Clear CUDA cache if using GPU
if(vars.hascuda and (vars.usegpu or vars.breakmodel)): if(vars.hascuda and (vars.usegpu or vars.breakmodel)):
@ -4310,9 +4308,7 @@ def apiactionsubmit_tpumtjgenerate(txt, minimum, maximum):
if not vars.quiet: if not vars.quiet:
logger.debug(f"Prompt Min:{minimum}, Max:{maximum}") logger.debug(f"Prompt Min:{minimum}, Max:{maximum}")
split_prompt = utils.decodenewlines(tokenizer.decode(txt)).split('\n') logger.prompt(utils.decodenewlines(tokenizer.decode(txt)).encode("unicode_escape").decode("utf-8"))
for line in split_prompt:
logger.prompt(line)
vars._actions = vars.actions vars._actions = vars.actions
vars._prompt = vars.prompt vars._prompt = vars.prompt
@ -4825,9 +4821,7 @@ def generate(txt, minimum, maximum, found_entries=None):
if not vars.quiet: if not vars.quiet:
logger.debug(f"Prompt Min:{minimum}, Max:{maximum}") logger.debug(f"Prompt Min:{minimum}, Max:{maximum}")
split_prompt = utils.decodenewlines(tokenizer.decode(txt)).split('\n') logger.prompt(utils.decodenewlines(tokenizer.decode(txt)).encode("unicode_escape").decode("utf-8"))
for line in split_prompt:
logger.prompt(line)
# Store context in memory to use it for comparison with generated content # Store context in memory to use it for comparison with generated content
vars.lastctx = utils.decodenewlines(tokenizer.decode(txt)) vars.lastctx = utils.decodenewlines(tokenizer.decode(txt))
@ -4889,9 +4883,7 @@ def generate(txt, minimum, maximum, found_entries=None):
#==================================================================# #==================================================================#
def genresult(genout, flash=True, ignore_formatting=False): def genresult(genout, flash=True, ignore_formatting=False):
if not vars.quiet: if not vars.quiet:
split_gen = genout.split('\n') logger.generation(genout.encode("unicode_escape").decode("utf-8"))
for line in split_gen:
logger.generation(line)
# Format output before continuing # Format output before continuing
if not ignore_formatting: if not ignore_formatting:
@ -4926,9 +4918,7 @@ def genselect(genout):
result["generated_text"] = applyoutputformatting(result["generated_text"]) result["generated_text"] = applyoutputformatting(result["generated_text"])
if not vars.quiet: if not vars.quiet:
logger.info(f"Generation Result {i}") logger.info(f"Generation Result {i}")
split_gen = result["generated_text"].split('\n') logger.generation(result["generated_text"].encode("unicode_escape").decode("utf-8"))
for line in split_gen:
logger.generation(line)
i += 1 i += 1
# Add the options to the actions metadata # Add the options to the actions metadata
@ -5268,9 +5258,7 @@ def tpumtjgenerate(txt, minimum, maximum, found_entries=None):
if not vars.quiet: if not vars.quiet:
logger.debug(f"Prompt Min:{minimum}, Max:{maximum}") logger.debug(f"Prompt Min:{minimum}, Max:{maximum}")
split_prompt = utils.decodenewlines(tokenizer.decode(txt)).split('\n') logger.prompt(utils.decodenewlines(tokenizer.decode(txt)).encode("unicode_escape").decode("utf-8"))
for line in split_prompt:
logger.prompt(line)
vars._actions = vars.actions vars._actions = vars.actions
vars._prompt = vars.prompt vars._prompt = vars.prompt