From 4817a27552c360e04104b1f07f3d36b4d7fb1427 Mon Sep 17 00:00:00 2001
From: Divided by Zer0
Date: Mon, 12 Sep 2022 00:49:51 +0200
Subject: [PATCH] multicolored init

---
 aiserver.py | 18 +++++++++---------
 logger.py   | 12 +++++++++---
 2 files changed, 18 insertions(+), 12 deletions(-)

diff --git a/aiserver.py b/aiserver.py
index 49ffa02b..f28c45aa 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -416,7 +416,7 @@ app.config['SESSION_TYPE'] = 'filesystem'
 app.config['TEMPLATES_AUTO_RELOAD'] = True
 Session(app)
 socketio = SocketIO(app, async_method="eventlet")
-logger.init("Flask", status="OK")
+logger.init_ok("Flask", status="OK")
 
 old_socketio_on = socketio.on
 def new_socketio_on(*a, **k):
@@ -1405,9 +1405,10 @@ def general_startup(override_args=None):
         vars.model = "NeoCustom"
         vars.custmodpth = modpath
     elif args.model:
-        print("Welcome to KoboldAI!\nYou have selected the following Model:", vars.model)
+        logger.message(f"Welcome to KoboldAI!")
+        logger.message(f"You have selected the following Model: {vars.model}")
         if args.path:
-            print("You have selected the following path for your Model :", args.path)
+            logger.message(f"You have selected the following path for your Model: {args.path}")
             vars.custmodpth = args.path;
             vars.colaburl = args.path + "/request"; # Lets just use the same parameter to keep it simple
 #==================================================================#
@@ -2188,8 +2189,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
     if(not vars.use_colab_tpu and vars.model not in ["InferKit", "Colab", "API", "CLUSTER", "OAI", "GooseAI" , "ReadOnly", "TPUMeshTransformerGPTJ", "TPUMeshTransformerGPTNeoX"]):
         loadmodelsettings()
         loadsettings()
-        print(2)
-        print("{0}Looking for GPU support...{1}".format(colors.PURPLE, colors.END), end="")
+        logger.init("GPU support", status="Searching")
         vars.hascuda = torch.cuda.is_available()
         vars.bmsupported = (utils.HAS_ACCELERATE or vars.model_type in ("gpt_neo", "gptj", "xglm", "opt")) and not vars.nobreakmodel
         if(args.breakmodel is not None and args.breakmodel):
@@ -2202,9 +2202,9 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
         if(not vars.bmsupported and (args.breakmodel_gpulayers is not None or args.breakmodel_layers is not None or args.breakmodel_disklayers is not None)):
             print("WARNING: This model does not support hybrid generation. --breakmodel_gpulayers will be ignored.", file=sys.stderr)
         if(vars.hascuda):
-            print("{0}FOUND!{1}".format(colors.GREEN, colors.END))
+            logger.init_ok("GPU support", status="Found")
         else:
-            print("{0}NOT FOUND!{1}".format(colors.YELLOW, colors.END))
+            logger.init_warn("GPU support", status="Not Found")
 
         if args.cpu:
             vars.usegpu = False
@@ -2855,7 +2855,7 @@ def lua_startup():
         print("{0}{1}{2}".format(colors.RED, "***LUA ERROR***: ", colors.END), end="", file=sys.stderr)
         print("{0}{1}{2}".format(colors.RED, str(e).replace("\033", ""), colors.END), file=sys.stderr)
         exit(1)
-    logger.init("LUA bridge", status="OK")
+    logger.init_ok("LUA bridge", status="OK")
 
 
 def lua_log_format_name(name):
@@ -2915,7 +2915,7 @@ def load_lua_scripts():
         logger.warning("Lua engine stopped; please open 'Userscripts' and press Load to reinitialize scripts.")
         if(vars.serverstarted):
             set_aibusy(0)
-    logger.init("LUA Scripts", status="OK")
+    logger.init_ok("LUA Scripts", status="OK")
 
 #==================================================================#
 #  Print message that originates from the userscript with the given name
diff --git a/logger.py b/logger.py
index 7d4d71d3..a715426c 100644
--- a/logger.py
+++ b/logger.py
@@ -3,7 +3,7 @@ from functools import partialmethod
 from loguru import logger
 
 STDOUT_LEVELS = ["GENERATION", "PROMPT"]
-INIT_LEVELS = ["INIT"]
+INIT_LEVELS = ["INIT", "INIT_OK", "INIT_WARN", "INIT_ERR"]
 MESSAGE_LEVELS = ["MESSAGE"]
 
 def is_stdout_log(record):
@@ -28,17 +28,23 @@ def is_stderr_log(record):
 
 logfmt = "<level>{level: <10}</level> | <green>{name}</green>:<green>{function}</green>:<green>{line}</green> - <level>{message}</level>"
 genfmt = "<level>{level: <10}</level> @ <green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{message}</level>"
-initfmt = "<magenta>{level: <10}</magenta> | <level>{extra[status]: <8}</level> | <magenta>{message}</magenta>"
+initfmt = "<magenta>INIT      </magenta> | <level>{extra[status]: <8}</level> | <magenta>{message}</magenta>"
 msgfmt = "<level>{level: <10}</level> | <level>{message}</level>"
 
 logger.level("GENERATION", no=24, color="<cyan>")
 logger.level("PROMPT", no=23, color="<yellow>")
-logger.level("INIT", no=21, color="<magenta>")
+logger.level("INIT", no=21, color="<white>")
+logger.level("INIT_OK", no=21, color="<green>")
+logger.level("INIT_WARN", no=21, color="<yellow>")
+logger.level("INIT_ERR", no=21, color="<red>")
 logger.level("MESSAGE", no=20, color="<green>")
 
 logger.__class__.generation = partialmethod(logger.__class__.log, "GENERATION")
 logger.__class__.prompt = partialmethod(logger.__class__.log, "PROMPT")
 logger.__class__.init = partialmethod(logger.__class__.log, "INIT")
+logger.__class__.init_ok = partialmethod(logger.__class__.log, "INIT_OK")
+logger.__class__.init_warn = partialmethod(logger.__class__.log, "INIT_WARN")
+logger.__class__.init_err = partialmethod(logger.__class__.log, "INIT_ERR")
 logger.__class__.message = partialmethod(logger.__class__.log, "MESSAGE")
 
 config = {
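
Note for reviewers unfamiliar with the loguru idiom the patch builds on: below is a minimal standalone sketch, not part of the patch, showing how registering a level and attaching a partialmethod turns logger.init_ok(...) into a real method, and how the status keyword reaches {extra[status]}. The sink and the format string here are simplified stand-ins for the handler configuration in logger.py, not the patched code itself.

# Minimal sketch, not from the patch: mirrors the INIT_OK wiring above.
import sys
from functools import partialmethod

from loguru import logger

# Register a custom severity with its own color, then expose it as a method,
# the same pattern the patch uses for INIT_OK / INIT_WARN / INIT_ERR.
logger.level("INIT_OK", no=21, color="<green>")
logger.__class__.init_ok = partialmethod(logger.__class__.log, "INIT_OK")

# Simplified sink standing in for logger.py's handler config. loguru copies
# keyword arguments passed to a logging call into record["extra"], which is
# how status="OK" reaches {extra[status]} in initfmt.
logger.remove()
logger.add(
    sys.stderr,
    colorize=True,
    format="<magenta>INIT      </magenta> | <level>{extra[status]: <8}</level> | <magenta>{message}</magenta>",
)

logger.init_ok("Flask", status="OK")
# -> INIT       | OK       | Flask   (status column rendered in the level's green)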