From bde31217f164a3aadc4282913012378a886d6058 Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Sat, 11 Mar 2023 12:15:58 -0500
Subject: [PATCH] improve model None check

---
 aiserver.py | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/aiserver.py b/aiserver.py
index 3ec8f284..c14ac730 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -3134,16 +3134,14 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
                     model = load_quant(koboldai_vars.custmodpth, os.environ['LLAMA_4BIT'], 4)
                 else:
                     raise RuntimeError("It looks like your environment variable for LLAMA_4BIT is not set (the model path).\nPlease set this variable before proceeding.")
-                    exit(1)
+
+                if model is None:
+                    raise RuntimeError("Model returned 'None'. This is not expected to happen, but due to this, the model will not load.")
             except Exception as e:
                 if("out of memory" in traceback.format_exc().lower()):
                     raise RuntimeError("One of your GPUs ran out of memory when KoboldAI tried to load your model.")
                 # model = GPTNeoForCausalLM.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache", **lowmem)
-
-        if model is None:
-            raise RuntimeError("Model returned 'None'. This is not expected to happen, but due to this, the model will not load. Exiting.")
-            exit(1)
         elif(os.path.isdir("models/{}".format(koboldai_vars.model.replace('/', '_')))):
             try:
                 tokenizer = AutoTokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache", use_fast=False)
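
For context, below is a minimal Python sketch of the control flow this patch produces. The patch makes two fixes: it drops `exit(1)` calls that sat after `raise` statements (and were therefore unreachable), and it moves the `model is None` check inside the `try` block, right after the load attempt, instead of in a dedented `if` between the `try/except` and the `elif` that follows it. The names `load_quant` and `koboldai_vars` come from the patch context; the stub definitions and the final re-raise are illustrative assumptions, not the actual aiserver.py code.

```python
import os
import traceback

# Illustrative stubs; the real load_quant and koboldai_vars live in
# aiserver.py and its 4-bit loader, and behave differently.
class _Vars:
    custmodpth = "/path/to/model"

koboldai_vars = _Vars()

def load_quant(path, pt_path, bits):
    # Stands in for the real quantized-model loader.
    return None

def load_model_sketch():
    try:
        if "LLAMA_4BIT" in os.environ:
            model = load_quant(koboldai_vars.custmodpth, os.environ["LLAMA_4BIT"], 4)
        else:
            # raise already aborts this branch, so the old exit(1) that
            # followed it could never run; the patch removes it.
            raise RuntimeError("It looks like your environment variable for LLAMA_4BIT is not set (the model path).\nPlease set this variable before proceeding.")

        # The None check now runs immediately after the load attempt,
        # inside the try block. Previously it sat between the try/except
        # and the following elif, so the elif chained onto the None check
        # instead of the original model-path condition.
        if model is None:
            raise RuntimeError("Model returned 'None'. This is not expected to happen, but due to this, the model will not load.")
        return model
    except Exception:
        if "out of memory" in traceback.format_exc().lower():
            raise RuntimeError("One of your GPUs ran out of memory when KoboldAI tried to load your model.")
        raise  # illustrative: re-raise anything else rather than swallow it
```

Raising instead of calling `exit(1)` leaves the decision of how to report the failure to the caller; an unreachable `exit(1)` after a `raise` only obscures that intent.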