improve model None check

Author: catboxanon
Date: 2023-03-11 12:15:58 -05:00
Committer: GitHub
Parent: 1808b0d2ec
Commit: bde31217f1


@@ -3134,16 +3134,14 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
                             model = load_quant(koboldai_vars.custmodpth, os.environ['LLAMA_4BIT'], 4)
                         else:
                             raise RuntimeError("It looks like your environment variable for LLAMA_4BIT is not set (the model path).\nPlease set this variable before proceeding.")
-                            exit(1)
+                        if model is None:
+                            raise RuntimeError("Model returned 'None'. This is not expected to happen, but due to this, the model will not load.")
                     except Exception as e:
                         if("out of memory" in traceback.format_exc().lower()):
                             raise RuntimeError("One of your GPUs ran out of memory when KoboldAI tried to load your model.")
                         # model = GPTNeoForCausalLM.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache", **lowmem)
-                    if model is None:
-                        raise RuntimeError("Model returned 'None'. This is not expected to happen, but due to this, the model will not load. Exiting.")
-                        exit(1)
                 elif(os.path.isdir("models/{}".format(koboldai_vars.model.replace('/', '_')))):
                     try:
                         tokenizer = AutoTokenizer.from_pretrained("models/{}".format(koboldai_vars.model.replace('/', '_')), revision=koboldai_vars.revision, cache_dir="cache", use_fast=False)
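
The change is worth spelling out: an exit(1) placed directly after a raise can never execute, so both removed calls were dead code, and moving the "model is None" check inside the try block means an unexpected None result is reported through the same except handler as every other load failure. Below is a minimal sketch of that pattern; the names load_quantized and load_model are illustrative stand-ins, not KoboldAI's actual API.

# Minimal sketch of the pattern this commit applies (illustrative names,
# not KoboldAI's actual API): validate the load result inside the try block
# so every failure mode funnels through one error handler, and never place
# code after a bare raise, since it is unreachable.
import traceback

def load_quantized(path):
    """Stand-in for a loader such as load_quant(); may return None on failure."""
    return None  # simulate the unexpected-None case

def load_model(path):
    try:
        model = load_quantized(path)
        if model is None:
            # Raising here, rather than after the except block, keeps a
            # single error path; no exit(1) is needed after the raise.
            raise RuntimeError("Model returned 'None'. The model will not load.")
        return model
    except Exception:
        if "out of memory" in traceback.format_exc().lower():
            raise RuntimeError("A GPU ran out of memory while loading the model.")
        raise

Dropping the exit(1) calls changes nothing at runtime: Python never reaches a statement after raise, so the process terminates through the uncaught exception instead.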