Add OPT support

Author: 0cc4m
Date: 2023-03-29 13:25:06 +00:00
Parent: ef6fe680a9
Commit: 8d008b87a6


@@ -94,6 +94,7 @@ sys.path.insert(0, os.path.abspath(Path("repos/gptq")))
 from gptj import load_quant as gptj_load_quant
 from gptneox import load_quant as gptneox_load_quant
 from llama import load_quant as llama_load_quant
+from opt import load_quant as opt_load_quant
 monkey_patched_4bit = False
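
The hunk above pulls in the GPTQ OPT loader alongside the existing GPT-J, GPT-NeoX, and LLaMA ones. A minimal sketch of the pattern, assuming only the repos/gptq layout visible in the hunk header (the surrounding lines here are illustrative, not part of the commit):

import os
import sys
from pathlib import Path

# repos/gptq is placed on sys.path (see the hunk header), so each model
# family's quantized loader can be imported from its own module there.
sys.path.insert(0, os.path.abspath(Path("repos/gptq")))

# Every module exposes a function named load_quant; aliasing it per family
# keeps the names from clashing at the call site.
from opt import load_quant as opt_load_quant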
@@ -3169,6 +3170,9 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
elif koboldai_vars.model_type == "llama":
model = llama_load_quant(koboldai_vars.custmodpth, path_4bit, 4, -1)
tokenizer = LlamaTokenizer.from_pretrained(koboldai_vars.custmodpth)
elif koboldai_vars.model_type == "opt":
model = opt_load_quant(koboldai_vars.custmodpth, path_4bit, 4)
tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.custmodpth)
else:
raise RuntimeError(f"4-bit load failed. Model type {koboldai_vars.model_type} not supported in 4-bit")