From 2729b7764047b7c1d35f7a20e5900d61147fe598 Mon Sep 17 00:00:00 2001
From: 0cc4m
Date: Sun, 2 Apr 2023 10:32:19 +0200
Subject: [PATCH 1/4] Add offload.py adapted from llama_inference_offload.py,
 with multi-gpu support and some improvements. Not yet functional, and still
 just supports Llama

---
 aiserver.py | 17 +++++++++++++++--
 repos/gptq  |  2 +-
 2 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/aiserver.py b/aiserver.py
index e7c789ac..82992461 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -96,6 +96,7 @@ from gptj import load_quant as gptj_load_quant
 from gptneox import load_quant as gptneox_load_quant
 from llama import load_quant as llama_load_quant
 from opt import load_quant as opt_load_quant
+from offload import load_quant_offload
 
 monkey_patched_4bit = False
 
@@ -3137,6 +3138,12 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
         if(koboldai_vars.model_type == "gpt2"):
             lowmem = {}
             koboldai_vars.lazy_load = False # Also, lazy loader doesn't support GPT-2 models
+
+        gpu_layers_list = [int(l) for l in gpu_layers.split(",")]
+        offload_4bit = use_4_bit and sum(gpu_layers_list) < utils.num_layers(model_config)
+
+        if offload_4bit:
+            koboldai_vars.lazy_load = False
 
         # If we're using torch_lazy_loader, we need to get breakmodel config
         # early so that it knows where to load the individual model tensors
@@ -3175,7 +3182,10 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
                     model = gptneox_load_quant(koboldai_vars.custmodpth, path_4bit, 4, groupsize)
                     tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.custmodpth)
                 elif koboldai_vars.model_type == "llama":
-                    model = llama_load_quant(koboldai_vars.custmodpth, path_4bit, 4, groupsize)
+                    if offload_4bit:
+                        model = load_quant_offload(llama_load_quant, koboldai_vars.custmodpth, path_4bit, 4, groupsize, gpu_layers_list)
+                    else:
+                        model = llama_load_quant(koboldai_vars.custmodpth, path_4bit, 4, groupsize)
                     tokenizer = LlamaTokenizer.from_pretrained(koboldai_vars.custmodpth)
                 elif koboldai_vars.model_type == "opt":
                     model = opt_load_quant(koboldai_vars.custmodpth, path_4bit, 4, groupsize)
@@ -3286,7 +3296,10 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
         patch_causallm(model)
 
         if(koboldai_vars.hascuda):
-            if(koboldai_vars.usegpu):
+            if offload_4bit:
+                koboldai_vars.modeldim = get_hidden_size_from_model(model)
+                generator = model.generate
+            elif(koboldai_vars.usegpu):
                 koboldai_vars.modeldim = get_hidden_size_from_model(model)
                 if not use_4_bit:
                     model = model.half().to(koboldai_vars.gpu_device)
diff --git a/repos/gptq b/repos/gptq
index 954b3218..f8bc2886 160000
--- a/repos/gptq
+++ b/repos/gptq
@@ -1 +1 @@
-Subproject commit 954b32183adda2acd437a3ab0683a28ca3c7e4c9
+Subproject commit f8bc2886cb2e2aaa704ea02404c2ff3841eb6fcf

From e742083703ea8111379492c75e62f9dfffd54a28 Mon Sep 17 00:00:00 2001
From: 0cc4m
Date: Sun, 2 Apr 2023 11:17:29 +0200
Subject: [PATCH 2/4] Fix multi-gpu-offloading

---
 repos/gptq | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/repos/gptq b/repos/gptq
index f8bc2886..971a5785 160000
--- a/repos/gptq
+++ b/repos/gptq
@@ -1 +1 @@
-Subproject commit f8bc2886cb2e2aaa704ea02404c2ff3841eb6fcf
+Subproject commit 971a5785a356460f2073b0141da0c1e0b8fdcbf6

From c8d00b7a10fd48f31f9d3fc4f4010f5481c772d4 Mon Sep 17 00:00:00 2001
From: 0cc4m
Date: Sun, 2 Apr 2023 18:36:31 +0200
Subject: [PATCH 3/4] Add CPU offloading support for GPT-NeoX, GPT-J and OPT

---
 aiserver.py | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/aiserver.py b/aiserver.py
index 82992461..2365f58b 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -3144,6 +3144,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
 
         if offload_4bit:
             koboldai_vars.lazy_load = False
+            print("4-bit CPU offloader active")
 
         # If we're using torch_lazy_loader, we need to get breakmodel config
         # early so that it knows where to load the individual model tensors
@@ -3176,10 +3177,16 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
                 print(f"Trying to load {koboldai_vars.model_type} model in 4-bit")
 
                 if koboldai_vars.model_type == "gptj":
-                    model = gptj_load_quant(koboldai_vars.custmodpth, path_4bit, 4, groupsize)
+                    if offload_4bit:
+                        model = load_quant_offload(gptj_load_quant, koboldai_vars.custmodpth, path_4bit, 4, groupsize, gpu_layers_list)
+                    else:
+                        model = gptj_load_quant(koboldai_vars.custmodpth, path_4bit, 4, groupsize)
                     tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.custmodpth)
                 elif koboldai_vars.model_type == "gpt_neox":
-                    model = gptneox_load_quant(koboldai_vars.custmodpth, path_4bit, 4, groupsize)
+                    if offload_4bit:
+                        model = load_quant_offload(gptneox_load_quant, koboldai_vars.custmodpth, path_4bit, 4, groupsize, gpu_layers_list)
+                    else:
+                        model = gptneox_load_quant(koboldai_vars.custmodpth, path_4bit, 4, groupsize)
                     tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.custmodpth)
                 elif koboldai_vars.model_type == "llama":
                     if offload_4bit:
@@ -3188,7 +3195,10 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
                         model = llama_load_quant(koboldai_vars.custmodpth, path_4bit, 4, groupsize)
                     tokenizer = LlamaTokenizer.from_pretrained(koboldai_vars.custmodpth)
                 elif koboldai_vars.model_type == "opt":
-                    model = opt_load_quant(koboldai_vars.custmodpth, path_4bit, 4, groupsize)
+                    if offload_4bit:
+                        model = load_quant_offload(opt_load_quant, koboldai_vars.custmodpth, path_4bit, 4, groupsize, gpu_layers_list)
+                    else:
+                        model = opt_load_quant(koboldai_vars.custmodpth, path_4bit, 4, groupsize)
                     tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.custmodpth)
                 else:
                     raise RuntimeError(f"4-bit load failed. Model type {koboldai_vars.model_type} not supported in 4-bit")

From ec4177a6d6cf3549f3aebffc1a54b4799c506657 Mon Sep 17 00:00:00 2001
From: 0cc4m
Date: Mon, 3 Apr 2023 06:50:36 +0200
Subject: [PATCH 4/4] Remove cudatoolkit-dev and gcc/gxx 9 from conda env
 because they didn't resolve on Windows

---
 environments/huggingface.yml | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/environments/huggingface.yml b/environments/huggingface.yml
index 71d26e9c..b48c2547 100644
--- a/environments/huggingface.yml
+++ b/environments/huggingface.yml
@@ -11,9 +11,6 @@ dependencies:
   - pytorch=1.11.*
   - python=3.8.*
   - cudatoolkit=11.1
-  - cudatoolkit-dev=11.1
-  - gcc=9.*
-  - gxx=9.*
   - eventlet=0.33.3
   - dnspython=2.2.1
   - markdown
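
The series wires load_quant_offload() into every supported architecture, but the new offload.py itself is not part of these hunks. Below is a minimal sketch, inferred only from the call sites above (loader function, model path, 4-bit checkpoint, wbits, groupsize, per-GPU layer list), of how such a wrapper could split the quantized layers between GPUs and CPU; the module attribute paths and function body are assumptions, not the actual implementation.

import torch

def load_quant_offload(load_quant_func, model_path, checkpoint, wbits, groupsize, gpu_layers):
    # Load the whole 4-bit model on the CPU first, using the architecture-specific
    # loader that was passed in (llama_load_quant, gptj_load_quant, ...).
    model = load_quant_func(model_path, checkpoint, wbits, groupsize)

    # Assumed Llama-style module layout; GPT-J/NeoX/OPT use different attribute paths.
    layers = model.model.layers

    # gpu_layers is the parsed per-GPU layer count list, e.g. [20, 10] places
    # 20 layers on cuda:0, 10 on cuda:1, and leaves the remainder on the CPU.
    device_map = []
    for device_idx, count in enumerate(gpu_layers):
        device_map += [torch.device(f"cuda:{device_idx}")] * count
    device_map += [torch.device("cpu")] * (len(layers) - len(device_map))

    for layer, device in zip(layers, device_map):
        layer.to(device)

    # A working offloader must also move the hidden states (and tensor kwargs such as
    # the attention mask) to each layer's device during forward, e.g. via forward
    # pre-hooks or a patched decoder loop; that part is omitted from this sketch.
    return model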