From 22e7baec524caf4d113f671f923b3df905803400 Mon Sep 17 00:00:00 2001
From: Henk
Date: Tue, 18 Jul 2023 21:44:34 +0200
Subject: [PATCH] Permit CPU layers on 4-bit (Worse than GGML)

---
 modeling/inference_models/generic_hf_torch/class.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/modeling/inference_models/generic_hf_torch/class.py b/modeling/inference_models/generic_hf_torch/class.py
index 93def5a6..0bb954e3 100644
--- a/modeling/inference_models/generic_hf_torch/class.py
+++ b/modeling/inference_models/generic_hf_torch/class.py
@@ -88,7 +88,8 @@ class model_backend(HFTorchInferenceModel):
                     load_in_4bit=True,
                     bnb_4bit_compute_dtype=torch.float16,
                     bnb_4bit_use_double_quant=True,
-                    bnb_4bit_quant_type='nf4'
+                    bnb_4bit_quant_type='nf4',
+                    llm_int8_enable_fp32_cpu_offload=True
                 ),
             })
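
Note (not part of the patch): a minimal sketch of how the added flag is used when a model is loaded through transformers. The model id and memory limits below are placeholders chosen for illustration; the quantization settings mirror the hunk above. With llm_int8_enable_fp32_cpu_offload=True, bitsandbytes keeps modules mapped to the CPU in fp32 instead of refusing to load when the device map spills layers off the GPU.

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Same quantization settings as the patched class.py hunk above.
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type='nf4',
    llm_int8_enable_fp32_cpu_offload=True,  # allow fp32 offload of CPU-mapped modules
)

# Placeholder model id and memory caps; the tight GPU limit forces some
# layers onto the CPU, which is the situation the new flag permits (at the
# cost of speed, hence "Worse than GGML" in the subject line).
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-1.3b",
    quantization_config=quant_config,
    device_map="auto",
    max_memory={0: "4GiB", "cpu": "30GiB"},
)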