From 145a43a0000d4c502c0c8b3693fb04ff95aac072 Mon Sep 17 00:00:00 2001
From: Alephrin <84307744+Alephrin@users.noreply.github.com>
Date: Mon, 17 Jul 2023 04:53:47 -0600
Subject: [PATCH] Removed extra load_in_4bit.

---
 modeling/inference_models/generic_hf_torch/class.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/modeling/inference_models/generic_hf_torch/class.py b/modeling/inference_models/generic_hf_torch/class.py
index b51d8f66..9a59650e 100644
--- a/modeling/inference_models/generic_hf_torch/class.py
+++ b/modeling/inference_models/generic_hf_torch/class.py
@@ -80,7 +80,6 @@ class model_backend(HFTorchInferenceModel):
         if self.use_4_bit:
             self.lazy_load = False
             tf_kwargs.update({
-                "load_in_4bit": True,
                 "quantization_config":BitsAndBytesConfig(
                     load_in_4bit=True,
                     bnb_4bit_compute_dtype=torch.float16,
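
Note on the change, appended for context (not part of the patch itself): 4-bit
loading is already requested via the load_in_4bit=True field inside
BitsAndBytesConfig, so the top-level "load_in_4bit" entry in tf_kwargs is
redundant, and transformers releases from around this period raise a ValueError
when from_pretrained() receives both the kwarg and a quantization_config. Below
is a minimal sketch of the call pattern the fix leaves in place; the model id
is a placeholder chosen for illustration, not one used by this repository.

    import torch
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig

    # 4-bit settings are expressed once, inside BitsAndBytesConfig; the patch
    # removes the redundant top-level "load_in_4bit" entry from tf_kwargs so
    # that from_pretrained() sees only this config object.
    quantization_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_compute_dtype=torch.float16,
    )

    model = AutoModelForCausalLM.from_pretrained(
        "facebook/opt-125m",  # placeholder model id, for illustration only
        quantization_config=quantization_config,
        # No separate load_in_4bit=True kwarg here: transformers rejects
        # passing both the flag and a quantization_config at the same time.
    )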