From 5917737676a786b1ca43551a11d7012f2b4455f2 Mon Sep 17 00:00:00 2001
From: Henk
Date: Mon, 21 Aug 2023 13:17:30 +0200
Subject: [PATCH] Don't disable exllama

---
 modeling/inference_models/gptq_hf_torch/class.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modeling/inference_models/gptq_hf_torch/class.py b/modeling/inference_models/gptq_hf_torch/class.py
index 3d044b6f..3094dc33 100644
--- a/modeling/inference_models/gptq_hf_torch/class.py
+++ b/modeling/inference_models/gptq_hf_torch/class.py
@@ -389,7 +389,7 @@ class model_backend(HFTorchInferenceModel):
         except:
             autogptq_failed = True # Ugly hack to get it to free the VRAM of the last attempt like we do above, better suggestions welcome - Henk
         if autogptq_failed:
-            model = AutoGPTQForCausalLM.from_quantized(location, model_basename=Path(gptq_file).stem, use_safetensors=gptq_file.endswith(".safetensors"), device_map=device_map, disable_exllama=True)
+            model = AutoGPTQForCausalLM.from_quantized(location, model_basename=Path(gptq_file).stem, use_safetensors=gptq_file.endswith(".safetensors"), device_map=device_map, inject_fused_attention=False)
         # Patch in embeddings function
         def get_input_embeddings(self):
             return self.model.get_input_embeddings()
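
Note (not part of the patch): below is a minimal standalone sketch of the fallback load that this hunk changes, for readers who want to see the call in isolation. The model directory, weights filename, and device_map values are hypothetical placeholders; only the from_quantized keyword arguments mirror the patched line.

# Standalone sketch of the fallback call after this patch; the paths and
# device_map below are hypothetical placeholders, not values from KoboldAI.
from pathlib import Path

from auto_gptq import AutoGPTQForCausalLM

location = "/models/example-4bit-gptq"   # hypothetical model directory
gptq_file = "model.safetensors"          # hypothetical quantized weights file
device_map = "auto"                      # hypothetical device placement

# The old fallback passed disable_exllama=True, turning the exllama kernels
# off entirely; the patch instead keeps exllama enabled and only skips
# injecting fused attention.
model = AutoGPTQForCausalLM.from_quantized(
    location,
    model_basename=Path(gptq_file).stem,
    use_safetensors=gptq_file.endswith(".safetensors"),
    device_map=device_map,
    inject_fused_attention=False,
)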