From 8dd7b93a6c94d0fcd8dede9b6d3bb743c7f20369 Mon Sep 17 00:00:00 2001
From: Henk
Date: Sat, 22 Jul 2023 16:29:55 +0200
Subject: [PATCH] HF's workaround breaks stuff

---
 modeling/inference_models/generic_hf_torch/class.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/modeling/inference_models/generic_hf_torch/class.py b/modeling/inference_models/generic_hf_torch/class.py
index 1cc1a373..25d49214 100644
--- a/modeling/inference_models/generic_hf_torch/class.py
+++ b/modeling/inference_models/generic_hf_torch/class.py
@@ -81,7 +81,6 @@ class model_backend(HFTorchInferenceModel):
 
         tf_kwargs = {
             "low_cpu_mem_usage": True,
-            "pretraining_tp": 1,
         }
 
         if self.quantization == "8bit":
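
For context, below is a minimal sketch of how a kwargs dict like tf_kwargs is typically forwarded to Hugging Face's loader after this change. The actual call site in class.py is outside this patch, so the AutoModelForCausalLM.from_pretrained call and the placeholder model id are assumptions for illustration, not the project's exact code.

    # Sketch (not taken from the repository): passing tf_kwargs to the HF loader.
    # "pretraining_tp": 1 was Hugging Face's workaround for Llama-2's
    # tensor-parallel pretraining; this patch drops it because, per the commit
    # message, the workaround breaks loading for some models.
    from transformers import AutoModelForCausalLM

    tf_kwargs = {
        "low_cpu_mem_usage": True,
    }

    model = AutoModelForCausalLM.from_pretrained(
        "gpt2",  # placeholder model id, purely illustrative
        **tf_kwargs,
    )

With the override removed, models simply use whatever pretraining_tp value (if any) their own config defines.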