From 8412f83ce513968ee4bd6bef4fd87759c70683f7 Mon Sep 17 00:00:00 2001
From: somebody
Date: Mon, 3 Apr 2023 18:41:18 -0500
Subject: [PATCH] Breakmodel: Fix typo

---
 modeling/inference_models/generic_hf_torch.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modeling/inference_models/generic_hf_torch.py b/modeling/inference_models/generic_hf_torch.py
index 59c6c19f..6a8964ec 100644
--- a/modeling/inference_models/generic_hf_torch.py
+++ b/modeling/inference_models/generic_hf_torch.py
@@ -252,7 +252,7 @@ class GenericHFTorchInferenceModel(HFTorchInferenceModel):
             elif utils.koboldai_vars.breakmodel:
                 # Use both RAM and VRAM (breakmodel)
                 if not self.lazy_load:
-                    self.breakmodel_device_config(model.config)
+                    self.breakmodel_device_config(self.model.config)
                 self._move_to_devices()
             elif breakmodel.disk_blocks > 0:
                 # Use disk