Pull upstream changes, fix conflict

0cc4m committed 2023-06-04 09:06:31 +02:00
15 changed files with 85 additions and 69 deletions

@@ -251,7 +251,7 @@ class model_backend(HFTorchInferenceModel):
         if utils.koboldai_vars.hascuda:
-            if self.usegpu:
+            if self.usegpu or self.nobreakmodel:
                 # Use just VRAM
                 self.model = self.model.half().to(utils.koboldai_vars.gpu_device)
             elif self.breakmodel:
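
For context, the hunk widens the full-GPU path: the model is cast to fp16 and moved onto a single GPU not only when `self.usegpu` is set, but also when `self.nobreakmodel` is set (i.e. layer splitting is disabled). Below is a minimal standalone sketch of that branching in plain PyTorch; the helper name `place_model` and the standalone flags are hypothetical and mirror the diff only for illustration, this is not the actual KoboldAI backend code.

```python
# Sketch only: hypothetical helper mirroring the device-placement branching
# in the diff above, written against plain PyTorch.
import torch
import torch.nn as nn


def place_model(model: nn.Module, usegpu: bool, nobreakmodel: bool,
                breakmodel: bool, gpu_device: int = 0) -> nn.Module:
    if torch.cuda.is_available():
        if usegpu or nobreakmodel:
            # Use just VRAM: half precision, whole model on one GPU.
            return model.half().to(gpu_device)
        if breakmodel:
            # Layer splitting across devices would happen here; omitted.
            return model
    # Fallback: keep the model in full precision on the CPU.
    return model.to("cpu")


if __name__ == "__main__":
    toy = nn.Linear(8, 8)  # stands in for the Hugging Face model
    toy = place_model(toy, usegpu=False, nobreakmodel=True, breakmodel=False)
    param = next(toy.parameters())
    print(param.device, param.dtype)
```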