Fix high VRAM usage caused by workaround for scalar type error

0cc4m
2023-03-28 06:30:02 +00:00
parent 0f1fc46078
commit ef6fe680a9
2 changed files with 2 additions and 2 deletions

@@ -3172,7 +3172,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
             else:
                 raise RuntimeError(f"4-bit load failed. Model type {koboldai_vars.model_type} not supported in 4-bit")
 
-            model = model.float()
+            model = model.half()
         else:
             try:
                 tokenizer = AutoTokenizer.from_pretrained(koboldai_vars.custmodpth, revision=koboldai_vars.revision, cache_dir="cache", use_fast=False)
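
The one-line change matters because torch.float32 stores 4 bytes per parameter while torch.float16 stores 2, so the old model.float() workaround roughly doubled the model's VRAM footprint. Below is a minimal sketch of that arithmetic, not the KoboldAI loader itself; "gpt2" is only a placeholder model for illustration.

# Minimal sketch (assumptions: transformers installed, "gpt2" as a stand-in
# model) of why casting to half precision cuts parameter memory roughly in
# half: float32 weights take 4 bytes each, float16 weights take 2.
import torch
from transformers import AutoModelForCausalLM

def param_bytes(module: torch.nn.Module) -> int:
    # Total bytes occupied by every parameter tensor in the module.
    return sum(p.numel() * p.element_size() for p in module.parameters())

model = AutoModelForCausalLM.from_pretrained("gpt2")
print(f"float32: {param_bytes(model.float()) / 2**20:.1f} MiB")  # the old workaround
print(f"float16: {param_bytes(model.half()) / 2**20:.1f} MiB")   # the fix: ~half the memory

Per the commit message, the original .float() call existed only to dodge a scalar type error; casting to half precision still sidesteps that error while avoiding the fp32 memory cost.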