Not sure why this fixes it, or why we need this line...

ebolam
2022-12-01 11:04:43 -05:00
parent 76a0bb71f0
commit 4dfbf80929

@@ -3006,7 +3006,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
     if(koboldai_vars.hascuda):
         if(koboldai_vars.usegpu):
             koboldai_vars.modeldim = get_hidden_size_from_model(model)
-            model = model.half().to(koboldai_vars.gpu_device)
+            #model = model.half().to(koboldai_vars.gpu_device)
             generator = model.generate
         elif(koboldai_vars.breakmodel): # Use both RAM and VRAM (breakmodel)
             koboldai_vars.modeldim = get_hidden_size_from_model(model)
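
One plausible reading of why commenting this out helps: if the loading path already placed the model on the GPU in half precision (for example via device_map in newer transformers), the extra model.half().to(...) call is redundant and can double peak memory or error out. Below is a minimal sketch of a guarded alternative under that assumption; move_to_gpu_if_needed is a hypothetical helper, and gpu_device stands in for koboldai_vars.gpu_device from the diff above.

import torch

def move_to_gpu_if_needed(model, gpu_device):
    # Hypothetical guard (assumption, not the commit's actual logic):
    # only cast and move when the weights are still full-precision on
    # the CPU; a model already dispatched by the loader (e.g. loaded
    # with device_map="auto") is left untouched.
    param = next(model.parameters())
    if param.device.type == "cpu" and param.dtype == torch.float32:
        model = model.half().to(gpu_device)
    return model

Under this assumption, simply deleting the line works whenever the loader has already done the placement, which would match the observed fix without explaining the root cause.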