Mirror of https://github.com/KoboldAI/KoboldAI-Client.git, synced 2025-06-05 21:59:24 +02:00
Not sure why this fixes it, or why we need this line......
@@ -3006,7 +3006,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
     if(koboldai_vars.hascuda):
         if(koboldai_vars.usegpu):
             koboldai_vars.modeldim = get_hidden_size_from_model(model)
-            model = model.half().to(koboldai_vars.gpu_device)
+            #model = model.half().to(koboldai_vars.gpu_device)
             generator = model.generate
         elif(koboldai_vars.breakmodel):  # Use both RAM and VRAM (breakmodel)
             koboldai_vars.modeldim = get_hidden_size_from_model(model)
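For context, the commented-out line is the standard PyTorch idiom for casting a model to float16 and moving it onto the GPU. Below is a minimal sketch of both loading paths, assuming a Hugging Face transformers model and an available CUDA device; the model name and device index are placeholders for illustration, not taken from this repository, and the explanation in the comments is a guess consistent with the commit message's own uncertainty.

import torch
from transformers import AutoModelForCausalLM

device = torch.device("cuda:0")  # stand-in for koboldai_vars.gpu_device

# What the removed line did: .half() casts every parameter to float16,
# and .to(device) copies the weights onto the GPU.
model = AutoModelForCausalLM.from_pretrained("gpt2")
model = model.half().to(device)

# If the model has already been loaded in float16 and placed on the GPU
# (e.g. via torch_dtype=torch.float16 at load time), the explicit
# cast-and-move above is redundant, which may be why skipping it avoids
# the problem the commit message refers to.
model = AutoModelForCausalLM.from_pretrained(
    "gpt2", torch_dtype=torch.float16
).to(device)

generator = model.generate  # as in the surrounding diff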