Mirror of https://github.com/KoboldAI/KoboldAI-Client.git, synced 2025-01-20 20:38:21 +01:00
Compatibility Fixes
Rather than adding a check for vars.custmodpth or vars.model to all the other parts of the code, I opted to just set vars.custmodpth, which makes the behavior more consistent now that it always loads from the same location.
This commit is contained in:
parent f93d489971
commit 81120a0524
@@ -865,7 +865,10 @@ if(not vars.model in ["InferKit", "Colab", "OAI", "ReadOnly", "TPUMeshTransforme
     if("/" not in vars.model and vars.model.lower().startswith("gpt2")):
         lowmem = {}
         # Is CUDA available? If so, use GPU, otherwise fall back to CPU
+        # Make model path the same as the model name to make this consistent with the other loading method
+        vars.custmodpth = vars.model
+        # Download model from Huggingface if it does not exist, otherwise load locally
         if(os.path.isdir(vars.model.replace('/', '_'))):
             with(maybe_use_float16()):
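Below is a minimal sketch of the load path this change standardizes, assuming the transformers GPT2LMHeadModel and GPT2Tokenizer classes. The load_gpt2 helper and its standalone vars argument are illustrative only, not KoboldAI's actual code, which performs this inline in aiserver.py with extra handling such as maybe_use_float16().

import os
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

def load_gpt2(vars):
    # The commit sets custmodpth to the model name, so later code can always
    # read vars.custmodpth instead of choosing between it and vars.model.
    vars.custmodpth = vars.model

    local_dir = vars.model.replace('/', '_')
    if os.path.isdir(local_dir):
        # A local copy exists: load from disk.
        tokenizer = GPT2Tokenizer.from_pretrained(local_dir)
        model = GPT2LMHeadModel.from_pretrained(local_dir)
    else:
        # No local copy: let transformers download it from Hugging Face.
        tokenizer = GPT2Tokenizer.from_pretrained(vars.custmodpth)
        model = GPT2LMHeadModel.from_pretrained(vars.custmodpth)

    # Is CUDA available? If so, use GPU, otherwise fall back to CPU.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return model.to(device), tokenizer

With a namespace-style vars object (for example types.SimpleNamespace(model="gpt2-medium"), used here purely for illustration), the same single load location is used whether the weights are already on disk or still have to be downloaded, which is the consistency the commit message describes.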