Mirror of https://github.com/KoboldAI/KoboldAI-Client.git (synced 2025-02-01 10:06:44 +01:00)
Potential fix for tokenizer using a fallback
Commit 606c276f9d (parent db9a94ca2a)
@@ -1410,6 +1410,7 @@ def load_model(use_gpu=True, gpu_layers=None, initial_load=False, online_model="
     global torch
     global model_config
     global GPT2TokenizerFast
+    global tokenizer
     print("Loading vars.model: {} vars.custmodpth: {}".format(vars.model, vars.custmodpth))
     vars.noai = False
     if not initial_load:
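For context, the hunk only shows the added global declaration; the tokenizer fallback logic itself lives elsewhere in load_model and is not part of this diff. The sketch below is a minimal, hypothetical illustration (the function body, model name, and fallback choice are assumptions, not KoboldAI's actual code) of why the declaration matters: without `global tokenizer`, an assignment made during the fallback would only bind a function-local name, leaving the module-level tokenizer used by the rest of the application unchanged.

# Minimal sketch, not the repository's actual implementation.
from transformers import AutoTokenizer, GPT2TokenizerFast

tokenizer = None  # module-level tokenizer referenced elsewhere in the app

def load_model(model_name="gpt2"):
    # Without this global statement, the assignments below would create a
    # local variable and the module-level tokenizer would stay None.
    global tokenizer
    try:
        tokenizer = AutoTokenizer.from_pretrained(model_name)
    except Exception:
        # Hypothetical fallback: use the GPT-2 tokenizer when the model
        # provides no usable tokenizer files.
        tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

if __name__ == "__main__":
    load_model("gpt2")
    print(type(tokenizer).__name__)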