Potential fix for tokenizer using a fallback

ebolam 2022-06-09 09:01:40 -04:00
parent db9a94ca2a
commit 606c276f9d


@@ -1410,6 +1410,7 @@ def load_model(use_gpu=True, gpu_layers=None, initial_load=False, online_model="
     global torch
     global model_config
     global GPT2TokenizerFast
+    global tokenizer
     print("Loading vars.model: {} vars.custmodpth: {}".format(vars.model, vars.custmodpth))
     vars.noai = False
     if not initial_load:
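
The added `global tokenizer` line lets `load_model` assign to the module-level `tokenizer` variable; without it, an assignment inside the function would create a local name and leave the shared tokenizer unset. The hunk itself does not show the fallback logic the commit title refers to, but the pattern is typically: try the model's own tokenizer first, and fall back to the stock GPT-2 tokenizer if that fails. Below is a minimal sketch of that idea; the helper name `load_tokenizer_with_fallback` and the use of `AutoTokenizer` are assumptions for illustration, not the exact code from this commit.

from transformers import AutoTokenizer, GPT2TokenizerFast

tokenizer = None  # module-level, shared with load_model via `global tokenizer`

def load_tokenizer_with_fallback(model_path):
    # Hypothetical helper: prefer the tokenizer shipped with the model,
    # fall back to the stock GPT-2 tokenizer if loading fails.
    global tokenizer
    try:
        tokenizer = AutoTokenizer.from_pretrained(model_path)
    except Exception:
        tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
    return tokenizer

Declaring the name `global` before assignment is what makes this fallback visible to the rest of the module, which appears to be the point of the one-line change above.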