AutoTokenizer

This commit is contained in:
henk717 2021-12-25 00:48:12 +01:00
parent e1cd34268b
commit 14e5fcd355
1 changed file with 0 additions and 1 deletion

View File

@ -910,7 +910,6 @@ if(not vars.model in ["InferKit", "Colab", "OAI", "ReadOnly", "TPUMeshTransforme
     except ValueError as e:
         tokenizer = GPT2TokenizerFast.from_pretrained(vars.custmodpth, cache_dir="cache/")
 with(maybe_use_float16()):
-    tokenizer = GPT2TokenizerFast.from_pretrained(vars.model, cache_dir="cache/")
     try:
         model = AutoModelForCausalLM.from_pretrained(vars.model, cache_dir="cache/", **lowmem)
     except ValueError as e: