Restore Lowmem

Accidentally got replaced in one of my test runs
henk717 2021-12-23 18:50:01 +01:00
parent 25a6e489c1
commit cae0f279e2
1 changed file with 6 additions and 6 deletions


@@ -876,25 +876,25 @@ if(not vars.model in ["InferKit", "Colab", "OAI", "ReadOnly", "TPUMeshTransforme
     with(maybe_use_float16()):
         tokenizer = GPT2TokenizerFast.from_pretrained(vars.custmodpth, cache_dir="cache/")
         try:
-            model = AutoModelForCausalLM.from_pretrained(vars.custmodpth, cache_dir="cache/", **maybe_low_cpu_mem_usage())
+            model = AutoModelForCausalLM.from_pretrained(vars.custmodpth, cache_dir="cache/", **lowmem)
         except ValueError as e:
-            model = GPTNeoForCausalLM.from_pretrained(vars.custmodpth, cache_dir="cache/", **maybe_low_cpu_mem_usage())
+            model = GPTNeoForCausalLM.from_pretrained(vars.custmodpth, cache_dir="cache/", **lowmem)
 elif(os.path.isdir(vars.model.replace('/', '_'))):
     with(maybe_use_float16()):
         tokenizer = GPT2TokenizerFast.from_pretrained(vars.model.replace('/', '_'), cache_dir="cache/")
         try:
-            model = AutoModelForCausalLM.from_pretrained(vars.model.replace('/', '_'), cache_dir="cache/", **maybe_low_cpu_mem_usage())
+            model = AutoModelForCausalLM.from_pretrained(vars.model.replace('/', '_'), cache_dir="cache/", **lowmem)
         except ValueError as e:
-            model = GPTNeoForCausalLM.from_pretrained(vars.model.replace('/', '_'), cache_dir="cache/", **maybe_low_cpu_mem_usage())
+            model = GPTNeoForCausalLM.from_pretrained(vars.model.replace('/', '_'), cache_dir="cache/", **lowmem)
 else:
     print("Model does not exist locally, attempting to download from Huggingface...")
     tokenizer = GPT2TokenizerFast.from_pretrained(vars.model, cache_dir="cache/")
     with(maybe_use_float16()):
         tokenizer = GPT2TokenizerFast.from_pretrained(vars.model, cache_dir="cache/")
         try:
-            model = AutoModelForCausalLM.from_pretrained(vars.model, cache_dir="cache/", **maybe_low_cpu_mem_usage())
+            model = AutoModelForCausalLM.from_pretrained(vars.model, cache_dir="cache/", **lowmem)
         except ValueError as e:
-            model = GPTNeoForCausalLM.from_pretrained(vars.model, cache_dir="cache/", **maybe_low_cpu_mem_usage())
+            model = GPTNeoForCausalLM.from_pretrained(vars.model, cache_dir="cache/", **lowmem)
 model = model.half()
 import shutil
 shutil.rmtree("cache/")
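
For context, the hunk swaps the per-call-site maybe_low_cpu_mem_usage() back for the precomputed lowmem kwargs dict at all six from_pretrained() call sites. Neither definition appears in this hunk, so the following is a minimal sketch of what the two helpers and lowmem plausibly look like; the function bodies, the 4.11.0 version gate, and the startup assignment are assumptions reconstructed from the names, not code from this commit. The low_cpu_mem_usage flag itself is a real from_pretrained() keyword in Hugging Face Transformers.

import contextlib

import packaging.version
import torch
import transformers

def maybe_low_cpu_mem_usage() -> dict:
    # Assumed body: only newer Transformers releases understand the
    # low_cpu_mem_usage keyword in from_pretrained(), so pass it
    # conditionally and fall back to no extra kwargs otherwise.
    if packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.11.0"):
        return {"low_cpu_mem_usage": True}
    return {}

@contextlib.contextmanager
def maybe_use_float16(always_use=False):
    # Assumed body: temporarily make float16 the default torch dtype so
    # checkpoint tensors are materialized in half precision while
    # loading, then restore the previous default.
    if always_use or torch.cuda.is_available():
        old_dtype = torch.get_default_dtype()
        torch.set_default_dtype(torch.float16)
        try:
            yield
        finally:
            torch.set_default_dtype(old_dtype)
    else:
        yield

# Assumed definition: lowmem is a dict of extra from_pretrained() kwargs
# computed once at startup; the commit restores passing this dict at the
# load sites instead of calling the helper each time.
lowmem = maybe_low_cpu_mem_usage()

Precomputing lowmem once would let other startup logic clear or override the kwargs in a single place, rather than re-deciding at every load site; presumably that is why the commit restores it.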