Change AutoModel to AutoModelForCausalLM

This fixes breakmodel mode for the official models from the model
selection menu.
Gnome Ann 2021-09-25 11:41:15 -04:00
parent f8bc7eabae
commit 5893e495b6
1 changed file with 2 additions and 2 deletions
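For context: in Hugging Face transformers, AutoModel.from_pretrained() loads the bare transformer without the language-modeling head, so its forward pass returns hidden states rather than vocabulary logits and cannot drive text generation; AutoModelForCausalLM loads the same weights with the head attached. A minimal sketch of the difference (the checkpoint name is illustrative, not taken from this commit):

# Minimal sketch, assuming a GPT-Neo checkpoint; the model name is
# illustrative and not part of this commit.
from transformers import AutoModel, AutoModelForCausalLM

base = AutoModel.from_pretrained("EleutherAI/gpt-neo-125M")           # -> GPTNeoModel
lm = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M")  # -> GPTNeoForCausalLM

# Only the causal-LM variant carries the head that maps hidden states
# to vocabulary logits, which generation needs:
print(hasattr(base, "lm_head"), hasattr(lm, "lm_head"))  # False True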


@@ -397,7 +397,7 @@ print("{0}OK!{1}".format(colors.GREEN, colors.END))
 if(not vars.model in ["InferKit", "Colab", "OAI", "ReadOnly"]):
     if(not vars.noai):
         print("{0}Initializing transformers, please wait...{1}".format(colors.PURPLE, colors.END))
-        from transformers import pipeline, GPT2Tokenizer, GPT2LMHeadModel, GPTNeoForCausalLM, GPTNeoModel, AutoModel
+        from transformers import pipeline, GPT2Tokenizer, GPT2LMHeadModel, GPTNeoForCausalLM, GPTNeoModel, AutoModelForCausalLM
         # If custom GPT Neo model was chosen
         if(vars.model == "NeoCustom"):
@@ -460,7 +460,7 @@ if(not vars.model in ["InferKit", "Colab", "OAI", "ReadOnly"]):
             generator = pipeline('text-generation', model=vars.model, device=0)
         elif(vars.breakmodel): # Use both RAM and VRAM (breakmodel)
             import breakmodel
-            model = AutoModel.from_pretrained(vars.model)
+            model = AutoModelForCausalLM.from_pretrained(vars.model)
             n_layers = model.config.num_layers
             breakmodel.total_blocks = n_layers
             model.half().to('cpu')
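A note on the unchanged context line below the fix: model.config.num_layers is the attribute name used by GPT-Neo configs; other architectures name the same value differently (GPT-2 configs use n_layer, BERT-style configs use num_hidden_layers). A hypothetical, more defensive lookup, not part of this commit, could look like:

# Hypothetical helper, not in this commit: resolves the layer count
# across config conventions (GPT-Neo: num_layers, GPT-2: n_layer,
# BERT-style: num_hidden_layers).
def layer_count(config):
    for attr in ("num_layers", "n_layer", "num_hidden_layers"):
        if hasattr(config, attr):
            return getattr(config, attr)
    raise AttributeError("config has no recognized layer-count attribute")

As written, the commit keeps num_layers, which holds for GPT-Neo checkpoints.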