Merge branch 'KoboldAI:main' into united

henk717 2021-09-25 17:44:51 +02:00 committed by GitHub
commit 01339f0b87
1 changed file with 2 additions and 2 deletions


@@ -398,7 +398,7 @@ print("{0}OK!{1}".format(colors.GREEN, colors.END))
 if(not vars.model in ["InferKit", "Colab", "OAI", "ReadOnly"]):
     if(not vars.noai):
         print("{0}Initializing transformers, please wait...{1}".format(colors.PURPLE, colors.END))
-        from transformers import pipeline, GPT2Tokenizer, GPT2LMHeadModel, GPTNeoForCausalLM, GPTNeoModel, AutoModel
+        from transformers import pipeline, GPT2Tokenizer, GPT2LMHeadModel, GPTNeoForCausalLM, GPTNeoModel, AutoModelForCausalLM
         # If custom GPT Neo model was chosen
         if(vars.model == "NeoCustom"):
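
For context (not part of the commit), the distinction this import change captures: `AutoModel` resolves to the bare transformer without a language-modeling head, while `AutoModelForCausalLM` attaches the head that `.generate()` and logits-based sampling need. A minimal sketch, using a small GPT-Neo checkpoint as a stand-in:

```python
# Minimal sketch (not from the commit): bare AutoModel vs. AutoModelForCausalLM.
from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer

name = "EleutherAI/gpt-neo-125M"  # stand-in checkpoint for illustration

tok = AutoTokenizer.from_pretrained(name)
ids = tok("Hello", return_tensors="pt").input_ids

bare = AutoModel.from_pretrained(name)
print(bare(ids).last_hidden_state.shape)  # hidden states only; no vocabulary logits

lm = AutoModelForCausalLM.from_pretrained(name)
print(lm.generate(ids, max_length=10))    # works: the LM head produces logits
```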
@@ -466,7 +466,7 @@ if(not vars.model in ["InferKit", "Colab", "OAI", "ReadOnly"]):
             generator = pipeline('text-generation', model=vars.model, device=0)
         elif(vars.breakmodel):  # Use both RAM and VRAM (breakmodel)
             import breakmodel
-            model = AutoModel.from_pretrained(vars.model)
+            model = AutoModelForCausalLM.from_pretrained(vars.model)
             n_layers = model.config.num_layers
             breakmodel.total_blocks = n_layers
             model.half().to('cpu')
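
Note on the breakmodel path (illustrative, not the repo's code): the full causal-LM is loaded once, its block count is read from the config, and the fp16 weights are parked in system RAM so breakmodel can later move individual blocks to VRAM. A sketch of just the transformers side, with a stand-in model name:

```python
# Hedged sketch of the load pattern above; `breakmodel` itself is
# KoboldAI-specific, so only the generic transformers calls are shown.
from transformers import AutoModelForCausalLM

model_name = "EleutherAI/gpt-neo-1.3B"  # stand-in for vars.model

model = AutoModelForCausalLM.from_pretrained(model_name)

# GPT-Neo exposes its block count as config.num_layers; other
# architectures may name it num_hidden_layers instead.
n_layers = model.config.num_layers

# Cast to fp16 and keep the weights in system RAM; the layer-splitting
# code can then move a chosen subset of blocks onto the GPU.
model.half().to('cpu')
```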