ebolam
2022-03-07 11:57:33 -05:00
parent 49fc854e55
commit c50fe77a7d
2 changed files with 2 additions and 2 deletions


@@ -40,6 +40,7 @@ import gc
 import lupa
 import torch
+from transformers import StoppingCriteria, GPT2TokenizerFast, GPT2LMHeadModel, GPTNeoForCausalLM, GPTNeoModel, AutoModelForCausalLM, AutoTokenizer
 
 # KoboldAI
 import fileops
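
Hoisting the import to module scope is more than a style change: names bound by an import statement inside a function are locals of that function, invisible to the rest of the module, whereas a module-level import registers them as globals once, and sys.modules caching makes any later import of the same package cheap. A minimal sketch of the scoping difference (the function names are illustrative, not from this codebase):

    def load_lazily():
        # Binds GPT2TokenizerFast as a *local* name; only this
        # function can use it.
        from transformers import GPT2TokenizerFast
        return GPT2TokenizerFast

    def use_elsewhere():
        # Raises NameError unless the same import also runs at module
        # level, which is what moving it to the top of the file provides.
        return GPT2TokenizerFast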
@@ -437,7 +438,6 @@ def device_config(config):
 def move_model_to_devices(model):
     global generator
     if(not vars.breakmodel):
         if(vars.usegpu):
             model = model.half().to(vars.gpu_device)
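
For context on the surviving lines: model.half().to(vars.gpu_device) is the standard PyTorch idiom for casting a model's parameters to float16 and then moving them onto the chosen CUDA device. A self-contained sketch with a toy module standing in for the language model:

    import torch
    import torch.nn as nn

    # Toy stand-in for the real model: cast parameters to fp16,
    # then move them to the GPU (falling back to CPU if absent).
    model = nn.Linear(8, 8)
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    model = model.half().to(device)
    print(next(model.parameters()).dtype)   # torch.float16
    print(next(model.parameters()).device)  # cuda:0 (or cpu)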
@@ -1054,7 +1054,6 @@ def load_model(use_gpu=True, key='', gpu_layers=None):
     if(not vars.use_colab_tpu and vars.model not in ["InferKit", "Colab", "OAI", "GooseAI" , "ReadOnly", "TPUMeshTransformerGPTJ"]):
         if(not vars.noai):
             print("{0}Initializing transformers, please wait...{1}".format(colors.PURPLE, colors.END))
-            from transformers import StoppingCriteria, GPT2TokenizerFast, GPT2LMHeadModel, GPTNeoForCausalLM, GPTNeoModel, AutoModelForCausalLM, AutoTokenizer
             for m in ("GPTJModel", "XGLMModel"):
                 try:
                     globals()[m] = getattr(__import__("transformers"), m)
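
The loop left in place handles classes that exist only in newer transformers releases: __import__("transformers") returns the top-level module, getattr fetches the class by name, and assigning into globals() publishes it as a module global, with the enclosing try presumably swallowing the AttributeError on older releases. An equivalent, arguably more explicit spelling using importlib (a sketch, not the project's code):

    import importlib

    # Register optional model classes as module globals, skipping any
    # that this transformers version does not provide.
    transformers = importlib.import_module("transformers")
    for name in ("GPTJModel", "XGLMModel"):
        cls = getattr(transformers, name, None)  # None on older releases
        if cls is not None:
            globals()[name] = cls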


@@ -2618,6 +2618,7 @@ $(document).ready(function(){
 	});
 	load_model_close.on("click", function(ev) {
+		$("#modellayers").addClass("hidden");
 		hideLoadModelPopup();
 	});