From c50fe77a7df86a7be5a300970903388e24630b8b Mon Sep 17 00:00:00 2001
From: ebolam
Date: Mon, 7 Mar 2022 11:57:33 -0500
Subject: [PATCH] Load Fix

Hoist the transformers imports in aiserver.py from inside load_model()
to module scope so the imported classes are available on every load
path, and hide the #modellayers panel when the load model popup is
closed.
---
 aiserver.py           | 3 +--
 static/application.js | 1 +
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/aiserver.py b/aiserver.py
index 20ec110e..d44ab824 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -40,6 +40,7 @@ import gc
 
 import lupa
 import torch
+from transformers import StoppingCriteria, GPT2TokenizerFast, GPT2LMHeadModel, GPTNeoForCausalLM, GPTNeoModel, AutoModelForCausalLM, AutoTokenizer
 
 # KoboldAI
 import fileops
@@ -437,7 +438,6 @@ def device_config(config):
 
 def move_model_to_devices(model):
     global generator
-
     if(not vars.breakmodel):
         if(vars.usegpu):
             model = model.half().to(vars.gpu_device)
@@ -1054,7 +1054,6 @@ def load_model(use_gpu=True, key='', gpu_layers=None):
     if(not vars.use_colab_tpu and vars.model not in ["InferKit", "Colab", "OAI", "GooseAI" , "ReadOnly", "TPUMeshTransformerGPTJ"]):
         if(not vars.noai):
             print("{0}Initializing transformers, please wait...{1}".format(colors.PURPLE, colors.END))
-            from transformers import StoppingCriteria, GPT2TokenizerFast, GPT2LMHeadModel, GPTNeoForCausalLM, GPTNeoModel, AutoModelForCausalLM, AutoTokenizer
             for m in ("GPTJModel", "XGLMModel"):
                 try:
                     globals()[m] = getattr(__import__("transformers"), m)
diff --git a/static/application.js b/static/application.js
index bceff5e1..8c238a4f 100644
--- a/static/application.js
+++ b/static/application.js
@@ -2618,6 +2618,7 @@ $(document).ready(function(){
 	});
 
 	load_model_close.on("click", function(ev) {
+		$("#modellayers").addClass("hidden");
 		hideLoadModelPopup();
 	});
 
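
Note on the aiserver.py change: a "from transformers import ..." executed
inside load_model() binds those names only in that function's local scope,
so any other code path that references them afterwards raises NameError;
hoisting the import to module scope makes the names visible everywhere.
A minimal sketch of that failure mode (hypothetical standalone module, not
KoboldAI code; json stands in for transformers):

    def load_model():
        from json import dumps            # bound only inside load_model
        return dumps({"loaded": True})

    def reload_model():
        return dumps({"reloaded": True})  # NameError: never bound at module scope

    load_model()                          # works
    try:
        reload_model()                    # fails despite the earlier import
    except NameError as e:
        print("reload failed:", e)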