Mirror of https://github.com/KoboldAI/KoboldAI-Client.git, synced 2025-02-09 08:18:40 +01:00

Initial UI based model loading. Includes all parameters except breakmodel chunks, engine # for OAI, and the ngrok URL for Google Colab.

parent f6c95f18fa
commit 2ddf45141b

415	aiserver.py
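The commit drives model loading over the existing socket.io channel: the browser sends 'list_model', 'selectmodel' and 'load_model' commands on the 'message' event, and the server answers 'list_model' with a 'show_model_menu' payload built from the new model_menu dict. A rough sketch of that round trip from a stand-alone Python client follows; the python-socketio package and the localhost:5000 address are assumptions for illustration, and only the command names and payload fields come from the diff below.

# Sketch only: exercises the new model-loading commands from outside the browser.
# Assumes the python-socketio client package and a KoboldAI server on localhost:5000;
# the 'cmd' names and payload fields mirror the diff below.
import socketio

sio = socketio.Client()

@sio.on("from_server")
def from_server(msg):
    if msg.get("cmd") == "show_model_menu":
        # msg["data"] is a list of [label, action, vram, is_menu] rows from model_menu
        for label, action, vram, is_menu in msg["data"]:
            print(label, action, vram, "submenu" if is_menu else "model")

sio.connect("http://localhost:5000")
# Ask for the top-level menu (what the new Load Model button does)
sio.emit("message", {"cmd": "list_model", "data": "mainmenu"})
# Pick a model, then ask the server to load it
sio.emit("message", {"cmd": "selectmodel", "data": "KoboldAI/GPT-Neo-2.7B-Picard"})
sio.emit("message", {"cmd": "load_model", "use_gpu": True, "key": ""})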
aiserver.py
@@ -39,6 +39,8 @@ import gc
 
 import lupa
 
+import torch
+
 # KoboldAI
 import fileops
 import gensettings
@@ -67,71 +69,67 @@ class colors:
     UNDERLINE = '\033[4m'
 
 # AI models
-mainmenu = [
-    ["Load a model from its directory", "NeoCustom", ""],
-    ["Load an old GPT-2 model (eg CloverEdition)", "GPT2Custom", ""],
-    ["Skein 6B (Hybrid)", "KoboldAI/GPT-J-6B-Skein", "16GB"],
-    ["Janeway 6B (Novel)", "KoboldAI/GPT-J-6B-Janeway", "16GB"],
-    ["Adventure 6B", "KoboldAI/GPT-J-6B-Adventure", "16GB"],
-    ["Lit 6B (NSFW)", "hakurei/lit-6B", "16GB"],
-    ["Shinen 6B (NSFW)", "KoboldAI/GPT-J-6B-Shinen", "16GB"],
-    ["C1 6B (Chatbot)", "hakurei/c1-6B", "16GB"],
-    ["Janeway Neo 2.7B (Novel)", "KoboldAI/GPT-Neo-2.7B-Janeway", "8GB"],
-    ["Janeway FSD 2.7B (Novel)", "KoboldAI/fairseq-dense-2.7B-Janeway", "8GB"],
-    ["Adventure 2.7B", "KoboldAI/GPT-Neo-2.7B-AID", "8GB"],
-    ["Picard 2.7B (Novel)", "KoboldAI/GPT-Neo-2.7B-Picard", "8GB"],
-    ["Horni 2.7B (NSFW)", "KoboldAI/GPT-Neo-2.7B-Horni", "8GB"],
-    ["Horni-LN 2.7B (Novel)", "KoboldAI/GPT-Neo-2.7B-Horni-LN", "8GB"],
-    ["Shinen 2.7B (NSFW)", "KoboldAI/GPT-Neo-2.7B-Shinen", "8GB"],
-    ["Untuned GPT-Neo/J", "gptneolist", ""],
-    ["Untuned Fairseq Dense", "fsdlist", ""],
-    ["Untuned XGLM", "xglmlist", ""],
-    ["Untuned GPT2", "gpt2list", ""],
-    ["Online Services", "apilist", ""],
-    ["Read Only (No AI)", "ReadOnly", ""]
+model_menu = {'mainmenu': [
+    ["Load a model from its directory", "NeoCustom", "", False],
+    ["Load an old GPT-2 model (eg CloverEdition)", "GPT2Custom", "", False],
+    ["Skein 6B (Hybrid)", "KoboldAI/GPT-J-6B-Skein", "16GB", False],
+    ["Janeway 6B (Novel)", "KoboldAI/GPT-J-6B-Janeway", "16GB", False],
+    ["Adventure 6B", "KoboldAI/GPT-J-6B-Adventure", "16GB", False],
+    ["Lit 6B (NSFW)", "hakurei/lit-6B", "16GB", False],
+    ["Shinen 6B (NSFW)", "KoboldAI/GPT-J-6B-Shinen", "16GB", False],
+    ["C1 6B (Chatbot)", "hakurei/c1-6B", "16GB", False],
+    ["Janeway Neo 2.7B (Novel)", "KoboldAI/GPT-Neo-2.7B-Janeway", "8GB", False],
+    ["Janeway FSD 2.7B (Novel)", "KoboldAI/fairseq-dense-2.7B-Janeway", "8GB", False],
+    ["Adventure 2.7B", "KoboldAI/GPT-Neo-2.7B-AID", "8GB", False],
+    ["Picard 2.7B (Novel)", "KoboldAI/GPT-Neo-2.7B-Picard", "8GB", False],
+    ["Horni 2.7B (NSFW)", "KoboldAI/GPT-Neo-2.7B-Horni", "8GB", False],
+    ["Horni-LN 2.7B (Novel)", "KoboldAI/GPT-Neo-2.7B-Horni-LN", "8GB", False],
+    ["Shinen 2.7B (NSFW)", "KoboldAI/GPT-Neo-2.7B-Shinen", "8GB", False],
+    ["Untuned GPT-Neo/J", "gptneolist", "", True],
+    ["Untuned Fairseq Dense", "fsdlist", "", True],
+    ["Untuned XGLM", "xglmlist", "", True],
+    ["Untuned GPT2", "gpt2list", "", True],
+    ["Online Services", "apilist", "", True],
+    ["Read Only (No AI)", "ReadOnly", "", False]
+    ],
+    'gptneolist': [
+        ["GPT-J 6B", "EleutherAI/gpt-j-6B", "16GB", False],
+        ["GPT-Neo 2.7B", "EleutherAI/gpt-neo-2.7B", "8GB", False],
+        ["GPT-Neo 1.3B", "EleutherAI/gpt-neo-1.3B", "6GB", False],
+        ["Return to Main Menu", "mainmenu", "", True],
+        ],
+    'gpt2list': [
+        ["GPT-2 XL", "gpt2-xl", "6GB", False],
+        ["GPT-2 Large", "gpt2-large", "4GB", False],
+        ["GPT-2 Med", "gpt2-medium", "2GB", False],
+        ["GPT-2", "gpt2", "2GB", False],
+        ["Return to Main Menu", "mainmenu", "", True],
+        ],
+    'fsdlist': [
+        ["Fairseq Dense 13B", "KoboldAI/fairseq-dense-13B", "32GB", False],
+        ["Fairseq Dense 6.7B", "KoboldAI/fairseq-dense-6.7B", "16GB", False],
+        ["Fairseq Dense 2.7B", "KoboldAI/fairseq-dense-2.7B", "8GB", False],
+        ["Fairseq Dense 1.3B", "KoboldAI/fairseq-dense-1.3B", "6GB", False],
+        ["Fairseq Dense 355M", "KoboldAI/fairseq-dense-355M", "", False],
+        ["Fairseq Dense 125M", "KoboldAI/fairseq-dense-125M", "", False],
+        ["Return to Main Menu", "Return", "", True],
+        ],
+    'xglmlist': [
+        ["XGLM 4.5B (Larger Dataset)", "facebook/xglm-4.5B", "", False],
+        ["XGLM 7.5B", "facebook/xglm-7.5B", "", False],
+        ["XGLM 2.9B", "facebook/xglm-2.9B", "", False],
+        ["XGLM 1.7B", "facebook/xglm-1.7B", "", False],
+        ["XGLM 564M", "facebook/xglm-564M", "", False],
+        ["Return to Main Menu", "mainmenu", "", True],
+        ],
+    'apilist': [
+        ["OpenAI API (requires API key)", "OAI", "", False],
+        ["InferKit API (requires API key)", "InferKit", "", False],
+        ["KoboldAI Server API (Old Google Colab)", "Colab", "", False],
+        ["Return to Main Menu", "mainmenu", "", True],
     ]
+    }
 
-gptneolist = [
-    ["GPT-J 6B", "EleutherAI/gpt-j-6B", "16GB"],
-    ["GPT-Neo 2.7B", "EleutherAI/gpt-neo-2.7B", "8GB"],
-    ["GPT-Neo 1.3B", "EleutherAI/gpt-neo-1.3B", "6GB"],
-    ["Return to Main Menu", "Return", ""],
-]
-
-gpt2list = [
-    ["GPT-2 XL", "gpt2-xl", "6GB"],
-    ["GPT-2 Large", "gpt2-large", "4GB"],
-    ["GPT-2 Med", "gpt2-medium", "2GB"],
-    ["GPT-2", "gpt2", "2GB"],
-    ["Return to Main Menu", "Return", ""],
-]
-
-fsdlist = [
-    ["Fairseq Dense 13B", "KoboldAI/fairseq-dense-13B", "32GB"],
-    ["Fairseq Dense 6.7B", "KoboldAI/fairseq-dense-6.7B", "16GB"],
-    ["Fairseq Dense 2.7B", "KoboldAI/fairseq-dense-2.7B", "8GB"],
-    ["Fairseq Dense 1.3B", "KoboldAI/fairseq-dense-1.3B", "6GB"],
-    ["Fairseq Dense 355M", "KoboldAI/fairseq-dense-355M", ""],
-    ["Fairseq Dense 125M", "KoboldAI/fairseq-dense-125M", ""],
-    ["Return to Main Menu", "Return", ""],
-]
-
-xglmlist = [
-    ["XGLM 4.5B (Larger Dataset)", "facebook/xglm-4.5B", ""],
-    ["XGLM 7.5B", "facebook/xglm-7.5B", ""],
-    ["XGLM 2.9B", "facebook/xglm-2.9B", ""],
-    ["XGLM 1.7B", "facebook/xglm-1.7B", ""],
-    ["XGLM 564M", "facebook/xglm-564M", ""],
-    ["Return to Main Menu", "Return", ""],
-]
-
-apilist = [
-    ["GooseAI API (requires API key)", "GooseAI", ""],
-    ["OpenAI API (requires API key)", "OAI", ""],
-    ["InferKit API (requires API key)", "InferKit", ""],
-    ["KoboldAI Server API (Old Google Colab)", "Colab", ""],
-    ["Return to Main Menu", "Return", ""],
-]
 # Variables
 class vars:
     lastact = "" # The last action received from the user
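Each entry in the new model_menu carries a fourth boolean that the UI uses to tell navigation items from loadable models: True means the entry opens another key of model_menu (so the client sends 'list_model' again), False means it is a model or loader to submit with 'selectmodel'. A minimal illustration of how such a row is read follows; the helper name describe_entry is not part of the commit.

# Illustration only: how a [label, action, vram, is_menu] row of model_menu is meant to be read.
# describe_entry is a hypothetical helper, not something this commit adds.
def describe_entry(entry):
    label, action, vram, is_menu = entry
    if is_menu:
        # 'action' names another model_menu key (e.g. 'gptneolist'); the UI
        # requests it with {'cmd': 'list_model', 'data': action}.
        return "%s -> sub-menu '%s'" % (label, action)
    # Otherwise 'action' is a model identifier or loader (NeoCustom, GPT2Custom, OAI, ...)
    # that the UI submits with 'selectmodel' before sending 'load_model'.
    note = " (about %s of VRAM)" % vram if vram else ""
    return "%s -> load '%s'%s" % (label, action, note)

print(describe_entry(["Untuned GPT-Neo/J", "gptneolist", "", True]))
print(describe_entry(["Picard 2.7B (Novel)", "KoboldAI/GPT-Neo-2.7B-Picard", "8GB", False]))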
@@ -261,6 +259,9 @@ utils.vars = vars
 #==================================================================#
 # Function to get model selection at startup
 #==================================================================#
+def sendModelSelection(menu="mainmenu"):
+    emit('from_server', {'cmd': 'show_model_menu', 'data': model_menu[menu], 'menu': menu}, broadcast=True)
+
 def getModelSelection(modellist):
     print(" # Model\t\t\t\t\t\tVRAM\n ========================================================")
     i = 1
@@ -717,36 +718,52 @@ def spRequest(filename):
 # Startup
 #==================================================================#
 
-# Parsing Parameters
-parser = argparse.ArgumentParser(description="KoboldAI Server")
-parser.add_argument("--remote", action='store_true', help="Optimizes KoboldAI for Remote Play")
-parser.add_argument("--ngrok", action='store_true', help="Optimizes KoboldAI for Remote Play using Ngrok")
-parser.add_argument("--host", action='store_true', help="Optimizes KoboldAI for Remote Play without using a proxy service")
-parser.add_argument("--model", help="Specify the Model Type to skip the Menu")
-parser.add_argument("--path", help="Specify the Path for local models (For model NeoCustom or GPT2Custom)")
-parser.add_argument("--cpu", action='store_true', help="By default unattended launches are on the GPU use this option to force CPU usage.")
-parser.add_argument("--breakmodel", action='store_true', help=argparse.SUPPRESS)
-parser.add_argument("--breakmodel_layers", type=int, help=argparse.SUPPRESS)
-parser.add_argument("--breakmodel_gpulayers", type=str, help="If using a model that supports hybrid generation, this is a comma-separated list that specifies how many layers to put on each GPU device. For example to put 8 layers on device 0, 9 layers on device 1 and 11 layers on device 2, use --beakmodel_gpulayers 8,9,11")
-parser.add_argument("--override_delete", action='store_true', help="Deleting stories from inside the browser is disabled if you are using --remote and enabled otherwise. Using this option will instead allow deleting stories if using --remote and prevent deleting stories otherwise.")
-parser.add_argument("--override_rename", action='store_true', help="Renaming stories from inside the browser is disabled if you are using --remote and enabled otherwise. Using this option will instead allow renaming stories if using --remote and prevent renaming stories otherwise.")
-parser.add_argument("--configname", help="Force a fixed configuration name to aid with config management.")
-parser.add_argument("--colab", action='store_true', help="Optimize for Google Colab.")
-parser.add_argument("--nobreakmodel", action='store_true', help="Disables Breakmodel support completely.")
-parser.add_argument("--unblock", action='store_true', default=False, help="Unblocks the KoboldAI port to be accessible from other machines without optimizing for remote play (It is recommended to use --host instead)")
-parser.add_argument("--quiet", action='store_true', default=False, help="If present will suppress any story related text from showing on the console")
-parser.add_argument("--lowmem", action='store_true', help="Extra Low Memory loading for the GPU, slower but memory does not peak to twice the usage")
-
-args: argparse.Namespace = None
-if(os.environ.get("KOBOLDAI_ARGS") is not None):
+# Set logging level to reduce chatter from Flask
+import logging
+log = logging.getLogger('werkzeug')
+log.setLevel(logging.ERROR)
+
+# Start flask & SocketIO
+print("{0}Initializing Flask... {1}".format(colors.PURPLE, colors.END), end="")
+from flask import Flask, render_template, Response, request, copy_current_request_context
+from flask_socketio import SocketIO, emit
+app = Flask(__name__)
+app.config['SECRET KEY'] = 'secret!'
+socketio = SocketIO(app, async_method="eventlet")
+print("{0}OK!{1}".format(colors.GREEN, colors.END))
+
+def general_startup():
+    global args
+    # Parsing Parameters
+    parser = argparse.ArgumentParser(description="KoboldAI Server")
+    parser.add_argument("--remote", action='store_true', help="Optimizes KoboldAI for Remote Play")
+    parser.add_argument("--ngrok", action='store_true', help="Optimizes KoboldAI for Remote Play using Ngrok")
+    parser.add_argument("--host", action='store_true', help="Optimizes KoboldAI for Remote Play without using a proxy service")
+    parser.add_argument("--model", help="Specify the Model Type to skip the Menu")
+    parser.add_argument("--path", help="Specify the Path for local models (For model NeoCustom or GPT2Custom)")
+    parser.add_argument("--cpu", action='store_true', help="By default unattended launches are on the GPU use this option to force CPU usage.")
+    parser.add_argument("--breakmodel", action='store_true', help=argparse.SUPPRESS)
+    parser.add_argument("--breakmodel_layers", type=int, help=argparse.SUPPRESS)
+    parser.add_argument("--breakmodel_gpulayers", type=str, help="If using a model that supports hybrid generation, this is a comma-separated list that specifies how many layers to put on each GPU device. For example to put 8 layers on device 0, 9 layers on device 1 and 11 layers on device 2, use --beakmodel_gpulayers 8,9,11")
+    parser.add_argument("--override_delete", action='store_true', help="Deleting stories from inside the browser is disabled if you are using --remote and enabled otherwise. Using this option will instead allow deleting stories if using --remote and prevent deleting stories otherwise.")
+    parser.add_argument("--override_rename", action='store_true', help="Renaming stories from inside the browser is disabled if you are using --remote and enabled otherwise. Using this option will instead allow renaming stories if using --remote and prevent renaming stories otherwise.")
+    parser.add_argument("--configname", help="Force a fixed configuration name to aid with config management.")
+    parser.add_argument("--colab", action='store_true', help="Optimize for Google Colab.")
+    parser.add_argument("--nobreakmodel", action='store_true', help="Disables Breakmodel support completely.")
+    parser.add_argument("--unblock", action='store_true', default=False, help="Unblocks the KoboldAI port to be accessible from other machines without optimizing for remote play (It is recommended to use --host instead)")
+    parser.add_argument("--quiet", action='store_true', default=False, help="If present will suppress any story related text from showing on the console")
+    parser.add_argument("--lowmem", action='store_true', help="Extra Low Memory loading for the GPU, slower but memory does not peak to twice the usage")
+
+    if(os.environ.get("KOBOLDAI_ARGS") is not None):
         import shlex
         args = parser.parse_args(shlex.split(os.environ["KOBOLDAI_ARGS"]))
     else:
         args = parser.parse_args()
 
     vars.model = args.model;
 
     if args.colab:
         args.remote = True;
         args.override_rename = True;
         args.override_delete = True;
@@ -754,41 +771,38 @@ if args.colab:
         args.quiet = True;
         args.lowmem = True;
 
     if args.quiet:
         vars.quiet = True
 
     if args.nobreakmodel:
         vars.nobreakmodel = True;
 
     if args.remote:
         vars.host = True;
 
     if args.ngrok:
         vars.host = True;
 
     if args.host:
         vars.host = True;
 
     if args.cpu:
         vars.use_colab_tpu = False
 
     vars.smandelete = vars.host == args.override_delete
     vars.smanrename = vars.host == args.override_rename
 
-# Select a model to run
-if args.model:
-    print("Welcome to KoboldAI!\nYou have selected the following Model:", vars.model)
-    if args.path:
-        print("You have selected the following path for your Model :", args.path)
-        vars.custmodpth = args.path;
-        vars.colaburl = args.path + "/request"; # Lets just use the same parameter to keep it simple
-
-else:
-    print("{0}Welcome to the KoboldAI Server!\nListed RAM is the optimal VRAM and CPU ram can be up to twice the amount.\nMost models can run at less VRAM with reduced max tokens or less layers on the GPU.\nSelect an AI model to continue:{1}\n".format(colors.CYAN, colors.END))
-    getModelSelection(mainmenu)
-
-# If transformers model was selected & GPU available, ask to use CPU or GPU
-if(vars.model not in ["InferKit", "Colab", "OAI", "GooseAI" , "ReadOnly", "TPUMeshTransformerGPTJ"]):
+#==================================================================#
+# Load Model
+#==================================================================#
+def load_model(use_gpu=True, key=''):
+    global model
+    global generator
+    vars.noai = False
+    set_aibusy(True)
+    # If transformers model was selected & GPU available, ask to use CPU or GPU
+    if(vars.model not in ["InferKit", "Colab", "OAI", "GooseAI" , "ReadOnly", "TPUMeshTransformerGPTJ"]):
         vars.allowsp = True
         # Test for GPU support
         import torch
@@ -828,7 +842,7 @@ if(vars.model not in ["InferKit", "Colab", "OAI", "GooseAI" , "ReadOnly", "TPUMe
         print("WARNING: No model type detected, assuming Neo (If this is a GPT2 model use the other menu option or --model GPT2Custom)")
         vars.model_type = "gpt_neo"
 
     if(not vars.use_colab_tpu and vars.model not in ["InferKit", "Colab", "OAI", "GooseAI" , "ReadOnly", "TPUMeshTransformerGPTJ"]):
         loadmodelsettings()
         loadsettings()
         print("{0}Looking for GPU support...{1}".format(colors.PURPLE, colors.END), end="")
@@ -865,19 +879,13 @@ if(not vars.use_colab_tpu and vars.model not in ["InferKit", "Colab", "OAI", "Go
                 vars.usegpu = False
                 vars.breakmodel = True
             else:
-                print(" 1 - GPU\n 2 - CPU\n")
                 genselected = False
         else:
             genselected = False
 
         if(vars.hascuda):
             while(genselected == False):
-                genselect = input("Mode> ")
-                if(genselect == ""):
-                    vars.breakmodel = False
-                    vars.usegpu = True
-                    genselected = True
-                elif(genselect.isnumeric() and int(genselect) == 1):
+                if(use_gpu):
                     if(vars.bmsupported):
                         vars.breakmodel = True
                         vars.usegpu = False
@@ -886,19 +894,16 @@ if(not vars.use_colab_tpu and vars.model not in ["InferKit", "Colab", "OAI", "Go
                         vars.breakmodel = False
                         vars.usegpu = True
                         genselected = True
-                elif(genselect.isnumeric() and int(genselect) == 2):
+                else:
                     vars.breakmodel = False
                     vars.usegpu = False
                     genselected = True
-                else:
-                    print("{0}Please enter a valid selection.{1}".format(colors.RED, colors.END))
 
 # Ask for API key if InferKit was selected
 if(vars.model == "InferKit"):
     if(not path.exists("settings/" + getmodelname().replace('/', '_') + ".settings")):
         # If the client settings file doesn't exist, create it
-        print("{0}Please enter your InferKit API key:{1}\n".format(colors.CYAN, colors.END))
-        vars.apikey = input("Key> ")
+        vars.apikey = key
         # Write API key to file
         os.makedirs('settings', exist_ok=True)
         file = open("settings/" + getmodelname().replace('/', '_') + ".settings", "w")
@@ -918,8 +923,7 @@ if(vars.model == "InferKit"):
             file.close()
     else:
         # Get API key, add it to settings object, and write it to disk
-        print("{0}Please enter your InferKit API key:{1}\n".format(colors.CYAN, colors.END))
-        vars.apikey = input("Key> ")
+        vars.apikey = key
         js["apikey"] = vars.apikey
         # Write API key to file
         file = open("settings/" + getmodelname().replace('/', '_') + ".settings", "w")
@@ -928,20 +932,19 @@ if(vars.model == "InferKit"):
             finally:
                 file.close()
 
 # Swap OAI Server if GooseAI was selected
 if(vars.model == "GooseAI"):
     vars.oaiengines = "https://api.goose.ai/v1/engines"
     vars.model = "OAI"
     args.configname = "GooseAI"
 
 # Ask for API key if OpenAI was selected
 if(vars.model == "OAI"):
     if not args.configname:
         args.configname = "OAI"
     if(not path.exists("settings/" + getmodelname().replace('/', '_') + ".settings")):
         # If the client settings file doesn't exist, create it
-        print("{0}Please enter your API key:{1}\n".format(colors.CYAN, colors.END))
-        vars.oaiapikey = input("Key> ")
+        vars.oaiapikey = key
         # Write API key to file
         os.makedirs('settings', exist_ok=True)
         file = open("settings/" + getmodelname().replace('/', '_') + ".settings", "w")
@@ -961,8 +964,7 @@ if(vars.model == "OAI"):
             file.close()
     else:
         # Get API key, add it to settings object, and write it to disk
-        print("{0}Please enter your API key:{1}\n".format(colors.CYAN, colors.END))
-        vars.oaiapikey = input("Key> ")
+        vars.oaiapikey = key
         js["oaiapikey"] = vars.oaiapikey
         # Write API key to file
         file = open("settings/" + getmodelname().replace('/', '_') + ".settings", "w")
@@ -1005,32 +1007,18 @@ if(vars.model == "OAI"):
         print(req.json())
         quit()
 
 # Ask for ngrok url if Google Colab was selected
 if(vars.model == "Colab"):
     if(vars.colaburl == ""):
         print("{0}NOTE: For the modern KoboldAI Colab's you open the links directly in your browser.\nThis option is only for the KoboldAI Server API, not all features are supported in this mode.\n".format(colors.YELLOW, colors.END))
         print("{0}Enter the URL of the server (For example a trycloudflare link):{1}\n".format(colors.CYAN, colors.END))
         vars.colaburl = input("URL> ") + "/request"
 
 if(vars.model == "ReadOnly"):
     vars.noai = True
 
-# Set logging level to reduce chatter from Flask
-import logging
-log = logging.getLogger('werkzeug')
-log.setLevel(logging.ERROR)
-
-# Start flask & SocketIO
-print("{0}Initializing Flask... {1}".format(colors.PURPLE, colors.END), end="")
-from flask import Flask, render_template, Response, request, copy_current_request_context
-from flask_socketio import SocketIO, emit
-app = Flask(__name__)
-app.config['SECRET KEY'] = 'secret!'
-socketio = SocketIO(app, async_method="eventlet")
-print("{0}OK!{1}".format(colors.GREEN, colors.END))
-
-# Start transformers and create pipeline
-if(not vars.use_colab_tpu and vars.model not in ["InferKit", "Colab", "OAI", "GooseAI" , "ReadOnly", "TPUMeshTransformerGPTJ"]):
+    # Start transformers and create pipeline
+    if(not vars.use_colab_tpu and vars.model not in ["InferKit", "Colab", "OAI", "GooseAI" , "ReadOnly", "TPUMeshTransformerGPTJ"]):
 
         if(not vars.noai):
             print("{0}Initializing transformers, please wait...{1}".format(colors.PURPLE, colors.END))
             from transformers import StoppingCriteria, GPT2TokenizerFast, GPT2LMHeadModel, GPTNeoForCausalLM, GPTNeoModel, AutoModelForCausalLM, AutoTokenizer
@@ -1447,7 +1435,7 @@ if(not vars.use_colab_tpu and vars.model not in ["InferKit", "Colab", "OAI", "Go
     else:
         from transformers import GPT2TokenizerFast
         tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", cache_dir="cache/")
 else:
     def tpumtjgetsofttokens():
         soft_tokens = None
         if(vars.sp is None):
@@ -1562,6 +1550,13 @@ else:
     else:
         loadsettings()
 
+    lua_startup()
+    # Load scripts
+    load_lua_scripts()
+
+    final_startup()
+    set_aibusy(False)
+
 # Set up Flask routes
 @app.route('/')
 @app.route('/index')
@@ -1614,7 +1609,13 @@ def download():
 
 #============================ LUA API =============================#
 
-if(path.exists("settings/" + getmodelname().replace('/', '_') + ".settings")):
+_bridged = {}
+F = TypeVar("F", bound=Callable)
+def lua_startup():
+    global _bridged
+    global F
+    global bridged
+    if(path.exists("settings/" + getmodelname().replace('/', '_') + ".settings")):
         file = open("settings/" + getmodelname().replace('/', '_') + ".settings", "r")
         js = json.load(file)
         if("userscripts" in js):
@@ -1631,12 +1632,46 @@ if(path.exists("settings/" + getmodelname().replace('/', '_') + ".settings")):
             vars.corescript = "default.lua"
         file.close()
 
+    #==================================================================#
+    # Lua runtime startup
+    #==================================================================#
+
+    print("", end="", flush=True)
+    print(colors.PURPLE + "Initializing Lua Bridge... " + colors.END, end="", flush=True)
+
+    # Set up Lua state
+    vars.lua_state = lupa.LuaRuntime(unpack_returned_tuples=True)
+
+    # Load bridge.lua
+    bridged = {
+        "corescript_path": os.path.join(os.path.dirname(os.path.realpath(__file__)), "cores"),
+        "userscript_path": os.path.join(os.path.dirname(os.path.realpath(__file__)), "userscripts"),
+        "config_path": os.path.join(os.path.dirname(os.path.realpath(__file__)), "userscripts"),
+        "lib_paths": vars.lua_state.table(os.path.join(os.path.dirname(os.path.realpath(__file__)), "lualibs"), os.path.join(os.path.dirname(os.path.realpath(__file__)), "extern", "lualibs")),
+        "vars": vars,
+    }
+    for kwarg in _bridged:
+        bridged[kwarg] = _bridged[kwarg]
+    try:
+        vars.lua_kobold, vars.lua_koboldcore, vars.lua_koboldbridge = vars.lua_state.globals().dofile(os.path.join(os.path.dirname(os.path.realpath(__file__)), "bridge.lua"))(
+            vars.lua_state.globals().python,
+            bridged,
+        )
+    except lupa.LuaError as e:
+        print(colors.RED + "ERROR!" + colors.END)
+        vars.lua_koboldbridge.obliterate_multiverse()
+        print("{0}{1}{2}".format(colors.RED, "***LUA ERROR***: ", colors.END), end="", file=sys.stderr)
+        print("{0}{1}{2}".format(colors.RED, str(e).replace("\033", ""), colors.END), file=sys.stderr)
+        exit(1)
+    print(colors.GREEN + "OK!" + colors.END)
+
 
 def lua_log_format_name(name):
     return f"[{name}]" if type(name) is str else "CORE"
 
-_bridged = {}
-F = TypeVar("F", bound=Callable)
 def bridged_kwarg(name=None):
+    global F
     def _bridged_kwarg(f: F):
         _bridged[name if name is not None else f.__name__[4:] if f.__name__[:4] == "lua_" else f.__name__] = f
         return f
@@ -2172,42 +2207,6 @@ def execute_outmod():
     for k in vars.lua_deleted:
         inlinedelete(k)
 
-#==================================================================#
-# Lua runtime startup
-#==================================================================#
-
-print("", end="", flush=True)
-print(colors.PURPLE + "Initializing Lua Bridge... " + colors.END, end="", flush=True)
-
-# Set up Lua state
-vars.lua_state = lupa.LuaRuntime(unpack_returned_tuples=True)
-
-# Load bridge.lua
-bridged = {
-    "corescript_path": os.path.join(os.path.dirname(os.path.realpath(__file__)), "cores"),
-    "userscript_path": os.path.join(os.path.dirname(os.path.realpath(__file__)), "userscripts"),
-    "config_path": os.path.join(os.path.dirname(os.path.realpath(__file__)), "userscripts"),
-    "lib_paths": vars.lua_state.table(os.path.join(os.path.dirname(os.path.realpath(__file__)), "lualibs"), os.path.join(os.path.dirname(os.path.realpath(__file__)), "extern", "lualibs")),
-    "vars": vars,
-}
-for kwarg in _bridged:
-    bridged[kwarg] = _bridged[kwarg]
-try:
-    vars.lua_kobold, vars.lua_koboldcore, vars.lua_koboldbridge = vars.lua_state.globals().dofile(os.path.join(os.path.dirname(os.path.realpath(__file__)), "bridge.lua"))(
-        vars.lua_state.globals().python,
-        bridged,
-    )
-except lupa.LuaError as e:
-    print(colors.RED + "ERROR!" + colors.END)
-    vars.lua_koboldbridge.obliterate_multiverse()
-    print("{0}{1}{2}".format(colors.RED, "***LUA ERROR***: ", colors.END), end="", file=sys.stderr)
-    print("{0}{1}{2}".format(colors.RED, str(e).replace("\033", ""), colors.END), file=sys.stderr)
-    exit(1)
-print(colors.GREEN + "OK!" + colors.END)
-
-# Load scripts
-load_lua_scripts()
-
 
 #============================ METHODS =============================#
 
@@ -2528,6 +2527,12 @@ def get_message(msg):
         load_lua_scripts()
         unloaded, loaded = getuslist()
         sendUSStatItems()
+    elif(msg['cmd'] == 'list_model'):
+        sendModelSelection(menu=msg['data'])
+    elif(msg['cmd'] == 'load_model'):
+        load_model(use_gpu=msg['use_gpu'], key=msg['key'])
+    elif(msg['cmd'] == 'selectmodel'):
+        vars.model = msg['data']
     elif(msg['cmd'] == 'loadselect'):
         vars.loadselect = msg["data"]
     elif(msg['cmd'] == 'spselect'):
@@ -3793,10 +3798,16 @@ def refresh_settings():
 def set_aibusy(state):
     if(state):
         vars.aibusy = True
+        try:
             emit('from_server', {'cmd': 'setgamestate', 'data': 'wait'}, broadcast=True)
+        except:
+            pass
     else:
         vars.aibusy = False
+        try:
             emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}, broadcast=True)
+        except:
+            pass
 
 #==================================================================#
 #
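set_aibusy() now swallows failures from emit(), presumably because load_model() calls it during startup before any browser is connected, when broadcasting can fail outside a socket request context; that reading is an inference, the commit itself only adds the try/except. A condensed sketch of the guarded-broadcast pattern, reusing the vars and emit names from aiserver.py:

# Sketch of the guarded broadcast above; the reason given (no client connected yet
# during load_model()) is an inference, not something the commit states.
def set_busy_state(busy: bool) -> None:
    vars.aibusy = busy
    try:
        emit('from_server',
             {'cmd': 'setgamestate', 'data': 'wait' if busy else 'ready'},
             broadcast=True)
    except Exception:
        # No connected client / no socket context yet: update the flag and move on.
        pass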
@@ -5088,16 +5099,17 @@ def randomGameRequest(topic, memory=""):
     vars.memory = memory
     emit('from_server', {'cmd': 'setmemory', 'data': vars.memory}, broadcast=True)
 
-# Prevent tokenizer from taking extra time the first time it's used
-def __preempt_tokenizer():
+def final_startup():
+    # Prevent tokenizer from taking extra time the first time it's used
+    def __preempt_tokenizer():
         if("tokenizer" not in globals()):
             return
         utils.decodenewlines(tokenizer.decode([25678, 559]))
         tokenizer.encode(utils.encodenewlines("eunoia"))
     threading.Thread(target=__preempt_tokenizer).start()
 
     # Load soft prompt specified by the settings file, if applicable
     if(path.exists("settings/" + getmodelname().replace('/', '_') + ".settings")):
         file = open("settings/" + getmodelname().replace('/', '_') + ".settings", "r")
         js = json.load(file)
         if(vars.allowsp and "softprompt" in js and type(js["softprompt"]) is str and all(q not in js["softprompt"] for q in ("..", ":")) and (len(js["softprompt"]) == 0 or all(js["softprompt"][0] not in q for q in ("/", "\\")))):
@@ -5106,8 +5118,8 @@ if(path.exists("settings/" + getmodelname().replace('/', '_') + ".settings")):
             vars.spfilename = ""
         file.close()
 
     # Precompile TPU backend if required
     if(vars.use_colab_tpu or vars.model in ("TPUMeshTransformerGPTJ",)):
         soft_tokens = tpumtjgetsofttokens()
         if(vars.dynamicscan or (not vars.nogenmod and vars.has_genmod)):
             threading.Thread(
@@ -5175,6 +5187,11 @@ print("", end="", flush=True)
 if __name__ == "__main__":
     print("{0}\nStarting webserver...{1}".format(colors.GREEN, colors.END), flush=True)
 
+    general_startup()
+    #show_select_model_list()
+    vars.model = "ReadOnly"
+    load_model()
+
     # Start Flask/SocketIO (Blocking, so this must be last method!)
 
     #socketio.run(app, host='0.0.0.0', port=5000)
static/application.js
@@ -7,6 +7,7 @@ var socket;
 
 // UI references for jQuery
 var connect_status;
+var button_loadmodel;
 var button_newgame;
 var button_rndgame;
 var button_save;
@@ -55,6 +56,7 @@ var savepins;
 var topic;
 var saveas_accept;
 var saveas_close;
+var loadmodelpopup;
 var loadpopup;
 var loadcontent;
 var load_accept;
@@ -890,6 +892,17 @@ function sendSaveAsRequest() {
     socket.send({'cmd': 'saveasrequest', 'data': {"name": saveasinput.val(), "pins": savepins.val()}});
 }
 
+function showLoadModelPopup() {
+    loadmodelpopup.removeClass("hidden");
+    loadmodelpopup.addClass("flex");
+}
+
+function hideLoadModelPopup() {
+    loadmodelpopup.removeClass("flex");
+    loadmodelpopup.addClass("hidden");
+    loadmodelcontent.html("");
+}
+
 function showLoadPopup() {
     loadpopup.removeClass("hidden");
     loadpopup.addClass("flex");
@@ -923,6 +936,46 @@ function hideUSPopup() {
     spcontent.html("");
 }
 
+
+function buildLoadModelList(ar) {
+    disableButtons([load_model_accept]);
+    loadmodelcontent.html("");
+    var i;
+    for(i=0; i<ar.length; i++) {
+        var html
+        html = "<div class=\"flex\">\
+            <div class=\"loadlistpadding\"></div>"
+        if(ar[i][3]) {
+            html = html + "<span class=\"loadlisticon loadmodellisticon-folder oi oi-folder allowed\" aria-hidden=\"true\"></span>"
+        } else {
+            html = html + "<div class=\"loadlistpadding\"></div>"
+        }
+        html = html + "<div class=\"loadlistpadding\"></div>\
+            <div class=\"loadlistitem\" id=\"loadmodel"+i+"\" name=\""+ar[i][1]+"\">\
+                <div>"+ar[i][0]+"</div>\
+                <div class=\"flex-push-right\">"+ar[i][2]+"</div>\
+            </div>\
+        </div>"
+        loadmodelcontent.append(html);
+        if(ar[i][3]) {
+            $("#loadmodel"+i).off("click").on("click", (function () {
+                return function () {
+                    socket.send({'cmd': 'list_model', 'data': $(this).attr("name")});
+                    disableButtons([load_model_accept]);
+                }
+            })(i));
+        } else {
+            $("#loadmodel"+i).off("click").on("click", (function () {
+                return function () {
+                    socket.send({'cmd': 'selectmodel', 'data': $(this).attr("name")});
+                    highlightLoadLine($(this));
+                    enableButtons([load_model_accept]);
+                }
+            })(i));
+        }
+    }
+}
+
 function buildLoadList(ar) {
     disableButtons([load_accept]);
     loadcontent.html("");
@@ -1771,6 +1824,7 @@ $(document).ready(function(){
 
     // Bind UI references
     connect_status = $('#connectstatus');
+    button_loadmodel = $('#btn_loadmodel');
     button_newgame = $('#btn_newgame');
     button_rndgame = $('#btn_rndgame');
     button_save = $('#btn_save');
@@ -1823,9 +1877,13 @@ $(document).ready(function(){
     saveas_accept = $("#btn_saveasaccept");
     saveas_close = $("#btn_saveasclose");
     loadpopup = $("#loadcontainer");
+    loadmodelpopup = $("#loadmodelcontainer");
     loadcontent = $("#loadlistcontent");
+    loadmodelcontent = $("#loadmodellistcontent");
     load_accept = $("#btn_loadaccept");
     load_close = $("#btn_loadclose");
+    load_model_accept = $("#btn_loadmodelaccept");
+    load_model_close = $("#btn_loadmodelclose");
     sppopup = $("#spcontainer");
     spcontent = $("#splistcontent");
     sp_accept = $("#btn_spaccept");
@@ -2313,6 +2371,18 @@ $(document).ready(function(){
             } else {
                 debug_area.addClass("hidden");
             }
+        } else if(msg.cmd == 'show_model_menu') {
+            if(msg.menu == 'gpt2list') {
+                $("#use_gpu_div").removeClass("hidden")
+            } else {
+                $("#use_gpu_div").addClass("hidden")
+            }
+            if(msg.menu == 'apilist') {
+                $("#modelkey").removeClass("hidden")
+            } else {
+                $("#modelkey").addClass("hidden")
+            }
+            buildLoadModelList(msg.data);
         }
     });
 
@@ -2511,6 +2581,10 @@ $(document).ready(function(){
         hideLoadPopup();
     });
 
+    load_model_close.on("click", function(ev) {
+        hideLoadModelPopup();
+    });
+
     load_accept.on("click", function(ev) {
         hideMessage();
         newly_loaded = true;
@@ -2518,6 +2592,13 @@ $(document).ready(function(){
         hideLoadPopup();
     });
 
+    load_model_accept.on("click", function(ev) {
+        hideMessage();
+        socket.send({'cmd': 'load_model', 'use_gpu': $('#use_gpu')[0].checked, 'key': $('#modelkey')[0].value});
+        loadmodelcontent.html("");
+        hideLoadModelPopup();
+    });
+
     sp_close.on("click", function(ev) {
         hideSPPopup();
     });
@@ -2540,6 +2621,11 @@ $(document).ready(function(){
         hideUSPopup();
     });
 
+    button_loadmodel.on("click", function(ev) {
+        showLoadModelPopup();
+        socket.send({'cmd': 'list_model', 'data': 'mainmenu'});
+    });
+
     button_newgame.on("click", function(ev) {
         if(connected) {
             showNewStoryPopup();
templates/index.html
@@ -33,6 +33,12 @@
         </button>
         <div class="collapse navbar-collapse" id="navbarNavDropdown">
             <ul class="nav navbar-nav">
+                <li class="nav-item dropdown">
+                    <a class="nav-link dropdown-toggle" href="#" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">AI</a>
+                    <div class="dropdown-menu">
+                        <a class="dropdown-item" href="#" id="btn_loadmodel">Load Model</a>
+                    </div>
+                </li>
                 <li class="nav-item dropdown">
                     <a class="nav-link dropdown-toggle" href="#" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">New Game</a>
                     <div class="dropdown-menu">
@@ -265,6 +271,29 @@
             </div>
         </div>
     </div>
+    <div class="popupcontainer hidden" id="loadmodelcontainer">
+        <div id="loadpopup">
+            <div class="popuptitlebar">
+                <div class="popuptitletext">Select A Model To Load</div>
+            </div>
+            <div class="loadmodellistheader">
+                <div>Model</div>
+            </div>
+            <div id="loadmodellistcontent">
+            </div>
+            <div class="popupfooter">
+                <input class="form-control hidden" type="text" placeholder="key" id="modelkey"><br>
+            </div>
+            <div class="popupfooter">
+                <button type="button" class="btn btn-primary" id="btn_loadmodelaccept">Load</button>
+                <button type="button" class="btn btn-primary" id="btn_loadmodelclose">Cancel</button>
+                <div class="box flex-push-right hidden" id=use_gpu_div>
+                    <input type="checkbox" data-toggle="toggle" data-onstyle="success" id="use_gpu" checked>
+                    <div class="box-label">Use GPU</div>
+                </div>
+            </div>
+        </div>
+    </div>
     <div class="popupcontainer hidden" id="spcontainer">
         <div id="sppopup">
             <div class="popuptitlebar">