Initial UI-based model loading. Includes all parameters except breakmodel chunks, the engine number for OAI, and the ngrok URL for Google Colab

This commit is contained in:
ebolam 2022-03-06 19:51:35 -05:00
parent f6c95f18fa
commit 2ddf45141b
3 changed files with 1106 additions and 974 deletions
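Taken together, the three diffs below add a small socket protocol for UI-driven model loading. A minimal sketch of the message shapes, using only the cmd names and fields that appear in this commit (the payload values are illustrative):

# Client asks for a menu; 'mainmenu' is the entry point.
list_request = {'cmd': 'list_model', 'data': 'mainmenu'}

# Server replies with the rows of model_menu[menu]; each row is
# [display name, model id or submenu key, VRAM hint, is_submenu].
menu_reply = {'cmd': 'show_model_menu', 'menu': 'mainmenu',
              'data': [["Skein 6B (Hybrid)", "KoboldAI/GPT-J-6B-Skein", "16GB", False]]}

# Client picks a leaf row, then triggers the load with the extra
# parameters the popup collects (GPU toggle, optional API key).
select_msg = {'cmd': 'selectmodel', 'data': 'KoboldAI/GPT-J-6B-Skein'}
load_msg = {'cmd': 'load_model', 'use_gpu': True, 'key': ''}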

View File

@@ -39,6 +39,8 @@ import gc
import lupa
import torch
# KoboldAI
import fileops
import gensettings
@@ -67,71 +69,67 @@ class colors:
UNDERLINE = '\033[4m'
# AI models
mainmenu = [
["Load a model from its directory", "NeoCustom", ""],
["Load an old GPT-2 model (eg CloverEdition)", "GPT2Custom", ""],
["Skein 6B (Hybrid)", "KoboldAI/GPT-J-6B-Skein", "16GB"],
["Janeway 6B (Novel)", "KoboldAI/GPT-J-6B-Janeway", "16GB"],
["Adventure 6B", "KoboldAI/GPT-J-6B-Adventure", "16GB"],
["Lit 6B (NSFW)", "hakurei/lit-6B", "16GB"],
["Shinen 6B (NSFW)", "KoboldAI/GPT-J-6B-Shinen", "16GB"],
["C1 6B (Chatbot)", "hakurei/c1-6B", "16GB"],
["Janeway Neo 2.7B (Novel)", "KoboldAI/GPT-Neo-2.7B-Janeway", "8GB"],
["Janeway FSD 2.7B (Novel)", "KoboldAI/fairseq-dense-2.7B-Janeway", "8GB"],
["Adventure 2.7B", "KoboldAI/GPT-Neo-2.7B-AID", "8GB"],
["Picard 2.7B (Novel)", "KoboldAI/GPT-Neo-2.7B-Picard", "8GB"],
["Horni 2.7B (NSFW)", "KoboldAI/GPT-Neo-2.7B-Horni", "8GB"],
["Horni-LN 2.7B (Novel)", "KoboldAI/GPT-Neo-2.7B-Horni-LN", "8GB"],
["Shinen 2.7B (NSFW)", "KoboldAI/GPT-Neo-2.7B-Shinen", "8GB"],
["Untuned GPT-Neo/J", "gptneolist", ""],
["Untuned Fairseq Dense", "fsdlist", ""],
["Untuned XGLM", "xglmlist", ""],
["Untuned GPT2", "gpt2list", ""],
["Online Services", "apilist", ""],
["Read Only (No AI)", "ReadOnly", ""]
model_menu = {'mainmenu': [
["Load a model from its directory", "NeoCustom", "", False],
["Load an old GPT-2 model (eg CloverEdition)", "GPT2Custom", "", False],
["Skein 6B (Hybrid)", "KoboldAI/GPT-J-6B-Skein", "16GB", False],
["Janeway 6B (Novel)", "KoboldAI/GPT-J-6B-Janeway", "16GB", False],
["Adventure 6B", "KoboldAI/GPT-J-6B-Adventure", "16GB", False],
["Lit 6B (NSFW)", "hakurei/lit-6B", "16GB", False],
["Shinen 6B (NSFW)", "KoboldAI/GPT-J-6B-Shinen", "16GB", False],
["C1 6B (Chatbot)", "hakurei/c1-6B", "16GB", False],
["Janeway Neo 2.7B (Novel)", "KoboldAI/GPT-Neo-2.7B-Janeway", "8GB", False],
["Janeway FSD 2.7B (Novel)", "KoboldAI/fairseq-dense-2.7B-Janeway", "8GB", False],
["Adventure 2.7B", "KoboldAI/GPT-Neo-2.7B-AID", "8GB", False],
["Picard 2.7B (Novel)", "KoboldAI/GPT-Neo-2.7B-Picard", "8GB", False],
["Horni 2.7B (NSFW)", "KoboldAI/GPT-Neo-2.7B-Horni", "8GB", False],
["Horni-LN 2.7B (Novel)", "KoboldAI/GPT-Neo-2.7B-Horni-LN", "8GB", False],
["Shinen 2.7B (NSFW)", "KoboldAI/GPT-Neo-2.7B-Shinen", "8GB", False],
["Untuned GPT-Neo/J", "gptneolist", "", True],
["Untuned Fairseq Dense", "fsdlist", "", True],
["Untuned XGLM", "xglmlist", "", True],
["Untuned GPT2", "gpt2list", "", True],
["Online Services", "apilist", "", True],
["Read Only (No AI)", "ReadOnly", "", False]
],
'gptneolist': [
["GPT-J 6B", "EleutherAI/gpt-j-6B", "16GB", False],
["GPT-Neo 2.7B", "EleutherAI/gpt-neo-2.7B", "8GB", False],
["GPT-Neo 1.3B", "EleutherAI/gpt-neo-1.3B", "6GB", False],
["Return to Main Menu", "mainmenu", "", True],
],
'gpt2list': [
["GPT-2 XL", "gpt2-xl", "6GB", False],
["GPT-2 Large", "gpt2-large", "4GB", False],
["GPT-2 Med", "gpt2-medium", "2GB", False],
["GPT-2", "gpt2", "2GB", False],
["Return to Main Menu", "mainmenu", "", True],
],
'fsdlist': [
["Fairseq Dense 13B", "KoboldAI/fairseq-dense-13B", "32GB", False],
["Fairseq Dense 6.7B", "KoboldAI/fairseq-dense-6.7B", "16GB", False],
["Fairseq Dense 2.7B", "KoboldAI/fairseq-dense-2.7B", "8GB", False],
["Fairseq Dense 1.3B", "KoboldAI/fairseq-dense-1.3B", "6GB", False],
["Fairseq Dense 355M", "KoboldAI/fairseq-dense-355M", "", False],
["Fairseq Dense 125M", "KoboldAI/fairseq-dense-125M", "", False],
["Return to Main Menu", "Return", "", True],
],
'xglmlist': [
["XGLM 4.5B (Larger Dataset)", "facebook/xglm-4.5B", "", False],
["XGLM 7.5B", "facebook/xglm-7.5B", "", False],
["XGLM 2.9B", "facebook/xglm-2.9B", "", False],
["XGLM 1.7B", "facebook/xglm-1.7B", "", False],
["XGLM 564M", "facebook/xglm-564M", "", False],
["Return to Main Menu", "mainmenu", "", True],
],
'apilist': [
["OpenAI API (requires API key)", "OAI", "", False],
["InferKit API (requires API key)", "InferKit", "", False],
["KoboldAI Server API (Old Google Colab)", "Colab", "", False],
["Return to Main Menu", "mainmenu", "", True],
]
}
gptneolist = [
["GPT-J 6B", "EleutherAI/gpt-j-6B", "16GB"],
["GPT-Neo 2.7B", "EleutherAI/gpt-neo-2.7B", "8GB"],
["GPT-Neo 1.3B", "EleutherAI/gpt-neo-1.3B", "6GB"],
["Return to Main Menu", "Return", ""],
]
gpt2list = [
["GPT-2 XL", "gpt2-xl", "6GB"],
["GPT-2 Large", "gpt2-large", "4GB"],
["GPT-2 Med", "gpt2-medium", "2GB"],
["GPT-2", "gpt2", "2GB"],
["Return to Main Menu", "Return", ""],
]
fsdlist = [
["Fairseq Dense 13B", "KoboldAI/fairseq-dense-13B", "32GB"],
["Fairseq Dense 6.7B", "KoboldAI/fairseq-dense-6.7B", "16GB"],
["Fairseq Dense 2.7B", "KoboldAI/fairseq-dense-2.7B", "8GB"],
["Fairseq Dense 1.3B", "KoboldAI/fairseq-dense-1.3B", "6GB"],
["Fairseq Dense 355M", "KoboldAI/fairseq-dense-355M", ""],
["Fairseq Dense 125M", "KoboldAI/fairseq-dense-125M", ""],
["Return to Main Menu", "Return", ""],
]
xglmlist = [
["XGLM 4.5B (Larger Dataset)", "facebook/xglm-4.5B", ""],
["XGLM 7.5B", "facebook/xglm-7.5B", ""],
["XGLM 2.9B", "facebook/xglm-2.9B", ""],
["XGLM 1.7B", "facebook/xglm-1.7B", ""],
["XGLM 564M", "facebook/xglm-564M", ""],
["Return to Main Menu", "Return", ""],
]
apilist = [
["GooseAI API (requires API key)", "GooseAI", ""],
["OpenAI API (requires API key)", "OAI", ""],
["InferKit API (requires API key)", "InferKit", ""],
["KoboldAI Server API (Old Google Colab)", "Colab", ""],
["Return to Main Menu", "Return", ""],
]
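For reference, every row in the new model_menu above is a 4-item list: [display label, model id or submenu key, VRAM hint, is-submenu flag]. sendModelSelection (added below) simply emits model_menu[menu]. A hypothetical reading aid, not part of the commit:

def describe_row(row):
    label, target, vram, is_menu = row
    if is_menu:
        return f"{label}: opens submenu model_menu[{target!r}]"
    return f"{label}: loads {target!r} (suggested RAM: {vram or 'n/a'})"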
# Variables
class vars:
lastact = "" # The last action received from the user
@@ -261,6 +259,9 @@ utils.vars = vars
#==================================================================#
# Function to get model selection at startup
#==================================================================#
def sendModelSelection(menu="mainmenu"):
emit('from_server', {'cmd': 'show_model_menu', 'data': model_menu[menu], 'menu': menu}, broadcast=True)
def getModelSelection(modellist):
print(" # Model\t\t\t\t\t\tVRAM\n ========================================================")
i = 1
@@ -717,6 +718,22 @@ def spRequest(filename):
# Startup
#==================================================================#
# Set logging level to reduce chatter from Flask
import logging
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
# Start flask & SocketIO
print("{0}Initializing Flask... {1}".format(colors.PURPLE, colors.END), end="")
from flask import Flask, render_template, Response, request, copy_current_request_context
from flask_socketio import SocketIO, emit
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app, async_method="eventlet")
print("{0}OK!{1}".format(colors.GREEN, colors.END))
def general_startup():
global args
# Parsing Parameters
parser = argparse.ArgumentParser(description="KoboldAI Server")
parser.add_argument("--remote", action='store_true', help="Optimizes KoboldAI for Remote Play")
@@ -737,7 +754,7 @@ parser.add_argument("--unblock", action='store_true', default=False, help="Unblo
parser.add_argument("--quiet", action='store_true', default=False, help="If present will suppress any story related text from showing on the console")
parser.add_argument("--lowmem", action='store_true', help="Extra Low Memory loading for the GPU, slower but memory does not peak to twice the usage")
args: argparse.Namespace = None
if(os.environ.get("KOBOLDAI_ARGS") is not None):
import shlex
args = parser.parse_args(shlex.split(os.environ["KOBOLDAI_ARGS"]))
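The pre-existing KOBOLDAI_ARGS branch above means the same flags can be supplied through the environment. An assumed usage example (the --model flag itself is defined in a parser line outside this hunk, but args.model is checked further down):

import os, shlex

os.environ["KOBOLDAI_ARGS"] = "--model KoboldAI/GPT-Neo-2.7B-Picard --remote"
print(shlex.split(os.environ["KOBOLDAI_ARGS"]))
# ['--model', 'KoboldAI/GPT-Neo-2.7B-Picard', '--remote']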
@@ -775,18 +792,15 @@ if args.cpu:
vars.smandelete = vars.host == args.override_delete
vars.smanrename = vars.host == args.override_rename
# Select a model to run
if args.model:
print("Welcome to KoboldAI!\nYou have selected the following Model:", vars.model)
if args.path:
print("You have selected the following path for your Model :", args.path)
vars.custmodpth = args.path
vars.colaburl = args.path + "/request" # Let's just use the same parameter to keep it simple
else:
print("{0}Welcome to the KoboldAI Server!\nListed RAM is the optimal VRAM and CPU ram can be up to twice the amount.\nMost models can run at less VRAM with reduced max tokens or less layers on the GPU.\nSelect an AI model to continue:{1}\n".format(colors.CYAN, colors.END))
getModelSelection(mainmenu)
#==================================================================#
# Load Model
#==================================================================#
def load_model(use_gpu=True, key=''):
global model
global generator
vars.noai = False
set_aibusy(True)
# If transformers model was selected & GPU available, ask to use CPU or GPU
if(vars.model not in ["InferKit", "Colab", "OAI", "GooseAI" , "ReadOnly", "TPUMeshTransformerGPTJ"]):
vars.allowsp = True
@@ -865,19 +879,13 @@ if(not vars.use_colab_tpu and vars.model not in ["InferKit", "Colab", "OAI", "Go
vars.usegpu = False
vars.breakmodel = True
else:
print(" 1 - GPU\n 2 - CPU\n")
genselected = False
else:
genselected = False
if(vars.hascuda):
while(genselected == False):
genselect = input("Mode> ")
if(genselect == ""):
vars.breakmodel = False
vars.usegpu = True
genselected = True
elif(genselect.isnumeric() and int(genselect) == 1):
if(use_gpu):
if(vars.bmsupported):
vars.breakmodel = True
vars.usegpu = False
@@ -886,19 +894,16 @@ if(not vars.use_colab_tpu and vars.model not in ["InferKit", "Colab", "OAI", "Go
vars.breakmodel = False
vars.usegpu = True
genselected = True
elif(genselect.isnumeric() and int(genselect) == 2):
else:
vars.breakmodel = False
vars.usegpu = False
genselected = True
else:
print("{0}Please enter a valid selection.{1}".format(colors.RED, colors.END))
# Ask for API key if InferKit was selected
if(vars.model == "InferKit"):
if(not path.exists("settings/" + getmodelname().replace('/', '_') + ".settings")):
# If the client settings file doesn't exist, create it
print("{0}Please enter your InferKit API key:{1}\n".format(colors.CYAN, colors.END))
vars.apikey = input("Key> ")
vars.apikey = key
# Write API key to file
os.makedirs('settings', exist_ok=True)
file = open("settings/" + getmodelname().replace('/', '_') + ".settings", "w")
@@ -918,8 +923,7 @@ if(vars.model == "InferKit"):
file.close()
else:
# Get API key, add it to settings object, and write it to disk
print("{0}Please enter your InferKit API key:{1}\n".format(colors.CYAN, colors.END))
vars.apikey = input("Key> ")
vars.apikey = key
js["apikey"] = vars.apikey
# Write API key to file
file = open("settings/" + getmodelname().replace('/', '_') + ".settings", "w")
@@ -940,8 +944,7 @@ if(vars.model == "OAI"):
args.configname = "OAI"
if(not path.exists("settings/" + getmodelname().replace('/', '_') + ".settings")):
# If the client settings file doesn't exist, create it
print("{0}Please enter your API key:{1}\n".format(colors.CYAN, colors.END))
vars.oaiapikey = input("Key> ")
vars.oaiapikey = key
# Write API key to file
os.makedirs('settings', exist_ok=True)
file = open("settings/" + getmodelname().replace('/', '_') + ".settings", "w")
@@ -961,8 +964,7 @@ if(vars.model == "OAI"):
file.close()
else:
# Get API key, add it to settings object, and write it to disk
print("{0}Please enter your API key:{1}\n".format(colors.CYAN, colors.END))
vars.oaiapikey = input("Key> ")
vars.oaiapikey = key
js["oaiapikey"] = vars.oaiapikey
# Write API key to file
file = open("settings/" + getmodelname().replace('/', '_') + ".settings", "w")
@@ -1015,20 +1017,6 @@ if(vars.model == "Colab"):
if(vars.model == "ReadOnly"):
vars.noai = True
# Set logging level to reduce chatter from Flask
import logging
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
# Start flask & SocketIO
print("{0}Initializing Flask... {1}".format(colors.PURPLE, colors.END), end="")
from flask import Flask, render_template, Response, request, copy_current_request_context
from flask_socketio import SocketIO, emit
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app, async_method="eventlet")
print("{0}OK!{1}".format(colors.GREEN, colors.END))
# Start transformers and create pipeline
if(not vars.use_colab_tpu and vars.model not in ["InferKit", "Colab", "OAI", "GooseAI" , "ReadOnly", "TPUMeshTransformerGPTJ"]):
if(not vars.noai):
@@ -1562,6 +1550,13 @@ else:
else:
loadsettings()
lua_startup()
# Load scripts
load_lua_scripts()
final_startup()
set_aibusy(False)
# Set up Flask routes
@app.route('/')
@app.route('/index')
@@ -1614,6 +1609,12 @@ def download():
#============================ LUA API =============================#
_bridged = {}
F = TypeVar("F", bound=Callable)
def lua_startup():
global _bridged
global F
global bridged
if(path.exists("settings/" + getmodelname().replace('/', '_') + ".settings")):
file = open("settings/" + getmodelname().replace('/', '_') + ".settings", "r")
js = json.load(file)
@@ -1631,12 +1632,46 @@ if(path.exists("settings/" + getmodelname().replace('/', '_') + ".settings")):
vars.corescript = "default.lua"
file.close()
#==================================================================#
# Lua runtime startup
#==================================================================#
print("", end="", flush=True)
print(colors.PURPLE + "Initializing Lua Bridge... " + colors.END, end="", flush=True)
# Set up Lua state
vars.lua_state = lupa.LuaRuntime(unpack_returned_tuples=True)
# Load bridge.lua
bridged = {
"corescript_path": os.path.join(os.path.dirname(os.path.realpath(__file__)), "cores"),
"userscript_path": os.path.join(os.path.dirname(os.path.realpath(__file__)), "userscripts"),
"config_path": os.path.join(os.path.dirname(os.path.realpath(__file__)), "userscripts"),
"lib_paths": vars.lua_state.table(os.path.join(os.path.dirname(os.path.realpath(__file__)), "lualibs"), os.path.join(os.path.dirname(os.path.realpath(__file__)), "extern", "lualibs")),
"vars": vars,
}
for kwarg in _bridged:
bridged[kwarg] = _bridged[kwarg]
try:
vars.lua_kobold, vars.lua_koboldcore, vars.lua_koboldbridge = vars.lua_state.globals().dofile(os.path.join(os.path.dirname(os.path.realpath(__file__)), "bridge.lua"))(
vars.lua_state.globals().python,
bridged,
)
except lupa.LuaError as e:
print(colors.RED + "ERROR!" + colors.END)
vars.lua_koboldbridge.obliterate_multiverse()
print("{0}{1}{2}".format(colors.RED, "***LUA ERROR***: ", colors.END), end="", file=sys.stderr)
print("{0}{1}{2}".format(colors.RED, str(e).replace("\033", ""), colors.END), file=sys.stderr)
exit(1)
print(colors.GREEN + "OK!" + colors.END)
def lua_log_format_name(name):
return f"[{name}]" if type(name) is str else "CORE"
_bridged = {}
F = TypeVar("F", bound=Callable)
def bridged_kwarg(name=None):
global F
def _bridged_kwarg(f: F):
_bridged[name if name is not None else f.__name__[4:] if f.__name__[:4] == "lua_" else f.__name__] = f
return f
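bridged_kwarg registers Python callables that lua_startup() later passes to bridge.lua through the bridged table. A hypothetical usage (the function name and body are illustrative only):

@bridged_kwarg()
def lua_demo_log(msg):
    # the lua_ prefix is stripped, so this is stored as _bridged["demo_log"]
    print(msg)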
@@ -2172,42 +2207,6 @@ def execute_outmod():
for k in vars.lua_deleted:
inlinedelete(k)
#==================================================================#
# Lua runtime startup
#==================================================================#
print("", end="", flush=True)
print(colors.PURPLE + "Initializing Lua Bridge... " + colors.END, end="", flush=True)
# Set up Lua state
vars.lua_state = lupa.LuaRuntime(unpack_returned_tuples=True)
# Load bridge.lua
bridged = {
"corescript_path": os.path.join(os.path.dirname(os.path.realpath(__file__)), "cores"),
"userscript_path": os.path.join(os.path.dirname(os.path.realpath(__file__)), "userscripts"),
"config_path": os.path.join(os.path.dirname(os.path.realpath(__file__)), "userscripts"),
"lib_paths": vars.lua_state.table(os.path.join(os.path.dirname(os.path.realpath(__file__)), "lualibs"), os.path.join(os.path.dirname(os.path.realpath(__file__)), "extern", "lualibs")),
"vars": vars,
}
for kwarg in _bridged:
bridged[kwarg] = _bridged[kwarg]
try:
vars.lua_kobold, vars.lua_koboldcore, vars.lua_koboldbridge = vars.lua_state.globals().dofile(os.path.join(os.path.dirname(os.path.realpath(__file__)), "bridge.lua"))(
vars.lua_state.globals().python,
bridged,
)
except lupa.LuaError as e:
print(colors.RED + "ERROR!" + colors.END)
vars.lua_koboldbridge.obliterate_multiverse()
print("{0}{1}{2}".format(colors.RED, "***LUA ERROR***: ", colors.END), end="", file=sys.stderr)
print("{0}{1}{2}".format(colors.RED, str(e).replace("\033", ""), colors.END), file=sys.stderr)
exit(1)
print(colors.GREEN + "OK!" + colors.END)
# Load scripts
load_lua_scripts()
#============================ METHODS =============================#
@@ -2528,6 +2527,12 @@ def get_message(msg):
load_lua_scripts()
unloaded, loaded = getuslist()
sendUSStatItems()
elif(msg['cmd'] == 'list_model'):
sendModelSelection(menu=msg['data'])
elif(msg['cmd'] == 'load_model'):
load_model(use_gpu=msg['use_gpu'], key=msg['key'])
elif(msg['cmd'] == 'selectmodel'):
vars.model = msg['data']
elif(msg['cmd'] == 'loadselect'):
vars.loadselect = msg["data"]
elif(msg['cmd'] == 'spselect'):
@@ -3793,10 +3798,16 @@ def refresh_settings():
def set_aibusy(state):
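# note (inferred, not stated in the commit): emit() raises when there is no
# active SocketIO context, e.g. when load_model() is invoked from __main__ at
# startup, which is presumably why both branches below guard the broadcast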
if(state):
vars.aibusy = True
try:
emit('from_server', {'cmd': 'setgamestate', 'data': 'wait'}, broadcast=True)
except:
pass
else:
vars.aibusy = False
try:
emit('from_server', {'cmd': 'setgamestate', 'data': 'ready'}, broadcast=True)
except:
pass
#==================================================================#
#
@@ -5088,6 +5099,7 @@ def randomGameRequest(topic, memory=""):
vars.memory = memory
emit('from_server', {'cmd': 'setmemory', 'data': vars.memory}, broadcast=True)
def final_startup():
# Prevent tokenizer from taking extra time the first time it's used
def __preempt_tokenizer():
if("tokenizer" not in globals()):
@@ -5175,6 +5187,11 @@ print("", end="", flush=True)
if __name__ == "__main__":
print("{0}\nStarting webserver...{1}".format(colors.GREEN, colors.END), flush=True)
general_startup()
#show_select_model_list()
vars.model = "ReadOnly"
load_model()
# Start Flask/SocketIO (Blocking, so this must be last method!)
#socketio.run(app, host='0.0.0.0', port=5000)
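In sum, startup is now staged through callable functions instead of module-level code. The assumed boot order, read from the __main__ block above together with the new load_model():

# 1. general_startup()        argparse / KOBOLDAI_ARGS handling
# 2. vars.model = "ReadOnly"  placeholder until the UI picks a model
# 3. load_model(use_gpu, key) builds the pipeline, then runs lua_startup(),
#    load_lua_scripts(), final_startup(), set_aibusy(False)
# 4. socketio.run(app, ...)   would then block; it is still commented out
#    at this stage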

View File

@@ -7,6 +7,7 @@ var socket;
// UI references for jQuery
var connect_status;
var button_loadmodel;
var button_newgame;
var button_rndgame;
var button_save;
@@ -55,6 +56,7 @@ var savepins;
var topic;
var saveas_accept;
var saveas_close;
var loadmodelpopup;
var loadmodelcontent;
var load_model_accept;
var load_model_close;
var loadpopup;
var loadcontent;
var load_accept;
@@ -890,6 +892,17 @@ function sendSaveAsRequest() {
socket.send({'cmd': 'saveasrequest', 'data': {"name": saveasinput.val(), "pins": savepins.val()}});
}
function showLoadModelPopup() {
loadmodelpopup.removeClass("hidden");
loadmodelpopup.addClass("flex");
}
function hideLoadModelPopup() {
loadmodelpopup.removeClass("flex");
loadmodelpopup.addClass("hidden");
loadmodelcontent.html("");
}
function showLoadPopup() {
loadpopup.removeClass("hidden");
loadpopup.addClass("flex");
@@ -923,6 +936,46 @@ function hideUSPopup() {
spcontent.html("");
}
function buildLoadModelList(ar) {
disableButtons([load_model_accept]);
loadmodelcontent.html("");
var i;
for(i=0; i<ar.length; i++) {
var html;
html = "<div class=\"flex\">\
<div class=\"loadlistpadding\"></div>";
if(ar[i][3]) {
html = html + "<span class=\"loadlisticon loadmodellisticon-folder oi oi-folder allowed\" aria-hidden=\"true\"></span>";
} else {
html = html + "<div class=\"loadlistpadding\"></div>";
}
html = html + "<div class=\"loadlistpadding\"></div>\
<div class=\"loadlistitem\" id=\"loadmodel"+i+"\" name=\""+ar[i][1]+"\">\
<div>"+ar[i][0]+"</div>\
<div class=\"flex-push-right\">"+ar[i][2]+"</div>\
</div>\
</div>";
loadmodelcontent.append(html);
if(ar[i][3]) {
$("#loadmodel"+i).off("click").on("click", (function () {
return function () {
socket.send({'cmd': 'list_model', 'data': $(this).attr("name")});
disableButtons([load_model_accept]);
}
})(i));
} else {
$("#loadmodel"+i).off("click").on("click", (function () {
return function () {
socket.send({'cmd': 'selectmodel', 'data': $(this).attr("name")});
highlightLoadLine($(this));
enableButtons([load_model_accept]);
}
})(i));
}
}
}
function buildLoadList(ar) {
disableButtons([load_accept]);
loadcontent.html("");
@@ -1771,6 +1824,7 @@ $(document).ready(function(){
// Bind UI references
connect_status = $('#connectstatus');
button_loadmodel = $('#btn_loadmodel');
button_newgame = $('#btn_newgame');
button_rndgame = $('#btn_rndgame');
button_save = $('#btn_save');
@@ -1823,9 +1877,13 @@ $(document).ready(function(){
saveas_accept = $("#btn_saveasaccept");
saveas_close = $("#btn_saveasclose");
loadpopup = $("#loadcontainer");
loadmodelpopup = $("#loadmodelcontainer");
loadcontent = $("#loadlistcontent");
loadmodelcontent = $("#loadmodellistcontent");
load_accept = $("#btn_loadaccept");
load_close = $("#btn_loadclose");
load_model_accept = $("#btn_loadmodelaccept");
load_model_close = $("#btn_loadmodelclose");
sppopup = $("#spcontainer");
spcontent = $("#splistcontent");
sp_accept = $("#btn_spaccept");
@@ -2313,6 +2371,18 @@ $(document).ready(function(){
} else {
debug_area.addClass("hidden");
}
} else if(msg.cmd == 'show_model_menu') {
if(msg.menu == 'gpt2list') {
$("#use_gpu_div").removeClass("hidden");
} else {
$("#use_gpu_div").addClass("hidden");
}
if(msg.menu == 'apilist') {
$("#modelkey").removeClass("hidden");
} else {
$("#modelkey").addClass("hidden");
}
buildLoadModelList(msg.data);
}
});
@@ -2511,6 +2581,10 @@ $(document).ready(function(){
hideLoadPopup();
});
load_model_close.on("click", function(ev) {
hideLoadModelPopup();
});
load_accept.on("click", function(ev) {
hideMessage();
newly_loaded = true;
@@ -2518,6 +2592,13 @@ $(document).ready(function(){
hideLoadPopup();
});
load_model_accept.on("click", function(ev) {
hideMessage();
socket.send({'cmd': 'load_model', 'use_gpu': $('#use_gpu')[0].checked, 'key': $('#modelkey')[0].value});
loadmodelcontent.html("");
hideLoadModelPopup();
});
sp_close.on("click", function(ev) {
hideSPPopup();
});
@@ -2540,6 +2621,11 @@ $(document).ready(function(){
hideUSPopup();
});
button_loadmodel.on("click", function(ev) {
showLoadModelPopup();
socket.send({'cmd': 'list_model', 'data': 'mainmenu'});
});
button_newgame.on("click", function(ev) {
if(connected) {
showNewStoryPopup();

View File

@@ -33,6 +33,12 @@
</button>
<div class="collapse navbar-collapse" id="navbarNavDropdown">
<ul class="nav navbar-nav">
<li class="nav-item dropdown">
<a class="nav-link dropdown-toggle" href="#" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">AI</a>
<div class="dropdown-menu">
<a class="dropdown-item" href="#" id="btn_loadmodel">Load Model</a>
</div>
</li>
<li class="nav-item dropdown">
<a class="nav-link dropdown-toggle" href="#" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">New Game</a>
<div class="dropdown-menu">
@@ -265,6 +271,29 @@
</div>
</div>
</div>
<div class="popupcontainer hidden" id="loadmodelcontainer">
<div id="loadpopup">
<div class="popuptitlebar">
<div class="popuptitletext">Select A Model To Load</div>
</div>
<div class="loadmodellistheader">
<div>Model</div>
</div>
<div id="loadmodellistcontent">
</div>
<div class="popupfooter">
<input class="form-control hidden" type="text" placeholder="key" id="modelkey"><br>
</div>
<div class="popupfooter">
<button type="button" class="btn btn-primary" id="btn_loadmodelaccept">Load</button>
<button type="button" class="btn btn-primary" id="btn_loadmodelclose">Cancel</button>
<div class="box flex-push-right hidden" id=use_gpu_div>
<input type="checkbox" data-toggle="toggle" data-onstyle="success" id="use_gpu" checked>
<div class="box-label">Use GPU</div>
</div>
</div>
</div>
</div>
<div class="popupcontainer hidden" id="spcontainer">
<div id="sppopup">
<div class="popuptitlebar">