Merge pull request #144 from ebolam/Web-UI

Web UI Enhancement, Basic Unit Tests
henk717 2022-06-10 16:07:50 +02:00 committed by GitHub
commit dc45e808c7
7 changed files with 257 additions and 7 deletions

aiserver.py

@@ -330,6 +330,7 @@ class vars:
     debug       = False # If set to true, will send debug information to the client for display
     lazy_load   = True  # Whether or not to use torch_lazy_loader.py for transformers models in order to reduce CPU memory usage
     use_colab_tpu = os.environ.get("COLAB_TPU_ADDR", "") != "" or os.environ.get("TPU_NAME", "") != "" # Whether or not we're in a Colab TPU instance or Kaggle TPU instance and are going to use the TPU rather than the CPU
+    revision    = None
 
 utils.vars = vars
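
A note on the new field: revision is presumably the model revision that later gets handed to the Hugging Face loaders. A minimal sketch of that pattern, with a hypothetical helper name that is not part of this diff:

import transformers

def load_checkpoint(model_id, revision=None):  # hypothetical helper, illustrative only
    # from_pretrained accepts revision=None, which falls back to the
    # default branch ("main") on the Hub
    return transformers.AutoModelForCausalLM.from_pretrained(model_id, revision=revision)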
@@ -378,9 +379,9 @@ def get_folder_path_info(base):
         if path[-1] == "\\":
             path = path[:-1]
         breadcrumbs = []
-        for i in range(len(path.split("\\"))):
-            breadcrumbs.append(["\\".join(path.split("\\")[:i+1]),
-                                path.split("\\")[i]])
+        for i in range(len(path.replace("/", "\\").split("\\"))):
+            breadcrumbs.append(["\\".join(path.replace("/", "\\").split("\\")[:i+1]),
+                                path.replace("/", "\\").split("\\")[i]])
         if len(breadcrumbs) == 1:
             breadcrumbs = [["{}:\\".format(chr(i)), "{}:\\".format(chr(i))] for i in range(65, 91) if os.path.exists("{}:".format(chr(i)))]
         else:
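
The rewritten loop lets the Windows-oriented breadcrumb code accept forward-slash paths as well. A minimal sketch of what the normalization does, using a made-up path:

# Illustration only: what the new replace("/", "\\") buys for a POSIX-style path
path = "models/EleutherAI"
parts = path.replace("/", "\\").split("\\")   # ['models', 'EleutherAI']
breadcrumbs = [["\\".join(parts[:i + 1]), parts[i]] for i in range(len(parts))]
# -> [['models', 'models'], ['models\\EleutherAI', 'EleutherAI']]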
@@ -394,6 +395,7 @@ def get_folder_path_info(base):
     # Paths/breadcrumbs is a list of lists, where the first element in the sublist is the full path and the second is the folder name
     return (paths, breadcrumbs)
 
+
 def getModelSelection(modellist):
     print(" # Model\t\t\t\t\t\tVRAM\n ========================================================")
     i = 1
@@ -907,7 +909,7 @@ def spRequest(filename):
 #==================================================================#
 # Startup
 #==================================================================#
-def general_startup():
+def general_startup(override_args=None):
     global args
     # Parsing Parameters
     parser = argparse.ArgumentParser(description="KoboldAI Server")
@@ -936,7 +938,13 @@ def general_startup(override_args=None):
     parser.add_argument("--lowmem", action='store_true', help="Extra Low Memory loading for the GPU, slower but memory does not peak to twice the usage")
     parser.add_argument("--savemodel", action='store_true', help="Saves the model to the models folder even if --colab is used (Allows you to save models to Google Drive)")
     #args: argparse.Namespace = None
-    if(os.environ.get("KOBOLDAI_ARGS") is not None):
+    if "pytest" in sys.modules and override_args is None:
+        args = parser.parse_args([])
+        return
+    if override_args is not None:
+        import shlex
+        args = parser.parse_args(shlex.split(override_args))
+    elif(os.environ.get("KOBOLDAI_ARGS") is not None):
         import shlex
         args = parser.parse_args(shlex.split(os.environ["KOBOLDAI_ARGS"]))
     else:
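
The override_args path mirrors the existing KOBOLDAI_ARGS handling: the string is tokenized with shell-style rules before argparse parses it, which is what lets the tests below call aiserver.general_startup("--model {}".format(model)) directly. A quick illustration of the tokenization:

import shlex

# shlex.split applies shell quoting rules, so a quoted model id stays one token
print(shlex.split('--model "EleutherAI/gpt-neo-1.3B" --savemodel'))
# -> ['--model', 'EleutherAI/gpt-neo-1.3B', '--savemodel']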

pytest.ini Normal file

@@ -0,0 +1,2 @@
[pytest]
addopts = --ignore=miniconda3 --html=unit_test_report.html -v
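
With this file at the repository root, a bare pytest run ignores the bundled miniconda3 directory, reports verbosely, and writes unit_test_report.html. Note that the --html option is provided by the pytest-html plugin, which must be installed for the suite to start.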

readme.md

@@ -221,7 +221,7 @@ This project contains work from the following contributors :
 * The Gantian - Creator of KoboldAI, has created most features such as the interface, the different AI model / API integrations and in general the largest part of the project.
 * VE FORBRYDERNE - Contributed many features such as the Editing overhaul, Adventure Mode, expansions to the world info section, breakmodel integration, scripting support, softpromtps and much more. As well as vastly improving the TPU compatibility and integrating external code into KoboldAI so we could use official versions of Transformers with virtually no downsides.
 * Henk717 - Contributed the installation scripts, this readme, random story generator, the docker scripts, the foundation for the commandline interface and other smaller changes as well as integrating multiple parts of the code of different forks to unite it all. He also optimized the model loading so that downloaded models get converted to efficient offline models and that in future models are more likely to work out of the box. Not all code Github attributes to Henk717 is by Henk717 as some of it has been integrations of other people's work. We try to clarify this in the contributors list as much as we can.
-* Ebolam - Automatic Saving
+* Ebolam - Automatic Saving, back/redo, pinning, web loading of models
 * Frogging101 - top\_k / tfs support (Part of this support was later redone by VE to integrate what was originally inside of finetuneanon's transformers)
 * UWUplus (Ralf) - Contributed storage systems for community colabs, as well as cleaning up and integrating the website dependencies/code better. He is also the maintainer of flask-cloudflared which we use to generate the cloudflare links.
 * Javalar - Initial Performance increases on the story\_refresh

static/application.js

@@ -2140,14 +2140,17 @@ $(document).ready(function(){
             enableButtons([button_actmem, button_actwi, button_actback, button_actfwd, button_actretry]);
             hideWaitAnimation();
             gamestate = "ready";
+            favicon.stop_swap();
         } else if(msg.data == "wait") {
             gamestate = "wait";
             disableSendBtn();
             disableButtons([button_actmem, button_actwi, button_actback, button_actfwd, button_actretry]);
             showWaitAnimation();
+            favicon.start_swap();
         } else if(msg.data == "start") {
             setStartState();
             gamestate = "ready";
+            favicon.stop_swap();
         }
     } else if(msg.cmd == "allowsp") {
         allowsp = !!msg.data;
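
These three calls tie the favicon to the game state: entering "wait" starts the icon swap implemented in static/favicon.js below, while returning to "ready" (or a fresh "start") restores the static icon, so the browser tab itself signals when generation is in progress.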

static/favicon.js Normal file

@@ -0,0 +1,64 @@
// Global Definitions
var fav_icon2 = "data:image/x-icon;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAB+1BMVEUAAAAAAAAAAAAAAAAAAQAAAAAAAQAAAAAAAAASFhBBWD4iUyoFEwgFEwguUTM+VDoMFAwAAAA+elIudz8AAAAAAAA0MigyLyQAAAAbLh1LdElSbUoVMBkAAABAZ0M2fkUAAAABAQFMiGQraDkAAQANFxEGFQkLFg8EEAYAAAAsZDonZjUAAABCgVVAnFYrSjhEjFpFi1sdRScAAAAjOi8VMxx1dGOFgGYAAABOTEabmIdlYlQaGhgaGhddXFauqY5JRjoAAAAAAAABAQFGeExIl1lX0XRW0XRHi1RFe02vv5W31KFd1Hpc1Hpe1HvO1KvDvJlqZ1plYVOmoIVt1IFl1H7AuZp1cV9jX1AmSCw3Nzg7NmA1MTJuz4Bm1H5MST9HPl9BQEMgNiNXgWKiobFgXICDd5dfw3RZVnJiV3zGv9Bqf29Oj2G/v8hTTpGhl8dbxHVd0npiYoxhWJvIxtlcimZFn1lRclg9SkZNblZBeEpDbEZCa0ZBc0hLY1BAS1BdaV87j01Vx3FWynJSrGZOhlVasGtas2xatm1at21WnWJQm15WyXJQvmlavnBZrGlEYEJWe1RBWz9Um2BavXBgxn9XhllGY0RLaklXiFlTwG5OpmVSfFNMbUpGZEVLa0lShldEhVCChHiKiHvWz6/Kw6WWlZGAfmj///8kr0X+AAAARHRSTlMAASFrcAhxIjLb/vWvsPb+20b4+DFFyMkz2vf43CP9/m5y9vZysLGvsQn19mz+/tz4+NxHycr3+Ejb/vaxsPX+3TRtcBrzrrgAAAABYktHRKhQCDaSAAAAB3RJTUUH5gYJFyQy3tftxgAAAQBJREFUGNNjYGBgYGRiZmFlZWNmZ2SAAA5OLm4eXj5+AQ6ogKCQi6ubu4ensCCIxygiKubl7ePr6+cfIC4owcjAJCkVGBQc4usbGhYeIS0jy8AsFxkVHRPr6xsXn5CYJK/AoKiUnJKalg5UkZGZla2swsCqmpObl1/g61tYVFxSqsbKwKpeVl5RWVVdU1tX39CoocnAotXU3NLa1t7R2dXd06utwqCj6+vb1z9h4sRJk6f4+uopMLDrG0z1nTZ94sQZM31nGRrJMjBKGJvMnjN3wrz5CxaaCnKAvSNqtmjxkqXLlptbQP0iYmllbWNrZ+/gCBVgZHdS1GR1VpAFqQcApI0/jqlZOvEAAAAldEVYdGRhdGU6Y3JlYXRlADIwMjItMDYtMDlUMjM6MzY6NTArMDA6MDDi0xr+AAAAJXRFWHRkYXRlOm1vZGlmeQAyMDIyLTA2LTA5VDIzOjM2OjUwKzAwOjAwk46iQgAAAABJRU5ErkJggg==";
var fav_icon1 = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAB+FBMVEUAAAAAAAAAAAAAAAAAAAEAAAAAAQEAAAAAAAAUFRlLVGYrSWgHEBoHEBk3S19HUGMOExkAAABOcos7apIAAAAAAAA2Ly01KyoAAAAgKzdVaX9bZHIaKzwAAABKYHhDcZgAAAABAQFfgJY2XX0AAQEQFhoIEhwOFRgGDRUAAAAAAQE3W3cyWnwAAABSeJJRjLs1R1FVgaFWgJ4lPlMAAAAsOD4aLj55bm2Md3QAAABPSkmfko9pXlsbGRkbGRlfWlm1oJxMQkAAAAAAAAABAQFTb4tYibFtvPpWgKNScpC6s7nExtNzwPp1wPnZx8jMsKtuZGFoXVutmJODwfJ7wfbHr6p5a2hnW1gtQlI4ODk7N2A2LzWDvet8wPZPRkRHPl9CQUQlMTthe4+ko7RhXYGEeJhzsuJaVXRjWHzIwtNwfYddhqLCwcpTTpGimMhvsuVzv/djYpBgWJvLydxlgptVirdZbX1ASFZUaXtOb4xOZX1OZHxNa4ZRX21DSV5gaG9Je6lqsepstO1knclcfJxtoc5tpNFuptVup9ZnkbdgjrVss+xjpuBvrd9snspOW29jdI5LVmlkj7Vvrd54t+RlfptQXXJWZHtlf51oruNgmMFfdJBYZn1RXnRWZXthfZxSeZiGgYGOhYLdxb/RubWZlpWFd3T////2kwjgAAAARXRSTlMAASFrcAhxIjLb/vWvsPb+20b4+DFFyMkz2vf43CP9/m5y9vZysLGvsQlw9fZs/v7c+PjcR8nK9/hI2/72sbD1/t00bXBAFktiAAAAAWJLR0SnwLcrAwAAAAd0SU1FB+YGCRchHQhxJNoAAAD/SURBVBjTY2BgYGBkYmZhZWVjZmdkgAAOTi5uHl4+fgEOqICgkKubu7uHp7AgiMcoIirm5e3j4+Pr5y8uKMHIwCQpFRAYFOzjExIaFi4tI8vALBcRGRUd4+MTGxefkCivwKColJSckpoGVJGekZmlrMLAqpqdk5uX7+NTUFhUXKLGysCqXlpWXlFZVV1TW1ffoKHJoKXd2NTc0trW3tHZ1d2jo8Kgq+fj09vXP2HCxEmTfXz0FRjYDQyn+EydNmHC9Bk+M42MZRkYJUxMZ82e0z933vwFZoIcYO+Imi9ctHjJ0mUWllC/iFhZ29ja2Ts4OkEFGNmdFTVZXRRkQeoBhkE/Yj5NSZ4AAAAldEVYdGRhdGU6Y3JlYXRlADIwMjItMDYtMDlUMjM6MzM6MjgrMDA6MDA90JbEAAAAJXRFWHRkYXRlOm1vZGlmeQAyMDIyLTA2LTA5VDIzOjMzOjI4KzAwOjAwTI0ueAAAAABJRU5ErkJggg==";
var fav_icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAB8lBMVEUAAAAAAAAAAAAAAAABAAAAAAABAAAAAAAAAAAdEBB0Pz5rKCgaBwcZBwdkMzJxPDocDAwAAACLTU6SOzsAAAAAAAA9Mic/LyEAAAA6HByQUUaIVEY+GBgAAACAQkKaQUIAAAABAQGWXl9+NjYBAAAaEBAcCAgZDQ0WBQUAAAB3Nzd9MjIAAACTUVK7UVJRNTWhVVaeVldTJSUAAAA+LC0+GhuGcmCgf2EAAABUTESrl4NzYlEdGhcdGhdiXFbIqIhWRjcAAAAAAAABAQGUSkq1VVX6bW6oUVGXS0vmro7+uJn6c3T6dXX/yqPnu5F3aFhxYVG/oH/7gHv6enjeuJOEcFtzX01VLCs4ODk7NmA5MTH1gHr6e3hWSTxHPl9CQUQ/JCKPYGGko7RhXYGEeJjmcW9cVnFjWH3IwtOHb3CjXV3CwcpTTpGimMjlb3D4c3RmYI1gWJvLydybZWW+T0x+V1hRP0Z7U1WTSEiHRUWGRUSORkZuTlBRQVBwX2CvRkXtaGjvamrNYWKmU1PVZ2fXaGjbaWncaWnAX1+7W1vkYF/ja2zRZWV9QkGeVFN2Pz69XV3ia2zkeHmpWFd/REOJSUirWVjjaGjBYGCeUlKMSkl8QkGBRUSoVlWeUE2QgXeWiHr1zqjmw5+bl5KVe2T///8NZLRGAAAARHRSTlMAASFrcAhxIjLb/vWvsPb+20b4+DFFyMkz2vf43CP9/m5y9vZysLGvsQn19mz+/tz4+NxHycr3+Ejb/vaxsPX+3TRtcBrzrrgAAAABYktHRKUuuUovAAAAB3RJTUUH5gYJFzsfVlK/LQAAAP9JREFUGNNjYGBgYGRiZmFlZWNmZ2SAAA5OLm4eXj5+AQ6ogKCQi6ubm7uHsCCIxygiKubp5e3t7ePrJy4owcjAJCnlHxAY5O0dHBIaJi0jy8AsFx4RGRXt7R0TGxefIK/AoKiUmJSckgpUkZaekamswsCqmpWdk5vn7Z1fUFhUrMbKwKpeUlpWXlFZVV1TW1evocnAotXQ2NTc0trW3tHZ2KWtwqCj6+3d3dPb19c/YaK3t54CA7u+wSTvyVP6+qZO855uaCTLwChhbDJj5qzZc6bOnWcqyAH2jqjZ/AULFy1eYm4B9YuIpZW1ja2dvYMjVICR3UlRk9VZQRakHgAlRz6K4dvoSgAAACV0RVh0ZGF0ZTpjcmVhdGUAMjAyMi0wNi0wOVQyMzo1OTozMSswMDowMJt1iQMAAAAldEVYdGRhdGU6bW9kaWZ5ADIwMjItMDYtMDlUMjM6NTk6MzErMDA6MDDqKDG/AAAAAElFTkSuQmCC"
var favicon = {
    // Change the Page Icon and Title.
    change: function(iconURL) {
        this.addLink(iconURL, "icon");
        this.addLink(iconURL, "shortcut icon");
    },
    addLink: function(iconURL, relValue) {
        var link = document.createElement("link");
        link.type = "image/x-icon";
        link.rel = relValue;
        link.href = iconURL;
        this.removeLink(relValue);
        this.docHead.appendChild(link);
    },
    removeLink: function(relValue) {
        var links = this.docHead.getElementsByTagName("link");
        for (var i = 0; i < links.length; i++) {
            var link = links[i];
            if (link.type == "image/x-icon" && link.rel == relValue) {
                this.docHead.removeChild(link);
                return; // Assuming only one match at most.
            }
        }
    },
    swapLink: function() {
        if (this.run == true) {
            if (this.icon == 1) {
                this.change(fav_icon2);
                this.icon = 2;
            } else {
                this.change(fav_icon1);
                this.icon = 1;
            }
        }
    },
    auto_swap: function() {
        if (this.run == true) {
            this.swapLink();
            setTimeout(() => { this.auto_swap(); }, 1000);
        }
    },
    start_swap: function() {
        this.run = true;
        this.auto_swap();
    },
    stop_swap: function() {
        this.run = false;
        this.change(fav_icon);
    },
    docHead: document.getElementsByTagName("head")[0]
}

templates/index.html

@@ -18,6 +18,7 @@
     <script src="static/bootstrap-toggle.min.js"></script>
     <script src="static/rangy-core.min.js"></script>
     <script src="static/application.js?ver=1.18c"></script>
+    <script src="static/favicon.js"></script>
 </head>
 <body>
     <input type="file" id="remote-save-select" accept="application/json" style="display:none">

test_aiserver.py Normal file

@@ -0,0 +1,172 @@
import pytest, time
import aiserver

#Test Model List:
test_models = [
    ('EleutherAI/gpt-neo-1.3B', {'key': False, 'gpu': False, 'layer_count': 24, 'breakmodel': True, 'url': False}),
    ('gpt2', {'key': False, 'gpu': False, 'layer_count': 12, 'breakmodel': True, 'url': False}),
    ('facebook/opt-350m', {'key': False, 'gpu': False, 'layer_count': 24, 'breakmodel': True, 'url': False})
]

@pytest.fixture
def client_data():
    app = aiserver.app
    #app.test_client_class = FlaskLoginClient
    client_conn = app.test_client()
    socketio_client = aiserver.socketio.test_client(app, flask_test_client=client_conn)
    #Clear out the connection message
    response = socketio_client.get_received()
    return (client_conn, app, socketio_client)

def get_model_menu(model):
    for menu in aiserver.model_menu:
        for item in aiserver.model_menu[menu]:
            if item[1] == model:
                for main_menu_line in aiserver.model_menu['mainmenu']:
                    if main_menu_line[1] == menu:
                        return (menu, main_menu_line, item)
    return None
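
get_model_menu walks aiserver.model_menu for the row whose second column is the requested model id, then finds the mainmenu row pointing at that submenu. A sketch of the structure this assumes (entries illustrative, not the real menu):

# Hypothetical fragment: each row is roughly [display name, model id or submenu key, VRAM note, is_menu]
example_model_menu = {
    'mainmenu':   [["Untuned GPT-Neo/J", 'gptneolist', "", True]],
    'gptneolist': [["GPT-Neo 1.3B", 'EleutherAI/gpt-neo-1.3B', "8GB", False]],
}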
def generate_story_data(client_data):
    (client, app, socketio_client) = client_data
    socketio_client.emit('message',{'cmd': 'submit', 'allowabort': False, 'actionmode': 0, 'chatname': None, 'data': ''})
    #wait until the game state turns back to start
    state = 'wait'
    new_text = None
    start_time = time.time()
    timeout = time.time() + 60*1
    while state == 'wait':
        if time.time() > timeout:
            break
        responses = socketio_client.get_received()
        for response in responses:
            response = response['args'][0]
            print(response)
            if response['cmd'] == 'setgamestate':
                state = response['data']
            elif response['cmd'] == 'updatechunk' or response['cmd'] == 'genseqs':
                new_text = response['data']
        time.sleep(0.1)
    assert new_text is not None
def test_basic_connection(client_data):
    (client, app, socketio_client) = client_data
    response = client.get("/")
    assert response.status_code == 200

def test_load_story_from_web_ui(client_data):
    (client, app, socketio_client) = client_data
    #List out the stories and make sure we have the sample story
    socketio_client.emit('message',{'cmd': 'loadlistrequest', 'data': ''})
    response = socketio_client.get_received()[0]['args'][0]['data']
    found_sample_story = False
    for story in response:
        if story['name'] == 'sample_story':
            found_sample_story = True
    assert found_sample_story
    #Click on the sample story, then click load
    socketio_client.emit('message',{'cmd': 'loadselect', 'data': 'sample_story'})
    socketio_client.emit('message',{'cmd': 'loadrequest', 'data': ''})
    #Wait until we get the data back from the load
    loaded_story = False
    timeout = time.time() + 60*2
    while not loaded_story:
        if time.time() > timeout:
            break
        responses = socketio_client.get_received()
        for response in responses:
            response = response['args'][0]
            if 'cmd' not in response:
                print(response)
                assert False
            if response['cmd'] == 'updatescreen':
                loaded_story = True
                story_text = response['data']
                break
    assert loaded_story
    #Verify that it's the right story data
    assert story_text == '<chunk n="0" id="n0" tabindex="-1">Niko the kobold stalked carefully down the alley, his small scaly figure obscured by a dusky cloak that fluttered lightly in the cold winter breeze. Holding up his tail to keep it from dragging in the dirty snow that covered the cobblestone, he waited patiently for the butcher to turn his attention from his stall so that he could pilfer his next meal: a tender-looking</chunk><chunk n="1" id="n1" tabindex="-1"> chicken. He crouched just slightly as he neared the stall to ensure that no one was watching, not that anyone would be dumb enough to hassle a small kobold. What else was there for a lowly kobold to</chunk><chunk n="2" id="n2" tabindex="-1"> do in a city? All that Niko needed to know was</chunk><chunk n="3" id="n3" tabindex="-1"> where to find the chicken and then how to make off with it.<br/><br/>A soft thud caused Niko to quickly lift his head. Standing behind the stall where the butcher had been cutting his chicken,</chunk>'
@pytest.mark.parametrize("model, expected_load_options", test_models)
def test_load_model_from_web_ui(client_data, model, expected_load_options):
    (client, app, socketio_client) = client_data
    #Clear out any old messages
    response = socketio_client.get_received()
    (menu, menu_line, model_line) = get_model_menu(model)
    #Send the ai load model menu option
    socketio_client.emit('message',{'cmd': 'list_model', 'data': 'mainmenu'})
    response = socketio_client.get_received()[0]['args'][0]['data']
    assert menu_line in response
    #Send the click model menu option
    socketio_client.emit('message',{'cmd': 'list_model', 'data': menu, 'pretty_name': ""})
    response = socketio_client.get_received()[0]['args'][0]['data']
    assert model_line in response
    #Click the model
    socketio_client.emit('message',{'cmd': 'selectmodel', 'data': model})
    response = socketio_client.get_received()[0]['args'][0]
    #Check that we're getting the right load options
    print(response)
    assert response['key'] == expected_load_options['key']
    assert response['gpu'] == expected_load_options['gpu']
    assert response['layer_count'] == expected_load_options['layer_count']
    assert response['breakmodel'] == expected_load_options['breakmodel']
    assert response['url'] == expected_load_options['url']
    #Now send the load
    socketio_client.emit('message',{'cmd': 'load_model', 'use_gpu': True, 'key': '', 'gpu_layers': '', 'url': '', 'online_model': ''})
    #wait until the game state turns back to start
    state = 'wait'
    start_time = time.time()
    timeout = time.time() + 60*2
    while state == 'wait':
        if time.time() > timeout:
            break
        responses = socketio_client.get_received()
        for response in responses:
            response = response['args'][0]
            if response['cmd'] == 'setgamestate':
                state = response['data']
        time.sleep(0.1)
    #Give it a second to get all of the settings, etc and clear out the messages
    responses = socketio_client.get_received()
    #check the model info to see if it's loaded
    socketio_client.emit('message',{'cmd': 'show_model', 'data': ''})
    response = socketio_client.get_received()[0]['args'][0]
    assert response == {'cmd': 'show_model_name', 'data': model}
    generate_story_data(client_data)
@pytest.mark.parametrize("model, expected_load_options", test_models)
def test_load_model_from_command_line(client_data, model, expected_load_options):
    (client, app, socketio_client) = client_data
    #Clear out any old messages
    response = socketio_client.get_received()
    (menu, menu_line, model_line) = get_model_menu(model)
    aiserver.general_startup("--model {}".format(model))
    aiserver.load_model(initial_load=True)
    #check the model info to see if it's loaded
    socketio_client.emit('message',{'cmd': 'show_model', 'data': ''})
    response = socketio_client.get_received()[0]['args'][0]
    assert response == {'cmd': 'show_model_name', 'data': model}
    generate_story_data(client_data)
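
Note that the parametrized tests load real checkpoints (gpt2, EleutherAI/gpt-neo-1.3B, facebook/opt-350m): a first run downloads them, and each load or generation cycle polls setgamestate messages with a one- to two-minute timeout before the assertions fail.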