From 663dee784d2e3876954aab7781d6ebc12006084f Mon Sep 17 00:00:00 2001
From: ebolam
Date: Thu, 9 Jun 2022 13:16:32 -0400
Subject: [PATCH 1/5] Unit Tests using pytest and Minor modifications to allow
 unit testing

---
 aiserver.py   |  12 +++-
 unit_tests.py | 172 ++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 182 insertions(+), 2 deletions(-)
 create mode 100644 unit_tests.py

diff --git a/aiserver.py b/aiserver.py
index fd8f00fa..ae3af5c6 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -330,6 +330,7 @@ class vars:
     debug       = False # If set to true, will send debug information to the client for display
     lazy_load   = True  # Whether or not to use torch_lazy_loader.py for transformers models in order to reduce CPU memory usage
     use_colab_tpu = os.environ.get("COLAB_TPU_ADDR", "") != "" or os.environ.get("TPU_NAME", "") != ""  # Whether or not we're in a Colab TPU instance or Kaggle TPU instance and are going to use the TPU rather than the CPU
+    revision    = None
 
 utils.vars = vars
 
@@ -907,7 +908,7 @@ def spRequest(filename):
 #==================================================================#
 # Startup
 #==================================================================#
-def general_startup():
+def general_startup(override_args=None):
     global args
     # Parsing Parameters
     parser = argparse.ArgumentParser(description="KoboldAI Server")
@@ -936,7 +937,14 @@ def general_startup():
     parser.add_argument("--lowmem", action='store_true', help="Extra Low Memory loading for the GPU, slower but memory does not peak to twice the usage")
     parser.add_argument("--savemodel", action='store_true', help="Saves the model to the models folder even if --colab is used (Allows you to save models to Google Drive)")
     #args: argparse.Namespace = None
-    if(os.environ.get("KOBOLDAI_ARGS") is not None):
+    if len(sys.argv) > 1 and override_args is None:
+        if sys.argv[1] == 'unit_tests.py':
+            args = parser.parse_args([])
+            return
+    if override_args is not None:
+        import shlex
+        args = parser.parse_args(shlex.split(override_args))
+    elif(os.environ.get("KOBOLDAI_ARGS") is not None):
         import shlex
         args = parser.parse_args(shlex.split(os.environ["KOBOLDAI_ARGS"]))
     else:
diff --git a/unit_tests.py b/unit_tests.py
new file mode 100644
index 00000000..c947267b
--- /dev/null
+++ b/unit_tests.py
@@ -0,0 +1,172 @@
+import pytest, time
+import aiserver
+
+#Test Model List:
+test_models = [
+    ('EleutherAI/gpt-neo-1.3B', {'key': False, 'gpu': False, 'layer_count': 24, 'breakmodel': True, 'url': False}),
+    ('gpt2', {'key': False, 'gpu': False, 'layer_count': 12, 'breakmodel': True, 'url': False}),
+    ('facebook/opt-350m', {'key': False, 'gpu': False, 'layer_count': 24, 'breakmodel': True, 'url': False})
+    ]
+
+@pytest.fixture
+def client_data():
+    app = aiserver.app
+    #app.test_client_class = FlaskLoginClient
+    client_conn = app.test_client()
+    socketio_client = aiserver.socketio.test_client(app, flask_test_client=client_conn)
+    #Clear out the connection message
+    response = socketio_client.get_received()
+    return (client_conn, app, socketio_client)
+
+
+def get_model_menu(model):
+    for menu in aiserver.model_menu:
+        for item in aiserver.model_menu[menu]:
+            if item[1] == model:
+                for main_menu_line in aiserver.model_menu['mainmenu']:
+                    if main_menu_line[1] == menu:
+                        return (menu, main_menu_line, item)
+    return None
+
+def generate_story_data(client_data):
+    (client, app, socketio_client) = client_data
+    socketio_client.emit('message',{'cmd': 'submit', 'allowabort': False, 'actionmode': 0, 'chatname': None, 'data': ''})
+
+    #wait until the game state turns back to start
+    state = 'wait'
+    new_text = None
+    start_time = time.time()
+    timeout = time.time() + 60*1
+    while state == 'wait':
+        if time.time() > timeout:
+            break
+        responses = socketio_client.get_received()
+        for response in responses:
+            response = response['args'][0]
+            print(response)
+            if response['cmd'] == 'setgamestate':
+                state = response['data']
+            elif response['cmd'] == 'updatechunk' or response['cmd'] == 'genseqs':
+                new_text = response['data']
+        time.sleep(0.1)
+
+    assert new_text is not None
+
+def test_basic_connection(client_data):
+    (client, app, socketio_client) = client_data
+    response = client.get("/")
+    assert response.status_code == 200
+
+def test_load_story_from_web_ui(client_data):
+    (client, app, socketio_client) = client_data
+
+    #List out the stories and make sure we have the sample story
+    socketio_client.emit('message',{'cmd': 'loadlistrequest', 'data': ''})
+    response = socketio_client.get_received()[0]['args'][0]['data']
+    found_sample_story = False
+    for story in response:
+        if story['name'] == 'sample_story':
+            found_sample_story = True
+    assert found_sample_story
+
+    #Click on the sample story, then click load
+    socketio_client.emit('message',{'cmd': 'loadselect', 'data': 'sample_story'})
+    socketio_client.emit('message',{'cmd': 'loadrequest', 'data': ''})
+
+    #Wait until we get the data back from the load
+    loaded_story = False
+    timeout = time.time() + 60*2
+    while not loaded_story:
+        if time.time() > timeout:
+            break
+        responses = socketio_client.get_received()
+        for response in responses:
+            response = response['args'][0]
+            if 'cmd' not in response:
+                print(response)
+                assert False
+            if response['cmd'] == 'updatescreen':
+                loaded_story = True
+                story_text = response['data']
+                break
+    assert loaded_story
+
+    #Verify that it's the right story data
+    assert story_text == 'Niko the kobold stalked carefully down the alley, his small scaly figure obscured by a dusky cloak that fluttered lightly in the cold winter breeze. Holding up his tail to keep it from dragging in the dirty snow that covered the cobblestone, he waited patiently for the butcher to turn his attention from his stall so that he could pilfer his next meal: a tender-looking chicken. He crouched just slightly as he neared the stall to ensure that no one was watching, not that anyone would be dumb enough to hassle a small kobold. What else was there for a lowly kobold to do in a city? All that Niko needed to know was where to find the chicken and then how to make off with it.

A soft thud caused Niko to quickly lift his head. Standing behind the stall where the butcher had been cutting his chicken,
'
+@pytest.mark.parametrize("model, expected_load_options", test_models)
+def test_load_model_from_web_ui(client_data, model, expected_load_options):
+    (client, app, socketio_client) = client_data
+
+    #Clear out any old messages
+    response = socketio_client.get_received()
+
+    (menu, menu_line, model_line) = get_model_menu(model)
+
+    #Send the ai load model menu option
+    socketio_client.emit('message',{'cmd': 'list_model', 'data': 'mainmenu'})
+    response = socketio_client.get_received()[0]['args'][0]['data']
+    assert menu_line in response
+
+    #Send the click model menu option
+    socketio_client.emit('message',{'cmd': 'list_model', 'data': menu, 'pretty_name': ""})
+    response = socketio_client.get_received()[0]['args'][0]['data']
+    assert model_line in response
+
+    #Click the model
+    socketio_client.emit('message',{'cmd': 'selectmodel', 'data': model})
+    response = socketio_client.get_received()[0]['args'][0]
+    #Check that we're getting the right load options
+    print(response)
+    assert response['key'] == expected_load_options['key']
+    assert response['gpu'] == expected_load_options['gpu']
+    assert response['layer_count'] == expected_load_options['layer_count']
+    assert response['breakmodel'] == expected_load_options['breakmodel']
+    assert response['url'] == expected_load_options['url']
+
+    #Now send the load
+    socketio_client.emit('message',{'cmd': 'load_model', 'use_gpu': True, 'key': '', 'gpu_layers': '', 'url': '', 'online_model': ''})
+    #wait until the game state turns back to start
+    state = 'wait'
+    start_time = time.time()
+    timeout = time.time() + 60*2
+    while state == 'wait':
+        if time.time() > timeout:
+            break
+        responses = socketio_client.get_received()
+        for response in responses:
+            response = response['args'][0]
+            if response['cmd'] == 'setgamestate':
+                state = response['data']
+        time.sleep(0.1)
+
+    #Give it a second to get all of the settings, etc and clear out the messages
+    responses = socketio_client.get_received()
+
+    #check the model info to see if it's loaded
+    socketio_client.emit('message',{'cmd': 'show_model', 'data': ''})
+    response = socketio_client.get_received()[0]['args'][0]
+    assert response == {'cmd': 'show_model_name', 'data': model}
+
+    generate_story_data(client_data)
+
+@pytest.mark.parametrize("model, expected_load_options", test_models)
+def test_load_model_from_command_line(client_data, model, expected_load_options):
+    (client, app, socketio_client) = client_data
+
+    #Clear out any old messages
+    response = socketio_client.get_received()
+
+    (menu, menu_line, model_line) = get_model_menu(model)
+
+    aiserver.general_startup("--model {}".format(model))
+
+    aiserver.load_model(initial_load=True)
+
+    #check the model info to see if it's loaded
+    socketio_client.emit('message',{'cmd': 'show_model', 'data': ''})
+    response = socketio_client.get_received()[0]['args'][0]
+    assert response == {'cmd': 'show_model_name', 'data': model}
+
+    generate_story_data(client_data)
+

From f89d1f131fda2bf74a758e0df82c08281ff36c69 Mon Sep 17 00:00:00 2001
From: ebolam
Date: Thu, 9 Jun 2022 13:34:13 -0400
Subject: [PATCH 2/5] Update readme.md

---
 readme.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/readme.md b/readme.md
index a136c856..ee0d601b 100644
--- a/readme.md
+++ b/readme.md
@@ -221,7 +221,7 @@ This project contains work from the following contributors :
 * The Gantian - Creator of KoboldAI, has created most features such as the interface, the different AI model / API integrations and in general the largest part of the project.
 * VE FORBRYDERNE - Contributed many features such as the Editing overhaul, Adventure Mode, expansions to the world info section, breakmodel integration, scripting support, softpromtps and much more. As well as vastly improving the TPU compatibility and integrating external code into KoboldAI so we could use official versions of Transformers with virtually no downsides.
 * Henk717 - Contributed the installation scripts, this readme, random story generator, the docker scripts, the foundation for the commandline interface and other smaller changes as well as integrating multiple parts of the code of different forks to unite it all. He also optimized the model loading so that downloaded models get converted to efficient offline models and that in future models are more likely to work out of the box. Not all code Github attributes to Henk717 is by Henk717 as some of it has been integrations of other people's work. We try to clarify this in the contributors list as much as we can.
-* Ebolam - Automatic Saving
+* Ebolam - Automatic Saving, back/redo, pinning, web loading of models
 * Frogging101 - top\_k / tfs support (Part of this support was later redone by VE to integrate what was originally inside of finetuneanon's transformers)
 * UWUplus (Ralf) - Contributed storage systems for community colabs, as well as cleaning up and integrating the website dependencies/code better. He is also the maintainer of flask-cloudflared which we use to generate the cloudflare links.
 * Javalar - Initial Performance increases on the story\_refresh
@@ -238,4 +238,4 @@ Did we miss your contribution? Feel free to issue a commit adding your name to t
 KoboldAI is licensed with a AGPL license, in short this means that it can be used by anyone for any purpose. However, if you decide to make a publicly available instance your users are entitled to a copy of the source code including all modifications that you have made (which needs to be available trough an interface such as a button on your website), you may also not distribute this project in a form that does not contain the source code (Such as compiling / encrypting the code and distributing this version without also distributing the source code that includes the changes that you made. You are allowed to distribute this in a closed form if you also provide a separate archive with the source code.).
 
-umamba.exe is bundled for convenience because we observed that many of our users had trouble with command line download methods, it is not part of our project and does not fall under the AGPL license. It is licensed under the BSD-3-Clause license. Other files with differing licenses will have a reference or embedded version of this license within the file.
\ No newline at end of file
+umamba.exe is bundled for convenience because we observed that many of our users had trouble with command line download methods, it is not part of our project and does not fall under the AGPL license. It is licensed under the BSD-3-Clause license. Other files with differing licenses will have a reference or embedded version of this license within the file.
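[PATCH 1/5] above gives general_startup() an override_args hook: an explicit string is shlex-split and fed to argparse in place of sys.argv or the KOBOLDAI_ARGS environment variable, which is what lets test_load_model_from_command_line drive the server with "--model {}".format(model). A minimal standalone sketch of that precedence follows; it is not part of the patches, general_startup_sketch is an illustrative name, the parser is cut down to a single --model flag, and the pytest guard shown is the cleaner '"pytest" in sys.modules' form that [PATCH 4/5] below switches to:

    import argparse, os, shlex, sys

    def general_startup_sketch(override_args=None):
        parser = argparse.ArgumentParser(description="KoboldAI Server")
        parser.add_argument("--model")
        # Under pytest with no explicit override, take bare defaults so that
        # importing aiserver never consumes pytest's own command line.
        if "pytest" in sys.modules and override_args is None:
            return parser.parse_args([])
        # An explicit override string is split like a shell command line.
        if override_args is not None:
            return parser.parse_args(shlex.split(override_args))
        # Otherwise fall back to the KOBOLDAI_ARGS environment variable...
        if os.environ.get("KOBOLDAI_ARGS") is not None:
            return parser.parse_args(shlex.split(os.environ["KOBOLDAI_ARGS"]))
        # ...and finally to the real command line.
        return parser.parse_args()

    args = general_startup_sketch("--model gpt2")
    assert args.model == "gpt2"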
From 32b883892aed147bbf54fd6a869f4d3896af86af Mon Sep 17 00:00:00 2001
From: ebolam
Date: Thu, 9 Jun 2022 20:03:34 -0400
Subject: [PATCH 3/5] Added favicon swapping mechanism on aibusy

---
 static/application.js |  3 ++
 static/favicon.js     | 64 +++++++++++++++++++++++++++++++++++++++++++
 templates/index.html  |  1 +
 3 files changed, 68 insertions(+)
 create mode 100644 static/favicon.js

diff --git a/static/application.js b/static/application.js
index 5cc5c227..edc85c95 100644
--- a/static/application.js
+++ b/static/application.js
@@ -2140,14 +2140,17 @@ $(document).ready(function(){
             enableButtons([button_actmem, button_actwi, button_actback, button_actfwd, button_actretry]);
             hideWaitAnimation();
             gamestate = "ready";
+            favicon.stop_swap();
         } else if(msg.data == "wait") {
             gamestate = "wait";
             disableSendBtn();
             disableButtons([button_actmem, button_actwi, button_actback, button_actfwd, button_actretry]);
             showWaitAnimation();
+            favicon.start_swap();
         } else if(msg.data == "start") {
             setStartState();
             gamestate = "ready";
+            favicon.stop_swap();
         }
     } else if(msg.cmd == "allowsp") {
         allowsp = !!msg.data;
diff --git a/static/favicon.js b/static/favicon.js
new file mode 100644
index 00000000..180059ff
--- /dev/null
+++ b/static/favicon.js
@@ -0,0 +1,64 @@
+// Global Definitions
+var fav_icon2 = "data:image/x-icon;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAB+1BMVEUAAAAAAAAAAAAAAAAAAQAAAAAAAQAAAAAAAAASFhBBWD4iUyoFEwgFEwguUTM+VDoMFAwAAAA+elIudz8AAAAAAAA0MigyLyQAAAAbLh1LdElSbUoVMBkAAABAZ0M2fkUAAAABAQFMiGQraDkAAQANFxEGFQkLFg8EEAYAAAAsZDonZjUAAABCgVVAnFYrSjhEjFpFi1sdRScAAAAjOi8VMxx1dGOFgGYAAABOTEabmIdlYlQaGhgaGhddXFauqY5JRjoAAAAAAAABAQFGeExIl1lX0XRW0XRHi1RFe02vv5W31KFd1Hpc1Hpe1HvO1KvDvJlqZ1plYVOmoIVt1IFl1H7AuZp1cV9jX1AmSCw3Nzg7NmA1MTJuz4Bm1H5MST9HPl9BQEMgNiNXgWKiobFgXICDd5dfw3RZVnJiV3zGv9Bqf29Oj2G/v8hTTpGhl8dbxHVd0npiYoxhWJvIxtlcimZFn1lRclg9SkZNblZBeEpDbEZCa0ZBc0hLY1BAS1BdaV87j01Vx3FWynJSrGZOhlVasGtas2xatm1at21WnWJQm15WyXJQvmlavnBZrGlEYEJWe1RBWz9Um2BavXBgxn9XhllGY0RLaklXiFlTwG5OpmVSfFNMbUpGZEVLa0lShldEhVCChHiKiHvWz6/Kw6WWlZGAfmj///8kr0X+AAAARHRSTlMAASFrcAhxIjLb/vWvsPb+20b4+DFFyMkz2vf43CP9/m5y9vZysLGvsQn19mz+/tz4+NxHycr3+Ejb/vaxsPX+3TRtcBrzrrgAAAABYktHRKhQCDaSAAAAB3RJTUUH5gYJFyQy3tftxgAAAQBJREFUGNNjYGBgYGRiZmFlZWNmZ2SAAA5OLm4eXj5+AQ6ogKCQi6ubu4ensCCIxygiKubl7ePr6+cfIC4owcjAJCkVGBQc4usbGhYeIS0jy8AsFxkVHRPr6xsXn5CYJK/AoKiUnJKalg5UkZGZla2swsCqmpObl1/g61tYVFxSqsbKwKpeVl5RWVVdU1tX39CoocnAotXU3NLa1t7R2dXd06utwqCj6+vb1z9h4sRJk6f4+uopMLDrG0z1nTZ94sQZM31nGRrJMjBKGJvMnjN3wrz5CxaaCnKAvSNqtmjxkqXLlptbQP0iYmllbWNrZ+/gCBVgZHdS1GR1VpAFqQcApI0/jqlZOvEAAAAldEVYdGRhdGU6Y3JlYXRlADIwMjItMDYtMDlUMjM6MzY6NTArMDA6MDDi0xr+AAAAJXRFWHRkYXRlOm1vZGlmeQAyMDIyLTA2LTA5VDIzOjM2OjUwKzAwOjAwk46iQgAAAABJRU5ErkJggg==";
+var fav_icon1 = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAB+FBMVEUAAAAAAAAAAAAAAAAAAAEAAAAAAQEAAAAAAAAUFRlLVGYrSWgHEBoHEBk3S19HUGMOExkAAABOcos7apIAAAAAAAA2Ly01KyoAAAAgKzdVaX9bZHIaKzwAAABKYHhDcZgAAAABAQFfgJY2XX0AAQEQFhoIEhwOFRgGDRUAAAAAAQE3W3cyWnwAAABSeJJRjLs1R1FVgaFWgJ4lPlMAAAAsOD4aLj55bm2Md3QAAABPSkmfko9pXlsbGRkbGRlfWlm1oJxMQkAAAAAAAAABAQFTb4tYibFtvPpWgKNScpC6s7nExtNzwPp1wPnZx8jMsKtuZGFoXVutmJODwfJ7wfbHr6p5a2hnW1gtQlI4ODk7N2A2LzWDvet8wPZPRkRHPl9CQUQlMTthe4+ko7RhXYGEeJhzsuJaVXRjWHzIwtNwfYddhqLCwcpTTpGimMhvsuVzv/djYpBgWJvLydxlgptVirdZbX1ASFZUaXtOb4xOZX1OZHxNa4ZRX21DSV5gaG9Je6lqsepstO1knclcfJxtoc5tpNFuptVup9ZnkbdgjrVss+xjpuBvrd9snspOW29jdI5LVmlkj7Vvrd54t+RlfptQXXJWZHtlf51oruNgmMFfdJBYZn1RXnRWZXthfZxSeZiGgYGOhYLdxb/RubWZlpWFd3T////2kwjgAAAARXRSTlMAASFrcAhxIjLb/vWvsPb+20b4+DFFyMkz2vf43CP9/m5y9vZysLGvsQlw9fZs/v7c+PjcR8nK9/hI2/72sbD1/t00bXBAFktiAAAAAWJLR0SnwLcrAwAAAAd0SU1FB+YGCRchHQhxJNoAAAD/SURBVBjTY2BgYGBkYmZhZWVjZmdkgAAOTi5uHl4+fgEOqICgkKubu7uHp7AgiMcoIirm5e3j4+Pr5y8uKMHIwCQpFRAYFOzjExIaFi4tI8vALBcRGRUd4+MTGxefkCivwKColJSckpoGVJGekZmlrMLAqpqdk5uX7+NTUFhUXKLGysCqXlpWXlFZVV1TW1ffoKHJoKXd2NTc0trW3tHZ1d2jo8Kgq+fj09vXP2HCxEmTfXz0FRjYDQyn+EydNmHC9Bk+M42MZRkYJUxMZ82e0z933vwFZoIcYO+Imi9ctHjJ0mUWllC/iFhZ29ja2Ts4OkEFGNmdFTVZXRRkQeoBhkE/Yj5NSZ4AAAAldEVYdGRhdGU6Y3JlYXRlADIwMjItMDYtMDlUMjM6MzM6MjgrMDA6MDA90JbEAAAAJXRFWHRkYXRlOm1vZGlmeQAyMDIyLTA2LTA5VDIzOjMzOjI4KzAwOjAwTI0ueAAAAABJRU5ErkJggg==";
+var fav_icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAB8lBMVEUAAAAAAAAAAAAAAAABAAAAAAABAAAAAAAAAAAdEBB0Pz5rKCgaBwcZBwdkMzJxPDocDAwAAACLTU6SOzsAAAAAAAA9Mic/LyEAAAA6HByQUUaIVEY+GBgAAACAQkKaQUIAAAABAQGWXl9+NjYBAAAaEBAcCAgZDQ0WBQUAAAB3Nzd9MjIAAACTUVK7UVJRNTWhVVaeVldTJSUAAAA+LC0+GhuGcmCgf2EAAABUTESrl4NzYlEdGhcdGhdiXFbIqIhWRjcAAAAAAAABAQGUSkq1VVX6bW6oUVGXS0vmro7+uJn6c3T6dXX/yqPnu5F3aFhxYVG/oH/7gHv6enjeuJOEcFtzX01VLCs4ODk7NmA5MTH1gHr6e3hWSTxHPl9CQUQ/JCKPYGGko7RhXYGEeJjmcW9cVnFjWH3IwtOHb3CjXV3CwcpTTpGimMjlb3D4c3RmYI1gWJvLydybZWW+T0x+V1hRP0Z7U1WTSEiHRUWGRUSORkZuTlBRQVBwX2CvRkXtaGjvamrNYWKmU1PVZ2fXaGjbaWncaWnAX1+7W1vkYF/ja2zRZWV9QkGeVFN2Pz69XV3ia2zkeHmpWFd/REOJSUirWVjjaGjBYGCeUlKMSkl8QkGBRUSoVlWeUE2QgXeWiHr1zqjmw5+bl5KVe2T///8NZLRGAAAARHRSTlMAASFrcAhxIjLb/vWvsPb+20b4+DFFyMkz2vf43CP9/m5y9vZysLGvsQn19mz+/tz4+NxHycr3+Ejb/vaxsPX+3TRtcBrzrrgAAAABYktHRKUuuUovAAAAB3RJTUUH5gYJFzsfVlK/LQAAAP9JREFUGNNjYGBgYGRiZmFlZWNmZ2SAAA5OLm4eXj5+AQ6ogKCQi6ubm7uHsCCIxygiKubp5e3t7ePrJy4owcjAJCnlHxAY5O0dHBIaJi0jy8AsFx4RGRXt7R0TGxefIK/AoKiUmJSckgpUkZaekamswsCqmpWdk5vn7Z1fUFhUrMbKwKpeUlpWXlFZVV1TW1evocnAotXQ2NTc0trW3tHZ2KWtwqCj6+3d3dPb19c/YaK3t54CA7u+wSTvyVP6+qZO855uaCTLwChhbDJj5qzZ86bOnWcqyAH2jqjZ/AULFy1eYm4B9YuIpZW1ja2dvYMjVICR3UlRk9VZQRakHgAlRz6K4dvoSgAAACV0RVh0ZGF0ZTpjcmVhdGUAMjAyMi0wNi0wOVQyMzo1OTozMSswMDowMJt1iQMAAAAldEVYdGRhdGU6bW9kaWZ5ADIwMjItMDYtMDlUMjM6NTk6MzErMDA6MDDqKDG/AAAAAElFTkSuQmCC"
+
+var favicon = {
+
+    // Change the Page Icon and Title.
+    change: function(iconURL) {
+        this.addLink(iconURL, "icon");
+        this.addLink(iconURL, "shortcut icon");
+    },
+
+    addLink: function(iconURL, relValue) {
+        var link = document.createElement("link");
+        link.type = "image/x-icon";
+        link.rel = relValue;
+        link.href = iconURL;
+        this.removeLink(relValue);
+        this.docHead.appendChild(link);
+    },
+
+    removeLink: function(relValue) {
+        var links = this.docHead.getElementsByTagName("link");
+        for (var i = 0; i < links.length; i++) {
+            var link = links[i];
+            if (link.type == "image/x-icon" && link.rel == relValue) {
+                this.docHead.removeChild(link);
+                return; // Assuming only one match at most.
+            }
+        }
+    },
+
+    swapLink: function() {
+        if (this.run == true) {
+            if (this.icon == 1) {
+                this.change(fav_icon2);
+                this.icon = 2;
+            } else {
+                this.change(fav_icon1);
+                this.icon = 1;
+            }
+        }
+    },
+
+    auto_swap: function() {
+        if (this.run == true) {
+            this.swapLink();
+            setTimeout(() => { this.auto_swap(); }, 1000);
+        }
+    },
+
+    start_swap: function() {
+        this.run = true;
+        this.auto_swap();
+    },
+
+    stop_swap: function() {
+        this.run = false;
+        this.change(fav_icon);
+    },
+
+    docHead:document.getElementsByTagName("head")[0]
+}
\ No newline at end of file
diff --git a/templates/index.html b/templates/index.html
index ac3c322e..f4858ad3 100644
--- a/templates/index.html
+++ b/templates/index.html
@@ -18,6 +18,7 @@
+		<script src="static/favicon.js"></script>

From 13f17d3eca8d4a61b3938aee103ac967cc88b877 Mon Sep 17 00:00:00 2001
From: ebolam
Date: Fri, 10 Jun 2022 08:39:15 -0400
Subject: [PATCH 4/5] Changed unit tests so that they run with a simple pytest
 command

---
 aiserver.py                       | 7 +++----
 pytest.ini                        | 2 ++
 unit_tests.py => test_aiserver.py | 0
 3 files changed, 5 insertions(+), 4 deletions(-)
 create mode 100644 pytest.ini
 rename unit_tests.py => test_aiserver.py (100%)

diff --git a/aiserver.py b/aiserver.py
index ae3af5c6..baaca551 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -937,10 +937,9 @@ def general_startup(override_args=None):
     parser.add_argument("--lowmem", action='store_true', help="Extra Low Memory loading for the GPU, slower but memory does not peak to twice the usage")
     parser.add_argument("--savemodel", action='store_true', help="Saves the model to the models folder even if --colab is used (Allows you to save models to Google Drive)")
     #args: argparse.Namespace = None
-    if len(sys.argv) > 1 and override_args is None:
-        if sys.argv[1] == 'unit_tests.py':
-            args = parser.parse_args([])
-            return
+    if "pytest" in sys.modules and override_args is None:
+        args = parser.parse_args([])
+        return
     if override_args is not None:
         import shlex
         args = parser.parse_args(shlex.split(override_args))
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 00000000..a4ea9a2d
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,2 @@
+[pytest]
+addopts = --ignore=miniconda3 --html=unit_test_report.html -v
\ No newline at end of file
diff --git a/unit_tests.py b/test_aiserver.py
similarity index 100%
rename from unit_tests.py
rename to test_aiserver.py

From 4a920724d9c04063d0dba681532ad8841af8cb43 Mon Sep 17 00:00:00 2001
From: ebolam
Date: Fri, 10 Jun 2022 09:12:04 -0400
Subject: [PATCH 5/5] fix for folder paths on linux

---
 aiserver.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/aiserver.py b/aiserver.py
index baaca551..9b9a1c76 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -379,9 +379,9 @@ def get_folder_path_info(base):
     if path[-1] == "\\":
         path = path[:-1]
     breadcrumbs = []
-    for i in range(len(path.split("\\"))):
-        breadcrumbs.append(["\\".join(path.split("\\")[:i+1]),
-                            path.split("\\")[i]])
+    for i in range(len(path.replace("/", "\\").split("\\"))):
+        breadcrumbs.append(["\\".join(path.replace("/", "\\").split("\\")[:i+1]),
+                            path.replace("/", "\\").split("\\")[i]])
     if len(breadcrumbs) == 1:
         breadcrumbs = [["{}:\\".format(chr(i)), "{}:\\".format(chr(i))] for i in range(65, 91) if os.path.exists("{}:".format(chr(i)))]
     else:
@@ -395,6 +395,7 @@
     # Paths/breadcrumbs is a list of lists, where the first element in the sublist is the full path and the second is the folder name
     return (paths, breadcrumbs)
 
+
 def getModelSelection(modellist):
     print("    #    Model\t\t\t\t\t\tVRAM\n    ========================================================")
     i = 1
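With [PATCH 4/5] in place the whole suite runs from a bare `pytest` invocation: pytest.ini skips collection inside the local miniconda3 directory and, assuming the pytest-html plugin that provides the --html flag is installed, writes a unit_test_report.html summary. [PATCH 5/5] is easiest to see in isolation. The sketch below (breadcrumbs_for is an illustrative helper, not a name in aiserver.py) mirrors the fixed breadcrumb construction: forward slashes are normalized to backslashes before splitting, so Linux paths yield the same [full path, folder name] pairs that Windows paths always did:

    def breadcrumbs_for(path):
        # Normalize to backslashes so "/"-separated Linux paths split too
        parts = path.replace("/", "\\").split("\\")
        # Pair the cumulative path up to each folder with that folder's name
        return [["\\".join(parts[:i+1]), parts[i]] for i in range(len(parts))]

    print(breadcrumbs_for("C:\\Users\\kobold"))
    # [['C:', 'C:'], ['C:\\Users', 'Users'], ['C:\\Users\\kobold', 'kobold']]
    print(breadcrumbs_for("stories/sample"))
    # [['stories', 'stories'], ['stories\\sample', 'sample']]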