From f027d8b6e56393c12b8cd1611a3c0b7cc90802c9 Mon Sep 17 00:00:00 2001 From: ebolam Date: Wed, 17 May 2023 21:15:31 -0400 Subject: [PATCH] Better working valid detection and named model backends for UI --- aiserver.py | 53 +++-- modeling/inference_models/api.py | 3 +- modeling/inference_models/basic_api.py | 4 +- modeling/inference_models/generic_hf_torch.py | 3 +- modeling/inference_models/gooseai.py | 5 +- modeling/inference_models/hf_mtj.py | 4 +- modeling/inference_models/horde.py | 3 +- modeling/inference_models/openai.py | 6 +- modeling/inference_models/parents/hf.py | 24 +- .../parents/openai_gooseai.py | 2 +- modeling/inference_models/readonly.py | 3 +- modeling/inference_models/rwkv.py | 5 +- static/koboldai.js | 206 +++++++++++------- templates/templates.html | 5 +- 14 files changed, 191 insertions(+), 135 deletions(-) diff --git a/aiserver.py b/aiserver.py index 92dde7f4..314fb512 100644 --- a/aiserver.py +++ b/aiserver.py @@ -622,12 +622,12 @@ from modeling.patches import patch_transformers #Load all of the model importers import importlib -model_loader_code = {} -model_loaders = {} +model_backend_code = {} +model_backends = {} for module in os.listdir("./modeling/inference_models"): if os.path.isfile(os.path.join("./modeling/inference_models",module)) and module[-3:] == '.py': - model_loader_code[module[:-3]] = importlib.import_module('modeling.inference_models.{}'.format(module[:-3])) - model_loaders[module[:-3]] = model_loader_code[module[:-3]].model_loader() + model_backend_code[module[:-3]] = importlib.import_module('modeling.inference_models.{}'.format(module[:-3])) + model_backends[model_backend_code[module[:-3]].model_backend_name] = model_backend_code[module[:-3]].model_backend() old_socketio_on = socketio.on @@ -1354,6 +1354,7 @@ def general_startup(override_args=None): parser.add_argument("--port", type=int, help="Specify the port on which the application will be joinable") parser.add_argument("--aria2_port", type=int, help="Specify the port on which aria2's RPC interface will be open if aria2 is installed (defaults to 6799)") parser.add_argument("--model", help="Specify the Model Type to skip the Menu") + parser.add_argument("--model_backend", help="Specify the model backend you want to use") parser.add_argument("--path", help="Specify the Path for local models (For model NeoCustom or GPT2Custom)") parser.add_argument("--apikey", help="Specify the API key to use for online services") parser.add_argument("--sh_apikey", help="Specify the API key to use for txt2img from the Stable Horde. Get a key from https://horde.koboldai.net/register") @@ -1447,6 +1448,12 @@ def general_startup(override_args=None): args.max_summary_length = int(args.max_summary_length) if args.model: + # At this point we have to try to load the model through the selected backend + if not args.model_backend: + logger.error("Didn't select a model backend. 
Please specify one with --model_backend or remove --model from the run command")
+            exit()
+        #if
+
         koboldai_vars.model = args.model;
     koboldai_vars.revision = args.revision
     koboldai_settings.multi_story = args.multi_story
@@ -1472,7 +1479,7 @@ def general_startup(override_args=None):
         koboldai_vars.quiet = True

     if args.nobreakmodel:
-        model_loaders['generic_hf_torch'].nobreakmodel = True
+        model_backends['Huggingface'].nobreakmodel = True

     if args.remote:
         koboldai_vars.host = True;
@@ -1484,7 +1491,7 @@ def general_startup(override_args=None):
         koboldai_vars.host = True;

     if args.lowmem:
-        model_loaders['generic_hf_torch'].low_mem = True
+        model_backends['Huggingface'].low_mem = True

     if args.host != "Disabled":
         # This means --host option was submitted without an argument
@@ -1520,7 +1527,7 @@ def general_startup(override_args=None):
         koboldai_vars.use_colab_tpu = False
         koboldai_vars.hascuda = False
         koboldai_vars.usegpu = False
-        model_loaders['generic_hf_torch'].nobreakmodel = True
+        model_backends['Huggingface'].nobreakmodel = True

     koboldai_vars.smandelete = koboldai_vars.host == args.override_delete
     koboldai_vars.smanrename = koboldai_vars.host == args.override_rename
@@ -1582,7 +1589,7 @@ def unload_model():
     koboldai_vars.badwordsids = koboldai_settings.badwordsids_default


-def load_model(plugin, initial_load=False):
+def load_model(model_backend, initial_load=False):
     global model
     global tokenizer
     global model_config
@@ -1637,7 +1644,7 @@ def load_model(plugin, initial_load=False):

     koboldai_vars.default_preset = koboldai_settings.default_preset

-    model = model_loaders[plugin]
+    model = model_backends[model_backend]
     model.load(initial_load=initial_load, save_model=not (args.colab or args.cacheonly) or args.savemodel)
     logger.debug("Model Type: {}".format(koboldai_vars.model_type))

@@ -6103,33 +6110,23 @@ def UI_2_select_model(data):
         emit("open_model_load_menu", {"items": [{**item.to_json(), **{"menu":data["name"]}} for item in model_menu[data["name"]] if item.should_show()]})
     else:
         #Get load methods
-        logger.debug("Asking for model info on potential model: {}".format(data))
-        valid = False
         if 'path' not in data or data['path'] == "":
             valid_loaders = {}
-            for model_loader in model_loaders:
-                logger.debug("Testing Loader {} for model {}: {}".format(model_loader, data["name"], model_loaders[model_loader].is_valid(data["name"], data["path"] if 'path' in data else None, data["menu"])))
-                if model_loaders[model_loader].is_valid(data["name"], data["path"] if 'path' in data else None, data["menu"]):
-                    valid_loaders[model_loader] = model_loaders[model_loader].get_requested_parameters(data["name"], data["path"] if 'path' in data else None, data["menu"])
-                    valid = True
-            if valid:
-                logger.debug("Valid Loaders: {}".format(valid_loaders))
-                emit("selected_model_info", valid_loaders)
-        if not valid and 'path' in data:
+            for model_backend in model_backends:
+                valid_loaders[model_backend] = model_backends[model_backend].get_requested_parameters(data["name"], data["path"] if 'path' in data else None, data["menu"])
+            emit("selected_model_info", {"model_backends": valid_loaders, "preselected": "Huggingface"})
+        else:
             #Get directories
             paths, breadcrumbs = get_folder_path_info(data['path'])
             output = []
             for path in paths:
                 valid=False
-                for model_loader in model_loaders:
-                    if model_loaders[model_loader].is_valid(path[1], path[0], "Custom"):
+                for model_backend in model_backends:
+                    if model_backends[model_backend].is_valid(path[1], path[0], "Custom"):
                         valid=True
                         break
                 output.append({'label': path[1], 'name': 
path[0], 'size': "", "menu": "Custom", 'path': path[0], 'isMenu': not valid}) - emit("open_model_load_menu", {"items": output+[{'label': 'Return to Main Menu', 'name':'mainmenu', 'size': "", "menu": "Custom", 'isMenu': True}], 'breadcrumbs': breadcrumbs}) - elif not valid: - logger.error("Nothing can load the model: {}".format(valid_loaders)) - + emit("open_model_load_menu", {"items": output+[{'label': 'Return to Main Menu', 'name':'mainmenu', 'size': "", "menu": "Custom", 'isMenu': True}], 'breadcrumbs': breadcrumbs}) return @@ -6156,7 +6153,7 @@ def UI_2_select_model(data): @socketio.on('load_model') @logger.catch def UI_2_load_model(data): - model_loaders[data['plugin']].set_input_parameters(data) + model_backends[data['plugin']].set_input_parameters(data) load_model(data['plugin']) #load_model(use_gpu=data['use_gpu'], gpu_layers=data['gpu_layers'], disk_layers=data['disk_layers'], online_model=data['online_model'], url=koboldai_vars.colaburl, use_8_bit=data['use_8_bit']) @@ -10671,7 +10668,7 @@ for schema in config_endpoint_schemas: def startup(): if koboldai_vars.model == "" or koboldai_vars.model is None: koboldai_vars.model = "ReadOnly" - socketio.start_background_task(load_model, *('readonly',), **{'initial_load':True}) + socketio.start_background_task(load_model, *('Read Only',), **{'initial_load':True}) print("", end="", flush=True) diff --git a/modeling/inference_models/api.py b/modeling/inference_models/api.py index 5bddd714..409158f5 100644 --- a/modeling/inference_models/api.py +++ b/modeling/inference_models/api.py @@ -17,12 +17,13 @@ from modeling.inference_model import ( ModelCapabilities, ) +model_backend_name = "KoboldAI API" class APIException(Exception): """To be used for errors when using the Kobold API as an interface.""" -class model_loader(InferenceModel): +class model_backend(InferenceModel): def __init__(self) -> None: super().__init__() #self.base_url = "" diff --git a/modeling/inference_models/basic_api.py b/modeling/inference_models/basic_api.py index 5666ba8e..cca9652b 100644 --- a/modeling/inference_models/basic_api.py +++ b/modeling/inference_models/basic_api.py @@ -15,11 +15,13 @@ from modeling.inference_model import ( ) +model_backend_name = "KoboldAI Old Colab Method" + class BasicAPIException(Exception): """To be used for errors when using the Basic API as an interface.""" -class model_loader(InferenceModel): +class model_backend(InferenceModel): def __init__(self) -> None: super().__init__() diff --git a/modeling/inference_models/generic_hf_torch.py b/modeling/inference_models/generic_hf_torch.py index c228e2ee..f7a00f45 100644 --- a/modeling/inference_models/generic_hf_torch.py +++ b/modeling/inference_models/generic_hf_torch.py @@ -22,8 +22,9 @@ except ModuleNotFoundError as e: from modeling.inference_models.parents.hf_torch import HFTorchInferenceModel +model_backend_name = "Huggingface" -class model_loader(HFTorchInferenceModel): +class model_backend(HFTorchInferenceModel): def _initialize_model(self): return diff --git a/modeling/inference_models/gooseai.py b/modeling/inference_models/gooseai.py index 08d8ea06..9d6e8771 100644 --- a/modeling/inference_models/gooseai.py +++ b/modeling/inference_models/gooseai.py @@ -11,16 +11,17 @@ from modeling.inference_model import ( InferenceModel, ) -from modeling.inference_models.parents.openai_gooseai import model_loader as openai_gooseai_model_loader +from modeling.inference_models.parents.openai_gooseai import model_backend as openai_gooseai_model_backend +model_backend_name = "GooseAI" class 
OpenAIAPIError(Exception): def __init__(self, error_type: str, error_message) -> None: super().__init__(f"{error_type}: {error_message}") -class model_loader(openai_gooseai_model_loader): +class model_backend(openai_gooseai_model_backend): """InferenceModel for interfacing with OpenAI's generation API.""" def __init__(self): diff --git a/modeling/inference_models/hf_mtj.py b/modeling/inference_models/hf_mtj.py index 4e82d348..6351eca2 100644 --- a/modeling/inference_models/hf_mtj.py +++ b/modeling/inference_models/hf_mtj.py @@ -19,10 +19,10 @@ from modeling.inference_model import ( from modeling.inference_models.parents.hf import HFInferenceModel from modeling.tokenizer import GenericTokenizer +model_backend_name = "Huggingface MTJ" - -class model_loader(HFInferenceModel): +class model_backend(HFInferenceModel): def __init__( self, #model_name: str, diff --git a/modeling/inference_models/horde.py b/modeling/inference_models/horde.py index bd457197..6c880bbe 100644 --- a/modeling/inference_models/horde.py +++ b/modeling/inference_models/horde.py @@ -16,12 +16,13 @@ from modeling.inference_model import ( ModelCapabilities, ) +model_backend_name = "Horde" class HordeException(Exception): """To be used for errors on server side of the Horde.""" -class model_loader(InferenceModel): +class model_backend(InferenceModel): def __init__(self) -> None: super().__init__() self.url = "https://horde.koboldai.net" diff --git a/modeling/inference_models/openai.py b/modeling/inference_models/openai.py index cad2a7f2..19a7d1e6 100644 --- a/modeling/inference_models/openai.py +++ b/modeling/inference_models/openai.py @@ -11,16 +11,16 @@ from modeling.inference_model import ( InferenceModel, ) -from modeling.inference_models.parents.openai_gooseai import model_loader as openai_gooseai_model_loader - +from modeling.inference_models.parents.openai_gooseai import model_backend as openai_gooseai_model_backend +model_backend_name = "OpenAI" class OpenAIAPIError(Exception): def __init__(self, error_type: str, error_message) -> None: super().__init__(f"{error_type}: {error_message}") -class model_loader(openai_gooseai_model_loader): +class model_backend(openai_gooseai_model_backend): """InferenceModel for interfacing with OpenAI's generation API.""" def __init__(self): diff --git a/modeling/inference_models/parents/hf.py b/modeling/inference_models/parents/hf.py index ba291c3f..69549bd5 100644 --- a/modeling/inference_models/parents/hf.py +++ b/modeling/inference_models/parents/hf.py @@ -1,7 +1,7 @@ import os from typing import Optional from transformers import AutoConfig - +import warnings import utils import koboldai_settings from logger import logger @@ -43,7 +43,7 @@ class HFInferenceModel(InferenceModel): else: self.model_config = AutoConfig.from_pretrained(model_name, revision=utils.koboldai_vars.revision, cache_dir="cache") layer_count = self.model_config["n_layer"] if isinstance(self.model_config, dict) else self.model_config.num_layers if hasattr(self.model_config, "num_layers") else self.model_config.n_layer if hasattr(self.model_config, "n_layer") else self.model_config.num_hidden_layers if hasattr(self.model_config, 'num_hidden_layers') else None - if layer_count is not None and layer_count >= 0: + if layer_count is not None and layer_count >= 0 and not self.nobreakmodel: if os.path.exists("settings/{}.breakmodel".format(model_name.replace("/", "_"))): with open("settings/{}.breakmodel".format(model_name.replace("/", "_")), "r") as file: data = [x for x in file.read().split("\n")[:2] if x != ''] @@ 
-128,15 +128,17 @@ class HFInferenceModel(InferenceModel): def set_input_parameters(self, parameters): if self.hf_torch: import breakmodel - gpu_count = torch.cuda.device_count() - layers = [] - for i in range(gpu_count): - layers.append(int(parameters["{}_Layers".format(i)]) if parameters["{}_Layers".format(i)].isnumeric() else None) - self.cpu_layers = parameters['CPU_Layers'] if 'CPU_Layers' in parameters else None - self.layers = layers - self.disk_layers = int(parameters['Disk_Layers']) if 'Disk_Layers' in parameters and parameters['Disk_Layers'].isnumeric() else 0 - breakmodel.gpu_blocks = layers - breakmodel.disk_blocks = self.disk_layers + layer_count = self.model_config["n_layer"] if isinstance(self.model_config, dict) else self.model_config.num_layers if hasattr(self.model_config, "num_layers") else self.model_config.n_layer if hasattr(self.model_config, "n_layer") else self.model_config.num_hidden_layers if hasattr(self.model_config, 'num_hidden_layers') else None + if layer_count is not None and layer_count >= 0 and not self.nobreakmodel: + gpu_count = torch.cuda.device_count() + layers = [] + for i in range(gpu_count): + layers.append(int(parameters["{}_Layers".format(i)]) if parameters["{}_Layers".format(i)].isnumeric() else None) + self.cpu_layers = parameters['CPU_Layers'] if 'CPU_Layers' in parameters else None + self.layers = layers + self.disk_layers = int(parameters['Disk_Layers']) if 'Disk_Layers' in parameters and parameters['Disk_Layers'].isnumeric() else 0 + breakmodel.gpu_blocks = layers + breakmodel.disk_blocks = self.disk_layers self.usegpu = parameters['use_gpu'] if 'use_gpu' in parameters else None self.model_type = self.get_model_type() self.breakmodel = ((self.model_type != 'gpt2') or self.model_type in ("gpt_neo", "gptj", "xglm", "opt")) and not self.nobreakmodel diff --git a/modeling/inference_models/parents/openai_gooseai.py b/modeling/inference_models/parents/openai_gooseai.py index 621ccbad..871ea5ce 100644 --- a/modeling/inference_models/parents/openai_gooseai.py +++ b/modeling/inference_models/parents/openai_gooseai.py @@ -18,7 +18,7 @@ class OpenAIAPIError(Exception): super().__init__(f"{error_type}: {error_message}") -class model_loader(InferenceModel): +class model_backend(InferenceModel): """InferenceModel for interfacing with OpenAI's generation API.""" def __init__(self): diff --git a/modeling/inference_models/readonly.py b/modeling/inference_models/readonly.py index c642c05a..92531af4 100644 --- a/modeling/inference_models/readonly.py +++ b/modeling/inference_models/readonly.py @@ -14,12 +14,13 @@ from modeling.inference_model import ( ModelCapabilities, ) +model_backend_name = "Read Only" class BasicAPIException(Exception): """To be used for errors when using the Basic API as an interface.""" -class model_loader(InferenceModel): +class model_backend(InferenceModel): def __init__(self) -> None: super().__init__() diff --git a/modeling/inference_models/rwkv.py b/modeling/inference_models/rwkv.py index d14d8c81..fa6497b7 100644 --- a/modeling/inference_models/rwkv.py +++ b/modeling/inference_models/rwkv.py @@ -55,7 +55,10 @@ MODEL_FILES = { } -class model_loader(InferenceModel): +model_backend_name = "RWKV" + + +class model_backend(InferenceModel): def __init__( self, #model_name: str, diff --git a/static/koboldai.js b/static/koboldai.js index de3ab324..905403c1 100644 --- a/static/koboldai.js +++ b/static/koboldai.js @@ -1645,8 +1645,85 @@ function show_model_menu(data) { } +function model_settings_checker() { + //get check value: + 
missing_element = false;
+    if (this.check_data != null) {
+        if ('sum' in this.check_data) {
+            check_value = 0
+            for (const temp of this.check_data['sum']) {
+                if (document.getElementById(this.id.split("|")[0] +"|" + temp + "_value")) {
+                    check_value += parseInt(document.getElementById(this.id.split("|")[0] +"|" + temp + "_value").value);
+                } else {
+                    missing_element = true;
+                }
+            }
+        } else {
+            check_value = this.value
+        }
+        if (this.check_data['check'] == "=") {
+            valid = (check_value == this.check_data['value']);
+        } else if (this.check_data['check'] == "!=") {
+            valid = (check_value != this.check_data['value']);
+        } else if (this.check_data['check'] == ">=") {
+            valid = (check_value >= this.check_data['value']);
+        } else if (this.check_data['check'] == "<=") {
+            valid = (check_value <= this.check_data['value']);
+        } else if (this.check_data['check'] == ">") {
+            valid = (check_value > this.check_data['value']);
+        } else if (this.check_data['check'] == "<") {
+            valid = (check_value < this.check_data['value']);
+        }
+        if (valid || missing_element) {
+            //if we are supposed to refresh when this value changes we'll resubmit
+            if ((this.getAttribute("refresh_model_inputs") == "true") && !missing_element && !this.noresubmit) {
+                console.log("resubmit");
+            }
+            if ('sum' in this.check_data) {
+                for (const temp of this.check_data['sum']) {
+                    if (document.getElementById(this.id.split("|")[0] +"|" + temp + "_value")) {
+                        document.getElementById(this.id.split("|")[0] +"|" + temp + "_value").closest(".setting_container_model").classList.remove('input_error');
+                        document.getElementById(this.id.split("|")[0] +"|" + temp + "_value").closest(".setting_container_model").removeAttribute("tooltip");
+                    }
+                }
+            } else {
+                this.closest(".setting_container_model").classList.remove('input_error');
+                this.closest(".setting_container_model").removeAttribute("tooltip");
+            }
+        } else {
+            if ('sum' in this.check_data) {
+                for (const temp of this.check_data['sum']) {
+                    if (document.getElementById(this.id.split("|")[0] +"|" + temp + "_value")) {
+                        document.getElementById(this.id.split("|")[0] +"|" + temp + "_value").closest(".setting_container_model").classList.add('input_error');
+                        document.getElementById(this.id.split("|")[0] +"|" + temp + "_value").closest(".setting_container_model").setAttribute("tooltip", this.check_data['check_message']);
+                    }
+                }
+            } else {
+                this.closest(".setting_container_model").classList.add('input_error');
+                this.closest(".setting_container_model").setAttribute("tooltip", this.check_data['check_message']);
+            }
+        }
+    }
+    var accept = document.getElementById("btn_loadmodelaccept");
+    ok_to_load = true;
+    for (const item of document.getElementsByClassName("input_error")) {
+        if (item.classList.contains("input_error") && !item.closest(".model_plugin_settings_area").classList.contains("hidden")) {
+            ok_to_load = false;
+            break;
+        }
+    }
+
+    if (ok_to_load) {
+        accept.classList.remove("disabled");
+        accept.disabled = false;
+    } else {
+        accept.classList.add("disabled");
+        accept.disabled = true;
+    }
+}
-function selected_model_info(data) {
+function selected_model_info(sent_data) {
+    const data = sent_data['model_backends'];
     //clear out the loadmodelsettings
     var loadmodelsettings = document.getElementById('loadmodelsettings')
     while (loadmodelsettings.firstChild) {
@@ -1667,7 +1744,10 @@ function selected_model_info(data) {
         for (const area of document.getElementsByClassName("model_plugin_settings_area")) {
             area.classList.add("hidden");
         }
-        document.getElementById(this.value + 
"_settings_area").classList.remove("hidden"); + if (document.getElementById(this.value + "_settings_area")) { + document.getElementById(this.value + "_settings_area").classList.remove("hidden"); + } + model_settings_checker() } //create the content for (const [loader, items] of Object.entries(data)) { @@ -1679,7 +1759,11 @@ function selected_model_info(data) { modelpluginoption.innerText = loader; modelpluginoption.value = loader; modelplugin.append(modelpluginoption); + if (loader == sent_data['preselected']) { + modelplugin.value = sent_data['preselected']; + } + //create the user input for each requested input for (item of items) { let new_setting = document.getElementById('blank_model_settings').cloneNode(true); new_setting.id = loader; @@ -1687,73 +1771,7 @@ function selected_model_info(data) { new_setting.querySelector('#blank_model_settings_label').innerText = item['label']; new_setting.querySelector('#blank_model_settings_tooltip').setAttribute("tooltip", item['tooltip']); - onchange_event = function () { - //get check value: - if ('sum' in this.check_data) { - check_value = 0 - for (const temp of this.check_data['sum']) { - if (document.getElementById(this.id.split("|")[0] +"|" + temp + "_value")) { - check_value += parseInt(document.getElementById(this.id.split("|")[0] +"|" + temp + "_value").value); - } - } - } else { - check_value = this.value - } - if (this.check_data['check'] == "=") { - valid = (check_value == this.check_data['value']); - } else if (this.check_data['check'] == "!=") { - valid = (check_value != this.check_data['value']); - } else if (this.check_data['check'] == ">=") { - valid = (check_value >= this.check_data['value']); - } else if (this.check_data['check'] == "<=") { - valid = (check_value <= this.check_data['value']); - } else if (this.check_data['check'] == "<=") { - valid = (check_value > this.check_data['value']); - } else if (this.check_data['check'] == "<=") { - valid = (check_value < this.check_data['value']); - } - if (valid) { - //if we are supposed to refresh when this value changes we'll resubmit - if (this.getAttribute("refresh_model_inputs") == "true") { - console.log("resubmit"); - } - if ('sum' in this.check_data) { - for (const temp of this.check_data['sum']) { - if (document.getElementById(this.id.split("|")[0] +"|" + temp + "_value")) { - document.getElementById(this.id.split("|")[0] +"|" + temp + "_value").closest(".setting_container_model").classList.remove('input_error'); - document.getElementById(this.id.split("|")[0] +"|" + temp + "_value").closest(".setting_container_model").removeAttribute("tooltip"); - } - } - } else { - this.closest(".setting_container_model").classList.remove('input_error'); - this.closest(".setting_container_model").removeAttribute("tooltip"); - } - var accept = document.getElementById("btn_loadmodelaccept"); - if (document.getElementsByClassName("input_error").length) - accept.disabled = true; - } else { - if ('sum' in this.check_data) { - for (const temp of this.check_data['sum']) { - if (document.getElementById(this.id.split("|")[0] +"|" + temp + "_value")) { - document.getElementById(this.id.split("|")[0] +"|" + temp + "_value").closest(".setting_container_model").classList.add('input_error'); - document.getElementById(this.id.split("|")[0] +"|" + temp + "_value").closest(".setting_container_model").setAttribute("tooltip", this.check_data['check_message']); - } - } - } else { - this.closest(".setting_container_model").classList.add('input_error'); - 
this.closest(".setting_container_model").setAttribute("tooltip", this.check_data['check_message']); - } - } - var accept = document.getElementById("btn_loadmodelaccept"); - if (document.getElementsByClassName("input_error").length > 0) { - accept.classList.add("disabled"); - accept.disabled = true; - } else { - accept.classList.remove("disabled"); - accept.disabled = false; - } - - } + onchange_event = model_settings_checker; if (item['uitype'] == "slider") { var slider_number = new_setting.querySelector('#blank_model_settings_value_slider_number'); slider_number.value = item['default']; @@ -1764,6 +1782,7 @@ function selected_model_info(data) { slider.value = item['default']; slider.min = item['min']; slider.max = item['max']; + slider.setAttribute("data_type", item['unit']); slider.id = loader + "|" + item['id'] + "_value"; if ('check' in item) { slider.check_data = item['check']; @@ -1777,25 +1796,37 @@ function selected_model_info(data) { slider.setAttribute("refresh_model_inputs", item['refresh_model_inputs']); new_setting.querySelector('#blank_model_settings_min_label').innerText = item['min']; new_setting.querySelector('#blank_model_settings_max_label').innerText = item['max']; + slider.noresubmit = true; slider.onchange(); + slider.noresubmit = false; } else { - new_setting.querySelector('#blank_model_settings_slider').classList.add("hidden"); + new_setting.querySelector('#blank_model_settings_slider').remove(); } if (item['uitype'] == "toggle") { - var toggle = new_setting.querySelector('#blank_model_settings_toggle'); + toggle = document.createElement("input"); + toggle.type='checkbox'; + toggle.classList.add("setting_item_input"); + toggle.classList.add("blank_model_settings_input"); + toggle.classList.add("model_settings_input"); toggle.id = loader + "|" + item['id'] + "_value"; toggle.checked = item['default']; - toggle.onchange = onchange_event; + toggle.onclick = onchange_event; + toggle.setAttribute("data_type", item['unit']); toggle.setAttribute("refresh_model_inputs", item['refresh_model_inputs']); if ('check' in item) { toggle.check_data = item['check']; } else { toggle.check_data = null; } - toggle.onchange(); + new_setting.querySelector('#blank_model_settings_toggle').append(toggle); + setTimeout(function() { + $('#'+loader + "\\|" + item['id'] + "_value").bootstrapToggle({size: "mini", onstyle: "success", toggle: "toggle"}); + }, 200); + toggle.noresubmit = true; + toggle.onclick(); + toggle.noresubmit = false; } else { - new_setting.querySelector('#blank_model_settings_checkbox_container').classList.add("hidden"); - new_setting.querySelector('#blank_model_settings_toggle').classList.add("hidden"); + new_setting.querySelector('#blank_model_settings_toggle').remove(); } if (item['uitype'] == "dropdown") { var select_element = new_setting.querySelector('#blank_model_settings_dropdown'); @@ -1807,6 +1838,7 @@ function selected_model_info(data) { select_element.append(new_option); } select_element.value = item['default']; + select_element.setAttribute("data_type", item['unit']); select_element.onchange = onchange_event; select_element.setAttribute("refresh_model_inputs", item['refresh_model_inputs']); if ('check' in item) { @@ -1814,14 +1846,17 @@ function selected_model_info(data) { } else { select_element.check_data = null; } + select_element.noresubmit = true; select_element.onchange(); + select_element.noresubmit = false; } else { - new_setting.querySelector('#blank_model_settings_dropdown').classList.add("hidden"); + 
new_setting.querySelector('#blank_model_settings_dropdown').remove(); } if (item['uitype'] == "password") { var password_item = new_setting.querySelector('#blank_model_settings_password'); password_item.id = loader + "|" + item['id'] + "_value"; password_item.value = item['default']; + password_item.setAttribute("data_type", item['unit']); password_item.onchange = onchange_event; password_item.setAttribute("refresh_model_inputs", item['refresh_model_inputs']); if ('check' in item) { @@ -1829,24 +1864,29 @@ function selected_model_info(data) { } else { password_item.check_data = null; } + password_item.noresubmit = true; password_item.onchange(); + password_item.noresubmit = false; } else { - new_setting.querySelector('#blank_model_settings_password').classList.add("hidden"); + new_setting.querySelector('#blank_model_settings_password').remove(); } if (item['uitype'] == "text") { var text_item = new_setting.querySelector('#blank_model_settings_text'); text_item.id = loader + "|" + item['id'] + "_value"; text_item.value = item['default']; text_item.onchange = onchange_event; + text_item.setAttribute("data_type", item['unit']); text_item.setAttribute("refresh_model_inputs", item['refresh_model_inputs']); if ('check' in item) { text_item.check_data = item['check']; } else { text_item.check_data = null; } + text_item.noresubmit = true; text_item.onchange(); + text_item.noresubmit = false; } else { - new_setting.querySelector('#blank_model_settings_text').classList.add("hidden"); + new_setting.querySelector('#blank_model_settings_text').remove(); } model_area.append(new_setting); @@ -1891,7 +1931,15 @@ function load_model() { //get an object of all the input settings from the user data = {} for (const element of settings_area.querySelectorAll(".model_settings_input:not(.hidden)")) { - data[element.id.split("|")[1].replace("_value", "")] = element.value; + var element_data = element.value; + if (element.getAttribute("data_type") == "int") { + element_data = parseInt(element_data); + } else if (element.getAttribute("data_type") == "float") { + element_data = parseFloat(element_data); + } else if (element.getAttribute("data_type") == "bool") { + element_data = (element_data == 'on'); + } + data[element.id.split("|")[1].replace("_value", "")] = element_data; } data = {...data, ...selected_model_data}; diff --git a/templates/templates.html b/templates/templates.html index 49cd3e5b..49fa99f6 100644 --- a/templates/templates.html +++ b/templates/templates.html @@ -162,9 +162,8 @@ - - - + +
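
For reference, the convention this patch establishes is that aiserver.py imports every module in modeling/inference_models/, registers it in model_backends under the module-level model_backend_name string, and then drives it through is_valid(), get_requested_parameters(), set_input_parameters(), and finally load(). Below is a minimal sketch of a conforming module; the file name example.py, the backend name "Example", the parameter names, and the single illustrative input item are assumptions for illustration only (not part of this patch), and the real InferenceModel base class may require additional methods.

    # modeling/inference_models/example.py -- hypothetical sketch, not part of this patch
    from modeling.inference_model import InferenceModel

    model_backend_name = "Example"  # display name; aiserver.py keys model_backends by this string

    class model_backend(InferenceModel):
        def is_valid(self, model_name, model_path, menu_path):
            # Tell the UI whether this backend can load the selected model or folder.
            return True

        def get_requested_parameters(self, model_name, model_path, menu_path):
            # Describe the inputs the load-model dialog should render for this backend.
            return [{
                "uitype": "text",
                "unit": "text",
                "id": "model_path",
                "label": "Model Path",
                "default": model_path or "",
                "tooltip": "Where to load the model from",
                "refresh_model_inputs": False,
            }]

        def set_input_parameters(self, parameters):
            # Values arrive keyed by each item's id, already cast per the item's unit.
            self.model_path = parameters.get("model_path")

On the UI side each generated input gets the element id "<backend name>|<item id>_value", so ids only need to be unique within a single backend, and load_model() looks the chosen backend up in model_backends by the same display name before calling its load().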