diff --git a/aiserver.py b/aiserver.py
index 480bbf37..1fe206af 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -2173,7 +2173,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
     if not utils.HAS_ACCELERATE:
         disk_layers = None
     koboldai_vars.reset_model()
-    koboldai_vars.cluster_requested_models = online_model
+    koboldai_vars.cluster_requested_models = [online_model] if isinstance(online_model, str) else online_model
     koboldai_vars.noai = False
     if not use_breakmodel_args:
         set_aibusy(True)
diff --git a/static/koboldai.js b/static/koboldai.js
index 24202c12..7d85e6c7 100644
--- a/static/koboldai.js
+++ b/static/koboldai.js
@@ -1429,8 +1429,8 @@ function load_model() {
     for (item of document.getElementById("oaimodel").selectedOptions) {
         selected_models.push(item.value);
     }
-    if (selected_models == []) {
-        selected_models = "";
+    if (selected_models == ['']) {
+        selected_models = [];
     } else if (selected_models.length == 1) {
         selected_models = selected_models[0];
     }
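
Taken together, the two hunks normalize how the selected cluster model(s) are stored: the server now wraps a bare string in a list before assigning it to cluster_requested_models, while the client collapses a selection holding only an empty value back to an empty list. Below is a minimal sketch of the server-side coercion, written as a hypothetical standalone helper (normalize_requested_models is not a function in aiserver.py) rather than assigning to koboldai_vars directly:

from typing import List, Union

def normalize_requested_models(online_model: Union[str, List[str]]) -> List[str]:
    # Mirrors the aiserver.py hunk: a single model arrives from the UI as a
    # plain string, multiple selections arrive as a list, and the stored
    # value should always be a list.
    if isinstance(online_model, str):
        return [online_model]
    return online_model

assert normalize_requested_models("some-model") == ["some-model"]
assert normalize_requested_models(["model-a", "model-b"]) == ["model-a", "model-b"]

One note on the client-side hunk: in JavaScript, selected_models == [''] compares two distinct array objects by reference and is therefore never true (as was selected_models == [] before), so a content check along the lines of selected_models.length == 1 && selected_models[0] == '' would be needed for that branch to take effect.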