Horde Fix

This commit is contained in:
ebolam
2022-09-15 08:36:35 -04:00
parent c222d2055b
commit 1ee87b5719
4 changed files with 35 additions and 7 deletions

View File

@@ -1181,6 +1181,7 @@ def get_model_info(model, directory=""):
     default_url = None
     models_on_url = False
     multi_online_models = False
+    show_online_model_select=False
     gpu_count = torch.cuda.device_count()
     gpu_names = []
     for i in range(gpu_count):
@@ -1189,6 +1190,7 @@ def get_model_info(model, directory=""):
         url = True
     elif model == 'CLUSTER':
         models_on_url = True
+        show_online_model_select=True
         url = True
         key = True
         default_url = 'https://koboldai.net'
@@ -1206,6 +1208,7 @@ def get_model_info(model, directory=""):
         default_url = js['oaiurl']
         get_cluster_models({'model': model, 'key': key_value, 'url': default_url})
     elif model in [x[1] for x in model_menu['apilist']]:
+        show_online_model_select=True
         if path.exists("settings/{}.v2_settings".format(model)):
             with open("settings/{}.v2_settings".format(model), "r") as file:
                 # Check if API key exists
@@ -1254,7 +1257,7 @@ def get_model_info(model, directory=""):
                     'gpu':gpu, 'layer_count':layer_count, 'breakmodel':breakmodel, 'multi_online_models': multi_online_models, 'default_url': default_url,
                     'disk_break_value': disk_blocks, 'disk_break': utils.HAS_ACCELERATE,
                     'break_values': break_values, 'gpu_count': gpu_count,
-                    'url': url, 'gpu_names': gpu_names, 'models_on_url': models_on_url}, broadcast=False, room="UI_2")
+                    'url': url, 'gpu_names': gpu_names, 'models_on_url': models_on_url, 'show_online_model_select': show_online_model_select}, broadcast=False, room="UI_2")
@@ -1927,6 +1930,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
     if not utils.HAS_ACCELERATE:
         disk_layers = None
     koboldai_vars.reset_model()
+    koboldai_vars.cluster_requested_models = online_model
     koboldai_vars.noai = False
     if not use_breakmodel_args:
         set_aibusy(True)
@@ -1990,7 +1994,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
         koboldai_vars.configname = f"{koboldai_vars.model}_{online_model.replace('/', '_')}"
     if path.exists(get_config_filename()):
         changed=False
-        with open("settings/{}.v2_settings".format(koboldai_vars.model), "r") as file:
+        with open(get_config_filename(), "r") as file:
             # Check if API key exists
             js = json.load(file)
             if 'online_model' in js:

View File

@@ -439,7 +439,7 @@ class model_settings(settings):
         self.selected_preset = ""
         self.uid_presets = []
         self.default_preset = {}
-        cluster_requested_models = [] # The models which we allow to generate during cluster mode
+        self.cluster_requested_models = [] # The models which we allow to generate during cluster mode
#dummy class to eat the tqdm output #dummy class to eat the tqdm output

View File

@@ -2290,6 +2290,7 @@ h2 .material-icons-outlined {
     }
 }
-.horde_trigger[model_model="ReadOnly"] {
+.horde_trigger[model_model="ReadOnly"],
+.horde_trigger[model_model="CLUSTER"] {
     display: none;
 }

View File

@@ -1062,6 +1062,7 @@ function show_model_menu(data) {
     document.getElementById("modelurl").classList.add("hidden");
     document.getElementById("use_gpu_div").classList.add("hidden");
     document.getElementById("modellayers").classList.add("hidden");
+    document.getElementById("oaimodel").classList.add("hidden");
     var model_layer_bars = document.getElementById('model_layer_bars');
     while (model_layer_bars.firstChild) {
         model_layer_bars.removeChild(model_layer_bars.firstChild);
@@ -1169,15 +1170,27 @@ function selected_model_info(data) {
         document.getElementById("modelurl").classList.add("hidden");
     }
+    //default URL loading
+    if (data.default_url != null) {
+        document.getElementById("modelurl").value = data.default_url;
+    }
     //change model loading on url if needed
     if (data.models_on_url) {
-        document.getElementById("modelurl").onchange = function () {socket.emit('get_cluster_models', {'key': document.getElementById("modelkey").value, 'url': this.value});};
-        document.getElementById("modelkey").onchange = function () {socket.emit('get_cluster_models', {'key': this.value, 'url': document.getElementById("modelurl").value});};
+        document.getElementById("modelurl").onchange = function () {socket.emit('get_cluster_models', {'model': document.getElementById('btn_loadmodelaccept').getAttribute('selected_model'), 'key': document.getElementById("modelkey").value, 'url': this.value});};
+        document.getElementById("modelkey").onchange = function () {socket.emit('get_cluster_models', {'model': document.getElementById('btn_loadmodelaccept').getAttribute('selected_model'), 'key': this.value, 'url': document.getElementById("modelurl").value});};
     } else {
         document.getElementById("modelkey").ochange = function () {socket.emit('OAI_Key_Update', {'model': document.getElementById('btn_loadmodelaccept').getAttribute('selected_model'), 'key': this.value});};
         document.getElementById("modelurl").ochange = null;
     }
+    //show model select for APIs
+    if (data.show_online_model_select) {
+        document.getElementById("oaimodel").classList.remove("hidden");
+    } else {
+        document.getElementById("oaimodel").classList.add("hidden");
+    }
     //Multiple Model Select?
     if (data.multi_online_models) {
         document.getElementById("oaimodel").setAttribute("multiple", "");
@@ -1372,10 +1385,20 @@ function load_model() {
         var path = "";
     }
+    let selected_models = [];
+    for (item of document.getElementById("oaimodel").selectedOptions) {
+        selected_models.push(item.value);
+    }
+    if (selected_models == []) {
+        selected_models = "";
+    } else if (selected_models.length == 1) {
+        selected_models = selected_models[0];
+    }
     message = {'model': model, 'path': path, 'use_gpu': document.getElementById("use_gpu").checked,
                'key': document.getElementById('modelkey').value, 'gpu_layers': gpu_layers.join(),
                'disk_layers': disk_layers, 'url': document.getElementById("modelurl").value,
-               'online_model': document.getElementById("oaimodel").value};
+               'online_model': selected_models};
     socket.emit("load_model", message);
     document.getElementById("loadmodelcontainer").classList.add("hidden");