diff --git a/aiserver.py b/aiserver.py
index c5687dcf..20ec110e 100644
--- a/aiserver.py
+++ b/aiserver.py
@@ -2563,6 +2563,9 @@ def get_message(msg):
elif(msg['cmd'] == 'list_model'):
sendModelSelection(menu=msg['data'])
elif(msg['cmd'] == 'load_model'):
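+        # Persist the requested GPU layer split so it can be restored next time this model is selected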
+        with open("settings/" + vars.model.replace('/', '_') + ".breakmodel", "w") as f:
+            f.write(msg['gpu_layers'])
load_model(use_gpu=msg['use_gpu'], key=msg['key'], gpu_layers=msg['gpu_layers'])
elif(msg['cmd'] == 'selectmodel'):
if msg['data'] in ('NeoCustom', 'GPT2Custom') and 'path' not in msg:
@@ -2574,7 +2577,13 @@ def get_message(msg):
else:
layers = get_layer_count(vars.model)
if layers is not None:
- emit('from_server', {'cmd': 'show_layer_bar', 'data': layers, 'gpu_count': torch.cuda.device_count()}, broadcast=True)
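+                # Reuse a previously saved layer split for this model; otherwise default every slider to the full layer count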
+                if path.exists("settings/" + vars.model.replace('/', '_') + ".breakmodel"):
+                    with open("settings/" + vars.model.replace('/', '_') + ".breakmodel", "r") as f:
+                        breakmodel = f.read().split(",")
+                else:
+                    breakmodel = [layers] * torch.cuda.device_count()
+                emit('from_server', {'cmd': 'show_layer_bar', 'data': layers, 'gpu_count': torch.cuda.device_count(), 'breakmodel': breakmodel}, broadcast=True)
else:
emit('from_server', {'cmd': 'hide_layer_bar'}, broadcast=True)
elif(msg['cmd'] == 'loadselect'):
diff --git a/static/application.js b/static/application.js
index e4908bd2..bceff5e1 100644
--- a/static/application.js
+++ b/static/application.js
@@ -2410,7 +2410,8 @@ $(document).ready(function(){
$("#modellayers").removeClass("hidden");
html = "";
for (let i=0; i < msg.gpu_count; i++) {
-			html += "GPU " + i + ": <input type='range' class='form-range airange' min='0' max='" + msg.data + "' step='1' value='" + msg.data + "' id='gpu_layers" + i + "' onchange='update_gpu_layers();'><br>";
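+			// Seed each slider with the saved layer split sent by the server instead of the maximum layer count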
+			html += "GPU " + i + ": <input type='range' class='form-range airange' min='0' max='" + msg.data + "' step='1' value='" + msg.breakmodel[i] + "' id='gpu_layers" + i + "' onchange='update_gpu_layers();'><br>";
}
$("#model_layer_bars").html(html);
$("#gpu_layers_max").html(msg.data);