Fix for lazy loading

ebolam 2022-06-06 14:27:47 -04:00
parent 60b70bdf8a
commit d9480ec439
2 changed files with 9 additions and 1 deletion

View File

@@ -350,8 +350,10 @@ def sendModelSelection(menu="mainmenu"):
     if menu in ('NeoCustom', 'GPT2Custom'):
         menu_list = [[folder, menu, "", False] for folder in next(os.walk('./models'))[1]]
         menu_list.append(["Return to Main Menu", "mainmenu", "", True])
-        emit('from_server', {'cmd': 'hide_layer_bar'}, broadcast=True)
         emit('from_server', {'cmd': 'show_model_menu', 'data': menu_list, 'menu': 'custom'}, broadcast=True)
+        time.sleep(0.2)
+        emit('from_server', {'cmd': 'hide_layer_bar'}, broadcast=True)
+        time.sleep(0.2)
     else:
         emit('from_server', {'cmd': 'show_model_menu', 'data': model_menu[menu], 'menu': menu}, broadcast=True)
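
This hunk is the heart of the fix: hide_layer_bar used to be broadcast before show_model_menu, and it is now sent afterwards, with short sleeps between messages, presumably so the client has rendered the menu before the layer bar is hidden. A minimal sketch of that ordering, assuming Flask-SocketIO (suggested by the emit(..., broadcast=True) calls); the event name and handler below are hypothetical, not part of the commit:

import time
from flask import Flask
from flask_socketio import SocketIO, emit

app = Flask(__name__)
socketio = SocketIO(app)

@socketio.on('select_model_menu')   # hypothetical event name
def send_custom_menu(menu_list):
    # Show the menu first, then hide the layer bar: if the hide message
    # arrived first, rendering the menu could leave the bar visible again.
    emit('from_server', {'cmd': 'show_model_menu', 'data': menu_list, 'menu': 'custom'}, broadcast=True)
    time.sleep(0.2)                 # give the client time to process
    emit('from_server', {'cmd': 'hide_layer_bar'}, broadcast=True)
    time.sleep(0.2)
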
@@ -989,6 +991,7 @@ def load_model(use_gpu=True, key='', gpu_layers=None, initial_load=False):
     global model
     global generator
     global torch
+    global model_config
     vars.noai = False
     if not initial_load:
         set_aibusy(True)
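
The added `global model_config` matters because load_model presumably assigns model_config further down, and in Python an assignment inside a function without a global declaration binds a new local variable, leaving the module-level one untouched. A runnable sketch of that scoping rule (the values are hypothetical):

model_config = None  # module level; later read by the lazy-load callback

def load_model_broken():
    model_config = {"n_layer": 28}   # local shadow; the global stays None

def load_model_fixed():
    global model_config              # rebind the module-level name
    model_config = {"n_layer": 28}

load_model_broken()
assert model_config is None
load_model_fixed()
assert model_config == {"n_layer": 28}
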
@@ -1618,6 +1621,8 @@ def load_model(use_gpu=True, key='', gpu_layers=None, initial_load=False):
                     import shutil
                     shutil.move(vars.model.replace('/', '_'), "models/{}".format(vars.model.replace('/', '_')))
                 print("\n", flush=True)
+                print("At lazy load section")
+                print(vars.lazy_load)
                 with maybe_use_float16(), torch_lazy_loader.use_lazy_torch_load(enable=vars.lazy_load, callback=get_lazy_load_callback(utils.num_layers(model_config)) if vars.lazy_load else None, dematerialized_modules=True):
                     if(vars.lazy_load): # torch_lazy_loader.py and low_cpu_mem_usage can't be used at the same time
                         lowmem = {}
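
The two added print lines are debug output confirming that execution reaches the lazy-load block and what vars.lazy_load holds. The with statement below them hinges on use_lazy_torch_load's enable flag; a rough sketch of that enable-flag pattern (NOT the real torch_lazy_loader API) is:

from contextlib import contextmanager

@contextmanager
def use_lazy_load(enable=True, callback=None):
    if not enable:
        yield                          # disabled: run the block unchanged
        return
    print("patching torch.load")       # stand-in for the real setup
    try:
        yield
    finally:
        print("restoring torch.load")  # teardown even if loading fails

# The same call site works whether lazy loading is on or off:
with use_lazy_load(enable=False):
    pass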

View File

@@ -2513,8 +2513,10 @@ $(document).ready(function(){
            }
            if(msg.menu == 'apilist') {
                $("#modelkey").removeClass("hidden");
+               console.log("Should be showing key");
            } else {
                $("#modelkey").addClass("hidden");
+               console.log("Should be hiding key");
            }
            buildLoadModelList(msg.data, msg.menu);
        } else if(msg.cmd == 'show_layer_bar') {
@@ -2529,6 +2531,7 @@ $(document).ready(function(){
            $("#gpu_count")[0].value = msg.gpu_count;
            update_gpu_layers();
        } else if(msg.cmd == 'hide_layer_bar') {
+           console.log("Should be removing layer bar");
            $("#modellayers").addClass("hidden");
        } else if(msg.cmd == 'check_enable_model_load') {
            //Check if it's safe to enable the load model button