Merge pull request #180 from ebolam/united

Fix for APIs and Custom Models not working in AI menu
commit 4ed913ad4f by henk717, 2022-08-12 00:52:29 +02:00 (committed by GitHub)
2 changed files with 52 additions and 46 deletions

Changed file 1 of 2:

@@ -543,7 +543,10 @@ def device_config(config):
     global breakmodel, generator
     import breakmodel
     n_layers = utils.num_layers(config)
-    if(args.breakmodel_gpulayers is not None or (utils.HAS_ACCELERATE and args.breakmodel_disklayers is not None)):
+    if args.cpu:
+        breakmodel.gpu_blocks = [0]*n_layers
+        return
+    elif(args.breakmodel_gpulayers is not None or (utils.HAS_ACCELERATE and args.breakmodel_disklayers is not None)):
         try:
             if(not args.breakmodel_gpulayers):
                 breakmodel.gpu_blocks = []
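This hunk makes --cpu authoritative in device_config: the CPU check now short-circuits before any GPU/disk layer flags are consulted, so a CPU run can no longer fall through into the breakmodel layer logic. A minimal sketch of the new control flow, with stand-in names rather than the real KoboldAI globals:

    # Sketch only: stand-ins for args/utils, not the real KoboldAI module.
    from types import SimpleNamespace

    def device_config(args, n_layers):
        if args.cpu:
            # --cpu wins: every layer stays on the CPU, no GPU blocks assigned
            return [0] * n_layers
        elif args.breakmodel_gpulayers is not None:
            # only reached when --cpu was not passed
            return [int(x) for x in args.breakmodel_gpulayers.split(",")]
        return []

    args = SimpleNamespace(cpu=True, breakmodel_gpulayers="20,12")
    assert device_config(args, 32) == [0] * 32   # layer flags are ignored under --cpu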
@@ -1209,6 +1212,8 @@ def get_model_info(model, directory=""):
         url = True
     elif not utils.HAS_ACCELERATE and not torch.cuda.is_available():
         pass
+    elif args.cpu:
+        pass
     else:
         layer_count = get_layer_count(model, directory=directory)
         if layer_count is None:
@@ -1728,8 +1733,12 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
         time.sleep(0.1)
     if gpu_layers is not None:
         args.breakmodel_gpulayers = gpu_layers
+    elif initial_load:
+        gpu_layers = args.breakmodel_gpulayers
     if disk_layers is not None:
         args.breakmodel_disklayers = int(disk_layers)
+    elif initial_load:
+        disk_layers = args.breakmodel_disklayers
     #We need to wipe out the existing model and refresh the cuda cache
     model = None
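This hunk gives load_model a fallback on the very first load: when the UI has not supplied a layer split yet, the command-line values are used, while UI-supplied values keep overwriting args so they persist across reloads. The precedence, reduced to a hedged stand-in:

    # Stand-in for the gpu_layers/disk_layers precedence in load_model:
    # a value from the UI wins; on the initial load the CLI flag fills in.
    def pick_layers(ui_value, cli_value, initial_load):
        if ui_value is not None:
            return ui_value          # the real code also writes this back to args
        if initial_load:
            return cli_value         # first start: honor --breakmodel_gpulayers
        return None

    assert pick_layers(None, "20,12", initial_load=True) == "20,12"
    assert pick_layers("16,16", "20,12", initial_load=False) == "16,16"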
@@ -1838,41 +1847,19 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
     else:
         print("{0}NOT FOUND!{1}".format(colors.YELLOW, colors.END))
     if args.model:
-        if(vars.hascuda):
-            genselected = True
+        if args.cpu:
+            vars.usegpu = False
+            gpu_layers = None
+            disk_layers = None
+            vars.breakmodel = False
+        elif vars.hascuda:
             if(vars.bmsupported):
                 vars.usegpu = False
                 vars.breakmodel = True
             else:
                 vars.breakmodel = False
                 vars.usegpu = True
-            vars.breakmodel = utils.HAS_ACCELERATE
-        if(vars.bmsupported):
-            vars.usegpu = False
-            vars.breakmodel = True
-        if(args.cpu):
-            vars.usegpu = False
-            vars.breakmodel = utils.HAS_ACCELERATE
-    elif(vars.hascuda):
-        if(vars.bmsupported):
-            genselected = True
-            vars.usegpu = False
-            vars.breakmodel = True
-        else:
-            genselected = False
-    else:
-        genselected = False
-    if(vars.hascuda):
-        if(use_gpu):
-            if(vars.bmsupported):
-                vars.breakmodel = True
-                vars.usegpu = False
-                genselected = True
-            else:
-                vars.breakmodel = False
-                vars.usegpu = True
-                genselected = True
-        else:
-            vars.breakmodel = utils.HAS_ACCELERATE
+        vars.usegpu = False
+        genselected = True
     # Ask for API key if InferKit was selected
     if(vars.model == "InferKit"):
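The rewrite above collapses three overlapping if-chains (args.model, vars.hascuda, use_gpu) into a single ladder with one clear precedence: --cpu first, then CUDA with breakmodel where supported, then plain GPU. Distilled into a hedged stand-in that returns the chosen backend instead of mutating vars:

    # Stand-in decision ladder; the real code sets vars.usegpu/vars.breakmodel.
    def pick_backend(cpu_flag, hascuda, bmsupported):
        if cpu_flag:
            return "cpu"                 # --cpu overrides any CUDA capability
        elif hascuda:
            return "breakmodel" if bmsupported else "gpu"
        return "cpu"

    assert pick_backend(True, hascuda=True, bmsupported=True) == "cpu"
    assert pick_backend(False, hascuda=True, bmsupported=False) == "gpu"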
@@ -2091,7 +2078,8 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
     # If we're using torch_lazy_loader, we need to get breakmodel config
     # early so that it knows where to load the individual model tensors
-    if(utils.HAS_ACCELERATE or vars.lazy_load and vars.hascuda and vars.breakmodel):
+    if (utils.HAS_ACCELERATE or vars.lazy_load and vars.hascuda and vars.breakmodel) and not vars.nobreakmodel:
+        print(1)
         device_config(model_config)
     # Download model from Huggingface if it does not exist, otherwise load locally
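The new clause adds a vars.nobreakmodel veto without changing the grouping of the old condition: Python's `and` binds tighter than `or`, so the original expression already read as HAS_ACCELERATE or (lazy_load and hascuda and breakmodel). The guard after this commit, as a standalone predicate:

    def should_run_device_config(has_accelerate, lazy_load, hascuda, breakmodel, nobreakmodel):
        # `and` binds tighter than `or`, so the left side groups as
        # has_accelerate or (lazy_load and hascuda and breakmodel)
        return (has_accelerate or (lazy_load and hascuda and breakmodel)) and not nobreakmodel

    assert should_run_device_config(True, False, False, False, nobreakmodel=False)
    assert not should_run_device_config(True, False, False, False, nobreakmodel=True)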
@@ -2222,6 +2210,7 @@ def load_model(use_gpu=True, gpu_layers=None, disk_layers=None, initial_load=Fal
     elif(vars.breakmodel):  # Use both RAM and VRAM (breakmodel)
         vars.modeldim = get_hidden_size_from_model(model)
         if(not vars.lazy_load):
+            print(2)
             device_config(model.config)
         move_model_to_devices(model)
     elif(utils.HAS_ACCELERATE and __import__("breakmodel").disk_blocks > 0):
@@ -3460,7 +3449,7 @@ def get_message(msg):
     else:
         filename = "settings/{}.breakmodel".format(vars.model.replace('/', '_'))
         f = open(filename, "w")
-        f.write(msg['gpu_layers'] + '\n' + msg['disk_layers'])
+        f.write(str(msg['gpu_layers']) + '\n' + str(msg['disk_layers']))
         f.close()
     vars.colaburl = msg['url'] + "/request"
     load_model(use_gpu=msg['use_gpu'], gpu_layers=msg['gpu_layers'], disk_layers=msg['disk_layers'], online_model=msg['online_model'])
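The str() coercion guards against a crash on this path: over the socket, gpu_layers and disk_layers can arrive as ints, and int + str concatenation raises TypeError before the .breakmodel settings file is written. For example:

    # Why the old line failed when the socket delivered ints:
    msg = {'gpu_layers': 32, 'disk_layers': 0}
    try:
        msg['gpu_layers'] + '\n' + msg['disk_layers']   # old code
    except TypeError:
        pass                                            # crashed here
    line = str(msg['gpu_layers']) + '\n' + str(msg['disk_layers'])
    assert line == "32\n0"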

Changed file 2 of 2:

@@ -1078,7 +1078,7 @@ function buildLoadModelList(ar, menu, breadcrumbs, showdelete) {
         html = html + "<span class=\"loadlisticon loadmodellisticon-folder oi oi-folder allowed\" aria-hidden=\"true\"></span>"
     } else {
         //this is a model
-        html = html + "<div class=\"loadlistpadding\"></div>"
+        html = html + "<div class=\"loadlisticon oi oi-caret-right allowed\"></div>"
     }
     //now let's do the delete icon if applicable
@@ -1096,6 +1096,7 @@ function buildLoadModelList(ar, menu, breadcrumbs, showdelete) {
         </div>"
         loadmodelcontent.append(html);
         //If this is a menu
+        console.log(ar[i]);
         if(ar[i][3]) {
             $("#loadmodel"+i).off("click").on("click", (function () {
                 return function () {
@@ -1105,15 +1106,27 @@ function buildLoadModelList(ar, menu, breadcrumbs, showdelete) {
             })(i));
         //Normal load
         } else {
-            $("#loadmodel"+i).off("click").on("click", (function () {
-                return function () {
-                    $("#use_gpu_div").addClass("hidden");
-                    $("#modelkey").addClass("hidden");
-                    $("#modellayers").addClass("hidden");
-                    socket.send({'cmd': 'selectmodel', 'data': $(this).attr("name")});
-                    highlightLoadLine($(this));
-                }
-            })(i));
+            if (['NeoCustom', 'GPT2Custom'].includes(menu)) {
+                $("#loadmodel"+i).off("click").on("click", (function () {
+                    return function () {
+                        $("#use_gpu_div").addClass("hidden");
+                        $("#modelkey").addClass("hidden");
+                        $("#modellayers").addClass("hidden");
+                        socket.send({'cmd': 'selectmodel', 'data': $(this).attr("name"), 'path': $(this).attr("pretty_name")});
+                        highlightLoadLine($(this));
+                    }
+                })(i));
+            } else {
+                $("#loadmodel"+i).off("click").on("click", (function () {
+                    return function () {
+                        $("#use_gpu_div").addClass("hidden");
+                        $("#modelkey").addClass("hidden");
+                        $("#modellayers").addClass("hidden");
+                        socket.send({'cmd': 'selectmodel', 'data': $(this).attr("name")});
+                        highlightLoadLine($(this));
+                    }
+                })(i));
+            }
         }
     }
 }
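For the NeoCustom and GPT2Custom menus, the click handler now also sends the entry's pretty_name attribute as 'path', which is what the server needs to locate a custom model folder on disk; every other menu keeps the old payload. A hedged sketch of how the receiving side can distinguish the two message shapes (hypothetical handler, not the real server code):

    # Hypothetical receiver for the two 'selectmodel' payload shapes.
    def on_select_model(msg):
        path = msg.get('path')            # present only for custom-model menus
        if path is not None:
            return ("custom", path)       # load from the folder the user picked
        return ("hosted", msg['data'])

    assert on_select_model({'cmd': 'selectmodel', 'data': 'my-gpt2',
                            'path': 'models/my-gpt2'}) == ("custom", "models/my-gpt2")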
@@ -2841,6 +2854,8 @@ $(document).ready(function(){
     if (msg.key) {
         $("#modelkey").removeClass("hidden");
         $("#modelkey")[0].value = msg.key_value;
+        //if we're in the API list, disable the load button until the model is selected (after the API Key is entered)
+        disableButtons([load_model_accept]);
     } else {
         $("#modelkey").addClass("hidden");
@ -2878,6 +2893,7 @@ $(document).ready(function(){
}
} else if(msg.cmd == 'oai_engines') {
$("#oaimodel").removeClass("hidden")
enableButtons([load_model_accept]);
selected_item = 0;
length = $("#oaimodel")[0].options.length;
for (let i = 0; i < length; i++) {
@@ -2914,6 +2930,7 @@ $(document).ready(function(){
             opt.innerHTML = engine[1];
             $("#oaimodel")[0].appendChild(opt);
         }
+        enableButtons([load_model_accept]);
     }
 });
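Taken together, the last three hunks implement the API-key gating from the commit title: revealing the key field disables the Load button, and it is re-enabled only once the key has produced an engine list (either the oai_engines reply or the populated engine dropdown). The handshake as a stand-in state machine:

    # Stand-in for the Load button gating; names mirror the JS helpers.
    class LoadDialog:
        def __init__(self):
            self.load_enabled = True
        def show_api_key_field(self):      # msg.key arrived: disableButtons(...)
            self.load_enabled = False
        def receive_engine_list(self):     # 'oai_engines' reply: enableButtons(...)
            self.load_enabled = True

    dialog = LoadDialog()
    dialog.show_api_key_field()
    assert not dialog.load_enabled
    dialog.receive_engine_list()
    assert dialog.load_enabled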