num_layers fixes
As requested by VE_FORBRYDERNE. (I may have applied it in too many places; this needs testing, but since the other version is already broken I am committing it first so I can test more easily.)
This commit is contained in:
parent
d7a2424d2d
commit
a2c82bbcc8
|
@ -212,7 +212,7 @@ def device_list(n_layers, primary=None, selected=None):
|
||||||
def device_config(model):
|
def device_config(model):
|
||||||
global breakmodel, generator
|
global breakmodel, generator
|
||||||
import breakmodel
|
import breakmodel
|
||||||
n_layers = model.config.num_layers or model.config.n_layer
|
n_layers = model.config.num_layers if hasattr(model.config, "num_layers") else model.config.n_layer
|
||||||
if(args.breakmodel_gpulayers is not None):
|
if(args.breakmodel_gpulayers is not None):
|
||||||
try:
|
try:
|
||||||
breakmodel.gpu_blocks = list(map(int, args.breakmodel_gpulayers.split(',')))
|
breakmodel.gpu_blocks = list(map(int, args.breakmodel_gpulayers.split(',')))
|
||||||
|
@ -278,7 +278,7 @@ def device_config(model):
|
||||||
# If all layers are on the same device, use the old GPU generation mode
|
# If all layers are on the same device, use the old GPU generation mode
|
||||||
while(len(breakmodel.gpu_blocks) and breakmodel.gpu_blocks[-1] == 0):
|
while(len(breakmodel.gpu_blocks) and breakmodel.gpu_blocks[-1] == 0):
|
||||||
breakmodel.gpu_blocks.pop()
|
breakmodel.gpu_blocks.pop()
|
||||||
if(len(breakmodel.gpu_blocks) and breakmodel.gpu_blocks[-1] in (-1, model.config.num_layers or model.config.n_layer)):
|
if(len(breakmodel.gpu_blocks) and breakmodel.gpu_blocks[-1] in (-1, model.config.num_layers if hasattr(model.config, "num_layers") else model.config.n_layer)):
|
||||||
vars.breakmodel = False
|
vars.breakmodel = False
|
||||||
vars.usegpu = True
|
vars.usegpu = True
|
||||||
model = model.half().to(len(breakmodel.gpu_blocks)-1)
|
model = model.half().to(len(breakmodel.gpu_blocks)-1)
|
||||||
|
|
Loading…
Reference in New Issue