Merge pull request #220 from ebolam/united
Fix for loading models on CPU only that don't support breakmodel
commit d2ff32be32
@@ -1516,6 +1516,7 @@ def get_model_info(model, directory=""):
     layer_count = get_layer_count(model, directory=directory)
     if layer_count is None:
         breakmodel = False
+        gpu = True
     else:
         breakmodel = True
     if model in ["NeoCustom", "GPT2Custom"]:
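The intent of the hunk: get_layer_count returns None for models whose layer structure cannot be determined, which is the signal that breakmodel (splitting layers across devices) is unsupported. In that case the loader now disables breakmodel and falls back to plain single-device loading by setting gpu = True. Below is a minimal standalone sketch of that decision, assuming a stubbed get_layer_count; the stub, its illustrative data, and the choose_load_mode name are hypothetical and not part of the repository.

from typing import Optional, Tuple

# Hypothetical stub standing in for the repository's get_layer_count helper:
# it returns None when the model's layer structure cannot be determined,
# i.e. when breakmodel cannot split the model across devices.
def get_layer_count(model: str, directory: str = "") -> Optional[int]:
    known_layer_counts = {"example-neo-2.7B": 32}  # illustrative data only
    return known_layer_counts.get(model)

def choose_load_mode(model: str, directory: str = "") -> Tuple[bool, bool]:
    """Return (breakmodel, gpu), mirroring the decision in the hunk above."""
    layer_count = get_layer_count(model, directory=directory)
    if layer_count is None:
        # No layer info: breakmodel cannot split the model, so fall back to
        # loading it whole on a single device (the line this PR adds).
        breakmodel = False
        gpu = True
    else:
        # Layer info available: the model can be split across GPUs/CPU.
        breakmodel = True
        gpu = False  # assumption; the original hunk does not show this line
    return breakmodel, gpu

print(choose_load_mode("example-neo-2.7B"))  # (True, False)
print(choose_load_mode("unknown-model"))     # (False, True)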