Merge pull request #167 from VE-FORBRYDERNE/accelerate
Fix base fairseq dense models when using accelerate with a GPU
commit 74547b31d6
@@ -642,6 +642,11 @@ def move_model_to_devices(model):
     import breakmodel
 
     if(utils.HAS_ACCELERATE):
+        import accelerate.utils
+        for key, value in model.state_dict().items():
+            target_dtype = torch.float32 if breakmodel.primary_device == "cpu" else torch.float16
+            if(value.dtype is not target_dtype):
+                accelerate.utils.set_module_tensor_to_device(model, key, target_dtype)
         disk_blocks = breakmodel.disk_blocks
         gpu_blocks = breakmodel.gpu_blocks
         ram_blocks = len(utils.layers_module_names) - sum(gpu_blocks)
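For readers skimming the hunk: the added loop normalizes every tensor in the model's state dict to the dtype that the breakmodel primary device expects (float32 when the primary device is the CPU, float16 when it is a GPU), using accelerate.utils.set_module_tensor_to_device with the target dtype in place of a device (torch's .to() accepts a dtype there). The sketch below shows the same normalization in plain PyTorch on a toy module; normalize_model_dtype, the primary_device argument, and the nn.Linear stand-in are hypothetical names used only for illustration, not part of this patch.

import torch
import torch.nn as nn

def normalize_model_dtype(model: nn.Module, primary_device: str) -> None:
    """Cast floating-point parameters and buffers to the dtype the primary
    device expects: float32 when breakmodel keeps the primary device on the
    CPU, float16 when a GPU is the primary device."""
    target_dtype = torch.float32 if primary_device == "cpu" else torch.float16
    for name, tensor in model.state_dict().items():
        if tensor.is_floating_point() and tensor.dtype is not target_dtype:
            # Walk down to the submodule that owns this tensor so it can be
            # swapped in place (roughly what accelerate's helper does internally).
            *path, leaf = name.split(".")
            submodule = model
            for part in path:
                submodule = getattr(submodule, part)
            if leaf in submodule._parameters:
                old = submodule._parameters[leaf]
                submodule._parameters[leaf] = nn.Parameter(
                    old.data.to(target_dtype), requires_grad=old.requires_grad
                )
            elif leaf in submodule._buffers:
                submodule._buffers[leaf] = submodule._buffers[leaf].to(target_dtype)

# Hypothetical usage with a toy module standing in for the fairseq dense model:
toy = nn.Linear(4, 4).half()
normalize_model_dtype(toy, primary_device="cpu")
assert toy.weight.dtype is torch.float32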