Compare commits

...

3 Commits

Author   SHA1        Message                             Date
Forkoz   acecbe7ceb  Merge 6dda1a4f32 into 77d1d5a69c    2024-04-30 14:00:34 +08:00
pyp_l40  77d1d5a69c  small fix                           2024-04-29 08:47:32 -05:00
Forkoz   6dda1a4f32  Float16 KV Cache in voicecraft.py  2024-04-05 17:52:28 +00:00
2 changed files with 3 additions and 4 deletions

View File (voicecraft.py)

@@ -711,7 +711,7 @@ class VoiceCraft(
##################### silence repetition handling #####################
# prepare the cache placeholder
# n_layers, 2, bsz, num_heads, src_len, head_dim
- past = torch.ones([self.args.num_decoder_layers, 2, x.shape[0]], device=x.device, dtype=torch.float32) if kvcache else None
+ past = torch.ones([self.args.num_decoder_layers, 2, x.shape[0]], device=x.device, dtype=torch.float16) if kvcache else None
# handle multi-span kv-cache
new_masked_span = False
@@ -1011,7 +1011,7 @@ class VoiceCraft(
# prepare the cache placeholder
# n_layers, 2, bsz, num_heads, src_len, head_dim
- past = torch.ones([self.args.num_decoder_layers, 2, x.shape[0]], device=x.device, dtype=torch.float32) if kvcache else None
+ past = torch.ones([self.args.num_decoder_layers, 2, x.shape[0]], device=x.device, dtype=torch.float16) if kvcache else None
# logging.info(f"number of decoder layers: {self.args.num_decoder_layers}")
# logging.info(f"number of decoder layers: {self.args.num_decoder_layers}")
# logging.info(f"number of decoder layers: {self.args.num_decoder_layers}")
@@ -1261,7 +1261,7 @@ class VoiceCraft(
# prepare the cache placeholder
# n_layers, 2, bsz, num_heads, src_len, head_dim
- past = torch.ones([self.args.num_decoder_layers, 2, x.shape[0]], device=x.device, dtype=torch.float32) if kvcache else None
+ past = torch.ones([self.args.num_decoder_layers, 2, x.shape[0]], device=x.device, dtype=torch.float16) if kvcache else None
# logging.info(f"number of decoder layers: {self.args.num_decoder_layers}")
# logging.info(f"number of decoder layers: {self.args.num_decoder_layers}")
# logging.info(f"number of decoder layers: {self.args.num_decoder_layers}")
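For reference, the effect of the dtype change above is to halve the memory footprint of the KV-cache placeholder (and of any keys/values later written into it at the same dtype), since float16 stores 2 bytes per element versus 4 for float32. A minimal sketch of the difference, using made-up shapes rather than a real VoiceCraft config:

import torch

# Made-up shapes for illustration only; the real placeholder in the diff above
# is laid out as [num_decoder_layers, 2, bsz, ...].
num_decoder_layers, bsz = 16, 1

fp32_past = torch.ones([num_decoder_layers, 2, bsz], dtype=torch.float32)
fp16_past = torch.ones([num_decoder_layers, 2, bsz], dtype=torch.float16)

# Bytes per element: 4 for float32, 2 for float16.
print(fp32_past.element_size(), fp16_past.element_size())  # 4 2
# Total bytes for the same layout: float16 uses half.
print(fp32_past.element_size() * fp32_past.nelement(),
      fp16_past.element_size() * fp16_past.nelement())

Note that torch.ones(...) here only allocates the placeholder; whether half precision is acceptable for the cached keys and values themselves depends on the decoder's numerics.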

View File

@@ -78,7 +78,6 @@ class Trainer:
if self.rank == 0 and self.progress['step'] % self.args.tb_write_every_n_steps == 0:
self.writer.add_scalar("train/lr", cur_lr, self.progress['step'])
- self.wandb.log({"train/lr": cur_lr}, step=self.progress['step'])
all_inds = list(range(len(batch['y'])))
sum_losses = 0