Compare commits

...

4 Commits

Author SHA1 Message Date
Forkoz f26822614b
Merge 6dda1a4f32 into da6d34e26e 2024-04-28 11:28:45 +05:30
Puyuan Peng da6d34e26e
Merge pull request #119 from Keeo/patch-1
Update huggingface-hub to version that exists
2024-04-26 20:46:12 -05:00
Martin Morávek 47f808df4c
Update huggingface-hub to version that exists
Latest version as of today is 0.22.2 [huggingface-hub](https://github.com/huggingface/huggingface_hub/releases).

Likely cause of this issue is manual edit in the environments.yml.
2024-04-27 01:39:42 +02:00
Forkoz 6dda1a4f32
Float16 KV Cache in voicecraft.py 2024-04-05 17:52:28 +00:00
2 changed files with 4 additions and 4 deletions

View File

@@ -308,7 +308,7 @@ dependencies:
- h11==0.14.0
- httpcore==1.0.4
- httpx==0.27.0
- - huggingface-hub==0.22.4
+ - huggingface-hub==0.22.2
- hydra-colorlog==1.2.0
- hydra-core==1.3.2
- ipython==8.12.3

View File

@@ -711,7 +711,7 @@ class VoiceCraft(
##################### silence repetition handling #####################
# prepare the cache placeholder
# n_layers, 2, bsz, num_heads, src_len, head_dim
- past = torch.ones([self.args.num_decoder_layers, 2, x.shape[0]], device=x.device, dtype=torch.float32) if kvcache else None
+ past = torch.ones([self.args.num_decoder_layers, 2, x.shape[0]], device=x.device, dtype=torch.float16) if kvcache else None
# handle multi-span kv-cache
new_masked_span = False
@@ -1011,7 +1011,7 @@ class VoiceCraft(
# prepare the cache placeholder
# n_layers, 2, bsz, num_heads, src_len, head_dim
- past = torch.ones([self.args.num_decoder_layers, 2, x.shape[0]], device=x.device, dtype=torch.float32) if kvcache else None
+ past = torch.ones([self.args.num_decoder_layers, 2, x.shape[0]], device=x.device, dtype=torch.float16) if kvcache else None
# logging.info(f"number of decoder layers: {self.args.num_decoder_layers}")
# logging.info(f"number of decoder layers: {self.args.num_decoder_layers}")
# logging.info(f"number of decoder layers: {self.args.num_decoder_layers}")
@@ -1261,7 +1261,7 @@ class VoiceCraft(
# prepare the cache placeholder
# n_layers, 2, bsz, num_heads, src_len, head_dim
- past = torch.ones([self.args.num_decoder_layers, 2, x.shape[0]], device=x.device, dtype=torch.float32) if kvcache else None
+ past = torch.ones([self.args.num_decoder_layers, 2, x.shape[0]], device=x.device, dtype=torch.float16) if kvcache else None
# logging.info(f"number of decoder layers: {self.args.num_decoder_layers}")
# logging.info(f"number of decoder layers: {self.args.num_decoder_layers}")
# logging.info(f"number of decoder layers: {self.args.num_decoder_layers}")