Compare commits


3 Commits

Author    SHA1        Message                                Date
Forkoz    64da32134a  Merge 6dda1a4f32 into fd20265324       2024-04-26 14:19:46 +08:00
pyp_l40   fd20265324  fix number bug in whisperx alignment   2024-04-25 14:38:03 -05:00
Forkoz    6dda1a4f32  Float16 KV Cache in voicecraft.py      2024-04-05 17:52:28 +00:00
2 changed files with 6 additions and 4 deletions

gradio_app.py

@@ -74,6 +74,8 @@ class WhisperxModel:
     def transcribe(self, audio_path):
         segments = self.model.transcribe(audio_path, batch_size=8)["segments"]
+        for segment in segments:
+            segment['text'] = replace_numbers_with_words(segment['text'])
         return self.align_model.align(segments, audio_path)
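For context, replace_numbers_with_words is defined elsewhere in gradio_app.py and does not appear in this diff; the align() hunk below applies the same normalization to the user-supplied transcript. A minimal sketch of what such a helper can look like, assuming the num2words package (the regex and fallback here are illustrative, not the repo's actual implementation):

    import re
    from num2words import num2words  # assumed dependency, not shown in this diff

    def replace_numbers_with_words(text):
        # Spell out each run of digits ("7" -> "seven") so the aligner
        # sees words rather than raw numerals.
        def spell_out(match):
            try:
                return num2words(int(match.group(0)))
            except Exception:
                return match.group(0)  # leave anything unconvertible as-is
        return re.sub(r"\d+", spell_out, text)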
@@ -177,7 +179,7 @@ def align(seed, transcript, audio_path):
     if align_model is None:
         raise gr.Error("Align model not loaded")
     seed_everything(seed)
+    transcript = replace_numbers_with_words(transcript).replace("  ", " ").replace("  ", " ")
     fragments = align_segments(transcript, audio_path)
     segments = [{
         "start": float(fragment["begin"]),
voicecraft.py

@@ -711,7 +711,7 @@ class VoiceCraft(
         ##################### silence repetition handling #####################
         # prepare the cache placeholder
         # n_layers, 2, bsz, num_heads, src_len, head_dim
-        past = torch.ones([self.args.num_decoder_layers, 2, x.shape[0]], device=x.device, dtype=torch.float32) if kvcache else None
+        past = torch.ones([self.args.num_decoder_layers, 2, x.shape[0]], device=x.device, dtype=torch.float16) if kvcache else None
         # handle multi-span kv-cache
         new_masked_span = False
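The placeholder above allocates only the first three of the six dimensions documented in the comment; the cache is expanded to the full (n_layers, 2, bsz, num_heads, src_len, head_dim) shape during decoding, so its dtype sets the cost of the whole cache. A standalone sketch of the saving at full shape, using illustrative sizes that are assumptions rather than values from the repo:

    import torch

    # Hypothetical cache shape: 16 decoder layers, 2 (keys and values),
    # batch 1, 12 heads, 2048 cached positions, head_dim 64.
    shape = [16, 2, 1, 12, 2048, 64]

    for dtype in (torch.float32, torch.float16):
        past = torch.ones(shape, dtype=dtype)
        print(dtype, past.nelement() * past.element_size() / 2**20, "MiB")
    # float32: 192.0 MiB vs float16: 96.0 MiB -- the cache halves in size.

One caveat: if keys and values are computed in float32, appending them to a float16 cache implies a downcast and a small precision loss, so the change pairs naturally with half-precision inference end to end.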
@@ -1011,7 +1011,7 @@ class VoiceCraft(
         # prepare the cache placeholder
         # n_layers, 2, bsz, num_heads, src_len, head_dim
-        past = torch.ones([self.args.num_decoder_layers, 2, x.shape[0]], device=x.device, dtype=torch.float32) if kvcache else None
+        past = torch.ones([self.args.num_decoder_layers, 2, x.shape[0]], device=x.device, dtype=torch.float16) if kvcache else None
         # logging.info(f"number of decoder layers: {self.args.num_decoder_layers}")
         # logging.info(f"number of decoder layers: {self.args.num_decoder_layers}")
         # logging.info(f"number of decoder layers: {self.args.num_decoder_layers}")
@@ -1261,7 +1261,7 @@ class VoiceCraft(
         # prepare the cache placeholder
         # n_layers, 2, bsz, num_heads, src_len, head_dim
-        past = torch.ones([self.args.num_decoder_layers, 2, x.shape[0]], device=x.device, dtype=torch.float32) if kvcache else None
+        past = torch.ones([self.args.num_decoder_layers, 2, x.shape[0]], device=x.device, dtype=torch.float16) if kvcache else None
         # logging.info(f"number of decoder layers: {self.args.num_decoder_layers}")
         # logging.info(f"number of decoder layers: {self.args.num_decoder_layers}")
         # logging.info(f"number of decoder layers: {self.args.num_decoder_layers}")