Empty cuda cache between inferences

Forkoz 2024-04-06 00:05:06 +00:00 committed by GitHub
parent 142772c3df
commit 1e79d9032e
1 changed file with 4 additions and 2 deletions

@@ -98,7 +98,9 @@ def inference_one_sample(model, model_args, phn2num, text_tokenizer, audio_tokenizer
     gen_sample = audio_tokenizer.decode(
         [(gen_frames, None)]
     )
+    #Empty cuda cache between runs
+    if torch.cuda.is_available():
+        torch.cuda.empty_cache()
     # return
     return concat_sample, gen_sample
@@ -187,4 +189,4 @@ if __name__ == "__main__":
         seg_save_fn_concat = f"{args.output_dir}/concat_{new_audio_fn[:-4]}_{i}_seed{args.seed}.wav"
         torchaudio.save(seg_save_fn_gen, gen_audio, args.codec_audio_sr)
-        torchaudio.save(seg_save_fn_concat, concated_audio, args.codec_audio_sr)
+        torchaudio.save(seg_save_fn_concat, concated_audio, args.codec_audio_sr)
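
For context: torch.cuda.empty_cache() returns the CUDA caching allocator's unused, cached blocks to the driver; it does not free tensors that are still referenced. Guarding the call with torch.cuda.is_available() keeps the script working on CPU-only machines. Below is a minimal sketch of the same pattern in a standalone loop (infer_all, model, and prompts are hypothetical stand-ins for illustration, not VoiceCraft's actual API):

import torch

def infer_all(model, prompts):
    # Hypothetical driver loop illustrating the pattern from this commit:
    # release cached CUDA memory between independent inferences.
    outputs = []
    for prompt in prompts:
        with torch.no_grad():
            outputs.append(model(prompt))
        # Return cached-but-unused allocator blocks to the driver so the
        # next run (or another process) can claim the memory. Tensors that
        # are still referenced are unaffected.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
    return outputs

The trade-off is a small slowdown on the next allocation, since PyTorch must request fresh memory from the driver instead of reusing its cache; in exchange, the resident GPU footprint between runs stays lower.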