Merge pull request #75 from Ph0rk0z/patch-2

Empty cuda cache between inferences.

commit 2ae80ef87a
Puyuan Peng, 2024-04-06 10:55:24 -05:00 (committed by GitHub)

@@ -98,7 +98,9 @@ def inference_one_sample(model, model_args, phn2num, text_tokenizer, audio_token
     gen_sample = audio_tokenizer.decode(
         [(gen_frames, None)]
     )
+    #Empty cuda cache between runs
+    if torch.cuda.is_available():
+        torch.cuda.empty_cache()
     # return
     return concat_sample, gen_sample
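
For reference, a minimal sketch of the pattern this patch applies: releasing the CUDA caching allocator's unused blocks between inference runs. The run_inferences, model, and batches names below are illustrative stand-ins, not functions from this repository.

import torch

def run_inferences(model, batches):
    # Illustrative driver loop over independent inference requests.
    outputs = []
    for batch in batches:
        with torch.no_grad():
            outputs.append(model(batch))
        # Same idea as the patch: hand cached-but-unreferenced GPU blocks
        # back to the driver so fragmentation from one run cannot push a
        # later run into an out-of-memory error.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
    return outputs

Note that torch.cuda.empty_cache() only frees blocks no tensor still references; live outputs stay allocated. It also carries a small cost, since subsequent allocations must go back through the CUDA driver instead of the allocator's cache.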