{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/pyp/miniconda3/envs/voicecraft/lib/python3.9/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      " from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
   "source": [
    "# import libs\n",
    "import torch\n",
    "import torchaudio\n",
    "\n",
    "from data.tokenizer import (\n",
    "    AudioTokenizer,\n",
    "    TextTokenizer,\n",
    ")"
   ]
  },
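  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# optional: quick sanity check that the GPU pinned via CUDA_VISIBLE_DEVICES above is actually visible to torch\n",
    "print(torch.cuda.is_available())\n",
    "if torch.cuda.is_available():\n",
    "    print(torch.cuda.get_device_name(0))"
   ]
  },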
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# hyperparameters for inference\n",
    "left_margin = 0.08\n",
    "right_margin = 0.08\n",
    "seed = 1\n",
    "codec_audio_sr = 16000  # sample rate of the audio the codec produces\n",
    "codec_sr = 50  # codec frame rate (tokens per second)\n",
    "top_k = 0  # 0 disables top-k filtering\n",
    "top_p = 0.8  # nucleus sampling\n",
    "temperature = 1\n",
    "kvcache = 0\n",
    "silence_tokens = [1388, 1898, 131]  # codec tokens treated as silence\n",
    "# if there is long silence in the generated audio, reduce stop_repetition to 3, 2 or even 1\n",
    "stop_repetition = 2\n",
    "# if there is long silence or unnaturally stretched words, increase sample_batch_size to 2, 3 or even 4;\n",
    "# the model will then generate sample_batch_size samples for the same input and keep the shortest one\n",
    "sample_batch_size = 1\n",
    "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
    "\n",
    "# point to the original audio file, or record your own\n",
    "# write down the transcript for the file, or run whisper to get it (and correct it if it's not accurate), then save it as a .txt file; a whisper sketch is in the next cell\n",
    "orig_audio = \"/home/pyp/VoiceCraft/demo/84_121550_000074_000000.wav\"\n",
    "orig_transcript = \"But when I had approached so near to them The common object, which the sense deceives, Lost not by distance any of its marks,\"\n",
    "\n",
    "# move the audio and transcript to the temp folder\n",
    "temp_folder = \"/home/pyp/VoiceCraft/demo/temp\"\n",
    "os.makedirs(temp_folder, exist_ok=True)\n",
    "os.system(f\"cp {orig_audio} {temp_folder}\")\n",
    "filename = os.path.splitext(orig_audio.split(\"/\")[-1])[0]\n",
    "with open(f\"{temp_folder}/{filename}.txt\", \"w\") as f:\n",
    "    f.write(orig_transcript)\n",
    "\n",
    "# run MFA to get the forced alignment\n",
    "align_temp = f\"{temp_folder}/mfa_alignments\"\n",
    "os.makedirs(align_temp, exist_ok=True)\n",
    "os.system(f\"mfa align -j 1 --output_format csv {temp_folder} english_us_arpa english_us_arpa {align_temp}\")\n",
    "# if the above fails, it could be because the audio is too hard for the alignment model; increasing the beam size usually solves the issue\n",
    "# os.system(f\"mfa align -j 1 --output_format csv {temp_folder} english_us_arpa english_us_arpa {align_temp} --beam 1000 --retry_beam 2000\")\n",
    "audio_fn = f\"{temp_folder}/{filename}.wav\"\n",
    "transcript_fn = f\"{temp_folder}/{filename}.txt\"\n",
    "align_fn = f\"{align_temp}/{filename}.csv\""
   ]
  },
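  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# optional: if you don't have a transcript, a minimal sketch of transcribing with the openai-whisper package\n",
    "# (assumes `pip install openai-whisper`; model name \"base.en\" is just an example)\n",
    "# check and correct the output before writing it into the .txt file used above\n",
    "run_whisper = False  # set to True to run this sketch\n",
    "if run_whisper:\n",
    "    import whisper\n",
    "    asr_model = whisper.load_model(\"base.en\")\n",
    "    print(asr_model.transcribe(orig_audio)[\"text\"])"
   ]
  },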
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Dora directory: /tmp/audiocraft_pyp\n"
     ]
    }
   ],
   "source": [
    "# take a look at demo/temp/mfa_alignments and decide which part of the audio to use as the prompt\n",
    "cut_off_sec = 3.01  # according to the forced-alignment file, the word \"common\" stops at 3.01 sec\n",
    "target_transcript = \"But when I had approached so near to them The common I cannot believe that the same model can also do text to speech synthesis as well!\"\n",
    "info = torchaudio.info(audio_fn)\n",
    "audio_dur = info.num_frames / info.sample_rate\n",
    "\n",
    "assert cut_off_sec < audio_dur, f\"cut_off_sec {cut_off_sec} is larger than the audio duration {audio_dur}\"\n",
    "prompt_end_frame = int(cut_off_sec * info.sample_rate)\n",
    "\n",
    "# load model, tokenizer, and other necessary files\n",
    "from models import voicecraft\n",
    "ckpt_fn = \"/data/scratch/pyp/exp_pyp/VoiceCraft/gigaspeech/pretrained_830M/best_bundle.pth\"\n",
    "encodec_fn = \"/data/scratch/pyp/exp_pyp/audiocraft/encodec/xps/6f79c6a8/checkpoint.th\"\n",
    "ckpt = torch.load(ckpt_fn, map_location=\"cpu\")\n",
    "model = voicecraft.VoiceCraft(ckpt[\"config\"])\n",
    "model.load_state_dict(ckpt[\"model\"])\n",
    "model.to(device)\n",
    "model.eval()\n",
    "\n",
    "phn2num = ckpt[\"phn2num\"]\n",
    "\n",
    "text_tokenizer = TextTokenizer(backend=\"espeak\")\n",
    "audio_tokenizer = AudioTokenizer(signature=encodec_fn)  # this also puts the neural codec model on the GPU\n",
    "\n",
    "# run the model to get the output\n",
    "decode_config = {\n",
    "    \"top_k\": top_k,\n",
    "    \"top_p\": top_p,\n",
    "    \"temperature\": temperature,\n",
    "    \"stop_repetition\": stop_repetition,\n",
    "    \"kvcache\": kvcache,\n",
    "    \"codec_audio_sr\": codec_audio_sr,\n",
    "    \"codec_sr\": codec_sr,\n",
    "    \"silence_tokens\": silence_tokens,\n",
    "    \"sample_batch_size\": sample_batch_size,\n",
    "}\n",
    "from inference_tts_scale import inference_one_sample\n",
    "concated_audio, gen_audio = inference_one_sample(model, ckpt[\"config\"], phn2num, text_tokenizer, audio_tokenizer, audio_fn, target_transcript, device, decode_config, prompt_end_frame)\n",
    "\n",
    "# save segments for comparison; you can listen to both files in the next cell\n",
    "concated_audio, gen_audio = concated_audio[0].cpu(), gen_audio[0].cpu()\n",
    "\n",
    "output_dir = \"/home/pyp/VoiceCraft/demo/generated_tts\"\n",
    "os.makedirs(output_dir, exist_ok=True)\n",
    "\n",
    "seg_save_fn_gen = f\"{output_dir}/{os.path.basename(audio_fn)[:-4]}_gen_seed{seed}.wav\"\n",
    "seg_save_fn_concat = f\"{output_dir}/{os.path.basename(audio_fn)[:-4]}_concat_seed{seed}.wav\"\n",
    "\n",
    "torchaudio.save(seg_save_fn_gen, gen_audio, codec_audio_sr)\n",
    "torchaudio.save(seg_save_fn_concat, concated_audio, codec_audio_sr)\n",
    "\n",
    "# if you get an error importing T5 from transformers, try:\n",
    "#   pip uninstall Pillow\n",
    "#   pip install Pillow\n",
    "# you might get warnings like \"WARNING:phonemizer:words count mismatch on 300.0% of the lines (3/1)\"; these can be safely ignored"
   ]
  },
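  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# optional: listen to the results inline; a minimal sketch using IPython's built-in audio widget,\n",
    "# assuming the two .wav files saved by the previous cell\n",
    "from IPython.display import Audio, display\n",
    "display(Audio(seg_save_fn_concat))  # the concatenated segment\n",
    "display(Audio(seg_save_fn_gen))  # the generated segment"
   ]
  },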
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "voicecraft",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}