VoiceCraft/inference_speech_editing.ipynb

304 lines
632 KiB
Plaintext
Raw Permalink Normal View History

2024-03-21 19:02:20 +01:00
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" \n",
"os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"7\"\n",
"os.environ[\"USER\"] = \"YOUR_USERNAME\" # TODO change this to your username"
2024-03-21 19:02:20 +01:00
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/pyp/miniconda3/envs/voicecraft/lib/python3.9/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
" from .autonotebook import tqdm as notebook_tqdm\n"
]
}
],
"source": [
"# import libs\n",
"import torch\n",
"import torchaudio\n",
"import numpy as np\n",
"import random\n",
2024-04-18 21:38:55 +02:00
"from argparse import Namespace\n",
2024-03-21 19:02:20 +01:00
"\n",
"from data.tokenizer import (\n",
" AudioTokenizer,\n",
" TextTokenizer,\n",
")\n",
"\n",
"from models import voicecraft"
2024-03-21 19:02:20 +01:00
]
},
2024-03-30 20:45:26 +01:00
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# install MFA models and dictionaries if you haven't done so already\n",
"!source ~/.bashrc && \\\n",
" conda activate voicecraft && \\\n",
" mfa model download dictionary english_us_arpa && \\\n",
" mfa model download acoustic english_us_arpa"
]
},
2024-03-21 19:02:20 +01:00
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"# hyperparameters for inference\n",
"left_margin = 0.08\n",
"right_margin = 0.08\n",
"codec_audio_sr = 16000\n",
"codec_sr = 50\n",
"top_k = 0\n",
"top_p = 0.8\n",
"temperature = 1\n",
"kvcache = 0\n",
2024-03-29 00:21:30 +01:00
"# adjust the below three arguments if the generation is not as good\n",
"seed = 1 # random seed magic\n",
"silence_tokens = [1388,1898,131] # codec tokens whose sampling probability is adjusted when stop_repetition is enabled\n",
"stop_repetition = -1 # -1 means do not adjust prob of silence tokens; if there are long silences or unnaturally stretched words in the generated audio, reduce this to 3, 2 or even 1\n",
"# NOTE(review): earlier comments here referenced sample_batch_size, which is not defined or used anywhere in this notebook (it belongs to the TTS demo) — reworded to avoid confusion\n",
"def seed_everything(seed):\n",
" os.environ['PYTHONHASHSEED'] = str(seed)\n",
" random.seed(seed)\n",
" np.random.seed(seed)\n",
" torch.manual_seed(seed)\n",
" torch.cuda.manual_seed(seed)\n",
" torch.backends.cudnn.benchmark = False\n",
" torch.backends.cudnn.deterministic = True\n",
"seed_everything(seed)\n",
2024-03-21 19:02:20 +01:00
"device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
2024-04-18 21:38:55 +02:00
"# load model, tokenizer, and other necessary files\n",
"voicecraft_name=\"giga330M.pth\" # or gigaHalfLibri330M_TTSEnhanced_max16s.pth, giga830M.pth\n",
"\n",
"# the new way of loading the model, with huggingface, recommended\n",
"from models import voicecraft\n",
"model = voicecraft.VoiceCraft.from_pretrained(f\"pyp1/VoiceCraft_{voicecraft_name.replace('.pth', '')}\")\n",
"phn2num = model.args.phn2num\n",
"config = vars(model.args)\n",
"model.to(device)\n",
"model.eval()  # the old loading path below calls eval(); without it the recommended path runs inference with dropout active\n",
"\n",
"# # the old way of loading the model\n",
"# from models import voicecraft\n",
"# filepath = hf_hub_download(repo_id=\"pyp1/VoiceCraft\", filename=voicecraft_name, repo_type=\"model\")\n",
"# ckpt = torch.load(filepath, map_location=\"cpu\")\n",
"# model = voicecraft.VoiceCraft(ckpt[\"config\"])\n",
"# model.load_state_dict(ckpt[\"model\"])\n",
"# config = vars(model.args)\n",
"# phn2num = ckpt[\"phn2num\"]\n",
"# model.to(device)\n",
"# model.eval()\n",
"\n",
"encodec_fn = \"./pretrained_models/encodec_4cb2048_giga.th\"\n",
"if not os.path.exists(encodec_fn):\n",
"    os.makedirs(os.path.dirname(encodec_fn), exist_ok=True)  # the old wget-then-mv pair failed when ./pretrained_models was missing\n",
"    os.system(f\"wget https://huggingface.co/pyp1/VoiceCraft/resolve/main/encodec_4cb2048_giga.th -O {encodec_fn}\")\n",
"audio_tokenizer = AudioTokenizer(signature=encodec_fn) # will also put the neural codec model on gpu\n",
"\n",
"text_tokenizer = TextTokenizer(backend=\"espeak\")\n",
2024-03-21 19:02:20 +01:00
"\n",
"# point to the original file or record the file\n",
"# write down the transcript for the file, or run whisper to get the transcript (and you can modify it if it's not accurate), save it as a .txt file\n",
"orig_audio = \"./demo/84_121550_000074_000000.wav\"\n",
"orig_transcript = \"But when I had approached so near to them The common object, which the sense deceives, Lost not by distance any of its marks,\"\n",
"# move the audio and transcript to temp folder\n",
"temp_folder = \"./demo/temp\"\n",
"os.makedirs(temp_folder, exist_ok=True)\n",
"os.system(f\"cp {orig_audio} {temp_folder}\")\n",
"filename = os.path.splitext(orig_audio.split(\"/\")[-1])[0]\n",
"with open(f\"{temp_folder}/{filename}.txt\", \"w\") as f:\n",
"    f.write(orig_transcript)\n",
"# run MFA to get the alignment\n",
"align_temp = f\"{temp_folder}/mfa_alignments\"\n",
"os.makedirs(align_temp, exist_ok=True)\n",
"os.system(f\"mfa align -j 1 --output_format csv {temp_folder} english_us_arpa english_us_arpa {align_temp}\")\n",
"# if it fail, it could be because the audio is too hard for the alignment model, increasing the beam size usually solves the issue\n",
"# os.system(f\"mfa align -j 1 --output_format csv {temp_folder} english_us_arpa english_us_arpa {align_temp} --beam 1000 --retry_beam 2000\")\n",
"audio_fn = f\"{temp_folder}/{filename}.wav\"\n",
"transcript_fn = f\"{temp_folder}/{filename}.txt\"\n",
"align_fn = f\"{align_temp}/{filename}.csv\"\n",
"\n"
]
},
{
"cell_type": "code",
2024-03-29 00:21:30 +01:00
"execution_count": 5,
2024-03-21 19:02:20 +01:00
"metadata": {},
"outputs": [
{
2024-03-29 00:21:30 +01:00
"name": "stdout",
2024-03-21 19:02:20 +01:00
"output_type": "stream",
"text": [
2024-03-29 00:21:30 +01:00
"original:\n"
2024-03-21 19:02:20 +01:00
]
2024-03-29 00:21:30 +01:00
},
{
"data": {
"text/html": [
"\n",
" <audio controls=\"controls\" >\n",
" <source src=\"data:audio/wav;base64,UklGRqTgAwBXQVZFZm10IBAAAAABAAEAgD4AAAB9AAACABAAZGF0YYDgAwDS/14AsACf/5X/ff+e/6H/Wv9ZACcAv/9Q/2H/FAArAEoALQDp/x8APAA3AAgAIgARAN3/3v/l//X/FQAQAAMA6P/m/9z/yv8TAOz/0//C/6j/CgAlABAA///h//3/DwDa/6r/q//r/w4A7//O/+n/LwBOAFQAMgD1/+3/EAD9/w0AIwAZAP7/IAAwABwAJQAjACcAJgAgADsASwAtADgAZQBpACsADgAKAAUAJwA+ADgANQBHAFkAWAAmACIARgBLAEEARAAbAB8ATABaAFQAMQAEAPH/2//c/1gAZAAwADcAJwDs/+7/MgAzABAA/v8hAPH/xv/n/wMAIQAPAAgA///k/87/0P8AAAAAAQDo/9//3P/L/+X/+v/2/87/5P/t////+//p//j/4f8AAAsACAAEAPv/6f/i//n/CgAQAAkAAgDy/+H/0//j//L//v8JAAkAAgD+/+T/zv/F/7j/vf/P/+n/6v/h//T/+P/R/+f/3P+x/5//m/+0/7D/yP+l/7f/uf+W/7L/wv/s/+X/y//K/87/4v/g/9f/3P/i//T/EwAhADAAKgDy//P/CAARAAgAAgAnACoACADr//3//P8QACYAPgBQADkAUgBuAGUAWABVAEsAYwB8AGEARgA5ACcALwAoAA8AKQAxACoAMgAkAAoA9v8YACcAHAAmABIAIQApACsALgAuADEAFwAyAD8ALAAsACgAHQAAABMAIAAbAB8AKgArAPf/7P/5/+7/6P/7/xAABgAJAAQA6//2/+z/2v/d//D/CQD9//L/6//a/9T/6P8BAOj/3v/h/+7/9P/4/woACADx//b/HgAGAOH/w//H/9r/2f/Z/9H/0f/M/8f/x//U/7z/l/+P/5H/hv+D/4n/mP+W/47/if+B/3z/d/97/43/mP+q/8n/uP+m/5n/iv+r/6b/of+y/7//1v/j/+D/0f+//8H/2//L/7X/zP/P/9n/8v/X/9T/4f/p//b/3v/S/9f/1v/G/+H/FQARABIAHAADAPb/9P/2//b/9/8EABMALwAuADUAKAAQAAsADgAfADwAQgAUABAAEgANAAgABgAGAAkADgD5/+b/6P/1//n/6v/n//n/AgDv/+z/6//n//L/6P/0//b/4f/e/9f/z//U/9n/5v/y//X//f/6//7/AQAAAAoAEgALAAsAGwAVABAAIgAuACoAQABYAFUAXABJAEIATQBYAFsAXgBlAGAAXgBiAFkAUQBIAFAAWwBaAGgAcAB5AJIApAC0ALIAtwDFAMUAyAC2AKIAqACrAKMAnAChAKsAsQCvAKMAmgCRAIQAhACTAKEAowCuAK8AnQCKAHUAegB8AH0AhACIAJIAnQCWAIgAhgB/AGsAbgBwAG4AdAB1AHkAfgB6AIMAeAB6AIUAhwCBAHoAfgB9AHcAhACJAIkAhwCDAIEAcAB3AHkAiQB9AGYAdQB7AFMATgBfAFMAPQA1AC0AGgAaAAYA2v/Q/8b/1f/z//n/AADz/93/u/+n/5T/h/+G/5D/pf+2/7r/sv+u/6L/m/+e/5L/kf+I/33/eP90/3b/Zf9p/1b/P/9E/0T/SP9H/1P/Vv9g/1X/PP8x/yP/Ff8D//D+2v7N/sb+wv7L/tH+2/7x/tf+1v7z/vz++/4G//3+Bf8Q/wX/HP8q/zX/Of82/0j/Uv93/5n/pf+9/8P/xv/H/+H/9f8AACMALAA3ACAABgAQACAANwBJAHAAnADWAPkA8wDtAOMA3QDEAJMAZQBWAEQAkACLAIMAtwDNAN4AdABGAHsAeABXAFoAzQChAFsAtgDDAH8AUACeAJEAGgD//zIAaABwADwA8v9kALX/0/6w/zMAKv8v/xIA3f/x/0UAEwAVAO//AAAyAEMAdQB//4//BADD/7f/+P
9WANn/CgAbALUAuADg//kA0AEGAWQBXgJPAokB0wEOAoMBHQJUAs0BoQHmARACdQLpAX8CVgO7AokCgALVAr8ClALSAR4AFQCSACcA/P+0/0/9Z/71/Ab+Sv8eBJ/+yf0JCXv07AhgERn0lgHFJMUrGxm1GDAQ/RQ9H3QQJxJDF8AUUxBOEl8RzgYSDo8Gqv+wAAD3svo3ASH6IvKZ9arvqe+N9UTtaO208zL0MfSz+LD43vnCAKz/zf7IAgYCrwCuAiX/Av22/6n+APnt9yr4bPWV+bf3PPVT+iX30PSm+WT5uPct/Hv8wvtb/pX9hvsW/eP92/v++1n8LPuj/Lr8yvsx/PD7Kvzc/LP9wP10/lz+jP7h/pv/FwCm/8j+Xf61/hD/u/4P/hH9yfz0/UD++vwW/AX8pvvC+sX54fke+nH5efig+Or4Hvil94P3afe49tH1xvQ49BjzDPI78sDxcvC58BbxgvA18CPv0O3E7XjufO4T7gLuBu4k7qjt9+3i7uTue/BH8iLzh/MV9j35KvyG/5UBwwQvCAYLMg42EWkTmRYHGoUbfx1ZH1sf+x2xHIYaIxj3FfwSBBECDv0JLQayAp7+k/qe98X1R/R68zjymvCl8AHwwu5X7SXtaO0W747wNfEE8vPxMfJV8rfxevFC8uTypPMb9An0K/WS+Ib7t/1SADIElwnkDaMQwhM8F8MY1hnQGm4aThv3HEUdNxyfGsAYoBf7FRASSRAND3wMIAySDBUNyA1eD5YQFhFeERESWxM+EygTAhP3Ea4PIg0aCyII1QR2AXL90vni9aHwtu2G7LrqSul66ZjqeOoK6U/nn+eI5anh2eAz4uThjN943lTdmdpv1o7TSdLv0vbX4d8n6RD0DwHsDN8XlSBlJhUsJzBzMno1HDnpOeU5DTlzNM8rxyHzF+kOsgVZ/L33CfYl9Xn0rvRV9U73g/m4+qD9VgKPBsQK1Q9dFLcWSBcaF74VgRJtDqIKrgYfA17/RvxP+Wv2gvNB8bbvFO/q7wzx0fO092/7H/72AKYDcQY0CGYIcQgrCJ8Hawc5Br0DywBq/tH7c/mT9Uvy1fCm8JfwUPEe9EH4Vv0VAnMG7AloDmYUNRr3HQ0gNCMRJmYlfiBWGsYVng8vCLkB5/0y+xr5AviA+OH4zPn/+kX9TADCAmgEdAdfCv4JkQg6BwYE6P3T9j7v1ug04gnaA9MWzRbHH8A7usS2IrMUsRSyjre0vmXFr8sl3kD5wA/4G2QqHzw6RfhJtEVCQZg79zc3M20vZyfdF5YNmgNY83PeStKizBDM4Mwg0nbcaOtw+WoEBw8/FfAaWiGaKWkrTyq8LAkw1i3SJJUaMRHRByT9+/FZ6ezj9+Dn4P/juuXB5nLruvIw+NL6EgBPB2IPjBX0GfYcgB7bHUwaWBV0DY4FHABW/dz54fTM8UrxkvIm8Xju9u2u7o7wMPNK9/H5HP0jA1QIBAsrCrwIEQhLB2wEY/8P/OL60/qy+vL5ovml+Tb65/tm/HT8b/61BC8MGRASE1gXoxw8Hk4cvxnUFmQU0hCZDUoKoAZ+BXMGxAaSA0YAQwA9AXz/tPom97T2UPXG8f3tzeqb5krhztuk1A3N9Mb+w1/Df8Jtvqu+w8gc1CbZA+Qm/OsRWh8aKC80hT7OPzE72DZtM6YpgSLSIJ0a9grm/KP15ux74IzTTM6B0frWJNyS6HD4GwUjEEkdZyaKJ5IoQStBLTkp3iHkHeocVBYJCowAWPrs8IPn++KX4NPe/eB76O7wsvY+/HUEHA7QErITMhYyGWMZnhdHFmcS5AxHB14CsPsa8snpe+VX46Pf19zk3gnj++Zh6rbuZ/P49xT+RwQXCXIN7BKfGTIbTBiHFewVpRJQCNf/Ffwq+d/zRfDA7SrsP+w77U3w/PMp+Cv/pQkvEnsW/huLIUMioiCSHxgfth1iGw8YnxSOEEIKpwQJ/y75HfRy8cDw/e8V8W/zafVh9kD13/
JJ7qTp+uJd2u7T1s2Gy7vLxsnAxl/F0Mh1yu7MadHY3MvwowSPF/An6DU3PRFDn0MwO5s
" Your browser does not support the audio element.\n",
" </audio>\n",
" "
],
"text/plain": [
"<IPython.lib.display.Audio object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"edited:\n"
]
},
{
"data": {
"text/html": [
"\n",
" <audio controls=\"controls\" >\n",
" <source src=\"data:audio/wav;base64,UklGRqRjAwBXQVZFZm10IBAAAAABAAEAgD4AAAB9AAACABAAZGF0YYBjAwDO/2cAwACW/4v/cf+V/5j/S/9hACsAuf9A/1L/FgAvAFEAMQDm/yIAQQA9AAgAJgASANr/2//i//T/FwASAAQA5v/k/9n/xf8VAOr/z/+8/6D/CwAoABEA///f//z/EADW/6L/o//q/w8A7f/J/+f/NABVAFsANwD0/+v/EgD9/w4AJgAbAP7/IwA1AB8AKAAnACoAKgAjAEAAUgAxAD0AbgBzAC8ADwALAAUAKgBEAD0AOgBOAGEAYAAqACUATABSAEcASgAeACIAUwBiAFwANgAEAPD/2P/Z/2EAbQA1ADwAKgDr/+z/NgA4ABEA/f8kAO//wP/k/wMAJAARAAkA///h/8n/zP8AAAAAAgDm/9z/2P/G/+P/+f/1/8n/4f/r////+//n//f/3v8AAAwACQAFAPr/5//g//n/CwASAAoAAgDx/9//z//g//H//v8KAAoAAgD+/+H/yv/A/7H/tv/L/+f/6P/e//P/+P/N/+X/2f+q/5b/kf+t/6n/wv+c/7D/sv+M/6r/vP/r/+P/xv/F/8n/3//d/9T/2f/f//P/FQAkADQALgDx//L/CQASAAkAAwAqAC4ACQDp//3/+/8SACoAQwBYAD4AWgB5AG4AYABdAFIAbACIAGoATQA+ACsAMwArABEALQA1AC0ANwAnAAsA9f8aACsAHgAqABMAJAAtAC8AMgAyADYAGQA3AEUAMAAwACwAIAAAABUAIwAeACIALgAvAPb/6v/4/+z/5f/6/xEABwAKAAQA6f/2/+r/1v/a/+7/CgD9//D/6f/W/8//5v8BAOb/2//e/+z/8//3/wsACQDv//b/IQAGAN7/vf/C/9b/1f/V/83/zf/I/8H/wf/Q/7b/jf+F/4f/e/94/37/jv+N/4T/fv91/2//av9v/4P/jv+i/8T/sv+d/5D/f/+j/57/mP+r/7n/0v/g/93/zf+5/7v/1//G/67/yP/L/9b/8f/T/9D/3v/m//X/2//O/9T/0v/B/97/FwASABMAHwADAPX/8//1//b/9v8EABUAMwAzADkALAASAAwADwAiAEEASAAWABIAFAAPAAkABwAHAAoADwD5/+T/5v/0//n/6P/k//j/AwDu/+r/6f/l//D/5f/z//X/3//b/9P/y//Q/9X/4//x//T//P/5//3/AQAAAAsAFAAMAAwAHQAXABIAJQAzAC0ARgBgAF0AZABPAEgAVABgAGQAZwBuAGkAZwBrAGIAWQBPAFgAZABjAHIAegCEAJ8AswDFAMMAyADXANcA2gDHALEAtwC7ALIAqgCvALsAwgC/ALIAqACfAJAAkACgALAAsgC+AL8ArACXAIAAhgCIAIkAkACVAJ8ArACkAJUAkgCLAHUAeAB6AHgAfwCAAIQAigCFAI8AhACFAJIAlACNAIUAiQCJAIIAkACVAJYAkwCPAI0AegCCAIQAlgCJAHAAgACHAFsAVQBnAFsAQwA6ADEAHAAdAAYA1//M/8H/0f/x//j/AADy/9n/tf+e/4r/fP97/4b/nP+v/7T/q/+n/5r/kf+V/4j/h/99/3D/a/9n/2n/Vv9b/0b/Lv8z/zP/N/82/0P/Rv9S/0X/Kv8e/w7///7s/tf+v/6w/qr+pf6u/rX+wP7Y/rv+u/7a/uT+4/7v/uX+7v76/u7+B/8W/yL/J/8k/zj/Qv9q/4//nf+3/73/wf/C/97/9P8AACcAMAA8ACMABgASACMAPABPAHoAqgDpABABCQEDAfgA8QDWAKAAbgBeAEoAngCYAI8AyADgAPIAfgBNAIYAgwBfAGIA4ACwAGQAxgDVAIsAWACtAJ8AHAD//zcAcgB6AEIA8P9tAK7/t/6p/zgAF/8b/xQA2v/w/0sAFQAXAO3/AAA3AEkAfwBz/4X/BAC+/7D/+P
9eANb/CwAdAMUAyQDd/xAB+wEeAYQBlgKGAq0B/gE+AqYBTwKLAvgByAETAkACrgIWArkCpAP8AsUCugIYAwAD0QL9ASEAFwCfACsA+/+t/w/9Qf6u/Nf9Of9/BH/+lf3dCWzzvQn4EgHzuwEmKMsvaRv7Gq0R6hYcIvcR0hNmGakW0xH9E/cSbgddDykHov/AAC32NfpUAZf53PCk9CruKe6Y9IzrsuuS8hzzHPMH+AT4TvnUAKT/sf4KAzYCvwDtAhD/u/yv/4r+W/gv93L3c/T++PT2P/TO+VT2yPMR+cn49fbT+yn8Xvs0/lz9HfvS/LH9efuf+wP8u/pT/G38ZvvX+5D70PuS/H39i/1P/jX+af7H/pH/GgCe/6z+N/6X/vn+nf7i/cz8ffzE/Rf+s/y6+6f7QPtH+jL5UvmU+df4yPfy90P4ZPfh9rv2nvbe9eL0vvMj8+jxxPD38HHwBO9S77fvFe/C7pftJewX7Nzs4ext7FvsX+yA7PjrTuxQ7VLtDu8F8fPxYvIs9Z74z/t6/7oBMwXwCAoMgA/LEjEVrRhrHAweNSA6IjwivSBUH/YcWxr8F7sUlRJLD+gKvgbyAn7+E/rY9tT0NPNT8vTwMO8874juLO2g62rrs+uI7SPv2e+78Kjw7fAT8WfwJPD+8LDxgvIE8+/yLPTj9x37gf1ZAJQEeAorDysSkxVeGQkbNRxGHdwc0B2gH/Ufzx4RHQcbzBkAGLkTyBFvEKINPQ26DUgODA/HEBwSqBL2EroTIhUDFesUwRSeEx8RVg4fDOIIRwWZATX9Qfn09DfvCOy76sboNOdp56Hofeju5gvlYuUZ49/e/N123x/fkdxj2yTaKdee0nnPFs7LzkjU7twN5/jyJwEcDhAakyPsKSIwkzQWN2M6XD47PzY/Sz5EOdUv4iQmGkgQOAYD/Pv2HvUl9GrzpPNa9IH26/g8+mj9jQIpB8ILSRE8Fs0Yaxk5Gb0XNBTAD5wLSwdoA0//7vuy+Ir1XPLn7zfuhu1v7q3vsvLw9gT78/0NAfwDCQf1CCsJNwnsCFIIGQjMBhUE3gBF/m/72fid9AnxcO897y3v9u8G84v3GP1FAgoH1gq7D0YWnhy4IP4icCaRKdUoeiPCHMYXDhHvCOIBtf3B+nj4RvfQ9zr4OvmJ+gX9UwADA9AEIwhTC+kKWwnkB2UEt/379bTttuZ334zW4c5oyNvBQbrSswmwEqzUqeuq5rC0uAPA4cYJ26H4MxGKHkkupUGWS8RQHExBRxFBGz3rN8kzBisOGtYO7wMu8l7bF87qx0vHLcjpzTLZhOnW+NIEaRAzF2kdaiRsLWYvMi7YMHI0DDI0KAYdxRKJCOH8sfBE51jhHd4L3mzhUONv5I/pgvF491j6FAD7B8wQhhdWHJ8fTSGZILYcThewDhEGHwAX/Uv52/N+8PDvVvHI79zsTewX7SLvA/J+9mL52PxtAxgJBwwaC4kJzwj2B9QEVf+z+2r6Wvo1+mT5DfkQ+a35h/sR/CD8Sv4kBU0NlBHTFH0ZRR8EIegeHBztGEQWXhLZDjwLPAf/BQoHYwfmA0wASQBaAXD/N/pV9tr1VfR38FXs2uhF5Hjee9io0F7ItsF6vs292LxnuKu4sMMU0JTVceHL+5ATPCLKK/s4Q0SrRaFA4jsnOHktrCXWIw8d+Aud/K/0JeuV3XbPu8k7zTXT2dhr5r73kwWeEfof7iksK00sOy9pMQIt+iSjIJIfYRj1CpkA0/mJ70PlUOC03cfbI95S5ozv1/Xm+94EaA+KFIEVPBiDG7gbyhlTGBgUEw7zB5UCS/vT8L7nDOO04KrcnNnZ22DgruRl6B/tP/I79+f9rATsCa4OqRT6G7IdhxqBF/AXWxQUCdP/ufuJ+MHy0u4T7FfqbuqC69zu4vJw9xj/iArbE4wYkB6gJGkloiN5IvMhcSDmHUUahBYUEjQLFQXy/o34BvMc8FnvhO6270fycPR+9UP0qv
Go7JbnT+Do1uHPOsmzxu7Gy8R+wfy/vsOKxT3IIc2d2WXvEAW5GZsr3DrWQjpJ1UmgQPo
" Your browser does not support the audio element.\n",
" </audio>\n",
" "
],
"text/plain": [
"<IPython.lib.display.Audio object>"
]
},
"metadata": {},
"output_type": "display_data"
2024-03-21 19:02:20 +01:00
}
],
"source": [
"editTypes_set = set(['substitution', 'insertion', 'deletion'])\n",
"# propose what do you want the target modified transcript to be\n",
"target_transcript = \"But when I saw the mirage of the lake in the distance, which the sense deceives, Lost not by distance any of its marks,\"\n",
"edit_type = \"substitution\"\n",
"assert edit_type in editTypes_set, f\"Invalid edit type {edit_type}. Must be one of {editTypes_set}.\"\n",
"\n",
"# if you want to do a second modification on top of the first one, write down the second modification (target_transcript2, type_of_modification2)\n",
"# make sure the two modification do not overlap, if they do, you need to combine them into one modification\n",
"\n",
"# run the script to turn user input to the format that the model can take\n",
"from edit_utils import get_span\n",
"orig_span, new_span = get_span(orig_transcript, target_transcript, edit_type)\n",
"if orig_span[0] > orig_span[1]:\n",
"    raise RuntimeError(f\"example {audio_fn} failed\")  # was a bare RuntimeError(...) call, which constructs the exception and silently discards it\n",
"if orig_span[0] == orig_span[1]:\n",
" orig_span_save = [orig_span[0]]\n",
"else:\n",
" orig_span_save = orig_span\n",
"if new_span[0] == new_span[1]:\n",
" new_span_save = [new_span[0]]\n",
"else:\n",
" new_span_save = new_span\n",
"\n",
"orig_span_save = \",\".join([str(item) for item in orig_span_save])\n",
"new_span_save = \",\".join([str(item) for item in new_span_save])\n",
"from inference_speech_editing_scale import get_mask_interval\n",
"\n",
"start, end = get_mask_interval(align_fn, orig_span_save, edit_type)\n",
"info = torchaudio.info(audio_fn)\n",
"audio_dur = info.num_frames / info.sample_rate\n",
"morphed_span = (max(start - left_margin, 1/codec_sr), min(end + right_margin, audio_dur)) # in seconds\n",
"\n",
"# span in codec frames\n",
"mask_interval = [[round(morphed_span[0]*codec_sr), round(morphed_span[1]*codec_sr)]]\n",
"mask_interval = torch.LongTensor(mask_interval) # [M,2], M==1 for now\n",
"\n",
2024-04-14 00:11:15 +02:00
"\n",
2024-03-21 19:02:20 +01:00
"\n",
"# run the model to get the output\n",
"from inference_speech_editing_scale import inference_one_sample\n",
"\n",
"decode_config = {'top_k': top_k, 'top_p': top_p, 'temperature': temperature, 'stop_repetition': stop_repetition, 'kvcache': kvcache, \"codec_audio_sr\": codec_audio_sr, \"codec_sr\": codec_sr, \"silence_tokens\": silence_tokens}\n",
2024-04-18 21:38:55 +02:00
"orig_audio, new_audio = inference_one_sample(model, Namespace(**config), phn2num, text_tokenizer, audio_tokenizer, audio_fn, target_transcript, mask_interval, device, decode_config)\n",
2024-03-21 19:02:20 +01:00
" \n",
"# save segments for comparison\n",
"orig_audio, new_audio = orig_audio[0].cpu(), new_audio[0].cpu()\n",
"# logging.info(f\"length of the resynthesize orig audio: {orig_audio.shape}\")\n",
"\n",
2024-03-29 00:21:30 +01:00
"# display the audio\n",
"from IPython.display import Audio\n",
"print(\"original:\")\n",
"display(Audio(orig_audio, rate=codec_audio_sr))\n",
"\n",
"print(\"edited:\")\n",
"display(Audio(new_audio, rate=codec_audio_sr))\n",
"\n",
"# # save the audio\n",
"# # output_dir\n",
"# output_dir = \"./demo/generated_se\"\n",
"# os.makedirs(output_dir, exist_ok=True)\n",
2024-03-21 19:02:20 +01:00
"\n",
2024-03-29 00:21:30 +01:00
"# save_fn_new = f\"{output_dir}/{os.path.basename(audio_fn)[:-4]}_new_seed{seed}.wav\"\n",
2024-03-21 19:02:20 +01:00
"\n",
2024-03-29 00:21:30 +01:00
"# torchaudio.save(save_fn_new, new_audio, codec_audio_sr)\n",
2024-03-21 19:02:20 +01:00
"\n",
2024-03-29 00:21:30 +01:00
"# save_fn_orig = f\"{output_dir}/{os.path.basename(audio_fn)[:-4]}_orig.wav\"\n",
"# if not os.path.isfile(save_fn_orig):\n",
"# orig_audio, orig_sr = torchaudio.load(audio_fn)\n",
"# if orig_sr != codec_audio_sr:\n",
"# orig_audio = torchaudio.transforms.Resample(orig_sr, codec_audio_sr)(orig_audio)\n",
"# torchaudio.save(save_fn_orig, orig_audio, codec_audio_sr)\n",
2024-03-21 19:02:20 +01:00
"\n",
2024-03-29 00:21:30 +01:00
"# # if you get error importing T5 in transformers\n",
"# # try \n",
"# # pip uninstall Pillow\n",
"# # pip install Pillow\n",
"# # you are likely to get warning looks like WARNING:phonemizer:words count mismatch on 300.0% of the lines (3/1), this can be safely ignored"
2024-03-21 19:02:20 +01:00
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "voicecraft",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.18"
}
},
"nbformat": 4,
"nbformat_minor": 2
}