Mirror of https://github.com/w-okada/voice-changer.git (synced 2025-01-23 13:35:12 +03:00)

Commit c2b979a05f: Merge branch 'master' of github.com:w-okada/voice-changer

Changed file: Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb
File diff suppressed because one or more lines are too long
@ -30,7 +30,8 @@
|
||||
"> Seems that PTH models performance better than ONNX for now, you can still try ONNX models and see if it satisfies you\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"*You can always [click here](https://github.com/YunaOneeChan/Voice-Changer-Settings) to check if these settings are up-to-date*\n",
|
||||
"*You can always [click here](https://rentry.co/VoiceChangerGuide#gpu-chart-for-known-working-chunkextra\n",
|
||||
") to check if these settings are up-to-date*\n",
|
||||
"<br><br>\n",
|
||||
"\n",
|
||||
"---\n",
|
||||
@ -46,7 +47,7 @@
|
||||
"# **Credits and Support**\n",
|
||||
"Realtime Voice Changer by [w-okada](https://github.com/w-okada)\\\n",
|
||||
"Colab files updated by [rafacasari](https://github.com/Rafacasari)\\\n",
|
||||
"Recommended settings by [YunaOneeChan](https://github.com/YunaOneeChan)\\\n",
|
||||
"Recommended settings by [Raven](https://github.com/ravencutie21)\\\n",
|
||||
"Modified again by [Hina](https://huggingface.co/HinaBl)\n",
|
||||
"\n",
|
||||
"Need help? [AI Hub Discord](https://discord.gg/aihub) » ***#help-realtime-vc***\n",
|
||||
@ -54,26 +55,6 @@
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"cellView": "form",
|
||||
"id": "RhdqDSt-LfGr"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# @title **[Optional]** Connect to Google Drive\n",
|
||||
"# @markdown Using Google Drive can improve load times a bit and your models will be stored, so you don't need to re-upload every time that you use.\n",
|
||||
"import os\n",
|
||||
"from google.colab import drive\n",
|
||||
"\n",
|
||||
"if not os.path.exists('/content/drive'):\n",
|
||||
" drive.mount('/content/drive')\n",
|
||||
"\n",
|
||||
"%cd /content/drive/MyDrive"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
@ -83,8 +64,9 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#=================Updated=================\n",
|
||||
"# @title **[1]** Clone repository and install dependencies\n",
|
||||
"# @markdown This first step will download the latest version of Voice Changer and install the dependencies. **It will take around 2 minutes to complete.**\n",
|
||||
"# @markdown This first step will download the latest version of Voice Changer and install the dependencies. **It can take some time to complete.**\n",
|
||||
"import os\n",
|
||||
"import time\n",
|
||||
"import subprocess\n",
|
||||
@ -93,12 +75,28 @@
|
||||
"import base64\n",
|
||||
"import codecs\n",
|
||||
"\n",
|
||||
"from IPython.display import clear_output, Javascript\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"#@markdown ---\n",
|
||||
"# @title **[Optional]** Connect to Google Drive\n",
|
||||
"# @markdown Using Google Drive can improve load times a bit and your models will be stored, so you don't need to re-upload every time that you use.\n",
|
||||
"\n",
|
||||
"Use_Drive=False #@param {type:\"boolean\"}\n",
|
||||
"\n",
|
||||
"from google.colab import drive\n",
|
||||
"\n",
|
||||
"if Use_Drive==True:\n",
|
||||
" if not os.path.exists('/content/drive'):\n",
|
||||
" drive.mount('/content/drive')\n",
|
||||
"\n",
|
||||
" %cd /content/drive/MyDrive\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"externalgit=codecs.decode('uggcf://tvguho.pbz/j-bxnqn/ibvpr-punatre.tvg','rot_13')\n",
|
||||
"rvctimer=codecs.decode('uggcf://tvguho.pbz/uvanoy/eipgvzre.tvg','rot_13')\n",
|
||||
"pathloc=codecs.decode('ibvpr-punatre','rot_13')\n",
|
||||
"!git clone --depth 1 $externalgit &> /dev/null\n",
|
||||
"\n",
|
||||
"from IPython.display import clear_output, Javascript\n",
|
||||
"\n",
|
||||
"def update_timer_and_print():\n",
|
||||
" global timer\n",
|
||||
@ -112,165 +110,114 @@
|
||||
"timer = 0\n",
|
||||
"threading.Thread(target=update_timer_and_print, daemon=True).start()\n",
|
||||
"\n",
|
||||
"# os.system('cls')\n",
|
||||
"clear_output()\n",
|
||||
"!rm -rf rvctimer\n",
|
||||
"!git clone --depth 1 $rvctimer\n",
|
||||
"!cp -f rvctimer/index.html $pathloc/client/demo/dist/\n",
|
||||
"\n",
|
||||
"!pip install colorama --quiet\n",
|
||||
"from colorama import Fore, Style\n",
|
||||
"\n",
|
||||
"print(f\"{Fore.CYAN}> Cloning the repository...{Style.RESET_ALL}\")\n",
|
||||
"!git clone --depth 1 $externalgit &> /dev/null\n",
|
||||
"print(f\"{Fore.GREEN}> Successfully cloned the repository!{Style.RESET_ALL}\")\n",
|
||||
"%cd $pathloc/server/\n",
|
||||
"\n",
|
||||
"print(\"\\033[92mSuccessfully cloned the repository\")\n",
|
||||
"# Read the content of the file\n",
|
||||
"file_path = '../client/demo/dist/assets/gui_settings/version.txt'\n",
|
||||
"\n",
|
||||
"with open(file_path, 'r') as file:\n",
|
||||
" file_content = file.read()\n",
|
||||
"\n",
|
||||
"# Replace the specific text\n",
|
||||
"text_to_replace = \"-.-.-.-\"\n",
|
||||
"new_text = \"Google.Colab\" # New text to replace the specific text\n",
|
||||
"\n",
|
||||
"modified_content = file_content.replace(text_to_replace, new_text)\n",
|
||||
"\n",
|
||||
"# Write the modified content back to the file\n",
|
||||
"with open(file_path, 'w') as file:\n",
|
||||
" file.write(modified_content)\n",
|
||||
"\n",
|
||||
"print(f\"Text '{text_to_replace}' has been replaced with '{new_text}' in the file.\")\n",
|
||||
"\n",
|
||||
"print(f\"{Fore.CYAN}> Installing libportaudio2...{Style.RESET_ALL}\")\n",
|
||||
"!apt-get -y install libportaudio2 -qq\n",
|
||||
"\n",
|
||||
"!sed -i '/torch==/d' requirements.txt\n",
|
||||
"!sed -i '/torchaudio==/d' requirements.txt\n",
|
||||
"!sed -i '/numpy==/d' requirements.txt\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"!apt-get install libportaudio2 &> /dev/null --quiet\n",
|
||||
"!pip install pyworld onnxruntime-gpu uvicorn faiss-gpu fairseq jedi google-colab moviepy decorator==4.4.2 sounddevice numpy==1.23.5 pyngrok --quiet\n",
|
||||
"print(\"\\033[92mInstalling Requirements!\")\n",
|
||||
"print(f\"{Fore.CYAN}> Installing pre-dependencies...{Style.RESET_ALL}\")\n",
|
||||
"# Install dependencies that are missing from requirements.txt and pyngrok\n",
|
||||
"!pip install faiss-gpu fairseq pyngrok --quiet\n",
|
||||
"!pip install pyworld --no-build-isolation --quiet\n",
|
||||
"# Install webstuff\n",
|
||||
"import asyncio\n",
|
||||
"import re\n",
|
||||
"!pip install playwright\n",
|
||||
"!playwright install\n",
|
||||
"!playwright install-deps\n",
|
||||
"!pip install nest_asyncio\n",
|
||||
"from playwright.async_api import async_playwright\n",
|
||||
"print(f\"{Fore.CYAN}> Installing dependencies from requirements.txt...{Style.RESET_ALL}\")\n",
|
||||
"!pip install -r requirements.txt --quiet\n",
|
||||
"clear_output()\n",
|
||||
"!pip install -r requirements.txt --no-build-isolation --quiet\n",
|
||||
"# Maybe install Tensor packages?\n",
|
||||
"#!pip install torch-tensorrt\n",
|
||||
"#!pip install TensorRT\n",
|
||||
"print(\"\\033[92mSuccessfully installed all packages!\")\n",
|
||||
"# os.system('cls')\n",
|
||||
"clear_output()\n",
|
||||
"print(\"\\033[92mFinished, please continue to the next cell\")"
|
||||
"print(f\"{Fore.GREEN}> Successfully installed all packages!{Style.RESET_ALL}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"\n",
|
||||
"#@title #**[Optional]** Upload a voice model (Run this before running the Voice Changer)**[Currently Under Construction]**\n",
|
||||
"#@markdown ---\n",
|
||||
"#@title **[Optional]** Upload a voice model (Run this before running the Voice Changer)\n",
|
||||
"import os\n",
|
||||
"import json\n",
|
||||
"from IPython.display import Image\n",
|
||||
"import requests\n",
|
||||
"\n",
|
||||
"model_slot = \"0\" #@param ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199']\n",
|
||||
"\n",
|
||||
"#@markdown #Model Number `(Default is 0)` you can add multiple models as long as you change the number!\n",
|
||||
"model_number = \"0\" #@param ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199']\n",
|
||||
"\n",
|
||||
"!rm -rf model_dir/$model_number\n",
|
||||
"#@markdown ---\n",
|
||||
"#@markdown #**[Optional]** Add an icon to the model `(can be any image/leave empty for no image)`\n",
|
||||
"icon_link = \"https://cdn.donmai.us/original/8a/92/8a924397e9aac922e94bdc1f28ff978a.jpg\" #@param {type:\"string\"}\n",
|
||||
"#@markdown ---\n",
|
||||
"!rm -rf model_dir/$model_slot\n",
|
||||
"#@markdown **[Optional]** Add an icon to the model\n",
|
||||
"icon_link = \"https://cdn.donmai.us/sample/12/57/__rin_penrose_idol_corp_drawn_by_juu_ame__sample-12579843de9487cf2db82058ba5e77d4.jpg\" #@param {type:\"string\"}\n",
|
||||
"icon_link = '\"'+icon_link+'\"'\n",
|
||||
"!mkdir model_dir\n",
|
||||
"!mkdir model_dir/$model_number\n",
|
||||
"#@markdown #Put your model's download link here `(must be a zip file)`\n",
|
||||
"model_link = \"https://huggingface.co/HinaBl/Akatsuki/resolve/main/akatsuki_200epoch.zip\" #@param {type:\"string\"}\n",
|
||||
"!mkdir model_dir/$model_slot\n",
|
||||
"#@markdown Put your model's download link here `(must be a zip file)` only supports **weights.gg** & **huggingface.co**\n",
|
||||
"model_link = \"https://huggingface.co/HinaBl/Rin-Penrose/resolve/main/RinPenrose600.zip?download=true\" #@param {type:\"string\"}\n",
|
||||
"\n",
|
||||
"if model_link.startswith(\"https://www.weights.gg\") or model_link.startswith(\"https://weights.gg\"):\n",
|
||||
" weights_code = requests.get(\"https://pastebin.com/raw/ytHLr8h0\").text\n",
|
||||
" exec(weights_code)\n",
|
||||
"else:\n",
|
||||
" model_link = model_link\n",
|
||||
"\n",
|
||||
"model_link = '\"'+model_link+'\"'\n",
|
||||
"!curl -L $model_link > model.zip\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Conditionally set the iconFile based on whether icon_link is empty\n",
|
||||
"if icon_link:\n",
|
||||
" iconFile = \"icon.png\"\n",
|
||||
" !curl -L $icon_link > model_dir/$model_number/icon.png\n",
|
||||
" !curl -L $icon_link > model_dir/$model_slot/icon.png\n",
|
||||
"else:\n",
|
||||
" iconFile = \"\"\n",
|
||||
" print(\"icon_link is empty, so no icon file will be downloaded.\")\n",
|
||||
"#@markdown ---\n",
|
||||
"\n",
|
||||
"!unzip model.zip -d model_dir/$model_slot\n",
|
||||
"\n",
|
||||
"!unzip model.zip -d model_dir/$model_number\n",
|
||||
"\n",
|
||||
"# Checks all the files in model_number and puts it outside of it\n",
|
||||
"\n",
|
||||
"!mv model_dir/$model_number/*/* model_dir/$model_number/\n",
|
||||
"!rm -rf model_dir/$model_number/*/\n",
|
||||
"\n",
|
||||
"# if theres a folder in the number,\n",
|
||||
"# take all the files in the folder and put it outside of that folder\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"#@markdown #**Model Voice Convertion Setting**\n",
|
||||
"!mv model_dir/$model_slot/*/* model_dir/$model_slot/\n",
|
||||
"!rm -rf model_dir/$model_slot/*/\n",
|
||||
"#@markdown **Model Voice Convertion Setting**\n",
|
||||
"Tune = 12 #@param {type:\"slider\",min:-50,max:50,step:1}\n",
|
||||
"Index = 0 #@param {type:\"slider\",min:0,max:1,step:0.1}\n",
|
||||
"#@markdown ---\n",
|
||||
"#@markdown #Parameter Option `(Ignore if theres a Parameter File)`\n",
|
||||
"Slot_Index = -1 #@param [-1,0,1] {type:\"raw\"}\n",
|
||||
"Sampling_Rate = 48000 #@param [32000,40000,48000] {type:\"raw\"}\n",
|
||||
"\n",
|
||||
"# @markdown #**[Optional]** Parameter file for your voice model\n",
|
||||
"#@markdown _(must be named params.json)_ (Leave Empty for Default)\n",
|
||||
"param_link = \"\" #@param {type:\"string\"}\n",
|
||||
"param_link = \"\"\n",
|
||||
"if param_link == \"\":\n",
|
||||
" model_dir = \"model_dir/\"+model_number+\"/\"\n",
|
||||
" paramset = requests.get(\"https://pastebin.com/raw/SAKwUCt1\").text\n",
|
||||
" exec(paramset)\n",
|
||||
"\n",
|
||||
" # Find the .pth and .index files in the model_dir/0 directory\n",
|
||||
" pth_files = [f for f in os.listdir(model_dir) if f.endswith(\".pth\")]\n",
|
||||
" index_files = [f for f in os.listdir(model_dir) if f.endswith(\".index\")]\n",
|
||||
"\n",
|
||||
" if pth_files and index_files:\n",
|
||||
" # Take the first .pth and .index file as model and index names\n",
|
||||
" model_name = pth_files[0].replace(\".pth\", \"\")\n",
|
||||
" index_name = index_files[0].replace(\".index\", \"\")\n",
|
||||
" else:\n",
|
||||
" # Set default values if no .pth and .index files are found\n",
|
||||
" model_name = \"Null\"\n",
|
||||
" index_name = \"Null\"\n",
|
||||
"\n",
|
||||
" # Define the content for params.json\n",
|
||||
" params_content = {\n",
|
||||
" \"slotIndex\": Slot_Index,\n",
|
||||
" \"voiceChangerType\": \"RVC\",\n",
|
||||
" \"name\": model_name,\n",
|
||||
" \"description\": \"\",\n",
|
||||
" \"credit\": \"\",\n",
|
||||
" \"termsOfUseUrl\": \"\",\n",
|
||||
" \"iconFile\": iconFile,\n",
|
||||
" \"speakers\": {\n",
|
||||
" \"0\": \"target\"\n",
|
||||
" },\n",
|
||||
" \"modelFile\": f\"{model_name}.pth\",\n",
|
||||
" \"indexFile\": f\"{index_name}.index\",\n",
|
||||
" \"defaultTune\": Tune,\n",
|
||||
" \"defaultIndexRatio\": Index,\n",
|
||||
" \"defaultProtect\": 0.5,\n",
|
||||
" \"isONNX\": False,\n",
|
||||
" \"modelType\": \"pyTorchRVCv2\",\n",
|
||||
" \"samplingRate\": Sampling_Rate,\n",
|
||||
" \"f0\": True,\n",
|
||||
" \"embChannels\": 768,\n",
|
||||
" \"embOutputLayer\": 12,\n",
|
||||
" \"useFinalProj\": False,\n",
|
||||
" \"deprecated\": False,\n",
|
||||
" \"embedder\": \"hubert_base\",\n",
|
||||
" \"sampleId\": \"\"\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" # Write the content to params.json\n",
|
||||
" with open(f\"{model_dir}/params.json\", \"w\") as param_file:\n",
|
||||
" json.dump(params_content, param_file)\n",
|
||||
"\n",
|
||||
"# !unzip model.zip -d model_dir/0/\n",
|
||||
"clear_output()\n",
|
||||
"print(\"\\033[92mModel with the name of \"+model_name+\" has been Imported!\")\n"
|
||||
"print(\"\\033[93mModel with the name of \"+model_name+\" has been Imported to slot \"+model_slot)"
|
||||
],
|
||||
"metadata": {
|
||||
"cellView": "form",
|
||||
"id": "_ZtbKUVUgN3G"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"#@title Delete a model\n",
|
||||
"#@markdown ---\n",
|
||||
"#@markdown Select which slot you want to delete\n",
|
||||
"Delete_Slot = \"0\" #@param ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199']\n",
|
||||
"{type:\"slider\",min:0,max:1,step:0.1}\n",
|
||||
"\n",
|
||||
"!rm -rf model_dir/$Model_Number\n",
|
||||
"print(\"\\033[92mSuccessfully removed Model is slot \"+Delete_Slot)\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "P9g6rG1-KUwt"
|
||||
"id": "_ZtbKUVUgN3G",
|
||||
"cellView": "form"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
@ -279,71 +226,79 @@
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "lLWQuUd7WW9U"
|
||||
"id": "lLWQuUd7WW9U",
|
||||
"cellView": "form"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# @title **[2]** Start Server **using ngrok** (Recommended | **need a ngrok account**)\n",
|
||||
"\n",
|
||||
"#=======================Updated=========================\n",
|
||||
"\n",
|
||||
"# @title Start Server **using ngrok**\n",
|
||||
"# @markdown This cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n",
|
||||
"\n",
|
||||
"# @markdown ---\n",
|
||||
"# @markdown You'll need a ngrok account, but **it's free**!\n",
|
||||
"# @markdown You'll need a ngrok account, but <font color=green>**it's free**</font> and easy to create!\n",
|
||||
"# @markdown ---\n",
|
||||
"# @markdown **1** - Create a **free** account at [ngrok](https://dashboard.ngrok.com/signup)\\\n",
|
||||
"# @markdown **2** - If you didn't logged in with Google or Github, you will need to **verify your e-mail**!\\\n",
|
||||
"# @markdown **3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, copy it and place it here:\n",
|
||||
"from pyngrok import conf, ngrok\n",
|
||||
"\n",
|
||||
"f0_det= \"rmvpe_onnx\" #@param [\"rmvpe_onnx\",\"rvc\"]\n",
|
||||
"Token = 'YOUR_TOKEN_HERE' # @param {type:\"string\"}\n",
|
||||
"# @markdown **4** - Still need further tests, but maybe region can help a bit on latency?\\\n",
|
||||
"# @markdown **1** - Create a <font color=green>**free**</font> account at [ngrok](https://dashboard.ngrok.com/signup) or **login with Google/Github account**\\\n",
|
||||
"# @markdown **2** - If you didn't logged in with Google/Github, you will need to **verify your e-mail**!\\\n",
|
||||
"# @markdown **3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, and place it here:\n",
|
||||
"Token = 'TOKEN_HERE' # @param {type:\"string\"}\n",
|
||||
"# @markdown **4** - *(optional)* Change to a region near to you or keep at United States if increase latency\\\n",
|
||||
"# @markdown `Default Region: us - United States (Ohio)`\n",
|
||||
"Region = \"ap - Asia/Pacific (Singapore)\" # @param [\"ap - Asia/Pacific (Singapore)\", \"au - Australia (Sydney)\",\"eu - Europe (Frankfurt)\", \"in - India (Mumbai)\",\"jp - Japan (Tokyo)\",\"sa - South America (Sao Paulo)\", \"us - United States (Ohio)\"]\n",
|
||||
"MyConfig = conf.PyngrokConfig()\n",
|
||||
"Region = \"us - United States (Ohio)\" # @param [\"ap - Asia/Pacific (Singapore)\", \"au - Australia (Sydney)\",\"eu - Europe (Frankfurt)\", \"in - India (Mumbai)\",\"jp - Japan (Tokyo)\",\"sa - South America (Sao Paulo)\", \"us - United States (Ohio)\"]\n",
|
||||
"\n",
|
||||
"#@markdown **5** - *(optional)* Other options:\n",
|
||||
"ClearConsole = True # @param {type:\"boolean\"}\n",
|
||||
"Play_Notification = True # @param {type:\"boolean\"}\n",
|
||||
"\n",
|
||||
"# ---------------------------------\n",
|
||||
"# DO NOT TOUCH ANYTHING DOWN BELOW!\n",
|
||||
"# ---------------------------------\n",
|
||||
"\n",
|
||||
"%cd $pathloc/server/\n",
|
||||
"\n",
|
||||
"from pyngrok import conf, ngrok\n",
|
||||
"MyConfig = conf.PyngrokConfig()\n",
|
||||
"MyConfig.auth_token = Token\n",
|
||||
"MyConfig.region = Region[0:2]\n",
|
||||
"\n",
|
||||
"conf.get_default().authtoken = Token\n",
|
||||
"conf.get_default().region = Region[0:2]\n",
|
||||
"\n",
|
||||
"#conf.get_default().authtoken = Token\n",
|
||||
"#conf.get_default().region = Region\n",
|
||||
"conf.set_default(MyConfig);\n",
|
||||
"\n",
|
||||
"# @markdown ---\n",
|
||||
"# @markdown If you want to automatically clear the output when the server loads, check this option.\n",
|
||||
"Clear_Output = True # @param {type:\"boolean\"}\n",
|
||||
"\n",
|
||||
"mainpy=codecs.decode('ZZIPFreireFVB.cl','rot_13')\n",
|
||||
"\n",
|
||||
"import portpicker, socket, urllib.request\n",
|
||||
"PORT = portpicker.pick_unused_port()\n",
|
||||
"import subprocess, threading, time, socket, urllib.request\n",
|
||||
"PORT = 8000\n",
|
||||
"\n",
|
||||
"from pyngrok import ngrok\n",
|
||||
"# Edited ⏬⏬\n",
|
||||
"ngrokConnection = ngrok.connect(PORT)\n",
|
||||
"public_url = ngrokConnection.public_url\n",
|
||||
"\n",
|
||||
"def iframe_thread(port):\n",
|
||||
"from IPython.display import clear_output\n",
|
||||
"from IPython.display import Audio, display\n",
|
||||
"def play_notification_sound():\n",
|
||||
" display(Audio(url='https://raw.githubusercontent.com/hinabl/rmvpe-ai-kaggle/main/custom/audios/notif.mp3', autoplay=True))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def wait_for_server():\n",
|
||||
" while True:\n",
|
||||
" time.sleep(0.5)\n",
|
||||
" sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
|
||||
" result = sock.connect_ex(('127.0.0.1', port))\n",
|
||||
" result = sock.connect_ex(('127.0.0.1', PORT))\n",
|
||||
" if result == 0:\n",
|
||||
" break\n",
|
||||
" sock.close()\n",
|
||||
" if ClearConsole:\n",
|
||||
" clear_output()\n",
|
||||
" print(\"------- SERVER READY! -------\")\n",
|
||||
" print(\"--------- SERVER READY! ---------\")\n",
|
||||
" print(\"Your server is available at:\")\n",
|
||||
" print(public_url)\n",
|
||||
" print(\"-----------------------------\")\n",
|
||||
" # display(Javascript('window.open(\"{url}\", \\'_blank\\');'.format(url=public_url)))\n",
|
||||
" print(\"---------------------------------\")\n",
|
||||
" if Play_Notification==True:\n",
|
||||
" play_notification_sound()\n",
|
||||
"\n",
|
||||
"print(PORT)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"threading.Thread(target=iframe_thread, daemon=True, args=(PORT,)).start()\n",
|
||||
"threading.Thread(target=wait_for_server, daemon=True).start()\n",
|
||||
"\n",
|
||||
"mainpy=codecs.decode('ZZIPFreireFVB.cl','rot_13')\n",
|
||||
"\n",
|
||||
"!python3 $mainpy \\\n",
|
||||
" -p {PORT} \\\n",
|
||||
@ -360,74 +315,27 @@
|
||||
" --rmvpe pretrain/rmvpe.pt \\\n",
|
||||
" --model_dir model_dir \\\n",
|
||||
" --samples samples.json\n",
|
||||
"\n"
|
||||
"\n",
|
||||
"ngrok.disconnect(ngrokConnection.public_url)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# @title **[Optional]** Start Server **using localtunnel** (ngrok alternative | no account needed)\n",
|
||||
"# @markdown This cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n",
|
||||
"\n",
|
||||
"# @markdown ---\n",
|
||||
"!npm config set update-notifier false\n",
|
||||
"!npm install -g localtunnel\n",
|
||||
"print(\"\\033[92mLocalTunnel installed!\")\n",
|
||||
"# @markdown If you want to automatically clear the output when the server loads, check this option.\n",
|
||||
"Clear_Output = True # @param {type:\"boolean\"}\n",
|
||||
"\n",
|
||||
"import portpicker, subprocess, threading, time, socket, urllib.request\n",
|
||||
"PORT = portpicker.pick_unused_port()\n",
|
||||
"\n",
|
||||
"from IPython.display import clear_output, Javascript\n",
|
||||
"\n",
|
||||
"def iframe_thread(port):\n",
|
||||
" while True:\n",
|
||||
" time.sleep(0.5)\n",
|
||||
" sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
|
||||
" result = sock.connect_ex(('127.0.0.1', port))\n",
|
||||
" if result == 0:\n",
|
||||
" break\n",
|
||||
" sock.close()\n",
|
||||
" clear_output()\n",
|
||||
" print(\"Use the following endpoint to connect to localtunnel:\", urllib.request.urlopen('https://ipv4.icanhazip.com').read().decode('utf8').strip(\"\\n\"))\n",
|
||||
" p = subprocess.Popen([\"lt\", \"--port\", \"{}\".format(port)], stdout=subprocess.PIPE)\n",
|
||||
" for line in p.stdout:\n",
|
||||
" print(line.decode(), end='')\n",
|
||||
"\n",
|
||||
"threading.Thread(target=iframe_thread, daemon=True, args=(PORT,)).start()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"!python3 MMVCServerSIO.py \\\n",
|
||||
" -p {PORT} \\\n",
|
||||
" --https False \\\n",
|
||||
" --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \\\n",
|
||||
" --content_vec_500_onnx pretrain/content_vec_500.onnx \\\n",
|
||||
" --content_vec_500_onnx_on true \\\n",
|
||||
" --hubert_base pretrain/hubert_base.pt \\\n",
|
||||
" --hubert_base_jp pretrain/rinna_hubert_base_jp.pt \\\n",
|
||||
" --hubert_soft pretrain/hubert/hubert-soft-0d54a1f4.pt \\\n",
|
||||
" --nsf_hifigan pretrain/nsf_hifigan/model \\\n",
|
||||
" --crepe_onnx_full pretrain/crepe_onnx_full.onnx \\\n",
|
||||
" --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \\\n",
|
||||
" --rmvpe pretrain/rmvpe.pt \\\n",
|
||||
" --model_dir model_dir \\\n",
|
||||
" --samples samples.json \\\n",
|
||||
" --colab True"
|
||||
"![](https://i.pinimg.com/474x/de/72/9e/de729ecfa41b69901c42c82fff752414.jpg)\n",
|
||||
"![](https://i.pinimg.com/474x/de/72/9e/de729ecfa41b69901c42c82fff752414.jpg)"
|
||||
],
|
||||
"metadata": {
|
||||
"cellView": "form",
|
||||
"id": "ZwZaCf4BeZi2"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
"id": "2Uu1sTSwTc7q"
|
||||
}
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"private_outputs": true,
|
||||
"gpuType": "T4"
|
||||
"gpuType": "T4",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",

@ -119,6 +119,8 @@ To run docker, see [start docker](docker_vcclient/README_en.md).

To run on Anaconda venv, see [server developer's guide](README_dev_en.md)

To run on Linux using an AMD GPU, see [setup guide linux](tutorials/tutorial_anaconda_amd_rocm.md)

# Real-time performance

Conversion is almost instantaneous when using GPU.

@ -79,6 +79,14 @@
"created_at": "2023-11-18T10:51:05Z",
"repoId": 527419347,
"pullRequestNo": 1006
},
{
"name": "shdancer",
"id": 62684554,
"comment_id": 1825247691,
"created_at": "2023-11-24T07:25:45Z",
"repoId": 527419347,
"pullRequestNo": 1017
}
]
}
BIN tutorials/images/amd_gpu_select.png (new file, binary not shown; after: 62 KiB)
BIN tutorials/images/wine_device.png (new file, binary not shown; after: 94 KiB)
108 tutorials/tutorial_anaconda_amd_rocm.md (new file)
@ -0,0 +1,108 @@
# Voice Changer for AMD GPUs under Linux

## Introduction

At the moment, there are significant challenges in using machine learning solutions with an AMD GPU under Windows due to the lack of driver support. While AMD has released ROCm for newer GPUs, there is still no MIOpen release for Windows. Without MIOpen, there won't be a PyTorch release. DirectML is currently the only hardware-independent solution, but it offers poor performance and requires ONNX models, which cannot load an index.

Fortunately, AMD has good driver support under Linux, and with ROCm you can use the CUDA implementation of the voice changer, resulting in a significant performance improvement. You'll be able to use standard models, including index files. While Linux is not typically associated with gaming, tools like [Steam Proton](https://www.protondb.com/), [Lutris](https://lutris.net/), and [Wine](https://www.winehq.org/) enable you to play most games on Linux.

**Benchmark with Radeon RX 7900 XTX:**
- DirectML: Chunk 112 with Extra 8192 (using rmvpe_onnx)
- CUDA: Chunk 48 with Extra 131072 (using rmvpe)

## Prerequisites

### AMDGPU Driver and ROCm

First, you need to install the appropriate drivers on your system. Most distributions allow easy driver installation through the package manager. Alternatively, you can download the driver directly from the [AMD website](https://www.amd.com/en/support): select "Graphics", choose your GPU, and download the version compatible with your distribution, then install it with your package manager by referencing the downloaded file.

Next, install ROCm following the [official AMD guide](https://rocm.docs.amd.com/en/latest/deploy/linux/index.html). You can install the package using the [package manager](https://rocm.docs.amd.com/en/latest/deploy/linux/os-native/install.html) or by using the [AMDGPU Install script](https://rocm.docs.amd.com/en/latest/deploy/linux/installer/index.html).
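Once the driver and ROCm are installed, it is worth confirming that the GPU is actually visible to the runtime before going any further. Below is a minimal sanity check, assuming the standard ROCm command-line utilities (`rocminfo`, `rocm-smi`) were installed by the packages above:

```bash
# List detected agents; your GPU architecture (e.g. gfx1100 for an RX 7900 XTX) should show up
rocminfo | grep -i gfx

# Show driver and utilization information for the card
rocm-smi

# If no device is reported, check that your user is in the groups the ROCm guide
# mentions (typically render and video), then log out and back in
sudo usermod -aG render,video $USER
```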

### Anaconda (optional)

The second dependency is optional but recommended. Anaconda can be used to manage Python packages and environments, preventing dependency conflicts when several programs share the same libraries. Many distributions allow Anaconda installation through the [package manager](https://docs.anaconda.com/free/anaconda/install/linux/). Alternatively, download the package from the [Anaconda website](https://www.anaconda.com/download) and run the installer manually:

```bash
cd ~/Downloads
chmod u+x Anaconda3-xxx-Linux-x86_64.sh
./Anaconda3-xxx-Linux-x86_64.sh
```

## Setup Environment
Now create a new environment, download the voice changer, and set up the dependencies. First create the new environment using conda and specify a Python version. Python 3.10.9 works well with ROCm 5.7; for other versions, check the PyTorch documentation:

```bash
conda create --name voicechanger python=3.10.9
```

Activate the environment to install dependencies within it:

```bash
conda activate voicechanger
```

Next, create a new directory and clone the GitHub repository. With this approach you don't need to download a release from Hugging Face.

```bash
mkdir ~/Documents/voicechanger
cd ~/Documents/voicechanger
git clone https://github.com/w-okada/voice-changer.git
```

## Install Dependencies

After downloading the repository, install all dependencies. Start with PyTorch for ROCm: AMD provides a regularly updated [guide](https://rocm.docs.amd.com/projects/radeon/en/latest/docs/install/install-pytorch.html) for installing the correct PyTorch version. Begin by downloading the Torch and Torchvision wheels; the remaining steps of that guide are already covered by the conda environment created above:

```bash
# The wheel versions can vary based on your GPU and the current ROCm release
wget https://repo.radeon.com/rocm/manylinux/rocm-rel-5.7/torch-2.0.1%2Brocm5.7-cp310-cp310-linux_x86_64.whl
wget https://repo.radeon.com/rocm/manylinux/rocm-rel-5.7/torchvision-0.15.2%2Brocm5.7-cp310-cp310-linux_x86_64.whl
```

Your directory should now look like this:

```bash
$ ls
torch-2.0.1+rocm5.7-cp310-cp310-linux_x86_64.whl
torchvision-0.15.2+rocm5.7-cp310-cp310-linux_x86_64.whl
voice-changer
```

Now, install PyTorch within the environment:

```bash
pip3 install --force-reinstall torch-2.0.1+rocm5.7-cp310-cp310-linux_x86_64.whl torchvision-0.15.2+rocm5.7-cp310-cp310-linux_x86_64.whl
```
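To confirm the ROCm build was picked up, you can run a quick check inside the activated `voicechanger` environment; ROCm builds of PyTorch reuse the CUDA API, so `torch.cuda.is_available()` should report `True` and `torch.version.hip` should be set:

```bash
python3 -c "import torch; print(torch.__version__, torch.version.hip); print(torch.cuda.is_available())"
```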

To run the voice changer, install the remaining dependencies using pip. Navigate to the server directory and install the packages listed in requirements.txt:

```bash
cd ~/Documents/voicechanger/voice-changer/server
pip install -r requirements.txt
```

## Start the server
After installing the dependencies, run the server using the MMVCServerSIO.py file:

```bash
python3 MMVCServerSIO.py
```

The server will download all required models and start. You can then use the voice changer through the WebUI by opening http://127.0.0.1:18888/. You can select your GPU from the menu.

![image](images/amd_gpu_select.png)
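If you need to change the port or point the server at specific pretrained files, the Colab notebook in this same commit starts the script with explicit flags. A trimmed-down sketch of that invocation (the paths assume the default `server/pretrain` layout; adjust them to your setup):

```bash
python3 MMVCServerSIO.py \
  -p 18888 \
  --https False \
  --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \
  --hubert_base pretrain/hubert_base.pt \
  --rmvpe pretrain/rmvpe.pt \
  --model_dir model_dir \
  --samples samples.json
```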

## Configure Audio Loopback
In the last step, create a virtual audio device that redirects the web UI's output to an input, which can then be used as a microphone in other applications.

Most distributions use PulseAudio by default, and you can build an audio loopback out of two virtual devices. There is a [guide](https://github.com/NapoleonWils0n/cerberus/blob/master/pulseaudio/virtual-mic.org) for setting up virtual audio devices: at the top of that document you'll find a temporary setup, while creating the default.pa config makes the devices permanent.

The default names of the audio devices are:
- Input: Virtual Source VirtualMic on Monitor of NullOutput
- Output: Null Output
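As a rough illustration of the temporary setup (the exact module arguments used in the linked guide may differ), the loopback boils down to a null sink plus a virtual source that reads from the sink's monitor. The names below (`NullOutput`, `VirtualMic`) are only examples chosen to match the defaults listed above:

```bash
# Temporary setup: disappears after a reboot or "pactl unload-module"
# 1. Null sink: pick "Null Output" as the output device in the voice changer web UI
pactl load-module module-null-sink sink_name=NullOutput sink_properties=device.description=NullOutput

# 2. Virtual source fed from the null sink's monitor: pick it as the microphone in your application
pactl load-module module-virtual-source source_name=VirtualMic master=NullOutput.monitor

# For a permanent setup, add the same load-module lines to a default.pa as described in the guide
```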

In most applications, you can select the audio device as input. If you use Wine or Lutris and want to use the microphone within those environments, you need to add the device to your Wine configuration.

![image](images/wine_device.png)