Mirror of https://github.com/w-okada/voice-changer.git (synced 2025-02-02 16:23:58 +03:00)

Commit 702d468d2f

1 changed file: Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb (normal file)

File diff suppressed because one or more lines are too long
@@ -1,6 +1,6 @@
 {
 "cells": [
 {
 "cell_type": "markdown",
 "metadata": {
 "id": "view-in-github",
@@ -30,7 +30,8 @@
 "> Seems that PTH models performance better than ONNX for now, you can still try ONNX models and see if it satisfies you\n",
 "\n",
 "\n",
-"*You can always [click here](https://github.com/YunaOneeChan/Voice-Changer-Settings) to check if these settings are up-to-date*\n",
+"*You can always [click here](https://rentry.co/VoiceChangerGuide#gpu-chart-for-known-working-chunkextra\n",
+") to check if these settings are up-to-date*\n",
 "<br><br>\n",
 "\n",
 "---\n",
@@ -46,7 +47,7 @@
 "# **Credits and Support**\n",
 "Realtime Voice Changer by [w-okada](https://github.com/w-okada)\\\n",
 "Colab files updated by [rafacasari](https://github.com/Rafacasari)\\\n",
-"Recommended settings by [YunaOneeChan](https://github.com/YunaOneeChan)\\\n",
+"Recommended settings by [Raven](https://github.com/ravencutie21)\\\n",
 "Modified again by [Hina](https://huggingface.co/HinaBl)\n",
 "\n",
 "Need help? [AI Hub Discord](https://discord.gg/aihub) » ***#help-realtime-vc***\n",
@@ -54,26 +55,6 @@
 "---"
 ]
 },
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {
-"cellView": "form",
-"id": "RhdqDSt-LfGr"
-},
-"outputs": [],
-"source": [
-"# @title **[Optional]** Connect to Google Drive\n",
-"# @markdown Using Google Drive can improve load times a bit and your models will be stored, so you don't need to re-upload every time that you use.\n",
-"import os\n",
-"from google.colab import drive\n",
-"\n",
-"if not os.path.exists('/content/drive'):\n",
-" drive.mount('/content/drive')\n",
-"\n",
-"%cd /content/drive/MyDrive"
-]
-},
 {
 "cell_type": "code",
 "execution_count": null,
@@ -83,8 +64,9 @@
 },
 "outputs": [],
 "source": [
+"#=================Updated=================\n",
 "# @title **[1]** Clone repository and install dependencies\n",
-"# @markdown This first step will download the latest version of Voice Changer and install the dependencies. **It will take around 2 minutes to complete.**\n",
+"# @markdown This first step will download the latest version of Voice Changer and install the dependencies. **It can take some time to complete.**\n",
 "import os\n",
 "import time\n",
 "import subprocess\n",
@@ -93,12 +75,28 @@
 "import base64\n",
 "import codecs\n",
 "\n",
-"from IPython.display import clear_output, Javascript\n",
+"\n",
+"\n",
+"#@markdown ---\n",
+"# @title **[Optional]** Connect to Google Drive\n",
+"# @markdown Using Google Drive can improve load times a bit and your models will be stored, so you don't need to re-upload every time that you use.\n",
+"\n",
+"Use_Drive=False #@param {type:\"boolean\"}\n",
+"\n",
+"from google.colab import drive\n",
+"\n",
+"if Use_Drive==True:\n",
+" if not os.path.exists('/content/drive'):\n",
+" drive.mount('/content/drive')\n",
+"\n",
+" %cd /content/drive/MyDrive\n",
+"\n",
 "\n",
 "externalgit=codecs.decode('uggcf://tvguho.pbz/j-bxnqn/ibvpr-punatre.tvg','rot_13')\n",
 "rvctimer=codecs.decode('uggcf://tvguho.pbz/uvanoy/eipgvzre.tvg','rot_13')\n",
 "pathloc=codecs.decode('ibvpr-punatre','rot_13')\n",
-"!git clone --depth 1 $externalgit &> /dev/null\n",
+"\n",
+"from IPython.display import clear_output, Javascript\n",
 "\n",
 "def update_timer_and_print():\n",
 " global timer\n",
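
For reference, the `codecs.decode(..., 'rot_13')` calls kept in the hunk above only ROT13-decode obfuscated strings; a minimal sketch of what they resolve to (the decoded repository URL matches the mirror source listed at the top of this page):

```python
import codecs

# ROT13-decode the obfuscated strings used in the install cell above.
externalgit = codecs.decode('uggcf://tvguho.pbz/j-bxnqn/ibvpr-punatre.tvg', 'rot_13')
pathloc = codecs.decode('ibvpr-punatre', 'rot_13')

print(externalgit)  # https://github.com/w-okada/voice-changer.git
print(pathloc)      # voice-changer
```
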
@@ -112,165 +110,114 @@
 "timer = 0\n",
 "threading.Thread(target=update_timer_and_print, daemon=True).start()\n",
 "\n",
-"# os.system('cls')\n",
+"!pip install colorama --quiet\n",
-"clear_output()\n",
+"from colorama import Fore, Style\n",
-"!rm -rf rvctimer\n",
-"!git clone --depth 1 $rvctimer\n",
-"!cp -f rvctimer/index.html $pathloc/client/demo/dist/\n",
-"\n",
 "\n",
+"print(f\"{Fore.CYAN}> Cloning the repository...{Style.RESET_ALL}\")\n",
+"!git clone --depth 1 $externalgit &> /dev/null\n",
+"print(f\"{Fore.GREEN}> Successfully cloned the repository!{Style.RESET_ALL}\")\n",
 "%cd $pathloc/server/\n",
 "\n",
-"print(\"\\033[92mSuccessfully cloned the repository\")\n",
+"# Read the content of the file\n",
+"file_path = '../client/demo/dist/assets/gui_settings/version.txt'\n",
+"\n",
+"with open(file_path, 'r') as file:\n",
+" file_content = file.read()\n",
+"\n",
+"# Replace the specific text\n",
+"text_to_replace = \"-.-.-.-\"\n",
+"new_text = \"Google.Colab\" # New text to replace the specific text\n",
+"\n",
+"modified_content = file_content.replace(text_to_replace, new_text)\n",
+"\n",
+"# Write the modified content back to the file\n",
+"with open(file_path, 'w') as file:\n",
+" file.write(modified_content)\n",
+"\n",
+"print(f\"Text '{text_to_replace}' has been replaced with '{new_text}' in the file.\")\n",
+"\n",
+"print(f\"{Fore.CYAN}> Installing libportaudio2...{Style.RESET_ALL}\")\n",
+"!apt-get -y install libportaudio2 -qq\n",
+"\n",
+"!sed -i '/torch==/d' requirements.txt\n",
+"!sed -i '/torchaudio==/d' requirements.txt\n",
+"!sed -i '/numpy==/d' requirements.txt\n",
 "\n",
 "\n",
-"\n",
+"print(f\"{Fore.CYAN}> Installing pre-dependencies...{Style.RESET_ALL}\")\n",
-"!apt-get install libportaudio2 &> /dev/null --quiet\n",
+"# Install dependencies that are missing from requirements.txt and pyngrok\n",
-"!pip install pyworld onnxruntime-gpu uvicorn faiss-gpu fairseq jedi google-colab moviepy decorator==4.4.2 sounddevice numpy==1.23.5 pyngrok --quiet\n",
+"!pip install faiss-gpu fairseq pyngrok --quiet\n",
-"print(\"\\033[92mInstalling Requirements!\")\n",
+"!pip install pyworld --no-build-isolation --quiet\n",
+"# Install webstuff\n",
+"import asyncio\n",
+"import re\n",
+"!pip install playwright\n",
+"!playwright install\n",
+"!playwright install-deps\n",
+"!pip install nest_asyncio\n",
+"from playwright.async_api import async_playwright\n",
+"print(f\"{Fore.CYAN}> Installing dependencies from requirements.txt...{Style.RESET_ALL}\")\n",
+"!pip install -r requirements.txt --quiet\n",
 "clear_output()\n",
-"!pip install -r requirements.txt --no-build-isolation --quiet\n",
+"print(f\"{Fore.GREEN}> Successfully installed all packages!{Style.RESET_ALL}\")"
-"# Maybe install Tensor packages?\n",
-"#!pip install torch-tensorrt\n",
-"#!pip install TensorRT\n",
-"print(\"\\033[92mSuccessfully installed all packages!\")\n",
-"# os.system('cls')\n",
-"clear_output()\n",
-"print(\"\\033[92mFinished, please continue to the next cell\")"
 ]
 },
 {
 "cell_type": "code",
 "source": [
-"\n",
+"#@title **[Optional]** Upload a voice model (Run this before running the Voice Changer)\n",
-"#@title #**[Optional]** Upload a voice model (Run this before running the Voice Changer)**[Currently Under Construction]**\n",
-"#@markdown ---\n",
 "import os\n",
 "import json\n",
+"from IPython.display import Image\n",
+"import requests\n",
 "\n",
+"model_slot = \"0\" #@param ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199']\n",
 "\n",
-"#@markdown #Model Number `(Default is 0)` you can add multiple models as long as you change the number!\n",
+"!rm -rf model_dir/$model_slot\n",
-"model_number = \"0\" #@param ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199']\n",
+"#@markdown **[Optional]** Add an icon to the model\n",
-"\n",
+"icon_link = \"https://cdn.donmai.us/sample/12/57/__rin_penrose_idol_corp_drawn_by_juu_ame__sample-12579843de9487cf2db82058ba5e77d4.jpg\" #@param {type:\"string\"}\n",
-"!rm -rf model_dir/$model_number\n",
-"#@markdown ---\n",
-"#@markdown #**[Optional]** Add an icon to the model `(can be any image/leave empty for no image)`\n",
-"icon_link = \"https://cdn.donmai.us/original/8a/92/8a924397e9aac922e94bdc1f28ff978a.jpg\" #@param {type:\"string\"}\n",
-"#@markdown ---\n",
 "icon_link = '\"'+icon_link+'\"'\n",
 "!mkdir model_dir\n",
-"!mkdir model_dir/$model_number\n",
+"!mkdir model_dir/$model_slot\n",
-"#@markdown #Put your model's download link here `(must be a zip file)`\n",
+"#@markdown Put your model's download link here `(must be a zip file)` only supports **weights.gg** & **huggingface.co**\n",
-"model_link = \"https://huggingface.co/HinaBl/Akatsuki/resolve/main/akatsuki_200epoch.zip\" #@param {type:\"string\"}\n",
+"model_link = \"https://huggingface.co/HinaBl/Rin-Penrose/resolve/main/RinPenrose600.zip?download=true\" #@param {type:\"string\"}\n",
+"\n",
+"if model_link.startswith(\"https://www.weights.gg\") or model_link.startswith(\"https://weights.gg\"):\n",
+" weights_code = requests.get(\"https://pastebin.com/raw/ytHLr8h0\").text\n",
+" exec(weights_code)\n",
+"else:\n",
+" model_link = model_link\n",
+"\n",
 "model_link = '\"'+model_link+'\"'\n",
 "!curl -L $model_link > model.zip\n",
 "\n",
-"\n",
 "# Conditionally set the iconFile based on whether icon_link is empty\n",
 "if icon_link:\n",
 " iconFile = \"icon.png\"\n",
-" !curl -L $icon_link > model_dir/$model_number/icon.png\n",
+" !curl -L $icon_link > model_dir/$model_slot/icon.png\n",
 "else:\n",
+" iconFile = \"\"\n",
 " print(\"icon_link is empty, so no icon file will be downloaded.\")\n",
-"#@markdown ---\n",
 "\n",
+"!unzip model.zip -d model_dir/$model_slot\n",
 "\n",
-"!unzip model.zip -d model_dir/$model_number\n",
+"!mv model_dir/$model_slot/*/* model_dir/$model_slot/\n",
-"\n",
+"!rm -rf model_dir/$model_slot/*/\n",
-"# Checks all the files in model_number and puts it outside of it\n",
+"#@markdown **Model Voice Convertion Setting**\n",
-"\n",
-"!mv model_dir/$model_number/*/* model_dir/$model_number/\n",
-"!rm -rf model_dir/$model_number/*/\n",
-"\n",
-"# if theres a folder in the number,\n",
-"# take all the files in the folder and put it outside of that folder\n",
-"\n",
-"\n",
-"#@markdown #**Model Voice Convertion Setting**\n",
 "Tune = 12 #@param {type:\"slider\",min:-50,max:50,step:1}\n",
 "Index = 0 #@param {type:\"slider\",min:0,max:1,step:0.1}\n",
-"#@markdown ---\n",
-"#@markdown #Parameter Option `(Ignore if theres a Parameter File)`\n",
-"Slot_Index = -1 #@param [-1,0,1] {type:\"raw\"}\n",
-"Sampling_Rate = 48000 #@param [32000,40000,48000] {type:\"raw\"}\n",
 "\n",
-"# @markdown #**[Optional]** Parameter file for your voice model\n",
+"param_link = \"\"\n",
-"#@markdown _(must be named params.json)_ (Leave Empty for Default)\n",
-"param_link = \"\" #@param {type:\"string\"}\n",
 "if param_link == \"\":\n",
-" model_dir = \"model_dir/\"+model_number+\"/\"\n",
+" paramset = requests.get(\"https://pastebin.com/raw/SAKwUCt1\").text\n",
+" exec(paramset)\n",
 "\n",
-" # Find the .pth and .index files in the model_dir/0 directory\n",
-" pth_files = [f for f in os.listdir(model_dir) if f.endswith(\".pth\")]\n",
-" index_files = [f for f in os.listdir(model_dir) if f.endswith(\".index\")]\n",
-"\n",
-" if pth_files and index_files:\n",
-" # Take the first .pth and .index file as model and index names\n",
-" model_name = pth_files[0].replace(\".pth\", \"\")\n",
-" index_name = index_files[0].replace(\".index\", \"\")\n",
-" else:\n",
-" # Set default values if no .pth and .index files are found\n",
-" model_name = \"Null\"\n",
-" index_name = \"Null\"\n",
-"\n",
-" # Define the content for params.json\n",
-" params_content = {\n",
-" \"slotIndex\": Slot_Index,\n",
-" \"voiceChangerType\": \"RVC\",\n",
-" \"name\": model_name,\n",
-" \"description\": \"\",\n",
-" \"credit\": \"\",\n",
-" \"termsOfUseUrl\": \"\",\n",
-" \"iconFile\": iconFile,\n",
-" \"speakers\": {\n",
-" \"0\": \"target\"\n",
-" },\n",
-" \"modelFile\": f\"{model_name}.pth\",\n",
-" \"indexFile\": f\"{index_name}.index\",\n",
-" \"defaultTune\": Tune,\n",
-" \"defaultIndexRatio\": Index,\n",
-" \"defaultProtect\": 0.5,\n",
-" \"isONNX\": False,\n",
-" \"modelType\": \"pyTorchRVCv2\",\n",
-" \"samplingRate\": Sampling_Rate,\n",
-" \"f0\": True,\n",
-" \"embChannels\": 768,\n",
-" \"embOutputLayer\": 12,\n",
-" \"useFinalProj\": False,\n",
-" \"deprecated\": False,\n",
-" \"embedder\": \"hubert_base\",\n",
-" \"sampleId\": \"\"\n",
-" }\n",
-"\n",
-" # Write the content to params.json\n",
-" with open(f\"{model_dir}/params.json\", \"w\") as param_file:\n",
-" json.dump(params_content, param_file)\n",
-"\n",
-"# !unzip model.zip -d model_dir/0/\n",
 "clear_output()\n",
-"print(\"\\033[92mModel with the name of \"+model_name+\" has been Imported!\")\n"
+"print(\"\\033[93mModel with the name of \"+model_name+\" has been Imported to slot \"+model_slot)"
 ],
 "metadata": {
-"cellView": "form",
+"id": "_ZtbKUVUgN3G",
-"id": "_ZtbKUVUgN3G"
+"cellView": "form"
-},
-"execution_count": null,
-"outputs": []
-},
-{
-"cell_type": "code",
-"source": [
-"#@title Delete a model\n",
-"#@markdown ---\n",
-"#@markdown Select which slot you want to delete\n",
-"Delete_Slot = \"0\" #@param ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199']\n",
-"{type:\"slider\",min:0,max:1,step:0.1}\n",
-"\n",
-"!rm -rf model_dir/$Model_Number\n",
-"print(\"\\033[92mSuccessfully removed Model is slot \"+Delete_Slot)\n"
-],
-"metadata": {
-"id": "P9g6rG1-KUwt"
 },
 "execution_count": null,
 "outputs": []
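
The hunk above swaps the inline params.json generation for a snippet fetched from pastebin (`exec(paramset)`). As a reference for what that file contained, here is a minimal sketch assembled from the removed lines; the helper name and default values are illustrative, not part of the notebook:

```python
import json
import os

def write_params(model_dir: str, model_name: str, index_name: str,
                 tune: int = 12, index_ratio: float = 0.0) -> None:
    # Write a params.json similar to the one the removed cell produced for an RVC v2 model slot.
    params_content = {
        "slotIndex": -1,
        "voiceChangerType": "RVC",
        "name": model_name,
        "modelFile": f"{model_name}.pth",
        "indexFile": f"{index_name}.index",
        "defaultTune": tune,
        "defaultIndexRatio": index_ratio,
        "defaultProtect": 0.5,
        "isONNX": False,
        "modelType": "pyTorchRVCv2",
        "samplingRate": 48000,
        "embedder": "hubert_base",
    }
    with open(os.path.join(model_dir, "params.json"), "w") as param_file:
        json.dump(params_content, param_file)
```
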
@@ -279,71 +226,79 @@
 "cell_type": "code",
 "execution_count": null,
 "metadata": {
-"id": "lLWQuUd7WW9U"
+"id": "lLWQuUd7WW9U",
+"cellView": "form"
 },
 "outputs": [],
 "source": [
-"# @title **[2]** Start Server **using ngrok** (Recommended | **need a ngrok account**)\n",
+"\n",
+"#=======================Updated=========================\n",
+"\n",
+"# @title Start Server **using ngrok**\n",
 "# @markdown This cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n",
 "\n",
 "# @markdown ---\n",
-"# @markdown You'll need a ngrok account, but **it's free**!\n",
+"# @markdown You'll need a ngrok account, but <font color=green>**it's free**</font> and easy to create!\n",
 "# @markdown ---\n",
-"# @markdown **1** - Create a **free** account at [ngrok](https://dashboard.ngrok.com/signup)\\\n",
+"# @markdown **1** - Create a <font color=green>**free**</font> account at [ngrok](https://dashboard.ngrok.com/signup) or **login with Google/Github account**\\\n",
-"# @markdown **2** - If you didn't logged in with Google or Github, you will need to **verify your e-mail**!\\\n",
+"# @markdown **2** - If you didn't logged in with Google/Github, you will need to **verify your e-mail**!\\\n",
-"# @markdown **3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, copy it and place it here:\n",
+"# @markdown **3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, and place it here:\n",
-"from pyngrok import conf, ngrok\n",
+"Token = 'TOKEN_HERE' # @param {type:\"string\"}\n",
-"\n",
+"# @markdown **4** - *(optional)* Change to a region near to you or keep at United States if increase latency\\\n",
-"f0_det= \"rmvpe_onnx\" #@param [\"rmvpe_onnx\",\"rvc\"]\n",
-"Token = 'YOUR_TOKEN_HERE' # @param {type:\"string\"}\n",
-"# @markdown **4** - Still need further tests, but maybe region can help a bit on latency?\\\n",
 "# @markdown `Default Region: us - United States (Ohio)`\n",
-"Region = \"ap - Asia/Pacific (Singapore)\" # @param [\"ap - Asia/Pacific (Singapore)\", \"au - Australia (Sydney)\",\"eu - Europe (Frankfurt)\", \"in - India (Mumbai)\",\"jp - Japan (Tokyo)\",\"sa - South America (Sao Paulo)\", \"us - United States (Ohio)\"]\n",
+"Region = \"us - United States (Ohio)\" # @param [\"ap - Asia/Pacific (Singapore)\", \"au - Australia (Sydney)\",\"eu - Europe (Frankfurt)\", \"in - India (Mumbai)\",\"jp - Japan (Tokyo)\",\"sa - South America (Sao Paulo)\", \"us - United States (Ohio)\"]\n",
-"MyConfig = conf.PyngrokConfig()\n",
 "\n",
+"#@markdown **5** - *(optional)* Other options:\n",
+"ClearConsole = True # @param {type:\"boolean\"}\n",
+"Play_Notification = True # @param {type:\"boolean\"}\n",
+"\n",
+"# ---------------------------------\n",
+"# DO NOT TOUCH ANYTHING DOWN BELOW!\n",
+"# ---------------------------------\n",
+"\n",
+"%cd $pathloc/server/\n",
+"\n",
+"from pyngrok import conf, ngrok\n",
+"MyConfig = conf.PyngrokConfig()\n",
 "MyConfig.auth_token = Token\n",
 "MyConfig.region = Region[0:2]\n",
-"\n",
+"#conf.get_default().authtoken = Token\n",
-"conf.get_default().authtoken = Token\n",
+"#conf.get_default().region = Region\n",
-"conf.get_default().region = Region[0:2]\n",
-"\n",
 "conf.set_default(MyConfig);\n",
 "\n",
-"# @markdown ---\n",
+"import subprocess, threading, time, socket, urllib.request\n",
-"# @markdown If you want to automatically clear the output when the server loads, check this option.\n",
+"PORT = 8000\n",
-"Clear_Output = True # @param {type:\"boolean\"}\n",
-"\n",
-"mainpy=codecs.decode('ZZIPFreireFVB.cl','rot_13')\n",
-"\n",
-"import portpicker, socket, urllib.request\n",
-"PORT = portpicker.pick_unused_port()\n",
 "\n",
 "from pyngrok import ngrok\n",
-"# Edited ⏬⏬\n",
 "ngrokConnection = ngrok.connect(PORT)\n",
 "public_url = ngrokConnection.public_url\n",
 "\n",
-"def iframe_thread(port):\n",
+"from IPython.display import clear_output\n",
-" while True:\n",
+"from IPython.display import Audio, display\n",
-" time.sleep(0.5)\n",
+"def play_notification_sound():\n",
-" sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
+" display(Audio(url='https://raw.githubusercontent.com/hinabl/rmvpe-ai-kaggle/main/custom/audios/notif.mp3', autoplay=True))\n",
-" result = sock.connect_ex(('127.0.0.1', port))\n",
-" if result == 0:\n",
-" break\n",
-" sock.close()\n",
-" clear_output()\n",
-" print(\"------- SERVER READY! -------\")\n",
-" print(\"Your server is available at:\")\n",
-" print(public_url)\n",
-" print(\"-----------------------------\")\n",
-" # display(Javascript('window.open(\"{url}\", \\'_blank\\');'.format(url=public_url)))\n",
-"\n",
-"print(PORT)\n",
 "\n",
 "\n",
+"def wait_for_server():\n",
+" while True:\n",
+" time.sleep(0.5)\n",
+" sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
+" result = sock.connect_ex(('127.0.0.1', PORT))\n",
+" if result == 0:\n",
+" break\n",
+" sock.close()\n",
+" if ClearConsole:\n",
+" clear_output()\n",
+" print(\"--------- SERVER READY! ---------\")\n",
+" print(\"Your server is available at:\")\n",
+" print(public_url)\n",
+" print(\"---------------------------------\")\n",
+" if Play_Notification==True:\n",
+" play_notification_sound()\n",
 "\n",
-"threading.Thread(target=iframe_thread, daemon=True, args=(PORT,)).start()\n",
+"threading.Thread(target=wait_for_server, daemon=True).start()\n",
 "\n",
+"mainpy=codecs.decode('ZZIPFreireFVB.cl','rot_13')\n",
 "\n",
 "!python3 $mainpy \\\n",
 " -p {PORT} \\\n",
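
The main addition in this hunk is a readiness check: the cell polls the local port with `socket.connect_ex` until the server accepts connections, then prints the ngrok public URL. A minimal standalone sketch of that pattern (the port number and URL below are placeholders, not values taken from the notebook):

```python
import socket
import threading
import time

def wait_for_server(port: int, public_url: str) -> None:
    # Poll the local port until the server accepts a TCP connection.
    while True:
        time.sleep(0.5)
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            if sock.connect_ex(("127.0.0.1", port)) == 0:
                break
    print("--------- SERVER READY! ---------")
    print("Your server is available at:", public_url)

# Run the check in the background while the server process starts up.
threading.Thread(target=wait_for_server, args=(8000, "https://<your-tunnel>.ngrok.io"), daemon=True).start()
```
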
@@ -360,74 +315,27 @@
 " --rmvpe pretrain/rmvpe.pt \\\n",
 " --model_dir model_dir \\\n",
 " --samples samples.json\n",
-"\n"
+"\n",
+"ngrok.disconnect(ngrokConnection.public_url)"
 ]
 },
 {
-"cell_type": "code",
+"cell_type": "markdown",
 "source": [
-"# @title **[Optional]** Start Server **using localtunnel** (ngrok alternative | no account needed)\n",
+"![](https://i.pinimg.com/474x/de/72/9e/de729ecfa41b69901c42c82fff752414.jpg)\n",
-"# @markdown This cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n",
+"![](https://i.pinimg.com/474x/de/72/9e/de729ecfa41b69901c42c82fff752414.jpg)"
-"\n",
-"# @markdown ---\n",
-"!npm config set update-notifier false\n",
-"!npm install -g localtunnel\n",
-"print(\"\\033[92mLocalTunnel installed!\")\n",
-"# @markdown If you want to automatically clear the output when the server loads, check this option.\n",
-"Clear_Output = True # @param {type:\"boolean\"}\n",
-"\n",
-"import portpicker, subprocess, threading, time, socket, urllib.request\n",
-"PORT = portpicker.pick_unused_port()\n",
-"\n",
-"from IPython.display import clear_output, Javascript\n",
-"\n",
-"def iframe_thread(port):\n",
-" while True:\n",
-" time.sleep(0.5)\n",
-" sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
-" result = sock.connect_ex(('127.0.0.1', port))\n",
-" if result == 0:\n",
-" break\n",
-" sock.close()\n",
-" clear_output()\n",
-" print(\"Use the following endpoint to connect to localtunnel:\", urllib.request.urlopen('https://ipv4.icanhazip.com').read().decode('utf8').strip(\"\\n\"))\n",
-" p = subprocess.Popen([\"lt\", \"--port\", \"{}\".format(port)], stdout=subprocess.PIPE)\n",
-" for line in p.stdout:\n",
-" print(line.decode(), end='')\n",
-"\n",
-"threading.Thread(target=iframe_thread, daemon=True, args=(PORT,)).start()\n",
-"\n",
-"\n",
-"!python3 MMVCServerSIO.py \\\n",
-" -p {PORT} \\\n",
-" --https False \\\n",
-" --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \\\n",
-" --content_vec_500_onnx pretrain/content_vec_500.onnx \\\n",
-" --content_vec_500_onnx_on true \\\n",
-" --hubert_base pretrain/hubert_base.pt \\\n",
-" --hubert_base_jp pretrain/rinna_hubert_base_jp.pt \\\n",
-" --hubert_soft pretrain/hubert/hubert-soft-0d54a1f4.pt \\\n",
-" --nsf_hifigan pretrain/nsf_hifigan/model \\\n",
-" --crepe_onnx_full pretrain/crepe_onnx_full.onnx \\\n",
-" --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \\\n",
-" --rmvpe pretrain/rmvpe.pt \\\n",
-" --model_dir model_dir \\\n",
-" --samples samples.json \\\n",
-" --colab True"
 ],
 "metadata": {
-"cellView": "form",
+"id": "2Uu1sTSwTc7q"
-"id": "ZwZaCf4BeZi2"
+}
-},
-"execution_count": null,
-"outputs": []
 }
 ],
 "metadata": {
 "colab": {
 "provenance": [],
 "private_outputs": true,
-"gpuType": "T4"
+"gpuType": "T4",
+"include_colab_link": true
 },
 "kernelspec": {
 "display_name": "Python 3",
@@ -440,4 +348,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 0
 }