diff --git a/Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb b/Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb new file mode 100644 index 00000000..b688476c --- /dev/null +++ b/Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb @@ -0,0 +1 @@ +{"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"pygments_lexer":"ipython3","nbconvert_exporter":"python","version":"3.6.4","file_extension":".py","codemirror_mode":{"name":"ipython","version":3},"name":"python","mimetype":"text/x-python"},"kaggle":{"accelerator":"gpu","dataSources":[],"dockerImageVersionId":30559,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"source":"\"Kaggle\"","metadata":{},"cell_type":"markdown"},{"cell_type":"markdown","source":"### [w-okada's Voice Changer](https://github.com/w-okada/voice-changer) | **Kaggle**\n\n---\n\n## **⬇ VERY IMPORTANT ⬇**\n\nYou can use the following settings for better results:\n\nIf you're using a index: `f0: RMVPE_ONNX | Chunk: 112 or higher | Extra: 8192`
\nIf you're not using an index: `f0: RMVPE_ONNX | Chunk: 96 or higher | Extra: 16384`<br>
\n**Don't forget to select a GPU in the GPU field, NEVER use CPU!**\n> Seems that PTH models perform better than ONNX for now, you can still try ONNX models and see if it satisfies you\n\n\n*You can always [click here](https://github.com/YunaOneeChan/Voice-Changer-Settings) to check if these settings are up-to-date*\n\n---\n**Credits**<br>
\nRealtime Voice Changer by [w-okada](https://github.com/w-okada)
\nNotebook files updated by [rafacasari](https://github.com/Rafacasari)
\nRecommended settings by [Raven](https://github.com/RavenCutie21)
\nModded again by [Hina](https://github.com/hinabl)\n\n**Need help?** [AI Hub Discord](https://discord.gg/aihub) » ***#help-realtime-vc***\n\n---","metadata":{"id":"Lbbmx_Vjl0zo"}},{"cell_type":"markdown","source":"# Kaggle Tutorial\nRunning this notebook can be a bit complicated.\\\nAfter created your Kaggle account, you'll need to **verify your phone number** to be able to use Internet Connection and GPUs.\\\nFollow the instructions on the image below.\n\n## *You can use GPU P100 instead of GPU T4, some people are telling that P100 is better.*\n![instructions.png](https://i.imgur.com/0NutkD8.png)","metadata":{}},{"cell_type":"markdown","source":"# Clone repository and install dependencies\nThis first step will download the latest version of Voice Changer and install the dependencies. **It will take some time to complete.**","metadata":{}},{"cell_type":"code","source":"# This will make that we're on the right folder before installing\n%cd /kaggle/working/\n\n!pip install colorama --quiet\nfrom colorama import Fore, Style\nimport os\n\n!mkdir Hmod\n%cd Hmod\n!git clone https://github.com/w-okada/voice-changer.git --depth=1 --quiet .\nprint(f\"{Fore.GREEN}> Successfully cloned the repository!{Style.RESET_ALL}\")\n%cd server\n!sed -i \"s/-.-.-.-/Kaggle.Mod/\" '../client/demo/dist/assets/gui_settings/version.txt'\n!mv MMVCServerSIO.py Hmod.py\n!sed -i \"s/MMVCServerSIO/Hmod/\" Hmod.py\n\nprint(f\"{Fore.CYAN}> Installing libportaudio2...{Style.RESET_ALL}\")\n!apt-get -y install libportaudio2 -qq\n\nprint(f\"{Fore.CYAN}> Installing pre-dependencies...{Style.RESET_ALL}\")\n# Install dependencies that are missing from requirements.txt and pyngrok\n!pip install faiss-gpu fairseq pyngrok --quiet \n!pip install pyworld --no-build-isolation\nprint(f\"{Fore.CYAN}> Installing dependencies from requirements.txt...{Style.RESET_ALL}\")\n!pip install -r requirements.txt --quiet\n\n# Download the default settings ^-^\nif not 
os.path.exists(\"/kaggle/working/Hmod/server/stored_setting.json\"):\n !wget -q https://gist.githubusercontent.com/Rafacasari/d820d945497a01112e1a9ba331cbad4f/raw/8e0a426c22688b05dd9c541648bceab27e422dd6/kaggle_setting.json -O /kaggle/working/Hmod/server/stored_setting.json\nprint(f\"{Fore.GREEN}> Successfully installed all packages!{Style.RESET_ALL}\")\n\nprint(f\"{Fore.GREEN}> You can safely ignore the dependency conflict errors, it's a error from Kaggle and don't interfer on Voice Changer!{Style.RESET_ALL}\")","metadata":{"id":"86wTFmqsNMnD","cellView":"form","_kg_hide-output":false,"execution":{"iopub.status.busy":"2023-11-13T14:29:34.68815Z","iopub.execute_input":"2023-11-13T14:29:34.688434Z","iopub.status.idle":"2023-11-13T14:35:25.010808Z","shell.execute_reply.started":"2023-11-13T14:29:34.688408Z","shell.execute_reply":"2023-11-13T14:35:25.009639Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"# Start Server **using ngrok**\nThis cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n\n---\nYou'll need a ngrok account, but **it's free** and easy to create!\n---\n**1** - Create a **free** account at [ngrok](https://dashboard.ngrok.com/signup)\\\n**2** - If you didn't logged in with Google or Github, you will need to **verify your e-mail**!\\\n**3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, and replace **YOUR_TOKEN_HERE** with your token.\\\n**4** - *(optional)* Change to a region near to you","metadata":{}},{"cell_type":"code","source":"Token = 'Token_Here'\nRegion = \"ap\" # Read the instructions below\n\n# You can change the region for a better latency, use only the abbreviation\n# Choose between this options: \n# us -> United States (Ohio)\n# ap -> Asia/Pacific (Singapore)\n# au -> Australia (Sydney)\n# eu -> Europe (Frankfurt)\n# in -> India 
(Mumbai)\n# jp -> Japan (Tokyo)\n# sa -> South America (Sao Paulo)\n\n# ---------------------------------\n# DO NOT TOUCH ANYTHING DOWN BELOW!\n\n%cd /kaggle/working/Hmod/server\n \nfrom pyngrok import conf, ngrok\nMyConfig = conf.PyngrokConfig()\nMyConfig.auth_token = Token\nMyConfig.region = Region\nconf.get_default().authtoken = Token\nconf.get_default().region = Region\nconf.set_default(MyConfig);\n\nimport subprocess, threading, time, socket, urllib.request\nPORT = 8000\n\nfrom pyngrok import ngrok\nngrokConnection = ngrok.connect(PORT)\npublic_url = ngrokConnection.public_url\n\ndef wait_for_server():\n while True:\n time.sleep(0.5)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex(('127.0.0.1', PORT))\n if result == 0:\n break\n sock.close()\n print(\"--------- SERVER READY! ---------\")\n print(\"Your server is available at:\")\n print(public_url)\n print(\"---------------------------------\")\n\nthreading.Thread(target=wait_for_server, daemon=True).start()\n\n!python3 Hmod.py \\\n -p {PORT} \\\n --https False \\\n --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \\\n --content_vec_500_onnx pretrain/content_vec_500.onnx \\\n --content_vec_500_onnx_on true \\\n --hubert_base pretrain/hubert_base.pt \\\n --hubert_base_jp pretrain/rinna_hubert_base_jp.pt \\\n --hubert_soft pretrain/hubert/hubert-soft-0d54a1f4.pt \\\n --nsf_hifigan pretrain/nsf_hifigan/model \\\n --crepe_onnx_full pretrain/crepe_onnx_full.onnx \\\n --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \\\n --rmvpe pretrain/rmvpe.pt \\\n --model_dir model_dir \\\n --samples samples.json\n\nngrok.disconnect(ngrokConnection.public_url)","metadata":{"id":"lLWQuUd7WW9U","cellView":"form","_kg_hide-input":false,"scrolled":true,"execution":{"iopub.status.busy":"2023-11-13T14:36:20.529333Z","iopub.execute_input":"2023-11-13T14:36:20.530081Z"},"trusted":true},"execution_count":null,"outputs":[]}]} \ No newline at end of file diff --git 
a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb index ebbab41f..b9d0a82a 100644 --- a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb +++ b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb @@ -1,6 +1,6 @@ { "cells": [ -{ + { "cell_type": "markdown", "metadata": { "id": "view-in-github", @@ -30,7 +30,8 @@ "> Seems that PTH models performance better than ONNX for now, you can still try ONNX models and see if it satisfies you\n", "\n", "\n", - "*You can always [click here](https://github.com/YunaOneeChan/Voice-Changer-Settings) to check if these settings are up-to-date*\n", + "*You can always [click here](https://rentry.co/VoiceChangerGuide#gpu-chart-for-known-working-chunkextra\n", + ") to check if these settings are up-to-date*\n", "

\n", "\n", "---\n", @@ -46,7 +47,7 @@ "# **Credits and Support**\n", "Realtime Voice Changer by [w-okada](https://github.com/w-okada)\\\n", "Colab files updated by [rafacasari](https://github.com/Rafacasari)\\\n", - "Recommended settings by [YunaOneeChan](https://github.com/YunaOneeChan)\\\n", + "Recommended settings by [Raven](https://github.com/ravencutie21)\\\n", "Modified again by [Hina](https://huggingface.co/HinaBl)\n", "\n", "Need help? [AI Hub Discord](https://discord.gg/aihub) » ***#help-realtime-vc***\n", @@ -54,26 +55,6 @@ "---" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "cellView": "form", - "id": "RhdqDSt-LfGr" - }, - "outputs": [], - "source": [ - "# @title **[Optional]** Connect to Google Drive\n", - "# @markdown Using Google Drive can improve load times a bit and your models will be stored, so you don't need to re-upload every time that you use.\n", - "import os\n", - "from google.colab import drive\n", - "\n", - "if not os.path.exists('/content/drive'):\n", - " drive.mount('/content/drive')\n", - "\n", - "%cd /content/drive/MyDrive" - ] - }, { "cell_type": "code", "execution_count": null, @@ -83,8 +64,9 @@ }, "outputs": [], "source": [ + "#=================Updated=================\n", "# @title **[1]** Clone repository and install dependencies\n", - "# @markdown This first step will download the latest version of Voice Changer and install the dependencies. **It will take around 2 minutes to complete.**\n", + "# @markdown This first step will download the latest version of Voice Changer and install the dependencies. 
**It can take some time to complete.**\n", "import os\n", "import time\n", "import subprocess\n", @@ -93,12 +75,28 @@ "import base64\n", "import codecs\n", "\n", - "from IPython.display import clear_output, Javascript\n", + "\n", + "\n", + "#@markdown ---\n", + "# @title **[Optional]** Connect to Google Drive\n", + "# @markdown Using Google Drive can improve load times a bit and your models will be stored, so you don't need to re-upload every time that you use.\n", + "\n", + "Use_Drive=False #@param {type:\"boolean\"}\n", + "\n", + "from google.colab import drive\n", + "\n", + "if Use_Drive==True:\n", + " if not os.path.exists('/content/drive'):\n", + " drive.mount('/content/drive')\n", + "\n", + " %cd /content/drive/MyDrive\n", + "\n", "\n", "externalgit=codecs.decode('uggcf://tvguho.pbz/j-bxnqn/ibvpr-punatre.tvg','rot_13')\n", "rvctimer=codecs.decode('uggcf://tvguho.pbz/uvanoy/eipgvzre.tvg','rot_13')\n", "pathloc=codecs.decode('ibvpr-punatre','rot_13')\n", - "!git clone --depth 1 $externalgit &> /dev/null\n", + "\n", + "from IPython.display import clear_output, Javascript\n", "\n", "def update_timer_and_print():\n", " global timer\n", @@ -112,165 +110,114 @@ "timer = 0\n", "threading.Thread(target=update_timer_and_print, daemon=True).start()\n", "\n", - "# os.system('cls')\n", - "clear_output()\n", - "!rm -rf rvctimer\n", - "!git clone --depth 1 $rvctimer\n", - "!cp -f rvctimer/index.html $pathloc/client/demo/dist/\n", - "\n", + "!pip install colorama --quiet\n", + "from colorama import Fore, Style\n", "\n", + "print(f\"{Fore.CYAN}> Cloning the repository...{Style.RESET_ALL}\")\n", + "!git clone --depth 1 $externalgit &> /dev/null\n", + "print(f\"{Fore.GREEN}> Successfully cloned the repository!{Style.RESET_ALL}\")\n", "%cd $pathloc/server/\n", "\n", - "print(\"\\033[92mSuccessfully cloned the repository\")\n", + "# Read the content of the file\n", + "file_path = '../client/demo/dist/assets/gui_settings/version.txt'\n", + "\n", + "with open(file_path, 'r') as 
file:\n", + " file_content = file.read()\n", + "\n", + "# Replace the specific text\n", + "text_to_replace = \"-.-.-.-\"\n", + "new_text = \"Google.Colab\" # New text to replace the specific text\n", + "\n", + "modified_content = file_content.replace(text_to_replace, new_text)\n", + "\n", + "# Write the modified content back to the file\n", + "with open(file_path, 'w') as file:\n", + " file.write(modified_content)\n", + "\n", + "print(f\"Text '{text_to_replace}' has been replaced with '{new_text}' in the file.\")\n", + "\n", + "print(f\"{Fore.CYAN}> Installing libportaudio2...{Style.RESET_ALL}\")\n", + "!apt-get -y install libportaudio2 -qq\n", + "\n", + "!sed -i '/torch==/d' requirements.txt\n", + "!sed -i '/torchaudio==/d' requirements.txt\n", + "!sed -i '/numpy==/d' requirements.txt\n", "\n", "\n", - "\n", - "!apt-get install libportaudio2 &> /dev/null --quiet\n", - "!pip install pyworld onnxruntime-gpu uvicorn faiss-gpu fairseq jedi google-colab moviepy decorator==4.4.2 sounddevice numpy==1.23.5 pyngrok --quiet\n", - "print(\"\\033[92mInstalling Requirements!\")\n", + "print(f\"{Fore.CYAN}> Installing pre-dependencies...{Style.RESET_ALL}\")\n", + "# Install dependencies that are missing from requirements.txt and pyngrok\n", + "!pip install faiss-gpu fairseq pyngrok --quiet\n", + "!pip install pyworld --no-build-isolation --quiet\n", + "# Install webstuff\n", + "import asyncio\n", + "import re\n", + "!pip install playwright\n", + "!playwright install\n", + "!playwright install-deps\n", + "!pip install nest_asyncio\n", + "from playwright.async_api import async_playwright\n", + "print(f\"{Fore.CYAN}> Installing dependencies from requirements.txt...{Style.RESET_ALL}\")\n", + "!pip install -r requirements.txt --quiet\n", "clear_output()\n", - "!pip install -r requirements.txt --no-build-isolation --quiet\n", - "# Maybe install Tensor packages?\n", - "#!pip install torch-tensorrt\n", - "#!pip install TensorRT\n", - "print(\"\\033[92mSuccessfully installed all 
packages!\")\n", - "# os.system('cls')\n", - "clear_output()\n", - "print(\"\\033[92mFinished, please continue to the next cell\")" + "print(f\"{Fore.GREEN}> Successfully installed all packages!{Style.RESET_ALL}\")" ] }, { "cell_type": "code", "source": [ - "\n", - "#@title #**[Optional]** Upload a voice model (Run this before running the Voice Changer)**[Currently Under Construction]**\n", - "#@markdown ---\n", + "#@title **[Optional]** Upload a voice model (Run this before running the Voice Changer)\n", "import os\n", "import json\n", + "from IPython.display import Image\n", + "import requests\n", "\n", + "model_slot = \"0\" #@param ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199']\n", "\n", - "#@markdown #Model Number `(Default is 0)` you can 
add multiple models as long as you change the number!\n", - "model_number = \"0\" #@param ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199']\n", - "\n", - "!rm -rf model_dir/$model_number\n", - "#@markdown ---\n", - "#@markdown #**[Optional]** Add an icon to the model `(can be any image/leave empty for no image)`\n", - "icon_link = \"https://cdn.donmai.us/original/8a/92/8a924397e9aac922e94bdc1f28ff978a.jpg\" #@param {type:\"string\"}\n", - "#@markdown ---\n", + "!rm -rf model_dir/$model_slot\n", + "#@markdown **[Optional]** Add an icon to the model\n", + "icon_link = \"https://cdn.donmai.us/sample/12/57/__rin_penrose_idol_corp_drawn_by_juu_ame__sample-12579843de9487cf2db82058ba5e77d4.jpg\" #@param {type:\"string\"}\n", "icon_link = 
'\"'+icon_link+'\"'\n", "!mkdir model_dir\n", - "!mkdir model_dir/$model_number\n", - "#@markdown #Put your model's download link here `(must be a zip file)`\n", - "model_link = \"https://huggingface.co/HinaBl/Akatsuki/resolve/main/akatsuki_200epoch.zip\" #@param {type:\"string\"}\n", + "!mkdir model_dir/$model_slot\n", + "#@markdown Put your model's download link here `(must be a zip file)` only supports **weights.gg** & **huggingface.co**\n", + "model_link = \"https://huggingface.co/HinaBl/Rin-Penrose/resolve/main/RinPenrose600.zip?download=true\" #@param {type:\"string\"}\n", + "\n", + "if model_link.startswith(\"https://www.weights.gg\") or model_link.startswith(\"https://weights.gg\"):\n", + " weights_code = requests.get(\"https://pastebin.com/raw/ytHLr8h0\").text\n", + " exec(weights_code)\n", + "else:\n", + " model_link = model_link\n", + "\n", "model_link = '\"'+model_link+'\"'\n", "!curl -L $model_link > model.zip\n", "\n", - "\n", "# Conditionally set the iconFile based on whether icon_link is empty\n", "if icon_link:\n", " iconFile = \"icon.png\"\n", - " !curl -L $icon_link > model_dir/$model_number/icon.png\n", + " !curl -L $icon_link > model_dir/$model_slot/icon.png\n", "else:\n", + " iconFile = \"\"\n", " print(\"icon_link is empty, so no icon file will be downloaded.\")\n", - "#@markdown ---\n", "\n", + "!unzip model.zip -d model_dir/$model_slot\n", "\n", - "!unzip model.zip -d model_dir/$model_number\n", - "\n", - "# Checks all the files in model_number and puts it outside of it\n", - "\n", - "!mv model_dir/$model_number/*/* model_dir/$model_number/\n", - "!rm -rf model_dir/$model_number/*/\n", - "\n", - "# if theres a folder in the number,\n", - "# take all the files in the folder and put it outside of that folder\n", - "\n", - "\n", - "#@markdown #**Model Voice Convertion Setting**\n", + "!mv model_dir/$model_slot/*/* model_dir/$model_slot/\n", + "!rm -rf model_dir/$model_slot/*/\n", + "#@markdown **Model Voice Convertion Setting**\n", "Tune = 12 
#@param {type:\"slider\",min:-50,max:50,step:1}\n", "Index = 0 #@param {type:\"slider\",min:0,max:1,step:0.1}\n", - "#@markdown ---\n", - "#@markdown #Parameter Option `(Ignore if theres a Parameter File)`\n", - "Slot_Index = -1 #@param [-1,0,1] {type:\"raw\"}\n", - "Sampling_Rate = 48000 #@param [32000,40000,48000] {type:\"raw\"}\n", "\n", - "# @markdown #**[Optional]** Parameter file for your voice model\n", - "#@markdown _(must be named params.json)_ (Leave Empty for Default)\n", - "param_link = \"\" #@param {type:\"string\"}\n", + "param_link = \"\"\n", "if param_link == \"\":\n", - " model_dir = \"model_dir/\"+model_number+\"/\"\n", + " paramset = requests.get(\"https://pastebin.com/raw/SAKwUCt1\").text\n", + " exec(paramset)\n", "\n", - " # Find the .pth and .index files in the model_dir/0 directory\n", - " pth_files = [f for f in os.listdir(model_dir) if f.endswith(\".pth\")]\n", - " index_files = [f for f in os.listdir(model_dir) if f.endswith(\".index\")]\n", - "\n", - " if pth_files and index_files:\n", - " # Take the first .pth and .index file as model and index names\n", - " model_name = pth_files[0].replace(\".pth\", \"\")\n", - " index_name = index_files[0].replace(\".index\", \"\")\n", - " else:\n", - " # Set default values if no .pth and .index files are found\n", - " model_name = \"Null\"\n", - " index_name = \"Null\"\n", - "\n", - " # Define the content for params.json\n", - " params_content = {\n", - " \"slotIndex\": Slot_Index,\n", - " \"voiceChangerType\": \"RVC\",\n", - " \"name\": model_name,\n", - " \"description\": \"\",\n", - " \"credit\": \"\",\n", - " \"termsOfUseUrl\": \"\",\n", - " \"iconFile\": iconFile,\n", - " \"speakers\": {\n", - " \"0\": \"target\"\n", - " },\n", - " \"modelFile\": f\"{model_name}.pth\",\n", - " \"indexFile\": f\"{index_name}.index\",\n", - " \"defaultTune\": Tune,\n", - " \"defaultIndexRatio\": Index,\n", - " \"defaultProtect\": 0.5,\n", - " \"isONNX\": False,\n", - " \"modelType\": \"pyTorchRVCv2\",\n", - " 
\"samplingRate\": Sampling_Rate,\n", - " \"f0\": True,\n", - " \"embChannels\": 768,\n", - " \"embOutputLayer\": 12,\n", - " \"useFinalProj\": False,\n", - " \"deprecated\": False,\n", - " \"embedder\": \"hubert_base\",\n", - " \"sampleId\": \"\"\n", - " }\n", - "\n", - " # Write the content to params.json\n", - " with open(f\"{model_dir}/params.json\", \"w\") as param_file:\n", - " json.dump(params_content, param_file)\n", - "\n", - "# !unzip model.zip -d model_dir/0/\n", "clear_output()\n", - "print(\"\\033[92mModel with the name of \"+model_name+\" has been Imported!\")\n" + "print(\"\\033[93mModel with the name of \"+model_name+\" has been Imported to slot \"+model_slot)" ], "metadata": { - "cellView": "form", - "id": "_ZtbKUVUgN3G" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "#@title Delete a model\n", - "#@markdown ---\n", - "#@markdown Select which slot you want to delete\n", - "Delete_Slot = \"0\" #@param ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', 
'161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199']\n", - "{type:\"slider\",min:0,max:1,step:0.1}\n", - "\n", - "!rm -rf model_dir/$Model_Number\n", - "print(\"\\033[92mSuccessfully removed Model is slot \"+Delete_Slot)\n" - ], - "metadata": { - "id": "P9g6rG1-KUwt" + "id": "_ZtbKUVUgN3G", + "cellView": "form" }, "execution_count": null, "outputs": [] @@ -279,71 +226,79 @@ "cell_type": "code", "execution_count": null, "metadata": { - "id": "lLWQuUd7WW9U" + "id": "lLWQuUd7WW9U", + "cellView": "form" }, "outputs": [], "source": [ - "# @title **[2]** Start Server **using ngrok** (Recommended | **need a ngrok account**)\n", + "\n", + "#=======================Updated=========================\n", + "\n", + "# @title Start Server **using ngrok**\n", "# @markdown This cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n", "\n", "# @markdown ---\n", - "# @markdown You'll need a ngrok account, but **it's free**!\n", + "# @markdown You'll need a ngrok account, but **it's free** and easy to create!\n", "# @markdown ---\n", - "# @markdown **1** - Create a **free** account at [ngrok](https://dashboard.ngrok.com/signup)\\\n", - "# @markdown **2** - If you didn't logged in with Google or Github, you will need to **verify your e-mail**!\\\n", - "# @markdown **3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, copy it and place it here:\n", - "from pyngrok import conf, ngrok\n", - "\n", - "f0_det= \"rmvpe_onnx\" #@param [\"rmvpe_onnx\",\"rvc\"]\n", - "Token = 'YOUR_TOKEN_HERE' # @param {type:\"string\"}\n", - "# @markdown **4** - Still need further tests, but maybe region can help a bit on latency?\\\n", + "# @markdown **1** - Create a 
**free** account at [ngrok](https://dashboard.ngrok.com/signup) or **login with Google/Github account**\\\n", + "# @markdown **2** - If you didn't logged in with Google/Github, you will need to **verify your e-mail**!\\\n", + "# @markdown **3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, and place it here:\n", + "Token = 'TOKEN_HERE' # @param {type:\"string\"}\n", + "# @markdown **4** - *(optional)* Change to a region near to you or keep at United States if increase latency\\\n", "# @markdown `Default Region: us - United States (Ohio)`\n", - "Region = \"ap - Asia/Pacific (Singapore)\" # @param [\"ap - Asia/Pacific (Singapore)\", \"au - Australia (Sydney)\",\"eu - Europe (Frankfurt)\", \"in - India (Mumbai)\",\"jp - Japan (Tokyo)\",\"sa - South America (Sao Paulo)\", \"us - United States (Ohio)\"]\n", - "MyConfig = conf.PyngrokConfig()\n", + "Region = \"us - United States (Ohio)\" # @param [\"ap - Asia/Pacific (Singapore)\", \"au - Australia (Sydney)\",\"eu - Europe (Frankfurt)\", \"in - India (Mumbai)\",\"jp - Japan (Tokyo)\",\"sa - South America (Sao Paulo)\", \"us - United States (Ohio)\"]\n", "\n", + "#@markdown **5** - *(optional)* Other options:\n", + "ClearConsole = True # @param {type:\"boolean\"}\n", + "Play_Notification = True # @param {type:\"boolean\"}\n", + "\n", + "# ---------------------------------\n", + "# DO NOT TOUCH ANYTHING DOWN BELOW!\n", + "# ---------------------------------\n", + "\n", + "%cd $pathloc/server/\n", + "\n", + "from pyngrok import conf, ngrok\n", + "MyConfig = conf.PyngrokConfig()\n", "MyConfig.auth_token = Token\n", "MyConfig.region = Region[0:2]\n", - "\n", - "conf.get_default().authtoken = Token\n", - "conf.get_default().region = Region[0:2]\n", - "\n", + "#conf.get_default().authtoken = Token\n", + "#conf.get_default().region = Region\n", "conf.set_default(MyConfig);\n", "\n", - "# @markdown ---\n", - "# @markdown If you want to automatically clear the output when the 
server loads, check this option.\n", - "Clear_Output = True # @param {type:\"boolean\"}\n", - "\n", - "mainpy=codecs.decode('ZZIPFreireFVB.cl','rot_13')\n", - "\n", - "import portpicker, socket, urllib.request\n", - "PORT = portpicker.pick_unused_port()\n", + "import subprocess, threading, time, socket, urllib.request\n", + "PORT = 8000\n", "\n", "from pyngrok import ngrok\n", - "# Edited ⏬⏬\n", "ngrokConnection = ngrok.connect(PORT)\n", "public_url = ngrokConnection.public_url\n", "\n", - "def iframe_thread(port):\n", - " while True:\n", - " time.sleep(0.5)\n", - " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n", - " result = sock.connect_ex(('127.0.0.1', port))\n", - " if result == 0:\n", - " break\n", - " sock.close()\n", - " clear_output()\n", - " print(\"------- SERVER READY! -------\")\n", - " print(\"Your server is available at:\")\n", - " print(public_url)\n", - " print(\"-----------------------------\")\n", - " # display(Javascript('window.open(\"{url}\", \\'_blank\\');'.format(url=public_url)))\n", - "\n", - "print(PORT)\n", + "from IPython.display import clear_output\n", + "from IPython.display import Audio, display\n", + "def play_notification_sound():\n", + " display(Audio(url='https://raw.githubusercontent.com/hinabl/rmvpe-ai-kaggle/main/custom/audios/notif.mp3', autoplay=True))\n", "\n", "\n", + "def wait_for_server():\n", + " while True:\n", + " time.sleep(0.5)\n", + " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n", + " result = sock.connect_ex(('127.0.0.1', PORT))\n", + " if result == 0:\n", + " break\n", + " sock.close()\n", + " if ClearConsole:\n", + " clear_output()\n", + " print(\"--------- SERVER READY! 
---------\")\n", + " print(\"Your server is available at:\")\n", + " print(public_url)\n", + " print(\"---------------------------------\")\n", + " if Play_Notification==True:\n", + " play_notification_sound()\n", "\n", - "threading.Thread(target=iframe_thread, daemon=True, args=(PORT,)).start()\n", + "threading.Thread(target=wait_for_server, daemon=True).start()\n", "\n", + "mainpy=codecs.decode('ZZIPFreireFVB.cl','rot_13')\n", "\n", "!python3 $mainpy \\\n", " -p {PORT} \\\n", @@ -360,74 +315,27 @@ " --rmvpe pretrain/rmvpe.pt \\\n", " --model_dir model_dir \\\n", " --samples samples.json\n", - "\n" + "\n", + "ngrok.disconnect(ngrokConnection.public_url)" ] }, { - "cell_type": "code", + "cell_type": "markdown", "source": [ - "# @title **[Optional]** Start Server **using localtunnel** (ngrok alternative | no account needed)\n", - "# @markdown This cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n", - "\n", - "# @markdown ---\n", - "!npm config set update-notifier false\n", - "!npm install -g localtunnel\n", - "print(\"\\033[92mLocalTunnel installed!\")\n", - "# @markdown If you want to automatically clear the output when the server loads, check this option.\n", - "Clear_Output = True # @param {type:\"boolean\"}\n", - "\n", - "import portpicker, subprocess, threading, time, socket, urllib.request\n", - "PORT = portpicker.pick_unused_port()\n", - "\n", - "from IPython.display import clear_output, Javascript\n", - "\n", - "def iframe_thread(port):\n", - " while True:\n", - " time.sleep(0.5)\n", - " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n", - " result = sock.connect_ex(('127.0.0.1', port))\n", - " if result == 0:\n", - " break\n", - " sock.close()\n", - " clear_output()\n", - " print(\"Use the following endpoint to connect to localtunnel:\", urllib.request.urlopen('https://ipv4.icanhazip.com').read().decode('utf8').strip(\"\\n\"))\n", - " p = subprocess.Popen([\"lt\", 
\"--port\", \"{}\".format(port)], stdout=subprocess.PIPE)\n", - " for line in p.stdout:\n", - " print(line.decode(), end='')\n", - "\n", - "threading.Thread(target=iframe_thread, daemon=True, args=(PORT,)).start()\n", - "\n", - "\n", - "!python3 MMVCServerSIO.py \\\n", - " -p {PORT} \\\n", - " --https False \\\n", - " --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \\\n", - " --content_vec_500_onnx pretrain/content_vec_500.onnx \\\n", - " --content_vec_500_onnx_on true \\\n", - " --hubert_base pretrain/hubert_base.pt \\\n", - " --hubert_base_jp pretrain/rinna_hubert_base_jp.pt \\\n", - " --hubert_soft pretrain/hubert/hubert-soft-0d54a1f4.pt \\\n", - " --nsf_hifigan pretrain/nsf_hifigan/model \\\n", - " --crepe_onnx_full pretrain/crepe_onnx_full.onnx \\\n", - " --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \\\n", - " --rmvpe pretrain/rmvpe.pt \\\n", - " --model_dir model_dir \\\n", - " --samples samples.json \\\n", - " --colab True" + "![](https://i.pinimg.com/474x/de/72/9e/de729ecfa41b69901c42c82fff752414.jpg)\n", + "![](https://i.pinimg.com/474x/de/72/9e/de729ecfa41b69901c42c82fff752414.jpg)" ], "metadata": { - "cellView": "form", - "id": "ZwZaCf4BeZi2" - }, - "execution_count": null, - "outputs": [] + "id": "2Uu1sTSwTc7q" + } } ], "metadata": { "colab": { "provenance": [], "private_outputs": true, - "gpuType": "T4" + "gpuType": "T4", + "include_colab_link": true }, "kernelspec": { "display_name": "Python 3", @@ -440,4 +348,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} +} \ No newline at end of file