diff --git a/Realtime_Voice_Changer_on_Colab.ipynb b/Realtime_Voice_Changer_on_Colab.ipynb
index 1684258a..144b9e98 100644
--- a/Realtime_Voice_Changer_on_Colab.ipynb
+++ b/Realtime_Voice_Changer_on_Colab.ipynb
@@ -1,6 +1,6 @@
{
"cells": [
- {
+ {
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
@@ -12,265 +12,146 @@
},
{
"cell_type": "markdown",
- "metadata": {
- "id": "Lbbmx_Vjl0zo"
- },
"source": [
- "### w-okada's Voice Changer | **Google Colab**\n",
+ "### [w-okada's Voice Changer](https://github.com/w-okada/voice-changer) | **Colab**\n",
"\n",
"---\n",
"\n",
- "##**READ ME - VERY IMPORTANT**\n",
+ "## **⬇ VERY IMPORTANT ⬇**\n",
"\n",
- "This is an attempt to run [Realtime Voice Changer](https://github.com/w-okada/voice-changer) on Google Colab, still not perfect but is totally usable, you can use the following settings for better results:\n",
+ "You can use the following settings for better results:\n",
"\n",
- "If you're using a index: `f0: RMVPE_ONNX | Chunk: 112 or higher | Extra: 8192`\\\n",
- "If you're not using a index: `f0: RMVPE_ONNX | Chunk: 96 or higher | Extra: 16384`\\\n",
- "**Don't forget to select your Colab GPU in the GPU field (Tesla T4, for free users)*\n",
+ "If you're using a index: `f0: RMVPE_ONNX | Chunk: 112 or higher | Extra: 8192`
\n",
+ "If you're not using a index: `f0: RMVPE_ONNX | Chunk: 96 or higher | Extra: 16384`
\n",
+ "**Don't forget to select a T4 GPU in the GPU field, NEVER use CPU!\n",
"> Seems that PTH models performance better than ONNX for now, you can still try ONNX models and see if it satisfies you\n",
"\n",
"\n",
"*You can always [click here](https://github.com/YunaOneeChan/Voice-Changer-Settings) to check if these settings are up-to-date*\n",
- "
\n",
"\n",
"---\n",
"\n",
- "###Always use Colab GPU (**VERY VERY VERY IMPORTANT!**)\n",
+ "### ⬇ Always use Colab GPU! (**IMPORTANT!**) ⬇\n",
"You need to use a Colab GPU so the Voice Changer can work faster and better\\\n",
"Use the menu above and click on **Runtime** » **Change runtime** » **Hardware acceleration** to select a GPU (**T4 is the free one**)\n",
"\n",
"---\n",
- "\n",
- "
\n",
- "\n",
- "# **Credits and Support**\n",
- "Realtime Voice Changer by [w-okada](https://github.com/w-okada)\\\n",
- "Colab files updated by [rafacasari](https://github.com/Rafacasari)\\\n",
+ "**Credits**
\n",
+ "Realtime Voice Changer by [w-okada](https://github.com/w-okada)
\n",
+ "Notebook files updated by [rafacasari](https://github.com/Rafacasari)
\n",
"Recommended settings by [YunaOneeChan](https://github.com/YunaOneeChan)\n",
"\n",
- "Need help? [AI Hub Discord](https://discord.gg/aihub) » ***#help-realtime-vc***\n",
+ "**Need help?** [AI Hub Discord](https://discord.gg/aihub) » ***#help-realtime-vc***\n",
"\n",
"---"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "cellView": "form",
- "id": "RhdqDSt-LfGr"
- },
- "outputs": [],
- "source": [
- "# @title **[Optional]** Connect to Google Drive\n",
- "# @markdown Using Google Drive can improve load times a bit and your models will be stored, so you don't need to re-upload every time that you use.\n",
- "import os\n",
- "from google.colab import drive\n",
- "\n",
- "if not os.path.exists('/content/drive'):\n",
- " drive.mount('/content/drive')\n",
- "\n",
- "%cd /content/drive/MyDrive"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "86wTFmqsNMnD",
- "cellView": "form"
- },
- "outputs": [],
- "source": [
- "# @title **[1]** Clone repository and install dependencies\n",
- "# @markdown This first step will download the latest version of Voice Changer and install the dependencies. **It will take around 2 minutes to complete.**\n",
- "\n",
- "!git clone --depth 1 https://github.com/w-okada/voice-changer.git &> /dev/null\n",
- "\n",
- "%cd voice-changer/server/\n",
- "print(\"\\033[92mSuccessfully cloned the repository\")\n",
- "\n",
- "!apt-get install libportaudio2 &> /dev/null\n",
- "!pip install pyworld\n",
- "!pip install onnxruntime-gpu uvicorn faiss-gpu fairseq jedi google-colab moviepy decorator==4.4.2 sounddevice numpy==1.23.5 pyngrok --quiet\n",
- "!pip install -r requirements.txt --no-build-isolation --quiet\n",
- "# Maybe install Tensor packages?\n",
- "#!pip install torch-tensorrt\n",
- "#!pip install TensorRT\n",
- "print(\"\\033[92mSuccessfully installed all packages!\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "lLWQuUd7WW9U",
- "cellView": "form"
- },
- "outputs": [],
- "source": [
- "# @title **[2]** Start Server **using ngrok** (Recommended | **need a ngrok account**)\n",
- "# @markdown This cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n",
- "\n",
- "# @markdown ---\n",
- "# @markdown You'll need a ngrok account, but **it's free**!\n",
- "# @markdown ---\n",
- "# @markdown **1** - Create a **free** account at [ngrok](https://dashboard.ngrok.com/signup)\\\n",
- "# @markdown **2** - If you didn't logged in with Google or Github, you will need to **verify your e-mail**!\\\n",
- "# @markdown **3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, copy it and place it here:\n",
- "from pyngrok import conf, ngrok\n",
- "\n",
- "Token = '' # @param {type:\"string\"}\n",
- "# @markdown **4** - Still need further tests, but maybe region can help a bit on latency?\\\n",
- "# @markdown `Default Region: us - United States (Ohio)`\n",
- "Region = \"us - United States (Ohio)\" # @param [\"ap - Asia/Pacific (Singapore)\", \"au - Australia (Sydney)\",\"eu - Europe (Frankfurt)\", \"in - India (Mumbai)\",\"jp - Japan (Tokyo)\",\"sa - South America (Sao Paulo)\", \"us - United States (Ohio)\"]\n",
- "\n",
- "MyConfig = conf.PyngrokConfig()\n",
- "\n",
- "MyConfig.auth_token = Token\n",
- "MyConfig.region = Region[0:2]\n",
- "\n",
- "conf.get_default().authtoken = Token\n",
- "conf.get_default().region = Region[0:2]\n",
- "\n",
- "conf.set_default(MyConfig);\n",
- "\n",
- "# @markdown ---\n",
- "# @markdown If you want to automatically clear the output when the server loads, check this option.\n",
- "Clear_Output = True # @param {type:\"boolean\"}\n",
- "\n",
- "import portpicker, subprocess, threading, time, socket, urllib.request\n",
- "PORT = portpicker.pick_unused_port()\n",
- "\n",
- "from IPython.display import clear_output, Javascript\n",
- "\n",
- "from pyngrok import ngrok\n",
- "ngrokConnection = ngrok.connect(PORT)\n",
- "public_url = ngrokConnection.public_url\n",
- "\n",
- "def iframe_thread(port):\n",
- " while True:\n",
- " time.sleep(0.5)\n",
- " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
- " result = sock.connect_ex(('127.0.0.1', port))\n",
- " if result == 0:\n",
- " break\n",
- " sock.close()\n",
- " clear_output()\n",
- " print(\"------- SERVER READY! -------\")\n",
- " print(\"Your server is available at:\")\n",
- " print(public_url)\n",
- " print(\"-----------------------------\")\n",
- " display(Javascript('window.open(\"{url}\", \\'_blank\\');'.format(url=public_url)))\n",
- "\n",
- "threading.Thread(target=iframe_thread, daemon=True, args=(PORT,)).start()\n",
- "\n",
- "!python3 MMVCServerSIO.py \\\n",
- " -p {PORT} \\\n",
- " --https False \\\n",
- " --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \\\n",
- " --content_vec_500_onnx pretrain/content_vec_500.onnx \\\n",
- " --content_vec_500_onnx_on true \\\n",
- " --hubert_base pretrain/hubert_base.pt \\\n",
- " --hubert_base_jp pretrain/rinna_hubert_base_jp.pt \\\n",
- " --hubert_soft pretrain/hubert/hubert-soft-0d54a1f4.pt \\\n",
- " --nsf_hifigan pretrain/nsf_hifigan/model \\\n",
- " --crepe_onnx_full pretrain/crepe_onnx_full.onnx \\\n",
- " --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \\\n",
- " --rmvpe pretrain/rmvpe.pt \\\n",
- " --model_dir model_dir \\\n",
- " --samples samples.json"
- ]
- },
- {
- "cell_type": "code",
- "source": [
- "# @title **[Optional]** Start Server **using localtunnel** (ngrok alternative | no account needed)\n",
- "# @markdown This cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n",
- "\n",
- "# @markdown ---\n",
- "!npm config set update-notifier false\n",
- "!npm install -g localtunnel\n",
- "print(\"\\033[92mLocalTunnel installed!\")\n",
- "# @markdown If you want to automatically clear the output when the server loads, check this option.\n",
- "Clear_Output = True # @param {type:\"boolean\"}\n",
- "\n",
- "import portpicker, subprocess, threading, time, socket, urllib.request\n",
- "PORT = portpicker.pick_unused_port()\n",
- "\n",
- "from IPython.display import clear_output, Javascript\n",
- "\n",
- "def iframe_thread(port):\n",
- " while True:\n",
- " time.sleep(0.5)\n",
- " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
- " result = sock.connect_ex(('127.0.0.1', port))\n",
- " if result == 0:\n",
- " break\n",
- " sock.close()\n",
- " clear_output()\n",
- " print(\"Use the following endpoint to connect to localtunnel:\", urllib.request.urlopen('https://ipv4.icanhazip.com').read().decode('utf8').strip(\"\\n\"))\n",
- " p = subprocess.Popen([\"lt\", \"--port\", \"{}\".format(port)], stdout=subprocess.PIPE)\n",
- " for line in p.stdout:\n",
- " print(line.decode(), end='')\n",
- "\n",
- "threading.Thread(target=iframe_thread, daemon=True, args=(PORT,)).start()\n",
- "\n",
- "\n",
- "!python3 MMVCServerSIO.py \\\n",
- " -p {PORT} \\\n",
- " --https False \\\n",
- " --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \\\n",
- " --content_vec_500_onnx pretrain/content_vec_500.onnx \\\n",
- " --content_vec_500_onnx_on true \\\n",
- " --hubert_base pretrain/hubert_base.pt \\\n",
- " --hubert_base_jp pretrain/rinna_hubert_base_jp.pt \\\n",
- " --hubert_soft pretrain/hubert/hubert-soft-0d54a1f4.pt \\\n",
- " --nsf_hifigan pretrain/nsf_hifigan/model \\\n",
- " --crepe_onnx_full pretrain/crepe_onnx_full.onnx \\\n",
- " --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \\\n",
- " --rmvpe pretrain/rmvpe.pt \\\n",
- " --model_dir model_dir \\\n",
- " --samples samples.json \\\n",
- " --colab True"
],
"metadata": {
- "cellView": "form",
- "id": "ZwZaCf4BeZi2"
- },
- "execution_count": null,
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "source": [
- "# In Development | **Need contributors**"
- ],
- "metadata": {
- "id": "iuf9pBHYpTn-"
+ "id": "Lbbmx_Vjl0zo"
}
},
{
"cell_type": "code",
"source": [
- "# @title **[BROKEN]** Start Server using Colab Tunnels (trying to fix this TwT)\n",
- "# @markdown **Issue:** Everything starts correctly, but when you try to use the client, you'll see in your browser console a bunch of errors **(Error 500 - Not Allowed.)**\n",
+ "# @title Clone repository and install dependencies\n",
+ "# @markdown This first step will download the latest version of Voice Changer and install the dependencies. **It can take some time to complete.**\n",
+ "%cd /content/\n",
"\n",
- "import portpicker, subprocess, threading, time, socket, urllib.request\n",
- "PORT = portpicker.pick_unused_port()\n",
+ "!pip install colorama --quiet\n",
+ "from colorama import Fore, Style\n",
+ "import os\n",
"\n",
- "def iframe_thread(port):\n",
- " while True:\n",
- " time.sleep(0.5)\n",
- " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
- " result = sock.connect_ex(('127.0.0.1', port))\n",
- " if result == 0:\n",
- " break\n",
- " sock.close()\n",
- " from google.colab.output import serve_kernel_port_as_window\n",
- " serve_kernel_port_as_window(PORT)\n",
+ "print(f\"{Fore.CYAN}> Cloning the repository...{Style.RESET_ALL}\")\n",
+ "!git clone https://github.com/w-okada/voice-changer.git --quiet\n",
+ "print(f\"{Fore.GREEN}> Successfully cloned the repository!{Style.RESET_ALL}\")\n",
+ "%cd voice-changer/server/\n",
"\n",
- "threading.Thread(target=iframe_thread, daemon=True, args=(PORT,)).start()\n",
+ "print(f\"{Fore.CYAN}> Installing libportaudio2...{Style.RESET_ALL}\")\n",
+ "!apt-get -y install libportaudio2 -qq\n",
+ "\n",
+ "print(f\"{Fore.CYAN}> Installing pre-dependencies...{Style.RESET_ALL}\")\n",
+ "# Install dependencies that are missing from requirements.txt and pyngrok\n",
+ "!pip install faiss-gpu fairseq pyngrok --quiet\n",
+ "!pip install pyworld --no-build-isolation --quiet\n",
+ "print(f\"{Fore.CYAN}> Installing dependencies from requirements.txt...{Style.RESET_ALL}\")\n",
+ "!pip install -r requirements.txt --quiet\n",
+ "\n",
+ "print(f\"{Fore.GREEN}> Successfully installed all packages!{Style.RESET_ALL}\")"
+ ],
+ "metadata": {
+ "id": "86wTFmqsNMnD",
+ "cellView": "form",
+ "_kg_hide-output": false,
+ "execution": {
+ "iopub.status.busy": "2023-09-14T04:01:17.308284Z",
+ "iopub.execute_input": "2023-09-14T04:01:17.308682Z",
+ "iopub.status.idle": "2023-09-14T04:08:08.475375Z",
+ "shell.execute_reply.started": "2023-09-14T04:01:17.308652Z",
+ "shell.execute_reply": "2023-09-14T04:08:08.473827Z"
+ },
+ "trusted": true
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# @title Start Server **using ngrok**\n",
+ "# @markdown This cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n",
+ "\n",
+ "# @markdown ---\n",
+ "# @markdown You'll need a ngrok account, but **it's free** and easy to create!\n",
+ "# @markdown ---\n",
+ "# @markdown **1** - Create a **free** account at [ngrok](https://dashboard.ngrok.com/signup) or **login with Google/Github account**\\\n",
+ "# @markdown **2** - If you didn't logged in with Google/Github, you will need to **verify your e-mail**!\\\n",
+ "# @markdown **3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, and place it here:\n",
+ "Token = '' # @param {type:\"string\"}\n",
+ "# @markdown **4** - *(optional)* Change to a region near to you or keep at United States if increase latency\\\n",
+ "# @markdown `Default Region: us - United States (Ohio)`\n",
+ "Region = \"us - United States (Ohio)\" # @param [\"ap - Asia/Pacific (Singapore)\", \"au - Australia (Sydney)\",\"eu - Europe (Frankfurt)\", \"in - India (Mumbai)\",\"jp - Japan (Tokyo)\",\"sa - South America (Sao Paulo)\", \"us - United States (Ohio)\"]\n",
+ "\n",
+ "#@markdown **5** - *(optional)* Other options:\n",
+ "ClearConsole = True # @param {type:\"boolean\"}\n",
+ "\n",
+ "# ---------------------------------\n",
+ "# DO NOT TOUCH ANYTHING DOWN BELOW!\n",
+ "# ---------------------------------\n",
+ "\n",
+ "%cd /content/voice-changer/server\n",
+ "\n",
+ "from pyngrok import conf, ngrok\n",
+ "MyConfig = conf.PyngrokConfig()\n",
+ "MyConfig.auth_token = Token\n",
+ "MyConfig.region = Region[0:2]\n",
+ "#conf.get_default().authtoken = Token\n",
+ "#conf.get_default().region = Region\n",
+ "conf.set_default(MyConfig);\n",
+ "\n",
+ "import subprocess, threading, time, socket, urllib.request\n",
+ "PORT = 8000\n",
+ "\n",
+ "from pyngrok import ngrok\n",
+ "ngrokConnection = ngrok.connect(PORT)\n",
+ "public_url = ngrokConnection.public_url\n",
+ "\n",
+ "from IPython.display import clear_output\n",
+ "\n",
+ "def wait_for_server():\n",
+ " while True:\n",
+ " time.sleep(0.5)\n",
+ " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
+ " result = sock.connect_ex(('127.0.0.1', PORT))\n",
+ " if result == 0:\n",
+ " break\n",
+ " sock.close()\n",
+ " if ClearConsole:\n",
+ " clear_output()\n",
+ " print(\"--------- SERVER READY! ---------\")\n",
+ " print(\"Your server is available at:\")\n",
+ " print(public_url)\n",
+ " print(\"---------------------------------\")\n",
+ "\n",
+ "threading.Thread(target=wait_for_server, daemon=True).start()\n",
"\n",
"!python3 MMVCServerSIO.py \\\n",
" -p {PORT} \\\n",
@@ -286,11 +167,16 @@
" --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \\\n",
" --rmvpe pretrain/rmvpe.pt \\\n",
" --model_dir model_dir \\\n",
- " --samples samples.json"
+ " --samples samples.json\n",
+ "\n",
+ "ngrok.disconnect(ngrokConnection.public_url)"
],
"metadata": {
- "id": "P2BN-iWvDrMM",
- "cellView": "form"
+ "id": "lLWQuUd7WW9U",
+ "cellView": "form",
+ "_kg_hide-input": false,
+ "scrolled": true,
+ "trusted": true
},
"execution_count": null,
"outputs": []