diff --git a/.gitignore b/.gitignore
index 44b31e86..8b137891 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,77 +1 @@
-tmp
-dummy
-node_modules
-__pycache__
-server/upload_dir/
-server/MMVC_Client_v13/
-server/MMVC_Client_v15/
-server/so-vits-svc-40/
-server/so-vits-svc-40v2/
-server/DDSP-SVC/
-server/RVC/
-server/keys
-server/info
-server/in.wav
-server/out.wav
-server/G_*.pth
-server/train_config.json
-server/stored_setting.json
-# model folder for v.1.3.x testing
-server/v13
-
-server/model_hubert
-server/model_so-vits-svc-40v2_tsukuyomi/
-server/model_so-vits-svc-40v2_amitaro/
-server/model_so-vits-svc-40/
-server/model_so-vits-svc-40_mahiro/
-server/model_so-vits-svc-40_amitaro/
-server/model_so-vits-svc-40_tsukuyomi/
-server/model_so-vits-svc-40_kikotokurage
-model_DDSP-SVC/
-server/model_sovits
-server/test
-
-server/memo.md
-
-client/lib/dist
-client/lib/worklet/dist
-client/demo/public/models
-client/demo/public/models_
-client/demo/dist/models
-client/demo/dist_web
-client/demo/src/001_provider/backup
-# client/demo/dist/ # kept for the demo
-
-docker/cudnn/
-
-server/pretrain/
-server/weights/
-server/model_dir/
-server/model_dir2/
-server/weights_/
-server/weights__/
-server/models/
-server/samples.json
-server/samples_0003_t.json
-server/samples_0003_o.json
-server/samples_0003_t2.json
-server/samples_0003_o2.json
-server/samples_0003_d2.json
-server/samples_0004_t.json
-server/samples_0004_o.json
-server/samples_0004_d.json
-
-server/test_official_v1_v2.json
-server/test_ddpn_v1_v2.json
-server/vcclient.log
-start_trainer.sh
-
-# venv
-venv/
-
-
-beatrice_internal_api.cp310-win_amd64.pyd
-108_average_110b_10.bin
-
-server/model_dir_static/Beatrice-JVS
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 5a6750dd..492a8b8c 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -1,9 +1,17 @@
{
- "files.associations": {
- "*.css": "postcss"
- },
- "workbench.colorCustomizations": {
- "tab.activeBackground": "#65952acc"
- }
- }
-
\ No newline at end of file
+ "[python]": {
+ "editor.defaultFormatter": "ms-python.black-formatter"
+ },
+ "[markdown]": {
+ "editor.wordWrap": "off"
+ },
+ "flake8.args": ["--max-line-length=1024", "--ignore=E402,E203,E722"],
+ "workbench.colorCustomizations": {
+ "tab.activeBackground": "#65952acc"
+ },
+ "black-formatter.args": ["--line-length", "550"],
+ "python.testing.pytestArgs": ["tests"],
+ "python.testing.unittestEnabled": false,
+ "python.testing.pytestEnabled": true,
+ "mypy-type-checker.args": ["--config-file=mypy.ini"]
+}
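The pytest and mypy arguments above assume a `tests/` directory and a `mypy.ini` at the repository root; those names come from the settings themselves, not from anything else shown in this diff. As a hypothetical illustration only, a minimal test file that `"python.testing.pytestArgs": ["tests"]` would discover:

```
# tests/test_sanity.py -- hypothetical example, not part of the repository
def test_sanity():
    # Trivial assertion to confirm the VS Code pytest integration is wired up.
    assert 1 + 1 == 2
```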
diff --git a/Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb b/Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb
deleted file mode 100644
index b688476c..00000000
--- a/Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb
+++ /dev/null
@@ -1 +0,0 @@
-{"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"pygments_lexer":"ipython3","nbconvert_exporter":"python","version":"3.6.4","file_extension":".py","codemirror_mode":{"name":"ipython","version":3},"name":"python","mimetype":"text/x-python"},"kaggle":{"accelerator":"gpu","dataSources":[],"dockerImageVersionId":30559,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"source":"","metadata":{},"cell_type":"markdown"},{"cell_type":"markdown","source":"### [w-okada's Voice Changer](https://github.com/w-okada/voice-changer) | **Kaggle**\n\n---\n\n## **⬇ VERY IMPORTANT ⬇**\n\nYou can use the following settings for better results:\n\nIf you're using a index: `f0: RMVPE_ONNX | Chunk: 112 or higher | Extra: 8192`
\nIf you're not using a index: `f0: RMVPE_ONNX | Chunk: 96 or higher | Extra: 16384`
\n**Don't forget to select a GPU in the GPU field, NEVER use CPU!\n> Seems that PTH models performance better than ONNX for now, you can still try ONNX models and see if it satisfies you\n\n\n*You can always [click here](https://github.com/YunaOneeChan/Voice-Changer-Settings) to check if these settings are up-to-date*\n\n---\n**Credits**
\nRealtime Voice Changer by [w-okada](https://github.com/w-okada)
\nNotebook files updated by [rafacasari](https://github.com/Rafacasari)
\nRecommended settings by [Raven](https://github.com/RavenCutie21)
\nModded again by [Hina](https://github.com/hinabl)\n\n**Need help?** [AI Hub Discord](https://discord.gg/aihub) » ***#help-realtime-vc***\n\n---","metadata":{"id":"Lbbmx_Vjl0zo"}},{"cell_type":"markdown","source":"# Kaggle Tutorial\nRunning this notebook can be a bit complicated.\\\nAfter created your Kaggle account, you'll need to **verify your phone number** to be able to use Internet Connection and GPUs.\\\nFollow the instructions on the image below.\n\n## *You can use GPU P100 instead of GPU T4, some people are telling that P100 is better.*\n![instructions.png](https://i.imgur.com/0NutkD8.png)","metadata":{}},{"cell_type":"markdown","source":"# Clone repository and install dependencies\nThis first step will download the latest version of Voice Changer and install the dependencies. **It will take some time to complete.**","metadata":{}},{"cell_type":"code","source":"# This will make that we're on the right folder before installing\n%cd /kaggle/working/\n\n!pip install colorama --quiet\nfrom colorama import Fore, Style\nimport os\n\n!mkdir Hmod\n%cd Hmod\n!git clone https://github.com/w-okada/voice-changer.git --depth=1 --quiet .\nprint(f\"{Fore.GREEN}> Successfully cloned the repository!{Style.RESET_ALL}\")\n%cd server\n!sed -i \"s/-.-.-.-/Kaggle.Mod/\" '../client/demo/dist/assets/gui_settings/version.txt'\n!mv MMVCServerSIO.py Hmod.py\n!sed -i \"s/MMVCServerSIO/Hmod/\" Hmod.py\n\nprint(f\"{Fore.CYAN}> Installing libportaudio2...{Style.RESET_ALL}\")\n!apt-get -y install libportaudio2 -qq\n\nprint(f\"{Fore.CYAN}> Installing pre-dependencies...{Style.RESET_ALL}\")\n# Install dependencies that are missing from requirements.txt and pyngrok\n!pip install faiss-gpu fairseq pyngrok --quiet \n!pip install pyworld --no-build-isolation\nprint(f\"{Fore.CYAN}> Installing dependencies from requirements.txt...{Style.RESET_ALL}\")\n!pip install -r requirements.txt --quiet\n\n# Download the default settings ^-^\nif not os.path.exists(\"/kaggle/working/Hmod/server/stored_setting.json\"):\n !wget -q https://gist.githubusercontent.com/Rafacasari/d820d945497a01112e1a9ba331cbad4f/raw/8e0a426c22688b05dd9c541648bceab27e422dd6/kaggle_setting.json -O /kaggle/working/24apuiBokE3TjZwc6tuqqv39SwP_2LRouVj3M9oZZCbzgntuG /server/stored_setting.json\nprint(f\"{Fore.GREEN}> Successfully installed all packages!{Style.RESET_ALL}\")\n\nprint(f\"{Fore.GREEN}> You can safely ignore the dependency conflict errors, it's a error from Kaggle and don't interfer on Voice Changer!{Style.RESET_ALL}\")","metadata":{"id":"86wTFmqsNMnD","cellView":"form","_kg_hide-output":false,"execution":{"iopub.status.busy":"2023-11-13T14:29:34.68815Z","iopub.execute_input":"2023-11-13T14:29:34.688434Z","iopub.status.idle":"2023-11-13T14:35:25.010808Z","shell.execute_reply.started":"2023-11-13T14:29:34.688408Z","shell.execute_reply":"2023-11-13T14:35:25.009639Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"# Start Server **using ngrok**\nThis cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n\n---\nYou'll need a ngrok account, but **it's free** and easy to create!\n---\n**1** - Create a **free** account at [ngrok](https://dashboard.ngrok.com/signup)\\\n**2** - If you didn't logged in with Google or Github, you will need to **verify your e-mail**!\\\n**3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, and replace **YOUR_TOKEN_HERE** with your token.\\\n**4** - *(optional)* Change 
to a region near to you","metadata":{}},{"cell_type":"code","source":"Token = 'Token_Here'\nRegion = \"ap\" # Read the instructions below\n\n# You can change the region for a better latency, use only the abbreviation\n# Choose between this options: \n# us -> United States (Ohio)\n# ap -> Asia/Pacific (Singapore)\n# au -> Australia (Sydney)\n# eu -> Europe (Frankfurt)\n# in -> India (Mumbai)\n# jp -> Japan (Tokyo)\n# sa -> South America (Sao Paulo)\n\n# ---------------------------------\n# DO NOT TOUCH ANYTHING DOWN BELOW!\n\n%cd /kaggle/working/Hmod/server\n \nfrom pyngrok import conf, ngrok\nMyConfig = conf.PyngrokConfig()\nMyConfig.auth_token = Token\nMyConfig.region = Region\nconf.get_default().authtoken = Token\nconf.get_default().region = Region\nconf.set_default(MyConfig);\n\nimport subprocess, threading, time, socket, urllib.request\nPORT = 8000\n\nfrom pyngrok import ngrok\nngrokConnection = ngrok.connect(PORT)\npublic_url = ngrokConnection.public_url\n\ndef wait_for_server():\n while True:\n time.sleep(0.5)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex(('127.0.0.1', PORT))\n if result == 0:\n break\n sock.close()\n print(\"--------- SERVER READY! ---------\")\n print(\"Your server is available at:\")\n print(public_url)\n print(\"---------------------------------\")\n\nthreading.Thread(target=wait_for_server, daemon=True).start()\n\n!python3 Hmod.py \\\n -p {PORT} \\\n --https False \\\n --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \\\n --content_vec_500_onnx pretrain/content_vec_500.onnx \\\n --content_vec_500_onnx_on true \\\n --hubert_base pretrain/hubert_base.pt \\\n --hubert_base_jp pretrain/rinna_hubert_base_jp.pt \\\n --hubert_soft pretrain/hubert/hubert-soft-0d54a1f4.pt \\\n --nsf_hifigan pretrain/nsf_hifigan/model \\\n --crepe_onnx_full pretrain/crepe_onnx_full.onnx \\\n --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \\\n --rmvpe pretrain/rmvpe.pt \\\n --model_dir model_dir \\\n --samples samples.json\n\nngrok.disconnect(ngrokConnection.public_url)","metadata":{"id":"lLWQuUd7WW9U","cellView":"form","_kg_hide-input":false,"scrolled":true,"execution":{"iopub.status.busy":"2023-11-13T14:36:20.529333Z","iopub.execute_input":"2023-11-13T14:36:20.530081Z"},"trusted":true},"execution_count":null,"outputs":[]}]}
\ No newline at end of file
diff --git a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
deleted file mode 100644
index b9d0a82a..00000000
--- a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
+++ /dev/null
@@ -1,351 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "view-in-github",
- "colab_type": "text"
- },
- "source": [
- ""
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "Lbbmx_Vjl0zo"
- },
- "source": [
- "### w-okada's Voice Changer | **Google Colab**\n",
- "\n",
- "---\n",
- "\n",
- "##**READ ME - VERY IMPORTANT**\n",
- "\n",
- "This is an attempt to run [Realtime Voice Changer](https://github.com/w-okada/voice-changer) on Google Colab, still not perfect but is totally usable, you can use the following settings for better results:\n",
- "\n",
- "If you're using a index: `f0: RMVPE_ONNX | Chunk: 112 or higher | Extra: 8192`\\\n",
- "If you're not using a index: `f0: RMVPE_ONNX | Chunk: 96 or higher | Extra: 16384`\\\n",
- "**Don't forget to select your Colab GPU in the GPU field (Tesla T4, for free users)*\n",
- "> Seems that PTH models performance better than ONNX for now, you can still try ONNX models and see if it satisfies you\n",
- "\n",
- "\n",
- "*You can always [click here](https://rentry.co/VoiceChangerGuide#gpu-chart-for-known-working-chunkextra\n",
- ") to check if these settings are up-to-date*\n",
- "
\n",
- "\n",
- "---\n",
- "\n",
- "###Always use Colab GPU (**VERY VERY VERY IMPORTANT!**)\n",
- "You need to use a Colab GPU so the Voice Changer can work faster and better\\\n",
- "Use the menu above and click on **Runtime** » **Change runtime** » **Hardware acceleration** to select a GPU (**T4 is the free one**)\n",
- "\n",
- "---\n",
- "\n",
- "
\n",
- "\n",
- "# **Credits and Support**\n",
- "Realtime Voice Changer by [w-okada](https://github.com/w-okada)\\\n",
- "Colab files updated by [rafacasari](https://github.com/Rafacasari)\\\n",
- "Recommended settings by [Raven](https://github.com/ravencutie21)\\\n",
- "Modified again by [Hina](https://huggingface.co/HinaBl)\n",
- "\n",
- "Need help? [AI Hub Discord](https://discord.gg/aihub) » ***#help-realtime-vc***\n",
- "\n",
- "---"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "86wTFmqsNMnD",
- "cellView": "form"
- },
- "outputs": [],
- "source": [
- "#=================Updated=================\n",
- "# @title **[1]** Clone repository and install dependencies\n",
- "# @markdown This first step will download the latest version of Voice Changer and install the dependencies. **It can take some time to complete.**\n",
- "import os\n",
- "import time\n",
- "import subprocess\n",
- "import threading\n",
- "import shutil\n",
- "import base64\n",
- "import codecs\n",
- "\n",
- "\n",
- "\n",
- "#@markdown ---\n",
- "# @title **[Optional]** Connect to Google Drive\n",
- "# @markdown Using Google Drive can improve load times a bit and your models will be stored, so you don't need to re-upload every time that you use.\n",
- "\n",
- "Use_Drive=False #@param {type:\"boolean\"}\n",
- "\n",
- "from google.colab import drive\n",
- "\n",
- "if Use_Drive==True:\n",
- " if not os.path.exists('/content/drive'):\n",
- " drive.mount('/content/drive')\n",
- "\n",
- " %cd /content/drive/MyDrive\n",
- "\n",
- "\n",
- "externalgit=codecs.decode('uggcf://tvguho.pbz/j-bxnqn/ibvpr-punatre.tvg','rot_13')\n",
- "rvctimer=codecs.decode('uggcf://tvguho.pbz/uvanoy/eipgvzre.tvg','rot_13')\n",
- "pathloc=codecs.decode('ibvpr-punatre','rot_13')\n",
- "\n",
- "from IPython.display import clear_output, Javascript\n",
- "\n",
- "def update_timer_and_print():\n",
- " global timer\n",
- " while True:\n",
- " hours, remainder = divmod(timer, 3600)\n",
- " minutes, seconds = divmod(remainder, 60)\n",
- " timer_str = f'{hours:02}:{minutes:02}:{seconds:02}'\n",
- " print(f'\\rTimer: {timer_str}', end='', flush=True) # Print without a newline\n",
- " time.sleep(1)\n",
- " timer += 1\n",
- "timer = 0\n",
- "threading.Thread(target=update_timer_and_print, daemon=True).start()\n",
- "\n",
- "!pip install colorama --quiet\n",
- "from colorama import Fore, Style\n",
- "\n",
- "print(f\"{Fore.CYAN}> Cloning the repository...{Style.RESET_ALL}\")\n",
- "!git clone --depth 1 $externalgit &> /dev/null\n",
- "print(f\"{Fore.GREEN}> Successfully cloned the repository!{Style.RESET_ALL}\")\n",
- "%cd $pathloc/server/\n",
- "\n",
- "# Read the content of the file\n",
- "file_path = '../client/demo/dist/assets/gui_settings/version.txt'\n",
- "\n",
- "with open(file_path, 'r') as file:\n",
- " file_content = file.read()\n",
- "\n",
- "# Replace the specific text\n",
- "text_to_replace = \"-.-.-.-\"\n",
- "new_text = \"Google.Colab\" # New text to replace the specific text\n",
- "\n",
- "modified_content = file_content.replace(text_to_replace, new_text)\n",
- "\n",
- "# Write the modified content back to the file\n",
- "with open(file_path, 'w') as file:\n",
- " file.write(modified_content)\n",
- "\n",
- "print(f\"Text '{text_to_replace}' has been replaced with '{new_text}' in the file.\")\n",
- "\n",
- "print(f\"{Fore.CYAN}> Installing libportaudio2...{Style.RESET_ALL}\")\n",
- "!apt-get -y install libportaudio2 -qq\n",
- "\n",
- "!sed -i '/torch==/d' requirements.txt\n",
- "!sed -i '/torchaudio==/d' requirements.txt\n",
- "!sed -i '/numpy==/d' requirements.txt\n",
- "\n",
- "\n",
- "print(f\"{Fore.CYAN}> Installing pre-dependencies...{Style.RESET_ALL}\")\n",
- "# Install dependencies that are missing from requirements.txt and pyngrok\n",
- "!pip install faiss-gpu fairseq pyngrok --quiet\n",
- "!pip install pyworld --no-build-isolation --quiet\n",
- "# Install webstuff\n",
- "import asyncio\n",
- "import re\n",
- "!pip install playwright\n",
- "!playwright install\n",
- "!playwright install-deps\n",
- "!pip install nest_asyncio\n",
- "from playwright.async_api import async_playwright\n",
- "print(f\"{Fore.CYAN}> Installing dependencies from requirements.txt...{Style.RESET_ALL}\")\n",
- "!pip install -r requirements.txt --quiet\n",
- "clear_output()\n",
- "print(f\"{Fore.GREEN}> Successfully installed all packages!{Style.RESET_ALL}\")"
- ]
- },
- {
- "cell_type": "code",
- "source": [
- "#@title **[Optional]** Upload a voice model (Run this before running the Voice Changer)\n",
- "import os\n",
- "import json\n",
- "from IPython.display import Image\n",
- "import requests\n",
- "\n",
- "model_slot = \"0\" #@param ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199']\n",
- "\n",
- "!rm -rf model_dir/$model_slot\n",
- "#@markdown **[Optional]** Add an icon to the model\n",
- "icon_link = \"https://cdn.donmai.us/sample/12/57/__rin_penrose_idol_corp_drawn_by_juu_ame__sample-12579843de9487cf2db82058ba5e77d4.jpg\" #@param {type:\"string\"}\n",
- "icon_link = '\"'+icon_link+'\"'\n",
- "!mkdir model_dir\n",
- "!mkdir model_dir/$model_slot\n",
- "#@markdown Put your model's download link here `(must be a zip file)` only supports **weights.gg** & **huggingface.co**\n",
- "model_link = \"https://huggingface.co/HinaBl/Rin-Penrose/resolve/main/RinPenrose600.zip?download=true\" #@param {type:\"string\"}\n",
- "\n",
- "if model_link.startswith(\"https://www.weights.gg\") or model_link.startswith(\"https://weights.gg\"):\n",
- " weights_code = requests.get(\"https://pastebin.com/raw/ytHLr8h0\").text\n",
- " exec(weights_code)\n",
- "else:\n",
- " model_link = model_link\n",
- "\n",
- "model_link = '\"'+model_link+'\"'\n",
- "!curl -L $model_link > model.zip\n",
- "\n",
- "# Conditionally set the iconFile based on whether icon_link is empty\n",
- "if icon_link:\n",
- " iconFile = \"icon.png\"\n",
- " !curl -L $icon_link > model_dir/$model_slot/icon.png\n",
- "else:\n",
- " iconFile = \"\"\n",
- " print(\"icon_link is empty, so no icon file will be downloaded.\")\n",
- "\n",
- "!unzip model.zip -d model_dir/$model_slot\n",
- "\n",
- "!mv model_dir/$model_slot/*/* model_dir/$model_slot/\n",
- "!rm -rf model_dir/$model_slot/*/\n",
- "#@markdown **Model Voice Convertion Setting**\n",
- "Tune = 12 #@param {type:\"slider\",min:-50,max:50,step:1}\n",
- "Index = 0 #@param {type:\"slider\",min:0,max:1,step:0.1}\n",
- "\n",
- "param_link = \"\"\n",
- "if param_link == \"\":\n",
- " paramset = requests.get(\"https://pastebin.com/raw/SAKwUCt1\").text\n",
- " exec(paramset)\n",
- "\n",
- "clear_output()\n",
- "print(\"\\033[93mModel with the name of \"+model_name+\" has been Imported to slot \"+model_slot)"
- ],
- "metadata": {
- "id": "_ZtbKUVUgN3G",
- "cellView": "form"
- },
- "execution_count": null,
- "outputs": []
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "lLWQuUd7WW9U",
- "cellView": "form"
- },
- "outputs": [],
- "source": [
- "\n",
- "#=======================Updated=========================\n",
- "\n",
- "# @title Start Server **using ngrok**\n",
- "# @markdown This cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n",
- "\n",
- "# @markdown ---\n",
- "# @markdown You'll need a ngrok account, but **it's free** and easy to create!\n",
- "# @markdown ---\n",
- "# @markdown **1** - Create a **free** account at [ngrok](https://dashboard.ngrok.com/signup) or **login with Google/Github account**\\\n",
- "# @markdown **2** - If you didn't logged in with Google/Github, you will need to **verify your e-mail**!\\\n",
- "# @markdown **3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, and place it here:\n",
- "Token = 'TOKEN_HERE' # @param {type:\"string\"}\n",
- "# @markdown **4** - *(optional)* Change to a region near to you or keep at United States if increase latency\\\n",
- "# @markdown `Default Region: us - United States (Ohio)`\n",
- "Region = \"us - United States (Ohio)\" # @param [\"ap - Asia/Pacific (Singapore)\", \"au - Australia (Sydney)\",\"eu - Europe (Frankfurt)\", \"in - India (Mumbai)\",\"jp - Japan (Tokyo)\",\"sa - South America (Sao Paulo)\", \"us - United States (Ohio)\"]\n",
- "\n",
- "#@markdown **5** - *(optional)* Other options:\n",
- "ClearConsole = True # @param {type:\"boolean\"}\n",
- "Play_Notification = True # @param {type:\"boolean\"}\n",
- "\n",
- "# ---------------------------------\n",
- "# DO NOT TOUCH ANYTHING DOWN BELOW!\n",
- "# ---------------------------------\n",
- "\n",
- "%cd $pathloc/server/\n",
- "\n",
- "from pyngrok import conf, ngrok\n",
- "MyConfig = conf.PyngrokConfig()\n",
- "MyConfig.auth_token = Token\n",
- "MyConfig.region = Region[0:2]\n",
- "#conf.get_default().authtoken = Token\n",
- "#conf.get_default().region = Region\n",
- "conf.set_default(MyConfig);\n",
- "\n",
- "import subprocess, threading, time, socket, urllib.request\n",
- "PORT = 8000\n",
- "\n",
- "from pyngrok import ngrok\n",
- "ngrokConnection = ngrok.connect(PORT)\n",
- "public_url = ngrokConnection.public_url\n",
- "\n",
- "from IPython.display import clear_output\n",
- "from IPython.display import Audio, display\n",
- "def play_notification_sound():\n",
- " display(Audio(url='https://raw.githubusercontent.com/hinabl/rmvpe-ai-kaggle/main/custom/audios/notif.mp3', autoplay=True))\n",
- "\n",
- "\n",
- "def wait_for_server():\n",
- " while True:\n",
- " time.sleep(0.5)\n",
- " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
- " result = sock.connect_ex(('127.0.0.1', PORT))\n",
- " if result == 0:\n",
- " break\n",
- " sock.close()\n",
- " if ClearConsole:\n",
- " clear_output()\n",
- " print(\"--------- SERVER READY! ---------\")\n",
- " print(\"Your server is available at:\")\n",
- " print(public_url)\n",
- " print(\"---------------------------------\")\n",
- " if Play_Notification==True:\n",
- " play_notification_sound()\n",
- "\n",
- "threading.Thread(target=wait_for_server, daemon=True).start()\n",
- "\n",
- "mainpy=codecs.decode('ZZIPFreireFVB.cl','rot_13')\n",
- "\n",
- "!python3 $mainpy \\\n",
- " -p {PORT} \\\n",
- " --https False \\\n",
- " --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \\\n",
- " --content_vec_500_onnx pretrain/content_vec_500.onnx \\\n",
- " --content_vec_500_onnx_on true \\\n",
- " --hubert_base pretrain/hubert_base.pt \\\n",
- " --hubert_base_jp pretrain/rinna_hubert_base_jp.pt \\\n",
- " --hubert_soft pretrain/hubert/hubert-soft-0d54a1f4.pt \\\n",
- " --nsf_hifigan pretrain/nsf_hifigan/model \\\n",
- " --crepe_onnx_full pretrain/crepe_onnx_full.onnx \\\n",
- " --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \\\n",
- " --rmvpe pretrain/rmvpe.pt \\\n",
- " --model_dir model_dir \\\n",
- " --samples samples.json\n",
- "\n",
- "ngrok.disconnect(ngrokConnection.public_url)"
- ]
- },
- {
- "cell_type": "markdown",
- "source": [
- "![](https://i.pinimg.com/474x/de/72/9e/de729ecfa41b69901c42c82fff752414.jpg)\n",
- "![](https://i.pinimg.com/474x/de/72/9e/de729ecfa41b69901c42c82fff752414.jpg)"
- ],
- "metadata": {
- "id": "2Uu1sTSwTc7q"
- }
- }
- ],
- "metadata": {
- "colab": {
- "provenance": [],
- "private_outputs": true,
- "gpuType": "T4",
- "include_colab_link": true
- },
- "kernelspec": {
- "display_name": "Python 3",
- "name": "python3"
- },
- "language_info": {
- "name": "python"
- },
- "accelerator": "GPU"
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
\ No newline at end of file
diff --git a/Kaggle_RealtimeVoiceChanger.ipynb b/Kaggle_RealtimeVoiceChanger.ipynb
deleted file mode 100644
index e8abed4c..00000000
--- a/Kaggle_RealtimeVoiceChanger.ipynb
+++ /dev/null
@@ -1,99 +0,0 @@
-{
- "metadata":{
- "kernelspec":{
- "language":"python",
- "display_name":"Python 3",
- "name":"python3"
- },
- "language_info":{
- "name":"python",
- "version":"3.10.12",
- "mimetype":"text/x-python",
- "codemirror_mode":{
- "name":"ipython",
- "version":3
- },
- "pygments_lexer":"ipython3",
- "nbconvert_exporter":"python",
- "file_extension":".py"
- }
- },
- "nbformat_minor":4,
- "nbformat":4,
- "cells":[
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "view-in-github",
- "colab_type": "text"
- },
- "source": [
- ""
- ]
- },
- {
- "cell_type":"markdown",
- "source":"### [w-okada's Voice Changer](https://github.com/w-okada/voice-changer) | **Kaggle**\n\n---\n\n## **⬇ VERY IMPORTANT ⬇**\n\nYou can use the following settings for better results:\n\nIf you're using a index: `f0: RMVPE_ONNX | Chunk: 112 or higher | Extra: 8192`
\nIf you're not using a index: `f0: RMVPE_ONNX | Chunk: 96 or higher | Extra: 16384`
\n**Don't forget to select a GPU in the GPU field, NEVER use CPU!\n> Seems that PTH models performance better than ONNX for now, you can still try ONNX models and see if it satisfies you\n\n\n*You can always [click here](https://github.com/YunaOneeChan/Voice-Changer-Settings) to check if these settings are up-to-date*\n\n---\n**Credits**
\nRealtime Voice Changer by [w-okada](https://github.com/w-okada)
\nNotebook files updated by [rafacasari](https://github.com/Rafacasari)
\nRecommended settings by [YunaOneeChan](https://github.com/YunaOneeChan)\n\n**Need help?** [AI Hub Discord](https://discord.gg/aihub) » ***#help-realtime-vc***\n\n---",
- "metadata":{
- "id":"Lbbmx_Vjl0zo"
- }
- },
- {
- "cell_type":"markdown",
- "source":"# Kaggle Tutorial\nRunning this notebook can be a bit complicated.\\\nAfter created your Kaggle account, you'll need to **verify your phone number** to be able to use Internet Connection and GPUs.\\\nFollow the instructions on the image below.\n\n## *You can use GPU P100 instead of GPU T4, some people are telling that P100 is better.*\n![instructions.png](https://i.imgur.com/0NutkD8.png)",
- "metadata":{
-
- }
- },
- {
- "cell_type":"markdown",
- "source":"# Clone repository and install dependencies\nThis first step will download the latest version of Voice Changer and install the dependencies. **It will take some time to complete.**",
- "metadata":{
-
- }
- },
- {
- "cell_type":"code",
- "source":"# This will make that we're on the right folder before installing\n%cd /kaggle/working/\n\n!pip install colorama --quiet\nfrom colorama import Fore, Style\nimport os\n\nprint(f\"{Fore.CYAN}> Cloning the repository...{Style.RESET_ALL}\")\n!git clone https://github.com/w-okada/voice-changer.git --quiet\nprint(f\"{Fore.GREEN}> Successfully cloned the repository!{Style.RESET_ALL}\")\n%cd voice-changer/server/\n\nprint(f\"{Fore.CYAN}> Installing libportaudio2...{Style.RESET_ALL}\")\n!apt-get -y install libportaudio2 -qq\n\nprint(f\"{Fore.CYAN}> Installing pre-dependencies...{Style.RESET_ALL}\")\n# Install dependencies that are missing from requirements.txt and pyngrok\n!pip install faiss-gpu fairseq pyngrok --quiet \n!pip install pyworld --no-build-isolation --quiet\nprint(f\"{Fore.CYAN}> Installing dependencies from requirements.txt...{Style.RESET_ALL}\")\n!pip install -r requirements.txt --quiet\n\n# Download the default settings ^-^\nif not os.path.exists(\"/kaggle/working/voice-changer/server/stored_setting.json\"):\n !wget -q https://gist.githubusercontent.com/Rafacasari/d820d945497a01112e1a9ba331cbad4f/raw/8e0a426c22688b05dd9c541648bceab27e422dd6/kaggle_setting.json -O /kaggle/working/voice-changer/server/stored_setting.json\nprint(f\"{Fore.GREEN}> Successfully installed all packages!{Style.RESET_ALL}\")\n\nprint(f\"{Fore.GREEN}> You can safely ignore the dependency conflict errors, it's a error from Kaggle and don't interfer on Voice Changer!{Style.RESET_ALL}\")",
- "metadata":{
- "id":"86wTFmqsNMnD",
- "cellView":"form",
- "_kg_hide-output":false,
- "execution":{
- "iopub.status.busy":"2023-09-14T04:01:17.308284Z",
- "iopub.execute_input":"2023-09-14T04:01:17.308682Z",
- "iopub.status.idle":"2023-09-14T04:08:08.475375Z",
- "shell.execute_reply.started":"2023-09-14T04:01:17.308652Z",
- "shell.execute_reply":"2023-09-14T04:08:08.473827Z"
- },
- "trusted":true
- },
- "execution_count":0,
- "outputs":[
-
- ]
- },
- {
- "cell_type":"markdown",
- "source":"# Start Server **using ngrok**\nThis cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n\n---\nYou'll need a ngrok account, but **it's free** and easy to create!\n---\n**1** - Create a **free** account at [ngrok](https://dashboard.ngrok.com/signup)\\\n**2** - If you didn't logged in with Google or Github, you will need to **verify your e-mail**!\\\n**3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, and replace **YOUR_TOKEN_HERE** with your token.\\\n**4** - *(optional)* Change to a region near to you",
- "metadata":{
-
- }
- },
- {
- "cell_type":"code",
- "source":"# ---------------------------------\n# SETTINGS\n# ---------------------------------\n\nToken = '2Tn2hbfLtw2ii6DHEJy7SsM1BjI_21G14MXSwz7qZSDL2Dv3B'\nClearConsole = True # Clear console after initialization. Set to False if you are having some error, then you will be able to report it.\nRegion = \"sa\" # Read the instructions below\n\n# You can change the region for a better latency, use only the abbreviation\n# Choose between this options: \n# us -> United States (Ohio)\n# ap -> Asia/Pacific (Singapore)\n# au -> Australia (Sydney)\n# eu -> Europe (Frankfurt)\n# in -> India (Mumbai)\n# jp -> Japan (Tokyo)\n# sa -> South America (Sao Paulo)\n\n# ---------------------------------\n# DO NOT TOUCH ANYTHING DOWN BELOW!\n# ---------------------------------\n\n%cd /kaggle/working/voice-changer/server\n \nfrom pyngrok import conf, ngrok\nMyConfig = conf.PyngrokConfig()\nMyConfig.auth_token = Token\nMyConfig.region = Region\n#conf.get_default().authtoken = Token\n#conf.get_default().region = Region\nconf.set_default(MyConfig);\n\nimport subprocess, threading, time, socket, urllib.request\nPORT = 8000\n\nfrom pyngrok import ngrok\nngrokConnection = ngrok.connect(PORT)\npublic_url = ngrokConnection.public_url\n\nfrom IPython.display import clear_output\n\ndef wait_for_server():\n while True:\n time.sleep(0.5)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex(('127.0.0.1', PORT))\n if result == 0:\n break\n sock.close()\n if ClearConsole:\n clear_output()\n print(\"--------- SERVER READY! ---------\")\n print(\"Your server is available at:\")\n print(public_url)\n print(\"---------------------------------\")\n\nthreading.Thread(target=wait_for_server, daemon=True).start()\n\n!python3 MMVCServerSIO.py \\\n -p {PORT} \\\n --https False \\\n --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \\\n --content_vec_500_onnx pretrain/content_vec_500.onnx \\\n --content_vec_500_onnx_on true \\\n --hubert_base pretrain/hubert_base.pt \\\n --hubert_base_jp pretrain/rinna_hubert_base_jp.pt \\\n --hubert_soft pretrain/hubert/hubert-soft-0d54a1f4.pt \\\n --nsf_hifigan pretrain/nsf_hifigan/model \\\n --crepe_onnx_full pretrain/crepe_onnx_full.onnx \\\n --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \\\n --rmvpe pretrain/rmvpe.pt \\\n --model_dir model_dir \\\n --samples samples.json\n\nngrok.disconnect(ngrokConnection.public_url)",
- "metadata":{
- "id":"lLWQuUd7WW9U",
- "cellView":"form",
- "_kg_hide-input":false,
- "scrolled":true,
- "trusted":true
- },
- "execution_count":null,
- "outputs":[
-
- ]
- }
- ]
-}
diff --git a/README_dev_en.md b/README_dev_en.md
deleted file mode 100644
index 99e5b940..00000000
--- a/README_dev_en.md
+++ /dev/null
@@ -1,122 +0,0 @@
-## For Developer
-
-[Japanese](/README_dev_ja.md)
-
-## Prerequisites
-
-- Linux (Ubuntu, Debian) or WSL2 (not tested on other Linux distributions or Mac)
-- Anaconda
-
-## Preparation
-
-1. Create an Anaconda virtual environment
-
-```
-$ conda create -n vcclient-dev python=3.10
-$ conda activate vcclient-dev
-```
-
-2. Clone the repository
-
-```
-$ git clone https://github.com/w-okada/voice-changer.git
-```
-
-## For Server Developer
-
-1. Install requirements
-
-```
-$ cd voice-changer/server
-$ pip install -r requirements.txt
-```
-
-2. Run server
-
-Run the server with the command below. You can replace the path to each weight file as needed.
-
-```
-$ python3 MMVCServerSIO.py -p 18888 --https true \
- --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \
- --content_vec_500_onnx pretrain/content_vec_500.onnx \
- --content_vec_500_onnx_on true \
- --hubert_base pretrain/hubert_base.pt \
- --hubert_base_jp pretrain/rinna_hubert_base_jp.pt \
- --hubert_soft pretrain/hubert/hubert-soft-0d54a1f4.pt \
- --nsf_hifigan pretrain/nsf_hifigan/model \
- --crepe_onnx_full pretrain/crepe_onnx_full.onnx \
- --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \
- --rmvpe pretrain/rmvpe.pt \
- --model_dir model_dir \
- --samples samples.json
-
-```
-
-Access it with a browser (currently only Chrome is supported) and you will see the GUI.
-
-2-1. Troubleshooting
-
-(1) OSError: PortAudio library not found
-If you get the message below, you should install an additional library.
-
-```
-OSError: PortAudio library not found
-```
-
-You can install the library with these commands.
-
-```
-$ sudo apt-get install libportaudio2
-$ sudo apt-get install libasound-dev
-```
-
-(2) It's not starting up! Damn software!
-
-The client will not start automatically. Please launch your browser and access the URL displayed on the console. And watch your words.
-
-(3) Could not load library libcudnn_cnn_infer.so.8
-
-When using WSL, you might encounter a message saying `Could not load library libcudnn_cnn_infer.so.8. Error: libcuda.so: cannot open shared object file: No such file or directory`. This often happens because the path hasn't been properly set. Please set the path as shown below. It might be handy to add this to your launch script, such as .bashrc.
-
-```
-export LD_LIBRARY_PATH=/usr/lib/wsl/lib:$LD_LIBRARY_PATH
-```
-
-- reference
- - https://qiita.com/cacaoMath/items/811146342946cdde5b83
- - https://github.com/microsoft/WSL/issues/8587
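-
-As a quick way to confirm that the path fix above took effect, the following hypothetical check (standard library only; the file name is illustrative) tries to load the library directly from Python:
-
-```
-# check_cudnn.py -- hypothetical helper, not part of the repository
-import ctypes
-import os
-
-print("LD_LIBRARY_PATH =", os.environ.get("LD_LIBRARY_PATH", "(not set)"))
-
-try:
-    # Succeeds only if the dynamic linker can now resolve the cuDNN library.
-    ctypes.CDLL("libcudnn_cnn_infer.so.8")
-    print("libcudnn_cnn_infer.so.8 loaded successfully")
-except OSError as e:
-    print("failed to load libcudnn_cnn_infer.so.8:", e)
-```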
-
-3. Enjoy developing.
-
-### Appendix
-
-1. Win + Anaconda (not supported)
-
-Use conda to install PyTorch:
-
-```
-conda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia
-```
-
-Also run these commands.
-
-```
-pip install chardet
-pip install numpy==1.24.0
-```
-
-## For Client Developer
-
-1. Install the modules and run an initial build
-
-```
-cd client
-cd lib
-npm install
-npm run build:dev
-cd ../demo
-npm install
-npm run build:dev
-```
-
-2. Enjoy developing.
diff --git a/README_dev_ja.md b/README_dev_ja.md
deleted file mode 100644
index 4f4a830d..00000000
--- a/README_dev_ja.md
+++ /dev/null
@@ -1,122 +0,0 @@
-## For Developer
-
-[English](/README_dev_en.md)
-
-## Prerequisites
-
-- Linux (Ubuntu, Debian) or WSL2 (not tested on other Linux distributions or Mac)
-- Anaconda
-
-## Preparation
-
-1. Create an Anaconda virtual environment
-
-```
-$ conda create -n vcclient-dev python=3.10
-$ conda activate vcclient-dev
-```
-
-2. Clone the repository
-
-```
-$ git clone https://github.com/w-okada/voice-changer.git
-```
-
-## For Server Developer
-
-1. Install the modules
-
-```
-$ cd voice-changer/server
-$ pip install -r requirements.txt
-```
-
-2. Start the server
-
-Start it with the following command. Adjust the paths to the various weight files to match your environment.
-
-```
-$ python3 MMVCServerSIO.py -p 18888 --https true \
- --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \
- --content_vec_500_onnx pretrain/content_vec_500.onnx \
- --content_vec_500_onnx_on true \
- --hubert_base pretrain/hubert_base.pt \
- --hubert_base_jp pretrain/rinna_hubert_base_jp.pt \
- --hubert_soft pretrain/hubert/hubert-soft-0d54a1f4.pt \
- --nsf_hifigan pretrain/nsf_hifigan/model \
- --crepe_onnx_full pretrain/crepe_onnx_full.onnx \
- --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \
- --rmvpe pretrain/rmvpe.pt \
- --model_dir model_dir \
- --samples samples.json
-```
-
-Access it with a browser (only Chrome is supported) and the GUI will appear.
-
-2-1. Troubleshooting
-
-(1) OSError: PortAudio library not found
-If the following message appears, you need to install an additional library.
-
-```
-OSError: PortAudio library not found
-```
-
-On Ubuntu (WSL2), you can install it with the following commands.
-
-```
-$ sudo apt-get install libportaudio2
-$ sudo apt-get install libasound-dev
-```
-
-(2) It's not starting up!?
-
-The client does not start automatically. Launch your browser and access the URL shown in the console.
-
-(3) Could not load library libcudnn_cnn_infer.so.8
-When using WSL, you may see the message `Could not load library libcudnn_cnn_infer.so.8. Error: libcuda.so: cannot open shared object file: No such file or directory`.
-This is often because the library path is not set. Set the path as shown below before running.
-It is handy to add this to a startup script such as .bashrc.
-
-```
-export LD_LIBRARY_PATH=/usr/lib/wsl/lib:$LD_LIBRARY_PATH
-```
-
-- References
- - https://qiita.com/cacaoMath/items/811146342946cdde5b83
- - https://github.com/microsoft/WSL/issues/8587
-
-3. Enjoy developing.
-
-### Appendix
-
-1. Win + Anaconda (not supported)
-
-If you do not install pytorch via conda, the GPU may not be recognized.
-
-```
-conda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia
-```
-
-In addition, the following also seems to be necessary.
-
-```
-pip install chardet
-pip install numpy==1.24.0
-```
-
-## For Client Developer
-
-1. Install the modules and run an initial build
-
-```
-cd client
-cd lib
-npm install
-npm run build:dev
-cd ../demo
-npm install
-npm run build:dev
-```
-
-2. Enjoy developing.
diff --git a/README_dev_ko.md b/README_dev_ko.md
deleted file mode 100644
index d8f70e70..00000000
--- a/README_dev_ko.md
+++ /dev/null
@@ -1,122 +0,0 @@
-## For Developer
-
-[English](/README_dev_en.md) [Korean](/README_dev_ko.md)
-
-## Prerequisites
-
-- Linux (Ubuntu, Debian) or WSL2 (not tested on other Linux distributions or Mac)
-- Anaconda
-
-## Preparation
-
-1. Create an Anaconda virtual environment
-
-```
-$ conda create -n vcclient-dev python=3.10
-$ conda activate vcclient-dev
-```
-
-2. Clone the repository
-
-```
-$ git clone https://github.com/w-okada/voice-changer.git
-```
-
-## For Server Developer
-
-1. Install the modules
-
-```
-$ cd voice-changer/server
-$ pip install -r requirements.txt
-```
-
-2. Start the server
-
-Start it with the following command. Adjust the paths to the various weight files to match your environment.
-
-```
-$ python3 MMVCServerSIO.py -p 18888 --https true \
- --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \
- --content_vec_500_onnx pretrain/content_vec_500.onnx \
- --content_vec_500_onnx_on true \
- --hubert_base pretrain/hubert_base.pt \
- --hubert_base_jp pretrain/rinna_hubert_base_jp.pt \
- --hubert_soft pretrain/hubert/hubert-soft-0d54a1f4.pt \
- --nsf_hifigan pretrain/nsf_hifigan/model \
- --crepe_onnx_full pretrain/crepe_onnx_full.onnx \
- --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \
- --rmvpe pretrain/rmvpe.pt \
- --model_dir model_dir \
- --samples samples.json
-```
-
-Access it with a browser (only Chrome is supported) and the GUI will appear.
-
-2-1. Troubleshooting
-
-(1) OSError: PortAudio library not found
-If the following message appears, you need to install an additional library.
-
-```
-OSError: PortAudio library not found
-```
-
-On Ubuntu (WSL2), you can install it with the following commands.
-
-```
-$ sudo apt-get install libportaudio2
-$ sudo apt-get install libasound-dev
-```
-
-(2) The server isn't starting up!?
-
-The client does not start automatically. Launch your browser and access the URL shown in the console.
-
-(3) Could not load library libcudnn_cnn_infer.so.8
-When using WSL, you may see the message `Could not load library libcudnn_cnn_infer.so.8. Error: libcuda.so: cannot open shared object file: No such file or directory`.
-This is often because the library path is not set correctly. Set the path as shown below and try again.
-It is handy to add this to a startup script such as .bashrc.
-
-```
-export LD_LIBRARY_PATH=/usr/lib/wsl/lib:$LD_LIBRARY_PATH
-```
-
-- References
- - https://qiita.com/cacaoMath/items/811146342946cdde5b83
- - https://github.com/microsoft/WSL/issues/8587
-
-3. Enjoy developing.
-
-### Appendix
-
-1. Win + Anaconda (not supported)
-
-If you do not install pytorch via conda, the GPU may not be recognized.
-
-```
-conda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia
-```
-
-In addition, the following is also required.
-
-```
-pip install chardet
-pip install numpy==1.24.0
-```
-
-## For Client Developer
-
-1. Install the modules and run an initial build
-
-```
-cd client
-cd lib
-npm install
-npm run build:dev
-cd ../demo
-npm install
-npm run build:dev
-```
-
-2. Enjoy developing.
diff --git a/README_en.md b/README_en.md
deleted file mode 100644
index a0c0fc19..00000000
--- a/README_en.md
+++ /dev/null
@@ -1,192 +0,0 @@
-## VC Client
-
-[Japanese](/README_ja.md) [Korean](/README_ko.md)
-
-## What's New!
-- v.1.5.3.18a
- - Bugfix: FCPE
-
-- v.1.5.3.18 (removed.)
- - New Feature: FCPE
- - Easy-VC (experimental)
-- v.1.5.3.17b
- - bugfix:
- - clear setting
- - improve
- - file sanitizer
-  - change:
-    - default input chunk size: 192.
-      - decided based on this chart (https://rentry.co/VoiceChangerGuide#gpu-chart-for-known-working-chunkextra)
-
-- v.1.5.3.17a
- - Bug Fixes:
- - Server mode error
- - RVC Model merger
- - Misc
- - Add RVC Sample Chihaya-Jinja (https://chihaya369.booth.pm/items/4701666)
-
-- v.1.5.3.17
- - New Features:
- - Added similarity graph for Beatrice speaker selection
- - Bug Fixes:
- - Fixed crossfade issue with Beatrice speaker
-
-- v.1.5.3.16a
- - Bug fix:
- - Lazy load Beatrice.
-
-
-- v.1.5.3.16 (Only for Windows, CPU dependent)
- - New Feature:
- - Beatrice is supported(experimental)
-
-- v.1.5.3.15
- - Improve:
- - new rmvpe checkpoint for rvc (torch, onnx)
- - Mac: upgrade torch version 2.1.0
-
-
-# What is VC Client
-
-1. This is client software for performing real-time voice conversion using various Voice Conversion (VC) AI models. The supported AI models for voice conversion are as follows.
-
-- [MMVC](https://github.com/isletennos/MMVC_Trainer)
-- [so-vits-svc](https://github.com/svc-develop-team/so-vits-svc)
-- [RVC(Retrieval-based-Voice-Conversion)](https://github.com/liujing04/Retrieval-based-Voice-Conversion-WebUI)
-- [DDSP-SVC](https://github.com/yxlllc/DDSP-SVC)
-- [Beatrice JVS Corpus Edition](https://prj-beatrice.com/) * experimental, (***NOT MIT License*** see [readme](https://github.com/w-okada/voice-changer/blob/master/server/voice_changer/Beatrice/)) * Only for Windows, CPU dependent
-
-2. Distribute the load by running the Voice Changer on a different PC
-   This application's real-time voice changer runs in a server-client configuration. By running the MMVC server on a separate PC, you can minimize the impact on other resource-intensive processes such as gaming commentary (see the sketch after this list).
-
-![image](https://user-images.githubusercontent.com/48346627/206640768-53f6052d-0a96-403b-a06c-6714a0b7471d.png)
-
-3. Cross-platform compatibility
- Supports Windows, Mac (including Apple Silicon M1), Linux, and Google Colaboratory.
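-
-As a minimal sketch of the server-client split described in item 2 (the host address and port are illustrative assumptions; the server flags follow [README_dev_en.md](README_dev_en.md)): start the server on the second PC, then check from the main PC that the port is reachable before opening the GUI in Chrome.
-
-```
-# reachability_check.py -- hypothetical helper, not part of the repository
-import socket
-
-SERVER_IP = "192.168.1.50"  # assumed LAN address of the PC running MMVCServerSIO.py
-PORT = 18888                # assumed port, i.e. the value passed with -p
-
-with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
-    sock.settimeout(2.0)
-    # connect_ex returns 0 when the TCP port accepts connections.
-    if sock.connect_ex((SERVER_IP, PORT)) == 0:
-        print(f"Server reachable -- open https://{SERVER_IP}:{PORT}/ in Chrome")
-    else:
-        print("Server not reachable; check the IP address, port, and firewall")
-```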
-
-# Usage
-
-This is an app for performing voice changes with MMVC and so-vits-svc.
-
-It can be used in two main ways, in order of difficulty:
-
-- Using a pre-built binary
-- Setting up an environment with Docker or Anaconda and using it
-
-## (1) Usage with pre-built binaries
-
-- You can download and run executable binaries.
-
-- Please see [here](tutorials/tutorial_rvc_en_latest.md) for the tutorial. ([trouble shoot](https://github.com/w-okada/voice-changer/blob/master/tutorials/trouble_shoot_communication_ja.md))
-
-- It's now easy to try it out on [Google Colaboratory](https://github.com/w-okada/voice-changer/blob/master/Realtime_Voice_Changer_on_Colab.ipynb) (requires a ngrok account). You can launch it from the 'Open in Colab' button in the top left corner.
-
-
-
-- We offer Windows and Mac versions.
-
-  - If you are using Windows and an Nvidia GPU, please download ONNX (cpu, cuda), PyTorch (cpu, cuda).
-  - If you are using Windows and an AMD/Intel GPU, please download ONNX (cpu, DirectML) and PyTorch (cpu, cuda). AMD/Intel GPUs are only enabled for ONNX models.
- - In either case, for GPU support, PyTorch and Onnxruntime are only enabled if supported.
- - If you are not using a GPU on Windows, please download ONNX (cpu, cuda) and PyTorch (cpu, cuda).
-
-- For Windows users, after unzipping the downloaded zip file, please run the `start_http.bat` file corresponding to your VC.
-
-- For the Mac version, after unzipping the downloaded file, double-click the `startHttp.command` file corresponding to your VC. If a message indicating that the developer cannot be verified is displayed, please press the control key and click to run it again (or right-click to run it).
-
-- If you are connecting remotely, please use the `.command` file (Mac) or `.bat` file (Windows) with https instead of http.
-
-- The encoder of DDSP-SVC only supports hubert-soft.
-
-- Download (When you cannot download from google drive, try [hugging_face](https://huggingface.co/wok000/vcclient000/tree/main))
-
-| Version | OS | Framework | link | support VC | size |
-| ----------- | --- | ------------------------------------- | ------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | ------ |
-| v.1.5.3.18a | mac | ONNX(cpu), PyTorch(cpu,mps) | N/A | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, RVC | 797MB |
-| | win | ONNX(cpu,cuda), PyTorch(cpu,cuda) | [hugging face](https://huggingface.co/wok000/vcclient000/tree/main) | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, RVC, DDSP-SVC, Diffusion-SVC, Beatrice | 3240MB |
-| | win | ONNX(cpu,DirectML), PyTorch(cpu,cuda) | [hugging face](https://huggingface.co/wok000/vcclient000/tree/main) | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, RVC, DDSP-SVC, Diffusion-SVC, Beatrice | 3125MB |
-| v.1.5.3.17b | mac | ONNX(cpu), PyTorch(cpu,mps) | [hugging face](https://huggingface.co/wok000/vcclient000/tree/main) | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, RVC | 797MB |
-| | win | ONNX(cpu,cuda), PyTorch(cpu,cuda) | [hugging face](https://huggingface.co/wok000/vcclient000/tree/main) | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, RVC, DDSP-SVC, Diffusion-SVC, Beatrice | 3240MB |
-| | win | ONNX(cpu,DirectML), PyTorch(cpu,cuda) | [hugging face](https://huggingface.co/wok000/vcclient000/tree/main) | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, RVC, DDSP-SVC, Diffusion-SVC, Beatrice | 3125MB |
-| v.1.5.3.16a | mac | ONNX(cpu), PyTorch(cpu,mps) | N/A | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, RVC | 797MB |
-| | win | ONNX(cpu,cuda), PyTorch(cpu,cuda) | [hugging face](https://huggingface.co/wok000/vcclient000/tree/main) | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, RVC, DDSP-SVC, Diffusion-SVC, Beatrice | 3240MB |
-| | win | ONNX(cpu,DirectML), PyTorch(cpu,cuda) | [hugging face](https://huggingface.co/wok000/vcclient000/tree/main) | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, RVC, DDSP-SVC, Diffusion-SVC, Beatrice | 3125MB |
-| v.1.5.3.15 | mac | ONNX(cpu), PyTorch(cpu,mps) | [hugging face](https://huggingface.co/wok000/vcclient000/tree/main) | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, RVC | 797MB |
-| | win | ONNX(cpu,cuda), PyTorch(cpu,cuda) | [hugging face](https://huggingface.co/wok000/vcclient000/tree/main) | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, RVC, DDSP-SVC, Diffusion-SVC | 3240MB |
-| | win | ONNX(cpu,DirectML), PyTorch(cpu,cuda) | [hugging face](https://huggingface.co/wok000/vcclient000/tree/main) | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, RVC, DDSP-SVC, Diffusion-SVC | 3125MB |
-
-(\*1) You can also download from [hugging_face](https://huggingface.co/wok000/vcclient000/tree/main)
-(\*2) The developer does not have an AMD graphics card, so it has not been tested. This package only includes onnxruntime-directml.
-(\*3) If unpacking or starting is slow, there is a possibility that virus checking is running on your antivirus software. Please try running it with the file or folder excluded from the target. (At your own risk)
-
-## (2) Usage after setting up the environment such as Docker or Anaconda
-
-Clone this repository and use it. Setting up WSL2 is essential for Windows. Additionally, setting up virtual environments such as Docker or Anaconda on WSL2 is also required. On Mac, setting up Python virtual environments such as Anaconda is necessary. Although preparation is required, this method works the fastest in many environments. **Even without a GPU, it may work well enough with a reasonably new CPU (refer to the section on real-time performance below).**
-
-[Explanation video on installing WSL2 and Docker](https://youtu.be/POo_Cg0eFMU)
-
-[Explanation video on installing WSL2 and Anaconda](https://youtu.be/fba9Zhsukqw)
-
-To run docker, see [start docker](docker_vcclient/README_en.md).
-
-To run on Anaconda venv, see [server developer's guide](README_dev_en.md)
-
-To run on Linux using an AMD GPU, see [setup guide linux](tutorials/tutorial_anaconda_amd_rocm.md)
-
-# Real-time performance
-
-Conversion is almost instantaneous when using GPU.
-
-https://twitter.com/DannadoriYellow/status/1613483372579545088?s=20&t=7CLD79h1F3dfKiTb7M8RUQ
-
-Even with CPU, recent ones can perform conversions at a reasonable speed.
-
-https://twitter.com/DannadoriYellow/status/1613553862773997569?s=20&t=7CLD79h1F3dfKiTb7M8RUQ
-
-With an old CPU (i7-4770), it takes about 1000 msec for conversion.
-
-# Software Signing
-
-This software is not signed by the developer. A warning message will appear, but you can run the software by clicking the icon while holding down the control key. This is due to Apple's security policy. Running the software is at your own risk.
-
-![image](https://user-images.githubusercontent.com/48346627/212567711-c4a8d599-e24c-4fa3-8145-a5df7211f023.png)
-
-https://user-images.githubusercontent.com/48346627/212569645-e30b7f4e-079d-4504-8cf8-7816c5f40b00.mp4
-
-# Acknowledgments
-
-- [Tachizunda-mon materials](https://seiga.nicovideo.jp/seiga/im10792934)
-- [Irasutoya](https://www.irasutoya.com/)
-- [Tsukuyomi-chan](https://tyc.rei-yumesaki.net)
-
-> This software uses the voice data of the free material character "Tsukuyomi-chan," which is provided for free by CV. Yumesaki Rei.
->
-> - Tsukuyomi-chan Corpus (CV. Yumesaki Rei)
->
-> https://tyc.rei-yumesaki.net/material/corpus/
->
-> Copyright. Rei Yumesaki
-
-- [Amitaro's Onsozai kobo](https://amitaro.net/)
-- [Replica doll](https://kikyohiroto1227.wixsite.com/kikoto-utau)
-
-# Terms of Use
-
-In accordance with the Tsukuyomi-chan Corpus Terms of Use for the Tsukuyomi-chan Real-time Voice Changer, the use of the converted voice for the following purposes is prohibited.
-
-- Criticizing or attacking individuals (the definition of "criticizing or attacking" is based on the Tsukuyomi-chan character license).
-
-- Advocating for or opposing specific political positions, religions, or ideologies.
-
-- Publicly displaying strongly stimulating expressions without proper zoning.
-
-- Publicly disclosing secondary use (use as materials) for others.
- (Distributing or selling as a work for viewing is not a problem.)
-
-Regarding the Real-time Voice Changer Amitaro, we prohibit certain uses in accordance with the terms of use of Amitaro's koe-sozai kobo. [Details](https://amitaro.net/voice/faq/#index_id6)
-
-Regarding the Real-time Voice Changer Kikoto Mahiro, we prohibit certain uses in accordance with the terms of use of Replica doll. [Details](https://kikyohiroto1227.wixsite.com/kikoto-utau/ter%EF%BD%8Ds-of-service)
-
-# Disclaimer
-
-We are not liable for any direct, indirect, consequential, incidental, or special damages arising out of or in any way connected with the use or inability to use this software.
diff --git a/README_ko.md b/README_ko.md
deleted file mode 100644
index 8c0d0462..00000000
--- a/README_ko.md
+++ /dev/null
@@ -1,229 +0,0 @@
-## VC Client
-
-[English](/README_en.md) [Korean](/README_ko.md)
-
-## What's New!
-- v.1.5.3.18a
- - Bugfix: FCPE
-
-- v.1.5.3.18 (removed.)
- - New Feature: FCPE
- - Easy-VC (experimental)
-- v.1.5.3.17b
- - bugfix:
- - clear setting
- - improve
- - file sanitizer
-  - change:
-    - default input chunk size: 192.
-      - decided based on this chart (https://rentry.co/VoiceChangerGuide#gpu-chart-for-known-working-chunkextra)
-
-- v.1.5.3.17a
- - Bug Fixes:
- - Server mode error
- - RVC Model merger
- - Misc
- - Add RVC Sample Chihaya-Jinja (https://chihaya369.booth.pm/items/4701666)
-
-- v.1.5.3.17
- - New Features:
- - Added similarity graph for Beatrice speaker selection
- - Bug Fixes:
- - Fixed crossfade issue with Beatrice speaker
-
-- v.1.5.3.16a
- - Bug fix:
- - Lazy load Beatrice.
-
-
-- v.1.5.3.16 (Only for Windows, CPU dependent)
- - New Feature:
- - Beatrice is supported(experimental)
-
-- v.1.5.3.15
- - Improve:
- - new rmvpe checkpoint for rvc (torch, onnx)
- - Mac: upgrade torch version 2.1.0
-
-
-
-
-# What is VC Client
-
-1. This is client software for performing real-time voice conversion using various Voice Conversion (VC) AI models. The supported voice conversion AI models are as follows.
-
-- Supported voice conversion AI (supported VC)
- - [MMVC](https://github.com/isletennos/MMVC_Trainer)
- - [so-vits-svc](https://github.com/svc-develop-team/so-vits-svc)
- - [RVC(Retrieval-based-Voice-Conversion)](https://github.com/liujing04/Retrieval-based-Voice-Conversion-WebUI)
- - [DDSP-SVC](https://github.com/yxlllc/DDSP-SVC)
-  - [Beatrice JVS Corpus Edition](https://prj-beatrice.com/) * experimental, (***NOT MIT License*** see [readme](https://github.com/w-okada/voice-changer/blob/master/server/voice_changer/Beatrice/)) * Only for Windows, CPU dependent
-  -
-2. This software can also be used over a network; when used alongside resource-intensive applications such as games, the voice conversion load can be offloaded to another machine.
-
-![image](https://user-images.githubusercontent.com/48346627/206640768-53f6052d-0a96-403b-a06c-6714a0b7471d.png)
-
-3. Multi-platform support.
-
-- Windows, Mac (M1), Linux, Google Colab (MMVC only)
-
-# Usage
-
-It can be used in two main ways, listed here in order of difficulty:
-
-- Using a pre-built binary
-- Using a development environment set up with Docker or Anaconda
-
-If you are not familiar with this software or MMVC, we recommend working through the options from the top.
-
-## (1) Using a pre-built binary (file)
-
-- You can download and run an executable binary.
-
-- See [here](tutorials/tutorial_rvc_ko_latest.md) for the tutorial. ([Network troubleshooting](https://github.com/w-okada/voice-changer/blob/master/tutorials/trouble_shoot_communication_ko.md))
-
-- You can now easily try it out on [Google Colaboratory](https://github.com/w-okada/voice-changer/blob/master/Realtime_Voice_Changer_on_Colab.ipynb). You can launch it from the Open in Colab button in the top left corner.
-
-
-
-- Windows and Mac versions are provided.
-
-  - If you are using Windows and an NVIDIA GPU, download ONNX (cpu, cuda), PyTorch (cpu, cuda).
-  - If you are using Windows and an AMD/Intel GPU, download ONNX (cpu, DirectML), PyTorch (cpu, cuda). AMD/Intel GPUs are only enabled for ONNX models.
-  - Other GPUs are likewise only enabled if PyTorch and Onnxruntime support them.
-  - If you are not using a GPU on Windows, download ONNX (cpu, cuda), PyTorch (cpu, cuda).
-
-- For the Windows version, unzip the downloaded zip file and run `start_http.bat`.
-
-- For the Mac version, unzip the downloaded file and run `startHttp.command`. If a message appears saying the developer cannot be verified, hold the control key and click to run it again (or right-click to run it).
-
-- On first launch, various data are downloaded from the internet. The download may take some time. Once it is complete, the browser will open.
-
-- When connecting remotely, run the https `.bat` file (win) / `.command` file (mac) instead of the http one.
-
-- The encoder of DDSP-SVC only supports hubert-soft.
-
-- Download from the table below.
-
-| Version | OS | Framework | Link | Supported VC | Size |
-| ----------- | --- | ------------------------------------- | ------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | --------- |
-| v.1.5.3.18a | mac | ONNX(cpu), PyTorch(cpu,mps) | N/A | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, RVC | 797MB |
-| | win | ONNX(cpu,cuda), PyTorch(cpu,cuda) | [hugging face](https://huggingface.co/wok000/vcclient000/tree/main) | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, RVC, DDSP-SVC, Diffusion-SVC, Beatrice | 3240MB |
-| | win | ONNX(cpu,DirectML), PyTorch(cpu,cuda) | [hugging face](https://huggingface.co/wok000/vcclient000/tree/main) | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, RVC, DDSP-SVC, Diffusion-SVC, Beatrice | 3125MB |
-| v.1.5.3.17b | mac | ONNX(cpu), PyTorch(cpu,mps) | [hugging face](https://huggingface.co/wok000/vcclient000/tree/main) | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, RVC | 797MB |
-| | win | ONNX(cpu,cuda), PyTorch(cpu,cuda) | [hugging face](https://huggingface.co/wok000/vcclient000/tree/main) | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, RVC, DDSP-SVC, Diffusion-SVC, Beatrice | 3240MB |
-| | win | ONNX(cpu,DirectML), PyTorch(cpu,cuda) | [hugging face](https://huggingface.co/wok000/vcclient000/tree/main) | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, RVC, DDSP-SVC, Diffusion-SVC, Beatrice | 3125MB |
-| v.1.5.3.16a | mac | ONNX(cpu), PyTorch(cpu,mps) | N/A | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, RVC | 797MB |
-| | win | ONNX(cpu,cuda), PyTorch(cpu,cuda) | [hugging face](https://huggingface.co/wok000/vcclient000/tree/main) | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, RVC, DDSP-SVC, Diffusion-SVC, Beatrice | 3240MB |
-| | win | ONNX(cpu,DirectML), PyTorch(cpu,cuda) | [hugging face](https://huggingface.co/wok000/vcclient000/tree/main) | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, RVC, DDSP-SVC, Diffusion-SVC, Beatrice | 3125MB |
-| v.1.5.3.15 | mac | ONNX(cpu), PyTorch(cpu,mps) | [hugging face](https://huggingface.co/wok000/vcclient000/tree/main) | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, RVC | 797MB |
-| | win | ONNX(cpu,cuda), PyTorch(cpu,cuda) | [hugging face](https://huggingface.co/wok000/vcclient000/tree/main) | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, RVC, DDSP-SVC, Diffusion-SVC | 3240MB |
-| | win | ONNX(cpu,DirectML), PyTorch(cpu,cuda) | [hugging face](https://huggingface.co/wok000/vcclient000/tree/main) | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, RVC, DDSP-SVC, Diffusion-SVC | 3125MB |
-
-(\*1) If you cannot download from Google Drive, try [hugging_face](https://huggingface.co/wok000/vcclient000/tree/main).
-(\*2) The developer does not own an AMD graphics card and could not verify operation; onnxruntime-directml is simply bundled as-is.
-(\*3) If extraction or startup is slow, an antivirus scan may be in progress. Try excluding the file or folder from scanning and run it again. (The developer takes no responsibility for this.)
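-
-As a minimal sketch, the Mac launch flow described above looks roughly like this (the archive and folder names are hypothetical and depend on the version you download):
-
-```
-unzip vcclient_mac.zip       # hypothetical archive name
-cd vcclient_mac
-./startHttp.command          # on the first run, control-click it in Finder instead
-```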
-
-## (2) Using a development environment set up with Docker, Anaconda, etc.
-
-Clone this repository to use it this way. On Windows, a WSL2 environment is required, and on top of WSL2 you need a virtual environment such as Docker or Anaconda. On Mac, a Python virtual environment such as Anaconda is required. Although some preparation is needed, in many environments this is the fastest way to get it running. **Even without a GPU, a reasonably recent CPU is likely to be sufficient (see the real-time performance section below).**
-
-[Video: installing WSL2 and Docker](https://youtu.be/POo_Cg0eFMU)
-
-[Video: installing WSL2 and Anaconda](https://youtu.be/fba9Zhsukqw)
-
-To run with Docker, start the server as described in [Using Docker](docker_vcclient/README_ko.md).
-
-To run in an Anaconda virtual environment, start the server as described in the [server developer documentation](README_dev_ko.md); a minimal sketch follows below.
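-
-A minimal sketch of the Anaconda route, assuming Python 3.10 and that the pretrained models described in the developer documentation are already in place. The `requirements.txt` install and the `MMVCServerSIO.py` entry point mirror the Colab notebook; the environment name and port are placeholders:
-
-```
-git clone https://github.com/w-okada/voice-changer.git
-cd voice-changer/server
-conda create -n vcclient python=3.10   # hypothetical environment name
-conda activate vcclient
-pip install -r requirements.txt
-# launch the server (add the pretrained-model flags listed in the developer docs / Colab notebook)
-python3 MMVCServerSIO.py -p 8000 --https False
-```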
-
-# Troubleshooting
-
-- [Communication issues](tutorials/trouble_shoot_communication_ko.md)
-
-# Real-time performance (MMVC)
-
-With a GPU, conversion happens with almost no noticeable delay.
-
-https://twitter.com/DannadoriYellow/status/1613483372579545088?s=20&t=7CLD79h1F3dfKiTb7M8RUQ
-
-Even a recent CPU can convert reasonably quickly.
-
-https://twitter.com/DannadoriYellow/status/1613553862773997569?s=20&t=7CLD79h1F3dfKiTb7M8RUQ
-
-On an older CPU (i7-4770), it takes around 1000 msec.
-
-# About the developer signature
-
-This software is not signed by the developer. A warning like the one below will appear, but you can run it by holding the control key while clicking the icon. This is due to Apple's security policy. Running the software is at your own risk.
-
-![image](https://user-images.githubusercontent.com/48346627/212567711-c4a8d599-e24c-4fa3-8145-a5df7211f023.png)
-(Image translation: click while holding ctrl)
-
-# Acknowledgements
-
-- [立ちずんだもん素材](https://seiga.nicovideo.jp/seiga/im10792934)
-- [いらすとや](https://www.irasutoya.com/)
-- [つくよみちゃん](https://tyc.rei-yumesaki.net/)
-
-```
- The speech synthesis in this software uses voice data released free of charge by the free-to-use character "Tsukuyomi-chan" (つくよみちゃん). ■ Tsukuyomi-chan Corpus (CV. Rei Yumesaki)
- https://tyc.rei-yumesaki.net/material/corpus/
- © Rei Yumesaki
-```
-
-- [あみたろの声素材工房](https://amitaro.net/)
-- [れぷりかどーる](https://kikyohiroto1227.wixsite.com/kikoto-utau)
-
-# Terms of use
-
-- For the real-time voice changer "Tsukuyomi-chan", in accordance with the terms of use of the Tsukuyomi-chan Corpus, using the converted voice for the following purposes is prohibited:
-
-```
-
-■ Criticizing or attacking individuals. (The definition of "criticizing or attacking" follows the Tsukuyomi-chan character license.)
-
-■ Arguing for or against specific political positions, religions, or ideologies.
-
-■ Indiscriminately publishing highly provocative or explicit content.
-
-■ Publishing content in a way that permits third parties to create derivative works (i.e. to reuse it as material).
-※ Distributing or selling works intended for appreciation is not a problem.
-```
-
-- The real-time voice changer "Amitaro" follows the terms of use of あみたろの声素材工房 (Amitaro's voice material studio) quoted below. Details are available [here](https://amitaro.net/voice/faq/#index_id6).
-
-```
-You may build voice models from Amitaro's voice materials or corpus audio, and you may use voice changers or speech-style converters to turn your own voice into Amitaro's voice.
-
-However, in that case you must clearly state that the audio has been converted into the voice of Amitaro (or Koharune Ami), and make it obvious to everyone that it is not Amitaro (or Koharune Ami) actually speaking.
-Anything spoken with Amitaro's voice must also stay within the scope of the voice material terms of use, and sensitive statements must be avoided.
-```
-
-- The real-time voice changer "Kikoto Mahiro" follows the terms of use of れぷりかどーる (Replica Doll). Details are available [here](https://kikyohiroto1227.wixsite.com/kikoto-utau/ter%EF%BD%8Ds-of-service).
-
-# Disclaimer
-
-We accept no liability for any direct, indirect, incidental, consequential, or special damages arising from the use of, or the inability to use, this software.
-
-# (1) Recorder (an app for recording training audio)
-
-An app for easily recording training audio for MMVC.
-It runs on GitHub Pages, so it can be used on a variety of platforms with nothing but a browser.
-Recorded data is stored in the browser and never leaves it.
-
-[Recording app on GitHub Pages](https://w-okada.github.io/voice-changer/)
-
-[Explainer video](https://youtu.be/s_GirFEGvaA)
-
-# Older versions
-
-| Version    | OS  | Framework                         | Link                                                                                             | Supported VC                                                                  | File size |
-| ---------- | --- | --------------------------------- | ---------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | --------- |
-| v.1.5.2.9e | mac | ONNX(cpu), PyTorch(cpu,mps) | [normal](https://drive.google.com/uc?id=1W0d7I7619PcO7kjb1SPXp6MmH5Unvd78&export=download) \*1 | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, RVC | 796MB |
-| | win | ONNX(cpu,cuda), PyTorch(cpu,cuda) | [normal](https://drive.google.com/uc?id=1tmTMJRRggS2Sb4goU-eHlRvUBR88RZDl&export=download) \*1 | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, so-vits-svc 4.0v2, RVC, DDSP-SVC | 2872MB |
-| v.1.5.3.1 | mac | ONNX(cpu), PyTorch(cpu,mps) | [normal](https://drive.google.com/uc?id=1oswF72q_cQQeXhIn6W275qLnoBAmcrR_&export=download) \*1 | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, RVC | 796MB |
-| | win | ONNX(cpu,cuda), PyTorch(cpu,cuda) | [normal](https://drive.google.com/uc?id=1AWjDhW4w2Uljp1-9P8YUJBZsIlnhkJX2&export=download) \*1 | MMVC v.1.5.x, MMVC v.1.3.x, so-vits-svc 4.0, so-vits-svc 4.0v2, RVC, DDSP-SVC | 2872MB |
-
-# For Contributors
-
-This repository has a [CLA](https://raw.githubusercontent.com/w-okada/voice-changer/master/LICENSE-CLA) in place.
diff --git a/Realtime_Voice_Changer_on_Colab.ipynb b/Realtime_Voice_Changer_on_Colab.ipynb
deleted file mode 100644
index 144b9e98..00000000
--- a/Realtime_Voice_Changer_on_Colab.ipynb
+++ /dev/null
@@ -1,206 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "view-in-github",
- "colab_type": "text"
- },
- "source": [
- ""
- ]
- },
- {
- "cell_type": "markdown",
- "source": [
- "### [w-okada's Voice Changer](https://github.com/w-okada/voice-changer) | **Colab**\n",
- "\n",
- "---\n",
- "\n",
- "## **⬇ VERY IMPORTANT ⬇**\n",
- "\n",
- "You can use the following settings for better results:\n",
- "\n",
- "If you're using an index: `f0: RMVPE_ONNX | Chunk: 112 or higher | Extra: 8192`\\\n",
- "If you're not using an index: `f0: RMVPE_ONNX | Chunk: 96 or higher | Extra: 16384`\\\n",
- "**Don't forget to select a T4 GPU in the GPU field, NEVER use CPU!**\n",
- "> Seems that PTH models perform better than ONNX for now; you can still try ONNX models and see if they satisfy you\n",
- "\n",
- "\n",
- "*You can always [click here](https://github.com/YunaOneeChan/Voice-Changer-Settings) to check if these settings are up-to-date*\n",
- "\n",
- "---\n",
- "\n",
- "### ⬇ Always use Colab GPU! (**IMPORTANT!**) ⬇\n",
- "You need to use a Colab GPU so the Voice Changer can work faster and better\\\n",
- "Use the menu above and click on **Runtime** » **Change runtime** » **Hardware acceleration** to select a GPU (**T4 is the free one**)\n",
- "\n",
- "---\n",
- "**Credits**\\\n",
- "Realtime Voice Changer by [w-okada](https://github.com/w-okada)\\\n",
- "Notebook files updated by [rafacasari](https://github.com/Rafacasari)\\\n",
- "Recommended settings by [YunaOneeChan](https://github.com/YunaOneeChan)\n",
- "\n",
- "**Need help?** [AI Hub Discord](https://discord.gg/aihub) » ***#help-realtime-vc***\n",
- "\n",
- "---"
- ],
- "metadata": {
- "id": "Lbbmx_Vjl0zo"
- }
- },
- {
- "cell_type": "code",
- "source": [
- "# @title Clone repository and install dependencies\n",
- "# @markdown This first step will download the latest version of Voice Changer and install the dependencies. **It can take some time to complete.**\n",
- "%cd /content/\n",
- "\n",
- "!pip install colorama --quiet\n",
- "from colorama import Fore, Style\n",
- "import os\n",
- "\n",
- "print(f\"{Fore.CYAN}> Cloning the repository...{Style.RESET_ALL}\")\n",
- "!git clone https://github.com/w-okada/voice-changer.git --quiet\n",
- "print(f\"{Fore.GREEN}> Successfully cloned the repository!{Style.RESET_ALL}\")\n",
- "%cd voice-changer/server/\n",
- "\n",
- "print(f\"{Fore.CYAN}> Installing libportaudio2...{Style.RESET_ALL}\")\n",
- "!apt-get -y install libportaudio2 -qq\n",
- "\n",
- "print(f\"{Fore.CYAN}> Installing pre-dependencies...{Style.RESET_ALL}\")\n",
- "# Install dependencies that are missing from requirements.txt and pyngrok\n",
- "!pip install faiss-gpu fairseq pyngrok --quiet\n",
- "!pip install pyworld --no-build-isolation --quiet\n",
- "print(f\"{Fore.CYAN}> Installing dependencies from requirements.txt...{Style.RESET_ALL}\")\n",
- "!pip install -r requirements.txt --quiet\n",
- "\n",
- "print(f\"{Fore.GREEN}> Successfully installed all packages!{Style.RESET_ALL}\")"
- ],
- "metadata": {
- "id": "86wTFmqsNMnD",
- "cellView": "form",
- "_kg_hide-output": false,
- "execution": {
- "iopub.status.busy": "2023-09-14T04:01:17.308284Z",
- "iopub.execute_input": "2023-09-14T04:01:17.308682Z",
- "iopub.status.idle": "2023-09-14T04:08:08.475375Z",
- "shell.execute_reply.started": "2023-09-14T04:01:17.308652Z",
- "shell.execute_reply": "2023-09-14T04:08:08.473827Z"
- },
- "trusted": true
- },
- "execution_count": null,
- "outputs": []
- },
- {
- "cell_type": "code",
- "source": [
- "# @title Start Server **using ngrok**\n",
- "# @markdown This cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n",
- "\n",
- "# @markdown ---\n",
- "# @markdown You'll need a ngrok account, but **it's free** and easy to create!\n",
- "# @markdown ---\n",
- "# @markdown **1** - Create a **free** account at [ngrok](https://dashboard.ngrok.com/signup) or **login with Google/Github account**\\\n",
- "# @markdown **2** - If you didn't log in with Google/Github, you will need to **verify your e-mail**!\\\n",
- "# @markdown **3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, and place it here:\n",
- "Token = '' # @param {type:\"string\"}\n",
- "# @markdown **4** - *(optional)* Change to a region near you to reduce latency, or keep United States\\\n",
- "# @markdown `Default Region: us - United States (Ohio)`\n",
- "Region = \"us - United States (Ohio)\" # @param [\"ap - Asia/Pacific (Singapore)\", \"au - Australia (Sydney)\",\"eu - Europe (Frankfurt)\", \"in - India (Mumbai)\",\"jp - Japan (Tokyo)\",\"sa - South America (Sao Paulo)\", \"us - United States (Ohio)\"]\n",
- "\n",
- "#@markdown **5** - *(optional)* Other options:\n",
- "ClearConsole = True # @param {type:\"boolean\"}\n",
- "\n",
- "# ---------------------------------\n",
- "# DO NOT TOUCH ANYTHING DOWN BELOW!\n",
- "# ---------------------------------\n",
- "\n",
- "%cd /content/voice-changer/server\n",
- "\n",
- "from pyngrok import conf, ngrok\n",
- "MyConfig = conf.PyngrokConfig()\n",
- "MyConfig.auth_token = Token\n",
- "MyConfig.region = Region[0:2]\n",
- "#conf.get_default().authtoken = Token\n",
- "#conf.get_default().region = Region\n",
- "conf.set_default(MyConfig);\n",
- "\n",
- "import subprocess, threading, time, socket, urllib.request\n",
- "PORT = 8000\n",
- "\n",
- "from pyngrok import ngrok\n",
- "ngrokConnection = ngrok.connect(PORT)\n",
- "public_url = ngrokConnection.public_url\n",
- "\n",
- "from IPython.display import clear_output\n",
- "\n",
- "def wait_for_server():\n",
- " while True:\n",
- " time.sleep(0.5)\n",
- " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
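- "        # connect_ex returns 0 once the server starts accepting connections on PORT\n",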
- " result = sock.connect_ex(('127.0.0.1', PORT))\n",
- " if result == 0:\n",
- " break\n",
- " sock.close()\n",
- " if ClearConsole:\n",
- " clear_output()\n",
- " print(\"--------- SERVER READY! ---------\")\n",
- " print(\"Your server is available at:\")\n",
- " print(public_url)\n",
- " print(\"---------------------------------\")\n",
- "\n",
- "threading.Thread(target=wait_for_server, daemon=True).start()\n",
- "\n",
- "!python3 MMVCServerSIO.py \\\n",
- " -p {PORT} \\\n",
- " --https False \\\n",
- " --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \\\n",
- " --content_vec_500_onnx pretrain/content_vec_500.onnx \\\n",
- " --content_vec_500_onnx_on true \\\n",
- " --hubert_base pretrain/hubert_base.pt \\\n",
- " --hubert_base_jp pretrain/rinna_hubert_base_jp.pt \\\n",
- " --hubert_soft pretrain/hubert/hubert-soft-0d54a1f4.pt \\\n",
- " --nsf_hifigan pretrain/nsf_hifigan/model \\\n",
- " --crepe_onnx_full pretrain/crepe_onnx_full.onnx \\\n",
- " --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \\\n",
- " --rmvpe pretrain/rmvpe.pt \\\n",
- " --model_dir model_dir \\\n",
- " --samples samples.json\n",
- "\n",
- "ngrok.disconnect(ngrokConnection.public_url)"
- ],
- "metadata": {
- "id": "lLWQuUd7WW9U",
- "cellView": "form",
- "_kg_hide-input": false,
- "scrolled": true,
- "trusted": true
- },
- "execution_count": null,
- "outputs": []
- }
- ],
- "metadata": {
- "colab": {
- "provenance": [],
- "private_outputs": true,
- "include_colab_link": true,
- "gpuType": "T4",
- "collapsed_sections": [
- "iuf9pBHYpTn-"
- ]
- },
- "kernelspec": {
- "display_name": "Python 3",
- "name": "python3"
- },
- "language_info": {
- "name": "python"
- },
- "accelerator": "GPU"
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
diff --git a/client/.vscode/settings.json b/client/.vscode/settings.json
deleted file mode 100644
index 90365164..00000000
--- a/client/.vscode/settings.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "workbench.colorCustomizations": {
- "tab.activeBackground": "#65952acc"
- },
- "editor.defaultFormatter": "esbenp.prettier-vscode",
- "prettier.printWidth": 1024,
- "prettier.tabWidth": 4,
- "files.associations": {
- "*.css": "postcss"
- }
-}
diff --git a/client/buildAllDemo.sh b/client/buildAllDemo.sh
deleted file mode 100755
index b947f92c..00000000
--- a/client/buildAllDemo.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-
-cd demo && ncu -u && npm install && npm run build:prod && cd -
diff --git a/client/demo/.eslintrc.js b/client/demo/.eslintrc.js
deleted file mode 100644
index 533f3698..00000000
--- a/client/demo/.eslintrc.js
+++ /dev/null
@@ -1,18 +0,0 @@
-module.exports = {
- env: {
- browser: true,
- es2021: true,
- node: true,
- },
- extends: ["eslint:recommended", "plugin:react/recommended", "plugin:@typescript-eslint/recommended"],
- parser: "@typescript-eslint/parser",
- parserOptions: {
- ecmaFeatures: {
- jsx: true,
- },
- ecmaVersion: 13,
- sourceType: "module",
- },
- plugins: ["react", "@typescript-eslint"],
- rules: {},
-};
diff --git a/client/demo/.prettierrc b/client/demo/.prettierrc
deleted file mode 100644
index e2828759..00000000
--- a/client/demo/.prettierrc
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "tabWidth": 4,
- "useTabs": false,
- "semi": true,
- "printWidth": 360
-}
diff --git a/client/demo/.vscode/settings.json b/client/demo/.vscode/settings.json
deleted file mode 100644
index 90365164..00000000
--- a/client/demo/.vscode/settings.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "workbench.colorCustomizations": {
- "tab.activeBackground": "#65952acc"
- },
- "editor.defaultFormatter": "esbenp.prettier-vscode",
- "prettier.printWidth": 1024,
- "prettier.tabWidth": 4,
- "files.associations": {
- "*.css": "postcss"
- }
-}
diff --git a/client/demo/build-voice-changer-js.sh b/client/demo/build-voice-changer-js.sh
deleted file mode 100644
index 413a0dd1..00000000
--- a/client/demo/build-voice-changer-js.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-# cd ~/git-work/voice-changer-js/lib/ ; npm run build:dev; cd -
-# rm -rf node_modules/@dannadori/voice-changer-js
-# mkdir -p node_modules/@dannadori/voice-changer-js/dist
-# cp -r ~/git-work/voice-changer-js/lib/package.json node_modules/@dannadori/voice-changer-js/
-# cp -r ~/git-work/voice-changer-js/lib/dist node_modules/@dannadori/voice-changer-js/
-
-cd ~/git-work/voice-changer-js/lib/ ; npm run build:prod; cd -
-rm -rf node_modules/@dannadori/voice-changer-js
-mkdir -p node_modules/@dannadori/voice-changer-js/dist
-cp -r ~/git-work/voice-changer-js/lib/package.json node_modules/@dannadori/voice-changer-js/
-cp -r ~/git-work/voice-changer-js/lib/dist node_modules/@dannadori/voice-changer-js/
diff --git a/client/demo/dist/assets/beatrice/female-clickable.svg b/client/demo/dist/assets/beatrice/female-clickable.svg
deleted file mode 100644
index 60512426..00000000
--- a/client/demo/dist/assets/beatrice/female-clickable.svg
+++ /dev/null
@@ -1,928 +0,0 @@
-
-
\ No newline at end of file
diff --git a/client/demo/dist/assets/beatrice/male-clickable.svg b/client/demo/dist/assets/beatrice/male-clickable.svg
deleted file mode 100644
index a5385f62..00000000
--- a/client/demo/dist/assets/beatrice/male-clickable.svg
+++ /dev/null
@@ -1,898 +0,0 @@
-
-
\ No newline at end of file
diff --git a/client/demo/dist/assets/buymeacoffee.png b/client/demo/dist/assets/buymeacoffee.png
deleted file mode 100644
index 02d6ea41..00000000
Binary files a/client/demo/dist/assets/buymeacoffee.png and /dev/null differ
diff --git a/client/demo/dist/assets/gui_settings/GUI.json b/client/demo/dist/assets/gui_settings/GUI.json
deleted file mode 100644
index e234f1a4..00000000
--- a/client/demo/dist/assets/gui_settings/GUI.json
+++ /dev/null
@@ -1,30 +0,0 @@
-{
- "type": "demo",
- "id": "RVC",
- "front": {
- "modelSlotControl": [
- {
- "name": "headerArea",
- "options": {
- "mainTitle": "Realtime Voice Changer Client",
- "subTitle": ""
- }
- },
- {
- "name": "modelSlotArea",
- "options": {}
- },
- {
- "name": "characterArea",
- "options": {}
- },
- {
- "name": "configArea",
- "options": {
- "detectors": ["dio", "harvest", "crepe", "crepe_full", "crepe_tiny", "rmvpe", "rmvpe_onnx", "fcpe"],
- "inputChunkNums": [1, 2, 4, 6, 8, 16, 24, 32, 40, 48, 64, 80, 96, 112, 128, 192, 256, 320, 384, 448, 512, 576, 640, 704, 768, 832, 896, 960, 1024, 2048, 4096, 8192, 16384]
- }
- }
- ]
- }
-}
diff --git a/client/demo/dist/assets/gui_settings/RVC.json b/client/demo/dist/assets/gui_settings/RVC.json
deleted file mode 100644
index a3099e2b..00000000
--- a/client/demo/dist/assets/gui_settings/RVC.json
+++ /dev/null
@@ -1,30 +0,0 @@
-{
- "type": "demo",
- "id": "RVC",
- "front": {
- "modelSlotControl": [
- {
- "name": "headerArea",
- "options": {
- "mainTitle": "Realtime Voice Changer Client",
- "subTitle": "for RVC"
- }
- },
- {
- "name": "modelSlotArea",
- "options": {}
- },
- {
- "name": "characterArea",
- "options": {}
- },
- {
- "name": "configArea",
- "options": {
- "detectors": ["dio", "harvest", "crepe"],
- "inputChunkNums": [8, 16, 24, 32, 40, 48, 64, 80, 96, 112, 128, 192, 256, 320, 384, 448, 512, 576, 640, 704, 768, 832, 896, 960, 1024, 2048]
- }
- }
- ]
- }
-}
diff --git a/client/demo/dist/assets/gui_settings/edition.txt b/client/demo/dist/assets/gui_settings/edition.txt
deleted file mode 100644
index e69de29b..00000000
diff --git a/client/demo/dist/assets/gui_settings/edition_dml.txt b/client/demo/dist/assets/gui_settings/edition_dml.txt
deleted file mode 100644
index ceffe78a..00000000
--- a/client/demo/dist/assets/gui_settings/edition_dml.txt
+++ /dev/null
@@ -1 +0,0 @@
-onnxdirectML-cuda
diff --git a/client/demo/dist/assets/gui_settings/edition_web.txt b/client/demo/dist/assets/gui_settings/edition_web.txt
deleted file mode 100644
index c0772185..00000000
--- a/client/demo/dist/assets/gui_settings/edition_web.txt
+++ /dev/null
@@ -1 +0,0 @@
-web
diff --git a/client/demo/dist/assets/gui_settings/version.txt b/client/demo/dist/assets/gui_settings/version.txt
deleted file mode 100644
index c31213bc..00000000
--- a/client/demo/dist/assets/gui_settings/version.txt
+++ /dev/null
@@ -1 +0,0 @@
--.-.-.-
\ No newline at end of file
diff --git a/client/demo/dist/assets/icons/blank.png b/client/demo/dist/assets/icons/blank.png
deleted file mode 100644
index 539c8785..00000000
Binary files a/client/demo/dist/assets/icons/blank.png and /dev/null differ
diff --git a/client/demo/dist/assets/icons/file-text.svg b/client/demo/dist/assets/icons/file-text.svg
deleted file mode 100644
index 4197ddd4..00000000
--- a/client/demo/dist/assets/icons/file-text.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/client/demo/dist/assets/icons/flect.png b/client/demo/dist/assets/icons/flect.png
deleted file mode 100644
index eb25e5d0..00000000
Binary files a/client/demo/dist/assets/icons/flect.png and /dev/null differ
diff --git a/client/demo/dist/assets/icons/folder.svg b/client/demo/dist/assets/icons/folder.svg
deleted file mode 100644
index 134458b9..00000000
--- a/client/demo/dist/assets/icons/folder.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/client/demo/dist/assets/icons/github.svg b/client/demo/dist/assets/icons/github.svg
deleted file mode 100644
index cac76304..00000000
--- a/client/demo/dist/assets/icons/github.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/client/demo/dist/assets/icons/help-circle.svg b/client/demo/dist/assets/icons/help-circle.svg
deleted file mode 100644
index 51fddd80..00000000
--- a/client/demo/dist/assets/icons/help-circle.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/client/demo/dist/assets/icons/home.svg b/client/demo/dist/assets/icons/home.svg
deleted file mode 100644
index 7e630021..00000000
--- a/client/demo/dist/assets/icons/home.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/client/demo/dist/assets/icons/human.png b/client/demo/dist/assets/icons/human.png
deleted file mode 100644
index 7b4bf8df..00000000
Binary files a/client/demo/dist/assets/icons/human.png and /dev/null differ
diff --git a/client/demo/dist/assets/icons/linkedin.svg b/client/demo/dist/assets/icons/linkedin.svg
deleted file mode 100644
index 021adb4d..00000000
--- a/client/demo/dist/assets/icons/linkedin.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/client/demo/dist/assets/icons/noimage.png b/client/demo/dist/assets/icons/noimage.png
deleted file mode 100644
index 6b520524..00000000
Binary files a/client/demo/dist/assets/icons/noimage.png and /dev/null differ
diff --git a/client/demo/dist/assets/icons/tool.svg b/client/demo/dist/assets/icons/tool.svg
deleted file mode 100644
index f3cbf3d9..00000000
--- a/client/demo/dist/assets/icons/tool.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/client/demo/dist/assets/icons/twitter.svg b/client/demo/dist/assets/icons/twitter.svg
deleted file mode 100644
index 640ade90..00000000
--- a/client/demo/dist/assets/icons/twitter.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/client/demo/dist/assets/icons/zun.png b/client/demo/dist/assets/icons/zun.png
deleted file mode 100644
index 2e6f7c18..00000000
Binary files a/client/demo/dist/assets/icons/zun.png and /dev/null differ
diff --git a/client/demo/dist/favicon.ico b/client/demo/dist/favicon.ico
deleted file mode 100644
index 052e3af9..00000000
Binary files a/client/demo/dist/favicon.ico and /dev/null differ
diff --git a/client/demo/dist/index.html b/client/demo/dist/index.html
deleted file mode 100644
index 83226387..00000000
--- a/client/demo/dist/index.html
+++ /dev/null
@@ -1 +0,0 @@
-