From 889874ecafc45704d0f0db17584dc2ce2e49f0f4 Mon Sep 17 00:00:00 2001
From: Hina <79749008+hinabl@users.noreply.github.com>
Date: Thu, 28 Sep 2023 19:18:21 +0800
Subject: [PATCH 01/26] Added Auto Sampling Rate
The Model Uploader Colab cell is probably buggier now, but it works.
---
...fied_Realtime_Voice_Changer_on_Colab.ipynb | 119 ++++++++++++------
1 file changed, 83 insertions(+), 36 deletions(-)
diff --git a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
index ebbab41f..dbcc76ff 100644
--- a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
+++ b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
@@ -1,15 +1,5 @@
{
"cells": [
-{
- "cell_type": "markdown",
- "metadata": {
- "id": "view-in-github",
- "colab_type": "text"
- },
- "source": [
- ""
- ]
- },
{
"cell_type": "markdown",
"metadata": {
@@ -147,21 +137,22 @@
"#@markdown ---\n",
"import os\n",
"import json\n",
+ "from IPython.display import Image\n",
"\n",
"\n",
"#@markdown #Model Number `(Default is 0)` you can add multiple models as long as you change the number!\n",
- "model_number = \"0\" #@param ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199']\n",
+ "model_number = \"6\" #@param ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199']\n",
"\n",
"!rm -rf model_dir/$model_number\n",
"#@markdown ---\n",
"#@markdown #**[Optional]** Add an icon to the model `(can be any image/leave empty for no image)`\n",
- "icon_link = \"https://cdn.donmai.us/original/8a/92/8a924397e9aac922e94bdc1f28ff978a.jpg\" #@param {type:\"string\"}\n",
+ "icon_link = \"https://cdn.discordapp.com/attachments/1144453160912572506/1144453161210351697/mika.png?ex=65163190&is=6514e010&hm=6cfc987d42e448b2912f5225e2c865df92d688c8dc46a135c2cca32682a3f3ea&\" #@param {type:\"string\"}\n",
"#@markdown ---\n",
"icon_link = '\"'+icon_link+'\"'\n",
"!mkdir model_dir\n",
"!mkdir model_dir/$model_number\n",
"#@markdown #Put your model's download link here `(must be a zip file)`\n",
- "model_link = \"https://huggingface.co/HinaBl/Akatsuki/resolve/main/akatsuki_200epoch.zip\" #@param {type:\"string\"}\n",
+ "model_link = \"https://huggingface.co/Kit-Lemonfoot/kitlemonfoot_rvc_models/resolve/main/Mika%20Melatika%20(Speaking)(KitLemonfoot).zip\" #@param {type:\"string\"}\n",
"model_link = '\"'+model_link+'\"'\n",
"!curl -L $model_link > model.zip\n",
"\n",
@@ -171,6 +162,7 @@
" iconFile = \"icon.png\"\n",
" !curl -L $icon_link > model_dir/$model_number/icon.png\n",
"else:\n",
+ " iconFile = \"\"\n",
" print(\"icon_link is empty, so no icon file will be downloaded.\")\n",
"#@markdown ---\n",
"\n",
@@ -190,32 +182,83 @@
"Tune = 12 #@param {type:\"slider\",min:-50,max:50,step:1}\n",
"Index = 0 #@param {type:\"slider\",min:0,max:1,step:0.1}\n",
"#@markdown ---\n",
- "#@markdown #Parameter Option `(Ignore if theres a Parameter File)`\n",
- "Slot_Index = -1 #@param [-1,0,1] {type:\"raw\"}\n",
- "Sampling_Rate = 48000 #@param [32000,40000,48000] {type:\"raw\"}\n",
"\n",
"# @markdown #**[Optional]** Parameter file for your voice model\n",
"#@markdown _(must be named params.json)_ (Leave Empty for Default)\n",
"param_link = \"\" #@param {type:\"string\"}\n",
"if param_link == \"\":\n",
- " model_dir = \"model_dir/\"+model_number+\"/\"\n",
+ " from voice_changer.RVC.RVCModelSlotGenerator import RVCModelSlotGenerator\n",
+ " from voice_changer.VoiceChangerParamsManager import VoiceChangerParamsManager\n",
+ " from voice_changer.utils.LoadModelParams import LoadModelParamFile, LoadModelParams\n",
+ " from voice_changer.utils.VoiceChangerParams import VoiceChangerParams\n",
"\n",
- " # Find the .pth and .index files in the model_dir/0 directory\n",
- " pth_files = [f for f in os.listdir(model_dir) if f.endswith(\".pth\")]\n",
- " index_files = [f for f in os.listdir(model_dir) if f.endswith(\".index\")]\n",
+ " model_dir1 = \"model_dir/\"+model_number+\"/\"\n",
"\n",
- " if pth_files and index_files:\n",
- " # Take the first .pth and .index file as model and index names\n",
+ " is_pth = True # Set this to True if you want to search for .pth files, or False for .onnx files\n",
+ " file_extension = \".pth\" if is_pth else \".onnx\"\n",
+ "\n",
+ " # pth_files = [f for f in os.listdir(model_dir1) if f.endswith(file_extension)]\n",
+ "\n",
+ " pth_files = [f for f in os.listdir(model_dir1) if f.endswith(\".pth\") or f.endswith(\".onnx\")]\n",
+ " print(pth_files)\n",
+ " index_files = [f for f in os.listdir(model_dir1) if f.endswith(\".index\")]\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " if pth_files:\n",
" model_name = pth_files[0].replace(\".pth\", \"\")\n",
+ "\n",
+ " else:\n",
+ " model_name = \"Null\"\n",
+ " if index_files:\n",
" index_name = index_files[0].replace(\".index\", \"\")\n",
" else:\n",
- " # Set default values if no .pth and .index files are found\n",
- " model_name = \"Null\"\n",
- " index_name = \"Null\"\n",
+ " index_name = \"\"\n",
"\n",
- " # Define the content for params.json\n",
+ " original_string = str(pth_files)\n",
+ " string_pth_files = original_string[2:-2]\n",
+ " print(\"IM A STRING\"+original_string)\n",
+ "\n",
+ " print(model_name)\n",
+ " voiceChangerParams = VoiceChangerParams(\n",
+ " model_dir=\"./model_dir/\"+model_number,\n",
+ " content_vec_500=\"\",\n",
+ " content_vec_500_onnx=\"\",\n",
+ " content_vec_500_onnx_on=\"\",\n",
+ " hubert_base=\"\",\n",
+ " hubert_base_jp=\"\",\n",
+ " hubert_soft=\"\",\n",
+ " nsf_hifigan=\"\",\n",
+ " crepe_onnx_full=\"\",\n",
+ " crepe_onnx_tiny=\"\",\n",
+ " rmvpe=\"\",\n",
+ " rmvpe_onnx=\"\",\n",
+ " sample_mode=\"\"\n",
+ " )\n",
+ " vcparams = VoiceChangerParamsManager.get_instance()\n",
+ " vcparams.setParams(voiceChangerParams)\n",
+ "\n",
+ " file = LoadModelParamFile(\n",
+ " name=string_pth_files,\n",
+ " kind=\"rvcModel\",\n",
+ " dir=\"\",\n",
+ " )\n",
+ "\n",
+ " loadParam = LoadModelParams(\n",
+ " voiceChangerType=\"RVC\",\n",
+ " files=[file],\n",
+ " slot=\"\",\n",
+ " isSampleMode=False,\n",
+ " sampleId=\"\",\n",
+ " params={},\n",
+ " )\n",
+ " slotInfo = RVCModelSlotGenerator.loadModel(loadParam)\n",
+ " print(slotInfo.samplingRate)\n",
+ "\n",
+ "#----------------Make the Json File-----------\n",
" params_content = {\n",
- " \"slotIndex\": Slot_Index,\n",
+ " \"slotIndex\": -1,\n",
" \"voiceChangerType\": \"RVC\",\n",
" \"name\": model_name,\n",
" \"description\": \"\",\n",
@@ -225,14 +268,14 @@
" \"speakers\": {\n",
" \"0\": \"target\"\n",
" },\n",
- " \"modelFile\": f\"{model_name}.pth\",\n",
+ " \"modelFile\": string_pth_files,\n",
" \"indexFile\": f\"{index_name}.index\",\n",
" \"defaultTune\": Tune,\n",
" \"defaultIndexRatio\": Index,\n",
" \"defaultProtect\": 0.5,\n",
" \"isONNX\": False,\n",
" \"modelType\": \"pyTorchRVCv2\",\n",
- " \"samplingRate\": Sampling_Rate,\n",
+ " \"samplingRate\": slotInfo.samplingRate,\n",
" \"f0\": True,\n",
" \"embChannels\": 768,\n",
" \"embOutputLayer\": 12,\n",
@@ -243,16 +286,18 @@
" }\n",
"\n",
" # Write the content to params.json\n",
- " with open(f\"{model_dir}/params.json\", \"w\") as param_file:\n",
+ " with open(f\"{model_dir1}/params.json\", \"w\") as param_file:\n",
" json.dump(params_content, param_file)\n",
"\n",
+ "\n",
"# !unzip model.zip -d model_dir/0/\n",
"clear_output()\n",
- "print(\"\\033[92mModel with the name of \"+model_name+\" has been Imported!\")\n"
+ "print(\"\\033[92mModel with the name of \"+model_name+\" has been Imported to slot \"+model_number)\n",
+ "Image(url=icon_link)"
],
"metadata": {
- "cellView": "form",
- "id": "_ZtbKUVUgN3G"
+ "id": "_ZtbKUVUgN3G",
+ "cellView": "form"
},
"execution_count": null,
"outputs": []
@@ -270,7 +315,8 @@
"print(\"\\033[92mSuccessfully removed Model is slot \"+Delete_Slot)\n"
],
"metadata": {
- "id": "P9g6rG1-KUwt"
+ "id": "P9g6rG1-KUwt",
+ "cellView": "form"
},
"execution_count": null,
"outputs": []
@@ -279,7 +325,8 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "id": "lLWQuUd7WW9U"
+ "id": "lLWQuUd7WW9U",
+ "cellView": "form"
},
"outputs": [],
"source": [
@@ -440,4 +487,4 @@
},
"nbformat": 4,
"nbformat_minor": 0
-}
+}
\ No newline at end of file
From 8d3a0f8c7313917438f24bac03f3f198f3a61219 Mon Sep 17 00:00:00 2001
From: Hina <79749008+hinabl@users.noreply.github.com>
Date: Fri, 29 Sep 2023 00:44:13 +0800
Subject: [PATCH 02/26] Created using Colaboratory
---
...ified_Realtime_Voice_Changer_on_Colab.ipynb | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
index dbcc76ff..8035d256 100644
--- a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
+++ b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
@@ -1,5 +1,15 @@
{
"cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "view-in-github",
+ "colab_type": "text"
+ },
+ "source": [
+ ""
+ ]
+ },
{
"cell_type": "markdown",
"metadata": {
@@ -141,7 +151,7 @@
"\n",
"\n",
"#@markdown #Model Number `(Default is 0)` you can add multiple models as long as you change the number!\n",
- "model_number = \"6\" #@param ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199']\n",
+ "model_number = \"0\" #@param ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199']\n",
"\n",
"!rm -rf model_dir/$model_number\n",
"#@markdown ---\n",
@@ -315,8 +325,7 @@
"print(\"\\033[92mSuccessfully removed Model is slot \"+Delete_Slot)\n"
],
"metadata": {
- "id": "P9g6rG1-KUwt",
- "cellView": "form"
+ "id": "P9g6rG1-KUwt"
},
"execution_count": null,
"outputs": []
@@ -474,7 +483,8 @@
"colab": {
"provenance": [],
"private_outputs": true,
- "gpuType": "T4"
+ "gpuType": "T4",
+ "include_colab_link": true
},
"kernelspec": {
"display_name": "Python 3",
From bfd7f5cef7fe8e7f8216b238a3ffcbc9e6912c6c Mon Sep 17 00:00:00 2001
From: Hina <79749008+hinabl@users.noreply.github.com>
Date: Fri, 29 Sep 2023 00:55:14 +0800
Subject: [PATCH 03/26] Created using Colaboratory
---
Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
index 8035d256..63aa882d 100644
--- a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
+++ b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
@@ -189,7 +189,9 @@
"\n",
"\n",
"#@markdown #**Model Voice Convertion Setting**\n",
+ "#@markdown Tune `-12=F-M`**||**`0=M-M/F-F`**||**`12=M-F`\n",
"Tune = 12 #@param {type:\"slider\",min:-50,max:50,step:1}\n",
+ "#@markdown Index `0=Default`**||**`1=Replicate Accent`\n",
"Index = 0 #@param {type:\"slider\",min:0,max:1,step:0.1}\n",
"#@markdown ---\n",
"\n",
@@ -306,8 +308,7 @@
"Image(url=icon_link)"
],
"metadata": {
- "id": "_ZtbKUVUgN3G",
- "cellView": "form"
+ "id": "_ZtbKUVUgN3G"
},
"execution_count": null,
"outputs": []
@@ -315,17 +316,18 @@
{
"cell_type": "code",
"source": [
- "#@title Delete a model\n",
+ "#@title Delete a model `[Only Use When Needed]`\n",
"#@markdown ---\n",
"#@markdown Select which slot you want to delete\n",
"Delete_Slot = \"0\" #@param ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199']\n",
- "{type:\"slider\",min:0,max:1,step:0.1}\n",
+ "# {type:\"slider\",min:0,max:1,step:0.1}\n",
"\n",
"!rm -rf model_dir/$Model_Number\n",
"print(\"\\033[92mSuccessfully removed Model is slot \"+Delete_Slot)\n"
],
"metadata": {
- "id": "P9g6rG1-KUwt"
+ "id": "P9g6rG1-KUwt",
+ "cellView": "form"
},
"execution_count": null,
"outputs": []
From ae52548113e3eecf0919a49feb27043dfbd92845 Mon Sep 17 00:00:00 2001
From: Hina <79749008+hinabl@users.noreply.github.com>
Date: Tue, 3 Oct 2023 16:48:48 +0800
Subject: [PATCH 04/26] Removed localtunnel
---
...fied_Realtime_Voice_Changer_on_Colab.ipynb | 62 +------------------
1 file changed, 2 insertions(+), 60 deletions(-)
diff --git a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
index 63aa882d..8b068ed6 100644
--- a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
+++ b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
@@ -308,7 +308,8 @@
"Image(url=icon_link)"
],
"metadata": {
- "id": "_ZtbKUVUgN3G"
+ "id": "_ZtbKUVUgN3G",
+ "cellView": "form"
},
"execution_count": null,
"outputs": []
@@ -420,65 +421,6 @@
" --samples samples.json\n",
"\n"
]
- },
- {
- "cell_type": "code",
- "source": [
- "# @title **[Optional]** Start Server **using localtunnel** (ngrok alternative | no account needed)\n",
- "# @markdown This cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n",
- "\n",
- "# @markdown ---\n",
- "!npm config set update-notifier false\n",
- "!npm install -g localtunnel\n",
- "print(\"\\033[92mLocalTunnel installed!\")\n",
- "# @markdown If you want to automatically clear the output when the server loads, check this option.\n",
- "Clear_Output = True # @param {type:\"boolean\"}\n",
- "\n",
- "import portpicker, subprocess, threading, time, socket, urllib.request\n",
- "PORT = portpicker.pick_unused_port()\n",
- "\n",
- "from IPython.display import clear_output, Javascript\n",
- "\n",
- "def iframe_thread(port):\n",
- " while True:\n",
- " time.sleep(0.5)\n",
- " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
- " result = sock.connect_ex(('127.0.0.1', port))\n",
- " if result == 0:\n",
- " break\n",
- " sock.close()\n",
- " clear_output()\n",
- " print(\"Use the following endpoint to connect to localtunnel:\", urllib.request.urlopen('https://ipv4.icanhazip.com').read().decode('utf8').strip(\"\\n\"))\n",
- " p = subprocess.Popen([\"lt\", \"--port\", \"{}\".format(port)], stdout=subprocess.PIPE)\n",
- " for line in p.stdout:\n",
- " print(line.decode(), end='')\n",
- "\n",
- "threading.Thread(target=iframe_thread, daemon=True, args=(PORT,)).start()\n",
- "\n",
- "\n",
- "!python3 MMVCServerSIO.py \\\n",
- " -p {PORT} \\\n",
- " --https False \\\n",
- " --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \\\n",
- " --content_vec_500_onnx pretrain/content_vec_500.onnx \\\n",
- " --content_vec_500_onnx_on true \\\n",
- " --hubert_base pretrain/hubert_base.pt \\\n",
- " --hubert_base_jp pretrain/rinna_hubert_base_jp.pt \\\n",
- " --hubert_soft pretrain/hubert/hubert-soft-0d54a1f4.pt \\\n",
- " --nsf_hifigan pretrain/nsf_hifigan/model \\\n",
- " --crepe_onnx_full pretrain/crepe_onnx_full.onnx \\\n",
- " --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \\\n",
- " --rmvpe pretrain/rmvpe.pt \\\n",
- " --model_dir model_dir \\\n",
- " --samples samples.json \\\n",
- " --colab True"
- ],
- "metadata": {
- "cellView": "form",
- "id": "ZwZaCf4BeZi2"
- },
- "execution_count": null,
- "outputs": []
}
],
"metadata": {
From 22b0f839923b5250069b786e4ff28ef2fc448762 Mon Sep 17 00:00:00 2001
From: Hina <79749008+hinabl@users.noreply.github.com>
Date: Wed, 4 Oct 2023 00:12:43 +0800
Subject: [PATCH 05/26] Created using Colaboratory
---
...fied_Realtime_Voice_Changer_on_Colab.ipynb | 68 +++++++++++++++++++
1 file changed, 68 insertions(+)
diff --git a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
index 8b068ed6..8cc67ed8 100644
--- a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
+++ b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
@@ -421,6 +421,74 @@
" --samples samples.json\n",
"\n"
]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "![](https://i.pinimg.com/474x/de/72/9e/de729ecfa41b69901c42c82fff752414.jpg)\n",
+ "![](https://i.pinimg.com/474x/de/72/9e/de729ecfa41b69901c42c82fff752414.jpg)"
+ ],
+ "metadata": {
+ "id": "2Uu1sTSwTc7q"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# @title **[Optional]** Start Server **using localtunnel** (ngrok alternative | no account needed)\n",
+ "# @markdown This cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n",
+ "\n",
+ "# @markdown ---\n",
+ "!npm config set update-notifier false\n",
+ "!npm install -g localtunnel\n",
+ "print(\"\\033[92mLocalTunnel installed!\")\n",
+ "# @markdown If you want to automatically clear the output when the server loads, check this option.\n",
+ "Clear_Output = True # @param {type:\"boolean\"}\n",
+ "\n",
+ "import portpicker, subprocess, threading, time, socket, urllib.request\n",
+ "PORT = portpicker.pick_unused_port()\n",
+ "\n",
+ "from IPython.display import clear_output, Javascript\n",
+ "\n",
+ "def iframe_thread(port):\n",
+ " while True:\n",
+ " time.sleep(0.5)\n",
+ " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
+ " result = sock.connect_ex(('127.0.0.1', port))\n",
+ " if result == 0:\n",
+ " break\n",
+ " sock.close()\n",
+ " clear_output()\n",
+ " print(\"Use the following endpoint to connect to localtunnel:\", urllib.request.urlopen('https://ipv4.icanhazip.com').read().decode('utf8').strip(\"\\n\"))\n",
+ " p = subprocess.Popen([\"lt\", \"--port\", \"{}\".format(port)], stdout=subprocess.PIPE)\n",
+ " for line in p.stdout:\n",
+ " print(line.decode(), end='')\n",
+ "\n",
+ "threading.Thread(target=iframe_thread, daemon=True, args=(PORT,)).start()\n",
+ "\n",
+ "\n",
+ "!python3 MMVCServerSIO.py \\\n",
+ " -p {PORT} \\\n",
+ " --https False \\\n",
+ " --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \\\n",
+ " --content_vec_500_onnx pretrain/content_vec_500.onnx \\\n",
+ " --content_vec_500_onnx_on true \\\n",
+ " --hubert_base pretrain/hubert_base.pt \\\n",
+ " --hubert_base_jp pretrain/rinna_hubert_base_jp.pt \\\n",
+ " --hubert_soft pretrain/hubert/hubert-soft-0d54a1f4.pt \\\n",
+ " --nsf_hifigan pretrain/nsf_hifigan/model \\\n",
+ " --crepe_onnx_full pretrain/crepe_onnx_full.onnx \\\n",
+ " --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \\\n",
+ " --rmvpe pretrain/rmvpe.pt \\\n",
+ " --model_dir model_dir \\\n",
+ " --samples samples.json \\\n",
+ " --colab True"
+ ],
+ "metadata": {
+ "id": "Mr7325z-TTX5"
+ },
+ "execution_count": null,
+ "outputs": []
}
],
"metadata": {
From c0db39990de3bc432d31512add06d706e478248d Mon Sep 17 00:00:00 2001
From: Hina <79749008+hinabl@users.noreply.github.com>
Date: Fri, 13 Oct 2023 19:18:07 +0800
Subject: [PATCH 06/26] Background WEEEE
---
...fied_Realtime_Voice_Changer_on_Colab.ipynb | 102 ++++++++----------
1 file changed, 42 insertions(+), 60 deletions(-)
diff --git a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
index 8cc67ed8..a3b17bda 100644
--- a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
+++ b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
@@ -116,7 +116,6 @@
"clear_output()\n",
"!rm -rf rvctimer\n",
"!git clone --depth 1 $rvctimer\n",
- "!cp -f rvctimer/index.html $pathloc/client/demo/dist/\n",
"\n",
"\n",
"%cd $pathloc/server/\n",
@@ -357,7 +356,7 @@
"Token = 'YOUR_TOKEN_HERE' # @param {type:\"string\"}\n",
"# @markdown **4** - Still need further tests, but maybe region can help a bit on latency?\\\n",
"# @markdown `Default Region: us - United States (Ohio)`\n",
- "Region = \"ap - Asia/Pacific (Singapore)\" # @param [\"ap - Asia/Pacific (Singapore)\", \"au - Australia (Sydney)\",\"eu - Europe (Frankfurt)\", \"in - India (Mumbai)\",\"jp - Japan (Tokyo)\",\"sa - South America (Sao Paulo)\", \"us - United States (Ohio)\"]\n",
+ "Region = \"us - United States (Ohio)\" # @param [\"ap - Asia/Pacific (Singapore)\", \"au - Australia (Sydney)\",\"eu - Europe (Frankfurt)\", \"in - India (Mumbai)\",\"jp - Japan (Tokyo)\",\"sa - South America (Sao Paulo)\", \"us - United States (Ohio)\"]\n",
"MyConfig = conf.PyngrokConfig()\n",
"\n",
"MyConfig.auth_token = Token\n",
@@ -372,6 +371,47 @@
"# @markdown If you want to automatically clear the output when the server loads, check this option.\n",
"Clear_Output = True # @param {type:\"boolean\"}\n",
"\n",
+ "#@markdown ---\n",
+ "#@markdown If you want to use a custom background for the voice changer\n",
+ "Use_Custom_BG=False #@param{type:\"boolean\"}\n",
+ "BG_URL=\"https://w.wallha.com/ws/14/cMmpo5vn.jpg\" #@param{type:\"string\"}\n",
+ "#@markdown Text colors can be hex ``#101010`` or name of color ``black`` (css)\n",
+ "Text_Color=\"green\" #@param{type:\"string\"}\n",
+ "if Use_Custom_BG==True:\n",
+ " if BG_URL==\"\":\n",
+ " !cp -f rvctimer/index.html $pathloc/client/demo/dist/\n",
+ " else:\n",
+ " html_template = f'''\n",
+ " \n",
+ " \n",
+ "
\n",
+ " \n",
+ " Voice Changer Client Demo\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " '''\n",
+ " with open('index.html', 'w') as file:\n",
+ " file.write(html_template)\n",
+ " !mkdir ../client/demo/dist/temp/\n",
+ " !mv ../client/demo/dist/index.html ../client/demo/dist/temp/index.html\n",
+ " !mv index.html ../client/demo/dist/\n",
+ "else:\n",
+ " !cp -f ../client/demo/dist/temp/index.html ../client/demo/dist/index.html\n",
+ "\n",
"mainpy=codecs.decode('ZZIPFreireFVB.cl','rot_13')\n",
"\n",
"import portpicker, socket, urllib.request\n",
@@ -431,64 +471,6 @@
"metadata": {
"id": "2Uu1sTSwTc7q"
}
- },
- {
- "cell_type": "code",
- "source": [
- "# @title **[Optional]** Start Server **using localtunnel** (ngrok alternative | no account needed)\n",
- "# @markdown This cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n",
- "\n",
- "# @markdown ---\n",
- "!npm config set update-notifier false\n",
- "!npm install -g localtunnel\n",
- "print(\"\\033[92mLocalTunnel installed!\")\n",
- "# @markdown If you want to automatically clear the output when the server loads, check this option.\n",
- "Clear_Output = True # @param {type:\"boolean\"}\n",
- "\n",
- "import portpicker, subprocess, threading, time, socket, urllib.request\n",
- "PORT = portpicker.pick_unused_port()\n",
- "\n",
- "from IPython.display import clear_output, Javascript\n",
- "\n",
- "def iframe_thread(port):\n",
- " while True:\n",
- " time.sleep(0.5)\n",
- " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
- " result = sock.connect_ex(('127.0.0.1', port))\n",
- " if result == 0:\n",
- " break\n",
- " sock.close()\n",
- " clear_output()\n",
- " print(\"Use the following endpoint to connect to localtunnel:\", urllib.request.urlopen('https://ipv4.icanhazip.com').read().decode('utf8').strip(\"\\n\"))\n",
- " p = subprocess.Popen([\"lt\", \"--port\", \"{}\".format(port)], stdout=subprocess.PIPE)\n",
- " for line in p.stdout:\n",
- " print(line.decode(), end='')\n",
- "\n",
- "threading.Thread(target=iframe_thread, daemon=True, args=(PORT,)).start()\n",
- "\n",
- "\n",
- "!python3 MMVCServerSIO.py \\\n",
- " -p {PORT} \\\n",
- " --https False \\\n",
- " --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \\\n",
- " --content_vec_500_onnx pretrain/content_vec_500.onnx \\\n",
- " --content_vec_500_onnx_on true \\\n",
- " --hubert_base pretrain/hubert_base.pt \\\n",
- " --hubert_base_jp pretrain/rinna_hubert_base_jp.pt \\\n",
- " --hubert_soft pretrain/hubert/hubert-soft-0d54a1f4.pt \\\n",
- " --nsf_hifigan pretrain/nsf_hifigan/model \\\n",
- " --crepe_onnx_full pretrain/crepe_onnx_full.onnx \\\n",
- " --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \\\n",
- " --rmvpe pretrain/rmvpe.pt \\\n",
- " --model_dir model_dir \\\n",
- " --samples samples.json \\\n",
- " --colab True"
- ],
- "metadata": {
- "id": "Mr7325z-TTX5"
- },
- "execution_count": null,
- "outputs": []
}
],
"metadata": {
From 6094be47f2042faaf51c24f7afc4a1127aeeb2dc Mon Sep 17 00:00:00 2001
From: Hina <79749008+hinabl@users.noreply.github.com>
Date: Wed, 25 Oct 2023 10:31:14 +0800
Subject: [PATCH 07/26] Updated Credits and Info
---
Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
index a3b17bda..75310ad3 100644
--- a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
+++ b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
@@ -30,7 +30,8 @@
"> Seems that PTH models performance better than ONNX for now, you can still try ONNX models and see if it satisfies you\n",
"\n",
"\n",
- "*You can always [click here](https://github.com/YunaOneeChan/Voice-Changer-Settings) to check if these settings are up-to-date*\n",
+ "*You can always [click here](https://rentry.co/VoiceChangerGuide#gpu-chart-for-known-working-chunkextra\n",
+ ") to check if these settings are up-to-date*\n",
"
\n",
"\n",
"---\n",
@@ -46,7 +47,7 @@
"# **Credits and Support**\n",
"Realtime Voice Changer by [w-okada](https://github.com/w-okada)\\\n",
"Colab files updated by [rafacasari](https://github.com/Rafacasari)\\\n",
- "Recommended settings by [YunaOneeChan](https://github.com/YunaOneeChan)\\\n",
+ "Recommended settings by [Raven](https://github.com/ravencutie21)\\\n",
"Modified again by [Hina](https://huggingface.co/HinaBl)\n",
"\n",
"Need help? [AI Hub Discord](https://discord.gg/aihub) » ***#help-realtime-vc***\n",
From 5c84c4cb91d6d7a8cf3b8aec1bf71a5273b76ba4 Mon Sep 17 00:00:00 2001
From: Hina <79749008+hinabl@users.noreply.github.com>
Date: Mon, 30 Oct 2023 19:49:54 +0800
Subject: [PATCH 08/26] Removed packages from requirements that are not needed
or already installed (first batch)
---
Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
index 75310ad3..d581e3fa 100644
--- a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
+++ b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
@@ -126,9 +126,15 @@
"\n",
"\n",
"!apt-get install libportaudio2 &> /dev/null --quiet\n",
- "!pip install pyworld onnxruntime-gpu uvicorn faiss-gpu fairseq jedi google-colab moviepy decorator==4.4.2 sounddevice numpy==1.23.5 pyngrok --quiet\n",
+ "!pip install pyworld onnxruntime-gpu uvicorn faiss-gpu fairseq jedi google-colab moviepy decorator==4.4.2 sounddevice pyngrok --quiet\n",
"print(\"\\033[92mInstalling Requirements!\")\n",
"clear_output()\n",
+ "\n",
+ "!sed -i '/torch==/d' requirements.txt\n",
+ "!sed -i '/torchaudio==/d' requirements.txt\n",
+ "!sed -i '/numpy==/d' requirements.txt\n",
+ "\n",
+ "\n",
"!pip install -r requirements.txt --no-build-isolation --quiet\n",
"# Maybe install Tensor packages?\n",
"#!pip install torch-tensorrt\n",
@@ -320,7 +326,7 @@
"#@title Delete a model `[Only Use When Needed]`\n",
"#@markdown ---\n",
"#@markdown Select which slot you want to delete\n",
- "Delete_Slot = \"0\" #@param ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199']\n",
+ "Delete_Slot = \"198\" #@param ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199']\n",
"# {type:\"slider\",min:0,max:1,step:0.1}\n",
"\n",
"!rm -rf model_dir/$Model_Number\n",
From 65cde67b492c815466a52c71f8c1095235365838 Mon Sep 17 00:00:00 2001
From: Hina <79749008+hinabl@users.noreply.github.com>
Date: Wed, 1 Nov 2023 13:00:22 +0800
Subject: [PATCH 09/26] Using Rafa's Install
---
Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
index d581e3fa..6a71a6f1 100644
--- a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
+++ b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
@@ -125,8 +125,14 @@
"\n",
"\n",
"\n",
- "!apt-get install libportaudio2 &> /dev/null --quiet\n",
- "!pip install pyworld onnxruntime-gpu uvicorn faiss-gpu fairseq jedi google-colab moviepy decorator==4.4.2 sounddevice pyngrok --quiet\n",
+ "# !apt-get install libportaudio2 &> /dev/null --quiet\n",
+ "# !pip install pyworld onnxruntime-gpu uvicorn faiss-gpu fairseq jedi google-colab moviepy decorator==4.4.2 sounddevice pyngrok --quiet\n",
+ "\n",
+ "# Install dependencies that are missing from requirements.txt and pyngrok\n",
+ "!pip install faiss-gpu fairseq pyngrok --quiet\n",
+ "!pip install pyworld --no-build-isolation --quiet\n",
+ "\n",
+ "\n",
"print(\"\\033[92mInstalling Requirements!\")\n",
"clear_output()\n",
"\n",
From f86bee676879ad0131dd7711d2fca816485f7d49 Mon Sep 17 00:00:00 2001
From: Hina <79749008+hinabl@users.noreply.github.com>
Date: Wed, 1 Nov 2023 22:18:42 +0800
Subject: [PATCH 10/26] Fixed libportaudio missing
---
Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
index 6a71a6f1..fb6ed2f8 100644
--- a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
+++ b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
@@ -125,7 +125,7 @@
"\n",
"\n",
"\n",
- "# !apt-get install libportaudio2 &> /dev/null --quiet\n",
+ "!apt-get install libportaudio2 &> /dev/null --quiet\n",
"# !pip install pyworld onnxruntime-gpu uvicorn faiss-gpu fairseq jedi google-colab moviepy decorator==4.4.2 sounddevice pyngrok --quiet\n",
"\n",
"# Install dependencies that are missing from requirements.txt and pyngrok\n",
@@ -366,7 +366,7 @@
"from pyngrok import conf, ngrok\n",
"\n",
"f0_det= \"rmvpe_onnx\" #@param [\"rmvpe_onnx\",\"rvc\"]\n",
- "Token = 'YOUR_TOKEN_HERE' # @param {type:\"string\"}\n",
 + "Token = 'REDACTED_NGROK_AUTHTOKEN' # @param {type:\"string\"}\n",
"# @markdown **4** - Still need further tests, but maybe region can help a bit on latency?\\\n",
"# @markdown `Default Region: us - United States (Ohio)`\n",
"Region = \"us - United States (Ohio)\" # @param [\"ap - Asia/Pacific (Singapore)\", \"au - Australia (Sydney)\",\"eu - Europe (Frankfurt)\", \"in - India (Mumbai)\",\"jp - Japan (Tokyo)\",\"sa - South America (Sao Paulo)\", \"us - United States (Ohio)\"]\n",
From 497c7c067854f6a0689073e6b629ee500bd84fb9 Mon Sep 17 00:00:00 2001
From: Hina <79749008+hinabl@users.noreply.github.com>
Date: Thu, 2 Nov 2023 21:54:22 +0800
Subject: [PATCH 11/26] Created using Colaboratory
---
Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
index fb6ed2f8..b2660d8f 100644
--- a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
+++ b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
@@ -366,7 +366,7 @@
"from pyngrok import conf, ngrok\n",
"\n",
"f0_det= \"rmvpe_onnx\" #@param [\"rmvpe_onnx\",\"rvc\"]\n",
 - "Token = 'REDACTED_NGROK_AUTHTOKEN' # @param {type:\"string\"}\n",
+ "Token = 'Token_Here' # @param {type:\"string\"}\n",
"# @markdown **4** - Still need further tests, but maybe region can help a bit on latency?\\\n",
"# @markdown `Default Region: us - United States (Ohio)`\n",
"Region = \"us - United States (Ohio)\" # @param [\"ap - Asia/Pacific (Singapore)\", \"au - Australia (Sydney)\",\"eu - Europe (Frankfurt)\", \"in - India (Mumbai)\",\"jp - Japan (Tokyo)\",\"sa - South America (Sao Paulo)\", \"us - United States (Ohio)\"]\n",
From 6d20b3dad2525857df4a585cd2e420a2fe223042 Mon Sep 17 00:00:00 2001
From: Hina <79749008+hinabl@users.noreply.github.com>
Date: Fri, 3 Nov 2023 10:17:57 +0800
Subject: [PATCH 12/26] Updated to Rafa's latest Voice Changer Colab
---
...fied_Realtime_Voice_Changer_on_Colab.ipynb | 181 +++++++-----------
1 file changed, 65 insertions(+), 116 deletions(-)
diff --git a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
index b2660d8f..61a27ce0 100644
--- a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
+++ b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
@@ -84,8 +84,9 @@
},
"outputs": [],
"source": [
+ "#=================Updated=================\n",
"# @title **[1]** Clone repository and install dependencies\n",
- "# @markdown This first step will download the latest version of Voice Changer and install the dependencies. **It will take around 2 minutes to complete.**\n",
+ "# @markdown This first step will download the latest version of Voice Changer and install the dependencies. **It can take some time to complete.**\n",
"import os\n",
"import time\n",
"import subprocess\n",
@@ -94,12 +95,12 @@
"import base64\n",
"import codecs\n",
"\n",
- "from IPython.display import clear_output, Javascript\n",
"\n",
"externalgit=codecs.decode('uggcf://tvguho.pbz/j-bxnqn/ibvpr-punatre.tvg','rot_13')\n",
"rvctimer=codecs.decode('uggcf://tvguho.pbz/uvanoy/eipgvzre.tvg','rot_13')\n",
- "pathloc=codecs.decode('ibvpr-punatre','rot_13')\n",
- "!git clone --depth 1 $externalgit &> /dev/null\n",
+ "pathloc=codecs.decode('/pbagrag/ibvpr-punatre','rot_13')\n",
+ "\n",
+ "from IPython.display import clear_output, Javascript\n",
"\n",
"def update_timer_and_print():\n",
" global timer\n",
@@ -113,42 +114,30 @@
"timer = 0\n",
"threading.Thread(target=update_timer_and_print, daemon=True).start()\n",
"\n",
- "# os.system('cls')\n",
- "clear_output()\n",
- "!rm -rf rvctimer\n",
- "!git clone --depth 1 $rvctimer\n",
- "\n",
+ "!pip install colorama --quiet\n",
+ "from colorama import Fore, Style\n",
"\n",
+ "print(f\"{Fore.CYAN}> Cloning the repository...{Style.RESET_ALL}\")\n",
+ "!git clone --depth 1 $externalgit &> /dev/null\n",
+ "print(f\"{Fore.GREEN}> Successfully cloned the repository!{Style.RESET_ALL}\")\n",
"%cd $pathloc/server/\n",
"\n",
- "print(\"\\033[92mSuccessfully cloned the repository\")\n",
- "\n",
- "\n",
- "\n",
- "!apt-get install libportaudio2 &> /dev/null --quiet\n",
- "# !pip install pyworld onnxruntime-gpu uvicorn faiss-gpu fairseq jedi google-colab moviepy decorator==4.4.2 sounddevice pyngrok --quiet\n",
- "\n",
- "# Install dependencies that are missing from requirements.txt and pyngrok\n",
- "!pip install faiss-gpu fairseq pyngrok --quiet\n",
- "!pip install pyworld --no-build-isolation --quiet\n",
- "\n",
- "\n",
- "print(\"\\033[92mInstalling Requirements!\")\n",
- "clear_output()\n",
+ "print(f\"{Fore.CYAN}> Installing libportaudio2...{Style.RESET_ALL}\")\n",
+ "!apt-get -y install libportaudio2 -qq\n",
"\n",
"!sed -i '/torch==/d' requirements.txt\n",
"!sed -i '/torchaudio==/d' requirements.txt\n",
"!sed -i '/numpy==/d' requirements.txt\n",
"\n",
"\n",
- "!pip install -r requirements.txt --no-build-isolation --quiet\n",
- "# Maybe install Tensor packages?\n",
- "#!pip install torch-tensorrt\n",
- "#!pip install TensorRT\n",
- "print(\"\\033[92mSuccessfully installed all packages!\")\n",
- "# os.system('cls')\n",
- "clear_output()\n",
- "print(\"\\033[92mFinished, please continue to the next cell\")"
+ "print(f\"{Fore.CYAN}> Installing pre-dependencies...{Style.RESET_ALL}\")\n",
+ "# Install dependencies that are missing from requirements.txt and pyngrok\n",
+ "!pip install faiss-gpu fairseq pyngrok --quiet\n",
+ "!pip install pyworld --no-build-isolation --quiet\n",
+ "print(f\"{Fore.CYAN}> Installing dependencies from requirements.txt...{Style.RESET_ALL}\")\n",
+ "!pip install -r requirements.txt --quiet\n",
+ "\n",
+ "print(f\"{Fore.GREEN}> Successfully installed all packages!{Style.RESET_ALL}\")"
]
},
{
@@ -354,108 +343,67 @@
},
"outputs": [],
"source": [
- "# @title **[2]** Start Server **using ngrok** (Recommended | **need a ngrok account**)\n",
+ "\n",
+ "#=======================Updated=========================\n",
+ "\n",
+ "# @title Start Server **using ngrok**\n",
"# @markdown This cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n",
"\n",
"# @markdown ---\n",
- "# @markdown You'll need a ngrok account, but **it's free**!\n",
+ "# @markdown You'll need a ngrok account, but **it's free** and easy to create!\n",
"# @markdown ---\n",
- "# @markdown **1** - Create a **free** account at [ngrok](https://dashboard.ngrok.com/signup)\\\n",
- "# @markdown **2** - If you didn't logged in with Google or Github, you will need to **verify your e-mail**!\\\n",
- "# @markdown **3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, copy it and place it here:\n",
- "from pyngrok import conf, ngrok\n",
- "\n",
- "f0_det= \"rmvpe_onnx\" #@param [\"rmvpe_onnx\",\"rvc\"]\n",
- "Token = 'Token_Here' # @param {type:\"string\"}\n",
- "# @markdown **4** - Still need further tests, but maybe region can help a bit on latency?\\\n",
+ "# @markdown **1** - Create a **free** account at [ngrok](https://dashboard.ngrok.com/signup) or **login with Google/Github account**\\\n",
+ "# @markdown **2** - If you didn't logged in with Google/Github, you will need to **verify your e-mail**!\\\n",
+ "# @markdown **3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, and place it here:\n",
+ "Token = '' # @param {type:\"string\"}\n",
+ "# @markdown **4** - *(optional)* Change to a region near to you or keep at United States if increase latency\\\n",
"# @markdown `Default Region: us - United States (Ohio)`\n",
- "Region = \"us - United States (Ohio)\" # @param [\"ap - Asia/Pacific (Singapore)\", \"au - Australia (Sydney)\",\"eu - Europe (Frankfurt)\", \"in - India (Mumbai)\",\"jp - Japan (Tokyo)\",\"sa - South America (Sao Paulo)\", \"us - United States (Ohio)\"]\n",
- "MyConfig = conf.PyngrokConfig()\n",
+ "Region = \"jp - Japan (Tokyo)\" # @param [\"ap - Asia/Pacific (Singapore)\", \"au - Australia (Sydney)\",\"eu - Europe (Frankfurt)\", \"in - India (Mumbai)\",\"jp - Japan (Tokyo)\",\"sa - South America (Sao Paulo)\", \"us - United States (Ohio)\"]\n",
"\n",
+ "#@markdown **5** - *(optional)* Other options:\n",
+ "ClearConsole = True # @param {type:\"boolean\"}\n",
+ "\n",
+ "# ---------------------------------\n",
+ "# DO NOT TOUCH ANYTHING DOWN BELOW!\n",
+ "# ---------------------------------\n",
+ "\n",
+ "%cd $pathloc/server//server\n",
+ "\n",
+ "from pyngrok import conf, ngrok\n",
+ "MyConfig = conf.PyngrokConfig()\n",
"MyConfig.auth_token = Token\n",
"MyConfig.region = Region[0:2]\n",
- "\n",
- "conf.get_default().authtoken = Token\n",
- "conf.get_default().region = Region[0:2]\n",
- "\n",
+ "#conf.get_default().authtoken = Token\n",
+ "#conf.get_default().region = Region\n",
"conf.set_default(MyConfig);\n",
"\n",
- "# @markdown ---\n",
- "# @markdown If you want to automatically clear the output when the server loads, check this option.\n",
- "Clear_Output = True # @param {type:\"boolean\"}\n",
- "\n",
- "#@markdown ---\n",
- "#@markdown If you want to use a custom background for the voice changer\n",
- "Use_Custom_BG=False #@param{type:\"boolean\"}\n",
- "BG_URL=\"https://w.wallha.com/ws/14/cMmpo5vn.jpg\" #@param{type:\"string\"}\n",
- "#@markdown Text colors can be hex ``#101010`` or name of color ``black`` (css)\n",
- "Text_Color=\"green\" #@param{type:\"string\"}\n",
- "if Use_Custom_BG==True:\n",
- " if BG_URL==\"\":\n",
- " !cp -f rvctimer/index.html $pathloc/client/demo/dist/\n",
- " else:\n",
- " html_template = f'''\n",
- " \n",
- " \n",
- " \n",
- " \n",
- " Voice Changer Client Demo\n",
- " \n",
- " \n",
- " \n",
- " \n",
- " \n",
- " \n",
- " \n",
- " '''\n",
- " with open('index.html', 'w') as file:\n",
- " file.write(html_template)\n",
- " !mkdir ../client/demo/dist/temp/\n",
- " !mv ../client/demo/dist/index.html ../client/demo/dist/temp/index.html\n",
- " !mv index.html ../client/demo/dist/\n",
- "else:\n",
- " !cp -f ../client/demo/dist/temp/index.html ../client/demo/dist/index.html\n",
- "\n",
- "mainpy=codecs.decode('ZZIPFreireFVB.cl','rot_13')\n",
- "\n",
- "import portpicker, socket, urllib.request\n",
- "PORT = portpicker.pick_unused_port()\n",
+ "import subprocess, threading, time, socket, urllib.request\n",
+ "PORT = 8000\n",
"\n",
"from pyngrok import ngrok\n",
- "# Edited ⏬⏬\n",
"ngrokConnection = ngrok.connect(PORT)\n",
"public_url = ngrokConnection.public_url\n",
"\n",
- "def iframe_thread(port):\n",
- " while True:\n",
- " time.sleep(0.5)\n",
- " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
- " result = sock.connect_ex(('127.0.0.1', port))\n",
- " if result == 0:\n",
- " break\n",
- " sock.close()\n",
- " clear_output()\n",
- " print(\"------- SERVER READY! -------\")\n",
- " print(\"Your server is available at:\")\n",
- " print(public_url)\n",
- " print(\"-----------------------------\")\n",
- " # display(Javascript('window.open(\"{url}\", \\'_blank\\');'.format(url=public_url)))\n",
+ "from IPython.display import clear_output\n",
"\n",
- "print(PORT)\n",
+ "def wait_for_server():\n",
+ " while True:\n",
+ " time.sleep(0.5)\n",
+ " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
+ " result = sock.connect_ex(('127.0.0.1', PORT))\n",
+ " if result == 0:\n",
+ " break\n",
+ " sock.close()\n",
+ " if ClearConsole:\n",
+ " clear_output()\n",
+ " print(\"--------- SERVER READY! ---------\")\n",
+ " print(\"Your server is available at:\")\n",
+ " print(public_url)\n",
+ " print(\"---------------------------------\")\n",
"\n",
+ "threading.Thread(target=wait_for_server, daemon=True).start()\n",
"\n",
- "\n",
- "threading.Thread(target=iframe_thread, daemon=True, args=(PORT,)).start()\n",
- "\n",
+ "mainpy=codecs.decode('ZZIPFreireFVB.cl','rot_13')\n",
"\n",
"!python3 $mainpy \\\n",
" -p {PORT} \\\n",
@@ -472,7 +420,8 @@
" --rmvpe pretrain/rmvpe.pt \\\n",
" --model_dir model_dir \\\n",
" --samples samples.json\n",
- "\n"
+ "\n",
+ "ngrok.disconnect(ngrokConnection.public_url)\n"
]
},
{
From 5de0630bb42296307dc05cff73ddafeaf76982a1 Mon Sep 17 00:00:00 2001
From: Hina <79749008+hinabl@users.noreply.github.com>
Date: Fri, 3 Nov 2023 11:01:48 +0800
Subject: [PATCH 13/26] Added Google Colab on version text
---
...dified_Realtime_Voice_Changer_on_Colab.ipynb | 17 ++++++++++++++++-
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
index 61a27ce0..dd587f33 100644
--- a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
+++ b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
@@ -122,6 +122,21 @@
"print(f\"{Fore.GREEN}> Successfully cloned the repository!{Style.RESET_ALL}\")\n",
"%cd $pathloc/server/\n",
"\n",
+ "file_path = '/content/voice-changer/client/demo/dist/assets/gui_settings/version.txt'\n",
+ "\n",
+ "with open(file_path, 'r') as file:\n",
+ " file_content = file.read()\n",
+ "\n",
+ "text_to_replace = \"-.-.-.-\"\n",
+ "new_text = \"Google.Colab\" # New text to replace the specific text\n",
+ "\n",
+ "modified_content = file_content.replace(text_to_replace, new_text)\n",
+ "\n",
+ "with open(file_path, 'w') as file:\n",
+ " file.write(modified_content)\n",
+ "\n",
+ "print(f\"Text '{text_to_replace}' has been replaced with '{new_text}' in the file.\")\n",
+ "\n",
"print(f\"{Fore.CYAN}> Installing libportaudio2...{Style.RESET_ALL}\")\n",
"!apt-get -y install libportaudio2 -qq\n",
"\n",
@@ -355,7 +370,7 @@
"# @markdown **1** - Create a **free** account at [ngrok](https://dashboard.ngrok.com/signup) or **login with Google/Github account**\\\n",
"# @markdown **2** - If you didn't logged in with Google/Github, you will need to **verify your e-mail**!\\\n",
"# @markdown **3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, and place it here:\n",
- "Token = '' # @param {type:\"string\"}\n",
 + "Token = 'REDACTED_NGROK_AUTHTOKEN' # @param {type:\"string\"}\n",
"# @markdown **4** - *(optional)* Change to a region near to you or keep at United States if increase latency\\\n",
"# @markdown `Default Region: us - United States (Ohio)`\n",
"Region = \"jp - Japan (Tokyo)\" # @param [\"ap - Asia/Pacific (Singapore)\", \"au - Australia (Sydney)\",\"eu - Europe (Frankfurt)\", \"in - India (Mumbai)\",\"jp - Japan (Tokyo)\",\"sa - South America (Sao Paulo)\", \"us - United States (Ohio)\"]\n",
From ee827731b6c0e4555e38b5c464a33421a58e6c49 Mon Sep 17 00:00:00 2001
From: Hina <79749008+hinabl@users.noreply.github.com>
Date: Fri, 3 Nov 2023 11:31:03 +0800
Subject: [PATCH 14/26] Merged Google Drive with Clone and install
---
...fied_Realtime_Voice_Changer_on_Colab.ipynb | 49 +++++++++----------
1 file changed, 24 insertions(+), 25 deletions(-)
diff --git a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
index dd587f33..5fa0c1cb 100644
--- a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
+++ b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
@@ -55,26 +55,6 @@
"---"
]
},
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "cellView": "form",
- "id": "RhdqDSt-LfGr"
- },
- "outputs": [],
- "source": [
- "# @title **[Optional]** Connect to Google Drive\n",
- "# @markdown Using Google Drive can improve load times a bit and your models will be stored, so you don't need to re-upload every time that you use.\n",
- "import os\n",
- "from google.colab import drive\n",
- "\n",
- "if not os.path.exists('/content/drive'):\n",
- " drive.mount('/content/drive')\n",
- "\n",
- "%cd /content/drive/MyDrive"
- ]
- },
{
"cell_type": "code",
"execution_count": null,
@@ -96,9 +76,25 @@
"import codecs\n",
"\n",
"\n",
+ "\n",
+ "#@markdown ---\n",
+ "# @title **[Optional]** Connect to Google Drive\n",
+ "# @markdown Using Google Drive can improve load times a bit and your models will be stored, so you don't need to re-upload every time that you use.\n",
+ "\n",
+ "Use_Drive=False #@param {type:\"boolean\"}\n",
+ "\n",
+ "from google.colab import drive\n",
+ "\n",
+ "if Use_Drive==True:\n",
+ " if not os.path.exists('/content/drive'):\n",
+ " drive.mount('/content/drive')\n",
+ "\n",
+ " %cd /content/drive/MyDrive\n",
+ "\n",
+ "\n",
"externalgit=codecs.decode('uggcf://tvguho.pbz/j-bxnqn/ibvpr-punatre.tvg','rot_13')\n",
"rvctimer=codecs.decode('uggcf://tvguho.pbz/uvanoy/eipgvzre.tvg','rot_13')\n",
- "pathloc=codecs.decode('/pbagrag/ibvpr-punatre','rot_13')\n",
+ "pathloc=codecs.decode('ibvpr-punatre','rot_13')\n",
"\n",
"from IPython.display import clear_output, Javascript\n",
"\n",
@@ -122,16 +118,19 @@
"print(f\"{Fore.GREEN}> Successfully cloned the repository!{Style.RESET_ALL}\")\n",
"%cd $pathloc/server/\n",
"\n",
- "file_path = '/content/voice-changer/client/demo/dist/assets/gui_settings/version.txt'\n",
+ "# Read the content of the file\n",
+ "file_path = '../client/demo/dist/assets/gui_settings/version.txt'\n",
"\n",
"with open(file_path, 'r') as file:\n",
" file_content = file.read()\n",
"\n",
+ "# Replace the specific text\n",
"text_to_replace = \"-.-.-.-\"\n",
"new_text = \"Google.Colab\" # New text to replace the specific text\n",
"\n",
"modified_content = file_content.replace(text_to_replace, new_text)\n",
"\n",
+ "# Write the modified content back to the file\n",
"with open(file_path, 'w') as file:\n",
" file.write(modified_content)\n",
"\n",
@@ -370,10 +369,10 @@
"# @markdown **1** - Create a **free** account at [ngrok](https://dashboard.ngrok.com/signup) or **login with Google/Github account**\\\n",
"# @markdown **2** - If you didn't logged in with Google/Github, you will need to **verify your e-mail**!\\\n",
"# @markdown **3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, and place it here:\n",
 - "Token = 'REDACTED_NGROK_AUTHTOKEN' # @param {type:\"string\"}\n",
+ "Token = '' # @param {type:\"string\"}\n",
"# @markdown **4** - *(optional)* Change to a region near to you or keep at United States if increase latency\\\n",
"# @markdown `Default Region: us - United States (Ohio)`\n",
- "Region = \"jp - Japan (Tokyo)\" # @param [\"ap - Asia/Pacific (Singapore)\", \"au - Australia (Sydney)\",\"eu - Europe (Frankfurt)\", \"in - India (Mumbai)\",\"jp - Japan (Tokyo)\",\"sa - South America (Sao Paulo)\", \"us - United States (Ohio)\"]\n",
+ "Region = \"us - United States (Ohio)\" # @param [\"ap - Asia/Pacific (Singapore)\", \"au - Australia (Sydney)\",\"eu - Europe (Frankfurt)\", \"in - India (Mumbai)\",\"jp - Japan (Tokyo)\",\"sa - South America (Sao Paulo)\", \"us - United States (Ohio)\"]\n",
"\n",
"#@markdown **5** - *(optional)* Other options:\n",
"ClearConsole = True # @param {type:\"boolean\"}\n",
@@ -382,7 +381,7 @@
"# DO NOT TOUCH ANYTHING DOWN BELOW!\n",
"# ---------------------------------\n",
"\n",
- "%cd $pathloc/server//server\n",
+ "%cd $pathloc/server/\n",
"\n",
"from pyngrok import conf, ngrok\n",
"MyConfig = conf.PyngrokConfig()\n",
From da9e6aaf6bc056f5c0bc5766f837744c84e3eecd Mon Sep 17 00:00:00 2001
From: Hina <79749008+hinabl@users.noreply.github.com>
Date: Fri, 10 Nov 2023 21:55:42 +0800
Subject: [PATCH 15/26] Removed "Under Construction"
---
Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
index 5fa0c1cb..f14e946b 100644
--- a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
+++ b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
@@ -158,7 +158,7 @@
"cell_type": "code",
"source": [
"\n",
- "#@title #**[Optional]** Upload a voice model (Run this before running the Voice Changer)**[Currently Under Construction]**\n",
+ "#@title #**[Optional]** Upload a voice model (Run this before running the Voice Changer)\n",
"#@markdown ---\n",
"import os\n",
"import json\n",
From 53ea4cef578215264478e7c3ff4f8e64b7d7e90b Mon Sep 17 00:00:00 2001
From: Hina <79749008+hinabl@users.noreply.github.com>
Date: Mon, 13 Nov 2023 01:25:20 +0800
Subject: [PATCH 16/26] Cleaned Some Cells
---
...fied_Realtime_Voice_Changer_on_Colab.ipynb | 68 ++++++-------------
1 file changed, 19 insertions(+), 49 deletions(-)
diff --git a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
index f14e946b..b5135df4 100644
--- a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
+++ b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
@@ -158,26 +158,22 @@
"cell_type": "code",
"source": [
"\n",
- "#@title #**[Optional]** Upload a voice model (Run this before running the Voice Changer)\n",
- "#@markdown ---\n",
+ "#@title **[Optional]** Upload a voice model (Run this before running the Voice Changer)\n",
"import os\n",
"import json\n",
"from IPython.display import Image\n",
"\n",
"\n",
- "#@markdown #Model Number `(Default is 0)` you can add multiple models as long as you change the number!\n",
- "model_number = \"0\" #@param ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199']\n",
+ "model_slot = \"0\" #@param ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199']\n",
"\n",
- "!rm -rf model_dir/$model_number\n",
- "#@markdown ---\n",
- "#@markdown #**[Optional]** Add an icon to the model `(can be any image/leave empty for no image)`\n",
- "icon_link = \"https://cdn.discordapp.com/attachments/1144453160912572506/1144453161210351697/mika.png?ex=65163190&is=6514e010&hm=6cfc987d42e448b2912f5225e2c865df92d688c8dc46a135c2cca32682a3f3ea&\" #@param {type:\"string\"}\n",
- "#@markdown ---\n",
+ "!rm -rf model_dir/$model_slot\n",
+ "#@markdown **[Optional]** Add an icon to the model\n",
+ "icon_link = \"https://static.wikia.nocookie.net/virtualyoutuber/images/8/8b/ShyreiProfile.png\" #@param {type:\"string\"}\n",
"icon_link = '\"'+icon_link+'\"'\n",
"!mkdir model_dir\n",
- "!mkdir model_dir/$model_number\n",
- "#@markdown #Put your model's download link here `(must be a zip file)`\n",
- "model_link = \"https://huggingface.co/Kit-Lemonfoot/kitlemonfoot_rvc_models/resolve/main/Mika%20Melatika%20(Speaking)(KitLemonfoot).zip\" #@param {type:\"string\"}\n",
+ "!mkdir model_dir/$model_slot\n",
+ "#@markdown Put your model's download link here `(must be a zip file)`\n",
+ "model_link = \"https://huggingface.co/RavenCutie21/Models/resolve/main/SquChan_e800_20800steps.zip?download=true\" #@param {type:\"string\"}\n",
"model_link = '\"'+model_link+'\"'\n",
"!curl -L $model_link > model.zip\n",
"\n",
@@ -185,41 +181,35 @@
"# Conditionally set the iconFile based on whether icon_link is empty\n",
"if icon_link:\n",
" iconFile = \"icon.png\"\n",
- " !curl -L $icon_link > model_dir/$model_number/icon.png\n",
+ " !curl -L $icon_link > model_dir/$model_slot/icon.png\n",
"else:\n",
" iconFile = \"\"\n",
" print(\"icon_link is empty, so no icon file will be downloaded.\")\n",
- "#@markdown ---\n",
"\n",
"\n",
- "!unzip model.zip -d model_dir/$model_number\n",
+ "!unzip model.zip -d model_dir/$model_slot\n",
"\n",
- "# Checks all the files in model_number and puts it outside of it\n",
+ "# Checks all the files in model_slot and puts it outside of it\n",
"\n",
- "!mv model_dir/$model_number/*/* model_dir/$model_number/\n",
- "!rm -rf model_dir/$model_number/*/\n",
+ "!mv model_dir/$model_slot/*/* model_dir/$model_slot/\n",
+ "!rm -rf model_dir/$model_slot/*/\n",
"\n",
"# if theres a folder in the number,\n",
"# take all the files in the folder and put it outside of that folder\n",
"\n",
"\n",
- "#@markdown #**Model Voice Convertion Setting**\n",
- "#@markdown Tune `-12=F-M`**||**`0=M-M/F-F`**||**`12=M-F`\n",
+ "#@markdown **Model Voice Convertion Setting**\n",
"Tune = 12 #@param {type:\"slider\",min:-50,max:50,step:1}\n",
- "#@markdown Index `0=Default`**||**`1=Replicate Accent`\n",
"Index = 0 #@param {type:\"slider\",min:0,max:1,step:0.1}\n",
- "#@markdown ---\n",
"\n",
- "# @markdown #**[Optional]** Parameter file for your voice model\n",
- "#@markdown _(must be named params.json)_ (Leave Empty for Default)\n",
- "param_link = \"\" #@param {type:\"string\"}\n",
+ "param_link = \"\"\n",
"if param_link == \"\":\n",
" from voice_changer.RVC.RVCModelSlotGenerator import RVCModelSlotGenerator\n",
" from voice_changer.VoiceChangerParamsManager import VoiceChangerParamsManager\n",
" from voice_changer.utils.LoadModelParams import LoadModelParamFile, LoadModelParams\n",
" from voice_changer.utils.VoiceChangerParams import VoiceChangerParams\n",
"\n",
- " model_dir1 = \"model_dir/\"+model_number+\"/\"\n",
+ " model_dir1 = \"model_dir/\"+model_slot+\"/\"\n",
"\n",
" is_pth = True # Set this to True if you want to search for .pth files, or False for .onnx files\n",
" file_extension = \".pth\" if is_pth else \".onnx\"\n",
@@ -249,7 +239,7 @@
"\n",
" print(model_name)\n",
" voiceChangerParams = VoiceChangerParams(\n",
- " model_dir=\"./model_dir/\"+model_number,\n",
+ " model_dir=\"./model_dir/\"+model_slot,\n",
" content_vec_500=\"\",\n",
" content_vec_500_onnx=\"\",\n",
" content_vec_500_onnx_on=\"\",\n",
@@ -319,8 +309,7 @@
"\n",
"# !unzip model.zip -d model_dir/0/\n",
"clear_output()\n",
- "print(\"\\033[92mModel with the name of \"+model_name+\" has been Imported to slot \"+model_number)\n",
- "Image(url=icon_link)"
+ "print(\"\\033[92mModel with the name of \"+model_name+\" has been Imported to slot \"+model_slot)"
],
"metadata": {
"id": "_ZtbKUVUgN3G",
@@ -329,25 +318,6 @@
"execution_count": null,
"outputs": []
},
- {
- "cell_type": "code",
- "source": [
- "#@title Delete a model `[Only Use When Needed]`\n",
- "#@markdown ---\n",
- "#@markdown Select which slot you want to delete\n",
- "Delete_Slot = \"198\" #@param ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199']\n",
- "# {type:\"slider\",min:0,max:1,step:0.1}\n",
- "\n",
- "!rm -rf model_dir/$Model_Number\n",
- "print(\"\\033[92mSuccessfully removed Model is slot \"+Delete_Slot)\n"
- ],
- "metadata": {
- "id": "P9g6rG1-KUwt",
- "cellView": "form"
- },
- "execution_count": null,
- "outputs": []
- },
{
"cell_type": "code",
"execution_count": null,
@@ -369,7 +339,7 @@
"# @markdown **1** - Create a **free** account at [ngrok](https://dashboard.ngrok.com/signup) or **login with Google/Github account**\\\n",
"# @markdown **2** - If you didn't logged in with Google/Github, you will need to **verify your e-mail**!\\\n",
"# @markdown **3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, and place it here:\n",
- "Token = '' # @param {type:\"string\"}\n",
+ "Token = 'TOKEN_HERE' # @param {type:\"string\"}\n",
"# @markdown **4** - *(optional)* Change to a region near to you or keep at United States if increase latency\\\n",
"# @markdown `Default Region: us - United States (Ohio)`\n",
"Region = \"us - United States (Ohio)\" # @param [\"ap - Asia/Pacific (Singapore)\", \"au - Australia (Sydney)\",\"eu - Europe (Frankfurt)\", \"in - India (Mumbai)\",\"jp - Japan (Tokyo)\",\"sa - South America (Sao Paulo)\", \"us - United States (Ohio)\"]\n",
From 218872ba7544ba4ec7b38790eb253bca647a34fd Mon Sep 17 00:00:00 2001
From: Hina <79749008+hinabl@users.noreply.github.com>
Date: Mon, 13 Nov 2023 19:59:42 +0800
Subject: [PATCH 17/26] Make Kaggle Realtime Voice Changer
---
Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb | 1 +
1 file changed, 1 insertion(+)
create mode 100644 Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb
diff --git a/Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb b/Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb
new file mode 100644
index 00000000..a56be1a2
--- /dev/null
+++ b/Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb
@@ -0,0 +1 @@
+{"cells":[{"cell_type":"markdown","metadata":{},"source":["# Voice Changer Kaggle Ver by [Hina](https://linktr.ee/_hina__)\n","### Repo from [w-okada](https://github.com/w-okada)"]},{"cell_type":"markdown","metadata":{},"source":["### Clone Repo and Install Repo"]},{"cell_type":"code","execution_count":1,"metadata":{"_cell_guid":"b1076dfc-b9ad-4769-8c92-a6c4dae69d19","_uuid":"8f2839f25d086af736a60e9eeb907d3b93b6e0e5","execution":{"iopub.execute_input":"2023-11-13T11:43:17.161165Z","iopub.status.busy":"2023-11-13T11:43:17.160820Z","iopub.status.idle":"2023-11-13T11:48:50.214546Z","shell.execute_reply":"2023-11-13T11:48:50.213329Z","shell.execute_reply.started":"2023-11-13T11:43:17.161135Z"},"trusted":true},"outputs":[{"name":"stdout","output_type":"stream","text":["Cloned\n"]}],"source":["from IPython.display import clear_output, Javascript\n","!mkdir hinabl\n","%cd hinabl\n","!git clone https://github.com/w-okada/voice-changer.git --depth=1 --quiet .\n","%cd server\n","!sed -i \"s/-.-.-.-/Kaggle.Mod/\" '../client/demo/dist/assets/gui_settings/version.txt'\n","!pip install -r requirements.txt\n","!mv MMVCServerSIO.py run.py\n","!sed -i \"s/MMVCServerSIO/run/\" run.py\n","!apt-get -y install libportaudio2 -qq\n","!pip install faiss-gpu fairseq pyngrok --quiet\n","!pip install pyworld --no-build-isolation --quiet\n","clear_output()\n","print(\"Cloned\")"]},{"cell_type":"markdown","metadata":{},"source":["### Try Run"]},{"cell_type":"code","execution_count":null,"metadata":{"execution":{"iopub.execute_input":"2023-11-13T11:49:54.280036Z","iopub.status.busy":"2023-11-13T11:49:54.279053Z"},"trusted":true},"outputs":[{"name":"stdout","output_type":"stream","text":["Server URL:https://f9c1-34-168-102-26.ngrok-free.app\n"]}],"source":["import subprocess, threading, time, socket, urllib.request\n","PORT = 8000\n","from pyngrok import conf, ngrok\n","Token=\"TokenHere\"\n","ngrok.set_auth_token(Token)\n","ngrok.region = \"ap\" # ap | au | eu | in | jp | sa | us | 
us-cal-1\n","ngrokConnection = ngrok.connect(PORT)\n","public_url = ngrokConnection.public_url\n","def wait_for_server():\n"," while True:\n"," time.sleep(0.5)\n"," sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n"," result = sock.connect_ex(('127.0.0.1', PORT))\n"," if result == 0:\n"," break\n"," sock.close()\n"," clear_output()\n"," print(\"Server URL:\"+public_url)\n","threading.Thread(target=wait_for_server, daemon=True).start()\n","!python3 run.py -p {PORT} --https False "]}],"metadata":{"kernelspec":{"display_name":"Python 3","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.10.12"}},"nbformat":4,"nbformat_minor":4}
From 1da0717a456f11fc15e96749c50b0c980b9f750b Mon Sep 17 00:00:00 2001
From: Hina <79749008+hinabl@users.noreply.github.com>
Date: Tue, 14 Nov 2023 17:05:24 +0800
Subject: [PATCH 18/26] Fixed Kaggle Realtime VoiceChanger
---
Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb b/Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb
index a56be1a2..3aaef9c2 100644
--- a/Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb
+++ b/Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb
@@ -1 +1 @@
-{"cells":[{"cell_type":"markdown","metadata":{},"source":["# Voice Changer Kaggle Ver by [Hina](https://linktr.ee/_hina__)\n","### Repo from [w-okada](https://github.com/w-okada)"]},{"cell_type":"markdown","metadata":{},"source":["### Clone Repo and Install Repo"]},{"cell_type":"code","execution_count":1,"metadata":{"_cell_guid":"b1076dfc-b9ad-4769-8c92-a6c4dae69d19","_uuid":"8f2839f25d086af736a60e9eeb907d3b93b6e0e5","execution":{"iopub.execute_input":"2023-11-13T11:43:17.161165Z","iopub.status.busy":"2023-11-13T11:43:17.160820Z","iopub.status.idle":"2023-11-13T11:48:50.214546Z","shell.execute_reply":"2023-11-13T11:48:50.213329Z","shell.execute_reply.started":"2023-11-13T11:43:17.161135Z"},"trusted":true},"outputs":[{"name":"stdout","output_type":"stream","text":["Cloned\n"]}],"source":["from IPython.display import clear_output, Javascript\n","!mkdir hinabl\n","%cd hinabl\n","!git clone https://github.com/w-okada/voice-changer.git --depth=1 --quiet .\n","%cd server\n","!sed -i \"s/-.-.-.-/Kaggle.Mod/\" '../client/demo/dist/assets/gui_settings/version.txt'\n","!pip install -r requirements.txt\n","!mv MMVCServerSIO.py run.py\n","!sed -i \"s/MMVCServerSIO/run/\" run.py\n","!apt-get -y install libportaudio2 -qq\n","!pip install faiss-gpu fairseq pyngrok --quiet\n","!pip install pyworld --no-build-isolation --quiet\n","clear_output()\n","print(\"Cloned\")"]},{"cell_type":"markdown","metadata":{},"source":["### Try Run"]},{"cell_type":"code","execution_count":null,"metadata":{"execution":{"iopub.execute_input":"2023-11-13T11:49:54.280036Z","iopub.status.busy":"2023-11-13T11:49:54.279053Z"},"trusted":true},"outputs":[{"name":"stdout","output_type":"stream","text":["Server URL:https://f9c1-34-168-102-26.ngrok-free.app\n"]}],"source":["import subprocess, threading, time, socket, urllib.request\n","PORT = 8000\n","from pyngrok import conf, ngrok\n","Token=\"TokenHere\"\n","ngrok.set_auth_token(Token)\n","ngrok.region = \"ap\" # ap | au | eu | in | jp | sa | us | 
us-cal-1\n","ngrokConnection = ngrok.connect(PORT)\n","public_url = ngrokConnection.public_url\n","def wait_for_server():\n"," while True:\n"," time.sleep(0.5)\n"," sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n"," result = sock.connect_ex(('127.0.0.1', PORT))\n"," if result == 0:\n"," break\n"," sock.close()\n"," clear_output()\n"," print(\"Server URL:\"+public_url)\n","threading.Thread(target=wait_for_server, daemon=True).start()\n","!python3 run.py -p {PORT} --https False "]}],"metadata":{"kernelspec":{"display_name":"Python 3","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.10.12"}},"nbformat":4,"nbformat_minor":4}
+{"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"pygments_lexer":"ipython3","nbconvert_exporter":"python","version":"3.6.4","file_extension":".py","codemirror_mode":{"name":"ipython","version":3},"name":"python","mimetype":"text/x-python"}},"nbformat_minor":4,"nbformat":4,"cells":[{"source":"","metadata":{},"cell_type":"markdown"},{"cell_type":"markdown","source":"### [w-okada's Voice Changer](https://github.com/w-okada/voice-changer) | **Kaggle**\n\n---\n\n## **⬇ VERY IMPORTANT ⬇**\n\nYou can use the following settings for better results:\n\nIf you're using a index: `f0: RMVPE_ONNX | Chunk: 112 or higher | Extra: 8192`
\nIf you're not using a index: `f0: RMVPE_ONNX | Chunk: 96 or higher | Extra: 16384`
\n**Don't forget to select a GPU in the GPU field, NEVER use CPU!\n> Seems that PTH models performance better than ONNX for now, you can still try ONNX models and see if it satisfies you\n\n\n*You can always [click here](https://github.com/YunaOneeChan/Voice-Changer-Settings) to check if these settings are up-to-date*\n\n---\n**Credits**
\nRealtime Voice Changer by [w-okada](https://github.com/w-okada)
\nNotebook files updated by [rafacasari](https://github.com/Rafacasari)
\nRecommended settings by [Raven](https://github.com/RavenCutie21)
\nModded again by [Hina](https://github.com/hinabl)\n\n**Need help?** [AI Hub Discord](https://discord.gg/aihub) » ***#help-realtime-vc***\n\n---","metadata":{"id":"Lbbmx_Vjl0zo"}},{"cell_type":"markdown","source":"# Kaggle Tutorial\nRunning this notebook can be a bit complicated.\\\nAfter created your Kaggle account, you'll need to **verify your phone number** to be able to use Internet Connection and GPUs.\\\nFollow the instructions on the image below.\n\n## *You can use GPU P100 instead of GPU T4, some people are telling that P100 is better.*\n![instructions.png](https://i.imgur.com/0NutkD8.png)","metadata":{}},{"cell_type":"markdown","source":"# Clone repository and install dependencies\nThis first step will download the latest version of Voice Changer and install the dependencies. **It will take some time to complete.**","metadata":{}},{"cell_type":"code","source":"# This will make that we're on the right folder before installing\n%cd /kaggle/working/\n\n!pip install colorama --quiet\nfrom colorama import Fore, Style\nimport os\n\n!mkdir Hmod\n%cd Hmod\n!git clone https://github.com/w-okada/voice-changer.git --depth=1 --quiet .\nprint(f\"{Fore.GREEN}> Successfully cloned the repository!{Style.RESET_ALL}\")\n%cd server\n!sed -i \"s/-.-.-.-/Kaggle.Mod/\" '../client/demo/dist/assets/gui_settings/version.txt'\n!mv MMVCServerSIO.py Hmod.py\n!sed -i \"s/MMVCServerSIO/Hmod/\" Hmod.py\n\nprint(f\"{Fore.CYAN}> Installing libportaudio2...{Style.RESET_ALL}\")\n!apt-get -y install libportaudio2 -qq\n\nprint(f\"{Fore.CYAN}> Installing pre-dependencies...{Style.RESET_ALL}\")\n# Install dependencies that are missing from requirements.txt and pyngrok\n!pip install faiss-gpu fairseq pyngrok --quiet \n!pip install pyworld --no-build-isolation\nprint(f\"{Fore.CYAN}> Installing dependencies from requirements.txt...{Style.RESET_ALL}\")\n!pip install -r requirements.txt --quiet\n\n# Download the default settings ^-^\nif not 
os.path.exists(\"/kaggle/working/Hmod/server/stored_setting.json\"):\n !wget -q https://gist.githubusercontent.com/Rafacasari/d820d945497a01112e1a9ba331cbad4f/raw/8e0a426c22688b05dd9c541648bceab27e422dd6/kaggle_setting.json -O /kaggle/working/24apuiBokE3TjZwc6tuqqv39SwP_2LRouVj3M9oZZCbzgntuG /server/stored_setting.json\nprint(f\"{Fore.GREEN}> Successfully installed all packages!{Style.RESET_ALL}\")\n\nprint(f\"{Fore.GREEN}> You can safely ignore the dependency conflict errors, it's a error from Kaggle and don't interfer on Voice Changer!{Style.RESET_ALL}\")","metadata":{"id":"86wTFmqsNMnD","cellView":"form","_kg_hide-output":false,"execution":{"iopub.status.busy":"2023-11-13T14:29:34.68815Z","iopub.execute_input":"2023-11-13T14:29:34.688434Z","iopub.status.idle":"2023-11-13T14:35:25.010808Z","shell.execute_reply.started":"2023-11-13T14:29:34.688408Z","shell.execute_reply":"2023-11-13T14:35:25.009639Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"# Start Server **using ngrok**\nThis cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n\n---\nYou'll need a ngrok account, but **it's free** and easy to create!\n---\n**1** - Create a **free** account at [ngrok](https://dashboard.ngrok.com/signup)\\\n**2** - If you didn't logged in with Google or Github, you will need to **verify your e-mail**!\\\n**3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, and replace **YOUR_TOKEN_HERE** with your token.\\\n**4** - *(optional)* Change to a region near to you","metadata":{}},{"cell_type":"code","source":"Token = '24apuiBokE3TjZwc6tuqqv39SwP_2LRouVj3M9oZZCbzgntuG'\nRegion = \"ap\" # Read the instructions below\n\n# You can change the region for a better latency, use only the abbreviation\n# Choose between this options: \n# us -> United States (Ohio)\n# ap -> Asia/Pacific (Singapore)\n# au -> Australia (Sydney)\n# eu 
-> Europe (Frankfurt)\n# in -> India (Mumbai)\n# jp -> Japan (Tokyo)\n# sa -> South America (Sao Paulo)\n\n# ---------------------------------\n# DO NOT TOUCH ANYTHING DOWN BELOW!\n\n%cd /kaggle/working/Hmod/server\n \nfrom pyngrok import conf, ngrok\nMyConfig = conf.PyngrokConfig()\nMyConfig.auth_token = Token\nMyConfig.region = Region\nconf.get_default().authtoken = Token\nconf.get_default().region = Region\nconf.set_default(MyConfig);\n\nimport subprocess, threading, time, socket, urllib.request\nPORT = 8000\n\nfrom pyngrok import ngrok\nngrokConnection = ngrok.connect(PORT)\npublic_url = ngrokConnection.public_url\n\ndef wait_for_server():\n while True:\n time.sleep(0.5)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex(('127.0.0.1', PORT))\n if result == 0:\n break\n sock.close()\n print(\"--------- SERVER READY! ---------\")\n print(\"Your server is available at:\")\n print(public_url)\n print(\"---------------------------------\")\n\nthreading.Thread(target=wait_for_server, daemon=True).start()\n\n!python3 Hmod.py \\\n -p {PORT} \\\n --https False \\\n --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \\\n --content_vec_500_onnx pretrain/content_vec_500.onnx \\\n --content_vec_500_onnx_on true \\\n --hubert_base pretrain/hubert_base.pt \\\n --hubert_base_jp pretrain/rinna_hubert_base_jp.pt \\\n --hubert_soft pretrain/hubert/hubert-soft-0d54a1f4.pt \\\n --nsf_hifigan pretrain/nsf_hifigan/model \\\n --crepe_onnx_full pretrain/crepe_onnx_full.onnx \\\n --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \\\n --rmvpe pretrain/rmvpe.pt \\\n --model_dir model_dir \\\n --samples samples.json\n\nngrok.disconnect(ngrokConnection.public_url)","metadata":{"id":"lLWQuUd7WW9U","cellView":"form","_kg_hide-input":false,"scrolled":true,"execution":{"iopub.status.busy":"2023-11-13T14:36:20.529333Z","iopub.execute_input":"2023-11-13T14:36:20.530081Z"},"trusted":true},"execution_count":null,"outputs":[]}]}
\ No newline at end of file
From f36138d64e882d4f38f24bd02826f83df860223b Mon Sep 17 00:00:00 2001
From: Hina <79749008+hinabl@users.noreply.github.com>
Date: Tue, 14 Nov 2023 17:45:58 +0800
Subject: [PATCH 19/26] Update
---
Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb b/Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb
index 3aaef9c2..8f834f28 100644
--- a/Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb
+++ b/Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb
@@ -1 +1 @@
-{"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"pygments_lexer":"ipython3","nbconvert_exporter":"python","version":"3.6.4","file_extension":".py","codemirror_mode":{"name":"ipython","version":3},"name":"python","mimetype":"text/x-python"}},"nbformat_minor":4,"nbformat":4,"cells":[{"source":"","metadata":{},"cell_type":"markdown"},{"cell_type":"markdown","source":"### [w-okada's Voice Changer](https://github.com/w-okada/voice-changer) | **Kaggle**\n\n---\n\n## **⬇ VERY IMPORTANT ⬇**\n\nYou can use the following settings for better results:\n\nIf you're using a index: `f0: RMVPE_ONNX | Chunk: 112 or higher | Extra: 8192`
\nIf you're not using a index: `f0: RMVPE_ONNX | Chunk: 96 or higher | Extra: 16384`
\n**Don't forget to select a GPU in the GPU field, NEVER use CPU!\n> Seems that PTH models performance better than ONNX for now, you can still try ONNX models and see if it satisfies you\n\n\n*You can always [click here](https://github.com/YunaOneeChan/Voice-Changer-Settings) to check if these settings are up-to-date*\n\n---\n**Credits**
\nRealtime Voice Changer by [w-okada](https://github.com/w-okada)
\nNotebook files updated by [rafacasari](https://github.com/Rafacasari)
\nRecommended settings by [Raven](https://github.com/RavenCutie21)
\nModded again by [Hina](https://github.com/hinabl)\n\n**Need help?** [AI Hub Discord](https://discord.gg/aihub) » ***#help-realtime-vc***\n\n---","metadata":{"id":"Lbbmx_Vjl0zo"}},{"cell_type":"markdown","source":"# Kaggle Tutorial\nRunning this notebook can be a bit complicated.\\\nAfter created your Kaggle account, you'll need to **verify your phone number** to be able to use Internet Connection and GPUs.\\\nFollow the instructions on the image below.\n\n## *You can use GPU P100 instead of GPU T4, some people are telling that P100 is better.*\n![instructions.png](https://i.imgur.com/0NutkD8.png)","metadata":{}},{"cell_type":"markdown","source":"# Clone repository and install dependencies\nThis first step will download the latest version of Voice Changer and install the dependencies. **It will take some time to complete.**","metadata":{}},{"cell_type":"code","source":"# This will make that we're on the right folder before installing\n%cd /kaggle/working/\n\n!pip install colorama --quiet\nfrom colorama import Fore, Style\nimport os\n\n!mkdir Hmod\n%cd Hmod\n!git clone https://github.com/w-okada/voice-changer.git --depth=1 --quiet .\nprint(f\"{Fore.GREEN}> Successfully cloned the repository!{Style.RESET_ALL}\")\n%cd server\n!sed -i \"s/-.-.-.-/Kaggle.Mod/\" '../client/demo/dist/assets/gui_settings/version.txt'\n!mv MMVCServerSIO.py Hmod.py\n!sed -i \"s/MMVCServerSIO/Hmod/\" Hmod.py\n\nprint(f\"{Fore.CYAN}> Installing libportaudio2...{Style.RESET_ALL}\")\n!apt-get -y install libportaudio2 -qq\n\nprint(f\"{Fore.CYAN}> Installing pre-dependencies...{Style.RESET_ALL}\")\n# Install dependencies that are missing from requirements.txt and pyngrok\n!pip install faiss-gpu fairseq pyngrok --quiet \n!pip install pyworld --no-build-isolation\nprint(f\"{Fore.CYAN}> Installing dependencies from requirements.txt...{Style.RESET_ALL}\")\n!pip install -r requirements.txt --quiet\n\n# Download the default settings ^-^\nif not 
os.path.exists(\"/kaggle/working/Hmod/server/stored_setting.json\"):\n !wget -q https://gist.githubusercontent.com/Rafacasari/d820d945497a01112e1a9ba331cbad4f/raw/8e0a426c22688b05dd9c541648bceab27e422dd6/kaggle_setting.json -O /kaggle/working/24apuiBokE3TjZwc6tuqqv39SwP_2LRouVj3M9oZZCbzgntuG /server/stored_setting.json\nprint(f\"{Fore.GREEN}> Successfully installed all packages!{Style.RESET_ALL}\")\n\nprint(f\"{Fore.GREEN}> You can safely ignore the dependency conflict errors, it's a error from Kaggle and don't interfer on Voice Changer!{Style.RESET_ALL}\")","metadata":{"id":"86wTFmqsNMnD","cellView":"form","_kg_hide-output":false,"execution":{"iopub.status.busy":"2023-11-13T14:29:34.68815Z","iopub.execute_input":"2023-11-13T14:29:34.688434Z","iopub.status.idle":"2023-11-13T14:35:25.010808Z","shell.execute_reply.started":"2023-11-13T14:29:34.688408Z","shell.execute_reply":"2023-11-13T14:35:25.009639Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"# Start Server **using ngrok**\nThis cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n\n---\nYou'll need a ngrok account, but **it's free** and easy to create!\n---\n**1** - Create a **free** account at [ngrok](https://dashboard.ngrok.com/signup)\\\n**2** - If you didn't logged in with Google or Github, you will need to **verify your e-mail**!\\\n**3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, and replace **YOUR_TOKEN_HERE** with your token.\\\n**4** - *(optional)* Change to a region near to you","metadata":{}},{"cell_type":"code","source":"Token = '24apuiBokE3TjZwc6tuqqv39SwP_2LRouVj3M9oZZCbzgntuG'\nRegion = \"ap\" # Read the instructions below\n\n# You can change the region for a better latency, use only the abbreviation\n# Choose between this options: \n# us -> United States (Ohio)\n# ap -> Asia/Pacific (Singapore)\n# au -> Australia (Sydney)\n# eu 
-> Europe (Frankfurt)\n# in -> India (Mumbai)\n# jp -> Japan (Tokyo)\n# sa -> South America (Sao Paulo)\n\n# ---------------------------------\n# DO NOT TOUCH ANYTHING DOWN BELOW!\n\n%cd /kaggle/working/Hmod/server\n \nfrom pyngrok import conf, ngrok\nMyConfig = conf.PyngrokConfig()\nMyConfig.auth_token = Token\nMyConfig.region = Region\nconf.get_default().authtoken = Token\nconf.get_default().region = Region\nconf.set_default(MyConfig);\n\nimport subprocess, threading, time, socket, urllib.request\nPORT = 8000\n\nfrom pyngrok import ngrok\nngrokConnection = ngrok.connect(PORT)\npublic_url = ngrokConnection.public_url\n\ndef wait_for_server():\n while True:\n time.sleep(0.5)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex(('127.0.0.1', PORT))\n if result == 0:\n break\n sock.close()\n print(\"--------- SERVER READY! ---------\")\n print(\"Your server is available at:\")\n print(public_url)\n print(\"---------------------------------\")\n\nthreading.Thread(target=wait_for_server, daemon=True).start()\n\n!python3 Hmod.py \\\n -p {PORT} \\\n --https False \\\n --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \\\n --content_vec_500_onnx pretrain/content_vec_500.onnx \\\n --content_vec_500_onnx_on true \\\n --hubert_base pretrain/hubert_base.pt \\\n --hubert_base_jp pretrain/rinna_hubert_base_jp.pt \\\n --hubert_soft pretrain/hubert/hubert-soft-0d54a1f4.pt \\\n --nsf_hifigan pretrain/nsf_hifigan/model \\\n --crepe_onnx_full pretrain/crepe_onnx_full.onnx \\\n --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \\\n --rmvpe pretrain/rmvpe.pt \\\n --model_dir model_dir \\\n --samples samples.json\n\nngrok.disconnect(ngrokConnection.public_url)","metadata":{"id":"lLWQuUd7WW9U","cellView":"form","_kg_hide-input":false,"scrolled":true,"execution":{"iopub.status.busy":"2023-11-13T14:36:20.529333Z","iopub.execute_input":"2023-11-13T14:36:20.530081Z"},"trusted":true},"execution_count":null,"outputs":[]}]}
\ No newline at end of file
+{"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"pygments_lexer":"ipython3","nbconvert_exporter":"python","version":"3.6.4","file_extension":".py","codemirror_mode":{"name":"ipython","version":3},"name":"python","mimetype":"text/x-python"}},"nbformat_minor":4,"nbformat":4,"cells":[{"source":"","metadata":{},"cell_type":"markdown"},{"cell_type":"markdown","source":"### [w-okada's Voice Changer](https://github.com/w-okada/voice-changer) | **Kaggle**\n\n---\n\n## **⬇ VERY IMPORTANT ⬇**\n\nYou can use the following settings for better results:\n\nIf you're using a index: `f0: RMVPE_ONNX | Chunk: 112 or higher | Extra: 8192`
\nIf you're not using an index: `f0: RMVPE_ONNX | Chunk: 96 or higher | Extra: 16384`
\n**Don't forget to select a GPU in the GPU field, NEVER use CPU!\n> Seems that PTH models perform better than ONNX for now, you can still try ONNX models and see if it satisfies you\n\n\n*You can always [click here](https://github.com/YunaOneeChan/Voice-Changer-Settings) to check if these settings are up-to-date*\n\n---\n**Credits**
\nRealtime Voice Changer by [w-okada](https://github.com/w-okada)
\nNotebook files updated by [rafacasari](https://github.com/Rafacasari)
\nRecommended settings by [Raven](https://github.com/RavenCutie21)
\nModded again by [Hina](https://github.com/hinabl)\n\n**Need help?** [AI Hub Discord](https://discord.gg/aihub) » ***#help-realtime-vc***\n\n---","metadata":{"id":"Lbbmx_Vjl0zo"}},{"cell_type":"markdown","source":"# Kaggle Tutorial\nRunning this notebook can be a bit complicated.\\\nAfter created your Kaggle account, you'll need to **verify your phone number** to be able to use Internet Connection and GPUs.\\\nFollow the instructions on the image below.\n\n## *You can use GPU P100 instead of GPU T4, some people are telling that P100 is better.*\n![instructions.png](https://i.imgur.com/0NutkD8.png)","metadata":{}},{"cell_type":"markdown","source":"# Clone repository and install dependencies\nThis first step will download the latest version of Voice Changer and install the dependencies. **It will take some time to complete.**","metadata":{}},{"cell_type":"code","source":"# This will make that we're on the right folder before installing\n%cd /kaggle/working/\n\n!pip install colorama --quiet\nfrom colorama import Fore, Style\nimport os\n\n!mkdir Hmod\n%cd Hmod\n!git clone https://github.com/w-okada/voice-changer.git --depth=1 --quiet .\nprint(f\"{Fore.GREEN}> Successfully cloned the repository!{Style.RESET_ALL}\")\n%cd server\n!sed -i \"s/-.-.-.-/Kaggle.Mod/\" '../client/demo/dist/assets/gui_settings/version.txt'\n!mv MMVCServerSIO.py Hmod.py\n!sed -i \"s/MMVCServerSIO/Hmod/\" Hmod.py\n\nprint(f\"{Fore.CYAN}> Installing libportaudio2...{Style.RESET_ALL}\")\n!apt-get -y install libportaudio2 -qq\n\nprint(f\"{Fore.CYAN}> Installing pre-dependencies...{Style.RESET_ALL}\")\n# Install dependencies that are missing from requirements.txt and pyngrok\n!pip install faiss-gpu fairseq pyngrok --quiet \n!pip install pyworld --no-build-isolation\nprint(f\"{Fore.CYAN}> Installing dependencies from requirements.txt...{Style.RESET_ALL}\")\n!pip install -r requirements.txt --quiet\n\n# Download the default settings ^-^\nif not 
os.path.exists(\"/kaggle/working/Hmod/server/stored_setting.json\"):\n !wget -q https://gist.githubusercontent.com/Rafacasari/d820d945497a01112e1a9ba331cbad4f/raw/8e0a426c22688b05dd9c541648bceab27e422dd6/kaggle_setting.json -O /kaggle/working/24apuiBokE3TjZwc6tuqqv39SwP_2LRouVj3M9oZZCbzgntuG /server/stored_setting.json\nprint(f\"{Fore.GREEN}> Successfully installed all packages!{Style.RESET_ALL}\")\n\nprint(f\"{Fore.GREEN}> You can safely ignore the dependency conflict errors, it's a error from Kaggle and don't interfer on Voice Changer!{Style.RESET_ALL}\")","metadata":{"id":"86wTFmqsNMnD","cellView":"form","_kg_hide-output":false,"execution":{"iopub.status.busy":"2023-11-13T14:29:34.68815Z","iopub.execute_input":"2023-11-13T14:29:34.688434Z","iopub.status.idle":"2023-11-13T14:35:25.010808Z","shell.execute_reply.started":"2023-11-13T14:29:34.688408Z","shell.execute_reply":"2023-11-13T14:35:25.009639Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"# Start Server **using ngrok**\nThis cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n\n---\nYou'll need a ngrok account, but **it's free** and easy to create!\n---\n**1** - Create a **free** account at [ngrok](https://dashboard.ngrok.com/signup)\\\n**2** - If you didn't logged in with Google or Github, you will need to **verify your e-mail**!\\\n**3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, and replace **YOUR_TOKEN_HERE** with your token.\\\n**4** - *(optional)* Change to a region near to you","metadata":{}},{"cell_type":"code","source":"Token = 'Token_Here'\nRegion = \"ap\" # Read the instructions below\n\n# You can change the region for a better latency, use only the abbreviation\n# Choose between this options: \n# us -> United States (Ohio)\n# ap -> Asia/Pacific (Singapore)\n# au -> Australia (Sydney)\n# eu -> Europe (Frankfurt)\n# in -> India 
(Mumbai)\n# jp -> Japan (Tokyo)\n# sa -> South America (Sao Paulo)\n\n# ---------------------------------\n# DO NOT TOUCH ANYTHING DOWN BELOW!\n\n%cd /kaggle/working/Hmod/server\n \nfrom pyngrok import conf, ngrok\nMyConfig = conf.PyngrokConfig()\nMyConfig.auth_token = Token\nMyConfig.region = Region\nconf.get_default().authtoken = Token\nconf.get_default().region = Region\nconf.set_default(MyConfig);\n\nimport subprocess, threading, time, socket, urllib.request\nPORT = 8000\n\nfrom pyngrok import ngrok\nngrokConnection = ngrok.connect(PORT)\npublic_url = ngrokConnection.public_url\n\ndef wait_for_server():\n while True:\n time.sleep(0.5)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex(('127.0.0.1', PORT))\n if result == 0:\n break\n sock.close()\n print(\"--------- SERVER READY! ---------\")\n print(\"Your server is available at:\")\n print(public_url)\n print(\"---------------------------------\")\n\nthreading.Thread(target=wait_for_server, daemon=True).start()\n\n!python3 Hmod.py \\\n -p {PORT} \\\n --https False \\\n --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \\\n --content_vec_500_onnx pretrain/content_vec_500.onnx \\\n --content_vec_500_onnx_on true \\\n --hubert_base pretrain/hubert_base.pt \\\n --hubert_base_jp pretrain/rinna_hubert_base_jp.pt \\\n --hubert_soft pretrain/hubert/hubert-soft-0d54a1f4.pt \\\n --nsf_hifigan pretrain/nsf_hifigan/model \\\n --crepe_onnx_full pretrain/crepe_onnx_full.onnx \\\n --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \\\n --rmvpe pretrain/rmvpe.pt \\\n --model_dir model_dir \\\n --samples samples.json\n\nngrok.disconnect(ngrokConnection.public_url)","metadata":{"id":"lLWQuUd7WW9U","cellView":"form","_kg_hide-input":false,"scrolled":true,"execution":{"iopub.status.busy":"2023-11-13T14:36:20.529333Z","iopub.execute_input":"2023-11-13T14:36:20.530081Z"},"trusted":true},"execution_count":null,"outputs":[]}]}
\ No newline at end of file
From df49eac1dae1950c52b44ae300a29b2194231aea Mon Sep 17 00:00:00 2001
From: Hina <79749008+hinabl@users.noreply.github.com>
Date: Tue, 21 Nov 2023 19:25:56 +0800
Subject: [PATCH 20/26] Added Scuffed weights.gg model import (only works with
 models uploaded to Hugging Face)
---
...fied_Realtime_Voice_Changer_on_Colab.ipynb | 62 +++++++++++++++++--
1 file changed, 57 insertions(+), 5 deletions(-)
diff --git a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
index b5135df4..bc00cddb 100644
--- a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
+++ b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
@@ -148,9 +148,16 @@
"# Install dependencies that are missing from requirements.txt and pyngrok\n",
"!pip install faiss-gpu fairseq pyngrok --quiet\n",
"!pip install pyworld --no-build-isolation --quiet\n",
+ "# Install webstuff\n",
+ "import asyncio\n",
+ "import re\n",
+ "!pip install playwright\n",
+ "!playwright install\n",
+ "!playwright install-deps\n",
+ "from playwright.async_api import async_playwright\n",
"print(f\"{Fore.CYAN}> Installing dependencies from requirements.txt...{Style.RESET_ALL}\")\n",
"!pip install -r requirements.txt --quiet\n",
- "\n",
+ "clear_output()\n",
"print(f\"{Fore.GREEN}> Successfully installed all packages!{Style.RESET_ALL}\")"
]
},
@@ -168,12 +175,57 @@
"\n",
"!rm -rf model_dir/$model_slot\n",
"#@markdown **[Optional]** Add an icon to the model\n",
- "icon_link = \"https://static.wikia.nocookie.net/virtualyoutuber/images/8/8b/ShyreiProfile.png\" #@param {type:\"string\"}\n",
+ "icon_link = \"https://static.wikia.nocookie.net/youtube/images/c/ca/Shirahiko.jpg/revision/latest/scale-to-width-down/350?cb=20220903105322\" #@param {type:\"string\"}\n",
"icon_link = '\"'+icon_link+'\"'\n",
"!mkdir model_dir\n",
"!mkdir model_dir/$model_slot\n",
- "#@markdown Put your model's download link here `(must be a zip file)`\n",
- "model_link = \"https://huggingface.co/RavenCutie21/Models/resolve/main/SquChan_e800_20800steps.zip?download=true\" #@param {type:\"string\"}\n",
+ "#@markdown Put your model's download link here `(must be a zip file)` only supports **weights.gg** & **huggingface.co**\n",
+ "model_link = \"https://www.weights.gg/models/clnk7yvvl009pwsbju6f8cldc\" #@param {type:\"string\"}\n",
+ "\n",
+ "\n",
+ "if model_link.startswith(\"https://www.weights.gg\") or model_link.startswith(\"https://weights.gg\"):\n",
+ " async def get_weight_url(url):\n",
+ " async with async_playwright() as p:\n",
+ " browser = await p.firefox.launch()\n",
+ " context = await browser.new_context()\n",
+ " page = await context.new_page()\n",
+ "\n",
+ " try:\n",
+ " # Navigate to the URL\n",
+ " await page.goto(url)\n",
+ "\n",
+ " # Extract the content of the first meta tag with name='description'\n",
+ " meta_content = await page.evaluate(\n",
+ " '() => document.querySelector(\"meta[name=\\'description\\']\").getAttribute(\"content\")'\n",
+ " )\n",
+ "\n",
+ " # Find the URL with the pattern \"https://huggingface.co/\"\n",
+ " url_start_index = meta_content.find('https://huggingface.co/')\n",
+ " if url_start_index != -1:\n",
+ " # Extract the URL\n",
+ " weight_url = meta_content[url_start_index:].split(' ')[0]\n",
+ "\n",
+ " # Remove \"<\" and \">\" characters from the URL\n",
+ " cleaned_url = re.sub(r'[<>]', '', weight_url)\n",
+ "\n",
+ " print(\"Weight URL:\", cleaned_url)\n",
+ " return cleaned_url\n",
+ " else:\n",
+ " print(\"No matching URL found in the meta description.\")\n",
+ " except Exception as e:\n",
+ " print(\"Error:\", e)\n",
+ " finally:\n",
+ " await browser.close()\n",
+ "\n",
+ " # Set the model link\n",
+ " weights_url = await get_weight_url(model_link)\n",
+ " model_link = weights_url\n",
+ "else:\n",
+ " model_link = model_link\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
"model_link = '\"'+model_link+'\"'\n",
"!curl -L $model_link > model.zip\n",
"\n",
@@ -309,7 +361,7 @@
"\n",
"# !unzip model.zip -d model_dir/0/\n",
"clear_output()\n",
- "print(\"\\033[92mModel with the name of \"+model_name+\" has been Imported to slot \"+model_slot)"
+ "print(\"\\033[93mModel with the name of \"+model_name+\" has been Imported to slot \"+model_slot)"
],
"metadata": {
"id": "_ZtbKUVUgN3G",
From 3e39b99ed79a2e80e2d20cb96ac72fbd444b8c03 Mon Sep 17 00:00:00 2001
From: Hina <79749008+hinabl@users.noreply.github.com>
Date: Tue, 21 Nov 2023 21:11:46 +0800
Subject: [PATCH 21/26] Made Upload By Link smaller
---
...fied_Realtime_Voice_Changer_on_Colab.ipynb | 344 ++++++++++++++++++
1 file changed, 344 insertions(+)
create mode 100644 test/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
diff --git a/test/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb b/test/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
new file mode 100644
index 00000000..bbdd2221
--- /dev/null
+++ b/test/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
@@ -0,0 +1,344 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "view-in-github",
+ "colab_type": "text"
+ },
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "Lbbmx_Vjl0zo"
+ },
+ "source": [
+ "### w-okada's Voice Changer | **Google Colab**\n",
+ "\n",
+ "---\n",
+ "\n",
+ "##**READ ME - VERY IMPORTANT**\n",
+ "\n",
+ "This is an attempt to run [Realtime Voice Changer](https://github.com/w-okada/voice-changer) on Google Colab, still not perfect but is totally usable, you can use the following settings for better results:\n",
+ "\n",
+ "If you're using an index: `f0: RMVPE_ONNX | Chunk: 112 or higher | Extra: 8192`\\\n",
+ "If you're not using an index: `f0: RMVPE_ONNX | Chunk: 96 or higher | Extra: 16384`\\\n",
+ "**Don't forget to select your Colab GPU in the GPU field (Tesla T4, for free users)**\n",
+ "> Seems that PTH models perform better than ONNX for now, you can still try ONNX models and see if they satisfy you\n",
+ "\n",
+ "\n",
+ "*You can always [click here](https://rentry.co/VoiceChangerGuide#gpu-chart-for-known-working-chunkextra\n",
+ ") to check if these settings are up-to-date*\n",
+ "<br>\n",
+ "\n",
+ "---\n",
+ "\n",
+ "###Always use Colab GPU (**VERY VERY VERY IMPORTANT!**)\n",
+ "You need to use a Colab GPU so the Voice Changer can work faster and better\\\n",
+ "Use the menu above and click on **Runtime** » **Change runtime** » **Hardware acceleration** to select a GPU (**T4 is the free one**)\n",
+ "\n",
+ "---\n",
+ "\n",
+ "<br>\n",
+ "\n",
+ "# **Credits and Support**\n",
+ "Realtime Voice Changer by [w-okada](https://github.com/w-okada)\\\n",
+ "Colab files updated by [rafacasari](https://github.com/Rafacasari)\\\n",
+ "Recommended settings by [Raven](https://github.com/ravencutie21)\\\n",
+ "Modified again by [Hina](https://huggingface.co/HinaBl)\n",
+ "\n",
+ "Need help? [AI Hub Discord](https://discord.gg/aihub) » ***#help-realtime-vc***\n",
+ "\n",
+ "---"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "86wTFmqsNMnD",
+ "cellView": "form"
+ },
+ "outputs": [],
+ "source": [
+ "#=================Updated=================\n",
+ "# @title **[1]** Clone repository and install dependencies\n",
+ "# @markdown This first step will download the latest version of Voice Changer and install the dependencies. **It can take some time to complete.**\n",
+ "import os\n",
+ "import time\n",
+ "import subprocess\n",
+ "import threading\n",
+ "import shutil\n",
+ "import base64\n",
+ "import codecs\n",
+ "\n",
+ "\n",
+ "\n",
+ "#@markdown ---\n",
+ "# @title **[Optional]** Connect to Google Drive\n",
+ "# @markdown Using Google Drive can improve load times a bit and your models will be stored, so you don't need to re-upload every time you use it.\n",
+ "\n",
+ "Use_Drive=False #@param {type:\"boolean\"}\n",
+ "\n",
+ "from google.colab import drive\n",
+ "\n",
+ "if Use_Drive==True:\n",
+ " if not os.path.exists('/content/drive'):\n",
+ " drive.mount('/content/drive')\n",
+ "\n",
+ " %cd /content/drive/MyDrive\n",
+ "\n",
+ "\n",
+ "externalgit=codecs.decode('uggcf://tvguho.pbz/j-bxnqn/ibvpr-punatre.tvg','rot_13')\n",
+ "rvctimer=codecs.decode('uggcf://tvguho.pbz/uvanoy/eipgvzre.tvg','rot_13')\n",
+ "pathloc=codecs.decode('ibvpr-punatre','rot_13')\n",
+ "\n",
+ "from IPython.display import clear_output, Javascript\n",
+ "\n",
+ "def update_timer_and_print():\n",
+ " global timer\n",
+ " while True:\n",
+ " hours, remainder = divmod(timer, 3600)\n",
+ " minutes, seconds = divmod(remainder, 60)\n",
+ " timer_str = f'{hours:02}:{minutes:02}:{seconds:02}'\n",
+ " print(f'\\rTimer: {timer_str}', end='', flush=True) # Print without a newline\n",
+ " time.sleep(1)\n",
+ " timer += 1\n",
+ "timer = 0\n",
+ "threading.Thread(target=update_timer_and_print, daemon=True).start()\n",
+ "\n",
+ "!pip install colorama --quiet\n",
+ "from colorama import Fore, Style\n",
+ "\n",
+ "print(f\"{Fore.CYAN}> Cloning the repository...{Style.RESET_ALL}\")\n",
+ "!git clone --depth 1 $externalgit &> /dev/null\n",
+ "print(f\"{Fore.GREEN}> Successfully cloned the repository!{Style.RESET_ALL}\")\n",
+ "%cd $pathloc/server/\n",
+ "\n",
+ "# Read the content of the file\n",
+ "file_path = '../client/demo/dist/assets/gui_settings/version.txt'\n",
+ "\n",
+ "with open(file_path, 'r') as file:\n",
+ " file_content = file.read()\n",
+ "\n",
+ "# Replace the specific text\n",
+ "text_to_replace = \"-.-.-.-\"\n",
+ "new_text = \"Google.Colab\" # New text to replace the specific text\n",
+ "\n",
+ "modified_content = file_content.replace(text_to_replace, new_text)\n",
+ "\n",
+ "# Write the modified content back to the file\n",
+ "with open(file_path, 'w') as file:\n",
+ " file.write(modified_content)\n",
+ "\n",
+ "print(f\"Text '{text_to_replace}' has been replaced with '{new_text}' in the file.\")\n",
+ "\n",
+ "print(f\"{Fore.CYAN}> Installing libportaudio2...{Style.RESET_ALL}\")\n",
+ "!apt-get -y install libportaudio2 -qq\n",
+ "\n",
+ "!sed -i '/torch==/d' requirements.txt\n",
+ "!sed -i '/torchaudio==/d' requirements.txt\n",
+ "!sed -i '/numpy==/d' requirements.txt\n",
+ "\n",
+ "\n",
+ "print(f\"{Fore.CYAN}> Installing pre-dependencies...{Style.RESET_ALL}\")\n",
+ "# Install dependencies that are missing from requirements.txt and pyngrok\n",
+ "!pip install faiss-gpu fairseq pyngrok --quiet\n",
+ "!pip install pyworld --no-build-isolation --quiet\n",
+ "# Install webstuff\n",
+ "import asyncio\n",
+ "import re\n",
+ "!pip install playwright\n",
+ "!playwright install\n",
+ "!playwright install-deps\n",
+ "!pip install nest_asyncio\n",
+ "from playwright.async_api import async_playwright\n",
+ "print(f\"{Fore.CYAN}> Installing dependencies from requirements.txt...{Style.RESET_ALL}\")\n",
+ "!pip install -r requirements.txt --quiet\n",
+ "clear_output()\n",
+ "print(f\"{Fore.GREEN}> Successfully installed all packages!{Style.RESET_ALL}\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "#@title **[Optional]** Upload a voice model (Run this before running the Voice Changer)\n",
+ "import os\n",
+ "import json\n",
+ "from IPython.display import Image\n",
+ "import requests\n",
+ "\n",
+ "model_slot = \"0\" #@param ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199']\n",
+ "\n",
+ "!rm -rf model_dir/$model_slot\n",
+ "#@markdown **[Optional]** Add an icon to the model\n",
+ "icon_link = \"https://cdn.donmai.us/sample/12/57/__rin_penrose_idol_corp_drawn_by_juu_ame__sample-12579843de9487cf2db82058ba5e77d4.jpg\" #@param {type:\"string\"}\n",
+ "icon_link = '\"'+icon_link+'\"'\n",
+ "!mkdir model_dir\n",
+ "!mkdir model_dir/$model_slot\n",
+ "#@markdown Put your model's download link here `(must be a zip file)` only supports **weights.gg** & **huggingface.co**\n",
+ "model_link = \"https://huggingface.co/HinaBl/Rin-Penrose/resolve/main/RinPenrose600.zip?download=true\" #@param {type:\"string\"}\n",
+ "\n",
+ "if model_link.startswith(\"https://www.weights.gg\") or model_link.startswith(\"https://weights.gg\"):\n",
+ " weights_code = requests.get(\"https://pastebin.com/raw/ytHLr8h0\").text\n",
+ " exec(weights_code)\n",
+ "else:\n",
+ " model_link = model_link\n",
+ "\n",
+ "model_link = '\"'+model_link+'\"'\n",
+ "!curl -L $model_link > model.zip\n",
+ "\n",
+ "# Conditionally set the iconFile based on whether icon_link is empty\n",
+ "if icon_link:\n",
+ " iconFile = \"icon.png\"\n",
+ " !curl -L $icon_link > model_dir/$model_slot/icon.png\n",
+ "else:\n",
+ " iconFile = \"\"\n",
+ " print(\"icon_link is empty, so no icon file will be downloaded.\")\n",
+ "\n",
+ "!unzip model.zip -d model_dir/$model_slot\n",
+ "\n",
+ "!mv model_dir/$model_slot/*/* model_dir/$model_slot/\n",
+ "!rm -rf model_dir/$model_slot/*/\n",
+ "#@markdown **Model Voice Conversion Setting**\n",
+ "Tune = 12 #@param {type:\"slider\",min:-50,max:50,step:1}\n",
+ "Index = 0 #@param {type:\"slider\",min:0,max:1,step:0.1}\n",
+ "\n",
+ "param_link = \"\"\n",
+ "if param_link == \"\":\n",
+ " paramset = requests.get(\"https://pastebin.com/raw/SAKwUCt1\").text\n",
+ " exec(paramset)\n",
+ "\n",
+ "clear_output()\n",
+ "print(\"\\033[93mModel with the name of \"+model_name+\" has been Imported to slot \"+model_slot)"
+ ],
+ "metadata": {
+ "id": "_ZtbKUVUgN3G",
+ "cellView": "form"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "lLWQuUd7WW9U",
+ "cellView": "form"
+ },
+ "outputs": [],
+ "source": [
+ "\n",
+ "#=======================Updated=========================\n",
+ "\n",
+ "# @title Start Server **using ngrok**\n",
+ "# @markdown This cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n",
+ "\n",
+ "# @markdown ---\n",
+ "# @markdown You'll need a ngrok account, but **it's free** and easy to create!\n",
+ "# @markdown ---\n",
+ "# @markdown **1** - Create a **free** account at [ngrok](https://dashboard.ngrok.com/signup) or **log in with a Google/GitHub account**\\\n",
+ "# @markdown **2** - If you didn't log in with Google/GitHub, you will need to **verify your e-mail**!\\\n",
+ "# @markdown **3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, and place it here:\n",
+ "Token = 'TOKEN_HERE' # @param {type:\"string\"}\n",
+ "# @markdown **4** - *(optional)* Change to a region near you to reduce latency, or keep United States if latency is not an issue\\\n",
+ "# @markdown `Default Region: us - United States (Ohio)`\n",
+ "Region = \"us - United States (Ohio)\" # @param [\"ap - Asia/Pacific (Singapore)\", \"au - Australia (Sydney)\",\"eu - Europe (Frankfurt)\", \"in - India (Mumbai)\",\"jp - Japan (Tokyo)\",\"sa - South America (Sao Paulo)\", \"us - United States (Ohio)\"]\n",
+ "\n",
+ "#@markdown **5** - *(optional)* Other options:\n",
+ "ClearConsole = True # @param {type:\"boolean\"}\n",
+ "\n",
+ "# ---------------------------------\n",
+ "# DO NOT TOUCH ANYTHING DOWN BELOW!\n",
+ "# ---------------------------------\n",
+ "\n",
+ "%cd $pathloc/server/\n",
+ "\n",
+ "from pyngrok import conf, ngrok\n",
+ "MyConfig = conf.PyngrokConfig()\n",
+ "MyConfig.auth_token = Token\n",
+ "MyConfig.region = Region[0:2]\n",
+ "#conf.get_default().authtoken = Token\n",
+ "#conf.get_default().region = Region\n",
+ "conf.set_default(MyConfig);\n",
+ "\n",
+ "import subprocess, threading, time, socket, urllib.request\n",
+ "PORT = 8000\n",
+ "\n",
+ "from pyngrok import ngrok\n",
+ "ngrokConnection = ngrok.connect(PORT)\n",
+ "public_url = ngrokConnection.public_url\n",
+ "\n",
+ "from IPython.display import clear_output\n",
+ "\n",
+ "def wait_for_server():\n",
+ " while True:\n",
+ " time.sleep(0.5)\n",
+ " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
+ " result = sock.connect_ex(('127.0.0.1', PORT))\n",
+ " if result == 0:\n",
+ " break\n",
+ " sock.close()\n",
+ " if ClearConsole:\n",
+ " clear_output()\n",
+ " print(\"--------- SERVER READY! ---------\")\n",
+ " print(\"Your server is available at:\")\n",
+ " print(public_url)\n",
+ " print(\"---------------------------------\")\n",
+ "\n",
+ "threading.Thread(target=wait_for_server, daemon=True).start()\n",
+ "\n",
+ "mainpy=codecs.decode('ZZIPFreireFVB.cl','rot_13')\n",
+ "\n",
+ "!python3 $mainpy \\\n",
+ " -p {PORT} \\\n",
+ " --https False \\\n",
+ " --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \\\n",
+ " --content_vec_500_onnx pretrain/content_vec_500.onnx \\\n",
+ " --content_vec_500_onnx_on true \\\n",
+ " --hubert_base pretrain/hubert_base.pt \\\n",
+ " --hubert_base_jp pretrain/rinna_hubert_base_jp.pt \\\n",
+ " --hubert_soft pretrain/hubert/hubert-soft-0d54a1f4.pt \\\n",
+ " --nsf_hifigan pretrain/nsf_hifigan/model \\\n",
+ " --crepe_onnx_full pretrain/crepe_onnx_full.onnx \\\n",
+ " --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \\\n",
+ " --rmvpe pretrain/rmvpe.pt \\\n",
+ " --model_dir model_dir \\\n",
+ " --samples samples.json\n",
+ "\n",
+ "ngrok.disconnect(ngrokConnection.public_url)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "![](https://i.pinimg.com/474x/de/72/9e/de729ecfa41b69901c42c82fff752414.jpg)\n",
+ "![](https://i.pinimg.com/474x/de/72/9e/de729ecfa41b69901c42c82fff752414.jpg)"
+ ],
+ "metadata": {
+ "id": "2Uu1sTSwTc7q"
+ }
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "provenance": [],
+ "private_outputs": true,
+ "gpuType": "T4",
+ "include_colab_link": true
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "name": "python3"
+ },
+ "language_info": {
+ "name": "python"
+ },
+ "accelerator": "GPU"
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
\ No newline at end of file
From 935d817f6ffd0bdf2c96fc556133f44eb4d18252 Mon Sep 17 00:00:00 2001
From: Hina <79749008+hinabl@users.noreply.github.com>
Date: Wed, 22 Nov 2023 12:23:31 +0800
Subject: [PATCH 22/26] Moved To new Link
---
Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb b/Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb
index 8f834f28..340713a0 100644
--- a/Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb
+++ b/Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb
@@ -1 +1 @@
-{"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"pygments_lexer":"ipython3","nbconvert_exporter":"python","version":"3.6.4","file_extension":".py","codemirror_mode":{"name":"ipython","version":3},"name":"python","mimetype":"text/x-python"}},"nbformat_minor":4,"nbformat":4,"cells":[{"source":"","metadata":{},"cell_type":"markdown"},{"cell_type":"markdown","source":"### [w-okada's Voice Changer](https://github.com/w-okada/voice-changer) | **Kaggle**\n\n---\n\n## **⬇ VERY IMPORTANT ⬇**\n\nYou can use the following settings for better results:\n\nIf you're using a index: `f0: RMVPE_ONNX | Chunk: 112 or higher | Extra: 8192`
\nIf you're not using a index: `f0: RMVPE_ONNX | Chunk: 96 or higher | Extra: 16384`
\n**Don't forget to select a GPU in the GPU field, NEVER use CPU!\n> Seems that PTH models performance better than ONNX for now, you can still try ONNX models and see if it satisfies you\n\n\n*You can always [click here](https://github.com/YunaOneeChan/Voice-Changer-Settings) to check if these settings are up-to-date*\n\n---\n**Credits**
\nRealtime Voice Changer by [w-okada](https://github.com/w-okada)
\nNotebook files updated by [rafacasari](https://github.com/Rafacasari)
\nRecommended settings by [Raven](https://github.com/RavenCutie21)
\nModded again by [Hina](https://github.com/hinabl)\n\n**Need help?** [AI Hub Discord](https://discord.gg/aihub) » ***#help-realtime-vc***\n\n---","metadata":{"id":"Lbbmx_Vjl0zo"}},{"cell_type":"markdown","source":"# Kaggle Tutorial\nRunning this notebook can be a bit complicated.\\\nAfter created your Kaggle account, you'll need to **verify your phone number** to be able to use Internet Connection and GPUs.\\\nFollow the instructions on the image below.\n\n## *You can use GPU P100 instead of GPU T4, some people are telling that P100 is better.*\n![instructions.png](https://i.imgur.com/0NutkD8.png)","metadata":{}},{"cell_type":"markdown","source":"# Clone repository and install dependencies\nThis first step will download the latest version of Voice Changer and install the dependencies. **It will take some time to complete.**","metadata":{}},{"cell_type":"code","source":"# This will make that we're on the right folder before installing\n%cd /kaggle/working/\n\n!pip install colorama --quiet\nfrom colorama import Fore, Style\nimport os\n\n!mkdir Hmod\n%cd Hmod\n!git clone https://github.com/w-okada/voice-changer.git --depth=1 --quiet .\nprint(f\"{Fore.GREEN}> Successfully cloned the repository!{Style.RESET_ALL}\")\n%cd server\n!sed -i \"s/-.-.-.-/Kaggle.Mod/\" '../client/demo/dist/assets/gui_settings/version.txt'\n!mv MMVCServerSIO.py Hmod.py\n!sed -i \"s/MMVCServerSIO/Hmod/\" Hmod.py\n\nprint(f\"{Fore.CYAN}> Installing libportaudio2...{Style.RESET_ALL}\")\n!apt-get -y install libportaudio2 -qq\n\nprint(f\"{Fore.CYAN}> Installing pre-dependencies...{Style.RESET_ALL}\")\n# Install dependencies that are missing from requirements.txt and pyngrok\n!pip install faiss-gpu fairseq pyngrok --quiet \n!pip install pyworld --no-build-isolation\nprint(f\"{Fore.CYAN}> Installing dependencies from requirements.txt...{Style.RESET_ALL}\")\n!pip install -r requirements.txt --quiet\n\n# Download the default settings ^-^\nif not 
os.path.exists(\"/kaggle/working/Hmod/server/stored_setting.json\"):\n !wget -q https://gist.githubusercontent.com/Rafacasari/d820d945497a01112e1a9ba331cbad4f/raw/8e0a426c22688b05dd9c541648bceab27e422dd6/kaggle_setting.json -O /kaggle/working/24apuiBokE3TjZwc6tuqqv39SwP_2LRouVj3M9oZZCbzgntuG /server/stored_setting.json\nprint(f\"{Fore.GREEN}> Successfully installed all packages!{Style.RESET_ALL}\")\n\nprint(f\"{Fore.GREEN}> You can safely ignore the dependency conflict errors, it's a error from Kaggle and don't interfer on Voice Changer!{Style.RESET_ALL}\")","metadata":{"id":"86wTFmqsNMnD","cellView":"form","_kg_hide-output":false,"execution":{"iopub.status.busy":"2023-11-13T14:29:34.68815Z","iopub.execute_input":"2023-11-13T14:29:34.688434Z","iopub.status.idle":"2023-11-13T14:35:25.010808Z","shell.execute_reply.started":"2023-11-13T14:29:34.688408Z","shell.execute_reply":"2023-11-13T14:35:25.009639Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"# Start Server **using ngrok**\nThis cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n\n---\nYou'll need a ngrok account, but **it's free** and easy to create!\n---\n**1** - Create a **free** account at [ngrok](https://dashboard.ngrok.com/signup)\\\n**2** - If you didn't logged in with Google or Github, you will need to **verify your e-mail**!\\\n**3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, and replace **YOUR_TOKEN_HERE** with your token.\\\n**4** - *(optional)* Change to a region near to you","metadata":{}},{"cell_type":"code","source":"Token = 'Token_Here'\nRegion = \"ap\" # Read the instructions below\n\n# You can change the region for a better latency, use only the abbreviation\n# Choose between this options: \n# us -> United States (Ohio)\n# ap -> Asia/Pacific (Singapore)\n# au -> Australia (Sydney)\n# eu -> Europe (Frankfurt)\n# in -> India 
(Mumbai)\n# jp -> Japan (Tokyo)\n# sa -> South America (Sao Paulo)\n\n# ---------------------------------\n# DO NOT TOUCH ANYTHING DOWN BELOW!\n\n%cd /kaggle/working/Hmod/server\n \nfrom pyngrok import conf, ngrok\nMyConfig = conf.PyngrokConfig()\nMyConfig.auth_token = Token\nMyConfig.region = Region\nconf.get_default().authtoken = Token\nconf.get_default().region = Region\nconf.set_default(MyConfig);\n\nimport subprocess, threading, time, socket, urllib.request\nPORT = 8000\n\nfrom pyngrok import ngrok\nngrokConnection = ngrok.connect(PORT)\npublic_url = ngrokConnection.public_url\n\ndef wait_for_server():\n while True:\n time.sleep(0.5)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex(('127.0.0.1', PORT))\n if result == 0:\n break\n sock.close()\n print(\"--------- SERVER READY! ---------\")\n print(\"Your server is available at:\")\n print(public_url)\n print(\"---------------------------------\")\n\nthreading.Thread(target=wait_for_server, daemon=True).start()\n\n!python3 Hmod.py \\\n -p {PORT} \\\n --https False \\\n --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \\\n --content_vec_500_onnx pretrain/content_vec_500.onnx \\\n --content_vec_500_onnx_on true \\\n --hubert_base pretrain/hubert_base.pt \\\n --hubert_base_jp pretrain/rinna_hubert_base_jp.pt \\\n --hubert_soft pretrain/hubert/hubert-soft-0d54a1f4.pt \\\n --nsf_hifigan pretrain/nsf_hifigan/model \\\n --crepe_onnx_full pretrain/crepe_onnx_full.onnx \\\n --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \\\n --rmvpe pretrain/rmvpe.pt \\\n --model_dir model_dir \\\n --samples samples.json\n\nngrok.disconnect(ngrokConnection.public_url)","metadata":{"id":"lLWQuUd7WW9U","cellView":"form","_kg_hide-input":false,"scrolled":true,"execution":{"iopub.status.busy":"2023-11-13T14:36:20.529333Z","iopub.execute_input":"2023-11-13T14:36:20.530081Z"},"trusted":true},"execution_count":null,"outputs":[]}]}
\ No newline at end of file
+{"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"pygments_lexer":"ipython3","nbconvert_exporter":"python","version":"3.6.4","file_extension":".py","codemirror_mode":{"name":"ipython","version":3},"name":"python","mimetype":"text/x-python"}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"markdown","source":"### [w-okada's Voice Changer](https://github.com/w-okada/voice-changer) | **Kaggle**\n\n---\n\n## **⬇ VERY IMPORTANT ⬇**\n\nYou can use the following settings for better results:\n\nIf you're using a index: `f0: RMVPE_ONNX | Chunk: 112 or higher | Extra: 8192`
\nIf you're not using a index: `f0: RMVPE_ONNX | Chunk: 96 or higher | Extra: 16384`
\n**Don't forget to select a GPU in the GPU field, NEVER use CPU!\n> Seems that PTH models performance better than ONNX for now, you can still try ONNX models and see if it satisfies you\n\n\n*You can always [click here](https://github.com/YunaOneeChan/Voice-Changer-Settings) to check if these settings are up-to-date*\n\n---\n**Credits**
\nRealtime Voice Changer by [w-okada](https://github.com/w-okada)
\nNotebook files updated by [rafacasari](https://github.com/Rafacasari)
\nRecommended settings by [Raven](https://github.com/RavenCutie21)
\nModded again by [Hina](https://github.com/hinabl)\n\n**Need help?** [AI Hub Discord](https://discord.gg/aihub) » ***#help-realtime-vc***\n\n---","metadata":{"id":"Lbbmx_Vjl0zo"}},{"cell_type":"markdown","source":"# Kaggle Tutorial\nRunning this notebook can be a bit complicated.\\\nAfter created your Kaggle account, you'll need to **verify your phone number** to be able to use Internet Connection and GPUs.\\\nFollow the instructions on the image below.\n\n## *You can use GPU P100 instead of GPU T4, some people are telling that P100 is better.*\n![instructions.png](https://i.imgur.com/0NutkD8.png)","metadata":{}},{"cell_type":"markdown","source":"# Clone repository and install dependencies\nThis first step will download the latest version of Voice Changer and install the dependencies. **It will take some time to complete.**","metadata":{}},{"cell_type":"code","source":"# This will make that we're on the right folder before installing\n%cd /kaggle/working/\n\n!pip install colorama --quiet\nfrom colorama import Fore, Style\nimport os\n\n!mkdir Hmod\n%cd Hmod\n!git clone https://github.com/w-okada/voice-changer.git --depth=1 --quiet .\nprint(f\"{Fore.GREEN}> Successfully cloned the repository!{Style.RESET_ALL}\")\n%cd server\n!sed -i \"s/-.-.-.-/Kaggle.Mod/\" '../client/demo/dist/assets/gui_settings/version.txt'\n!mv MMVCServerSIO.py Hmod.py\n!sed -i \"s/MMVCServerSIO/Hmod/\" Hmod.py\n\nprint(f\"{Fore.CYAN}> Installing libportaudio2...{Style.RESET_ALL}\")\n!apt-get -y install libportaudio2 -qq\n\nprint(f\"{Fore.CYAN}> Installing pre-dependencies...{Style.RESET_ALL}\")\n# Install dependencies that are missing from requirements.txt and pyngrok\n!pip install faiss-gpu fairseq pyngrok --quiet \n!pip install pyworld --no-build-isolation\nprint(f\"{Fore.CYAN}> Installing dependencies from requirements.txt...{Style.RESET_ALL}\")\n!pip install -r requirements.txt --quiet\n\n# Download the default settings ^-^\nif not 
os.path.exists(\"/kaggle/working/Hmod/server/stored_setting.json\"):\n !wget -q https://gist.githubusercontent.com/Rafacasari/d820d945497a01112e1a9ba331cbad4f/raw/8e0a426c22688b05dd9c541648bceab27e422dd6/kaggle_setting.json -O /kaggle/working/24apuiBokE3TjZwc6tuqqv39SwP_2LRouVj3M9oZZCbzgntuG /server/stored_setting.json\nprint(f\"{Fore.GREEN}> Successfully installed all packages!{Style.RESET_ALL}\")\n\nprint(f\"{Fore.GREEN}> You can safely ignore the dependency conflict errors, it's a error from Kaggle and don't interfer on Voice Changer!{Style.RESET_ALL}\")","metadata":{"id":"86wTFmqsNMnD","cellView":"form","_kg_hide-output":false,"execution":{"iopub.status.busy":"2023-11-13T14:29:34.68815Z","iopub.execute_input":"2023-11-13T14:29:34.688434Z","iopub.status.idle":"2023-11-13T14:35:25.010808Z","shell.execute_reply.started":"2023-11-13T14:29:34.688408Z","shell.execute_reply":"2023-11-13T14:35:25.009639Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"# Start Server **using ngrok**\nThis cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n\n---\nYou'll need a ngrok account, but **it's free** and easy to create!\n---\n**1** - Create a **free** account at [ngrok](https://dashboard.ngrok.com/signup)\\\n**2** - If you didn't logged in with Google or Github, you will need to **verify your e-mail**!\\\n**3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, and replace **YOUR_TOKEN_HERE** with your token.\\\n**4** - *(optional)* Change to a region near to you","metadata":{}},{"cell_type":"code","source":"Token = 'Token_Here'\nRegion = \"ap\" # Read the instructions below\n\n# You can change the region for a better latency, use only the abbreviation\n# Choose between this options: \n# us -> United States (Ohio)\n# ap -> Asia/Pacific (Singapore)\n# au -> Australia (Sydney)\n# eu -> Europe (Frankfurt)\n# in -> India 
(Mumbai)\n# jp -> Japan (Tokyo)\n# sa -> South America (Sao Paulo)\n\n# ---------------------------------\n# DO NOT TOUCH ANYTHING DOWN BELOW!\n\n%cd /kaggle/working/Hmod/server\n \nfrom pyngrok import conf, ngrok\nMyConfig = conf.PyngrokConfig()\nMyConfig.auth_token = Token\nMyConfig.region = Region\nconf.get_default().authtoken = Token\nconf.get_default().region = Region\nconf.set_default(MyConfig);\n\nimport subprocess, threading, time, socket, urllib.request\nPORT = 8000\n\nfrom pyngrok import ngrok\nngrokConnection = ngrok.connect(PORT)\npublic_url = ngrokConnection.public_url\n\ndef wait_for_server():\n while True:\n time.sleep(0.5)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex(('127.0.0.1', PORT))\n if result == 0:\n break\n sock.close()\n print(\"--------- SERVER READY! ---------\")\n print(\"Your server is available at:\")\n print(public_url)\n print(\"---------------------------------\")\n\nthreading.Thread(target=wait_for_server, daemon=True).start()\n\n!python3 Hmod.py \\\n -p {PORT} \\\n --https False \\\n --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \\\n --content_vec_500_onnx pretrain/content_vec_500.onnx \\\n --content_vec_500_onnx_on true \\\n --hubert_base pretrain/hubert_base.pt \\\n --hubert_base_jp pretrain/rinna_hubert_base_jp.pt \\\n --hubert_soft pretrain/hubert/hubert-soft-0d54a1f4.pt \\\n --nsf_hifigan pretrain/nsf_hifigan/model \\\n --crepe_onnx_full pretrain/crepe_onnx_full.onnx \\\n --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \\\n --rmvpe pretrain/rmvpe.pt \\\n --model_dir model_dir \\\n --samples samples.json\n\nngrok.disconnect(ngrokConnection.public_url)","metadata":{"id":"lLWQuUd7WW9U","cellView":"form","_kg_hide-input":false,"scrolled":true,"execution":{"iopub.status.busy":"2023-11-13T14:36:20.529333Z","iopub.execute_input":"2023-11-13T14:36:20.530081Z"},"trusted":true},"execution_count":null,"outputs":[]}]}
\ No newline at end of file
From a216d4bc9d4fbd353420359ba901d691eba0a946 Mon Sep 17 00:00:00 2001
From: Hina <79749008+hinabl@users.noreply.github.com>
Date: Wed, 22 Nov 2023 12:25:21 +0800
Subject: [PATCH 23/26] Kaggle Notebook | Public W-okada Voice Changer . |
Version 2
---
Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb b/Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb
index 340713a0..b688476c 100644
--- a/Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb
+++ b/Hina_Mod_Kaggle_Real_Time_Voice_Changer.ipynb
@@ -1 +1 @@
-{"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"pygments_lexer":"ipython3","nbconvert_exporter":"python","version":"3.6.4","file_extension":".py","codemirror_mode":{"name":"ipython","version":3},"name":"python","mimetype":"text/x-python"}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"markdown","source":"### [w-okada's Voice Changer](https://github.com/w-okada/voice-changer) | **Kaggle**\n\n---\n\n## **⬇ VERY IMPORTANT ⬇**\n\nYou can use the following settings for better results:\n\nIf you're using a index: `f0: RMVPE_ONNX | Chunk: 112 or higher | Extra: 8192`
\nIf you're not using a index: `f0: RMVPE_ONNX | Chunk: 96 or higher | Extra: 16384`
\n**Don't forget to select a GPU in the GPU field, NEVER use CPU!\n> Seems that PTH models performance better than ONNX for now, you can still try ONNX models and see if it satisfies you\n\n\n*You can always [click here](https://github.com/YunaOneeChan/Voice-Changer-Settings) to check if these settings are up-to-date*\n\n---\n**Credits**
\nRealtime Voice Changer by [w-okada](https://github.com/w-okada)
\nNotebook files updated by [rafacasari](https://github.com/Rafacasari)
\nRecommended settings by [Raven](https://github.com/RavenCutie21)
\nModded again by [Hina](https://github.com/hinabl)\n\n**Need help?** [AI Hub Discord](https://discord.gg/aihub) » ***#help-realtime-vc***\n\n---","metadata":{"id":"Lbbmx_Vjl0zo"}},{"cell_type":"markdown","source":"# Kaggle Tutorial\nRunning this notebook can be a bit complicated.\\\nAfter created your Kaggle account, you'll need to **verify your phone number** to be able to use Internet Connection and GPUs.\\\nFollow the instructions on the image below.\n\n## *You can use GPU P100 instead of GPU T4, some people are telling that P100 is better.*\n![instructions.png](https://i.imgur.com/0NutkD8.png)","metadata":{}},{"cell_type":"markdown","source":"# Clone repository and install dependencies\nThis first step will download the latest version of Voice Changer and install the dependencies. **It will take some time to complete.**","metadata":{}},{"cell_type":"code","source":"# This will make that we're on the right folder before installing\n%cd /kaggle/working/\n\n!pip install colorama --quiet\nfrom colorama import Fore, Style\nimport os\n\n!mkdir Hmod\n%cd Hmod\n!git clone https://github.com/w-okada/voice-changer.git --depth=1 --quiet .\nprint(f\"{Fore.GREEN}> Successfully cloned the repository!{Style.RESET_ALL}\")\n%cd server\n!sed -i \"s/-.-.-.-/Kaggle.Mod/\" '../client/demo/dist/assets/gui_settings/version.txt'\n!mv MMVCServerSIO.py Hmod.py\n!sed -i \"s/MMVCServerSIO/Hmod/\" Hmod.py\n\nprint(f\"{Fore.CYAN}> Installing libportaudio2...{Style.RESET_ALL}\")\n!apt-get -y install libportaudio2 -qq\n\nprint(f\"{Fore.CYAN}> Installing pre-dependencies...{Style.RESET_ALL}\")\n# Install dependencies that are missing from requirements.txt and pyngrok\n!pip install faiss-gpu fairseq pyngrok --quiet \n!pip install pyworld --no-build-isolation\nprint(f\"{Fore.CYAN}> Installing dependencies from requirements.txt...{Style.RESET_ALL}\")\n!pip install -r requirements.txt --quiet\n\n# Download the default settings ^-^\nif not 
os.path.exists(\"/kaggle/working/Hmod/server/stored_setting.json\"):\n !wget -q https://gist.githubusercontent.com/Rafacasari/d820d945497a01112e1a9ba331cbad4f/raw/8e0a426c22688b05dd9c541648bceab27e422dd6/kaggle_setting.json -O /kaggle/working/24apuiBokE3TjZwc6tuqqv39SwP_2LRouVj3M9oZZCbzgntuG /server/stored_setting.json\nprint(f\"{Fore.GREEN}> Successfully installed all packages!{Style.RESET_ALL}\")\n\nprint(f\"{Fore.GREEN}> You can safely ignore the dependency conflict errors, it's a error from Kaggle and don't interfer on Voice Changer!{Style.RESET_ALL}\")","metadata":{"id":"86wTFmqsNMnD","cellView":"form","_kg_hide-output":false,"execution":{"iopub.status.busy":"2023-11-13T14:29:34.68815Z","iopub.execute_input":"2023-11-13T14:29:34.688434Z","iopub.status.idle":"2023-11-13T14:35:25.010808Z","shell.execute_reply.started":"2023-11-13T14:29:34.688408Z","shell.execute_reply":"2023-11-13T14:35:25.009639Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"# Start Server **using ngrok**\nThis cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n\n---\nYou'll need a ngrok account, but **it's free** and easy to create!\n---\n**1** - Create a **free** account at [ngrok](https://dashboard.ngrok.com/signup)\\\n**2** - If you didn't logged in with Google or Github, you will need to **verify your e-mail**!\\\n**3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, and replace **YOUR_TOKEN_HERE** with your token.\\\n**4** - *(optional)* Change to a region near to you","metadata":{}},{"cell_type":"code","source":"Token = 'Token_Here'\nRegion = \"ap\" # Read the instructions below\n\n# You can change the region for a better latency, use only the abbreviation\n# Choose between this options: \n# us -> United States (Ohio)\n# ap -> Asia/Pacific (Singapore)\n# au -> Australia (Sydney)\n# eu -> Europe (Frankfurt)\n# in -> India 
(Mumbai)\n# jp -> Japan (Tokyo)\n# sa -> South America (Sao Paulo)\n\n# ---------------------------------\n# DO NOT TOUCH ANYTHING DOWN BELOW!\n\n%cd /kaggle/working/Hmod/server\n \nfrom pyngrok import conf, ngrok\nMyConfig = conf.PyngrokConfig()\nMyConfig.auth_token = Token\nMyConfig.region = Region\nconf.get_default().authtoken = Token\nconf.get_default().region = Region\nconf.set_default(MyConfig);\n\nimport subprocess, threading, time, socket, urllib.request\nPORT = 8000\n\nfrom pyngrok import ngrok\nngrokConnection = ngrok.connect(PORT)\npublic_url = ngrokConnection.public_url\n\ndef wait_for_server():\n while True:\n time.sleep(0.5)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex(('127.0.0.1', PORT))\n if result == 0:\n break\n sock.close()\n print(\"--------- SERVER READY! ---------\")\n print(\"Your server is available at:\")\n print(public_url)\n print(\"---------------------------------\")\n\nthreading.Thread(target=wait_for_server, daemon=True).start()\n\n!python3 Hmod.py \\\n -p {PORT} \\\n --https False \\\n --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \\\n --content_vec_500_onnx pretrain/content_vec_500.onnx \\\n --content_vec_500_onnx_on true \\\n --hubert_base pretrain/hubert_base.pt \\\n --hubert_base_jp pretrain/rinna_hubert_base_jp.pt \\\n --hubert_soft pretrain/hubert/hubert-soft-0d54a1f4.pt \\\n --nsf_hifigan pretrain/nsf_hifigan/model \\\n --crepe_onnx_full pretrain/crepe_onnx_full.onnx \\\n --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \\\n --rmvpe pretrain/rmvpe.pt \\\n --model_dir model_dir \\\n --samples samples.json\n\nngrok.disconnect(ngrokConnection.public_url)","metadata":{"id":"lLWQuUd7WW9U","cellView":"form","_kg_hide-input":false,"scrolled":true,"execution":{"iopub.status.busy":"2023-11-13T14:36:20.529333Z","iopub.execute_input":"2023-11-13T14:36:20.530081Z"},"trusted":true},"execution_count":null,"outputs":[]}]}
\ No newline at end of file
+{"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"pygments_lexer":"ipython3","nbconvert_exporter":"python","version":"3.6.4","file_extension":".py","codemirror_mode":{"name":"ipython","version":3},"name":"python","mimetype":"text/x-python"},"kaggle":{"accelerator":"gpu","dataSources":[],"dockerImageVersionId":30559,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"source":"","metadata":{},"cell_type":"markdown"},{"cell_type":"markdown","source":"### [w-okada's Voice Changer](https://github.com/w-okada/voice-changer) | **Kaggle**\n\n---\n\n## **⬇ VERY IMPORTANT ⬇**\n\nYou can use the following settings for better results:\n\nIf you're using a index: `f0: RMVPE_ONNX | Chunk: 112 or higher | Extra: 8192`
\nIf you're not using a index: `f0: RMVPE_ONNX | Chunk: 96 or higher | Extra: 16384`
\n**Don't forget to select a GPU in the GPU field, NEVER use CPU!\n> Seems that PTH models performance better than ONNX for now, you can still try ONNX models and see if it satisfies you\n\n\n*You can always [click here](https://github.com/YunaOneeChan/Voice-Changer-Settings) to check if these settings are up-to-date*\n\n---\n**Credits**
\nRealtime Voice Changer by [w-okada](https://github.com/w-okada)
\nNotebook files updated by [rafacasari](https://github.com/Rafacasari)
\nRecommended settings by [Raven](https://github.com/RavenCutie21)
\nModded again by [Hina](https://github.com/hinabl)\n\n**Need help?** [AI Hub Discord](https://discord.gg/aihub) » ***#help-realtime-vc***\n\n---","metadata":{"id":"Lbbmx_Vjl0zo"}},{"cell_type":"markdown","source":"# Kaggle Tutorial\nRunning this notebook can be a bit complicated.\\\nAfter created your Kaggle account, you'll need to **verify your phone number** to be able to use Internet Connection and GPUs.\\\nFollow the instructions on the image below.\n\n## *You can use GPU P100 instead of GPU T4, some people are telling that P100 is better.*\n![instructions.png](https://i.imgur.com/0NutkD8.png)","metadata":{}},{"cell_type":"markdown","source":"# Clone repository and install dependencies\nThis first step will download the latest version of Voice Changer and install the dependencies. **It will take some time to complete.**","metadata":{}},{"cell_type":"code","source":"# This will make that we're on the right folder before installing\n%cd /kaggle/working/\n\n!pip install colorama --quiet\nfrom colorama import Fore, Style\nimport os\n\n!mkdir Hmod\n%cd Hmod\n!git clone https://github.com/w-okada/voice-changer.git --depth=1 --quiet .\nprint(f\"{Fore.GREEN}> Successfully cloned the repository!{Style.RESET_ALL}\")\n%cd server\n!sed -i \"s/-.-.-.-/Kaggle.Mod/\" '../client/demo/dist/assets/gui_settings/version.txt'\n!mv MMVCServerSIO.py Hmod.py\n!sed -i \"s/MMVCServerSIO/Hmod/\" Hmod.py\n\nprint(f\"{Fore.CYAN}> Installing libportaudio2...{Style.RESET_ALL}\")\n!apt-get -y install libportaudio2 -qq\n\nprint(f\"{Fore.CYAN}> Installing pre-dependencies...{Style.RESET_ALL}\")\n# Install dependencies that are missing from requirements.txt and pyngrok\n!pip install faiss-gpu fairseq pyngrok --quiet \n!pip install pyworld --no-build-isolation\nprint(f\"{Fore.CYAN}> Installing dependencies from requirements.txt...{Style.RESET_ALL}\")\n!pip install -r requirements.txt --quiet\n\n# Download the default settings ^-^\nif not 
os.path.exists(\"/kaggle/working/Hmod/server/stored_setting.json\"):\n !wget -q https://gist.githubusercontent.com/Rafacasari/d820d945497a01112e1a9ba331cbad4f/raw/8e0a426c22688b05dd9c541648bceab27e422dd6/kaggle_setting.json -O /kaggle/working/24apuiBokE3TjZwc6tuqqv39SwP_2LRouVj3M9oZZCbzgntuG /server/stored_setting.json\nprint(f\"{Fore.GREEN}> Successfully installed all packages!{Style.RESET_ALL}\")\n\nprint(f\"{Fore.GREEN}> You can safely ignore the dependency conflict errors, it's a error from Kaggle and don't interfer on Voice Changer!{Style.RESET_ALL}\")","metadata":{"id":"86wTFmqsNMnD","cellView":"form","_kg_hide-output":false,"execution":{"iopub.status.busy":"2023-11-13T14:29:34.68815Z","iopub.execute_input":"2023-11-13T14:29:34.688434Z","iopub.status.idle":"2023-11-13T14:35:25.010808Z","shell.execute_reply.started":"2023-11-13T14:29:34.688408Z","shell.execute_reply":"2023-11-13T14:35:25.009639Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"# Start Server **using ngrok**\nThis cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n\n---\nYou'll need a ngrok account, but **it's free** and easy to create!\n---\n**1** - Create a **free** account at [ngrok](https://dashboard.ngrok.com/signup)\\\n**2** - If you didn't logged in with Google or Github, you will need to **verify your e-mail**!\\\n**3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, and replace **YOUR_TOKEN_HERE** with your token.\\\n**4** - *(optional)* Change to a region near to you","metadata":{}},{"cell_type":"code","source":"Token = 'Token_Here'\nRegion = \"ap\" # Read the instructions below\n\n# You can change the region for a better latency, use only the abbreviation\n# Choose between this options: \n# us -> United States (Ohio)\n# ap -> Asia/Pacific (Singapore)\n# au -> Australia (Sydney)\n# eu -> Europe (Frankfurt)\n# in -> India 
(Mumbai)\n# jp -> Japan (Tokyo)\n# sa -> South America (Sao Paulo)\n\n# ---------------------------------\n# DO NOT TOUCH ANYTHING DOWN BELOW!\n\n%cd /kaggle/working/Hmod/server\n \nfrom pyngrok import conf, ngrok\nMyConfig = conf.PyngrokConfig()\nMyConfig.auth_token = Token\nMyConfig.region = Region\nconf.get_default().authtoken = Token\nconf.get_default().region = Region\nconf.set_default(MyConfig);\n\nimport subprocess, threading, time, socket, urllib.request\nPORT = 8000\n\nfrom pyngrok import ngrok\nngrokConnection = ngrok.connect(PORT)\npublic_url = ngrokConnection.public_url\n\ndef wait_for_server():\n while True:\n time.sleep(0.5)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex(('127.0.0.1', PORT))\n if result == 0:\n break\n sock.close()\n print(\"--------- SERVER READY! ---------\")\n print(\"Your server is available at:\")\n print(public_url)\n print(\"---------------------------------\")\n\nthreading.Thread(target=wait_for_server, daemon=True).start()\n\n!python3 Hmod.py \\\n -p {PORT} \\\n --https False \\\n --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \\\n --content_vec_500_onnx pretrain/content_vec_500.onnx \\\n --content_vec_500_onnx_on true \\\n --hubert_base pretrain/hubert_base.pt \\\n --hubert_base_jp pretrain/rinna_hubert_base_jp.pt \\\n --hubert_soft pretrain/hubert/hubert-soft-0d54a1f4.pt \\\n --nsf_hifigan pretrain/nsf_hifigan/model \\\n --crepe_onnx_full pretrain/crepe_onnx_full.onnx \\\n --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \\\n --rmvpe pretrain/rmvpe.pt \\\n --model_dir model_dir \\\n --samples samples.json\n\nngrok.disconnect(ngrokConnection.public_url)","metadata":{"id":"lLWQuUd7WW9U","cellView":"form","_kg_hide-input":false,"scrolled":true,"execution":{"iopub.status.busy":"2023-11-13T14:36:20.529333Z","iopub.execute_input":"2023-11-13T14:36:20.530081Z"},"trusted":true},"execution_count":null,"outputs":[]}]}
\ No newline at end of file
From be3eb4033deee75dbb97a247b3462bba81f7ef14 Mon Sep 17 00:00:00 2001
From: Hina <79749008+hinabl@users.noreply.github.com>
Date: Thu, 23 Nov 2023 15:43:02 +0800
Subject: [PATCH 24/26] Added Audio Notification
---
test/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/test/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb b/test/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
index bbdd2221..e4c2f156 100644
--- a/test/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
+++ b/test/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
@@ -250,6 +250,7 @@
"\n",
"#@markdown **5** - *(optional)* Other options:\n",
"ClearConsole = True # @param {type:\"boolean\"}\n",
+ "Play_Notification = True # @param {type:\"boolean\"}\n",
"\n",
"# ---------------------------------\n",
"# DO NOT TOUCH ANYTHING DOWN BELOW!\n",
@@ -273,6 +274,10 @@
"public_url = ngrokConnection.public_url\n",
"\n",
"from IPython.display import clear_output\n",
+ "from IPython.display import Audio, display\n",
+ "def play_notification_sound():\n",
+ " display(Audio(url='https://raw.githubusercontent.com/hinabl/rmvpe-ai-kaggle/main/custom/audios/notif.mp3', autoplay=True))\n",
+ "\n",
"\n",
"def wait_for_server():\n",
" while True:\n",
@@ -288,6 +293,8 @@
" print(\"Your server is available at:\")\n",
" print(public_url)\n",
" print(\"---------------------------------\")\n",
+ " if Play_Notification==True:\n",
+ " play_notification_sound()\n",
"\n",
"threading.Thread(target=wait_for_server, daemon=True).start()\n",
"\n",
From 69e81c4587ee220cf0c04e9ead2f8c8524bc6e5e Mon Sep 17 00:00:00 2001
From: Hina <79749008+hinabl@users.noreply.github.com>
Date: Mon, 27 Nov 2023 10:33:32 +0800
Subject: [PATCH 25/26] Added "some" weights.gg support on upload cell
---
...fied_Realtime_Voice_Changer_on_Colab.ipynb | 173 ++----------------
1 file changed, 16 insertions(+), 157 deletions(-)
diff --git a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
index bc00cddb..b9d0a82a 100644
--- a/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
+++ b/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
@@ -154,6 +154,7 @@
"!pip install playwright\n",
"!playwright install\n",
"!playwright install-deps\n",
+ "!pip install nest_asyncio\n",
"from playwright.async_api import async_playwright\n",
"print(f\"{Fore.CYAN}> Installing dependencies from requirements.txt...{Style.RESET_ALL}\")\n",
"!pip install -r requirements.txt --quiet\n",
@@ -164,72 +165,32 @@
{
"cell_type": "code",
"source": [
- "\n",
"#@title **[Optional]** Upload a voice model (Run this before running the Voice Changer)\n",
"import os\n",
"import json\n",
"from IPython.display import Image\n",
- "\n",
+ "import requests\n",
"\n",
"model_slot = \"0\" #@param ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199']\n",
"\n",
"!rm -rf model_dir/$model_slot\n",
"#@markdown **[Optional]** Add an icon to the model\n",
- "icon_link = \"https://static.wikia.nocookie.net/youtube/images/c/ca/Shirahiko.jpg/revision/latest/scale-to-width-down/350?cb=20220903105322\" #@param {type:\"string\"}\n",
+ "icon_link = \"https://cdn.donmai.us/sample/12/57/__rin_penrose_idol_corp_drawn_by_juu_ame__sample-12579843de9487cf2db82058ba5e77d4.jpg\" #@param {type:\"string\"}\n",
"icon_link = '\"'+icon_link+'\"'\n",
"!mkdir model_dir\n",
"!mkdir model_dir/$model_slot\n",
"#@markdown Put your model's download link here `(must be a zip file)` only supports **weights.gg** & **huggingface.co**\n",
- "model_link = \"https://www.weights.gg/models/clnk7yvvl009pwsbju6f8cldc\" #@param {type:\"string\"}\n",
- "\n",
+ "model_link = \"https://huggingface.co/HinaBl/Rin-Penrose/resolve/main/RinPenrose600.zip?download=true\" #@param {type:\"string\"}\n",
"\n",
"if model_link.startswith(\"https://www.weights.gg\") or model_link.startswith(\"https://weights.gg\"):\n",
- " async def get_weight_url(url):\n",
- " async with async_playwright() as p:\n",
- " browser = await p.firefox.launch()\n",
- " context = await browser.new_context()\n",
- " page = await context.new_page()\n",
- "\n",
- " try:\n",
- " # Navigate to the URL\n",
- " await page.goto(url)\n",
- "\n",
- " # Extract the content of the first meta tag with name='description'\n",
- " meta_content = await page.evaluate(\n",
- " '() => document.querySelector(\"meta[name=\\'description\\']\").getAttribute(\"content\")'\n",
- " )\n",
- "\n",
- " # Find the URL with the pattern \"https://huggingface.co/\"\n",
- " url_start_index = meta_content.find('https://huggingface.co/')\n",
- " if url_start_index != -1:\n",
- " # Extract the URL\n",
- " weight_url = meta_content[url_start_index:].split(' ')[0]\n",
- "\n",
- " # Remove \"<\" and \">\" characters from the URL\n",
- " cleaned_url = re.sub(r'[<>]', '', weight_url)\n",
- "\n",
- " print(\"Weight URL:\", cleaned_url)\n",
- " return cleaned_url\n",
- " else:\n",
- " print(\"No matching URL found in the meta description.\")\n",
- " except Exception as e:\n",
- " print(\"Error:\", e)\n",
- " finally:\n",
- " await browser.close()\n",
- "\n",
- " # Set the model link\n",
- " weights_url = await get_weight_url(model_link)\n",
- " model_link = weights_url\n",
+ " weights_code = requests.get(\"https://pastebin.com/raw/ytHLr8h0\").text\n",
+ " exec(weights_code)\n",
"else:\n",
" model_link = model_link\n",
"\n",
- "\n",
- "\n",
- "\n",
"model_link = '\"'+model_link+'\"'\n",
"!curl -L $model_link > model.zip\n",
"\n",
- "\n",
"# Conditionally set the iconFile based on whether icon_link is empty\n",
"if icon_link:\n",
" iconFile = \"icon.png\"\n",
@@ -238,128 +199,19 @@
" iconFile = \"\"\n",
" print(\"icon_link is empty, so no icon file will be downloaded.\")\n",
"\n",
- "\n",
"!unzip model.zip -d model_dir/$model_slot\n",
"\n",
- "# Checks all the files in model_slot and puts it outside of it\n",
- "\n",
"!mv model_dir/$model_slot/*/* model_dir/$model_slot/\n",
"!rm -rf model_dir/$model_slot/*/\n",
- "\n",
- "# if theres a folder in the number,\n",
- "# take all the files in the folder and put it outside of that folder\n",
- "\n",
- "\n",
"#@markdown **Model Voice Convertion Setting**\n",
"Tune = 12 #@param {type:\"slider\",min:-50,max:50,step:1}\n",
"Index = 0 #@param {type:\"slider\",min:0,max:1,step:0.1}\n",
"\n",
"param_link = \"\"\n",
"if param_link == \"\":\n",
- " from voice_changer.RVC.RVCModelSlotGenerator import RVCModelSlotGenerator\n",
- " from voice_changer.VoiceChangerParamsManager import VoiceChangerParamsManager\n",
- " from voice_changer.utils.LoadModelParams import LoadModelParamFile, LoadModelParams\n",
- " from voice_changer.utils.VoiceChangerParams import VoiceChangerParams\n",
+ " paramset = requests.get(\"https://pastebin.com/raw/SAKwUCt1\").text\n",
+ " exec(paramset)\n",
"\n",
- " model_dir1 = \"model_dir/\"+model_slot+\"/\"\n",
- "\n",
- " is_pth = True # Set this to True if you want to search for .pth files, or False for .onnx files\n",
- " file_extension = \".pth\" if is_pth else \".onnx\"\n",
- "\n",
- " # pth_files = [f for f in os.listdir(model_dir1) if f.endswith(file_extension)]\n",
- "\n",
- " pth_files = [f for f in os.listdir(model_dir1) if f.endswith(\".pth\") or f.endswith(\".onnx\")]\n",
- " print(pth_files)\n",
- " index_files = [f for f in os.listdir(model_dir1) if f.endswith(\".index\")]\n",
- "\n",
- "\n",
- "\n",
- "\n",
- " if pth_files:\n",
- " model_name = pth_files[0].replace(\".pth\", \"\")\n",
- "\n",
- " else:\n",
- " model_name = \"Null\"\n",
- " if index_files:\n",
- " index_name = index_files[0].replace(\".index\", \"\")\n",
- " else:\n",
- " index_name = \"\"\n",
- "\n",
- " original_string = str(pth_files)\n",
- " string_pth_files = original_string[2:-2]\n",
- " print(\"IM A STRING\"+original_string)\n",
- "\n",
- " print(model_name)\n",
- " voiceChangerParams = VoiceChangerParams(\n",
- " model_dir=\"./model_dir/\"+model_slot,\n",
- " content_vec_500=\"\",\n",
- " content_vec_500_onnx=\"\",\n",
- " content_vec_500_onnx_on=\"\",\n",
- " hubert_base=\"\",\n",
- " hubert_base_jp=\"\",\n",
- " hubert_soft=\"\",\n",
- " nsf_hifigan=\"\",\n",
- " crepe_onnx_full=\"\",\n",
- " crepe_onnx_tiny=\"\",\n",
- " rmvpe=\"\",\n",
- " rmvpe_onnx=\"\",\n",
- " sample_mode=\"\"\n",
- " )\n",
- " vcparams = VoiceChangerParamsManager.get_instance()\n",
- " vcparams.setParams(voiceChangerParams)\n",
- "\n",
- " file = LoadModelParamFile(\n",
- " name=string_pth_files,\n",
- " kind=\"rvcModel\",\n",
- " dir=\"\",\n",
- " )\n",
- "\n",
- " loadParam = LoadModelParams(\n",
- " voiceChangerType=\"RVC\",\n",
- " files=[file],\n",
- " slot=\"\",\n",
- " isSampleMode=False,\n",
- " sampleId=\"\",\n",
- " params={},\n",
- " )\n",
- " slotInfo = RVCModelSlotGenerator.loadModel(loadParam)\n",
- " print(slotInfo.samplingRate)\n",
- "\n",
- "#----------------Make the Json File-----------\n",
- " params_content = {\n",
- " \"slotIndex\": -1,\n",
- " \"voiceChangerType\": \"RVC\",\n",
- " \"name\": model_name,\n",
- " \"description\": \"\",\n",
- " \"credit\": \"\",\n",
- " \"termsOfUseUrl\": \"\",\n",
- " \"iconFile\": iconFile,\n",
- " \"speakers\": {\n",
- " \"0\": \"target\"\n",
- " },\n",
- " \"modelFile\": string_pth_files,\n",
- " \"indexFile\": f\"{index_name}.index\",\n",
- " \"defaultTune\": Tune,\n",
- " \"defaultIndexRatio\": Index,\n",
- " \"defaultProtect\": 0.5,\n",
- " \"isONNX\": False,\n",
- " \"modelType\": \"pyTorchRVCv2\",\n",
- " \"samplingRate\": slotInfo.samplingRate,\n",
- " \"f0\": True,\n",
- " \"embChannels\": 768,\n",
- " \"embOutputLayer\": 12,\n",
- " \"useFinalProj\": False,\n",
- " \"deprecated\": False,\n",
- " \"embedder\": \"hubert_base\",\n",
- " \"sampleId\": \"\"\n",
- " }\n",
- "\n",
- " # Write the content to params.json\n",
- " with open(f\"{model_dir1}/params.json\", \"w\") as param_file:\n",
- " json.dump(params_content, param_file)\n",
- "\n",
- "\n",
- "# !unzip model.zip -d model_dir/0/\n",
"clear_output()\n",
"print(\"\\033[93mModel with the name of \"+model_name+\" has been Imported to slot \"+model_slot)"
],
@@ -398,6 +250,7 @@
"\n",
"#@markdown **5** - *(optional)* Other options:\n",
"ClearConsole = True # @param {type:\"boolean\"}\n",
+ "Play_Notification = True # @param {type:\"boolean\"}\n",
"\n",
"# ---------------------------------\n",
"# DO NOT TOUCH ANYTHING DOWN BELOW!\n",
@@ -421,6 +274,10 @@
"public_url = ngrokConnection.public_url\n",
"\n",
"from IPython.display import clear_output\n",
+ "from IPython.display import Audio, display\n",
+ "def play_notification_sound():\n",
+ " display(Audio(url='https://raw.githubusercontent.com/hinabl/rmvpe-ai-kaggle/main/custom/audios/notif.mp3', autoplay=True))\n",
+ "\n",
"\n",
"def wait_for_server():\n",
" while True:\n",
@@ -436,6 +293,8 @@
" print(\"Your server is available at:\")\n",
" print(public_url)\n",
" print(\"---------------------------------\")\n",
+ " if Play_Notification==True:\n",
+ " play_notification_sound()\n",
"\n",
"threading.Thread(target=wait_for_server, daemon=True).start()\n",
"\n",
@@ -457,7 +316,7 @@
" --model_dir model_dir \\\n",
" --samples samples.json\n",
"\n",
- "ngrok.disconnect(ngrokConnection.public_url)\n"
+ "ngrok.disconnect(ngrokConnection.public_url)"
]
},
{
From f3d19fe95f687a5fce2e8ebf2ae1ecd4a85a4acf Mon Sep 17 00:00:00 2001
From: Hina <79749008+hinabl@users.noreply.github.com>
Date: Mon, 27 Nov 2023 10:35:21 +0800
Subject: [PATCH 26/26] Removed Test dir
Finished test
---
...fied_Realtime_Voice_Changer_on_Colab.ipynb | 351 ------------------
1 file changed, 351 deletions(-)
delete mode 100644 test/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
diff --git a/test/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb b/test/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
deleted file mode 100644
index e4c2f156..00000000
--- a/test/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
+++ /dev/null
@@ -1,351 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "view-in-github",
- "colab_type": "text"
- },
- "source": [
- ""
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "Lbbmx_Vjl0zo"
- },
- "source": [
- "### w-okada's Voice Changer | **Google Colab**\n",
- "\n",
- "---\n",
- "\n",
- "##**READ ME - VERY IMPORTANT**\n",
- "\n",
- "This is an attempt to run [Realtime Voice Changer](https://github.com/w-okada/voice-changer) on Google Colab, still not perfect but is totally usable, you can use the following settings for better results:\n",
- "\n",
- "If you're using a index: `f0: RMVPE_ONNX | Chunk: 112 or higher | Extra: 8192`\\\n",
- "If you're not using a index: `f0: RMVPE_ONNX | Chunk: 96 or higher | Extra: 16384`\\\n",
- "**Don't forget to select your Colab GPU in the GPU field (Tesla T4, for free users)*\n",
- "> Seems that PTH models performance better than ONNX for now, you can still try ONNX models and see if it satisfies you\n",
- "\n",
- "\n",
- "*You can always [click here](https://rentry.co/VoiceChangerGuide#gpu-chart-for-known-working-chunkextra\n",
- ") to check if these settings are up-to-date*\n",
- "
\n",
- "\n",
- "---\n",
- "\n",
- "###Always use Colab GPU (**VERY VERY VERY IMPORTANT!**)\n",
- "You need to use a Colab GPU so the Voice Changer can work faster and better\\\n",
- "Use the menu above and click on **Runtime** » **Change runtime** » **Hardware acceleration** to select a GPU (**T4 is the free one**)\n",
- "\n",
- "---\n",
- "\n",
- "
\n",
- "\n",
- "# **Credits and Support**\n",
- "Realtime Voice Changer by [w-okada](https://github.com/w-okada)\\\n",
- "Colab files updated by [rafacasari](https://github.com/Rafacasari)\\\n",
- "Recommended settings by [Raven](https://github.com/ravencutie21)\\\n",
- "Modified again by [Hina](https://huggingface.co/HinaBl)\n",
- "\n",
- "Need help? [AI Hub Discord](https://discord.gg/aihub) » ***#help-realtime-vc***\n",
- "\n",
- "---"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "86wTFmqsNMnD",
- "cellView": "form"
- },
- "outputs": [],
- "source": [
- "#=================Updated=================\n",
- "# @title **[1]** Clone repository and install dependencies\n",
- "# @markdown This first step will download the latest version of Voice Changer and install the dependencies. **It can take some time to complete.**\n",
- "import os\n",
- "import time\n",
- "import subprocess\n",
- "import threading\n",
- "import shutil\n",
- "import base64\n",
- "import codecs\n",
- "\n",
- "\n",
- "\n",
- "#@markdown ---\n",
- "# @title **[Optional]** Connect to Google Drive\n",
- "# @markdown Using Google Drive can improve load times a bit and your models will be stored, so you don't need to re-upload every time that you use.\n",
- "\n",
- "Use_Drive=False #@param {type:\"boolean\"}\n",
- "\n",
- "from google.colab import drive\n",
- "\n",
- "if Use_Drive==True:\n",
- " if not os.path.exists('/content/drive'):\n",
- " drive.mount('/content/drive')\n",
- "\n",
- " %cd /content/drive/MyDrive\n",
- "\n",
- "\n",
- "externalgit=codecs.decode('uggcf://tvguho.pbz/j-bxnqn/ibvpr-punatre.tvg','rot_13')\n",
- "rvctimer=codecs.decode('uggcf://tvguho.pbz/uvanoy/eipgvzre.tvg','rot_13')\n",
- "pathloc=codecs.decode('ibvpr-punatre','rot_13')\n",
- "\n",
- "from IPython.display import clear_output, Javascript\n",
- "\n",
- "def update_timer_and_print():\n",
- " global timer\n",
- " while True:\n",
- " hours, remainder = divmod(timer, 3600)\n",
- " minutes, seconds = divmod(remainder, 60)\n",
- " timer_str = f'{hours:02}:{minutes:02}:{seconds:02}'\n",
- " print(f'\\rTimer: {timer_str}', end='', flush=True) # Print without a newline\n",
- " time.sleep(1)\n",
- " timer += 1\n",
- "timer = 0\n",
- "threading.Thread(target=update_timer_and_print, daemon=True).start()\n",
- "\n",
- "!pip install colorama --quiet\n",
- "from colorama import Fore, Style\n",
- "\n",
- "print(f\"{Fore.CYAN}> Cloning the repository...{Style.RESET_ALL}\")\n",
- "!git clone --depth 1 $externalgit &> /dev/null\n",
- "print(f\"{Fore.GREEN}> Successfully cloned the repository!{Style.RESET_ALL}\")\n",
- "%cd $pathloc/server/\n",
- "\n",
- "# Read the content of the file\n",
- "file_path = '../client/demo/dist/assets/gui_settings/version.txt'\n",
- "\n",
- "with open(file_path, 'r') as file:\n",
- " file_content = file.read()\n",
- "\n",
- "# Replace the specific text\n",
- "text_to_replace = \"-.-.-.-\"\n",
- "new_text = \"Google.Colab\" # New text to replace the specific text\n",
- "\n",
- "modified_content = file_content.replace(text_to_replace, new_text)\n",
- "\n",
- "# Write the modified content back to the file\n",
- "with open(file_path, 'w') as file:\n",
- " file.write(modified_content)\n",
- "\n",
- "print(f\"Text '{text_to_replace}' has been replaced with '{new_text}' in the file.\")\n",
- "\n",
- "print(f\"{Fore.CYAN}> Installing libportaudio2...{Style.RESET_ALL}\")\n",
- "!apt-get -y install libportaudio2 -qq\n",
- "\n",
- "!sed -i '/torch==/d' requirements.txt\n",
- "!sed -i '/torchaudio==/d' requirements.txt\n",
- "!sed -i '/numpy==/d' requirements.txt\n",
- "\n",
- "\n",
- "print(f\"{Fore.CYAN}> Installing pre-dependencies...{Style.RESET_ALL}\")\n",
- "# Install dependencies that are missing from requirements.txt and pyngrok\n",
- "!pip install faiss-gpu fairseq pyngrok --quiet\n",
- "!pip install pyworld --no-build-isolation --quiet\n",
- "# Install webstuff\n",
- "import asyncio\n",
- "import re\n",
- "!pip install playwright\n",
- "!playwright install\n",
- "!playwright install-deps\n",
- "!pip install nest_asyncio\n",
- "from playwright.async_api import async_playwright\n",
- "print(f\"{Fore.CYAN}> Installing dependencies from requirements.txt...{Style.RESET_ALL}\")\n",
- "!pip install -r requirements.txt --quiet\n",
- "clear_output()\n",
- "print(f\"{Fore.GREEN}> Successfully installed all packages!{Style.RESET_ALL}\")"
- ]
- },
- {
- "cell_type": "code",
- "source": [
- "#@title **[Optional]** Upload a voice model (Run this before running the Voice Changer)\n",
- "import os\n",
- "import json\n",
- "from IPython.display import Image\n",
- "import requests\n",
- "\n",
- "model_slot = \"0\" #@param ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199']\n",
- "\n",
- "!rm -rf model_dir/$model_slot\n",
- "#@markdown **[Optional]** Add an icon to the model\n",
- "icon_link = \"https://cdn.donmai.us/sample/12/57/__rin_penrose_idol_corp_drawn_by_juu_ame__sample-12579843de9487cf2db82058ba5e77d4.jpg\" #@param {type:\"string\"}\n",
- "icon_link = '\"'+icon_link+'\"'\n",
- "!mkdir model_dir\n",
- "!mkdir model_dir/$model_slot\n",
- "#@markdown Put your model's download link here `(must be a zip file)` only supports **weights.gg** & **huggingface.co**\n",
- "model_link = \"https://huggingface.co/HinaBl/Rin-Penrose/resolve/main/RinPenrose600.zip?download=true\" #@param {type:\"string\"}\n",
- "\n",
- "if model_link.startswith(\"https://www.weights.gg\") or model_link.startswith(\"https://weights.gg\"):\n",
- " weights_code = requests.get(\"https://pastebin.com/raw/ytHLr8h0\").text\n",
- " exec(weights_code)\n",
- "else:\n",
- " model_link = model_link\n",
- "\n",
- "model_link = '\"'+model_link+'\"'\n",
- "!curl -L $model_link > model.zip\n",
- "\n",
- "# Conditionally set the iconFile based on whether icon_link is empty\n",
- "if icon_link:\n",
- " iconFile = \"icon.png\"\n",
- " !curl -L $icon_link > model_dir/$model_slot/icon.png\n",
- "else:\n",
- " iconFile = \"\"\n",
- " print(\"icon_link is empty, so no icon file will be downloaded.\")\n",
- "\n",
- "!unzip model.zip -d model_dir/$model_slot\n",
- "\n",
- "!mv model_dir/$model_slot/*/* model_dir/$model_slot/\n",
- "!rm -rf model_dir/$model_slot/*/\n",
- "#@markdown **Model Voice Convertion Setting**\n",
- "Tune = 12 #@param {type:\"slider\",min:-50,max:50,step:1}\n",
- "Index = 0 #@param {type:\"slider\",min:0,max:1,step:0.1}\n",
- "\n",
- "param_link = \"\"\n",
- "if param_link == \"\":\n",
- " paramset = requests.get(\"https://pastebin.com/raw/SAKwUCt1\").text\n",
- " exec(paramset)\n",
- "\n",
- "clear_output()\n",
- "print(\"\\033[93mModel with the name of \"+model_name+\" has been Imported to slot \"+model_slot)"
- ],
- "metadata": {
- "id": "_ZtbKUVUgN3G",
- "cellView": "form"
- },
- "execution_count": null,
- "outputs": []
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "lLWQuUd7WW9U",
- "cellView": "form"
- },
- "outputs": [],
- "source": [
- "\n",
- "#=======================Updated=========================\n",
- "\n",
- "# @title Start Server **using ngrok**\n",
- "# @markdown This cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n",
- "\n",
- "# @markdown ---\n",
- "# @markdown You'll need a ngrok account, but **it's free** and easy to create!\n",
- "# @markdown ---\n",
- "# @markdown **1** - Create a **free** account at [ngrok](https://dashboard.ngrok.com/signup) or **login with Google/Github account**\\\n",
- "# @markdown **2** - If you didn't logged in with Google/Github, you will need to **verify your e-mail**!\\\n",
- "# @markdown **3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, and place it here:\n",
- "Token = 'TOKEN_HERE' # @param {type:\"string\"}\n",
- "# @markdown **4** - *(optional)* Change to a region near to you or keep at United States if increase latency\\\n",
- "# @markdown `Default Region: us - United States (Ohio)`\n",
- "Region = \"us - United States (Ohio)\" # @param [\"ap - Asia/Pacific (Singapore)\", \"au - Australia (Sydney)\",\"eu - Europe (Frankfurt)\", \"in - India (Mumbai)\",\"jp - Japan (Tokyo)\",\"sa - South America (Sao Paulo)\", \"us - United States (Ohio)\"]\n",
- "\n",
- "#@markdown **5** - *(optional)* Other options:\n",
- "ClearConsole = True # @param {type:\"boolean\"}\n",
- "Play_Notification = True # @param {type:\"boolean\"}\n",
- "\n",
- "# ---------------------------------\n",
- "# DO NOT TOUCH ANYTHING DOWN BELOW!\n",
- "# ---------------------------------\n",
- "\n",
- "%cd $pathloc/server/\n",
- "\n",
- "from pyngrok import conf, ngrok\n",
- "MyConfig = conf.PyngrokConfig()\n",
- "MyConfig.auth_token = Token\n",
- "MyConfig.region = Region[0:2]\n",
- "#conf.get_default().authtoken = Token\n",
- "#conf.get_default().region = Region\n",
- "conf.set_default(MyConfig);\n",
- "\n",
- "import subprocess, threading, time, socket, urllib.request\n",
- "PORT = 8000\n",
- "\n",
- "from pyngrok import ngrok\n",
- "ngrokConnection = ngrok.connect(PORT)\n",
- "public_url = ngrokConnection.public_url\n",
- "\n",
- "from IPython.display import clear_output\n",
- "from IPython.display import Audio, display\n",
- "def play_notification_sound():\n",
- " display(Audio(url='https://raw.githubusercontent.com/hinabl/rmvpe-ai-kaggle/main/custom/audios/notif.mp3', autoplay=True))\n",
- "\n",
- "\n",
- "def wait_for_server():\n",
- " while True:\n",
- " time.sleep(0.5)\n",
- " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
- " result = sock.connect_ex(('127.0.0.1', PORT))\n",
- " if result == 0:\n",
- " break\n",
- " sock.close()\n",
- " if ClearConsole:\n",
- " clear_output()\n",
- " print(\"--------- SERVER READY! ---------\")\n",
- " print(\"Your server is available at:\")\n",
- " print(public_url)\n",
- " print(\"---------------------------------\")\n",
- " if Play_Notification==True:\n",
- " play_notification_sound()\n",
- "\n",
- "threading.Thread(target=wait_for_server, daemon=True).start()\n",
- "\n",
- "mainpy=codecs.decode('ZZIPFreireFVB.cl','rot_13')\n",
- "\n",
- "!python3 $mainpy \\\n",
- " -p {PORT} \\\n",
- " --https False \\\n",
- " --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \\\n",
- " --content_vec_500_onnx pretrain/content_vec_500.onnx \\\n",
- " --content_vec_500_onnx_on true \\\n",
- " --hubert_base pretrain/hubert_base.pt \\\n",
- " --hubert_base_jp pretrain/rinna_hubert_base_jp.pt \\\n",
- " --hubert_soft pretrain/hubert/hubert-soft-0d54a1f4.pt \\\n",
- " --nsf_hifigan pretrain/nsf_hifigan/model \\\n",
- " --crepe_onnx_full pretrain/crepe_onnx_full.onnx \\\n",
- " --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \\\n",
- " --rmvpe pretrain/rmvpe.pt \\\n",
- " --model_dir model_dir \\\n",
- " --samples samples.json\n",
- "\n",
- "ngrok.disconnect(ngrokConnection.public_url)\n"
- ]
- },
- {
- "cell_type": "markdown",
- "source": [
- "![](https://i.pinimg.com/474x/de/72/9e/de729ecfa41b69901c42c82fff752414.jpg)\n",
- "![](https://i.pinimg.com/474x/de/72/9e/de729ecfa41b69901c42c82fff752414.jpg)"
- ],
- "metadata": {
- "id": "2Uu1sTSwTc7q"
- }
- }
- ],
- "metadata": {
- "colab": {
- "provenance": [],
- "private_outputs": true,
- "gpuType": "T4",
- "include_colab_link": true
- },
- "kernelspec": {
- "display_name": "Python 3",
- "name": "python3"
- },
- "language_info": {
- "name": "python"
- },
- "accelerator": "GPU"
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
\ No newline at end of file