diff --git a/w_okada's_Voice_Changer_version_2_x.ipynb b/w_okada's_Voice_Changer_version_2_x.ipynb index 3d4cbb69..cf5c224c 100644 --- a/w_okada's_Voice_Changer_version_2_x.ipynb +++ b/w_okada's_Voice_Changer_version_2_x.ipynb @@ -52,12 +52,38 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": { "id": "W2GYWTHWmRIY", - "cellView": "form" + "cellView": "form", + "outputId": "8ee57273-7774-44b6-a955-2f9408e489f1", + "colab": { + "base_uri": "https://localhost:8080/" + } }, - "outputs": [], + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "GPU is available\n", + "GPU Name: Tesla T4\n", + "Welcome to ColabMod\n", + "Mounted at /content/drive\n", + "Checking latest version...\n", + "current_version_hash: 1221695f103b2c68f822e61fd5bcfc4c\n", + "latest_version_hash : \n", + "hash not match -> download latest version\n", + " % Total % Received % Xferd Average Speed Time Time Time Current\n", + " Dload Upload Total Spent Left Speed\n", + "100 1155 100 1155 0 0 5130 0 --:--:-- --:--:-- --:--:-- 5133\n", + "100 3027M 100 3027M 0 0 69.7M 0 0:00:43 0:00:43 --:--:-- 42.0M\n", + "Download is done.\n", + "/content/drive/MyDrive/vcclient\n", + "Installing modules... 
Install is done.\n" + ] + } + ], "source": [ "#=================Updated=================\n", "# @title **[1]** Clone repository and install dependencies\n", @@ -74,6 +100,13 @@ "import shutil\n", "import base64\n", "import codecs\n", + "import torch\n", + "import sys\n", + "\n", + "from typing import Literal, TypeAlias\n", + "\n", + "Mode: TypeAlias = Literal[\"elf\", \"zip\"]\n", + "mode:Mode=\"elf\"\n", "\n", "# Configs\n", "Run_Cell=0\n", @@ -82,6 +115,14 @@ "current_version_hash=None\n", "latest_version_hash=None\n", "\n", + "# Check GPU\n", + "if torch.cuda.is_available():\n", + " print(\"GPU is available\")\n", + " print(\"GPU Name:\", torch.cuda.get_device_name(0))\n", + "else:\n", + " print(\"GPU is not available\")\n", + " # sys.exit(\"No GPU available. Change runtime.\")\n", + "\n", "\n", "notebook_env=0\n", "if os.path.exists('/content'):\n", @@ -122,7 +163,12 @@ " print(f\"hash not match -> download latest version\")\n", "\n", " latest_hash_path=f'{work_dir}/latest_hash.txt'\n", - " !curl -L https://huggingface.co/wok000/vcclient000_colab/resolve/main/vcclient_latest_for_colab.zip -o {work_dir}/vcclient_latest_for_colab.zip\n", + "\n", + " if mode == \"elf\":\n", + " !curl -L https://huggingface.co/wok000/vcclient000_colab/resolve/main/vcclient_latest_for_colab -o {work_dir}/vcclient_latest_for_colab\n", + " elif mode == \"zip\":\n", + " !curl -L https://huggingface.co/wok000/vcclient000_colab/resolve/main/vcclient_latest_for_colab.zip -o {work_dir}/vcclient_latest_for_colab.zip\n", + "\n", " !cp latest_hash.txt {latest_hash_path}\n", " print(\"Download is done.\")\n", " else:\n", @@ -133,16 +179,25 @@ " print(\"Downloading the latest vcclient... 
\")\n", " !curl -s -L https://huggingface.co/wok000/vcclient000_colab/resolve/main/latest_hash.txt -o latest_hash.txt\n", " latest_version_hash = open('latest_hash.txt').read().strip()\n", - " !curl -L https://huggingface.co/wok000/vcclient000_colab/resolve/main/vcclient_latest_for_colab.zip -o {work_dir}/vcclient_latest_for_colab.zip\n", + "\n", + " if mode == \"elf\":\n", + " !curl -L https://huggingface.co/wok000/vcclient000_colab/resolve/main/vcclient_latest_for_colab -o {work_dir}/vcclient_latest_for_colab\n", + " elif mode == \"zip\":\n", + " !curl -L https://huggingface.co/wok000/vcclient000_colab/resolve/main/vcclient_latest_for_colab.zip -o {work_dir}/vcclient_latest_for_colab.zip\n", + "\n", " print(\"Download is done.\")\n", "\n", - "if current_version_hash != latest_version_hash:\n", + "if current_version_hash != latest_version_hash and mode == \"zip\":\n", " print(f\"Unzip vcclient to {latest_version_hash} ... \")\n", " !cd {work_dir} && unzip -q vcclient_latest_for_colab.zip -d {latest_version_hash}\n", " print(f\"Unzip is done.\")\n", "\n", - "%cd {work_dir}/{latest_version_hash}/main\n", - "!chmod 0700 main\n", + "if mode == \"elf\":\n", + " %cd {work_dir}/{latest_version_hash}\n", + " !chmod 0700 vcclient_latest_for_colab\n", + "elif mode == \"zip\":\n", + " %cd {work_dir}/{latest_version_hash}/main\n", + " !chmod 0700 main\n", "\n", "print(\"Installing modules... 
",end=\"\")\n",
 "!sudo apt-get install -y libportaudio2 > /dev/null 2>&1\n",
@@ -154,14 +209,26 @@
 },
 {
 "cell_type": "code",
- "execution_count": null,
+ "execution_count": 3,
 "metadata": {
 "id": "Dbx41M-zlknc",
- "cellView": "form"
+ "cellView": "form",
+ "outputId": "ae3d8a18-aa4c-4eb2-98aa-11e725cee2bf",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ }
 },
- "outputs": [],
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "ngrok ready\n"
+ ]
+ }
+ ],
 "source": [
- "PORT=8000\n",
+ "PORT=8003\n",
 "\n",
 "import codecs\n",
 "\n",
@@ -174,7 +241,7 @@
 "# @markdown **1** - Create a **free** account at [ngrok](https://dashboard.ngrok.com/signup) or **login with Google/Github account**\\\n",
 "# @markdown **2** - If you didn't logged in with Google/Github, you will need to **verify your e-mail**!\\\n",
 "# @markdown **3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, and place it here:\n",
- "Token = '' # @param {type:\"string\"}\n",
+ "Token = '' # @param {type:\"string\"}\n",
 "# @markdown **4** - *(optional)* Change to a region near to you or keep at United States if increase latency\\\n",
 "# @markdown `Default Region: ap - Asia/Pacific (Singapore)`\n",
 "Region = \"jp - Japan (Tokyo)\" # @param [\"ap - Asia/Pacific (Singapore)\", \"au - Australia (Sydney)\",\"eu - Europe (Frankfurt)\", \"in - India (Mumbai)\",\"jp - Japan (Tokyo)\",\"sa - South America (Sao Paulo)\", \"us - United States (Ohio)\"]\n",
@@ -205,31 +272,87 @@
 "from pyngrok import ngrok\n",
 "ngrokConnection = ngrok.connect(PORT)\n",
 "public_url = ngrokConnection.public_url\n",
- "print(f\"your app's url is {public_url}\")"
+ "print(f\"ngrok ready\")"
 ]
 },
 {
 "cell_type": "code",
- "execution_count": null,
+ "execution_count": 4,
 "metadata": {
 "id": "s7mYqKtW6VOI",
- "cellView": "form"
+ "cellView": "form",
+ "outputId": "afa5d810-4904-4422-967e-54ba1642d01a",
+ 
"colab": { + "base_uri": "https://localhost:8080/" + } }, - "outputs": [], + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "--------- SERVER READY! ---------\n", + "Your server is available. elapsed: 145sec\n", + "https://ae1e-34-125-45-231.ngrok-free.app\n", + "---------------------------------\n" + ] + } + ], "source": [ "# @title **[3]** Start server\n", "# @markdown This cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)\n", "\n", - "# @markdown When you see the message \"running...\", please launch the application from the link above.\n", - "!LD_LIBRARY_PATH=/usr/lib64-nvidia:/usr/lib/x86_64-linux-gnu ./main cui --port {PORT} --no_cui true\n", - "\n" + "LOG_FILE = f\"/content/LOG_FILE_{PORT}\"\n", + "if mode == \"elf\":\n", + " # !LD_LIBRARY_PATH=/usr/lib64-nvidia:/usr/lib/x86_64-linux-gnu ./vcclient_latest_for_colab cui --port {PORT} --no_cui true &\n", + "\n", + " get_ipython().system_raw(f'LD_LIBRARY_PATH=/usr/lib64-nvidia:/usr/lib/x86_64-linux-gnu ./vcclient_latest_for_colab cui --port {PORT} --no_cui true >{LOG_FILE} 2>&1 &')\n", + "elif mode == \"zip\":\n", + " !LD_LIBRARY_PATH=/usr/lib64-nvidia:/usr/lib/x86_64-linux-gnu ./main cui --port {PORT} --no_cui true &\n", + "\n", + "\n", + "import socket\n", + "def wait_for_server():\n", + " elapsed_time = 0\n", + " start_time = time.time()\n", + "\n", + "\n", + " while True:\n", + " time.sleep(1)\n", + " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n", + " result = sock.connect_ex(('127.0.0.1', PORT))\n", + " if result == 0:\n", + " break\n", + " sock.close()\n", + " # 時刻を出力\n", + " current_time = time.time()\n", + " elapsed_time = int(current_time - start_time)\n", + " clear_output(wait=True)\n", + " print(f\"Waiting for server... 
elapsed: {elapsed_time}sec\")\n", + " try:\n", + " with open(LOG_FILE, 'r') as f:\n", + " lines = f.readlines()[-5:]\n", + " for line in lines:\n", + " print(line.strip())\n", + " except:\n", + " pass\n", + "\n", + " if ClearConsole:\n", + " clear_output()\n", + " print(\"--------- SERVER READY! ---------\")\n", + " print(f\"Your server is available. elapsed: {elapsed_time}sec\")\n", + " print(public_url)\n", + " print(\"---------------------------------\")\n", + " # if Play_Notification==True:\n", + " # play_notification_sound()\n", + "wait_for_server()\n" ] }, { "cell_type": "code", "source": [], "metadata": { - "id": "iv9OixP6rCEP" + "id": "WpjWNYwNXydp" }, "execution_count": null, "outputs": [] @@ -238,7 +361,8 @@ "metadata": { "colab": { "provenance": [], - "authorship_tag": "ABX9TyNWJR74GxLHE/fMfDb7IR0J", + "gpuType": "T4", + "authorship_tag": "ABX9TyMV0OOBonc66rzKOaFNdK7a", "include_colab_link": true }, "kernelspec": { @@ -247,7 +371,8 @@ }, "language_info": { "name": "python" - } + }, + "accelerator": "GPU" }, "nbformat": 4, "nbformat_minor": 0