import sys
import os
import struct
import argparse
import logging
import shutil
import base64
import traceback

# Make the bundled trainer code importable (container layout).
sys.path.append("/MMVC_Trainer")
sys.path.append("/MMVC_Trainer/text")

import uvicorn
# NOTE(review): FastAPI was imported twice in the original; merged into one line
# (HTTPException folded in) — no imported name was removed.
from fastapi import FastAPI, UploadFile, File, Form, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from fastapi.encoders import jsonable_encoder
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
from scipy.io.wavfile import write, read
import socketio
# NOTE(review): distutils is deprecated (PEP 632, removed in 3.12) — strtobool
# has no stdlib replacement; consider a small local helper when upgrading Python.
from distutils.util import strtobool
from datetime import datetime
import torch
import numpy as np

from mods.ssl import create_self_signed_cert
from mods.VoiceChanger import VoiceChanger
# from mods.Whisper import Whisper


class UvicornSuppressFilter(logging.Filter):
    """Logging filter that rejects every record, used to silence a logger."""

    def filter(self, record):
        return False


# Silence uvicorn's error logger completely.
logger = logging.getLogger("uvicorn.error")
logger.addFilter(UvicornSuppressFilter())
# logger.propagate = False

# Keep python-multipart's per-chunk debug output from reaching the root logger.
logger = logging.getLogger("multipart.multipart")
logger.propagate = False


class VoiceModel(BaseModel):
    """Request payload for a voice-conversion call (HTTP side)."""
    gpu: int
    srcId: int              # source speaker id
    dstId: int              # destination speaker id
    timestamp: int
    prefixChunkSize: int
    buffer: str             # base64-encoded 16-bit PCM audio — TODO confirm encoding against caller


class MyCustomNamespace(socketio.AsyncNamespace):
    """Socket.IO namespace that owns the active VoiceChanger instance."""

    def __init__(self, namespace):
        super().__init__(namespace)

    def loadModel(self, config, model):
        """(Re)load the conversion model, destroying any previously loaded one."""
        # Fix: was `hasattr(...) == True`; truthiness of hasattr is the idiom.
        if hasattr(self, 'voiceChanger'):
            self.voiceChanger.destroy()
        self.voiceChanger = VoiceChanger(config, model)

    # def loadWhisperModel(self, model):
    #     self.whisper = Whisper()
    #     self.whisper.loadModel("tiny")
    #     print("load")

    def changeVoice(self, gpu, srcId, dstId, timestamp, prefixChunkSize, unpackedData):
        """Delegate one conversion request to the loaded VoiceChanger."""
        # if hasattr(self, 'whisper'):
        #     self.whisper.addData(unpackedData)
        return self.voiceChanger.on_request(gpu, srcId, dstId, timestamp, prefixChunkSize, unpackedData)

    # def transcribe(self):
    #     if hasattr(self, 'whisper'):
    #         self.whisper.transcribe(0)
    #     else:
    #         print("whisper not found")

    def on_connect(self, sid, environ):
        # print('[{}] connect sid : {}'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), sid))
        pass
%H:%M:%S') , sid)) pass async def on_request_message(self, sid, msg): # print("on_request_message", torch.cuda.memory_allocated()) gpu = int(msg[0]) srcId = int(msg[1]) dstId = int(msg[2]) timestamp = int(msg[3]) prefixChunkSize = int(msg[4]) data = msg[5] # print(srcId, dstId, timestamp) unpackedData = np.array(struct.unpack('<%sh'%(len(data) // struct.calcsize(':/ with your browser.", level=0) else: printMessage(f"open http://:/ with your browser.", level=0) if EX_PORT and EX_IP and args.https == 1: printMessage(f"In many cases it is one of the following", level=1) printMessage(f"https://localhost:{EX_PORT}/", level=1) for ip in EX_IP.strip().split(" "): printMessage(f"https://{ip}:{EX_PORT}/", level=1) elif EX_PORT and EX_IP and args.https == 0: printMessage(f"In many cases it is one of the following", level=1) printMessage(f"http://localhost:{EX_PORT}/", level=1) # サーバ起動 if args.https: # HTTPS サーバ起動 uvicorn.run( f"{os.path.basename(__file__)[:-3]}:app_socketio", host="0.0.0.0", port=int(PORT), reload=True, ssl_keyfile = key_path, ssl_certfile = cert_path, log_level="critical" ) else: # HTTP サーバ起動 if args.colab == True: uvicorn.run( f"{os.path.basename(__file__)[:-3]}:app_fastapi", host="0.0.0.0", port=int(PORT), log_level="critical" ) else: uvicorn.run( f"{os.path.basename(__file__)[:-3]}:app_socketio", host="0.0.0.0", port=int(PORT), reload=True, log_level="critical" )