import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse
from fastapi.staticfiles import StaticFiles
import logging
import os, sys, base64, traceback, struct

import torch
import numpy as np
from scipy.io.wavfile import write, read

# sys.path.append("mod")
# sys.path.append("mod/text")
sys.path.append("/MMVC_Trainer")
sys.path.append("/MMVC_Trainer/text")
import utils
import commons
from data_utils import TextAudioSpeakerLoader, TextAudioSpeakerCollate
from models import SynthesizerTrn
from text.symbols import symbols
from mel_processing import spectrogram_torch
from text import text_to_sequence, cleaned_text_to_sequence


class VoiceChanger():
    def __init__(self, config, model):
        # Load the training hyperparameters and build the VITS synthesizer
        # they describe.
        self.hps = utils.get_hparams_from_file(config)
        self.net_g = SynthesizerTrn(
            len(symbols),
            self.hps.data.filter_length // 2 + 1,
            self.hps.train.segment_size // self.hps.data.hop_length,
            n_speakers=self.hps.data.n_speakers,
            **self.hps.model)
        self.net_g.eval()
        self.gpu_num = torch.cuda.device_count()
        print("GPU_NUM:", self.gpu_num)
        # Restore the trained weights; the optimizer argument is unused at
        # inference time.
        utils.load_checkpoint(model, self.net_g, None)

    def on_request(self, gpu, srcId, dstId, timestamp, wav):
        if wav == 0:
            # Sentinel request with no payload: fall back to the bundled
            # dummy waveform.
            samplerate, data = read("dummy.wav")
            unpackedData = data
        else:
            # Decode the raw little-endian 16-bit PCM bytes into an int array.
            unpackedData = np.array(struct.unpack(
                '<%sh' % (len(wav) // struct.calcsize('<h')), wav))
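        # --- The original method is truncated at this point in the source.
        # What follows is a minimal sketch of the usual VITS voice-conversion
        # flow implied by the imports above: normalize the PCM to [-1, 1],
        # compute a linear spectrogram with the imported spectrogram_torch,
        # and map the srcId speaker onto dstId. The hparam names
        # (max_wav_value, sampling_rate, win_length) and the
        # SynthesizerTrn.voice_conversion signature follow stock VITS and are
        # assumptions here, not this repository's confirmed code.
        with torch.no_grad():
            audio = torch.FloatTensor(unpackedData.astype(np.float32))
            audio_norm = (audio / self.hps.data.max_wav_value).unsqueeze(0)
            spec = spectrogram_torch(
                audio_norm,
                self.hps.data.filter_length,
                self.hps.data.sampling_rate,
                self.hps.data.hop_length,
                self.hps.data.win_length,
                center=False)
            spec_lengths = torch.LongTensor([spec.size(-1)])
            sid_src = torch.LongTensor([srcId])
            sid_tgt = torch.LongTensor([dstId])
            # voice_conversion returns (audio, mask, latents) in stock VITS;
            # keep the first waveform of the batch.
            audio_out = self.net_g.voice_conversion(
                spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt)[0][0, 0]
            # Rescale back to int16 PCM for the response.
            return (audio_out.cpu().numpy()
                    * self.hps.data.max_wav_value).astype(np.int16)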