mirror of https://github.com/w-okada/voice-changer.git
synced 2025-01-23 21:45:00 +03:00

commit 60db7877e2 (parent a2a12f54ad): restrcture
@@ -21,7 +21,6 @@ import uvicorn
 import socketio

 from typing import Callable
-from mods.VoiceChanger import VoiceChanger
 from mods.ssl import create_self_signed_cert
 from voice_changer.VoiceChangerManager import VoiceChangerManager
 from sio.MMVC_SocketIOApp import MMVC_SocketIOApp
@@ -29,11 +28,6 @@ from sio.MMVC_SocketIOApp import MMVC_SocketIOApp
 from restapi.MMVC_Rest import MMVC_Rest
-
-
-
-
-


 def setupArgParser():
     parser = argparse.ArgumentParser()
     parser.add_argument("-t", type=str, default="MMVC",
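
Taken together, the two hunks above strip the entry point's last direct dependency on the legacy `mods.VoiceChanger` class: conversion now goes through `voice_changer.VoiceChangerManager`, with `restapi.MMVC_Rest` and `sio.MMVC_SocketIOApp` as the HTTP and Socket.IO front ends. A minimal sketch of how an entry point like this could wire those pieces into uvicorn — the factory names and port below are illustrative assumptions, not code from this commit:

    # Hypothetical wiring; only the imported class names come from the hunks above.
    voiceChangerManager = VoiceChangerManager()                   # assumed constructor
    app_fastapi = MMVC_Rest.get_instance(voiceChangerManager)     # assumed factory method
    app_socketio = MMVC_SocketIOApp.get_instance(app_fastapi, voiceChangerManager)  # assumed factory method
    uvicorn.run(app_socketio, host="0.0.0.0", port=18888)         # port is an assumption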
@@ -1,106 +0,0 @@
-import torch
-
-from scipy.io.wavfile import write, read
-import numpy as np
-import traceback
-
-import utils
-import commons
-from models import SynthesizerTrn
-
-from text.symbols import symbols
-from data_utils import TextAudioSpeakerLoader, TextAudioSpeakerCollate
-
-from mel_processing import spectrogram_torch
-from text import text_to_sequence, cleaned_text_to_sequence
-
-
-class VoiceChanger():
-    def __init__(self, config, model):
-        self.hps = utils.get_hparams_from_file(config)
-        self.net_g = SynthesizerTrn(
-            len(symbols),
-            self.hps.data.filter_length // 2 + 1,
-            self.hps.train.segment_size // self.hps.data.hop_length,
-            n_speakers=self.hps.data.n_speakers,
-            **self.hps.model)
-        self.net_g.eval()
-        self.gpu_num = torch.cuda.device_count()
-        utils.load_checkpoint(model, self.net_g, None)
-
-        text_norm = text_to_sequence("a", self.hps.data.text_cleaners)
-        text_norm = commons.intersperse(text_norm, 0)
-        self.text_norm = torch.LongTensor(text_norm)
-        self.audio_buffer = torch.zeros(1, 0)
-        self.prev_audio = np.zeros(1)
-        self.mps_enabled = getattr(
-            torch.backends, "mps", None) is not None and torch.backends.mps.is_available()
-
-        print(
-            f"VoiceChanger Initialized (GPU_NUM:{self.gpu_num}, mps_enabled:{self.mps_enabled})")
-
-    def destroy(self):
-        del self.net_g
-
-    def on_request(self, gpu, srcId, dstId, timestamp, prefixChunkSize, wav):
-        unpackedData = wav
-        convertSize = unpackedData.shape[0] + (prefixChunkSize * 512)
-
-        try:
-
-            audio = torch.FloatTensor(unpackedData.astype(np.float32))
-            audio_norm = audio / self.hps.data.max_wav_value
-            audio_norm = audio_norm.unsqueeze(0)
-            self.audio_buffer = torch.cat(
-                [self.audio_buffer, audio_norm], axis=1)
-            audio_norm = self.audio_buffer[:, -convertSize:]
-            self.audio_buffer = audio_norm
-
-            spec = spectrogram_torch(audio_norm, self.hps.data.filter_length,
-                                     self.hps.data.sampling_rate, self.hps.data.hop_length, self.hps.data.win_length,
-                                     center=False)
-            spec = torch.squeeze(spec, 0)
-            sid = torch.LongTensor([int(srcId)])
-
-            data = (self.text_norm, spec, audio_norm, sid)
-            data = TextAudioSpeakerCollate()([data])
-
-            # if gpu < 0 or (self.gpu_num == 0 and not self.mps_enabled):
-            if gpu < 0 or self.gpu_num == 0:
-                with torch.no_grad():
-                    x, x_lengths, spec, spec_lengths, y, y_lengths, sid_src = [
-                        x.cpu() for x in data]
-                    sid_tgt1 = torch.LongTensor([dstId]).cpu()
-                    audio1 = (self.net_g.cpu().voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt1)[
-                        0][0, 0].data * self.hps.data.max_wav_value).cpu().float().numpy()
-            # elif self.mps_enabled == True: # MPS doesnt support aten::weight_norm_interface, and PYTORCH_ENABLE_MPS_FALLBACK=1 cause a big dely.
-            #     x, x_lengths, spec, spec_lengths, y, y_lengths, sid_src = [
-            #         x.to("mps") for x in data]
-            #     sid_tgt1 = torch.LongTensor([dstId]).to("mps")
-            #     audio1 = (self.net_g.to("mps").voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt1)[
-            #         0][0, 0].data * self.hps.data.max_wav_value).cpu().float().numpy()
-
-            else:
-                with torch.no_grad():
-                    x, x_lengths, spec, spec_lengths, y, y_lengths, sid_src = [
-                        x.cuda(gpu) for x in data]
-                    sid_tgt1 = torch.LongTensor([dstId]).cuda(gpu)
-                    audio1 = (self.net_g.cuda(gpu).voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt1)[
-                        0][0, 0].data * self.hps.data.max_wav_value).cpu().float().numpy()
-
-            # if len(self.prev_audio) > unpackedData.shape[0]:
-            #     prevLastFragment = self.prev_audio[-unpackedData.shape[0]:]
-            #     curSecondLastFragment = audio1[-unpackedData.shape[0]*2:-unpackedData.shape[0]]
-            #     print("prev, cur", prevLastFragment.shape, curSecondLastFragment.shape)
-            # self.prev_audio = audio1
-            # print("self.prev_audio", self.prev_audio.shape)

-            audio1 = audio1[-unpackedData.shape[0]*2:]
-
-        except Exception as e:
-            print("VC PROCESSING!!!! EXCEPTION!!!", e)
-            print(traceback.format_exc())
-
-        audio1 = audio1.astype(np.int16)
-        return audio1
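
The deleted file above is the legacy real-time converter. Two details are worth spelling out: `on_request` keeps a persistent `audio_buffer`, appends each incoming chunk, and trims the buffer to the newest `convertSize` samples (chunk length plus `prefixChunkSize * 512` samples of left context) before spectrogram extraction; after conversion it returns only the last two chunk-lengths of output. Note also that the `except` block only logs, so if the `try` body fails before `audio1` is assigned, the trailing `audio1.astype(np.int16)` raises a NameError. A standalone sketch of the buffering scheme (function and variable names are mine; the logic mirrors `on_request` above):

    import numpy as np
    import torch

    def push_chunk(buffer, chunk, prefix_chunk_size):
        # Keep the newest convertSize samples: the current chunk plus
        # prefix_chunk_size * 512 samples of history as left context.
        convert_size = chunk.shape[0] + prefix_chunk_size * 512
        chunk_t = torch.from_numpy(chunk.astype(np.float32)).unsqueeze(0)
        buffer = torch.cat([buffer, chunk_t], dim=1)
        return buffer[:, -convert_size:]

    buffer = torch.zeros(1, 0)
    for _ in range(3):
        chunk = np.random.randn(1024)                 # stand-in for one audio chunk
        buffer = push_chunk(buffer, chunk, prefix_chunk_size=2)
    print(buffer.shape)                               # torch.Size([1, 2048]) = 1024 + 2 * 512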
@@ -1,36 +0,0 @@
-import whisper
-import numpy as np
-import torchaudio
-from scipy.io.wavfile import write
-
-_MODELS = {
-    "tiny": "/whisper/tiny.pt",
-    "base": "/whisper/base.pt",
-    "small": "/whisper/small.pt",
-    "medium": "/whisper/medium.pt",
-}
-
-
-class Whisper():
-    def __init__(self):
-        self.storedSizeFromTry = 0
-
-    def loadModel(self, model):
-        # self.model = whisper.load_model(_MODELS[model], device="cpu")
-        self.model = whisper.load_model(_MODELS[model])
-        self.data = np.zeros(1).astype(np.float)
-
-    def addData(self, unpackedData):
-        self.data = np.concatenate([self.data, unpackedData], 0)
-
-    def transcribe(self, audio):
-        received_data_file = "received_data.wav"
-        write(received_data_file, 24000, self.data.astype(np.int16))
-        source, sr = torchaudio.load(received_data_file)
-        target = torchaudio.functional.resample(source, 24000, 16000)
-        result = self.model.transcribe(received_data_file)
-        print("WHISPER1:::", result["text"])
-        print("WHISPER2:::", result["segments"])
-        self.data = np.zeros(1).astype(np.float)
-        return result["text"]
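
In the deleted Whisper wrapper above, `transcribe()` resamples the 24 kHz capture to 16 kHz with torchaudio but never uses the result: `target` is dead code, and `model.transcribe()` is handed the 24 kHz file path (the openai-whisper package loads and resamples audio itself via ffmpeg, so this still worked). Also, `np.float` was deprecated in NumPy 1.20 and removed in 1.24, so this file would not run on a current NumPy. Had the resampled tensor been meant to be used, a sketch like this would pass it directly — an illustration, not the original code:

    # whisper's transcribe() also accepts a float32 array in [-1, 1] at 16 kHz;
    # torchaudio.load() already normalizes to that range by default.
    audio16k = target.squeeze(0).numpy().astype(np.float32)  # mono: drop channel dim
    result = self.model.transcribe(audio16k)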
@@ -5,16 +5,16 @@ from fastapi.encoders import jsonable_encoder
 from fastapi.responses import JSONResponse


-from mods.Trainer_Speakers import mod_get_speakers
-from mods.Trainer_Training import mod_post_pre_training, mod_post_start_training, mod_post_stop_training, mod_get_related_files, mod_get_tail_training_log
-from mods.Trainer_Model import mod_get_model, mod_delete_model
+from restapi.mods.Trainer_Speakers import mod_get_speakers
+from restapi.mods.Trainer_Training import mod_post_pre_training, mod_post_start_training, mod_post_stop_training, mod_get_related_files, mod_get_tail_training_log
+from restapi.mods.Trainer_Model import mod_get_model, mod_delete_model

-from mods.Trainer_Models import mod_get_models
-from mods.Trainer_MultiSpeakerSetting import mod_get_multi_speaker_setting, mod_post_multi_speaker_setting
-from mods.Trainer_Speaker_Voice import mod_get_speaker_voice
-from mods.Trainer_Speaker_Voices import mod_get_speaker_voices
+from restapi.mods.Trainer_Models import mod_get_models
+from restapi.mods.Trainer_MultiSpeakerSetting import mod_get_multi_speaker_setting, mod_post_multi_speaker_setting
+from restapi.mods.Trainer_Speaker_Voice import mod_get_speaker_voice
+from restapi.mods.Trainer_Speaker_Voices import mod_get_speaker_voices

-from mods.Trainer_Speaker import mod_delete_speaker
+from restapi.mods.Trainer_Speaker import mod_delete_speaker
 from dataclasses import dataclass

 INFO_DIR = "info"
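
This hunk (and the one below) is the restructure in miniature: the trainer `mods` package now lives inside `restapi`, so every `from mods.X import ...` gains the `restapi.` prefix. Assuming a conventional package layout (plus the `__init__.py` files that make it importable), the tree implied by the new imports is:

    restapi/
        MMVC_Rest.py
        mods/
            Trainer_Speaker.py
            Trainer_Speakers.py
            Trainer_Training.py
            Trainer_Model.py
            Trainer_Models.py
            Trainer_MultiSpeakerSetting.py
            Trainer_Speaker_Voice.py
            Trainer_Speaker_Voices.py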
@@ -1,5 +1,5 @@
 import shutil
-from mods.Trainer_MultiSpeakerSetting import MULTI_SPEAKER_SETTING_PATH
+from restapi.mods.Trainer_MultiSpeakerSetting import MULTI_SPEAKER_SETTING_PATH

 def mod_delete_speaker(speaker:str):
     shutil.rmtree(f"MMVC_Trainer/dataset/textful/{speaker}")