Mirror of https://github.com/w-okada/voice-changer.git, synced 2025-03-15 04:13:57 +03:00
WIP: refactoring
This commit is contained in: parent bd3667117e, commit c96609640d
server/MMVCServerSIO.py

@@ -16,6 +16,9 @@ from restapi.MMVC_Rest import MMVC_Rest
 from const import NATIVE_CLIENT_FILE_MAC, NATIVE_CLIENT_FILE_WIN, SSL_KEY_DIR
 import subprocess
 import multiprocessing as mp
+from misc.log_control import setup_loggers
+
+setup_loggers()


 def setupArgParser():
server/misc/log_control.py

@@ -8,32 +8,31 @@ class UvicornSuppressFilter(logging.Filter):
         return False


-# logger = logging.getLogger("uvicorn.error")
-# logger.addFilter(UvicornSuppressFilter())
+def setup_loggers():
+    # logger = logging.getLogger("uvicorn.error")
+    # logger.addFilter(UvicornSuppressFilter())

-logger = logging.getLogger("fairseq.tasks.hubert_pretraining")
-logger.addFilter(UvicornSuppressFilter())
+    logger = logging.getLogger("fairseq.tasks.hubert_pretraining")
+    logger.addFilter(UvicornSuppressFilter())

-logger = logging.getLogger("fairseq.models.hubert.hubert")
-logger.addFilter(UvicornSuppressFilter())
+    logger = logging.getLogger("fairseq.models.hubert.hubert")
+    logger.addFilter(UvicornSuppressFilter())

-logger = logging.getLogger("fairseq.tasks.text_to_speech")
-logger.addFilter(UvicornSuppressFilter())
+    logger = logging.getLogger("fairseq.tasks.text_to_speech")
+    logger.addFilter(UvicornSuppressFilter())

-
-logger = logging.getLogger("numba.core.ssa")
-logger.addFilter(UvicornSuppressFilter())
+    logger = logging.getLogger("numba.core.ssa")
+    logger.addFilter(UvicornSuppressFilter())

-logger = logging.getLogger("numba.core.interpreter")
-logger.addFilter(UvicornSuppressFilter())
+    logger = logging.getLogger("numba.core.interpreter")
+    logger.addFilter(UvicornSuppressFilter())

-logger = logging.getLogger("numba.core.byteflow")
-logger.addFilter(UvicornSuppressFilter())
+    logger = logging.getLogger("numba.core.byteflow")
+    logger.addFilter(UvicornSuppressFilter())

-
-# logger.propagate = False
+    # logger.propagate = False

-logger = logging.getLogger("multipart.multipart")
-logger.propagate = False
+    logger = logging.getLogger("multipart.multipart")
+    logger.propagate = False

-logging.getLogger('asyncio').setLevel(logging.WARNING)
+    logging.getLogger("asyncio").setLevel(logging.WARNING)
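Note on this hunk: a logging.Filter whose filter() returns False drops every record from the logger it is attached to, which is how these chatty third-party loggers are silenced. A minimal self-contained sketch of the pattern (logger names taken from the diff):

    import logging


    class UvicornSuppressFilter(logging.Filter):
        def filter(self, record):
            return False  # reject every record on this logger


    def setup_loggers():
        # silence a noisy third-party logger entirely
        logging.getLogger("fairseq.tasks.hubert_pretraining").addFilter(
            UvicornSuppressFilter()
        )
        # asyncio is merely capped at WARNING rather than filtered out
        logging.getLogger("asyncio").setLevel(logging.WARNING)


    setup_loggers()

Wrapping the calls in setup_loggers() instead of running them at import time lets the server entry point decide exactly when logging is configured.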
server/voice_changer/MMVCv13/MMVCv13.py

@@ -1,6 +1,10 @@
 import sys
 import os
-if sys.platform.startswith('darwin'):
+
+from voice_changer.utils.LoadModelParams import LoadModelParams
+from voice_changer.utils.VoiceChangerModel import AudioInOut
+
+if sys.platform.startswith("darwin"):
     baseDir = [x for x in sys.path if x.endswith("Contents/MacOS")]
     if len(baseDir) != 1:
         print("baseDir should be only one ", baseDir)
@@ -12,23 +16,32 @@ else:
     sys.path.append(modulePath)


-from dataclasses import dataclass, asdict
+from dataclasses import dataclass, asdict, field
 import numpy as np
 import torch
 import onnxruntime
-import pyworld as pw

-from symbols import symbols
-from models import SynthesizerTrn
-from voice_changer.MMVCv13.TrainerFunctions import TextAudioSpeakerCollate, spectrogram_torch, load_checkpoint, get_hparams_from_file
+from symbols import symbols  # type:ignore
+from models import SynthesizerTrn  # type:ignore
+from voice_changer.MMVCv13.TrainerFunctions import (
+    TextAudioSpeakerCollate,
+    spectrogram_torch,
+    load_checkpoint,
+    get_hparams_from_file,
+)

 from Exceptions import NoModeLoadedException

-providers = ['OpenVINOExecutionProvider', "CUDAExecutionProvider", "DmlExecutionProvider", "CPUExecutionProvider"]
+providers = [
+    "OpenVINOExecutionProvider",
+    "CUDAExecutionProvider",
+    "DmlExecutionProvider",
+    "CPUExecutionProvider",
+]


 @dataclass
-class MMVCv13Settings():
+class MMVCv13Settings:
     gpu: int = 0
     srcId: int = 0
     dstId: int = 101
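The providers list above is a priority order: onnxruntime tries the entries left to right and binds the session to the first provider the installed build actually supports (behavior for unavailable entries varies by version; some releases warn and fall through, stricter ones raise). A hedged sketch, with a placeholder model path:

    import onnxruntime

    providers = [
        "OpenVINOExecutionProvider",
        "CUDAExecutionProvider",
        "DmlExecutionProvider",
        "CPUExecutionProvider",
    ]
    # "model.onnx" is a placeholder; any valid ONNX model file works here
    session = onnxruntime.InferenceSession("model.onnx", providers=providers)
    print(session.get_providers())  # the providers the session actually selected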
@@ -40,11 +53,13 @@ class MMVCv13Settings():

     # list only the mutable fields here
     intData = ["gpu", "srcId", "dstId"]
-    floatData = []
+    floatData: list[str] = field(default_factory=lambda: [])
     strData = ["framework"]


 class MMVCv13:
+    audio_buffer: AudioInOut | None = None
+
     def __init__(self):
         self.settings = MMVCv13Settings()
         self.net_g = None
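The floatData change is a correctness fix, not just style: once the attribute carries a list[str] annotation it becomes a dataclass field, and dataclasses reject mutable field defaults such as a bare list literal, so the empty list must come from field(default_factory=...). A minimal sketch:

    from dataclasses import dataclass, field


    @dataclass
    class Settings:
        # an annotated `floatData: list[str] = []` would raise ValueError at
        # class definition time ("mutable default ... use default_factory")
        floatData: list[str] = field(default_factory=list)


    a, b = Settings(), Settings()
    assert a.floatData is not b.floatData  # each instance gets its own list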
@@ -53,51 +68,62 @@ class MMVCv13:
         self.gpu_num = torch.cuda.device_count()
         self.text_norm = torch.LongTensor([0, 6, 0])

-    def loadModel(self, props):
-        self.settings.configFile = props["files"]["configFilename"]
+    def loadModel(self, props: LoadModelParams):
+        self.settings.configFile = props.files.configFilename
         self.hps = get_hparams_from_file(self.settings.configFile)

-        self.settings.pyTorchModelFile = props["files"]["pyTorchModelFilename"]
-        self.settings.onnxModelFile = props["files"]["onnxModelFilename"]
+        self.settings.pyTorchModelFile = props.files.pyTorchModelFilename
+        self.settings.onnxModelFile = props.files.onnxModelFilename

         # build the PyTorch model
-        if self.settings.pyTorchModelFile != None:
+        if self.settings.pyTorchModelFile is not None:
             self.net_g = SynthesizerTrn(
                 len(symbols),
                 self.hps.data.filter_length // 2 + 1,
                 self.hps.train.segment_size // self.hps.data.hop_length,
                 n_speakers=self.hps.data.n_speakers,
-                **self.hps.model)
+                **self.hps.model
+            )
             self.net_g.eval()
             load_checkpoint(self.settings.pyTorchModelFile, self.net_g, None)

         # build the ONNX model
-        if self.settings.onnxModelFile != None:
+        if self.settings.onnxModelFile is not None:
             ort_options = onnxruntime.SessionOptions()
             ort_options.intra_op_num_threads = 8
             self.onnx_session = onnxruntime.InferenceSession(
-                self.settings.onnxModelFile,
-                providers=providers
+                self.settings.onnxModelFile, providers=providers
             )
         return self.get_info()

-    def update_settings(self, key: str, val: any):
-        if key == "onnxExecutionProvider" and self.onnx_session != None:
+    def update_settings(self, key: str, val: int | float | str):
+        if key == "onnxExecutionProvider" and self.onnx_session is not None:
             if val == "CUDAExecutionProvider":
                 if self.settings.gpu < 0 or self.settings.gpu >= self.gpu_num:
                     self.settings.gpu = 0
-                provider_options = [{'device_id': self.settings.gpu}]
-                self.onnx_session.set_providers(providers=[val], provider_options=provider_options)
+                provider_options = [{"device_id": self.settings.gpu}]
+                self.onnx_session.set_providers(
+                    providers=[val], provider_options=provider_options
+                )
             else:
                 self.onnx_session.set_providers(providers=[val])
         elif key in self.settings.intData:
-            setattr(self.settings, key, int(val))
-            if key == "gpu" and val >= 0 and val < self.gpu_num and self.onnx_session != None:
+            val = int(val)
+            setattr(self.settings, key, val)
+            if (
+                key == "gpu"
+                and val >= 0
+                and val < self.gpu_num
+                and self.onnx_session is not None
+            ):
                 providers = self.onnx_session.get_providers()
                 print("Providers:", providers)
                 if "CUDAExecutionProvider" in providers:
-                    provider_options = [{'device_id': self.settings.gpu}]
-                    self.onnx_session.set_providers(providers=["CUDAExecutionProvider"], provider_options=provider_options)
+                    provider_options = [{"device_id": self.settings.gpu}]
+                    self.onnx_session.set_providers(
+                        providers=["CUDAExecutionProvider"],
+                        provider_options=provider_options,
+                    )
         elif key in self.settings.floatData:
             setattr(self.settings, key, float(val))
         elif key in self.settings.strData:
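Beyond the reformatting, note how onnxruntime pairs options with providers in set_providers: provider_options is a list aligned positionally with providers. A standalone sketch of the GPU-switch path (model path is a placeholder; switching to CUDA of course requires a CUDA-enabled build):

    import onnxruntime

    session = onnxruntime.InferenceSession(
        "model.onnx", providers=["CPUExecutionProvider"]  # placeholder model file
    )
    # move inference to CUDA device 1; options align index-by-index with providers
    session.set_providers(
        providers=["CUDAExecutionProvider"],
        provider_options=[{"device_id": 1}],
    )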
@@ -110,10 +136,12 @@ class MMVCv13:
     def get_info(self):
         data = asdict(self.settings)

-        data["onnxExecutionProviders"] = self.onnx_session.get_providers() if self.onnx_session != None else []
+        data["onnxExecutionProviders"] = (
+            self.onnx_session.get_providers() if self.onnx_session is not None else []
+        )
         files = ["configFile", "pyTorchModelFile", "onnxModelFile"]
         for f in files:
-            if data[f] != None and os.path.exists(data[f]):
+            if data[f] is not None and os.path.exists(data[f]):
                 data[f] = os.path.basename(data[f])
             else:
                 data[f] = ""
@@ -121,22 +149,35 @@ class MMVCv13:
         return data

     def get_processing_sampling_rate(self):
-        if hasattr(self, "hps") == False:
+        if hasattr(self, "hps") is False:
             raise NoModeLoadedException("config")
         return self.hps.data.sampling_rate

-    def _get_spec(self, audio: any):
-        spec = spectrogram_torch(audio, self.hps.data.filter_length,
-                                 self.hps.data.sampling_rate, self.hps.data.hop_length, self.hps.data.win_length,
-                                 center=False)
+    def _get_spec(self, audio: AudioInOut):
+        spec = spectrogram_torch(
+            audio,
+            self.hps.data.filter_length,
+            self.hps.data.sampling_rate,
+            self.hps.data.hop_length,
+            self.hps.data.win_length,
+            center=False,
+        )
         spec = torch.squeeze(spec, 0)
         return spec

-    def generate_input(self, newData: any, inputSize: int, crossfadeSize: int, solaSearchFrame: int = 0):
+    def generate_input(
+        self,
+        newData: AudioInOut,
+        inputSize: int,
+        crossfadeSize: int,
+        solaSearchFrame: int = 0,
+    ):
         newData = newData.astype(np.float32) / self.hps.data.max_wav_value

-        if hasattr(self, "audio_buffer"):
-            self.audio_buffer = np.concatenate([self.audio_buffer, newData], 0)  # concatenate with past data
+        if self.audio_buffer is not None:
+            self.audio_buffer = np.concatenate(
+                [self.audio_buffer, newData], 0
+            )  # concatenate with past data
         else:
             self.audio_buffer = newData

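Replacing hasattr(self, "audio_buffer") with the class-level audio_buffer: AudioInOut | None = None declared earlier in this diff swaps a runtime attribute probe for a typed sentinel that type checkers can see. The pattern in isolation, as a sketch:

    import numpy as np


    class Buffered:
        audio_buffer: np.ndarray | None = None  # typed sentinel instead of hasattr()

        def push(self, new_data: np.ndarray) -> None:
            if self.audio_buffer is not None:
                # concatenate the new chunk onto the retained history
                self.audio_buffer = np.concatenate([self.audio_buffer, new_data], 0)
            else:
                self.audio_buffer = new_data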
@@ -145,9 +186,12 @@ class MMVCv13:
         if convertSize < 8192:
             convertSize = 8192
         if convertSize % self.hps.data.hop_length != 0:  # pad, since the model's output hop size truncates the remainder
-            convertSize = convertSize + (self.hps.data.hop_length - (convertSize % self.hps.data.hop_length))
+            convertSize = convertSize + (
+                self.hps.data.hop_length - (convertSize % self.hps.data.hop_length)
+            )

-        self.audio_buffer = self.audio_buffer[-1 * convertSize:]  # extract only the segment to convert
+        convertOffset = -1 * convertSize
+        self.audio_buffer = self.audio_buffer[convertOffset:]  # extract only the segment to convert

         audio = torch.FloatTensor(self.audio_buffer)
         audio_norm = audio.unsqueeze(0)  # unsqueeze
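The rounding logic above pads convertSize up to the next multiple of the model's hop length, since anything short of a full hop is truncated at the output. Worked example with an assumed hop length of 128:

    hop_length = 128  # assumed for illustration
    convertSize = 8200

    if convertSize % hop_length != 0:
        convertSize = convertSize + (hop_length - (convertSize % hop_length))

    assert convertSize == 8320  # 8200 rounded up to a multiple of 128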
@@ -160,25 +204,29 @@ class MMVCv13:
         return data

     def _onnx_inference(self, data):
-        if hasattr(self, "onnx_session") == False or self.onnx_session == None:
+        if hasattr(self, "onnx_session") is False or self.onnx_session is None:
             print("[Voice Changer] No ONNX session.")
             raise NoModeLoadedException("ONNX")

         x, x_lengths, spec, spec_lengths, y, y_lengths, sid_src = [x for x in data]
         sid_tgt1 = torch.LongTensor([self.settings.dstId])
         # if spec.size()[2] >= 8:
-        audio1 = self.onnx_session.run(
-            ["audio"],
-            {
-                "specs": spec.numpy(),
-                "lengths": spec_lengths.numpy(),
-                "sid_src": sid_src.numpy(),
-                "sid_tgt": sid_tgt1.numpy()
-            })[0][0, 0] * self.hps.data.max_wav_value
+        audio1 = (
+            self.onnx_session.run(
+                ["audio"],
+                {
+                    "specs": spec.numpy(),
+                    "lengths": spec_lengths.numpy(),
+                    "sid_src": sid_src.numpy(),
+                    "sid_tgt": sid_tgt1.numpy(),
+                },
+            )[0][0, 0]
+            * self.hps.data.max_wav_value
+        )
         return audio1

     def _pyTorch_inference(self, data):
-        if hasattr(self, "net_g") == False or self.net_g == None:
+        if hasattr(self, "net_g") is False or self.net_g is None:
             print("[Voice Changer] No pyTorch session.")
             raise NoModeLoadedException("pytorch")

@@ -188,11 +236,19 @@ class MMVCv13:
             dev = torch.device("cuda", index=self.settings.gpu)

         with torch.no_grad():
-            x, x_lengths, spec, spec_lengths, y, y_lengths, sid_src = [x.to(dev) for x in data]
+            x, x_lengths, spec, spec_lengths, y, y_lengths, sid_src = [
+                x.to(dev) for x in data
+            ]
             sid_target = torch.LongTensor([self.settings.dstId]).to(dev)

-            audio1 = (self.net_g.to(dev).voice_conversion(spec, spec_lengths, sid_src=sid_src,
-                                                          sid_tgt=sid_target)[0, 0].data * self.hps.data.max_wav_value)
+            audio1 = (
+                self.net_g.to(dev)
+                .voice_conversion(
+                    spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_target
+                )[0, 0]
+                .data
+                * self.hps.data.max_wav_value
+            )
             result = audio1.float().cpu().numpy()

             return result
@@ -208,7 +264,7 @@ class MMVCv13:
         del self.net_g
         del self.onnx_session
         remove_path = os.path.join("MMVC_Client_v13", "python")
-        sys.path = [x for x in sys.path if x.endswith(remove_path) == False]
+        sys.path = [x for x in sys.path if x.endswith(remove_path) is False]

         for key in list(sys.modules):
             val = sys.modules.get(key)
@@ -217,5 +273,5 @@ class MMVCv13:
                 if file_path.find(remove_path + os.path.sep) >= 0:
                     print("remove", key, file_path)
                     sys.modules.pop(key)
-            except Exception as e:
+            except:  # type:ignore
                 pass
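The except change widens the catch (a bare except also swallows KeyboardInterrupt and SystemExit), presumably tolerable here because the loop only skips modules it cannot inspect. A sketch of the module-unload pattern this method implements, kept to the narrower Exception:

    import os
    import sys

    remove_path = os.path.join("MMVC_Client_v13", "python")
    sys.path = [p for p in sys.path if not p.endswith(remove_path)]

    for key in list(sys.modules):
        try:
            file_path = sys.modules[key].__file__ or ""
            if remove_path + os.path.sep in file_path:
                sys.modules.pop(key)  # force a fresh import from the new path
        except Exception:
            pass  # builtins and namespace packages may lack __file__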
server/voice_changer/MMVCv13/TrainerFunctions.py

@@ -1,36 +1,58 @@
 import torch
-import os, sys, json
+import os
+import sys
+import json
 import logging

 logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
 logger = logging

 hann_window = {}


 def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
-    if torch.min(y) < -1.:
-        print('min value is ', torch.min(y))
-    if torch.max(y) > 1.:
-        print('max value is ', torch.max(y))
+    if torch.min(y) < -1.0:
+        print("min value is ", torch.min(y))
+    if torch.max(y) > 1.0:
+        print("max value is ", torch.max(y))

     global hann_window
-    dtype_device = str(y.dtype) + '_' + str(y.device)
-    wnsize_dtype_device = str(win_size) + '_' + dtype_device
+    dtype_device = str(y.dtype) + "_" + str(y.device)
+    wnsize_dtype_device = str(win_size) + "_" + dtype_device
     if wnsize_dtype_device not in hann_window:
-        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
+        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(
+            dtype=y.dtype, device=y.device
+        )

-    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
+    y = torch.nn.functional.pad(
+        y.unsqueeze(1),
+        (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),
+        mode="reflect",
+    )
     y = y.squeeze(1)

-    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
-                      center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=True)
+    spec = torch.stft(
+        y,
+        n_fft,
+        hop_length=hop_size,
+        win_length=win_size,
+        window=hann_window[wnsize_dtype_device],
+        center=center,
+        pad_mode="reflect",
+        normalized=False,
+        onesided=True,
+        return_complex=True,
+    )
     spec = torch.view_as_real(spec)

     spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
     return spec


-class TextAudioSpeakerCollate():
-    """ Zero-pads model inputs and targets
-    """
-    def __init__(self, return_ids=False, no_text = False):
+class TextAudioSpeakerCollate:
+    """Zero-pads model inputs and targets"""
+
+    def __init__(self, return_ids=False, no_text=False):
         self.return_ids = return_ids
         self.no_text = no_text

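The torch.stft rewrite follows the PyTorch-recommended migration: request return_complex=True, then convert back with torch.view_as_real before taking the magnitude sqrt(re^2 + im^2 + eps). A condensed runnable sketch with toy sizes:

    import torch

    y = torch.randn(1, 4096)  # (batch, samples) toy signal
    window = torch.hann_window(1024)

    spec = torch.stft(
        y, n_fft=1024, hop_length=256, win_length=1024, window=window,
        center=False, return_complex=True,  # complex output is now required
    )
    spec = torch.view_as_real(spec)               # (..., 2) real/imag pairs
    mag = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)  # magnitude spectrogram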
@@ -42,8 +64,8 @@ class TextAudioSpeakerCollate():
         """
         # Right zero-pad all one-hot text sequences to max input length
         _, ids_sorted_decreasing = torch.sort(
-            torch.LongTensor([x[1].size(1) for x in batch]),
-            dim=0, descending=True)
+            torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True
+        )

         max_text_len = max([len(x[0]) for x in batch])
         max_spec_len = max([x[1].size(1) for x in batch])
@@ -64,88 +86,108 @@ class TextAudioSpeakerCollate():
             row = batch[ids_sorted_decreasing[i]]

             text = row[0]
-            text_padded[i, :text.size(0)] = text
+            text_padded[i, : text.size(0)] = text
             text_lengths[i] = text.size(0)

             spec = row[1]
-            spec_padded[i, :, :spec.size(1)] = spec
+            spec_padded[i, :, : spec.size(1)] = spec
             spec_lengths[i] = spec.size(1)

             wav = row[2]
-            wav_padded[i, :, :wav.size(1)] = wav
+            wav_padded[i, :, : wav.size(1)] = wav
             wav_lengths[i] = wav.size(1)

             sid[i] = row[3]

         if self.return_ids:
-            return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, ids_sorted_decreasing
-        return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid
+            return (
+                text_padded,
+                text_lengths,
+                spec_padded,
+                spec_lengths,
+                wav_padded,
+                wav_lengths,
+                sid,
+                ids_sorted_decreasing,
+            )
+        return (
+            text_padded,
+            text_lengths,
+            spec_padded,
+            spec_lengths,
+            wav_padded,
+            wav_lengths,
+            sid,
+        )


 def load_checkpoint(checkpoint_path, model, optimizer=None):
-    assert os.path.isfile(checkpoint_path), f"No such file or directory: {checkpoint_path}"
-    checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
-    iteration = checkpoint_dict['iteration']
-    learning_rate = checkpoint_dict['learning_rate']
+    assert os.path.isfile(
+        checkpoint_path
+    ), f"No such file or directory: {checkpoint_path}"
+    checkpoint_dict = torch.load(checkpoint_path, map_location="cpu")
+    iteration = checkpoint_dict["iteration"]
+    learning_rate = checkpoint_dict["learning_rate"]
     if optimizer is not None:
-        optimizer.load_state_dict(checkpoint_dict['optimizer'])
-    saved_state_dict = checkpoint_dict['model']
-    if hasattr(model, 'module'):
+        optimizer.load_state_dict(checkpoint_dict["optimizer"])
+    saved_state_dict = checkpoint_dict["model"]
+    if hasattr(model, "module"):
         state_dict = model.module.state_dict()
     else:
         state_dict = model.state_dict()
-    new_state_dict= {}
+    new_state_dict = {}
     for k, v in state_dict.items():
         try:
             new_state_dict[k] = saved_state_dict[k]
         except:
             logger.info("%s is not in the checkpoint" % k)
             new_state_dict[k] = v
-    if hasattr(model, 'module'):
+    if hasattr(model, "module"):
         model.module.load_state_dict(new_state_dict)
     else:
         model.load_state_dict(new_state_dict)
-    logger.info("Loaded checkpoint '{}' (iteration {})" .format(
-        checkpoint_path, iteration))
+    logger.info(
+        "Loaded checkpoint '{}' (iteration {})".format(checkpoint_path, iteration)
+    )
     return model, optimizer, learning_rate, iteration


 def get_hparams_from_file(config_path):
     with open(config_path, "r") as f:
         data = f.read()
     config = json.loads(data)

-    hparams =HParams(**config)
+    hparams = HParams(**config)
     return hparams


-class HParams():
+class HParams:
     def __init__(self, **kwargs):
         for k, v in kwargs.items():
             if type(v) == dict:
                 v = HParams(**v)
             self[k] = v

     def keys(self):
         return self.__dict__.keys()

     def items(self):
         return self.__dict__.items()

     def values(self):
         return self.__dict__.values()

     def __len__(self):
         return len(self.__dict__)

     def __getitem__(self, key):
         return getattr(self, key)

     def __setitem__(self, key, value):
         return setattr(self, key, value)

     def __contains__(self, key):
         return key in self.__dict__

     def __repr__(self):
         return self.__dict__.__repr__()
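HParams itself is unchanged apart from the class header: it recursively wraps nested dicts so JSON config sections read as attributes. Usage sketch, assuming the HParams class above is in scope:

    config = {"data": {"sampling_rate": 24000, "hop_length": 128}}
    hps = HParams(**config)

    assert hps.data.sampling_rate == 24000  # nested dicts become nested HParams
    assert "data" in hps and len(hps) == 1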
server/voice_changer/VoiceChanger.py

@@ -124,6 +124,7 @@ class VoiceChanger:
         try:
             return self.voiceChanger.loadModel(props)
         except Exception as e:
+            print(traceback.format_exc())
             print("[Voice Changer] Model Load Error! Check your model is valid.", e)
             return {"status": "NG"}

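One caveat on the added line: print(traceback.format_exc()) assumes the module already imports traceback, which this hunk does not show. The pattern in isolation (function and parameter names here are hypothetical):

    import traceback


    def load_model(loader):
        try:
            return loader()
        except Exception as e:
            print(traceback.format_exc())  # full stack trace for debugging
            print("[Voice Changer] Model Load Error! Check your model is valid.", e)
            return {"status": "NG"}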