refactor main argument

This commit is contained in:
wataru 2023-04-19 03:06:45 +09:00
parent f4b93ac3e8
commit a82a1ecd71
9 changed files with 60 additions and 36 deletions

.gitignore

@@ -40,9 +40,6 @@ client/lib/worklet/dist
 docker/cudnn/
-server/hubert_base.pt
-server/hubert-soft-0d54a1f4.pt
-server/nsf_hifigan/
+server/weights/
 start_trainer.sh


@@ -5,3 +5,8 @@ class NoModeLoadedException(Exception):
     def __str__(self):
         return repr(f"No model for {self.framework} loaded. Please confirm the model uploaded.")
+
+
+class ONNXInputArgumentException(Exception):
+    def __str__(self):
+        return repr(f"ONNX received invalid argument.")


@@ -42,11 +42,18 @@ def setupArgParser():
     parser.add_argument("--cluster", type=str, help="path to cluster model")
     parser.add_argument("--internal", type=strtobool, default=False, help="各種パスをmac appの中身に変換")
-    parser.add_argument("--hubert", type=str, help="path to hubert model")
-    parser.add_argument("--useHubertOnnx", type=strtobool, default=False, help="use hubert onnx")
-    parser.add_argument("--hubertSoftPt", type=str, help="path to hubert-soft model(pytorch)")
-    parser.add_argument("--enhancerPt", type=str, help="path to enhancer model(pytorch)")
-    parser.add_argument("--enhancerOnnx", type=str, help="path to enhancer model(onnx)")
+    # parser.add_argument("--hubert", type=str, help="path to hubert model")
+    # parser.add_argument("--useHubertOnnx", type=strtobool, default=False, help="use hubert onnx")
+    # parser.add_argument("--hubertSoftPt", type=str, help="path to hubert-soft model(pytorch)")
+    # parser.add_argument("--enhancerPt", type=str, help="path to enhancer model(pytorch)")
+    # parser.add_argument("--enhancerOnnx", type=str, help="path to enhancer model(onnx)")
+    parser.add_argument("--content_vec_500", type=str, help="path to content_vec_500 model(pytorch)")
+    parser.add_argument("--content_vec_500_onnx", type=str, help="path to content_vec_500 model(onnx)")
+    parser.add_argument("--content_vec_500_onnx_on", type=strtobool, default=False, help="use or not onnx for content_vec_500")
+    parser.add_argument("--hubert_base", type=str, help="path to hubert_base model(pytorch)")
+    parser.add_argument("--hubert_soft", type=str, help="path to hubert_soft model(pytorch)")
+    parser.add_argument("--nsf_hifigan", type=str, help="path to nsf_hifigan model(pytorch)")
 
     return parser
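
For reference, a self-contained sketch of the reworked parser with only the new weight-path flags; the example paths passed to parse_args below are hypothetical and not taken from the commit:

import argparse
from distutils.util import strtobool

parser = argparse.ArgumentParser()
# The new flags are named after the weight file they point at, not the role it plays.
parser.add_argument("--content_vec_500", type=str, help="path to content_vec_500 model(pytorch)")
parser.add_argument("--content_vec_500_onnx", type=str, help="path to content_vec_500 model(onnx)")
parser.add_argument("--content_vec_500_onnx_on", type=strtobool, default=False, help="use or not onnx for content_vec_500")
parser.add_argument("--hubert_base", type=str, help="path to hubert_base model(pytorch)")
parser.add_argument("--hubert_soft", type=str, help="path to hubert_soft model(pytorch)")
parser.add_argument("--nsf_hifigan", type=str, help="path to nsf_hifigan model(pytorch)")

# Hypothetical invocation; actual paths depend on the deployment.
args = parser.parse_args([
    "--content_vec_500", "weights/checkpoint_best_legacy_500.pt",
    "--hubert_base", "weights/hubert_base.pt",
    "--content_vec_500_onnx_on", "0",
])
print(args.hubert_base)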
@@ -129,11 +136,12 @@ if args.colab == True:
 if __name__ == 'MMVCServerSIO':
     voiceChangerManager = VoiceChangerManager.get_instance({
-        "hubert": args.hubert,
-        "useHubertOnnx": args.useHubertOnnx,
-        "hubertSoftPt": args.hubertSoftPt,
-        "enhancerPt": args.enhancerPt,
-        "enhancerOnnx": args.enhancerOnnx
+        "content_vec_500": args.content_vec_500,
+        "content_vec_500_onnx": args.content_vec_500_onnx,
+        "content_vec_500_onnx_on": args.content_vec_500_onnx_on,
+        "hubert_base": args.hubert_base,
+        "hubert_soft": args.hubert_soft,
+        "nsf_hifigan": args.nsf_hifigan,
     })
 
     if CONFIG and (MODEL or ONNX_MODEL):
         if MODEL_TYPE == "MMVCv15" or MODEL_TYPE == "MMVCv13":
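
To make the data flow explicit, here is a sketch (not part of the commit) of which renamed key each model class below reads from the params dict handed to VoiceChangerManager.get_instance; the path values are placeholders:

# Assumed mapping, inferred from the per-model hunks in this commit.
params = {
    "content_vec_500": "weights/checkpoint_best_legacy_500.pt",  # SoVitsSvc40 / SoVitsSvc40v2 encoder
    "content_vec_500_onnx": "weights/content_vec_500.onnx",      # optional ONNX encoder for SoVitsSvc40
    "content_vec_500_onnx_on": False,                            # toggle for the ONNX encoder
    "hubert_base": "weights/hubert_base.pt",                     # RVC encoder, and fallback for content_vec_500
    "hubert_soft": "weights/hubert-soft-0d54a1f4.pt",            # DDSP_SVC units encoder
    "nsf_hifigan": "weights/nsf_hifigan/model",                  # DDSP_SVC enhancer
}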


@@ -84,7 +84,7 @@ class DDSP_SVC:
         print("-------------------hopsize", self.hop_size)
 
         # hubert
-        self.vec_path = self.params["hubertSoftPt"]
+        self.vec_path = self.params["hubert_soft"]
         self.encoder = vo.Units_Encoder(
             self.args.data.encoder,
             self.vec_path,
@@ -115,7 +115,7 @@
             float(1100))
         self.volume_extractor = vo.Volume_Extractor(self.hop_size)
 
-        self.enhancer_path = self.params["enhancerPt"]
+        self.enhancer_path = self.params["nsf_hifigan"]
         self.enhancer = Enhancer(self.args.enhancer.type, self.enhancer_path, device=self.useDevice())
         return self.get_info()


@@ -20,7 +20,7 @@ import pyworld as pw
 from models import SynthesizerTrn
 from voice_changer.MMVCv15.client_modules import convert_continuos_f0, spectrogram_torch, get_hparams_from_file, load_checkpoint
-from Exceptions import NoModeLoadedException
+from Exceptions import NoModeLoadedException, ONNXInputArgumentException
 
 providers = ['OpenVINOExecutionProvider', "CUDAExecutionProvider", "DmlExecutionProvider", "CPUExecutionProvider"]
@@ -241,11 +241,15 @@ class MMVCv15:
         return result
 
     def inference(self, data):
-        if self.settings.framework == "ONNX":
-            audio = self._onnx_inference(data)
-        else:
-            audio = self._pyTorch_inference(data)
-        return audio
+        try:
+            if self.settings.framework == "ONNX":
+                audio = self._onnx_inference(data)
+            else:
+                audio = self._pyTorch_inference(data)
+            return audio
+        except onnxruntime.capi.onnxruntime_pybind11_state.InvalidArgument as e:
+            print(e)
+            raise ONNXInputArgumentException()
 
     def __del__(self):
         del self.net_g
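
Why this matters: InvalidArgument is what onnxruntime raises when a feed has an unexpected name, dtype, or shape. A sketch of the kind of defensive preprocessing that avoids the most common trigger; the input name and shape below are assumptions, not the model's real signature:

import numpy as np


def to_onnx_feeds(audio_buffer) -> dict:
    # Cast and reshape explicitly; feeding float64 audio to a float32 graph is a
    # typical cause of InvalidArgument. The input name "audio" is hypothetical.
    audio = np.asarray(audio_buffer, dtype=np.float32)
    return {"audio": audio[np.newaxis, :]}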


@@ -94,7 +94,7 @@ class RVC:
         print("[Voice Changer] RVC loading... slot:", self.slot)
         try:
-            hubert_path = self.params["hubert"]
+            hubert_path = self.params["hubert_base"]
             models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task([hubert_path], suffix="",)
             model = models[0]
             model.eval()


@@ -83,22 +83,27 @@ class SoVitsSvc40:
             self.settings.onnxModelFile = props["files"]["onnxModelFilename"]
         clusterTorchModel = props["files"]["clusterTorchModelFilename"]
 
+        content_vec_path = self.params["content_vec_500"]
+        content_vec_hubert_onnx_path = self.params["content_vec_500_onnx"]
+        content_vec_hubert_onnx_on = self.params["content_vec_500_onnx_on"]
+        hubert_base_path = self.params["hubert_base"]
+
         # hubert model
         try:
-            hubert_path = self.params["hubert"]
-            useHubertOnnx = self.params["useHubertOnnx"]
-            self.useHubertOnnx = useHubertOnnx
-            if useHubertOnnx == True:
+            if os.path.exists(content_vec_path) == False:
+                content_vec_path = hubert_base_path
+
+            if content_vec_hubert_onnx_on == True:
                 ort_options = onnxruntime.SessionOptions()
                 ort_options.intra_op_num_threads = 8
                 self.hubert_onnx = onnxruntime.InferenceSession(
-                    HUBERT_ONNX_MODEL_PATH,
+                    content_vec_hubert_onnx_path,
                     providers=providers
                 )
             else:
                 models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
-                    [hubert_path],
+                    [content_vec_path],
                     suffix="",
                 )
                 model = models[0]
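
The new lookup logic reduces to a small fallback rule. A standalone sketch of just that rule (the helper name is hypothetical; the behaviour mirrors the check added in SoVitsSvc40 and, below, SoVitsSvc40v2):

import os


def resolve_encoder_path(content_vec_path: str, hubert_base_path: str) -> str:
    # Prefer the content_vec_500 checkpoint; if it is missing on disk, fall back
    # to the hubert_base weight instead of failing to load the model.
    if not os.path.exists(content_vec_path):
        return hubert_base_path
    return content_vec_path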


@@ -78,16 +78,18 @@ class SoVitsSvc40v2:
             self.settings.onnxModelFile = props["files"]["onnxModelFilename"]
         clusterTorchModel = props["files"]["clusterTorchModelFilename"]
 
+        content_vec_path = self.params["content_vec_500"]
+        # content_vec_hubert_onnx_path = self.params["content_vec_500_onnx"]
+        # content_vec_hubert_onnx_on = self.params["content_vec_500_onnx_on"]
+        hubert_base_path = self.params["hubert_base"]
+
         # hubert model
         try:
-            # if sys.platform.startswith('darwin'):
-            #     vec_path = os.path.join(sys._MEIPASS, "hubert/checkpoint_best_legacy_500.pt")
-            # else:
-            #     vec_path = "hubert/checkpoint_best_legacy_500.pt"
-            vec_path = self.params["hubert"]
+            if os.path.exists(content_vec_path) == False:
+                content_vec_path = hubert_base_path
+
             models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
-                [vec_path],
+                [content_vec_path],
                 suffix="",
             )
             model = models[0]


@@ -14,7 +14,7 @@ from voice_changer.IORecorder import IORecorder
 from voice_changer.utils.Timer import Timer
 from voice_changer.utils.VoiceChangerModel import VoiceChangerModel, AudioInOut
 import time
-from Exceptions import NoModeLoadedException
+from Exceptions import NoModeLoadedException, ONNXInputArgumentException
 
 providers = ['OpenVINOExecutionProvider', "CUDAExecutionProvider", "DmlExecutionProvider", "CPUExecutionProvider"]
@@ -289,6 +289,9 @@ class VoiceChanger():
         except NoModeLoadedException as e:
             print("[Voice Changer] [Exception]", e)
             return np.zeros(1).astype(np.int16), [0, 0, 0]
+        except ONNXInputArgumentException as e:
+            print("[Voice Changer] [Exception]", e)
+            return np.zeros(1).astype(np.int16), [0, 0, 0]
         except Exception as e:
             print("VC PROCESSING!!!! EXCEPTION!!!", e)
             print(traceback.format_exc())
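
A sketch of the resulting degradation behaviour. The wrapper below is hypothetical; the exception types and the silent-frame fallback come from the hunk above:

import numpy as np

from Exceptions import NoModeLoadedException, ONNXInputArgumentException


def safe_on_request(run_inference, received_data):
    # Known, recoverable errors return a 1-sample silent frame and zeroed timing
    # stats instead of killing the realtime audio loop.
    try:
        return run_inference(received_data)
    except (NoModeLoadedException, ONNXInputArgumentException) as e:
        print("[Voice Changer] [Exception]", e)
        return np.zeros(1).astype(np.int16), [0, 0, 0]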