refactoring

This commit is contained in:
wataru 2023-04-19 03:35:04 +09:00
parent a82a1ecd71
commit 3853bd6ccf
3 changed files with 83 additions and 174 deletions

View File

@ -9,24 +9,18 @@ import platform
import os
import argparse
import uvicorn
import webbrowser
from mods.ssl import create_self_signed_cert
from voice_changer.VoiceChangerManager import VoiceChangerManager
from sio.MMVC_SocketIOApp import MMVC_SocketIOApp
from restapi.MMVC_Rest import MMVC_Rest
from const import NATIVE_CLIENT_FILE_MAC, NATIVE_CLIENT_FILE_WIN, SSL_KEY_DIR, setModelType
from const import NATIVE_CLIENT_FILE_MAC, NATIVE_CLIENT_FILE_WIN, SSL_KEY_DIR
import subprocess
import multiprocessing as mp
def setupArgParser():
parser = argparse.ArgumentParser()
parser.add_argument("-t", type=str, default="MMVC",
help="Server type. MMVC|TRAIN")
parser.add_argument("-p", type=int, default=18888, help="port")
parser.add_argument("-c", type=str, help="path for the config.json")
parser.add_argument("-m", type=str, help="path for the model file")
parser.add_argument("-o", type=str, help="path for the onnx model file")
parser.add_argument("--https", type=strtobool,
default=False, help="use https")
parser.add_argument("--httpsKey", type=str,
@ -35,18 +29,8 @@ def setupArgParser():
default="ssl.cert", help="path for the cert of https")
parser.add_argument("--httpsSelfSigned", type=strtobool,
default=True, help="generate self-signed certificate")
parser.add_argument("--colab", type=strtobool,
default=False, help="run on colab")
parser.add_argument("--modelType", type=str,
default="MMVCv15", help="model type: MMVCv13, MMVCv15, so-vits-svc-40, so-vits-svc-40v2")
parser.add_argument("--cluster", type=str, help="path to cluster model")
parser.add_argument("--internal", type=strtobool, default=False, help="各種パスをmac appの中身に変換")
# parser.add_argument("--hubert", type=str, help="path to hubert model")
# parser.add_argument("--useHubertOnnx", type=strtobool, default=False, help="use hubert onnx")
# parser.add_argument("--hubertSoftPt", type=str, help="path to hubert-soft model(pytorch)")
# parser.add_argument("--enhancerPt", type=str, help="path to enhancer model(pytorch)")
# parser.add_argument("--enhancerOnnx", type=str, help="path to enhancer model(onnx)")
# parser.add_argument("--internal", type=strtobool, default=False, help="各種パスをmac appの中身に変換")
parser.add_argument("--content_vec_500", type=str, help="path to content_vec_500 model(pytorch)")
parser.add_argument("--content_vec_500_onnx", type=str, help="path to content_vec_500 model(onnx)")
@ -83,42 +67,9 @@ def printMessage(message, level=0):
parser = setupArgParser()
args, unknown = parser.parse_known_args()
# printMessage(f"Phase name:{__name__}", level=2)
# thisFilename = os.path.basename(__file__)[:-3]
# if __name__ == thisFilename or args.colab == True:
# printMessage(f"PHASE3:{__name__}", level=2)
printMessage(f"Booting PHASE :{__name__}", level=2)
TYPE = args.t
PORT = args.p
CONFIG = args.c
MODEL = args.m
ONNX_MODEL = args.o
CLUSTER_MODEL = args.cluster if args.cluster != None else None
if args.internal and hasattr(sys, "_MEIPASS"):
print("use internal path")
if CONFIG != None:
CONFIG = os.path.join(sys._MEIPASS, CONFIG)
if MODEL != None:
MODEL = os.path.join(sys._MEIPASS, MODEL)
if ONNX_MODEL:
ONNX_MODEL = os.path.join(sys._MEIPASS, ONNX_MODEL)
if CLUSTER_MODEL:
CLUSTER_MODEL = os.path.join(sys._MEIPASS, CLUSTER_MODEL)
print(" config path:", CONFIG)
print(" model path:", MODEL)
print(" onnx model path:", ONNX_MODEL)
print(" cluster model path:", CLUSTER_MODEL)
MODEL_TYPE = os.environ.get('MODEL_TYPE', None)
if MODEL_TYPE == None:
MODEL_TYPE = args.modelType
setModelType(MODEL_TYPE)
def localServer():
@ -131,9 +82,6 @@ def localServer():
)
if args.colab == True:
os.environ["colab"] = "True"
if __name__ == 'MMVCServerSIO':
voiceChangerManager = VoiceChangerManager.get_instance({
"content_vec_500": args.content_vec_500,
@ -143,13 +91,6 @@ if __name__ == 'MMVCServerSIO':
"hubert_soft": args.hubert_soft,
"nsf_hifigan": args.nsf_hifigan,
})
if CONFIG and (MODEL or ONNX_MODEL):
if MODEL_TYPE == "MMVCv15" or MODEL_TYPE == "MMVCv13":
voiceChangerManager.loadModel(CONFIG, MODEL, ONNX_MODEL, None)
elif MODEL_TYPE == "so-vits-svc-40" or MODEL_TYPE == "so-vits-svc-40v2" or MODEL_TYPE == "so-vits-svc-40_c":
voiceChangerManager.loadModel(CONFIG, MODEL, ONNX_MODEL, CLUSTER_MODEL)
else:
voiceChangerManager.loadModel(CONFIG, MODEL, ONNX_MODEL, CLUSTER_MODEL)
app_fastapi = MMVC_Rest.get_instance(voiceChangerManager)
app_socketio = MMVC_SocketIOApp.get_instance(app_fastapi, voiceChangerManager)
@ -162,94 +103,74 @@ if __name__ == '__main__':
mp.freeze_support()
printMessage(f"Voice Changerを起動しています。", level=2)
TYPE = args.t
PORT = args.p
CONFIG = args.c
MODEL = args.m if args.m != None else None
ONNX_MODEL = args.o if args.o != None else None
if TYPE != "MMVC" and TYPE != "TRAIN":
print("Type(-t) should be MMVC or TRAIN")
exit(1)
# printMessage(f"Start MMVC SocketIO Server", level=0)
printMessage(f"-- 設定 -- ", level=1)
printMessage(f"CONFIG:{CONFIG}, MODEL:{MODEL} ONNX_MODEL:{ONNX_MODEL}", level=1)
if args.colab == False:
if os.getenv("EX_PORT"):
EX_PORT = os.environ["EX_PORT"]
printMessage(
f"External_Port:{EX_PORT} Internal_Port:{PORT}", level=1)
else:
printMessage(f"Internal_Port:{PORT}", level=1)
if os.getenv("EX_TB_PORT"):
EX_TB_PORT = os.environ["EX_TB_PORT"]
printMessage(f"External_TeonsorBord_Port:{EX_TB_PORT}", level=1)
if os.getenv("EX_IP"):
EX_IP = os.environ["EX_IP"]
printMessage(f"External_IP:{EX_IP}", level=1)
# HTTPS key/cert作成
if args.https and args.httpsSelfSigned == 1:
# HTTPS(おれおれ証明書生成)
os.makedirs(SSL_KEY_DIR, exist_ok=True)
key_base_name = f"{datetime.now().strftime('%Y%m%d_%H%M%S')}"
keyname = f"{key_base_name}.key"
certname = f"{key_base_name}.cert"
create_self_signed_cert(certname, keyname, certargs={"Country": "JP",
"State": "Tokyo",
"City": "Chuo-ku",
"Organization": "F",
"Org. Unit": "F"}, cert_dir=SSL_KEY_DIR)
key_path = os.path.join(SSL_KEY_DIR, keyname)
cert_path = os.path.join(SSL_KEY_DIR, certname)
printMessage(
f"protocol: HTTPS(self-signed), key:{key_path}, cert:{cert_path}", level=1)
elif args.https and args.httpsSelfSigned == 0:
# HTTPS
key_path = args.httpsKey
cert_path = args.httpsCert
printMessage(
f"protocol: HTTPS, key:{key_path}, cert:{cert_path}", level=1)
else:
# HTTP
printMessage(f"protocol: HTTP", level=1)
printMessage(f"-- ---- -- ", level=1)
# アドレス表示
if os.getenv("EX_PORT"):
EX_PORT = os.environ["EX_PORT"]
printMessage(
f"ブラウザで次のURLを開いてください.", level=2)
f"External_Port:{EX_PORT} Internal_Port:{PORT}", level=1)
else:
printMessage(f"Internal_Port:{PORT}", level=1)
if os.getenv("EX_IP"):
EX_IP = os.environ["EX_IP"]
printMessage(f"External_IP:{EX_IP}", level=1)
# HTTPS key/cert作成
if args.https and args.httpsSelfSigned == 1:
# HTTPS(おれおれ証明書生成)
os.makedirs(SSL_KEY_DIR, exist_ok=True)
key_base_name = f"{datetime.now().strftime('%Y%m%d_%H%M%S')}"
keyname = f"{key_base_name}.key"
certname = f"{key_base_name}.cert"
create_self_signed_cert(certname, keyname, certargs={"Country": "JP",
"State": "Tokyo",
"City": "Chuo-ku",
"Organization": "F",
"Org. Unit": "F"}, cert_dir=SSL_KEY_DIR)
key_path = os.path.join(SSL_KEY_DIR, keyname)
cert_path = os.path.join(SSL_KEY_DIR, certname)
printMessage(
f"protocol: HTTPS(self-signed), key:{key_path}, cert:{cert_path}", level=1)
elif args.https and args.httpsSelfSigned == 0:
# HTTPS
key_path = args.httpsKey
cert_path = args.httpsCert
printMessage(
f"protocol: HTTPS, key:{key_path}, cert:{cert_path}", level=1)
else:
# HTTP
printMessage(f"protocol: HTTP", level=1)
printMessage(f"-- ---- -- ", level=1)
# アドレス表示
printMessage(
f"ブラウザで次のURLを開いてください.", level=2)
if args.https == 1:
printMessage(
f"https://<IP>:<PORT>/", level=1)
else:
printMessage(
f"http://<IP>:<PORT>/", level=1)
printMessage(f"多くの場合は次のいずれかのURLにアクセスすると起動します。", level=2)
if "EX_PORT" in locals() and "EX_IP" in locals(): # シェルスクリプト経由起動(docker)
if args.https == 1:
printMessage(
f"https://<IP>:<PORT>/", level=1)
printMessage(f"https://localhost:{EX_PORT}/", level=1)
for ip in EX_IP.strip().split(" "):
printMessage(f"https://{ip}:{EX_PORT}/", level=1)
else:
printMessage(
f"http://<IP>:<PORT>/", level=1)
if TYPE == "MMVC":
path = f""
printMessage(f"http://localhost:{EX_PORT}/", level=1)
else: # 直接python起動
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
hostname = s.getsockname()[0]
if args.https == 1:
printMessage(f"https://localhost:{PORT}/", level=1)
printMessage(f"https://{hostname}:{PORT}/", level=1)
else:
path = "trainer"
printMessage(f"多くの場合は次のいずれかのURLにアクセスすると起動します。", level=2)
if "EX_PORT" in locals() and "EX_IP" in locals(): # シェルスクリプト経由起動(docker)
if args.https == 1:
printMessage(f"https://localhost:{EX_PORT}/{path}", level=1)
for ip in EX_IP.strip().split(" "):
printMessage(f"https://{ip}:{EX_PORT}/{path}", level=1)
else:
printMessage(f"http://localhost:{EX_PORT}/{path}", level=1)
else: # 直接python起動
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
hostname = s.getsockname()[0]
if args.https == 1:
printMessage(f"https://localhost:{PORT}/{path}", level=1)
printMessage(f"https://{hostname}:{PORT}/{path}", level=1)
else:
printMessage(f"http://localhost:{PORT}/{path}", level=1)
printMessage(f"http://localhost:{PORT}/", level=1)
# サーバ起動
if args.https:
@ -273,19 +194,10 @@ if __name__ == '__main__':
log_level="warning"
)
else:
# uvicorn.run(
# f"{os.path.basename(__file__)[:-3]}:app_socketio",
# host="0.0.0.0",
# port=int(PORT),
# reload = False if hasattr(sys, "_MEIPASS") else True,
# log_level="warning"
# )
os.environ['MODEL_TYPE'] = MODEL_TYPE
p = mp.Process(name="p", target=localServer)
p.start()
try:
if sys.platform.startswith('win'):
# process = subprocess.Popen(["voice-changer-native-client.exe", "-u", f"http://localhost:{PORT}/{path}"])
process = subprocess.Popen([NATIVE_CLIENT_FILE_WIN, "-u", f"http://localhost:{PORT}/{path}"])
return_code = process.wait()
print("client closed.")

View File

@ -26,16 +26,12 @@ TMP_DIR = os.path.join(tmpdir.name, "tmp_dir") if hasattr(sys, "_MEIPASS") else
os.makedirs(TMP_DIR, exist_ok=True)
modelType: ModelType = "MMVCv15"
def getModelType() -> ModelType:
    """Return the process-wide model type currently stored in the module-level
    `modelType` global (initialized to "MMVCv15" at module load)."""
    return modelType
def setModelType(_modelType: ModelType):
    """Overwrite the process-wide model type.

    Args:
        _modelType: the new model type value; presumably one of the
            ModelType literals such as "MMVCv13", "MMVCv15",
            "so-vits-svc-40", "so-vits-svc-40v2" — TODO confirm against
            the ModelType definition.

    Note: mutates the module-level `modelType` global read by
    getModelType(); callers in other modules observe the change.
    """
    global modelType
    modelType = _modelType
# modelType: ModelType = "MMVCv15"
# def getModelType() -> ModelType:
# return modelType
# def setModelType(_modelType: ModelType):
# global modelType
# modelType = _modelType
def getFrontendPath():

View File

@ -84,8 +84,8 @@ class SoVitsSvc40:
clusterTorchModel = props["files"]["clusterTorchModelFilename"]
content_vec_path = self.params["content_vec_500"]
content_vec_hubert_onnx_path = self.params["content_vec_500_onnx"]
content_vec_hubert_onnx_on = self.params["content_vec_500_onnx_on"]
content_vec_onnx_path = self.params["content_vec_500_onnx"]
content_vec_onnx_on = self.params["content_vec_500_onnx_on"]
hubert_base_path = self.params["hubert_base"]
# hubert model
@ -94,11 +94,11 @@ class SoVitsSvc40:
if os.path.exists(content_vec_path) == False:
content_vec_path = hubert_base_path
if content_vec_hubert_onnx_on == True:
if content_vec_onnx_on == True:
ort_options = onnxruntime.SessionOptions()
ort_options.intra_op_num_threads = 8
self.hubert_onnx = onnxruntime.InferenceSession(
content_vec_hubert_onnx_path,
self.content_vec_onnx = onnxruntime.InferenceSession(
content_vec_onnx_path,
providers=providers
)
else:
@ -154,12 +154,12 @@ class SoVitsSvc40:
self.settings.gpu = 0
provider_options = [{'device_id': self.settings.gpu}]
self.onnx_session.set_providers(providers=[val], provider_options=provider_options)
if hasattr(self, "hubert_onnx"):
self.hubert_onnx.set_providers(providers=[val], provider_options=provider_options)
if hasattr(self, "content_vec_onnx"):
self.content_vec_onnx.set_providers(providers=[val], provider_options=provider_options)
else:
self.onnx_session.set_providers(providers=[val])
if hasattr(self, "hubert_onnx"):
self.hubert_onnx.set_providers(providers=[val])
if hasattr(self, "content_vec_onnx"):
self.content_vec_onnx.set_providers(providers=[val])
elif key == "onnxExecutionProvider" and self.onnx_session == None:
print("Onnx is not enabled. Please load model.")
return False
@ -227,14 +227,14 @@ class SoVitsSvc40:
else:
dev = torch.device("cuda", index=self.settings.gpu)
if hasattr(self, "hubert_onnx"):
c = self.hubert_onnx.run(
if hasattr(self, "content_vec_onnx"):
c = self.content_vec_onnx.run(
["units"],
{
"audio": wav16k_numpy.reshape(1, -1),
})
c = torch.from_numpy(np.array(c)).squeeze(0).transpose(1, 2)
# print("onnx hubert:", self.hubert_onnx.get_providers())
# print("onnx hubert:", self.content_vec_onnx.get_providers())
else:
if self.hps.model.ssl_dim == 768:
self.hubert_model = self.hubert_model.to(dev)
@ -257,6 +257,7 @@ class SoVitsSvc40:
else:
cluster_c = cluster.get_cluster_center_result(self.cluster_model, c.cpu().numpy().T, speaker[0]).T
cluster_c = torch.FloatTensor(cluster_c).to(dev)
c = c.to(dev)
c = self.settings.clusterInferRatio * cluster_c + (1 - self.settings.clusterInferRatio) * c
c = c.unsqueeze(0)