WIP: multi 2

Mirror of https://github.com/w-okada/voice-changer.git
Commit e2f08a17d4 (parent e1e1f94fa3)
client/demo/dist/index.js (vendored): file diff suppressed because one or more lines are too long.
@@ -110,10 +110,11 @@ export class ServerConfigurator {
     }

     // !! NOTE !! hubertTorchModel is overwritten with a fixed value, so setting it here has no effect.
-    loadModel = async (configFilename: string, pyTorchModelFilename: string | null, onnxModelFilename: string | null, clusterTorchModelFilename: string | null, featureFilename: string | null, indexFilename: string | null, isHalf: boolean) => {
+    loadModel = async (slot: number | null, configFilename: string, pyTorchModelFilename: string | null, onnxModelFilename: string | null, clusterTorchModelFilename: string | null, featureFilename: string | null, indexFilename: string | null, isHalf: boolean) => {
         const url = this.serverUrl + "/load_model"
         const info = new Promise<ServerInfo>(async (resolve) => {
             const formData = new FormData();
+            formData.append("slot", "" + slot || "-");
             formData.append("pyTorchModelFilename", pyTorchModelFilename || "-");
             formData.append("onnxModelFilename", onnxModelFilename || "-");
             formData.append("configFilename", configFilename);
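On the wire, the new slot field simply travels with the existing form fields to /load_model. Below is a minimal sketch of an equivalent request made from Python, assuming a placeholder server address and filenames that were already uploaded through the existing upload endpoints; none of the concrete values are taken from this commit.

    # Sketch only: requests, SERVER_URL and the filenames are illustrative.
    import requests

    SERVER_URL = "http://localhost:18888"  # hypothetical address

    form = {
        "slot": "0",                          # new in this commit; the client sends "-" when unset
        "configFilename": "config.json",      # placeholder filenames
        "pyTorchModelFilename": "model.pth",
        "onnxModelFilename": "-",             # "-" marks "not provided"
        "clusterTorchModelFilename": "-",
        "featureFilename": "-",
        "indexFilename": "-",
        "isHalf": "true",
    }

    res = requests.post(f"{SERVER_URL}/load_model", data=form)
    print(res.json())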
@@ -282,6 +282,7 @@ export class VoiceChangerClient
         return this.configurator.concatUploadedFile(filename, chunkNum)
     }
     loadModel = (
+        slot: number | null,
         configFilename: string,
         pyTorchModelFilename: string | null,
         onnxModelFilename: string | null,
@@ -291,7 +292,7 @@ export class VoiceChangerClient
         isHalf: boolean
     ) => {
         // !! NOTE !! hubertTorchModel is overwritten with a fixed value, so setting it here has no effect.
-        return this.configurator.loadModel(configFilename, pyTorchModelFilename, onnxModelFilename, clusterTorchModelFilename, featureFilename, indexFilename, isHalf)
+        return this.configurator.loadModel(slot, configFilename, pyTorchModelFilename, onnxModelFilename, clusterTorchModelFilename, featureFilename, indexFilename, isHalf)
     }

     //## Worklet ##//
@@ -256,6 +256,7 @@ export const useServerSetting = (props: UseServerSettingProps): ServerSettingSta
         const configFileName = fileUploadSetting.configFile ? fileUploadSetting.configFile.filename || "-" : "-"
         console.log("IS HALF", fileUploadSetting.isHalf)
         const loadPromise = props.voiceChangerClient.loadModel(
+            0,
             configFileName,
             fileUploadSetting.pyTorchModel?.filename || null,
             fileUploadSetting.onnxModel?.filename || null,
@@ -63,6 +63,20 @@ class MMVC_Rest_Fileuploader:
         isHalf: bool = Form(...),
     ):

+        props = {
+            "slot": slot,
+            "isHalf": isHalf,
+            "files": {
+                "configFilename": configFilename,
+                "pyTorchModelFilename": pyTorchModelFilename,
+                "onnxModelFilename": onnxModelFilename,
+                "clusterTorchModelFilename": clusterTorchModelFilename,
+                "featureFilename": featureFilename,
+                "indexFilename": indexFilename
+            }
+        }
+        # print("---------------------------------------------------->", props)
+
         # # Upload File Path
         # pyTorchModelFilePath = os.path.join(UPLOAD_DIR, pyTorchModelFilename) if pyTorchModelFilename != "-" else None
         # onnxModelFilePath = os.path.join(UPLOAD_DIR, onnxModelFilename) if onnxModelFilename != "-" else None
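For readability, the shape of the props payload assembled above can be written out as types. The TypedDict names below are illustrative documentation only; they are not defined anywhere in this commit.

    # Hypothetical typing of the props dict; names are not part of the repository.
    from typing import Optional, TypedDict

    class LoadModelFiles(TypedDict):
        configFilename: Optional[str]
        pyTorchModelFilename: Optional[str]
        onnxModelFilename: Optional[str]
        clusterTorchModelFilename: Optional[str]
        featureFilename: Optional[str]
        indexFilename: Optional[str]

    class LoadModelProps(TypedDict):
        slot: int
        isHalf: bool
        files: LoadModelFiles   # raw filenames or "-" at this point; rewritten to stored paths or None below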
@@ -85,22 +99,20 @@ class MMVC_Rest_Fileuploader:
         # pyTorchModelStoredFilePath = os.path.join(UPLOAD_DIR, f"{slot}", pyTorchModelFilename)
         # shutil.move(pyTorchModelFilePath, pyTorchModelStoredFilePath)

-        paths = []
-        for x in [pyTorchModelFilename, onnxModelFilename, configFilename, clusterTorchModelFilename, featureFilename, indexFilename]:
-            if x != "-":
-                uploadPath = os.path.join(UPLOAD_DIR, x)
+        # Change Filepath
+        for key, val in props["files"].items():
+            if val != "-":
+                uploadPath = os.path.join(UPLOAD_DIR, val)
                 storeDir = os.path.join(UPLOAD_DIR, f"{slot}")
                 os.makedirs(storeDir, exist_ok=True)
-                storePath = os.path.join(storeDir, x)
+                storePath = os.path.join(storeDir, val)
                 shutil.move(uploadPath, storePath)
-                paths.push(storePath)
+                props["files"][key] = storePath
             else:
-                paths.push(None)
-        pyTorchModelStoredFilePath, onnxModelStoredFilePath, configStoredFilePath, clusterTorchModelStoredFilePath, featureStoredFilePath, indexStoredFilePath = paths
+                props["files"][key] = None
+        # print("---------------------------------------------------2>", props)

-        info = self.voiceChangerManager.loadModel(slot, configStoredFilePath, pyTorchModelStoredFilePath, onnxModelStoredFilePath,
-                                                  clusterTorchModelStoredFilePath, featureStoredFilePath, indexStoredFilePath,
-                                                  isHalf)
+        info = self.voiceChangerManager.loadModel(props)
         json_compatible_item_data = jsonable_encoder(info)
         return JSONResponse(content=json_compatible_item_data)
         # return {"load": f"{configFilePath}, {pyTorchModelFilePath}, {onnxModelFilePath}"}
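The relocation loop above is the heart of the multi-slot change: every uploaded file is moved into a per-slot directory and props["files"] is rewritten in place to the stored path, or to None when the client sent the "-" placeholder. A self-contained sketch of the same logic, with a hypothetical helper name:

    # Illustrative helper mirroring the endpoint logic above; the function name is hypothetical.
    import os
    import shutil

    def store_uploaded_files(props: dict, upload_dir: str) -> dict:
        # Move each uploaded file into upload_dir/<slot>/ and replace the
        # filename in props["files"] with its stored path (or None for "-").
        store_dir = os.path.join(upload_dir, f"{props['slot']}")
        os.makedirs(store_dir, exist_ok=True)
        for key, val in props["files"].items():
            if val != "-":
                shutil.move(os.path.join(upload_dir, val), os.path.join(store_dir, val))
                props["files"][key] = os.path.join(store_dir, val)
            else:
                props["files"][key] = None
        return props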
@@ -77,11 +77,19 @@ class RVC:
         self.params = params
         print("RVC initialization: ", params)

-    def loadModel(self, config: str, pyTorch_model_file: str = None, onnx_model_file: str = None, feature_file: str = None, index_file: str = None, is_half: bool = True):
-        self.settings.configFile = config
-        self.feature_file = feature_file
-        self.index_file = index_file
-        self.is_half = is_half
+    def loadModel(self, props):
+        self.settings.configFile = props["files"]["configFilename"]
+        self.settings.pyTorchModelFile = props["files"]["pyTorchModelFilename"]
+        self.settings.onnxModelFile = props["files"]["onnxModelFilename"]
+
+        self.feature_file = props["files"]["featureFilename"]
+        self.index_file = props["files"]["indexFilename"]
+
+        self.is_half = props["isHalf"]
+        self.slot = props["slot"]
+
+        print("[Voice Changer] RVC loading... slot:", self.slot)

         try:
             hubert_path = self.params["hubert"]
@@ -95,14 +103,14 @@ class RVC:
         except Exception as e:
             print("EXCEPTION during loading hubert/contentvec model", e)

-        if pyTorch_model_file != None:
-            self.settings.pyTorchModelFile = pyTorch_model_file
-        if onnx_model_file:
-            self.settings.onnxModelFile = onnx_model_file
+        # if pyTorch_model_file != None:
+        #     self.settings.pyTorchModelFile = pyTorch_model_file
+        # if onnx_model_file:
+        #     self.settings.onnxModelFile = onnx_model_file

         # Generate the PyTorch model
-        if pyTorch_model_file != None:
-            cpt = torch.load(pyTorch_model_file, map_location="cpu")
+        if self.settings.pyTorchModelFile != None:
+            cpt = torch.load(self.settings.pyTorchModelFile, map_location="cpu")
             self.settings.modelSamplingRate = cpt["config"][-1]
             net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=self.is_half)
             net_g.eval()
@@ -112,8 +120,8 @@ class RVC:
             self.net_g = net_g

         # Generate the ONNX model
-        if onnx_model_file != None:
-            self.onnx_session = ModelWrapper(onnx_model_file)
+        if self.settings.onnxModelFile != None:
+            self.onnx_session = ModelWrapper(self.settings.onnxModelFile)
         return self.get_info()

     def update_settings(self, key: str, val: any):
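Since unspecified files now arrive as None through props, the rewritten guards (self.settings.pyTorchModelFile != None / self.settings.onnxModelFile != None) decide which backend gets built. A minimal sketch of an ONNX-only load, assuming an already-initialized RVC instance named rvc and hypothetical stored paths:

    # Sketch only: `rvc` and the paths are placeholders, not values from this commit.
    props = {
        "slot": 1,
        "isHalf": False,
        "files": {
            "configFilename": "upload_dir/1/config.json",
            "pyTorchModelFilename": None,                 # no checkpoint -> PyTorch branch is skipped
            "onnxModelFilename": "upload_dir/1/model.onnx",
            "clusterTorchModelFilename": None,
            "featureFilename": None,
            "indexFilename": None,
        },
    }
    info = rvc.loadModel(props)   # builds only the ONNX session via ModelWrapper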
@@ -108,29 +108,28 @@ class VoiceChanger():

     def loadModel(
         self,
-        slot: number,
-        config: str,
-        pyTorch_model_file: Optional[str] = None,
-        onnx_model_file: Optional[str] = None,
-        clusterTorchModel: Optional[str] = None,
-        feature_file: Optional[str] = None,
-        index_file: Optional[str] = None,
-        is_half: bool = True,
+        props,
     ):

         try:
-            if self.modelType == "MMVCv15" or self.modelType == "MMVCv13":
-                return self.voiceChanger.loadModel(config, pyTorch_model_file, onnx_model_file)
-            elif self.modelType == "so-vits-svc-40" or self.modelType == "so-vits-svc-40_c" or self.modelType == "so-vits-svc-40v2":
-                return self.voiceChanger.loadModel(config, pyTorch_model_file, onnx_model_file, clusterTorchModel)
-            elif self.modelType == "RVC":
-                return self.voiceChanger.loadModel(slot, config, pyTorch_model_file, onnx_model_file, feature_file, index_file, is_half)
-            else:
-                return self.voiceChanger.loadModel(config, pyTorch_model_file, onnx_model_file, clusterTorchModel)
+            return self.voiceChanger.loadModel(props)
         except Exception as e:
             print("[Voice Changer] Model Load Error! Check your model is valid.", e)
             return {"status": "NG"}

+        # try:
+        #     if self.modelType == "MMVCv15" or self.modelType == "MMVCv13":
+        #         return self.voiceChanger.loadModel(config, pyTorch_model_file, onnx_model_file)
+        #     elif self.modelType == "so-vits-svc-40" or self.modelType == "so-vits-svc-40_c" or self.modelType == "so-vits-svc-40v2":
+        #         return self.voiceChanger.loadModel(config, pyTorch_model_file, onnx_model_file, clusterTorchModel)
+        #     elif self.modelType == "RVC":
+        #         return self.voiceChanger.loadModel(slot, config, pyTorch_model_file, onnx_model_file, feature_file, index_file, is_half)
+        #     else:
+        #         return self.voiceChanger.loadModel(config, pyTorch_model_file, onnx_model_file, clusterTorchModel)
+        # except Exception as e:
+        #     print("[Voice Changer] Model Load Error! Check your model is valid.", e)
+        #     return {"status": "NG"}

     def get_info(self):
         data = asdict(self.settings)
         if hasattr(self, "voiceChanger"):
@@ -11,8 +11,8 @@ class VoiceChangerManager():
             cls._instance.voiceChanger = VoiceChanger(params)
         return cls._instance

-    def loadModel(self, slot, config, model, onnx_model, clusterTorchModel, feature_file, index_file, is_half: bool = True):
-        info = self.voiceChanger.loadModel(slot, config, model, onnx_model, clusterTorchModel, feature_file, index_file, is_half)
+    def loadModel(self, props):
+        info = self.voiceChanger.loadModel(props)
         if hasattr(info, "status") and info["status"] == "NG":
             return info
         else:
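End to end, the REST layer now hands a single props dict down the stack (VoiceChangerManager → VoiceChanger → the concrete changer such as RVC). A brief caller-side sketch, assuming an already-constructed voiceChangerManager and a props dict built as in MMVC_Rest_Fileuploader above; only the {"status": "NG"} failure shape comes from the code in this commit.

    # Hypothetical caller; voiceChangerManager and props are assumed to exist.
    info = voiceChangerManager.loadModel(props)
    if isinstance(info, dict) and info.get("status") == "NG":
        print("[Voice Changer] model load failed")
    else:
        print("[Voice Changer] model loaded into slot", props["slot"])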