Mirror of https://github.com/w-okada/voice-changer.git (synced 2025-02-02 16:23:58 +03:00)
fix model upload bug

commit ef89e9aefa (parent 0109f2671f)
@@ -142,10 +142,23 @@ export const useMicrophoneOptions = () => {
             await clientState.loadModel(serverSetting.configFile, serverSetting.pyTorchModel, serverSetting.onnxModel)
             setUploadProgress(0)
             setIsUploading(false)
+            // await clientState.updateSettings(ServerSettingKey.framework, serverSetting.framework)
+            // await clientState.updateSettings(ServerSettingKey.onnxExecutionProvider, serverSetting.onnxExecutionProvider)
+            // await clientState.updateSettings(ServerSettingKey.srcId, speakerSetting.srcId)
+            // await clientState.updateSettings(ServerSettingKey.dstId, speakerSetting.dstId)
+            // await clientState.updateSettings(ServerSettingKey.gpu, convertSetting.gpu)
+            // await clientState.updateSettings(ServerSettingKey.crossFadeOffsetRate, convertSetting.crossFadeOffsetRate)
+            // await clientState.updateSettings(ServerSettingKey.crossFadeEndRate, convertSetting.crossFadeEndRate)
         }
 
         }
         setLoadModelFunc(loadModel)
-    }, [serverSetting.configFile, serverSetting.pyTorchModel, serverSetting.onnxModel])
+    }, [serverSetting.configFile, serverSetting.pyTorchModel, serverSetting.onnxModel,
+        serverSetting.framework, serverSetting.onnxExecutionProvider, speakerSetting.srcId, speakerSetting.dstId, convertSetting.gpu, convertSetting.crossFadeOffsetRate, convertSetting.crossFadeEndRate
+    ])
 
     const voiceChangerSetting = useMemo(() => {
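Note on the dependency-array change above: the memoized callback handed to setLoadModelFunc closes over the settings it reads, so every such value has to appear in the dependency list or the stored callback keeps stale values from an earlier render; the extra entries also cover the settings that the commented-out updateSettings calls would read if re-enabled. A minimal sketch of that React rule, with illustrative hook and parameter names (not from the repository):

import { useEffect } from "react"

// Illustrative only: registers an async callback that reads `gpu`.
// If `gpu` were missing from the dependency array, the registered
// callback would keep the value captured when the effect last ran.
export const useApplyGpu = (gpu: number, register: (run: () => Promise<void>) => void) => {
    useEffect(() => {
        const run = async () => {
            console.log("applying gpu setting:", gpu)
        }
        register(run)
    }, [gpu, register]) // list every value the effect body reads
}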
@@ -155,17 +155,19 @@ export const useClient = (props: UseClientProps): ClientState => {
     const uploadFile = useMemo(() => {
         return async (file: File, onprogress: (progress: number, end: boolean) => void) => {
             await initializedPromise
+            console.log("uploaded.....")
             const num = await voiceChangerClientRef.current.uploadFile(file, onprogress)
             const res = await voiceChangerClientRef.current.concatUploadedFile(file, num)
-            console.log("upload", num, res)
+            console.log("uploaded", num, res)
         }
     }, [])
 
     const loadModel = useMemo(() => {
         return async (configFile: File, pyTorchModelFile: File | null, onnxModelFile: File | null) => {
+            console.log("load model....")
             await initializedPromise
             await voiceChangerClientRef.current.loadModel(configFile, pyTorchModelFile, onnxModelFile)
-            console.log("load model")
+            console.log("loaded model")
         }
     }, [])
 
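The uploadFile returned above takes an onprogress callback; a hypothetical caller could wire it to the setUploadProgress/setIsUploading state used by useMicrophoneOptions roughly as follows (sketch only, the helper below is not part of the repository):

type UploadFile = (file: File, onprogress: (progress: number, end: boolean) => void) => Promise<void>

// Illustrative wiring of the upload progress callback to UI state.
const uploadWithProgress = async (
    uploadFile: UploadFile,
    file: File,
    setUploadProgress: (p: number) => void,
    setIsUploading: (b: boolean) => void,
) => {
    setIsUploading(true)
    await uploadFile(file, (progress, end) => {
        setUploadProgress(progress) // progress value as reported by the client
        if (end) {
            setUploadProgress(0)    // reset once the final chunk is acknowledged
            setIsUploading(false)
        }
    })
}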
@@ -96,7 +96,7 @@ export class ServerConfigurator {
 
     concatUploadedFile = async (file: File, chunkNum: number) => {
         const url = this.serverUrl + "/concat_uploaded_file"
-        new Promise<void>((resolve) => {
+        await new Promise<void>((resolve) => {
             const formData = new FormData();
             formData.append("filename", file.name);
             formData.append("filenameChunkNum", "" + chunkNum);
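The added await is the core of the upload fix: without it, concatUploadedFile resolved before the POST to /concat_uploaded_file had completed, so the subsequent model load could run against chunks the server had not yet concatenated. A minimal sketch of the corrected pattern; the endpoint and form fields follow the diff, while the fetch call is an assumption, since the diff does not show how the request is actually sent:

// Sketch of the bug class fixed above (names and endpoint from the diff,
// request mechanics illustrative).
const concatUploadedFile = async (serverUrl: string, file: File, chunkNum: number): Promise<void> => {
    // Without the `await` on this Promise, the function would return
    // immediately and the caller could start loading the model before the
    // server had finished assembling the uploaded chunks.
    await new Promise<void>((resolve) => {
        const formData = new FormData()
        formData.append("filename", file.name)
        formData.append("filenameChunkNum", "" + chunkNum)
        fetch(serverUrl + "/concat_uploaded_file", { method: "POST", body: formData })
            .then(() => resolve())
    })
}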
@@ -36,9 +36,9 @@ class VocieChangerSettings():
 
 class VoiceChanger():
 
-    def __init__(self, config:str, pyTorch_model_file:str=None, onnx_model_file:str=None):
+    def __init__(self, config:str):
         # 初期化 (initialization)
-        self.settings = VocieChangerSettings(config_file=config, pyTorch_model_file=pyTorch_model_file, onnx_model_file=onnx_model_file)
+        self.settings = VocieChangerSettings(config_file=config)
         self.unpackedData_length=0
         # 共通で使用する情報を収集 (collect information shared across models)
         self.hps = utils.get_hparams_from_file(config)
@@ -53,6 +53,11 @@ class VoiceChanger():
 
         print(f"VoiceChanger Initialized (GPU_NUM:{self.gpu_num}, mps_enabled:{self.mps_enabled})")
 
+    def loadModel(self, config:str, pyTorch_model_file:str=None, onnx_model_file:str=None):
+        self.settings.config_file = config
+        self.settings.pyTorch_model_file = pyTorch_model_file
+        self.settings.onnx_model_file = onnx_model_file
+
         # PyTorchモデル生成 (build the PyTorch model)
         if pyTorch_model_file != None:
             self.net_g = SynthesizerTrn(
@@ -70,21 +75,14 @@ class VoiceChanger():
         if onnx_model_file != None:
             ort_options = onnxruntime.SessionOptions()
             ort_options.intra_op_num_threads = 8
-            # ort_options.execution_mode = onnxruntime.ExecutionMode.ORT_SEQUENTIAL
-            # ort_options.execution_mode = onnxruntime.ExecutionMode.ORT_PARALLEL
-            # ort_options.inter_op_num_threads = 8
             self.onnx_session = onnxruntime.InferenceSession(
                 onnx_model_file,
                 providers=providers
             )
-            # print("ONNX_MDEOL!1", self.onnx_session.get_providers())
-            # self.onnx_session.set_providers(providers=["CPUExecutionProvider"])
-            # print("ONNX_MDEOL!1", self.onnx_session.get_providers())
-            # self.onnx_session.set_providers(providers=["DmlExecutionProvider"])
-            # print("ONNX_MDEOL!1", self.onnx_session.get_providers())
         else:
             self.onnx_session = None
 
 
     def destroy(self):
         if hasattr(self, "net_g"):
             del self.net_g
@@ -11,9 +11,9 @@ class VoiceChangerManager():
         return cls._instance
 
     def loadModel(self, config, model, onnx_model):
-        if hasattr(self, 'voiceChanger') == True:
-            self.voiceChanger.destroy()
-        self.voiceChanger = VoiceChanger(config, model, onnx_model)
+        if hasattr(self, 'voiceChanger') == False:
+            self.voiceChanger = VoiceChanger(config)
+        self.voiceChanger.loadModel(config, model, onnx_model)
 
     def get_info(self):
         if hasattr(self, 'voiceChanger'):
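Taken together, the server-side hunks change the model lifecycle: VoiceChanger.__init__ now takes only the config, the new loadModel method applies the model files, and VoiceChangerManager creates the engine once and reloads models into it instead of destroying and recreating it on every upload. A language-agnostic sketch of that construct-once / reload pattern (written in TypeScript here, with illustrative class names rather than the project's API):

// Sketch of the construct-once / reload pattern adopted above.
class Engine {
    config: string
    constructor(config: string) {
        this.config = config
    }
    loadModel(config: string, pyTorchModel: string | null, onnxModel: string | null) {
        this.config = config
        // (re)build the inference sessions from the new model files here
    }
}

class EngineManager {
    private engine?: Engine
    loadModel(config: string, pyTorchModel: string | null, onnxModel: string | null) {
        if (!this.engine) {
            this.engine = new Engine(config) // created once, reused across reloads
        }
        this.engine.loadModel(config, pyTorchModel, onnxModel)
    }
}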