diff --git a/client/demo/src/100_options_microphone.tsx b/client/demo/src/100_options_microphone.tsx
index 7509f6a6..1f01d22f 100644
--- a/client/demo/src/100_options_microphone.tsx
+++ b/client/demo/src/100_options_microphone.tsx
@@ -142,18 +142,7 @@ export const useMicrophoneOptions = () => {
                 await clientState.loadModel(serverSetting.configFile, serverSetting.pyTorchModel, serverSetting.onnxModel)
                 setUploadProgress(0)
                 setIsUploading(false)
-                // await clientState.updateSettings(ServerSettingKey.framework, serverSetting.framework)
-                // await clientState.updateSettings(ServerSettingKey.onnxExecutionProvider, serverSetting.onnxExecutionProvider)
-                // await clientState.updateSettings(ServerSettingKey.srcId, speakerSetting.srcId)
-                // await clientState.updateSettings(ServerSettingKey.dstId, speakerSetting.dstId)
-                // await clientState.updateSettings(ServerSettingKey.gpu, convertSetting.gpu)
-                // await clientState.updateSettings(ServerSettingKey.crossFadeOffsetRate, convertSetting.crossFadeOffsetRate)
-                // await clientState.updateSettings(ServerSettingKey.crossFadeEndRate, convertSetting.crossFadeEndRate)
             }
-
-
-
-
         }
         setLoadModelFunc(loadModel)
     }, [serverSetting.configFile, serverSetting.pyTorchModel, serverSetting.onnxModel,
diff --git a/client/demo/src/hooks/useClient.ts b/client/demo/src/hooks/useClient.ts
index 32e0ba40..b3eccb94 100644
--- a/client/demo/src/hooks/useClient.ts
+++ b/client/demo/src/hooks/useClient.ts
@@ -155,7 +155,6 @@ export const useClient = (props: UseClientProps): ClientState => {
     const uploadFile = useMemo(() => {
         return async (file: File, onprogress: (progress: number, end: boolean) => void) => {
             await initializedPromise
-            console.log("uploaded.....")
             const num = await voiceChangerClientRef.current.uploadFile(file, onprogress)
             const res = await voiceChangerClientRef.current.concatUploadedFile(file, num)
             console.log("uploaded", num, res)
@@ -164,7 +163,6 @@ export const useClient = (props: UseClientProps): ClientState => {

     const loadModel = useMemo(() => {
         return async (configFile: File, pyTorchModelFile: File | null, onnxModelFile: File | null) => {
-            console.log("load model....")
             await initializedPromise
             await voiceChangerClientRef.current.loadModel(configFile, pyTorchModelFile, onnxModelFile)
             console.log("loaded model")
diff --git a/server/voice_changer/VoiceChanger.py b/server/voice_changer/VoiceChanger.py
index 2e01fff2..60fe4d88 100755
--- a/server/voice_changer/VoiceChanger.py
+++ b/server/voice_changer/VoiceChanger.py
@@ -68,8 +68,6 @@ class VoiceChanger():
                                      **self.hps.model)
             self.net_g.eval()
             utils.load_checkpoint(pyTorch_model_file, self.net_g, None)
-        else:
-            self.net_g = None

         # ONNXモデル生成
         if onnx_model_file != None:
@@ -79,9 +77,6 @@
                 onnx_model_file,
                 providers=providers
             )
-        else:
-            self.onnx_session = None
-

     def destroy(self):
         if hasattr(self, "net_g"):