diff --git a/client/demo/src/100_options_microphone.tsx b/client/demo/src/100_options_microphone.tsx
index 264a4d5c..48d4330e 100644
--- a/client/demo/src/100_options_microphone.tsx
+++ b/client/demo/src/100_options_microphone.tsx
@@ -16,6 +16,7 @@ export const useMicrophoneOptions = () => {
audioContext: audioContext,
audioOutputElementId: AUDIO_ELEMENT_FOR_PLAY_RESULT
})
+ console.log("cs", clientState)
const serverSetting = useServerSetting({ clientState })
const deviceSetting = useDeviceSetting(audioContext, { clientState })
diff --git a/client/demo/src/106_server_control.tsx b/client/demo/src/106_server_control.tsx
index 892f4c6b..0cc1a006 100644
--- a/client/demo/src/106_server_control.tsx
+++ b/client/demo/src/106_server_control.tsx
@@ -57,18 +57,22 @@ export const useServerControl = (props: UseServerControlProps) => {
}
return (
<>
-                <div>Info:</div>
-                <div>a</div>
-                <div>b</div>
-                <div>c</div>
+                <div>Model Info:</div>
+                <div className="body-item-text">
+                    <div className="body-item-text-item">{props.clientState.serverInfo?.configFile || ""}</div>
+                    <div className="body-item-text-item">{props.clientState.serverInfo?.pyTorchModelFile || ""}</div>
+                    <div className="body-item-text-item">{props.clientState.serverInfo?.onnxModelFile || ""}</div>
+                </div>
            </>
)
- }, [props.clientState.getInfo])
+ }, [props.clientState.getInfo, props.clientState.serverInfo])
diff --git a/client/demo/src/css/App.css b/client/demo/src/css/App.css
index 3c1ebc57..daffde77 100644
--- a/client/demo/src/css/App.css
+++ b/client/demo/src/css/App.css
@@ -227,6 +227,9 @@ body {
}
.body-item-text {
color: rgb(30, 30, 30);
+ .body-item-text-item{
+ padding-left: 1rem;
+ }
}
.body-item-input {
diff --git a/client/demo/src/hooks/useClient.ts b/client/demo/src/hooks/useClient.ts
index c0da6083..0a150ca0 100644
--- a/client/demo/src/hooks/useClient.ts
+++ b/client/demo/src/hooks/useClient.ts
@@ -1,4 +1,4 @@
-import { BufferSize, createDummyMediaStream, DefaultVoiceChangerOptions, DefaultVoiceChangerRequestParamas, Framework, OnnxExecutionProvider, Protocol, SampleRate, ServerSettingKey, Speaker, VoiceChangerMode, VoiceChnagerClient } from "@dannadori/voice-changer-client-js"
+import { ServerInfo, BufferSize, createDummyMediaStream, DefaultVoiceChangerOptions, DefaultVoiceChangerRequestParamas, Framework, OnnxExecutionProvider, Protocol, SampleRate, ServerSettingKey, Speaker, VoiceChangerMode, VoiceChnagerClient } from "@dannadori/voice-changer-client-js"
import { useEffect, useMemo, useRef, useState } from "react"
export type UseClientProps = {
@@ -38,8 +38,6 @@ export type SettingState = {
// advanced setting
vfForceDisabled: boolean
voiceChangerMode: VoiceChangerMode
-
-
}
const InitialSettingState: SettingState = {
@@ -80,6 +78,7 @@ export type ClientState = {
// Setting
settingState: SettingState
+ serverInfo: ServerInfo | undefined
setSettingState: (setting: SettingState) => void
// Client Control
@@ -143,6 +142,8 @@ export const useClient = (props: UseClientProps): ClientState => {
    // (2) Settings
    const [settingState, setSettingState] = useState<SettingState>(InitialSettingState)
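+    // displaySettingState: values shown in the UI (settingState overlaid with values reported back by the server)
+    // serverInfo: last response returned by the server for a settings update or info request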
+ const [displaySettingState, setDisplaySettingState] = useState(InitialSettingState)
+    const [serverInfo, setServerInfo] = useState<ServerInfo>()
const [uploadProgress, setUploadProgress] = useState(0)
const [isUploading, setIsUploading] = useState(false)
@@ -153,6 +154,8 @@ export const useClient = (props: UseClientProps): ClientState => {
await initializedPromise
voiceChangerClientRef.current!.setServerUrl(settingState.mmvcServerUrl, true)
voiceChangerClientRef.current!.stop()
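+            // refresh the server info so the UI reflects the newly selected server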
+ getInfo()
+
})()
}, [settingState.mmvcServerUrl])
    // (b) Protocol setting
@@ -167,13 +170,17 @@ export const useClient = (props: UseClientProps): ClientState => {
(async () => {
await initializedPromise
const info = await voiceChangerClientRef.current!.updateServerSettings(ServerSettingKey.framework, "" + settingState.framework)
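+            // keep the server's response so the UI can show the settings actually applied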
+ setServerInfo(info)
+
})()
}, [settingState.framework])
    // (d) OnnxExecutionProvider setting
useEffect(() => {
(async () => {
await initializedPromise
- const info = voiceChangerClientRef.current!.updateServerSettings(ServerSettingKey.onnxExecutionProvider, settingState.onnxExecutionProvider)
+ const info = await voiceChangerClientRef.current!.updateServerSettings(ServerSettingKey.onnxExecutionProvider, settingState.onnxExecutionProvider)
+ setServerInfo(info)
+
})()
}, [settingState.onnxExecutionProvider])
@@ -244,6 +251,8 @@ export const useClient = (props: UseClientProps): ClientState => {
(async () => {
await initializedPromise
const info = await voiceChangerClientRef.current!.updateServerSettings(ServerSettingKey.srcId, "" + settingState.srcId)
+ setServerInfo(info)
+
})()
}, [settingState.srcId])
@@ -252,6 +261,8 @@ export const useClient = (props: UseClientProps): ClientState => {
(async () => {
await initializedPromise
const info = await voiceChangerClientRef.current!.updateServerSettings(ServerSettingKey.dstId, "" + settingState.dstId)
+ setServerInfo(info)
+
})()
}, [settingState.dstId])
@@ -270,6 +281,7 @@ export const useClient = (props: UseClientProps): ClientState => {
(async () => {
await initializedPromise
const info = await voiceChangerClientRef.current!.updateServerSettings(ServerSettingKey.convertChunkNum, "" + settingState.convertChunkNum)
+ setServerInfo(info)
})()
}, [settingState.convertChunkNum])
@@ -278,6 +290,7 @@ export const useClient = (props: UseClientProps): ClientState => {
(async () => {
await initializedPromise
const info = await voiceChangerClientRef.current!.updateServerSettings(ServerSettingKey.gpu, "" + settingState.gpu)
+ setServerInfo(info)
})()
}, [settingState.gpu])
@@ -286,6 +299,7 @@ export const useClient = (props: UseClientProps): ClientState => {
(async () => {
await initializedPromise
const info = await voiceChangerClientRef.current!.updateServerSettings(ServerSettingKey.crossFadeOffsetRate, "" + settingState.crossFadeOffsetRate)
+ setServerInfo(info)
})()
}, [settingState.crossFadeOffsetRate])
@@ -294,6 +308,7 @@ export const useClient = (props: UseClientProps): ClientState => {
(async () => {
await initializedPromise
const info = await voiceChangerClientRef.current!.updateServerSettings(ServerSettingKey.crossFadeEndRate, "" + settingState.crossFadeEndRate)
+ setServerInfo(info)
})()
}, [settingState.crossFadeEndRate])
@@ -331,10 +346,33 @@ export const useClient = (props: UseClientProps): ClientState => {
await initializedPromise
const serverSettings = await voiceChangerClientRef.current!.getServerSettings()
const clientSettings = await voiceChangerClientRef.current!.getClientSettings()
+ setServerInfo(serverSettings)
console.log(serverSettings, clientSettings)
}
}, [])
+    // (x) Reflect the server-reported values into the settings shown in the UI
+ useEffect(() => {
+ if (serverInfo && serverInfo.status == "OK") {
+ setDisplaySettingState({
+ ...settingState,
+ convertChunkNum: serverInfo.convertChunkNum,
+ crossFadeOffsetRate: serverInfo.crossFadeOffsetRate,
+ crossFadeEndRate: serverInfo.crossFadeEndRate,
+ gpu: serverInfo.gpu,
+ srcId: serverInfo.srcId,
+ dstId: serverInfo.dstId,
+ framework: serverInfo.framework,
+ onnxExecutionProvider: serverInfo.providers.length > 0 ? serverInfo.providers[0] as OnnxExecutionProvider : "CPUExecutionProvider"
+ })
+ } else {
+ setDisplaySettingState({
+ ...settingState,
+ })
+ }
+
+ }, [settingState, serverInfo])
+
return {
clientInitialized,
@@ -344,7 +382,8 @@ export const useClient = (props: UseClientProps): ClientState => {
uploadProgress,
isUploading,
- settingState,
+ settingState: displaySettingState,
+ serverInfo,
setSettingState,
loadModel,
start,
diff --git a/client/lib/src/const.ts b/client/lib/src/const.ts
index 42c9f583..e1f25c9b 100644
--- a/client/lib/src/const.ts
+++ b/client/lib/src/const.ts
@@ -38,12 +38,24 @@ export type Speaker = {
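+// Shape of the server's response to settings updates and info requests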
export type ServerInfo = {
+ status: string
+ configFile: string,
pyTorchModelFile: string,
onnxModelFile: string,
- configFile: string,
+ convertChunkNum: number,
+ crossFadeOffsetRate: number,
+ crossFadeEndRate: number,
+ gpu: number,
+ srcId: number,
+ dstId: number,
+ framework: Framework,
providers: string[]
}
+
+
+
+
// Consts
export const Protocol = {
"sio": "sio",
diff --git a/server/voice_changer/VoiceChanger.py b/server/voice_changer/VoiceChanger.py
index 28797a74..e5f91194 100755
--- a/server/voice_changer/VoiceChanger.py
+++ b/server/voice_changer/VoiceChanger.py
@@ -26,9 +26,9 @@ class VocieChangerSettings():
crossFadeEndRate:float = 0.9
convertChunkNum:int = 32
framework:str = "PyTorch" # PyTorch or ONNX
- pyTorch_model_file:str = ""
- onnx_model_file:str = ""
- config_file:str = ""
+ pyTorchModelFile:str = ""
+ onnxModelFile:str = ""
+ configFile:str = ""
    # ↓ list only the mutable fields
intData = ["gpu","srcId", "dstId", "convertChunkNum"]
floatData = [ "crossFadeOffsetRate", "crossFadeEndRate",]
@@ -38,7 +38,7 @@ class VoiceChanger():
def __init__(self, config:str):
        # Initialization
- self.settings = VocieChangerSettings(config_file=config)
+ self.settings = VocieChangerSettings(configFile=config)
self.unpackedData_length=0
self.net_g = None
self.onnx_session = None
@@ -58,9 +58,11 @@ class VoiceChanger():
print(f"VoiceChanger Initialized (GPU_NUM:{self.gpu_num}, mps_enabled:{self.mps_enabled})")
def loadModel(self, config:str, pyTorch_model_file:str=None, onnx_model_file:str=None):
- self.settings.config_file = config
- self.settings.pyTorch_model_file = pyTorch_model_file
- self.settings.onnx_model_file = onnx_model_file
+ self.settings.configFile = config
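+        # keep previously loaded model paths unless new files are provided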
+ if pyTorch_model_file != None:
+ self.settings.pyTorchModelFile = pyTorch_model_file
+ if onnx_model_file:
+ self.settings.onnxModelFile = onnx_model_file
        # Generate the PyTorch model
if pyTorch_model_file != None:
@@ -90,10 +92,10 @@ class VoiceChanger():
def get_info(self):
data = asdict(self.settings)
- data["providers"] = self.onnx_session.get_providers() if self.onnx_session != None else ""
- files = ["config_file", "pyTorch_model_file", "onnx_model_file"]
+ data["providers"] = self.onnx_session.get_providers() if self.onnx_session != None else []
+ files = ["configFile", "pyTorchModelFile", "onnxModelFile"]
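+        # report only the basename for files that exist; otherwise an empty string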
for f in files:
- if os.path.exists(f):
+ if data[f]!=None and os.path.exists(data[f]):
data[f] = os.path.basename(data[f])
else:
data[f] = ""
@@ -126,8 +128,6 @@ class VoiceChanger():
return self.get_info()
- self.currentCrossFadeOffsetRate=0
- self.currentCrossFadeEndRate=0
def _generate_strength(self, unpackedData):