Mirror of https://github.com/w-okada/voice-changer.git, synced 2025-01-23 13:35:12 +03:00

api return info

Commit eda0296ce8 (parent d414b5d3a8)
@@ -11,6 +11,12 @@ export type ClientState = {
     bufferingTime: number;
     responseTime: number;
     volume: number;
+
+    // Setting
+
+
+
+
     // Client Setting
     setServerUrl: (mmvcServerUrl: string) => Promise<void>
     setProtocol: (protocol: Protocol) => Promise<void>
@@ -36,17 +42,15 @@ export const useClient = (props: UseClientProps): ClientState => {

     const voiceChangerClientRef = useRef<VoiceChnagerClient | null>(null)
     const [clientInitialized, setClientInitialized] = useState<boolean>(false)
-
-    const [bufferingTime, setBufferingTime] = useState<number>(0)
-    const [responseTime, setResponseTime] = useState<number>(0)
-    const [volume, setVolume] = useState<number>(0)
-
     const initializedResolveRef = useRef<(value: void | PromiseLike<void>) => void>()
     const initializedPromise = useMemo(() => {
         return new Promise<void>((resolve) => {
             initializedResolveRef.current = resolve
         })
     }, [])
+    const [bufferingTime, setBufferingTime] = useState<number>(0)
+    const [responseTime, setResponseTime] = useState<number>(0)
+    const [volume, setVolume] = useState<number>(0)

     useEffect(() => {
         const initialized = async () => {
@@ -88,30 +92,30 @@ export const useClient = (props: UseClientProps): ClientState => {
     const setServerUrl = useMemo(() => {
         return async (mmvcServerUrl: string) => {
             await initializedPromise
-            voiceChangerClientRef.current.setServerUrl(mmvcServerUrl, true)
-            voiceChangerClientRef.current.stop()
+            voiceChangerClientRef.current!.setServerUrl(mmvcServerUrl, true)
+            voiceChangerClientRef.current!.stop()
         }
     }, [])

     const setProtocol = useMemo(() => {
         return async (protocol: Protocol) => {
             await initializedPromise
-            voiceChangerClientRef.current.setProtocol(protocol)
+            voiceChangerClientRef.current!.setProtocol(protocol)
         }
     }, [])

     const setInputChunkNum = useMemo(() => {
         return async (num: number) => {
             await initializedPromise
-            voiceChangerClientRef.current.setInputChunkNum(num)
+            voiceChangerClientRef.current!.setInputChunkNum(num)
         }
     }, [])

     const setVoiceChangerMode = useMemo(() => {
         return async (val: VoiceChangerMode) => {
             await initializedPromise
-            voiceChangerClientRef.current.setVoiceChangerMode(val)
-            voiceChangerClientRef.current.stop()
+            voiceChangerClientRef.current!.setVoiceChangerMode(val)
+            voiceChangerClientRef.current!.stop()
         }
     }, [])

@@ -120,14 +124,14 @@ export const useClient = (props: UseClientProps): ClientState => {
     const start = useMemo(() => {
         return async (mmvcServerUrl: string) => {
             await initializedPromise
-            voiceChangerClientRef.current.setServerUrl(mmvcServerUrl, true)
-            voiceChangerClientRef.current.start()
+            voiceChangerClientRef.current!.setServerUrl(mmvcServerUrl, true)
+            voiceChangerClientRef.current!.start()
         }
     }, [])
     const stop = useMemo(() => {
         return async () => {
             await initializedPromise
-            voiceChangerClientRef.current.stop()
+            voiceChangerClientRef.current!.stop()
         }
     }, [])

@@ -136,15 +140,14 @@ export const useClient = (props: UseClientProps): ClientState => {
     const changeInput = useMemo(() => {
         return async (audioInput: MediaStream | string, bufferSize: BufferSize, vfForceDisable: boolean) => {
             await initializedPromise
-            if (!props.audioContext) return
             if (!audioInput || audioInput == "none") {
                 console.log("[useClient] setup!(1)", audioInput)
-                const ms = createDummyMediaStream(props.audioContext)
-                await voiceChangerClientRef.current.setup(ms, bufferSize, vfForceDisable)
+                const ms = createDummyMediaStream(props.audioContext!)
+                await voiceChangerClientRef.current!.setup(ms, bufferSize, vfForceDisable)

             } else {
                 console.log("[useClient] setup!(2)", audioInput)
-                await voiceChangerClientRef.current.setup(audioInput, bufferSize, vfForceDisable)
+                await voiceChangerClientRef.current!.setup(audioInput, bufferSize, vfForceDisable)
             }
         }
     }, [props.audioContext])
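When no concrete input is selected, changeInput feeds the client a dummy stream built from the AudioContext so the audio pipeline keeps running. A minimal sketch of what such a silent stream could look like; this is only an illustration, not necessarily how createDummyMediaStream is implemented in the repository:

// Illustrative helper: produce a silent MediaStream from a Web Audio context.
const createSilentStream = (ctx: AudioContext): MediaStream => {
    const osc = ctx.createOscillator()              // any source node will do
    const gain = ctx.createGain()
    gain.gain.value = 0                             // mute the signal
    const dst = ctx.createMediaStreamDestination()  // node that exposes a MediaStream
    osc.connect(gain)
    gain.connect(dst)
    osc.start()
    return dst.stream
}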
@@ -155,8 +158,8 @@ export const useClient = (props: UseClientProps): ClientState => {
     const uploadFile = useMemo(() => {
         return async (file: File, onprogress: (progress: number, end: boolean) => void) => {
             await initializedPromise
-            const num = await voiceChangerClientRef.current.uploadFile(file, onprogress)
-            const res = await voiceChangerClientRef.current.concatUploadedFile(file, num)
+            const num = await voiceChangerClientRef.current!.uploadFile(file, onprogress)
+            const res = await voiceChangerClientRef.current!.concatUploadedFile(file, num)
             console.log("uploaded", num, res)
         }
     }, [])
@@ -164,7 +167,7 @@ export const useClient = (props: UseClientProps): ClientState => {
     const loadModel = useMemo(() => {
         return async (configFile: File, pyTorchModelFile: File | null, onnxModelFile: File | null) => {
             await initializedPromise
-            await voiceChangerClientRef.current.loadModel(configFile, pyTorchModelFile, onnxModelFile)
+            await voiceChangerClientRef.current!.loadModel(configFile, pyTorchModelFile, onnxModelFile)
             console.log("loaded model")
         }
     }, [])
@@ -172,7 +175,7 @@ export const useClient = (props: UseClientProps): ClientState => {
     const updateSettings = useMemo(() => {
         return async (key: ServerSettingKey, val: string | number) => {
             await initializedPromise
-            return await voiceChangerClientRef.current.updateServerSettings(key, "" + val)
+            return await voiceChangerClientRef.current!.updateServerSettings(key, "" + val)
         }
     }, [])

@@ -180,8 +183,8 @@ export const useClient = (props: UseClientProps): ClientState => {
     const getInfo = useMemo(() => {
         return async () => {
             await initializedPromise
-            const serverSettings = await voiceChangerClientRef.current.getServerSettings()
-            const clientSettings = await voiceChangerClientRef.current.getClientSettings()
+            const serverSettings = await voiceChangerClientRef.current!.getServerSettings()
+            const clientSettings = await voiceChangerClientRef.current!.getClientSettings()
             console.log(serverSettings, clientSettings)
         }
     }, [])
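Every setter above was changed the same way: it awaits the one-shot initialization promise and then calls through the client ref with a non-null assertion, which is sound because the promise only resolves after the ref has been assigned. A minimal sketch of that pattern with illustrative names (ClientLike and useGuardedStop are not part of the repository):

import { useMemo, useRef } from "react"

// Illustrative client shape; the real VoiceChnagerClient exposes many more methods.
type ClientLike = { stop: () => void }

export const useGuardedStop = (initialized: Promise<void>) => {
    const clientRef = useRef<ClientLike | null>(null)
    const stop = useMemo(() => {
        return async () => {
            await initialized          // resolves only after clientRef.current is set
            clientRef.current!.stop()  // "!" is justified by the await above
        }
    }, [])
    return { clientRef, stop }
}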
@@ -28,7 +28,7 @@ export class ServerConfigurator {

     updateSettings = async (key: ServerSettingKey, val: string) => {
         const url = this.serverUrl + "/update_setteings"
-        const p = new Promise<void>((resolve) => {
+        const info = await new Promise<ServerInfo>(async (resolve) => {
             const formData = new FormData();
             formData.append("key", key);
             formData.append("val", val);
@@ -36,12 +36,9 @@
                 method: 'POST',
                 body: formData,
             });
-            fetch(request).then(async (response) => {
-                console.log(await response.json())
-                resolve()
-            })
+            const res = await (await fetch(request)).json() as ServerInfo
+            resolve(res)
         })
-        const info = await p
         return info
     }

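The updateSettings change above makes the request resolve with the parsed response body, typed as ServerInfo, instead of logging it and resolving void. Because the executor now simply awaits fetch, the surrounding new Promise wrapper is no longer strictly needed; a minimal equivalent sketch, reusing the /update_setteings endpoint from the code above with an illustrative ServerInfoLike type:

// Illustrative response shape; the actual ServerInfo type appears in a later hunk.
type ServerInfoLike = { status?: string; [key: string]: unknown }

export const updateSettingExample = async (serverUrl: string, key: string, val: string): Promise<ServerInfoLike> => {
    const formData = new FormData()
    formData.append("key", key)
    formData.append("val", val)
    const request = new Request(serverUrl + "/update_setteings", { method: "POST", body: formData })
    const response = await fetch(request)
    return await response.json() as ServerInfoLike
}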
@@ -113,7 +110,7 @@ export class ServerConfigurator {

     loadModel = async (configFile: File, pyTorchModelFile: File | null, onnxModelFile: File | null) => {
         const url = this.serverUrl + "/load_model"
-        const loadP = new Promise<void>((resolve) => {
+        const info = new Promise<ServerInfo>(async (resolve) => {
             const formData = new FormData();
             formData.append("pyTorchModelFilename", pyTorchModelFile?.name || "-");
             formData.append("onnxModelFilename", onnxModelFile?.name || "-");
@@ -122,11 +119,9 @@
                 method: 'POST',
                 body: formData,
             });
-            fetch(request).then(async (response) => {
-                console.log(await response.text())
-                resolve()
-            })
+            const res = await (await fetch(request)).json() as ServerInfo
+            resolve(res)
         })
-        await loadP
+        return await info
     }
 }
@@ -77,7 +77,6 @@ export class VoiceChnagerClient {
         this.vcNode.connect(this.currentMediaStreamAudioDestinationNode) // vc node -> output node
         // (data is pushed into the vc node by the audio streamer's callback)
         this.audioStreamer = new AudioStreamer(this.callbacks, audioStreamerListeners, { objectMode: true, })
-        // this.audioStreamer.setRequestParams(DefaultVoiceChangerRequestParamas)
         this.audioStreamer.setInputChunkNum(DefaultVoiceChangerOptions.inputChunkNum)
         this.audioStreamer.setVoiceChangerMode(DefaultVoiceChangerOptions.voiceChangerMode)

@@ -44,9 +44,7 @@ export type ServerInfo = {
     providers: string[]
 }
-
-
 // Consts

 export const Protocol = {
     "sio": "sio",
     "rest": "rest",
@@ -17,4 +17,21 @@ tqdm==4.64.1
 Unidecode==1.3.6
 uvicorn==0.20.0
 websockets==10.4
 onnxruntime==1.13.1
+onnxruntime-gpu==1.13.1
+#onnxruntime-openvino==1.13.1
+#onnxruntime-directml==1.13.1
+
+#torch==1.12.1+cu113 torchvision==0.13.1+cu113 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cu113
+#torch==1.12.1+cu113
+#torchaudio==0.12.1+cu113
+#torchvision==0.13.1+cu113
+# conda install pytorch torchvision torchaudio pytorch-cuda=11.6 -c pytorch -c nvidia
+
+# conda install pytorch==1.13.0+cu116 torchvision torchaudio pytorch-cuda=11.6 -c pytorch -c nvidia
+
+
+#pip install torch==1.13.0+cu116 torchvision==0.13.1+cu116 torchaudio==0.13.0 --extra-index-url https://download.pytorch.org/whl/cu116
+
+# conda install pytorch torchvision torchaudio pytorch-cuda=11.7 -c pytorch -c nvidia
+
@@ -28,12 +28,14 @@ class MMVC_Rest_Fileuploader:
         self.onnx_provider=""

     def post_upload_file(self, file: UploadFile = File(...), filename: str = Form(...)):
-        return upload_file(UPLOAD_DIR, file, filename)
+        res = upload_file(UPLOAD_DIR, file, filename)
+        json_compatible_item_data = jsonable_encoder(res)
+        return JSONResponse(content=json_compatible_item_data)

     def post_concat_uploaded_file(self, filename: str = Form(...), filenameChunkNum: int = Form(...)):
-        modelFilePath = concat_file_chunks(
-            UPLOAD_DIR, filename, filenameChunkNum, UPLOAD_DIR)
-        return {"concat": f"{modelFilePath}"}
+        res = concat_file_chunks(UPLOAD_DIR, filename, filenameChunkNum, UPLOAD_DIR)
+        json_compatible_item_data = jsonable_encoder(res)
+        return JSONResponse(content=json_compatible_item_data)

     def get_info(self):
         info = self.voiceChangerManager.get_info()
@@ -57,8 +59,10 @@ class MMVC_Rest_Fileuploader:
         onnxModelFilePath = os.path.join(UPLOAD_DIR, onnxModelFilename) if onnxModelFilename != "-" else None
         configFilePath = os.path.join(UPLOAD_DIR, configFilename)

-        self.voiceChangerManager.loadModel(configFilePath, pyTorchModelFilePath, onnxModelFilePath)
-        return {"load": f"{configFilePath}, {pyTorchModelFilePath}, {onnxModelFilePath}"}
+        info = self.voiceChangerManager.loadModel(configFilePath, pyTorchModelFilePath, onnxModelFilePath)
+        json_compatible_item_data = jsonable_encoder(info)
+        return JSONResponse(content=json_compatible_item_data)
+        # return {"load": f"{configFilePath}, {pyTorchModelFilePath}, {onnxModelFilePath}"}


     def post_load_model_for_train(
@@ -82,4 +86,5 @@ class MMVC_Rest_Fileuploader:
         zipFilePath = concat_file_chunks(
             UPLOAD_DIR, zipFilename, zipFileChunkNum, UPLOAD_DIR)
         shutil.unpack_archive(zipFilePath, "MMVC_Trainer/dataset/textful/")
         return {"Zip file unpacked": f"{zipFilePath}"}
+
@@ -9,8 +9,9 @@ def upload_file(upload_dirname:str, file:UploadFile, filename: str):
         upload_dir = open(os.path.join(upload_dirname, filename),'wb+')
         shutil.copyfileobj(fileobj, upload_dir)
         upload_dir.close()
-        return {"uploaded files": f"{filename} "}
-    return {"Error": "uploaded file is not found."}
+
+        return {"status":"OK", "msg": f"uploaded files {filename} "}
+    return {"status":"ERROR", "msg": "uploaded file is not found."}

 def concat_file_chunks(upload_dirname:str, filename:str, chunkNum:int, dest_dirname:str):
     target_file_name = os.path.join(dest_dirname, filename)
@@ -25,5 +26,5 @@ def concat_file_chunks(upload_dirname:str, filename:str, chunkNum:int, dest_dirname:str):
         stored_chunk_file.close()
         os.unlink(chunk_file_path)
     target_file.close()
-    return target_file_name
+    return {"status":"OK", "msg": f"concat files {target_file_name} "}

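With these changes the upload helpers return a uniform {"status": ..., "msg": ...} payload instead of ad-hoc keys. A short sketch of how a caller might branch on it; the UploadResult type and function name are illustrative:

// Illustrative shape of the upload responses shown above.
type UploadResult = { status: "OK" | "ERROR"; msg: string }

export const reportUploadResult = (res: UploadResult): void => {
    if (res.status === "OK") {
        console.log("upload step finished:", res.msg)   // e.g. "uploaded files <name>"
    } else {
        console.warn("upload step failed:", res.msg)    // e.g. "uploaded file is not found."
    }
}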
@@ -40,6 +40,8 @@ class VoiceChanger():
         # initialization
         self.settings = VocieChangerSettings(config_file=config)
         self.unpackedData_length=0
+        self.net_g = None
+        self.onnx_session = None
         # collect commonly used information
         self.hps = utils.get_hparams_from_file(config)
         self.gpu_num = torch.cuda.device_count()
@@ -68,8 +70,6 @@ class VoiceChanger():
                 **self.hps.model)
             self.net_g.eval()
             utils.load_checkpoint(pyTorch_model_file, self.net_g, None)
-        if hasattr(self, "net_g") == False:
-            self.net_g = None

         # create the ONNX session
         if onnx_model_file != None:
@@ -79,14 +79,11 @@ class VoiceChanger():
                 onnx_model_file,
                 providers=providers
             )
-        if hasattr(self, "onnx_session") == False:
-            self.onnx_session = None
+        return self.get_info()

     def destroy(self):
-        if hasattr(self, "net_g"):
-            del self.net_g
-        if hasattr(self, "onnx_session"):
-            del self.onnx_session
+        del self.net_g
+        del self.onnx_session

     def get_info(self):
         data = asdict(self.settings)
@@ -108,7 +105,6 @@ class VoiceChanger():
                 self.onnx_session.set_providers(providers=[val], provider_options=provider_options)
             else:
                 self.onnx_session.set_providers(providers=[val])
-            return self.get_info()
         elif key in self.settings.intData:
             setattr(self.settings, key, int(val))
             if key == "gpu" and val >= 0 and val < self.gpu_num and self.onnx_session != None:
@@ -119,16 +115,13 @@ class VoiceChanger():
                 self.onnx_session.set_providers(providers=["CUDAExecutionProvider"], provider_options=provider_options)
             if key == "crossFadeOffsetRate" or key == "crossFadeEndRate":
                 self.unpackedData_length = 0
-            return self.get_info()
         elif key in self.settings.floatData:
             setattr(self.settings, key, float(val))
-            return self.get_info()
         elif key in self.settings.strData:
             setattr(self.settings, key, str(val))
-            return self.get_info()
         else:
             print(f"{key} is not mutalbe variable!")
         return self.get_info()

@@ -13,19 +13,25 @@ class VoiceChangerManager():
     def loadModel(self, config, model, onnx_model):
         if hasattr(self, 'voiceChanger') == False:
             self.voiceChanger = VoiceChanger(config)
-        self.voiceChanger.loadModel(config, model, onnx_model)
+        info = self.voiceChanger.loadModel(config, model, onnx_model)
+        info["status"]="OK"
+        return info

     def get_info(self):
         if hasattr(self, 'voiceChanger'):
-            return self.voiceChanger.get_info()
+            info = self.voiceChanger.get_info()
+            info["status"]="OK"
+            return info
         else:
-            return {"no info":"no info"}
+            return {"status":"ERROR", "msg":"no model loaded"}

     def update_setteings(self, key:str, val:any):
         if hasattr(self, 'voiceChanger'):
-            return self.voiceChanger.update_setteings(key, val)
+            info = self.voiceChanger.update_setteings(key, val)
+            info["status"]="OK"
+            return info
         else:
-            return {"no info":"no info"}
+            return {"status":"ERROR", "msg":"no model loaded"}

     def changeVoice(self, unpackedData:any):
         if hasattr(self, 'voiceChanger') == True:
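After this change every manager-level call folds a "status" field into the info dictionary it returns, so success and error payloads share one shape all the way out to the REST responses. A sketch of how a client might narrow such a payload; apart from status, msg, and the providers field shown in the ServerInfo hunk, the names here are illustrative:

type ManagerInfo = {
    status: "OK" | "ERROR"
    msg?: string
    providers?: string[]   // field from the ServerInfo type shown earlier
    [key: string]: unknown
}

export const pickProviders = (info: ManagerInfo): string[] => {
    if (info.status !== "OK") {
        console.warn("server reported an error:", info.msg)
        return []
    }
    return info.providers ?? []
}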