// voice-changer/client/demo/src/100_options_microphone.tsx
import * as React from "react";
2023-01-07 18:25:21 +03:00
import { useEffect, useMemo, useState } from "react";
2023-01-08 10:18:20 +03:00
import { AUDIO_ELEMENT_FOR_PLAY_RESULT } from "./const";
2023-01-07 14:07:39 +03:00
import { useServerSetting } from "./101_server_setting";
import { useDeviceSetting } from "./102_device_setting";
import { useConvertSetting } from "./104_convert_setting";
import { useAdvancedSetting } from "./105_advanced_setting";
import { useSpeakerSetting } from "./103_speaker_setting";
import { useClient } from "./hooks/useClient";
import { useServerControl } from "./106_server_control";
2023-01-08 10:18:20 +03:00
import { ServerSettingKey } from "@dannadori/voice-changer-client-js";
2023-01-07 14:07:39 +03:00
export const useMicrophoneOptions = () => {
const [audioContext, setAudioContext] = useState<AudioContext | null>(null)
2023-01-08 14:28:57 +03:00
const [loadModelFunc, setLoadModelFunc] = useState<() => Promise<void>>()
const [uploadProgress, setUploadProgress] = useState<number>(0)
const [isUploading, setIsUploading] = useState<boolean>(false)
2023-01-07 18:25:21 +03:00
const clientState = useClient({
audioContext: audioContext,
audioOutputElementId: AUDIO_ELEMENT_FOR_PLAY_RESULT
})
const serverSetting = useServerSetting({
2023-01-08 14:28:57 +03:00
clientState,
loadModelFunc,
uploadProgress: uploadProgress,
isUploading: isUploading
2023-01-07 18:25:21 +03:00
})
2023-01-07 14:07:39 +03:00
const deviceSetting = useDeviceSetting(audioContext)
const speakerSetting = useSpeakerSetting()
const convertSetting = useConvertSetting()
const advancedSetting = useAdvancedSetting()
2023-01-07 18:25:21 +03:00
2023-01-07 14:07:39 +03:00
const serverControl = useServerControl({
convertStart: async () => { await clientState.start(serverSetting.mmvcServerUrl, serverSetting.protocol) },
convertStop: async () => { clientState.stop() },
2023-01-08 10:18:20 +03:00
getInfo: clientState.getInfo,
2023-01-07 14:07:39 +03:00
volume: clientState.volume,
bufferingTime: clientState.bufferingTime,
2023-01-08 14:28:57 +03:00
responseTime: clientState.responseTime,
2023-01-07 14:07:39 +03:00
})
useEffect(() => {
const createAudioContext = () => {
const ctx = new AudioContext()
setAudioContext(ctx)
document.removeEventListener('touchstart', createAudioContext);
document.removeEventListener('mousedown', createAudioContext);
}
document.addEventListener('touchstart', createAudioContext);
document.addEventListener('mousedown', createAudioContext);
}, [])
2023-01-08 10:18:20 +03:00
// 101 ServerSetting
//// サーバ変更
useEffect(() => {
clientState.setServerUrl(serverSetting.mmvcServerUrl)
2023-01-08 14:28:57 +03:00
}, [serverSetting.mmvcServerUrl])
2023-01-08 10:18:20 +03:00
//// プロトコル変更
useEffect(() => {
clientState.setProtocol(serverSetting.protocol)
2023-01-08 14:28:57 +03:00
}, [serverSetting.protocol])
2023-01-08 10:18:20 +03:00
//// フレームワーク変更
useEffect(() => {
clientState.updateSettings(ServerSettingKey.framework, serverSetting.framework)
2023-01-08 14:28:57 +03:00
}, [serverSetting.framework])
2023-01-08 10:18:20 +03:00
//// OnnxExecutionProvider変更
useEffect(() => {
clientState.updateSettings(ServerSettingKey.onnxExecutionProvider, serverSetting.onnxExecutionProvider)
2023-01-08 14:28:57 +03:00
}, [serverSetting.onnxExecutionProvider])
2023-01-07 14:07:39 +03:00
2023-01-08 10:18:20 +03:00
// 102 DeviceSetting
//// 入力情報の設定
2023-01-07 14:07:39 +03:00
useEffect(() => {
clientState.changeInput(deviceSetting.audioInput, convertSetting.bufferSize, advancedSetting.vfForceDisabled)
2023-01-08 14:28:57 +03:00
}, [deviceSetting.audioInput, convertSetting.bufferSize, advancedSetting.vfForceDisabled])
2023-01-07 14:07:39 +03:00
2023-01-08 10:18:20 +03:00
// 103 SpeakerSetting
// 音声変換元、変換先の設定
useEffect(() => {
clientState.updateSettings(ServerSettingKey.srcId, speakerSetting.srcId)
2023-01-08 14:28:57 +03:00
}, [speakerSetting.srcId])
2023-01-08 10:18:20 +03:00
useEffect(() => {
clientState.updateSettings(ServerSettingKey.dstId, speakerSetting.dstId)
2023-01-08 14:28:57 +03:00
}, [speakerSetting.dstId])
2023-01-08 10:18:20 +03:00
// 104 ConvertSetting
useEffect(() => {
clientState.setInputChunkNum(convertSetting.inputChunkNum)
2023-01-08 14:28:57 +03:00
}, [convertSetting.inputChunkNum])
2023-01-08 10:18:20 +03:00
useEffect(() => {
clientState.updateSettings(ServerSettingKey.convertChunkNum, convertSetting.convertChunkNum)
2023-01-08 14:28:57 +03:00
}, [convertSetting.convertChunkNum])
2023-01-08 10:18:20 +03:00
useEffect(() => {
clientState.updateSettings(ServerSettingKey.gpu, convertSetting.gpu)
2023-01-08 14:28:57 +03:00
}, [convertSetting.gpu])
2023-01-08 10:18:20 +03:00
useEffect(() => {
clientState.updateSettings(ServerSettingKey.crossFadeOffsetRate, convertSetting.crossFadeOffsetRate)
2023-01-08 14:28:57 +03:00
}, [convertSetting.crossFadeOffsetRate])
2023-01-08 10:18:20 +03:00
useEffect(() => {
clientState.updateSettings(ServerSettingKey.crossFadeEndRate, convertSetting.crossFadeEndRate)
2023-01-08 14:28:57 +03:00
}, [convertSetting.crossFadeEndRate])
2023-01-08 10:18:20 +03:00
// 105 AdvancedSetting
useEffect(() => {
clientState.setVoiceChangerMode(advancedSetting.voiceChangerMode)
2023-01-08 14:28:57 +03:00
}, [advancedSetting.voiceChangerMode])
// Model Load
useEffect(() => {
const loadModel = () => {
return async () => {
if (!serverSetting.pyTorchModel && !serverSetting.onnxModel) {
alert("PyTorchモデルとONNXモデルのどちらか一つ以上指定する必要があります。")
return
}
if (!serverSetting.configFile) {
alert("Configファイルを指定する必要があります。")
return
}
setUploadProgress(0)
setIsUploading(true)
const models = [serverSetting.pyTorchModel, serverSetting.onnxModel].filter(x => { return x != null }) as File[]
for (let i = 0; i < models.length; i++) {
const progRate = 1 / models.length
const progOffset = 100 * i * progRate
await clientState.uploadFile(models[i], (progress: number, end: boolean) => {
// console.log(progress * progRate + progOffset, end, progRate,)
setUploadProgress(progress * progRate + progOffset)
})
}
await clientState.uploadFile(serverSetting.configFile, (progress: number, end: boolean) => {
console.log(progress, end)
})
await clientState.loadModel(serverSetting.configFile, serverSetting.pyTorchModel, serverSetting.onnxModel)
setUploadProgress(0)
setIsUploading(false)
}
}
setLoadModelFunc(loadModel)
}, [serverSetting.configFile, serverSetting.pyTorchModel, serverSetting.onnxModel])
2023-01-07 14:07:39 +03:00
const voiceChangerSetting = useMemo(() => {
return (
<>
<div className="body-row left-padding-1">
<div className="body-section-title">Virtual Microphone</div>
</div>
{serverControl.serverControl}
{serverSetting.serverSetting}
{deviceSetting.deviceSetting}
{speakerSetting.speakerSetting}
{convertSetting.convertSetting}
{advancedSetting.advancedSetting}
</>
)
}, [serverControl.serverControl,
serverSetting.serverSetting,
deviceSetting.deviceSetting,
speakerSetting.speakerSetting,
convertSetting.convertSetting,
advancedSetting.advancedSetting])
return {
voiceChangerSetting,
}
}