import { F0Detector } from "@dannadori/voice-changer-client-js";
import React, { useEffect, useMemo, useState } from "react"
import { useAppState } from "./001_provider/001_AppStateProvider";
import { AnimationTypes, HeaderButton, HeaderButtonProps } from "./components/101_HeaderButton";

export type QualityControlState = {
    qualityControl: JSX.Element;
}

// Request microphone permission once (so device labels become readable), then
// enumerate the available audio output devices.
const reloadDevices = async () => {
    try {
        const ms = await navigator.mediaDevices.getUserMedia({ video: false, audio: true });
        ms.getTracks().forEach(x => { x.stop() })
    } catch (e) {
        console.warn("Enumerate device error::", e)
    }
    const mediaDeviceInfos = await navigator.mediaDevices.enumerateDevices();
    const audioOutputs = mediaDeviceInfos.filter(x => { return x.kind == "audiooutput" })
    return audioOutputs
}

export const useQualityControl = (): QualityControlState => {
    const appState = useAppState()

    // Accordion toggle button shown in the section header.
    const accodionButton = useMemo(() => {
        const accodionButtonProps: HeaderButtonProps = {
            stateControlCheckbox: appState.frontendManagerState.stateControls.openQualityControlCheckbox,
            tooltip: "Open/Close",
            onIcon: ["fas", "caret-up"],
            offIcon: ["fas", "caret-up"],
            animation: AnimationTypes.spinner,
            tooltipClass: "tooltip-right",
        };
        return <HeaderButton {...accodionButtonProps}></HeaderButton>;
    }, []);

    const [recording, setRecording] = useState<boolean>(false)
    const [outputAudioDeviceInfo, setOutputAudioDeviceInfo] = useState<MediaDeviceInfo[]>([])
    const [audioOutputForGUI, setAudioOutputForGUI] = useState<string>("default")

    useEffect(() => {
        const initialize = async () => {
            const audioInfo = await reloadDevices()
            setOutputAudioDeviceInfo(audioInfo)
        }
        initialize()
    }, [])

    // NOTE: the layout classNames below (body-row, body-item-title, ...) are an
    // assumption following the body-* naming used by the element ids referenced
    // in this file; align them with the project's stylesheet if they differ.
    const noiseControlRow = useMemo(() => {
        return (
            <div className="body-row left-padding-1 guided">
                <div className="body-item-title left-padding-1">Noise Suppression</div>
                <div>
                    <input type="checkbox" checked={appState.clientSetting.clientSetting.echoCancel} onChange={(e) => {
                        appState.clientSetting.updateClientSetting({ ...appState.clientSetting.clientSetting, echoCancel: e.target.checked })
                    }} /> echo cancel
                </div>
                <div>
                    <input type="checkbox" checked={appState.clientSetting.clientSetting.noiseSuppression} onChange={(e) => {
                        appState.clientSetting.updateClientSetting({ ...appState.clientSetting.clientSetting, noiseSuppression: e.target.checked })
                    }} /> suppression1
                </div>
                <div>
                    <input type="checkbox" checked={appState.clientSetting.clientSetting.noiseSuppression2} onChange={(e) => {
                        appState.clientSetting.updateClientSetting({ ...appState.clientSetting.clientSetting, noiseSuppression2: e.target.checked })
                    }} /> suppression2
                </div>
            </div>
        )
    }, [
        appState.clientSetting.clientSetting.echoCancel,
        appState.clientSetting.clientSetting.noiseSuppression,
        appState.clientSetting.clientSetting.noiseSuppression2,
        appState.clientSetting.updateClientSetting
    ])

    const gainControlRow = useMemo(() => {
        return (
            <div className="body-row left-padding-1 guided">
                <div className="body-item-title left-padding-1">Gain Control</div>
                <div>
                    {/* Input / output gain sliders. The range bounds (0.0 - 1.0, step 0.1) are assumed. */}
                    <span className="body-item-input-slider-label">in</span>
                    <input type="range" className="body-item-input-slider" min="0.0" max="1.0" step="0.1" value={appState.clientSetting.clientSetting.inputGain} onChange={(e) => {
                        appState.clientSetting.updateClientSetting({ ...appState.clientSetting.clientSetting, inputGain: Number(e.target.value) })
                    }}></input>
                    <span className="body-item-input-slider-val">{appState.clientSetting.clientSetting.inputGain}</span>
                </div>
                <div>
                    <span className="body-item-input-slider-label">out</span>
                    <input type="range" className="body-item-input-slider" min="0.0" max="1.0" step="0.1" value={appState.clientSetting.clientSetting.outputGain} onChange={(e) => {
                        appState.clientSetting.updateClientSetting({ ...appState.clientSetting.clientSetting, outputGain: Number(e.target.value) })
                    }}></input>
                    <span className="body-item-input-slider-val">{appState.clientSetting.clientSetting.outputGain}</span>
                </div>
            </div>
        )
    }, [
        appState.clientSetting.clientSetting.inputGain,
        appState.clientSetting.clientSetting.outputGain,
        appState.clientSetting.updateClientSetting
    ])

    const f0DetectorRow = useMemo(() => {
        const desc = { "harvest": "High Quality", "dio": "Light Weight" }
        return (
            <div className="body-row left-padding-1 guided">
                <div className="body-item-title left-padding-1">F0 Detector</div>
                <div className="body-select-container">
                    {/* F0 (pitch) extraction algorithm. Options are rebuilt here from the
                        F0Detector values exported by the client library. */}
                    <select className="body-select" value={appState.serverSetting.serverSetting.f0Detector} onChange={(e) => {
                        appState.serverSetting.updateServerSettings({ ...appState.serverSetting.serverSetting, f0Detector: e.target.value as F0Detector })
                    }}>
                        {
                            Object.values(F0Detector).map(x => {
                                // @ts-ignore: desc only covers the known detector keys
                                return <option key={x} value={x}>{x}({desc[x]})</option>
                            })
                        }
                    </select>
                </div>
            </div>
        )
    }, [appState.serverSetting.serverSetting.f0Detector, appState.serverSetting.updateServerSettings])

    // Recording / analysis of the server-side input and output audio (Analyzer section).
    const recordIORow = useMemo(() => {
        const onRecordStartClicked = async () => {
            setRecording(true)
            await appState.serverSetting.updateServerSettings({ ...appState.serverSetting.serverSetting, recordIO: 1 })
        }
        const onRecordStopClicked = async () => {
            setRecording(false)
            await appState.serverSetting.updateServerSettings({ ...appState.serverSetting.serverSetting, recordIO: 0 })

            // set wav (input)
            const wavInput = document.getElementById("body-wav-container-wav-input") as HTMLAudioElement
            wavInput.src = "/tmp/in.wav?" + new Date().getTime()
            wavInput.controls = true
            // @ts-ignore
            wavInput.setSinkId(audioOutputForGUI)

            // set wav (output)
            const wavOutput = document.getElementById("body-wav-container-wav-output") as HTMLAudioElement
            wavOutput.src = "/tmp/out.wav?" + new Date().getTime()
            wavOutput.controls = true
            // @ts-ignore
            wavOutput.setSinkId(audioOutputForGUI)
        }
        const onRecordAnalizeClicked = async () => {
            if (appState.frontendManagerState.isConverting) {
                alert("Please stop voice conversion. Analysis and voice conversion cannot run at the same time.")
                return
            }
            appState.frontendManagerState.setIsAnalyzing(true)
            await appState.serverSetting.updateServerSettings({ ...appState.serverSetting.serverSetting, recordIO: 2 })

            // set spectrogram (dio)
            const imageDio = document.getElementById("body-image-container-img-dio") as HTMLImageElement
            imageDio.src = "/tmp/analyze-dio.png?" + new Date().getTime()
            imageDio.style.width = "100%"

            // set spectrogram (harvest)
            const imageHarvest = document.getElementById("body-image-container-img-harvest") as HTMLImageElement
            imageHarvest.src = "/tmp/analyze-harvest.png?" + new Date().getTime()
            imageHarvest.style.width = "100%"
            appState.frontendManagerState.setIsAnalyzing(false)
        }

        const startClassName = recording ? "body-button-active" : "body-button-stanby"
        const stopClassName = recording ? "body-button-stanby" : "body-button-active"
        const analyzeClassName = appState.frontendManagerState.isAnalyzing ? "body-button-active" : "body-button-stanby"
        const analyzeLabel = appState.frontendManagerState.isAnalyzing ? "wait..." : "Analyze"

        return (
            <>
                <div className="body-row left-padding-1 guided">
                    <div className="body-item-title left-padding-1">Analyzer(Experimental)</div>
                </div>
                <div className="body-row left-padding-1 guided">
                    <div className="body-item-title left-padding-2">Sampling</div>
                    <div className="body-button-container">
                        <div onClick={onRecordStartClicked} className={startClassName}>Start</div>
                        <div onClick={onRecordStopClicked} className={stopClassName}>Stop</div>
                        {/* Analyze button is disabled for now.
                        <div onClick={onRecordAnalizeClicked} className={analyzeClassName}>{analyzeLabel}</div>
                        */}
                    </div>
                </div>
                <div className="body-row left-padding-1 guided">
                    <div className="body-item-title left-padding-2">Play</div>
                    <div>
                        {/* Output device used for playback of the recorded wavs (via setSinkId above). */}
                        <select className="body-select" value={audioOutputForGUI} onChange={(e) => { setAudioOutputForGUI(e.target.value) }}>
                            {
                                outputAudioDeviceInfo.map(x => {
                                    return <option key={x.deviceId} value={x.deviceId}>{x.label}</option>
                                })
                            }
                        </select>
                    </div>
                </div>
                {/*
                <div className="body-row left-padding-1 guided">
                    <div className="body-item-title left-padding-2">Input</div>
                </div>
                <div className="body-row left-padding-1 guided">
                    <div className="body-item-title left-padding-2">Output</div>
                </div>
                */}
                <div className="body-row left-padding-1 guided">
                    <div className="body-item-title left-padding-2">Input</div>
                    <div className="body-wav-container">
                        {/* src and controls are filled in by onRecordStopClicked via getElementById */}
                        <audio id="body-wav-container-wav-input" src=""></audio>
                    </div>
                </div>
                <div className="body-row left-padding-1 guided">
                    <div className="body-item-title left-padding-2">Output</div>
                    <div className="body-wav-container">
                        <audio id="body-wav-container-wav-output" src=""></audio>
                    </div>
                </div>
                {/* Spectrogram display is disabled together with the Analyze button.
                <div className="body-row left-padding-1 guided">
                    <div className="body-item-title left-padding-2">Spectrogram</div>
                    <div className="body-image-container">
                        <div>PyWorld Dio</div>
                        <img id="body-image-container-img-dio" src="" alt="" />
                        <div>PyWorld Harvest</div>
                        <img id="body-image-container-img-harvest" src="" alt="" />
                    </div>
                </div>
                */}
            </>
        )
    }, [appState.serverSetting.serverSetting.recordIO, appState.serverSetting.updateServerSettings, outputAudioDeviceInfo, audioOutputForGUI, appState.frontendManagerState.isAnalyzing, appState.frontendManagerState.isConverting])

    // Compose the individual rows into the section body.
    const QualityControlContent = useMemo(() => {
        return (
            <>
                {noiseControlRow}
                {gainControlRow}
                {f0DetectorRow}
                {recordIORow}
            </>
        )
    }, [noiseControlRow, gainControlRow, f0DetectorRow, recordIORow])

    // Collapsible section: hidden checkbox trigger + clickable header + content.
    const qualityControl = useMemo(() => {
        return (
            <>
                {appState.frontendManagerState.stateControls.openQualityControlCheckbox.trigger}
                <div className="partition">
                    <div className="partition-header">
                        <span className="caret">
                            {accodionButton}
                        </span>
                        <span className="title" onClick={() => {
                            appState.frontendManagerState.stateControls.openQualityControlCheckbox.updateState(!appState.frontendManagerState.stateControls.openQualityControlCheckbox.checked())
                        }}>
                            Quality Control
                        </span>
                    </div>
                    <div className="partition-content">
                        {QualityControlContent}
                    </div>
                </div>
            </>
        )
    }, [QualityControlContent])

    return {
        qualityControl,
    }
}
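
// ---------------------------------------------------------------------------
// Usage sketch (illustrative, not part of the component above): a parent
// component consumes the hook and renders the returned element. Only the
// { qualityControl: JSX.Element } return shape comes from this module; the
// wrapping markup below is an assumption.
//
//   const QualityControlSection = () => {
//       const { qualityControl } = useQualityControl()
//       return <div>{qualityControl}</div>
//   }
// ---------------------------------------------------------------------------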