WIP: refactoring...

wataru 2023-02-19 14:20:37 +09:00
parent e1d2660a59
commit 12e717d1b7
14 changed files with 451 additions and 757 deletions

File diff suppressed because one or more lines are too long

View File

@@ -1,4 +1,4 @@
-import { OnnxExecutionProvider, Framework, fileSelector } from "@dannadori/voice-changer-client-js"
+import { OnnxExecutionProvider, Framework, fileSelector, Correspondence } from "@dannadori/voice-changer-client-js"
import React, { useState } from "react"
import { useMemo } from "react"
import { useAppState } from "./001_provider/001_AppStateProvider";
@@ -84,10 +84,27 @@ export const useModelSettingArea = (): ServerSettingState => {
}
const onCorrespondenceFileLoadClicked = async () => {
const file = await fileSelector("")
-appState.clientSetting.setCorrespondences(file)
+const correspondenceText = await file.text()
+const cors = correspondenceText.split("\n").map(line => {
+const items = line.split("|")
+if (items.length != 3) {
+console.warn("Invalid Correspondence Line:", line)
+return null
+} else {
+const cor: Correspondence = {
+sid: Number(items[0]),
+correspondence: Number(items[1]),
+dirname: items[2]
+}
+return cor
+}
+}).filter(x => { return x != null }) as Correspondence[]
+appState.serverSetting.updateServerSettings({ ...appState.serverSetting.serverSetting, correspondences: cors })
}
const onCorrespondenceFileClearClicked = () => {
-appState.clientSetting.setCorrespondences(null)
+appState.serverSetting.updateServerSettings({ ...appState.serverSetting.serverSetting, correspondences: [] })
}
const onModelUploadClicked = async () => {
@@ -101,7 +118,7 @@ export const useModelSettingArea = (): ServerSettingState => {
const configFilenameText = appState.serverSetting.fileUploadSetting.configFile?.filename || appState.serverSetting.fileUploadSetting.configFile?.file?.name || ""
const onnxModelFilenameText = appState.serverSetting.fileUploadSetting.onnxModel?.filename || appState.serverSetting.fileUploadSetting.onnxModel?.file?.name || ""
const pyTorchFilenameText = appState.serverSetting.fileUploadSetting.pyTorchModel?.filename || appState.serverSetting.fileUploadSetting.pyTorchModel?.file?.name || ""
-const correspondenceFileText = appState.clientSetting.setting.correspondences ? JSON.stringify(appState.clientSetting.setting.correspondences.map(x => { return x.dirname })) : ""
+const correspondenceFileText = appState.serverSetting.serverSetting.correspondences ? JSON.stringify(appState.serverSetting.serverSetting.correspondences.map(x => { return x.dirname })) : ""
return (
<>
@@ -185,7 +202,9 @@ export const useModelSettingArea = (): ServerSettingState => {
appState.serverSetting.loadModel,
appState.serverSetting.isUploading,
appState.serverSetting.uploadProgress,
-appState.clientSetting.setting.correspondences,
+appState.serverSetting.serverSetting.correspondences,
+appState.serverSetting.updateServerSettings,
+appState.serverSetting.setFileUploadSetting,
showPyTorch])
const frameworkRow = useMemo(() => {
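
Note: the reworked onCorrespondenceFileLoadClicked handler above reads the selected correspondence file as plain text with one sid|correspondence|dirname entry per line and pushes the parsed list into the server settings. A minimal standalone sketch of that parsing, mirroring the Correspondence shape this commit adds to const.ts (the sample input values are illustrative only):

// Mirrors the Correspondence type introduced later in this commit (const.ts).
type Correspondence = {
    sid: number,
    correspondence: number,
    dirname: string,
}

// Parse "sid|correspondence|dirname" lines; malformed lines are warned about and dropped,
// just like the handler in the hunk above.
const parseCorrespondences = (text: string): Correspondence[] => {
    return text.split("\n").map(line => {
        const items = line.split("|")
        if (items.length != 3) {
            console.warn("Invalid Correspondence Line:", line)
            return null
        }
        return { sid: Number(items[0]), correspondence: Number(items[1]), dirname: items[2] }
    }).filter((x): x is Correspondence => x != null)
}

// Illustrative usage:
console.log(parseCorrespondences("107|435.13|TARGET_A\n108|392.41|TARGET_B"))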

View File

@@ -1,4 +1,4 @@
-import { fileSelectorAsDataURL, ServerAudioDevice, useIndexedDB } from "@dannadori/voice-changer-client-js"
+import { fileSelectorAsDataURL, useIndexedDB } from "@dannadori/voice-changer-client-js"
import React, { useEffect, useMemo, useRef, useState } from "react"
import { AUDIO_ELEMENT_FOR_PLAY_RESULT, AUDIO_ELEMENT_FOR_TEST_CONVERTED, AUDIO_ELEMENT_FOR_TEST_CONVERTED_ECHOBACK, AUDIO_ELEMENT_FOR_TEST_ORIGINAL, INDEXEDDB_KEY_AUDIO_OUTPUT } from "./const"
import { useAppState } from "./001_provider/001_AppStateProvider";
@@ -60,7 +60,6 @@ export const useDeviceSetting = (): DeviceSettingState => {
const [inputAudioDeviceInfo, setInputAudioDeviceInfo] = useState<MediaDeviceInfo[]>([])
const [outputAudioDeviceInfo, setOutputAudioDeviceInfo] = useState<MediaDeviceInfo[]>([])
-const [serverInputAudioDeviceInfo, setServerInputAudioDeviceInfo] = useState<ServerAudioDevice[]>([])
const [audioInputForGUI, setAudioInputForGUI] = useState<string>("none")
const [audioOutputForGUI, setAudioOutputForGUI] = useState<string>("none")
@@ -79,30 +78,30 @@ export const useDeviceSetting = (): DeviceSettingState => {
const audioInfo = await reloadDevices()
setInputAudioDeviceInfo(audioInfo[0])
setOutputAudioDeviceInfo(audioInfo[1])
-if (useServerMicrophone) {
-try {
-const serverDevices = await appState.serverSetting.getServerDevices()
-setServerInputAudioDeviceInfo(serverDevices.audio_input_devices)
-} catch (e) {
-console.warn(e)
-}
-}
+// if (useServerMicrophone) {
+// try {
+// const serverDevices = await appState.serverSetting.getServerDevices()
+// setServerInputAudioDeviceInfo(serverDevices.audio_input_devices)
+// } catch (e) {
+// console.warn(e)
+// }
+// }
}
initialize()
}, [useServerMicrophone])
// Apply the cached settings (this probably also fires when a setting is changed, but that should not cause any problems)
useEffect(() => {
-if (typeof appState.clientSetting.setting.audioInput == "string") {
+if (typeof appState.clientSetting.clientSetting.audioInput == "string") {
if (inputAudioDeviceInfo.find(x => {
// console.log("COMPARE:", x.deviceId, appState.clientSetting.setting.audioInput)
-return x.deviceId == appState.clientSetting.setting.audioInput
+return x.deviceId == appState.clientSetting.clientSetting.audioInput
})) {
-setAudioInputForGUI(appState.clientSetting.setting.audioInput)
+setAudioInputForGUI(appState.clientSetting.clientSetting.audioInput)
}
}
-}, [inputAudioDeviceInfo, appState.clientSetting.setting.audioInput])
+}, [inputAudioDeviceInfo, appState.clientSetting.clientSetting.audioInput])
const audioInputRow = useMemo(() => {
if (useServerMicrophone) {
@@ -126,40 +125,18 @@ export const useDeviceSetting = (): DeviceSettingState => {
)
}, [inputAudioDeviceInfo, audioInputForGUI, useServerMicrophone])
-const audioInputServerRow = useMemo(() => {
-if (!useServerMicrophone) {
-return <></>
-}
-return (
-<div className="body-row split-3-7 left-padding-1 guided">
-<div className="body-item-title left-padding-1">AudioInput(Server)</div>
-<div className="body-select-container">
-<select className="body-select" value={audioInputForGUI} onChange={(e) => {
-setAudioInputForGUI(e.target.value)
-}}>
-{
-serverInputAudioDeviceInfo.map(x => {
-return <option key={x.name} value={x.index}>{x.name}</option>
-})
-}
-</select>
-</div>
-</div>
-)
-}, [serverInputAudioDeviceInfo, audioInputForGUI, useServerMicrophone])
useEffect(() => {
if (audioInputForGUI == "file") {
// file selector (audioMediaInputRow)
} else {
if (!useServerMicrophone) {
-appState.clientSetting.setAudioInput(audioInputForGUI)
+appState.clientSetting.updateClientSetting({ ...appState.clientSetting.clientSetting, audioInput: audioInputForGUI })
} else {
console.log("server mic")
-appState.clientSetting.setAudioInput(null)
+appState.clientSetting.updateClientSetting({ ...appState.clientSetting.clientSetting, audioInput: null })
}
}
-}, [appState.audioContext, audioInputForGUI, appState.clientSetting.setAudioInput])
+}, [appState.audioContext, audioInputForGUI, appState.clientSetting.updateClientSetting])
const audioMediaInputRow = useMemo(() => {
if (audioInputForGUI != "file") {
@@ -184,7 +161,7 @@ export const useDeviceSetting = (): DeviceSettingState => {
const dst = appState.audioContext.createMediaStreamDestination()
audioSrcNode.current.connect(dst)
-appState.clientSetting.setAudioInput(dst.stream)
+appState.clientSetting.updateClientSetting({ ...appState.clientSetting.clientSetting, audioInput: dst.stream })
const audio_echo = document.getElementById(AUDIO_ELEMENT_FOR_TEST_CONVERTED_ECHOBACK) as HTMLAudioElement
audio_echo.srcObject = dst.stream
@@ -222,7 +199,7 @@ export const useDeviceSetting = (): DeviceSettingState => {
</div>
</div>
)
-}, [audioInputForGUI, appState.clientSetting.setAudioInput, fileInputEchoback])
+}, [audioInputForGUI, appState.clientSetting.updateClientSetting, fileInputEchoback])
@@ -345,7 +322,6 @@ export const useDeviceSetting = (): DeviceSettingState => {
<div className="partition-content">
{audioInputRow}
-{audioInputServerRow}
{audioMediaInputRow}
{audioOutputRow}
{audioOutputRecordingRow}
@@ -353,7 +329,7 @@ export const useDeviceSetting = (): DeviceSettingState => {
</div>
</>
)
-}, [audioInputRow, audioInputServerRow, audioMediaInputRow, audioOutputRow, audioOutputRecordingRow, useServerMicrophone])
+}, [audioInputRow, audioMediaInputRow, audioOutputRow, audioOutputRecordingRow, useServerMicrophone])
// Runs when recorded output data (from the worklet) has been stored
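
reloadDevices itself is not part of this diff; a typical implementation of the [inputs, outputs] pair that the initialize() effect above destructures as audioInfo[0] / audioInfo[1] would partition enumerateDevices() results by kind, roughly as in this assumed sketch (not taken from the repository):

// Assumed helper, illustrating the [inputs, outputs] tuple consumed above.
const reloadDevices = async (): Promise<[MediaDeviceInfo[], MediaDeviceInfo[]]> => {
    try {
        // Opening the microphone once (and stopping it immediately) makes device labels available.
        const ms = await navigator.mediaDevices.getUserMedia({ audio: true })
        ms.getTracks().forEach(t => t.stop())
    } catch (e) {
        console.warn("could not open microphone for device enumeration", e)
    }
    const devices = await navigator.mediaDevices.enumerateDevices()
    const audioInputs = devices.filter(d => d.kind == "audioinput")
    const audioOutputs = devices.filter(d => d.kind == "audiooutput")
    return [audioInputs, audioOutputs]
}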

View File

@@ -53,18 +53,18 @@ export const useQualityControl = (): QualityControlState => {
<div className="body-row split-3-2-2-2-1 left-padding-1 guided">
<div className="body-item-title left-padding-1 ">Noise Suppression</div>
<div>
-<input type="checkbox" checked={appState.clientSetting.setting.echoCancel} onChange={(e) => {
-appState.clientSetting.setEchoCancel(e.target.checked)
+<input type="checkbox" checked={appState.clientSetting.clientSetting.echoCancel} onChange={(e) => {
+appState.clientSetting.updateClientSetting({ ...appState.clientSetting.clientSetting, echoCancel: e.target.checked })
}} /> echo cancel
</div>
<div>
-<input type="checkbox" checked={appState.clientSetting.setting.noiseSuppression} onChange={(e) => {
-appState.clientSetting.setNoiseSuppression(e.target.checked)
+<input type="checkbox" checked={appState.clientSetting.clientSetting.noiseSuppression} onChange={(e) => {
+appState.clientSetting.updateClientSetting({ ...appState.clientSetting.clientSetting, noiseSuppression: e.target.checked })
}} /> suppression1
</div>
<div>
-<input type="checkbox" checked={appState.clientSetting.setting.noiseSuppression2} onChange={(e) => {
-appState.clientSetting.setNoiseSuppression2(e.target.checked)
+<input type="checkbox" checked={appState.clientSetting.clientSetting.noiseSuppression2} onChange={(e) => {
+appState.clientSetting.updateClientSetting({ ...appState.clientSetting.clientSetting, noiseSuppression2: e.target.checked })
}} /> suppression2
</div>
<div className="body-button-container">
@@ -72,9 +72,10 @@ export const useQualityControl = (): QualityControlState => {
</div>
)
}, [
-appState.clientSetting.setting.echoCancel, appState.clientSetting.setEchoCancel,
-appState.clientSetting.setting.noiseSuppression, appState.clientSetting.setNoiseSuppression,
-appState.clientSetting.setting.noiseSuppression2, appState.clientSetting.setNoiseSuppression2,
+appState.clientSetting.clientSetting.echoCancel,
+appState.clientSetting.clientSetting.noiseSuppression,
+appState.clientSetting.clientSetting.noiseSuppression2,
+appState.clientSetting.updateClientSetting
])
const gainControlRow = useMemo(() => {
@@ -83,25 +84,26 @@ export const useQualityControl = (): QualityControlState => {
<div className="body-item-title left-padding-1 ">Gain Control</div>
<div>
<span className="body-item-input-slider-label">in</span>
-<input type="range" className="body-item-input-slider" min="0.0" max="1.0" step="0.1" value={appState.clientSetting.setting.inputGain} onChange={(e) => {
-appState.clientSetting.setInputGain(Number(e.target.value))
+<input type="range" className="body-item-input-slider" min="0.0" max="1.0" step="0.1" value={appState.clientSetting.clientSetting.inputGain} onChange={(e) => {
+appState.clientSetting.updateClientSetting({ ...appState.clientSetting.clientSetting, inputGain: Number(e.target.value) })
}}></input>
-<span className="body-item-input-slider-val">{appState.clientSetting.setting.inputGain}</span>
+<span className="body-item-input-slider-val">{appState.clientSetting.clientSetting.inputGain}</span>
</div>
<div>
<span className="body-item-input-slider-label">out</span>
-<input type="range" className="body-item-input-slider" min="0.0" max="1.0" step="0.1" value={appState.clientSetting.setting.outputGain} onChange={(e) => {
-appState.clientSetting.setOutputGain(Number(e.target.value))
+<input type="range" className="body-item-input-slider" min="0.0" max="1.0" step="0.1" value={appState.clientSetting.clientSetting.outputGain} onChange={(e) => {
+appState.clientSetting.updateClientSetting({ ...appState.clientSetting.clientSetting, outputGain: Number(e.target.value) })
}}></input>
-<span className="body-item-input-slider-val">{appState.clientSetting.setting.outputGain}</span>
+<span className="body-item-input-slider-val">{appState.clientSetting.clientSetting.outputGain}</span>
</div>
<div className="body-button-container">
</div>
</div>
)
}, [
-appState.clientSetting.setting.inputGain, appState.clientSetting.setting.inputGain,
-appState.clientSetting.setting.outputGain, appState.clientSetting.setOutputGain,
+appState.clientSetting.clientSetting.inputGain,
+appState.clientSetting.clientSetting.outputGain,
+appState.clientSetting.updateClientSetting
])
const f0DetectorRow = useMemo(() => {

View File

@@ -33,10 +33,10 @@ export const useSpeakerSetting = () => {
const calcDefaultF0Factor = (srcId: number, dstId: number) => {
-const src = appState.clientSetting.setting.correspondences?.find(x => {
+const src = appState.serverSetting.serverSetting.correspondences?.find(x => {
return x.sid == srcId
})
-const dst = appState.clientSetting.setting.correspondences?.find(x => {
+const dst = appState.serverSetting.serverSetting.correspondences?.find(x => {
return x.sid == dstId
})
const recommendedF0Factor = dst && src ? dst.correspondence / src.correspondence : 0
@@ -46,7 +46,7 @@ export const useSpeakerSetting = () => {
console.log()
const srcIdRow = useMemo(() => {
-const selected = appState.clientSetting.setting.correspondences?.find(x => {
+const selected = appState.serverSetting.serverSetting.correspondences?.find(x => {
return x.sid == appState.serverSetting.serverSetting.srcId
})
return (
@@ -58,7 +58,7 @@ export const useSpeakerSetting = () => {
appState.serverSetting.updateServerSettings({ ...appState.serverSetting.serverSetting, srcId: Number(e.target.value), f0Factor: recF0 })
}}>
{
-appState.clientSetting.setting.correspondences?.map(x => {
+appState.serverSetting.serverSetting.correspondences?.map(x => {
return <option key={x.sid} value={x.sid}>{x.dirname}({x.sid})</option>
})
@@ -71,10 +71,10 @@ export const useSpeakerSetting = () => {
<div className="body-item-text"></div>
</div>
)
-}, [appState.clientSetting.setting.speakers, appState.serverSetting.serverSetting.srcId, appState.serverSetting.serverSetting.dstId, appState.clientSetting.setting.correspondences, appState.serverSetting.updateServerSettings])
+}, [appState.serverSetting.serverSetting.srcId, appState.serverSetting.serverSetting.dstId, appState.serverSetting.serverSetting.correspondences, appState.serverSetting.updateServerSettings])
const dstIdRow = useMemo(() => {
-const selected = appState.clientSetting.setting.correspondences?.find(x => {
+const selected = appState.serverSetting.serverSetting.correspondences?.find(x => {
return x.sid == appState.serverSetting.serverSetting.dstId
})
return (
@@ -90,7 +90,7 @@ export const useSpeakerSetting = () => {
// appState.clientSetting.setting.speakers.map(x => {
// return <option key={x.id} value={x.id}>{x.name}({x.id})</option>
// })
-appState.clientSetting.setting.correspondences?.map(x => {
+appState.serverSetting.serverSetting.correspondences?.map(x => {
return <option key={x.sid} value={x.sid}>{x.dirname}({x.sid})</option>
})
}
@@ -102,60 +102,60 @@ export const useSpeakerSetting = () => {
<div className="body-item-text"></div>
</div>
)
-}, [appState.clientSetting.setting.speakers, appState.serverSetting.serverSetting.srcId, appState.serverSetting.serverSetting.dstId, appState.clientSetting.setting.correspondences, appState.serverSetting.updateServerSettings])
+}, [appState.serverSetting.serverSetting.srcId, appState.serverSetting.serverSetting.dstId, appState.serverSetting.serverSetting.correspondences, appState.serverSetting.updateServerSettings])
-const editSpeakerIdMappingRow = useMemo(() => {
-const onSetSpeakerMappingClicked = async () => {
-const targetId = editSpeakerTargetId
-const targetName = editSpeakerTargetName
-const targetSpeaker = appState.clientSetting.setting.speakers.find(x => { return x.id == targetId })
-if (targetSpeaker) {
-if (targetName.length == 0) { // Delete
-const newSpeakers = appState.clientSetting.setting.speakers.filter(x => { return x.id != targetId })
-appState.clientSetting.setSpeakers(newSpeakers)
-} else { // Update
-targetSpeaker.name = targetName
-appState.clientSetting.setSpeakers([...appState.clientSetting.setting.speakers])
-}
-} else {
-if (targetName.length == 0) { // Noop
-} else {// add
-appState.clientSetting.setting.speakers.push({
-id: targetId,
-name: targetName
-})
-appState.clientSetting.setSpeakers([...appState.clientSetting.setting.speakers])
-}
-}
-}
-return (
-<div className="body-row split-3-1-2-4 left-padding-1 guided">
-<div className="body-item-title left-padding-1">Edit Speaker Mapping</div>
-<div className="body-input-container">
-<input type="number" min={1} max={256} step={1} value={editSpeakerTargetId} onChange={(e) => {
-const id = Number(e.target.value)
-setEditSpeakerTargetId(id)
-setEditSpeakerTargetName(appState.clientSetting.setting.speakers.find(x => { return x.id == id })?.name || "")
-}} />
-</div>
-<div className="body-input-container">
-<input type="text" value={editSpeakerTargetName} onChange={(e) => {
-setEditSpeakerTargetName(e.target.value)
-}} />
-</div>
-<div className="body-button-container">
-<div className="body-button" onClick={onSetSpeakerMappingClicked}>set</div>
-</div>
-</div>
-)
-}, [appState.clientSetting.setting.speakers, editSpeakerTargetId, editSpeakerTargetName])
+// const editSpeakerIdMappingRow = useMemo(() => {
+// const onSetSpeakerMappingClicked = async () => {
+// const targetId = editSpeakerTargetId
+// const targetName = editSpeakerTargetName
+// const targetSpeaker = appState.clientSetting.setting.speakers.find(x => { return x.id == targetId })
+// if (targetSpeaker) {
+// if (targetName.length == 0) { // Delete
+// const newSpeakers = appState.clientSetting.setting.speakers.filter(x => { return x.id != targetId })
+// appState.clientSetting.setSpeakers(newSpeakers)
+// } else { // Update
+// targetSpeaker.name = targetName
+// appState.clientSetting.setSpeakers([...appState.clientSetting.setting.speakers])
+// }
+// } else {
+// if (targetName.length == 0) { // Noop
+// } else {// add
+// appState.clientSetting.setting.speakers.push({
+// id: targetId,
+// name: targetName
+// })
+// appState.clientSetting.setSpeakers([...appState.clientSetting.setting.speakers])
+// }
+// }
+// }
+// return (
+// <div className="body-row split-3-1-2-4 left-padding-1 guided">
+// <div className="body-item-title left-padding-1">Edit Speaker Mapping</div>
+// <div className="body-input-container">
+// <input type="number" min={1} max={256} step={1} value={editSpeakerTargetId} onChange={(e) => {
+// const id = Number(e.target.value)
+// setEditSpeakerTargetId(id)
+// setEditSpeakerTargetName(appState.clientSetting.setting.speakers.find(x => { return x.id == id })?.name || "")
+// }} />
+// </div>
+// <div className="body-input-container">
+// <input type="text" value={editSpeakerTargetName} onChange={(e) => {
+// setEditSpeakerTargetName(e.target.value)
+// }} />
+// </div>
+// <div className="body-button-container">
+// <div className="body-button" onClick={onSetSpeakerMappingClicked}>set</div>
+// </div>
+// </div>
+// )
+// }, [appState.clientSetting.setting.speakers, editSpeakerTargetId, editSpeakerTargetName])
const f0FactorRow = useMemo(() => {
-const src = appState.clientSetting.setting.correspondences?.find(x => {
+const src = appState.serverSetting.serverSetting.correspondences?.find(x => {
return x.sid == appState.serverSetting.serverSetting.srcId
})
-const dst = appState.clientSetting.setting.correspondences?.find(x => {
+const dst = appState.serverSetting.serverSetting.correspondences?.find(x => {
return x.sid == appState.serverSetting.serverSetting.dstId
})
@@ -174,7 +174,7 @@ export const useSpeakerSetting = () => {
<div className="body-item-text">recommend: {recommendedF0Factor.toFixed(1)}</div>
</div>
)
-}, [appState.serverSetting.serverSetting.f0Factor, appState.serverSetting.serverSetting.srcId, appState.serverSetting.serverSetting.dstId, appState.clientSetting.setting.correspondences, appState.serverSetting.updateServerSettings])
+}, [appState.serverSetting.serverSetting.f0Factor, appState.serverSetting.serverSetting.srcId, appState.serverSetting.serverSetting.dstId, appState.serverSetting.serverSetting.correspondences, appState.serverSetting.updateServerSettings])
const speakerSetting = useMemo(() => {
return (
@@ -198,7 +198,7 @@ export const useSpeakerSetting = () => {
</div>
</>
)
-}, [srcIdRow, dstIdRow, editSpeakerIdMappingRow, f0FactorRow])
+}, [srcIdRow, dstIdRow, f0FactorRow])
return {
speakerSetting,

View File

@@ -25,18 +25,18 @@ export const useConvertSetting = (): ConvertSettingState => {
<div className="body-row split-3-2-1-4 left-padding-1 guided">
<div className="body-item-title left-padding-1">Input Chunk Num(128sample/chunk)</div>
<div className="body-input-container">
-<input type="number" min={1} max={256} step={1} value={appState.clientSetting.setting.inputChunkNum} onChange={(e) => {
-appState.clientSetting.setInputChunkNum(Number(e.target.value))
+<input type="number" min={1} max={256} step={1} value={appState.streamerSetting.audioStreamerSetting.inputChunkNum} onChange={(e) => {
+appState.streamerSetting.setSetting({ ...appState.streamerSetting.audioStreamerSetting, inputChunkNum: Number(e.target.value) })
}} />
</div>
<div className="body-item-text">
-<div>buff: {(appState.clientSetting.setting.inputChunkNum * 128 * 1000 / 24000).toFixed(1)}ms</div>
+<div>buff: {(appState.streamerSetting.audioStreamerSetting.inputChunkNum * 128 * 1000 / 24000).toFixed(1)}ms</div>
</div>
<div className="body-item-text"></div>
</div>
)
-}, [appState.clientSetting.setting.inputChunkNum, appState.clientSetting.setInputChunkNum])
+}, [appState.streamerSetting.audioStreamerSetting.inputChunkNum, appState.streamerSetting.setSetting])
const gpuRow = useMemo(() => {
return (
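
The buff: readout in the hunk above is plain arithmetic: each chunk carries 128 samples and the stream is processed at 24000Hz, so the buffered time in milliseconds is inputChunkNum * 128 * 1000 / 24000. A small sketch of that calculation with an illustrative chunk count:

// Buffering latency implied by the Input Chunk Num row above (128 samples per chunk at 24kHz).
const chunkBufferMs = (inputChunkNum: number): number => {
    return inputChunkNum * 128 * 1000 / 24000
}

console.log(chunkBufferMs(48).toFixed(1)) // "256.0" (48 is an illustrative value)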

View File

@@ -1,4 +1,4 @@
-import { BufferSize, CrossFadeOverlapSize, DownSamplingMode, InputSampleRate, Protocol, SampleRate, VoiceChangerMode } from "@dannadori/voice-changer-client-js"
+import { BufferSize, CrossFadeOverlapSize, DownSamplingMode, InputSampleRate, Protocol, SampleRate } from "@dannadori/voice-changer-client-js"
import React, { useMemo } from "react"
import { useAppState } from "./001_provider/001_AppStateProvider";
import { AnimationTypes, HeaderButton, HeaderButtonProps } from "./components/101_HeaderButton";
@@ -30,24 +30,24 @@ export const useAdvancedSetting = (): AdvancedSettingState => {
<div className="body-row split-3-3-4 left-padding-1 guided">
<div className="body-item-title left-padding-1">MMVC Server</div>
<div className="body-input-container">
-<input type="text" defaultValue={appState.clientSetting.setting.mmvcServerUrl} id="mmvc-server-url" className="body-item-input" />
+<input type="text" defaultValue={appState.streamerSetting.audioStreamerSetting.serverUrl} id="mmvc-server-url" className="body-item-input" />
</div>
<div className="body-button-container">
<div className="body-button" onClick={onSetServerClicked}>set</div>
</div>
</div>
)
-}, [appState.clientSetting.setting.mmvcServerUrl, appState.clientSetting.setServerUrl])
+}, [appState.streamerSetting.audioStreamerSetting.serverUrl, appState.clientSetting.setServerUrl])
const protocolRow = useMemo(() => {
const onProtocolChanged = async (val: Protocol) => {
-appState.clientSetting.setProtocol(val)
+appState.streamerSetting.setSetting({ ...appState.streamerSetting.audioStreamerSetting, protocol: val })
}
return (
<div className="body-row split-3-7 left-padding-1 guided">
<div className="body-item-title left-padding-1">Protocol</div>
<div className="body-select-container">
-<select className="body-select" value={appState.clientSetting.setting.protocol} onChange={(e) => {
+<select className="body-select" value={appState.streamerSetting.audioStreamerSetting.protocol} onChange={(e) => {
onProtocolChanged(e.target.value as
Protocol)
}}>
@@ -60,7 +60,7 @@ export const useAdvancedSetting = (): AdvancedSettingState => {
</div>
</div>
)
-}, [appState.clientSetting.setting.protocol, appState.clientSetting.setProtocol])
+}, [appState.streamerSetting.audioStreamerSetting.protocol, appState.streamerSetting.setSetting])
const sampleRateRow = useMemo(() => {
@@ -68,8 +68,8 @@ export const useAdvancedSetting = (): AdvancedSettingState => {
<div className="body-row split-3-7 left-padding-1 guided">
<div className="body-item-title left-padding-1">Sample Rate</div>
<div className="body-select-container">
-<select className="body-select" value={appState.clientSetting.setting.sampleRate} onChange={(e) => {
-appState.clientSetting.setSampleRate(Number(e.target.value) as SampleRate)
+<select className="body-select" value={appState.clientSetting.clientSetting.sampleRate} onChange={(e) => {
+appState.clientSetting.updateClientSetting({ ...appState.clientSetting.clientSetting, sampleRate: Number(e.target.value) as SampleRate })
}}>
{
Object.values(SampleRate).map(x => {
@@ -80,15 +80,15 @@ export const useAdvancedSetting = (): AdvancedSettingState => {
</div>
</div>
)
-}, [appState.clientSetting.setting.sampleRate, appState.clientSetting.setSampleRate])
+}, [appState.clientSetting.clientSetting.sampleRate, appState.clientSetting.updateClientSetting])
const sendingSampleRateRow = useMemo(() => {
return (
<div className="body-row split-3-7 left-padding-1 guided">
<div className="body-item-title left-padding-1">Sending Sample Rate</div>
<div className="body-select-container">
-<select className="body-select" value={appState.clientSetting.setting.sendingSampleRate} onChange={(e) => {
-appState.clientSetting.setSendingSampleRate(Number(e.target.value) as InputSampleRate)
+<select className="body-select" value={appState.streamerSetting.audioStreamerSetting.sendingSampleRate} onChange={(e) => {
+appState.streamerSetting.setSetting({ ...appState.streamerSetting.audioStreamerSetting, sendingSampleRate: Number(e.target.value) as InputSampleRate })
appState.serverSetting.updateServerSettings({ ...appState.serverSetting.serverSetting, inputSampleRate: Number(e.target.value) as InputSampleRate })
}}>
{
@@ -100,7 +100,7 @@ export const useAdvancedSetting = (): AdvancedSettingState => {
</div>
</div>
)
-}, [appState.clientSetting.setting.sendingSampleRate, appState.clientSetting.setSendingSampleRate, appState.serverSetting.updateServerSettings])
+}, [appState.streamerSetting.audioStreamerSetting.sendingSampleRate, appState.streamerSetting.setSetting, appState.serverSetting.updateServerSettings])
const bufferSizeRow = useMemo(() => {
return (
@@ -108,8 +108,8 @@ export const useAdvancedSetting = (): AdvancedSettingState => {
<div className="body-row split-3-7 left-padding-1 guided">
<div className="body-item-title left-padding-1">Buffer Size</div>
<div className="body-select-container">
-<select className="body-select" value={appState.clientSetting.setting.bufferSize} onChange={(e) => {
-appState.clientSetting.setBufferSize(Number(e.target.value) as BufferSize)
+<select className="body-select" value={appState.clientSetting.clientSetting.bufferSize} onChange={(e) => {
+appState.clientSetting.updateClientSetting({ ...appState.clientSetting.clientSetting, bufferSize: Number(e.target.value) as BufferSize })
}}>
{
Object.values(BufferSize).map(x => {
@@ -120,7 +120,7 @@ export const useAdvancedSetting = (): AdvancedSettingState => {
</div>
</div>
)
-}, [appState.clientSetting.setting.bufferSize, appState.clientSetting.setBufferSize])
+}, [appState.clientSetting.clientSetting.bufferSize, appState.clientSetting.updateClientSetting])
const crossFadeOverlapSizeRow = useMemo(() => {
@@ -169,33 +169,13 @@ export const useAdvancedSetting = (): AdvancedSettingState => {
}, [appState.serverSetting.serverSetting.crossFadeEndRate, appState.serverSetting.updateServerSettings])
-const voiceChangeModeRow = useMemo(() => {
-return (
-<div className="body-row split-3-7 left-padding-1 guided">
-<div className="body-item-title left-padding-1 ">Voice Change Mode</div>
-<div className="body-select-container">
-<select className="body-select" value={appState.clientSetting.setting.voiceChangerMode} onChange={(e) => {
-appState.clientSetting.setVoiceChangerMode(e.target.value as VoiceChangerMode)
-}}>
-{
-Object.values(VoiceChangerMode).map(x => {
-return <option key={x} value={x}>{x}</option>
-})
-}
-</select>
-</div>
-</div>
-)
-}, [appState.clientSetting.setting.voiceChangerMode, appState.clientSetting.setVoiceChangerMode])
const downSamplingModeRow = useMemo(() => {
return (
<div className="body-row split-3-7 left-padding-1 guided">
<div className="body-item-title left-padding-1 ">DownSamplingMode</div>
<div className="body-select-container">
-<select className="body-select" value={appState.clientSetting.setting.downSamplingMode} onChange={(e) => {
-appState.clientSetting.setDownSamplingMode(e.target.value as DownSamplingMode)
+<select className="body-select" value={appState.streamerSetting.audioStreamerSetting.downSamplingMode} onChange={(e) => {
+appState.streamerSetting.setSetting({ ...appState.streamerSetting.audioStreamerSetting, downSamplingMode: e.target.value as DownSamplingMode })
}}>
{
Object.values(DownSamplingMode).map(x => {
@@ -206,7 +186,7 @@ export const useAdvancedSetting = (): AdvancedSettingState => {
</div>
</div>
)
-}, [appState.clientSetting.setting.downSamplingMode, appState.clientSetting.setDownSamplingMode])
+}, [appState.streamerSetting.audioStreamerSetting.downSamplingMode, appState.streamerSetting.setSetting])
const workletSettingRow = useMemo(() => {
@@ -268,15 +248,13 @@ export const useAdvancedSetting = (): AdvancedSettingState => {
{crossFadeOffsetRateRow}
{crossFadeEndRateRow}
<div className="body-row divider"></div>
-{voiceChangeModeRow}
-<div className="body-row divider"></div>
{workletSettingRow}
<div className="body-row divider"></div>
{downSamplingModeRow}
</>
)
-}, [mmvcServerUrlRow, protocolRow, sampleRateRow, sendingSampleRateRow, bufferSizeRow, crossFadeOverlapSizeRow, crossFadeOffsetRateRow, crossFadeEndRateRow, voiceChangeModeRow, workletSettingRow, downSamplingModeRow])
+}, [mmvcServerUrlRow, protocolRow, sampleRateRow, sendingSampleRateRow, bufferSizeRow, crossFadeOverlapSizeRow, crossFadeOffsetRateRow, crossFadeEndRateRow, workletSettingRow, downSamplingModeRow])
const advancedSetting = useMemo(() => {

View File

@@ -1,11 +1,11 @@
import { io, Socket } from "socket.io-client";
import { DefaultEventsMap } from "@socket.io/component-emitter";
import { Duplex, DuplexOptions } from "readable-stream";
-import { DefaultVoiceChangerClientSetting, DownSamplingMode, Protocol, SendingSampleRate, VoiceChangerMode, VOICE_CHANGER_CLIENT_EXCEPTION } from "./const";
+import { AudioStreamerSetting, DefaultAudioStreamerSetting, DownSamplingMode, VOICE_CHANGER_CLIENT_EXCEPTION } from "./const";
export type Callbacks = {
-onVoiceReceived: (voiceChangerMode: VoiceChangerMode, data: ArrayBuffer) => void
+onVoiceReceived: (data: ArrayBuffer) => void
}
export type AudioStreamerListeners = {
notifySendBufferingTime: (time: number) => void
@@ -13,34 +13,17 @@ export type AudioStreamerListeners = {
notifyException: (code: VOICE_CHANGER_CLIENT_EXCEPTION, message: string) => void
}
-export type AudioStreamerSettings = {
-serverUrl: string;
-protocol: Protocol;
-inputChunkNum: number;
-voiceChangerMode: VoiceChangerMode;
-}
export class AudioStreamer extends Duplex {
+private setting: AudioStreamerSetting = DefaultAudioStreamerSetting
private callbacks: Callbacks
private audioStreamerListeners: AudioStreamerListeners
-private protocol: Protocol = "sio"
-private serverUrl = ""
private socket: Socket<DefaultEventsMap, DefaultEventsMap> | null = null
-private voiceChangerMode: VoiceChangerMode = "realtime"
-private inputChunkNum = 128
private requestChunks: ArrayBuffer[] = []
-private recordChunks: ArrayBuffer[] = []
-private isRecording = false
// performance monitor
private bufferStart = 0;
-// Flags
-// private downSamplingMode: DownSamplingMode = DownSamplingMode.decimate
-private downSamplingMode: DownSamplingMode = DownSamplingMode.average
-private sendingSampleRate: number = DefaultVoiceChangerClientSetting.sendingSampleRate
constructor(callbacks: Callbacks, audioStreamerListeners: AudioStreamerListeners, options?: DuplexOptions) {
super(options);
this.callbacks = callbacks
@@ -51,13 +34,13 @@ export class AudioStreamer extends Duplex {
if (this.socket) {
this.socket.close()
}
-if (this.protocol === "sio") {
-this.socket = io(this.serverUrl + "/test");
+if (this.setting.protocol === "sio") {
+this.socket = io(this.setting.serverUrl + "/test");
this.socket.on('connect_error', (err) => {
this.audioStreamerListeners.notifyException(VOICE_CHANGER_CLIENT_EXCEPTION.ERR_SIO_CONNECT_FAILED, `[SIO] connection failed ${err}`)
})
this.socket.on('connect', () => {
-console.log(`[SIO] connect to ${this.serverUrl}`)
+console.log(`[SIO] connect to ${this.setting.serverUrl}`)
console.log(`[SIO] ${this.socket?.id}`)
});
this.socket.on('response', (response: any[]) => {
@@ -67,66 +50,40 @@ export class AudioStreamer extends Duplex {
if (result.byteLength < 128 * 2) {
this.audioStreamerListeners.notifyException(VOICE_CHANGER_CLIENT_EXCEPTION.ERR_SIO_INVALID_RESPONSE, `[SIO] received data is too short ${result.byteLength}`)
} else {
-this.callbacks.onVoiceReceived(this.voiceChangerMode, response[1])
+this.callbacks.onVoiceReceived(response[1])
this.audioStreamerListeners.notifyResponseTime(responseTime)
}
});
}
}
// Option Change
-setServerUrl = (serverUrl: string) => {
-this.serverUrl = serverUrl
-console.log(`[AudioStreamer] Server Setting:${this.serverUrl} ${this.protocol}`)
-this.createSocketIO()// mode check is done in the method.
-}
-setProtocol = (mode: Protocol) => {
-this.protocol = mode
-console.log(`[AudioStreamer] Server Setting:${this.serverUrl} ${this.protocol}`)
-this.createSocketIO()// mode check is done in the method.
-}
-setInputChunkNum = (num: number) => {
-this.inputChunkNum = num
-}
-setVoiceChangerMode = (val: VoiceChangerMode) => {
-this.voiceChangerMode = val
-}
-// set Flags
-setDownSamplingMode = (val: DownSamplingMode) => {
-this.downSamplingMode = val
-}
-setSendingSampleRate = (val: SendingSampleRate) => {
-this.sendingSampleRate = val
-}
-getSettings = (): AudioStreamerSettings => {
-return {
-serverUrl: this.serverUrl,
-protocol: this.protocol,
-inputChunkNum: this.inputChunkNum,
-voiceChangerMode: this.voiceChangerMode
-}
-}
+updateSetting = (setting: AudioStreamerSetting) => {
+console.log(`[AudioStreamer] Updating AudioStreamer Setting,`, this.setting, setting)
+let recreateSocketIoRequired = false
+if (this.setting.serverUrl != setting.serverUrl || this.setting.protocol != setting.protocol) {
+recreateSocketIoRequired = true
+}
+this.setting = setting
+if (recreateSocketIoRequired) {
+this.createSocketIO()
+}
+}
+getSettings = (): AudioStreamerSetting => {
+return this.setting
+}
getSocketId = () => {
return this.socket?.id
}
// Main Process
//// Pipe from mic stream
_write = (chunk: AudioBuffer, _encoding: any, callback: any) => {
const buffer = chunk.getChannelData(0);
-// console.log("SAMPLERATE:", chunk.sampleRate, chunk.numberOfChannels, chunk.length, buffer)
-if (this.voiceChangerMode === "realtime") {
-this._write_realtime(buffer)
-} else {
-this._write_record(buffer)
-}
+this._write_realtime(buffer)
callback();
}
@@ -163,9 +120,9 @@ export class AudioStreamer extends Duplex {
private _write_realtime = async (buffer: Float32Array) => {
let downsampledBuffer: Float32Array | null = null
-if (this.sendingSampleRate == 48000) {
+if (this.setting.sendingSampleRate == 48000) {
downsampledBuffer = buffer
-} else if (this.downSamplingMode == DownSamplingMode.decimate) {
+} else if (this.setting.downSamplingMode == DownSamplingMode.decimate) {
//////// (Kind 1) Decimation //////////
// bufferSize samples of 48kHz data come in.
//// The input arrives at 48000Hz, so decimate it down to 24000Hz.
@@ -178,7 +135,7 @@ export class AudioStreamer extends Duplex {
} else {
//////// (Kind 2) Averaging //////////
// downsampledBuffer = this._averageDownsampleBuffer(buffer, 48000, 24000)
-downsampledBuffer = this._averageDownsampleBuffer(buffer, 48000, this.sendingSampleRate)
+downsampledBuffer = this._averageDownsampleBuffer(buffer, 48000, this.setting.sendingSampleRate)
}
// Float to signed16
@@ -194,7 +151,7 @@ export class AudioStreamer extends Duplex {
// Manage 256 bytes (the sample count left after decimating the minimum buffer size of 256, x 2 bytes) as one chunk
// const chunkByteSize = 256 // (const.ts ★1)
// const chunkByteSize = 256 * 2 // (const.ts ★1)
-const chunkByteSize = (256 * 2) * (this.sendingSampleRate / 48000) // (const.ts ★1)
+const chunkByteSize = (256 * 2) * (this.setting.sendingSampleRate / 48000) // (const.ts ★1)
for (let i = 0; i < arrayBuffer.byteLength / chunkByteSize; i++) {
const ab = arrayBuffer.slice(i * chunkByteSize, (i + 1) * chunkByteSize)
this.requestChunks.push(ab)
@@ -202,7 +159,7 @@ export class AudioStreamer extends Duplex {
//// If the request buffer does not yet hold the configured number of chunks to send, stop here.
-if (this.requestChunks.length < this.inputChunkNum) {
+if (this.requestChunks.length < this.setting.inputChunkNum) {
return
}
@@ -227,51 +184,9 @@ export class AudioStreamer extends Duplex {
this.bufferStart = Date.now()
}
-private _write_record = (buffer: Float32Array) => {
-if (!this.isRecording) { return }
-// buffer(for48Khz)x16bit * chunksize / 2(for24Khz)
-const sendBuffer = new ArrayBuffer(buffer.length * 2 / 2);
-const sendDataView = new DataView(sendBuffer);
-for (var i = 0; i < buffer.length; i++) {
-if (i % 2 == 0) {
-let s = Math.max(-1, Math.min(1, buffer[i]));
-s = s < 0 ? s * 0x8000 : s * 0x7FFF
-sendDataView.setInt16(i, s, true);
-// if (i % 3000 === 0) {
-// console.log("buffer_converting", s, buffer[i])
-// }
-}
-}
-this.recordChunks.push(sendBuffer)
-}
-// Trigger for Near Realtime mode
-sendRecordedData = () => {
-const length = this.recordChunks.reduce((prev, cur) => {
-return prev + cur.byteLength
-}, 0)
-const newBuffer = new Uint8Array(length);
-this.recordChunks.reduce((prev, cur) => {
-newBuffer.set(new Uint8Array(cur), prev)
-return prev + cur.byteLength
-}, 0)
-this.sendBuffer(newBuffer)
-}
-startRecord = () => {
-this.recordChunks = []
-this.isRecording = true
-}
-stopRecord = () => {
-this.isRecording = false
-}
private sendBuffer = async (newBuffer: Uint8Array) => {
const timestamp = Date.now()
-if (this.protocol === "sio") {
+if (this.setting.protocol === "sio") {
if (!this.socket) {
console.warn(`sio is not initialized`)
return
@@ -282,14 +197,14 @@ export class AudioStreamer extends Duplex {
newBuffer.buffer]);
} else {
const res = await postVoice(
-this.serverUrl + "/test",
+this.setting.serverUrl + "/test",
timestamp,
newBuffer.buffer)
if (res.byteLength < 128 * 2) {
this.audioStreamerListeners.notifyException(VOICE_CHANGER_CLIENT_EXCEPTION.ERR_REST_INVALID_RESPONSE, `[REST] received data is too short ${res.byteLength}`)
} else {
-this.callbacks.onVoiceReceived(this.voiceChangerMode, res)
+this.callbacks.onVoiceReceived(res)
this.audioStreamerListeners.notifyResponseTime(Date.now() - timestamp)
}
}
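
The recurring pattern in this file (and in VoiceChangerClient below) is to replace the per-field setters with one setting object and a single update method that compares old and new values and only rebuilds expensive resources, here the socket.io connection, when a relevant field changed. A generic sketch of that pattern with illustrative names rather than the library's actual API:

// Illustrative sketch of the "update the whole setting object, recreate only when needed"
// pattern adopted by AudioStreamer.updateSetting above.
type ConnectionSetting = {
    serverUrl: string
    protocol: "sio" | "rest"
    inputChunkNum: number
}

class SettingHolder {
    private setting: ConnectionSetting = { serverUrl: "", protocol: "sio", inputChunkNum: 48 }

    updateSetting = (setting: ConnectionSetting) => {
        // Only connection-relevant fields force a reconnect; inputChunkNum does not.
        const recreateRequired =
            this.setting.serverUrl != setting.serverUrl ||
            this.setting.protocol != setting.protocol
        this.setting = setting
        if (recreateRequired) {
            this.recreateConnection()
        }
    }

    private recreateConnection = () => {
        console.log(`reconnecting to ${this.setting.serverUrl} over ${this.setting.protocol}`)
    }
}

// Illustrative usage: the first call reconnects (serverUrl changed from the default ""),
// the second does not (only inputChunkNum changed).
const holder = new SettingHolder()
holder.updateSetting({ serverUrl: "http://example.com", protocol: "sio", inputChunkNum: 48 })
holder.updateSetting({ serverUrl: "http://example.com", protocol: "sio", inputChunkNum: 24 })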

View File

@ -3,7 +3,7 @@ import { VoiceChangerWorkletNode, VoiceChangerWorkletListener } from "./VoiceCha
import workerjs from "raw-loader!../worklet/dist/index.js"; import workerjs from "raw-loader!../worklet/dist/index.js";
import { VoiceFocusDeviceTransformer, VoiceFocusTransformDevice } from "amazon-chime-sdk-js"; import { VoiceFocusDeviceTransformer, VoiceFocusTransformDevice } from "amazon-chime-sdk-js";
import { createDummyMediaStream, validateUrl } from "./util"; import { createDummyMediaStream, validateUrl } from "./util";
import { BufferSize, DefaultVoiceChangerClientSetting, DownSamplingMode, Protocol, SendingSampleRate, ServerSettingKey, VoiceChangerMode, VOICE_CHANGER_CLIENT_EXCEPTION, WorkletSetting } from "./const"; import { AudioStreamerSetting, DefaultVoiceChangerClientSetting, ServerSettingKey, VoiceChangerClientSetting, VOICE_CHANGER_CLIENT_EXCEPTION, WorkletSetting } from "./const";
import MicrophoneStream from "microphone-stream"; import MicrophoneStream from "microphone-stream";
import { AudioStreamer, Callbacks, AudioStreamerListeners } from "./AudioStreamer"; import { AudioStreamer, Callbacks, AudioStreamerListeners } from "./AudioStreamer";
import { ServerConfigurator } from "./ServerConfigurator"; import { ServerConfigurator } from "./ServerConfigurator";
@ -31,41 +31,19 @@ export class VoiceChangerClient {
private vcNode!: VoiceChangerWorkletNode private vcNode!: VoiceChangerWorkletNode
private currentMediaStreamAudioDestinationNode!: MediaStreamAudioDestinationNode private currentMediaStreamAudioDestinationNode!: MediaStreamAudioDestinationNode
private inputGain = 1.0
private promiseForInitialize: Promise<void> private promiseForInitialize: Promise<void>
private _isVoiceChanging = false private _isVoiceChanging = false
private setting: VoiceChangerClientSetting = DefaultVoiceChangerClientSetting
private sslCertified: string[] = [] private sslCertified: string[] = []
private sem = new BlockingQueue<number>(); private sem = new BlockingQueue<number>();
private callbacks: Callbacks = { private callbacks: Callbacks = {
onVoiceReceived: (voiceChangerMode: VoiceChangerMode, data: ArrayBuffer): void => { onVoiceReceived: (data: ArrayBuffer): void => {
// console.log(voiceChangerMode, data) this.vcNode.postReceivedVoice(data)
if (voiceChangerMode === "realtime") {
this.vcNode.postReceivedVoice(data)
return
}
// For Near Realtime Mode
console.log("near realtime mode")
const i16Data = new Int16Array(data)
const f32Data = new Float32Array(i16Data.length)
// https://stackoverflow.com/questions/35234551/javascript-converting-from-int16-to-float32
i16Data.forEach((x, i) => {
const float = (x >= 0x8000) ? -(0x10000 - x) / 0x8000 : x / 0x7FFF;
f32Data[i] = float
})
const source = this.ctx.createBufferSource();
const buffer = this.ctx.createBuffer(1, f32Data.length, 24000);
buffer.getChannelData(0).set(f32Data);
source.buffer = buffer;
source.start();
source.connect(this.currentMediaStreamAudioDestinationNode)
} }
} }
@ -81,12 +59,11 @@ export class VoiceChangerClient {
this.vcNode = new VoiceChangerWorkletNode(this.ctx, voiceChangerWorkletListener); // vc node this.vcNode = new VoiceChangerWorkletNode(this.ctx, voiceChangerWorkletListener); // vc node
this.currentMediaStreamAudioDestinationNode = this.ctx.createMediaStreamDestination() // output node this.currentMediaStreamAudioDestinationNode = this.ctx.createMediaStreamDestination() // output node
this.outputGainNode = this.ctx.createGain() this.outputGainNode = this.ctx.createGain()
this.outputGainNode.gain.value = this.setting.outputGain
this.vcNode.connect(this.outputGainNode) // vc node -> output node this.vcNode.connect(this.outputGainNode) // vc node -> output node
this.outputGainNode.connect(this.currentMediaStreamAudioDestinationNode) this.outputGainNode.connect(this.currentMediaStreamAudioDestinationNode)
// (vc nodeにはaudio streamerのcallbackでデータが投げ込まれる) // (vc nodeにはaudio streamerのcallbackでデータが投げ込まれる)
this.audioStreamer = new AudioStreamer(this.callbacks, audioStreamerListeners, { objectMode: true, }) this.audioStreamer = new AudioStreamer(this.callbacks, audioStreamerListeners, { objectMode: true, })
this.audioStreamer.setInputChunkNum(DefaultVoiceChangerClientSetting.inputChunkNum)
this.audioStreamer.setVoiceChangerMode(DefaultVoiceChangerClientSetting.voiceChangerMode)
if (this.vfEnable) { if (this.vfEnable) {
this.vf = await VoiceFocusDeviceTransformer.create({ variant: 'c20' }) this.vf = await VoiceFocusDeviceTransformer.create({ variant: 'c20' })
@ -106,7 +83,6 @@ export class VoiceChangerClient {
this.sem.enqueue(num + 1); this.sem.enqueue(num + 1);
}; };
isInitialized = async () => { isInitialized = async () => {
if (this.promiseForInitialize) { if (this.promiseForInitialize) {
await this.promiseForInitialize await this.promiseForInitialize
@ -114,15 +90,15 @@ export class VoiceChangerClient {
return true return true
} }
///////////////////////////////////////////////////// /////////////////////////////////////////////////////
// オペレーション // オペレーション
///////////////////////////////////////////////////// /////////////////////////////////////////////////////
/// Operations /// /// Operations ///
setup = async (input: string | MediaStream | null, bufferSize: BufferSize, echoCancel: boolean = true, noiseSuppression: boolean = true, noiseSuppression2: boolean = false) => { // setup = async (input: string | MediaStream | null, bufferSize: BufferSize, echoCancel: boolean = true, noiseSuppression: boolean = true, noiseSuppression2: boolean = false) => {
setup = async () => {
const lockNum = await this.lock() const lockNum = await this.lock()
console.log(`Input Setup=> echo: ${echoCancel}, noise1: ${noiseSuppression}, noise2: ${noiseSuppression2}`) console.log(`Input Setup=> echo: ${this.setting.echoCancel}, noise1: ${this.setting.noiseSuppression}, noise2: ${this.setting.noiseSuppression2}`)
// condition check // condition check
if (!this.vcNode) { if (!this.vcNode) {
console.warn("vc node is not initialized.") console.warn("vc node is not initialized.")
@ -137,7 +113,7 @@ export class VoiceChangerClient {
} }
//// Input デバイスがnullの時はmicStreamを止めてリターン //// Input デバイスがnullの時はmicStreamを止めてリターン
if (!input) { if (!this.setting.audioInput) {
console.log(`Input Setup=> client mic is disabled.`) console.log(`Input Setup=> client mic is disabled.`)
if (this.micStream) { if (this.micStream) {
this.micStream.pauseRecording() this.micStream.pauseRecording()
@ -146,16 +122,16 @@ export class VoiceChangerClient {
return return
} }
if (typeof input == "string") { if (typeof this.setting.audioInput == "string") {
this.currentMediaStream = await navigator.mediaDevices.getUserMedia({ this.currentMediaStream = await navigator.mediaDevices.getUserMedia({
audio: { audio: {
deviceId: input, deviceId: this.setting.audioInput,
channelCount: 1, channelCount: 1,
sampleRate: 48000, sampleRate: this.setting.sampleRate,
sampleSize: 16, sampleSize: 16,
autoGainControl: false, autoGainControl: false,
echoCancellation: echoCancel, echoCancellation: this.setting.echoCancel,
noiseSuppression: noiseSuppression noiseSuppression: this.setting.noiseSuppression
} }
}) })
// this.currentMediaStream.getAudioTracks().forEach((x) => { // this.currentMediaStream.getAudioTracks().forEach((x) => {
@ -164,7 +140,7 @@ export class VoiceChangerClient {
// console.log("MIC Setting(setting)", x.getSettings()) // console.log("MIC Setting(setting)", x.getSettings())
// }) // })
} else { } else {
this.currentMediaStream = input this.currentMediaStream = this.setting.audioInput
} }
// create mic stream // create mic stream
@ -175,15 +151,15 @@ export class VoiceChangerClient {
} }
this.micStream = new MicrophoneStream({ this.micStream = new MicrophoneStream({
objectMode: true, objectMode: true,
bufferSize: bufferSize, bufferSize: this.setting.bufferSize,
context: this.ctx context: this.ctx
}) })
// connect nodes. // connect nodes.
this.currentMediaStreamAudioSourceNode = this.ctx.createMediaStreamSource(this.currentMediaStream) this.currentMediaStreamAudioSourceNode = this.ctx.createMediaStreamSource(this.currentMediaStream)
this.inputGainNode = this.ctx.createGain() this.inputGainNode = this.ctx.createGain()
this.inputGainNode.gain.value = this.inputGain this.inputGainNode.gain.value = this.setting.inputGain
this.currentMediaStreamAudioSourceNode.connect(this.inputGainNode) this.currentMediaStreamAudioSourceNode.connect(this.inputGainNode)
if (this.currentDevice && noiseSuppression2) { if (this.currentDevice && this.setting.noiseSuppression2) {
this.currentDevice.chooseNewInnerDevice(this.currentMediaStream) this.currentDevice.chooseNewInnerDevice(this.currentMediaStream)
const voiceFocusNode = await this.currentDevice.createAudioNode(this.ctx); // vf node const voiceFocusNode = await this.currentDevice.createAudioNode(this.ctx); // vf node
this.inputGainNode.connect(voiceFocusNode.start) // input node -> vf node this.inputGainNode.connect(voiceFocusNode.start) // input node -> vf node
@@ -242,12 +218,32 @@ export class VoiceChangerClient {
} }
} }
} }
this.audioStreamer.setServerUrl(url) this.audioStreamer.updateSetting({ ...this.audioStreamer.getSettings(), serverUrl: url })
this.configurator.setServerUrl(url) this.configurator.setServerUrl(url)
} }
updateClientSetting = (setting: VoiceChangerClientSetting) => {
console.log(`[VoiceChangerClient] Updating Client Setting`, this.setting, setting)
let reconstructInputRequired = false
if (
this.setting.audioInput != setting.audioInput ||
this.setting.bufferSize != setting.bufferSize ||
this.setting.echoCancel != setting.echoCancel ||
this.setting.noiseSuppression != setting.noiseSuppression ||
this.setting.noiseSuppression2 != setting.noiseSuppression2 ||
this.setting.sampleRate != setting.sampleRate
) {
reconstructInputRequired = true
}
this.setting = setting
if (reconstructInputRequired) {
this.setup()
}
}
setInputGain = (val: number) => { setInputGain = (val: number) => {
this.inputGain = val this.setting.inputGain = val
if (!this.inputGainNode) { if (!this.inputGainNode) {
return return
} }
@@ -291,23 +287,8 @@ export class VoiceChangerClient {
//## Audio Streamer ##// //## Audio Streamer ##//
setProtocol = (mode: Protocol) => { updateAudioStreamerSetting = (setting: AudioStreamerSetting) => {
this.audioStreamer.setProtocol(mode) this.audioStreamer.updateSetting(setting)
}
setInputChunkNum = (num: number) => {
this.audioStreamer.setInputChunkNum(num)
}
setVoiceChangerMode = (val: VoiceChangerMode) => {
this.audioStreamer.setVoiceChangerMode(val)
}
//// Audio Streamer Flag
setDownSamplingMode = (val: DownSamplingMode) => {
this.audioStreamer.setDownSamplingMode(val)
}
setSendingSampleRate = (val: SendingSampleRate) => {
this.audioStreamer.setSendingSampleRate(val)
} }
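The per-field audio-streamer setters above are collapsed into a single updateAudioStreamerSetting call, and the client-side input options now travel through updateClientSetting. A rough usage sketch (not part of this commit; client and current stand for an initialized VoiceChangerClient and its current VoiceChangerClientSetting, and "default" is an example device id):

// audioInput is one of the fields compared in updateClientSetting, so this call
// sets reconstructInputRequired and setup() rebuilds the mic stream.
client.updateClientSetting({ ...current, audioInput: "default" })

// inputGain is not in that comparison; setInputGain applies it directly to the gain node.
client.setInputGain(0.8)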

View File

@@ -41,8 +41,6 @@ export const F0Detector = {
} as const } as const
export type F0Detector = typeof F0Detector[keyof typeof F0Detector] export type F0Detector = typeof F0Detector[keyof typeof F0Detector]
export const ServerSettingKey = { export const ServerSettingKey = {
"srcId": "srcId", "srcId": "srcId",
"dstId": "dstId", "dstId": "dstId",
@@ -64,6 +62,16 @@ export const ServerSettingKey = {
export type ServerSettingKey = typeof ServerSettingKey[keyof typeof ServerSettingKey] export type ServerSettingKey = typeof ServerSettingKey[keyof typeof ServerSettingKey]
export type Speaker = {
"id": number,
"name": string,
}
export type Correspondence = {
"sid": number,
"correspondence": number,
"dirname": string
}
export type VoiceChangerServerSetting = { export type VoiceChangerServerSetting = {
srcId: number, srcId: number,
dstId: number, dstId: number,
@@ -81,6 +89,7 @@ export type VoiceChangerServerSetting = {
recordIO: number // 0:off, 1:on recordIO: number // 0:off, 1:on
inputSampleRate: InputSampleRate inputSampleRate: InputSampleRate
} }
export type ServerInfo = VoiceChangerServerSetting & { export type ServerInfo = VoiceChangerServerSetting & {
@@ -89,6 +98,9 @@ export type ServerInfo = VoiceChangerServerSetting & {
pyTorchModelFile: string, pyTorchModelFile: string,
onnxModelFile: string, onnxModelFile: string,
onnxExecutionProviders: OnnxExecutionProvider[] onnxExecutionProviders: OnnxExecutionProvider[]
speakers: Speaker[],
correspondences: Correspondence[],
} }
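ServerInfo now carries the speakers and correspondences reported by the server. A hedged sketch of how a caller might index them; the helper name is illustrative and not part of this commit:

// Map each source speaker id to the directory name of its model.
const dirnameBySid = (info: ServerInfo): Map<number, string> => {
    const bySid = new Map<number, string>()
    info.correspondences.forEach(c => bySid.set(c.sid, c.dirname))
    return bySid
}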
export const DefaultServerSetting: ServerInfo = { export const DefaultServerSetting: ServerInfo = {
@@ -113,122 +125,9 @@ export const DefaultServerSetting: ServerInfo = {
configFile: "", configFile: "",
pyTorchModelFile: "", pyTorchModelFile: "",
onnxModelFile: "", onnxModelFile: "",
onnxExecutionProviders: [] onnxExecutionProviders: [],
}
//
///////////////////////
// Worklet settings
///////////////////////
///////////////////////
// Client settings
///////////////////////
export type VoiceChangerClientSetting = {
audioInput: string | MediaStream | null,
mmvcServerUrl: string,
protocol: Protocol,
sampleRate: SampleRate, // 48000Hz
sendingSampleRate: SendingSampleRate,
bufferSize: BufferSize, // 256, 512, 1024, 2048, 4096, 8192, 16384 (for mic stream)
inputChunkNum: number, // n of (256 x n) for send buffer
speakers: Speaker[],
correspondences: Correspondence[],
echoCancel: boolean,
noiseSuppression: boolean,
noiseSuppression2: boolean,
voiceChangerMode: VoiceChangerMode,
downSamplingMode: DownSamplingMode,
inputGain: number
outputGain: number
}
export type WorkletSetting = {
numTrancateTreshold: number,
volTrancateThreshold: number,
volTrancateLength: number
}
export type Speaker = {
"id": number,
"name": string,
}
export type Correspondence = {
"sid": number,
"correspondence": number,
"dirname": string
}
export type ServerAudioDevice = {
kind: string,
index: number,
name: string,
hostAPI: string
}
export type ServerAudioDevices = {
audio_input_devices: ServerAudioDevice[]
audio_output_devices: ServerAudioDevice[]
}
// Consts
export const Protocol = {
"sio": "sio",
"rest": "rest",
} as const
export type Protocol = typeof Protocol[keyof typeof Protocol]
export const VoiceChangerMode = {
"realtime": "realtime",
"near-realtime": "near-realtime",
} as const
export type VoiceChangerMode = typeof VoiceChangerMode[keyof typeof VoiceChangerMode]
export const DownSamplingMode = {
"decimate": "decimate",
"average": "average"
} as const
export type DownSamplingMode = typeof DownSamplingMode[keyof typeof DownSamplingMode]
export const SampleRate = {
"48000": 48000,
} as const
export type SampleRate = typeof SampleRate[keyof typeof SampleRate]
export const SendingSampleRate = {
"48000": 48000,
"24000": 24000
} as const
export type SendingSampleRate = typeof SendingSampleRate[keyof typeof SendingSampleRate]
export const BufferSize = {
"256": 256,
"512": 512,
"1024": 1024,
"2048": 2048,
"4096": 4096,
"8192": 8192,
"16384": 16384
} as const
export type BufferSize = typeof BufferSize[keyof typeof BufferSize]
// Defaults
export const DefaultVoiceChangerClientSetting: VoiceChangerClientSetting = {
audioInput: null,
mmvcServerUrl: "",
protocol: "sio",
sampleRate: 48000,
sendingSampleRate: 48000,
bufferSize: 1024,
inputChunkNum: 48,
speakers: [ speakers: [
{ {
"id": 0, "id": 0,
@@ -252,21 +151,108 @@ export const DefaultVoiceChangerClientSetting: VoiceChangerClientSetting = {
} }
], ],
correspondences: [], correspondences: [],
echoCancel: true,
noiseSuppression: true,
noiseSuppression2: false,
voiceChangerMode: "realtime",
downSamplingMode: "average",
inputGain: 1.0,
outputGain: 1.0
} }
///////////////////////
// Worklet settings
///////////////////////
export type WorkletSetting = {
numTrancateTreshold: number,
volTrancateThreshold: number,
volTrancateLength: number
}
export const DefaultWorkletSetting: WorkletSetting = { export const DefaultWorkletSetting: WorkletSetting = {
numTrancateTreshold: 188, numTrancateTreshold: 188,
volTrancateThreshold: 0.0005, volTrancateThreshold: 0.0005,
volTrancateLength: 32 volTrancateLength: 32
} }
///////////////////////
// Audio Streamer settings
///////////////////////
export const Protocol = {
"sio": "sio",
"rest": "rest",
} as const
export type Protocol = typeof Protocol[keyof typeof Protocol]
export const SendingSampleRate = {
"48000": 48000,
"24000": 24000
} as const
export type SendingSampleRate = typeof SendingSampleRate[keyof typeof SendingSampleRate]
export const DownSamplingMode = {
"decimate": "decimate",
"average": "average"
} as const
export type DownSamplingMode = typeof DownSamplingMode[keyof typeof DownSamplingMode]
export type AudioStreamerSetting = {
serverUrl: string,
protocol: Protocol,
sendingSampleRate: SendingSampleRate,
inputChunkNum: number,
downSamplingMode: DownSamplingMode,
}
export const DefaultAudioStreamerSetting: AudioStreamerSetting = {
serverUrl: "",
protocol: "sio",
sendingSampleRate: 48000,
inputChunkNum: 48,
downSamplingMode: "average"
}
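A minimal sketch of the intended update flow, assuming client is a constructed VoiceChangerClient and the server URL is a placeholder: spread the defaults, override only what differs, and hand the whole object to updateAudioStreamerSetting.

const streamerSetting: AudioStreamerSetting = {
    ...DefaultAudioStreamerSetting,
    serverUrl: "http://localhost:18888",  // placeholder URL
    protocol: "rest",                     // e.g. fall back to REST when socket.io is unavailable
    inputChunkNum: 64,
}
client.updateAudioStreamerSetting(streamerSetting)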
///////////////////////
// Client settings
///////////////////////
export const SampleRate = {
"48000": 48000,
} as const
export type SampleRate = typeof SampleRate[keyof typeof SampleRate]
export const BufferSize = {
"256": 256,
"512": 512,
"1024": 1024,
"2048": 2048,
"4096": 4096,
"8192": 8192,
"16384": 16384
} as const
export type BufferSize = typeof BufferSize[keyof typeof BufferSize]
export type VoiceChangerClientSetting = {
audioInput: string | MediaStream | null,
sampleRate: SampleRate, // 48000Hz
bufferSize: BufferSize, // 256, 512, 1024, 2048, 4096, 8192, 16384 (for mic stream)
echoCancel: boolean,
noiseSuppression: boolean,
noiseSuppression2: boolean
inputGain: number
outputGain: number
}
export const DefaultVoiceChangerClientSetting: VoiceChangerClientSetting = {
audioInput: null,
sampleRate: 48000,
bufferSize: 1024,
echoCancel: true,
noiseSuppression: true,
noiseSuppression2: false,
inputGain: 1.0,
outputGain: 1.0
}
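Because audioInput can hold a live MediaStream, a client setting cannot be persisted as-is. A minimal sketch of the normalization the hooks below apply before writing to IndexedDB (the function name is illustrative):

const toStorable = (setting: VoiceChangerClientSetting): VoiceChangerClientSetting => {
    // A MediaStream cannot be serialized into IndexedDB; keep only string device ids.
    return typeof setting.audioInput === "string" ? setting : { ...setting, audioInput: null }
}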
////////////////////////////////////
// Exceptions
////////////////////////////////////
export const VOICE_CHANGER_CLIENT_EXCEPTION = { export const VOICE_CHANGER_CLIENT_EXCEPTION = {
ERR_SIO_CONNECT_FAILED: "ERR_SIO_CONNECT_FAILED", ERR_SIO_CONNECT_FAILED: "ERR_SIO_CONNECT_FAILED",
ERR_SIO_INVALID_RESPONSE: "ERR_SIO_INVALID_RESPONSE", ERR_SIO_INVALID_RESPONSE: "ERR_SIO_INVALID_RESPONSE",
@@ -284,6 +270,7 @@ export const INDEXEDDB_DB_APP_NAME = "INDEXEDDB_KEY_VOICE_CHANGER"
export const INDEXEDDB_DB_NAME = "INDEXEDDB_KEY_VOICE_CHANGER_DB" export const INDEXEDDB_DB_NAME = "INDEXEDDB_KEY_VOICE_CHANGER_DB"
export const INDEXEDDB_KEY_CLIENT = "INDEXEDDB_KEY_VOICE_CHANGER_LIB_CLIENT" export const INDEXEDDB_KEY_CLIENT = "INDEXEDDB_KEY_VOICE_CHANGER_LIB_CLIENT"
export const INDEXEDDB_KEY_SERVER = "INDEXEDDB_KEY_VOICE_CHANGER_LIB_SERVER" export const INDEXEDDB_KEY_SERVER = "INDEXEDDB_KEY_VOICE_CHANGER_LIB_SERVER"
export const INDEXEDDB_KEY_STREAMER = "INDEXEDDB_KEY_VOICE_CHANGER_LIB_STREAMER"
export const INDEXEDDB_KEY_MODEL_DATA = "INDEXEDDB_KEY_VOICE_CHANGER_LIB_MODEL_DATA" export const INDEXEDDB_KEY_MODEL_DATA = "INDEXEDDB_KEY_VOICE_CHANGER_LIB_MODEL_DATA"
export const INDEXEDDB_KEY_WORKLET = "INDEXEDDB_KEY_VOICE_CHANGER_LIB_WORKLET" export const INDEXEDDB_KEY_WORKLET = "INDEXEDDB_KEY_VOICE_CHANGER_LIB_WORKLET"
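Each settings hook caches its state under one of these keys. A sketch of clearing the setting caches together, assuming the async removeItem exposed by the useIndexedDB hook used below:

const clearAllCachedSettings = async (removeItem: (key: string) => Promise<void>) => {
    // Model data (INDEXEDDB_KEY_MODEL_DATA) is intentionally left untouched here.
    for (const key of [INDEXEDDB_KEY_CLIENT, INDEXEDDB_KEY_SERVER, INDEXEDDB_KEY_STREAMER, INDEXEDDB_KEY_WORKLET]) {
        await removeItem(key)
    }
}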

View File

@@ -0,0 +1,77 @@
import { useState, useMemo, useEffect } from "react"
import { INDEXEDDB_KEY_CLIENT, INDEXEDDB_KEY_STREAMER, AudioStreamerSetting, DefaultAudioStreamerSetting } from "../const"
import { VoiceChangerClient } from "../VoiceChangerClient"
import { useIndexedDB } from "./useIndexedDB"
export type UseAudioStreamerSettingProps = {
voiceChangerClient: VoiceChangerClient | null
}
export type AudioStreamerSettingState = {
audioStreamerSetting: AudioStreamerSetting;
clearSetting: () => Promise<void>
setSetting: (setting: AudioStreamerSetting) => void
}
export const useAudioStreamerSetting = (props: UseAudioStreamerSettingProps): AudioStreamerSettingState => {
const [audioStreamerSetting, _setAudioStreamerSetting] = useState<AudioStreamerSetting>(DefaultAudioStreamerSetting)
const { setItem, getItem, removeItem } = useIndexedDB()
// Initialization step 1: load from the DB
useEffect(() => {
const loadCache = async () => {
const setting = await getItem(INDEXEDDB_KEY_STREAMER) as AudioStreamerSetting
if (setting) {
_setAudioStreamerSetting(setting)
}
}
loadCache()
}, [])
// Initialization step 2: apply to the client
useEffect(() => {
if (!props.voiceChangerClient) return
props.voiceChangerClient.setServerUrl(audioStreamerSetting.serverUrl)
props.voiceChangerClient.updateAudioStreamerSetting(audioStreamerSetting)
}, [props.voiceChangerClient])
const clearSetting = async () => {
await removeItem(INDEXEDDB_KEY_STREAMER)
}
//////////////
// Settings
/////////////
// const setServerUrl = useMemo(() => {
// return (url: string) => {
// if (!props.voiceChangerClient) return
// props.voiceChangerClient.setServerUrl(url, true)
// settingRef.current.mmvcServerUrl = url
// setSetting({ ...settingRef.current })
// }
// }, [props.voiceChangerClient])
const setSetting = useMemo(() => {
return (setting: AudioStreamerSetting) => {
if (!props.voiceChangerClient) return
_setAudioStreamerSetting(setting)
setItem(INDEXEDDB_KEY_STREAMER, setting)
props.voiceChangerClient.updateAudioStreamerSetting(setting)
}
}, [props.voiceChangerClient])
console.log("AUDIO STREAMER SETTING", audioStreamerSetting)
return {
audioStreamerSetting,
clearSetting,
setSetting,
}
}
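A hedged sketch of consuming this hook from application code; only the hook surface above comes from this commit, the wrapper itself is illustrative:

import { VoiceChangerClient } from "../VoiceChangerClient"
import { useAudioStreamerSetting } from "./useAudioStreamerSetting"

const useServerUrlField = (client: VoiceChangerClient | null) => {
    const { audioStreamerSetting, setSetting } = useAudioStreamerSetting({ voiceChangerClient: client })
    // Any field change goes through setSetting, which persists the new object
    // and forwards it to the client via updateAudioStreamerSetting.
    const setServerUrl = (serverUrl: string) => setSetting({ ...audioStreamerSetting, serverUrl })
    return { serverUrl: audioStreamerSetting.serverUrl, setServerUrl }
}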

View File

@@ -1,5 +1,6 @@
import { useEffect, useMemo, useRef, useState } from "react" import { useEffect, useMemo, useRef, useState } from "react"
import { VoiceChangerClient } from "../VoiceChangerClient" import { VoiceChangerClient } from "../VoiceChangerClient"
import { AudioStreamerSettingState, useAudioStreamerSetting } from "./useAudioStreamerSetting"
import { ClientSettingState, useClientSetting } from "./useClientSetting" import { ClientSettingState, useClientSetting } from "./useClientSetting"
import { ServerSettingState, useServerSetting } from "./useServerSetting" import { ServerSettingState, useServerSetting } from "./useServerSetting"
import { useWorkletSetting, WorkletSettingState } from "./useWorkletSetting" import { useWorkletSetting, WorkletSettingState } from "./useWorkletSetting"
@@ -13,6 +14,7 @@ export type ClientState = {
// References to the various setting interfaces // References to the various setting interfaces
workletSetting: WorkletSettingState workletSetting: WorkletSettingState
clientSetting: ClientSettingState clientSetting: ClientSettingState
streamerSetting: AudioStreamerSettingState
serverSetting: ServerSettingState serverSetting: ServerSettingState
// Monitoring data // Monitoring data
@@ -45,6 +47,7 @@ export const useClient = (props: UseClientProps): ClientState => {
// (1-2) Various setting interfaces // (1-2) Various setting interfaces
const clientSetting = useClientSetting({ voiceChangerClient, audioContext: props.audioContext }) const clientSetting = useClientSetting({ voiceChangerClient, audioContext: props.audioContext })
const streamerSetting = useAudioStreamerSetting({ voiceChangerClient })
const workletSetting = useWorkletSetting({ voiceChangerClient }) const workletSetting = useWorkletSetting({ voiceChangerClient })
const serverSetting = useServerSetting({ voiceChangerClient }) const serverSetting = useServerSetting({ voiceChangerClient })
@@ -118,10 +121,12 @@ export const useClient = (props: UseClientProps): ClientState => {
await workletSetting.clearSetting() await workletSetting.clearSetting()
await serverSetting.clearSetting() await serverSetting.clearSetting()
} }
console.log("AUDIO STREAMER SETTING USE CLIENT", clientSetting, streamerSetting)
return { return {
// References to the various setting interfaces // References to the various setting interfaces
clientSetting, clientSetting,
streamerSetting,
workletSetting, workletSetting,
serverSetting, serverSetting,
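With streamerSetting added to ClientState, components reach it the same way as the other setting interfaces. A sketch, assuming useClient is called inside a component and that UseClientProps takes the audioContext shown above (imports omitted):

const useProtocolSwitcher = (audioContext: AudioContext) => {
    const clientState = useClient({ audioContext })
    // Switch the transport protocol by pushing a full AudioStreamerSetting object.
    return (protocol: Protocol) =>
        clientState.streamerSetting.setSetting({
            ...clientState.streamerSetting.audioStreamerSetting,
            protocol,
        })
}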

View File

@@ -1,6 +1,6 @@
import { useState, useMemo, useRef, useEffect } from "react" import { useState, useMemo, useEffect } from "react"
import { VoiceChangerClientSetting, Protocol, BufferSize, VoiceChangerMode, SampleRate, Speaker, DefaultVoiceChangerClientSetting, INDEXEDDB_KEY_CLIENT, Correspondence, DownSamplingMode, SendingSampleRate } from "../const" import { VoiceChangerClientSetting, DefaultVoiceChangerClientSetting, INDEXEDDB_KEY_CLIENT } from "../const"
import { VoiceChangerClient } from "../VoiceChangerClient" import { VoiceChangerClient } from "../VoiceChangerClient"
import { useIndexedDB } from "./useIndexedDB" import { useIndexedDB } from "./useIndexedDB"
@@ -10,79 +10,48 @@ export type UseClientSettingProps = {
} }
export type ClientSettingState = { export type ClientSettingState = {
setting: VoiceChangerClientSetting; clientSetting: VoiceChangerClientSetting;
clearSetting: () => Promise<void> clearSetting: () => Promise<void>
setServerUrl: (url: string) => void; setServerUrl: (url: string) => void;
setProtocol: (proto: Protocol) => void; updateClientSetting: (clientSetting: VoiceChangerClientSetting) => void
setAudioInput: (audioInput: string | MediaStream | null) => Promise<void>
setBufferSize: (bufferSize: BufferSize) => Promise<void>
setEchoCancel: (voiceFocus: boolean) => Promise<void>
setNoiseSuppression: (voiceFocus: boolean) => Promise<void>
setNoiseSuppression2: (voiceFocus: boolean) => Promise<void>
setInputChunkNum: (num: number) => void;
setVoiceChangerMode: (mode: VoiceChangerMode) => void
setDownSamplingMode: (mode: DownSamplingMode) => void
setSendingSampleRate: (val: SendingSampleRate) => void
setSampleRate: (num: SampleRate) => void
setSpeakers: (speakers: Speaker[]) => void
setCorrespondences: (file: File | null) => Promise<void>
setInputGain: (val: number) => void
setOutputGain: (val: number) => void
start: () => Promise<void> start: () => Promise<void>
stop: () => Promise<void> stop: () => Promise<void>
reloadClientSetting: () => Promise<void> reloadClientSetting: () => Promise<void>
} }
export const useClientSetting = (props: UseClientSettingProps): ClientSettingState => { export const useClientSetting = (props: UseClientSettingProps): ClientSettingState => {
const settingRef = useRef<VoiceChangerClientSetting>(DefaultVoiceChangerClientSetting) const [clientSetting, setClientSetting] = useState<VoiceChangerClientSetting>(DefaultVoiceChangerClientSetting)
const [setting, _setSetting] = useState<VoiceChangerClientSetting>(settingRef.current)
const { setItem, getItem, removeItem } = useIndexedDB() const { setItem, getItem, removeItem } = useIndexedDB()
// Initialization step 1: load from the DB // Initialization step 1: load from the DB
useEffect(() => { useEffect(() => {
const loadCache = async () => { const loadCache = async () => {
const setting = await getItem(INDEXEDDB_KEY_CLIENT) const setting = await getItem(INDEXEDDB_KEY_CLIENT) as VoiceChangerClientSetting
if (!setting) { console.log("[ClientSetting] Load Setting from db", setting)
// Default settings if (setting.audioInput == "null") {
console.log("No Chache",) setting.audioInput = null
const params = new URLSearchParams(location.search); }
const colab = params.get("colab") if (setting) {
if (colab == "true") { setClientSetting({ ...setting })
settingRef.current.protocol = "rest"
settingRef.current.inputChunkNum = 64
} else {
settingRef.current.protocol = "sio"
settingRef.current.inputChunkNum = 32
}
} else {
settingRef.current = setting as VoiceChangerClientSetting
} }
_setSetting({ ...settingRef.current })
} }
loadCache() loadCache()
}, []) }, [])
// Initialization step 2: apply to the client // Initialization step 2: apply to the client
useEffect(() => { useEffect(() => {
if (!props.voiceChangerClient) return if (!props.voiceChangerClient) return
props.voiceChangerClient.setServerUrl(settingRef.current.mmvcServerUrl) props.voiceChangerClient.updateClientSetting(clientSetting)
props.voiceChangerClient.setInputChunkNum(settingRef.current.inputChunkNum)
props.voiceChangerClient.setProtocol(settingRef.current.protocol)
props.voiceChangerClient.setVoiceChangerMode(settingRef.current.voiceChangerMode)
props.voiceChangerClient.setInputGain(settingRef.current.inputGain)
// Input, bufferSize, VoiceFocus Disableは_setInputで設定
_setInput()
}, [props.voiceChangerClient]) }, [props.voiceChangerClient])
const setSetting = async (setting: VoiceChangerClientSetting) => { const storeSetting = async (setting: VoiceChangerClientSetting) => {
const storeData = { ...setting } const storeData = { ...setting }
if (typeof storeData.audioInput != "string") { if (typeof storeData.audioInput != "string") {
storeData.audioInput = null storeData.audioInput = null
} }
setItem(INDEXEDDB_KEY_CLIENT, storeData) setItem(INDEXEDDB_KEY_CLIENT, storeData)
_setSetting(setting) setClientSetting(setting)
} }
const clearSetting = async () => { const clearSetting = async () => {
@@ -92,177 +61,21 @@ export const useClientSetting = (props: UseClientSettingProps): ClientSettingSta
////////////// //////////////
// Settings // Settings
///////////// /////////////
const updateClientSetting = useMemo(() => {
return (clientSetting: VoiceChangerClientSetting) => {
if (!props.voiceChangerClient) return
// storeSetting(clientSetting)
// props.voiceChangerClient.updateClientSetting(clientSetting)
}
}, [props.voiceChangerClient, clientSetting])
const setServerUrl = useMemo(() => { const setServerUrl = useMemo(() => {
return (url: string) => { return (url: string) => {
if (!props.voiceChangerClient) return if (!props.voiceChangerClient) return
props.voiceChangerClient.setServerUrl(url, true) props.voiceChangerClient.setServerUrl(url, true)
settingRef.current.mmvcServerUrl = url
setSetting({ ...settingRef.current })
} }
}, [props.voiceChangerClient]) }, [props.voiceChangerClient])
const setProtocol = useMemo(() => {
return (proto: Protocol) => {
if (!props.voiceChangerClient) return
props.voiceChangerClient.setProtocol(proto)
settingRef.current.protocol = proto
setSetting({ ...settingRef.current })
}
}, [props.voiceChangerClient])
const _setInput = async () => {
if (!props.voiceChangerClient) return
if (!settingRef.current.audioInput || settingRef.current.audioInput == "none") {
await props.voiceChangerClient.setup(null, settingRef.current.bufferSize, settingRef.current.echoCancel, settingRef.current.noiseSuppression, settingRef.current.noiseSuppression2)
} else {
// console.log("[useClient] setup!(2)", settingRef.current.audioInput)
await props.voiceChangerClient.setup(settingRef.current.audioInput, settingRef.current.bufferSize, settingRef.current.echoCancel, settingRef.current.noiseSuppression, settingRef.current.noiseSuppression2)
}
}
const setAudioInput = useMemo(() => {
return async (audioInput: string | MediaStream | null) => {
if (!props.voiceChangerClient) return
settingRef.current.audioInput = audioInput
await _setInput()
setSetting({ ...settingRef.current })
}
}, [props.voiceChangerClient])
const setBufferSize = useMemo(() => {
return async (bufferSize: BufferSize) => {
if (!props.voiceChangerClient) return
settingRef.current.bufferSize = bufferSize
await _setInput()
setSetting({ ...settingRef.current })
}
}, [props.voiceChangerClient])
const setEchoCancel = useMemo(() => {
return async (val: boolean) => {
if (!props.voiceChangerClient) return
settingRef.current.echoCancel = val
await _setInput()
setSetting({ ...settingRef.current })
}
}, [props.voiceChangerClient])
const setNoiseSuppression = useMemo(() => {
return async (val: boolean) => {
if (!props.voiceChangerClient) return
settingRef.current.noiseSuppression = val
await _setInput()
setSetting({ ...settingRef.current })
}
}, [props.voiceChangerClient])
const setNoiseSuppression2 = useMemo(() => {
return async (val: boolean) => {
if (!props.voiceChangerClient) return
settingRef.current.noiseSuppression2 = val
await _setInput()
setSetting({ ...settingRef.current })
}
}, [props.voiceChangerClient])
const setInputChunkNum = useMemo(() => {
return (num: number) => {
if (!props.voiceChangerClient) return
props.voiceChangerClient.setInputChunkNum(num)
settingRef.current.inputChunkNum = num
setSetting({ ...settingRef.current })
}
}, [props.voiceChangerClient])
const setVoiceChangerMode = useMemo(() => {
return (mode: VoiceChangerMode) => {
if (!props.voiceChangerClient) return
props.voiceChangerClient.setVoiceChangerMode(mode)
settingRef.current.voiceChangerMode = mode
setSetting({ ...settingRef.current })
}
}, [props.voiceChangerClient])
const setDownSamplingMode = useMemo(() => {
return (mode: DownSamplingMode) => {
if (!props.voiceChangerClient) return
props.voiceChangerClient.setDownSamplingMode(mode)
settingRef.current.downSamplingMode = mode
setSetting({ ...settingRef.current })
}
}, [props.voiceChangerClient])
const setSendingSampleRate = useMemo(() => {
return (val: SendingSampleRate) => {
if (!props.voiceChangerClient) return
props.voiceChangerClient.setSendingSampleRate(val)
settingRef.current.sendingSampleRate = val
setSetting({ ...settingRef.current })
}
}, [props.voiceChangerClient])
const setSampleRate = useMemo(() => {
return (num: SampleRate) => {
if (!props.voiceChangerClient) return
//props.voiceChangerClient.setSampleRate(num) // Not Implemented
settingRef.current.sampleRate = num
setSetting({ ...settingRef.current })
}
}, [props.voiceChangerClient])
const setSpeakers = useMemo(() => {
return (speakers: Speaker[]) => {
if (!props.voiceChangerClient) return
settingRef.current.speakers = speakers
setSetting({ ...settingRef.current })
}
}, [props.voiceChangerClient])
const setCorrespondences = useMemo(() => {
return async (file: File | null) => {
if (!props.voiceChangerClient) return
if (!file) {
settingRef.current.correspondences = []
} else {
const correspondenceText = await file.text()
const cors = correspondenceText.split("\n").map(line => {
const items = line.split("|")
if (items.length != 3) {
console.warn("Invalid Correspondence Line:", line)
return null
} else {
const cor: Correspondence = {
sid: Number(items[0]),
correspondence: Number(items[1]),
dirname: items[2]
}
return cor
}
}).filter(x => { return x != null }) as Correspondence[]
settingRef.current.correspondences = cors
}
setSetting({ ...settingRef.current })
}
}, [props.voiceChangerClient])
const setInputGain = useMemo(() => {
return (val: number) => {
if (!props.voiceChangerClient) return
props.voiceChangerClient.setInputGain(val)
settingRef.current.inputGain = val
setSetting({ ...settingRef.current })
}
}, [props.voiceChangerClient])
const setOutputGain = useMemo(() => {
return (val: number) => {
if (!props.voiceChangerClient) return
props.voiceChangerClient.setOutputGain(val)
settingRef.current.outputGain = val
setSetting({ ...settingRef.current })
}
}, [props.voiceChangerClient])
////////////// //////////////
// Operations // Operations
@@ -271,10 +84,10 @@ export const useClientSetting = (props: UseClientSettingProps): ClientSettingSta
const start = useMemo(() => { const start = useMemo(() => {
return async () => { return async () => {
if (!props.voiceChangerClient) return if (!props.voiceChangerClient) return
props.voiceChangerClient.setServerUrl(setting.mmvcServerUrl, true) // props.voiceChangerClient.setServerUrl(setting.mmvcServerUrl, true)
props.voiceChangerClient.start() props.voiceChangerClient.start()
} }
}, [setting.mmvcServerUrl, props.voiceChangerClient]) }, [props.voiceChangerClient])
// (2) stop // (2) stop
const stop = useMemo(() => { const stop = useMemo(() => {
return async () => { return async () => {
@@ -290,24 +103,10 @@ export const useClientSetting = (props: UseClientSettingProps): ClientSettingSta
}, [props.voiceChangerClient]) }, [props.voiceChangerClient])
return { return {
setting, clientSetting,
clearSetting, clearSetting,
setServerUrl, setServerUrl,
setProtocol, updateClientSetting,
setAudioInput,
setBufferSize,
setEchoCancel,
setNoiseSuppression,
setNoiseSuppression2,
setInputChunkNum,
setVoiceChangerMode,
setDownSamplingMode,
setSendingSampleRate,
setSampleRate,
setSpeakers,
setCorrespondences,
setInputGain,
setOutputGain,
start, start,
stop, stop,
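updateClientSetting above is still stubbed out in this WIP state; judging from the commented-out lines, it will presumably end up along these lines (a sketch, not the author's final code):

const updateClientSetting = useMemo(() => {
    return (clientSetting: VoiceChangerClientSetting) => {
        if (!props.voiceChangerClient) return
        storeSetting(clientSetting)                                  // cache in IndexedDB (audioInput normalized to null)
        props.voiceChangerClient.updateClientSetting(clientSetting)  // the client decides whether to rebuild the input chain
    }
}, [props.voiceChangerClient, clientSetting])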

View File

@@ -4,17 +4,16 @@
"declaration": true, "declaration": true,
"outDir": "./dist", "outDir": "./dist",
/* */ /* */
"forceConsistentCasingInFileNames": true, "forceConsistentCasingInFileNames": true,
/* */ // /* */
"strict": true, // "strict": true,
"noImplicitAny": true, // "noImplicitAny": true,
"strictNullChecks": true, // "strictNullChecks": true,
"noUnusedLocals": true, // "noUnusedLocals": true,
"noUnusedParameters": true, // "noUnusedParameters": true,
"noImplicitReturns": true, // "noImplicitReturns": true,
/* Module */ /* Module */
"moduleResolution": "node", "moduleResolution": "node",