// voice-changer/client/lib/src/VoiceChangerClient.ts

import { VoiceChangerWorkletNode, VoiceChangerWorkletListener } from "./VoiceChangerWorkletNode";
// @ts-ignore
import workerjs from "raw-loader!../worklet/dist/index.js";
import { VoiceFocusDeviceTransformer, VoiceFocusTransformDevice } from "amazon-chime-sdk-js";
import { createDummyMediaStream, validateUrl } from "./util";
import { AudioStreamerSetting, DefaultVoiceChangerClientSetting, ServerSettingKey, VoiceChangerClientSetting, VOICE_CHANGER_CLIENT_EXCEPTION, WorkletSetting } from "./const";
import MicrophoneStream from "microphone-stream";
import { AudioStreamer, Callbacks, AudioStreamerListeners } from "./AudioStreamer";
import { ServerConfigurator } from "./ServerConfigurator";

// Audio data flow:
// input node (mic or MediaStream) -> [vf node] -> microphone stream -> audio streamer ->
// sio/rest server -> audio streamer -> vc node -> output node

import { BlockingQueue } from "./utils/BlockingQueue";
export class VoiceChangerClient {
2023-01-08 10:18:20 +03:00
private configurator: ServerConfigurator
2023-01-04 20:28:36 +03:00
private ctx: AudioContext
private vfEnable = false
private vf: VoiceFocusDeviceTransformer | null = null
private currentDevice: VoiceFocusTransformDevice | null = null
private currentMediaStream: MediaStream | null = null
private currentMediaStreamAudioSourceNode: MediaStreamAudioSourceNode | null = null
private outputNodeFromVF: MediaStreamAudioDestinationNode | null = null
2023-02-12 12:19:22 +03:00
private inputGainNode: GainNode | null = null
private outputGainNode: GainNode | null = null
2023-01-04 20:28:36 +03:00
private micStream: MicrophoneStream | null = null
private audioStreamer!: AudioStreamer
private vcNode!: VoiceChangerWorkletNode
private currentMediaStreamAudioDestinationNode!: MediaStreamAudioDestinationNode
2023-02-12 12:19:22 +03:00
2023-01-04 20:28:36 +03:00
private promiseForInitialize: Promise<void>
2023-01-05 05:45:42 +03:00
private _isVoiceChanging = false
2023-01-04 20:28:36 +03:00
2023-02-19 08:20:37 +03:00
private setting: VoiceChangerClientSetting = DefaultVoiceChangerClientSetting
2023-01-08 11:58:27 +03:00
private sslCertified: string[] = []
2023-01-29 03:42:45 +03:00
private sem = new BlockingQueue<number>();
2023-01-04 20:28:36 +03:00
private callbacks: Callbacks = {
2023-02-19 08:20:37 +03:00
onVoiceReceived: (data: ArrayBuffer): void => {
this.vcNode.postReceivedVoice(data)
2023-01-04 20:28:36 +03:00
}
}
constructor(ctx: AudioContext, vfEnable: boolean, audioStreamerListeners: AudioStreamerListeners, voiceChangerWorkletListener: VoiceChangerWorkletListener) {
2023-01-29 03:42:45 +03:00
this.sem.enqueue(0);
2023-01-08 10:18:20 +03:00
this.configurator = new ServerConfigurator()
2023-01-04 20:28:36 +03:00
this.ctx = ctx
this.vfEnable = vfEnable
this.promiseForInitialize = new Promise<void>(async (resolve) => {
const scriptUrl = URL.createObjectURL(new Blob([workerjs], { type: "text/javascript" }));
await this.ctx.audioWorklet.addModule(scriptUrl)
this.vcNode = new VoiceChangerWorkletNode(this.ctx, voiceChangerWorkletListener); // vc node
2023-01-04 20:28:36 +03:00
this.currentMediaStreamAudioDestinationNode = this.ctx.createMediaStreamDestination() // output node
2023-02-12 12:19:22 +03:00
this.outputGainNode = this.ctx.createGain()
2023-02-19 08:20:37 +03:00
this.outputGainNode.gain.value = this.setting.outputGain
2023-02-12 12:19:22 +03:00
this.vcNode.connect(this.outputGainNode) // vc node -> output node
this.outputGainNode.connect(this.currentMediaStreamAudioDestinationNode)
2023-01-04 20:28:36 +03:00
// (vc nodeにはaudio streamerのcallbackでデータが投げ込まれる)
2023-01-05 05:45:42 +03:00
this.audioStreamer = new AudioStreamer(this.callbacks, audioStreamerListeners, { objectMode: true, })
2023-01-04 20:28:36 +03:00
if (this.vfEnable) {
this.vf = await VoiceFocusDeviceTransformer.create({ variant: 'c20' })
const dummyMediaStream = createDummyMediaStream(this.ctx)
this.currentDevice = (await this.vf.createTransformDevice(dummyMediaStream)) || null;
this.outputNodeFromVF = this.ctx.createMediaStreamDestination();
}
resolve()
})
}
2023-01-29 03:42:45 +03:00
private lock = async () => {
const num = await this.sem.dequeue();
return num;
};
private unlock = (num: number) => {
this.sem.enqueue(num + 1);
};
2023-01-04 20:28:36 +03:00
isInitialized = async () => {
if (this.promiseForInitialize) {
await this.promiseForInitialize
}
return true
}
/////////////////////////////////////////////////////
// オペレーション
/////////////////////////////////////////////////////
/// Operations ///
2023-02-19 08:20:37 +03:00
// setup = async (input: string | MediaStream | null, bufferSize: BufferSize, echoCancel: boolean = true, noiseSuppression: boolean = true, noiseSuppression2: boolean = false) => {
setup = async () => {
2023-01-29 03:42:45 +03:00
const lockNum = await this.lock()
2023-02-17 22:15:34 +03:00
2023-02-19 08:20:37 +03:00
console.log(`Input Setup=> echo: ${this.setting.echoCancel}, noise1: ${this.setting.noiseSuppression}, noise2: ${this.setting.noiseSuppression2}`)
2023-01-04 20:28:36 +03:00
// condition check
if (!this.vcNode) {
console.warn("vc node is not initialized.")
throw "vc node is not initialized."
}
// Main Process
//// shutdown & re-generate mediastream
if (this.currentMediaStream) {
this.currentMediaStream.getTracks().forEach(x => { x.stop() })
this.currentMediaStream = null
}
2023-02-17 22:15:34 +03:00
//// Input デバイスがnullの時はmicStreamを止めてリターン
2023-02-19 08:20:37 +03:00
if (!this.setting.audioInput) {
2023-02-17 22:15:34 +03:00
console.log(`Input Setup=> client mic is disabled.`)
if (this.micStream) {
this.micStream.pauseRecording()
}
await this.unlock(lockNum)
return
}
2023-02-19 08:20:37 +03:00
if (typeof this.setting.audioInput == "string") {
2023-01-04 20:28:36 +03:00
this.currentMediaStream = await navigator.mediaDevices.getUserMedia({
audio: {
2023-02-19 08:20:37 +03:00
deviceId: this.setting.audioInput,
2023-02-14 16:32:25 +03:00
channelCount: 1,
2023-02-19 08:20:37 +03:00
sampleRate: this.setting.sampleRate,
2023-02-14 16:32:25 +03:00
sampleSize: 16,
2023-02-14 23:02:51 +03:00
autoGainControl: false,
2023-02-19 08:20:37 +03:00
echoCancellation: this.setting.echoCancel,
noiseSuppression: this.setting.noiseSuppression
}
2023-01-04 20:28:36 +03:00
})
2023-02-14 23:02:51 +03:00
// this.currentMediaStream.getAudioTracks().forEach((x) => {
// console.log("MIC Setting(cap)", x.getCapabilities())
// console.log("MIC Setting(const)", x.getConstraints())
// console.log("MIC Setting(setting)", x.getSettings())
// })
2023-01-04 20:28:36 +03:00
} else {
2023-02-19 08:20:37 +03:00
this.currentMediaStream = this.setting.audioInput
2023-01-04 20:28:36 +03:00
}
// create mic stream
2023-01-05 05:45:42 +03:00
if (this.micStream) {
2023-01-05 12:35:56 +03:00
this.micStream.unpipe()
2023-01-05 05:45:42 +03:00
this.micStream.destroy()
this.micStream = null
}
2023-01-04 20:28:36 +03:00
this.micStream = new MicrophoneStream({
objectMode: true,
2023-02-19 08:20:37 +03:00
bufferSize: this.setting.bufferSize,
2023-01-04 20:28:36 +03:00
context: this.ctx
})
// connect nodes.
2023-02-12 12:50:10 +03:00
this.currentMediaStreamAudioSourceNode = this.ctx.createMediaStreamSource(this.currentMediaStream)
this.inputGainNode = this.ctx.createGain()
2023-02-19 08:20:37 +03:00
this.inputGainNode.gain.value = this.setting.inputGain
2023-02-12 12:50:10 +03:00
this.currentMediaStreamAudioSourceNode.connect(this.inputGainNode)
2023-02-19 08:20:37 +03:00
if (this.currentDevice && this.setting.noiseSuppression2) {
2023-01-04 20:28:36 +03:00
this.currentDevice.chooseNewInnerDevice(this.currentMediaStream)
const voiceFocusNode = await this.currentDevice.createAudioNode(this.ctx); // vf node
2023-02-12 12:19:22 +03:00
this.inputGainNode.connect(voiceFocusNode.start) // input node -> vf node
2023-01-04 20:28:36 +03:00
voiceFocusNode.end.connect(this.outputNodeFromVF!)
this.micStream.setStream(this.outputNodeFromVF!.stream) // vf node -> mic stream
} else {
2023-02-12 12:19:22 +03:00
const inputDestinationNodeForMicStream = this.ctx.createMediaStreamDestination()
2023-02-12 12:50:10 +03:00
this.inputGainNode.connect(inputDestinationNodeForMicStream)
2023-02-12 12:19:22 +03:00
this.micStream.setStream(inputDestinationNodeForMicStream.stream) // input device -> mic stream
2023-01-04 20:28:36 +03:00
}
2023-01-05 12:35:56 +03:00
this.micStream.pipe(this.audioStreamer) // mic stream -> audio streamer
if (!this._isVoiceChanging) {
this.micStream.pauseRecording()
} else {
this.micStream.playRecording()
}
2023-02-16 20:11:03 +03:00
console.log("Input Setup=> success")
2023-01-29 03:42:45 +03:00
await this.unlock(lockNum)
2023-01-04 20:28:36 +03:00
}
get stream(): MediaStream {
return this.currentMediaStreamAudioDestinationNode.stream
}
2023-01-05 05:45:42 +03:00
start = () => {
2023-01-07 14:07:39 +03:00
if (!this.micStream) {
throw `Exception:${VOICE_CHANGER_CLIENT_EXCEPTION.ERR_MIC_STREAM_NOT_INITIALIZED}`
return
}
2023-01-05 05:45:42 +03:00
this.micStream.playRecording()
this._isVoiceChanging = true
}
stop = () => {
if (!this.micStream) { return }
this.micStream.pauseRecording()
this._isVoiceChanging = false
}
get isVoiceChanging(): boolean {
return this._isVoiceChanging
}
////////////////////////
/// 設定
//////////////////////////////
2023-01-08 10:18:20 +03:00
setServerUrl = (serverUrl: string, openTab: boolean = false) => {
2023-01-08 03:22:22 +03:00
const url = validateUrl(serverUrl)
const pageUrl = `${location.protocol}//${location.host}`
2023-01-08 14:28:57 +03:00
if (url != pageUrl && url.length != 0 && location.protocol == "https:" && this.sslCertified.includes(url) == false) {
2023-01-08 03:22:22 +03:00
if (openTab) {
const value = window.confirm("MMVC Server is different from this page's origin. Open tab to open ssl connection. OK? (You can close the opened tab after ssl connection succeed.)");
if (value) {
window.open(url, '_blank')
2023-01-08 11:58:27 +03:00
this.sslCertified.push(url)
2023-01-08 03:22:22 +03:00
} else {
alert("Your voice conversion may fail...")
}
}
}
2023-02-19 08:20:37 +03:00
this.audioStreamer.updateSetting({ ...this.audioStreamer.getSettings(), serverUrl: url })
2023-01-08 10:18:20 +03:00
this.configurator.setServerUrl(url)
2023-01-04 20:28:36 +03:00
}
2023-02-19 08:20:37 +03:00
updateClientSetting = (setting: VoiceChangerClientSetting) => {
console.log(`[VoiceChangerClient] Updating Client Setting,`, this.setting, setting)
let reconstructInputRequired = false
if (
this.setting.audioInput != setting.audioInput ||
this.setting.bufferSize != setting.bufferSize ||
this.setting.echoCancel != setting.echoCancel ||
this.setting.noiseSuppression != setting.noiseSuppression ||
this.setting.noiseSuppression2 != setting.noiseSuppression2 ||
this.setting.sampleRate != setting.sampleRate
) {
reconstructInputRequired = true
}
2023-02-19 13:22:00 +03:00
if (this.setting.inputGain != setting.inputGain) {
this.setInputGain(setting.inputGain)
}
if (this.setting.outputGain != setting.outputGain) {
this.setOutputGain(setting.outputGain)
}
2023-02-19 08:20:37 +03:00
this.setting = setting
if (reconstructInputRequired) {
this.setup()
}
2023-02-19 13:22:00 +03:00
2023-02-19 08:20:37 +03:00
}
setInputGain = (val: number) => {
2023-02-19 08:20:37 +03:00
this.setting.inputGain = val
if (!this.inputGainNode) {
return
}
this.inputGainNode.gain.value = val
2023-01-04 20:28:36 +03:00
}
setOutputGain = (val: number) => {
if (!this.outputGainNode) {
return
}
this.outputGainNode.gain.value = val
2023-02-18 14:53:15 +03:00
}
2023-01-05 05:45:42 +03:00
/////////////////////////////////////////////////////
// コンポーネント設定、操作
/////////////////////////////////////////////////////
//## Server ##//
updateServerSettings = (key: ServerSettingKey, val: string) => {
return this.configurator.updateSettings(key, val)
}
uploadFile = (buf: ArrayBuffer, filename: string, onprogress: (progress: number, end: boolean) => void) => {
return this.configurator.uploadFile(buf, filename, onprogress)
}
concatUploadedFile = (filename: string, chunkNum: number) => {
return this.configurator.concatUploadedFile(filename, chunkNum)
}
loadModel = (configFilename: string, pyTorchModelFilename: string | null, onnxModelFilename: string | null) => {
return this.configurator.loadModel(configFilename, pyTorchModelFilename, onnxModelFilename)
}
//## Worklet ##//
2023-01-11 22:52:01 +03:00
configureWorklet = (setting: WorkletSetting) => {
this.vcNode.configure(setting)
}
startOutputRecordingWorklet = () => {
this.vcNode.startOutputRecordingWorklet()
2023-01-11 22:52:01 +03:00
}
stopOutputRecordingWorklet = () => {
this.vcNode.stopOutputRecordingWorklet()
}
2023-01-11 22:52:01 +03:00
//## Audio Streamer ##//
2023-02-19 08:20:37 +03:00
updateAudioStreamerSetting = (setting: AudioStreamerSetting) => {
this.audioStreamer.updateSetting(setting)
2023-02-12 12:19:22 +03:00
}
/////////////////////////////////////////////////////
// 情報取得
/////////////////////////////////////////////////////
2023-01-08 10:18:20 +03:00
// Information
getClientSettings = () => {
return this.audioStreamer.getSettings()
}
getServerSettings = () => {
return this.configurator.getSettings()
}
2023-02-17 22:15:34 +03:00
getSocketId = () => {
return this.audioStreamer.getSocketId()
}
2023-01-08 10:18:20 +03:00
2023-01-05 05:45:42 +03:00
2023-01-04 20:28:36 +03:00
}