diff --git a/client/lib/.vscode/settings.json b/client/lib/.vscode/settings.json new file mode 100644 index 00000000..a1af19b9 --- /dev/null +++ b/client/lib/.vscode/settings.json @@ -0,0 +1,8 @@ +{ + "workbench.colorCustomizations": { + "tab.activeBackground": "#65952acc" + }, + "editor.defaultFormatter": "esbenp.prettier-vscode", + "prettier.printWidth": 1024, + "prettier.tabWidth": 4 +} diff --git a/client/lib/package.json b/client/lib/package.json index 1f642df8..8faf9f81 100644 --- a/client/lib/package.json +++ b/client/lib/package.json @@ -1,58 +1,58 @@ { - "name": "@dannadori/voice-changer-client-js", - "version": "1.0.172", - "description": "", - "main": "dist/index.js", - "directories": { - "lib": "lib" - }, - "scripts": { - "clean:worklet": "rimraf worklet/dist/", - "webpack:worklet:dev": "webpack --config webpack.worklet.dev.js", - "webpack:worklet:prod": "webpack --config webpack.worklet.prod.js", - "build:worklet:dev": "npm-run-all clean:worklet webpack:worklet:dev", - "build:worklet:prod": "npm-run-all clean:worklet webpack:worklet:prod", - "clean": "rimraf dist/", - "webpack:dev": "webpack --config webpack.dev.js", - "webpack:prod": "webpack --config webpack.prod.js", - "build:dev": "npm-run-all build:worklet:dev clean webpack:dev", - "build:prod": "npm-run-all build:worklet:prod clean webpack:prod", - "release": "npm version patch && npm publish --access=public", - "test": "echo \"Error: no test specified\" && exit 1" - }, - "keywords": [ - "voice conversion" - ], - "author": "wataru.okada@flect.co.jp", - "license": "ISC", - "devDependencies": { - "@types/audioworklet": "^0.0.50", - "@types/node": "^20.6.3", - "@types/react": "18.2.22", - "@types/react-dom": "18.2.7", - "eslint": "^8.49.0", - "eslint-config-prettier": "^9.0.0", - "eslint-plugin-prettier": "^5.0.0", - "eslint-plugin-react": "^7.33.2", - "eslint-webpack-plugin": "^4.0.1", - "npm-run-all": "^4.1.5", - "prettier": "^3.0.3", - "raw-loader": "^4.0.2", - "rimraf": "^5.0.1", - "ts-loader": "^9.4.4", - "typescript": "^5.2.2", - "webpack": "^5.88.2", - "webpack-cli": "^5.1.4", - "webpack-dev-server": "^4.15.1" - }, - "dependencies": { - "@types/readable-stream": "^4.0.2", - "amazon-chime-sdk-js": "^3.17.0", - "buffer": "^6.0.3", - "localforage": "^1.10.0", - "protobufjs": "^7.2.5", - "react": "^18.2.0", - "react-dom": "^18.2.0", - "socket.io-client": "^4.7.2" - } + "name": "@dannadori/voice-changer-client-js", + "version": "1.0.172", + "description": "", + "main": "dist/index.js", + "directories": { + "lib": "lib" + }, + "scripts": { + "clean:worklet": "rimraf worklet/dist/", + "webpack:worklet:dev": "webpack --config webpack.worklet.dev.js", + "webpack:worklet:prod": "webpack --config webpack.worklet.prod.js", + "build:worklet:dev": "npm-run-all clean:worklet webpack:worklet:dev", + "build:worklet:prod": "npm-run-all clean:worklet webpack:worklet:prod", + "clean": "rimraf dist/", + "webpack:dev": "webpack --config webpack.dev.js", + "webpack:prod": "webpack --config webpack.prod.js", + "build:dev": "npm-run-all build:worklet:dev clean webpack:dev", + "build:prod": "npm-run-all build:worklet:prod clean webpack:prod", + "release": "npm version patch && npm publish --access=public", + "test": "echo \"Error: no test specified\" && exit 1" + }, + "keywords": [ + "voice conversion" + ], + "author": "wataru.okada@flect.co.jp", + "license": "ISC", + "devDependencies": { + "@types/audioworklet": "^0.0.50", + "@types/node": "^20.6.3", + "@types/react": "18.2.22", + "@types/react-dom": "18.2.7", + "eslint": "^8.49.0", + 
"eslint-config-prettier": "^9.0.0", + "eslint-plugin-prettier": "^5.0.0", + "eslint-plugin-react": "^7.33.2", + "eslint-webpack-plugin": "^4.0.1", + "npm-run-all": "^4.1.5", + "prettier": "^3.0.3", + "raw-loader": "^4.0.2", + "rimraf": "^5.0.1", + "ts-loader": "^9.4.4", + "typescript": "^5.2.2", + "webpack": "^5.88.2", + "webpack-cli": "^5.1.4", + "webpack-dev-server": "^4.15.1" + }, + "dependencies": { + "@types/readable-stream": "^4.0.2", + "amazon-chime-sdk-js": "^3.17.0", + "buffer": "^6.0.3", + "localforage": "^1.10.0", + "protobufjs": "^7.2.5", + "react": "^18.2.0", + "react-dom": "^18.2.0", + "socket.io-client": "^4.7.2" + } } diff --git a/client/lib/src/@types/voice-changer-worklet-processor.d.ts b/client/lib/src/@types/voice-changer-worklet-processor.d.ts index 0a44059f..08e320fb 100644 --- a/client/lib/src/@types/voice-changer-worklet-processor.d.ts +++ b/client/lib/src/@types/voice-changer-worklet-processor.d.ts @@ -5,14 +5,14 @@ export declare const RequestType: { readonly stop: "stop"; readonly trancateBuffer: "trancateBuffer"; }; -export type RequestType = typeof RequestType[keyof typeof RequestType]; +export type RequestType = (typeof RequestType)[keyof typeof RequestType]; export declare const ResponseType: { readonly volume: "volume"; readonly inputData: "inputData"; readonly start_ok: "start_ok"; readonly stop_ok: "stop_ok"; }; -export type ResponseType = typeof ResponseType[keyof typeof ResponseType]; +export type ResponseType = (typeof ResponseType)[keyof typeof ResponseType]; export type VoiceChangerWorkletProcessorRequest = { requestType: RequestType; voice: Float32Array; diff --git a/client/lib/src/VoiceChangerClient.ts b/client/lib/src/VoiceChangerClient.ts index 9e1b61a9..ed5bd4b3 100644 --- a/client/lib/src/VoiceChangerClient.ts +++ b/client/lib/src/VoiceChangerClient.ts @@ -1,92 +1,89 @@ -import { VoiceChangerWorkletNode, VoiceChangerWorkletListener } from "./VoiceChangerWorkletNode"; +import { VoiceChangerWorkletNode, VoiceChangerWorkletListener } from "./client/VoiceChangerWorkletNode"; // @ts-ignore import workerjs from "raw-loader!../worklet/dist/index.js"; import { VoiceFocusDeviceTransformer, VoiceFocusTransformDevice } from "amazon-chime-sdk-js"; import { createDummyMediaStream, validateUrl } from "./util"; import { DefaultClientSettng, MergeModelRequest, ServerSettingKey, VoiceChangerClientSetting, WorkletNodeSetting, WorkletSetting } from "./const"; -import { ServerConfigurator } from "./ServerConfigurator"; +import { ServerConfigurator } from "./client/ServerConfigurator"; // オーディオデータの流れ -// input node(mic or MediaStream) -> [vf node] -> [vc node] -> +// input node(mic or MediaStream) -> [vf node] -> [vc node] -> // sio/rest server -> [vc node] -> output node import { BlockingQueue } from "./utils/BlockingQueue"; export class VoiceChangerClient { - private configurator: ServerConfigurator - private ctx: AudioContext - private vfEnable = false - private vf: VoiceFocusDeviceTransformer | null = null - private currentDevice: VoiceFocusTransformDevice | null = null + private configurator: ServerConfigurator; + private ctx: AudioContext; + private vfEnable = false; + private vf: VoiceFocusDeviceTransformer | null = null; + private currentDevice: VoiceFocusTransformDevice | null = null; - private currentMediaStream: MediaStream | null = null - private currentMediaStreamAudioSourceNode: MediaStreamAudioSourceNode | null = null - private inputGainNode: GainNode | null = null - private outputGainNode: GainNode | null = null - private monitorGainNode: GainNode | 
null = null - private vcInNode!: VoiceChangerWorkletNode - private vcOutNode!: VoiceChangerWorkletNode - private currentMediaStreamAudioDestinationNode!: MediaStreamAudioDestinationNode - private currentMediaStreamAudioDestinationMonitorNode!: MediaStreamAudioDestinationNode + private currentMediaStream: MediaStream | null = null; + private currentMediaStreamAudioSourceNode: MediaStreamAudioSourceNode | null = null; + private inputGainNode: GainNode | null = null; + private outputGainNode: GainNode | null = null; + private monitorGainNode: GainNode | null = null; + private vcInNode!: VoiceChangerWorkletNode; + private vcOutNode!: VoiceChangerWorkletNode; + private currentMediaStreamAudioDestinationNode!: MediaStreamAudioDestinationNode; + private currentMediaStreamAudioDestinationMonitorNode!: MediaStreamAudioDestinationNode; + private promiseForInitialize: Promise; + private _isVoiceChanging = false; - private promiseForInitialize: Promise - private _isVoiceChanging = false + private setting: VoiceChangerClientSetting = DefaultClientSettng.voiceChangerClientSetting; - private setting: VoiceChangerClientSetting = DefaultClientSettng.voiceChangerClientSetting - - private sslCertified: string[] = [] + private sslCertified: string[] = []; private sem = new BlockingQueue(); constructor(ctx: AudioContext, vfEnable: boolean, voiceChangerWorkletListener: VoiceChangerWorkletListener) { this.sem.enqueue(0); - this.configurator = new ServerConfigurator() - this.ctx = ctx - this.vfEnable = vfEnable + this.configurator = new ServerConfigurator(); + this.ctx = ctx; + this.vfEnable = vfEnable; this.promiseForInitialize = new Promise(async (resolve) => { const scriptUrl = URL.createObjectURL(new Blob([workerjs], { type: "text/javascript" })); // await this.ctx.audioWorklet.addModule(scriptUrl) - // this.vcInNode = new VoiceChangerWorkletNode(this.ctx, voiceChangerWorkletListener); // vc node + // this.vcInNode = new VoiceChangerWorkletNode(this.ctx, voiceChangerWorkletListener); // vc node try { - this.vcInNode = new VoiceChangerWorkletNode(this.ctx, voiceChangerWorkletListener); // vc node + this.vcInNode = new VoiceChangerWorkletNode(this.ctx, voiceChangerWorkletListener); // vc node } catch (err) { - await this.ctx.audioWorklet.addModule(scriptUrl) - this.vcInNode = new VoiceChangerWorkletNode(this.ctx, voiceChangerWorkletListener); // vc node + await this.ctx.audioWorklet.addModule(scriptUrl); + this.vcInNode = new VoiceChangerWorkletNode(this.ctx, voiceChangerWorkletListener); // vc node } - - // const ctx44k = new AudioContext({ sampleRate: 44100 }) // これでもプチプチが残る - const ctx44k = new AudioContext({ sampleRate: 48000 }) // 結局これが一番まし。 - console.log("audio out:", ctx44k) + const ctx44k = new AudioContext({ sampleRate: 48000 }); // 結局これが一番まし。 + console.log("audio out:", ctx44k); try { - this.vcOutNode = new VoiceChangerWorkletNode(ctx44k, voiceChangerWorkletListener); // vc node + this.vcOutNode = new VoiceChangerWorkletNode(ctx44k, voiceChangerWorkletListener); // vc node } catch (err) { - await ctx44k.audioWorklet.addModule(scriptUrl) - this.vcOutNode = new VoiceChangerWorkletNode(ctx44k, voiceChangerWorkletListener); // vc node + await ctx44k.audioWorklet.addModule(scriptUrl); + this.vcOutNode = new VoiceChangerWorkletNode(ctx44k, voiceChangerWorkletListener); // vc node } - this.currentMediaStreamAudioDestinationNode = ctx44k.createMediaStreamDestination() // output node - this.outputGainNode = ctx44k.createGain() - this.outputGainNode.gain.value = this.setting.outputGain - 
this.vcOutNode.connect(this.outputGainNode) // vc node -> output node - this.outputGainNode.connect(this.currentMediaStreamAudioDestinationNode) + this.currentMediaStreamAudioDestinationNode = ctx44k.createMediaStreamDestination(); // output node + this.outputGainNode = ctx44k.createGain(); + this.outputGainNode.gain.value = this.setting.outputGain; + this.vcOutNode.connect(this.outputGainNode); // vc node -> output node + this.outputGainNode.connect(this.currentMediaStreamAudioDestinationNode); - this.currentMediaStreamAudioDestinationMonitorNode = ctx44k.createMediaStreamDestination() // output node - this.monitorGainNode = ctx44k.createGain() - this.monitorGainNode.gain.value = this.setting.monitorGain - this.vcOutNode.connect(this.monitorGainNode) // vc node -> monitor node - this.monitorGainNode.connect(this.currentMediaStreamAudioDestinationMonitorNode) + this.currentMediaStreamAudioDestinationMonitorNode = ctx44k.createMediaStreamDestination(); // output node + this.monitorGainNode = ctx44k.createGain(); + this.monitorGainNode.gain.value = this.setting.monitorGain; + this.vcOutNode.connect(this.monitorGainNode); // vc node -> monitor node + this.monitorGainNode.connect(this.currentMediaStreamAudioDestinationMonitorNode); if (this.vfEnable) { - this.vf = await VoiceFocusDeviceTransformer.create({ variant: 'c20' }) - const dummyMediaStream = createDummyMediaStream(this.ctx) + this.vf = await VoiceFocusDeviceTransformer.create({ variant: "c20" }); + const dummyMediaStream = createDummyMediaStream(this.ctx); this.currentDevice = (await this.vf.createTransformDevice(dummyMediaStream)) || null; } - resolve() - }) + resolve(); + }); } private lock = async () => { @@ -99,44 +96,46 @@ export class VoiceChangerClient { isInitialized = async () => { if (this.promiseForInitialize) { - await this.promiseForInitialize + await this.promiseForInitialize; } - return true - } + return true; + }; ///////////////////////////////////////////////////// // オペレーション ///////////////////////////////////////////////////// /// Operations /// setup = async () => { - const lockNum = await this.lock() + const lockNum = await this.lock(); - console.log(`Input Setup=> echo: ${this.setting.echoCancel}, noise1: ${this.setting.noiseSuppression}, noise2: ${this.setting.noiseSuppression2}`) + console.log(`Input Setup=> echo: ${this.setting.echoCancel}, noise1: ${this.setting.noiseSuppression}, noise2: ${this.setting.noiseSuppression2}`); // condition check if (!this.vcInNode) { - console.warn("vc node is not initialized.") - throw "vc node is not initialized." + console.warn("vc node is not initialized."); + throw "vc node is not initialized."; } // Main Process //// shutdown & re-generate mediastream if (this.currentMediaStream) { - this.currentMediaStream.getTracks().forEach(x => { x.stop() }) - this.currentMediaStream = null + this.currentMediaStream.getTracks().forEach((x) => { + x.stop(); + }); + this.currentMediaStream = null; } //// Input デバイスがnullの時はmicStreamを止めてリターン if (!this.setting.audioInput) { - console.log(`Input Setup=> client mic is disabled. ${this.setting.audioInput}`) - this.vcInNode.stop() - await this.unlock(lockNum) - return + console.log(`Input Setup=> client mic is disabled. 
${this.setting.audioInput}`); + this.vcInNode.stop(); + await this.unlock(lockNum); + return; } if (typeof this.setting.audioInput == "string") { try { if (this.setting.audioInput == "none") { - this.currentMediaStream = createDummyMediaStream(this.ctx) + this.currentMediaStream = createDummyMediaStream(this.ctx); } else { this.currentMediaStream = await navigator.mediaDevices.getUserMedia({ audio: { @@ -146,15 +145,15 @@ export class VoiceChangerClient { sampleSize: 16, autoGainControl: false, echoCancellation: this.setting.echoCancel, - noiseSuppression: this.setting.noiseSuppression - } - }) + noiseSuppression: this.setting.noiseSuppression, + }, + }); } } catch (e) { - console.warn(e) - this.vcInNode.stop() - await this.unlock(lockNum) - throw e + console.warn(e); + this.vcInNode.stop(); + await this.unlock(lockNum); + throw e; } // this.currentMediaStream.getAudioTracks().forEach((x) => { // console.log("MIC Setting(cap)", x.getCapabilities()) @@ -162,19 +161,19 @@ export class VoiceChangerClient { // console.log("MIC Setting(setting)", x.getSettings()) // }) } else { - this.currentMediaStream = this.setting.audioInput + this.currentMediaStream = this.setting.audioInput; } // connect nodes. - this.currentMediaStreamAudioSourceNode = this.ctx.createMediaStreamSource(this.currentMediaStream) - this.inputGainNode = this.ctx.createGain() - this.inputGainNode.gain.value = this.setting.inputGain - this.currentMediaStreamAudioSourceNode.connect(this.inputGainNode) + this.currentMediaStreamAudioSourceNode = this.ctx.createMediaStreamSource(this.currentMediaStream); + this.inputGainNode = this.ctx.createGain(); + this.inputGainNode.gain.value = this.setting.inputGain; + this.currentMediaStreamAudioSourceNode.connect(this.inputGainNode); if (this.currentDevice && this.setting.noiseSuppression2) { - this.currentDevice.chooseNewInnerDevice(this.currentMediaStream) + this.currentDevice.chooseNewInnerDevice(this.currentMediaStream); const voiceFocusNode = await this.currentDevice.createAudioNode(this.ctx); // vf node - this.inputGainNode.connect(voiceFocusNode.start) // input node -> vf node - voiceFocusNode.end.connect(this.vcInNode) + this.inputGainNode.connect(voiceFocusNode.start); // input node -> vf node + voiceFocusNode.end.connect(this.vcInNode); } else { // console.log("input___ media stream", this.currentMediaStream) // this.currentMediaStream.getTracks().forEach(x => { @@ -184,193 +183,181 @@ export class VoiceChangerClient { // }) // console.log("input___ media node", this.currentMediaStreamAudioSourceNode) // console.log("input___ gain node", this.inputGainNode.channelCount, this.inputGainNode) - this.inputGainNode.connect(this.vcInNode) + this.inputGainNode.connect(this.vcInNode); } - this.vcInNode.setOutputNode(this.vcOutNode) - console.log("Input Setup=> success") - await this.unlock(lockNum) - } + this.vcInNode.setOutputNode(this.vcOutNode); + console.log("Input Setup=> success"); + await this.unlock(lockNum); + }; get stream(): MediaStream { - return this.currentMediaStreamAudioDestinationNode.stream + return this.currentMediaStreamAudioDestinationNode.stream; } get monitorStream(): MediaStream { - return this.currentMediaStreamAudioDestinationMonitorNode.stream + return this.currentMediaStreamAudioDestinationMonitorNode.stream; } start = async () => { - await this.vcInNode.start() - this._isVoiceChanging = true - } + await this.vcInNode.start(); + this._isVoiceChanging = true; + }; stop = async () => { - await this.vcInNode.stop() - this._isVoiceChanging = false - } + await 
this.vcInNode.stop(); + this._isVoiceChanging = false; + }; get isVoiceChanging(): boolean { - return this._isVoiceChanging + return this._isVoiceChanging; } //////////////////////// /// 設定 ////////////////////////////// setServerUrl = (serverUrl: string, openTab: boolean = false) => { - const url = validateUrl(serverUrl) - const pageUrl = `${location.protocol}//${location.host}` + const url = validateUrl(serverUrl); + const pageUrl = `${location.protocol}//${location.host}`; if (url != pageUrl && url.length != 0 && location.protocol == "https:" && this.sslCertified.includes(url) == false) { if (openTab) { const value = window.confirm("MMVC Server is different from this page's origin. Open tab to open ssl connection. OK? (You can close the opened tab after ssl connection succeed.)"); if (value) { - window.open(url, '_blank') - this.sslCertified.push(url) + window.open(url, "_blank"); + this.sslCertified.push(url); } else { - alert("Your voice conversion may fail...") + alert("Your voice conversion may fail..."); } } } - this.vcInNode.updateSetting({ ...this.vcInNode.getSettings(), serverUrl: url }) - this.configurator.setServerUrl(url) - } + this.vcInNode.updateSetting({ ...this.vcInNode.getSettings(), serverUrl: url }); + this.configurator.setServerUrl(url); + }; updateClientSetting = async (setting: VoiceChangerClientSetting) => { - let reconstructInputRequired = false - if ( - this.setting.audioInput != setting.audioInput || - this.setting.echoCancel != setting.echoCancel || - this.setting.noiseSuppression != setting.noiseSuppression || - this.setting.noiseSuppression2 != setting.noiseSuppression2 || - this.setting.sampleRate != setting.sampleRate - ) { - reconstructInputRequired = true + let reconstructInputRequired = false; + if (this.setting.audioInput != setting.audioInput || this.setting.echoCancel != setting.echoCancel || this.setting.noiseSuppression != setting.noiseSuppression || this.setting.noiseSuppression2 != setting.noiseSuppression2 || this.setting.sampleRate != setting.sampleRate) { + reconstructInputRequired = true; } if (this.setting.inputGain != setting.inputGain) { - this.setInputGain(setting.inputGain) + this.setInputGain(setting.inputGain); } if (this.setting.outputGain != setting.outputGain) { - this.setOutputGain(setting.outputGain) + this.setOutputGain(setting.outputGain); } if (this.setting.monitorGain != setting.monitorGain) { - this.setMonitorGain(setting.monitorGain) + this.setMonitorGain(setting.monitorGain); } - this.setting = setting + this.setting = setting; if (reconstructInputRequired) { - await this.setup() + await this.setup(); } - } + }; setInputGain = (val: number) => { - this.setting.inputGain = val + this.setting.inputGain = val; if (!this.inputGainNode) { - return + return; } - if(!val){ - return + if (!val) { + return; } - this.inputGainNode.gain.value = val - } + this.inputGainNode.gain.value = val; + }; setOutputGain = (val: number) => { if (!this.outputGainNode) { - return + return; } - if(!val){ - return + if (!val) { + return; } - this.outputGainNode.gain.value = val - } + this.outputGainNode.gain.value = val; + }; setMonitorGain = (val: number) => { if (!this.monitorGainNode) { - return + return; } - if(!val){ - return + if (!val) { + return; } - this.monitorGainNode.gain.value = val - } + this.monitorGainNode.gain.value = val; + }; ///////////////////////////////////////////////////// // コンポーネント設定、操作 ///////////////////////////////////////////////////// //## Server ##// getModelType = () => { - return this.configurator.getModelType() - } 
+ return this.configurator.getModelType(); + }; getOnnx = async () => { - return this.configurator.export2onnx() - } + return this.configurator.export2onnx(); + }; mergeModel = async (req: MergeModelRequest) => { - return this.configurator.mergeModel(req) - } + return this.configurator.mergeModel(req); + }; updateModelDefault = async () => { - return this.configurator.updateModelDefault() - } + return this.configurator.updateModelDefault(); + }; updateModelInfo = async (slot: number, key: string, val: string) => { - return this.configurator.updateModelInfo(slot, key, val) - } + return this.configurator.updateModelInfo(slot, key, val); + }; updateServerSettings = (key: ServerSettingKey, val: string) => { - return this.configurator.updateSettings(key, val) - } + return this.configurator.updateSettings(key, val); + }; uploadFile = (buf: ArrayBuffer, filename: string, onprogress: (progress: number, end: boolean) => void) => { - return this.configurator.uploadFile(buf, filename, onprogress) - } + return this.configurator.uploadFile(buf, filename, onprogress); + }; uploadFile2 = (dir: string, file: File, onprogress: (progress: number, end: boolean) => void) => { - return this.configurator.uploadFile2(dir, file, onprogress) - } + return this.configurator.uploadFile2(dir, file, onprogress); + }; concatUploadedFile = (filename: string, chunkNum: number) => { - return this.configurator.concatUploadedFile(filename, chunkNum) - } - loadModel = ( - slot: number, - isHalf: boolean, - params: string, - ) => { - return this.configurator.loadModel(slot, isHalf, params) - } + return this.configurator.concatUploadedFile(filename, chunkNum); + }; + loadModel = (slot: number, isHalf: boolean, params: string) => { + return this.configurator.loadModel(slot, isHalf, params); + }; uploadAssets = (params: string) => { - return this.configurator.uploadAssets(params) - } + return this.configurator.uploadAssets(params); + }; //## Worklet ##// configureWorklet = (setting: WorkletSetting) => { - this.vcInNode.configure(setting) - this.vcOutNode.configure(setting) - } + this.vcInNode.configure(setting); + this.vcOutNode.configure(setting); + }; startOutputRecording = () => { - this.vcOutNode.startOutputRecording() - } + this.vcOutNode.startOutputRecording(); + }; stopOutputRecording = () => { - return this.vcOutNode.stopOutputRecording() - } + return this.vcOutNode.stopOutputRecording(); + }; trancateBuffer = () => { - this.vcOutNode.trancateBuffer() - } + this.vcOutNode.trancateBuffer(); + }; //## Worklet Node ##// updateWorkletNodeSetting = (setting: WorkletNodeSetting) => { - this.vcInNode.updateSetting(setting) - this.vcOutNode.updateSetting(setting) - } - + this.vcInNode.updateSetting(setting); + this.vcOutNode.updateSetting(setting); + }; ///////////////////////////////////////////////////// // 情報取得 ///////////////////////////////////////////////////// // Information getClientSettings = () => { - return this.vcInNode.getSettings() - } + return this.vcInNode.getSettings(); + }; getServerSettings = () => { - return this.configurator.getSettings() - } + return this.configurator.getSettings(); + }; getPerformance = () => { - return this.configurator.getPerformance() - } + return this.configurator.getPerformance(); + }; getSocketId = () => { - return this.vcInNode.getSocketId() - } - -} \ No newline at end of file + return this.vcInNode.getSocketId(); + }; +} diff --git a/client/lib/src/ServerConfigurator.ts b/client/lib/src/client/ServerConfigurator.ts similarity index 57% rename from 
client/lib/src/ServerConfigurator.ts rename to client/lib/src/client/ServerConfigurator.ts index a3d250f0..0ed4a97d 100644 --- a/client/lib/src/ServerConfigurator.ts +++ b/client/lib/src/client/ServerConfigurator.ts @@ -1,108 +1,107 @@ -import { MergeModelRequest, OnnxExporterInfo, ServerInfo, ServerSettingKey } from "./const"; - +import { MergeModelRequest, OnnxExporterInfo, ServerInfo, ServerSettingKey } from "../const"; type FileChunk = { - hash: number, - chunk: ArrayBuffer -} + hash: number; + chunk: ArrayBuffer; +}; export class ServerConfigurator { - private serverUrl = "" + private serverUrl = ""; setServerUrl = (serverUrl: string) => { - this.serverUrl = serverUrl - console.log(`[ServerConfigurator] Server URL: ${this.serverUrl}`) - } + this.serverUrl = serverUrl; + console.log(`[ServerConfigurator] Server URL: ${this.serverUrl}`); + }; getSettings = async () => { - const url = this.serverUrl + "/info" + const url = this.serverUrl + "/info"; const info = await new Promise((resolve) => { const request = new Request(url, { - method: 'GET', + method: "GET", }); fetch(request).then(async (response) => { - const json = await response.json() as ServerInfo - resolve(json) - }) - }) - return info - } + const json = (await response.json()) as ServerInfo; + resolve(json); + }); + }); + return info; + }; getPerformance = async () => { - const url = this.serverUrl + "/performance" + const url = this.serverUrl + "/performance"; const info = await new Promise((resolve) => { const request = new Request(url, { - method: 'GET', + method: "GET", }); fetch(request).then(async (response) => { - const json = await response.json() as number[] - resolve(json) - }) - }) - return info - } + const json = (await response.json()) as number[]; + resolve(json); + }); + }); + return info; + }; updateSettings = async (key: ServerSettingKey, val: string) => { - const url = this.serverUrl + "/update_settings" + const url = this.serverUrl + "/update_settings"; const info = await new Promise(async (resolve) => { const formData = new FormData(); formData.append("key", key); formData.append("val", val); const request = new Request(url, { - method: 'POST', + method: "POST", body: formData, }); - const res = await (await fetch(request)).json() as ServerInfo - resolve(res) - }) - return info - } + const res = (await (await fetch(request)).json()) as ServerInfo; + resolve(res); + }); + return info; + }; uploadFile2 = async (dir: string, file: File, onprogress: (progress: number, end: boolean) => void) => { - const url = this.serverUrl + "/upload_file" - onprogress(0, false) + const url = this.serverUrl + "/upload_file"; + onprogress(0, false); const size = 1024 * 1024; let index = 0; // index値 - const fileLength = file.size - const filename = dir + file.name - const fileChunkNum = Math.ceil(fileLength / size) + const fileLength = file.size; + const filename = dir + file.name; + const fileChunkNum = Math.ceil(fileLength / size); while (true) { - const promises: Promise[] = [] + const promises: Promise[] = []; for (let i = 0; i < 10; i++) { if (index * size >= fileLength) { - break + break; } - const chunk = file.slice(index * size, (index + 1) * size) + const chunk = file.slice(index * size, (index + 1) * size); const p = new Promise((resolve) => { const formData = new FormData(); formData.append("file", new Blob([chunk])); formData.append("filename", `${filename}_${index}`); const request = new Request(url, { - method: 'POST', + method: "POST", body: formData, }); fetch(request).then(async (_response) => { // 
console.log(await response.text()) - resolve() - }) - }) - index += 1 - promises.push(p) + resolve(); + }); + }); + index += 1; + promises.push(p); } - await Promise.all(promises) + await Promise.all(promises); if (index * size >= fileLength) { - break + break; } - onprogress(Math.floor(((index) / (fileChunkNum + 1)) * 100), false) + onprogress(Math.floor((index / (fileChunkNum + 1)) * 100), false); } - return fileChunkNum - } + return fileChunkNum; + }; uploadFile = async (buf: ArrayBuffer, filename: string, onprogress: (progress: number, end: boolean) => void) => { - const url = this.serverUrl + "/upload_file" - onprogress(0, false) + const url = this.serverUrl + "/upload_file"; + onprogress(0, false); const size = 1024 * 1024; const fileChunks: FileChunk[] = []; let index = 0; // index値 @@ -113,65 +112,64 @@ export class ServerConfigurator { }); } - const chunkNum = fileChunks.length + const chunkNum = fileChunks.length; // console.log("FILE_CHUNKS:", chunkNum, fileChunks) - while (true) { - const promises: Promise[] = [] + const promises: Promise[] = []; for (let i = 0; i < 10; i++) { - const chunk = fileChunks.shift() + const chunk = fileChunks.shift(); if (!chunk) { - break + break; } const p = new Promise((resolve) => { const formData = new FormData(); formData.append("file", new Blob([chunk.chunk])); formData.append("filename", `${filename}_${chunk.hash}`); const request = new Request(url, { - method: 'POST', + method: "POST", body: formData, }); fetch(request).then(async (_response) => { // console.log(await response.text()) - resolve() - }) - }) + resolve(); + }); + }); - promises.push(p) + promises.push(p); } - await Promise.all(promises) + await Promise.all(promises); if (fileChunks.length == 0) { - break + break; } - onprogress(Math.floor(((chunkNum - fileChunks.length) / (chunkNum + 1)) * 100), false) + onprogress(Math.floor(((chunkNum - fileChunks.length) / (chunkNum + 1)) * 100), false); } - return chunkNum - } + return chunkNum; + }; concatUploadedFile = async (filename: string, chunkNum: number) => { - const url = this.serverUrl + "/concat_uploaded_file" + const url = this.serverUrl + "/concat_uploaded_file"; await new Promise((resolve) => { const formData = new FormData(); formData.append("filename", filename); formData.append("filenameChunkNum", "" + chunkNum); const request = new Request(url, { - method: 'POST', + method: "POST", body: formData, }); fetch(request).then(async (response) => { - console.log(await response.text()) - resolve() - }) - }) - } + console.log(await response.text()); + resolve(); + }); + }); + }; loadModel = async (slot: number, isHalf: boolean, params: string = "{}") => { if (isHalf == undefined || isHalf == null) { - console.warn("isHalf is invalid value", isHalf) - isHalf = false + console.warn("isHalf is invalid value", isHalf); + isHalf = false; } - const url = this.serverUrl + "/load_model" + const url = this.serverUrl + "/load_model"; const info = new Promise(async (resolve) => { const formData = new FormData(); formData.append("slot", "" + slot); @@ -179,102 +177,101 @@ export class ServerConfigurator { formData.append("params", params); const request = new Request(url, { - method: 'POST', + method: "POST", body: formData, }); - const res = await (await fetch(request)).json() as ServerInfo - resolve(res) - }) - return await info - } + const res = (await (await fetch(request)).json()) as ServerInfo; + resolve(res); + }); + return await info; + }; uploadAssets = async (params: string) => { - const url = this.serverUrl + 
"/upload_model_assets" + const url = this.serverUrl + "/upload_model_assets"; const info = new Promise(async (resolve) => { const formData = new FormData(); formData.append("params", params); const request = new Request(url, { - method: 'POST', + method: "POST", body: formData, }); - const res = await (await fetch(request)).json() as ServerInfo - resolve(res) - }) - return await info - } + const res = (await (await fetch(request)).json()) as ServerInfo; + resolve(res); + }); + return await info; + }; getModelType = async () => { - const url = this.serverUrl + "/model_type" + const url = this.serverUrl + "/model_type"; const info = new Promise(async (resolve) => { const request = new Request(url, { - method: 'GET', + method: "GET", }); - const res = await (await fetch(request)).json() as ServerInfo - resolve(res) - }) - return await info - } + const res = (await (await fetch(request)).json()) as ServerInfo; + resolve(res); + }); + return await info; + }; export2onnx = async () => { - const url = this.serverUrl + "/onnx" + const url = this.serverUrl + "/onnx"; const info = new Promise(async (resolve) => { const request = new Request(url, { - method: 'GET', + method: "GET", }); - const res = await (await fetch(request)).json() as OnnxExporterInfo - resolve(res) - }) - return await info - } + const res = (await (await fetch(request)).json()) as OnnxExporterInfo; + resolve(res); + }); + return await info; + }; mergeModel = async (req: MergeModelRequest) => { - const url = this.serverUrl + "/merge_model" + const url = this.serverUrl + "/merge_model"; const info = new Promise(async (resolve) => { const formData = new FormData(); formData.append("request", JSON.stringify(req)); const request = new Request(url, { - method: 'POST', + method: "POST", body: formData, }); - const res = await (await fetch(request)).json() as ServerInfo - console.log("RESPONSE", res) - resolve(res) - }) - return await info - } + const res = (await (await fetch(request)).json()) as ServerInfo; + console.log("RESPONSE", res); + resolve(res); + }); + return await info; + }; updateModelDefault = async () => { - const url = this.serverUrl + "/update_model_default" + const url = this.serverUrl + "/update_model_default"; const info = new Promise(async (resolve) => { const request = new Request(url, { - method: 'POST', + method: "POST", }); - const res = await (await fetch(request)).json() as ServerInfo - console.log("RESPONSE", res) - resolve(res) - }) - return await info - } + const res = (await (await fetch(request)).json()) as ServerInfo; + console.log("RESPONSE", res); + resolve(res); + }); + return await info; + }; updateModelInfo = async (slot: number, key: string, val: string) => { - const url = this.serverUrl + "/update_model_info" - const newData = { slot, key, val } + const url = this.serverUrl + "/update_model_info"; + const newData = { slot, key, val }; const info = new Promise(async (resolve) => { const formData = new FormData(); formData.append("newData", JSON.stringify(newData)); const request = new Request(url, { - method: 'POST', + method: "POST", body: formData, }); - const res = await (await fetch(request)).json() as ServerInfo - console.log("RESPONSE", res) - resolve(res) - }) - return await info - } - + const res = (await (await fetch(request)).json()) as ServerInfo; + console.log("RESPONSE", res); + resolve(res); + }); + return await info; + }; } diff --git a/client/lib/src/VoiceChangerWorkletNode.ts b/client/lib/src/client/VoiceChangerWorkletNode.ts similarity index 65% rename from 
client/lib/src/VoiceChangerWorkletNode.ts rename to client/lib/src/client/VoiceChangerWorkletNode.ts index 48d886d8..e6090e6f 100644 --- a/client/lib/src/VoiceChangerWorkletNode.ts +++ b/client/lib/src/client/VoiceChangerWorkletNode.ts @@ -1,140 +1,134 @@ -import { VoiceChangerWorkletProcessorRequest } from "./@types/voice-changer-worklet-processor"; -import { DefaultClientSettng, DownSamplingMode, VOICE_CHANGER_CLIENT_EXCEPTION, WorkletNodeSetting, WorkletSetting } from "./const"; +import { VoiceChangerWorkletProcessorRequest } from "../@types/voice-changer-worklet-processor"; +import { DefaultClientSettng, DownSamplingMode, VOICE_CHANGER_CLIENT_EXCEPTION, WorkletNodeSetting, WorkletSetting } from "../const"; import { io, Socket } from "socket.io-client"; import { DefaultEventsMap } from "@socket.io/component-emitter"; export type VoiceChangerWorkletListener = { - notifyVolume: (vol: number) => void - notifySendBufferingTime: (time: number) => void - notifyResponseTime: (time: number, perf?: number[]) => void - notifyException: (code: VOICE_CHANGER_CLIENT_EXCEPTION, message: string) => void -} + notifyVolume: (vol: number) => void; + notifySendBufferingTime: (time: number) => void; + notifyResponseTime: (time: number, perf?: number[]) => void; + notifyException: (code: VOICE_CHANGER_CLIENT_EXCEPTION, message: string) => void; +}; export class VoiceChangerWorkletNode extends AudioWorkletNode { - private listener: VoiceChangerWorkletListener + private listener: VoiceChangerWorkletListener; - private setting: WorkletNodeSetting = DefaultClientSettng.workletNodeSetting - private requestChunks: ArrayBuffer[] = [] - private socket: Socket | null = null + private setting: WorkletNodeSetting = DefaultClientSettng.workletNodeSetting; + private requestChunks: ArrayBuffer[] = []; + private socket: Socket | null = null; // performance monitor private bufferStart = 0; private isOutputRecording = false; - private recordingOutputChunk: Float32Array[] = [] - private outputNode: VoiceChangerWorkletNode | null = null + private recordingOutputChunk: Float32Array[] = []; + private outputNode: VoiceChangerWorkletNode | null = null; // Promises - private startPromiseResolve: ((value: void | PromiseLike) => void) | null = null - private stopPromiseResolve: ((value: void | PromiseLike) => void) | null = null - - + private startPromiseResolve: ((value: void | PromiseLike) => void) | null = null; + private stopPromiseResolve: ((value: void | PromiseLike) => void) | null = null; constructor(context: AudioContext, listener: VoiceChangerWorkletListener) { super(context, "voice-changer-worklet-processor"); this.port.onmessage = this.handleMessage.bind(this); - this.listener = listener - this.createSocketIO() + this.listener = listener; + this.createSocketIO(); console.log(`[worklet_node][voice-changer-worklet-processor] created.`); } setOutputNode = (outputNode: VoiceChangerWorkletNode | null) => { - this.outputNode = outputNode - } - + this.outputNode = outputNode; + }; // 設定 updateSetting = (setting: WorkletNodeSetting) => { - console.log(`[WorkletNode] Updating WorkletNode Setting,`, this.setting, setting) - let recreateSocketIoRequired = false + console.log(`[WorkletNode] Updating WorkletNode Setting,`, this.setting, setting); + let recreateSocketIoRequired = false; if (this.setting.serverUrl != setting.serverUrl || this.setting.protocol != setting.protocol) { - recreateSocketIoRequired = true + recreateSocketIoRequired = true; } - this.setting = setting + this.setting = setting; if (recreateSocketIoRequired) { - 
this.createSocketIO() + this.createSocketIO(); } - } + }; getSettings = (): WorkletNodeSetting => { - return this.setting - } + return this.setting; + }; getSocketId = () => { - return this.socket?.id - } + return this.socket?.id; + }; // 処理 private createSocketIO = () => { if (this.socket) { - this.socket.close() + this.socket.close(); } if (this.setting.protocol === "sio") { this.socket = io(this.setting.serverUrl + "/test"); - this.socket.on('connect_error', (err) => { - this.listener.notifyException(VOICE_CHANGER_CLIENT_EXCEPTION.ERR_SIO_CONNECT_FAILED, `[SIO] rconnection failed ${err}`) - }) - this.socket.on('connect', () => { - console.log(`[SIO] connect to ${this.setting.serverUrl}`) - console.log(`[SIO] ${this.socket?.id}`) + this.socket.on("connect_error", (err) => { + this.listener.notifyException(VOICE_CHANGER_CLIENT_EXCEPTION.ERR_SIO_CONNECT_FAILED, `[SIO] rconnection failed ${err}`); }); - this.socket.on('close', function (socket) { - console.log(`[SIO] close ${socket.id}`) + this.socket.on("connect", () => { + console.log(`[SIO] connect to ${this.setting.serverUrl}`); + console.log(`[SIO] ${this.socket?.id}`); + }); + this.socket.on("close", function (socket) { + console.log(`[SIO] close ${socket.id}`); }); - - this.socket.on('message', (response: any[]) => { - console.log("message:", response) + this.socket.on("message", (response: any[]) => { + console.log("message:", response); }); - this.socket.on('response', (response: any[]) => { - - const cur = Date.now() - const responseTime = cur - response[0] - const result = response[1] as ArrayBuffer - const perf = response[2] + this.socket.on("response", (response: any[]) => { + const cur = Date.now(); + const responseTime = cur - response[0]; + const result = response[1] as ArrayBuffer; + const perf = response[2]; // Quick hack for server device mode if (response[0] == 0) { - this.listener.notifyResponseTime(Math.round(perf[0] * 1000), perf.slice(1, 4)) - return + this.listener.notifyResponseTime(Math.round(perf[0] * 1000), perf.slice(1, 4)); + return; } - if (result.byteLength < 128 * 2) { - this.listener.notifyException(VOICE_CHANGER_CLIENT_EXCEPTION.ERR_SIO_INVALID_RESPONSE, `[SIO] recevied data is too short ${result.byteLength}`) + this.listener.notifyException(VOICE_CHANGER_CLIENT_EXCEPTION.ERR_SIO_INVALID_RESPONSE, `[SIO] recevied data is too short ${result.byteLength}`); } else { if (this.outputNode != null) { - this.outputNode.postReceivedVoice(response[1]) + this.outputNode.postReceivedVoice(response[1]); } else { - this.postReceivedVoice(response[1]) + this.postReceivedVoice(response[1]); } - this.listener.notifyResponseTime(responseTime, perf) + this.listener.notifyResponseTime(responseTime, perf); } }); } - } + }; postReceivedVoice = (data: ArrayBuffer) => { // Int16 to Float - const i16Data = new Int16Array(data) - const f32Data = new Float32Array(i16Data.length) + const i16Data = new Int16Array(data); + const f32Data = new Float32Array(i16Data.length); // console.log(`[worklet] f32DataLength${f32Data.length} i16DataLength${i16Data.length}`) i16Data.forEach((x, i) => { - const float = (x >= 0x8000) ? -(0x10000 - x) / 0x8000 : x / 0x7FFF; - f32Data[i] = float - }) + const float = x >= 0x8000 ? 
-(0x10000 - x) / 0x8000 : x / 0x7fff; + f32Data[i] = float; + }); // アップサンプリング - let upSampledBuffer: Float32Array | null = null + let upSampledBuffer: Float32Array | null = null; if (this.setting.sendingSampleRate == 48000) { - upSampledBuffer = f32Data + upSampledBuffer = f32Data; } else { - upSampledBuffer = new Float32Array(f32Data.length * 2) + upSampledBuffer = new Float32Array(f32Data.length * 2); for (let i = 0; i < f32Data.length; i++) { - const currentFrame = f32Data[i] - const nextFrame = i + 1 < f32Data.length ? f32Data[i + 1] : f32Data[i] - upSampledBuffer[i * 2] = currentFrame - upSampledBuffer[i * 2 + 1] = (currentFrame + nextFrame) / 2 + const currentFrame = f32Data[i]; + const nextFrame = i + 1 < f32Data.length ? f32Data[i + 1] : f32Data[i]; + upSampledBuffer[i * 2] = currentFrame; + upSampledBuffer[i * 2 + 1] = (currentFrame + nextFrame) / 2; } } @@ -143,15 +137,14 @@ export class VoiceChangerWorkletNode extends AudioWorkletNode { voice: upSampledBuffer, numTrancateTreshold: 0, volTrancateThreshold: 0, - volTrancateLength: 0 - } - this.port.postMessage(req) + volTrancateLength: 0, + }; + this.port.postMessage(req); if (this.isOutputRecording) { - this.recordingOutputChunk.push(upSampledBuffer) + this.recordingOutputChunk.push(upSampledBuffer); } - - } + }; private _averageDownsampleBuffer(buffer: Float32Array, originalSampleRate: number, destinationSamplerate: number) { if (originalSampleRate == destinationSamplerate) { @@ -168,7 +161,8 @@ export class VoiceChangerWorkletNode extends AudioWorkletNode { while (offsetResult < result.length) { var nextOffsetBuffer = Math.round((offsetResult + 1) * sampleRateRatio); // Use average value of skipped samples - var accum = 0, count = 0; + var accum = 0, + count = 0; for (var i = offsetBuffer; i < nextOffsetBuffer && i < buffer.length; i++) { accum += buffer[i]; count++; @@ -185,112 +179,102 @@ export class VoiceChangerWorkletNode extends AudioWorkletNode { // console.log(`[Node:handleMessage_] `, event.data.volume); if (event.data.responseType === "start_ok") { if (this.startPromiseResolve) { - this.startPromiseResolve() - this.startPromiseResolve = null + this.startPromiseResolve(); + this.startPromiseResolve = null; } } else if (event.data.responseType === "stop_ok") { if (this.stopPromiseResolve) { - this.stopPromiseResolve() - this.stopPromiseResolve = null + this.stopPromiseResolve(); + this.stopPromiseResolve = null; } } else if (event.data.responseType === "volume") { - this.listener.notifyVolume(event.data.volume as number) + this.listener.notifyVolume(event.data.volume as number); } else if (event.data.responseType === "inputData") { - const inputData = event.data.inputData as Float32Array + const inputData = event.data.inputData as Float32Array; // console.log("receive input data", inputData) // ダウンサンプリング - let downsampledBuffer: Float32Array | null = null + let downsampledBuffer: Float32Array | null = null; if (this.setting.sendingSampleRate == 48000) { - downsampledBuffer = inputData + downsampledBuffer = inputData; } else if (this.setting.downSamplingMode == DownSamplingMode.decimate) { //////// (Kind 1) 間引き ////////// //// 48000Hz で入ってくるので間引いて24000Hzに変換する。 downsampledBuffer = new Float32Array(inputData.length / 2); for (let i = 0; i < inputData.length; i++) { if (i % 2 == 0) { - downsampledBuffer[i / 2] = inputData[i] + downsampledBuffer[i / 2] = inputData[i]; } } } else { //////// (Kind 2) 平均 ////////// // downsampledBuffer = this._averageDownsampleBuffer(buffer, 48000, 24000) - downsampledBuffer = 
this._averageDownsampleBuffer(inputData, 48000, this.setting.sendingSampleRate) + downsampledBuffer = this._averageDownsampleBuffer(inputData, 48000, this.setting.sendingSampleRate); } // Float to Int16 - const arrayBuffer = new ArrayBuffer(downsampledBuffer.length * 2) + const arrayBuffer = new ArrayBuffer(downsampledBuffer.length * 2); const dataView = new DataView(arrayBuffer); for (let i = 0; i < downsampledBuffer.length; i++) { let s = Math.max(-1, Math.min(1, downsampledBuffer[i])); - s = s < 0 ? s * 0x8000 : s * 0x7FFF + s = s < 0 ? s * 0x8000 : s * 0x7fff; dataView.setInt16(i * 2, s, true); } // バッファリング - this.requestChunks.push(arrayBuffer) + this.requestChunks.push(arrayBuffer); //// リクエストバッファの中身が、リクエスト送信数と違う場合は処理終了。 if (this.requestChunks.length < this.setting.inputChunkNum) { - return + return; } // リクエスト用の入れ物を作成 const windowByteLength = this.requestChunks.reduce((prev, cur) => { - return prev + cur.byteLength - }, 0) + return prev + cur.byteLength; + }, 0); const newBuffer = new Uint8Array(windowByteLength); // リクエストのデータをセット this.requestChunks.reduce((prev, cur) => { - newBuffer.set(new Uint8Array(cur), prev) - return prev + cur.byteLength - }, 0) + newBuffer.set(new Uint8Array(cur), prev); + return prev + cur.byteLength; + }, 0); + this.sendBuffer(newBuffer); + this.requestChunks = []; - this.sendBuffer(newBuffer) - this.requestChunks = [] - - this.listener.notifySendBufferingTime(Date.now() - this.bufferStart) - this.bufferStart = Date.now() - + this.listener.notifySendBufferingTime(Date.now() - this.bufferStart); + this.bufferStart = Date.now(); } else { - console.warn(`[worklet_node][voice-changer-worklet-processor] unknown response ${event.data.responseType}`, event.data) + console.warn(`[worklet_node][voice-changer-worklet-processor] unknown response ${event.data.responseType}`, event.data); } } - - private sendBuffer = async (newBuffer: Uint8Array) => { - const timestamp = Date.now() + const timestamp = Date.now(); if (this.setting.protocol === "sio") { if (!this.socket) { - console.warn(`sio is not initialized`) - return + console.warn(`sio is not initialized`); + return; } // console.log("emit!") - this.socket.emit('request_message', [ - timestamp, - newBuffer.buffer]); + this.socket.emit("request_message", [timestamp, newBuffer.buffer]); } else { - const res = await postVoice( - this.setting.serverUrl + "/test", - timestamp, - newBuffer.buffer) + const res = await postVoice(this.setting.serverUrl + "/test", timestamp, newBuffer.buffer); if (res.byteLength < 128 * 2) { - this.listener.notifyException(VOICE_CHANGER_CLIENT_EXCEPTION.ERR_REST_INVALID_RESPONSE, `[REST] recevied data is too short ${res.byteLength}`) + this.listener.notifyException(VOICE_CHANGER_CLIENT_EXCEPTION.ERR_REST_INVALID_RESPONSE, `[REST] recevied data is too short ${res.byteLength}`); } else { if (this.outputNode != null) { - this.outputNode.postReceivedVoice(res) + this.outputNode.postReceivedVoice(res); } else { - this.postReceivedVoice(res) + this.postReceivedVoice(res); } - this.listener.notifyResponseTime(Date.now() - timestamp) + this.listener.notifyResponseTime(Date.now() - timestamp); } } - } - + }; configure = (setting: WorkletSetting) => { const req: VoiceChangerWorkletProcessorRequest = { @@ -298,105 +282,100 @@ export class VoiceChangerWorkletNode extends AudioWorkletNode { voice: new Float32Array(1), numTrancateTreshold: setting.numTrancateTreshold, volTrancateThreshold: setting.volTrancateThreshold, - volTrancateLength: setting.volTrancateLength - } - this.port.postMessage(req) - } + 
volTrancateLength: setting.volTrancateLength, + }; + this.port.postMessage(req); + }; start = async () => { const p = new Promise((resolve) => { - this.startPromiseResolve = resolve - }) + this.startPromiseResolve = resolve; + }); const req: VoiceChangerWorkletProcessorRequest = { requestType: "start", voice: new Float32Array(1), numTrancateTreshold: 0, volTrancateThreshold: 0, - volTrancateLength: 0 - } - this.port.postMessage(req) - await p - - } + volTrancateLength: 0, + }; + this.port.postMessage(req); + await p; + }; stop = async () => { const p = new Promise((resolve) => { - this.stopPromiseResolve = resolve - }) + this.stopPromiseResolve = resolve; + }); const req: VoiceChangerWorkletProcessorRequest = { requestType: "stop", voice: new Float32Array(1), numTrancateTreshold: 0, volTrancateThreshold: 0, - volTrancateLength: 0 - } - this.port.postMessage(req) - await p - } + volTrancateLength: 0, + }; + this.port.postMessage(req); + await p; + }; trancateBuffer = () => { const req: VoiceChangerWorkletProcessorRequest = { requestType: "trancateBuffer", voice: new Float32Array(1), numTrancateTreshold: 0, volTrancateThreshold: 0, - volTrancateLength: 0 - } - this.port.postMessage(req) - } + volTrancateLength: 0, + }; + this.port.postMessage(req); + }; startOutputRecording = () => { - this.recordingOutputChunk = [] - this.isOutputRecording = true - } + this.recordingOutputChunk = []; + this.isOutputRecording = true; + }; stopOutputRecording = () => { - this.isOutputRecording = false + this.isOutputRecording = false; const dataSize = this.recordingOutputChunk.reduce((prev, cur) => { - return prev + cur.length - }, 0) + return prev + cur.length; + }, 0); const samples = new Float32Array(dataSize); - let sampleIndex = 0 + let sampleIndex = 0; for (let i = 0; i < this.recordingOutputChunk.length; i++) { for (let j = 0; j < this.recordingOutputChunk[i].length; j++) { samples[sampleIndex] = this.recordingOutputChunk[i][j]; sampleIndex++; } } - return samples - } + return samples; + }; } - -export const postVoice = async ( - url: string, - timestamp: number, - buffer: ArrayBuffer) => { +export const postVoice = async (url: string, timestamp: number, buffer: ArrayBuffer) => { const obj = { timestamp, - buffer: Buffer.from(buffer).toString('base64') + buffer: Buffer.from(buffer).toString("base64"), }; const body = JSON.stringify(obj); const res = await fetch(`${url}`, { method: "POST", headers: { - 'Accept': 'application/json', - 'Content-Type': 'application/json' + Accept: "application/json", + "Content-Type": "application/json", }, - body: body - }) + body: body, + }); try { - const receivedJson = await res.json() - const changedVoiceBase64 = receivedJson["changedVoiceBase64"] - const buf = Buffer.from(changedVoiceBase64, "base64") + const receivedJson = await res.json(); + const changedVoiceBase64 = receivedJson["changedVoiceBase64"]; + const buf = Buffer.from(changedVoiceBase64, "base64"); const ab = new ArrayBuffer(buf.length); const view = new Uint8Array(ab); for (let i = 0; i < buf.length; ++i) { view[i] = buf[i]; } - return ab + return ab; } catch (e) { - console.log("Exception:", e) + console.log("Exception:", e); return new ArrayBuffer(10); } -} \ No newline at end of file +}; diff --git a/client/lib/src/const.ts b/client/lib/src/const.ts index 1a000130..100e609b 100644 --- a/client/lib/src/const.ts +++ b/client/lib/src/const.ts @@ -1,20 +1,18 @@ - // (★1) chunk sizeは 128サンプル, 256byte(int16)と定義。 // (★2) 256byte(最低バッファサイズ256から間引いた個数x2byte)をchunkとして管理。 // 24000sample -> 1sec, 
128sample(1chunk) -> 5.333msec // 187.5chunk -> 1sec export const VoiceChangerType = { - "MMVCv15": "MMVCv15", - "MMVCv13": "MMVCv13", + MMVCv15: "MMVCv15", + MMVCv13: "MMVCv13", "so-vits-svc-40": "so-vits-svc-40", "DDSP-SVC": "DDSP-SVC", - "RVC": "RVC", - "Diffusion-SVC":"Diffusion-SVC", - "Beatrice": "Beatrice" - -} as const -export type VoiceChangerType = typeof VoiceChangerType[keyof typeof VoiceChangerType] + RVC: "RVC", + "Diffusion-SVC": "Diffusion-SVC", + Beatrice: "Beatrice", +} as const; +export type VoiceChangerType = (typeof VoiceChangerType)[keyof typeof VoiceChangerType]; /////////////////////// // サーバセッティング @@ -22,342 +20,333 @@ export type VoiceChangerType = typeof VoiceChangerType[keyof typeof VoiceChanger export const InputSampleRate = { "48000": 48000, "44100": 44100, - "24000": 24000 -} as const -export type InputSampleRate = typeof InputSampleRate[keyof typeof InputSampleRate] + "24000": 24000, +} as const; +export type InputSampleRate = (typeof InputSampleRate)[keyof typeof InputSampleRate]; export const ModelSamplingRate = { "48000": 48000, "40000": 40000, - "32000": 32000 -} as const -export type ModelSamplingRate = typeof InputSampleRate[keyof typeof InputSampleRate] - + "32000": 32000, +} as const; +export type ModelSamplingRate = (typeof InputSampleRate)[keyof typeof InputSampleRate]; export const CrossFadeOverlapSize = { "1024": 1024, "2048": 2048, "4096": 4096, -} as const -export type CrossFadeOverlapSize = typeof CrossFadeOverlapSize[keyof typeof CrossFadeOverlapSize] +} as const; +export type CrossFadeOverlapSize = (typeof CrossFadeOverlapSize)[keyof typeof CrossFadeOverlapSize]; export const F0Detector = { - "dio": "dio", - "harvest": "harvest", - "crepe": "crepe", - "crepe_full": "crepe_full", - "crepe_tiny": "crepe_tiny", - "rmvpe": "rmvpe", - "rmvpe_onnx": "rmvpe_onnx", -} as const -export type F0Detector = typeof F0Detector[keyof typeof F0Detector] + dio: "dio", + harvest: "harvest", + crepe: "crepe", + crepe_full: "crepe_full", + crepe_tiny: "crepe_tiny", + rmvpe: "rmvpe", + rmvpe_onnx: "rmvpe_onnx", +} as const; +export type F0Detector = (typeof F0Detector)[keyof typeof F0Detector]; export const DiffMethod = { - "pndm": "pndm", + pndm: "pndm", "dpm-solver": "dpm-solver", -} as const -export type DiffMethod = typeof DiffMethod[keyof typeof DiffMethod] +} as const; +export type DiffMethod = (typeof DiffMethod)[keyof typeof DiffMethod]; export const RVCModelType = { - "pyTorchRVC": "pyTorchRVC", - "pyTorchRVCNono": "pyTorchRVCNono", - "pyTorchRVCv2": "pyTorchRVCv2", - "pyTorchRVCv2Nono": "pyTorchRVCv2Nono", - "pyTorchWebUI": "pyTorchWebUI", - "pyTorchWebUINono": "pyTorchWebUINono", - "onnxRVC": "onnxRVC", - "onnxRVCNono": "onnxRVCNono", -} as const -export type RVCModelType = typeof RVCModelType[keyof typeof RVCModelType] + pyTorchRVC: "pyTorchRVC", + pyTorchRVCNono: "pyTorchRVCNono", + pyTorchRVCv2: "pyTorchRVCv2", + pyTorchRVCv2Nono: "pyTorchRVCv2Nono", + pyTorchWebUI: "pyTorchWebUI", + pyTorchWebUINono: "pyTorchWebUINono", + onnxRVC: "onnxRVC", + onnxRVCNono: "onnxRVCNono", +} as const; +export type RVCModelType = (typeof RVCModelType)[keyof typeof RVCModelType]; export const ServerSettingKey = { - "passThrough":"passThrough", - "srcId": "srcId", - "dstId": "dstId", - "gpu": "gpu", + passThrough: "passThrough", + srcId: "srcId", + dstId: "dstId", + gpu: "gpu", - "crossFadeOffsetRate": "crossFadeOffsetRate", - "crossFadeEndRate": "crossFadeEndRate", - "crossFadeOverlapSize": "crossFadeOverlapSize", + crossFadeOffsetRate: "crossFadeOffsetRate", + 
crossFadeEndRate: "crossFadeEndRate", + crossFadeOverlapSize: "crossFadeOverlapSize", - "framework": "framework", - "onnxExecutionProvider": "onnxExecutionProvider", + framework: "framework", + onnxExecutionProvider: "onnxExecutionProvider", - "f0Factor": "f0Factor", - "f0Detector": "f0Detector", - "recordIO": "recordIO", + f0Factor: "f0Factor", + f0Detector: "f0Detector", + recordIO: "recordIO", - "enableServerAudio": "enableServerAudio", - "serverAudioStated": "serverAudioStated", - "serverAudioSampleRate": "serverAudioSampleRate", - "serverInputAudioSampleRate": "serverInputAudioSampleRate", - "serverOutputAudioSampleRate": "serverOutputAudioSampleRate", - "serverMonitorAudioSampleRate": "serverMonitorAudioSampleRate", - "serverInputAudioBufferSize": "serverInputAudioBufferSize", - "serverOutputAudioBufferSize": "serverOutputAudioBufferSize", - "serverInputDeviceId": "serverInputDeviceId", - "serverOutputDeviceId": "serverOutputDeviceId", - "serverMonitorDeviceId": "serverMonitorDeviceId", - "serverReadChunkSize": "serverReadChunkSize", - "serverInputAudioGain": "serverInputAudioGain", - "serverOutputAudioGain": "serverOutputAudioGain", - "serverMonitorAudioGain": "serverMonitorAudioGain", + enableServerAudio: "enableServerAudio", + serverAudioStated: "serverAudioStated", + serverAudioSampleRate: "serverAudioSampleRate", + serverInputAudioSampleRate: "serverInputAudioSampleRate", + serverOutputAudioSampleRate: "serverOutputAudioSampleRate", + serverMonitorAudioSampleRate: "serverMonitorAudioSampleRate", + serverInputAudioBufferSize: "serverInputAudioBufferSize", + serverOutputAudioBufferSize: "serverOutputAudioBufferSize", + serverInputDeviceId: "serverInputDeviceId", + serverOutputDeviceId: "serverOutputDeviceId", + serverMonitorDeviceId: "serverMonitorDeviceId", + serverReadChunkSize: "serverReadChunkSize", + serverInputAudioGain: "serverInputAudioGain", + serverOutputAudioGain: "serverOutputAudioGain", + serverMonitorAudioGain: "serverMonitorAudioGain", - "tran": "tran", - "noiseScale": "noiseScale", - "predictF0": "predictF0", - "silentThreshold": "silentThreshold", - "extraConvertSize": "extraConvertSize", - "clusterInferRatio": "clusterInferRatio", + tran: "tran", + noiseScale: "noiseScale", + predictF0: "predictF0", + silentThreshold: "silentThreshold", + extraConvertSize: "extraConvertSize", + clusterInferRatio: "clusterInferRatio", - "indexRatio": "indexRatio", - "protect": "protect", - "rvcQuality": "rvcQuality", - "modelSamplingRate": "modelSamplingRate", - "silenceFront": "silenceFront", - "modelSlotIndex": "modelSlotIndex", + indexRatio: "indexRatio", + protect: "protect", + rvcQuality: "rvcQuality", + modelSamplingRate: "modelSamplingRate", + silenceFront: "silenceFront", + modelSlotIndex: "modelSlotIndex", - "useEnhancer": "useEnhancer", - "useDiff": "useDiff", + useEnhancer: "useEnhancer", + useDiff: "useDiff", // "useDiffDpm": "useDiffDpm", - "diffMethod": "diffMethod", - "useDiffSilence": "useDiffSilence", - "diffAcc": "diffAcc", - "diffSpkId": "diffSpkId", - "kStep": "kStep", - "threshold": "threshold", + diffMethod: "diffMethod", + useDiffSilence: "useDiffSilence", + diffAcc: "diffAcc", + diffSpkId: "diffSpkId", + kStep: "kStep", + threshold: "threshold", - "speedUp": "speedUp", - "skipDiffusion": "skipDiffusion", - - "inputSampleRate": "inputSampleRate", - "enableDirectML": "enableDirectML", -} as const -export type ServerSettingKey = typeof ServerSettingKey[keyof typeof ServerSettingKey] + speedUp: "speedUp", + skipDiffusion: "skipDiffusion", + inputSampleRate: 
"inputSampleRate", + enableDirectML: "enableDirectML", +} as const; +export type ServerSettingKey = (typeof ServerSettingKey)[keyof typeof ServerSettingKey]; export type VoiceChangerServerSetting = { - passThrough: boolean - srcId: number, - dstId: number, - gpu: number, + passThrough: boolean; + srcId: number; + dstId: number; + gpu: number; - crossFadeOffsetRate: number, - crossFadeEndRate: number, - crossFadeOverlapSize: CrossFadeOverlapSize, + crossFadeOffsetRate: number; + crossFadeEndRate: number; + crossFadeOverlapSize: CrossFadeOverlapSize; - f0Factor: number - f0Detector: F0Detector // dio or harvest - recordIO: number // 0:off, 1:on + f0Factor: number; + f0Detector: F0Detector; // dio or harvest + recordIO: number; // 0:off, 1:on - enableServerAudio: number // 0:off, 1:on - serverAudioStated: number // 0:off, 1:on - serverAudioSampleRate: number - serverInputAudioSampleRate: number - serverOutputAudioSampleRate: number - serverMonitorAudioSampleRate: number - serverInputAudioBufferSize: number - serverOutputAudioBufferSize: number - serverInputDeviceId: number - serverOutputDeviceId: number - serverMonitorDeviceId: number - serverReadChunkSize: number - serverInputAudioGain: number - serverOutputAudioGain: number - serverMonitorAudioGain: number + enableServerAudio: number; // 0:off, 1:on + serverAudioStated: number; // 0:off, 1:on + serverAudioSampleRate: number; + serverInputAudioSampleRate: number; + serverOutputAudioSampleRate: number; + serverMonitorAudioSampleRate: number; + serverInputAudioBufferSize: number; + serverOutputAudioBufferSize: number; + serverInputDeviceId: number; + serverOutputDeviceId: number; + serverMonitorDeviceId: number; + serverReadChunkSize: number; + serverInputAudioGain: number; + serverOutputAudioGain: number; + serverMonitorAudioGain: number; + tran: number; // so-vits-svc + noiseScale: number; // so-vits-svc + predictF0: number; // so-vits-svc + silentThreshold: number; // so-vits-svc + extraConvertSize: number; // so-vits-svc + clusterInferRatio: number; // so-vits-svc - tran: number // so-vits-svc - noiseScale: number // so-vits-svc - predictF0: number // so-vits-svc - silentThreshold: number // so-vits-svc - extraConvertSize: number// so-vits-svc - clusterInferRatio: number // so-vits-svc + indexRatio: number; // RVC + protect: number; // RVC + rvcQuality: number; // 0:low, 1:high + silenceFront: number; // 0:off, 1:on + modelSamplingRate: ModelSamplingRate; // 32000,40000,48000 + modelSlotIndex: number; - indexRatio: number // RVC - protect: number // RVC - rvcQuality: number // 0:low, 1:high - silenceFront: number // 0:off, 1:on - modelSamplingRate: ModelSamplingRate // 32000,40000,48000 - modelSlotIndex: number, - - useEnhancer: number// DDSP-SVC - useDiff: number// DDSP-SVC + useEnhancer: number; // DDSP-SVC + useDiff: number; // DDSP-SVC // useDiffDpm: number// DDSP-SVC - diffMethod: DiffMethod, // DDSP-SVC - useDiffSilence: number// DDSP-SVC - diffAcc: number// DDSP-SVC - diffSpkId: number// DDSP-SVC - kStep: number// DDSP-SVC - threshold: number// DDSP-SVC + diffMethod: DiffMethod; // DDSP-SVC + useDiffSilence: number; // DDSP-SVC + diffAcc: number; // DDSP-SVC + diffSpkId: number; // DDSP-SVC + kStep: number; // DDSP-SVC + threshold: number; // DDSP-SVC - speedUp: number // Diffusion-SVC - skipDiffusion: number // Diffusion-SVC 0:off, 1:on + speedUp: number; // Diffusion-SVC + skipDiffusion: number; // Diffusion-SVC 0:off, 1:on - inputSampleRate: InputSampleRate - enableDirectML: number -} + inputSampleRate: InputSampleRate; + 
enableDirectML: number; +}; type ModelSlot = { - slotIndex: number - voiceChangerType: VoiceChangerType - name: string, - description: string, - credit: string, - termsOfUseUrl: string, - iconFile: string - speakers: { [key: number]: string } -} + slotIndex: number; + voiceChangerType: VoiceChangerType; + name: string; + description: string; + credit: string; + termsOfUseUrl: string; + iconFile: string; + speakers: { [key: number]: string }; +}; export type RVCModelSlot = ModelSlot & { - modelFile: string - indexFile: string, - defaultIndexRatio: number, - defaultProtect: number, - defaultTune: number, - modelType: RVCModelType, + modelFile: string; + indexFile: string; + defaultIndexRatio: number; + defaultProtect: number; + defaultTune: number; + modelType: RVCModelType; - embChannels: number, - f0: boolean, - samplingRate: number - deprecated: boolean -} + embChannels: number; + f0: boolean; + samplingRate: number; + deprecated: boolean; +}; export type MMVCv13ModelSlot = ModelSlot & { - modelFile: string - configFile: string, - srcId: number - dstId: number + modelFile: string; + configFile: string; + srcId: number; + dstId: number; - samplingRate: number - speakers: { [key: number]: string } -} + samplingRate: number; + speakers: { [key: number]: string }; +}; export type MMVCv15ModelSlot = ModelSlot & { - modelFile: string - configFile: string, - srcId: number - dstId: number - f0Factor: number - samplingRate: number - f0: { [key: number]: number } -} + modelFile: string; + configFile: string; + srcId: number; + dstId: number; + f0Factor: number; + samplingRate: number; + f0: { [key: number]: number }; +}; export type SoVitsSvc40ModelSlot = ModelSlot & { - modelFile: string - configFile: string, - clusterFile: string, - dstId: number + modelFile: string; + configFile: string; + clusterFile: string; + dstId: number; - samplingRate: number + samplingRate: number; - defaultTune: number - defaultClusterInferRatio: number - noiseScale: number - speakers: { [key: number]: string } -} + defaultTune: number; + defaultClusterInferRatio: number; + noiseScale: number; + speakers: { [key: number]: string }; +}; export type DDSPSVCModelSlot = ModelSlot & { - modelFile: string - configFile: string, - diffModelFile: string - diffConfigFile: string - dstId: number + modelFile: string; + configFile: string; + diffModelFile: string; + diffConfigFile: string; + dstId: number; - samplingRate: number - - defaultTune: number - enhancer: boolean - diffusion: boolean - acc: number - kstep: number - speakers: { [key: number]: string } -} + samplingRate: number; + defaultTune: number; + enhancer: boolean; + diffusion: boolean; + acc: number; + kstep: number; + speakers: { [key: number]: string }; +}; export type DiffusionSVCModelSlot = ModelSlot & { - modelFile: string - dstId: number + modelFile: string; + dstId: number; - samplingRate: number - - defaultTune: number - defaultKstep : number - defaultSpeedup: number - kStepMax: number - nLayers: number - nnLayers: number - speakers: { [key: number]: string } -} + samplingRate: number; + defaultTune: number; + defaultKstep: number; + defaultSpeedup: number; + kStepMax: number; + nLayers: number; + nnLayers: number; + speakers: { [key: number]: string }; +}; export type BeatriceModelSlot = ModelSlot & { - modelFile: string - dstId: number + modelFile: string; + dstId: number; - speakers: { [key: number]: string } -} + speakers: { [key: number]: string }; +}; -export type ModelSlotUnion = RVCModelSlot | MMVCv13ModelSlot | MMVCv15ModelSlot | SoVitsSvc40ModelSlot 
| DDSPSVCModelSlot | DiffusionSVCModelSlot | BeatriceModelSlot +export type ModelSlotUnion = RVCModelSlot | MMVCv13ModelSlot | MMVCv15ModelSlot | SoVitsSvc40ModelSlot | DDSPSVCModelSlot | DiffusionSVCModelSlot | BeatriceModelSlot; type ServerAudioDevice = { - kind: "audioinput" | "audiooutput", - index: number, - name: string - hostAPI: string -} + kind: "audioinput" | "audiooutput"; + index: number; + name: string; + hostAPI: string; +}; export type ServerInfo = VoiceChangerServerSetting & { // コンフィグ対象外 (getInfoで取得のみ可能な情報) - status: string - modelSlots: ModelSlotUnion[] - serverAudioInputDevices: ServerAudioDevice[] - serverAudioOutputDevices: ServerAudioDevice[] - sampleModels: (RVCSampleModel|DiffusionSVCSampleModel)[] + status: string; + modelSlots: ModelSlotUnion[]; + serverAudioInputDevices: ServerAudioDevice[]; + serverAudioOutputDevices: ServerAudioDevice[]; + sampleModels: (RVCSampleModel | DiffusionSVCSampleModel)[]; gpus: { - id: number, - name: string, - memory: number, - }[] - maxInputLength: number // MMVCv15 + id: number; + name: string; + memory: number; + }[]; + maxInputLength: number; // MMVCv15 voiceChangerParams: { - model_dir: string - } -} + model_dir: string; + }; +}; export type SampleModel = { - id: string - voiceChangerType: VoiceChangerType - lang: string - tag: string[] - name: string - modelUrl: string - termsOfUseUrl: string - icon: string - credit: string - description: string - sampleRate: number - modelType: string - f0: boolean -} - -export type RVCSampleModel =SampleModel & { - indexUrl: string - featureUrl: string -} - - -export type DiffusionSVCSampleModel =SampleModel & { - numOfDiffLayers: number - numOfNativeLayers: number - maxKStep: number -} - + id: string; + voiceChangerType: VoiceChangerType; + lang: string; + tag: string[]; + name: string; + modelUrl: string; + termsOfUseUrl: string; + icon: string; + credit: string; + description: string; + sampleRate: number; + modelType: string; + f0: boolean; +}; +export type RVCSampleModel = SampleModel & { + indexUrl: string; + featureUrl: string; +}; +export type DiffusionSVCSampleModel = SampleModel & { + numOfDiffLayers: number; + numOfNativeLayers: number; + maxKStep: number; +}; export const DefaultServerSetting: ServerInfo = { - // VC Common + // VC Common passThrough: false, inputSampleRate: 48000, @@ -388,7 +377,6 @@ export const DefaultServerSetting: ServerInfo = { dstId: 1, gpu: 0, - f0Factor: 1.0, f0Detector: F0Detector.rmvpe_onnx, @@ -421,101 +409,99 @@ export const DefaultServerSetting: ServerInfo = { skipDiffusion: 1, enableDirectML: 0, - // + // status: "ok", modelSlots: [], serverAudioInputDevices: [], serverAudioOutputDevices: [], - maxInputLength: 128 * 2048, + maxInputLength: 128 * 2048, voiceChangerParams: { - model_dir: "" - } -} + model_dir: "", + }, +}; /////////////////////// // Workletセッティング /////////////////////// export type WorkletSetting = { - numTrancateTreshold: number, - volTrancateThreshold: number, - volTrancateLength: number -} + numTrancateTreshold: number; + volTrancateThreshold: number; + volTrancateLength: number; +}; /////////////////////// // Worklet Nodeセッティング /////////////////////// export const Protocol = { - "sio": "sio", - "rest": "rest", -} as const -export type Protocol = typeof Protocol[keyof typeof Protocol] + sio: "sio", + rest: "rest", +} as const; +export type Protocol = (typeof Protocol)[keyof typeof Protocol]; export const SendingSampleRate = { "48000": 48000, "44100": 44100, - "24000": 24000 -} as const -export type SendingSampleRate = typeof 
SendingSampleRate[keyof typeof SendingSampleRate] + "24000": 24000, +} as const; +export type SendingSampleRate = (typeof SendingSampleRate)[keyof typeof SendingSampleRate]; export const DownSamplingMode = { - "decimate": "decimate", - "average": "average" -} as const -export type DownSamplingMode = typeof DownSamplingMode[keyof typeof DownSamplingMode] - + decimate: "decimate", + average: "average", +} as const; +export type DownSamplingMode = (typeof DownSamplingMode)[keyof typeof DownSamplingMode]; export type WorkletNodeSetting = { - serverUrl: string, - protocol: Protocol, - sendingSampleRate: SendingSampleRate, - inputChunkNum: number, - downSamplingMode: DownSamplingMode, -} - + serverUrl: string; + protocol: Protocol; + sendingSampleRate: SendingSampleRate; + inputChunkNum: number; + downSamplingMode: DownSamplingMode; +}; /////////////////////// // クライアントセッティング /////////////////////// export const SampleRate = { "48000": 48000, -} as const -export type SampleRate = typeof SampleRate[keyof typeof SampleRate] +} as const; +export type SampleRate = (typeof SampleRate)[keyof typeof SampleRate]; export type VoiceChangerClientSetting = { - audioInput: string | MediaStream | null, - sampleRate: SampleRate, // 48000Hz - echoCancel: boolean, - noiseSuppression: boolean, - noiseSuppression2: boolean + audioInput: string | MediaStream | null; + sampleRate: SampleRate; // 48000Hz + echoCancel: boolean; + noiseSuppression: boolean; + noiseSuppression2: boolean; - inputGain: number - outputGain: number - monitorGain: number + inputGain: number; + outputGain: number; + monitorGain: number; - passThroughConfirmationSkip: boolean -} + passThroughConfirmationSkip: boolean; +}; /////////////////////// // Client セッティング /////////////////////// export type ClientSetting = { - workletSetting: WorkletSetting - workletNodeSetting: WorkletNodeSetting - voiceChangerClientSetting: VoiceChangerClientSetting -} + workletSetting: WorkletSetting; + workletNodeSetting: WorkletNodeSetting; + voiceChangerClientSetting: VoiceChangerClientSetting; +}; export const DefaultClientSettng: ClientSetting = { workletSetting: { numTrancateTreshold: 100, volTrancateThreshold: 0.0005, - volTrancateLength: 32 + volTrancateLength: 32, }, workletNodeSetting: { serverUrl: "", protocol: "sio", sendingSampleRate: 48000, inputChunkNum: 48, - downSamplingMode: "average" + downSamplingMode: "average", }, voiceChangerClientSetting: { audioInput: null, @@ -526,10 +512,9 @@ export const DefaultClientSettng: ClientSetting = { inputGain: 1.0, outputGain: 1.0, monitorGain: 1.0, - passThroughConfirmationSkip: false - } -} - + passThroughConfirmationSkip: false, + }, +}; //////////////////////////////////// // Exceptions @@ -538,36 +523,33 @@ export const VOICE_CHANGER_CLIENT_EXCEPTION = { ERR_SIO_CONNECT_FAILED: "ERR_SIO_CONNECT_FAILED", ERR_SIO_INVALID_RESPONSE: "ERR_SIO_INVALID_RESPONSE", ERR_REST_INVALID_RESPONSE: "ERR_REST_INVALID_RESPONSE", - ERR_MIC_STREAM_NOT_INITIALIZED: "ERR_MIC_STREAM_NOT_INITIALIZED" - -} as const -export type VOICE_CHANGER_CLIENT_EXCEPTION = typeof VOICE_CHANGER_CLIENT_EXCEPTION[keyof typeof VOICE_CHANGER_CLIENT_EXCEPTION] - + ERR_MIC_STREAM_NOT_INITIALIZED: "ERR_MIC_STREAM_NOT_INITIALIZED", +} as const; +export type VOICE_CHANGER_CLIENT_EXCEPTION = (typeof VOICE_CHANGER_CLIENT_EXCEPTION)[keyof typeof VOICE_CHANGER_CLIENT_EXCEPTION]; //////////////////////////////////// // indexedDB //////////////////////////////////// -export const INDEXEDDB_DB_APP_NAME = "INDEXEDDB_KEY_VOICE_CHANGER" -export const 
INDEXEDDB_DB_NAME = "INDEXEDDB_KEY_VOICE_CHANGER_DB" -export const INDEXEDDB_KEY_CLIENT = "INDEXEDDB_KEY_VOICE_CHANGER_LIB_CLIENT" -export const INDEXEDDB_KEY_SERVER = "INDEXEDDB_KEY_VOICE_CHANGER_LIB_SERVER" -export const INDEXEDDB_KEY_MODEL_DATA = "INDEXEDDB_KEY_VOICE_CHANGER_LIB_MODEL_DATA" - +export const INDEXEDDB_DB_APP_NAME = "INDEXEDDB_KEY_VOICE_CHANGER"; +export const INDEXEDDB_DB_NAME = "INDEXEDDB_KEY_VOICE_CHANGER_DB"; +export const INDEXEDDB_KEY_CLIENT = "INDEXEDDB_KEY_VOICE_CHANGER_LIB_CLIENT"; +export const INDEXEDDB_KEY_SERVER = "INDEXEDDB_KEY_VOICE_CHANGER_LIB_SERVER"; +export const INDEXEDDB_KEY_MODEL_DATA = "INDEXEDDB_KEY_VOICE_CHANGER_LIB_MODEL_DATA"; // ONNX export type OnnxExporterInfo = { - "status": string - "path": string - "filename": string -} + status: string; + path: string; + filename: string; +}; // Merge export type MergeElement = { - slotIndex: number - strength: number -} + slotIndex: number; + strength: number; +}; export type MergeModelRequest = { - voiceChangerType: VoiceChangerType - command: "mix", - files: MergeElement[] -} + voiceChangerType: VoiceChangerType; + command: "mix"; + files: MergeElement[]; +}; diff --git a/client/lib/src/exceptions.ts b/client/lib/src/exceptions.ts index acda23ca..cf532837 100644 --- a/client/lib/src/exceptions.ts +++ b/client/lib/src/exceptions.ts @@ -1,5 +1,5 @@ export class ModelLoadException extends Error { - public causeFileType: string = "" + public causeFileType: string = ""; constructor(causeFileType: string) { super(`Model Load Exception:${causeFileType}`); this.causeFileType = causeFileType; diff --git a/client/lib/src/hooks/useClient.ts b/client/lib/src/hooks/useClient.ts index 1a99ff30..93093347 100644 --- a/client/lib/src/hooks/useClient.ts +++ b/client/lib/src/hooks/useClient.ts @@ -1,269 +1,258 @@ -import { useEffect, useMemo, useRef, useState } from "react" -import { VoiceChangerClient } from "../VoiceChangerClient" -import { useClientSetting } from "./useClientSetting" -import { IndexedDBStateAndMethod, useIndexedDB } from "./useIndexedDB" -import { ServerSettingState, useServerSetting } from "./useServerSetting" -import { useWorkletNodeSetting } from "./useWorkletNodeSetting" -import { useWorkletSetting } from "./useWorkletSetting" -import { ClientSetting, DefaultClientSettng, VoiceChangerClientSetting, WorkletNodeSetting, WorkletSetting } from "../const" +import { useEffect, useMemo, useRef, useState } from "react"; +import { VoiceChangerClient } from "../VoiceChangerClient"; +import { useClientSetting } from "./useClientSetting"; +import { IndexedDBStateAndMethod, useIndexedDB } from "./useIndexedDB"; +import { ServerSettingState, useServerSetting } from "./useServerSetting"; +import { useWorkletNodeSetting } from "./useWorkletNodeSetting"; +import { useWorkletSetting } from "./useWorkletSetting"; +import { ClientSetting, DefaultClientSettng, VoiceChangerClientSetting, WorkletNodeSetting, WorkletSetting } from "../const"; export type UseClientProps = { - audioContext: AudioContext | null -} + audioContext: AudioContext | null; +}; export type ClientState = { - initialized: boolean - setting: ClientSetting, + initialized: boolean; + setting: ClientSetting; // 各種設定I/Fへの参照 - setVoiceChangerClientSetting: (_voiceChangerClientSetting: VoiceChangerClientSetting) => void + setVoiceChangerClientSetting: (_voiceChangerClientSetting: VoiceChangerClientSetting) => void; setServerUrl: (url: string) => void; - start: () => Promise - stop: () => Promise - reloadClientSetting: () => Promise + start: () => Promise; + 
stop: () => Promise; + reloadClientSetting: () => Promise; - setWorkletNodeSetting: (_workletNodeSetting: WorkletNodeSetting) => void - startOutputRecording: () => void - stopOutputRecording: () => Promise - trancateBuffer: () => Promise + setWorkletNodeSetting: (_workletNodeSetting: WorkletNodeSetting) => void; + startOutputRecording: () => void; + stopOutputRecording: () => Promise; + trancateBuffer: () => Promise; - setWorkletSetting: (_workletSetting: WorkletSetting) => void + setWorkletSetting: (_workletSetting: WorkletSetting) => void; // workletSetting: WorkletSetting // workletSetting: WorkletSettingState // clientSetting: ClientSettingState // workletNodeSetting: WorkletNodeSettingState - serverSetting: ServerSettingState - indexedDBState: IndexedDBStateAndMethod + serverSetting: ServerSettingState; + indexedDBState: IndexedDBStateAndMethod; // モニタリングデータ bufferingTime: number; volume: number; - performance: PerformanceData - updatePerformance: (() => Promise) | null + performance: PerformanceData; + updatePerformance: (() => Promise) | null; // setClientType: (val: ClientType) => void // 情報取得 - getInfo: () => Promise + getInfo: () => Promise; // 設定クリア - clearSetting: () => Promise + clearSetting: () => Promise; // AudioOutputElement 設定 - setAudioOutputElementId: (elemId: string) => void - setAudioMonitorElementId: (elemId: string) => void + setAudioOutputElementId: (elemId: string) => void; + setAudioMonitorElementId: (elemId: string) => void; - ioErrorCount: number - resetIoErrorCount: () => void -} + ioErrorCount: number; + resetIoErrorCount: () => void; +}; export type PerformanceData = { - responseTime: number - preprocessTime: number - mainprocessTime: number - postprocessTime: number -} + responseTime: number; + preprocessTime: number; + mainprocessTime: number; + postprocessTime: number; +}; const InitialPerformanceData: PerformanceData = { responseTime: 0, preprocessTime: 0, mainprocessTime: 0, - postprocessTime: 0 -} + postprocessTime: 0, +}; export const useClient = (props: UseClientProps): ClientState => { - - const [initialized, setInitialized] = useState(false) - const [setting, setSetting] = useState(DefaultClientSettng) - // (1-1) クライアント - const voiceChangerClientRef = useRef(null) - const [voiceChangerClient, setVoiceChangerClient] = useState(voiceChangerClientRef.current) + const [initialized, setInitialized] = useState(false); + const [setting, setSetting] = useState(DefaultClientSettng); + // (1-1) クライアント + const voiceChangerClientRef = useRef(null); + const [voiceChangerClient, setVoiceChangerClient] = useState(voiceChangerClientRef.current); //// クライアント初期化待ち用フラグ - const initializedResolveRef = useRef<(value: void | PromiseLike) => void>() + const initializedResolveRef = useRef<(value: void | PromiseLike) => void>(); const initializedPromise = useMemo(() => { return new Promise((resolve) => { - initializedResolveRef.current = resolve - }) - }, []) + initializedResolveRef.current = resolve; + }); + }, []); // (1-2) 各種設定I/F - const voiceChangerClientSetting = useClientSetting({ voiceChangerClient, voiceChangerClientSetting: setting.voiceChangerClientSetting }) - const workletNodeSetting = useWorkletNodeSetting({ voiceChangerClient: voiceChangerClient, workletNodeSetting: setting.workletNodeSetting }) - useWorkletSetting({ voiceChangerClient, workletSetting: setting.workletSetting }) - const serverSetting = useServerSetting({ voiceChangerClient }) - const indexedDBState = useIndexedDB({ clientType: null }) - + const voiceChangerClientSetting = useClientSetting({ 
voiceChangerClient, voiceChangerClientSetting: setting.voiceChangerClientSetting }); + const workletNodeSetting = useWorkletNodeSetting({ voiceChangerClient: voiceChangerClient, workletNodeSetting: setting.workletNodeSetting }); + useWorkletSetting({ voiceChangerClient, workletSetting: setting.workletSetting }); + const serverSetting = useServerSetting({ voiceChangerClient }); + const indexedDBState = useIndexedDB({ clientType: null }); // (1-3) モニタリングデータ - const [bufferingTime, setBufferingTime] = useState(0) - const [performance, setPerformance] = useState(InitialPerformanceData) - const [volume, setVolume] = useState(0) - const [ioErrorCount, setIoErrorCount] = useState(0) + const [bufferingTime, setBufferingTime] = useState(0); + const [performance, setPerformance] = useState(InitialPerformanceData); + const [volume, setVolume] = useState(0); + const [ioErrorCount, setIoErrorCount] = useState(0); //// Server Audio Deviceを使うとき、モニタリングデータはpolling const updatePerformance = useMemo(() => { if (!voiceChangerClientRef.current) { - return null + return null; } return async () => { if (voiceChangerClientRef.current) { - const performance = await voiceChangerClientRef.current!.getPerformance() - const responseTime = performance[0] - const preprocessTime = performance[1] - const mainprocessTime = performance[2] - const postprocessTime = performance[3] - setPerformance({ responseTime, preprocessTime, mainprocessTime, postprocessTime }) + const performance = await voiceChangerClientRef.current!.getPerformance(); + const responseTime = performance[0]; + const preprocessTime = performance[1]; + const mainprocessTime = performance[2]; + const postprocessTime = performance[3]; + setPerformance({ responseTime, preprocessTime, mainprocessTime, postprocessTime }); } else { - const responseTime = 0 - const preprocessTime = 0 - const mainprocessTime = 0 - const postprocessTime = 0 - setPerformance({ responseTime, preprocessTime, mainprocessTime, postprocessTime }) + const responseTime = 0; + const preprocessTime = 0; + const mainprocessTime = 0; + const postprocessTime = 0; + setPerformance({ responseTime, preprocessTime, mainprocessTime, postprocessTime }); } - } - }, [voiceChangerClientRef.current]) - - + }; + }, [voiceChangerClientRef.current]); // (1-4) エラーステータス - const ioErrorCountRef = useRef(0) + const ioErrorCountRef = useRef(0); const resetIoErrorCount = () => { - ioErrorCountRef.current = 0 - setIoErrorCount(ioErrorCountRef.current) - } + ioErrorCountRef.current = 0; + setIoErrorCount(ioErrorCountRef.current); + }; // 設定データ管理 - const { setItem, getItem } = useIndexedDB({ clientType: null }) + const { setItem, getItem } = useIndexedDB({ clientType: null }); // 設定データの更新と保存 const _setSetting = (_setting: ClientSetting) => { - const storeData = { ..._setting } - storeData.voiceChangerClientSetting = { ...storeData.voiceChangerClientSetting } + const storeData = { ..._setting }; + storeData.voiceChangerClientSetting = { ...storeData.voiceChangerClientSetting }; if (typeof storeData.voiceChangerClientSetting.audioInput != "string") { - storeData.voiceChangerClientSetting.audioInput = "none" + storeData.voiceChangerClientSetting.audioInput = "none"; } - setItem("clientSetting", storeData) + setItem("clientSetting", storeData); - setSetting(_setting) - } + setSetting(_setting); + }; // 設定データ初期化 useEffect(() => { if (!voiceChangerClient) { - return + return; } const loadCache = async () => { - const _setting = await getItem("clientSetting") as ClientSetting + const _setting = (await 
getItem("clientSetting")) as ClientSetting; if (_setting) { - setSetting(_setting) - serverSetting.reloadServerInfo() - + setSetting(_setting); + serverSetting.reloadServerInfo(); } - } - loadCache() - }, [voiceChangerClient]) - - - - + }; + loadCache(); + }, [voiceChangerClient]); // (2-1) クライアント初期化処理 useEffect(() => { const initialized = async () => { if (!props.audioContext) { - return + return; } const voiceChangerClient = new VoiceChangerClient(props.audioContext, true, { notifySendBufferingTime: (val: number) => { - setBufferingTime(val) + setBufferingTime(val); }, notifyResponseTime: (val: number, perf?: number[]) => { - const responseTime = val - const preprocessTime = perf ? Math.ceil(perf[0] * 1000) : 0 - const mainprocessTime = perf ? Math.ceil(perf[1] * 1000) : 0 - const postprocessTime = perf ? Math.ceil(perf[2] * 1000) : 0 - setPerformance({ responseTime, preprocessTime, mainprocessTime, postprocessTime }) + const responseTime = val; + const preprocessTime = perf ? Math.ceil(perf[0] * 1000) : 0; + const mainprocessTime = perf ? Math.ceil(perf[1] * 1000) : 0; + const postprocessTime = perf ? Math.ceil(perf[2] * 1000) : 0; + setPerformance({ responseTime, preprocessTime, mainprocessTime, postprocessTime }); }, notifyException: (mes: string) => { if (mes.length > 0) { - console.log(`error:${mes}`) - ioErrorCountRef.current += 1 - setIoErrorCount(ioErrorCountRef.current) + console.log(`error:${mes}`); + ioErrorCountRef.current += 1; + setIoErrorCount(ioErrorCountRef.current); } }, notifyVolume: (vol: number) => { - setVolume(vol) - } - }) + setVolume(vol); + }, + }); - await voiceChangerClient.isInitialized() - voiceChangerClientRef.current = voiceChangerClient - setVoiceChangerClient(voiceChangerClientRef.current) - console.log("[useClient] client initialized") + await voiceChangerClient.isInitialized(); + voiceChangerClientRef.current = voiceChangerClient; + setVoiceChangerClient(voiceChangerClientRef.current); + console.log("[useClient] client initialized"); // const audio = document.getElementById(props.audioOutputElementId) as HTMLAudioElement // audio.srcObject = voiceChangerClientRef.current.stream // audio.play() - initializedResolveRef.current!() - setInitialized(true) - } - initialized() - }, [props.audioContext]) + initializedResolveRef.current!(); + setInitialized(true); + }; + initialized(); + }, [props.audioContext]); const setAudioOutputElementId = (elemId: string) => { if (!voiceChangerClientRef.current) { - console.warn("[voiceChangerClient] is not ready for set audio output.") - return + console.warn("[voiceChangerClient] is not ready for set audio output."); + return; } - const audio = document.getElementById(elemId) as HTMLAudioElement + const audio = document.getElementById(elemId) as HTMLAudioElement; if (audio.paused) { - audio.srcObject = voiceChangerClientRef.current.stream - audio.play() + audio.srcObject = voiceChangerClientRef.current.stream; + audio.play(); } - } + }; const setAudioMonitorElementId = (elemId: string) => { if (!voiceChangerClientRef.current) { - console.warn("[voiceChangerClient] is not ready for set audio output.") - return + console.warn("[voiceChangerClient] is not ready for set audio output."); + return; } - const audio = document.getElementById(elemId) as HTMLAudioElement + const audio = document.getElementById(elemId) as HTMLAudioElement; if (audio.paused) { - audio.srcObject = voiceChangerClientRef.current.monitorStream - audio.play() + audio.srcObject = voiceChangerClientRef.current.monitorStream; + audio.play(); } - } + }; // 
(2-2) 情報リロード const getInfo = useMemo(() => { return async () => { - await initializedPromise - await voiceChangerClientSetting.reloadClientSetting() // 実質的な処理の意味はない - await serverSetting.reloadServerInfo() - } - }, [voiceChangerClientSetting.reloadClientSetting, serverSetting.reloadServerInfo]) - + await initializedPromise; + await voiceChangerClientSetting.reloadClientSetting(); // 実質的な処理の意味はない + await serverSetting.reloadServerInfo(); + }; + }, [voiceChangerClientSetting.reloadClientSetting, serverSetting.reloadServerInfo]); const clearSetting = async () => { // TBD - } + }; // 設定変更 const setVoiceChangerClientSetting = (_voiceChangerClientSetting: VoiceChangerClientSetting) => { - setting.voiceChangerClientSetting = _voiceChangerClientSetting - console.log("setting.voiceChangerClientSetting", setting.voiceChangerClientSetting) + setting.voiceChangerClientSetting = _voiceChangerClientSetting; + console.log("setting.voiceChangerClientSetting", setting.voiceChangerClientSetting); // workletSettingIF.setSetting(_workletSetting) - _setSetting({ ...setting }) - } - + _setSetting({ ...setting }); + }; const setWorkletNodeSetting = (_workletNodeSetting: WorkletNodeSetting) => { - setting.workletNodeSetting = _workletNodeSetting - console.log("setting.workletNodeSetting", setting.workletNodeSetting) + setting.workletNodeSetting = _workletNodeSetting; + console.log("setting.workletNodeSetting", setting.workletNodeSetting); // workletSettingIF.setSetting(_workletSetting) - _setSetting({ ...setting }) - } + _setSetting({ ...setting }); + }; const setWorkletSetting = (_workletSetting: WorkletSetting) => { - setting.workletSetting = _workletSetting - console.log("setting.workletSetting", setting.workletSetting) + setting.workletSetting = _workletSetting; + console.log("setting.workletSetting", setting.workletSetting); // workletSettingIF.setSetting(_workletSetting) - _setSetting({ ...setting }) - } + _setSetting({ ...setting }); + }; return { initialized, @@ -302,6 +291,6 @@ export const useClient = (props: UseClientProps): ClientState => { setAudioMonitorElementId, ioErrorCount, - resetIoErrorCount - } -} \ No newline at end of file + resetIoErrorCount, + }; +}; diff --git a/client/lib/src/hooks/useClientSetting.ts b/client/lib/src/hooks/useClientSetting.ts index ffb5c1b5..80c42fc7 100644 --- a/client/lib/src/hooks/useClientSetting.ts +++ b/client/lib/src/hooks/useClientSetting.ts @@ -1,49 +1,46 @@ -import { useState, useMemo, useEffect } from "react" +import { useState, useMemo, useEffect } from "react"; -import { VoiceChangerClientSetting } from "../const" -import { VoiceChangerClient } from "../VoiceChangerClient" +import { VoiceChangerClientSetting } from "../const"; +import { VoiceChangerClient } from "../VoiceChangerClient"; export type UseClientSettingProps = { - voiceChangerClient: VoiceChangerClient | null - voiceChangerClientSetting: VoiceChangerClientSetting -} + voiceChangerClient: VoiceChangerClient | null; + voiceChangerClientSetting: VoiceChangerClientSetting; +}; export type ClientSettingState = { - setServerUrl: (url: string) => void; - start: () => Promise - stop: () => Promise - reloadClientSetting: () => Promise -} + start: () => Promise; + stop: () => Promise; + reloadClientSetting: () => Promise; +}; export const useClientSetting = (props: UseClientSettingProps): ClientSettingState => { // 更新比較用 - const [voiceChangerClientSetting, setVoiceChangerClientSetting] = useState(props.voiceChangerClientSetting) + const [voiceChangerClientSetting, setVoiceChangerClientSetting] = 
useState(props.voiceChangerClientSetting); useEffect(() => { const update = async () => { - if (!props.voiceChangerClient) return + if (!props.voiceChangerClient) return; for (let k in props.voiceChangerClientSetting) { - const cur_v = voiceChangerClientSetting[k as keyof VoiceChangerClientSetting] - const new_v = props.voiceChangerClientSetting[k as keyof VoiceChangerClientSetting] + const cur_v = voiceChangerClientSetting[k as keyof VoiceChangerClientSetting]; + const new_v = props.voiceChangerClientSetting[k as keyof VoiceChangerClientSetting]; if (cur_v != new_v) { - setVoiceChangerClientSetting(props.voiceChangerClientSetting) - await props.voiceChangerClient.updateClientSetting(props.voiceChangerClientSetting) - break + setVoiceChangerClientSetting(props.voiceChangerClientSetting); + await props.voiceChangerClient.updateClientSetting(props.voiceChangerClientSetting); + break; } } - } - update() - }, [props.voiceChangerClient, props.voiceChangerClientSetting]) - + }; + update(); + }, [props.voiceChangerClient, props.voiceChangerClientSetting]); const setServerUrl = useMemo(() => { return (url: string) => { - if (!props.voiceChangerClient) return - props.voiceChangerClient.setServerUrl(url, true) - } - }, [props.voiceChangerClient]) - + if (!props.voiceChangerClient) return; + props.voiceChangerClient.setServerUrl(url, true); + }; + }, [props.voiceChangerClient]); ////////////// // 操作 @@ -51,29 +48,29 @@ export const useClientSetting = (props: UseClientSettingProps): ClientSettingSta // (1) start const start = useMemo(() => { return async () => { - if (!props.voiceChangerClient) return - await props.voiceChangerClient.start() - } - }, [props.voiceChangerClient]) + if (!props.voiceChangerClient) return; + await props.voiceChangerClient.start(); + }; + }, [props.voiceChangerClient]); // (2) stop const stop = useMemo(() => { return async () => { - if (!props.voiceChangerClient) return - await props.voiceChangerClient.stop() - } - }, [props.voiceChangerClient]) + if (!props.voiceChangerClient) return; + await props.voiceChangerClient.stop(); + }; + }, [props.voiceChangerClient]); const reloadClientSetting = useMemo(() => { return async () => { - if (!props.voiceChangerClient) return - await props.voiceChangerClient.getClientSettings() - } - }, [props.voiceChangerClient]) + if (!props.voiceChangerClient) return; + await props.voiceChangerClient.getClientSettings(); + }; + }, [props.voiceChangerClient]); return { setServerUrl, start, stop, - reloadClientSetting - } -} \ No newline at end of file + reloadClientSetting, + }; +}; diff --git a/client/lib/src/hooks/useIndexedDB.ts b/client/lib/src/hooks/useIndexedDB.ts index 8977f3db..bca35123 100644 --- a/client/lib/src/hooks/useIndexedDB.ts +++ b/client/lib/src/hooks/useIndexedDB.ts @@ -3,67 +3,65 @@ import { useMemo } from "react"; import { INDEXEDDB_DB_APP_NAME, INDEXEDDB_DB_NAME } from "../const"; export type UseIndexedDBProps = { - clientType: null -} + clientType: null; +}; export type IndexedDBState = { - dummy: string -} + dummy: string; +}; export type IndexedDBStateAndMethod = IndexedDBState & { - setItem: (key: string, value: unknown) => Promise, - getItem: (key: string) => Promise - removeItem: (key: string) => Promise - removeDB: () => Promise -} + setItem: (key: string, value: unknown) => Promise; + getItem: (key: string) => Promise; + removeItem: (key: string) => Promise; + removeDB: () => Promise; +}; export const useIndexedDB = (props: UseIndexedDBProps): IndexedDBStateAndMethod => { - const clientType = props.clientType || 
"default" + const clientType = props.clientType || "default"; localForage.config({ driver: localForage.INDEXEDDB, name: INDEXEDDB_DB_APP_NAME, version: 1.0, storeName: `${INDEXEDDB_DB_NAME}`, - description: 'appStorage' - - }) + description: "appStorage", + }); const setItem = useMemo(() => { return async (key: string, value: unknown) => { - const clientKey = `${clientType}_${key}` - await localForage.setItem(clientKey, value) - } - }, [props.clientType]) + const clientKey = `${clientType}_${key}`; + await localForage.setItem(clientKey, value); + }; + }, [props.clientType]); const getItem = useMemo(() => { return async (key: string) => { - const clientKey = `${clientType}_${key}` - return await localForage.getItem(clientKey) - } - }, [props.clientType]) + const clientKey = `${clientType}_${key}`; + return await localForage.getItem(clientKey); + }; + }, [props.clientType]); const removeItem = useMemo(() => { return async (key: string) => { - const clientKey = `${clientType}_${key}` - console.log("remove key:", clientKey) - return await localForage.removeItem(clientKey) - } - }, [props.clientType]) + const clientKey = `${clientType}_${key}`; + console.log("remove key:", clientKey); + return await localForage.removeItem(clientKey); + }; + }, [props.clientType]); const removeDB = useMemo(() => { return async () => { - const keys = await localForage.keys() + const keys = await localForage.keys(); for (const key of keys) { - console.log("remove key:", key) - await localForage.removeItem(key) + console.log("remove key:", key); + await localForage.removeItem(key); } - } - }, [props.clientType]) - + }; + }, [props.clientType]); return { dummy: "", setItem, getItem, removeItem, - removeDB - } -} \ No newline at end of file + removeDB, + }; +}; diff --git a/client/lib/src/hooks/useServerSetting.ts b/client/lib/src/hooks/useServerSetting.ts index a130fe40..38414a12 100644 --- a/client/lib/src/hooks/useServerSetting.ts +++ b/client/lib/src/hooks/useServerSetting.ts @@ -1,215 +1,208 @@ -import { useState, useMemo } from "react" -import { VoiceChangerServerSetting, ServerInfo, ServerSettingKey, OnnxExporterInfo, MergeModelRequest, VoiceChangerType, DefaultServerSetting } from "../const" -import { VoiceChangerClient } from "../VoiceChangerClient" +import { useState, useMemo } from "react"; +import { VoiceChangerServerSetting, ServerInfo, ServerSettingKey, OnnxExporterInfo, MergeModelRequest, VoiceChangerType, DefaultServerSetting } from "../const"; +import { VoiceChangerClient } from "../VoiceChangerClient"; export const ModelAssetName = { - iconFile: "iconFile" -} as const -export type ModelAssetName = typeof ModelAssetName[keyof typeof ModelAssetName] - + iconFile: "iconFile", +} as const; +export type ModelAssetName = (typeof ModelAssetName)[keyof typeof ModelAssetName]; export const ModelFileKind = { - "mmvcv13Config": "mmvcv13Config", - "mmvcv13Model": "mmvcv13Model", - "mmvcv15Config": "mmvcv15Config", - "mmvcv15Model": "mmvcv15Model", - "mmvcv15Correspondence": "mmvcv15Correspondence", + mmvcv13Config: "mmvcv13Config", + mmvcv13Model: "mmvcv13Model", + mmvcv15Config: "mmvcv15Config", + mmvcv15Model: "mmvcv15Model", + mmvcv15Correspondence: "mmvcv15Correspondence", - "soVitsSvc40Config": "soVitsSvc40Config", - "soVitsSvc40Model": "soVitsSvc40Model", - "soVitsSvc40Cluster": "soVitsSvc40Cluster", + soVitsSvc40Config: "soVitsSvc40Config", + soVitsSvc40Model: "soVitsSvc40Model", + soVitsSvc40Cluster: "soVitsSvc40Cluster", - "rvcModel": "rvcModel", - "rvcIndex": "rvcIndex", + rvcModel: "rvcModel", + 
rvcIndex: "rvcIndex", - "ddspSvcModel": "ddspSvcModel", - "ddspSvcModelConfig": "ddspSvcModelConfig", - "ddspSvcDiffusion": "ddspSvcDiffusion", - "ddspSvcDiffusionConfig": "ddspSvcDiffusionConfig", + ddspSvcModel: "ddspSvcModel", + ddspSvcModelConfig: "ddspSvcModelConfig", + ddspSvcDiffusion: "ddspSvcDiffusion", + ddspSvcDiffusionConfig: "ddspSvcDiffusionConfig", - "diffusionSVCModel": "diffusionSVCModel", + diffusionSVCModel: "diffusionSVCModel", - "beatriceModel": "beatriceModel", - -} as const -export type ModelFileKind = typeof ModelFileKind[keyof typeof ModelFileKind] + beatriceModel: "beatriceModel", +} as const; +export type ModelFileKind = (typeof ModelFileKind)[keyof typeof ModelFileKind]; export type ModelFile = { - file: File, - kind: ModelFileKind - dir: string -} + file: File; + kind: ModelFileKind; + dir: string; +}; export type ModelUploadSetting = { - voiceChangerType: VoiceChangerType, - slot: number - isSampleMode: boolean - sampleId: string | null + voiceChangerType: VoiceChangerType; + slot: number; + isSampleMode: boolean; + sampleId: string | null; - files: ModelFile[] - params: any -} + files: ModelFile[]; + params: any; +}; export type ModelFileForServer = Omit & { - name: string, - kind: ModelFileKind -} + name: string; + kind: ModelFileKind; +}; export type ModelUploadSettingForServer = Omit & { - files: ModelFileForServer[] -} + files: ModelFileForServer[]; +}; type AssetUploadSetting = { - slot: number - name: ModelAssetName - file: string -} + slot: number; + name: ModelAssetName; + file: string; +}; export type UseServerSettingProps = { - voiceChangerClient: VoiceChangerClient | null -} + voiceChangerClient: VoiceChangerClient | null; +}; export type ServerSettingState = { - serverSetting: ServerInfo - updateServerSettings: (setting: ServerInfo) => Promise + serverSetting: ServerInfo; + updateServerSettings: (setting: ServerInfo) => Promise; reloadServerInfo: () => Promise; - uploadModel: (setting: ModelUploadSetting) => Promise - uploadProgress: number - isUploading: boolean + uploadModel: (setting: ModelUploadSetting) => Promise; + uploadProgress: number; + isUploading: boolean; - getOnnx: () => Promise - mergeModel: (request: MergeModelRequest) => Promise - updateModelDefault: () => Promise - updateModelInfo: (slot: number, key: string, val: string) => Promise - uploadAssets: (slot: number, name: ModelAssetName, file: File) => Promise -} + getOnnx: () => Promise; + mergeModel: (request: MergeModelRequest) => Promise; + updateModelDefault: () => Promise; + updateModelInfo: (slot: number, key: string, val: string) => Promise; + uploadAssets: (slot: number, name: ModelAssetName, file: File) => Promise; +}; export const useServerSetting = (props: UseServerSettingProps): ServerSettingState => { - const [serverSetting, setServerSetting] = useState(DefaultServerSetting) + const [serverSetting, setServerSetting] = useState(DefaultServerSetting); ////////////// // 設定 ///////////// const updateServerSettings = useMemo(() => { return async (setting: ServerInfo) => { - if (!props.voiceChangerClient) return + if (!props.voiceChangerClient) return; for (let i = 0; i < Object.values(ServerSettingKey).length; i++) { - const k = Object.values(ServerSettingKey)[i] as keyof VoiceChangerServerSetting - const cur_v = serverSetting[k] - const new_v = setting[k] + const k = Object.values(ServerSettingKey)[i] as keyof VoiceChangerServerSetting; + const cur_v = serverSetting[k]; + const new_v = setting[k]; if (cur_v != new_v) { - const res = await 
props.voiceChangerClient.updateServerSettings(k, "" + new_v) - setServerSetting(res) + const res = await props.voiceChangerClient.updateServerSettings(k, "" + new_v); + setServerSetting(res); } } - } - }, [props.voiceChangerClient, serverSetting]) - - + }; + }, [props.voiceChangerClient, serverSetting]); ////////////// // 操作 ///////////// - const [uploadProgress, setUploadProgress] = useState(0) - const [isUploading, setIsUploading] = useState(false) + const [uploadProgress, setUploadProgress] = useState(0); + const [isUploading, setIsUploading] = useState(false); // (e) モデルアップロード const _uploadFile2 = useMemo(() => { return async (file: File, onprogress: (progress: number, end: boolean) => void, dir: string = "") => { - if (!props.voiceChangerClient) return - const num = await props.voiceChangerClient.uploadFile2(dir, file, onprogress) - const res = await props.voiceChangerClient.concatUploadedFile(dir + file.name, num) - console.log("uploaded", num, res) - } - }, [props.voiceChangerClient]) + if (!props.voiceChangerClient) return; + const num = await props.voiceChangerClient.uploadFile2(dir, file, onprogress); + const res = await props.voiceChangerClient.concatUploadedFile(dir + file.name, num); + console.log("uploaded", num, res); + }; + }, [props.voiceChangerClient]); // 新しいアップローダ const uploadModel = useMemo(() => { return async (setting: ModelUploadSetting) => { if (!props.voiceChangerClient) { - return + return; } - setUploadProgress(0) - setIsUploading(true) - + setUploadProgress(0); + setIsUploading(true); if (setting.isSampleMode == false) { - const progRate = 1 / setting.files.length + const progRate = 1 / setting.files.length; for (let i = 0; i < setting.files.length; i++) { - const progOffset = 100 * i * progRate - await _uploadFile2(setting.files[i].file, (progress: number, _end: boolean) => { - setUploadProgress(progress * progRate + progOffset) - }, setting.files[i].dir) + const progOffset = 100 * i * progRate; + await _uploadFile2( + setting.files[i].file, + (progress: number, _end: boolean) => { + setUploadProgress(progress * progRate + progOffset); + }, + setting.files[i].dir + ); } } const params: ModelUploadSettingForServer = { - ...setting, files: setting.files.map((f) => { return { name: f.file.name, kind: f.kind, dir: f.dir } }) - } + ...setting, + files: setting.files.map((f) => { + return { name: f.file.name, kind: f.kind, dir: f.dir }; + }), + }; - const loadPromise = props.voiceChangerClient.loadModel( - 0, - false, - JSON.stringify(params), - ) - await loadPromise + const loadPromise = props.voiceChangerClient.loadModel(0, false, JSON.stringify(params)); + await loadPromise; - setUploadProgress(0) - setIsUploading(false) - reloadServerInfo() - - } - }, [props.voiceChangerClient]) + setUploadProgress(0); + setIsUploading(false); + reloadServerInfo(); + }; + }, [props.voiceChangerClient]); const uploadAssets = useMemo(() => { return async (slot: number, name: ModelAssetName, file: File) => { - if (!props.voiceChangerClient) return + if (!props.voiceChangerClient) return; await _uploadFile2(file, (progress: number, _end: boolean) => { - console.log(progress, _end) - }) + console.log(progress, _end); + }); const assetUploadSetting: AssetUploadSetting = { slot, name, - file: file.name - } - await props.voiceChangerClient.uploadAssets(JSON.stringify(assetUploadSetting)) - reloadServerInfo() - } - }, [props.voiceChangerClient]) - - + file: file.name, + }; + await props.voiceChangerClient.uploadAssets(JSON.stringify(assetUploadSetting)); + reloadServerInfo(); + }; + }, 
[props.voiceChangerClient]); const reloadServerInfo = useMemo(() => { return async () => { - - if (!props.voiceChangerClient) return - const res = await props.voiceChangerClient.getServerSettings() - setServerSetting(res) - } - }, [props.voiceChangerClient]) - + if (!props.voiceChangerClient) return; + const res = await props.voiceChangerClient.getServerSettings(); + setServerSetting(res); + }; + }, [props.voiceChangerClient]); const getOnnx = async () => { - return props.voiceChangerClient!.getOnnx() - } + return props.voiceChangerClient!.getOnnx(); + }; const mergeModel = async (request: MergeModelRequest) => { - const serverInfo = await props.voiceChangerClient!.mergeModel(request) - setServerSetting(serverInfo) - return serverInfo - } + const serverInfo = await props.voiceChangerClient!.mergeModel(request); + setServerSetting(serverInfo); + return serverInfo; + }; const updateModelDefault = async () => { - const serverInfo = await props.voiceChangerClient!.updateModelDefault() - setServerSetting(serverInfo) - return serverInfo - } + const serverInfo = await props.voiceChangerClient!.updateModelDefault(); + setServerSetting(serverInfo); + return serverInfo; + }; const updateModelInfo = async (slot: number, key: string, val: string) => { - const serverInfo = await props.voiceChangerClient!.updateModelInfo(slot, key, val) - setServerSetting(serverInfo) - return serverInfo - } + const serverInfo = await props.voiceChangerClient!.updateModelInfo(slot, key, val); + setServerSetting(serverInfo); + return serverInfo; + }; return { serverSetting, @@ -223,6 +216,6 @@ export const useServerSetting = (props: UseServerSettingProps): ServerSettingSta mergeModel, updateModelDefault, updateModelInfo, - uploadAssets - } -} \ No newline at end of file + uploadAssets, + }; +}; diff --git a/client/lib/src/hooks/useWorkletNodeSetting.ts b/client/lib/src/hooks/useWorkletNodeSetting.ts index 1f934260..ed20bd67 100644 --- a/client/lib/src/hooks/useWorkletNodeSetting.ts +++ b/client/lib/src/hooks/useWorkletNodeSetting.ts @@ -1,67 +1,63 @@ -import { useState, useMemo, useEffect } from "react" - -import { WorkletNodeSetting } from "../const" -import { VoiceChangerClient } from "../VoiceChangerClient" +import { useState, useMemo, useEffect } from "react"; +import { WorkletNodeSetting } from "../const"; +import { VoiceChangerClient } from "../VoiceChangerClient"; export type UseWorkletNodeSettingProps = { - voiceChangerClient: VoiceChangerClient | null - workletNodeSetting: WorkletNodeSetting -} + voiceChangerClient: VoiceChangerClient | null; + workletNodeSetting: WorkletNodeSetting; +}; export type WorkletNodeSettingState = { - startOutputRecording: () => void - stopOutputRecording: () => Promise - trancateBuffer: () => Promise -} + startOutputRecording: () => void; + stopOutputRecording: () => Promise; + trancateBuffer: () => Promise; +}; export const useWorkletNodeSetting = (props: UseWorkletNodeSettingProps): WorkletNodeSettingState => { // 更新比較用 - const [workletNodeSetting, _setWorkletNodeSetting] = useState(props.workletNodeSetting) + const [workletNodeSetting, _setWorkletNodeSetting] = useState(props.workletNodeSetting); ////////////// // 設定 ///////////// useEffect(() => { - - if (!props.voiceChangerClient) return + if (!props.voiceChangerClient) return; for (let k in props.workletNodeSetting) { - const cur_v = workletNodeSetting[k as keyof WorkletNodeSetting] - const new_v = props.workletNodeSetting[k as keyof WorkletNodeSetting] + const cur_v = workletNodeSetting[k as keyof WorkletNodeSetting]; + const 
new_v = props.workletNodeSetting[k as keyof WorkletNodeSetting]; if (cur_v != new_v) { - _setWorkletNodeSetting(props.workletNodeSetting) - props.voiceChangerClient.updateWorkletNodeSetting(props.workletNodeSetting) - break + _setWorkletNodeSetting(props.workletNodeSetting); + props.voiceChangerClient.updateWorkletNodeSetting(props.workletNodeSetting); + break; } } - - }, [props.voiceChangerClient, props.workletNodeSetting]) - + }, [props.voiceChangerClient, props.workletNodeSetting]); const startOutputRecording = useMemo(() => { return () => { - if (!props.voiceChangerClient) return - props.voiceChangerClient.startOutputRecording() - } - }, [props.voiceChangerClient]) + if (!props.voiceChangerClient) return; + props.voiceChangerClient.startOutputRecording(); + }; + }, [props.voiceChangerClient]); const stopOutputRecording = useMemo(() => { return async () => { - if (!props.voiceChangerClient) return new Float32Array() - return props.voiceChangerClient.stopOutputRecording() - } - }, [props.voiceChangerClient]) + if (!props.voiceChangerClient) return new Float32Array(); + return props.voiceChangerClient.stopOutputRecording(); + }; + }, [props.voiceChangerClient]); const trancateBuffer = useMemo(() => { return async () => { - if (!props.voiceChangerClient) return - props.voiceChangerClient.trancateBuffer() - } - }, [props.voiceChangerClient]) + if (!props.voiceChangerClient) return; + props.voiceChangerClient.trancateBuffer(); + }; + }, [props.voiceChangerClient]); return { startOutputRecording, stopOutputRecording, - trancateBuffer - } -} \ No newline at end of file + trancateBuffer, + }; +}; diff --git a/client/lib/src/hooks/useWorkletSetting.ts b/client/lib/src/hooks/useWorkletSetting.ts index 4848f8a2..37c93f57 100644 --- a/client/lib/src/hooks/useWorkletSetting.ts +++ b/client/lib/src/hooks/useWorkletSetting.ts @@ -1,27 +1,24 @@ -import { useState, useEffect } from "react" +import { useState, useEffect } from "react"; import { WorkletSetting } from "../const"; import { VoiceChangerClient } from "../VoiceChangerClient"; export type UseWorkletSettingProps = { - voiceChangerClient: VoiceChangerClient | null - workletSetting: WorkletSetting -} + voiceChangerClient: VoiceChangerClient | null; + workletSetting: WorkletSetting; +}; export type WorkletSettingState = { // setting: WorkletSetting; // setSetting: (setting: WorkletSetting) => void; - -} +}; export const useWorkletSetting = (props: UseWorkletSettingProps): WorkletSettingState => { - const [setting, _setSetting] = useState(props.workletSetting) + const [setting, _setSetting] = useState(props.workletSetting); useEffect(() => { - if (!props.voiceChangerClient) return - props.voiceChangerClient.configureWorklet(setting) - }, [props.voiceChangerClient, props.workletSetting]) - - + if (!props.voiceChangerClient) return; + props.voiceChangerClient.configureWorklet(setting); + }, [props.voiceChangerClient, props.workletSetting]); // // 設定 _setSettingがトリガでuseEffectが呼ばれて、workletに設定が飛ぶ // const setSetting = useMemo(() => { @@ -34,5 +31,5 @@ export const useWorkletSetting = (props: UseWorkletSettingProps): WorkletSetting return { // setting, // setSetting, - } -} \ No newline at end of file + }; +}; diff --git a/client/lib/src/index.ts b/client/lib/src/index.ts index 64650c8c..e39f2e45 100644 --- a/client/lib/src/index.ts +++ b/client/lib/src/index.ts @@ -1,7 +1,7 @@ -export * from "./const" -export * from "./exceptions" -export * from "./VoiceChangerClient" -export * from "./util" -export * from "./hooks/useClient" -export * from 
"./hooks/useIndexedDB" -export * from "./hooks/useServerSetting" \ No newline at end of file +export * from "./const"; +export * from "./exceptions"; +export * from "./VoiceChangerClient"; +export * from "./util"; +export * from "./hooks/useClient"; +export * from "./hooks/useIndexedDB"; +export * from "./hooks/useServerSetting"; diff --git a/client/lib/src/util.ts b/client/lib/src/util.ts index 6268d27a..dd02b5d8 100644 --- a/client/lib/src/util.ts +++ b/client/lib/src/util.ts @@ -17,32 +17,32 @@ export const fileSelector = async (regex: string) => { const p = new Promise((resolve, reject) => { fileInput.onchange = (e) => { if (e.target instanceof HTMLInputElement == false) { - console.log("invalid target!", e.target) - reject("invalid target") - return null + console.log("invalid target!", e.target); + reject("invalid target"); + return null; } - const target = e.target as HTMLInputElement + const target = e.target as HTMLInputElement; if (!target.files || target.files.length == 0) { - reject("no file selected") - return null + reject("no file selected"); + return null; } if (regex != "" && target.files[0].type.match(regex)) { reject(`not target file type ${target.files[0].type}`); - return null + return null; } - resolve(target.files[0]) - return null + resolve(target.files[0]); + return null; }; fileInput.click(); }); - return await p -} + return await p; +}; export const fileSelectorAsDataURL = async (regex: string) => { - const f = await fileSelector(regex) + const f = await fileSelector(regex); if (!f) { - return f + return f; } const url = await new Promise((resolve) => { @@ -52,15 +52,13 @@ export const fileSelectorAsDataURL = async (regex: string) => { resolve(reader.result as string); }; reader.readAsDataURL(f); - }) - return url -} - + }); + return url; +}; export const validateUrl = (url: string) => { - if (url?.endsWith("/")) { - return url.substring(0, url.length - 1) + return url.substring(0, url.length - 1); } - return url -} + return url; +}; diff --git a/client/lib/src/utils/BlockingQueue.ts b/client/lib/src/utils/BlockingQueue.ts index fc32a5d2..3ff232f0 100644 --- a/client/lib/src/utils/BlockingQueue.ts +++ b/client/lib/src/utils/BlockingQueue.ts @@ -38,4 +38,4 @@ export class BlockingQueue { get length() { return this._promises.length - this._resolvers.length; } -} \ No newline at end of file +} diff --git a/client/lib/tsconfig.json b/client/lib/tsconfig.json index 138fa2a2..9c5293ae 100644 --- a/client/lib/tsconfig.json +++ b/client/lib/tsconfig.json @@ -1,33 +1,33 @@ { - "compilerOptions": { - "target": "ES2020", - "declaration": true, - "outDir": "./dist", + "compilerOptions": { + "target": "ES2020", + "declaration": true, + "outDir": "./dist", - /* ファイル名の大文字小文字を区別 */ - "forceConsistentCasingInFileNames": true, + /* ファイル名の大文字小文字を区別 */ + "forceConsistentCasingInFileNames": true, - /* 型チェック関係のオプション */ - "strict": true, - "noImplicitAny": true, - "strictNullChecks": true, - "noUnusedLocals": true, - "noUnusedParameters": true, - "noImplicitReturns": true, + /* 型チェック関係のオプション */ + "strict": true, + "noImplicitAny": true, + "strictNullChecks": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "noImplicitReturns": true, - /* Module解決方法 */ - "moduleResolution": "node", - "esModuleInterop": true, - // "isolatedModules": true, - "allowSyntheticDefaultImports": true, + /* Module解決方法 */ + "moduleResolution": "node", + "esModuleInterop": true, + // "isolatedModules": true, + "allowSyntheticDefaultImports": true, - // /* 型チェックだけさせたいので出力なし */ - // "noEmit": true, - 
/* For avoid WebGL2 error */ - /* https://stackoverflow.com/questions/52846622/error-ts2430-interface-webglrenderingcontext-incorrectly-extends-interface-w */ - "skipLibCheck": true - }, - /* tscコマンドで読み込むファイルを指定 */ - "include": ["src/**/*.ts"], - "exclude": ["node_modules"] + // /* 型チェックだけさせたいので出力なし */ + // "noEmit": true, + /* For avoid WebGL2 error */ + /* https://stackoverflow.com/questions/52846622/error-ts2430-interface-webglrenderingcontext-incorrectly-extends-interface-w */ + "skipLibCheck": true + }, + /* tscコマンドで読み込むファイルを指定 */ + "include": ["src/**/*.ts"], + "exclude": ["node_modules"] } diff --git a/client/lib/tsconfig.worklet.json b/client/lib/tsconfig.worklet.json index 882f308e..4fca4532 100644 --- a/client/lib/tsconfig.worklet.json +++ b/client/lib/tsconfig.worklet.json @@ -1,33 +1,33 @@ { - "compilerOptions": { - "target": "ES2020", - "lib": ["ES2020"], - "outDir": "./worklet/dist", - "declaration": true, - /* ファイル名の大文字小文字を区別 */ - "forceConsistentCasingInFileNames": true, + "compilerOptions": { + "target": "ES2020", + "lib": ["ES2020"], + "outDir": "./worklet/dist", + "declaration": true, + /* ファイル名の大文字小文字を区別 */ + "forceConsistentCasingInFileNames": true, - /* 型チェック関係のオプション */ - "strict": true, - "noImplicitAny": true, - "strictNullChecks": true, - "noUnusedLocals": true, - "noUnusedParameters": true, - "noImplicitReturns": true, + /* 型チェック関係のオプション */ + "strict": true, + "noImplicitAny": true, + "strictNullChecks": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "noImplicitReturns": true, - /* Module解決方法 */ - "moduleResolution": "node", - "esModuleInterop": true, - // "isolatedModules": true, - "allowSyntheticDefaultImports": true, + /* Module解決方法 */ + "moduleResolution": "node", + "esModuleInterop": true, + // "isolatedModules": true, + "allowSyntheticDefaultImports": true, - // /* 型チェックだけさせたいので出力なし */ - // "noEmit": true, - /* For avoid WebGL2 error */ - /* https://stackoverflow.com/questions/52846622/error-ts2430-interface-webglrenderingcontext-incorrectly-extends-interface-w */ - "skipLibCheck": true - }, - /* tscコマンドで読み込むファイルを指定 */ - "include": ["worklet/src/*.ts"], - "exclude": ["node_modules"] + // /* 型チェックだけさせたいので出力なし */ + // "noEmit": true, + /* For avoid WebGL2 error */ + /* https://stackoverflow.com/questions/52846622/error-ts2430-interface-webglrenderingcontext-incorrectly-extends-interface-w */ + "skipLibCheck": true + }, + /* tscコマンドで読み込むファイルを指定 */ + "include": ["worklet/src/*.ts"], + "exclude": ["node_modules"] } diff --git a/client/lib/webpack.common.js b/client/lib/webpack.common.js index b41b78af..32a4c47c 100644 --- a/client/lib/webpack.common.js +++ b/client/lib/webpack.common.js @@ -5,8 +5,8 @@ module.exports = { resolve: { extensions: [".ts", ".js"], fallback: { - "buffer": require.resolve("buffer/") - } + buffer: require.resolve("buffer/"), + }, }, module: { rules: [ @@ -32,11 +32,11 @@ module.exports = { plugins: [ new webpack.ProvidePlugin({ - Buffer: ['buffer', 'Buffer'], + Buffer: ["buffer", "Buffer"], }), ], externals: { react: "react", "react-dom": "reactDOM", - } + }, }; diff --git a/client/lib/webpack.dev.js b/client/lib/webpack.dev.js index b4048025..5adb66a6 100644 --- a/client/lib/webpack.dev.js +++ b/client/lib/webpack.dev.js @@ -1,6 +1,6 @@ -const { merge } = require('webpack-merge'); -const common = require('./webpack.common.js') +const { merge } = require("webpack-merge"); +const common = require("./webpack.common.js"); module.exports = merge(common, { - mode: 'development', -}) + mode: "development", +}); diff --git 
a/client/lib/webpack.prod.js b/client/lib/webpack.prod.js index 9dcd611f..95b8303e 100644 --- a/client/lib/webpack.prod.js +++ b/client/lib/webpack.prod.js @@ -1,6 +1,6 @@ -const { merge } = require('webpack-merge'); -const common = require('./webpack.common.js') +const { merge } = require("webpack-merge"); +const common = require("./webpack.common.js"); module.exports = merge(common, { - mode: 'production', -}) + mode: "production", +}); diff --git a/client/lib/webpack.worklet.common.js b/client/lib/webpack.worklet.common.js index 0ae1d7b4..c7bb090f 100644 --- a/client/lib/webpack.worklet.common.js +++ b/client/lib/webpack.worklet.common.js @@ -27,5 +27,5 @@ module.exports = { ], }, ], - } + }, }; diff --git a/client/lib/webpack.worklet.dev.js b/client/lib/webpack.worklet.dev.js index d65a0569..dd160deb 100644 --- a/client/lib/webpack.worklet.dev.js +++ b/client/lib/webpack.worklet.dev.js @@ -1,7 +1,7 @@ -const { merge } = require('webpack-merge'); -const common = require('./webpack.worklet.common.js') +const { merge } = require("webpack-merge"); +const common = require("./webpack.worklet.common.js"); const worklet = merge(common, { - mode: 'development', -}) -module.exports = [worklet]; \ No newline at end of file + mode: "development", +}); +module.exports = [worklet]; diff --git a/client/lib/webpack.worklet.prod.js b/client/lib/webpack.worklet.prod.js index 1e05eeb7..4b40302a 100644 --- a/client/lib/webpack.worklet.prod.js +++ b/client/lib/webpack.worklet.prod.js @@ -1,8 +1,7 @@ -const { merge } = require('webpack-merge'); -const common = require('./webpack.worklet.common.js') +const { merge } = require("webpack-merge"); +const common = require("./webpack.worklet.common.js"); const worklet = merge(common, { - mode: 'production', -}) + mode: "production", +}); module.exports = [worklet]; - diff --git a/client/lib/worklet/src/voice-changer-worklet-processor.ts b/client/lib/worklet/src/voice-changer-worklet-processor.ts index b5ed6130..123e8a15 100644 --- a/client/lib/worklet/src/voice-changer-worklet-processor.ts +++ b/client/lib/worklet/src/voice-changer-worklet-processor.ts @@ -1,50 +1,47 @@ export const RequestType = { - "voice": "voice", - "config": "config", - "start": "start", - "stop": "stop", - "trancateBuffer": "trancateBuffer", -} as const -export type RequestType = typeof RequestType[keyof typeof RequestType] - + voice: "voice", + config: "config", + start: "start", + stop: "stop", + trancateBuffer: "trancateBuffer", +} as const; +export type RequestType = (typeof RequestType)[keyof typeof RequestType]; export const ResponseType = { - "volume": "volume", - "inputData": "inputData", - "start_ok": "start_ok", - "stop_ok": "stop_ok", -} as const -export type ResponseType = typeof ResponseType[keyof typeof ResponseType] - - + volume: "volume", + inputData: "inputData", + start_ok: "start_ok", + stop_ok: "stop_ok", +} as const; +export type ResponseType = (typeof ResponseType)[keyof typeof ResponseType]; export type VoiceChangerWorkletProcessorRequest = { - requestType: RequestType, - voice: Float32Array, - numTrancateTreshold: number - volTrancateThreshold: number - volTrancateLength: number -} + requestType: RequestType; + voice: Float32Array; + numTrancateTreshold: number; + volTrancateThreshold: number; + volTrancateLength: number; +}; export type VoiceChangerWorkletProcessorResponse = { - responseType: ResponseType, - volume?: number, - recordData?: Float32Array[] - inputData?: Float32Array -} + responseType: ResponseType; + volume?: number; + recordData?: Float32Array[]; + 
inputData?: Float32Array; +}; class VoiceChangerWorkletProcessor extends AudioWorkletProcessor { - private BLOCK_SIZE = 128 + private BLOCK_SIZE = 128; private initialized = false; - private volume = 0 - private numTrancateTreshold = 150 + private volume = 0; + private numTrancateTreshold = 150; // private volTrancateThreshold = 0.0005 // private volTrancateLength = 32 // private volTrancateCount = 0 - private isRecording = false + private isRecording = false; - playBuffer: Float32Array[] = [] + playBuffer: Float32Array[] = []; /** * @constructor */ @@ -56,73 +53,72 @@ class VoiceChangerWorkletProcessor extends AudioWorkletProcessor { calcVol = (data: Float32Array, prevVol: number) => { const sum = data.reduce((prev, cur) => { - return prev + cur * cur - }, 0) - const rms = Math.sqrt(sum / data.length) - return Math.max(rms, prevVol * 0.95) - } + return prev + cur * cur; + }, 0); + const rms = Math.sqrt(sum / data.length); + return Math.max(rms, prevVol * 0.95); + }; trancateBuffer = () => { - console.log("[worklet] Buffer truncated") + console.log("[worklet] Buffer truncated"); while (this.playBuffer.length > 2) { - this.playBuffer.shift() + this.playBuffer.shift(); } - } + }; handleMessage(event: any) { - const request = event.data as VoiceChangerWorkletProcessorRequest + const request = event.data as VoiceChangerWorkletProcessorRequest; if (request.requestType === "config") { - this.numTrancateTreshold = request.numTrancateTreshold + this.numTrancateTreshold = request.numTrancateTreshold; // this.volTrancateLength = request.volTrancateLength // this.volTrancateThreshold = request.volTrancateThreshold - console.log("[worklet] worklet configured", request) - return + console.log("[worklet] worklet configured", request); + return; } else if (request.requestType === "start") { if (this.isRecording) { - console.warn("[worklet] recoring is already started") - return + console.warn("[worklet] recording is already started"); + return; } - this.isRecording = true + this.isRecording = true; const startResponse: VoiceChangerWorkletProcessorResponse = { responseType: "start_ok", - } + }; this.port.postMessage(startResponse); - return + return; } else if (request.requestType === "stop") { if (!this.isRecording) { - console.warn("[worklet] recoring is not started") - return + console.warn("[worklet] recording is not started"); + return; } - this.isRecording = false + this.isRecording = false; const stopResponse: VoiceChangerWorkletProcessorResponse = { responseType: "stop_ok", - } + }; this.port.postMessage(stopResponse); - return + return; } else if (request.requestType === "trancateBuffer") { - this.trancateBuffer() - return + this.trancateBuffer(); + return; } if (this.playBuffer.length > this.numTrancateTreshold) { - this.trancateBuffer() + this.trancateBuffer(); } - const f32Data = request.voice - const chunkNum = f32Data.length / this.BLOCK_SIZE + const f32Data = request.voice; + const chunkNum = f32Data.length / this.BLOCK_SIZE; for (let i = 0; i < chunkNum; i++) { - const block = f32Data.slice(i * this.BLOCK_SIZE, (i + 1) * this.BLOCK_SIZE) - this.playBuffer.push(block) + const block = f32Data.slice(i * this.BLOCK_SIZE, (i + 1) * this.BLOCK_SIZE); + this.playBuffer.push(block); } } - pushData = (inputData: Float32Array) => { const volumeResponse: VoiceChangerWorkletProcessorResponse = { responseType: ResponseType.inputData, - inputData: inputData - } + inputData: inputData, + }; this.port.postMessage(volumeResponse); - } + }; process(_inputs: Float32Array[][], outputs: Float32Array[][],
_parameters: Record) { if (!this.initialized) { @@ -132,13 +128,13 @@ class VoiceChangerWorkletProcessor extends AudioWorkletProcessor { if (this.isRecording) { if (_inputs.length > 0 && _inputs[0].length > 0) { - this.pushData(_inputs[0][0]) + this.pushData(_inputs[0][0]); } } if (this.playBuffer.length === 0) { // console.log("[worklet] no play buffer") - return true + return true; } //// 一定期間無音状態が続いている場合はスキップ。 @@ -155,7 +151,6 @@ class VoiceChangerWorkletProcessor extends AudioWorkletProcessor { // this.volTrancateCount = 0 // } - // // V.1.5.0よりsilent skipで音飛びするようになったので無効化 // if (this.volTrancateCount < this.volTrancateLength || this.volTrancateLength < 0) { // break @@ -164,22 +159,19 @@ class VoiceChangerWorkletProcessor extends AudioWorkletProcessor { // // console.log("silent...skip") // } // } - let voice = this.playBuffer.shift() - - + let voice = this.playBuffer.shift(); if (voice) { - this.volume = this.calcVol(voice, this.volume) + this.volume = this.calcVol(voice, this.volume); const volumeResponse: VoiceChangerWorkletProcessorResponse = { responseType: ResponseType.volume, - volume: this.volume - } + volume: this.volume, + }; this.port.postMessage(volumeResponse); - outputs[0][0].set(voice) + outputs[0][0].set(voice); if (outputs[0].length == 2) { - outputs[0][1].set(voice) + outputs[0][1].set(voice); } - } return true;
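The worklet source reformatted above defines a small message protocol between the main thread and the AudioWorklet: "config", "start", "stop", "trancateBuffer", and raw "voice" requests go in over the node's MessagePort, and "volume" / "inputData" responses come back. The sketch below is an illustrative TypeScript snippet, not part of this patch; the bundle URL "./worklet.js", the processor registration name "voice-changer-worklet-processor", the import path, and the helper name setupWorkletExample are all assumptions made for the example.

// Illustrative sketch only, not part of this patch. Assumed names: the worklet bundle
// URL "./worklet.js" and the processor registration name "voice-changer-worklet-processor".
import type { VoiceChangerWorkletProcessorRequest, VoiceChangerWorkletProcessorResponse } from "./@types/voice-changer-worklet-processor";

export const setupWorkletExample = async (ctx: AudioContext): Promise<AudioWorkletNode> => {
    await ctx.audioWorklet.addModule("./worklet.js"); // assumed location of the built worklet bundle
    const node = new AudioWorkletNode(ctx, "voice-changer-worklet-processor"); // assumed registration name

    // Responses defined in the processor arrive on the same MessagePort.
    node.port.onmessage = (ev: MessageEvent<VoiceChangerWorkletProcessorResponse>) => {
        if (ev.data.responseType === "volume") {
            console.log("output volume (RMS with decay):", ev.data.volume);
        } else if (ev.data.responseType === "inputData") {
            console.log("captured input block:", ev.data.inputData?.length, "samples");
        }
    };

    // "config" sets the play-buffer truncation threshold used by the processor.
    const config: VoiceChangerWorkletProcessorRequest = {
        requestType: "config",
        voice: new Float32Array(0), // unused for "config", but required by the request type
        numTrancateTreshold: 150,
        volTrancateThreshold: 0.0005,
        volTrancateLength: 32,
    };
    node.port.postMessage(config);
    return node;
};

Pushing converted audio for playback follows the same pattern with requestType set to "voice" (the processor slices the Float32Array into 128-sample blocks onto its play buffer), while "start" and "stop" toggle input capture and are acknowledged with "start_ok" and "stop_ok" responses.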