WIP: support v1.5.x, improve quality 1

wataru 2023-02-12 12:25:57 +09:00
parent 9f117ee88f
commit ea7690c2cd
9 changed files with 327 additions and 38 deletions

File diff suppressed because one or more lines are too long

View File

@@ -71,6 +71,14 @@ export const useServerSettingArea = (props: UseServerSettingProps): ServerSettin
onnxModel: null
})
}
const onCorrespondenceFileLoadClicked = async () => {
const file = await fileSelector("")
props.clientState.clientSetting.setCorrespondences(file)
}
const onCorrespondenceFileClearClicked = () => {
props.clientState.clientSetting.setCorrespondences(null)
}
const onModelUploadClicked = async () => {
props.clientState.serverSetting.loadModel()
}
@@ -82,6 +90,7 @@ export const useServerSettingArea = (props: UseServerSettingProps): ServerSettin
const configFilenameText = props.clientState.serverSetting.fileUploadSetting.configFile?.filename || props.clientState.serverSetting.fileUploadSetting.configFile?.file?.name || ""
const onnxModelFilenameText = props.clientState.serverSetting.fileUploadSetting.onnxModel?.filename || props.clientState.serverSetting.fileUploadSetting.onnxModel?.file?.name || ""
const pyTorchFilenameText = props.clientState.serverSetting.fileUploadSetting.pyTorchModel?.filename || props.clientState.serverSetting.fileUploadSetting.pyTorchModel?.file?.name || ""
const correspondenceFileText = JSON.stringify(props.clientState.clientSetting.setting.correspondences.map(x => { return x.dirname }))
return (
<>
@@ -98,6 +107,7 @@ export const useServerSettingArea = (props: UseServerSettingProps): ServerSettin
</div>
</div>
</div>
<div className="body-row split-3-3-4 left-padding-1 guided">
<div className="body-item-title left-padding-2">Config(.json)</div>
<div className="body-item-text">
@@ -108,6 +118,17 @@ export const useServerSettingArea = (props: UseServerSettingProps): ServerSettin
<div className="body-button left-margin-1" onClick={onConfigFileClearClicked}>clear</div>
</div>
</div>
<div className="body-row split-3-3-4 left-padding-1 guided">
<div className="body-item-title left-padding-2">Correspondence</div>
<div className="body-item-text">
<div>{correspondenceFileText}</div>
</div>
<div className="body-button-container">
<div className="body-button" onClick={onCorrespondenceFileLoadClicked}>select</div>
<div className="body-button left-margin-1" onClick={onCorrespondenceFileClearClicked}>clear</div>
</div>
</div>
<div className="body-row split-3-3-4 left-padding-1 guided"> <div className="body-row split-3-3-4 left-padding-1 guided">
<div className="body-item-title left-padding-2">Onnx(.onnx)</div> <div className="body-item-title left-padding-2">Onnx(.onnx)</div>
<div className="body-item-text"> <div className="body-item-text">
@ -153,6 +174,7 @@ export const useServerSettingArea = (props: UseServerSettingProps): ServerSettin
props.clientState.serverSetting.loadModel, props.clientState.serverSetting.loadModel,
props.clientState.serverSetting.isUploading, props.clientState.serverSetting.isUploading,
props.clientState.serverSetting.uploadProgress, props.clientState.serverSetting.uploadProgress,
props.clientState.clientSetting.setting.correspondences,
showPyTorch])
const frameworkRow = useMemo(() => {

View File

@@ -1,5 +1,5 @@
import { fileSelectorAsDataURL, useIndexedDB } from "@dannadori/voice-changer-client-js"
-import React, { useEffect, useMemo, useState } from "react"
+import React, { useEffect, useMemo, useRef, useState } from "react"
import { AUDIO_ELEMENT_FOR_PLAY_RESULT, AUDIO_ELEMENT_FOR_TEST_CONVERTED, AUDIO_ELEMENT_FOR_TEST_CONVERTED_ECHOBACK, AUDIO_ELEMENT_FOR_TEST_ORIGINAL, INDEXEDDB_KEY_AUDIO_OUTPUT } from "./const"
import { ClientState } from "@dannadori/voice-changer-client-js";
@@ -48,6 +48,8 @@ export const useDeviceSetting = (audioContext: AudioContext | null, props: UseDe
const [fileInputEchoback, setFileInputEchoback] = useState<boolean>() // left undefined so the initial mute takes effect
const { getItem, setItem } = useIndexedDB()
const audioSrcNode = useRef<MediaElementAudioSourceNode>()
useEffect(() => {
const initialize = async () => {
const audioInfo = await reloadDevices()
@@ -111,11 +113,19 @@ export const useDeviceSetting = (audioContext: AudioContext | null, props: UseDe
// input stream for client.
const audio = document.getElementById(AUDIO_ELEMENT_FOR_TEST_CONVERTED) as HTMLAudioElement
audio.pause()
audio.srcObject = null
audio.src = url
await audio.play()
-const src = audioContext!.createMediaElementSource(audio);
+if (!audioSrcNode.current) {
audioSrcNode.current = audioContext!.createMediaElementSource(audio);
}
if (audioSrcNode.current.mediaElement != audio) {
audioSrcNode.current = audioContext!.createMediaElementSource(audio);
}
const dst = audioContext!.createMediaStreamDestination()
-src.connect(dst)
+audioSrcNode.current.connect(dst)
props.clientState.clientSetting.setAudioInput(dst.stream)
const audio_echo = document.getElementById(AUDIO_ELEMENT_FOR_TEST_CONVERTED_ECHOBACK) as HTMLAudioElement
@@ -185,14 +195,19 @@ export const useDeviceSetting = (audioContext: AudioContext | null, props: UseDe
if (audioOutputForGUI == "none") {
// @ts-ignore
audio.setSinkId("")
if (x == AUDIO_ELEMENT_FOR_TEST_CONVERTED_ECHOBACK) {
audio.volume = fileInputEchoback ? 1 : 0
}
} else {
// @ts-ignore
audio.setSinkId(audioOutputForGUI)
if (x == AUDIO_ELEMENT_FOR_TEST_CONVERTED_ECHOBACK) {
audio.volume = fileInputEchoback ? 1 : 0
}
}
}
})
-}, [audioOutputForGUI])
+}, [audioOutputForGUI, audioInputForGUI])
useEffect(() => {

View File

@@ -10,42 +10,63 @@ export const useSpeakerSetting = (props: UseSpeakerSettingProps) => {
const [editSpeakerTargetName, setEditSpeakerTargetName] = useState<string>("")
const srcIdRow = useMemo(() => {
const selected = props.clientState.clientSetting.setting.correspondences.find(x => {
return x.sid == props.clientState.serverSetting.setting.srcId
})
return (
-<div className="body-row split-3-7 left-padding-1 guided">
+<div className="body-row split-3-2-1-4 left-padding-1 guided">
<div className="body-item-title left-padding-1">Source Speaker Id</div>
<div className="body-select-container">
<select className="body-select" value={props.clientState.serverSetting.setting.srcId} onChange={(e) => {
props.clientState.serverSetting.setSrcId(Number(e.target.value))
}}>
{
-props.clientState.clientSetting.setting.speakers.map(x => {
-return <option key={x.id} value={x.id}>{x.name}({x.id})</option>
+// props.clientState.clientSetting.setting.speakers.map(x => {
+// return <option key={x.id} value={x.id}>{x.name}({x.id})</option>
// })
props.clientState.clientSetting.setting.correspondences.map(x => {
return <option key={x.sid} value={x.sid}>{x.dirname}({x.sid})</option>
})
}
</select>
</div>
<div className="body-item-text">
<div>F0: {selected?.correspondence.toFixed(1) || ""}</div>
</div>
<div className="body-item-text"></div>
</div>
)
-}, [props.clientState.clientSetting.setting.speakers, props.clientState.serverSetting.setting.srcId, props.clientState.serverSetting.setSrcId])
+}, [props.clientState.clientSetting.setting.speakers, props.clientState.serverSetting.setting.srcId, props.clientState.clientSetting.setting.correspondences, props.clientState.serverSetting.setSrcId])
const dstIdRow = useMemo(() => {
const selected = props.clientState.clientSetting.setting.correspondences.find(x => {
return x.sid == props.clientState.serverSetting.setting.dstId
})
return (
-<div className="body-row split-3-7 left-padding-1 guided">
+<div className="body-row split-3-2-1-4 left-padding-1 guided">
<div className="body-item-title left-padding-1">Destination Speaker Id</div>
<div className="body-select-container">
<select className="body-select" value={props.clientState.serverSetting.setting.dstId} onChange={(e) => {
props.clientState.serverSetting.setDstId(Number(e.target.value))
}}>
{
-props.clientState.clientSetting.setting.speakers.map(x => {
-return <option key={x.id} value={x.id}>{x.name}({x.id})</option>
+// props.clientState.clientSetting.setting.speakers.map(x => {
+// return <option key={x.id} value={x.id}>{x.name}({x.id})</option>
// })
props.clientState.clientSetting.setting.correspondences.map(x => {
return <option key={x.sid} value={x.sid}>{x.dirname}({x.sid})</option>
})
}
</select>
</div>
<div className="body-item-text">
<div>F0: {selected?.correspondence.toFixed(1) || ""}</div>
</div>
<div className="body-item-text"></div>
</div>
)
-}, [props.clientState.clientSetting.setting.speakers, props.clientState.serverSetting.setting.dstId, props.clientState.serverSetting.setDstId])
+}, [props.clientState.clientSetting.setting.speakers, props.clientState.serverSetting.setting.dstId, props.clientState.clientSetting.setting.correspondences, props.clientState.serverSetting.setDstId])
const editSpeakerIdMappingRow = useMemo(() => {
const onSetSpeakerMappingClicked = async () => {
@@ -95,6 +116,15 @@ export const useSpeakerSetting = (props: UseSpeakerSettingProps) => {
const f0FactorRow = useMemo(() => {
const src = props.clientState.clientSetting.setting.correspondences.find(x => {
return x.sid == props.clientState.serverSetting.setting.srcId
})
const dst = props.clientState.clientSetting.setting.correspondences.find(x => {
return x.sid == props.clientState.serverSetting.setting.dstId
})
const recommendedF0Factor = dst && src ? dst.correspondence / src.correspondence : 0
return (
<div className="body-row split-3-2-1-4 left-padding-1 guided">
<div className="body-item-title left-padding-1">F0 Factor</div>
@@ -106,10 +136,10 @@ export const useSpeakerSetting = (props: UseSpeakerSettingProps) => {
<div className="body-item-text">
<div>{props.clientState.serverSetting.setting.f0Factor}</div>
</div>
-<div className="body-item-text"></div>
+<div className="body-item-text">recommended:{recommendedF0Factor.toFixed(1)}</div>
</div>
)
-}, [props.clientState.serverSetting.setting.f0Factor, props.clientState.serverSetting.setF0Factor])
+}, [props.clientState.serverSetting.setting.f0Factor, props.clientState.serverSetting.setting.srcId, props.clientState.serverSetting.setting.dstId, props.clientState.clientSetting.setting.correspondences, props.clientState.serverSetting.setF0Factor])
const speakerSetting = useMemo(() => {
return (

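Side note on the recommended value shown in the F0 Factor row above: it is simply the ratio of the destination speaker's correspondence value (displayed as F0 in the speaker rows) to the source speaker's. A minimal sketch of the same lookup; the sids and F0 values below are invented for illustration:

```python
# Minimal sketch of the "recommended F0 factor" computation used above:
# recommended = dst.correspondence / src.correspondence.
# The sids and correspondence values are illustrative only.
correspondences = [
    {"sid": 0, "correspondence": 220.0, "dirname": "user"},
    {"sid": 101, "correspondence": 330.0, "dirname": "zundamon"},
]

def recommended_f0_factor(correspondences, src_id, dst_id):
    src = next((c for c in correspondences if c["sid"] == src_id), None)
    dst = next((c for c in correspondences if c["sid"] == dst_id), None)
    # Fall back to 0 when either speaker is missing, mirroring the hook above.
    return dst["correspondence"] / src["correspondence"] if src and dst else 0

print(round(recommended_f0_factor(correspondences, 0, 101), 1))  # 1.5
```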
View File

@@ -12,13 +12,18 @@ export type ConvertSettingState = {
export const useConvertSetting = (props: UseConvertSettingProps): ConvertSettingState => {
const inputChunkNumRow = useMemo(() => {
return (
-<div className="body-row split-3-7 left-padding-1 guided">
+<div className="body-row split-3-2-1-4 left-padding-1 guided">
<div className="body-item-title left-padding-1">Input Chunk Num(128sample/chunk)</div>
<div className="body-input-container">
<input type="number" min={1} max={256} step={1} value={props.clientState.clientSetting.setting.inputChunkNum} onChange={(e) => {
props.clientState.clientSetting.setInputChunkNum(Number(e.target.value))
}} />
</div>
<div className="body-item-text">
<div>buff: {(props.clientState.clientSetting.setting.inputChunkNum * 128 * 1000 / 24000).toFixed(1)}ms</div>
</div>
<div className="body-item-text"></div>
</div>
)
}, [props.clientState.clientSetting.setting.inputChunkNum, props.clientState.clientSetting.setInputChunkNum])
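The new buff readout is the send-buffer length in milliseconds: inputChunkNum chunks of 128 samples each, at the 24 kHz processing rate used by the server. A quick worked check, using the default inputChunkNum of 48 from const.ts:

```python
# Worked example of the "buff" readout added above:
# inputChunkNum chunks of 128 samples each, at a 24 kHz sample rate.
input_chunk_num = 48  # default value from DefaultVoiceChangerClientSetting
samples = input_chunk_num * 128
buffer_ms = samples * 1000 / 24000
print(f"buff: {buffer_ms:.1f}ms")  # buff: 256.0ms
```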

View File

@@ -292,6 +292,9 @@ body {
}
.body-item-text {
color: rgb(30, 30, 30);
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
.body-item-text-item {
padding-left: 1rem;
}

View File

@@ -32,6 +32,7 @@ export type VoiceChangerClientSetting = {
bufferSize: BufferSize, // 256, 512, 1024, 2048, 4096, 8192, 16384 (for mic stream)
inputChunkNum: number, // n of (256 x n) for send buffer
speakers: Speaker[],
correspondences: Correspondence[],
forceVfDisable: boolean,
voiceChangerMode: VoiceChangerMode,
}
@@ -46,6 +47,11 @@ export type Speaker = {
"id": number,
"name": string,
}
export type Correspondence = {
"sid": number,
"correspondence": number,
"dirname": string
}
export type ServerInfo = {
@@ -154,26 +160,27 @@ export const DefaultVoiceChangerClientSetting: VoiceChangerClientSetting = {
inputChunkNum: 48,
speakers: [
{
-"id": 100,
-"name": "ずんだもん"
-},
-{
-"id": 107,
+"id": 0,
"name": "user"
},
{
"id": 101,
-"name": "そら"
+"name": "ずんだもん"
},
{
"id": 102,
-"name": "めたん"
+"name": "そら"
},
{
"id": 103,
+"name": "めたん"
+},
+{
+"id": 104,
"name": "つむぎ"
}
],
correspondences: [],
forceVfDisable: false,
voiceChangerMode: "realtime",
}

View File

@@ -1,5 +1,5 @@
import { useState, useMemo, useRef, useEffect } from "react"
-import { VoiceChangerClientSetting, Protocol, BufferSize, VoiceChangerMode, SampleRate, Speaker, DefaultVoiceChangerClientSetting, INDEXEDDB_KEY_CLIENT } from "../const"
+import { VoiceChangerClientSetting, Protocol, BufferSize, VoiceChangerMode, SampleRate, Speaker, DefaultVoiceChangerClientSetting, INDEXEDDB_KEY_CLIENT, Correspondence } from "../const"
import { createDummyMediaStream } from "../util"
import { VoiceChangerClient } from "../VoiceChangerClient"
import { useIndexedDB } from "./useIndexedDB"
@@ -21,6 +21,7 @@ export type ClientSettingState = {
setVoiceChangerMode: (mode: VoiceChangerMode) => void
setSampleRate: (num: SampleRate) => void
setSpeakers: (speakers: Speaker[]) => void
setCorrespondences: (file: File | null) => Promise<void>
start: () => Promise<void>
stop: () => Promise<void>
@@ -179,6 +180,33 @@ export const useClientSetting = (props: UseClientSettingProps): ClientSettingSta
}
}, [props.voiceChangerClient])
const setCorrespondences = useMemo(() => {
return async (file: File | null) => {
if (!props.voiceChangerClient) return
if (!file) {
settingRef.current.correspondences = []
} else {
const correspondenceText = await file.text()
const cors = correspondenceText.split("\n").map(line => {
const items = line.split("|")
if (items.length != 3) {
console.warn("Invalid Correspondence Line:", line)
return null
} else {
const cor: Correspondence = {
sid: Number(items[0]),
correspondence: Number(items[1]),
dirname: items[2]
}
return cor
}
}).filter(x => { return x != null }) as Correspondence[]
settingRef.current.correspondences = cors
}
setSetting({ ...settingRef.current })
}
}, [props.voiceChangerClient])
//////////////
// Operations
/////////////
@@ -204,7 +232,6 @@ export const useClientSetting = (props: UseClientSettingProps): ClientSettingSta
}
}, [props.voiceChangerClient])
return {
setting,
clearSetting,
@@ -217,6 +244,7 @@ export const useClientSetting = (props: UseClientSettingProps): ClientSettingSta
setVoiceChangerMode,
setSampleRate,
setSpeakers,
setCorrespondences,
start,
stop,

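For reference, setCorrespondences above expects a plain-text file with one sid|correspondence|dirname record per line and drops malformed lines with a warning. A minimal sketch of the same parsing outside the hook; the sample lines and values are invented for illustration:

```python
# Minimal sketch of the correspondence file format parsed by setCorrespondences:
# one "sid|correspondence|dirname" record per line; malformed lines are skipped.
from dataclasses import dataclass

@dataclass
class Correspondence:
    sid: int
    correspondence: float
    dirname: str

sample_text = "0|220.0|user\n101|330.0|zundamon\nbroken line\n"

correspondences = []
for line in sample_text.splitlines():
    items = line.split("|")
    if len(items) != 3:
        print("Invalid Correspondence Line:", line)  # warn and drop, as in the hook
        continue
    correspondences.append(Correspondence(int(items[0]), float(items[1]), items[2]))

print(correspondences)
```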
View File

@@ -20,6 +20,58 @@ from voice_changer.client_modules import convert_continuos_f0, spectrogram_torch
providers = ['OpenVINOExecutionProvider', "CUDAExecutionProvider", "DmlExecutionProvider", "CPUExecutionProvider"]
import wave
class MockStream:
"""
A mock that swaps the streaming audio input/output for plain file input/output.
"""
def __init__(self, sampling_rate):
self.sampling_rate = sampling_rate
self.start_count = 2
self.end_count = 2
self.fr = None
self.fw = None
def open_inputfile(self, input_filename):
self.fr = wave.open(input_filename, 'rb')
def open_outputfile(self, output_filename):
self.fw = wave.open(output_filename, 'wb')
self.fw.setnchannels(1)
self.fw.setsampwidth(2)
self.fw.setframerate(self.sampling_rate)
def read(self, length, exception_on_overflow=False):
if self.start_count > 0:
wav = bytes(length * 2)
self.start_count -= 1 # the first two reads return dummy empty data
else:
wav = self.fr.readframes(length)
if len(wav) <= 0: # after the data runs out, the last two reads return dummy empty data
wav = bytes(length * 2)
self.end_count -= 1
if self.end_count < 0:
Hyperparameters.VC_END_FLAG = True
return wav
def write(self, wav):
self.fw.writeframes(wav)
def stop_stream(self):
pass
def close(self):
if self.fr != None:
self.fr.close()
self.fr = None
if self.fw != None:
self.fw.close()
self.fw = None
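MockStream appears to mimic a PyAudio-style stream over wave files, so the conversion loop can be fed from a wav file and its output captured to disk. A minimal usage sketch under that assumption; the file names and chunk size are placeholders, and it relies on the class defined above:

```python
# Minimal usage sketch of MockStream: stream a wav file through in fixed-size
# chunks and write the (here unmodified) audio back out.
CHUNK = 6144  # samples per read; illustrative value

in_stream = MockStream(24000)
in_stream.open_inputfile("input.wav")     # source audio to feed the pipeline
out_stream = MockStream(24000)
out_stream.open_outputfile("output.wav")  # processed audio lands here

# Two leading and two trailing reads return dummy silence (start_count / end_count),
# so iterate over the file length plus those padding chunks.
total_frames = in_stream.fr.getnframes()
n_reads = -(-total_frames // CHUNK) + in_stream.start_count + in_stream.end_count

for _ in range(n_reads):
    wav = in_stream.read(CHUNK)           # int16 bytes; a real loop would convert here
    out_stream.write(wav)

in_stream.close()
out_stream.close()
```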
@dataclass
class VocieChangerSettings():
gpu: int = 0
@@ -61,6 +113,13 @@ class VoiceChanger():
self.prev_audio = np.zeros(1)
self.mps_enabled = getattr(torch.backends, "mps", None) is not None and torch.backends.mps.is_available()
mock_stream = MockStream(24000)
mock_stream.open_outputfile("out.wav")
self.out = mock_stream
mock_stream_in = MockStream(24000)
mock_stream_in.open_outputfile("in.wav")
self.stream_in = mock_stream_in
print(f"VoiceChanger Initialized (GPU_NUM:{self.gpu_num}, mps_enabled:{self.mps_enabled})") print(f"VoiceChanger Initialized (GPU_NUM:{self.gpu_num}, mps_enabled:{self.mps_enabled})")
def loadModel(self, config: str, pyTorch_model_file: str = None, onnx_model_file: str = None): def loadModel(self, config: str, pyTorch_model_file: str = None, onnx_model_file: str = None):
@ -191,13 +250,16 @@ class VoiceChanger():
audio_norm = audio / self.hps.data.max_wav_value # normalize audio_norm = audio / self.hps.data.max_wav_value # normalize
audio_norm = audio_norm.unsqueeze(0) # unsqueeze audio_norm = audio_norm.unsqueeze(0) # unsqueeze
self.audio_buffer = torch.cat([self.audio_buffer, audio_norm], axis=1) # 過去のデータに連結 self.audio_buffer = torch.cat([self.audio_buffer, audio_norm], axis=1) # 過去のデータに連結
audio_norm = self.audio_buffer[:, -convertSize:] # 変換対象の部分だけ抽出 # audio_norm = self.audio_buffer[:, -(convertSize + 1280 * 2):] # 変換対象の部分だけ抽出
audio_norm = self.audio_buffer[:, -(convertSize):] # 変換対象の部分だけ抽出
self.audio_buffer = audio_norm self.audio_buffer = audio_norm
# TBD: numpy <--> pytorch変換が行ったり来たりしているが、まずは動かすことを最優先。 # TBD: numpy <--> pytorch変換が行ったり来たりしているが、まずは動かすことを最優先。
audio_norm_np = audio_norm.squeeze().numpy().astype(np.double) audio_norm_np = audio_norm.squeeze().numpy().astype(np.float64)
_f0, _time = pw.dio(audio_norm_np, self.hps.data.sampling_rate, frame_period=5.5) _f0, _time = pw.dio(audio_norm_np, self.hps.data.sampling_rate, frame_period=5.5)
f0 = pw.stonemask(audio_norm_np, _f0, _time, self.hps.data.sampling_rate) f0 = pw.stonemask(audio_norm_np, _f0, _time, self.hps.data.sampling_rate)
# print("type:", audio_norm_np.dtype)
# f0, t = pw.harvest(audio_norm_np, self.hps.data.sampling_rate, frame_period=5.5, f0_floor=71.0, f0_ceil=1000.0)
f0 = convert_continuos_f0(f0, int(audio_norm_np.shape[0] / self.hps.data.hop_length))
f0 = torch.from_numpy(f0.astype(np.float32))
@@ -215,8 +277,7 @@ class VoiceChanger():
data = TextAudioSpeakerCollate(
sample_rate=self.hps.data.sampling_rate,
hop_size=self.hps.data.hop_length,
-f0_factor=self.settings.f0Factor # TBD: parameter
-# f0_factor=2.4 # TBD: parameter
+f0_factor=self.settings.f0Factor
)([(spec, sid, f0)])
return data
@@ -312,7 +373,6 @@ class VoiceChanger():
audio1 = self.net_g.cuda(self.settings.gpu).voice_conversion(spec, spec_lengths, sin, d,
sid_src, sid_target)[0, 0].data * self.hps.data.max_wav_value
-# audio1 = audio1[10:-10]
if self.prev_strength.device != torch.device('cuda', self.settings.gpu):
print(f"prev_strength move from {self.prev_strength.device} to gpu{self.settings.gpu}")
self.prev_strength = self.prev_strength.cuda(self.settings.gpu)
@@ -339,15 +399,18 @@ class VoiceChanger():
result = result.cpu().float().numpy()
return result
-def on_request(self, unpackedData: any):
+def on_request_(self, unpackedData: any):
convertSize = self.settings.convertChunkNum * 128 # 128sample/1chunk
self.stream_in.write(unpackedData.astype(np.int16).tobytes())
# print("convsize:", unpackedData.shape[0] * (1 + self.settings.crossFadeOverlapRate))
if unpackedData.shape[0] * (1 + self.settings.crossFadeOverlapRate) + 1024 > convertSize:
convertSize = int(unpackedData.shape[0] * (1 + self.settings.crossFadeOverlapRate)) + 1024
if convertSize < self.settings.minConvertSize:
convertSize = self.settings.minConvertSize
# print("convert Size", unpackedData.shape[0], unpackedData.shape[0]*(1 + self.settings.crossFadeOverlapRate), convertSize, self.settings.minConvertSize)
# convertSize = 8192
self._generate_strength(unpackedData)
data = self._generate_input(unpackedData, convertSize)
@@ -369,3 +432,119 @@ class VoiceChanger():
result = result.astype(np.int16)
# print("on_request result size:",result.shape)
return result
#########################################################################################
def overlap_merge(self, now_wav, prev_wav, overlap_length):
"""
Merges the newly generated wav data with the previously generated wav data, cross-fading over overlap_length samples.
The trailing overlap_length samples are dropped here because they are merged and played on the next call.
Parameters
----------
now_wav: wav data generated this time
prev_wav: wav data generated last time
overlap_length: number of samples to overlap
"""
if overlap_length == 0:
return now_wav
gradation = np.arange(overlap_length) / overlap_length
now = np.frombuffer(now_wav, dtype='int16')
prev = np.frombuffer(prev_wav, dtype='int16')
now_head = now[:overlap_length]
prev_tail = prev[-overlap_length:]
print("merge params:", gradation.shape, now.shape, prev.shape, now_head.shape, prev_tail.shape)
merged = prev_tail * (np.cos(gradation * np.pi * 0.5) ** 2) + now_head * (np.cos((1 - gradation) * np.pi * 0.5) ** 2)
# merged = prev_tail * (1 - gradation) + now_head * gradation
overlapped = np.append(merged, now[overlap_length:-overlap_length])
signal = np.round(overlapped, decimals=0)
signal = signal.astype(np.int16)
# signal = signal.astype(np.int16).tobytes()
return signal
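To illustrate overlap_merge: the head of the new chunk and the tail of the previous one are blended with cos² weights that sum to 1 at every sample, and the trailing overlap of the new chunk is held back for the next call. A small self-contained check with constant test data:

```python
import numpy as np

# Small demonstration of the cos^2 cross-fade used by overlap_merge:
# the two weights sum to 1 at every sample, so a constant signal stays constant.
overlap = 8
prev_wav = np.full(32, 1000, dtype=np.int16).tobytes()  # previous chunk (constant 1000)
now_wav = np.full(32, 1000, dtype=np.int16).tobytes()   # next chunk (same level)

gradation = np.arange(overlap) / overlap
now = np.frombuffer(now_wav, dtype="int16")
prev = np.frombuffer(prev_wav, dtype="int16")
merged = (prev[-overlap:] * (np.cos(gradation * np.pi * 0.5) ** 2)
          + now[:overlap] * (np.cos((1 - gradation) * np.pi * 0.5) ** 2))
out = np.round(np.append(merged, now[overlap:-overlap])).astype(np.int16)

print(out)        # every sample stays at 1000 after the cross-fade
print(out.shape)  # (24,) = 32 - overlap: the trailing overlap waits for the next merge
```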
def on_request(self, unpackedData: any):
self._generate_strength(unpackedData)
convertSize = 8192
unpackedData = unpackedData.astype(np.int16)
if hasattr(self, 'stored_raw_input') == False:
self.stored_raw_input = unpackedData
else:
self.stored_raw_input = np.concatenate([self.stored_raw_input, unpackedData])
self.stored_raw_input = self.stored_raw_input[-1 * (convertSize):]
processing_input = self.stored_raw_input
print("signal_shape1", unpackedData.shape, processing_input.shape, processing_input.dtype)
processing_input = processing_input / self.hps.data.max_wav_value
print("type:", processing_input.dtype)
_f0, _time = pw.dio(processing_input, self.hps.data.sampling_rate, frame_period=5.5)
f0 = pw.stonemask(processing_input, _f0, _time, self.hps.data.sampling_rate)
f0 = convert_continuos_f0(f0, int(processing_input.shape[0] / self.hps.data.hop_length))
f0 = torch.from_numpy(f0.astype(np.float32))
print("signal_shape2", f0.shape)
processing_input = torch.from_numpy(processing_input.astype(np.float32)).clone()
with torch.no_grad():
trans_length = processing_input.size()[0]
# spec, sid = get_audio_text_speaker_pair(signal.view(1, trans_length), Hyperparameters.SOURCE_ID)
processing_input_v = processing_input.view(1, trans_length) # same as unsqueeze
print("processing_input_v shape:", processing_input_v.shape)
spec = spectrogram_torch(processing_input_v, self.hps.data.filter_length,
self.hps.data.sampling_rate, self.hps.data.hop_length, self.hps.data.win_length,
center=False)
spec = torch.squeeze(spec, 0)
sid = torch.LongTensor([int(self.settings.srcId)])
dispose_stft_specs = 2
spec = spec[:, dispose_stft_specs:-dispose_stft_specs]
f0 = f0[dispose_stft_specs:-dispose_stft_specs]
print("spec shape:", spec.shape)
data = TextAudioSpeakerCollate(
sample_rate=self.hps.data.sampling_rate,
hop_size=self.hps.data.hop_length,
f0_factor=self.settings.f0Factor
)([(spec, sid, f0)])
if self.settings.gpu >= 0 or self.gpu_num > 0:
# spec, spec_lengths, sid_src, sin, d = [x.cuda(Hyperparameters.GPU_ID) for x in data]
spec, spec_lengths, sid_src, sin, d = data
spec = spec.cuda(self.settings.gpu)
spec_lengths = spec_lengths.cuda(self.settings.gpu)
sid_src = sid_src.cuda(self.settings.gpu)
sin = sin.cuda(self.settings.gpu)
d = tuple([d[:1].cuda(self.settings.gpu) for d in d])
sid_target = torch.LongTensor([self.settings.dstId]).cuda(self.settings.gpu)
audio = self.net_g.cuda(self.settings.gpu).voice_conversion(spec, spec_lengths,
sin, d, sid_src, sid_target)[0, 0].data.cpu().float().numpy()
else:
spec, spec_lengths, sid_src, sin, d = data
sid_target = torch.LongTensor([self.settings.dstId])
audio = self.net_g.voice_conversion(spec, spec_lengths, sin, d, sid_src, sid_target)[0, 0].data.cpu().float().numpy()
dispose_conv1d_length = 1280
audio = audio[dispose_conv1d_length:-dispose_conv1d_length]
audio = audio * self.hps.data.max_wav_value
audio = audio.astype(np.int16)
print("fin audio shape:", audio.shape)
audio = audio.tobytes()
if hasattr(self, "prev_audio"):
try:
audio1 = self.overlap_merge(audio, self.prev_audio, 1024)
except:
audio1 = np.zeros(1).astype(np.int16)
pass
# return np.zeros(1).astype(np.int16)
else:
audio1 = np.zeros(1).astype(np.int16)
self.prev_audio = audio
self.out.write(audio)
self.stream_in.write(unpackedData.tobytes())
return audio1
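For context, the reworked on_request keeps a fixed 8192-sample window of raw input (stored_raw_input): each incoming block is appended, only the trailing convertSize samples are converted, and the edges are later trimmed and cross-faded. A minimal sketch of that sliding window; the incoming chunk size is illustrative:

```python
import numpy as np

# Minimal sketch of the fixed-length raw-input window kept in stored_raw_input above.
# convertSize is pinned to 8192 samples; the incoming chunk size here is illustrative.
CONVERT_SIZE = 8192

def push_chunk(stored: np.ndarray, chunk: np.ndarray) -> np.ndarray:
    """Append the newest chunk and keep only the trailing CONVERT_SIZE samples."""
    stored = np.concatenate([stored, chunk.astype(np.int16)])
    return stored[-CONVERT_SIZE:]

stored = np.zeros(0, dtype=np.int16)
for _ in range(4):
    stored = push_chunk(stored, np.zeros(6144, dtype=np.int16))
print(stored.shape)  # (8192,) once enough audio has accumulated
```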