mirror of https://github.com/w-okada/voice-changer.git
synced 2025-01-23 05:25:01 +03:00
first try

parent 9a6b9851db
commit a51fa4d4cb
.gitignore (vendored): 7 lines changed
@@ -13,8 +13,15 @@ server/out.wav
 server/G_*.pth
 server/train_config.json
 
+# Model folder for v.1.3.x testing
 server/v13
 
+server/hubert
+server/so-vits-svc
+# Model folder for sovits testing
+server/sovits
+server/test
+
 server/memo.md
 
 client/lib/dist
@@ -1,5 +1,6 @@
 import sys
-sys.path.append("MMVC_Client/python")
+# sys.path.append("MMVC_Client/python")
+sys.path.append("so-vits-svc")
 
 from distutils.util import strtobool
 from datetime import datetime
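The MMVC client path is dropped from sys.path and the bundled so-vits-svc checkout is added instead, which is what makes the later "from inference.infer_tool import Svc" import resolve. A minimal sketch of that assumption (the relative directory name is taken from the diff; run it from the server's working directory):

    import sys

    # The so-vits-svc sources are expected in a "so-vits-svc" directory relative to the
    # process working directory, exactly as appended in this commit.
    sys.path.append("so-vits-svc")

    # Resolves only once the path above exists and contains inference/infer_tool.py.
    from inference.infer_tool import Svc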
@@ -8,11 +8,11 @@ import resampy
 
 import onnxruntime
 
-from symbols import symbols
-from models import SynthesizerTrn
+# from symbols import symbols
+# from models import SynthesizerTrn
 
 import pyworld as pw
-from voice_changer.client_modules import convert_continuos_f0, spectrogram_torch, TextAudioSpeakerCollate, get_hparams_from_file, load_checkpoint
+# from voice_changer.client_modules import convert_continuos_f0, spectrogram_torch, TextAudioSpeakerCollate, get_hparams_from_file, load_checkpoint
 
 import time
 
@@ -28,6 +28,13 @@ import librosa
 import librosa.display
 SAMPLING_RATE = 24000
 
+from inference.infer_tool import Svc
+
+import soundfile
+from scipy.io.wavfile import write
+import io
+import torchaudio
+
 
 class MockStream:
     """gi
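Besides the Svc wrapper, the new imports bring in two WAV writers: soundfile for the in-memory buffer handed to the model, and scipy.io.wavfile.write for the on-disk debug dumps (test/received_data.wav, test/out_data.wav) that appear later in this diff. A minimal sketch of the debug-dump half, assuming a test/ directory:

    import os

    import numpy as np
    from scipy.io.wavfile import write

    os.makedirs("test", exist_ok=True)           # the commit assumes test/ already exists
    pcm = np.zeros(3200, dtype=np.int16)         # stand-in for one received 32 kHz chunk
    write("test/received_data.wav", 32000, pcm)  # same call shape as in the diff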
@@ -123,12 +130,17 @@ class VoiceChanger():
 
         self.gpu_num = torch.cuda.device_count()
         self.text_norm = torch.LongTensor([0, 6, 0])
-        self.audio_buffer = torch.zeros(1, 0)
+        self.audio_buffer = torch.zeros(0)
         self.prev_audio = np.zeros(1)
         self.mps_enabled = getattr(torch.backends, "mps", None) is not None and torch.backends.mps.is_available()
 
         print(f"VoiceChanger Initialized (GPU_NUM:{self.gpu_num}, mps_enabled:{self.mps_enabled})")
 
+        ###############################################
+        ###############################################
+        # self.raw_path2 = "test/test.wav"
+        self.raw_path = io.BytesIO()
+
     def _setupRecordIO(self):
         # IO Recorder Setup
         if hasattr(self, "stream_out"):
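self.raw_path is a single io.BytesIO that is reused for every chunk, so the inference path rewinds it before each write and again before handing it to the model (both seek(0) calls show up later in this diff). A small sketch of that reuse pattern; the truncate() call is an extra precaution not present in the commit, while soundfile, 32 kHz, and int16 match the values used here:

    import io

    import numpy as np
    import soundfile

    raw_path = io.BytesIO()              # one buffer reused for every chunk, as in __init__

    def write_chunk(pcm: np.ndarray) -> None:
        raw_path.seek(0)                 # rewind before overwriting the previous chunk
        raw_path.truncate()              # optional: drop leftovers from a longer previous chunk
        soundfile.write(raw_path, pcm, 32000, format="wav")
        raw_path.seek(0)                 # rewind again so the reader starts at the WAV header

    write_chunk(np.zeros(8192, dtype=np.int16))
    write_chunk(np.zeros(4096, dtype=np.int16))
    audio, sr = soundfile.read(raw_path, dtype="int16")
    print(audio.shape, sr)               # (4096,) 32000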
@@ -158,7 +170,7 @@ class VoiceChanger():
 
     def loadModel(self, config: str, pyTorch_model_file: str = None, onnx_model_file: str = None):
         self.settings.configFile = config
-        self.hps = get_hparams_from_file(config)
+        # self.hps = get_hparams_from_file(config)
         if pyTorch_model_file != None:
             self.settings.pyTorchModelFile = pyTorch_model_file
         if onnx_model_file:
@@ -166,27 +178,28 @@ class VoiceChanger():
 
         # Generate the PyTorch model
         if pyTorch_model_file != None:
-            self.net_g = SynthesizerTrn(
-                spec_channels=self.hps.data.filter_length // 2 + 1,
-                segment_size=self.hps.train.segment_size // self.hps.data.hop_length,
-                inter_channels=self.hps.model.inter_channels,
-                hidden_channels=self.hps.model.hidden_channels,
-                upsample_rates=self.hps.model.upsample_rates,
-                upsample_initial_channel=self.hps.model.upsample_initial_channel,
-                upsample_kernel_sizes=self.hps.model.upsample_kernel_sizes,
-                n_flow=self.hps.model.n_flow,
-                dec_out_channels=1,
-                dec_kernel_size=7,
-                n_speakers=self.hps.data.n_speakers,
-                gin_channels=self.hps.model.gin_channels,
-                requires_grad_pe=self.hps.requires_grad.pe,
-                requires_grad_flow=self.hps.requires_grad.flow,
-                requires_grad_text_enc=self.hps.requires_grad.text_enc,
-                requires_grad_dec=self.hps.requires_grad.dec
-            )
-            self.net_g.eval()
-            load_checkpoint(pyTorch_model_file, self.net_g, None)
-            # utils.load_checkpoint(pyTorch_model_file, self.net_g, None)
+            # self.net_g = SynthesizerTrn(
+            #     spec_channels=self.hps.data.filter_length // 2 + 1,
+            #     segment_size=self.hps.train.segment_size // self.hps.data.hop_length,
+            #     inter_channels=self.hps.model.inter_channels,
+            #     hidden_channels=self.hps.model.hidden_channels,
+            #     upsample_rates=self.hps.model.upsample_rates,
+            #     upsample_initial_channel=self.hps.model.upsample_initial_channel,
+            #     upsample_kernel_sizes=self.hps.model.upsample_kernel_sizes,
+            #     n_flow=self.hps.model.n_flow,
+            #     dec_out_channels=1,
+            #     dec_kernel_size=7,
+            #     n_speakers=self.hps.data.n_speakers,
+            #     gin_channels=self.hps.model.gin_channels,
+            #     requires_grad_pe=self.hps.requires_grad.pe,
+            #     requires_grad_flow=self.hps.requires_grad.flow,
+            #     requires_grad_text_enc=self.hps.requires_grad.text_enc,
+            #     requires_grad_dec=self.hps.requires_grad.dec
+            # )
+            # self.net_g.eval()
+            # load_checkpoint(pyTorch_model_file, self.net_g, None)
+
+            self.net_g = Svc(pyTorch_model_file, config)
 
         # Generate the ONNX model
         if onnx_model_file != None:
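Model loading now delegates to the so-vits-svc Svc wrapper instead of rebuilding SynthesizerTrn by hand. A hedged usage sketch built only from the two calls visible in this commit, Svc(model_path, config_path) and net_g.infer('speaker1', 20, file_like); the paths below are placeholders, and the wrapper's exact signature may differ between so-vits-svc versions:

    import io

    import numpy as np
    import soundfile
    from inference.infer_tool import Svc   # requires the so-vits-svc checkout on sys.path

    net_g = Svc("logs/G_0000.pth", "configs/config.json")   # placeholder checkpoint/config paths

    # Serialize one chunk of int16 PCM as a 32 kHz WAV in memory, as _pyTorch_inference does.
    raw_path = io.BytesIO()
    soundfile.write(raw_path, np.zeros(8192, dtype=np.int16), 32000, format="wav")
    raw_path.seek(0)

    # 'speaker1' and the second argument 20 are taken verbatim from the commit.
    out_audio, out_sr = net_g.infer('speaker1', 20, raw_path)
    audio1 = out_audio * 32768.0            # same output scaling as in the commit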
@@ -325,42 +338,11 @@ class VoiceChanger():
 
     def _generate_input(self, unpackedData: any, convertSize: int):
         # Shape the data to be converted this time into a tensor
-        audio = torch.FloatTensor(unpackedData.astype(np.float32))  # create a tensor as float32
-        audio_norm = audio / self.hps.data.max_wav_value  # normalize
-        audio_norm = audio_norm.unsqueeze(0)  # unsqueeze
-        self.audio_buffer = torch.cat([self.audio_buffer, audio_norm], axis=1)  # concatenate with past data
-        # audio_norm = self.audio_buffer[:, -(convertSize + 1280 * 2):]  # extract only the portion to convert
-        audio_norm = self.audio_buffer[:, -(convertSize):]  # extract only the portion to convert
-        self.audio_buffer = audio_norm
-
-        # TBD: the numpy <--> pytorch conversion goes back and forth here, but getting it running comes first.
-        audio_norm_np = audio_norm.squeeze().numpy().astype(np.float64)
-        if self.settings.f0Detector == "dio":
-            _f0, _time = pw.dio(audio_norm_np, self.hps.data.sampling_rate, frame_period=5.5)
-            f0 = pw.stonemask(audio_norm_np, _f0, _time, self.hps.data.sampling_rate)
-        else:
-            f0, t = pw.harvest(audio_norm_np, self.hps.data.sampling_rate, frame_period=5.5, f0_floor=71.0, f0_ceil=1000.0)
-        f0 = convert_continuos_f0(f0, int(audio_norm_np.shape[0] / self.hps.data.hop_length))
-        f0 = torch.from_numpy(f0.astype(np.float32))
-
-        spec = spectrogram_torch(audio_norm, self.hps.data.filter_length,
-                                 self.hps.data.sampling_rate, self.hps.data.hop_length, self.hps.data.win_length,
-                                 center=False)
-        # dispose_stft_specs = 2
-        # spec = spec[:, dispose_stft_specs:-dispose_stft_specs]
-        # f0 = f0[dispose_stft_specs:-dispose_stft_specs]
-        spec = torch.squeeze(spec, 0)
-        sid = torch.LongTensor([int(self.settings.srcId)])
-
-        # data = (self.text_norm, spec, audio_norm, sid)
-        # data = TextAudioSpeakerCollate()([data])
-        data = TextAudioSpeakerCollate(
-            sample_rate=self.hps.data.sampling_rate,
-            hop_size=self.hps.data.hop_length,
-            f0_factor=self.settings.f0Factor
-        )([(spec, sid, f0)])
-
-        return data
+        # unpackedData = unpackedData / self.hps.data.max_wav_value  # normalize
+        self.audio_buffer = np.concatenate([self.audio_buffer, unpackedData], 0)  # concatenate with past data
+        self.audio_buffer = self.audio_buffer[-(convertSize):]  # extract only the portion to convert
+        # print("convert size", self.audio_buffer.shape)
+        return
 
     def _onnx_inference(self, data, inputSize):
         if hasattr(self, "onnx_session") == False or self.onnx_session == None:
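_generate_input no longer builds the spec/f0/sid batch; it just keeps a rolling buffer of the most recent raw samples and returns nothing, leaving the WAV serialization to _pyTorch_inference. A self-contained sketch of that rolling-buffer behaviour (chunk sizes are made up):

    import numpy as np

    convertSize = 8192
    audio_buffer = np.zeros(0, dtype=np.float32)     # starts empty, as in __init__

    for _ in range(3):
        chunk = np.random.randn(3000).astype(np.float32)         # stand-in for unpackedData
        audio_buffer = np.concatenate([audio_buffer, chunk], 0)   # append the newest chunk
        audio_buffer = audio_buffer[-convertSize:]                # keep only the last convertSize samples
        print(audio_buffer.shape)                                 # (3000,), (6000,), then capped at (8192,)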
@@ -404,114 +386,154 @@ class VoiceChanger():
         self.np_prev_audio1 = audio1
         return result
 
-    def _pyTorch_inference(self, data, inputSize):
+    def _pyTorch_inference(self, inputSize):
         if hasattr(self, "net_g") == False or self.net_g == None:
             print("[Voice Changer] No pyTorch session.")
             return np.zeros(1).astype(np.int16)
 
-        if self.settings.gpu < 0 or self.gpu_num == 0:
-            with torch.no_grad():
-                spec, spec_lengths, sid_src, sin, d = data
-                spec = spec.cpu()
-                spec_lengths = spec_lengths.cpu()
-                sid_src = sid_src.cpu()
-                sin = sin.cpu()
-                d = tuple([d[:1].cpu() for d in d])
-                sid_target = torch.LongTensor([self.settings.dstId]).cpu()
-
-                audio1 = self.net_g.cpu().voice_conversion(spec, spec_lengths, sin, d, sid_src, sid_target)[0, 0].data * self.hps.data.max_wav_value
-
-                if self.prev_strength.device != torch.device('cpu'):
-                    print(f"prev_strength move from {self.prev_strength.device} to cpu")
-                    self.prev_strength = self.prev_strength.cpu()
-                if self.cur_strength.device != torch.device('cpu'):
-                    print(f"cur_strength move from {self.cur_strength.device} to cpu")
-                    self.cur_strength = self.cur_strength.cpu()
-
-                if hasattr(self, 'prev_audio1') == True and self.prev_audio1.device == torch.device('cpu'):  # Skip one round if prev_audio1 is not on the desired device.
-                    overlapSize = min(self.settings.crossFadeOverlapSize, inputSize)
-                    prev_overlap = self.prev_audio1[-1 * overlapSize:]
-                    cur_overlap = audio1[-1 * (inputSize + overlapSize):-1 * inputSize]
-                    powered_prev = prev_overlap * self.prev_strength
-                    powered_cur = cur_overlap * self.cur_strength
-                    powered_result = powered_prev + powered_cur
-
-                    cur = audio1[-1 * inputSize:-1 * overlapSize]  # The raw part of this input (input minus the next crossfade portion).
-                    result = torch.cat([powered_result, cur], axis=0)  # Join the crossfade and the raw part of this input.
-
-                else:
-                    cur = audio1[-2 * inputSize:-1 * inputSize]
-                    result = cur
-
-                self.prev_audio1 = audio1
-                result = result.cpu().float().numpy()
-
+        self.raw_path.seek(0)
+        soundfile.write(self.raw_path, self.audio_buffer.astype(np.int16), 32000, format="wav")
+        write("test/received_data.wav", 32000, self.audio_buffer.astype(np.int16))
+
+        self.raw_path.seek(0)
+        out_audio, out_sr = self.net_g.infer('speaker1', 20, self.raw_path)
+        audio1 = out_audio * 32768.0
+        print("audio1.shape1", self.audio_buffer.shape, audio1.shape)
+
+        if self.prev_strength.device != torch.device('cuda', self.settings.gpu):
+            print(f"prev_strength move from {self.prev_strength.device} to gpu{self.settings.gpu}")
+            self.prev_strength = self.prev_strength.cuda(self.settings.gpu)
+        if self.cur_strength.device != torch.device('cuda', self.settings.gpu):
+            print(f"cur_strength move from {self.cur_strength.device} to gpu{self.settings.gpu}")
+            self.cur_strength = self.cur_strength.cuda(self.settings.gpu)
+
+        if hasattr(self, 'prev_audio1') == True and self.prev_audio1.device == torch.device('cuda', self.settings.gpu):
+            print("crossfade")
+            overlapSize = min(self.settings.crossFadeOverlapSize, inputSize)
+            prev_overlap = self.prev_audio1[-1 * overlapSize:]
+            cur_overlap = audio1[-1 * (inputSize + overlapSize):-1 * inputSize]
+            powered_prev = prev_overlap * self.prev_strength
+            powered_cur = cur_overlap * self.cur_strength
+            powered_result = powered_prev + powered_cur
+
+            # print(overlapSize, prev_overlap.shape, cur_overlap.shape, self.prev_strength.shape, self.cur_strength.shape)
+            # print(self.prev_audio1.shape, audio1.shape, inputSize, overlapSize)
+
+            cur = audio1[-1 * inputSize:-1 * overlapSize]  # The raw part of this input (input minus the next crossfade portion).
+            result = torch.cat([powered_result, cur], axis=0)  # Join the crossfade and the raw part of this input.
+
         else:
-            with torch.no_grad():
-                spec, spec_lengths, sid_src, sin, d = data
-                spec = spec.cuda(self.settings.gpu)
-                spec_lengths = spec_lengths.cuda(self.settings.gpu)
-                sid_src = sid_src.cuda(self.settings.gpu)
-                sin = sin.cuda(self.settings.gpu)
-                d = tuple([d[:1].cuda(self.settings.gpu) for d in d])
-                sid_target = torch.LongTensor([self.settings.dstId]).cuda(self.settings.gpu)
-
-                # audio1 = self.net_g.cuda(self.settings.gpu).voice_conversion(spec, spec_lengths, sid_src=sid_src,
-                #                                                              sid_tgt=sid_tgt1)[0, 0].data * self.hps.data.max_wav_value
-
-                audio1 = self.net_g.cuda(self.settings.gpu).voice_conversion(spec, spec_lengths, sin, d,
-                                                                             sid_src, sid_target)[0, 0].data * self.hps.data.max_wav_value
-
-                if self.prev_strength.device != torch.device('cuda', self.settings.gpu):
-                    print(f"prev_strength move from {self.prev_strength.device} to gpu{self.settings.gpu}")
-                    self.prev_strength = self.prev_strength.cuda(self.settings.gpu)
-                if self.cur_strength.device != torch.device('cuda', self.settings.gpu):
-                    print(f"cur_strength move from {self.cur_strength.device} to gpu{self.settings.gpu}")
-                    self.cur_strength = self.cur_strength.cuda(self.settings.gpu)
-
-                if hasattr(self, 'prev_audio1') == True and self.prev_audio1.device == torch.device('cuda', self.settings.gpu):
-                    overlapSize = min(self.settings.crossFadeOverlapSize, inputSize)
-                    prev_overlap = self.prev_audio1[-1 * overlapSize:]
-                    cur_overlap = audio1[-1 * (inputSize + overlapSize):-1 * inputSize]
-                    powered_prev = prev_overlap * self.prev_strength
-                    powered_cur = cur_overlap * self.cur_strength
-                    powered_result = powered_prev + powered_cur
-
-                    # print(overlapSize, prev_overlap.shape, cur_overlap.shape, self.prev_strength.shape, self.cur_strength.shape)
-                    # print(self.prev_audio1.shape, audio1.shape, inputSize, overlapSize)
-
-                    cur = audio1[-1 * inputSize:-1 * overlapSize]  # The raw part of this input (input minus the next crossfade portion).
-                    result = torch.cat([powered_result, cur], axis=0)  # Join the crossfade and the raw part of this input.
-
-                else:
-                    cur = audio1[-2 * inputSize:-1 * inputSize]
-                    result = cur
-                self.prev_audio1 = audio1
-
-                result = result.cpu().float().numpy()
+            print("no crossfade")
+            cur = audio1[-2 * inputSize:-1 * inputSize]
+            result = cur
+        self.prev_audio1 = audio1
+
+        result = result.cpu().float().numpy()
+
+        # if self.settings.gpu < 0 or self.gpu_num == 0:
+        #     with torch.no_grad():
+        #         spec, spec_lengths, sid_src, sin, d = data
+        #         spec = spec.cpu()
+        #         spec_lengths = spec_lengths.cpu()
+        #         sid_src = sid_src.cpu()
+        #         sin = sin.cpu()
+        #         d = tuple([d[:1].cpu() for d in d])
+        #         sid_target = torch.LongTensor([self.settings.dstId]).cpu()
+
+        #         audio1 = self.net_g.cpu().voice_conversion(spec, spec_lengths, sin, d, sid_src, sid_target)[0, 0].data * self.hps.data.max_wav_value
+
+        #         if self.prev_strength.device != torch.device('cpu'):
+        #             print(f"prev_strength move from {self.prev_strength.device} to cpu")
+        #             self.prev_strength = self.prev_strength.cpu()
+        #         if self.cur_strength.device != torch.device('cpu'):
+        #             print(f"cur_strength move from {self.cur_strength.device} to cpu")
+        #             self.cur_strength = self.cur_strength.cpu()
+
+        #         if hasattr(self, 'prev_audio1') == True and self.prev_audio1.device == torch.device('cpu'):  # Skip one round if prev_audio1 is not on the desired device.
+        #             overlapSize = min(self.settings.crossFadeOverlapSize, inputSize)
+        #             prev_overlap = self.prev_audio1[-1 * overlapSize:]
+        #             cur_overlap = audio1[-1 * (inputSize + overlapSize):-1 * inputSize]
+        #             powered_prev = prev_overlap * self.prev_strength
+        #             powered_cur = cur_overlap * self.cur_strength
+        #             powered_result = powered_prev + powered_cur
+
+        #             cur = audio1[-1 * inputSize:-1 * overlapSize]  # The raw part of this input (input minus the next crossfade portion).
+        #             result = torch.cat([powered_result, cur], axis=0)  # Join the crossfade and the raw part of this input.
+
+        #         else:
+        #             cur = audio1[-2 * inputSize:-1 * inputSize]
+        #             result = cur
+        #         self.prev_audio1 = audio1
+        #         result = result.cpu().float().numpy()
+
+        # else:
+        #     with torch.no_grad():
+        #         spec, spec_lengths, sid_src, sin, d = data
+        #         spec = spec.cuda(self.settings.gpu)
+        #         spec_lengths = spec_lengths.cuda(self.settings.gpu)
+        #         sid_src = sid_src.cuda(self.settings.gpu)
+        #         sin = sin.cuda(self.settings.gpu)
+        #         d = tuple([d[:1].cuda(self.settings.gpu) for d in d])
+        #         sid_target = torch.LongTensor([self.settings.dstId]).cuda(self.settings.gpu)
+
+        #         # audio1 = self.net_g.cuda(self.settings.gpu).voice_conversion(spec, spec_lengths, sid_src=sid_src,
+        #         #                                                              sid_tgt=sid_tgt1)[0, 0].data * self.hps.data.max_wav_value
+
+        #         audio1 = self.net_g.cuda(self.settings.gpu).voice_conversion(spec, spec_lengths, sin, d,
+        #                                                                      sid_src, sid_target)[0, 0].data * self.hps.data.max_wav_value
+
+        #         if self.prev_strength.device != torch.device('cuda', self.settings.gpu):
+        #             print(f"prev_strength move from {self.prev_strength.device} to gpu{self.settings.gpu}")
+        #             self.prev_strength = self.prev_strength.cuda(self.settings.gpu)
+        #         if self.cur_strength.device != torch.device('cuda', self.settings.gpu):
+        #             print(f"cur_strength move from {self.cur_strength.device} to gpu{self.settings.gpu}")
+        #             self.cur_strength = self.cur_strength.cuda(self.settings.gpu)
+
+        #         if hasattr(self, 'prev_audio1') == True and self.prev_audio1.device == torch.device('cuda', self.settings.gpu):
+        #             overlapSize = min(self.settings.crossFadeOverlapSize, inputSize)
+        #             prev_overlap = self.prev_audio1[-1 * overlapSize:]
+        #             cur_overlap = audio1[-1 * (inputSize + overlapSize):-1 * inputSize]
+        #             powered_prev = prev_overlap * self.prev_strength
+        #             powered_cur = cur_overlap * self.cur_strength
+        #             powered_result = powered_prev + powered_cur
+
+        #             # print(overlapSize, prev_overlap.shape, cur_overlap.shape, self.prev_strength.shape, self.cur_strength.shape)
+        #             # print(self.prev_audio1.shape, audio1.shape, inputSize, overlapSize)
+
+        #             cur = audio1[-1 * inputSize:-1 * overlapSize]  # The raw part of this input (input minus the next crossfade portion).
+        #             result = torch.cat([powered_result, cur], axis=0)  # Join the crossfade and the raw part of this input.
+
+        #         else:
+        #             cur = audio1[-2 * inputSize:-1 * inputSize]
+        #             result = cur
+        #         self.prev_audio1 = audio1
+
+        #         result = result.cpu().float().numpy()
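The crossfade itself is unchanged in spirit: the tail of the previous converted chunk is faded out while the head of the current chunk is faded in, and the blended overlap is prepended to the raw remainder. A minimal numeric sketch of that overlap-add, with linear ramps standing in for self.prev_strength and self.cur_strength (which the class builds elsewhere, in _generate_strength):

    import torch

    inputSize = 8
    overlapSize = 4

    prev_strength = torch.linspace(1.0, 0.0, overlapSize)      # fade-out ramp
    cur_strength = torch.linspace(0.0, 1.0, overlapSize)       # fade-in ramp

    prev_audio1 = torch.arange(16, dtype=torch.float32)        # previous converted chunk
    audio1 = torch.arange(100, 116, dtype=torch.float32)       # current converted chunk

    prev_overlap = prev_audio1[-overlapSize:]                  # tail of the previous output
    cur_overlap = audio1[-(inputSize + overlapSize):-inputSize]
    powered_result = prev_overlap * prev_strength + cur_overlap * cur_strength

    cur = audio1[-inputSize:-overlapSize]                      # raw part (input minus the next crossfade)
    result = torch.cat([powered_result, cur], dim=0)
    print(result.shape)                                        # torch.Size([8]): inputSize samples out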
         return result
 
     def on_request(self, unpackedData: any):
+        print("input size:", unpackedData.shape)
+        unpackedData = resampy.resample(unpackedData, 24000, 32000)
         with Timer("pre-process") as t:
-            if self.settings.inputSampleRate != 24000:
-                unpackedData = resampy.resample(unpackedData, 48000, 24000)
             convertSize = unpackedData.shape[0] + min(self.settings.crossFadeOverlapSize, unpackedData.shape[0])
-            # print(convertSize, unpackedData.shape[0])
             if convertSize < 8192:
                 convertSize = 8192
             if convertSize % 128 != 0:  # Pad because truncation occurs at the model's output hop size.
                 convertSize = convertSize + (128 - (convertSize % 128))
 
             self._generate_strength(unpackedData)
-            data = self._generate_input(unpackedData, convertSize)
+            self._generate_input(unpackedData, convertSize)
 
         preprocess_time = t.secs
 
         with Timer("main-process") as t:
             try:
                 if self.settings.framework == "ONNX":
-                    result = self._onnx_inference(data, unpackedData.shape[0])
+                    result = self._onnx_inference(unpackedData.shape[0])
                 else:
-                    result = self._pyTorch_inference(data, unpackedData.shape[0])
+                    result = self._pyTorch_inference(unpackedData.shape[0])
+                write("test/out_data.wav", 32000, result.astype(np.int16))
 
             except Exception as e:
                 print("VC PROCESSING!!!! EXCEPTION!!!", e)
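on_request now resamples each incoming 24 kHz chunk to 32 kHz up front and then pads the conversion window: never fewer than 8192 samples, and always a multiple of 128 because, per the comment, the model's output hop size would otherwise truncate. A small worked example of that size calculation (the crossFadeOverlapSize value is illustrative):

    crossFadeOverlapSize = 1024     # illustrative setting
    inputLength = 5461              # e.g. one 4096-sample 24 kHz chunk after resampling to 32 kHz

    convertSize = inputLength + min(crossFadeOverlapSize, inputLength)   # 6485
    if convertSize < 8192:          # enforce the minimum window
        convertSize = 8192
    if convertSize % 128 != 0:      # round up to a multiple of the output hop size
        convertSize = convertSize + (128 - (convertSize % 128))
    print(convertSize)              # 8192, already a multiple of 128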
@@ -524,18 +546,21 @@ class VoiceChanger():
         mainprocess_time = t.secs
 
         with Timer("post-process") as t:
-            result = result.astype(np.int16)
-            # print("on_request result size:",result.shape)
-            if self.settings.recordIO == 1:
-                self.stream_in.write(unpackedData.astype(np.int16).tobytes())
-                self.stream_out.write(result.tobytes())
+            result = resampy.resample(result, 32000, 24000).astype(np.int16)
+
+            # result = result.astype(np.int16)
+            # # print("on_request result size:",result.shape)
+            # if self.settings.recordIO == 1:
+            #     self.stream_in.write(unpackedData.astype(np.int16).tobytes())
+            #     self.stream_out.write(result.tobytes())
 
-            if self.settings.inputSampleRate != 24000:
-                result = resampy.resample(result, 24000, 48000).astype(np.int16)
+            # if self.settings.inputSampleRate != 24000:
+            #     result = resampy.resample(result, 24000, 48000).astype(np.int16)
         postprocess_time = t.secs
 
         perf = [preprocess_time, mainprocess_time, postprocess_time]
 
+        print("output size:", result.shape)
         return result, perf
 
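Post-processing mirrors the input side: the 32 kHz model output is resampled back to 24 kHz and cast to int16 before being returned to the client, while the old recordIO and inputSampleRate handling is commented out. A round-trip sketch of that sample-rate path (the waveform is synthetic, and the 32768 factor stands in for the output scaling used above):

    import numpy as np
    import resampy

    t = np.linspace(0.0, 1.0, 24000, endpoint=False)
    chunk_24k = 0.5 * np.sin(2 * np.pi * 440.0 * t)              # one second of a 440 Hz tone at 24 kHz

    chunk_32k = resampy.resample(chunk_24k, 24000, 32000)        # input side, as in on_request
    converted = chunk_32k * 32768.0                              # stand-in for the converted model output
    result = resampy.resample(converted, 32000, 24000).astype(np.int16)  # output side, back to the client rate

    print(chunk_24k.shape, chunk_32k.shape, result.shape, result.dtype)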