voice-changer/server/voice_changer/VoiceChanger.py

from const import ERROR_NO_ONNX_SESSION, TMP_DIR
import torch
import os
import traceback
import numpy as np
from dataclasses import dataclass, asdict
import resampy
import onnxruntime
from symbols import symbols
from models import SynthesizerTrn
import pyworld as pw
# from voice_changer.TrainerFunctions import TextAudioSpeakerCollate, spectrogram_torch, load_checkpoint, get_hparams_from_file
from voice_changer.client_modules import convert_continuos_f0, spectrogram_torch, TextAudioSpeakerCollate, get_hparams_from_file, load_checkpoint

providers = ['OpenVINOExecutionProvider', "CUDAExecutionProvider", "DmlExecutionProvider", "CPUExecutionProvider"]
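# Providers are tried in the order listed; onnxruntime falls back to the next
# entry when a provider is not available in the installed build.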

import wave
import matplotlib
matplotlib.use('Agg')
import pylab
import librosa
import librosa.display

SAMPLING_RATE = 24000

import pyaudio
import json
from multiprocessing import Process, Queue

class MockStream:
    """
    A mock that transparently replaces audio streaming I/O with file I/O.
    """

    def __init__(self, sampling_rate):
        self.sampling_rate = sampling_rate
        self.start_count = 2
        self.end_count = 2
        self.fr = None
        self.fw = None

    def open_inputfile(self, input_filename):
        self.fr = wave.open(input_filename, 'rb')

    def open_outputfile(self, output_filename):
        self.fw = wave.open(output_filename, 'wb')
        self.fw.setnchannels(1)
        self.fw.setsampwidth(2)
        self.fw.setframerate(self.sampling_rate)

    def read(self, length, exception_on_overflow=False):
        if self.start_count > 0:
            wav = bytes(length * 2)
            self.start_count -= 1  # send dummy (empty) data for the first two reads
        else:
            wav = self.fr.readframes(length)
            if len(wav) <= 0:  # once the data runs out, send dummy (empty) data for the last two reads
                wav = bytes(length * 2)
                self.end_count -= 1
                if self.end_count < 0:
                    # NOTE: Hyperparameters is not defined or imported in this module.
                    Hyperparameters.VC_END_FLAG = True
        return wav

    def write(self, wav):
        self.fw.writeframes(wav)

    def stop_stream(self):
        pass

    def close(self):
        if self.fr is not None:
            self.fr.close()
            self.fr = None
        if self.fw is not None:
            self.fw.close()
            self.fw = None
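
# Example (sketch): MockStream mirrors the read/write surface of a pyaudio
# stream, so file I/O can stand in for live audio:
#   mock = MockStream(24000)
#   mock.open_outputfile("out.wav")
#   mock.write(chunk_bytes)  # same call shape as pyaudio's Stream.write
#   mock.close()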

@dataclass
class VoiceChangerSettings():
    gpu: int = 0
    srcId: int = 107
    dstId: int = 100
    crossFadeOffsetRate: float = 0.1
    crossFadeEndRate: float = 0.9
    crossFadeOverlapRate: float = 0.9
    convertChunkNum: int = 32
    minConvertSize: int = 0
    framework: str = "PyTorch"  # PyTorch or ONNX
    f0Factor: float = 1.0
    f0Detector: str = "dio"  # dio or harvest
    recordIO: int = 1  # 0: off, 1: on, 2: analyze the recorded input
    serverMicProps: str = ""
    inputSampleRate: int = 48000  # 48000 or 24000

    pyTorchModelFile: str = ""
    onnxModelFile: str = ""
    configFile: str = ""

    # ↓ only the mutable fields are listed
    intData = ["gpu", "srcId", "dstId", "convertChunkNum", "minConvertSize", "recordIO", "inputSampleRate"]
    floatData = ["crossFadeOffsetRate", "crossFadeEndRate", "crossFadeOverlapRate", "f0Factor"]
    strData = ["framework", "f0Detector", "serverMicProps"]
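
# These three lists drive update_setteings() below: an incoming value is
# coerced to int, float, or str depending on which list its key appears in.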

def readMicrophone(queue, sid, deviceIndex):
    # Placeholder: the server-side microphone reader process currently only logs its arguments.
    print("READ MIC", queue, sid, deviceIndex)


class VoiceChanger():

    def __init__(self):
        # initialization
        self.settings = VoiceChangerSettings()
        self.unpackedData_length = 0
        self.net_g = None
        self.onnx_session = None
        self.currentCrossFadeOffsetRate = 0
        self.currentCrossFadeEndRate = 0
        self.currentCrossFadeOverlapRate = 0

        self.gpu_num = torch.cuda.device_count()
        self.text_norm = torch.LongTensor([0, 6, 0])
        self.audio_buffer = torch.zeros(1, 0)
        self.prev_audio = np.zeros(1)
        self.mps_enabled = getattr(torch.backends, "mps", None) is not None and torch.backends.mps.is_available()

        self._setupRecordIO()

        print(f"VoiceChanger Initialized (GPU_NUM:{self.gpu_num}, mps_enabled:{self.mps_enabled})")

    def _setupRecordIO(self):
        # IO Recorder Setup
        if hasattr(self, "stream_out"):
            self.stream_out.close()
        mock_stream_out = MockStream(24000)
        stream_output_file = os.path.join(TMP_DIR, "out.wav")
        if os.path.exists(stream_output_file):
            print("delete old analyze file.", stream_output_file)
            os.remove(stream_output_file)
        else:
            print("old analyze file does not exist.", stream_output_file)
        mock_stream_out.open_outputfile(stream_output_file)
        self.stream_out = mock_stream_out

        if hasattr(self, "stream_in"):
            self.stream_in.close()
        mock_stream_in = MockStream(24000)
        stream_input_file = os.path.join(TMP_DIR, "in.wav")
        if os.path.exists(stream_input_file):
            print("delete old analyze file.", stream_input_file)
            os.remove(stream_input_file)
        else:
            print("old analyze file does not exist.", stream_input_file)
        mock_stream_in.open_outputfile(stream_input_file)
        self.stream_in = mock_stream_in

    def loadModel(self, config: str, pyTorch_model_file: str = None, onnx_model_file: str = None):
        self.settings.configFile = config
        self.hps = get_hparams_from_file(config)
        if pyTorch_model_file is not None:
            self.settings.pyTorchModelFile = pyTorch_model_file
        if onnx_model_file:
            self.settings.onnxModelFile = onnx_model_file

        # Build the PyTorch model
        if pyTorch_model_file is not None:
            self.net_g = SynthesizerTrn(
                spec_channels=self.hps.data.filter_length // 2 + 1,
                segment_size=self.hps.train.segment_size // self.hps.data.hop_length,
                inter_channels=self.hps.model.inter_channels,
                hidden_channels=self.hps.model.hidden_channels,
                upsample_rates=self.hps.model.upsample_rates,
                upsample_initial_channel=self.hps.model.upsample_initial_channel,
                upsample_kernel_sizes=self.hps.model.upsample_kernel_sizes,
                n_flow=self.hps.model.n_flow,
                dec_out_channels=1,
                dec_kernel_size=7,
                n_speakers=self.hps.data.n_speakers,
                gin_channels=self.hps.model.gin_channels,
                requires_grad_pe=self.hps.requires_grad.pe,
                requires_grad_flow=self.hps.requires_grad.flow,
                requires_grad_text_enc=self.hps.requires_grad.text_enc,
                requires_grad_dec=self.hps.requires_grad.dec
            )
            self.net_g.eval()
            load_checkpoint(pyTorch_model_file, self.net_g, None)
            # utils.load_checkpoint(pyTorch_model_file, self.net_g, None)

        # Build the ONNX session
        if onnx_model_file is not None:
            ort_options = onnxruntime.SessionOptions()
            ort_options.intra_op_num_threads = 8
            self.onnx_session = onnxruntime.InferenceSession(
                onnx_model_file,
                sess_options=ort_options,
                providers=providers
            )
        return self.get_info()

    def destroy(self):
        del self.net_g
        del self.onnx_session

    def get_info(self):
        data = asdict(self.settings)
        data["onnxExecutionProvider"] = self.onnx_session.get_providers() if self.onnx_session is not None else []
        files = ["configFile", "pyTorchModelFile", "onnxModelFile"]
        for f in files:
            if data[f] is not None and os.path.exists(data[f]):
                data[f] = os.path.basename(data[f])
            else:
                data[f] = ""
        return data

    def _get_f0_dio(self, y, sr=SAMPLING_RATE):
        _f0, time = pw.dio(y, sr, frame_period=5)
        f0 = pw.stonemask(y, _f0, time, sr)
        time = np.linspace(0, y.shape[0] / sr, len(time))
        return f0, time

    def _get_f0_harvest(self, y, sr=SAMPLING_RATE):
        _f0, time = pw.harvest(y, sr, frame_period=5)
        f0 = pw.stonemask(y, _f0, time, sr)
        time = np.linspace(0, y.shape[0] / sr, len(time))
        return f0, time

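    # Note: pyworld's dio is the fast, rougher F0 estimator and harvest the slower,
    # generally more robust one; both raw tracks are refined with stonemask above.
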
    def update_setteings(self, key: str, val: any):
        if key == "onnxExecutionProvider" and self.onnx_session is not None:
            if val == "CUDAExecutionProvider":
                if self.settings.gpu < 0 or self.settings.gpu >= self.gpu_num:
                    self.settings.gpu = 0
                provider_options = [{'device_id': self.settings.gpu}]
                self.onnx_session.set_providers(providers=[val], provider_options=provider_options)
            else:
                self.onnx_session.set_providers(providers=[val])
        elif key in self.settings.intData:
            setattr(self.settings, key, int(val))
            if key == "gpu" and val >= 0 and val < self.gpu_num and self.onnx_session is not None:
                providers = self.onnx_session.get_providers()
                print("Providers:", providers)
                if "CUDAExecutionProvider" in providers:
                    provider_options = [{'device_id': self.settings.gpu}]
                    self.onnx_session.set_providers(providers=["CUDAExecutionProvider"], provider_options=provider_options)
            if key == "recordIO" and val == 1:
                self._setupRecordIO()
            if key == "recordIO" and val == 0:
                pass
            if key == "recordIO" and val == 2:
                try:
                    stream_input_file = os.path.join(TMP_DIR, "in.wav")
                    analyze_file_dio = os.path.join(TMP_DIR, "analyze-dio.png")
                    analyze_file_harvest = os.path.join(TMP_DIR, "analyze-harvest.png")
                    y, sr = librosa.load(stream_input_file, sr=SAMPLING_RATE)
                    y = y.astype(np.float64)
                    spec = librosa.amplitude_to_db(np.abs(librosa.stft(y, n_fft=2048, win_length=2048, hop_length=128)), ref=np.max)
                    f0_dio, times = self._get_f0_dio(y)
                    f0_harvest, times = self._get_f0_harvest(y)

                    # Render the spectrogram twice, once with each F0 track overlaid.
                    HOP_LENGTH = 128
                    pylab.close()
                    librosa.display.specshow(spec, sr=SAMPLING_RATE, hop_length=HOP_LENGTH, x_axis='time', y_axis='log')
                    pylab.plot(times, f0_dio, label='f0', color=(0, 1, 1, 0.6), linewidth=3)
                    pylab.savefig(analyze_file_dio)

                    pylab.close()
                    librosa.display.specshow(spec, sr=SAMPLING_RATE, hop_length=HOP_LENGTH, x_axis='time', y_axis='log')
                    pylab.plot(times, f0_harvest, label='f0', color=(0, 1, 1, 0.6), linewidth=3)
                    pylab.savefig(analyze_file_harvest)
                except Exception as e:
                    print("recordIO exception", e)
        elif key in self.settings.floatData:
            setattr(self.settings, key, float(val))
            # The crossfade rates are float fields; resetting here forces
            # _generate_strength to rebuild the fade curves.
            if key == "crossFadeOffsetRate" or key == "crossFadeEndRate":
                self.unpackedData_length = 0
        elif key in self.settings.strData:
            setattr(self.settings, key, str(val))
            if key == "serverMicProps":
                if hasattr(self, "serverMicrophoneReaderProcess"):
                    self.serverMicrophoneReaderProcess.terminate()
                if len(val) == 0:
                    print("server mic close")
                else:
                    props = json.loads(val)
                    print(props)
                    sid = props["sid"]
                    deviceIndex = props["deviceIndex"]
                    self.serverMicrophoneReaderProcessQueue = Queue()
                    self.serverMicrophoneReaderProcess = Process(target=readMicrophone, args=(
                        self.serverMicrophoneReaderProcessQueue, sid, deviceIndex,))
                    self.serverMicrophoneReaderProcess.start()
                    try:
                        print(sid, deviceIndex)
                    except Exception as e:
                        print(e)
                    # audio = pyaudio.PyAudio()
                    # audio_input_stream = audio.open(format=pyaudio.paInt16,
                    #                                 channels=1,
                    #                                 rate=SAMPLING_RATE,
                    #                                 frames_per_buffer=4096,
                    #                                 input_device_index=val,
                    #                                 input=True)
        else:
            print(f"{key} is not a mutable variable!")
        return self.get_info()

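    # Example (sketch): settings are updated one key at a time, e.g.
    #   vc.update_setteings("srcId", 108)         # int field
    #   vc.update_setteings("framework", "ONNX")  # str field
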
    def _generate_strength(self, unpackedData):
        if (self.unpackedData_length != unpackedData.shape[0]
                or self.currentCrossFadeOffsetRate != self.settings.crossFadeOffsetRate
                or self.currentCrossFadeEndRate != self.settings.crossFadeEndRate
                or self.currentCrossFadeOverlapRate != self.settings.crossFadeOverlapRate):
            self.unpackedData_length = unpackedData.shape[0]
            self.currentCrossFadeOffsetRate = self.settings.crossFadeOffsetRate
            self.currentCrossFadeEndRate = self.settings.crossFadeEndRate
            self.currentCrossFadeOverlapRate = self.settings.crossFadeOverlapRate

            overlapSize = int(unpackedData.shape[0] * self.settings.crossFadeOverlapRate)
            cf_offset = int(overlapSize * self.settings.crossFadeOffsetRate)
            cf_end = int(overlapSize * self.settings.crossFadeEndRate)
            cf_range = cf_end - cf_offset
            percent = np.arange(cf_range) / cf_range

            np_prev_strength = np.cos(percent * 0.5 * np.pi) ** 2
            np_cur_strength = np.cos((1 - percent) * 0.5 * np.pi) ** 2

            self.np_prev_strength = np.concatenate([np.ones(cf_offset), np_prev_strength, np.zeros(overlapSize - cf_offset - len(np_prev_strength))])
            self.np_cur_strength = np.concatenate([np.zeros(cf_offset), np_cur_strength, np.ones(overlapSize - cf_offset - len(np_cur_strength))])

            self.prev_strength = torch.FloatTensor(self.np_prev_strength)
            self.cur_strength = torch.FloatTensor(self.np_cur_strength)

            # torch.set_printoptions(edgeitems=2100)
            print("Generated Strengths")
            # print(f"cross fade: start:{cf_offset} end:{cf_end} range:{cf_range}")
            # print(f"target_len:{unpackedData.shape[0]}, prev_len:{len(self.prev_strength)} cur_len:{len(self.cur_strength)}")
            # print("Prev", self.prev_strength)
            # print("Cur", self.cur_strength)

            # The output size differs from the previous chunk's, so discard the stored result.
            if hasattr(self, 'prev_audio1'):
                delattr(self, "prev_audio1")

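    # Note: the two fade curves satisfy cos(x)^2 + cos(pi/2 - x)^2 = 1, so the
    # overlapping samples are blended with weights that always sum to one and
    # the crossfade region keeps constant overall gain.
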
    def _generate_input(self, unpackedData: any, convertSize: int):
        # Shape the data to be converted this pass into tensors.
        audio = torch.FloatTensor(unpackedData.astype(np.float32))  # create the tensor as float32
        audio_norm = audio / self.hps.data.max_wav_value  # normalize
        audio_norm = audio_norm.unsqueeze(0)  # unsqueeze
        self.audio_buffer = torch.cat([self.audio_buffer, audio_norm], axis=1)  # append to the buffered past data
        # audio_norm = self.audio_buffer[:, -(convertSize + 1280 * 2):]  # extract only the part to convert
        audio_norm = self.audio_buffer[:, -(convertSize):]  # extract only the part to convert
        self.audio_buffer = audio_norm

        # TBD: the data bounces back and forth between numpy and pytorch here; getting it working takes priority for now.
        audio_norm_np = audio_norm.squeeze().numpy().astype(np.float64)
        if self.settings.f0Detector == "dio":
            _f0, _time = pw.dio(audio_norm_np, self.hps.data.sampling_rate, frame_period=5.5)
            f0 = pw.stonemask(audio_norm_np, _f0, _time, self.hps.data.sampling_rate)
        else:
            f0, _time = pw.harvest(audio_norm_np, self.hps.data.sampling_rate, frame_period=5.5, f0_floor=71.0, f0_ceil=1000.0)
        f0 = convert_continuos_f0(f0, int(audio_norm_np.shape[0] / self.hps.data.hop_length))
        f0 = torch.from_numpy(f0.astype(np.float32))

        spec = spectrogram_torch(audio_norm, self.hps.data.filter_length,
                                 self.hps.data.sampling_rate, self.hps.data.hop_length, self.hps.data.win_length,
                                 center=False)
        # dispose_stft_specs = 2
        # spec = spec[:, dispose_stft_specs:-dispose_stft_specs]
        # f0 = f0[dispose_stft_specs:-dispose_stft_specs]
        spec = torch.squeeze(spec, 0)
        sid = torch.LongTensor([int(self.settings.srcId)])

        # data = (self.text_norm, spec, audio_norm, sid)
        # data = TextAudioSpeakerCollate()([data])
        data = TextAudioSpeakerCollate(
            sample_rate=self.hps.data.sampling_rate,
            hop_size=self.hps.data.hop_length,
            f0_factor=self.settings.f0Factor
        )([(spec, sid, f0)])
        return data, f0.numpy()

    def _onnx_inference(self, data, inputSize):
        if not hasattr(self, "onnx_session") or self.onnx_session is None:
            print("[Voice Changer] No ONNX session.")
            return np.zeros(1).astype(np.int16)

        # NOTE: this 7-tuple unpacking appears to predate the current collate output;
        # _generate_input now returns (spec, spec_lengths, sid_src, sin, d) as
        # consumed by the PyTorch path below.
        x, x_lengths, spec, spec_lengths, y, y_lengths, sid_src = data
        sid_tgt1 = torch.LongTensor([self.settings.dstId])
        # if spec.size()[2] >= 8:
        audio1 = self.onnx_session.run(
            ["audio"],
            {
                "specs": spec.numpy(),
                "lengths": spec_lengths.numpy(),
                "sid_src": sid_src.numpy(),
                "sid_tgt": sid_tgt1.numpy()
            })[0][0, 0] * self.hps.data.max_wav_value
        if hasattr(self, 'np_prev_audio1'):
            overlapSize = int(inputSize * self.settings.crossFadeOverlapRate)
            prev_overlap = self.np_prev_audio1[-1 * overlapSize:]
            cur_overlap = audio1[-1 * (inputSize + overlapSize):-1 * inputSize]
            # print(prev_overlap.shape, self.np_prev_strength.shape, cur_overlap.shape, self.np_cur_strength.shape)
            # print(">>>>>>>>>>>", -1 * (inputSize + overlapSize), -1 * inputSize)
            powered_prev = prev_overlap * self.np_prev_strength
            powered_cur = cur_overlap * self.np_cur_strength
            powered_result = powered_prev + powered_cur

            cur = audio1[-1 * inputSize:-1 * overlapSize]
            result = np.concatenate([powered_result, cur], axis=0)
        else:
            result = np.zeros(1).astype(np.int16)
        self.np_prev_audio1 = audio1
        return result

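    # Stitching summary: each inference call returns `inputSize` samples. The first
    # `overlapSize` of them are the previous chunk's tail crossfaded with the matching
    # region of this chunk; this chunk's own tail is held back and blended into the
    # next call's output.
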
    def _pyTorch_inference(self, data, inputSize):
        if not hasattr(self, "net_g") or self.net_g is None:
            print("[Voice Changer] No pyTorch session.")
            return np.zeros(1).astype(np.int16)

        if self.settings.gpu < 0 or self.gpu_num == 0:
            with torch.no_grad():
                spec, spec_lengths, sid_src, sin, d = data
                spec = spec.cpu()
                spec_lengths = spec_lengths.cpu()
                sid_src = sid_src.cpu()
                sin = sin.cpu()
                d = tuple([t[:1].cpu() for t in d])
                sid_target = torch.LongTensor([self.settings.dstId]).cpu()
                audio1 = self.net_g.cpu().voice_conversion(spec, spec_lengths, sin, d, sid_src, sid_target)[0, 0].data * self.hps.data.max_wav_value

                if self.prev_strength.device != torch.device('cpu'):
                    print(f"prev_strength move from {self.prev_strength.device} to cpu")
                    self.prev_strength = self.prev_strength.cpu()
                if self.cur_strength.device != torch.device('cpu'):
                    print(f"cur_strength move from {self.cur_strength.device} to cpu")
                    self.cur_strength = self.cur_strength.cpu()

                # If prev_audio1 is not yet on the desired device, skip the crossfade for this chunk.
                if hasattr(self, 'prev_audio1') and self.prev_audio1.device == torch.device('cpu'):
                    overlapSize = int(inputSize * self.settings.crossFadeOverlapRate)
                    prev_overlap = self.prev_audio1[-1 * overlapSize:]
                    cur_overlap = audio1[-1 * (inputSize + overlapSize):-1 * inputSize]
                    powered_prev = prev_overlap * self.prev_strength
                    powered_cur = cur_overlap * self.cur_strength
                    powered_result = powered_prev + powered_cur

                    cur = audio1[-1 * inputSize:-1 * overlapSize]  # the raw part of this input (input minus the next crossfade section)
                    result = torch.cat([powered_result, cur], axis=0)  # concatenate the crossfaded head with the raw part
                else:
                    cur = audio1[-2 * inputSize:-1 * inputSize]
                    result = cur
                self.prev_audio1 = audio1
                result = result.cpu().float().numpy()
        else:
            with torch.no_grad():
                spec, spec_lengths, sid_src, sin, d = data
                spec = spec.cuda(self.settings.gpu)
                spec_lengths = spec_lengths.cuda(self.settings.gpu)
                sid_src = sid_src.cuda(self.settings.gpu)
                sin = sin.cuda(self.settings.gpu)
                d = tuple([t[:1].cuda(self.settings.gpu) for t in d])
                sid_target = torch.LongTensor([self.settings.dstId]).cuda(self.settings.gpu)
                # audio1 = self.net_g.cuda(self.settings.gpu).voice_conversion(spec, spec_lengths, sid_src=sid_src,
                #                                                              sid_tgt=sid_tgt1)[0, 0].data * self.hps.data.max_wav_value
                audio1 = self.net_g.cuda(self.settings.gpu).voice_conversion(spec, spec_lengths, sin, d,
                                                                             sid_src, sid_target)[0, 0].data * self.hps.data.max_wav_value

                if self.prev_strength.device != torch.device('cuda', self.settings.gpu):
                    print(f"prev_strength move from {self.prev_strength.device} to gpu{self.settings.gpu}")
                    self.prev_strength = self.prev_strength.cuda(self.settings.gpu)
                if self.cur_strength.device != torch.device('cuda', self.settings.gpu):
                    print(f"cur_strength move from {self.cur_strength.device} to gpu{self.settings.gpu}")
                    self.cur_strength = self.cur_strength.cuda(self.settings.gpu)

                if hasattr(self, 'prev_audio1') and self.prev_audio1.device == torch.device('cuda', self.settings.gpu):
                    overlapSize = int(inputSize * self.settings.crossFadeOverlapRate)
                    prev_overlap = self.prev_audio1[-1 * overlapSize:]
                    cur_overlap = audio1[-1 * (inputSize + overlapSize):-1 * inputSize]
                    powered_prev = prev_overlap * self.prev_strength
                    powered_cur = cur_overlap * self.cur_strength
                    powered_result = powered_prev + powered_cur

                    cur = audio1[-1 * inputSize:-1 * overlapSize]  # the raw part of this input (input minus the next crossfade section)
                    result = torch.cat([powered_result, cur], axis=0)  # concatenate the crossfaded head with the raw part
                else:
                    cur = audio1[-2 * inputSize:-1 * inputSize]
                    result = cur
                self.prev_audio1 = audio1
                result = result.cpu().float().numpy()
        return result

    def on_request(self, unpackedData: any):
        if self.settings.inputSampleRate != 24000:
            print("convert sampling rate!", self.settings.inputSampleRate)
            unpackedData = resampy.resample(unpackedData, self.settings.inputSampleRate, 24000)

        convertSize = self.settings.convertChunkNum * 128  # 128 samples per chunk
        # print("convsize:", unpackedData.shape[0] * (1 + self.settings.crossFadeOverlapRate))
        if unpackedData.shape[0] * (1 + self.settings.crossFadeOverlapRate) + 1024 > convertSize:
            convertSize = int(unpackedData.shape[0] * (1 + self.settings.crossFadeOverlapRate)) + 1024
        if convertSize < self.settings.minConvertSize:
            convertSize = self.settings.minConvertSize
        # print("convert Size", unpackedData.shape[0], unpackedData.shape[0] * (1 + self.settings.crossFadeOverlapRate), convertSize, self.settings.minConvertSize)

        # convertSize = 8192
        self._generate_strength(unpackedData)
        # f0 is returned for debugging only
        data, f0 = self._generate_input(unpackedData, convertSize)

        try:
            if self.settings.framework == "ONNX":
                result = self._onnx_inference(data, unpackedData.shape[0])
            else:
                result = self._pyTorch_inference(data, unpackedData.shape[0])
        except Exception as e:
            print("VC PROCESSING!!!! EXCEPTION!!!", e)
            print(traceback.format_exc())
            if hasattr(self, "np_prev_audio1"):
                del self.np_prev_audio1
            if hasattr(self, "prev_audio1"):
                del self.prev_audio1
            return np.zeros(1).astype(np.int16)

        result = result.astype(np.int16)
        # print("on_request result size:", result.shape)
        if self.settings.recordIO == 1:
            self.stream_in.write(unpackedData.astype(np.int16).tobytes())
            self.stream_out.write(result.tobytes())
        return result
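
# Typical call flow (sketch; the actual server wiring lives outside this file,
# and the file names below are illustrative):
#   vc = VoiceChanger()
#   vc.loadModel("config.json", pyTorch_model_file="model.pth")
#   out = vc.on_request(np.frombuffer(chunk, dtype=np.int16))  # int16 PCM in, int16 PCM out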