Mirror of https://github.com/w-okada/voice-changer.git (synced 2025-01-23 21:45:00 +03:00)
WIP: Pitch extractor refactoring
parent a69c89255b
commit 6a09338af5
@@ -110,7 +110,6 @@ class Pipeline(object):
         # -> [Perform]: 0.029046058654785156 0.0025115013122558594 (CPU i9 13900KF)
         # ---> For processing this light, might it be faster to just run it with Torch on the CPU?
         '''
-        # volume_t = self.volumeExtractor.extract_t(audio)
         volume_t = self.volumeExtractor.extract_t(audio)
         mask = self.volumeExtractor.get_mask_from_volume_t(volume_t, self.inferencer_block_size, threshold=threshold)
         volume = volume_t.unsqueeze(-1).unsqueeze(0)
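Note: extract_t returns a frame-wise volume envelope that everything below is shaped from. A minimal sketch of what such an extractor could look like, assuming RMS over hop-sized windows (the function name and the padding/hop handling are assumptions, not the repo's actual VolumeExtractor):

    import torch

    def extract_volume_t(audio: torch.Tensor, hop_size: int) -> torch.Tensor:
        # Pad so the trailing partial frame survives, then take per-frame RMS.
        n_frames = audio.shape[-1] // hop_size + 1
        pad = n_frames * hop_size - audio.shape[-1]
        audio = torch.nn.functional.pad(audio, (0, pad))
        frames = audio.reshape(n_frames, hop_size)
        return frames.pow(2).mean(dim=-1).sqrt()  # shape: (n_frames,)

The unsqueeze calls in the diff then reshape this (n_frames,) envelope into (1, n_frames, 1) for the inferencer.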
@@ -136,8 +135,7 @@ class Pipeline(object):

         # Pitch detection
         try:
-            # print("[SRC AUDIO----]", audio_pad)
-            pitch, pitchf = self.pitchExtractor.extract(
+            pitch = self.pitchExtractor.extract(
                 audio16k.squeeze(),
                 pitchf,
                 f0_up_key,
@@ -146,8 +144,7 @@ class Pipeline(object):
                 silence_front=silence_front,
             )

-            pitch = torch.tensor(pitch[-n_frames:], device=self.device).unsqueeze(0).long()  # The buffer is built assuming a 160-sample window, so trim to n_frames.
-            pitchf = torch.tensor(pitchf[-n_frames:], device=self.device, dtype=torch.float).unsqueeze(0)  # The buffer is built assuming a 160-sample window, so trim to n_frames.
+            pitch = torch.tensor(pitch[-n_frames:], device=self.device).unsqueeze(0).long()
         except IndexError as e:  # NOQA
             # print(e)
             raise NotEnoughDataExtimateF0()
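Note: the [-n_frames:] slice keeps only the most recent frames because the buffer is sized for a 160-sample window. A toy illustration of the conversion (lengths and values made up):

    import numpy as np
    import torch

    n_frames = 100
    f0 = np.random.rand(120) * 200                       # stand-in for the extractor output
    pitch = torch.tensor(f0[-n_frames:]).unsqueeze(0).long()
    print(pitch.shape)                                   # torch.Size([1, 100])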
@@ -217,12 +214,12 @@ class Pipeline(object):
         try:
             with torch.no_grad():
                 with autocast(enabled=self.isHalf):
-                    print("[EMBEDDER EXTRACT:::]", feats.shape, pitchf.unsqueeze(-1).shape, volume.shape, mask.shape)
+                    print("[EMBEDDER EXTRACT:::]", feats.shape, pitch.unsqueeze(-1).shape, volume.shape, mask.shape)
                     audio1 = (
                         torch.clip(
                             self.inferencer.infer(
                                 feats,
-                                pitchf.unsqueeze(-1),
+                                pitch.unsqueeze(-1),
                                 volume,
                                 mask,
                                 sid,
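Note: the pattern here is standard mixed-precision inference: no autograd, autocast toggled by the half-precision flag, output clipped to the waveform range. A self-contained sketch (the model, argument shapes, and clip bounds are placeholders, not the repo's exact call):

    import torch
    from torch.cuda.amp import autocast

    def run_inference(model, feats, pitch, volume, mask, sid, use_half=False):
        with torch.no_grad():                      # inference only, no graph kept
            with autocast(enabled=use_half):       # fp16 kernels on CUDA when enabled
                out = model(feats, pitch.unsqueeze(-1), volume, mask, sid)
        return torch.clip(out, -1.0, 1.0)          # keep audio in [-1, 1]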
@@ -243,12 +240,12 @@ class Pipeline(object):
             raise e

         feats_buffer = feats.squeeze(0).detach().cpu()
-        if pitchf is not None:
-            pitchf_buffer = pitchf.squeeze(0).detach().cpu()
+        if pitch is not None:
+            pitch_buffer = pitch.squeeze(0).detach().cpu()
         else:
-            pitchf_buffer = None
+            pitch_buffer = None

         del pitch, pitchf, feats, sid
         torch.cuda.empty_cache()
         audio1 = self.resamplerOut(audio1.float())
-        return audio1, pitchf_buffer, feats_buffer
+        return audio1, pitch_buffer, feats_buffer
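Note: detaching and moving the cached buffers to the CPU keeps the cross-call state from pinning GPU memory or dragging autograd references along. A small helper expressing the same idea (the name is hypothetical):

    from typing import Optional
    import torch

    def to_cpu_buffer(t: Optional[torch.Tensor]) -> Optional[torch.Tensor]:
        # Drop the batch dim, cut autograd references, and copy off-device,
        # so only the inference call itself holds GPU memory.
        return t.squeeze(0).detach().cpu() if t is not None else None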
@@ -2,7 +2,7 @@ import pyworld
 import numpy as np
 import scipy.signal as signal
 from const import PitchExtractorType

+import torch
 from voice_changer.RVC.pitchExtractor.PitchExtractor import PitchExtractor
-

@@ -12,45 +12,18 @@ class HarvestPitchExtractor(PitchExtractor):
         super().__init__()
         self.pitchExtractorType: PitchExtractorType = "harvest"

-    def extract(self, audio, pitchf, f0_up_key, sr, window, silence_front=0):
+    def extract(self, audio: torch.Tensor, pitchf, f0_up_key, sr, window, silence_front=0):
         audio = audio.detach().cpu().numpy()
         n_frames = int(len(audio) // window) + 1  # NOQA
-        start_frame = int(silence_front * sr / window)
-        real_silence_front = start_frame * window / sr
-
-        # silence_front_offset = int(np.round(real_silence_front * sr))
-        # audio = audio[silence_front_offset:]
-
-        f0_min = 50
-        f0_max = 1100
-        f0_mel_min = 1127 * np.log(1 + f0_min / 700)
-        f0_mel_max = 1127 * np.log(1 + f0_max / 700)
+        # f0_mel_min = 1127 * np.log(1 + f0_min / 700)
+        # f0_mel_max = 1127 * np.log(1 + f0_max / 700)

         f0 = self.extract2(audio, uv_interp=True, hop_size=window, silence_front=silence_front)
         f0 = f0 * 2 ** (float(f0_up_key) / 12)
         pitchf = f0

-        # f0, t = pyworld.harvest(
-        #     audio.astype(np.double),
-        #     fs=sr,
-        #     f0_ceil=f0_max,
-        #     frame_period=10,
-        # )
-        # f0 = pyworld.stonemask(audio.astype(np.double), f0, t, sr)
-        # f0 = signal.medfilt(f0, 3)
-
-        # f0 *= pow(2, f0_up_key / 12)
-        # pitchf[-f0.shape[0]:] = f0[:pitchf.shape[0]]
-        f0bak = pitchf.copy()
-        f0_mel = 1127 * np.log(1 + f0bak / 700)
-        f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
-            f0_mel_max - f0_mel_min
-        ) + 1
-        f0_mel[f0_mel <= 1] = 1
-        f0_mel[f0_mel > 255] = 255
-        pitch_coarse = np.rint(f0_mel).astype(int)

-        return pitch_coarse, pitchf
+        return f0

     def extract2(self, audio, uv_interp, hop_size: int, silence_front=0):  # audio: 1d numpy array
         n_frames = int(len(audio) // hop_size) + 1
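Note: the block removed here mapped f0 in Hz onto 255 coarse bins via the mel scale, f0_mel = 1127 * ln(1 + f0/700), rescaled so [f0_min, f0_max] covers bins 1..255 with bin 1 doubling as unvoiced. A standalone version of that mapping with a worked example:

    import numpy as np

    def coarse_pitch(f0: np.ndarray, f0_min: float = 50.0, f0_max: float = 1100.0) -> np.ndarray:
        f0_mel_min = 1127 * np.log(1 + f0_min / 700)
        f0_mel_max = 1127 * np.log(1 + f0_max / 700)
        f0_mel = 1127 * np.log(1 + f0 / 700)
        f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1
        f0_mel[f0_mel <= 1] = 1          # unvoiced / below range -> bin 1
        f0_mel[f0_mel > 255] = 255       # above range -> bin 255
        return np.rint(f0_mel).astype(int)

    print(coarse_pitch(np.array([0.0, 50.0, 440.0, 1100.0])))  # [  1   1 122 255]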
@@ -75,38 +48,3 @@ class HarvestPitchExtractor(PitchExtractor):

         return f0

-    def extract_old(self, audio, pitchf, f0_up_key, sr, window, silence_front=0):
-        audio = audio.detach().cpu().numpy()
-        n_frames = int(len(audio) // window) + 1  # NOQA
-        start_frame = int(silence_front * sr / window)
-        real_silence_front = start_frame * window / sr
-
-        silence_front_offset = int(np.round(real_silence_front * sr))
-        audio = audio[silence_front_offset:]
-
-        f0_min = 50
-        f0_max = 1100
-        f0_mel_min = 1127 * np.log(1 + f0_min / 700)
-        f0_mel_max = 1127 * np.log(1 + f0_max / 700)
-
-        f0, t = pyworld.harvest(
-            audio.astype(np.double),
-            fs=sr,
-            f0_ceil=f0_max,
-            frame_period=10,
-        )
-        f0 = pyworld.stonemask(audio.astype(np.double), f0, t, sr)
-        f0 = signal.medfilt(f0, 3)
-
-        f0 *= pow(2, f0_up_key / 12)
-        pitchf[-f0.shape[0]:] = f0[:pitchf.shape[0]]
-        f0bak = pitchf.copy()
-        f0_mel = 1127 * np.log(1 + f0bak / 700)
-        f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
-            f0_mel_max - f0_mel_min
-        ) + 1
-        f0_mel[f0_mel <= 1] = 1
-        f0_mel[f0_mel > 255] = 255
-        pitch_coarse = np.rint(f0_mel).astype(int)
-
-        return pitch_coarse, pitchf
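Note: extract_old was the original Harvest path: raw F0 from pyworld.harvest, refined with stonemask, median-filtered, then shifted by f0_up_key semitones. A standalone sketch of that recipe on dummy audio (parameters copied from the removed code; the input signal is a stand-in):

    import numpy as np
    import pyworld
    import scipy.signal as signal

    sr = 16000
    audio = np.random.randn(sr).astype(np.double)      # 1 s stand-in signal

    f0, t = pyworld.harvest(audio, fs=sr, f0_ceil=1100, frame_period=10)
    f0 = pyworld.stonemask(audio, f0, t, sr)           # refine the raw estimate
    f0 = signal.medfilt(f0, 3)                         # knock out single-frame jumps
    f0 *= pow(2, 4 / 12)                               # e.g. f0_up_key=4 semitones up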
@@ -2,9 +2,6 @@ import numpy as np
 import torch
 import torch.nn as nn

-from voice_changer.utils.VoiceChangerModel import AudioInOut
-
-
 class VolumeExtractor:

     def __init__(self, hop_size: float):
@@ -56,7 +53,6 @@ class VolumeExtractor:
         mask = torch.max(mask.unfold(-1, 9, 1), -1)[0]
         mask = mask.to(device).unsqueeze(-1).unsqueeze(0)
         mask = upsample(mask, block_size).squeeze(-1)
-        print("[get_mask_from_volume_t 3]", mask.shape)
         return mask

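Note: the masking step above thresholds the frame-wise volume, dilates the result with a 9-frame max window (the unfold/max pair), and upsamples back to sample resolution. A sketch of the same idea, assuming upsample simply repeats each frame block_size times and that threshold is a linear amplitude (both assumptions, not confirmed by the diff):

    import torch

    def volume_mask(volume_t: torch.Tensor, block_size: int, threshold: float) -> torch.Tensor:
        mask = (volume_t > threshold).float()               # (n_frames,)
        mask = torch.nn.functional.pad(mask, (4, 4))        # preserve length under the 9-wide window
        mask = torch.max(mask.unfold(-1, 9, 1), -1)[0]      # dilate: any hit within +/-4 frames
        return mask.repeat_interleave(block_size)           # back to (n_frames * block_size,)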