Modification:

- Timer update
  - Diffusion SVC Performance monitor
w-okada 2023-12-21 04:11:25 +09:00
parent 0f0225cfcd
commit b215f3ba84
3 changed files with 15 additions and 10 deletions

@@ -107,18 +107,19 @@ class DiffusionSVCInferencer(Inferencer):
         silence_front: float,
         skip_diffusion: bool = True,
     ) -> torch.Tensor:
-        with Timer2("pre-process", False) as t:
+        use_timer = False
+        with Timer2(" Naive", use_timer) as t:
             gt_spec = self.naive_model_call(feats, pitch, volume, spk_id=sid, spk_mix_dict=None, aug_shift=0, spk_emb=None)
         # print("[ ----Timer::1: ]", t.secs)
-        with Timer2("pre-process", False) as t:
+        with Timer2(" Diffuser", use_timer) as t:
             if skip_diffusion == 0:
                 out_mel = self.__call__(feats, pitch, volume, spk_id=sid, spk_mix_dict=None, aug_shift=0, gt_spec=gt_spec, infer_speedup=infer_speedup, method="dpm-solver", k_step=k_step, use_tqdm=False, spk_emb=None)
                 gt_spec = out_mel
         # print("[ ----Timer::2: ]", t.secs)
-        with Timer2("pre-process", False) as t:  # NOQA
+        with Timer2(" Vocoder", use_timer) as t:  # NOQA
             if self.vocoder_onnx is None:
                 start_frame = int(silence_front * self.vocoder.vocoder_sample_rate / self.vocoder.vocoder_hop_size)
                 out_wav = self.mel2wav(gt_spec, pitch, start_frame=start_frame)

@@ -102,8 +102,9 @@ class Pipeline(object):
         protect=0.5,
         skip_diffusion=True,
     ):
+        use_timer = False
         # print("---------- pipe line --------------------")
-        with Timer2("pre-process", False) as t:
+        with Timer2("pre-process", use_timer) as t:
             audio_t = torch.from_numpy(audio).float().unsqueeze(0).to(self.device)
             audio16k = self.resamplerIn(audio_t)
             volume, mask = self.extract_volume_and_mask(audio16k, threshold=-60.0)
@@ -111,7 +112,7 @@ class Pipeline(object):
             n_frames = int(audio16k.size(-1) // self.hop_size + 1)
         # print("[Timer::1: ]", t.secs)
-        with Timer2("pre-process", False) as t:
+        with Timer2("extract pitch", use_timer) as t:
             # pitch detection
             try:
                 # pitch = self.pitchExtractor.extract(
@@ -141,7 +142,7 @@ class Pipeline(object):
             feats = feats.view(1, -1)
         # print("[Timer::2: ]", t.secs)
-        with Timer2("pre-process", False) as t:
+        with Timer2("extract feature", use_timer) as t:
             # embedding
             with autocast(enabled=self.isHalf):
                 try:
@@ -158,7 +159,7 @@ class Pipeline(object):
             feats = F.interpolate(feats.permute(0, 2, 1), size=int(n_frames), mode="nearest").permute(0, 2, 1)
         # print("[Timer::3: ]", t.secs)
-        with Timer2("pre-process", False) as t:
+        with Timer2("infer", use_timer) as t:
             # run inference
             try:
                 with torch.no_grad():
@@ -179,7 +180,7 @@ class Pipeline(object):
                 raise e
         # print("[Timer::4: ]", t.secs)
-        with Timer2("pre-process", False) as t:  # NOQA
+        with Timer2("post-process", use_timer) as t:  # NOQA
             feats_buffer = feats.squeeze(0).detach().cpu()
             if pitch is not None:
                 pitch_buffer = pitch.squeeze(0).detach().cpu()

@@ -72,6 +72,7 @@ class Timer2(object):
         if self.enable is False:
             return self
         self.current = time.time()
+        self.start = time.time()
         return self

     def record(self, lapname: str):
@@ -88,9 +89,11 @@ class Timer2(object):
     def __exit__(self, *_):
         if self.enable is False:
             return
+        self.end = time.time()
+        self.elapsed = (self.end - self.start) * 1000
         title = self.key.split("_")[-1]
-        print(f"---- {title} ----")
+        print(f"---- {title}(elapsed:{round(self.elapsed,1)}ms) ----")
         for key, val in self.storedSecs[self.key].items():
             section = key.split("_")[-1]
             milisecAvr = sum(val) / len(val) * 1000
-            print(f"{section}: {milisecAvr} msec")
+            print(f"{section}: {round(milisecAvr,1)} msec, {val[-1]}")