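"""Synthesizer networks for RVC-style voice conversion.

Defines a TextEncoder over phone (and optional pitch) features, plus two
VITS-style synthesizers: SynthesizerTrnMsNSFsid (f0-conditioned, NSF decoder)
and SynthesizerTrnMsNSFsidNono (no f0, plain Generator decoder).
"""
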
import math

import torch
from torch import nn

from .rvc_models.infer_pack.models import GeneratorNSF, PosteriorEncoder, ResidualCouplingBlock, Generator

# from infer_pack import commons, attentions
from .rvc_models.infer_pack.commons import sequence_mask, rand_slice_segments, slice_segments2
from .rvc_models.infer_pack.attentions import Encoder


class TextEncoder(nn.Module):
    def __init__(
        self,
        out_channels,
        hidden_channels,
        filter_channels,
        emb_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        f0=True,
    ):
        super().__init__()
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.emb_channels = emb_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.emb_phone = nn.Linear(emb_channels, hidden_channels)
        self.lrelu = nn.LeakyReLU(0.1, inplace=True)
        if f0 is True:
            self.emb_pitch = nn.Embedding(256, hidden_channels)  # pitch 256
        self.encoder = Encoder(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout)
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, phone, pitch, lengths):
        if pitch is None:
            x = self.emb_phone(phone)
        else:
            x = self.emb_phone(phone) + self.emb_pitch(pitch)
        x = x * math.sqrt(self.hidden_channels)  # [b, t, h]
        x = self.lrelu(x)
        x = torch.transpose(x, 1, -1)  # [b, h, t]
        x_mask = torch.unsqueeze(sequence_mask(lengths, x.size(2)), 1).to(x.dtype)
        x = self.encoder(x * x_mask, x_mask)
        stats = self.proj(x) * x_mask

        m, logs = torch.split(stats, self.out_channels, dim=1)
        return m, logs, x_mask
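
# A minimal shape sketch of TextEncoder.forward, assuming emb_channels=256 and
# hidden_channels=192 (typical RVC values; not fixed by this file):
#   phone:  [b, t, 256] -> emb_phone ->              [b, t, 192]
#   pitch:  [b, t] ints in [0, 256) -> emb_pitch ->  [b, t, 192]  (summed with phone)
#   output: m, logs each [b, out_channels, t], x_mask [b, 1, t]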


class SynthesizerTrnMsNSFsid(nn.Module):
    def __init__(
        self,
        spec_channels,
        segment_size,
        inter_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        spk_embed_dim,
        gin_channels,
        emb_channels,
        sr,
        **kwargs,
    ):
        super().__init__()
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.gin_channels = gin_channels
        self.emb_channels = emb_channels
        # self.hop_length = hop_length#
        self.spk_embed_dim = spk_embed_dim
        self.enc_p = TextEncoder(
            inter_channels,
            hidden_channels,
            filter_channels,
            emb_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
        )
        self.dec = GeneratorNSF(
            inter_channels,
            resblock,
            resblock_kernel_sizes,
            resblock_dilation_sizes,
            upsample_rates,
            upsample_initial_channel,
            upsample_kernel_sizes,
            gin_channels=gin_channels,
            sr=sr,
            is_half=kwargs["is_half"],
        )
        self.enc_q = PosteriorEncoder(
            spec_channels,
            inter_channels,
            hidden_channels,
            5,
            1,
            16,
            gin_channels=gin_channels,
        )
        self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels)
        self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
        print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)

    def remove_weight_norm(self):
        self.dec.remove_weight_norm()
        self.flow.remove_weight_norm()
        self.enc_q.remove_weight_norm()

    def forward(self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds):  # ds is the speaker id, shape [bs, 1]
        # print(1,pitch.shape)#[bs,t]
        g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast later
        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
        z_p = self.flow(z, y_mask, g=g)
        z_slice, ids_slice = rand_slice_segments(z, y_lengths, self.segment_size)
        # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
        pitchf = slice_segments2(pitchf, ids_slice, self.segment_size)
        # print(-2,pitchf.shape,z_slice.shape)
        o = self.dec(z_slice, pitchf, g=g)
        return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)

    def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None, convert_length=None):
        g = self.emb_g(sid).unsqueeze(-1)
        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
        z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask  # sample from the prior at reduced temperature
        z = self.flow(z_p, x_mask, g=g, reverse=True)
        o = self.dec.infer_realtime((z * x_mask)[:, :, :max_len], nsff0, g=g, convert_length=convert_length)
        return o, x_mask, (z, z_p, m_p, logs_p)
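

# SynthesizerTrnMsNSFsidNono is the pitch-less ("nono") counterpart of
# SynthesizerTrnMsNSFsid: its TextEncoder is built with f0=False and its
# decoder is a plain Generator rather than the NSF variant, so no pitch
# sequence (pitch / pitchf / nsff0) is consumed anywhere.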
class SynthesizerTrnMsNSFsidNono(nn.Module):
    def __init__(
        self,
        spec_channels,
        segment_size,
        inter_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        spk_embed_dim,
        gin_channels,
        emb_channels,
        sr=None,
        **kwargs,
    ):
        super().__init__()
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.gin_channels = gin_channels
        self.emb_channels = emb_channels
        # self.hop_length = hop_length#
        self.spk_embed_dim = spk_embed_dim
        self.enc_p = TextEncoder(
            inter_channels,
            hidden_channels,
            filter_channels,
            emb_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
            f0=False,
        )
        self.dec = Generator(
            inter_channels,
            resblock,
            resblock_kernel_sizes,
            resblock_dilation_sizes,
            upsample_rates,
            upsample_initial_channel,
            upsample_kernel_sizes,
            gin_channels=gin_channels,
        )
        self.enc_q = PosteriorEncoder(
            spec_channels,
            inter_channels,
            hidden_channels,
            5,
            1,
            16,
            gin_channels=gin_channels,
        )
        self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels)
        self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
        print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)

    def remove_weight_norm(self):
        self.dec.remove_weight_norm()
        self.flow.remove_weight_norm()
        self.enc_q.remove_weight_norm()

    def forward(self, phone, phone_lengths, y, y_lengths, ds):  # ds is the speaker id, shape [bs, 1]
        g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast later
        m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
        z_p = self.flow(z, y_mask, g=g)
        z_slice, ids_slice = rand_slice_segments(z, y_lengths, self.segment_size)
        o = self.dec(z_slice, g=g)
        return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)

    def infer(self, phone, phone_lengths, sid, max_len=None, convert_length=None):
        g = self.emb_g(sid).unsqueeze(-1)
        m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
        z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask  # sample from the prior at reduced temperature
        z = self.flow(z_p, x_mask, g=g, reverse=True)
        o = self.dec.infer_realtime((z * x_mask)[:, :, :max_len], g=g, convert_length=convert_length)
        return o, x_mask, (z, z_p, m_p, logs_p)
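

# --- Usage sketch (illustrative only) ---
# A minimal sketch of instantiating the pitch-less model and running inference.
# The hyperparameter values below are assumptions modeled on common RVC v1 40k
# configs, not values confirmed by this file, and running this requires the
# relative `rvc_models` package to be importable.
if __name__ == "__main__":
    net = SynthesizerTrnMsNSFsidNono(
        spec_channels=1025,
        segment_size=32,
        inter_channels=192,
        hidden_channels=192,
        filter_channels=768,
        n_heads=2,
        n_layers=6,
        kernel_size=3,
        p_dropout=0.0,
        resblock="1",
        resblock_kernel_sizes=[3, 7, 11],
        resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        upsample_rates=[10, 10, 2, 2],
        upsample_initial_channel=512,
        upsample_kernel_sizes=[16, 16, 4, 4],
        spk_embed_dim=109,
        gin_channels=256,
        emb_channels=256,
    )
    net.eval()
    phone = torch.randn(1, 100, 256)         # [b, t, emb_channels] content features
    phone_lengths = torch.LongTensor([100])  # valid frames per batch item
    sid = torch.LongTensor([0])              # speaker id
    with torch.no_grad():
        audio, x_mask, _ = net.infer(phone, phone_lengths, sid)
    print(audio.shape)  # roughly [1, 1, t * prod(upsample_rates)]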