Example #1
def __init__(self,
             num_chars,
             num_speakers,
             r=5,
             linear_dim=1025,
             mel_dim=80,
             memory_size=5,
             attn_win=False,
             attn_norm="sigmoid",
             prenet_type="original",
             prenet_dropout=True,
             forward_attn=False,
             trans_agent=False,
             forward_attn_mask=False,
             location_attn=True,
             separate_stopnet=True):
    super(Tacotron, self).__init__()
    self.r = r
    self.mel_dim = mel_dim
    self.linear_dim = linear_dim
    self.embedding = nn.Embedding(num_chars, 256)
    self.embedding.weight.data.normal_(0, 0.3)
    if num_speakers > 1:
        self.speaker_embedding = nn.Embedding(num_speakers, 256)
        self.speaker_embedding.weight.data.normal_(0, 0.3)
    self.encoder = Encoder(256)
    self.decoder = Decoder(256, mel_dim, r, memory_size, attn_win,
                           attn_norm, prenet_type, prenet_dropout,
                           forward_attn, trans_agent, forward_attn_mask,
                           location_attn, separate_stopnet)
    self.postnet = PostCBHG(mel_dim)
    self.last_linear = nn.Linear(self.postnet.cbhg.gru_features * 2,
                                 linear_dim)
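
For orientation, a minimal instantiation sketch for this constructor. The import path below is an assumption based on the mozilla/TTS-era package layout and may differ per fork; check your checkout of the project:

    # Sketch only: the module path is assumed, not confirmed by the excerpts above.
    from TTS.models.tacotron import Tacotron  # hypothetical import path

    # num_chars depends on your symbol set; 61 is an arbitrary example value.
    model = Tacotron(num_chars=61, num_speakers=1)
    print(model.r, model.mel_dim, model.linear_dim)  # 5 80 1025 (the defaults)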
Example #2
def __init__(self,
             num_chars,
             embedding_dim=256,
             linear_dim=1025,
             mel_dim=80,
             r=5,
             padding_idx=None,
             memory_size=5,
             attn_windowing=False,
             forward_attention=False):
    super(Tacotron, self).__init__()
    self.r = r
    self.mel_dim = mel_dim
    self.linear_dim = linear_dim
    self.embedding = nn.Embedding(num_chars,
                                  embedding_dim,
                                  padding_idx=padding_idx)
    self.embedding.weight.data.normal_(0, 0.3)
    self.encoder = Encoder(embedding_dim)
    self.decoder = Decoder(256, mel_dim, r, memory_size, attn_windowing,
                           forward_attention)
    self.postnet = PostCBHG(mel_dim)
    self.last_linear = nn.Sequential(
        nn.Linear(self.postnet.cbhg.gru_features * 2, linear_dim),
        nn.Sigmoid())
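
Note the difference from Example #1: here `last_linear` ends in `nn.Sigmoid()`, which bounds the predicted linear spectrogram to (0, 1), presumably to match normalized spectrogram targets. A self-contained sketch of that output head (the 256 input width is an illustrative stand-in for `gru_features * 2`):

    import torch
    from torch import nn

    head = nn.Sequential(nn.Linear(256, 1025), nn.Sigmoid())
    x = torch.randn(4, 100, 256)   # (batch, frames, features)
    y = head(x)                    # (4, 100, 1025), every value in (0, 1)
    assert y.min() > 0 and y.max() < 1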
Example #3
    def test_in_out():
        layer = Decoder(
            in_features=256,
            memory_dim=80,
            r=2,
            memory_size=4,
            attn_windowing=False,
            attn_norm="sigmoid",
            prenet_type='original',
            prenet_dropout=True,
            forward_attn=True,
            trans_agent=True,
            forward_attn_mask=True,
            location_attn=True,
            separate_stopnet=True)
        dummy_input = T.rand(4, 8, 256)
        dummy_memory = T.rand(4, 2, 80)

        output, alignment, stop_tokens = layer(
            dummy_input, dummy_memory, mask=None)

        assert output.shape[0] == 4
        assert output.shape[1] == 1, "size not {}".format(output.shape[1])
        assert output.shape[2] == 80 * 2, "size not {}".format(output.shape[2])
        assert stop_tokens.shape[0] == 4
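
The shape assertions follow from the reduction factor: with r=2 the decoder emits r mel frames per step, so a single step produces a (4, 1, 80 * 2) output that models later unfold with `.view(B, -1, mel_dim)` (see the `forward` in Example #5). A self-contained illustration:

    import torch

    B, mel_dim, r = 4, 80, 2
    step_out = torch.rand(B, 1, mel_dim * r)  # one decoder step, r grouped frames
    mel = step_out.view(B, -1, mel_dim)       # -> (4, 2, 80): ungrouped frames
    assert mel.shape == (B, r, mel_dim)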
Example #4
    def test_in_out(self):
        layer = Decoder(in_features=256, memory_dim=80, r=2, memory_size=4, attn_windowing=False, attn_norm="sigmoid")
        dummy_input = T.rand(4, 8, 256)
        dummy_memory = T.rand(4, 2, 80)

        output, alignment, stop_tokens = layer(dummy_input, dummy_memory, mask=None)

        assert output.shape[0] == 4
        assert output.shape[1] == 1, "size not {}".format(output.shape[1])
        assert output.shape[2] == 80 * 2, "size not {}".format(output.shape[2])
        assert stop_tokens.shape[0] == 4
Example #5
File: tacotron.py | Project: zbloss/TTS
class Tacotron(nn.Module):
    def __init__(self,
                 num_chars,
                 r=5,
                 linear_dim=1025,
                 mel_dim=80,
                 memory_size=5,
                 attn_win=False,
                 attn_norm="sigmoid",
                 prenet_type="original",
                 prenet_dropout=True,
                 forward_attn=False,
                 trans_agent=False,
                 location_attn=True,
                 separate_stopnet=True):
        super(Tacotron, self).__init__()
        self.r = r
        self.mel_dim = mel_dim
        self.linear_dim = linear_dim
        self.embedding = nn.Embedding(num_chars, 256)
        self.embedding.weight.data.normal_(0, 0.3)
        self.encoder = Encoder(256)
        self.decoder = Decoder(256, mel_dim, r, memory_size, attn_win,
                               attn_norm, prenet_type, prenet_dropout,
                               forward_attn, trans_agent, location_attn,
                               separate_stopnet)
        self.postnet = PostCBHG(mel_dim)
        self.last_linear = nn.Sequential(
            nn.Linear(self.postnet.cbhg.gru_features * 2, linear_dim),
            nn.Sigmoid())

    def forward(self, characters, text_lengths, mel_specs):
        B = characters.size(0)
        mask = sequence_mask(text_lengths).to(characters.device)
        inputs = self.embedding(characters)
        encoder_outputs = self.encoder(inputs)
        mel_outputs, alignments, stop_tokens = self.decoder(
            encoder_outputs, mel_specs, mask)
        mel_outputs = mel_outputs.view(B, -1, self.mel_dim)
        linear_outputs = self.postnet(mel_outputs)
        linear_outputs = self.last_linear(linear_outputs)
        return mel_outputs, linear_outputs, alignments, stop_tokens

    def inference(self, characters):
        B = characters.size(0)
        inputs = self.embedding(characters)
        encoder_outputs = self.encoder(inputs)
        mel_outputs, alignments, stop_tokens = self.decoder.inference(
            encoder_outputs)
        mel_outputs = mel_outputs.view(B, -1, self.mel_dim)
        linear_outputs = self.postnet(mel_outputs)
        linear_outputs = self.last_linear(linear_outputs)
        return mel_outputs, linear_outputs, alignments, stop_tokens
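
`forward` builds a padding mask with `sequence_mask(text_lengths)` before decoding. The project's helper isn't shown in these excerpts; a typical implementation of such a mask (a sketch, not necessarily this repo's exact code) is:

    import torch

    def sequence_mask(lengths, max_len=None):
        # True at valid positions, False at padding.
        if max_len is None:
            max_len = int(lengths.max())
        ids = torch.arange(max_len, device=lengths.device)
        return ids.unsqueeze(0) < lengths.unsqueeze(1)  # (batch, max_len)

    print(sequence_mask(torch.tensor([3, 1])))
    # tensor([[ True,  True,  True],
    #         [ True, False, False]])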
Example #6
    def test_in_out(self):
        layer = Decoder(in_features=256, memory_dim=80, r=2)
        dummy_input = T.rand(4, 8, 256)
        dummy_memory = T.rand(4, 2, 80)

        output, alignment, stop_tokens = layer(dummy_input, dummy_memory)

        assert output.shape[0] == 4
        assert output.shape[1] == 1, "size not {}".format(output.shape[1])
        assert output.shape[2] == 80 * 2, "size not {}".format(output.shape[2])
        assert stop_tokens.shape[0] == 4
        assert stop_tokens.max() <= 1.0
        assert stop_tokens.min() >= 0
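
The two extra assertions check that stop tokens are valid probabilities, which suggests this Decoder version squashes the stopnet output with a sigmoid internally. A minimal sketch of such a head (names and sizes are illustrative, not taken from the repo):

    import torch
    from torch import nn

    stopnet = nn.Sequential(nn.Linear(256, 1), nn.Sigmoid())
    stop = stopnet(torch.randn(4, 256))  # per-item stop probability
    assert stop.min() >= 0 and stop.max() <= 1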
Example #7
File: tacotron.py | Project: vlinhd11/TTS
def __init__(self,
             embedding_dim=256,
             linear_dim=1025,
             mel_dim=80,
             r=5,
             padding_idx=None):
    super(Tacotron, self).__init__()
    self.r = r
    self.mel_dim = mel_dim
    self.linear_dim = linear_dim
    self.embedding = nn.Embedding(len(symbols),
                                  embedding_dim,
                                  padding_idx=padding_idx)
    print(" | > Number of characters : {}".format(len(symbols)))
    self.embedding.weight.data.normal_(0, 0.3)
    self.encoder = Encoder(embedding_dim)
    self.decoder = Decoder(256, mel_dim, r)
    self.postnet = CBHG(mel_dim, K=8, projections=[256, mel_dim])
    self.last_linear = nn.Linear(mel_dim * 2, linear_dim)
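
Here the postnet is a raw CBHG and `last_linear` takes `mel_dim * 2` inputs; the factor of 2 comes from the CBHG's bidirectional GRU, whose forward and backward states are concatenated (the same reason other examples size the head as `gru_features * 2`). A self-contained illustration of the doubling:

    import torch
    from torch import nn

    gru = nn.GRU(input_size=80, hidden_size=80,
                 bidirectional=True, batch_first=True)
    out, _ = gru(torch.randn(4, 100, 80))
    assert out.shape[-1] == 80 * 2  # both directions concatenated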
Example #8
File: tacotron.py | Project: yweweler/TTS
def __init__(self,
             embedding_dim=256,
             linear_dim=1025,
             mel_dim=80,
             r=5,
             padding_idx=None):
    super(Tacotron, self).__init__()
    self.r = r
    self.mel_dim = mel_dim
    self.linear_dim = linear_dim
    self.embedding = nn.Embedding(
        len(symbols), embedding_dim, padding_idx=padding_idx)
    print(" | > Number of characters : {}".format(len(symbols)))
    self.embedding.weight.data.normal_(0, 0.3)
    self.encoder = Encoder(embedding_dim)
    self.decoder = Decoder(256, mel_dim, r)
    self.postnet = PostCBHG(mel_dim)
    self.last_linear = nn.Sequential(
        nn.Linear(self.postnet.cbhg.gru_features * 2, linear_dim),
        nn.Sigmoid())
Example #9
class TacotronGST(nn.Module):
    def __init__(self,
                 num_chars,
                 num_speakers,
                 r=5,
                 linear_dim=1025,
                 mel_dim=80,
                 memory_size=5,
                 attn_win=False,
                 attn_norm="sigmoid",
                 prenet_type="original",
                 prenet_dropout=True,
                 forward_attn=False,
                 trans_agent=False,
                 forward_attn_mask=False,
                 location_attn=True,
                 separate_stopnet=True):
        super(TacotronGST, self).__init__()
        self.r = r
        self.mel_dim = mel_dim
        self.linear_dim = linear_dim
        self.embedding = nn.Embedding(num_chars, 256)
        self.embedding.weight.data.normal_(0, 0.3)
        if num_speakers > 1:
            self.speaker_embedding = nn.Embedding(num_speakers, 256)
            self.speaker_embedding.weight.data.normal_(0, 0.3)
        self.encoder = Encoder(256)
        self.gst = GST(num_mel=80,
                       num_heads=4,
                       num_style_tokens=10,
                       embedding_dim=256)
        self.decoder = Decoder(256, mel_dim, r, memory_size, attn_win,
                               attn_norm, prenet_type, prenet_dropout,
                               forward_attn, trans_agent, forward_attn_mask,
                               location_attn, separate_stopnet)
        self.postnet = PostCBHG(mel_dim)
        self.last_linear = nn.Sequential(
            nn.Linear(self.postnet.cbhg.gru_features * 2, linear_dim),
            nn.Sigmoid())

    def forward(self, characters, text_lengths, mel_specs, speaker_ids=None):
        B = characters.size(0)
        mask = sequence_mask(text_lengths).to(characters.device)
        inputs = self.embedding(characters)
        encoder_outputs = self.encoder(inputs)
        encoder_outputs = self._add_speaker_embedding(encoder_outputs,
                                                      speaker_ids)
        gst_outputs = self.gst(mel_specs)
        gst_outputs = gst_outputs.expand(-1, encoder_outputs.size(1), -1)
        encoder_outputs = encoder_outputs + gst_outputs
        mel_outputs, alignments, stop_tokens = self.decoder(
            encoder_outputs, mel_specs, mask)
        mel_outputs = mel_outputs.view(B, -1, self.mel_dim)
        linear_outputs = self.postnet(mel_outputs)
        linear_outputs = self.last_linear(linear_outputs)
        return mel_outputs, linear_outputs, alignments, stop_tokens

    def inference(self, characters, speaker_ids=None, style_mel=None):
        B = characters.size(0)
        inputs = self.embedding(characters)
        encoder_outputs = self.encoder(inputs)
        encoder_outputs = self._add_speaker_embedding(encoder_outputs,
                                                      speaker_ids)
        if style_mel is not None:
            gst_outputs = self.gst(style_mel)
            gst_outputs = gst_outputs.expand(-1, encoder_outputs.size(1), -1)
            encoder_outputs = encoder_outputs + gst_outputs
        mel_outputs, alignments, stop_tokens = self.decoder.inference(
            encoder_outputs)
        mel_outputs = mel_outputs.view(B, -1, self.mel_dim)
        linear_outputs = self.postnet(mel_outputs)
        linear_outputs = self.last_linear(linear_outputs)
        return mel_outputs, linear_outputs, alignments, stop_tokens

    def _add_speaker_embedding(self, encoder_outputs, speaker_ids):
        if hasattr(self, "speaker_embedding") and speaker_ids is not None:
            speaker_embeddings = self.speaker_embedding(speaker_ids)

            speaker_embeddings.unsqueeze_(1)
            speaker_embeddings = speaker_embeddings.expand(
                encoder_outputs.size(0), encoder_outputs.size(1), -1)
            encoder_outputs = encoder_outputs + speaker_embeddings
        return encoder_outputs
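
Both the GST output and the speaker embedding are injected the same way: a per-utterance (batch, 1, dim) vector is expanded along the time axis and added to every encoder step. A self-contained sketch of that broadcast pattern:

    import torch

    B, T, D = 4, 37, 256
    encoder_outputs = torch.randn(B, T, D)
    speaker_emb = torch.randn(B, D)

    speaker_emb = speaker_emb.unsqueeze(1)      # (B, 1, D)
    speaker_emb = speaker_emb.expand(B, T, -1)  # repeated view over time, no copy
    encoder_outputs = encoder_outputs + speaker_emb
    assert encoder_outputs.shape == (B, T, D)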