import torch
from torch import nn

# ConvNorm and LinearNorm are external helper layers; minimal sketches of
# both are provided after the classes that use them.


class Postnet(nn.Module):
    """Postnet: a stack of 1-d convolutions that predicts a residual used to
    refine the decoder's mel-spectrogram output."""

    def __init__(self, hparams):
        super(Postnet, self).__init__()
        self.convolutions = nn.ModuleList()

        # First conv: n_mel_channels -> postnet_embedding_dim, tanh gain.
        self.convolutions.append(
            nn.Sequential(
                ConvNorm(hparams.n_mel_channels,
                         hparams.postnet_embedding_dim,
                         kernel_size=hparams.postnet_kernel_size,
                         stride=1,
                         padding=int((hparams.postnet_kernel_size - 1) / 2),
                         dilation=1,
                         w_init_gain='tanh'),
                nn.BatchNorm1d(hparams.postnet_embedding_dim)))

        # Middle convs: keep postnet_embedding_dim channels, tanh gain.
        for _ in range(1, hparams.postnet_n_convolutions - 1):
            self.convolutions.append(
                nn.Sequential(
                    ConvNorm(hparams.postnet_embedding_dim,
                             hparams.postnet_embedding_dim,
                             kernel_size=hparams.postnet_kernel_size,
                             stride=1,
                             padding=int(
                                 (hparams.postnet_kernel_size - 1) / 2),
                             dilation=1,
                             w_init_gain='tanh'),
                    nn.BatchNorm1d(hparams.postnet_embedding_dim)))

        # Final conv: project back to n_mel_channels; linear gain, since no
        # tanh follows this layer.
        self.convolutions.append(
            nn.Sequential(
                ConvNorm(hparams.postnet_embedding_dim,
                         hparams.n_mel_channels,
                         kernel_size=hparams.postnet_kernel_size,
                         stride=1,
                         padding=int((hparams.postnet_kernel_size - 1) / 2),
                         dilation=1,
                         w_init_gain='linear'),
                nn.BatchNorm1d(hparams.n_mel_channels)))
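

# ConvNorm is not defined in this snippet. Below is a minimal sketch of the
# usual Tacotron 2 helper: a Conv1d wrapper whose weights get Xavier-uniform
# init scaled by the gain of the named nonlinearity. Treat it as an
# assumption modeled on NVIDIA's reference implementation, not verbatim
# upstream code.
class ConvNorm(torch.nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
                 padding=None, dilation=1, bias=True, w_init_gain='linear'):
        super(ConvNorm, self).__init__()
        if padding is None:
            # Default to "same" padding for odd kernel sizes.
            assert kernel_size % 2 == 1
            padding = int(dilation * (kernel_size - 1) / 2)
        self.conv = torch.nn.Conv1d(in_channels, out_channels,
                                    kernel_size=kernel_size, stride=stride,
                                    padding=padding, dilation=dilation,
                                    bias=bias)
        torch.nn.init.xavier_uniform_(
            self.conv.weight,
            gain=torch.nn.init.calculate_gain(w_init_gain))

    def forward(self, signal):
        # signal: (batch, channels, time)
        return self.conv(signal)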


class LocationLayer(nn.Module):
    """Turns previous and cumulative attention weights into location features
    for location-sensitive attention."""

    def __init__(self, attention_n_filters, attention_kernel_size,
                 attention_dim):
        super(LocationLayer, self).__init__()
        padding = int((attention_kernel_size - 1) / 2)
        # Two input channels: the previous attention weights and their
        # cumulative sum.
        self.location_conv = ConvNorm(2,
                                      attention_n_filters,
                                      kernel_size=attention_kernel_size,
                                      padding=padding,
                                      bias=False,
                                      stride=1,
                                      dilation=1)
        self.location_dense = LinearNorm(attention_n_filters,
                                         attention_dim,
                                         bias=False,
                                         w_init_gain='tanh')
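

# LinearNorm is the companion helper: a Linear layer with Xavier-uniform
# init. Again a minimal sketch modeled on NVIDIA's Tacotron 2, offered as an
# assumption rather than the exact upstream source.
class LinearNorm(torch.nn.Module):
    def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
        super(LinearNorm, self).__init__()
        self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
        torch.nn.init.xavier_uniform_(
            self.linear_layer.weight,
            gain=torch.nn.init.calculate_gain(w_init_gain))

    def forward(self, x):
        return self.linear_layer(x)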


class Encoder(nn.Module):
    """Encoder: a stack of 1-d convolutions followed by a bidirectional LSTM."""

    def __init__(self, hparams):
        super(Encoder, self).__init__()

        convolutions = []
        for _ in range(hparams.encoder_n_convolutions):
            conv_layer = nn.Sequential(
                ConvNorm(hparams.encoder_embedding_dim,
                         hparams.encoder_embedding_dim,
                         kernel_size=hparams.encoder_kernel_size,
                         stride=1,
                         padding=int((hparams.encoder_kernel_size - 1) / 2),
                         dilation=1,
                         w_init_gain='relu'),
                nn.BatchNorm1d(hparams.encoder_embedding_dim))
            convolutions.append(conv_layer)
        self.convolutions = nn.ModuleList(convolutions)

        # Bidirectional LSTM: half the embedding dim per direction, so the
        # concatenated output keeps encoder_embedding_dim features.
        self.lstm = nn.LSTM(hparams.encoder_embedding_dim,
                            int(hparams.encoder_embedding_dim / 2),
                            1,
                            batch_first=True,
                            bidirectional=True)
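

# Illustrative shape check for the Encoder constructor above. The hparams
# values are placeholders (SimpleNamespace stands in for the real hparams
# object), and the loop mirrors the usual forward pass, which also applies
# dropout after each ReLU during training.
def _encoder_smoke_test():
    from types import SimpleNamespace
    hparams = SimpleNamespace(encoder_n_convolutions=3,
                              encoder_embedding_dim=512,
                              encoder_kernel_size=5)
    encoder = Encoder(hparams)
    x = torch.randn(2, 512, 100)                   # (batch, channels, time)
    for conv in encoder.convolutions:
        x = torch.relu(conv(x))                    # padding keeps time length
    outputs, _ = encoder.lstm(x.transpose(1, 2))   # LSTM wants (batch, time, feat)
    assert outputs.shape == (2, 100, 512)          # 2 directions x 256 hidden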


from math import sqrt


class ConvModule(torch.nn.Module):
    """Symbol embedding followed by an encoder-style convolution stack,
    configured from a plain dict instead of an hparams object."""

    def __init__(self, config):
        super(ConvModule, self).__init__()

        self.embedding = torch.nn.Embedding(config["n_symbols"],
                                            config["symbols_embedding_dim"])
        # Xavier-style uniform init: U(-val, val) has standard deviation std.
        std = sqrt(2.0 /
                   (config["n_symbols"] + config["symbols_embedding_dim"]))
        val = sqrt(3.0) * std
        self.embedding.weight.data.uniform_(-val, val)

        convolutions = []
        for _ in range(config["encoder_n_convolutions"]):
            conv_layer = torch.nn.Sequential(
                ConvNorm(config["encoder_embedding_dim"],
                         config["encoder_embedding_dim"],
                         kernel_size=config["encoder_kernel_size"],
                         stride=1,
                         padding=int((config["encoder_kernel_size"] - 1) / 2),
                         dilation=1,
                         w_init_gain='relu'),
                torch.nn.BatchNorm1d(config["encoder_embedding_dim"]))
            convolutions.append(conv_layer)
        self.convolutions = torch.nn.ModuleList(convolutions)
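

# Illustrative usage of the dict-configured module above; the config values
# are placeholders mirroring common Tacotron 2 settings, not taken from any
# real config file.
def _conv_module_smoke_test():
    config = {"n_symbols": 148,
              "symbols_embedding_dim": 512,
              "encoder_embedding_dim": 512,
              "encoder_n_convolutions": 3,
              "encoder_kernel_size": 5}
    module = ConvModule(config)
    tokens = torch.randint(0, config["n_symbols"], (2, 50))  # (batch, time)
    # Embedding yields (batch, time, channels); Conv1d expects channels second.
    x = module.embedding(tokens).transpose(1, 2)
    for conv in module.convolutions:
        x = torch.relu(conv(x))
    assert x.shape == (2, 512, 50)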