Example #1
    def __init__(self):
        super(LocationLayer, self).__init__()
        kernel_size = 31
        padding = int((kernel_size - 1) / 2)  # "same" padding for the 31-tap filter
        # Two input channels: the previous and the cumulative attention weights.
        self.location_conv = ConvNorm(2, 32,
                                      kernel_size=kernel_size,
                                      padding=padding,
                                      bias=False, stride=1, dilation=1)
        # Project the 32 location features into the 128-dim attention space.
        self.location_dense = LinearNorm(32, 128,
                                         bias=False, w_init_gain='tanh')
Example #2
	def __init__(self, attention_n_filters, attention_kernel_size,
				 attention_dim):
		super(LocationLayer, self).__init__()
		padding = int((attention_kernel_size - 1) / 2)
		# Same two-channel location convolution, now parameterized by hyperparameters.
		self.location_conv = ConvNorm(2, attention_n_filters,
									  kernel_size=attention_kernel_size,
									  padding=padding, bias=False, stride=1,
									  dilation=1)
		self.location_dense = LinearNorm(attention_n_filters, attention_dim,
										 bias=False, w_init_gain='tanh')
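
Both snippets define only the constructor; in the reference Tacotron 2 code the layer is driven by a short forward pass that convolves the stacked attention weights and projects them into the attention space. The following is a minimal, self-contained sketch of that idea, with plain nn.Conv1d / nn.Linear standing in for ConvNorm / LinearNorm (thin wrappers that mainly add Xavier initialization); the name LocationLayerSketch and the default sizes are illustrative, not taken from the snippets above.

    import torch
    import torch.nn as nn

    class LocationLayerSketch(nn.Module):
        # Stand-in: plain Conv1d/Linear replace the ConvNorm/LinearNorm wrappers.
        def __init__(self, n_filters=32, kernel_size=31, attention_dim=128):
            super().__init__()
            padding = (kernel_size - 1) // 2
            self.location_conv = nn.Conv1d(2, n_filters, kernel_size,
                                           padding=padding, bias=False)
            self.location_dense = nn.Linear(n_filters, attention_dim, bias=False)

        def forward(self, attention_weights_cat):
            # attention_weights_cat: (batch, 2, max_time) -- previous and
            # cumulative attention weights stacked on the channel axis.
            processed = self.location_conv(attention_weights_cat)  # (B, 32, T)
            processed = processed.transpose(1, 2)                  # (B, T, 32)
            return self.location_dense(processed)                  # (B, T, 128)

    # e.g. LocationLayerSketch()(torch.zeros(4, 2, 100)) -> shape (4, 100, 128)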
Example #3
    def __init__(self):
        super(Postnet, self).__init__()
        self.convolutions = nn.ModuleList()

        # First block: n_mel_channels -> postnet_embedding_dim with tanh gain.
        self.convolutions.append(
            nn.Sequential(
                ConvNorm(hparams.n_mel_channels,
                         hparams.postnet_embedding_dim,
                         kernel_size=hparams.postnet_kernel_size,
                         stride=1,
                         padding=int((hparams.postnet_kernel_size - 1) / 2),
                         dilation=1,
                         w_init_gain='tanh'),
                nn.BatchNorm1d(hparams.postnet_embedding_dim)))

        # Intermediate blocks keep the embedding width.
        for i in range(1, hparams.postnet_n_convolutions - 1):
            self.convolutions.append(
                nn.Sequential(
                    ConvNorm(hparams.postnet_embedding_dim,
                             hparams.postnet_embedding_dim,
                             kernel_size=hparams.postnet_kernel_size,
                             stride=1,
                             padding=int(
                                 (hparams.postnet_kernel_size - 1) / 2),
                             dilation=1,
                             w_init_gain='tanh'),
                    nn.BatchNorm1d(hparams.postnet_embedding_dim)))

        # Final block projects back to n_mel_channels with linear gain.
        self.convolutions.append(
            nn.Sequential(
                ConvNorm(hparams.postnet_embedding_dim,
                         hparams.n_mel_channels,
                         kernel_size=hparams.postnet_kernel_size,
                         stride=1,
                         padding=int((hparams.postnet_kernel_size - 1) / 2),
                         dilation=1,
                         w_init_gain='linear'),
                nn.BatchNorm1d(hparams.n_mel_channels)))
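
The module list is only half the story; in the NVIDIA Tacotron 2 implementation the forward pass applies tanh and dropout after every block except the last one, which stays linear. Below is a minimal runnable sketch of that pattern; PostnetSketch is a made-up name, and plain nn.Conv1d stands in for ConvNorm (so the Xavier initialization is omitted).

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class PostnetSketch(nn.Module):
        # Stand-in PostNet: Conv1d + BatchNorm1d instead of the ConvNorm wrapper.
        def __init__(self, n_mels=80, dim=512, kernel_size=5, n_convs=5):
            super().__init__()
            pad = (kernel_size - 1) // 2
            channels = [n_mels] + [dim] * (n_convs - 1) + [n_mels]
            self.convolutions = nn.ModuleList(
                nn.Sequential(nn.Conv1d(c_in, c_out, kernel_size, padding=pad),
                              nn.BatchNorm1d(c_out))
                for c_in, c_out in zip(channels[:-1], channels[1:]))

        def forward(self, x):
            # tanh + dropout on every block except the final, linear one.
            for conv in self.convolutions[:-1]:
                x = F.dropout(torch.tanh(conv(x)), 0.5, self.training)
            return F.dropout(self.convolutions[-1](x), 0.5, self.training)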
Example #4
    def __init__(self):
        super(PostNet, self).__init__()
        kernel_size = 5
        padding = int((kernel_size - 1) / 2)

        self.convolutions = nn.ModuleList()

        # First block: 80 mel channels -> 512 postnet channels.
        self.convolutions.append(
            nn.Sequential(
                ConvNorm(80,
                         512,
                         kernel_size=kernel_size,
                         stride=1,
                         padding=padding,
                         dilation=1,
                         w_init_gain='tanh'), nn.BatchNorm1d(512)))

        # Three intermediate 512 -> 512 blocks.
        for i in range(3):
            self.convolutions.append(
                nn.Sequential(
                    ConvNorm(512,
                             512,
                             kernel_size=kernel_size,
                             padding=padding,
                             stride=1,
                             dilation=1,
                             w_init_gain='tanh'), nn.BatchNorm1d(512)))

        # Final block back to 80 mel channels.
        self.convolutions.append(
            nn.Sequential(
                ConvNorm(512,
                         80,
                         kernel_size=kernel_size,
                         padding=padding,
                         stride=1,
                         dilation=1), nn.BatchNorm1d(80)))
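
In both PostNet variants the output is a residual correction rather than a spectrogram in its own right; Tacotron 2-style models add it back onto the decoder output along these lines (variable names are illustrative):

    # mel_outputs: decoder prediction, shape (batch, n_mel_channels, time)
    mel_outputs_postnet = mel_outputs + postnet(mel_outputs)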
Example #5
	def __init__(self):
		super(Encoder, self).__init__()

		convolutions = []
		# Stack of same-width convolutions with ReLU-oriented initialization.
		for _ in range(hps.encoder_n_convolutions):
			conv_layer = nn.Sequential(
				ConvNorm(hps.encoder_embedding_dim,
						 hps.encoder_embedding_dim,
						 kernel_size=hps.encoder_kernel_size, stride=1,
						 padding=int((hps.encoder_kernel_size - 1) / 2),
						 dilation=1, w_init_gain='relu'),
				nn.BatchNorm1d(hps.encoder_embedding_dim))
			convolutions.append(conv_layer)
		self.convolutions = nn.ModuleList(convolutions)

		# Bidirectional LSTM; each direction gets half the width, so the
		# concatenated output is encoder_embedding_dim again.
		self.lstm = nn.LSTM(hps.encoder_embedding_dim,
							int(hps.encoder_embedding_dim / 2), 1,
							batch_first=True, bidirectional=True)
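
As with the other examples, only the constructor is shown. A forward pass in the style of the reference Tacotron 2 encoder, assuming the self.convolutions and self.lstm attributes built above, looks roughly like this (a sketch, not necessarily the exact code of this repository):

    import torch.nn.functional as F
    from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

    def forward(self, x, input_lengths):
        # x: (batch, encoder_embedding_dim, max_text_len) character embeddings.
        for conv in self.convolutions:
            x = F.dropout(F.relu(conv(x)), 0.5, self.training)
        x = x.transpose(1, 2)  # (batch, time, channels) for the LSTM
        # Pack so the bidirectional LSTM skips the padded positions.
        x = pack_padded_sequence(x, input_lengths.cpu(), batch_first=True)
        self.lstm.flatten_parameters()
        outputs, _ = self.lstm(x)
        outputs, _ = pad_packed_sequence(outputs, batch_first=True)
        return outputs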