Example #1
    def __init__(self, input_size, hidden_size, latent_size):
        super().__init__()
        self.samplelayer = NormalDistributed(latent_size=latent_size)
        self.linear = nn.Linear(input_size, hidden_size)
        self.batch_norm = nn.BatchNorm1d(hidden_size, momentum=0.1)
        self.h2z = nn.Linear(hidden_size, self.samplelayer.inputShape()[-1])
        self.elu = nn.ELU()
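
All of these snippets size `h2z` from `self.samplelayer.inputShape()[-1]`. The commented-out code in Example #7 (`mu, logvar = h.chunk(2, 1)`) suggests that input is mean and log-variance concatenated, i.e. twice the latent size. A minimal sketch of such a sampling layer, assuming the reparameterization trick and the `(z, qz)` return convention seen in the forward passes below; the repo's actual NormalDistributed may differ:

import torch
import torch.nn as nn
from torch.distributions import Normal

class NormalDistributedSketch(nn.Module):
    """Hypothetical stand-in for the repo's NormalDistributed layer."""
    def __init__(self, latent_size):
        super().__init__()
        self.latent_size = latent_size

    def inputShape(self):
        # mu and logvar concatenated on the last axis
        return (-1, 2 * self.latent_size)

    def outputShape(self):
        return (-1, self.latent_size)

    def forward(self, h):
        mu, logvar = h.chunk(2, dim=-1)
        std = (0.5 * logvar).exp()
        qz = Normal(mu, std)
        z = mu + std * torch.randn_like(std)  # reparameterized sample
        return z, qz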
Example #2
class HybridVAESmall(nn.Module):
    def __init__(self, input_size, conv_size, rnn_size, latent_size, output_size, use_softmax=False):
        """
        Layer definitions
        """
        super().__init__()
        self.aux_loss = True
        self.kl_loss = False
        # sample layer with normal distribution
        self.samplelayer = NormalDistributed(latent_size=latent_size)
        # encode from input space to hidden space
        self.encoder = CNNEncoderSmall(input_size=input_size, conv_size=conv_size)
        # encoded to latent layer
        self.h2z = nn.Sequential(
            nn.Linear(conv_size, self.samplelayer.inputShape()[-1]),
            nn.ELU()
        )
        # latent to decoded layer
        self.z2h = nn.Sequential(
            nn.Linear(self.samplelayer.outputShape()[-1], conv_size),
            nn.ELU()
        )
        # decode from hidden space to input space
        self.decoder = HybridDecoderSmall(input_size=input_size, conv_size=conv_size,
                                          rnn_size=rnn_size, output_size=output_size,
                                          use_softmax=use_softmax)

    def forward(self, x):
        num_steps = x.size(0)
        enc = self.encoder(x)
        enc = self.h2z(enc)
        z, qz = self.samplelayer(enc)
        dec = self.z2h(z)
        dec, aux_x = self.decoder(dec, num_steps)
        return dec, qz, aux_x
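
A hedged usage sketch for HybridVAESmall: `num_steps = x.size(0)` implies the input is laid out as (num_steps, batch, input_size), and CNNEncoderSmall, HybridDecoderSmall and NormalDistributed are assumed to come from the surrounding repo; all sizes below are illustrative:

import torch

model = HybridVAESmall(input_size=64, conv_size=128, rnn_size=256,
                       latent_size=32, output_size=64)
x = torch.randn(100, 16, 64)   # assumed (num_steps, batch, input_size)
dec, qz, aux_x = model(x)      # reconstruction, posterior, auxiliary prediction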
Example #3
    def __init__(self, input_size, hidden_size, latent_size):
        super().__init__()
        self.samplelayer = NormalDistributed(latent_size=latent_size)
        self.conv = nn.Conv1d(in_channels=input_size,
                              out_channels=hidden_size,
                              kernel_size=3)
        self.batch_norm = nn.BatchNorm1d(hidden_size, momentum=0.1)
        self.h2z = nn.Linear(hidden_size, self.samplelayer.inputShape()[-1])
        self.elu = nn.ELU()
Example #4
    def __init__(self, input_size, hidden_size, latent_size):
        super().__init__()
        self.samplelayer1 = GaussianMerge(latent_size=latent_size)
        self.linear1 = nn.Linear(input_size, hidden_size)
        self.batch_norm1 = nn.BatchNorm1d(hidden_size, momentum=0.1)
        self.h2z1 = nn.Linear(hidden_size, self.samplelayer1.inputShape()[-1])

        self.samplelayer2 = NormalDistributed(latent_size=latent_size)
        self.linear2 = nn.Linear(input_size, hidden_size)
        self.batch_norm2 = nn.BatchNorm1d(hidden_size, momentum=0.1)
        self.h2z2 = nn.Linear(hidden_size, self.samplelayer2.inputShape()[-1])

        self.elu = nn.ELU()
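
GaussianMerge itself is not shown in these examples; in ladder VAEs the usual operation is a precision-weighted merge of the encoder and decoder Gaussians (Sønderby et al., 2016). A sketch of that computation, which may differ from the repo's implementation:

def gaussian_merge(mu1, logvar1, mu2, logvar2):
    # combine two Gaussians by precision weighting
    prec1, prec2 = (-logvar1).exp(), (-logvar2).exp()
    mu = (mu1 * prec1 + mu2 * prec2) / (prec1 + prec2)
    var = 1.0 / (prec1 + prec2)
    return mu, var.log()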
Example #5
class LadderDecoder(nn.Module):
    def __init__(self, input_size, hidden_size, latent_size):
        super().__init__()
        self.samplelayer1 = GaussianMerge(latent_size=latent_size)
        self.linear1 = nn.Linear(input_size, hidden_size)
        self.batch_norm1 = nn.BatchNorm1d(hidden_size, momentum=0.1)
        self.h2z1 = nn.Linear(hidden_size, self.samplelayer1.inputShape()[-1])

        self.samplelayer2 = NormalDistributed(latent_size=latent_size)
        self.linear2 = nn.Linear(input_size, hidden_size)
        self.batch_norm2 = nn.BatchNorm1d(hidden_size, momentum=0.1)
        self.h2z2 = nn.Linear(hidden_size, self.samplelayer2.inputShape()[-1])

        self.elu = nn.ELU()

    def forward(self, x, l_qz=None):
        if l_qz is not None:
            # sample from encoder layer and then merge
            z = self.linear1(x)
            z = self.batch_norm1(z.transpose(1, 2)).transpose(1, 2)
            z = self.elu(z)
            z = self.h2z1(z)
            z1, qz1 = self.samplelayer1(z, l_qz.mu, l_qz.logvar)
        # sample from decoder
        z = self.linear2(x)
        z = self.batch_norm2(z.transpose(1, 2)).transpose(1, 2)
        z = self.elu(z)
        z = self.h2z2(z)
        z2, qz2 = self.samplelayer2(z)

        if l_qz is None:
            return z2
        else:
            return z2, (z1, qz1, qz2)
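
The two call modes of LadderDecoder, sketched with hypothetical sizes; `l_qz` is assumed to be a distribution-like object exposing `.mu` and `.logvar`, as the merge call above suggests, and `enc_qz` stands in for a posterior produced by a matching encoder:

dec = LadderDecoder(input_size=32, hidden_size=64, latent_size=16)
x = torch.randn(8, 10, 32)                 # assumed (batch, steps, features)

z2 = dec(x)                                # generative path: plain sample only
z2, (z1, qz1, qz2) = dec(x, l_qz=enc_qz)   # inference path: merge with the encoder posterior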
Example #6
    def __init__(self, input_size, hidden_size, latent_size):
        super().__init__()
        self.samplelayer1 = GaussianMerge(latent_size=latent_size)
        self.conv1 = nn.ConvTranspose1d(in_channels=input_size,
                                        out_channels=hidden_size,
                                        kernel_size=3)
        self.batch_norm1 = nn.BatchNorm1d(hidden_size, momentum=0.1)
        self.h2z1 = nn.Linear(hidden_size, self.samplelayer1.inputShape()[-1])

        self.samplelayer2 = NormalDistributed(latent_size=latent_size)
        self.conv2 = nn.ConvTranspose1d(in_channels=input_size,
                                        out_channels=hidden_size,
                                        kernel_size=3)
        self.batch_norm2 = nn.BatchNorm1d(hidden_size, momentum=0.1)
        self.h2z2 = nn.Linear(hidden_size, self.samplelayer2.inputShape()[-1])

        self.elu = nn.ELU()
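
This fragment mirrors the convolutional encoder with ConvTranspose1d: with kernel_size=3, stride 1 and no padding, the length dimension grows by two, undoing the encoder's shrinkage. For instance:

import torch
import torch.nn as nn

deconv = nn.ConvTranspose1d(in_channels=32, out_channels=64, kernel_size=3)
y = deconv(torch.randn(4, 32, 8))   # length 8 -> (8 - 1) + 3 = 10
print(y.shape)                      # torch.Size([4, 64, 10])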
Example #7
class CNNVAE(nn.Module):
    def __init__(self, input_size, conv_size, latent_size, output_size, use_softmax=False):
        """
        Layer definitions
        """
        super().__init__()
        self.aux_loss = False
        self.kl_loss = False
        # sample layer with normal distribution
        self.samplelayer = NormalDistributed(latent_size=latent_size)
        # encode from input space to hidden space
        self.encoder = CNNEncoder(input_size=input_size, conv_size=conv_size)
        # encoded to latent layer
        self.h2z = nn.Sequential(
            nn.Linear(conv_size, self.samplelayer.inputShape()[-1]),
            nn.ELU()
        )
        # latent to decoded layer
        self.z2h = nn.Sequential(
            nn.Linear(self.samplelayer.outputShape()[-1], conv_size),
            nn.ELU()
        )
        # decode from hidden space to input space
        self.decoder = CNNDecoder(input_size=input_size, conv_size=conv_size,
                                  output_size=output_size, use_softmax=use_softmax)

    def forward(self, x):
        enc = self.encoder(x)
        enc = self.h2z(enc)
        z, qz = self.samplelayer(enc)
        dec = self.z2h(z)
        dec = self.decoder(dec)
        return dec, qz

    def sample(self, z):
        '''
        h = Variable(torch.zeros(num_samples, self.samplelayer.inputShape()[-1]))
        mu, logvar = h.chunk(2, 1)
        qz = Normal(mu, logvar)
        z = qz.sample()
        '''
        dec = self.z2h(z)
        dec = self.decoder(dec)
        return dec
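
Sampling from the prior with CNNVAE, as a hedged sketch: draw z with width `latent_size` (matching `outputShape()[-1]`) and decode; sizes are illustrative:

model = CNNVAE(input_size=64, conv_size=128, latent_size=32, output_size=64)
model.eval()
with torch.no_grad():
    z = torch.randn(4, 32)       # 4 draws from a standard-normal prior
    samples = model.sample(z)    # decode latent codes back to the output space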
Example #8
    def __init__(self, input_size, rnn_size, latent_size, output_size, use_softmax=False):
        """
        Layer definitions
        """
        super().__init__()
        self.aux_loss = False
        self.kl_loss = False
        # sample layer with normal distribution
        self.samplelayer = NormalDistributed(latent_size=latent_size)
        # encode from input space to hidden space
        self.encoder = RNNEncoder(input_size=input_size, rnn_size=rnn_size)
        # encoded to latent layer
        self.h2z = nn.Sequential(
            nn.Linear(rnn_size, self.samplelayer.inputShape()[-1]),
            nn.ELU()
        )
        # latent to decoded layer
        self.z2h = nn.Sequential(
            nn.Linear(self.samplelayer.outputShape()[-1], rnn_size),
            nn.ELU()
        )
        # decode from hidden space to input space
        self.decoder = RNNDecoder(input_size=input_size, rnn_size=rnn_size,
                                  output_size=output_size, use_softmax=use_softmax)
Example #9
class LadderEncoder(nn.Module):
    def __init__(self, input_size, hidden_size, latent_size):
        super().__init__()
        self.samplelayer = NormalDistributed(latent_size=latent_size)
        self.linear = nn.Linear(input_size, hidden_size)
        self.batch_norm = nn.BatchNorm1d(hidden_size, momentum=0.1)
        self.h2z = nn.Linear(hidden_size, self.samplelayer.inputShape()[-1])
        self.elu = nn.ELU()

    def forward(self, x):
        x = self.linear(x)
        x = self.batch_norm(x.transpose(1, 2)).transpose(1, 2)
        x = self.elu(x)
        z_in = self.h2z(x)
        z, qz = self.samplelayer(z_in)
        return x, z, qz
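
Ladder encoders are typically stacked so that each layer's deterministic output `x` feeds the next encoder while `(z, qz)` is collected for the KL terms. A minimal sketch under that assumption (sizes hypothetical):

encoders = nn.ModuleList([
    LadderEncoder(input_size=64, hidden_size=128, latent_size=32),
    LadderEncoder(input_size=128, hidden_size=256, latent_size=16),
])
h = torch.randn(8, 10, 64)     # assumed (batch, steps, features)
latents = []
for enc in encoders:
    h, z, qz = enc(h)          # hidden state climbs the ladder
    latents.append((z, qz))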
Example #10
class LadderCNNEncoder(nn.Module):
    def __init__(self, input_size, hidden_size, latent_size):
        super().__init__()
        self.samplelayer = NormalDistributed(latent_size=latent_size)
        self.conv = nn.Conv1d(in_channels=input_size,
                              out_channels=hidden_size,
                              kernel_size=3)
        self.batch_norm = nn.BatchNorm1d(hidden_size, momentum=0.1)
        self.h2z = nn.Linear(hidden_size, self.samplelayer.inputShape()[-1])
        self.elu = nn.ELU()

    def forward(self, x):
        x = x.transpose(0, 1).transpose(1, 2)
        x = self.conv(x).transpose(2, 1).transpose(1, 0)
        x = self.batch_norm(x.transpose(1, 2)).transpose(1, 2)
        x = self.elu(x)
        z_in = self.h2z(x)
        z, qz = self.samplelayer(z_in)
        return x, z, qz
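
The transposes move an assumed (steps, batch, features) input into Conv1d's (batch, channels, length) layout and back; with kernel_size=3 and no padding the step dimension shrinks by two. A shape trace under those assumptions:

enc = LadderCNNEncoder(input_size=64, hidden_size=128, latent_size=32)
x = torch.randn(10, 4, 64)     # assumed (steps, batch, features)
h, z, qz = enc(x)
print(h.shape)                 # torch.Size([8, 4, 128]): 10 steps -> 8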