Example #1
    def decoder(state, training=True):
        x = tf.reshape(state, [batch_size, -1])
        x = lyr.dense('decoder.dense1.matrix', 'decoder.dense1.bias',
                      'decoder', latent_dim, 512, x)
        x = tf.nn.leaky_relu(x)
        x = lyr.batchnorm(x, 'decoder.batchnorm1.offset',
                          'decoder.batchnorm1.scale', 'decoder')

        x = lyr.dense('decoder.dense2.matrix', 'decoder.dense2.bias',
                      'decoder', 512, 512, x)
        x = tf.nn.leaky_relu(x)
        x = lyr.batchnorm(x, 'decoder.batchnorm2.offset',
                          'decoder.batchnorm2.scale', 'decoder')

        x = lyr.dense('decoder.dense3.matrix', 'decoder.dense3.bias',
                      'decoder', 512, 256, x)
        x = tf.nn.leaky_relu(x)
        x = lyr.batchnorm(x, 'decoder.batchnorm3.offset',
                          'decoder.batchnorm3.scale', 'decoder')

        x = lyr.dense('decoder.dense4.matrix', 'decoder.dense4.bias',
                      'decoder', 256, max_size * encode_length, x)
        x = tf.reshape(x, [batch_size, max_size, encode_length])
        x = lyr.batchnorm(x, 'decoder.batchnorm4.offset',
                          'decoder.batchnorm4.scale', 'decoder')

        return x
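
`lyr.dense` and `lyr.batchnorm` are project-local helpers whose definitions are not shown in this example. As a rough idea only, a wrapper matching the `lyr.dense` call signature above could look like the following TF1 sketch (the initializers and reuse handling are assumptions):

import tensorflow as tf

def dense(matrix_name, bias_name, scope, in_dim, out_dim, x):
    # Hypothetical helper: create named weight/bias variables under `scope`
    # and apply a plain affine transformation.
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        w = tf.get_variable(matrix_name, shape=[in_dim, out_dim],
                            initializer=tf.glorot_uniform_initializer())
        b = tf.get_variable(bias_name, shape=[out_dim],
                            initializer=tf.zeros_initializer())
    return tf.matmul(x, w) + b
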
Example #2
def discriminator(inp, reuse=False):
    with tf.variable_scope('Encoder', reuse=reuse):
        # 32
        inp = gaussnoise(inp, std=0.05)
        conv1 = conv2d(inp, 96, kernel=3, strides=1, name=dname + 'conv1')
        conv1 = lrelu(conv1, 0.2)

        conv1b = conv2d(conv1, 96, kernel=3, strides=2, name=dname + 'conv1b')
        conv1b = batchnorm(conv1b, is_training=is_train, name=dname + 'bn1b')
        conv1b = lrelu(conv1b, 0.2)
        conv1b = tf.nn.dropout(conv1b, keep_prob)
        # 16
        conv2 = conv2d(conv1b, 192, kernel=3, strides=1, name=dname + 'conv2')
        conv2 = batchnorm(conv2, is_training=is_train, name=dname + 'bn2')
        conv2 = lrelu(conv2, 0.2)

        conv2b = conv2d(conv2, 192, kernel=3, strides=2, name=dname + 'conv2b')
        conv2b = batchnorm(conv2b, is_training=is_train, name=dname + 'bn2b')
        conv2b = lrelu(conv2b, 0.2)
        conv2b = tf.nn.dropout(conv2b, keep_prob)
        # 8
        conv3 = conv2d(conv2b, 256, kernel=3, strides=1, name=dname + 'conv3')
        conv3 = batchnorm(conv3, is_training=is_train, name=dname + 'bn3')
        conv3 = lrelu(conv3, 0.2)

        conv3b = conv2d(conv3, 256, kernel=1, strides=1, name=dname + 'conv3b')
        conv3b = batchnorm(conv3b, is_training=is_train, name=dname + 'bn3b')
        conv3b = lrelu(conv3b, 0.2)

        conv4 = conv2d(conv3b, 512, kernel=1, strides=1, name=dname + 'conv4')
        conv4 = batchnorm(conv4, is_training=is_train, name=dname + 'bn4')
        conv4 = lrelu(conv4, 0.2)

        flat = flatten(conv4)
        # Classifier
        clspred = linear(flat, n_classes, name=dname + 'cpred')
        # Decoder
        g2 = conv2d(conv4, nout=256, kernel=3, name=dname + 'deconv2')
        g2 = batchnorm(g2, is_training=tf.constant(True), name=dname + 'bn2g')
        g2 = lrelu(g2, 0.2)

        g3 = nnupsampling(g2, [16, 16])
        g3 = conv2d(g3, nout=128, kernel=3, name=dname + 'deconv3')
        g3 = batchnorm(g3, is_training=tf.constant(True), name=dname + 'bn3g')
        g3 = lrelu(g3, 0.2)

        g3b = conv2d(g3, nout=128, kernel=3, name=dname + 'deconv3b')
        g3b = batchnorm(g3b,
                        is_training=tf.constant(True),
                        name=dname + 'bn3bg')
        g3b = lrelu(g3b, 0.2)

        g4 = nnupsampling(g3b, [32, 32])
        g4 = conv2d(g4, nout=64, kernel=3, name=dname + 'deconv4')
        g4 = batchnorm(g4, is_training=tf.constant(True), name=dname + 'bn4g')
        g4 = lrelu(g4, 0.2)

        g4b = conv2d(g4, nout=3, kernel=3, name=dname + 'deconv4b')
        g4b = tf.nn.tanh(g4b)
        return clspred, g4b
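
This discriminator returns class logits (`clspred`) together with a decoded image (`g4b`). The snippet does not show how the two outputs are trained; a plausible wiring, assuming one-hot `labels` and a `real_images` batch scaled to [-1, 1] (both names are placeholders, not from the original code), is:

clspred, recon = discriminator(real_images)
cls_loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=clspred))
recon_loss = tf.reduce_mean(tf.square(recon - real_images))  # matches the tanh output range
total_loss = cls_loss + recon_loss  # equal weighting is an assumption
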
Example #3
def discriminator(inp, reuse=False):
    with tf.variable_scope('Encoder', reuse=reuse):
        # 64
        inp = gaussnoise(inp, std=0.05)
        conv1 = conv2d(inp, 128, kernel=3, strides=2, name=dname + 'conv1')
        conv1 = lrelu(conv1, 0.2)
        # 32
        conv2 = tf.nn.dropout(conv1, keep_prob)
        conv2 = conv2d(conv2, 256, kernel=3, strides=2, name=dname + 'conv2')
        conv2 = batchnorm(conv2, is_training=is_train, name=dname + 'bn2')
        conv2 = lrelu(conv2, 0.2)
        # 16
        conv3 = tf.nn.dropout(conv2, keep_prob)
        conv3 = conv2d(conv3, 512, kernel=3, strides=2, name=dname + 'conv3')
        conv3 = batchnorm(conv3, is_training=is_train, name=dname + 'bn3')
        conv3 = lrelu(conv3, 0.2)
        # 8
        conv3b = conv2d(conv3, 512, kernel=3, strides=1, name=dname + 'conv3b')
        conv3b = batchnorm(conv3b, is_training=is_train, name=dname + 'bn3b')
        conv3b = lrelu(conv3b, 0.2)

        conv4 = tf.nn.dropout(conv3b, keep_prob)
        conv4 = conv2d(conv4, 1024, kernel=3, strides=2, name=dname + 'conv4')
        conv4 = batchnorm(conv4, is_training=is_train, name=dname + 'bn4')
        conv4 = lrelu(conv4, 0.2)
        # 4

        flat = flatten(conv4)
        # Classifier
        clspred = linear(flat, n_classes, name=dname + 'cpred')
        # Decoder
        g1 = conv2d(conv4, nout=512, kernel=3, name=dname + 'deconv1')
        g1 = batchnorm(g1, is_training=tf.constant(True), name=dname + 'bn1g')
        g1 = lrelu(g1, 0.2)

        g2 = nnupsampling(g1, [8, 8])
        g2 = conv2d(g2, nout=256, kernel=3, name=dname + 'deconv2')
        g2 = batchnorm(g2, is_training=tf.constant(True), name=dname + 'bn2g')
        g2 = lrelu(g2, 0.2)

        g3 = nnupsampling(g2, [16, 16])
        g3 = conv2d(g3, nout=128, kernel=3, name=dname + 'deconv3')
        g3 = batchnorm(g3, is_training=tf.constant(True), name=dname + 'bn3g')
        g3 = lrelu(g3, 0.2)

        g4 = nnupsampling(g3, [32, 32])
        g4 = conv2d(g4, nout=64, kernel=3, name=dname + 'deconv4')
        g4 = batchnorm(g4, is_training=tf.constant(True), name=dname + 'bn4g')
        g4 = lrelu(g4, 0.2)

        g5 = nnupsampling(g4, [64, 64])
        g5 = conv2d(g5, nout=32, kernel=3, name=dname + 'deconv5')
        g5 = batchnorm(g5, is_training=tf.constant(True), name=dname + 'bn5g')
        g5 = lrelu(g5, 0.2)

        g5b = conv2d(g5, nout=3, kernel=3, name=dname + 'deconv5b')
        g5b = tf.nn.tanh(g5b)
        return clspred, g5b
Example #4
 def generator(self, z, const_init=False, trainable=True):
     # (n, 256, 7, 7)
     h0 = layers.dense(z,
                       7 * 7 * 256,
                       name="g_fc1",
                       const_init=const_init,
                       trainable=trainable)
     h0 = layers.batchnorm(h0, axis=1, name="g_bn1", trainable=trainable)
     # h0 = layers.batchnorm(h0, axis=1, name="g_bn1")
     h0 = flow.nn.leaky_relu(h0, 0.3)
     h0 = flow.reshape(h0, (-1, 256, 7, 7))
     # (n, 128, 7, 7)
     h1 = layers.deconv2d(
         h0,
         128,
         5,
         strides=1,
         name="g_deconv1",
         const_init=const_init,
         trainable=trainable,
     )
     h1 = layers.batchnorm(h1, name="g_bn2", trainable=trainable)
     # h1 = layers.batchnorm(h1, name="g_bn2")
     h1 = flow.nn.leaky_relu(h1, 0.3)
     # (n, 64, 14, 14)
     h2 = layers.deconv2d(
         h1,
         64,
         5,
         strides=2,
         name="g_deconv2",
         const_init=const_init,
         trainable=trainable,
     )
     h2 = layers.batchnorm(h2, name="g_bn3", trainable=trainable)
     # h2 = layers.batchnorm(h2, name="g_bn3")
     h2 = flow.nn.leaky_relu(h2, 0.3)
     # (n, 1, 28, 28)
     out = layers.deconv2d(
         h2,
         1,
         5,
         strides=2,
         name="g_deconv3",
         const_init=const_init,
         trainable=trainable,
     )
     out = flow.math.tanh(out)
     return out
Example #5
 def inception(name, l, wf):
     """Inception module.
     Args:
       name: Scope name of this function.
       l: Output of previous layer.
       wf: Channel width factor of this module.
     """
     with tf.variable_scope(name):
         branchpool = tf.nn.max_pool(l, [1, 2, 2, 1], [1, 1, 1, 1], 'SAME')
         branchpool = layers.conv('conv_pool',
                                  branchpool,
                                  32 * wf,
                                  kernel_size=1)
         branch5x5 = layers.conv('conv_5x5_0', l, 16 * wf, kernel_size=1)
         branch5x5 = tf.nn.relu(branch5x5)
         branch5x5 = layers.conv('conv_5x5_1',
                                 branch5x5,
                                 32 * wf,
                                 kernel_size=5)
         branch3x3 = layers.conv('conv_3x3_0', l, 32 * wf, kernel_size=1)
         branch3x3 = tf.nn.relu(branch3x3)
         branch3x3 = layers.conv('conv_3x3_1',
                                 branch3x3,
                                 64 * wf,
                                 kernel_size=3)
         branch1x1 = layers.conv('conv_1x1_0', l, 64 * wf, kernel_size=1)
         branch1x1 = tf.nn.relu(branch1x1)
         cc = tf.concat([branch1x1, branch3x3, branch5x5, branchpool], 3)
         cc = layers.batchnorm('bn_0', cc, is_train)
         return tf.nn.relu(cc)
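
For reference, the four branches above are concatenated along the channel axis, so the module emits 64*wf + 64*wf + 32*wf + 32*wf = 192*wf output channels; a quick check:

def inception_output_channels(wf):
    # 1x1 branch + 3x3 branch + 5x5 branch + pool branch, as wired above
    return 64 * wf + 64 * wf + 32 * wf + 32 * wf

assert inception_output_channels(2) == 384
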
Example #6
 def encoder(sequence, training=True):
     num = tf.shape(sequence)[0]

     x = lyr.conv('encoder.conv1.filter', 'encoder.conv1.bias', 'encoder',
                  (5, encode_length, args.channels), sequence, max_size)
     x = tf.nn.leaky_relu(x)
     x = lyr.batchnorm(x, 'encoder.batchnorm1.offset', 'encoder.batchnorm1.scale',
                       'encoder.batchnorm1.average_means', 'encoder.batchnorm1.average_variances',
                       'encoder.num_means', 'encoder', (max_size, args.channels),
                       training=training)

     x = lyr.residual_block('encoder.res1.filter1', 'encoder.res1.bias1',
                            'encoder.res1.filter2', 'encoder.res1.bias2',
                            'encoder', args.channels, args.channels, x,
                            max_size, channels=args.channels)
     x = lyr.batchnorm(x, 'encoder.batchnorm2.offset', 'encoder.batchnorm2.scale',
                       'encoder.batchnorm2.average_means', 'encoder.batchnorm2.average_variances',
                       'encoder.num_means', 'encoder', (max_size, args.channels),
                       training=training)

     x = tf.reshape(x, (num, max_size * args.channels))

     x = lyr.dense('encoder.dense1.matrix', 'encoder.dense1.bias', 'encoder',
                   max_size * args.channels, 2 * latent_dim, x)
     x = tf.nn.leaky_relu(x)
     output = lyr.batchnorm(x, 'encoder.batchnorm3.offset', 'encoder.batchnorm3.scale',
                            'encoder.batchnorm3.average_means', 'encoder.batchnorm3.average_variances',
                            'encoder.num_means', 'encoder', (2 * latent_dim,),
                            training=training)

     return output
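
The final width of 2 * latent_dim suggests the encoder output is meant to be split into a mean and a log-variance for a VAE-style latent. That step is not part of this snippet, but a common follow-up (an assumption, not the original code) would be:

mu, logvar = tf.split(encoder(sequence), 2, axis=1)  # each of shape (num, latent_dim)
eps = tf.random_normal(tf.shape(mu))
z = mu + tf.exp(0.5 * logvar) * eps                  # reparameterization trick
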
Example #7
 def residual(name, l, in_channel, out_channel, stride):
     """Residual function.
     Args:
       name: Scope name of this function.
       l: Output of previous layer.
       in_channel: # of channels of l.
       out_channel: # of channels of each output feature.
       stride: Stride of the first convolution in residual function.
     """
     with tf.variable_scope(name):
         sc = l if stride == 1 else shortcut(l, in_channel, out_channel)
         l = layers.conv('conv_0', l, out_channel, stride=stride)
         l = layers.batchnorm('bn_0', l, is_train)
         l = tf.nn.relu(l)
         l = layers.conv('conv_1', l, out_channel, stride=1)
         l = layers.batchnorm('bn_1', l, is_train)
         l = tf.nn.relu(l + sc)
         return l
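
The `shortcut` helper used above is not defined in this snippet; in the full inference definition shown later (Example #15) it average-pools spatially and zero-pads the channel dimension:

def shortcut(l, in_channel, out_channel):
    shortcut = tf.nn.avg_pool(l, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
    pad = (out_channel - in_channel) // 2
    return tf.pad(shortcut, [[0, 0], [0, 0], [0, 0], [pad, pad]])
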
Example #8
 def conv_bn_relu(name, l, out_channel):
     """A sequence of convolution, batch normalization and ReLU.
     Args:
       name: Scope name of this function.
       l: Output of previous layer.
       out_channel: # of channels of each output feature.
     """
     with tf.variable_scope(name):
         l = layers.conv('conv_0', l, out_channel)
         l = layers.batchnorm('bn_0', l, is_train)
         return tf.nn.relu(l)
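
A minimal usage sketch (the scope name and channel widths are illustrative, not from the original code), assuming `layers.conv` falls back to its default kernel size:

with tf.variable_scope('stem'):
    l = conv_bn_relu('block_0', images, 32)
    l = conv_bn_relu('block_1', l, 64)
    l = tf.nn.max_pool(l, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
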
Example #9
def generator(inp_z, inp_y, reuse=False):
    with tf.variable_scope('Generator', reuse=reuse):
        inp = tf.concat([inp_z, inp_y], 1)
        sz = 4
        g1 = linear(inp, 512 * sz * sz, name=gname + 'deconv1')
        g1 = batchnorm(g1, is_training=tf.constant(True), name=gname + 'bn1g')
        g1 = lrelu(g1, 0.2)
        g1_reshaped = tf.reshape(g1, [-1, 512, sz, sz])
        print('genreshape: ' + str(g1_reshaped.get_shape().as_list()))

        g2 = nnupsampling(g1_reshaped, [8, 8])
        g2 = conv2d(g2, nout=512, kernel=3, name=gname + 'deconv2')
        g2 = batchnorm(g2, is_training=tf.constant(True), name=gname + 'bn2g')
        g2 = lrelu(g2, 0.2)

        g3 = nnupsampling(g2, [16, 16])
        g3 = conv2d(g3, nout=256, kernel=3, name=gname + 'deconv3')
        g3 = batchnorm(g3, is_training=tf.constant(True), name=gname + 'bn3g')
        g3 = lrelu(g3, 0.2)

        g4 = nnupsampling(g3, [32, 32])
        g4 = conv2d(g4, nout=128, kernel=3, name=gname + 'deconv4')
        g4 = batchnorm(g4, is_training=tf.constant(True), name=gname + 'bn4g')
        g4 = lrelu(g4, 0.2)

        g4b = conv2d(g4, nout=128, kernel=3, name=gname + 'deconv4b')
        g4b = batchnorm(g4b,
                        is_training=tf.constant(True),
                        name=gname + 'bn4bg')
        g4b = lrelu(g4b, 0.2)

        g5 = nnupsampling(g4b, [64, 64])
        g5 = conv2d(g5, nout=64, kernel=3, name=gname + 'deconv5')
        g5 = batchnorm(g5, is_training=tf.constant(True), name=gname + 'bn5g')
        g5 = lrelu(g5, 0.2)

        g5b = conv2d(g5, nout=64, kernel=3, name=gname + 'deconv5b')
        g5b = batchnorm(g5b,
                        is_training=tf.constant(True),
                        name=gname + 'bn5bg')
        g5b = lrelu(g5b, 0.2)

        g6 = nnupsampling(g5b, [128, 128])
        g6 = conv2d(g6, nout=32, kernel=3, name=gname + 'deconv6')
        g6 = batchnorm(g6, is_training=tf.constant(True), name=gname + 'bn6g')
        g6 = lrelu(g6, 0.2)

        g6b = conv2d(g6, nout=3, kernel=3, name=gname + 'deconv6b')
        g6b = tf.nn.tanh(g6b)
        g6b_64 = pool(g6b, fsize=3, strides=2, op='avg')
        return g6b_64, g6b
Example #10
    def decoder(state, training=tf.constant(True)):
        num = tf.shape(state)[0]

        x = tf.reshape(state, [num, -1])
        x = lyr.dense('decoder.dense1.matrix', 'decoder.dense1.bias', 'decoder',
                      latent_dim, 512, x)
        x = tf.nn.leaky_relu(x)
        x = lyr.batchnorm(x, 'decoder.batchnorm1.offset', 'decoder.batchnorm1.scale',
                          'decoder.batchnorm1.average_means', 'decoder.batchnorm1.average_variances',
                          'decoder.num_means', 'decoder', (512,), training=training)

        x = lyr.dense('decoder.dense2.matrix', 'decoder.dense2.bias', 'decoder',
                      512, 512, x)
        x = tf.nn.leaky_relu(x)
        x = lyr.batchnorm(x, 'decoder.batchnorm2.offset', 'decoder.batchnorm2.scale',
                          'decoder.batchnorm2.average_means', 'decoder.batchnorm2.average_variances',
                          'decoder.num_means', 'decoder', (512,), training=training)

        x = lyr.dense('decoder.dense3.matrix', 'decoder.dense3.bias', 'decoder',
                      512, 256, x)
        x = tf.nn.leaky_relu(x)
        x = lyr.batchnorm(x, 'decoder.batchnorm3.offset', 'decoder.batchnorm3.scale',
                          'decoder.batchnorm3.average_means', 'decoder.batchnorm3.average_variances',
                          'decoder.num_means', 'decoder', (256,), training=training)

        x = lyr.dense('decoder.dense4.matrix', 'decoder.dense4.bias', 'decoder',
                      256, max_size * encode_length, x)
        x = lyr.batchnorm(x, 'decoder.batchnorm4.offset', 'decoder.batchnorm4.scale',
                          'decoder.batchnorm4.average_means', 'decoder.batchnorm4.average_variances',
                          'decoder.num_means', 'decoder', (max_size * encode_length,),
                          training=training)

        x = tf.reshape(x, [num, max_size, encode_length])
        return x
Example #11
    def generator(seed, training=tf.constant(True)):
        num = tf.shape(seed)[0]

        seed = tf.reshape(seed, (num, 100))

        seed2 = lyr.dense('generator.dense1.matrix', 'generator.dense1.bias',
                          'generator', 100, max_size * 64, seed)
        seed2 = tf.nn.leaky_relu(seed2)
        seed2 = lyr.batchnorm(seed2, 'generator.batchnorm1.offset', 'generator.batchnorm1.scale',
                              'generator.batchnorm1.average_means', 'generator.batchnorm1.average_variances',
                              'generator.num_means', 'generator', (max_size * 64,),
                              training=training)

        seed2 = tf.reshape(seed2, [num, max_size, 64])

        x = lyr.residual_block('generator.res1.filter1', 'generator.res1.bias1',
                               'generator.res1.filter2', 'generator.res1.bias2',
                               'generator', 64, 64, seed2, max_size)
        x = lyr.batchnorm(x, 'generator.batchnorm2.offset', 'generator.batchnorm2.scale',
                          'generator.batchnorm2.average_means', 'generator.batchnorm2.average_variances',
                          'generator.num_means', 'generator', (max_size, 64), training=training)

        x = lyr.residual_block('generator.res2.filter1', 'generator.res2.bias1',
                               'generator.res2.filter2', 'generator.res2.bias2',
                               'generator', 64, 64, x, max_size)
        x = lyr.batchnorm(x, 'generator.batchnorm3.offset', 'generator.batchnorm3.scale',
                          'generator.batchnorm3.average_means', 'generator.batchnorm3.average_variances',
                          'generator.num_means', 'generator', (max_size, 64), training=training)

        x = lyr.residual_block('generator.res3.filter1', 'generator.res3.bias1',
                               'generator.res3.filter2', 'generator.res3.bias2',
                               'generator', 64, 64, x, max_size)
        x = lyr.batchnorm(x, 'generator.batchnorm4.offset', 'generator.batchnorm4.scale',
                          'generator.batchnorm4.average_means', 'generator.batchnorm4.average_variances',
                          'generator.num_means', 'generator', (max_size, 64), training=training)

        x = lyr.conv('generator.conv1.filter', 'generator.conv1.bias', 'generator',
                     (5, 64, encode_length), x, max_size)
        x = tf.nn.softmax(x)
        return x
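
The generator ends in a per-position softmax over the `encode_length` symbols. Turning those distributions into discrete sequences is outside this snippet; one straightforward option (an assumption, not part of the original code) is a per-position argmax:

probs = generator(seed)            # shape (num, max_size, encode_length)
tokens = tf.argmax(probs, axis=2)  # shape (num, max_size), integer symbol ids
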
Example #12
def generator(inp_z, inp_y):
    with tf.variable_scope('Generator'):
        inp = tf.concat([inp_z, inp_y], 1)

        g1 = linear(inp, 512 * 4 * 4, name=gname + 'deconv1')
        g1 = batchnorm(g1, is_training=tf.constant(True), name=gname + 'bn1g')
        g1 = lrelu(g1, 0.2)
        g1_reshaped = tf.reshape(g1, [-1, 512, 4, 4])
        print('genreshape: ' + str(g1_reshaped.get_shape().as_list()))

        g2 = nnupsampling(g1_reshaped, [8, 8])
        g2 = conv2d(g2, nout=256, kernel=3, name=gname + 'deconv2')
        g2 = batchnorm(g2, is_training=tf.constant(True), name=gname + 'bn2g')
        g2 = lrelu(g2, 0.2)

        g3 = nnupsampling(g2, [16, 16])
        g3 = conv2d(g3, nout=128, kernel=3, name=gname + 'deconv3')
        g3 = batchnorm(g3, is_training=tf.constant(True), name=gname + 'bn3g')
        g3 = lrelu(g3, 0.2)

        g3b = conv2d(g3, nout=128, kernel=3, name=gname + 'deconv3b')
        g3b = batchnorm(g3b,
                        is_training=tf.constant(True),
                        name=gname + 'bn3bg')
        g3b = lrelu(g3b, 0.2)

        g4 = nnupsampling(g3b, [32, 32])
        g4 = conv2d(g4, nout=64, kernel=3, name=gname + 'deconv4')
        g4 = batchnorm(g4, is_training=tf.constant(True), name=gname + 'bn4g')
        g4 = lrelu(g4, 0.2)

        g4b = conv2d(g4, nout=64, kernel=3, name=gname + 'deconv4b')
        g4b = batchnorm(g4b,
                        is_training=tf.constant(True),
                        name=gname + 'bn4bg')
        g4b = lrelu(g4b, 0.2)

        g5 = nnupsampling(g4b, [64, 64])
        g5 = conv2d(g5, nout=32, kernel=3, name=gname + 'deconv5')
        g5 = batchnorm(g5, is_training=tf.constant(True), name=gname + 'bn5g')
        g5 = lrelu(g5, 0.2)

        g5b = conv2d(g5, nout=3, kernel=3, name=gname + 'deconv5b')
        g5b = tf.nn.tanh(g5b)
        g5b_32 = pool(g5b, fsize=3, strides=2, op='avg', pad='SAME')
        return g5b_32, g5b
Example #13
def build_projection_model(images,
                           is_train,
                           n_reference,
                           use_bias=True,
                           reuse=None):
    """
    Build the graph for the projection network, which shares the architecture of a typical autoencoder.
    To improve contextual awareness, we add a channel-wise fully-connected layer followed by a 2-by-2
    convolution layer at the middle.
    """
    channel_compress_ratio = 4
    dim_latent = 1024

    with tf.variable_scope('PROJ', reuse=reuse):

        with tf.variable_scope('ENCODE'):
            conv0 = layers.new_conv_layer(images, [4, 4, 3, 64],
                                          stride=1,
                                          bias=use_bias,
                                          name="conv0")  #64
            bn0 = tf.nn.elu(
                layers.batchnorm(conv0, is_train, n_reference, name='bn0'))
            conv1 = layers.new_conv_layer(bn0, [4, 4, 64, 128],
                                          stride=1,
                                          bias=use_bias,
                                          name="conv1")  #64
            bn1 = tf.nn.elu(
                layers.batchnorm(conv1, is_train, n_reference, name='bn1'))
            conv2 = layers.new_conv_layer(bn1, [4, 4, 128, 256],
                                          stride=2,
                                          bias=use_bias,
                                          name="conv2")  #32
            bn2 = tf.nn.elu(
                layers.batchnorm(conv2, is_train, n_reference, name='bn2'))
            conv3 = layers.new_conv_layer(bn2, [4, 4, 256, 512],
                                          stride=2,
                                          bias=use_bias,
                                          name="conv3")  #16
            bn3 = tf.nn.elu(
                layers.batchnorm(conv3, is_train, n_reference, name='bn3'))
            conv4 = layers.new_conv_layer(bn3, [4, 4, 512, dim_latent],
                                          stride=2,
                                          bias=use_bias,
                                          name="conv4")  #8
            bn4 = tf.nn.elu(
                layers.batchnorm(conv4, is_train, n_reference, name='bn4'))
            fc5 = layers.channel_wise_fc_layer(bn4, 'fc5', bias=False)
            fc5_conv = layers.new_conv_layer(fc5,
                                             [2, 2, dim_latent, dim_latent],
                                             stride=1,
                                             bias=use_bias,
                                             name="conv_fc")
            latent = tf.nn.elu(
                layers.batchnorm(fc5_conv,
                                 is_train,
                                 n_reference,
                                 name='latent'))

        deconv3 = layers.new_deconv_layer(latent, [4, 4, 512, dim_latent],
                                          conv3.get_shape().as_list(),
                                          stride=2,
                                          bias=use_bias,
                                          name="deconv3")
        debn3 = tf.nn.elu(
            layers.batchnorm(deconv3, is_train, n_reference, name='debn3'))
        deconv2 = layers.new_deconv_layer(debn3, [4, 4, 256, 512],
                                          conv2.get_shape().as_list(),
                                          stride=2,
                                          bias=use_bias,
                                          name="deconv2")
        debn2 = tf.nn.elu(
            layers.batchnorm(deconv2, is_train, n_reference, name='debn2'))
        deconv1 = layers.new_deconv_layer(debn2, [4, 4, 128, 256],
                                          conv1.get_shape().as_list(),
                                          stride=2,
                                          bias=use_bias,
                                          name="deconv1")
        debn1 = tf.nn.elu(
            layers.batchnorm(deconv1, is_train, n_reference, name='debn1'))
        deconv0 = layers.new_deconv_layer(debn1, [4, 4, 64, 128],
                                          conv0.get_shape().as_list(),
                                          stride=1,
                                          bias=use_bias,
                                          name="deconv0")
        debn0 = tf.nn.elu(
            layers.batchnorm(deconv0, is_train, n_reference, name='debn0'))
        proj_ori = layers.new_deconv_layer(debn0, [4, 4, 3, 64],
                                           images.get_shape().as_list(),
                                           stride=1,
                                           bias=use_bias,
                                           name="recon")
        proj = proj_ori

    return proj, latent
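
The projection network returns both the reconstruction `proj` and the latent code. How they are supervised is not part of this excerpt; a plausible reconstruction objective, assuming `images` holds the clean targets, would be a per-pixel L2 penalty such as:

proj, latent = build_projection_model(images, is_train, n_reference)
proj_loss = tf.reduce_mean(tf.square(proj - images))  # weighting/choice of norm is an assumption
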
Example #14
    def __init__(self, dim_z, x_train, x_test, diff=None, magic=5000):
        ####################################### SETTINGS ###################################
        self.x_train = x_train
        self.x_test = x_test
        self.diff = diff
        self.batch_size = 100.0
        self.learning_rate = theano.shared(np.float32(0.0008))
        self.momentum = 0.3
        self.performance = {"train": [], "test": []}
        self.inpt = T.ftensor4(name="input")
        self.df = T.fmatrix(name="differential")
        self.dim_z = dim_z
        self.generative_z = theano.shared(np.float32(np.zeros([1, dim_z])))
        self.activation = relu
        self.generative = False
        self.out_distribution = False
        # self.y = T.matrix(name="y")
        self.in_filters = [64, 64, 64]
        self.filter_lengths = [10.0, 10.0, 10.0]
        self.params = []
        # magic = 73888.
        self.magic = magic

        self.dropout_symbolic = T.fscalar()
        self.dropout_prob = theano.shared(np.float32(0.0))
        ####################################### LAYERS ######################################
        # LAYER 1 ##############################
        self.conv1 = one_d_conv_layer(
            self.inpt, self.in_filters[0], 1, self.filter_lengths[0], param_names=["W1", "b1"]
        )
        self.params += self.conv1.params
        self.bn1 = batchnorm(self.conv1.output)
        self.nl1 = self.activation(self.bn1.X)
        self.maxpool1 = pool_2d(self.nl1, [3, 1], stride=[2, 1], mode="average_exc_pad").astype(theano.config.floatX)
        self.layer1_out = dropout(self.maxpool1, self.dropout_symbolic)
        # self.layer1_out = self.maxpool1
        # LAYER2 ################################
        self.flattened = T.flatten(self.layer1_out, outdim=2)
        # Variational Layer #####################
        self.latent_layer = variational_gauss_layer(self.flattened, self.magic, dim_z)
        self.params += self.latent_layer.params
        self.latent_out = self.latent_layer.output
        # Hidden Layer #########################
        self.hidden_layer = hidden_layer(self.latent_out, dim_z, self.magic)
        self.params += self.hidden_layer.params
        self.hid_out = dropout(
            self.activation(self.hidden_layer.output).reshape(
                (self.inpt.shape[0], self.in_filters[-1], int(self.magic / self.in_filters[-1]), 1)
            ),
            self.dropout_symbolic,
        )
        # Deconvolutional 1 ######################
        self.deconv1 = one_d_deconv_layer(
            self.hid_out,
            1,
            self.in_filters[2],
            self.filter_lengths[2],
            pool=2.0,
            param_names=["W3", "b3"],
            distribution=False,
        )
        self.params += self.deconv1.params
        # self.nl_deconv1 = dropout(self.activation(self.deconv1.output),self.dropout_symbolic)
        self.tanh_out = self.deconv1.output
        self.last_layer = self.deconv1

        if self.out_distribution:
            self.trunk_sigma = self.last_layer.log_sigma[:, :, : self.inpt.shape[2], :]
        self.trunc_output = self.tanh_out[:, :, : self.inpt.shape[2], :]

        ################################### FUNCTIONS ######################################################
        self.get_latent_states = theano.function(
            [self.inpt], self.latent_out, givens=[[self.dropout_symbolic, self.dropout_prob]]
        )
        # self.prior_debug = theano.function([self.inpt],[self.latent_out,self.latent_layer.mu_encoder,self.latent_layer.log_sigma_encoder,self.latent_layer.prior])
        # self.get_prior = theano.function([self.inpt],self.latent_layer.prior)
        # self.convolve1 = theano.function([self.inpt],self.layer1_out)
        # self.convolve2 = theano.function([self.inpt],self.layer2_out)
        self.output = theano.function(
            [self.inpt], self.trunc_output, givens=[[self.dropout_symbolic, self.dropout_prob]]
        )
        self.get_flattened = theano.function(
            [self.inpt], self.flattened, givens=[[self.dropout_symbolic, self.dropout_prob]]
        )
        # self.deconvolve1 = theano.function([self.inpt],self.deconv1.output)
        # self.deconvolve2 = theano.function([self.inpt],self.deconv2.output)
        # self.sig_out = theano.function([self.inpt],T.flatten(self.trunk_sigma,outdim=2))
        self.output = theano.function(
            [self.inpt], self.trunc_output, givens=[[self.dropout_symbolic, self.dropout_prob]]
        )
        # self.generate_from_z = theano.function([self.inpt],self.trunc_output,givens = [[self.latent_out,self.generative_z]])
        self.generate_from_z = theano.function(
            [self.inpt],
            self.trunc_output,
            givens=[[self.dropout_symbolic, self.dropout_prob], [self.latent_out, self.generative_z]],
        )

        self.cost = self.MSE()
        self.mse = self.MSE()
        # self.likelihood = self.log_px_z()
        # self.get_cost = theano.function([self.inpt],[self.cost,self.mse])

        # self.get_likelihood = theano.function([self.layer1.inpt],[self.likelihood])
        self.derivatives = T.grad(self.cost, self.params)
        # self.get_gradients = theano.function([self.inpt],self.derivatives)
        self.updates = adam(self.params, self.derivatives, self.learning_rate)
        # self.updates =momentum_update(self.params,self.derivatives,self.learning_rate,self.momentum)
        self.train_model = theano.function(
            inputs=[self.inpt, self.df],
            outputs=self.cost,
            updates=self.updates,
            givens=[[self.dropout_symbolic, self.dropout_prob]],
        )
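
`train_model` consumes a minibatch plus the `differential` matrix and returns the scalar cost. A rough training-loop sketch follows; the class name `ConvVAE`, the shape of `df`, and the epoch count are all assumptions, since none of them are fixed by the excerpt:

import numpy as np

model = ConvVAE(dim_z=10, x_train=x_train, x_test=x_test)  # hypothetical class name
bs = int(model.batch_size)
for epoch in range(20):  # arbitrary epoch count
    for i in range(0, len(model.x_train) - bs + 1, bs):
        xb = model.x_train[i:i + bs].astype(np.float32)
        df = np.zeros((bs, 1), dtype=np.float32)  # placeholder; the true shape of `df` is not shown
        cost = model.train_model(xb, df)
    model.performance["train"].append(float(cost))
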
Example #15
def inference(images):
    """Definition of model inference.
    Args:
      images: A batch of images to process. Shape [batch_size,32,32,3]
    """
    is_train = tf.get_collection('is_train')[0]

    def shortcut(l, in_channel, out_channel):
        """Shortcut for residual function.
        Args:
          l: Output of previous layer.
          in_channel: # of channels of l.
          out_channel: # of channels of each output feature.
        """
        shortcut = tf.nn.avg_pool(l, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
        pad = (out_channel - in_channel) // 2
        return tf.pad(shortcut, [[0, 0], [0, 0], [0, 0], [pad, pad]])

    def residual(name, l, in_channel, out_channel, stride):
        """Residual function.
        Args:
          name: Scope name of this function.
          l: Output of previous layer.
          in_channel: # of channels of l.
          out_channel: # of channels of each output feature.
          stride: Stride of the first convolution in residual function.
        """
        with tf.variable_scope(name):
            sc = l if stride == 1 else shortcut(l, in_channel, out_channel)
            l = layers.conv('conv_0', l, out_channel, stride=stride)
            l = layers.batchnorm('bn_0', l, is_train)
            l = tf.nn.relu(l)
            l = layers.conv('conv_1', l, out_channel, stride=1)
            l = layers.batchnorm('bn_1', l, is_train)
            l = tf.nn.relu(l + sc)
            return l

    # ResNet-20 inference
    with tf.variable_scope('inference'):
        features = []
        for m in range(FLAGS.num_model):
            l = images
            with tf.variable_scope('model_%d' % m):
                l = layers.conv('conv_init', l, 16, stride=1)
                l = residual('res_1_1', l, 16, 16, 1)
                l = residual('res_1_2', l, 16, 16, 1)
                l = residual('res_1_3', l, 16, 16, 1)
                features.append(l)

        # stochastically share hidden features right before the first pooling
        if FLAGS.feature_sharing:
            features = feature_sharing(features)

        for m in range(FLAGS.num_model):
            l = features[m]
            with tf.variable_scope('model_%d' % m):
                l = residual('res_2_1', l, 16, 32, 2)
                l = residual('res_2_2', l, 32, 32, 1)
                l = residual('res_2_3', l, 32, 32, 1)

                l = residual('res_3_1', l, 32, 64, 2)
                l = residual('res_3_2', l, 64, 64, 1)
                l = residual('res_3_3', l, 64, 64, 1)

                l = layers.batchnorm('bn_0', l, is_train)
                l = tf.nn.relu(l)
                # global average pooling
                l = tf.reduce_mean(l, [1, 2])
                l = layers.fully_connected('fc_0', l, 10)
            features[m] = l
        return features
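
`inference` returns one 10-way logit tensor per ensemble member. The loss is not part of this snippet; a common next step, assuming an integer `labels` batch (a placeholder name), is to average the per-model cross-entropy:

logits_list = inference(images)
losses = [tf.reduce_mean(
              tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
          for logits in logits_list]
total_loss = tf.add_n(losses) / float(FLAGS.num_model)
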
Example #16
def build_classifier_model_imagespace(image,
                                      is_train,
                                      n_reference,
                                      reuse=None):
    """
    Build the graph for the classifier in the image space
    """

    channel_compress_ratio = 4

    with tf.variable_scope('DIS', reuse=reuse):

        with tf.variable_scope('IMG'):
            ## image space D
            # 1
            conv1 = layers.new_conv_layer(image, [4, 4, 3, 64],
                                          stride=1,
                                          name="conv1")  #64

            # 2
            nBlocks = 3
            module2 = layers.add_bottleneck_module(
                conv1,
                is_train,
                nBlocks,
                n_reference,
                channel_compress_ratio=channel_compress_ratio,
                name='module2')  # 32

            # 3
            nBlocks = 4
            module3 = layers.add_bottleneck_module(
                module2,
                is_train,
                nBlocks,
                n_reference,
                channel_compress_ratio=channel_compress_ratio,
                name='module3')  # 16

            # 4
            nBlocks = 6
            module4 = layers.add_bottleneck_module(
                module3,
                is_train,
                nBlocks,
                n_reference,
                channel_compress_ratio=channel_compress_ratio,
                name='module4')  # 8

            # 5
            nBlocks = 3
            module5 = layers.add_bottleneck_module(
                module4,
                is_train,
                nBlocks,
                n_reference,
                channel_compress_ratio=channel_compress_ratio,
                name='module5')  # 4
            bn_module5 = tf.nn.elu(
                layers.batchnorm(module5,
                                 is_train,
                                 n_reference,
                                 name='bn_module5'))

            (dis, last_w) = layers.new_fc_layer(bn_module5,
                                                output_size=1,
                                                name='dis')

    return dis[:, 0], last_w
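
The function returns a single unbounded score per image plus the final FC weights. If it serves as a GAN-style discriminator, a standard sigmoid cross-entropy pairing (an assumption; the actual objective is not shown, and `real_images`/`fake_images` are placeholder names) looks like:

d_real, _ = build_classifier_model_imagespace(real_images, is_train, n_reference)
d_fake, _ = build_classifier_model_imagespace(fake_images, is_train, n_reference, reuse=True)
d_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_real), logits=d_real) +
    tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d_fake), logits=d_fake))
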
Example #17
def inference(images):
    """Definition of model inference.
    Args:
      images: A batch of images to process. Shape [batch_size,32,32,3]
    """
    is_train = tf.get_collection('is_train')[0]

    def inception(name, l, wf):
        """Inception module.
        Args:
          name: Scope name of this function.
          l: Output of previous layer.
          wf: Channel width factor of this module.
        """
        with tf.variable_scope(name):
            branchpool = tf.nn.max_pool(l, [1, 2, 2, 1], [1, 1, 1, 1], 'SAME')
            branchpool = layers.conv('conv_pool',
                                     branchpool,
                                     32 * wf,
                                     kernel_size=1)
            branch5x5 = layers.conv('conv_5x5_0', l, 16 * wf, kernel_size=1)
            branch5x5 = tf.nn.relu(branch5x5)
            branch5x5 = layers.conv('conv_5x5_1',
                                    branch5x5,
                                    32 * wf,
                                    kernel_size=5)
            branch3x3 = layers.conv('conv_3x3_0', l, 32 * wf, kernel_size=1)
            branch3x3 = tf.nn.relu(branch3x3)
            branch3x3 = layers.conv('conv_3x3_1',
                                    branch3x3,
                                    64 * wf,
                                    kernel_size=3)
            branch1x1 = layers.conv('conv_1x1_0', l, 64 * wf, kernel_size=1)
            branch1x1 = tf.nn.relu(branch1x1)
            cc = tf.concat([branch1x1, branch3x3, branch5x5, branchpool], 3)
            cc = layers.batchnorm('bn_0', cc, is_train)
            return tf.nn.relu(cc)

    # GoogLeNet-18 inference
    with tf.variable_scope('inference'):
        features = []
        for m in range(FLAGS.num_model):
            l = images
            with tf.variable_scope('model_%d' % m):
                l = layers.conv('conv_init', l, 32, kernel_size=3)
                l = layers.batchnorm('bn_init', l, is_train)
                l = tf.nn.relu(l)
                features.append(l)

        # stochastically share hidden features right before the first pooling
        if FLAGS.feature_sharing:
            features = feature_sharing(features)

        for m in range(FLAGS.num_model):
            l = features[m]
            with tf.variable_scope('model_%d' % m):
                l = tf.nn.max_pool(l, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
                l = inception('inception_1a', l, 1)
                l = inception('inception_1b', l, 2)

                l = tf.nn.max_pool(l, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
                l = inception('inception_2a', l, 2)
                l = inception('inception_2b', l, 2)
                l = inception('inception_2c', l, 2)
                l = inception('inception_2d', l, 4)

                l = tf.nn.max_pool(l, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
                l = inception('inception_3a', l, 4)
                l = inception('inception_3b', l, 4)

                # global average pooling
                l = tf.reduce_mean(l, [1, 2])
                l = layers.fully_connected('fc_0', l, 10)
            features[m] = l
        return features
Example #18
	def __init__(self,x_train,dim_z=10,batch_size = 10,filter_no = [5.,5.,5.],filter_l = [10.,10.,10.],
		pooling_d=3,pooling_s=2,learning_rate = 0.0008,dim_y=None,y_train=None,diff=None,magic=5000):
		####################################### SETTINGS ###################################
		self.x_train = x_train
		self.y_train = y_train
		if y_train is not None:
			self.dim_y = dim_y
		self.diff=diff
		self.batch_size = batch_size
		self.learning_rate = theano.shared(np.float32(learning_rate))
		self.performance = {"train":[]}
		self.inpt = T.ftensor4(name='input')
		self.Y = T.fcol(name= 'label')
		self.df = T.fmatrix(name='differential')
		self.dim_z = dim_z
		self.magic =magic
		self.pooling_d = pooling_d
		self.pooling_s = pooling_s
		self.generative_z = theano.shared(np.float32(np.zeros([1,dim_z])))
		self.generative_hid = theano.shared(np.float32(np.zeros([1,magic])))
		self.activation =relu
		self.out_distribution=False
		self.in_filters = filter_no
		self.filter_lengths = filter_l
		self.params = []


		self.d_o_prob = theano.shared(np.float32(0.0))
		####################################### LAYERS ######################################
		# LAYER 1 ##############################
		self.conv1 = one_d_conv_layer(self.inpt,self.in_filters[0],1,self.filter_lengths[0],param_names = ["W1",'b1']) 
		self.params+=self.conv1.params
		self.bn1 = batchnorm(self.conv1.output)
		self.nl1 = self.activation(self.bn1.X)
		self.maxpool1 = ds.max_pool_2d(self.nl1,[self.pooling_d,1],st=[self.pooling_s,1],ignore_border = False).astype(theano.config.floatX)
		self.layer1_out = dropout(self.maxpool1,self.d_o_prob)
		self.flattened = T.flatten(self.layer1_out,outdim = 2)
		# Conditional + variational layer #####################
		if y_train is not None:
			self.c_enc =hidden_layer(self.Y,1,self.dim_y)
			self.c_dec = hidden_layer(self.Y,1,self.dim_y,param_names = ["W10",'b10'])
			self.params+=self.c_enc.params
			self.params+=self.c_dec.params
			self.c_nl = self.activation(self.c_enc.output)
			self.c_nl_dec = self.activation(self.c_dec.output)
			self.concatenated = T.concatenate((self.flattened,self.c_nl),axis = 1)
			self.latent_layer = variational_gauss_layer(self.concatenated,self.magic+self.dim_y,dim_z)
		else:
			self.latent_layer = variational_gauss_layer(self.flattened,self.magic,dim_z)
		self.params+=self.latent_layer.params
		self.latent_out = self.latent_layer.output
		# Hidden Layer #########################
		if y_train is not None:
			self.dec_concat = T.concatenate((self.latent_out,self.c_nl_dec),axis = 1)
			self.hidden_layer = hidden_layer(self.dec_concat,self.dim_z+self.dim_y,self.magic)
		else:
			self.hidden_layer = hidden_layer(self.latent_out,dim_z,self.magic)
		self.params+=self.hidden_layer.params
		self.hid_out = dropout(self.activation(self.hidden_layer.output).reshape((self.inpt.shape[0],self.in_filters[-1],int(self.magic/self.in_filters[-1]),1)),self.d_o_prob)
		# Deconvolutional 1 ######################
		self.deconv1 = one_d_deconv_layer(self.hid_out,1,self.in_filters[2],self.filter_lengths[2],pool=self.pooling_d,param_names = ["W3",'b3'],distribution=False)
		self.params+=self.deconv1.params
		#self.nl_deconv1 = dropout(self.activation(self.deconv1.output),self.dropout_symbolic)
		self.tanh_out = self.deconv1.output
		self.last_layer = self.deconv1

		if self.out_distribution:
			self.trunk_sigma =  self.last_layer.log_sigma[:,:,:self.inpt.shape[2],:]
		self.trunc_output = self.tanh_out[:,:,:self.inpt.shape[2],:]
		self.cost = self.MSE()
		self.mse = self.MSE()
		#self.likelihood = self.log_px_z()
		#self.get_cost = theano.function([self.inpt],[self.cost,self.mse])

		#self.get_likelihood = theano.function([self.layer1.inpt],[self.likelihood])
		self.derivatives = T.grad(self.cost,self.params)
		#self.get_gradients = theano.function([self.inpt],self.derivatives)
		self.updates =adam(self.params,self.derivatives,self.learning_rate)
		
		################################### FUNCTIONS ######################################################
		#self.prior_debug = theano.function([self.inpt],[self.latent_out,self.latent_layer.mu_encoder,self.latent_layer.log_sigma_encoder,self.latent_layer.prior])
		#self.get_prior = theano.function([self.inpt],self.latent_layer.prior)
		#self.convolve1 = theano.function([self.inpt],self.layer1_out)
		#self.convolve2 = theano.function([self.inpt],self.layer2_out)
		#self.deconvolve1 = theano.function([self.inpt],self.deconv1.output)
		#self.deconvolve2 = theano.function([self.inpt],self.deconv2.output)
		#self.sig_out = theano.function([self.inpt],T.flatten(self.trunk_sigma,outdim=2))
		#self.output = theano.function([self.inpt],self.trunc_output,givens=[[self.dropout_symbolic,self.dropout_prob]])
		#self.generate_from_z = theano.function([self.inpt],self.trunc_output,givens = [[self.latent_out,self.generative_z]])
		#self.get_cost = theano.function([self.inpt],[self.cost,self.mse])
		#self.get_likelihood = theano.function([self.layer1.inpt],[self.likelihood])
		#self.get_gradients = theano.function([self.inpt],self.derivatives)

		self.generate_from_hid = theano.function([self.inpt],self.trunc_output,givens = [[self.hidden_layer.output,self.generative_hid]])
		self.get_flattened = theano.function([self.inpt],self.flattened)
		if self.y_train is not None:
			self.generate_from_z = theano.function([self.inpt,self.Y],self.trunc_output,givens = [[self.latent_out,self.generative_z]])
			self.train_model = theano.function(inputs = [self.inpt,self.df,self.Y],outputs = self.cost,updates = self.updates)
			self.get_latent_states = theano.function([self.inpt,self.Y],self.latent_out)
			self.get_c_enc = theano.function([self.Y],self.c_enc.output)
			self.output = theano.function([self.inpt,self.Y],self.trunc_output)
			self.get_concat = theano.function([self.inpt,self.Y],self.concatenated)
		else:
			self.generate_from_z = theano.function([self.inpt],self.trunc_output,givens = [[self.latent_out,self.generative_z]])
			self.train_model = theano.function(inputs = [self.inpt,self.df],outputs = self.cost,updates = self.updates)
			self.output = theano.function([self.inpt],self.trunc_output)
			self.get_latent_states = theano.function([self.inpt],self.latent_out)
Example #19
def build_classifier_model_latentspace(latent,
                                       is_train,
                                       n_reference,
                                       reuse=None):
    """
    Build the graph for the classifier in the latent space
    """

    channel_compress_ratio = 4

    with tf.variable_scope('DIS', reuse=reuse):

        with tf.variable_scope('LATENT'):

            out = layers.bottleneck(
                latent,
                is_train,
                n_reference,
                channel_compress_ratio=channel_compress_ratio,
                stride=1,
                name='block0')  # 8*8*4096
            out = layers.bottleneck(
                out,
                is_train,
                n_reference,
                channel_compress_ratio=channel_compress_ratio,
                stride=1,
                name='block1')  # 8*8*4096
            out = layers.bottleneck(
                out,
                is_train,
                n_reference,
                channel_compress_ratio=channel_compress_ratio,
                stride=1,
                name='block2')  # 8*8*4096

            output_channel = out.get_shape().as_list()[-1]
            out = layers.bottleneck_flexible(out,
                                             is_train,
                                             output_channel,
                                             n_reference,
                                             channel_compress_ratio=4,
                                             stride=2,
                                             name='block3')  # 4*4*4096
            out = layers.bottleneck(out,
                                    is_train,
                                    n_reference,
                                    channel_compress_ratio=4,
                                    stride=1,
                                    name='block4')  # 4*4*4096
            out = layers.bottleneck(out,
                                    is_train,
                                    n_reference,
                                    channel_compress_ratio=4,
                                    stride=1,
                                    name='block5')  # 4*4*4096

            bn1 = tf.nn.elu(
                layers.batchnorm(out, is_train, n_reference, name='bn1'))
            (dis, last_w) = layers.new_fc_layer(bn1, output_size=1, name='dis')

    return dis[:, 0], last_w
Example #20
    def __init__(self, dim_z, x_train, x_test, diff=None, magic=5000):
        ####################################### SETTINGS ###################################
        self.x_train = x_train
        self.x_test = x_test
        self.diff = diff
        self.batch_size = 100.
        self.learning_rate = theano.shared(np.float32(0.0008))
        self.momentum = 0.3
        self.performance = {"train": [], "test": []}
        self.inpt = T.ftensor4(name='input')
        self.df = T.fmatrix(name='differential')
        self.dim_z = dim_z
        self.generative_z = theano.shared(np.float32(np.zeros([1, dim_z])))
        self.activation = relu
        self.generative = False
        self.out_distribution = False
        #self.y = T.matrix(name="y")
        self.in_filters = [5, 5, 5]
        self.filter_lengths = [10., 10., 10.]
        self.params = []
        #magic = 73888.
        self.magic = magic

        self.dropout_symbolic = T.fscalar()
        self.dropout_prob = theano.shared(np.float32(0.0))
        ####################################### LAYERS ######################################
        # LAYER 1 ##############################
        self.conv1 = one_d_conv_layer(self.inpt,
                                      self.in_filters[0],
                                      1,
                                      self.filter_lengths[0],
                                      param_names=["W1", 'b1'])
        self.params += self.conv1.params
        self.bn1 = batchnorm(self.conv1.output)
        self.nl1 = self.activation(self.bn1.X)
        self.maxpool1 = ds.max_pool_2d(self.nl1, [3, 1],
                                       st=[2, 1],
                                       ignore_border=False).astype(
                                           theano.config.floatX)
        self.layer1_out = dropout(self.maxpool1, self.dropout_symbolic)
        #self.layer1_out = self.maxpool1
        # LAYER2 ################################
        self.flattened = T.flatten(self.layer1_out, outdim=2)
        # Variational Layer #####################
        self.latent_layer = variational_gauss_layer(self.flattened, self.magic,
                                                    dim_z)
        self.params += self.latent_layer.params
        self.latent_out = self.latent_layer.output
        # Hidden Layer #########################
        self.hidden_layer = hidden_layer(self.latent_out, dim_z, self.magic)
        self.params += self.hidden_layer.params
        self.hid_out = dropout(
            self.activation(self.hidden_layer.output).reshape(
                (self.inpt.shape[0], self.in_filters[-1],
                 int(self.magic / self.in_filters[-1]), 1)),
            self.dropout_symbolic)
        # Deconvolutional 1 ######################
        self.deconv1 = one_d_deconv_layer(self.hid_out,
                                          1,
                                          self.in_filters[2],
                                          self.filter_lengths[2],
                                          pool=2.,
                                          param_names=["W3", 'b3'],
                                          distribution=False)
        self.params += self.deconv1.params
        #self.nl_deconv1 = dropout(self.activation(self.deconv1.output),self.dropout_symbolic)
        self.tanh_out = self.deconv1.output
        self.last_layer = self.deconv1

        if self.out_distribution:
            self.trunk_sigma = self.last_layer.log_sigma[:, :, :self.inpt.shape[2], :]
        self.trunc_output = self.tanh_out[:, :, :self.inpt.shape[2], :]

        ################################### FUNCTIONS ######################################################
        self.get_latent_states = theano.function(
            [self.inpt],
            self.latent_out,
            givens=[[self.dropout_symbolic, self.dropout_prob]])
        #self.prior_debug = theano.function([self.inpt],[self.latent_out,self.latent_layer.mu_encoder,self.latent_layer.log_sigma_encoder,self.latent_layer.prior])
        #self.get_prior = theano.function([self.inpt],self.latent_layer.prior)
        #self.convolve1 = theano.function([self.inpt],self.layer1_out)
        #self.convolve2 = theano.function([self.inpt],self.layer2_out)
        self.output = theano.function(
            [self.inpt],
            self.trunc_output,
            givens=[[self.dropout_symbolic, self.dropout_prob]])
        self.get_flattened = theano.function(
            [self.inpt],
            self.flattened,
            givens=[[self.dropout_symbolic, self.dropout_prob]])
        #self.deconvolve1 = theano.function([self.inpt],self.deconv1.output)
        #self.deconvolve2 = theano.function([self.inpt],self.deconv2.output)
        #self.sig_out = theano.function([self.inpt],T.flatten(self.trunk_sigma,outdim=2))
        self.output = theano.function(
            [self.inpt],
            self.trunc_output,
            givens=[[self.dropout_symbolic, self.dropout_prob]])
        #self.generate_from_z = theano.function([self.inpt],self.trunc_output,givens = [[self.latent_out,self.generative_z]])
        self.generate_from_z = theano.function(
            [self.inpt],
            self.trunc_output,
            givens=[[self.dropout_symbolic, self.dropout_prob],
                    [self.latent_out, self.generative_z]])

        self.cost = self.MSE()
        self.mse = self.MSE()
        #self.likelihood = self.log_px_z()
        #self.get_cost = theano.function([self.inpt],[self.cost,self.mse])

        #self.get_likelihood = theano.function([self.layer1.inpt],[self.likelihood])
        self.derivatives = T.grad(self.cost, self.params)
        #self.get_gradients = theano.function([self.inpt],self.derivatives)
        self.updates = adam(self.params, self.derivatives, self.learning_rate)
        #self.updates =momentum_update(self.params,self.derivatives,self.learning_rate,self.momentum)
        self.train_model = theano.function(
            inputs=[self.inpt, self.df],
            outputs=self.cost,
            updates=self.updates,
            givens=[[self.dropout_symbolic, self.dropout_prob]])
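
Both Theano classes above call `self.MSE()` without defining it in the excerpt. As a rough idea of what such a method could compute (an assumption based on `trunc_output` being a reconstruction of `inpt`, not the original implementation):

def MSE(self):
    # Mean squared reconstruction error between the truncated decoder output
    # and the network input, summed over features and averaged over the batch.
    diff = T.flatten(self.trunc_output - self.inpt, outdim=2)
    return T.mean(T.sum(diff ** 2, axis=1))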