Example #1
    def encoder(self, x, is_training=True, reuse=False):
        with tf.variable_scope("encoder", reuse=reuse):
            if self.verbose: print(x.shape)

            # Layer 1
            net = layers.conv2d(x, self.conv_dim >> 1, name='en_conv1')
            net = tf.nn.leaky_relu(net)
            if self.verbose: print(net.shape)

            # Layer 2
            net = layers.conv2d(net, self.conv_dim, name='en_conv2')
            net = tf.nn.leaky_relu(net)
            if self.verbose: print(net.shape)

            # Layer 3
            net = layers.flatten(net)
            if self.verbose: print(net.shape)

            # Layer 4
            net = layers.linear(net, self.linear_dim, scope='en_fc1')
            net = tf.nn.leaky_relu(net)
            if self.verbose: print(net.shape)

            # Layer 5
            net = layers.linear(net, self.latent_dim, scope='en_fc2')
            net = tf.identity(net)
            if self.verbose: print(net.shape)

            return net
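The `layers` module used in every snippet on this page is project-specific and not shown. As a rough guide only, a minimal TF1-style reconstruction of its `linear` and `conv2d` wrappers might look like the sketch below; the kernel size, stride, and argument defaults are assumptions, not the project's actual API.

import tensorflow as tf

# Hypothetical reconstruction of the project's `layers` helpers (assumed, not actual).
def linear(x, output_dim, scope='linear', w_initializer=None,
           b_initializer=tf.zeros_initializer()):
    with tf.variable_scope(scope):
        w = tf.get_variable('w', [int(x.shape[-1]), output_dim],
                            initializer=w_initializer)
        b = tf.get_variable('b', [output_dim], initializer=b_initializer)
        return tf.matmul(x, w) + b

def conv2d(x, output_channels, k=4, s=2, name='conv2d'):
    # A 4x4 kernel with stride 2 is a common GAN-encoder default; assumed here.
    with tf.variable_scope(name):
        w = tf.get_variable('w', [k, k, int(x.shape[-1]), output_channels])
        b = tf.get_variable('b', [output_channels],
                            initializer=tf.zeros_initializer())
        return tf.nn.conv2d(x, w, strides=[1, s, s, 1], padding='SAME') + b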
Example #2
    def encoder(self, x, is_training=True, reuse=False):
        with tf.variable_scope("encoder", reuse=reuse):
            if self.verbose: print(x.shape)

            # Layer 0: Flattening
            net = layers.flatten(x)
            if self.verbose: print(net.shape, ": Flatten")

            # Layer 1
            net = layers.linear(net,
                                self.linear_dim,
                                scope='en_fc1',
                                w_initializer=self.w_initializer,
                                b_initializer=self.b_initializer)
            net = tf.nn.tanh(net)
            if self.verbose: print(net.shape)

            # Layer 2
            net = layers.linear(net,
                                self.linear_dim >> 1,
                                scope='en_fc2',
                                w_initializer=self.w_initializer,
                                b_initializer=self.b_initializer)
            net = tf.identity(net, name="main_out")
            if self.verbose: print(net.shape)
            return net
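The `>>` and `<<` shifts used for layer widths here and in the other snippets are plain integer halving and doubling. For instance, with an assumed `linear_dim` of 256 (an illustrative value, not one from the source), `en_fc2` ends up half as wide as `en_fc1`:

linear_dim = 256                 # illustrative value only
assert linear_dim >> 1 == 128    # width of en_fc2: half of en_fc1
assert linear_dim << 1 == 512    # a left shift would double it instead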
Example #3
    def encoder_v2(self, x, is_training=True, reuse=False):
        with tf.variable_scope("encoder_v2", reuse=reuse):
            if self.verbose: print(x.shape)

            # Layer 1
            net = layers.conv2d(x, self.conv_dim >> 1, name='en_conv1')
            net = tf.nn.leaky_relu(net)
            if self.verbose: print(net.shape)

            # Layer 2
            net = layers.conv2d(net, self.conv_dim, name='en_conv2')
            net = layers.batch_norm(net,
                                    is_training=is_training,
                                    scope='en_bn2')
            net = tf.nn.leaky_relu(net)
            if self.verbose: print(net.shape)

            # Layer 3
            net = layers.flatten(net)
            if self.verbose: print(net.shape)

            # Layer 4
            net = layers.linear(net, self.linear_dim, scope='en_fc3')
            net = layers.batch_norm(net,
                                    is_training=is_training,
                                    scope='en_bn3')
            net = tf.nn.leaky_relu(net)
            if self.verbose: print(net.shape)

            # Layer 5
            out_logit = layers.linear(net, self.latent_dim, scope='en_fc4')
            out = tf.nn.sigmoid(out_logit, name="main_out")
            if self.verbose: print(out.shape)

            return out
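`encoder_v2` adds batch normalization gated by `is_training`. Assuming the project's `layers.batch_norm` follows the usual TF1 convention of registering its moving-average updates in `tf.GraphKeys.UPDATE_OPS` (as `tf.layers.batch_normalization` does), the training op needs to depend on those updates. A self-contained sketch of that pattern, using stock TF1 layers rather than the project's wrappers:

import tensorflow as tf

# Minimal demo of the batch-norm/update-ops pattern (stand-ins, not the project's model).
x = tf.placeholder(tf.float32, [None, 8])
training = tf.placeholder(tf.bool)
h = tf.layers.batch_normalization(tf.layers.dense(x, 16), training=training)
loss = tf.reduce_mean(tf.square(h))

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):   # moving mean/variance update with every step
    train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)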
Example #4
    def encoder(self, x, is_training=True, reuse=False):
        with tf.variable_scope("encoder", reuse=reuse):
            if self.verbose: print(x.shape)

            # Encoder Space
            net = layers.flatten(x)
            if self.verbose: print(net.shape, ": Flatten")

            # Latent Space
            net = layers.linear(net, self.latent_dim, scope='en_fc1',
                                w_initializer=self.w_initializer,
                                b_initializer=self.b_initializer)
            net = tf.nn.relu(net, name="main_out")
            if self.verbose: print(net.shape)

            return net
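Every encoder on this page takes a `reuse` flag so that a second call can share the first call's weights inside the same `tf.variable_scope`. A standalone illustration of that mechanism (the tiny one-layer network below is made up for the demo and is not part of the source):

import tensorflow as tf

def tiny_encoder(x, reuse=False):
    # Same scoping pattern as the encoders above, with a made-up single layer.
    with tf.variable_scope("encoder", reuse=reuse):
        return tf.layers.dense(x, 4, name='en_fc1')

x_train = tf.placeholder(tf.float32, [None, 8])
x_eval = tf.placeholder(tf.float32, [None, 8])
z_train = tiny_encoder(x_train)              # creates encoder/en_fc1 variables
z_eval = tiny_encoder(x_eval, reuse=True)    # reuses them instead of duplicating

print(len(tf.trainable_variables()))         # 2: one kernel, one bias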
Example #5
    def encoder(self, x, is_training=True, reuse=False):
        with tf.variable_scope("encoder", reuse=reuse):
            if self.verbose: print(x.shape)

            net = tf.keras.layers.Conv2D(
                filters=32,
                kernel_size=[5, 5],
                strides=2,
                padding='same',
                kernel_initializer='glorot_uniform')(x)
            if self.verbose: print(net.shape)

            # net = tf.keras.layers.MaxPooling2D((2, 2), padding='same')(net)
            # if self.verbose: print(net.shape)

            net = tf.keras.layers.Conv2D(
                filters=16,
                kernel_size=[5, 5],
                strides=2,
                padding='same',
                kernel_initializer='glorot_uniform')(net)
            if self.verbose: print(net.shape)

            # net = tf.keras.layers.MaxPooling2D((2, 2), padding='same')(net)
            # if self.verbose: print(net.shape)

            # net = tf.keras.layers.Conv2D(filters    = 8,
            #                             kernel_size = [5,5],
            #                             strides     = 4,
            #                             padding     = 'same',
            #                             kernel_initializer='glorot_uniform')(net)
            # if self.verbose: print(net.shape)
            self.main_shape = [
                -1,
                int(net.shape[1]),
                int(net.shape[2]),
                int(net.shape[3])
            ]  # will be used by the decoder

            # net = tf.keras.layers.MaxPooling2D((2, 2), padding='same')(net)
            # if self.verbose: print(net.shape)

            net = layers.flatten(net)
            if self.verbose: print(net.shape)

            net = layers.linear(net,
                                self.latent_dim << 2,
                                scope='en_fc1',
                                w_initializer=self.w_initializer,
                                b_initializer=self.b_initializer)
            net = tf.nn.relu(net)
            if self.verbose: print(net.shape)

            net = layers.linear(net,
                                self.latent_dim << 1,
                                scope='en_fc2',
                                w_initializer=self.w_initializer,
                                b_initializer=self.b_initializer)
            net = tf.nn.relu(net)
            if self.verbose: print(net.shape)

            net = layers.linear(net,
                                self.latent_dim,
                                scope='en_fc3',
                                w_initializer=self.w_initializer,
                                b_initializer=self.b_initializer)
            net = tf.nn.relu(net, name='main_out')
            if self.verbose: print(net.shape)

            return net
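`self.main_shape` records the pre-flatten feature-map shape so the decoder can reshape back to it. The decoder itself is not shown on this page; a hedged sketch of how it might begin, mirroring the encoder above (layer sizes, the `layers.linear` signature, and the single transposed convolution are assumptions):

    def decoder(self, z, is_training=True, reuse=False):
        # Hypothetical sketch only, not the project's actual decoder.
        with tf.variable_scope("decoder", reuse=reuse):
            flat_dim = self.main_shape[1] * self.main_shape[2] * self.main_shape[3]
            net = layers.linear(z, flat_dim, scope='de_fc1',
                                w_initializer=self.w_initializer,
                                b_initializer=self.b_initializer)
            net = tf.nn.relu(net)
            # Undo the encoder's flatten using the shape recorded in encoder().
            net = tf.reshape(net, self.main_shape)
            # One stride-4 transposed conv to undo the two stride-2 convs (assumed).
            net = tf.keras.layers.Conv2DTranspose(
                filters=1, kernel_size=[5, 5], strides=4, padding='same',
                kernel_initializer='glorot_uniform')(net)
            return tf.nn.sigmoid(net, name="recon_out")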