def gan(self, x_image, reuse=False, features=False):

        with tf.variable_scope('gan') as scope:
            if reuse:
                scope.reuse_variables()
            c_i = x_image
            # Now c_i has shape batch_size x 32 x 32 x self.channels

            c_i = conv(c_i, 4, 2, 32 * self.k, name="gan_conv_1")
            # Now c_i has shape batch_size x 16 x 16 x 32*k

            c_i = lrelu_conv(c_i, 4, 2, 64 * self.k, name="gan_conv_2")
            # c_i shape is batch_size x 8 x 8 x 64*k

            c_i = bn_lrelu_conv(c_i, 4, 2, 128 * self.k, self.bn_settings, name="gan_conv_3")
            # c_i shape is batch_size x 4 x 4 x 128*k
            if features:
                return c_i

            c_i = bn_lrelu_conv(c_i, 4, 4, 1, self.bn_settings, name="gan_conv_fin")
            # c_i shape is batch_size x 1 x 1 x 1

            y = tf.reshape(c_i, shape=[self.batch_size, 1])

            return y
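The wrapper functions conv, lrelu_conv and bn_lrelu_conv used throughout these snippets are not defined here. Below is a minimal TF 1.x sketch of what they could look like, assuming the argument order used at the call sites and a bn_settings dict holding the batch-norm keyword arguments; the batch norm -> leaky ReLU -> conv ordering is inferred from the names, not from the original source.

import tensorflow as tf

def conv(x, filter_size, stride, out_channels, name):
    # Plain 2-D convolution with SAME padding; variables live in their
    # own scope so scope.reuse_variables() works as in the snippets.
    with tf.variable_scope(name):
        in_channels = x.get_shape().as_list()[-1]
        w = tf.get_variable('w', [filter_size, filter_size, in_channels, out_channels],
                            initializer=tf.truncated_normal_initializer(stddev=0.02))
        b = tf.get_variable('b', [out_channels], initializer=tf.zeros_initializer())
        return tf.nn.conv2d(x, w, strides=[1, stride, stride, 1], padding='SAME') + b

def lrelu(x, leak=0.2):
    # Leaky ReLU.
    return tf.maximum(x, leak * x)

def lrelu_conv(x, filter_size, stride, out_channels, name):
    # Leaky ReLU on the incoming activations, then a convolution.
    return conv(lrelu(x), filter_size, stride, out_channels, name)

def bn_lrelu_conv(x, filter_size, stride, out_channels, bn_settings, name):
    # Batch norm -> leaky ReLU -> convolution.
    x = tf.contrib.layers.batch_norm(x, scope=name + '_bn', **bn_settings)
    return conv(lrelu(x), filter_size, stride, out_channels, name)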
Example #2
    def gan(self, x_image, reuse=False, features=False):
        f = []
        with tf.variable_scope('gan') as scope:
            if reuse:
                scope.reuse_variables()

            c_i = x_image
            c_i = tf.reshape(c_i, shape=[self.batch_size, 28, 28, 1])
            # Now c_i has shape batch_size x 28 x 28 x 1

            c_i = conv(c_i, 3, 1, 16 * self.k, name="enc_conv_1")
            # Now c_i has shape batch_size x 28 x 28 x 16*k

            c_i = bn_lrelu_conv(c_i, 4, 2, 32 * self.k, self.bn_settings, name="gan_conv_1")
            # Now c_i has shape batch_size x 14 x 14 x 32*k
            f.append(c_i)
            if features:
                return f
            c_i = bn_lrelu_conv(c_i, 4, 2, 64 * self.k, self.bn_settings, name="gan_conv_2")
            # Now c_i has shape batch_size x 7 x 7 x 64*k

            c_i = bn_lrelu_conv(c_i, 7, 7, 1, self.bn_settings, name="gan_conv_fin")
            # Now c_i has shape batch_size x 1 x 1 x 1

            y = tf.reshape(c_i, shape=[self.batch_size, 1])
            return y
    def decoder(self, z, reuse=False):
        with tf.variable_scope('decoder') as scope:
            if reuse:
                scope.reuse_variables()
            z_dim = self.z_dim
            if self.y_dim:
                z = tf.concat([z, self.y_labels], 1)
                z_dim = self.z_dim + self.y_dim

            c_i = tf.reshape(z, shape=[self.batch_size, 1, 1, z_dim])
            # Now c_i has shape batch_size x 1 x 1 x z_dim(+y_dim)

            c_i = conv(c_i,
                       filter_size=1,
                       stride=1,
                       out_channels=2048 * self.k,
                       name="dec_conv_1")
            c_i = tf.reshape(c_i, shape=[self.batch_size, 4, 4, 128 * self.k])
            # Now c_i has shape batch_size x 4 x 4 x 128*k

            c_i = bn_lrelu_conv(c_i,
                                3,
                                1,
                                256 * self.k,
                                self.bn_settings,
                                name="dec_conv_2")
            c_i = PS(c_i, 2, 64 * self.k)
            # Now c_i has shape batch_size x 8 x 8 x 64*k

            c_i = bn_lrelu_conv(c_i,
                                3,
                                1,
                                128 * self.k,
                                self.bn_settings,
                                name="dec_conv_3")
            c_i = PS(c_i, 2, 32 * self.k)
            # Now c_i has shape batch_size x 16 x 16 x 32*k

            c_i = bn_lrelu_conv(c_i,
                                3,
                                1,
                                64,
                                self.bn_settings,
                                name="dec_conv_4")
            c_i = PS(c_i, 2, 16)
            # Now c_i has shape batch_size x 32 x 32 x 16

            c_i = bn_lrelu_conv(c_i,
                                3,
                                1,
                                self.channels,
                                self.bn_settings,
                                name="dec_conv_fin")

            y_image = tf.nn.tanh(c_i)
            return y_image
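The decoder above upsamples with PS, the periodic shuffling ("pixel shuffle" / sub-pixel convolution) operation. Its definition is not included in these snippets; assuming PS(X, r, out_channels) turns a batch_size x H x W x (r*r*out_channels) tensor into batch_size x (r*H) x (r*W) x out_channels, as the shape comments suggest, a minimal sketch is:

def PS(x, r, out_channels):
    # Sub-pixel upsampling: move r*r channel blocks into space, turning
    # (B, H, W, r*r*out_channels) into (B, r*H, r*W, out_channels).
    # tf.depth_to_space performs this rearrangement (up to the ordering
    # of the input channel blocks); out_channels is kept only to make
    # the expected output depth explicit at the call site.
    return tf.depth_to_space(x, r)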
Example #4
    def encoder(self):
        c_i = self.x_image
        # c_i shape is batch_size x 128 x 128 x self.channels

        c_i = conv(c_i, 4, 2, 32 * self.k, name="enc_conv_1")
        # c_i shape is batch_size x 64 x 64 x 32*k

        c_i = bn_lrelu_conv(c_i,
                            4,
                            2,
                            64 * self.k,
                            self.bn_settings,
                            name="enc_conv_2")
        # c_i shape is batch_size x 32 x 32 x 64*k

        c_i = bn_lrelu_conv(c_i,
                            4,
                            2,
                            128 * self.k,
                            self.bn_settings,
                            name="enc_conv_3")
        # c_i shape is batch_size x 16 x 16 x 128*k

        c_i = bn_lrelu_conv(c_i,
                            4,
                            2,
                            256 * self.k,
                            self.bn_settings,
                            name="enc_conv_4")
        # c_i shape is batch_size x 8 x 8 x 256*k

        c_i = bn_lrelu_conv(c_i,
                            4,
                            2,
                            512 * self.k,
                            self.bn_settings,
                            name="enc_conv_5")
        # c_i shape is batch_size x 4 x 4 x 512*k

        c_i = bn_lrelu_conv(c_i,
                            4,
                            4,
                            self.z_dim,
                            self.bn_settings,
                            name="enc_conv_6")
        # c_i shape is batch_size x 1 x 1 x z_dim

        z = tf.reshape(c_i, shape=[self.batch_size, self.z_dim])
        return z
Example #5
    def encoder(self):
        c_i = self.x_image
        c_i = tf.reshape(c_i, shape=[self.batch_size, 64, 64, 1])
        # Now c_i has shape batch_size x 64 x 64 x 1

        c_i = conv(c_i,
                   filter_size=3,
                   stride=1,
                   out_channels=16 * self.k,
                   name="enc_conv_1")
        # Now c_i has shape batch_size x 64 x 64 x 16*k

        c_i = lrelu_conv(c_i, 4, 2, 32 * self.k, name="enc_conv_2")
        # Now c_i has shape batch_size x 32 x 32 x 32*k

        c_i = bn_lrelu_conv(c_i,
                            4,
                            2,
                            64 * self.k,
                            self.bn_settings,
                            name="enc_conv_3")
        # Now c_i has shape batch_size x 16 x 16 x 64*k

        c_i = bn_lrelu_conv(c_i,
                            4,
                            2,
                            128 * self.k,
                            self.bn_settings,
                            name="enc_conv_4")
        # Now c_i has shape batch_size x 8 x 8 x 128*k

        c_i = bn_lrelu_conv(c_i,
                            4,
                            2,
                            256 * self.k,
                            self.bn_settings,
                            name="enc_conv_5")
        # Now c_i has shape batch_size x 4 x 4 x 256*k

        c_i = bn_lrelu_conv(c_i,
                            4,
                            4,
                            self.z_dim,
                            self.bn_settings,
                            name="enc_conv_fin")
        # Now c_i has shape batch_size x 1 x 1 x z_dim

        z = tf.reshape(c_i, shape=[self.batch_size, self.z_dim])
        return z
Example #6
    def decoder(self, z, reuse=False):
        with tf.variable_scope('decoder') as scope:
            if reuse:
                scope.reuse_variables()
            z_dim = self.z_dim
            if self.y_dim:
                z = tf.concat([z, self.y_labels], 1)
                z_dim = self.z_dim + self.y_dim

            c_i = tf.reshape(z, shape=[self.batch_size, 1, 1, z_dim])
            # Now c_i has shape batch_size x 1 x 1 x z_dim(+y_dim)

            c_i = conv(c_i, 1, 1, 7*7*64*self.k, name="dec_conv_1")
            c_i = tf.reshape(c_i, shape=[self.batch_size, 7, 7, 64*self.k])
            # Now c_i has shape batch_size x 7 x 7 x 64*k

            c_i = bn_lrelu_tconv(c_i, 4, 2, self.batch_size, 32 * self.k,
                                 self.bn_settings, name="dec_tconv_2")
            # Now c_i has shape batch_size x 14 x 14 x 32*k

            c_i = bn_lrelu_tconv(c_i, 4, 2, self.batch_size, 16*self.k,
                                 self.bn_settings, name="dec_tconv_3")
            # Now c_i has shape batch_size x 28 x 28 x 16*k
            c_i = bn_lrelu_conv(c_i, 3, 1, 1,
                                self.bn_settings, name="dec_conv_fin")
            c_i = tf.reshape(c_i, shape=[self.batch_size, 784])
            y_image = tf.nn.sigmoid(c_i)
            return y_image
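bn_lrelu_tconv, used here instead of PS, is also not defined in these snippets. A sketch consistent with its call sites, assuming the order (input, filter_size, stride, batch_size, out_channels, bn_settings, name) and the same batch norm -> leaky ReLU pre-activation as bn_lrelu_conv; batch_size is passed because tf.nn.conv2d_transpose needs an explicit output_shape:

def bn_lrelu_tconv(x, filter_size, stride, batch_size, out_channels, bn_settings, name):
    # Batch norm -> leaky ReLU -> transposed convolution that multiplies
    # the spatial size by `stride` (SAME padding).
    x = tf.contrib.layers.batch_norm(x, scope=name + '_bn', **bn_settings)
    x = tf.maximum(x, 0.2 * x)  # leaky ReLU
    in_h, in_w, in_channels = x.get_shape().as_list()[1:]
    with tf.variable_scope(name):
        w = tf.get_variable('w', [filter_size, filter_size, out_channels, in_channels],
                            initializer=tf.truncated_normal_initializer(stddev=0.02))
    output_shape = [batch_size, in_h * stride, in_w * stride, out_channels]
    return tf.nn.conv2d_transpose(x, w, output_shape=output_shape,
                                  strides=[1, stride, stride, 1], padding='SAME')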
    def encoder(self):
        c_i = self.x_image
        # Now c_i has shape batch_size x 32 x 32 x self.channels

        c_i = conv(c_i, 4, 2, 32*self.k, name="enc_conv_1")
        # Now c_i has shape batch_size x 16 x 16 x 32*k

        c_i = lrelu_conv(c_i, 4, 2, 64*self.k, name="enc_conv_2")
        # Now c_i has shape batch_size x 8 x 8 x 64*k

        c_i = bn_lrelu_conv(c_i, 4, 2, 128*self.k, self.bn_settings, name="enc_conv_3")
        # Now c_i has shape batch_size x 4 x 4 x 128*k

        c_i = bn_lrelu_conv(c_i, 4, 4, self.z_dim, self.bn_settings, name="enc_conv_fin")
        # Now c_i has shape batch_size x 1 x 1 x z_dim

        z = tf.reshape(c_i, shape=[self.batch_size, self.z_dim])
        return z
Example #8
    def encoder(self):
        c_i = self.x_image
        c_i = tf.reshape(c_i, shape=[self.batch_size, 28, 28, 1])
        # Now c_i has shape batch_size x 28 x 28 x 1

        c_i = conv(c_i, 3, 1, 16*self.k, name="enc_conv_1")
        # Now c_i has shape batch_size x 28 x 28 x 16*k

        c_i = bn_lrelu_conv(c_i, 4, 2, 32*self.k, self.bn_settings, name="enc_conv_2")
        # Now c_i has shape batch_size x 14 x 14 x 32*k

        c_i = bn_lrelu_conv(c_i, 4, 2, 64*self.k, self.bn_settings, name="enc_conv_3")
        # Now c_i has shape batch_size x 7 x 7 x 64*k

        c_i = bn_lrelu_conv(c_i, 7, 7, self.z_dim, self.bn_settings, name="enc_conv_fin")
        # Now c_i has shape batch_size x 1 x 1 x z_dim

        z = tf.reshape(c_i, shape=[self.batch_size, self.z_dim])
        return z
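All of these methods assume a model object that carries self.batch_size, self.k, self.z_dim, self.channels, self.bn_settings and the input tensor self.x_image. A purely illustrative sketch (the wiring, variable names and loss are assumptions, not taken from the original code) of how the MNIST encoder, decoder and gan discriminator above might be combined:

# Hypothetical wiring; `model` is an instance of the class these methods belong to.
z = model.encoder()                      # batch_size x z_dim
x_rec = model.decoder(z)                 # batch_size x 784, sigmoid output
x_flat = tf.reshape(model.x_image, [model.batch_size, 784])
rec_loss = tf.reduce_mean(tf.square(x_rec - x_flat))

# The discriminator is typically applied twice with shared weights:
d_real = model.gan(model.x_image)        # first call creates the variables
d_fake = model.gan(x_rec, reuse=True)    # second call reuses them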