Example #1
    def __init__(self,
                 sess=tf.Session(),
                 image_size=(32, 32),
                 z_size=201,
                 n_iterations=50,
                 dataset="None",
                 batch_size=64,
                 lrate=0.002,
                 d_size=64):

        self.image_size = image_size
        self.n_iterations = n_iterations
        self.batch_size = batch_size
        self.lrate = lrate
        self.session = sess
        self.base_dim = 512
        self.d_size = 32  # note: overrides the d_size constructor argument
        self.z_size = z_size
        self.tau = 1
        self.dataset_name = dataset
        #self.alpha = tf.constant(1e-6)
        self.alpha = tf.constant(0.0)
        self.beta = tf.constant(0.0)
        self.size = image_size[0]
        self.logpath = "log"

        self.d_bn0 = ops.BatchNormalization([256], 'd_bn0')
        #self.d_bn0 = ops.BatchNormalization([self.d_size], 'd_bn0')
        self.d_bn1 = ops.BatchNormalization([self.d_size * 2], 'd_bn1')
        self.d_bn2 = ops.BatchNormalization([self.d_size * 4], 'd_bn2')

        self.history = {}
        self.history["generator"] = []
        self.history["discriminator_real"] = []
        self.history["discriminator_fake"] = []

        with tf.variable_scope('inverse_prgan'):
            self.images = tf.placeholder(
                tf.float32,
                shape=[batch_size, image_size[0], image_size[1], 1],
                name='final_image')
            self.encodings = tf.placeholder(tf.float32,
                                            shape=[batch_size, self.z_size],
                                            name="encoding")

        self.train_flag = tf.placeholder(tf.bool)

        with tf.variable_scope('inverse_prgan'):
            self.z = self.encode(self.images, self.train_flag)
            self.loss = ops.l2(self.encodings, self.z)
            self.optimizer = tf.train.AdamOptimizer(1e-5, beta1=0.9).minimize(
                self.loss)

            self.saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)
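
The constructor above only builds the graph; nothing happens until the session runs the optimizer. Below is a minimal driver sketch, not part of the original example: the class name `Encoder` is a placeholder (the real class name is not shown here), and the batches are random stand-ins for actual data of the declared shapes.

import numpy as np
import tensorflow as tf

# Hypothetical usage of the constructor above; "Encoder" is a placeholder
# class name and the arrays below are random stand-ins for real data.
model = Encoder(sess=tf.Session(), image_size=(32, 32), z_size=201, batch_size=64)
model.session.run(tf.global_variables_initializer())

for step in range(model.n_iterations):
    image_batch = np.random.rand(model.batch_size, 32, 32, 1)
    encoding_batch = np.random.rand(model.batch_size, model.z_size)
    # Minimize the L2 loss between the target encodings and the encoder output.
    _, loss = model.session.run(
        [model.optimizer, model.loss],
        feed_dict={model.images: image_batch,
                   model.encodings: encoding_batch,
                   model.train_flag: True})
    print("step %d, l2 loss %.6f" % (step, loss))
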
Example #2
    def __init__(self,
                 sess=tf.Session(),
                 image_size=(32, 32),
                 z_size=201,
                 n_iterations=50,
                 dataset="None",
                 batch_size=64,
                 lrate=0.002,
                 d_size=64):

        self.image_size = image_size
        self.n_iterations = n_iterations
        self.batch_size = batch_size
        self.lrate = lrate
        self.session = sess
        self.base_dim = 512
        self.d_size = 256  # note: overrides the d_size constructor argument
        self.z_size = z_size
        self.tau = 1
        self.dataset_name = dataset
        #self.alpha = tf.constant(1e-6)
        self.alpha = tf.constant(0.0)
        self.beta = tf.constant(0.0)
        self.size = image_size[0]
        self.logpath = "log"

        self.g_bn0 = ops.BatchNormalization([self.d_size], 'g_bn0')
        self.g_bn1 = ops.BatchNormalization([self.d_size // 2], 'g_bn1')
        self.g_bn2 = ops.BatchNormalization([self.d_size // 4], 'g_bn2')
        self.g_bn3 = ops.BatchNormalization([self.d_size // 8], 'g_bn3')

        self.d_bn0 = ops.BatchNormalization([self.d_size], 'd_bn0')
        self.d_bn1 = ops.BatchNormalization([self.d_size * 2], 'd_bn1')
        self.d_bn2 = ops.BatchNormalization([self.d_size * 4], 'd_bn2')

        self.history = {}
        self.history["generator"] = []
        self.history["discriminator_real"] = []
        self.history["discriminator_fake"] = []

        with tf.variable_scope('gan'):
            self.images = tf.placeholder(
                tf.float32,
                shape=[batch_size, image_size[0], image_size[1], 1],
                name='final_image')
            self.z = tf.placeholder(tf.float32,
                                    shape=[batch_size, self.z_size],
                                    name='z')

        self.train_flag = tf.placeholder(tf.bool)
        self.G = self.generator(self.z, self.train_flag)

        with tf.variable_scope('gan'):
            self.D_real, self.D_real_logits, self.D_stats_real = self.discriminator(
                self.images, self.train_flag)
            self.D_fake, self.D_fake_logits, self.D_stats_fake = self.discriminator(
                self.G, self.train_flag, reuse=True)

            self.D_loss_real = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=self.D_real_logits,
                    labels=tf.ones_like(self.D_real)))
            self.D_loss_fake = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=self.D_fake_logits,
                    labels=tf.zeros_like(self.D_fake)))
            self.G_loss_classic = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=self.D_fake_logits,
                    labels=tf.ones_like(self.D_fake)))
            # Feature-matching generator loss: match the batch mean and variance
            # of the discriminator's statistics on real vs. generated data.
            dr_mean, dr_var = tf.nn.moments(self.D_stats_real, axes=[0])
            dl_mean, dl_var = tf.nn.moments(self.D_stats_fake, axes=[0])
            self.G_loss = ops.l2(dr_mean, dl_mean)
            self.G_loss += ops.l2(dr_var, dl_var)
            #print(self.G_loss.get_shape())
            self.D_loss = self.D_loss_real + self.D_loss_fake

            allvars = tf.trainable_variables()
            self.D_vars = [v for v in allvars if 'd_' in v.name]
            self.G_vars = [v for v in allvars if 'g_' in v.name]

            self.D_optim = tf.train.AdamOptimizer(1e-4, beta1=0.5).minimize(
                self.D_loss, var_list=self.D_vars)
            self.G_optim = tf.train.AdamOptimizer(0.0025, beta1=0.5).minimize(
                self.G_loss, var_list=self.G_vars)
            self.G_optim_classic = tf.train.AdamOptimizer(
                0.0025, beta1=0.5).minimize(self.G_loss_classic,
                                            var_list=self.G_vars)

            self.saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)
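
A corresponding training-step sketch for this GAN constructor, again hypothetical: `GAN` is a placeholder class name and the batches are random stand-ins. The feature-matching generator loss compares discriminator statistics on real and generated data, so both the real images and a z batch must be in the feed for the generator update.

import numpy as np
import tensorflow as tf

# Hypothetical usage; "GAN" is a placeholder class name and the batches are
# random stand-ins for real training data.
gan = GAN(sess=tf.Session(), image_size=(32, 32), z_size=201, batch_size=64)
gan.session.run(tf.global_variables_initializer())

real_batch = np.random.rand(gan.batch_size, 32, 32, 1)
z_batch = np.random.uniform(-1.0, 1.0, size=(gan.batch_size, gan.z_size))
feed = {gan.images: real_batch, gan.z: z_batch, gan.train_flag: True}

# One discriminator update, then one feature-matching generator update.
_, d_loss = gan.session.run([gan.D_optim, gan.D_loss], feed_dict=feed)
_, g_loss = gan.session.run([gan.G_optim, gan.G_loss], feed_dict=feed)
print("D loss %.4f, G loss %.4f" % (d_loss, g_loss))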