Code example #1
File: wgan.py Project: sahpat229/WGAN
    def build_v3(self):
        """
		Normal Improved WGAN, with only real vs fake (no classes)
		"""
        self.z = tf.placeholder(tf.float32,
                                shape=[self.batch_size, self.data.latent_dim])
        with tf.variable_scope("generator") as scope:
            self.generator_output = Generator.generator(
                self.z, self.is_training, self.gen_var_coll, self.gen_upd_coll)

        with tf.variable_scope("discriminator") as scope:
            disc_output_x = Discriminator.discriminator_v3(
                self.x, self.batch_size, self.disc_var_coll)
            scope.reuse_variables()
            disc_output_gz = Discriminator.discriminator_v3(
                self.generator_output, self.batch_size, self.disc_var_coll)
            interpolates = tf.multiply(self.epsilon, self.x) + tf.multiply(
                1 - self.epsilon, self.generator_output)
            disc_interpolates = Discriminator.discriminator_v3(
                interpolates, self.batch_size, self.disc_var_coll)

        self.generator_loss = -tf.reduce_mean(disc_output_gz)
        self.disc_loss = -tf.reduce_mean(disc_output_x) + tf.reduce_mean(
            disc_output_gz)

        gradients = tf.gradients(disc_interpolates, [interpolates])[0]
        self.slopes = tf.sqrt(
            tf.reduce_sum(tf.square(gradients), axis=1))
        self.gradient_penalty = tf.reduce_mean((self.slopes - 1)**2)
        self.disc_loss += self.lambdah * self.gradient_penalty
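
self.epsilon and self.lambdah are used above but not defined in this excerpt. A minimal sketch of how they could be set up, assuming self.x is flattened to shape [batch_size, dim]; the per-example uniform draw and the penalty weight of 10 follow the WGAN-GP paper (Gulrajani et al., 2017), not code from this project:

        # Hypothetical setup (not in this excerpt): one uniform draw per
        # example, broadcast across features in the interpolation above.
        self.epsilon = tf.random_uniform(
            shape=[self.batch_size, 1], minval=0., maxval=1.)
        self.lambdah = 10.0  # gradient-penalty weight suggested by WGAN-GP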
Code example #2
File: wgan.py Project: sahpat229/WGAN
    def build_v1(self):
        """
		Version 1:
		- D(x) has [num_classes+1] outputs
		- D(x) uses one_hot class vector to compute its loss,
		- G(z) uses one_hot class vector to generate, one_hot is the first [num_classes] elements of z
		"""

        self.z = tf.placeholder(
            tf.float32, shape=[self.batch_size, self.data.latent_output_size])
        with tf.variable_scope("generator") as scope:
            self.generator_output = Generator.generator(
                self.z, self.is_training, self.gen_var_coll, self.gen_upd_coll)

        with tf.variable_scope("discriminator") as scope:
            disc_output_x = Discriminator.discriminator_v1(
                self.x, self.batch_size, self.num_classes, self.disc_var_coll)
            scope.reuse_variables()
            disc_output_gz = Discriminator.discriminator_v1(
                self.generator_output, self.batch_size, self.num_classes,
                self.disc_var_coll)
            interpolates = tf.multiply(self.epsilon, self.x) + tf.multiply(
                1 - self.epsilon, self.generator_output)
            disc_interpolates = Discriminator.discriminator_v1(
                interpolates, self.batch_size, self.num_classes,
                self.disc_var_coll)

        # disc_output_gz (the discriminator applied to self.generator_output)
        # will be of shape:
        #     [batch_size, num_classes+1]
        # labels will be of shape:
        #     [batch_size, num_classes+1]

        self.generator_loss = (
            tf.reduce_sum(tf.multiply(disc_output_gz, self.zlabels), axis=1) +
            tf.reduce_sum(tf.multiply(disc_output_gz, self.zlabels - 1), axis=1))
        batch_gen_loss = self.generator_loss
        self.generator_loss = tf.reduce_mean(self.generator_loss)

        self.disc_loss = (
            tf.reduce_sum(tf.multiply(disc_output_x, self.xlabels), axis=1) +
            tf.reduce_sum(tf.multiply(disc_output_x, self.xlabels - 1), axis=1) -
            batch_gen_loss)
        self.disc_loss = tf.reduce_mean(self.disc_loss)

        # tf.gradients returns a list; take the single gradient tensor with [0]
        gradients_per_dim = [
            tf.gradients(
                tf.slice(disc_interpolates, [0, i], [self.batch_size, 1]),
                [interpolates])[0] for i in range(self.num_classes + 1)
        ]
        slopes_per_dim = [
            tf.sqrt(tf.reduce_sum(tf.square(gradients_per_dim[i]), axis=1))
            for i in range(self.num_classes + 1)
        ]
        gradient_penalty_per_dim = [
            tf.reduce_mean((slopes_per_dim[i] - 1)**2)
            for i in range(self.num_classes + 1)
        ]

        total_grad_penalty = tf.add_n(gradient_penalty_per_dim)
        self.disc_loss += self.lambdah * total_grad_penalty
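
self.xlabels and self.zlabels are consumed by the losses above but declared elsewhere in the class. Going by the shape comment in the code, a plausible declaration, assuming the labels are fed as one-hot rows with one extra real/fake column, would be:

        # Hypothetical placeholders; shapes follow the comment in build_v1.
        self.xlabels = tf.placeholder(
            tf.float32, shape=[self.batch_size, self.num_classes + 1])
        self.zlabels = tf.placeholder(
            tf.float32, shape=[self.batch_size, self.num_classes + 1])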
Code example #3
File: wgan.py Project: sahpat229/WGAN
    def build_v2(self):
        """
		Version 2:
		- D(x) has 1 output
		- D(x) takes in the one_hot class vector as an input to compute that 1 output
		- G(z) takes in the one_hot class vector as before in Version 1
		"""
        self.z = tf.placeholder(
            tf.float32, shape=[self.batch_size, self.data.latent_output_size])
        with tf.variable_scope("generator") as scope:
            self.generator_output = Generator.generator(
                self.z, self.is_training, self.gen_var_coll, self.gen_upd_coll)

        with tf.variable_scope("discriminator") as scope:
            disc_output_x = Discriminator.discriminator_v2(
                self.x, self.xlabels, self.batch_size, self.disc_var_coll, 50)
            scope.reuse_variables()
            disc_output_gz = Discriminator.discriminator_v2(
                self.generator_output, self.zlabels, self.batch_size,
                self.disc_var_coll, 50)
            interpolates = tf.multiply(self.epsilon, self.x) + tf.multiply(
                1 - self.epsilon, self.generator_output)
            disc_interpolates = Discriminator.discriminator_v2(
                interpolates, self.xlabels, self.batch_size,
                self.disc_var_coll, 50)

        self.generator_loss = tf.reduce_mean(disc_output_gz)
        self.disc_loss = tf.reduce_mean(disc_output_x) - self.generator_loss

        gradients = tf.gradients(disc_interpolates, [interpolates])[0]
        slopes = tf.sqrt(
            tf.reduce_sum(tf.square(gradients), axis=1))
        gradient_penalty = tf.reduce_mean((slopes - 1)**2)
        self.disc_loss += self.lambdah * gradient_penalty
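
None of the three builders creates training ops. A rough sketch of how build_v3's losses might be optimized, assuming gen_var_coll and disc_var_coll are collection names whose entries are the respective tf.Variables; the Adam hyperparameters are illustrative values in the range the WGAN-GP paper uses, not taken from this project:

        # Hypothetical training ops for build_v3 (not part of this excerpt).
        disc_vars = tf.get_collection(self.disc_var_coll)
        gen_vars = tf.get_collection(self.gen_var_coll)
        self.disc_train_op = tf.train.AdamOptimizer(
            learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(
                self.disc_loss, var_list=disc_vars)
        self.gen_train_op = tf.train.AdamOptimizer(
            learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(
                self.generator_loss, var_list=gen_vars)

In practice the discriminator is typically updated several times (e.g. five) per generator step, as in the WGAN-GP training procedure.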