Example #1
    def build_mrgan(self):
        # Generator
        self.g = self.generator(self.z)
        self.g_reg = self.generator(self.encoder(self.x), reuse=True)

        # Discriminator
        d_real = self.discriminator(self.x)
        d_real_reg = self.discriminator(self.g_reg, reuse=True)
        d_fake = self.discriminator(self.g, reuse=True)

        # Losses
        # Manifold Step
        # d_loss_1 = tf.reduce_mean(t.safe_log(d_real) + t.safe_log(1. - d_real_reg))
        # g_loss_1 = tf.reduce_mean(self.lambda_1 * t.safe_log(d_real_reg)) - \
        #            t.mse_loss(self.x, self.g_reg, self.batch_size)
        # Diffusion Step
        # d_loss_2 = tf.reduce_mean(t.safe_log(d_real_reg) + t.safe_log(1. - d_fake))
        # g_loss_2 = tf.reduce_mean(t.safe_log(d_fake))

        d_real_loss = -tf.reduce_mean(t.safe_log(d_real))
        d_fake_loss = -tf.reduce_mean(t.safe_log(1.0 - d_fake))
        self.d_loss = d_real_loss + d_fake_loss
        e_mse_loss = self.lambda_1 * t.mse_loss(
            self.x, self.g_reg, self.batch_size, is_mean=True)
        e_adv_loss = self.lambda_2 * tf.reduce_mean(t.safe_log(d_real_reg))
        self.e_loss = e_adv_loss + e_mse_loss
        self.g_loss = -tf.reduce_mean(t.safe_log(d_fake)) + self.e_loss

        # Summary
        tf.summary.scalar("loss/d_real_loss", d_real_loss)
        tf.summary.scalar("loss/d_fake_loss", d_fake_loss)
        tf.summary.scalar("loss/d_loss", self.d_loss)
        tf.summary.scalar("loss/e_adv_loss", e_adv_loss)
        tf.summary.scalar("loss/e_mse_loss", e_mse_loss)
        tf.summary.scalar("loss/e_loss", self.e_loss)
        tf.summary.scalar("loss/g_loss", self.g_loss)

        # Collect trainable variables for each sub-network
        t_vars = tf.trainable_variables()
        d_params = [v for v in t_vars if v.name.startswith('d')]
        g_params = [v for v in t_vars if v.name.startswith('g')]
        e_params = [v for v in t_vars if v.name.startswith('e')]

        # Optimizer
        self.d_op = tf.train.AdamOptimizer(learning_rate=self.lr,
                                           beta1=self.beta1).minimize(
                                               self.d_loss, var_list=d_params)
        self.g_op = tf.train.AdamOptimizer(learning_rate=self.lr,
                                           beta1=self.beta1).minimize(
                                               self.g_loss, var_list=g_params)
        self.e_op = tf.train.AdamOptimizer(learning_rate=self.lr,
                                           beta1=self.beta1).minimize(
                                               self.e_loss, var_list=e_params)

        # Merge summary
        self.merged = tf.summary.merge_all()

        # Model Saver
        self.saver = tf.train.Saver(max_to_keep=1)
        self.writer = tf.summary.FileWriter('./model/', self.s.graph)
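All of these examples lean on a small helper module `t` (providing `safe_log`, `mse_loss`, `sce_loss`, and an `eps` constant). A minimal sketch of what such a `safe_log` typically looks like; the epsilon value and the offset-versus-clip choice are assumptions, not the repository's actual helper:

    import tensorflow as tf

    def safe_log(x, eps=1e-12):
        # Offset the argument so tf.log never receives exactly zero;
        # the real helper may clip to [eps, 1] instead.
        return tf.log(x + eps)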
Example #2
    def build_infogan(self):
        # Generator
        self.g = self.generator(self.z, self.c)
        # self.g_test = self.generator(self.z, self.c, reuse=True, is_train=False)

        # Discriminator
        d_real, d_real_cont, d_real_cat = self.discriminator(self.x)
        d_fake, d_fake_cont, d_fake_cat = self.discriminator(self.g,
                                                             reuse=True)

        # Losses
        self.d_adv_loss = -tf.reduce_mean(
            t.safe_log(d_real) + t.safe_log(1.0 - d_fake))

        # Continuous code: fixed-variance Gaussian-style penalty on the
        # continuous head.
        d_cont_loss = tf.reduce_mean(tf.square(d_fake_cont / 0.5))
        # Categorical code: cross-entropy-style term between the sampled
        # one-hot code and the categorical head (assumed log-probabilities).
        cat = self.c[:, self.n_cont:]
        d_cat_loss = -(tf.reduce_mean(tf.reduce_sum(cat * d_fake_cat, axis=1)) +
                       tf.reduce_mean(cat * cat))

        d_info_loss = self.lambda_ * (d_cont_loss + d_cat_loss)

        self.d_loss = self.d_adv_loss + d_info_loss
        self.g_loss = -tf.reduce_mean(t.safe_log(d_fake)) + d_info_loss

        # Summary
        tf.summary.scalar("loss/d_adv_loss", self.d_adv_loss)
        tf.summary.scalar("loss/d_cont_loss", d_cont_loss)
        tf.summary.scalar("loss/d_cat_loss", d_cat_loss)
        tf.summary.scalar("loss/d_loss", self.d_loss)
        tf.summary.scalar("loss/g_loss", self.g_loss)

        # Optimizer
        t_vars = tf.trainable_variables()
        d_params = [v for v in t_vars if v.name.startswith('d')]
        g_params = [v for v in t_vars if v.name.startswith('g')]

        self.d_op = tf.train.AdamOptimizer(learning_rate=self.d_lr,
                                           beta1=self.beta1,
                                           beta2=self.beta2).minimize(
                                               self.d_loss, var_list=d_params)
        self.g_op = tf.train.AdamOptimizer(learning_rate=self.g_lr,
                                           beta1=self.beta1,
                                           beta2=self.beta2).minimize(
                                               self.g_loss, var_list=g_params)

        # Merge summary
        self.merged = tf.summary.merge_all()

        # Model saver
        self.saver = tf.train.Saver(max_to_keep=1)
        self.writer = tf.summary.FileWriter('./model/', self.s.graph)
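The conditioning vector `self.c` in this example concatenates `n_cont` continuous codes with a one-hot categorical code (hence the `self.c[:, self.n_cont:]` slice). A hypothetical sampler for such a code; the name `sample_c`, the `n_cat` parameter, and the uniform range are assumptions for illustration:

    import numpy as np

    def sample_c(batch_size, n_cont, n_cat):
        # Continuous codes ~ U(-1, 1), followed by a one-hot categorical code.
        cont = np.random.uniform(-1.0, 1.0, size=[batch_size, n_cont])
        cat = np.eye(n_cat)[np.random.randint(0, n_cat, size=batch_size)]
        return np.concatenate([cont, cat], axis=1).astype(np.float32)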
Example #3
    def build_cgan(self):
        # Generator
        self.g = self.generator(self.z, self.c, self.do_rate)

        # Discriminator
        d_real = self.discriminator(self.x, self.c, self.do_rate)
        d_fake = self.discriminator(self.g, self.c, self.do_rate, reuse=True)

        # Losses
        d_real_loss = -tf.reduce_mean(t.safe_log(d_real))
        d_fake_loss = -tf.reduce_mean(t.safe_log(1.0 - d_fake))
        self.d_loss = d_real_loss + d_fake_loss
        self.g_loss = -tf.reduce_mean(t.safe_log(d_fake))
        # d_real_loss = t.sce_loss(d_real, tf.ones_like(d_real))
        # d_fake_loss = t.sce_loss(d_fake, tf.zeros_like(d_fake))
        # self.d_loss = d_real_loss + d_fake_loss
        # self.g_loss = t.sce_loss(d_fake, tf.ones_like(d_fake))

        # Summary
        tf.summary.scalar("loss/d_real_loss", d_real_loss)
        tf.summary.scalar("loss/d_fake_loss", d_fake_loss)
        tf.summary.scalar("loss/d_loss", self.d_loss)
        tf.summary.scalar("loss/g_loss", self.g_loss)

        # Collect trainable variables for each sub-network
        t_vars = tf.trainable_variables()
        d_params = [v for v in t_vars if v.name.startswith('d')]
        g_params = [v for v in t_vars if v.name.startswith('g')]

        # Optimizer
        self.d_op = tf.train.AdamOptimizer(learning_rate=self.d_lr,
                                           beta1=self.beta1).minimize(
                                               self.d_loss, var_list=d_params)
        self.g_op = tf.train.AdamOptimizer(learning_rate=self.g_lr,
                                           beta1=self.beta1).minimize(
                                               self.g_loss, var_list=g_params)

        # Merge summary
        self.merged = tf.summary.merge_all()

        # Model saver
        self.saver = tf.train.Saver(max_to_keep=1)
        self.writer = tf.summary.FileWriter('./model/', self.s.graph)
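Each `build_*` method only assembles the graph; the ops still have to be driven by a session loop. A hedged sketch of one alternating update step for this cGAN; `model`, `num_steps`, `batch_size`, `z_dim`, and the data iterator are all assumed names:

    import numpy as np

    # Hypothetical driver loop for the graph built above; model.s is the
    # tf.Session that the FileWriter was given.
    for step in range(num_steps):
        batch_x, batch_c = next(train_iter)  # assumed (image, label) iterator
        batch_z = np.random.uniform(-1.0, 1.0, size=[batch_size, z_dim])
        feed = {model.x: batch_x, model.c: batch_c, model.z: batch_z,
                model.do_rate: 0.5}          # assumed dropout placeholder
        model.s.run(model.d_op, feed_dict=feed)               # D update
        _, summary = model.s.run([model.g_op, model.merged],  # G update + logs
                                 feed_dict=feed)
        model.writer.add_summary(summary, global_step=step)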
Example #4
    def build_bgan(self):
        # Generator
        self.g = self.generator(self.z)

        # Discriminator
        d_real, _ = self.discriminator(self.x)
        d_fake, _ = self.discriminator(self.g, reuse=True)

        # Losses
        d_real_loss = -tf.reduce_mean(t.safe_log(d_real))
        d_fake_loss = -tf.reduce_mean(t.safe_log(1.0 - d_fake))
        self.d_loss = d_real_loss + d_fake_loss
        # Boundary-seeking generator loss (BGAN): the squared logit of D,
        # E[(log D(G(z)) - log(1 - D(G(z))))^2] / 2, minimized at D = 0.5.
        self.g_loss = tf.reduce_mean(
            tf.square(t.safe_log(d_fake) - t.safe_log(1.0 - d_fake))) / 2.0

        # Summary
        tf.summary.scalar("loss/d_real_loss", d_real_loss)
        tf.summary.scalar("loss/d_fake_loss", d_fake_loss)
        tf.summary.scalar("loss/d_loss", self.d_loss)
        tf.summary.scalar("loss/g_loss", self.g_loss)

        # Optimizer
        t_vars = tf.trainable_variables()
        d_params = [v for v in t_vars if v.name.startswith('d')]
        g_params = [v for v in t_vars if v.name.startswith('g')]

        self.d_op = tf.train.AdamOptimizer(learning_rate=self.d_lr,
                                           beta1=self.beta1,
                                           beta2=self.beta2).minimize(
                                               self.d_loss, var_list=d_params)
        self.g_op = tf.train.AdamOptimizer(learning_rate=self.g_lr,
                                           beta1=self.beta1,
                                           beta2=self.beta2).minimize(
                                               self.g_loss, var_list=g_params)

        # Merge summary
        self.merged = tf.summary.merge_all()

        # Model saver
        self.saver = tf.train.Saver(max_to_keep=1)
        self.writer = tf.summary.FileWriter('./model/', self.s.graph)
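As a quick sanity check on the boundary-seeking objective above: the squared term is the logit of d, which vanishes exactly on the discriminator's decision boundary. A toy NumPy verification:

    import numpy as np

    d = np.linspace(0.01, 0.99, 99)
    logit = np.log(d) - np.log(1.0 - d)
    # The loss term 0.5 * logit**2 is smallest exactly where d == 0.5.
    assert np.argmin(0.5 * logit ** 2) == np.argmin(np.abs(d - 0.5))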
Example #5
    def build_gan(self):
        # Generator
        self.g = self.generator(self.z)

        # Discriminator
        d_real = self.discriminator(self.x)
        d_fake = self.discriminator(self.g, reuse=True)

        # The standard GAN loss from the original paper, kept for reference:
        """
        self.g_loss = -tf.reduce_mean(t.safe_log(d_fake))
        self.d_loss = -tf.reduce_mean(t.safe_log(d_real) + t.safe_log(1. - d_fake))
        """

        # Softmax Loss (Softmax GAN, Lin 2017)
        # Z_B = sum_{x in B} exp(-mu(x)), where mu(x) is the discriminator's
        # raw output (energy)
        z_b = tf.reduce_sum(tf.exp(-d_real)) + tf.reduce_sum(tf.exp(-d_fake)) + t.eps

        b_plus = self.batch_size      # |B+|: number of real samples
        b_all = self.batch_size * 2   # |B| = |B+| + |B-|: the full batch

        # L_G = sum_{x in B} mu(x)/|B| + ln(Z_B): the generator targets the
        # uniform distribution over the whole batch
        self.g_loss = (tf.reduce_sum(d_real) + tf.reduce_sum(d_fake)) / b_all + t.safe_log(z_b)

        # L_D = sum_{x in B+} mu(x)/|B+| + ln(Z_B): the discriminator puts all
        # probability mass on the real samples
        self.d_loss = tf.reduce_sum(d_real) / b_plus + t.safe_log(z_b)

        # Summary
        tf.summary.scalar("loss/d_loss", self.d_loss)
        tf.summary.scalar("loss/g_loss", self.g_loss)

        # Optimizer
        t_vars = tf.trainable_variables()
        d_params = [v for v in t_vars if v.name.startswith('d')]
        g_params = [v for v in t_vars if v.name.startswith('g')]

        self.d_op = tf.train.AdamOptimizer(learning_rate=self.d_lr, beta1=self.beta1).minimize(
            self.d_loss, var_list=d_params
        )
        self.g_op = tf.train.AdamOptimizer(learning_rate=self.g_lr, beta1=self.beta1).minimize(
            self.g_loss, var_list=g_params
        )

        # Merge summary
        self.merged = tf.summary.merge_all()

        # Model saver
        self.saver = tf.train.Saver(max_to_keep=1)
        self.writer = tf.summary.FileWriter('./model/', self.s.graph)
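To make the softmax reading concrete: exp(-mu(x))/Z_B is a distribution over the 2*batch_size samples in the batch, and each loss is a cross-entropy against a target distribution (uniform on the real half for D, uniform on the whole batch for G). A toy check with made-up energies:

    import numpy as np

    mu_real = np.array([0.1, 0.2])   # toy energies for the real half, |B+| = 2
    mu_fake = np.array([0.3, 0.4])   # toy energies for the fake half, |B| = 4
    z_b = np.exp(-mu_real).sum() + np.exp(-mu_fake).sum()

    # Cross-entropy against a target uniform on B+ (discriminator) ...
    l_d = (mu_real / 2).sum() + np.log(z_b)
    # ... and against a target uniform on the whole batch B (generator).
    l_g = (np.concatenate([mu_real, mu_fake]) / 4).sum() + np.log(z_b)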
Example #6
 def conjugate(x):
     # Fenchel conjugate f* of the pi-weighted Jensen-Shannon divergence from
     # the f-GAN paper (Nowozin et al., 2016), negated and mean-reduced so it
     # can serve directly as a loss term. The paper's weight pi lies in (0, 1);
     # this code reuses the constant np.pi for it.
     return -tf.reduce_mean((1.0 - np.pi) * t.safe_log((1.0 - np.pi) / (1.0 - np.pi * tf.exp(x / np.pi))))
Example #7
 def activation(x):
     # Output activation g_f(v) = -pi*log(pi) - log(1 + exp(-v)) for the
     # pi-weighted Jensen-Shannon divergence (f-GAN), negated and mean-reduced.
     return -tf.reduce_mean(-np.pi * np.log(np.pi) - t.safe_log(1.0 + tf.exp(-x)))
Example #8
 def conjugate(x):
     # Fenchel conjugate f*(t) = -log(2 - exp(t)) for the Jensen-Shannon
     # divergence (f-GAN), negated and mean-reduced.
     return -tf.reduce_mean(-t.safe_log(2.0 - tf.exp(x)))
Example #9
 def activation(x):
     # Output activation g_f(v) = log(2) - log(1 + exp(-v)) for the
     # Jensen-Shannon divergence (f-GAN), negated and mean-reduced.
     return -tf.reduce_mean(tf.log(2.0) - t.safe_log(1.0 + tf.exp(-x)))
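These `activation` / `conjugate` pairs are the two halves of the f-GAN objective F = E_real[g_f(v)] - E_fake[f*(v)], which the discriminator maximizes and the generator minimizes. Since both helpers already fold in the negation and the mean, one plausible wiring is sketched below; that the conjugate is applied directly to the discriminator's fake output is an assumption about the surrounding model code:

    # Hypothetical wiring of the helpers above into losses; d_real and d_fake
    # are assumed to be the raw discriminator outputs.
    d_loss = activation(d_real) - conjugate(d_fake)  # -E_real[g_f] + E_fake[f*]
    g_loss = conjugate(d_fake)                       # minimizing raises E_fake[f*]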