def build_mrgan(self):
    # Generator
    self.g = self.generator(self.z)
    self.g_reg = self.generator(self.encoder(self.x), reuse=True)

    # Discriminator
    d_real = self.discriminator(self.x)
    d_real_reg = self.discriminator(self.g_reg, reuse=True)
    d_fake = self.discriminator(self.g, reuse=True)

    # Losses
    # Manifold step (as in the paper):
    # d_loss_1 = tf.reduce_mean(t.safe_log(d_real) + t.safe_log(1. - d_real_reg))
    # g_loss_1 = tf.reduce_mean(self.lambda_1 * t.safe_log(d_real_reg)) - \
    #     t.mse_loss(self.x, self.g_reg, self.batch_size)
    # Diffusion step (as in the paper):
    # d_loss_2 = tf.reduce_mean(t.safe_log(d_real_reg) + t.safe_log(1. - d_fake))
    # g_loss_2 = tf.reduce_mean(t.safe_log(d_fake))
    d_real_loss = -tf.reduce_mean(t.safe_log(d_real))
    d_fake_loss = -tf.reduce_mean(t.safe_log(1. - d_fake))
    self.d_loss = d_real_loss + d_fake_loss

    e_mse_loss = self.lambda_1 * t.mse_loss(self.x, self.g_reg, self.batch_size, is_mean=True)
    e_adv_loss = self.lambda_2 * tf.reduce_mean(t.safe_log(d_real_reg))
    self.e_loss = e_adv_loss + e_mse_loss

    self.g_loss = -tf.reduce_mean(t.safe_log(d_fake)) + self.e_loss

    # Summary
    tf.summary.scalar("loss/d_real_loss", d_real_loss)
    tf.summary.scalar("loss/d_fake_loss", d_fake_loss)
    tf.summary.scalar("loss/d_loss", self.d_loss)
    tf.summary.scalar("loss/e_adv_loss", e_adv_loss)
    tf.summary.scalar("loss/e_mse_loss", e_mse_loss)
    tf.summary.scalar("loss/e_loss", self.e_loss)
    tf.summary.scalar("loss/g_loss", self.g_loss)

    # Collect trainable variables
    t_vars = tf.trainable_variables()
    d_params = [v for v in t_vars if v.name.startswith('d')]
    g_params = [v for v in t_vars if v.name.startswith('g')]
    e_params = [v for v in t_vars if v.name.startswith('e')]

    # Optimizers
    self.d_op = tf.train.AdamOptimizer(learning_rate=self.lr, beta1=self.beta1).minimize(
        self.d_loss, var_list=d_params)
    self.g_op = tf.train.AdamOptimizer(learning_rate=self.lr, beta1=self.beta1).minimize(
        self.g_loss, var_list=g_params)
    self.e_op = tf.train.AdamOptimizer(learning_rate=self.lr, beta1=self.beta1).minimize(
        self.e_loss, var_list=e_params)

    # Merge summaries
    self.merged = tf.summary.merge_all()

    # Model saver
    self.saver = tf.train.Saver(max_to_keep=1)
    self.writer = tf.summary.FileWriter('./model/', self.s.graph)
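
# --- Usage sketch (not part of the original model) --------------------------
# A minimal, hypothetical training loop for the MRGAN graph built above. It
# only assumes the attributes defined in build_mrgan (x, z, d_op, g_op, e_op,
# merged, writer, batch_size) plus the session self.s used elsewhere in this
# file; `next_batch` and `sample_z` are user-supplied data and noise samplers.
def train_mrgan_sketch(model, next_batch, sample_z, steps):
    for step in range(steps):
        feed = {model.x: next_batch(model.batch_size),
                model.z: sample_z(model.batch_size)}
        # Alternate updates: discriminator, generator, then encoder.
        model.s.run(model.d_op, feed_dict=feed)
        model.s.run(model.g_op, feed_dict=feed)
        _, summary = model.s.run([model.e_op, model.merged], feed_dict=feed)
        model.writer.add_summary(summary, step)
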
def build_gan(self):
    # Generator
    self.g = self.generator(self.z)

    # Discriminator
    d_real = self.discriminator(self.x)
    d_fake = self.discriminator(self.g, reuse=True)

    # Vanilla GAN loss, as referenced in the paper:
    """
    self.g_loss = -tf.reduce_mean(t.safe_log(d_fake))
    self.d_loss = -tf.reduce_mean(t.safe_log(d_real) + t.safe_log(1. - d_fake))
    """

    # Softmax GAN loss
    # Partition function: Z_B = sum_{x in B} exp(-mu(x)), where mu(x) is the
    # discriminator output.
    z_b = tf.reduce_sum(tf.exp(-d_real)) + tf.reduce_sum(tf.exp(-d_fake)) + t.eps
    b_plus = self.batch_size       # |B+|: real samples
    b_minus = self.batch_size * 2  # |B| : real + generated samples

    # L_G = sum_{x in B+} mu(x)/|B| + sum_{x in B-} mu(x)/|B| + ln Z_B
    self.g_loss = tf.reduce_sum(d_real / b_plus) + tf.reduce_sum(d_fake / b_minus) + t.safe_log(z_b)
    # L_D = sum_{x in B+} mu(x)/|B+| + ln Z_B
    self.d_loss = tf.reduce_sum(d_real / b_plus) + t.safe_log(z_b)

    # Summary
    tf.summary.scalar("loss/d_loss", self.d_loss)
    tf.summary.scalar("loss/g_loss", self.g_loss)

    # Optimizers
    t_vars = tf.trainable_variables()
    d_params = [v for v in t_vars if v.name.startswith('d')]
    g_params = [v for v in t_vars if v.name.startswith('g')]

    self.d_op = tf.train.AdamOptimizer(learning_rate=self.d_lr, beta1=self.beta1).minimize(
        self.d_loss, var_list=d_params)
    self.g_op = tf.train.AdamOptimizer(learning_rate=self.g_lr, beta1=self.beta1).minimize(
        self.g_loss, var_list=g_params)

    # Merge summaries
    self.merged = tf.summary.merge_all()

    # Model saver
    self.saver = tf.train.Saver(max_to_keep=1)
    self.writer = tf.summary.FileWriter('./model/', self.s.graph)
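
# --- Illustration (not part of the original model) --------------------------
# A tiny NumPy sketch mirroring the Softmax GAN losses built above, with toy
# discriminator outputs mu(x) for a real batch B+ and a fake batch B-. The
# names `mu_real` / `mu_fake` are illustrative only; the arithmetic follows
# the graph above exactly.
import numpy as np

def softmax_gan_losses_sketch(mu_real, mu_fake):
    z_b = np.sum(np.exp(-mu_real)) + np.sum(np.exp(-mu_fake))    # partition Z_B
    b_plus = mu_real.size                                        # |B+|
    b_minus = mu_real.size + mu_fake.size                        # |B|
    d_loss = np.sum(mu_real / b_plus) + np.log(z_b)
    g_loss = np.sum(mu_real / b_plus) + np.sum(mu_fake / b_minus) + np.log(z_b)
    return d_loss, g_loss
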
def build_infogan(self):
    # Generator
    self.g = self.generator(self.z, self.c)
    # self.g_test = self.generator(self.z, self.c, reuse=True, is_train=False)

    # Discriminator
    d_real, d_real_cont, d_real_cat = self.discriminator(self.x)
    d_fake, d_fake_cont, d_fake_cat = self.discriminator(self.g, reuse=True)

    # Losses
    self.d_adv_loss = -tf.reduce_mean(t.safe_log(d_real) + t.safe_log(1. - d_fake))

    # Mutual-information terms for the continuous and categorical codes.
    d_cont_loss = tf.reduce_mean(tf.square(d_fake_cont / .5))
    cat = self.c[:, self.n_cont:]  # categorical part of the latent code
    # Use the categorical head of the discriminator for the categorical code
    # (the original passed d_fake_cont here, which left d_fake_cat unused).
    d_cat_loss = -(tf.reduce_mean(tf.reduce_sum(cat * d_fake_cat)) + tf.reduce_mean(cat * cat))
    d_info_loss = self.lambda_ * (d_cont_loss + d_cat_loss)

    self.d_loss = self.d_adv_loss + d_info_loss
    self.g_loss = -tf.reduce_mean(t.safe_log(d_fake)) + d_info_loss

    # Summary
    tf.summary.scalar("loss/d_adv_loss", self.d_adv_loss)
    tf.summary.scalar("loss/d_cont_loss", d_cont_loss)
    tf.summary.scalar("loss/d_cat_loss", d_cat_loss)
    tf.summary.scalar("loss/d_loss", self.d_loss)
    tf.summary.scalar("loss/g_loss", self.g_loss)

    # Optimizers
    t_vars = tf.trainable_variables()
    d_params = [v for v in t_vars if v.name.startswith('d')]
    g_params = [v for v in t_vars if v.name.startswith('g')]

    self.d_op = tf.train.AdamOptimizer(learning_rate=self.d_lr,
                                       beta1=self.beta1,
                                       beta2=self.beta2).minimize(self.d_loss, var_list=d_params)
    self.g_op = tf.train.AdamOptimizer(learning_rate=self.g_lr,
                                       beta1=self.beta1,
                                       beta2=self.beta2).minimize(self.g_loss, var_list=g_params)

    # Merge summaries
    self.merged = tf.summary.merge_all()

    # Model saver
    self.saver = tf.train.Saver(max_to_keep=1)
    self.writer = tf.summary.FileWriter('./model/', self.s.graph)
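
# --- Usage sketch (not part of the original model) --------------------------
# A hypothetical sampler for the latent code `c` fed to build_infogan above.
# The slice self.c[:, self.n_cont:] implies the layout [continuous | one-hot
# categorical]; the U(-1, 1) prior on the continuous codes is an assumption.
import numpy as np

def sample_c_sketch(batch_size, n_cont, n_cat):
    cont = np.random.uniform(-1., 1., size=(batch_size, n_cont))        # continuous codes
    cat = np.eye(n_cat)[np.random.randint(0, n_cat, size=batch_size)]   # one-hot categorical code
    return np.concatenate([cont, cat], axis=1).astype(np.float32)
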
def build_adagan(self):
    # Generator
    self.g = self.generator(self.z)

    # Discriminator
    d_real, _ = self.discriminator(self.x)
    d_fake, _ = self.discriminator(self.g, reuse=True)

    # Losses
    d_real_loss = -tf.reduce_mean(t.safe_log(d_real))
    d_fake_loss = -tf.reduce_mean(t.safe_log(1. - d_fake))
    self.d_loss = d_real_loss + d_fake_loss
    # Non-saturating generator loss (the original was missing the minus sign).
    self.g_loss = -tf.reduce_mean(t.safe_log(d_fake))

    # Summary
    # NOTE: self.c_loss (the classifier loss) is expected to be defined
    # elsewhere before this method is called.
    tf.summary.scalar("loss/d_real_loss", d_real_loss)
    tf.summary.scalar("loss/d_fake_loss", d_fake_loss)
    tf.summary.scalar("loss/d_loss", self.d_loss)
    tf.summary.scalar("loss/g_loss", self.g_loss)
    tf.summary.scalar("loss/c_loss", self.c_loss)

    # Optimizers
    t_vars = tf.trainable_variables()
    d_params = [v for v in t_vars if v.name.startswith('d')]
    g_params = [v for v in t_vars if v.name.startswith('g')]
    c_params = [v for v in t_vars if v.name.startswith('c')]

    self.d_op = tf.train.AdamOptimizer(learning_rate=self.d_lr, beta1=self.beta1).minimize(
        self.d_loss, var_list=d_params)
    self.g_op = tf.train.AdamOptimizer(learning_rate=self.g_lr, beta1=self.beta1).minimize(
        self.g_loss, var_list=g_params)
    self.c_op = tf.train.AdamOptimizer(learning_rate=self.c_lr, beta1=self.beta1).minimize(
        self.c_loss, var_list=c_params)

    # Merge summaries
    self.merged = tf.summary.merge_all()

    # Model saver
    self.saver = tf.train.Saver(max_to_keep=1)
    self.writer = tf.summary.FileWriter('./model/', self.s.graph)
# f-GAN helpers: per-divergence output activation g_f and Fenchel conjugate
# f*, each already wrapped in -tf.reduce_mean so they return scalar loss
# terms. NOTE: both pairs reuse the names `conjugate` / `activation`; at a
# single scope the second pair shadows the first, so in practice each pair
# belongs to its own divergence branch (JS-Weighted vs. JS) in build_fgan
# below.

# Weighted Jensen-Shannon divergence.
# NOTE: np.pi is the circle constant (3.14159...); the weighted-JS formula
# expects a mixture weight pi in (0, 1), so this is most likely meant to be a
# hyperparameter rather than np.pi.
def conjugate(x):
    return -tf.reduce_mean((1. - np.pi) * t.safe_log(
        (1. - np.pi) / (1. - np.pi * tf.exp(x / np.pi))))


def activation(x):
    return -tf.reduce_mean(-np.pi * np.log(np.pi) - t.safe_log(1. + tf.exp(-x)))


# Jensen-Shannon divergence.
def conjugate(x):
    return -tf.reduce_mean(-t.safe_log(2. - tf.exp(x)))


def activation(x):
    return -tf.reduce_mean(tf.log(2.) - t.safe_log(1. + tf.exp(-x)))
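
# --- Illustration (not part of the original model) --------------------------
# How an (activation, conjugate) pair such as the helpers above maps onto the
# loss pattern used in build_fgan below. The helpers already apply
# -tf.reduce_mean, so they return the scalar loss terms directly.
def fgan_losses_from_helpers(d_real, d_fake, activation_fn, conjugate_fn):
    d_real_loss = activation_fn(d_real)  # real-sample (output activation) term
    d_fake_loss = conjugate_fn(d_fake)   # fake-sample (Fenchel conjugate) term
    d_loss = d_real_loss - d_fake_loss
    g_loss = d_fake_loss
    return d_loss, g_loss
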
def build_fgan(self):
    # Generator
    self.g = self.generator(self.z)

    # Discriminator
    d_real = self.discriminator(self.x)
    d_fake = self.discriminator(self.g, reuse=True)

    # Losses
    if self.divergence == 'GAN':
        d_real_loss = -tf.reduce_mean(-t.safe_log(1. + tf.exp(-d_real)))
        d_fake_loss = -tf.reduce_mean(-t.safe_log(1. - tf.exp(d_fake)))
    elif self.divergence == 'KL':  # see also tf.distributions.kl_divergence
        d_real_loss = -tf.reduce_mean(d_real)
        d_fake_loss = -tf.reduce_mean(tf.exp(d_fake - 1.))
    elif self.divergence == 'Reverse-KL':
        d_real_loss = -tf.reduce_mean(-tf.exp(d_real))
        d_fake_loss = -tf.reduce_mean(-1. - d_fake)  # log removed
    elif self.divergence == 'JS':
        d_real_loss = -tf.reduce_mean(t.safe_log(2. / (1. + tf.exp(-d_real))))
        d_fake_loss = -tf.reduce_mean(-t.safe_log(2. - tf.exp(d_fake)))
    elif self.divergence == 'JS-Weighted':
        # NOTE: np.pi is the circle constant; the weighted-JS formula expects
        # a mixture weight pi in (0, 1) (see the helpers above).
        d_real_loss = -tf.reduce_mean(-np.pi * np.log(np.pi) - (1. + tf.exp(-d_real)))  # log removed
        d_fake_loss = -tf.reduce_mean((1. - np.pi) * t.safe_log(
            (1. - np.pi) / (1. - np.pi * tf.exp(d_fake / np.pi))))
    elif self.divergence == 'Squared-Hellinger':
        d_real_loss = -tf.reduce_mean(1. - tf.exp(d_real))
        d_fake_loss = -tf.reduce_mean(d_fake / (1. - d_fake))
    elif self.divergence == 'Pearson':
        d_real_loss = -tf.reduce_mean(d_real)
        d_fake_loss = -tf.reduce_mean(tf.square(d_fake) / 4. + d_fake)
    elif self.divergence == 'Neyman':
        d_real_loss = -tf.reduce_mean(1. - tf.exp(d_real))
        d_fake_loss = -tf.reduce_mean(2. - 2. * tf.sqrt(1. - d_fake))  # requires d_fake < 1
    elif self.divergence == 'Jeffrey':
        from scipy.special import lambertw
        d_real_loss = -tf.reduce_mean(d_real)
        # Evaluating scipy's lambertw via session.run breaks the pure-tensor
        # pipeline; it needs to be replaced with an in-graph equivalent.
        lambert_w = lambertw(self.s.run(tf.exp(1. - d_fake)))
        d_fake_loss = -tf.reduce_mean(lambert_w + 1. / lambert_w + d_fake - 2.)
    elif self.divergence == 'Total-Variation':
        d_real_loss = -tf.reduce_mean(tf.nn.tanh(d_real) / 2.)
        d_fake_loss = -tf.reduce_mean(d_fake)  # requires |d_fake| < 0.5
    else:
        raise NotImplementedError("[-] Not implemented f-divergence: %s" % self.divergence)

    self.d_loss = d_real_loss - d_fake_loss
    self.g_loss = d_fake_loss

    # Summary
    tf.summary.scalar("loss/d_real_loss", d_real_loss)
    tf.summary.scalar("loss/d_fake_loss", d_fake_loss)
    tf.summary.scalar("loss/d_loss", self.d_loss)
    tf.summary.scalar("loss/g_loss", self.g_loss)

    # Collect trainable variables
    t_vars = tf.trainable_variables()
    d_params = [v for v in t_vars if v.name.startswith('d')]
    g_params = [v for v in t_vars if v.name.startswith('g')]

    # Optimizers
    self.d_op = tf.train.AdamOptimizer(learning_rate=self.lr, beta1=self.beta1).minimize(
        self.d_loss, var_list=d_params)
    self.g_op = tf.train.AdamOptimizer(learning_rate=self.lr, beta1=self.beta1).minimize(
        self.g_loss, var_list=g_params)

    # Merge summaries
    self.merged = tf.summary.merge_all()

    # Model saver
    self.saver = tf.train.Saver(max_to_keep=1)
    self.writer = tf.summary.FileWriter('./model/%s/' % self.divergence, self.s.graph)
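
# --- Sketch (not part of the original model) ---------------------------------
# The Jeffrey branch above evaluates scipy's lambertw by running the graph,
# which breaks the pure-tensor pipeline. One possible in-graph replacement is
# a fixed number of Newton iterations on w * exp(w) = x (valid for x > 0, as
# is the case for exp(1 - d_fake)). This is only a sketch under those
# assumptions, not the repository's implementation.
import tensorflow as tf

def lambertw_tf_sketch(x, iterations=20):
    w = tf.log1p(x)  # reasonable initial guess for x > 0
    for _ in range(iterations):
        ew = tf.exp(w)
        # Newton step for f(w) = w * exp(w) - x, with f'(w) = exp(w) * (w + 1)
        w = w - (w * ew - x) / (ew * (w + 1.))
    return w
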