class Model(object):
    """Adversarial autoencoder (AAE).

    An encoder/decoder pair is trained for reconstruction, while a
    discriminator on the latent code pushes the encoder's output
    distribution toward the prior samples fed in via ``p_z``.
    """

    def __init__(self, input_dim, z_dim):
        self.input_dim = input_dim
        self.z_dim = z_dim
        self.enc_layer_list = [input_dim, 1000, 1000, z_dim]
        self.dec_layer_list = [z_dim, 1000, 1000, input_dim]
        self.disc_layer_list = [z_dim, 500, 500, 1]
        self.rec_lr = 5e-5
        self.gen_lr = 2e-5
        self.disc_lr = 2e-5
        # 'deterministic' uses the encoder mean directly as the code;
        # 'Non-deterministic' samples via the reparameterization trick.
        self.mode = 'deterministic'

        # -- encoder -------
        self.encoder = Encoder(self.enc_layer_list)

        # -- decoder -------
        self.decoder = Decoder(self.dec_layer_list)

        # -- discriminator --
        self.discriminator = Discriminator(self.disc_layer_list)

    def set_model(self):
        """Build the TF1 graph: reconstruction, adversarial and critic losses."""
        self.x = tf.placeholder(tf.float32, [None, self.input_dim])
        self.p_z = tf.placeholder(dtype=tf.float32,
                                  shape=[None, self.z_dim])
        self.batch_size = tf.shape(self.x)[0]

        # ----- encoding -----
        mu, log_sigma = self.encoder(self.x, is_training=True, reuse=False)
        if self.mode == 'Non-deterministic':
            eps = tf.random_normal([self.batch_size, self.z_dim])
            q_z = eps * tf.exp(log_sigma) + mu
        elif self.mode == 'deterministic':
            q_z = mu
        # NOTE(review): any other mode value leaves q_z undefined and
        # raises NameError just below.

        # ----- decoding -----
        rec_x = self.decoder(q_z, is_training=True, reuse=False)

        # ----- loss -----
        reconstruct_error = 0.5 * tf.reduce_mean(
            tf.reduce_sum(tf.square(rec_x - self.x), 1))

        real_logits = self.discriminator(self.p_z, is_training=True,
                                         reuse=False)
        fake_logits = self.discriminator(q_z, is_training=True, reuse=True)

        # Discriminator: prior samples are "real" (1), encoder codes "fake" (0).
        d_loss_from_real = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=real_logits, labels=tf.ones_like(real_logits)))
        d_loss_from_fake = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=fake_logits, labels=tf.zeros_like(fake_logits)))
        # Generator (= encoder) tries to make its codes look like prior draws.
        g_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=fake_logits, labels=tf.ones_like(fake_logits)))

        # ----- train -----
        self.obj_rec = reconstruct_error
        train_vars = self.encoder.get_variables()
        train_vars.extend(self.decoder.get_variables())
        self.train_rec = tf.train.RMSPropOptimizer(
            self.rec_lr, decay=0.5).minimize(self.obj_rec,
                                             var_list=train_vars)

        self.obj_gen = g_loss
        train_vars = self.encoder.get_variables()
        self.train_gen = tf.train.RMSPropOptimizer(
            self.gen_lr, decay=0.5).minimize(self.obj_gen,
                                             var_list=train_vars)

        self.obj_disc = d_loss_from_real + d_loss_from_fake
        train_vars = self.discriminator.get_variables()
        self.train_disc = tf.train.RMSPropOptimizer(
            self.disc_lr, decay=0.5).minimize(self.obj_disc,
                                              var_list=train_vars)

        # ---- for using ----
        self.gen_z, _ = self.encoder(self.x, is_training=False, reuse=True)
        self.gen_x = self.decoder(self.p_z, is_training=False, reuse=True)

    def training_rec(self, sess, data):
        # One reconstruction step; returns the reconstruction loss.
        _, obj_rec = sess.run([self.train_rec, self.obj_rec],
                              feed_dict={self.x: data})
        return obj_rec

    def training_gen(self, sess, data):
        # One adversarial step on the encoder; returns the generator loss.
        _, obj_gen = sess.run([self.train_gen, self.obj_gen],
                              feed_dict={self.x: data})
        return obj_gen

    def training_disc(self, sess, data, p_z):
        # One discriminator step; p_z holds samples drawn from the prior.
        _, obj_disc = sess.run([self.train_disc, self.obj_disc],
                               feed_dict={
                                   self.x: data,
                                   self.p_z: p_z
                               })
        return obj_disc

    def encoding(self, sess, data):
        # Map inputs to their latent codes (encoder mean).
        ret = sess.run(self.gen_z, feed_dict={self.x: data})
        return ret

    def decoding(self, sess, z):
        # Decode latent vectors back to input space.
        ret = sess.run(self.gen_x, feed_dict={self.p_z: z})
        return ret

    def setting(self):
        """Return the model's hyper-parameter configuration as a dict."""
        setting = {
            'input_dim': self.input_dim,
            'z_dim': self.z_dim,
            'enc_layer_list': self.enc_layer_list,
            'dec_layer_list': self.dec_layer_list,
            'disc_layer_list': self.disc_layer_list,
            'rec_lr': self.rec_lr,
            'gen_lr': self.gen_lr,
            'disc_lr': self.disc_lr,
            'mode': self.mode
        }
        return setting
class Model(object):
    """GAN on 2-D toy data with a latent-distance regularizer.

    Besides the standard non-saturating GAN loss, the generator pays a
    penalty (weight ``beta``) for mismatches between the log-distance of
    two generated samples and the log-distance of their latent codes.
    """

    def __init__(self, z_dim):
        self.z_dim = z_dim
        self.true_input_dim = 2
        self.lr = 0.001
        self.beta = 0.1  # weight of the distance-matching penalty

        # generator config
        gen_layer = [z_dim, 128, 128, self.true_input_dim]
        # discriminator config
        disc_layer = [self.true_input_dim, 128, 128, 1]

        # -- generator -----
        self.gen = Generator([u'gen_deconv'], gen_layer)

        # -- discriminator --
        self.disc = Discriminator([u'disc_conv'], disc_layer)

    def set_model(self):
        """Build generator/discriminator losses and train ops."""
        # -- define place holder -------
        self.z = tf.placeholder(tf.float32, [None, self.z_dim])
        self.z2 = tf.placeholder(tf.float32, [None, self.z_dim])
        self.true_input = tf.placeholder(tf.float32,
                                         [None, self.true_input_dim])

        # -- generator -----------------
        gen_out = self.gen.set_model(self.z, True, False)
        g_logits = self.disc.set_model(gen_out, True, False)
        gen_out2 = self.gen.set_model(self.z2, True, True)

        # 0.5 * log(||a - b||^2) == log||a - b||: per-pair log distances
        # in output space and in latent space.
        gen_dist = 0.5 * tf.log(
            tf.reduce_sum(tf.square(gen_out - gen_out2), axis = 1)
        )
        z_dist = 0.5 * tf.log(
            tf.reduce_sum(tf.square(self.z - self.z2), axis = 1)
        )
        self.g_obj = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits = g_logits,
                labels = tf.ones_like(g_logits)
            )
        )
        # distance-matching regularizer
        self.g_obj += self.beta * tf.reduce_mean(tf.square(gen_dist - z_dist))
        self.train_gen = tf.train.AdamOptimizer(self.lr, beta1 = 0.5).minimize(self.g_obj, var_list = self.gen.get_variables())

        # -- discriminator --------
        d_logits = self.disc.set_model(self.true_input, True, True)
        d_obj_true = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits = d_logits,
                labels = tf.ones_like(d_logits)
            )
        )
        d_obj_false = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits = g_logits,
                labels = tf.zeros_like(g_logits)
            )
        )
        self.d_obj = d_obj_true + d_obj_false
        self.train_disc = tf.train.AdamOptimizer(self.lr, beta1 = 0.5).minimize(self.d_obj, var_list = self.disc.get_variables())

        # -- for figure generation -------
        self.gen_figs = self.gen.set_model(self.z, False, True)
        self.sigmoid_d = tf.nn.sigmoid(self.disc.set_model(self.true_input ,False, True))

    def training_gen(self, sess, z_list, z_list2):
        # One generator step; needs two independent latent batches for
        # the distance penalty.
        _, g_obj = sess.run([self.train_gen, self.g_obj],
                            feed_dict = {self.z: z_list, self.z2: z_list2})
        return g_obj

    def training_disc(self, sess, z_list, figs):
        # One discriminator step (z2 is not required by d_obj).
        _, d_obj = sess.run([self.train_disc, self.d_obj],
                            feed_dict = {
                                self.z: z_list,
                                self.true_input:figs})
        return d_obj

    def generate(self, sess, z):
        # Sample generator outputs for latent codes z.
        ret_ = sess.run(self.gen_figs, feed_dict = {self.z: z})
        return ret_

    def get_disc_value(self, sess, inputs):
        # Discriminator probability D(x) for the given inputs.
        ret = sess.run(self.sigmoid_d, feed_dict = {self.true_input:inputs})
        return ret
class Model(object):
    """Coulomb-GAN-style model on 2-D toy data.

    The discriminator is regressed onto potential values computed by
    ``get_potentials`` from generated and real samples; the generator's
    objective is the mean discriminator output on fakes. Placeholders use
    a fixed ``batch_size`` — presumably required by the pairwise potential
    computation (TODO: confirm against get_potentials).
    """

    def __init__(self, z_dim, batch_size, coulomb_dim, coulomb_epsilon):
        self.z_dim = z_dim
        self.true_input_dim = 2
        self.lr = 0.0005
        self.batch_size = batch_size
        # Parameters forwarded to get_potentials; semantics defined there.
        self.coulomb_dim = coulomb_dim
        self.coulomb_epsilon = coulomb_epsilon

        # generator config
        gen_layer = [z_dim, 128, 128, self.true_input_dim]
        # discriminator config
        disc_layer = [self.true_input_dim, 128, 128, 1]

        # -- generator -----
        self.gen = Generator([u'gen_deconv'], gen_layer)

        # -- discriminator --
        self.disc = Discriminator([u'disc_conv'], disc_layer)

    def set_model(self):
        """Build the potential-regression discriminator loss and train ops."""
        # -- define place holder -------
        self.z = tf.placeholder(tf.float32, [self.batch_size, self.z_dim])
        self.true_input = tf.placeholder(
            tf.float32, [self.batch_size, self.true_input_dim])

        # -- generator -----------------
        gen_out = self.gen.set_model(self.z, True, False)
        g_logits = self.disc.set_model(gen_out, True, False)

        # -- discriminator --------
        d_logits = self.disc.set_model(self.true_input, True, True)

        # -- for losses -----------
        g_logits = tf.reshape(g_logits, [-1])
        d_logits = tf.reshape(d_logits, [-1])
        potential_gen, potential_input = get_potentials(
            gen_out, self.true_input, self.coulomb_dim, self.coulomb_epsilon)
        # Discriminator learns to predict the potential at each sample point.
        loss_d_gen = tf.losses.mean_squared_error(potential_gen, g_logits)
        loss_d_input = tf.losses.mean_squared_error(potential_input, d_logits)
        # Generator objective: mean discriminator (potential) value on fakes.
        self.g_obj = tf.reduce_mean(g_logits)
        self.d_obj = loss_d_gen + loss_d_input

        # -- for train op ----------
        self.train_gen = tf.train.AdamOptimizer(self.lr, beta1 = 0.5).minimize(self.g_obj, var_list = self.gen.get_variables())
        self.train_disc = tf.train.AdamOptimizer(self.lr, beta1 = 0.5).minimize(self.d_obj, var_list = self.disc.get_variables())

        # -- for figure generation -------
        self.gen_figs = self.gen.set_model(self.z, False, True)

    def training_gen(self, sess, z_list):
        # One generator step; returns the generator objective value.
        _, g_obj = sess.run([self.train_gen, self.g_obj],
                            feed_dict = {self.z: z_list})
        return g_obj

    def training_disc(self, sess, z_list, figs):
        # One discriminator (potential-regression) step.
        _, d_obj = sess.run([self.train_disc, self.d_obj],
                            feed_dict = {
                                self.z: z_list,
                                self.true_input:figs})
        return d_obj

    def generate(self, sess, z):
        # Sample generator outputs for latent codes z.
        ret_ = sess.run(self.gen_figs, feed_dict = {self.z: z})
        return ret_
class Model(object):
    """GAN over 256x256 RGB images with an f-divergence-style objective.

    ``f_star`` is defined elsewhere in the project; the losses match the
    variational f-GAN form (presumably f_star is the convex conjugate of
    an f-divergence generator function — confirm against its definition).
    """

    def __init__(self, z_dim, batch_size):
        self.input_size = 256
        self.z_dim = z_dim
        self.batch_size = batch_size
        self.lr = 0.0001

        # generator config
        gen_layer = [512, 256, 128, 3]
        # starting spatial size for the generator stack; presumably each
        # deconv layer doubles resolution — confirm against Generator.
        gen_in_dim = int(self.input_size / 2**(len(gen_layer) - 1))
        # discriminator config
        disc_layer = [3, 64, 128, 256]

        # -- generator -----
        self.gen = Generator([u'gen_reshape', u'gen_deconv'], gen_in_dim,
                             gen_layer)

        # -- discriminator --
        self.disc = Discriminator([u'disc_conv', u'disc_fc'], disc_layer)

    def set_model(self):
        """Build generator/discriminator objectives and train ops."""
        # -- define place holder -------
        self.z = tf.placeholder(tf.float32, [self.batch_size, self.z_dim])
        self.figs = tf.placeholder(
            tf.float32, [self.batch_size, self.input_size, self.input_size, 3])

        # -- generator -----------------
        gen_figs = self.gen.set_model(self.z, self.batch_size, True, False)
        g_logits = self.disc.set_model(gen_figs, True, False)
        # Generator minimizes -E_fake[f*(T(x))].
        self.g_obj = -tf.reduce_mean(f_star(g_logits))
        self.train_gen = tf.train.AdamOptimizer(self.lr, beta1=0.5).minimize(
            self.g_obj, var_list=self.gen.get_variables())

        # -- discriminator --------
        d_logits = self.disc.set_model(self.figs, True, True)
        # Discriminator minimizes -E_real[T(x)] + E_fake[f*(T(x))].
        self.d_obj = -tf.reduce_mean(d_logits) + tf.reduce_mean(
            f_star(g_logits))
        self.train_disc = tf.train.AdamOptimizer(self.lr, beta1=0.5).minimize(
            self.d_obj, var_list=self.disc.get_variables())

        # -- for figure generation -------
        self.gen_figs = self.gen.set_model(self.z, self.batch_size, False,
                                           True)

    def training_gen(self, sess, z_list):
        # One generator step; returns the generator objective.
        _, g_obj = sess.run([self.train_gen, self.g_obj],
                            feed_dict={self.z: z_list})
        return g_obj

    def training_disc(self, sess, z_list, figs):
        # One discriminator step; returns the discriminator objective.
        _, d_obj = sess.run([self.train_disc, self.d_obj],
                            feed_dict={
                                self.z: z_list,
                                self.figs: figs
                            })
        return d_obj

    def gen_fig(self, sess, z):
        # Generate a batch of images from latent codes z.
        ret = sess.run(self.gen_figs, feed_dict={self.z: z})
        return ret
class Model(object):
    """Adversarial discriminative domain adaptation (ADDA-style) model.

    A source feature extractor and classifier are first trained with
    supervision; then a target feature extractor (a copy of the source
    one) is trained adversarially so that a discriminator cannot tell
    target features from source features, while the frozen classifier is
    reused on target features.
    """

    def __init__(self, fv_dim, class_num):
        self.lr = 0.002

        # -- placeholder -------
        self.source_figs = tf.placeholder(tf.float32, [None, 32, 32, 3])
        self.labels = tf.placeholder(tf.float32, [None, class_num])

        # -- FeatureExtractor -----
        self.source_fe = FeatureExtractor('source_fe', fv_dim)

        # -- classifier ---
        self.cl = Classifier('classifier', class_num)

        # -- discriminator --
        self.disc = Discriminator('discriminator')

    def set_source_model(self):
        """Build the supervised source graph (feature extractor + classifier)."""
        # -- classify --
        tmp_fv = self.source_fe(self.source_figs, True)
        logits, probs = self.cl(tmp_fv, True)
        # NOTE(review): sigmoid cross-entropy on (presumably one-hot) class
        # labels; softmax_cross_entropy_with_logits would be the usual choice
        # — confirm this is intentional before changing.
        self.obj = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,
                                                    labels=self.labels))
        var_list = self.source_fe.get_variables()
        var_list.extend(self.cl.get_variables())
        self.train_source_fe = tf.train.AdamOptimizer(self.lr).minimize(
            self.obj, var_list=var_list)

        # -- for fv --
        self.source_fv = self.source_fe(self.source_figs, False)
        self.source_prob = self.cl(self.source_fv, False)

    def init(self, sess):
        """Initialize source feature-extractor and classifier variables."""
        var_list = self.source_fe.get_variables()
        var_list.extend(self.cl.get_variables())
        init_op = tf.variables_initializer(var_list=var_list)
        sess.run(init_op)

    def set_target(self, sess):
        """Build and initialize the target branch and adversarial losses.

        Must be called after set_source_model (it uses self.source_fv).
        """
        # -- make target fe ---
        self.target_figs = tf.placeholder(tf.float32, [None, 32, 32, 3])
        self.target_fe = self.source_fe.copy('target_fe')

        # -- make target fe loss --
        target_fv = self.target_fe(self.target_figs, True)
        t_logits = self.disc(target_fv, True)
        # Negated cross-entropy against the "fake" label: minimizing this
        # pushes D's output on target features toward 1 ("source-like").
        self.target_obj = -tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=t_logits, labels=tf.zeros_like(t_logits)))

        # -- make adversarial loss ---
        d_logits = self.disc(self.source_fv, True)
        d_obj_true = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=d_logits, labels=tf.ones_like(d_logits)))
        d_obj_false = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=t_logits, labels=tf.zeros_like(t_logits)))
        self.disc_obj = d_obj_true + d_obj_false

        # -- make traing_ops ---
        self.train_target_fe = tf.train.AdamOptimizer(
            self.lr,
            beta1=0.5).minimize(self.target_obj,
                                var_list=self.target_fe.get_variables())
        self.train_discriminator = tf.train.AdamOptimizer(
            self.lr,
            beta1=0.5).minimize(self.disc_obj,
                                var_list=self.disc.get_variables())

        # -- target_fv ---
        self.target_fv = self.target_fe(self.target_figs, False)
        self.target_prob = self.cl(self.target_fv, False)

        # -- initialize target fe variables --
        var_list = self.target_fe.get_variables()
        init_op = tf.variables_initializer(var_list=var_list)
        sess.run(init_op)

    def training_source(self, sess, source_figs, labels=None):
        """One supervised source step; returns the classification loss.

        BUG FIX: the original never fed ``self.labels`` although
        ``self.obj`` depends on it, causing an unfed-placeholder error at
        runtime. ``labels`` is a new optional argument for backward
        compatibility with existing call sites.
        """
        feed_dict = {self.source_figs: source_figs}
        if labels is not None:
            feed_dict[self.labels] = labels
        _, obj = sess.run([self.train_source_fe, self.obj],
                          feed_dict=feed_dict)
        return obj

    def training_target(self, sess, target_figs):
        """One adversarial step on the target feature extractor."""
        _, obj = sess.run([self.train_target_fe, self.target_obj],
                          feed_dict={self.target_figs: target_figs})
        return obj

    def training_discriminator(self, sess, source_figs, target_figs):
        """One discriminator step (source vs. target features).

        BUG FIX: the parameter was misspelled ``souce_figs`` while the
        body referenced ``source_figs``, raising NameError on every call.
        """
        _, obj = sess.run([self.train_discriminator, self.disc_obj],
                          feed_dict={
                              self.target_figs: target_figs,
                              self.source_figs: source_figs
                          })
        return obj

    def get_souce_fv_and_prob(self, sess, figs):
        """Return source-domain features and classifier output for ``figs``.

        The method name keeps its historical ``souce`` spelling so existing
        callers keep working. BUG FIX: the body referenced the nonexistent
        attributes ``self.souce_fv`` and ``self.souce_figs``.
        """
        fv, prob = sess.run([self.source_fv, self.source_prob],
                            feed_dict={self.source_figs: figs})
        return fv, prob

    def get_target_fv_and_prob(self, sess, target_figs):
        """Return target-domain features and classifier output.

        BUG FIX: the body referenced the nonexistent ``self.targe_fv``.
        """
        fv, prob = sess.run([self.target_fv, self.target_prob],
                            feed_dict={self.target_figs: target_figs})
        return fv, prob
class Model(object):
    """Adversarial autoencoder with a label-conditioned discriminator.

    A VAE-style encoder/decoder is trained for reconstruction; the
    discriminator's input width is z_dim + (class_num + 1), so it
    presumably sees the latent code together with a label vector —
    confirm against Discriminator.set_model. Prior codes are drawn
    per-class by ``Sampler``.
    """

    def __init__(self, input_dim, z_dim, class_num, batch_size):
        self.input_dim = input_dim
        self.z_dim = z_dim
        self.class_num = class_num
        self.batch_size = batch_size
        self.lr = 0.0001

        # -- encoder -------
        self.encoder = Encoder([input_dim, 1200, 600, 100], z_dim)

        # -- decoder -------
        self.decoder = Decoder([z_dim, 100, 600, 1200, input_dim])

        # -- discriminator --
        self.discriminator = Discriminator(
            [z_dim + (class_num + 1), 50, 20, 10, 1])

        # -- sampler ----
        self.sampler = Sampler(class_num)

    def set_model(self):
        """Build reconstruction, generator and discriminator objectives."""
        # TODO: only labeled
        # -- for labeled data ------
        self.x_labeled = tf.placeholder(tf.float32,
                                        [self.batch_size, self.input_dim])

        # encode and decode (reparameterization trick)
        mu, log_sigma = self.encoder.set_model(self.x_labeled,
                                               is_training=True)
        eps = tf.random_normal([self.batch_size, self.z_dim])
        z = eps * tf.exp(log_sigma) + mu
        gen_figs = self.decoder.set_model(z, is_training=True)
        reconstruct_error = tf.reduce_mean(
            tf.reduce_sum(tf.pow(gen_figs - self.x_labeled, 2), [1]))

        # make GAN loss
        self.y_labeled = tf.placeholder(tf.float32,
                                        [self.batch_size, self.class_num + 1])
        vae_logits = self.discriminator.set_model(z,
                                                  self.y_labeled,
                                                  is_training=True,
                                                  reuse=False)
        # Encoder codes are "fake" (0) for the discriminator...
        obj_disc_from_vae = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=vae_logits, labels=tf.zeros_like(vae_logits)))
        # ...while the encoder tries to make them look "real" (1).
        obj_gen_from_vae = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=vae_logits, labels=tf.ones_like(vae_logits)))

        # discriminator: codes fed into z_input (prior samples) are "real"
        self.z_input = tf.placeholder(dtype=tf.float32,
                                      shape=[self.batch_size, self.z_dim])
        disc_logits = self.discriminator.set_model(self.z_input,
                                                   self.y_labeled,
                                                   is_training=True,
                                                   reuse=True)
        obj_disc_from_inputs = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=disc_logits, labels=tf.ones_like(disc_logits)))

        # -- train -----
        self.obj_vae = reconstruct_error
        train_vars = self.encoder.get_variables()
        train_vars.extend(self.decoder.get_variables())
        self.train_vae = tf.train.AdamOptimizer(self.lr).minimize(
            self.obj_vae, var_list=train_vars)

        # Adversarial update touches the encoder only.
        self.obj_gen = obj_gen_from_vae
        train_vars = self.encoder.get_variables()
        self.train_gen = tf.train.AdamOptimizer(self.lr).minimize(
            self.obj_gen, var_list=train_vars)

        self.obj_disc = obj_disc_from_vae + obj_disc_from_inputs
        train_vars = self.discriminator.get_variables()
        self.train_disc = tf.train.AdamOptimizer(self.lr).minimize(
            self.obj_disc, var_list=train_vars)

        # -- for using ---------------------
        self.mu, _ = self.encoder.set_model(self.x_labeled,
                                            is_training=False,
                                            reuse=True)
        self.generate_figs = self.decoder.set_model(self.z_input,
                                                    is_training=False,
                                                    reuse=True)

    def training_vae(self, sess, figs):
        # Reconstruction step (labels are not needed by obj_vae).
        _, obj_vae = sess.run([self.train_vae, self.obj_vae],
                              feed_dict={self.x_labeled: figs})
        return obj_vae

    def training_gen(self, sess, figs, y):
        # Adversarial step on the encoder.
        _, obj_gen = sess.run([self.train_gen, self.obj_gen],
                              feed_dict={
                                  self.x_labeled: figs,
                                  self.y_labeled: y
                              })
        return obj_gen

    def training_disc(self, sess, figs, y):
        # Discriminator step; prior codes are drawn per class via Sampler.
        tmp = np.argmax(y, axis=1)
        z = self.sampler(tmp)
        _, obj_disc = sess.run([self.train_disc, self.obj_disc],
                               feed_dict={
                                   self.x_labeled: figs,
                                   self.y_labeled: y,
                                   self.z_input: z
                               })
        return obj_disc

    def encoding(self, sess, figs):
        # Latent means for the given inputs.
        ret = sess.run(self.mu, feed_dict={self.x_labeled: figs})
        return ret

    def figure_generate(self, sess, z):
        # Decode latent codes into figures.
        figs = sess.run(self.generate_figs, feed_dict={self.z_input: z})
        return figs
class Model(object):
    """InfoGAN-style conditional GAN over 32x32 single-channel images.

    Alongside the standard GAN losses, an auxiliary network (``Q_value``)
    is trained to recover the categorical code ``c`` from generated
    images — a mutual-information term weighted by ``Lambda``.
    """

    def __init__(self, class_num, z_dim, batch_size):
        self.input_size = 32
        self.class_num = class_num
        self.z_dim = z_dim
        self.batch_size = batch_size
        self.Lambda = 10  # weight of the mutual-information (Q) term
        self.lr = 0.001

        # generator config
        gen_layer = [512, 256, 128, 1]
        # starting spatial size before the deconv stack
        gen_in_dim = int(self.input_size / 2**(len(gen_layer) - 1))
        # discriminator config
        disc_layer = [1, 64, 128, 256]

        # -- generator -----
        self.gen = Generator([u'gen_reshape', u'gen_deconv'], gen_in_dim,
                             gen_layer)

        # -- discriminator --
        self.disc = Discriminator([u'disc_conv', u'disc_fc'], disc_layer)

        # -- q ---------------
        self.Q_value = Discriminator([u'Q_val_conv', u'Q_val_fc'], disc_layer,
                                     class_num)

    def set_model(self):
        """Build generator, Q-network and discriminator losses/train ops."""
        # -- define place holder -------
        self.z = tf.placeholder(tf.float32, [self.batch_size, self.z_dim])
        self.c = tf.placeholder(tf.float32, [self.batch_size, self.class_num])
        self.figs = tf.placeholder(
            tf.float32, [self.batch_size, self.input_size, self.input_size, 1])

        # -- generator -----------------
        gen_figs = self.gen.set_model(self.c, self.z, self.batch_size, True,
                                      False)
        g_logits = self.disc.set_model(gen_figs, True, False)
        self.g_obj = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=g_logits, labels=tf.ones_like(g_logits)))
        self.train_gen = tf.train.AdamOptimizer(self.lr, beta1=0.5).minimize(
            self.g_obj, var_list=self.gen.get_variables())

        # -- q loss ------------------
        q_logits = self.Q_value.set_model(gen_figs, True, False)
        self.q_obj = self.Lambda * tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=q_logits,
                                                    labels=self.c))
        train_var = self.gen.get_variables() + self.Q_value.get_variables()
        # BUG FIX: this optimizer previously minimized self.g_obj, so the
        # Q objective was never actually optimized by train_q.
        self.train_q = tf.train.AdamOptimizer(self.lr, beta1=0.5).minimize(
            self.q_obj, var_list=train_var)

        # -- discriminator --------
        d_logits = self.disc.set_model(self.figs, True, True)
        d_obj_true = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=d_logits, labels=tf.ones_like(d_logits)))
        d_obj_false = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=g_logits, labels=tf.zeros_like(g_logits)))
        self.d_obj = d_obj_true + d_obj_false
        self.train_disc = tf.train.AdamOptimizer(self.lr, beta1=0.5).minimize(
            self.d_obj, var_list=self.disc.get_variables())

        # -- for figure generation -------
        self.gen_figs = self.gen.set_model(self.c, self.z, self.batch_size,
                                           False, True)

    def training_gen(self, sess, c_list, z_list):
        """One generator step; returns the generator loss."""
        _, g_obj = sess.run([self.train_gen, self.g_obj],
                            feed_dict={
                                self.c: c_list,
                                self.z: z_list
                            })
        return g_obj

    def training_disc(self, sess, c_list, z_list, figs):
        """One discriminator step; returns the discriminator loss."""
        _, d_obj = sess.run([self.train_disc, self.d_obj],
                            feed_dict={
                                self.c: c_list,
                                self.z: z_list,
                                self.figs: figs
                            })
        return d_obj

    def training_q(self, sess, c_list, z_list):
        """One Q-network step; returns the mutual-information loss."""
        _, q_obj = sess.run([self.train_q, self.q_obj],
                            feed_dict={
                                self.c: c_list,
                                self.z: z_list,
                            })
        return q_obj

    def gen_fig(self, sess, c, z):
        """Generate images for codes ``c``/``z`` as a list of 32x32x1 arrays."""
        ret_ = sess.run(self.gen_figs, feed_dict={self.c: c, self.z: z})
        ret = []
        for fig in ret_:
            ret.append(np.reshape(fig, [32, 32, 1]))
        return ret
class Model(object):
    """VAE-GAN with an auxiliary classifier (CVAE-GAN-style).

    Encoder/decoder form a label-conditioned VAE; a discriminator gives
    an adversarial signal plus feature-level reconstruction targets; the
    classifier supplies an extra feature-matching loss. Separate pretrain
    and train objectives are built for each of the four networks.
    """

    def __init__(self, z_dim, batch_size, class_num):
        self.z_dim = z_dim
        self.batch_size = batch_size
        self.lr = 0.0001
        self.gamma = 0.5  # weight of the feature-similarity term in obj_dec
        self.class_num = class_num

        # -- encoder -------
        self.enc = Encoder([1, 64, 128, 256], 2048, z_dim)

        # -- decoder -------
        self.dec = Decoder(z_dim, [256, 128, 32, 1])

        # -- discriminator --
        self.disc = Discriminator([1, 32, 128, 256], 512)

        # -- classifier -----
        self.cla = Classifier([1, 32, 128, 256], 512, class_num)

    def set_model(self):
        """Build all pretraining and training objectives and train ops."""
        self.x = tf.placeholder(tf.float32, [self.batch_size, 32, 32, 1])
        self.labels = tf.placeholder(tf.float32,
                                     [self.batch_size, self.class_num])

        # -- VAE ---------
        mu, log_sigma = self.enc.set_model(self.x, self.labels,
                                           is_training=True)
        # KL(q(z|x) || N(0, I)): summed over dims, averaged over the batch.
        obj_kl = tf.reduce_sum(
            mu * mu / 2.0 - log_sigma + tf.exp(2.0 * log_sigma) / 2.0 - 0.5,
            1)
        obj_kl = tf.reduce_mean(obj_kl, 0)

        # reparameterization trick
        eps = tf.random_normal([self.batch_size, self.z_dim])
        z = eps * tf.exp(log_sigma) + mu

        vae_gen_figs = self.dec.set_model(z, self.labels, self.batch_size,
                                          is_training=True)
        vae_logits, vae_feature_image = self.disc.set_model(vae_gen_figs,
                                                            is_training=True)
        # Pixel-space reconstruction (used only for VAE pretraining).
        reconstruct_error = tf.reduce_mean(
            tf.reduce_sum(tf.pow(vae_gen_figs - self.x, 2), [1, 2, 3]))
        obj_dec_from_vae = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=vae_logits, labels=tf.ones_like(vae_logits)))
        obj_disc_from_vae = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=vae_logits, labels=tf.zeros_like(vae_logits)))

        # -- classifier ------------
        class_logits, class_feature_image_x = self.cla.set_model(self.x, True)
        _, class_feature_image_vae = self.cla.set_model(
            vae_gen_figs, True, True)
        classifier_loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=class_logits,
                                                    labels=self.labels))
        # Match classifier feature maps of real vs. reconstructed images.
        classifier_feature_map_loss = tf.reduce_mean(
            tf.reduce_sum(
                pow(class_feature_image_x - class_feature_image_vae, 2),
                [1, 2, 3]))

        # -- draw from prior -------
        self.z_pr = tf.placeholder(dtype=tf.float32,
                                   shape=[self.batch_size, self.z_dim])
        dec_figs = self.dec.set_model(self.z_pr, self.labels,
                                      self.batch_size,
                                      is_training=True,
                                      reuse=True)
        dec_logits, _ = self.disc.set_model(dec_figs,
                                            is_training=True,
                                            reuse=True)
        obj_dec_from_prior = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=dec_logits, labels=tf.ones_like(dec_logits)))
        obj_disc_from_prior = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=dec_logits, labels=tf.zeros_like(dec_logits)))

        # -- obj from inputs --------
        disc_logits, input_feature_image = self.disc.set_model(
            self.x, is_training=True, reuse=True)
        obj_disc_from_inputs = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=disc_logits, labels=tf.ones_like(disc_logits)))
        u'''
        dis_similar = tf.reduce_mean(
            tf.reduce_sum(pow(tf.nn.sigmoid(vae_logits) - tf.nn.sigmoid(disc_logits), 2), 1))
        '''
        # Feature-level similarity between reconstructed and real images,
        # measured in discriminator feature space.
        dis_similar = tf.reduce_mean(
            tf.reduce_sum(pow(vae_feature_image - input_feature_image, 2),
                          [1, 2, 3]))

        # == setting obj ============
        # -- pretrain --------
        self.pre_obj_class = classifier_loss
        train_vars = self.cla.get_variables()
        self.pretrain_class = tf.train.AdamOptimizer(self.lr).minimize(
            self.pre_obj_class, var_list=train_vars)

        self.pre_obj_vae = reconstruct_error + obj_kl
        train_vars = self.enc.get_variables()
        train_vars.extend(self.dec.get_variables())
        self.pretrain_vae = tf.train.AdamOptimizer(self.lr).minimize(
            self.pre_obj_vae, var_list=train_vars)

        self.pre_obj_dec = obj_dec_from_prior
        train_vars = self.dec.get_variables()
        self.pretrain_dec = tf.train.AdamOptimizer(self.lr).minimize(
            self.pre_obj_dec, var_list=train_vars)

        self.pre_obj_disc = obj_disc_from_prior + obj_disc_from_inputs
        train_vars = self.disc.get_variables()
        self.pretrain_disc = tf.train.AdamOptimizer(self.lr).minimize(
            self.pre_obj_disc, var_list=train_vars)

        # -- train -----
        self.obj_class = classifier_loss
        train_vars = self.cla.get_variables()
        self.train_class = tf.train.AdamOptimizer(self.lr).minimize(
            self.obj_class, var_list=train_vars)

        # Encoder trains on feature similarity + KL + classifier features.
        self.obj_vae = dis_similar + obj_kl + classifier_feature_map_loss
        train_vars = self.enc.get_variables()
        self.train_vae = tf.train.AdamOptimizer(self.lr).minimize(
            self.obj_vae, var_list=train_vars)

        self.obj_dec = obj_dec_from_vae + obj_dec_from_prior + self.gamma * dis_similar + classifier_feature_map_loss
        train_vars = self.dec.get_variables()
        self.train_dec = tf.train.AdamOptimizer(self.lr).minimize(
            self.obj_dec, var_list=train_vars)

        self.obj_disc = obj_disc_from_vae + obj_disc_from_prior + obj_disc_from_inputs
        train_vars = self.disc.get_variables()
        self.train_disc = tf.train.AdamOptimizer(self.lr).minimize(
            self.obj_disc, var_list=train_vars)

        # -- for using ---------------------
        self.mu, _ = self.enc.set_model(self.x, self.labels,
                                        is_training=False,
                                        reuse=True)
        self.dec_figs = self.dec.set_model(self.z_pr, self.labels,
                                           self.batch_size,
                                           is_training=False,
                                           reuse=True)

    def pretraining_class(self, sess, figs, labels):
        # Supervised classifier pretraining step.
        _, pre_obj_class = sess.run([self.pretrain_class, self.pre_obj_class],
                                    feed_dict={
                                        self.x: figs,
                                        self.labels: labels
                                    })
        return pre_obj_class

    def pretraining_vae(self, sess, figs, labels):
        # VAE pretraining step (pixel reconstruction + KL).
        _, pre_obj_vae = sess.run([self.pretrain_vae, self.pre_obj_vae],
                                  feed_dict={
                                      self.x: figs,
                                      self.labels: labels
                                  })
        return pre_obj_vae

    def pretraining_dec(self, sess, figs, labels, z):
        # Decoder adversarial pretraining step (prior samples only).
        _, pre_obj_dec = sess.run([self.pretrain_dec, self.pre_obj_dec],
                                  feed_dict={
                                      self.x: figs,
                                      self.labels: labels,
                                      self.z_pr: z
                                  })
        return pre_obj_dec

    def pretraining_disc(self, sess, figs, labels, z):
        # Discriminator pretraining step.
        _, pre_obj_disc = sess.run([self.pretrain_disc, self.pre_obj_disc],
                                   feed_dict={
                                       self.x: figs,
                                       self.labels: labels,
                                       self.z_pr: z
                                   })
        return pre_obj_disc

    def training_class(self, sess, figs, labels):
        # Classifier training step.
        _, obj_class = sess.run([self.train_class, self.obj_class],
                                feed_dict={
                                    self.x: figs,
                                    self.labels: labels
                                })
        return obj_class

    def training_vae(self, sess, figs, labels):
        # Encoder training step.
        _, obj_vae = sess.run([self.train_vae, self.obj_vae],
                              feed_dict={
                                  self.x: figs,
                                  self.labels: labels
                              })
        return obj_vae

    def training_dec(self, sess, figs, labels, z):
        # Decoder training step.
        _, obj_dec = sess.run([self.train_dec, self.obj_dec],
                              feed_dict={
                                  self.x: figs,
                                  self.labels: labels,
                                  self.z_pr: z
                              })
        return obj_dec

    def training_disc(self, sess, figs, labels, z):
        # Discriminator training step.
        _, obj_disc = sess.run([self.train_disc, self.obj_disc],
                               feed_dict={
                                   self.x: figs,
                                   self.labels: labels,
                                   self.z_pr: z
                               })
        return obj_disc

    def encoding(self, sess, figs, labels):
        # Latent means for the given labeled inputs.
        ret = sess.run(self.mu, feed_dict={self.x: figs, self.labels: labels})
        return ret

    def gen_fig(self, sess, labels, z):
        # Decode prior samples (with labels) into figures.
        ret = sess.run(self.dec_figs,
                       feed_dict={
                           self.labels: labels,
                           self.z_pr: z
                       })
        return ret
class Model(object):
    """1-D GAN with an optional zero-centered gradient penalty on real data.

    ``mode`` selects the discriminator update: 'normal' is the plain GAN
    loss; 'zero' adds ``gamma * L`` where L penalizes the squared gradient
    of sigmoid(D) with respect to the real inputs. ``gamma`` is a
    non-trainable variable with an (unused by default) decay op.
    """

    def __init__(self, z_dim, mode):
        self.z_dim = z_dim
        self.true_input_dim = 1
        self.lr = 0.005
        #self.lr = 0.002
        self.mode = mode
        # Penalty weight; decayed by 1% whenever gamma_decay_op is run.
        self.gamma = tf.Variable(name="gamma",
                                 initial_value=10.0,
                                 trainable=False)
        self.gamma_decay_op = tf.assign(self.gamma, 0.99 * self.gamma)

        # generator config
        gen_layer = [z_dim, 32, 32, 32, self.true_input_dim]
        # discriminator config
        disc_layer = [self.true_input_dim, 32, 32, 32, 1]

        # -- generator -----
        self.gen = Generator([u'gen_deconv'], gen_layer)

        # -- discriminator --
        self.disc = Discriminator([u'disc_conv'], disc_layer)

    def set_model(self):
        """Build losses, the gradient penalty, and mode-dependent train ops."""
        # -- define place holder -------
        self.z = tf.placeholder(tf.float32, [None, self.z_dim])
        self.true_input = tf.placeholder(tf.float32,
                                         [None, self.true_input_dim])

        # -- generator -----------------
        gen_out = self.gen.set_model(self.z, True, False)
        g_logits = self.disc.set_model(gen_out, True, False)
        '''
        self.g_obj = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits = g_logits,
                labels = tf.ones_like(g_logits)
            )
        )
        '''
        # Negated CE against the "fake" label — an alternative to the
        # saturating loss commented out above.
        self.g_obj = -tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=g_logits, labels=tf.zeros_like(g_logits)))

        # -- discriminator --------
        d_logits = self.disc.set_model(self.true_input, True, True)
        d_obj_true = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=d_logits, labels=tf.ones_like(d_logits)))
        d_obj_false = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=g_logits, labels=tf.zeros_like(g_logits)))
        self.d_obj = d_obj_true + d_obj_false

        # -- gradient penalty -------
        '''
        g_grad = tf.gradients(self.g_obj, self.gen.get_variables())
        d_grad = tf.gradients(self.d_obj, self.disc.get_variables())
        grads = g_grad + d_grad
        L = sum( tf.reduce_mean(tf.square(g)) for g in grads )
        '''
        # Penalize the gradient of sigmoid(D) w.r.t. the real inputs
        # (zero-centered penalty on the data distribution).
        d_grad = tf.gradients(tf.nn.sigmoid(d_logits), self.true_input)
        L = tf.reduce_mean(tf.reduce_sum(tf.square(d_grad), axis=1))

        # -- for train operation ----
        #self.train_gen = tf.train.AdamOptimizer(self.lr, beta1 = 0.5).minimize(self.g_obj + self.gamma * L, var_list = self.gen.get_variables())
        self.train_gen = tf.train.AdamOptimizer(self.lr, beta1=0.5).minimize(
            self.g_obj, var_list=self.gen.get_variables())

        if self.mode == 'normal':
            print('normal mode')
            self.train_disc = tf.train.AdamOptimizer(
                self.lr, beta1=0.5).minimize(self.d_obj,
                                             var_list=self.disc.get_variables())
        elif self.mode == 'zero':
            print('zero mode')
            # Discriminator loss augmented with the gradient penalty.
            self.train_disc = tf.train.AdamOptimizer(
                self.lr, beta1=0.5).minimize(self.d_obj + self.gamma * L,
                                             var_list=self.disc.get_variables())

        # -- for figure generation -------
        self.gen_figs = self.gen.set_model(self.z, False, True)
        self.sigmoid_d = tf.nn.sigmoid(
            self.disc.set_model(self.true_input, False, True))
        self.grad_sigmoid_d = tf.gradients(tf.log(self.sigmoid_d),
                                           self.true_input)

    def training_gen(self, sess, z_list, figs):
        # One generator step (figs fed although g_obj does not require them).
        _, g_obj = sess.run([self.train_gen, self.g_obj],
                            feed_dict={
                                self.z: z_list,
                                self.true_input: figs
                            })
        return g_obj

    def training_disc(self, sess, z_list, figs):
        # One discriminator step.
        _, d_obj = sess.run([self.train_disc, self.d_obj],
                            feed_dict={
                                self.z: z_list,
                                self.true_input: figs
                            })
        #sess.run(self.gamma_decay_op)
        return d_obj

    def generate(self, sess, z):
        # Sample generator outputs for latent codes z.
        ret_ = sess.run(self.gen_figs, feed_dict={self.z: z})
        return ret_

    def get_disc_value(self, sess, inputs):
        # Return D(x) and the gradient of log D(x) w.r.t. the inputs.
        val, grad = sess.run([self.sigmoid_d, self.grad_sigmoid_d],
                             feed_dict={self.true_input: inputs})
        return val, grad
class Model(object):
    """WGAN on 2-D toy data.

    Critic losses use raw scores (no sigmoid), RMSProp optimization, and
    weight clipping after each critic step to enforce the Lipschitz
    constraint.
    """

    def __init__(self, z_dim):
        self.z_dim = z_dim
        self.true_input_dim = 2
        self.lr = 0.0005

        # generator config
        gen_layer = [z_dim, 128, 128, self.true_input_dim]
        # discriminator config
        disc_layer = [self.true_input_dim, 128, 128, 1]

        # -- generator -----
        self.gen = Generator([u'gen_deconv'], gen_layer)

        # -- discriminator --
        self.disc = Discriminator([u'disc_conv'], disc_layer)

    def set_model(self):
        """Build the Wasserstein objectives, train ops, and clip ops."""
        # -- define place holder -------
        self.z = tf.placeholder(tf.float32, [None, self.z_dim])
        self.true_input = tf.placeholder(tf.float32,
                                         [None, self.true_input_dim])

        # -- generator -----------------
        gen_out = self.gen.set_model(self.z, True, False)
        g_logits = self.disc.set_model(gen_out, True, False)
        # Generator maximizes the critic score on fakes.
        self.g_obj = -tf.reduce_mean(tf.reduce_sum(g_logits, 1))
        self.train_gen = tf.train.RMSPropOptimizer(self.lr).minimize(
            self.g_obj, var_list=self.gen.get_variables())

        # -- discriminator --------
        d_logits = self.disc.set_model(self.true_input, True, True)
        d_obj_true = tf.reduce_mean(tf.reduce_sum(d_logits, 1))
        d_obj_false = tf.reduce_mean(tf.reduce_sum(g_logits, 1))
        # Critic maximizes E_real[D] - E_fake[D]; we minimize the negation.
        # NOTE: removed a dead weight-decay accumulation (tf.nn.l2_loss over
        # critic variables) that was computed but never added to any loss.
        self.d_obj = -d_obj_true + d_obj_false
        self.train_disc = tf.train.RMSPropOptimizer(self.lr).minimize(
            self.d_obj, var_list=self.disc.get_variables())

        # -- for clipping ----------------
        # Clip every critic weight into [-c, c] (WGAN Lipschitz constraint).
        c = 0.01
        self.clip_D = [
            v.assign(tf.clip_by_value(v, -c, c))
            for v in self.disc.get_variables()
        ]

        # -- for figure generation -------
        self.gen_figs = self.gen.set_model(self.z, False, True)

    def training_gen(self, sess, z_list):
        """One generator step; returns the generator objective."""
        _, g_obj = sess.run([self.train_gen, self.g_obj],
                            feed_dict={self.z: z_list})
        return g_obj

    def training_disc(self, sess, z_list, figs):
        """One critic step followed by weight clipping."""
        _, d_obj = sess.run([self.train_disc, self.d_obj],
                            feed_dict={
                                self.z: z_list,
                                self.true_input: figs
                            })
        sess.run([self.clip_D])
        return d_obj

    def generate(self, sess, z):
        """Sample generator outputs for latent codes ``z``."""
        ret_ = sess.run(self.gen_figs, feed_dict={self.z: z})
        return ret_
class Model(object):
    """Vanilla GAN on a 1-D toy target, regularized by a gradient penalty
    (scaled by gamma) on the parameter gradients of both players."""

    def __init__(self, z_dim):
        self.z_dim = z_dim
        self.true_input_dim = 1
        self.lr = 0.005
        self.gamma = 0.3
        # network layouts
        gen_layer = [z_dim, 32, 32, self.true_input_dim]
        disc_layer = [self.true_input_dim, 32, 32, 1]
        # generator / discriminator modules
        self.gen = Generator([u'gen_deconv'], gen_layer)
        self.disc = Discriminator([u'disc_conv'], disc_layer)

    def set_model(self):
        """Assemble the training graph plus the inference endpoints."""
        # placeholders
        self.z = tf.placeholder(tf.float32, [None, self.z_dim])
        self.true_input = tf.placeholder(tf.float32,
                                         [None, self.true_input_dim])

        # generator path: saturating minimax loss, minimize log(1 - D(G(z)))
        fake_samples = self.gen.set_model(self.z, True, False)
        fake_logits = self.disc.set_model(fake_samples, True, False)
        self.g_obj = -tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=fake_logits, labels=tf.zeros_like(fake_logits)))

        # discriminator path: real -> 1, fake -> 0
        real_logits = self.disc.set_model(self.true_input, True, True)
        loss_real = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=real_logits, labels=tf.ones_like(real_logits)))
        loss_fake = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=fake_logits, labels=tf.zeros_like(fake_logits)))
        self.d_obj = loss_real + loss_fake

        # penalty: mean squared parameter gradients of both objectives
        all_grads = (tf.gradients(self.g_obj, self.gen.get_variables()) +
                     tf.gradients(self.d_obj, self.disc.get_variables()))
        L = sum(tf.reduce_mean(tf.square(g)) for g in all_grads)

        # train ops — each player also minimizes the shared penalty term
        self.train_gen = tf.train.AdamOptimizer(self.lr, beta1=0.5).minimize(
            self.g_obj + self.gamma * L, var_list=self.gen.get_variables())
        self.train_disc = tf.train.AdamOptimizer(self.lr, beta1=0.5).minimize(
            self.d_obj + self.gamma * L, var_list=self.disc.get_variables())

        # inference endpoints
        self.gen_figs = self.gen.set_model(self.z, False, True)
        self.sigmoid_d = tf.nn.sigmoid(
            self.disc.set_model(self.true_input, False, True))

    def training_gen(self, sess, z_list, figs):
        """Run one generator step; returns the generator objective."""
        feed = {self.z: z_list, self.true_input: figs}
        _, g_obj = sess.run([self.train_gen, self.g_obj], feed_dict=feed)
        return g_obj

    def training_disc(self, sess, z_list, figs):
        """Run one discriminator step; returns the discriminator objective."""
        feed = {self.z: z_list, self.true_input: figs}
        _, d_obj = sess.run([self.train_disc, self.d_obj], feed_dict=feed)
        return d_obj

    def generate(self, sess, z):
        """Draw samples from the generator for latent codes z."""
        return sess.run(self.gen_figs, feed_dict={self.z: z})

    def get_disc_value(self, sess, inputs):
        """Return sigmoid(D(x)) for the given inputs."""
        return sess.run(self.sigmoid_d, feed_dict={self.true_input: inputs})
class Model(object):
    """EBGAN: autoencoder-based discriminator whose reconstruction error is
    the energy, with margin m on fakes and a pulling-away (PT) regularizer
    on the generator."""

    def __init__(self, m, pt_coeff, z_dim, ae_hidden_dim, batch_size):
        self.input_size = 256
        self.z_dim = z_dim
        self.batch_size = batch_size
        self.m = m
        self.pt_coeff = pt_coeff

        # generator config
        gen_layer = [1024, 512, 512, 256, 256, 128, 3]
        gen_in_dim = int(self.input_size / 2**(len(gen_layer) - 1))
        # discriminator (autoencoder) config
        disc_enc_layer = [3, 64, 256, 512]
        disc_dec_layer = [1024, 512, 512, 256, 256, 128, 3]
        disc_dec_in_dim = int(self.input_size / 2**(len(disc_dec_layer) - 1))

        # generator / discriminator modules
        self.gen = Generator([u'gen_reshape', u'gen_deconv'], gen_in_dim,
                             gen_layer)
        self.disc = Discriminator(ae_hidden_dim, disc_enc_layer,
                                  disc_dec_in_dim, disc_dec_layer)
        self.lr = 0.0002

    def set_model(self):
        """Build both objectives and their Adam train ops."""
        # z -> generated figure -> discriminator (reconstruction energy)
        self.z = tf.placeholder(tf.float32, [self.batch_size, self.z_dim])
        gen_figs = self.gen.set_model(self.z, self.batch_size, True, False)
        g_encoded, g_loss = self.disc.set_model(gen_figs, self.batch_size,
                                                True, False)
        # generator: low energy on fakes + PT term for sample diversity
        self.g_obj = g_loss + self.pt_coeff * get_pt(g_encoded)
        self.train_gen = tf.train.AdamOptimizer(self.lr, beta1=0.5).minimize(
            self.g_obj, var_list=self.gen.get_variables())

        # real figure -> discriminator
        self.figs = tf.placeholder(
            tf.float32,
            [self.batch_size, self.input_size, self.input_size, 3])
        _, d_loss = self.disc.set_model(self.figs, self.batch_size, True,
                                        True)
        # discriminator: low energy on real, hinge at margin m on fakes
        self.d_obj = d_loss + tf.maximum(0.0, self.m - g_loss)
        self.train_disc = tf.train.AdamOptimizer(self.lr, beta1=0.5).minimize(
            self.d_obj, var_list=self.disc.get_variables())

        # inference-mode sampling endpoint
        self.gen_figs = self.gen.set_model(self.z, self.batch_size, False,
                                           True)

    def training_gen(self, sess, z_list):
        """One generator update; returns g_obj."""
        _, g_obj = sess.run([self.train_gen, self.g_obj],
                            feed_dict={self.z: z_list})
        return g_obj

    def training_disc(self, sess, z_list, figs):
        """One discriminator update; returns d_obj."""
        feed = {self.z: z_list, self.figs: figs}
        _, d_obj = sess.run([self.train_disc, self.d_obj], feed_dict=feed)
        return d_obj

    def gen_fig(self, sess, z):
        """Generate figures for latent codes z."""
        return sess.run(self.gen_figs, feed_dict={self.z: z})
class Model(object):
    """Paired image-translation WGAN (DiscoGAN-style): two generators
    mapping A<->B with an L1 cycle-reconstruction loss, and one
    weight-clipped critic per domain."""

    def __init__(self, z_dim, batch_size, clip_threshold):
        self.input_size = 256
        self.z_dim = z_dim
        self.batch_size = batch_size
        self.clip_threshold = clip_threshold
        # generator config
        gen_conv_layer = [3, 64, 128, 256, 512, 512, 512, 512, 512]
        gen_deconv_layer = [512, 512, 512, 512, 256, 128, 64, 3]
        # discriminator config
        disc_layer = [3, 64, 256, 512, 512, 512]
        # -- generators -----
        self.genA = Generator([u'reshape_zA', u'gen_convA', u'gen_deconvA'],
                              gen_conv_layer, gen_deconv_layer)
        self.genB = Generator([u'reshape_zB', u'gen_convB', u'gen_deconvB'],
                              gen_conv_layer, gen_deconv_layer)
        # -- discriminators --
        self.discA = Discriminator([u'disc_convA', u'disc_fcA'], disc_layer)
        self.discB = Discriminator([u'disc_convB', u'disc_fcB'], disc_layer)
        self.lr = 0.00005

    def set_model(self):
        """Build translation, reconstruction and critic losses + train ops.

        FIX: real B figures were scored with ``self.discA`` instead of
        ``self.discB``, so the real-data term of ``d_objB`` carried no
        gradient w.r.t. discB's variables and critic B never learned from
        real data.
        """
        # -- place holder --------
        self.figsA = tf.placeholder(
            tf.float32,
            [self.batch_size, self.input_size, self.input_size, 3])
        self.figsB = tf.placeholder(
            tf.float32,
            [self.batch_size, self.input_size, self.input_size, 3])
        self.z = tf.placeholder(tf.float32, [self.batch_size, self.z_dim])

        # -- generators -----------------
        # gen-disc loss: translate each domain and score with the
        # opposite-domain critic
        gen_figsB_from_A = self.genA.set_model(self.figsA, self.z,
                                               self.batch_size, True, False)
        gen_loss_from_A = self.discB.set_model(gen_figsB_from_A, True, False)
        gen_figsA_from_B = self.genB.set_model(self.figsB, self.z,
                                               self.batch_size, True, False)
        gen_loss_from_B = self.discA.set_model(gen_figsA_from_B, True, False)

        # reconstruction error (cycle consistency, per-image L1)
        re_figA = self.genB.set_model(gen_figsB_from_A, self.z,
                                      self.batch_size, True, True)
        figA_recon_error = tf.reduce_sum(tf.abs(self.figsA - re_figA),
                                         [1, 2, 3])
        re_figB = self.genA.set_model(gen_figsA_from_B, self.z,
                                      self.batch_size, True, True)
        figB_recon_error = tf.reduce_sum(tf.abs(self.figsB - re_figB),
                                         [1, 2, 3])

        self.g_obj = tf.reduce_mean(-gen_loss_from_A - gen_loss_from_B +
                                    figA_recon_error + figB_recon_error)
        train_var = self.genA.get_variables()
        train_var.extend(self.genB.get_variables())
        self.train_gen = tf.train.RMSPropOptimizer(self.lr).minimize(
            self.g_obj, var_list=train_var)

        # -- discA --------
        d_lossA = self.discA.set_model(self.figsA, True, True)
        self.d_objA = tf.reduce_mean(-d_lossA + gen_loss_from_B)
        self.train_discA = tf.train.RMSPropOptimizer(self.lr).minimize(
            self.d_objA, var_list=self.discA.get_variables())

        # -- discB --------
        # BUG FIX: was self.discA.set_model(self.figsB, True, True)
        d_lossB = self.discB.set_model(self.figsB, True, True)
        self.d_objB = tf.reduce_mean(-d_lossB + gen_loss_from_A)
        self.train_discB = tf.train.RMSPropOptimizer(self.lr).minimize(
            self.d_objB, var_list=self.discB.get_variables())

        # -- clipping --------
        # weight clipping keeps both critics (approximately) Lipschitz
        c = self.clip_threshold
        var_list = self.discA.get_variables()
        var_list.extend(self.discB.get_variables())
        self.disc_clip = [
            _.assign(tf.clip_by_value(_, -c, c)) for _ in var_list
        ]

        # -- for figure generation -------
        self.gen_figsB_from_A = self.genA.set_model(self.figsA, self.z,
                                                    self.batch_size, False,
                                                    True)
        self.gen_figsA_from_B = self.genB.set_model(self.figsB, self.z,
                                                    self.batch_size, False,
                                                    True)

    def training_gen(self, sess, figsA, figsB, z_list):
        """One joint update of both generators; returns g_obj."""
        _, g_obj = sess.run([self.train_gen, self.g_obj],
                            feed_dict={
                                self.figsA: figsA,
                                self.figsB: figsB,
                                self.z: z_list
                            })
        return g_obj

    def training_disc(self, sess, figsA, figsB, z_list):
        """One update of each critic, then weight clipping.

        Returns the pair (d_objA, d_objB).
        """
        feed = {self.z: z_list, self.figsA: figsA, self.figsB: figsB}
        # optimize
        _, d_objA = sess.run([self.train_discA, self.d_objA], feed_dict=feed)
        _, d_objB = sess.run([self.train_discB, self.d_objB], feed_dict=feed)
        # clipping
        sess.run(self.disc_clip)
        return d_objA, d_objB

    def gen_figA(self, sess, figsB, z):
        """Translate domain-B figures into domain A."""
        ret = sess.run(self.gen_figsA_from_B,
                       feed_dict={
                           self.figsB: figsB,
                           self.z: z
                       })
        return ret

    def gen_figB(self, sess, figsA, z):
        """Translate domain-A figures into domain B."""
        ret = sess.run(self.gen_figsB_from_A,
                       feed_dict={
                           self.figsA: figsA,
                           self.z: z
                       })
        return ret
class Model(object):
    """WGAN for 256x256 RGB images: weight-clipped critic, RMSProp."""

    def __init__(self, z_dim, batch_size, clip_threshold):
        self.input_size = 256
        self.z_dim = z_dim
        self.batch_size = batch_size
        self.clip_threshold = clip_threshold
        # generator config
        gen_layer = [1024, 512, 512, 256, 256, 128, 3]
        gen_in_dim = int(self.input_size / 2**(len(gen_layer) - 1))
        # discriminator config
        disc_layer = [3, 64, 256, 512]
        # generator / discriminator modules
        self.gen = Generator([u'gen_reshape', u'gen_deconv'], gen_in_dim,
                             gen_layer)
        self.disc = Discriminator([u'disc_conv', u'disc_fc'], disc_layer)
        self.lr = 0.00005

    def set_model(self):
        """Build critic/generator objectives, train ops and clip ops."""
        # z -> generated figure -> critic
        self.z = tf.placeholder(tf.float32, [self.batch_size, self.z_dim])
        fake_figs = self.gen.set_model(self.z, self.batch_size, True, False)
        fake_score = self.disc.set_model(fake_figs, True, False)
        # maximize the critic score on fakes (minus corresponds to
        # maximization)
        self.g_obj = -tf.reduce_mean(fake_score)
        self.train_gen = tf.train.RMSPropOptimizer(self.lr).minimize(
            self.g_obj, var_list=self.gen.get_variables())

        # real figure -> critic
        self.figs = tf.placeholder(
            tf.float32,
            [self.batch_size, self.input_size, self.input_size, 3])
        real_score = self.disc.set_model(self.figs, True, True)
        # Wasserstein critic objective: E[D(fake)] - E[D(real)]
        self.d_obj = tf.reduce_mean(-real_score + fake_score)
        self.train_disc = tf.train.RMSPropOptimizer(self.lr).minimize(
            self.d_obj, var_list=self.disc.get_variables())

        # weight clipping keeps the critic (approximately) Lipschitz
        c = self.clip_threshold
        self.disc_clip = [
            _.assign(tf.clip_by_value(_, -c, c))
            for _ in self.disc.get_variables()
        ]

        # inference-mode sampling endpoint
        self.gen_figs = self.gen.set_model(self.z, self.batch_size, False,
                                           True)

    def training_gen(self, sess, z_list):
        """One generator step; returns g_obj."""
        _, g_obj = sess.run([self.train_gen, self.g_obj],
                            feed_dict={self.z: z_list})
        return g_obj

    def training_disc(self, sess, z_list, figs):
        """One critic step followed by weight clipping; returns d_obj."""
        feed = {self.z: z_list, self.figs: figs}
        # optimize
        _, d_obj = sess.run([self.train_disc, self.d_obj], feed_dict=feed)
        # clipping
        sess.run(self.disc_clip)
        return d_obj

    def gen_fig(self, sess, z):
        """Generate figures for latent codes z."""
        return sess.run(self.gen_figs, feed_dict={self.z: z})
class Model(object):
    """Least-squares GAN (LSGAN) on a 2-D toy target: squared-error
    objectives with real label 1 and fake label 0, Adam(beta1=0.5)."""

    def __init__(self, z_dim):
        self.z_dim = z_dim
        self.true_input_dim = 2
        self.lr = 0.001
        # network layouts
        gen_layer = [z_dim, 128, 128, self.true_input_dim]
        disc_layer = [self.true_input_dim, 128, 128, 1]
        # generator / discriminator modules
        self.gen = Generator([u'gen_deconv'], gen_layer)
        self.disc = Discriminator([u'disc_conv'], disc_layer)

    def set_model(self):
        """Assemble least-squares losses and train ops for both players."""
        # placeholders
        self.z = tf.placeholder(tf.float32, [None, self.z_dim])
        self.true_input = tf.placeholder(tf.float32,
                                         [None, self.true_input_dim])

        # generator: push D(G(z)) toward the "real" label 1
        fake_out = self.gen.set_model(self.z, True, False)
        fake_logits = self.disc.set_model(fake_out, True, False)
        self.g_obj = 0.5 * tf.reduce_mean(
            tf.reduce_sum(tf.square(fake_logits - tf.ones_like(fake_logits)),
                          1))
        self.train_gen = tf.train.AdamOptimizer(self.lr, beta1=0.5).minimize(
            self.g_obj, var_list=self.gen.get_variables())

        # discriminator: real -> 1, fake -> 0
        real_logits = self.disc.set_model(self.true_input, True, True)
        loss_real = tf.reduce_mean(
            tf.reduce_sum(tf.square(real_logits - tf.ones_like(real_logits)),
                          1))
        loss_fake = tf.reduce_mean(
            tf.reduce_sum(
                tf.square(fake_logits - tf.zeros_like(fake_logits)), 1))
        self.d_obj = 0.5 * (loss_real + loss_fake)
        self.train_disc = tf.train.AdamOptimizer(self.lr, beta1=0.5).minimize(
            self.d_obj, var_list=self.disc.get_variables())

        # inference-mode sampling endpoint
        self.gen_figs = self.gen.set_model(self.z, False, True)

    def training_gen(self, sess, z_list):
        """One generator step; returns g_obj."""
        _, g_obj = sess.run([self.train_gen, self.g_obj],
                            feed_dict={self.z: z_list})
        return g_obj

    def training_disc(self, sess, z_list, figs):
        """One discriminator step; returns d_obj."""
        feed = {self.z: z_list, self.true_input: figs}
        _, d_obj = sess.run([self.train_disc, self.d_obj], feed_dict=feed)
        return d_obj

    def generate(self, sess, z):
        """Sample from the generator for latent codes z."""
        return sess.run(self.gen_figs, feed_dict={self.z: z})
class Model(object):
    """f-GAN: generator/discriminator pair trained under a chosen
    f-divergence via its variational (Fenchel-conjugate) lower bound."""

    def __init__(self,
                 z_dim,
                 batch_size,
                 image_height,
                 image_width,
                 image_channels,
                 lr=0.0001,
                 f_divergence='pearson'):
        self.z_dim = z_dim
        self.batch_size = batch_size
        self.image_height = image_height
        self.image_width = image_width
        self.image_channels = image_channels
        self.lr = lr
        self.f_divergence = f_divergence

        gen_layer = [500, 500]
        disc_layer = [500, 500, 500]
        # generator
        self.G = Generator(['g/linear1', 'g/linear2', 'g/linear3'], z_dim,
                           gen_layer, image_height, image_width,
                           image_channels)
        # discriminator
        self.D = Discriminator(
            ['d_linear1', 'd_linear2', 'd_linear3', 'd_linear4'],
            disc_layer,
            image_height,
            image_width,
            image_channels,
            f_divergence=f_divergence,
            output_dim=1)

    def set_model(self):
        """Build the variational f-GAN objectives and Adam train ops."""
        self.z = tf.placeholder(tf.float32, [self.batch_size, self.z_dim])
        self.x = tf.placeholder(tf.float32, [
            self.batch_size,
            self.image_height * self.image_width * self.image_channels
        ])

        # generator: maximize E[f*(T(G(z)))] (minus sign for minimization)
        gen_x = self.G.set_model(self.z, is_training=True, reuse=False)
        g_logits = self.D.set_model(gen_x, is_training=True, reuse=False)
        self.g_obj = -tf.reduce_mean(self.f_star(g_logits))
        self.train_G = tf.train.AdamOptimizer(self.lr, beta1=0.5)\
            .minimize(self.g_obj, var_list=self.G.get_variables())

        # discriminator: maximize E[T(x)] - E[f*(T(G(z)))]
        d_logits = self.D.set_model(self.x, is_training=True, reuse=True)
        self.d_obj = -tf.reduce_mean(d_logits) + tf.reduce_mean(
            self.f_star(g_logits))
        self.train_D = tf.train.AdamOptimizer(self.lr, beta1=0.5)\
            .minimize(self.d_obj, var_list=self.D.get_variables())

        # for images generation
        self.gen_images = self.G.set_model(self.z,
                                           is_training=False,
                                           reuse=True)

    def training_G(self, sess, z_list):
        """One generator update; returns g_obj."""
        _, g_obj = sess.run([self.train_G, self.g_obj],
                            feed_dict={self.z: z_list})
        return g_obj

    def training_D(self, sess, z_list, x_list):
        """One discriminator update; returns d_obj."""
        _, d_obj = sess.run([self.train_D, self.d_obj],
                            feed_dict={
                                self.z: z_list,
                                self.x: x_list
                            })
        return d_obj

    def gen_samples(self, sess, z):
        """Generate images for latent codes z."""
        ret = sess.run(self.gen_images, feed_dict={self.z: z})
        return ret

    def f_star(self, logits):
        """Fenchel conjugate f*(t) of the configured f-divergence,
        applied element-wise to the discriminator outputs.

        Raises:
            ValueError: if ``self.f_divergence`` is not a supported name
                (previously this fell through and returned None silently).
        """
        if self.f_divergence == 'pearson':
            # pearson chi^2: f*(t) = 1/4 t^2 + t
            return 0.25 * tf.square(logits) + logits
        elif self.f_divergence == 'kl':
            # KL: f*(t) = exp(t - 1)
            return tf.exp(logits - 1)
        elif self.f_divergence == 'rkl':
            # reverse KL: f*(t) = -1 - log(-t)
            # BUG FIX: original called the nonexistent tf.subscribe(...)
            # and omitted the logarithm entirely.
            return -tf.ones_like(logits) - tf.log(-logits)
        elif self.f_divergence == 'squared_hellinger':
            # squared Hellinger: f*(t) = t / (1 - t)
            return tf.divide(logits, tf.ones_like(logits) - logits)
        elif self.f_divergence == 'jensen_shannon':
            # JS: f*(t) = -log(2 - exp(t))
            return -tf.log(2 * tf.ones_like(logits) - tf.exp(logits))
        elif self.f_divergence == 'original_gan':
            # GAN: f*(t) = -log(1 - exp(t))
            return -tf.log(tf.ones_like(logits) - tf.exp(logits))
        raise ValueError(
            'unknown f_divergence: {}'.format(self.f_divergence))