def train(self):
  """Train the dual VAE that maps children to parents and parents to children."""
  # set phase
  self._enter_('train')
  # get data pipeline
  data, info, _ = loads(self.config)
  c1_real, p1_real, c2_real, p2_real = tf.unstack(data, axis=1)
  label, cond = tf.unstack(info, axis=1)
  # encode image to a vector
  c1_mu, c1_sigma, feat_c1 = self._encoder(c1_real)
  p2_mu, p2_sigma, feat_p2 = self._encoder(p2_real, True)
  # child to parent
  c1_z = c1_mu + c1_sigma * tf.random_normal(tf.shape(c1_mu))
  c1_z = self._generator(c1_z, cond)
  c1_fake = tf.clip_by_value(c1_z, 1e-8, 1 - 1e-8)
  # parent to child
  p2_z = p2_mu + p2_sigma * tf.random_normal(tf.shape(p2_mu))
  p2_z = self._generator(p2_z, cond, True)
  p2_fake = tf.clip_by_value(p2_z, 1e-8, 1 - 1e-8)
  # loss for generator
  E1_loss = self._loss_vae(c1_real, c1_fake, c1_mu, c1_sigma)
  E2_loss = self._loss_vae(p2_real, p2_fake, p2_mu, p2_sigma)
  E_loss = E1_loss + E2_loss
  loss = E_loss
  # allocate two optimizers
  global_step = tf.train.create_global_step()
  var_e = variables.select_vars('encoder')
  var_g = variables.select_vars('generator')
  op1 = updater.default(self.config, loss, global_step, var_e, 0)
  op2 = updater.default(self.config, loss, None, var_g, 1)
  train_op = tf.group(op1, op2)  # update at the same time
  saver = tf.train.Saver(var_list=variables.all())
  # hooks
  self.add_hook(self.snapshot.init())
  self.add_hook(self.summary.init())
  self.add_hook(context.Running_Hook(
      config=self.config.log,
      step=global_step,
      keys=['E'],
      values=[E_loss],
      func_test=self.test,
      func_val=None))
  with context.DefaultSession(self.hooks) as sess:
    self.snapshot.restore(sess, saver)
    while not sess.should_stop():
      sess.run(train_op)
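
# `_loss_vae` is defined elsewhere in the repo and not shown here. A minimal
# sketch of the standard VAE objective such a helper typically computes:
# Bernoulli reconstruction cross-entropy (the fakes above are clipped to
# (1e-8, 1 - 1e-8), which suggests a log-likelihood term) plus the analytic
# KL divergence of a diagonal-Gaussian posterior from a unit Gaussian. The
# name `_loss_vae_sketch` and the exact weighting are assumptions; only the
# signature mirrors the call sites above.
import tensorflow as tf

def _loss_vae_sketch(real, fake, mu, sigma):
  """ELBO-style loss: reconstruction term + KL(q(z|x) || N(0, I))."""
  # pixel-wise Bernoulli cross-entropy; `fake` is assumed clipped to (0, 1)
  recon = -tf.reduce_sum(
      real * tf.log(fake) + (1.0 - real) * tf.log(1.0 - fake),
      axis=[1, 2, 3])
  # closed-form KL for N(mu, sigma^2) against N(0, I)
  kl = 0.5 * tf.reduce_sum(
      tf.square(mu) + tf.square(sigma)
      - tf.log(1e-8 + tf.square(sigma)) - 1.0, axis=1)
  return tf.reduce_mean(recon + kl)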
def train(self):
  """Train the conditional GAN with an auxiliary classifier head."""
  # set phase
  self._enter_('train')
  # get data pipeline
  data, label, _ = loads(self.config)
  # generate fake images
  logit_G, net_G = self._generator(label)
  # discriminate fake images
  logit_F, net_F, out_F = self._discriminator(logit_G, label, reuse=False)
  # discriminate real images
  logit_R, net_R, out_R = self._discriminator(data, label, reuse=True)
  # classify fake images
  c_F = self._classifier(out_F, reuse=False)
  # classify real images
  c_R = self._classifier(out_R, reuse=True)
  # loss
  D_loss, G_loss, C_loss = self._loss(label, logit_F, logit_R, c_F, c_R)
  # select trainable variables per sub-network
  d_vars = variables.select_vars('discriminator')
  g_vars = variables.select_vars('generator')
  c_vars = variables.select_vars('classifier')
  # update
  step = tf.train.create_global_step()
  d_op = updater.default(self.config, D_loss, step, d_vars, 0)
  g_op = updater.default(self.config, G_loss, step, g_vars, 0)
  c_op = updater.default(self.config, C_loss, step, c_vars, 0)
  # assemble
  train_op = [d_op, g_op, c_op]
  saver = tf.train.Saver(variables.all())
  # hooks
  self.add_hook(self.snapshot.init())
  self.add_hook(self.summary.init())
  self.add_hook(context.Running_Hook(
      config=self.config.log,
      step=step,
      keys=['D_loss', 'G_loss', 'C_loss'],
      values=[D_loss, G_loss, C_loss],
      func_test=self.test,
      func_val=None))
  # monitored session
  with context.DefaultSession(self.hooks) as sess:
    self.snapshot.restore(sess, saver)
    while not sess.should_stop():
      sess.run(train_op)
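
# `_loss` above is also project code. A sketch of the usual
# GAN-plus-classifier decomposition it likely implements: sigmoid
# cross-entropy for the discriminator/generator pair, and softmax
# cross-entropy on both real and generated samples for the classifier
# head. Integer class labels are an assumption; only the signature and
# return tuple mirror the call site.
import tensorflow as tf

def _loss_sketch(label, logit_F, logit_R, c_F, c_R):
  # discriminator: push real logits toward 1, fake logits toward 0
  d_real = tf.nn.sigmoid_cross_entropy_with_logits(
      logits=logit_R, labels=tf.ones_like(logit_R))
  d_fake = tf.nn.sigmoid_cross_entropy_with_logits(
      logits=logit_F, labels=tf.zeros_like(logit_F))
  D_loss = tf.reduce_mean(d_real + d_fake)
  # generator: make fakes look real to the discriminator
  G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
      logits=logit_F, labels=tf.ones_like(logit_F)))
  # classifier: predict the conditioning label on real and fake samples
  c_real = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=c_R, labels=label)
  c_fake = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=c_F, labels=label)
  C_loss = tf.reduce_mean(c_real + c_fake)
  return D_loss, G_loss, C_loss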
def train(self):
  """Train the conditional VAE-GAN with a metric loss on encoder features."""
  # set phase
  self._enter_('train')
  # get data pipeline
  data, info, _ = loads(self.config)
  c1_real, p1_real, c2_real, p2_real = tf.unstack(data, axis=1)
  label, cond = tf.unstack(info, axis=1)
  # encode image to a vector
  c1_mu, c1_sigma, feat_c1 = self._encoder(c1_real)
  p2_mu, p2_sigma, feat_p2 = self._encoder(p2_real, True)
  # child to parent
  with tf.variable_scope('net1'):
    c1_z = c1_mu + c1_sigma * tf.random_normal(tf.shape(c1_mu))
    c1_z = self._generator(c1_z, cond)
    c1_fake = tf.clip_by_value(c1_z, 1e-8, 1 - 1e-8)
  # parent to child
  with tf.variable_scope('net2'):
    p2_z = p2_mu + p2_sigma * tf.random_normal(tf.shape(p2_mu))
    p2_z = self._generator(p2_z, cond)
    p2_fake = tf.clip_by_value(p2_z, 1e-8, 1 - 1e-8)
  # discriminator
  D_c1_fake = self._discriminator(c1_fake, cond, reuse=False)
  D_p1_real = self._discriminator(p1_real, cond, reuse=True)
  D_p2_fake = self._discriminator(p2_fake, cond, reuse=True)
  D_c2_real = self._discriminator(c2_real, cond, reuse=True)
  # loss for encoder
  R_loss, _ = self._loss_metric(feat_c1, feat_p2, label)
  # loss for generator
  E1_loss = self._loss_vae(p1_real, c1_fake, c1_mu, c1_sigma)
  E2_loss = self._loss_vae(c2_real, p2_fake, p2_mu, p2_sigma)
  E_loss = E1_loss + E2_loss
  # loss for discriminator
  D1_loss, G1_loss = self._loss_gan(D_c1_fake, D_p1_real)
  D2_loss, G2_loss = self._loss_gan(D_p2_fake, D_c2_real)
  D_loss = D1_loss + D2_loss
  G_loss = G1_loss + G2_loss
  loss = E_loss + D_loss + G_loss + R_loss
  # allocate three optimizers
  global_step = tf.train.create_global_step()
  var_e = variables.select_vars('encoder')
  var_g = variables.select_vars('generator')
  var_d = variables.select_vars('discriminator')
  op1 = updater.default(self.config, loss, global_step, var_e, 0)
  op2 = updater.default(self.config, loss, None, var_g, 1)
  op3 = updater.default(self.config, loss, None, var_d, 0)
  train_op = tf.group(op1, op2, op3)  # update at the same time
  saver = tf.train.Saver(var_list=variables.all())
  # hooks
  self.add_hook(self.snapshot.init())
  self.add_hook(self.summary.init())
  self.add_hook(context.Running_Hook(
      config=self.config.log,
      step=global_step,
      keys=['E', 'D', 'G', 'R'],
      values=[E_loss, D_loss, G_loss, R_loss],
      func_test=self.test,
      func_val=None))
  with context.DefaultSession(self.hooks) as sess:
    self.snapshot.restore(sess, saver)
    while not sess.should_stop():
      sess.run(train_op)
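
# `_loss_gan` and `_loss_metric` live elsewhere in the repo. Sketches of the
# standard forms they likely take: a sigmoid cross-entropy GAN loss returning
# (D_loss, G_loss), and a contrastive metric loss that pulls related feature
# pairs together and pushes unrelated pairs apart. The margin value and the
# convention that label == 1 marks a related pair are assumptions; the
# signatures mirror the call sites above.
import tensorflow as tf

def _loss_gan_sketch(D_fake, D_real):
  """Vanilla GAN loss on discriminator logits."""
  D_loss = tf.reduce_mean(
      tf.nn.sigmoid_cross_entropy_with_logits(
          logits=D_real, labels=tf.ones_like(D_real)) +
      tf.nn.sigmoid_cross_entropy_with_logits(
          logits=D_fake, labels=tf.zeros_like(D_fake)))
  G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
      logits=D_fake, labels=tf.ones_like(D_fake)))
  return D_loss, G_loss

def _loss_metric_sketch(feat_a, feat_b, label, margin=1.0):
  """Contrastive loss; returns (mean loss, per-pair squared distance)."""
  d = tf.reduce_sum(tf.square(feat_a - feat_b), axis=1)
  y = tf.cast(label, tf.float32)
  # related pairs (y == 1) are pulled together, others pushed past the margin
  loss = y * d + (1.0 - y) * tf.square(
      tf.maximum(margin - tf.sqrt(d + 1e-8), 0.0))
  return tf.reduce_mean(loss), d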