def train(self):
    """Unsupervised adversarial domain-adaptation training loop.

    Per epoch: reshuffles the labelled source set and the unlabelled target
    set, then for each iteration runs one discriminator update followed by
    one generator update, accumulating both losses for epoch-level
    reporting. Averaged losses are plotted and logged, a TF summary is
    written, and the model is checkpointed after every epoch.
    """
    print('Start to run in mode [Domain Adaptation Across Source and Target Domain]')
    self.sess.run(tf.global_variables_initializer())
    # Number of full batches per epoch, driven by the source-domain size.
    self.train_itr = len(self.source_training_data[0]) // self.bs
    for e in range(1, self.eps + 1):
        # Reshuffle both domains at the start of every epoch.
        _src_tr_img, _src_tr_lab = DA_init.shuffle_data(
            self.source_training_data[0], self.source_training_data[1])
        _tar_tr_img = DA_init.shuffle_data_nolabel(self.target_training_data)
        g_loss = 0.0
        d_loss = 0.0
        for itr in range(self.train_itr):
            # Source batches are taken by index (itr); the target batch call
            # takes no index — presumably sampled internally by
            # next_batch_nolabel (TODO confirm against DA_init).
            _src_tr_img_batch, _src_tr_lab_batch = DA_init.next_batch(
                _src_tr_img, _src_tr_lab, self.bs, itr)
            _tar_tr_img_batch = DA_init.next_batch_nolabel(_tar_tr_img, self.bs)
            feed_dict = {self.x_source: _src_tr_img_batch,
                         self.x_target: _tar_tr_img_batch,
                         self.is_training: True}
            feed_dict_eval = {self.x_source: _src_tr_img_batch,
                              self.x_target: _tar_tr_img_batch,
                              self.is_training: False}
            # One discriminator step, then one generator step, per iteration.
            _ = self.sess.run(self.d_train_op, feed_dict=feed_dict)
            _ = self.sess.run(self.g_train_op, feed_dict=feed_dict)
            # Losses are measured in inference mode (is_training=False).
            _g_loss, _d_loss = self.sess.run([self.g_loss, self.d_loss],
                                             feed_dict=feed_dict_eval)
            g_loss += _g_loss
            d_loss += _d_loss
        # Summary is evaluated on the last batch of the epoch only.
        summary = self.sess.run(self.merged, feed_dict=feed_dict_eval)
        g_loss = float(g_loss / self.train_itr)
        d_loss = float(d_loss / self.train_itr)
        log1 = "Epoch: [%d], G Loss: [%g], D Loss: [%g], Time: [%s]" % (
            e, g_loss, d_loss, time.ctime(time.time()))
        self.plt_epoch.append(e)
        self.plt_d_loss.append(d_loss)
        self.plt_g_loss.append(g_loss)
        utils_model1.plotLoss(x=self.plt_epoch,
                              y1=self.plt_d_loss,
                              y2=self.plt_g_loss,
                              figName=self.model + '_GD_Loss',
                              line1Name='D_Loss',
                              line2Name='G_Loss',
                              savePath=self.ckptDir)
        utils_model1.save2file(log1, self.ckptDir, self.model)
        self.writer.add_summary(summary, e)
        # Checkpoint at every epoch.
        self.saver.save(self.sess, self.ckptDir + self.model + '-' + str(e))
def getBatchData(self):
    """Draw one labelled batch from the source domain and build feed dicts.

    Returns:
        (feed_dict, feed_dict_eval): dicts binding the same image/label
        batch to the source placeholders, differing only in the
        is_training flag (True for the update pass, False for metrics).
    """
    img_batch, lab_batch = DA_init.next_batch(
        self.source_training_data[0], self.source_training_data[1], self.bs)
    bindings = {self.x_source: img_batch, self.y_source: lab_batch}
    feed_dict = {**bindings, self.is_training: True}
    feed_dict_eval = {**bindings, self.is_training: False}
    return feed_dict, feed_dict_eval
def getBatchData(self):
    """Draw one labelled source batch plus an unpaired target batch.

    Returns:
        (feed_dict, feed_dict_eval): dicts binding the same batches to the
        source/target placeholders. The training feed enables training mode
        with dropout keep_rate 0.5; the eval feed disables training mode
        and dropout.

    Fix: the eval feed previously supplied keep_rate=0.5, leaving dropout
    active during evaluation and making metrics noisy/biased. Evaluation
    now feeds keep_rate=1.0, consistent with the keep_prob=1.0 evaluation
    convention used by the other training loop in this file.
    """
    _src_tr_img_batch, _src_tr_lab_batch = DA_init.next_batch(
        self.source_training_data[0], self.source_training_data[1], self.bs)
    _tar_tr_img_batch = DA_init.next_batch_unpaired(
        self.target_training_data, self.bs)
    feed_dict = {self.x_source: _src_tr_img_batch,
                 self.y_source: _src_tr_lab_batch,
                 self.x_target: _tar_tr_img_batch,
                 self.is_training: True,
                 self.keep_rate: 0.5}
    feed_dict_eval = {self.x_source: _src_tr_img_batch,
                      self.y_source: _src_tr_lab_batch,
                      self.x_target: _tar_tr_img_batch,
                      self.is_training: False,
                      self.keep_rate: 1.0}  # dropout off at evaluation
    return feed_dict, feed_dict_eval
def getBatchData(self):
    """Fetch one source/target image batch pair (augmentation disabled).

    Returns:
        (feed_dict, feed_dict_eval): both bind the same source batch to
        self.X and target batch to self.Y; they differ only in the
        is_training flag.
    """
    src_imgs, _ = DA_init.next_batch(
        self.source_training_data[0],
        self.source_training_data[1],
        self.bs,
        data_aug=False)
    tar_imgs = DA_init.next_batch_unpaired(
        self.target_training_data, self.bs, data_aug=False)

    def _feed(training_flag):
        # Shared batch bindings; only the mode flag varies.
        return {self.X: src_imgs, self.Y: tar_imgs,
                self.is_training: training_flag}

    return _feed(True), _feed(False)
def train(self):
    """Three-stage domain-adaptation training on top of a pre-trained generator.

    Restores the pre-trained generator variables (g2_preTrained_var) after
    global initialization, then per epoch selects one of three schedules by
    the epoch number:
      e < 100        : supervised source-classification step only (step1);
                       g_loss/d_loss stay 0 and are logged as 0.
      100 <= e < 200 : generator step2 + discriminator step1 adversarial phase.
      200 <= e < eps : generator step3 + discriminator step2 adversarial phase.
    NOTE(review): the last branch tests `e < self.eps` while the loop runs
    e up to self.eps inclusive, so the final epoch performs no parameter
    update at all — looks like an off-by-one; confirm intent.
    NOTE(review): stage thresholds 100/200 are hard-coded — consider
    promoting to config.
    Epoch metrics are averaged, validated on the source domain, plotted,
    logged, summarised, checkpointed, and both domains are tested each epoch.
    """
    print(
        'Start to run in mode [Domain Adaptation Across Source and Target Domain]'
    )
    self.sess.run(tf.global_variables_initializer())
    # Restore only the pre-trained variables; all other variables keep the
    # fresh initialization from global_variables_initializer above.
    self.preTrained_saver = tf.train.Saver(var_list=self.g2_preTrained_var)
    self.preTrained_saver.restore(self.sess, self.preTrained_path)
    print('Pre-trained model has been successfully restored !')
    # Full batches per epoch, driven by the source-domain size.
    self.train_itr = len(self.source_training_data[0]) // self.bs
    for e in range(1, self.eps + 1):
        # Reshuffle both domains each epoch.
        _src_tr_img, _src_tr_lab = DA_init.shuffle_data(
            self.source_training_data[0], self.source_training_data[1])
        _tar_tr_img = DA_init.shuffle_data_nolabel(
            self.target_training_data)
        source_training_acc = 0.0
        source_training_loss = 0.0
        g_loss = 0.0
        d_loss = 0.0
        for itr in range(self.train_itr):
            _src_tr_img_batch, _src_tr_lab_batch = DA_init.next_batch(
                _src_tr_img, _src_tr_lab, self.bs, itr)
            _tar_tr_img_batch = DA_init.next_batch_nolabel(
                _tar_tr_img, self.bs)
            # Training feed: dropout keep-rate self.kp, training mode on.
            feed_dict = {
                self.x_source: _src_tr_img_batch,
                self.y_source: _src_tr_lab_batch,
                self.x_target: _tar_tr_img_batch,
                self.is_training: True,
                self.keep_prob: self.kp
            }
            # Eval feed: dropout disabled (keep_prob=1.0), inference mode.
            feed_dict_eval = {
                self.x_source: _src_tr_img_batch,
                self.y_source: _src_tr_lab_batch,
                self.x_target: _tar_tr_img_batch,
                self.is_training: False,
                self.keep_prob: 1.0
            }
            if e < 100:
                # Stage 1: warm up on labelled source data only.
                _ = self.sess.run(self.g_train_op_step1, feed_dict=feed_dict)
                _training_accuracy, _training_loss = self.sess.run(
                    [self.accuracy_source, self.loss_source],
                    feed_dict=feed_dict_eval)
                source_training_acc += _training_accuracy
                source_training_loss += _training_loss
            elif e < 200:
                # Stage 2: joint generator/discriminator adversarial update.
                _, _ = self.sess.run(
                    [self.g_train_op_step2, self.d_train_op_step1],
                    feed_dict=feed_dict)
                _training_accuracy, _training_loss, _g_loss, _d_loss = self.sess.run(
                    [
                        self.accuracy_source, self.loss_source,
                        self.g_loss_step2, self.d_loss_step1
                    ],
                    feed_dict=feed_dict_eval)
                source_training_acc += _training_accuracy
                source_training_loss += _training_loss
                g_loss += _g_loss
                d_loss += _d_loss
            elif e < self.eps:
                # Stage 3: final adversarial phase with step3/step2 ops.
                _, _ = self.sess.run(
                    [self.g_train_op_step3, self.d_train_op_step2],
                    feed_dict=feed_dict)
                _training_accuracy, _training_loss, _g_loss, _d_loss = self.sess.run(
                    [
                        self.accuracy_source, self.loss_source,
                        self.g_loss_step3, self.d_loss_step2
                    ],
                    feed_dict=feed_dict_eval)
                source_training_acc += _training_accuracy
                source_training_loss += _training_loss
                g_loss += _g_loss
                d_loss += _d_loss
        # Summary is evaluated on the last batch of the epoch only.
        summary = self.sess.run(self.merged, feed_dict=feed_dict_eval)
        source_training_acc = float(source_training_acc / self.train_itr)
        source_training_loss = float(source_training_loss / self.train_itr)
        g_loss = float(g_loss / self.train_itr)
        d_loss = float(d_loss / self.train_itr)
        # Validation runs on the source domain only.
        source_validation_acc, source_validation_loss = self.validation_procedure(
            validation_data=self.source_validation_data,
            distribution_op=self.distribution_source,
            loss_op=self.loss_source,
            inputX=self.x_source,
            inputY=self.y_source)
        log1 = "Epoch: [%d], Domain: Source, Training Accuracy: [%g], Validation Accuracy: [%g], " \
               "Training Loss: [%g], Validation Loss: [%g], generator Loss: [%g], Discriminator Loss: [%g], " \
               "Time: [%s]" % (
                   e, source_training_acc, source_validation_acc, source_training_loss,
                   source_validation_loss, g_loss, d_loss, time.ctime(time.time()))
        self.plt_epoch.append(e)
        self.plt_training_accuracy.append(source_training_acc)
        self.plt_training_loss.append(source_training_loss)
        self.plt_validation_accuracy.append(source_validation_acc)
        self.plt_validation_loss.append(source_validation_loss)
        self.plt_d_loss.append(d_loss)
        self.plt_g_loss.append(g_loss)
        da_utils.plotAccuracy(x=self.plt_epoch,
                              y1=self.plt_training_accuracy,
                              y2=self.plt_validation_accuracy,
                              figName=self.model,
                              line1Name='training',
                              line2Name='validation',
                              savePath=self.ckptDir)
        da_utils.plotLoss(x=self.plt_epoch,
                          y1=self.plt_training_loss,
                          y2=self.plt_validation_loss,
                          figName=self.model,
                          line1Name='training',
                          line2Name='validation',
                          savePath=self.ckptDir)
        da_utils.plotLoss(x=self.plt_epoch,
                          y1=self.plt_d_loss,
                          y2=self.plt_g_loss,
                          figName=self.model + '_GD_Loss',
                          line1Name='D_Loss',
                          line2Name='G_Loss',
                          savePath=self.ckptDir)
        da_utils.save2file(log1, self.ckptDir, self.model)
        self.writer.add_summary(summary, e)
        # Checkpoint, then test both domains, every epoch.
        self.saver.save(self.sess, self.ckptDir + self.model + '-' + str(e))
        self.test_procedure(self.source_test_data,
                            distribution_op=self.distribution_source,
                            inputX=self.x_source,
                            inputY=self.y_source,
                            mode='source')
        self.test_procedure(self.target_test_data,
                            distribution_op=self.distribution_target,
                            inputX=self.x_target,
                            inputY=self.y_target,
                            mode='target')
def train(self):
    """Supervised training on the source domain only.

    Per epoch: shuffles the training set, runs one optimisation step per
    batch while accumulating accuracy/loss, evaluates a TF summary on the
    last batch in inference mode, validates, plots/logs, checkpoints and
    tests. After all epochs, reports the highest validation accuracy and
    the lowest validation loss together with their epochs.
    """
    # NOTE(review): 'Supervied' typo is in the original runtime log string.
    print('Start to run in mode [Supervied Learning in Source Domain]')
    self.sess.run(tf.global_variables_initializer())
    # Full batches per epoch.
    self.train_itr = len(self.training_data[0]) // self.bs
    # Per-epoch validation metrics, kept to report the best epoch at the end.
    self.best_val_accuracy = []
    self.best_val_loss = []
    for e in range(1, self.eps + 1):
        _tr_img, _tr_lab = DA_init.shuffle_data(self.training_data[0],
                                                self.training_data[1])
        training_acc = 0.0
        training_loss = 0.0
        for itr in range(self.train_itr):
            _tr_img_batch, _tr_lab_batch = DA_init.next_batch(
                _tr_img, _tr_lab, self.bs, itr)
            # Accuracy/loss are measured in the same run as the update op,
            # i.e. on the training pass (is_training=True).
            _train_accuracy, _train_loss, _ = self.sess.run(
                [self.accuracy, self.loss, self.train_op],
                feed_dict={
                    self.x: _tr_img_batch,
                    self.y: _tr_lab_batch,
                    self.is_training: True
                })
            training_acc += _train_accuracy
            training_loss += _train_loss
        # Summary is computed on the last batch only, in inference mode.
        summary = self.sess.run(self.merged,
                                feed_dict={
                                    self.x: _tr_img_batch,
                                    self.y: _tr_lab_batch,
                                    self.is_training: False
                                })
        training_acc = float(training_acc / self.train_itr)
        training_loss = float(training_loss / self.train_itr)
        validation_acc, validation_loss = self.validation_procedure()
        self.best_val_accuracy.append(validation_acc)
        self.best_val_loss.append(validation_loss)
        log1 = "Epoch: [%d], Training Accuracy: [%g], Validation Accuracy: [%g], Loss Training: [%g] " \
               "Loss_validation: [%g], Time: [%s]" % \
               (e, training_acc, validation_acc, training_loss, validation_loss,
                time.ctime(time.time()))
        self.plt_epoch.append(e)
        self.plt_training_accuracy.append(training_acc)
        self.plt_training_loss.append(training_loss)
        self.plt_validation_accuracy.append(validation_acc)
        self.plt_validation_loss.append(validation_loss)
        utils.plotAccuracy(x=self.plt_epoch,
                           y1=self.plt_training_accuracy,
                           y2=self.plt_validation_accuracy,
                           figName=self.model,
                           line1Name='training',
                           line2Name='validation',
                           savePath=self.ckptDir)
        utils.plotLoss(x=self.plt_epoch,
                       y1=self.plt_training_loss,
                       y2=self.plt_validation_loss,
                       figName=self.model,
                       line1Name='training',
                       line2Name='validation',
                       savePath=self.ckptDir)
        utils.save2file(log1, self.ckptDir, self.model)
        self.writer.add_summary(summary, e)
        # Checkpoint and test every epoch.
        self.saver.save(self.sess, self.ckptDir + self.model + '-' + str(e))
        self.test_procedure()
    # After training: report the best-validation epochs (1-indexed).
    self.best_val_index = self.best_val_accuracy.index(
        max(self.best_val_accuracy))
    log2 = 'Highest Validation Accuracy : [%g], Epoch : [%g]' % (
        self.best_val_accuracy[self.best_val_index], self.best_val_index + 1)
    utils.save2file(log2, self.ckptDir, self.model)
    self.best_val_index_loss = self.best_val_loss.index(
        min(self.best_val_loss))
    log3 = 'Lowest Validation Loss : [%g], Epoch : [%g]' % (
        self.best_val_loss[self.best_val_index_loss],
        self.best_val_index_loss + 1)
    utils.save2file(log3, self.ckptDir, self.model)
def train(self):
    """Fine-tune a model restored from a pre-trained source-domain checkpoint.

    Unlike the epoch-based loops in this file, this runs a flat iteration
    loop (eps * iterations-per-epoch) and emulates epoch boundaries with
    `itr % self.itr_epoch == 0`: at each boundary it logs averaged metrics,
    validates, plots, checkpoints, tests, and resets the accumulators.
    After training, reports the best validation accuracy/loss epochs.
    """
    print('Reloading parameters from pre-trained source domain')
    self.sess.run(tf.global_variables_initializer())
    # Restore ALL variables from the reload path (full checkpoint restore).
    self.saver.restore(self.sess, self.reloadPath)
    print('Reloading finished')
    self.itr_epoch = len(self.training_data[0]) // self.bs
    self.total_iteration = self.eps * self.itr_epoch
    # Per-epoch validation metrics, kept to report the best epoch at the end.
    self.best_val_accuracy = []
    self.best_val_loss = []
    training_acc = 0.0
    training_loss = 0.0
    for itr in range(1, self.total_iteration + 1):
        # Batch here is drawn without an explicit index — presumably sampled
        # internally by next_batch (TODO confirm against DA_init).
        _tr_img_batch, _tr_lab_batch = DA_init.next_batch(
            image=self.training_data[0],
            label=self.training_data[1],
            batch_size=self.bs)
        # Metrics are measured in the same run as the update op
        # (is_training=True), then accumulated until the epoch boundary.
        _train_accuracy, _train_loss, _ = self.sess.run(
            [self.accuracy, self.loss, self.train_op],
            feed_dict={
                self.x: _tr_img_batch,
                self.y: _tr_lab_batch,
                self.is_training: True
            })
        training_acc += _train_accuracy
        training_loss += _train_loss
        if itr % self.itr_epoch == 0:
            # Epoch boundary: report, validate, plot, checkpoint, test.
            _current_eps = int(itr / self.itr_epoch)
            # Summary on the last batch only, in inference mode.
            summary = self.sess.run(self.merged,
                                    feed_dict={
                                        self.x: _tr_img_batch,
                                        self.y: _tr_lab_batch,
                                        self.is_training: False
                                    })
            training_acc = float(training_acc / self.itr_epoch)
            training_loss = float(training_loss / self.itr_epoch)
            validation_acc, validation_loss = self.validation_procedure()
            self.best_val_accuracy.append(validation_acc)
            self.best_val_loss.append(validation_loss)
            log1 = "Epoch: [%d], Training Accuracy: [%g], Validation Accuracy: [%g], Loss Training: [%g] " \
                   "Loss_validation: [%g], Time: [%s]" % \
                   (_current_eps, training_acc, validation_acc, training_loss,
                    validation_loss, time.ctime(time.time()))
            self.plt_epoch.append(_current_eps)
            self.plt_training_accuracy.append(training_acc)
            self.plt_training_loss.append(training_loss)
            self.plt_validation_accuracy.append(validation_acc)
            self.plt_validation_loss.append(validation_loss)
            FineTuning_utils.plotAccuracy(x=self.plt_epoch,
                                          y1=self.plt_training_accuracy,
                                          y2=self.plt_validation_accuracy,
                                          figName=self.model,
                                          line1Name='training',
                                          line2Name='validation',
                                          savePath=self.ckptDir)
            FineTuning_utils.plotLoss(x=self.plt_epoch,
                                      y1=self.plt_training_loss,
                                      y2=self.plt_validation_loss,
                                      figName=self.model,
                                      line1Name='training',
                                      line2Name='validation',
                                      savePath=self.ckptDir)
            FineTuning_utils.save2file(log1, self.ckptDir, self.model)
            self.writer.add_summary(summary, _current_eps)
            self.saver.save(
                self.sess,
                self.ckptDir + self.model + '-' + str(_current_eps))
            self.test_procedure()
            # Reset accumulators for the next emulated epoch.
            training_acc = 0.0
            training_loss = 0.0
    # After training: report the best-validation epochs (1-indexed).
    self.best_val_index = self.best_val_accuracy.index(
        max(self.best_val_accuracy))
    log2 = 'Highest Validation Accuracy : [%g], Epoch : [%g]' % (
        self.best_val_accuracy[self.best_val_index], self.best_val_index + 1)
    FineTuning_utils.save2file(log2, self.ckptDir, self.model)
    self.best_val_index_loss = self.best_val_loss.index(
        min(self.best_val_loss))
    log3 = 'Lowest Validation Loss : [%g], Epoch : [%g]' % (
        self.best_val_loss[self.best_val_index_loss],
        self.best_val_index_loss + 1)
    FineTuning_utils.save2file(log3, self.ckptDir, self.model)