def test_procedure(self, test_data, mode):
    """Evaluate the model on one test split and log the results.

    Builds a confusion matrix from batched session runs, then writes the
    overall accuracy, the raw matrix, per-category accuracy and the F-value
    to the checkpoint log via FineTuning_utils.save2file.
    """
    cm = np.zeros([self.num_class, self.num_class], dtype="int")
    batch_count = int(np.ceil(test_data[0].shape[0] / self.bs))

    # Accumulate (row, col) index pairs produced by self.distribution
    # into the confusion matrix, one mini-batch at a time.
    for b in range(batch_count):
        lo = b * self.bs
        hi = lo + self.bs
        rows, cols = self.sess.run(
            self.distribution,
            feed_dict={self.x: test_data[0][lo:hi],
                       self.y: test_data[1][lo:hi],
                       self.is_training: False})
        for r, c in zip(rows, cols):
            cm[r][c] += 1

    # Overall accuracy: diagonal mass over total mass.
    test_accuracy = float(np.trace(cm)) / float(np.sum(cm))
    # Per-category accuracy: diagonal entry over its row sum.
    detail_test_accuracy = [
        cm[i][i] / np.sum(cm[i]) for i in range(self.num_class)
    ]

    log0 = "Test mode {}".format(mode)
    log1 = "Test Accuracy : %g" % test_accuracy
    log2 = np.array(cm.tolist())
    log3 = '\n'.join(
        'category %s test accuracy : %g' %
        (FineTuning_utils.pulmonary_category[j], detail_test_accuracy[j])
        for j in range(self.num_class))
    log4 = 'F_Value : %g' % self.f_value(cm)

    for entry in (log0, log1, log2, log3, log4):
        FineTuning_utils.save2file(entry, self.ckptDir, self.model)
# Load the pickled source/target splits and min-max normalize each one.
_split_suffixes = ('_training.pkl', '_validation.pkl', '_test.pkl')

src_training, src_validation, src_test = [
    FineTuning_utils.normalizeInput(
        DA_init.loadPickle(FineTuning_utils.experimentalPath, src_name + suffix),
        mode='Paired')
    for suffix in _split_suffixes
]
tar_training, tar_validation, tar_test = [
    FineTuning_utils.normalizeInput(
        DA_init.loadPickle(FineTuning_utils.experimentalPath, tar_name + suffix),
        mode='Paired')
    for suffix in _split_suffixes
]

# Keep only the requested percentage of the target-domain data for training.
tar_training = FineTuning_utils.getPartialDataSet(
    tar_training, keep_percentage=args.target_percentage)

# Merged training set: [images, labels], source first then (partial) target.
training = [
    np.concatenate((src_training[0], tar_training[0]), axis=0),
    np.concatenate((src_training[1], tar_training[1]), axis=0)
]
# Load the fixed-name source/target pickles, normalize them, then merge
# the source and target splits into combined [images, labels] pairs.
src_training = DA_init.loadPickle(FineTuning_utils.experimentalPath, 'source_training.pkl')
src_validation = DA_init.loadPickle(FineTuning_utils.experimentalPath, 'source_validation.pkl')
src_test = DA_init.loadPickle(FineTuning_utils.experimentalPath, 'source_test.pkl')
tar_training = DA_init.loadPickle(FineTuning_utils.experimentalPath, 'target_training.pkl')
tar_validation = DA_init.loadPickle(FineTuning_utils.experimentalPath, 'target_validation.pkl')
tar_test = DA_init.loadPickle(FineTuning_utils.experimentalPath, 'target_test.pkl')

# Normalize every split; 'Paired' mode presumably normalizes image/label
# pairs together — TODO confirm against FineTuning_utils.normalizeInput.
src_training = FineTuning_utils.normalizeInput(src_training, mode='Paired')
src_validation = FineTuning_utils.normalizeInput(src_validation, mode='Paired')
src_test = FineTuning_utils.normalizeInput(src_test, mode='Paired')
tar_training = FineTuning_utils.normalizeInput(tar_training, mode='Paired')
tar_validation = FineTuning_utils.normalizeInput(tar_validation, mode='Paired')
tar_test = FineTuning_utils.normalizeInput(tar_test, mode='Paired')

# Combined training set: source then target, images at index 0, labels at 1.
training = [
    np.concatenate((src_training[0], tar_training[0]), axis=0),
    np.concatenate((src_training[1], tar_training[1]), axis=0)
]
# NOTE(review): this statement is truncated in the visible chunk — the
# closing bracket of the validation list lies outside this view.
validation = [
    np.concatenate((src_validation[0], tar_validation[0]), axis=0),
    np.concatenate((src_validation[1], tar_validation[1]), axis=0)
def saveConfiguration(self):
    """Write every hyper-parameter of this run to the checkpoint log file."""
    settings = (
        'epoch : %d' % self.eps,
        'restore epoch : %d' % self.res_eps,
        'model : %s' % self.model,
        'ksize : %d' % self.k,
        'out channel 1 : %d' % self.oc1,
        'out channel 2 : %d' % self.oc2,
        'out channel 3 : %d' % self.oc3,
        'learning rate : %g' % self.lr,
        'batch size : %d' % self.bs,
        'image height : %d' % self.img_h,
        'image width : %d' % self.img_w,
        'num class : %d' % self.num_class,
        'train phase : %s' % self.train_phase,
    )
    # One log line per setting, all routed through the shared file logger.
    for line in settings:
        FineTuning_utils.save2file(line, self.ckptDir, self.model)
def train(self):
    """Run the full training loop: per-epoch train/validate/test, plotting,
    TensorBoard summaries, checkpointing, and best-epoch reporting."""
    self.sess.run(tf.global_variables_initializer())
    # Number of full mini-batches per epoch (remainder images are dropped).
    self.itr_epoch = len(self.training_data[0]) // self.bs
    self.best_val_accuracy = []
    self.best_val_loss = []
    training_acc = 0.0
    training_loss = 0.0
    for e in range(1, self.eps + 1):
        for itr in range(self.itr_epoch):
            _tr_img_batch, _tr_lab_batch = DA_init.next_batch(
                image=self.training_data[0],
                label=self.training_data[1],
                batch_size=self.bs)
            _train_accuracy, _train_loss, _ = self.sess.run(
                [self.accuracy, self.loss, self.train_op],
                feed_dict={
                    self.x: _tr_img_batch,
                    self.y: _tr_lab_batch,
                    self.is_training: False
                })
            # Accumulate over the epoch; averaged (and reset) below.
            training_acc += _train_accuracy
            training_loss += _train_loss
            # NOTE(review): this runs the summary op every iteration but
            # only the last batch's summary survives to add_summary —
            # presumably it could be hoisted out of the inner loop; confirm
            # before changing.
            summary = self.sess.run(self.merged,
                                    feed_dict={
                                        self.x: _tr_img_batch,
                                        self.y: _tr_lab_batch,
                                        self.is_training: False
                                    })
        # Epoch averages over the fixed number of iterations.
        training_acc = float(training_acc / self.itr_epoch)
        training_loss = float(training_loss / self.itr_epoch)
        validation_acc, validation_loss = self.validation_procedure()
        self.best_val_accuracy.append(validation_acc)
        self.best_val_loss.append(validation_loss)
        log1 = "Epoch: [%d], Training Accuracy: [%g], Validation Accuracy: [%g], Loss Training: [%g] " \
               "Loss_validation: [%g], Time: [%s]" % \
               (e, training_acc, validation_acc, training_loss, validation_loss, time.ctime(time.time()))
        # Track curves for the accuracy/loss plots regenerated each epoch.
        self.plt_epoch.append(e)
        self.plt_training_accuracy.append(training_acc)
        self.plt_training_loss.append(training_loss)
        self.plt_validation_accuracy.append(validation_acc)
        self.plt_validation_loss.append(validation_loss)
        FineTuning_utils.plotAccuracy(x=self.plt_epoch,
                                      y1=self.plt_training_accuracy,
                                      y2=self.plt_validation_accuracy,
                                      figName=self.model,
                                      line1Name='training',
                                      line2Name='validation',
                                      savePath=self.ckptDir)
        FineTuning_utils.plotLoss(x=self.plt_epoch,
                                  y1=self.plt_training_loss,
                                  y2=self.plt_validation_loss,
                                  figName=self.model,
                                  line1Name='training',
                                  line2Name='validation',
                                  savePath=self.ckptDir)
        FineTuning_utils.save2file(log1, self.ckptDir, self.model)
        # Last batch's summary stands in for the whole epoch (see NOTE above).
        self.writer.add_summary(summary, e)
        # Checkpoint every epoch: <ckptDir><model>-<epoch>.
        self.saver.save(self.sess, self.ckptDir + self.model + '-' + str(e))
        self.test_procedure(self.src_test_data, mode='Source')
        self.test_procedure(self.tar_test_data, mode='Target')
        # Reset accumulators for the next epoch.
        training_acc = 0.0
        training_loss = 0.0
    # Report the epochs with the best validation accuracy and lowest loss
    # (epoch numbers are 1-based, hence the +1 on the list index).
    self.best_val_index = self.best_val_accuracy.index(
        max(self.best_val_accuracy))
    log2 = 'Highest Validation Accuracy : [%g], Epoch : [%g]' % (
        self.best_val_accuracy[self.best_val_index], self.best_val_index + 1)
    FineTuning_utils.save2file(log2, self.ckptDir, self.model)
    self.best_val_index_loss = self.best_val_loss.index(
        min(self.best_val_loss))
    log3 = 'Lowest Validation Loss : [%g], Epoch : [%g]' % (
        self.best_val_loss[self.best_val_index_loss],
        self.best_val_index_loss + 1)
    FineTuning_utils.save2file(log3, self.ckptDir, self.model)
parser.add_argument('-img_height', default=32, type=int)
parser.add_argument('-img_width', default=32, type=int)
args = parser.parse_args()

# Pin GPU visibility before any TensorFlow session is created.
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

# NOTE(review): the 'Source' branch below loads the target_* pickles and a
# source-to-target ('s2t') checkpoint — confirm the domain/file pairing is
# intentional and not a copy-paste slip.
if args.data_domain == 'Source':
    training = DA_init.loadPickle(FineTuning_utils.experimentalPath, 'target_training.pkl')
    validation = DA_init.loadPickle(FineTuning_utils.experimentalPath, 'target_validation.pkl')
    test = DA_init.loadPickle(FineTuning_utils.experimentalPath, 'target_test.pkl')

    training = FineTuning_utils.normalizeInput(training, mode='Paired')
    validation = FineTuning_utils.normalizeInput(validation, mode='Paired')
    test = FineTuning_utils.normalizeInput(test, mode='Paired')

    # Checkpoint to fine-tune from (hard-coded epoch 148).
    reloadPath = '../checkpoint/bl_f16_s2t/bl_f16_s2t-148'

    # Sanity-print the shapes of the selected splits.
    print('training image shape', str(training[0].shape))
    print('training label shape', str(training[1].shape))
    print('validation image shape', str(validation[0].shape))
    print('validation label shape', validation[1].shape)
    print('test image shape', test[0].shape)
    print('test label shape', test[1].shape)
# NOTE(review): the 'Target' branch body lies outside this chunk.
if args.data_domain == 'Target':