# for TensorBoard
merged_ = sess.run(merged,
                   feed_dict={x_: img_batch, x_label_: class_chan_f, x_real_: img_batch,
                              x_label_rev_: class_chan_r, tar_dis_g_adv_: tar_dis_0,
                              tar_dis_r_adv_: tar_dis_1, tar_dis_r_cls_: tar_dis_r_cls,
                              tar_dis_g_cls_: tar_dis_f_cls})
summary_writer.add_summary(merged_, epoch)

# accumulate per-batch losses; the epoch averages are printed below
sum_loss_gen += loss_gen_total_
sum_loss_dis += loss_dis_total_
sum_loss_dis_r_cls += loss_dis_r_cls_
sum_loss_dis_g_cls += loss_dis_g_cls_
sum_loss_rec += loss_rec_
sum_loss_dis_r_adv += loss_dis_r_adv_
sum_loss_dis_g_adv_r += loss_dis_g_adv_r_
sum_loss_dis_g_adv_f += loss_dis_g_adv_f_

print("----------------------------------------------------------------------")
print("epoch = {:}, Generator Total Loss = {:.4f}, Discriminator Total Loss = {:.4f}".format(
    epoch, sum_loss_gen / len_data, sum_loss_dis / len_data))
print("Generator : classifier loss = {:.4f}, adversarial loss = {:.4f}, reconstruction loss = {:.4f}".format(
    sum_loss_dis_g_cls / len_data, sum_loss_dis_g_adv_f / len_data, sum_loss_rec / len_data))
print("Discriminator : classifier loss = {:.4f}, adversarial(real) loss = {:.4f}, adversarial(gen) loss = {:.4f}".format(
    sum_loss_dis_r_cls / len_data, sum_loss_dis_r_adv / len_data, sum_loss_dis_g_adv_r / len_data))

if epoch % 10 == 0:
    # every 10 epochs, render input / translated / cycle-translated test images
    img_batch, class_chan_f, class_chan_r = make_datasets.get_test_data_for_1_batch(
        10, int(TEST_DATA_SAMPLE // 3))
    gen_images, gen_gen_images = sess.run([x_gen, x_gen_gen],
                                          feed_dict={x_: img_batch, x_label_: class_chan_f,
                                                     x_label_rev_: class_chan_r})
    Utility.make_output_img(img_batch, gen_images, gen_gen_images, int(TEST_DATA_SAMPLE // 3),
                            out_image_dir, epoch, LOGFILE_NAME)
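# Hedged sketch of the forward/reverse label channels fed above: the generator
# gets a target-class label map (class_chan_f) to produce x_gen, then the
# original-class label map (class_chan_r) to cycle back to x_gen_gen, which the
# reconstruction loss compares against the input. Shapes and the helper are
# illustrative assumptions, not the repository's API.
import numpy as np

def label_channel_sketch(class_id, num_classes, h, w):
    # one spatial channel per class; the chosen class channel is filled with 1s
    chan = np.zeros((1, h, w, num_classes), dtype=np.float32)
    chan[..., class_id] = 1.0
    return chan

class_chan_forward = label_channel_sketch(2, 3, 64, 64)  # translate to class 2
class_chan_reverse = label_channel_sketch(0, 3, 64, 64)  # cycle back to class 0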
    score_A_np = np.concatenate((score_A_np, score_A_np_tmp), axis=0)

# evaluate anomaly detection on the accumulated (score, label) rows
tp, fp, tn, fn, precision, recall = Utility.compute_precision_recall(score_A_np)
auc = Utility.make_ROC_graph(score_A_np, 'out_graph/' + LOGFILE_NAME, epoch)
print("tp:{}, fp:{}, tn:{}, fn:{}, precision:{:.4f}, recall:{:.4f}, AUC:{:.4f}".format(
    tp, fp, tn, fn, precision, recall, auc))
log_list.append([epoch, auc])

# reconstruct samples from the start and end of the validation set for visual inspection
img_batch_7, _ = make_datasets.get_valid_data_for_1_batch(0, 10)
img_batch_5, _ = make_datasets.get_valid_data_for_1_batch(val_data_num - 11, 10)
x_z_x_7 = sess.run(x_z_x, feed_dict={x_: img_batch_7, is_training_: False})
x_z_x_5 = sess.run(x_z_x, feed_dict={x_: img_batch_5, is_training_: False})
Utility.make_output_img(img_batch_5, img_batch_7, x_z_x_5, x_z_x_7, epoch, LOGFILE_NAME, OUT_IMG_DIR)

# after learning
Utility.save_list_to_csv(log_list, 'log/' + LOGFILE_NAME + '_auc.csv')
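# Hedged sketch of how the (score, label) rows accumulated above can yield the
# reported AUC; this uses sklearn as a stand-in and is not the repository's
# Utility.make_ROC_graph. Columns of score_A_np are assumed to be [score, label].
import numpy as np
from sklearn.metrics import roc_curve, auc as sk_auc

def sketch_auc(score_rows):
    scores, labels = score_rows[:, 0], score_rows[:, 1]
    fpr, tpr, _ = roc_curve(labels, scores)
    return sk_auc(fpr, tpr)

# perfectly separated scores give AUC = 1.0
print(sketch_auc(np.array([[0.9, 1], [0.8, 1], [0.2, 0], [0.1, 0]])))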
img1_tile = np.tile(img1, (VAL_IMG_NUM, 1, 1, 1))
segs1 = segs[0].reshape(1, segs.shape[1], segs.shape[2], segs.shape[3])
segs1_tile = np.tile(segs1, (VAL_IMG_NUM, 1, 1, 1))

# run inference VAL_IMG_NUM times and keep the first image's prediction from
# each pass; with a stochastic model this shows output variation for one input
output1_list = []
for num in range(VAL_IMG_NUM):
    output_tmp = sess.run(out_infer, feed_dict={x: img_batch, is_training: False})
    output1_list.append(output_tmp[0])
output1 = np.asarray(output1_list)

# prediction with the teacher signal fed (out_learn) on the training batch
output_tr = sess.run(out_learn, feed_dict={x: img_batch, t: segs, is_training: False})

# output_val is assumed to be computed earlier from out_infer on the validation batch
util.make_output_img(img_batch[:VAL_IMG_NUM], segs[:VAL_IMG_NUM], output_val[:VAL_IMG_NUM],
                     epoch, args.log_file_name + '_val_', OUT_IMG_DIR)
util.make_output_img(img_batch[:VAL_IMG_NUM], segs[:VAL_IMG_NUM], output_tr[:VAL_IMG_NUM],
                     epoch, args.log_file_name + '_tra_', OUT_IMG_DIR)
util.make_output_img(img1_tile, segs1_tile, output1,
                     epoch, args.log_file_name + '_diff_z_', OUT_IMG_DIR)

if epoch % SAVE_MODEL_SPAN == 0 and epoch != 0:
    _ = saver.save(sess, './out_model/model_' + args.log_file_name + '_' + str(epoch) + '.ckpt')
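# Hedged illustration of the "_diff_z_" panel above: the same input is repeated
# along the batch axis, and each forward pass is assumed to draw fresh noise,
# so the collected first-image outputs visualize prediction diversity. The
# noise model below is a placeholder, not the network.
import numpy as np

rng = np.random.default_rng(0)
img1 = np.ones((1, 4, 4, 1), dtype=np.float32)
VAL_IMG_NUM = 3
img1_tile = np.tile(img1, (VAL_IMG_NUM, 1, 1, 1))  # (3, 4, 4, 1)
outputs = np.asarray([img1[0] + rng.normal(size=img1[0].shape) for _ in range(VAL_IMG_NUM)])
print(img1_tile.shape, outputs.shape)  # (3, 4, 4, 1) (3, 4, 4, 1)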
sample_num_h = 10
sample_num = sample_num_h ** 2

# build a 10x10 sample grid: each row repeats the same 10 noise vectors, so
# columns share z while rows sweep the class label below
z_test = np.random.uniform(0, 1, sample_num_h * noise_num).reshape(1, sample_num_h, noise_num)
z_test = np.tile(z_test, (sample_num_h, 1, 1))
z_test = z_test.reshape(-1, noise_num).astype(np.float32)  # (sample_num, noise_num)

label_gen_int = np.arange(10).reshape(10, 1).astype(np.float32)
label_gen_int = np.tile(label_gen_int, (1, 10)).reshape(sample_num)
label_gen_test = make_mnist.convert_to_10class_(label_gen_int)
label_gen_test = Variable(cuda.to_gpu(label_gen_test))
z_test = Variable(cuda.to_gpu(z_test))

x_gen_test, y_gen_test = gen(label_gen_test, z_test, train=False)
x_gen_test_data = x_gen_test.data
x_gen_test_reshape = x_gen_test_data.reshape(len(x_gen_test_data), 28, 28, 1)
x_gen_test_reshape = cuda.to_cpu(x_gen_test_reshape)
Utility.make_output_img(x_gen_test_reshape, sample_num_h, out_image_dir, epoch)

if epoch % 100 == 0:
    # serialize model snapshots
    serializers.save_npz(out_model_dir + '/gen_' + str(epoch) + '.model', gen)
    serializers.save_npz(out_model_dir + '/cla_' + str(epoch) + '.model', cla)
    serializers.save_npz(out_model_dir + '/dis_' + str(epoch) + '.model', dis)
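# Hedged sketch of what make_mnist.convert_to_10class_ plausibly does: turn
# integer digit labels into one-hot rows. The actual helper is not shown here,
# so this is an assumption from its usage.
import numpy as np

def convert_to_10class_sketch(labels):
    labels = labels.astype(np.int64).reshape(-1)
    one_hot = np.zeros((len(labels), 10), dtype=np.float32)
    one_hot[np.arange(len(labels)), labels] = 1.0
    return one_hot

# convert_to_10class_sketch(np.array([3.0, 7.0]))  # rows with 1.0 at indices 3 and 7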
", Loss RP =", sum_loss_RP, ) if epoch % 10 == 0: sample_num_h = 10 sample_num = sample_num_h**2 z_test = np.random.uniform(0, 1, sample_num_h * noise_num).reshape( 1, sample_num_h, noise_num) z_test = np.tile(z_test, (sample_num_h, 1, 1)) z_test = z_test.reshape(-1, sample_num).astype(np.float32) label_gen_int = np.arange(10).reshape(10, 1).astype(np.float32) label_gen_int = np.tile(label_gen_int, (1, 10)).reshape(sample_num) label_gen_test = make_mnist.convert_to_10class_(label_gen_int) gen_images = sess.run(x_gen, feed_dict={ z_: z_test, yg_: label_gen_test }) Utility.make_output_img(gen_images, sample_num_h, out_image_dir, epoch) # z_only_1 = np.random.uniform(0, 1, noise_num).reshape(1, noise_num) # label_gen_only_1 = np.array([4]).reshape(1, 1).astype(np.float32) # label_gen_only_1_class = make_mnist.convert_to_10class_(label_gen_only_1) # gen_image_1 = sess.run(x_gen, feed_dict={z_:z_only_1, yg_:label_gen_only_1_class}) # # Utility.make_1_img(gen_image_1)
def train(self):  # training phase
    start = time.time()
    log_list = ['epoch', 'AUC', 'tp', 'fp', 'tn', 'fn', 'precision', 'recall', 'f1', 'threshold']
    Utility.save_1row_to_csv(log_list, 'log/' + self.logfile_name + '_auc.csv')

    # training loop
    print("start training")
    for epoch in range(0, self.epoch):
        self.sess.run(tf.local_variables_initializer())
        sum_loss_dis_f_D = np.float32(0)
        sum_loss_dis_f_G = np.float32(0)
        sum_loss_dis_r_D = np.float32(0)
        sum_loss_perc = np.float32(0)
        sum_loss_dis_total = np.float32(0)
        sum_loss_gen_total = np.float32(0)
        sum_loss_task_total = np.float32(0)
        sum_loss_task_s = np.float32(0)
        sum_loss_task_g = np.float32(0)
        sum_loss_PI_s = np.float32(0)
        sum_loss_PI_g = np.float32(0)
        sum_loss_PI_total = np.float32(0)

        len_data_syn = self.make_datasets.make_data_for_1_epoch()
        for i in range(0, len_data_syn, self.batch_size):
            if i % (self.batch_size * 100) == 0:
                print("i = ", i)
            syns_np, segs_np, depths_np, reals_np = self.make_datasets.get_data_for_1_batch(i, self.batch_size)
            tar_1 = self.make_datasets.make_target_1_0(1.0, len(syns_np),
                                                       self.img_width // 8, self.img_height // 8)  # 1 -> target for real
            tar_0 = self.make_datasets.make_target_1_0(0.0, len(syns_np),
                                                       self.img_width // 8, self.img_height // 8)  # 0 -> target for fake

            # update discriminator, task predictor, privileged network, and generator in turn
            self.sess.run(self.train_dis, feed_dict={self.x_s: syns_np, self.x_r: reals_np,
                                                     self.tar_d_r: tar_1, self.tar_d_f: tar_0,
                                                     self.is_training: True, self.keep_prob: self.keep_prob_rate})
            self.sess.run(self.train_tas, feed_dict={self.x_s: syns_np, self.seg: segs_np,
                                                     self.is_training: True, self.keep_prob: self.keep_prob_rate})
            self.sess.run(self.train_pri, feed_dict={self.x_s: syns_np, self.pi: depths_np,
                                                     self.is_training: True, self.keep_prob: self.keep_prob_rate})
            self.sess.run(self.train_gen, feed_dict={self.x_s: syns_np, self.seg: segs_np, self.pi: depths_np,
                                                     self.tar_d_f: tar_1,
                                                     self.is_training: True, self.keep_prob: self.keep_prob_rate})

            # loss for discriminator
            loss_dis_total_, loss_dis_r_D_, loss_dis_f_D_ = self.sess.run(
                [self.loss_dis_total, self.loss_dis_r, self.loss_dis_f],
                feed_dict={self.x_s: syns_np, self.x_r: reals_np, self.tar_d_r: tar_1, self.tar_d_f: tar_0,
                           self.is_training: False, self.keep_prob: 1.0})
            # loss for task predictor
            loss_task_total_, loss_task_s_, loss_task_g_ = self.sess.run(
                [self.loss_task_total, self.loss_task_s, self.loss_task_g],
                feed_dict={self.x_s: syns_np, self.seg: segs_np, self.is_training: False, self.keep_prob: 1.0})
            # loss for the privileged information (PI) network
            loss_PI_total_, loss_PI_s_, loss_PI_g_ = self.sess.run(
                [self.loss_PI_total, self.loss_PI_s, self.loss_PI_g],
                feed_dict={self.x_s: syns_np, self.pi: depths_np, self.is_training: False, self.keep_prob: 1.0})
            # perceptual loss
            loss_perc_ = self.sess.run(self.loss_perc,
                                       feed_dict={self.x_s: syns_np, self.tar_d_f: tar_1,
                                                  self.is_training: False, self.keep_prob: 1.0})
            # generator loss
            loss_gen_total_, loss_dis_f_G_ = self.sess.run(
                [self.loss_gen_total, self.loss_dis_f],
                feed_dict={self.x_s: syns_np, self.seg: segs_np, self.pi: depths_np, self.tar_d_f: tar_1,
                           self.is_training: False, self.keep_prob: 1.0})

            merged_ = self.sess.run(self.merged,
                                    feed_dict={self.x_s: syns_np, self.x_r: reals_np, self.seg: segs_np,
                                               self.pi: depths_np, self.tar_d_r: tar_1, self.tar_d_f: tar_0,
                                               self.is_training: False,
                                               self.keep_prob: 1.0})
            self.summary_writer.add_summary(merged_, epoch)

            # accumulate batch losses weighted by batch size
            sum_loss_dis_f_D += loss_dis_f_D_ * len(syns_np)
            sum_loss_dis_r_D += loss_dis_r_D_ * len(syns_np)
            sum_loss_dis_f_G += loss_dis_f_G_ * len(syns_np)
            sum_loss_perc += loss_perc_ * len(syns_np)
            sum_loss_dis_total += loss_dis_total_ * len(syns_np)
            sum_loss_gen_total += loss_gen_total_ * len(syns_np)
            sum_loss_task_total += loss_task_total_ * len(syns_np)
            sum_loss_task_s += loss_task_s_ * len(syns_np)
            sum_loss_task_g += loss_task_g_ * len(syns_np)
            sum_loss_PI_s += loss_PI_s_ * len(syns_np)
            sum_loss_PI_g += loss_PI_g_ * len(syns_np)
            sum_loss_PI_total += loss_PI_total_ * len(syns_np)

        dif_sec = time.time() - start
        hours = int(dif_sec // 3600)
        minutes = int((dif_sec - hours * 3600) // 60)
        seconds = int(dif_sec - hours * 3600 - minutes * 60)
        print("---------------------------------------------------------------------------------------------")
        print(epoch, ", total time: {}hour, {}min, {}sec".format(hours, minutes, seconds))
        print("epoch = {:}, Generator Total Loss = {:.4f}, Discriminator Total Loss = {:.4f}, "
              "Task Predictor Total Loss = {:.4f}, Privileged Network Total Loss = {:.4f}".format(
                  epoch, sum_loss_gen_total / len_data_syn, sum_loss_dis_total / len_data_syn,
                  sum_loss_task_total / len_data_syn, sum_loss_PI_total / len_data_syn))
        print("Discriminator Real Loss = {:.4f}, Discriminator Fake Loss = {:.4f}, Discriminator Fake Loss for G = {:.4f}".format(
            sum_loss_dis_r_D / len_data_syn, sum_loss_dis_f_D / len_data_syn, sum_loss_dis_f_G / len_data_syn))
        print("Task Predictor Loss for Synthesis = {:.4f}, Task Predictor Loss for Generated = {:.4f}".format(
            sum_loss_task_s / len_data_syn, sum_loss_task_g / len_data_syn))
        print("Privileged Information Loss for Synthesis = {:.4f}, Privileged Information Loss for Generated = {:.4f}".format(
            sum_loss_PI_s / len_data_syn, sum_loss_PI_g / len_data_syn))
        print("Perceptual Loss = {:.4f}".format(sum_loss_perc / len_data_syn))

        # if epoch % self.valid_span == 0:
        #     print("validation phase")
        #     # TODO

        if epoch % self.output_img_span == 0:
            print("output image now....")
            syns_np, segs_np, depths_np, reals_np, real_segs_np = self.make_datasets.get_data_for_1_batch_for_output()
            t_out_s_, t_out_g_, g_out_ = self.sess.run(
                [self.t_out_s, self.t_out_g, self.g_out],
                feed_dict={self.x_s: syns_np, self.is_training: False, self.keep_prob: 1.0})
            t_out_r_ = self.sess.run(
                self.t_out_r,
                feed_dict={self.x_r_v: reals_np, self.is_training: False, self.keep_prob: 1.0})
            Utility.make_output_img(syns_np, g_out_, t_out_s_, t_out_g_, segs_np,
                                    epoch, self.logfile_name, self.out_img_dir)
            Utility.make_output_img_for_real(reals_np, t_out_r_, real_segs_np,
                                             epoch, self.logfile_name, self.out_img_dir)

        # save model
        if epoch % self.save_model_span == 0 and epoch != 0:
            saver2 = tf.train.Saver()
            _ = saver2.save(self.sess, './out_models/model_' + self.logfile_name + '_' + str(epoch) + '.ckpt')
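# Hedged sketch of make_target_1_0 as used in the training loop above: a
# constant-valued target map for a patch discriminator whose output grid is
# (img_width // 8, img_height // 8). The real signature is not shown; this is
# inferred from the call sites.
import numpy as np

def make_target_1_0_sketch(value, batch_len, width, height):
    # one scalar target per discriminator output patch
    return np.full((batch_len, height, width, 1), value, dtype=np.float32)

# tar_1 = make_target_1_0_sketch(1.0, 8, 256 // 8, 256 // 8)  # "real" targets
# tar_0 = make_target_1_0_sketch(0.0, 8, 256 // 8, 256 // 8)  # "fake" targets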
for i in range(0, val_data_num, BATCH_SIZE):
    img_batch, tars_batch = make_datasets.get_valid_data_for_1_batch(i, BATCH_SIZE)
    logits_r_, logits_f_ = sess.run([logits_r, logits_f], feed_dict={x_: img_batch, is_training_: False})
    # stack the two one-class scores and the target label into rows of
    # [score_OCC1, score_OCC2, label]
    logits_r_re = np.reshape(logits_r_, (-1, 1))
    logits_f_re = np.reshape(logits_f_, (-1, 1))
    tars_batch_re = np.reshape(tars_batch, (-1, 1))
    score_A_np_tmp = np.concatenate((logits_r_re, logits_f_re, tars_batch_re), axis=1)
    score_A_np = np.concatenate((score_A_np, score_A_np_tmp), axis=0)

tp0, fp0, tn0, fn0, precision0, recall0, tp1, fp1, tn1, fn1, precision1, recall1 = \
    Utility.compute_precision_recall(score_A_np, INLIER_NUM, THRESHOLD_TAU)
auc0, auc1 = Utility.make_ROC_graph(score_A_np, 'out_graph/' + LOGFILE_NAME, epoch, THRESHOLD_TAU)
print("by OCC1, tp:{}, fp:{}, tn:{}, fn:{}, precision:{:.4f}, recall:{:.4f}, AUC:{:.4f}".format(
    tp0, fp0, tn0, fn0, precision0, recall0, auc0))
print("by OCC2, tp:{}, fp:{}, tn:{}, fn:{}, precision:{:.4f}, recall:{:.4f}, AUC:{:.4f}".format(
    tp1, fp1, tn1, fn1, precision1, recall1, auc1))
log_list.append([epoch, auc0, auc1])

# reconstruct samples from the training (in) and validation (out) sets for visual inspection
img_batch_in = make_datasets.get_data_for_1_batch(0, 10)
img_batch_out, _ = make_datasets.get_valid_data_for_1_batch(0, 10)
x_dec_in = sess.run(x_dec, feed_dict={x_: img_batch_in, is_training_: False})
x_dec_out = sess.run(x_dec, feed_dict={x_: img_batch_out, is_training_: False})
Utility.make_output_img(img_batch_out, img_batch_in, x_dec_out, x_dec_in, epoch, LOGFILE_NAME, OUT_IMG_DIR)

# after learning
Utility.save_list_to_csv(log_list, 'log/' + LOGFILE_NAME + '_auc.csv')
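# Hedged sketch of thresholded precision/recall, analogous to the
# Utility.compute_precision_recall call above; the real helper additionally
# splits the two one-class scores, while this assumes a single score column
# and that scores >= tau are predicted anomalous.
import numpy as np

def precision_recall_at_tau(scores, labels, tau):
    pred = scores >= tau
    tp = int(np.sum(pred & (labels == 1)))
    fp = int(np.sum(pred & (labels == 0)))
    fn = int(np.sum(~pred & (labels == 1)))
    tn = int(np.sum(~pred & (labels == 0)))
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    return tp, fp, tn, fn, precision, recall

# precision_recall_at_tau(np.array([0.9, 0.4]), np.array([1, 0]), 0.5) -> (1, 0, 1, 0, 1.0, 1.0)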
    len_valid_data, CATEGORICAL_NUM)
x_gen_list = []

# sweep the first continuous code while pinning the second to c_conti[5, 1]
for num, c_conti_1 in enumerate(c_conti):
    c_conti_1_0 = c_conti_1[0].reshape(1, 1)
    c_conti_1_1 = c_conti[5, 1].reshape(1, 1)
    c_conti_1_same = np.concatenate((c_conti_1_0, c_conti_1_1), axis=1)
    c_conti_1_same = np.tile(c_conti_1_same, (len_valid_data, 1))
    x_gen_ = sess.run(x_gen, feed_dict={z_: z, c_categ_: c_categ, c_conti_: c_conti_1_same,
                                        is_training_: False})
    x_gen_list.append(x_gen_)

# sweep the second continuous code while pinning the first to c_conti[5, 0]
for num, c_conti_1 in enumerate(c_conti):
    c_conti_1_0 = c_conti[5, 0].reshape(1, 1)
    c_conti_1_1 = c_conti_1[1].reshape(1, 1)
    c_conti_1_same = np.concatenate((c_conti_1_0, c_conti_1_1), axis=1)
    c_conti_1_same = np.tile(c_conti_1_same, (len_valid_data, 1))
    x_gen_ = sess.run(x_gen, feed_dict={z_: z, c_categ_: c_categ, c_conti_: c_conti_1_same,
                                        is_training_: False})
    x_gen_list.append(x_gen_)

Utility.make_output_img(x_gen_list, epoch, LOGFILE_NAME, OUT_IMG_DIR)
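# Hedged sketch of the latent-traversal pattern above: vary one continuous
# InfoGAN code over its range while pinning the other, so each generated row
# isolates the visual effect of a single code. The range and the pinned value
# are illustrative assumptions.
import numpy as np

steps = np.linspace(-1.0, 1.0, 10, dtype=np.float32).reshape(10, 1)
pinned = np.full((10, 1), 0.5, dtype=np.float32)
c_sweep_first = np.concatenate((steps, pinned), axis=1)   # sweep code 0, pin code 1
c_sweep_second = np.concatenate((pinned, steps), axis=1)  # pin code 0, sweep code 1
print(c_sweep_first.shape, c_sweep_second.shape)  # (10, 2) (10, 2)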