def mode_predonly(sess, FLAGS, TEST_DIR, validation_dataset_reader, valid_records,
                  pred_annotation, image, annotation, keep_probability, logits,
                  NUM_OF_CLASSES):
    validation_dataset_reader.reset_batch_offset(0)
    probability = tf.nn.softmax(logits=logits, axis=3)  # softmax over the class axis

    for itr1 in range(validation_dataset_reader.get_num_of_records() //
                      FLAGS.batch_size):
        valid_images, _ = validation_dataset_reader.next_batch(FLAGS.batch_size)
        predprob, pred = sess.run([probability, pred_annotation],
                                  feed_dict={image: valid_images,
                                             keep_probability: 1.0})

        np.set_printoptions(threshold=10)
        pred = np.squeeze(pred)
        predprob = np.squeeze(predprob)

        # @TODO: convert to np.uint8 once instead of repeatedly per image
        for itr2 in range(FLAGS.batch_size):
            # 1. run CRF refinement on the raw network output
            crfwithlabeloutput = denseCRF.crf_with_labels(
                valid_images[itr2].astype(np.uint8),
                pred[itr2].astype(np.uint8), NUM_OF_CLASSES)
            crfwithprobsoutput = denseCRF.crf_with_probs(
                valid_images[itr2].astype(np.uint8), predprob[itr2],
                NUM_OF_CLASSES)

            # 2. collect results for saving
            original = valid_images[itr2].astype(np.uint8)
            fcnpred = pred[itr2].astype(np.uint8)
            crfwithlabelpred = crfwithlabeloutput.astype(np.uint8)
            crfwithprobspred = crfwithprobsoutput.astype(np.uint8)

            # 3. save results (0-indexed image numbering)
            filenum = str(itr1 * FLAGS.batch_size + itr2)
            # Utils.save_image(original, TEST_DIR, name="in_" + filenum)
            Utils.save_image(crfwithprobspred, TEST_DIR,
                             name="probcrf_" + filenum)
            Utils.save_image(crfwithlabelpred, TEST_DIR,
                             name="labelcrf_" + filenum)
            print("Saved image: %s" % filenum)
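
# denseCRF is a project-local wrapper around a dense-CRF implementation. For
# orientation only: a minimal sketch of what crf_with_probs could look like on
# top of pydensecrf (an assumed dependency; the sxy/srgb/compat values below
# are common defaults, not necessarily the project's, and crf_with_labels
# would be analogous using pydensecrf.utils.unary_from_labels).
def crf_with_probs_sketch(image, probs, n_classes, n_iters=5):
    import pydensecrf.densecrf as dcrf
    from pydensecrf.utils import unary_from_softmax

    h, w = image.shape[:2]
    # pydensecrf expects class-first probabilities: (n_classes, H, W)
    unary = unary_from_softmax(probs.transpose(2, 0, 1))
    d = dcrf.DenseCRF2D(w, h, n_classes)
    d.setUnaryEnergy(np.ascontiguousarray(unary))
    d.addPairwiseGaussian(sxy=3, compat=3)            # smoothness kernel
    d.addPairwiseBilateral(sxy=80, srgb=13,
                           rgbim=np.ascontiguousarray(image),
                           compat=10)                 # appearance kernel
    q = np.array(d.inference(n_iters))                # (n_classes, H*W)
    return np.argmax(q, axis=0).reshape(h, w)
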
def mode_new_test(sess, flags, save_dir, validation_dataset_reader, valid_records,
                  pred_annotation, image, annotation, keep_probability, logits,
                  num_classes):
    print(">>>>>>>>>>>>>>>>Test mode")
    start = time.time()

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    validation_dataset_reader.reset_batch_offset(0)
    probability = tf.nn.softmax(logits=logits, axis=3)

    cross_mats = list()
    crf_cross_mats = list()

    # tf_pixel_acc_list = []
    # tf_miou_list = []
    # pixel_acc_op, pixel_acc_update_op = tf.metrics.accuracy(
    #     labels=annotation, predictions=pred_annotation)
    # mean_iou_op, mean_iou_update_op = tf.metrics.mean_iou(
    #     labels=annotation, predictions=pred_annotation, num_classes=num_classes)

    for itr1 in range(validation_dataset_reader.get_num_of_records() //
                      flags.batch_size):
        valid_images, valid_annotations = validation_dataset_reader.next_batch(
            flags.batch_size)
        predprob, pred = sess.run([probability, pred_annotation],
                                  feed_dict={image: valid_images,
                                             keep_probability: 1.0})

        # tf metrics (currently disabled)
        sess.run(tf.local_variables_initializer())
        feed_dict = {image: valid_images,
                     annotation: valid_annotations,
                     keep_probability: 1.0}
        # predprob, pred, _, __ = sess.run(
        #     [probability, pred_annotation, pixel_acc_update_op, mean_iou_update_op],
        #     feed_dict=feed_dict)
        # tf_pixel_acc, tf_miou = sess.run([pixel_acc_op, mean_iou_op],
        #                                  feed_dict=feed_dict)
        # tf_pixel_acc_list.append(tf_pixel_acc)
        # tf_miou_list.append(tf_miou)

        np.set_printoptions(threshold=10)
        pred = np.squeeze(pred)
        predprob = np.squeeze(predprob)
        valid_annotations = np.squeeze(valid_annotations, axis=3)

        for itr2 in range(flags.batch_size):
            fig = plt.figure()
            pos = 240 + 1
            plt.subplot(pos)
            plt.imshow(valid_images[itr2].astype(np.uint8))
            plt.axis('off')
            plt.title('Original')

            pos = 240 + 2
            plt.subplot(pos)
            # plt.imshow(valid_annotations[itr2].astype(np.uint8),
            #            cmap=plt.get_cmap('nipy_spectral'))
            plt.imshow(valid_annotations[itr2].astype(np.uint8),
                       cmap=ListedColormap(label_colors_10k),
                       norm=clothnorm_10k)
            plt.axis('off')
            plt.title('GT')

            pos = 240 + 3
            plt.subplot(pos)
            # plt.imshow(pred[itr2].astype(np.uint8),
            #            cmap=plt.get_cmap('nipy_spectral'))
            plt.imshow(pred[itr2].astype(np.uint8),
                       cmap=ListedColormap(label_colors_10k),
                       norm=clothnorm_10k)
            plt.axis('off')
            plt.title('Prediction')

            # Confusion matrix for this image prediction
            crossMat = EvalMetrics.calculate_confusion_matrix(
                valid_annotations[itr2].astype(np.uint8),
                pred[itr2].astype(np.uint8), num_classes)
            cross_mats.append(crossMat)
            np.savetxt(save_dir + "Crossmatrix" +
                       str(itr1 * flags.batch_size + itr2) + ".csv",
                       crossMat, fmt='%4i', delimiter=',')

            # Generate CRF refinement of the prediction
            crfwithprobsoutput = denseCRF.crf_with_probs(
                valid_images[itr2].astype(np.uint8), predprob[itr2],
                num_classes)
            crfwithprobspred = crfwithprobsoutput.astype(np.uint8)

            # ----------------------- Save input and masks -----------------------
            Utils.save_image(valid_images[itr2].astype(np.uint8), save_dir,
                             name="inp_" + str(itr1 * flags.batch_size + itr2))
            Utils.save_image(valid_annotations[itr2].astype(np.uint8), save_dir,
                             name="gt_" + str(itr1 * flags.batch_size + itr2))
            Utils.save_image(pred[itr2].astype(np.uint8), save_dir,
                             name="pred_" + str(itr1 * flags.batch_size + itr2))
            Utils.save_image(crfwithprobspred, save_dir,
                             name="crf_" + str(itr1 * flags.batch_size + itr2))

            # ---------------------- Save visualized masks ----------------------
            Utils.save_visualized_image(
                valid_annotations[itr2].astype(np.uint8), save_dir,
                image_name="gt_" + str(itr1 * flags.batch_size + itr2),
                n_classes=num_classes)
            Utils.save_visualized_image(
                pred[itr2].astype(np.uint8), save_dir,
                image_name="pred_" + str(itr1 * flags.batch_size + itr2),
                n_classes=num_classes)
            Utils.save_visualized_image(
                crfwithprobspred, save_dir,
                image_name="crf_" + str(itr1 * flags.batch_size + itr2),
                n_classes=num_classes)

            # Confusion matrix for the CRF-refined prediction
            prob_crf_crossMat = EvalMetrics.calculate_confusion_matrix(
                valid_annotations[itr2].astype(np.uint8),
                crfwithprobsoutput.astype(np.uint8), num_classes)
            crf_cross_mats.append(prob_crf_crossMat)
            np.savetxt(save_dir + "prob_crf_Crossmatrix" +
                       str(itr1 * flags.batch_size + itr2) + ".csv",
                       prob_crf_crossMat, fmt='%4i', delimiter=',')

            pos = 240 + 4
            plt.subplot(pos)
            # plt.imshow(crfwithprobsoutput.astype(np.uint8),
            #            cmap=plt.get_cmap('nipy_spectral'))
            plt.imshow(crfwithprobsoutput.astype(np.uint8),
                       cmap=ListedColormap(label_colors_10k),
                       norm=clothnorm_10k)
            plt.axis('off')
            plt.title('Prediction + CRF')

            # Save the input/gt/pred/crf summary figure for this image
            plt.savefig(save_dir + "resultSum_" +
                        str(itr1 * flags.batch_size + itr2))
            plt.close('all')
            print("Saved image: %d" % (itr1 * flags.batch_size + itr2))

    try:
        total_cm = np.sum(cross_mats, axis=0)
        np.savetxt(flags.logs_dir + "Crossmatrix.csv", total_cm,
                   fmt='%4i', delimiter=',')

        # print("\n>>> Prediction results (TF functions):")
        # print("Pixel acc:", np.nanmean(tf_pixel_acc_list))
        # print("mean IoU:", np.nanmean(tf_miou_list))

        print("\n>>> Prediction results:")
        EvalMetrics.calculate_eval_metrics_from_confusion_matrix(
            total_cm, num_classes)

        # Prediction with CRF
        crf_total_cm = np.sum(crf_cross_mats, axis=0)
        np.savetxt(flags.logs_dir + "CRF_Crossmatrix.csv", crf_total_cm,
                   fmt='%4i', delimiter=',')
        print("\n")
        print("\n>>> Prediction results (CRF):")
        EvalMetrics.calculate_eval_metrics_from_confusion_matrix(
            crf_total_cm, num_classes)
    except Exception as err:
        print(err)

    end = time.time()
    print("Testing time:", end - start, "seconds")
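
# EvalMetrics is project-local. For orientation, minimal sketches (not the
# project's implementations) of the two helpers used above: a confusion matrix
# built with a single bincount pass (assuming labels in [0, n_classes)), and
# pixel accuracy / mean IoU derived from an aggregated matrix cm whose rows
# are ground truth and columns are prediction.
def confusion_matrix_sketch(gt, pred, n_classes):
    mask = (gt >= 0) & (gt < n_classes)        # drop out-of-range labels
    idx = n_classes * gt[mask].astype(int) + pred[mask].astype(int)
    return np.bincount(idx, minlength=n_classes ** 2).reshape(n_classes,
                                                              n_classes)


def eval_metrics_from_cm_sketch(cm):
    cm = cm.astype(np.float64)
    pixel_acc = np.diag(cm).sum() / cm.sum()   # fraction of correctly labelled pixels
    union = cm.sum(axis=0) + cm.sum(axis=1) - np.diag(cm)
    with np.errstate(divide='ignore', invalid='ignore'):
        iou = np.diag(cm) / union              # per-class intersection over union
    return pixel_acc, np.nanmean(iou), iou     # nanmean skips absent classes
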
def mode_crftest(sess, FLAGS, TEST_DIR, validation_dataset_reader, valid_records,
                 pred_annotation, image, annotation, keep_probability, logits,
                 NUM_OF_CLASSES):
    # Per-image accuracies for FCN / label-CRF / prob-CRF; the last axis
    # appears to be (foreground acc, overall acc) given the thresholds below.
    accuracies = np.zeros(
        (validation_dataset_reader.get_num_of_records(), 3, 2))
    nFailed = 0
    validation_dataset_reader.reset_batch_offset(0)
    probability = tf.nn.softmax(logits=logits, axis=3)  # softmax over the class axis

    for itr1 in range(validation_dataset_reader.get_num_of_records() //
                      FLAGS.batch_size):
        valid_images, valid_annotations = validation_dataset_reader.next_batch(
            FLAGS.batch_size)
        predprob, pred = sess.run([probability, pred_annotation],
                                  feed_dict={image: valid_images,
                                             annotation: valid_annotations,
                                             keep_probability: 1.0})

        np.set_printoptions(threshold=10)
        valid_annotations = np.squeeze(valid_annotations, axis=3)
        pred = np.squeeze(pred)
        predprob = np.squeeze(predprob)

        # @TODO: convert to np.uint8 once instead of repeatedly per image
        for itr2 in range(FLAGS.batch_size):
            # 1. run CRF refinement on the raw network output
            crfwithlabeloutput = denseCRF.crf_with_labels(
                valid_images[itr2].astype(np.uint8),
                pred[itr2].astype(np.uint8), NUM_OF_CLASSES)
            crfwithprobsoutput = denseCRF.crf_with_probs(
                valid_images[itr2].astype(np.uint8), predprob[itr2],
                NUM_OF_CLASSES)

            original = valid_images[itr2].astype(np.uint8)
            groundtruth = valid_annotations[itr2].astype(np.uint8)
            fcnpred = pred[itr2].astype(np.uint8)
            crfwithlabelpred = crfwithlabeloutput.astype(np.uint8)
            crfwithprobspred = crfwithprobsoutput.astype(np.uint8)

            # 2. confusion matrix between ground truth and each prediction
            pred_confusion_matrix = EvalMetrics.calculate_confusion_matrix(
                groundtruth, fcnpred, NUM_OF_CLASSES)
            crfwithlabelpred_confusion_matrix = \
                EvalMetrics.calculate_confusion_matrix(
                    groundtruth, crfwithlabelpred, NUM_OF_CLASSES)
            crfwithprobspred_confusion_matrix = \
                EvalMetrics.calculate_confusion_matrix(
                    groundtruth, crfwithprobspred, NUM_OF_CLASSES)

            idx = itr1 * FLAGS.batch_size + itr2
            accuracies[idx][0] = EvalMetrics.calcuate_accuracy(
                pred_confusion_matrix, False)
            accuracies[idx][1] = EvalMetrics.calcuate_accuracy(
                crfwithlabelpred_confusion_matrix, False)
            accuracies[idx][2] = EvalMetrics.calcuate_accuracy(
                crfwithprobspred_confusion_matrix, True)

            # 3. flag images whose CRF result falls below the accuracy thresholds
            T_full = 0.9
            T_fgnd = 0.85
            if (accuracies[idx][2][1] < T_full
                    or accuracies[idx][2][0] < T_fgnd):
                nFailed += 1
                print("Failed image (%d-th): %d" % (nFailed, idx))

            # 4. save results (0-indexed image numbering)
            filenum = str(idx)
            Utils.save_image(original, FLAGS.logs_dir, name="in_" + filenum)
            Utils.save_image(groundtruth, TEST_DIR, name="gt_" + filenum)
            Utils.save_image(crfwithprobspred, TEST_DIR, name="crf_" + filenum)
            print("Saved image: %s" % filenum)

    np.save(FLAGS.logs_dir + "accuracy", accuracies)
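
# EvalMetrics.calcuate_accuracy [sic] is also project-local; judging by the
# T_fgnd/T_full checks above, it returns a (foreground acc, overall acc) pair.
# A sketch of that computation, assuming class 0 is the background (the
# boolean argument, which may only control printing, is omitted here):
def accuracy_pair_sketch(cm):
    cm = cm.astype(np.float64)
    overall = np.diag(cm).sum() / cm.sum()            # all pixels
    fgnd = np.diag(cm)[1:].sum() / cm[1:, :].sum()    # ground-truth foreground rows
    return np.array([fgnd, overall])
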
def main():
    """Create the model and start the evaluation process."""
    # Create queue coordinator.
    coord = tf.train.Coordinator()
    h, w = INPUT_SIZE

    # Load reader.
    with tf.name_scope("create_inputs"):
        reader = DataSetReader(IMAGE_DIR, LABEL_DIR, DATA_SET, INPUT_SIZE,
                               False, False, False, coord, DATA_SET)
        # reader = DataSetReader(IMAGE_DIR, coord, DATA_SET)
        image = reader.image
        label = reader.label
        image_rev = tf.reverse(image, tf.stack([1]))
        image_list = reader.image_list
        label_list = reader.label_list

    image_batch_origin = tf.stack([image, image_rev])
    image_batch = tf.image.resize_images(image_batch_origin,
                                         [int(h), int(w)])
    image_batch075 = tf.image.resize_images(
        image_batch_origin, [int(h * 0.75), int(w * 0.75)])
    image_batch125 = tf.image.resize_images(
        image_batch_origin, [int(h * 1.25), int(w * 1.25)])

    # Create the network at three scales, sharing variables.
    with tf.variable_scope('', reuse=False):
        net_100 = DeepLabV2Model({'data': image_batch},
                                 is_training=False, n_classes=N_CLASSES)
    with tf.variable_scope('', reuse=True):
        net_075 = DeepLabV2Model({'data': image_batch075},
                                 is_training=False, n_classes=N_CLASSES)
    with tf.variable_scope('', reuse=True):
        net_125 = DeepLabV2Model({'data': image_batch125},
                                 is_training=False, n_classes=N_CLASSES)

    # Parsing net: average the three scales at the original resolution.
    parsing_out1_100 = net_100.layers['fc1_human']
    parsing_out1_075 = net_075.layers['fc1_human']
    parsing_out1_125 = net_125.layers['fc1_human']
    parsing_out1 = tf.reduce_mean(tf.stack([
        tf.image.resize_images(parsing_out1_100,
                               tf.shape(image_batch_origin)[1:3]),
        tf.image.resize_images(parsing_out1_075,
                               tf.shape(image_batch_origin)[1:3]),
        tf.image.resize_images(parsing_out1_125,
                               tf.shape(image_batch_origin)[1:3])
    ]), axis=0)

    raw_output = tf.reduce_mean(tf.stack([parsing_out1]), axis=0)
    head_output, tail_output = tf.unstack(raw_output, num=2, axis=0)
    tail_list = tf.unstack(tail_output, num=N_CLASSES, axis=2)
    tail_list_rev = [None] * N_CLASSES

    # Undo the horizontal flip: swap left/right paired class channels
    # before averaging with the unflipped branch.
    if DATA_SET == "LIP":
        for xx in range(14):
            tail_list_rev[xx] = tail_list[xx]
        tail_list_rev[14] = tail_list[15]
        tail_list_rev[15] = tail_list[14]
        tail_list_rev[16] = tail_list[17]
        tail_list_rev[17] = tail_list[16]
        tail_list_rev[18] = tail_list[19]
        tail_list_rev[19] = tail_list[18]
    elif DATA_SET == "10k":
        for xx in range(9):
            tail_list_rev[xx] = tail_list[xx]
        tail_list_rev[9] = tail_list[10]
        tail_list_rev[10] = tail_list[9]
        tail_list_rev[11] = tail_list[11]
        tail_list_rev[12] = tail_list[13]
        tail_list_rev[13] = tail_list[12]
        tail_list_rev[14] = tail_list[15]
        tail_list_rev[15] = tail_list[14]
        tail_list_rev[16] = tail_list[16]
        tail_list_rev[17] = tail_list[17]

    tail_output_rev = tf.stack(tail_list_rev, axis=2)
    tail_output_rev = tf.reverse(tail_output_rev, tf.stack([1]))

    raw_output_all = tf.reduce_mean(
        tf.stack([head_output, tail_output_rev]), axis=0)
    raw_output_all = tf.expand_dims(raw_output_all, axis=0)
    logits = raw_output_all
    raw_output_all = tf.argmax(raw_output_all, axis=3)
    # Create 4-d tensor.
    prediction_all = tf.expand_dims(raw_output_all, axis=3)

    # Which variables to load.
    restore_var = tf.global_variables()

    # Set up tf session and initialize variables.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    init = tf.global_variables_initializer()

    sess.run(init)
    sess.run(tf.local_variables_initializer())

    # Load weights.
    loader = tf.train.Saver(var_list=restore_var)
    if RESTORE_FROM is not None:
        if load(loader, sess, RESTORE_FROM):
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")

    # Start queue threads.
    threads = tf.train.start_queue_runners(coord=coord, sess=sess)

    cross_mats = list()
    crf_cross_mats = list()
    probability = tf.nn.softmax(logits=logits, axis=3)

    image_options = {'resize': True, 'resize_size': IMAGE_SIZE}
    validation_dataset_reader = BatchDatsetReader.BatchDatset(
        valid_records, image_options)
    validation_dataset_reader.reset_batch_offset(0)

    # Iterate over evaluation steps.
    for step in range(NUM_STEPS):
        try:
            parsing_, probpred = sess.run([prediction_all, probability])
            if step % 100 == 0:
                print('step {:d}'.format(step))
                print(image_list[step])

            img_split = image_list[step].split('/')
            # extra split for modified file readers
            img_split = img_split[-1].split('\\')[1]
            img_id = img_split[:-4]

            msk = decode_labels(parsing_, num_classes=N_CLASSES)
            parsing_im = Image.fromarray(msk[0])
            parsing_im.save('{}/pred_{}_vis.png'.format(OUTPUT_DIR, img_id))
            cv2.imwrite('{}/pred_{}.png'.format(OUTPUT_DIR, img_id),
                        parsing_[0, :, :, 0])

            try:
                inp, gt = validation_dataset_reader.next_batch(1)
                msk = decode_labels(gt, num_classes=N_CLASSES)
                parsing_im = Image.fromarray(msk[0])
                parsing_im.save('{}/gt_{}_vis.png'.format(OUTPUT_DIR, img_id))
                cv2.imwrite('{}/gt_{}.png'.format(OUTPUT_DIR, img_id),
                            gt[0, :, :, 0])

                inp = inp[0]
                gt = gt[0]
                gt = np.squeeze(gt, axis=2)

                # Confusion matrix for this image prediction
                crossMat = EvalMetrics.calculate_confusion_matrix(
                    gt.astype(np.uint8),
                    parsing_[0, :, :, 0].astype(np.uint8), N_CLASSES)
                cross_mats.append(crossMat)
                np.savetxt(OUTPUT_DIR + "Crossmatrix" + str(img_id) + ".csv",
                           crossMat, fmt='%4i', delimiter=',')

                # Generate CRF refinement of the prediction
                crfwithprobsoutput = denseCRF.crf_with_probs(
                    inp.astype(np.uint8), probpred[0], N_CLASSES)
                crfwithprobspred = crfwithprobsoutput.astype(np.uint8)

                crfwithprobspred_expanded = np.expand_dims(crfwithprobspred,
                                                           axis=0)
                crfwithprobspred_expanded = np.expand_dims(
                    crfwithprobspred_expanded, axis=3)
                msk = decode_labels(crfwithprobspred_expanded,
                                    num_classes=N_CLASSES)
                parsing_im = Image.fromarray(msk[0])
                parsing_im.save('{}/crf_{}_vis.png'.format(OUTPUT_DIR, img_id))
                cv2.imwrite('{}/crf_{}.png'.format(OUTPUT_DIR, img_id),
                            crfwithprobspred)

                # Confusion matrix for the CRF-refined prediction
                prob_crf_crossMat = EvalMetrics.calculate_confusion_matrix(
                    gt.astype(np.uint8),
                    crfwithprobsoutput.astype(np.uint8), N_CLASSES)
                crf_cross_mats.append(prob_crf_crossMat)
                np.savetxt(OUTPUT_DIR + "crf_Crossmatrix" + str(img_id) +
                           ".csv", prob_crf_crossMat, fmt='%4i',
                           delimiter=',')
            except Exception as e:
                print(e)
        except Exception as err:
            print(err)

    try:
        total_cm = np.sum(cross_mats, axis=0)
        np.savetxt(OUTPUT_DIR + "Crossmatrix.csv", total_cm,
                   fmt='%4i', delimiter=',')

        print("\n>>> Prediction results (Our functions):")
        EvalMetrics.calculate_eval_metrics_from_confusion_matrix(
            total_cm, N_CLASSES)

        print("\n>>> Prediction results (LIP functions):")
        EvalMetrics.show_result(total_cm, N_CLASSES)

        # Prediction with CRF
        crf_total_cm = np.sum(crf_cross_mats, axis=0)
        np.savetxt(OUTPUT_DIR + "CRF_Crossmatrix.csv", crf_total_cm,
                   fmt='%4i', delimiter=',')
        print("\n")
        print(">>> Prediction results (CRF):")
        EvalMetrics.show_result(crf_total_cm, N_CLASSES)
    except Exception as err:
        print(err)

    coord.request_stop()
    coord.join(threads)
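
# decode_labels comes from the LIP reference code and maps an (N, H, W, 1)
# label batch to RGB visualisations. A minimal equivalent for reference, where
# label_colours stands in for the dataset's real colour table (illustrative
# only, not the imported implementation):
def decode_labels_sketch(mask, num_classes, label_colours):
    n, h, w, _ = mask.shape
    out = np.zeros((n, h, w, 3), dtype=np.uint8)
    for c in range(num_classes):
        out[mask[:, :, :, 0] == c] = label_colours[c]  # paint every pixel of class c
    return out
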