# stdlib / third-party imports needed by this function
import os
from glob import glob

import numpy as np
from tqdm import tqdm

# project-local modules (import paths assumed): ersa_utils, nn_utils, collectionMaker;
# task_dir and model_name are assumed to be defined at module level


def get_ious():
    conf_dir = os.path.join(task_dir, 'conf_map_{}'.format(model_name))
    conf_files = sorted(glob(os.path.join(conf_dir, '*.npy')))
    cm = collectionMaker.read_collection('aemo_pad')
    truth_files = cm.load_files(field_name='aus50', field_id='', field_ext='.*gt_d255')
    truth_files = [f[0] for f in truth_files[:2]]

    # disabled alternative: use every unique confidence value as a threshold
    '''uniq_vals = []
    for conf, truth in zip(conf_files, truth_files):
        c = ersa_utils.load_file(conf)
        uniq_vals.append(np.unique(c.flatten()))
    uniq_vals = np.sort(np.unique(np.concatenate(uniq_vals)))
    ious_a = np.zeros(len(uniq_vals))
    ious_b = np.zeros(len(uniq_vals))'''

    uniq_vals = np.linspace(0, 1, 1000)
    ious_a = np.zeros(len(uniq_vals))
    ious_b = np.zeros(len(uniq_vals))
    for conf, truth in zip(conf_files, truth_files):
        c = ersa_utils.load_file(conf)
        t = ersa_utils.load_file(truth)
        for cnt, th in enumerate(tqdm(uniq_vals)):
            c_th = (c > th).astype(int)  # np.int is removed in recent NumPy
            a, b = nn_utils.iou_metric(c_th, t, truth_val=1, divide_flag=True)
            # accumulate intersection (a) and union (b) across tiles so the
            # stacked curves reflect all files, not just the last one
            ious_a[cnt] += a
            ious_b[cnt] += b
    return np.stack([uniq_vals, ious_a, ious_b], axis=0)
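
# Usage sketch (assumption, not part of the original module): sweep the
# returned (threshold, intersection, union) curves for the confidence
# threshold that maximizes the overall IoU. Everything below is illustrative.
if __name__ == '__main__':
    thresholds, inters, unions = get_ious()
    ious = inters / np.maximum(unions, 1e-9)  # guard against empty unions
    best = int(np.argmax(ious))
    print('best threshold={:.3f}, IoU={:.3f}'.format(thresholds[best], ious[best]))
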
def process(self):
    """
    Evaluate the network
    :return:
    """
    # assumed module-level imports: os, time, numpy as np, tensorflow as tf,
    # plus the project's ersa_utils, nn_utils, dataReaderSegmentation, patchExtractor
    nn_utils.set_gpu(self.gpu)

    if self.score_results:
        # truncate any previous result file
        with open(os.path.join(self.score_save_dir, 'result.txt'), 'w'):
            pass
    iou_record = []

    # prepare the reader
    if self.score_results:
        init_op, reader_op = dataReaderSegmentation.DataReaderSegmentationTesting(
            self.input_size, self.tile_size, self.file_list,
            overlap=self.model.get_overlap(), pad=self.model.get_overlap() // 2,
            batch_size=self.batch_size, chan_mean=self.img_mean,
            is_train=False, has_gt=True, random=False, gt_dim=1,
            include_gt=True).read_op()
        feature, label = reader_op
        self.model.create_graph(feature, **self.kwargs)
    else:
        init_op, reader_op = dataReaderSegmentation.DataReaderSegmentationTesting(
            self.input_size, self.tile_size, self.file_list,
            overlap=self.model.get_overlap(), pad=self.model.get_overlap() // 2,
            batch_size=self.batch_size, chan_mean=self.img_mean,
            is_train=False, has_gt=False, random=False, gt_dim=0,
            include_gt=False).read_op()
        feature = reader_op
        self.model.create_graph(feature[0], **self.kwargs)
    pad = self.model.get_overlap()

    for file_cnt, file_name_list in enumerate(self.file_list):
        file_name_truth = None
        if self.score_results:
            file_name, file_name_truth = file_name_list
            tile_name = os.path.basename(file_name_truth).split(self.split_char)[0]
        else:
            file_name = file_name_list[0]
            tile_name = os.path.basename(file_name).split(self.split_char)[0]
        if self.verb:
            print('Evaluating {} ... '.format(tile_name))

        # read tile size if no tile size is given
        if self.tile_size is None or self.compute_shape_flag:
            self.compute_shape_flag = True
            tile = ersa_utils.load_file(file_name)
            self.tile_size = tile.shape[:2]

        start_time = time.time()

        # run the model
        if self.model.config is None:
            self.model.config = tf.ConfigProto(allow_soft_placement=True)
        with tf.Session(config=self.model.config) as sess:
            init = tf.global_variables_initializer()
            sess.run(init)
            self.model.load(self.model_dir, sess, epoch=self.load_epoch_num,
                            best_model=self.best_model)
            result = self.model.test_sample(sess, init_op[file_cnt])
        image_pred = patchExtractor.unpatch_block(
            result,
            tile_dim=[self.tile_size[0] + pad, self.tile_size[1] + pad],
            patch_size=self.input_size,
            tile_dim_output=self.tile_size,
            patch_size_output=[self.input_size[0] - pad, self.input_size[1] - pad],
            overlap=pad)
        if self.compute_shape_flag:
            self.tile_size = None

        pred = nn_utils.get_pred_labels(image_pred) * self.truth_val

        if self.score_results:
            truth_label_img = ersa_utils.load_file(file_name_truth)
            iou = nn_utils.iou_metric(truth_label_img, pred, divide_flag=True)
            iou_record.append(iou)
            duration = time.time() - start_time
            if self.verb:
                print('{} mean IoU={:.3f}, duration: {:.3f}'.format(
                    tile_name, iou[0] / iou[1], duration))

        # save results
        pred_save_dir = os.path.join(self.score_save_dir, 'pred')
        ersa_utils.make_dir_if_not_exist(pred_save_dir)
        ersa_utils.save_file(os.path.join(pred_save_dir, '{}.png'.format(tile_name)),
                             pred.astype(np.uint8))
        if self.score_results:
            with open(os.path.join(self.score_save_dir, 'result.txt'), 'a+') as file:
                file.write('{} {}\n'.format(tile_name, iou))

    if self.score_results:
        iou_record = np.array(iou_record)
        # overall IoU: sum of intersections over sum of unions across all tiles
        mean_iou = np.sum(iou_record[:, 0]) / np.sum(iou_record[:, 1])
        print('Overall mean IoU={:.3f}'.format(mean_iou))
        with open(os.path.join(self.score_save_dir, 'result.txt'), 'a+') as file:
            file.write('{}'.format(mean_iou))
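
# Illustration (hypothetical, independent of patchExtractor.unpatch_block):
# how overlapping patches are stitched back seam-free. Each patch of size
# patch_size keeps only its central (patch_size - pad) window, so the crops
# tile the output exactly once. Assumes tile_size divides evenly into crop
# windows; the real unpatch_block also handles the padded tile borders.
def stitch_center_crops(patches, tile_size, patch_size, pad):
    """Toy stitcher: place each patch's central crop on a non-overlapping grid."""
    out_h, out_w = patch_size[0] - pad, patch_size[1] - pad
    rows, cols = tile_size[0] // out_h, tile_size[1] // out_w
    tile = np.zeros(tile_size)
    for r in range(rows):
        for c in range(cols):
            patch = patches[r * cols + c]
            crop = patch[pad // 2:pad // 2 + out_h, pad // 2:pad // 2 + out_w]
            tile[r * out_h:(r + 1) * out_h, c * out_w:(c + 1) * out_w] = crop
    return tile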