import glob
import itertools
import os
import random

import cv2
import numpy as np
import pandas as pd
import six
from sklearn import metrics
from tqdm import tqdm

# Package-local helpers (predict, model_from_checkpoint_path,
# unet_model_from_checkpoint_path, get_pairs_from_paths,
# get_triplets_from_paths, get_segmentation_array, get_image_array,
# get_depth_array, augment_seg, depth_accuracy) and the constants
# IMAGE_ORDERING, MAX_DEPTH and MIN_DEPTH are assumed to be provided
# elsewhere in this package.

EPS = 1e-12  # avoids division by zero for classes absent from both gt and pr


def evaluate(model=None, inp_images=None, annotations=None,
             inp_images_dir=None, annotations_dir=None,
             checkpoints_path=None):
    """Compute per-class, mean and frequency-weighted IoU of a segmentation model."""
    if model is None:
        assert checkpoints_path is not None, \
            "Please provide the model or the checkpoints_path"
        model = model_from_checkpoint_path(checkpoints_path)

    if inp_images is None:
        assert inp_images_dir is not None, \
            "Please provide inp_images or inp_images_dir"
        assert annotations_dir is not None, \
            "Please provide annotations or annotations_dir"
        paths = get_pairs_from_paths(inp_images_dir, annotations_dir)
        paths = list(zip(*paths))
        inp_images = list(paths[0])
        annotations = list(paths[1])

    assert type(inp_images) is list
    assert type(annotations) is list

    tp = np.zeros(model.n_classes)
    fp = np.zeros(model.n_classes)
    fn = np.zeros(model.n_classes)
    n_pixels = np.zeros(model.n_classes)

    for inp, ann in tqdm(zip(inp_images, annotations)):
        pr = predict(model, inp)
        gt = get_segmentation_array(ann, model.n_classes,
                                    model.output_width, model.output_height,
                                    no_reshape=True)
        gt = gt.argmax(-1)
        pr = pr.flatten()
        gt = gt.flatten()

        # Accumulate per-class true positives, false positives and false
        # negatives; IoU_c = tp_c / (tp_c + fp_c + fn_c).
        for cl_i in range(model.n_classes):
            tp[cl_i] += np.sum((pr == cl_i) * (gt == cl_i))
            fp[cl_i] += np.sum((pr == cl_i) * (gt != cl_i))
            fn[cl_i] += np.sum((pr != cl_i) * (gt == cl_i))
            n_pixels[cl_i] += np.sum(gt == cl_i)

    cl_wise_score = tp / (tp + fp + fn + EPS)
    n_pixels_norm = n_pixels / np.sum(n_pixels)
    frequency_weighted_IU = np.sum(cl_wise_score * n_pixels_norm)
    mean_IU = np.mean(cl_wise_score)

    return {
        "frequency_weighted_IU": frequency_weighted_IU,
        "mean_IU": mean_IU,
        "class_wise_IU": cl_wise_score
    }
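# A minimal usage sketch for evaluate(), assuming a checkpoint produced by
# this package exists; the directory and checkpoint paths below are
# hypothetical placeholders, not real files.
def _example_evaluate():
    results = evaluate(
        inp_images_dir="dataset/images_test/",        # hypothetical path
        annotations_dir="dataset/annotations_test/",  # hypothetical path
        checkpoints_path="checkpoints/vgg_unet_1")    # hypothetical path
    # mean_IU averages IoU over classes; frequency_weighted_IU weights each
    # class by its share of ground-truth pixels.
    print(results["mean_IU"], results["frequency_weighted_IU"])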
def seg2ann(seg_dir, ann_dir):
    """Write a .txt annotation (class id, pixel count, centroid, name) for
    every segmentation map found in seg_dir."""
    data = pd.read_csv('./PSPindexClass.csv')
    # CSV columns: ['Idx', 'Ratio', 'Train', 'Val', 'Stuff', 'Name']
    CNames = np.empty(150, dtype=object)
    for k in range(150):
        CNames[k] = data['Name'].iloc[k]

    seg_files = glob.glob(os.path.join(seg_dir, "*.png"))
    for seg_file in tqdm(seg_files):
        if isinstance(seg_file, six.string_types):
            out_fname = os.path.basename(seg_file)
            fname, ext = os.path.splitext(out_fname)
            fout = open(os.path.join(ann_dir, fname + ".txt"), "w")
            seg_labels = get_segmentation_array(seg_file, 150, 473, 473,
                                                no_reshape=True)

            CN = np.empty(150, dtype=object)
            for i in range(CN.shape[0]):
                CN[i] = []
            xsumavg = np.zeros(150)
            ysumavg = np.zeros(150)
            for k in range(150):
                CN[k].append(k + 1)  # class num, CN[k][0]
                CN[k].append(0)      # class val (pixel count), CN[k][1]
                CN[k][1] = np.sum(seg_labels[:, :, k])
                if CN[k][1] > 0:
                    # Mean x/y coordinate of the pixels belonging to class k.
                    ys, xs = np.nonzero(seg_labels[:, :, k] == 1)
                    xsumavg[k] = xs.sum() / CN[k][1]
                    ysumavg[k] = ys.sum() / CN[k][1]

            # Keyed by pixel count so classes can be emitted largest-first;
            # note that two classes with identical counts would collide.
            CDict = {}
            for k in range(150):
                if CN[k][1] != 0:
                    centroidx = xsumavg[k]
                    centroidy = ysumavg[k]
                    CDict[CN[k][1]] = [
                        "{:3d}".format(CN[k][0]),
                        "{:08d}".format(int(CN[k][1])),
                        "{:04d}".format(int(centroidx)),
                        "{:04d}".format(int(centroidy)),
                        CNames[k]
                    ]

            fout.write(
                "classnum, classval, centroidx, centroidy, classname\n")
            for key in sorted(CDict.keys(), reverse=True):
                if key != 0:
                    listToStr = ','.join(map(str, CDict[key]))
                    fout.write(listToStr + '\n')
            fout.close()
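# Usage sketch for seg2ann(), assuming ADE20K-style 150-class .png label maps
# and that ./PSPindexClass.csv is present; both directories are hypothetical.
def _example_seg2ann():
    seg2ann(seg_dir="ADEChallengeData2016/annotations/validation/",
            ann_dir="annotations_txt/")  # one .txt per .png is written here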
def seg2ann_single(seg_file):
    """Single-image variant of seg2ann: returns a dict describing the classes
    present in one segmentation map instead of writing a .txt file."""
    try:
        data = pd.read_csv('./PSPindexClass.csv')
    except Exception as e:
        print(e)
        return {}
    # CSV columns: ['Idx', 'Ratio', 'Train', 'Val', 'Stuff', 'Name']
    CNames = np.empty(150, dtype=object)
    for k in range(150):
        CNames[k] = data['Name'].iloc[k]

    seg_labels = get_segmentation_array(seg_file, 150, 473, 473,
                                        no_reshape=True)
    CN = np.empty(150, dtype=object)
    for i in range(CN.shape[0]):
        CN[i] = []
    xsumavg = np.zeros(150)
    ysumavg = np.zeros(150)
    for k in range(150):
        CN[k].append(k + 1)  # class num, CN[k][0]
        CN[k].append(0)      # class val (pixel count), CN[k][1]
        CN[k][1] = np.sum(seg_labels[:, :, k])
        if CN[k][1] > 0:
            # Mean x/y coordinate of the pixels belonging to class k.
            ys, xs = np.nonzero(seg_labels[:, :, k] == 1)
            xsumavg[k] = xs.sum() / CN[k][1]
            ysumavg[k] = ys.sum() / CN[k][1]

    # Keyed by pixel count; classes with identical counts would collide.
    CDict = {}
    for k in range(150):
        if CN[k][1] != 0:
            CDict[CN[k][1]] = [CN[k][0], int(CN[k][1]), CNames[k]]
    return CDict
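# Usage sketch for seg2ann_single(); the label map path is hypothetical. Each
# dict entry maps a pixel count to [class id, pixel count, class name].
def _example_seg2ann_single():
    class_dict = seg2ann_single("ADE_val_00000001.png")  # hypothetical file
    for count in sorted(class_dict, reverse=True):
        print(class_dict[count])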
def image_segm_depth_generator(images_path, segs_path, depth_path, batch_size,
                               n_classes, input_height, input_width,
                               output_height, output_width, do_augment=False):
    """Yield batches of (images, {segmentation targets, depth targets}) forever."""
    img_seg_depth_pairs = get_triplets_from_paths(images_path, segs_path,
                                                  depth_path)
    random.shuffle(img_seg_depth_pairs)
    zipped = itertools.cycle(img_seg_depth_pairs)

    while True:
        X = []
        Y_segm = []
        Y_depth = []
        for _ in range(batch_size):
            im, seg, depth = next(zipped)
            im = cv2.imread(im, 1)
            seg = cv2.imread(seg, 1)
            depth = cv2.imread(depth, 1)

            if do_augment:
                im, seg[:, :, 0] = augment_seg(im, seg[:, :, 0])

            X.append(get_image_array(im, input_width, input_height,
                                     ordering=IMAGE_ORDERING))
            Y_segm.append(get_segmentation_array(seg, n_classes,
                                                 output_width, output_height))
            Y_depth.append(get_depth_array(depth, output_width,
                                           output_height))

        yield np.array(X), {
            "segm_pred": np.array(Y_segm),
            "depth_pred": np.array(Y_depth)
        }
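# Usage sketch for image_segm_depth_generator(), with hypothetical dataset
# directories; sizes mirror the 473x473 resolution used elsewhere in this file.
def _example_generator():
    gen = image_segm_depth_generator(
        images_path="dataset/images/",   # hypothetical path
        segs_path="dataset/segs/",       # hypothetical path
        depth_path="dataset/depths/",    # hypothetical path
        batch_size=2, n_classes=150,
        input_height=473, input_width=473,
        output_height=473, output_width=473)
    X, targets = next(gen)
    print(X.shape, targets["segm_pred"].shape, targets["depth_pred"].shape)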
def evaluate_depth_segm(model=None, inp_images_dir=None,
                        segm_annotations_dir=None, depth_annotations_dir=None,
                        checkpoints_path=None):
    """Jointly evaluate segmentation (IoU) and depth (threshold accuracy,
    mean absolute error) for a multi-task model."""
    if model is None:
        assert checkpoints_path is not None, \
            "Please provide the model or the checkpoints_path"
        model = unet_model_from_checkpoint_path(checkpoints_path)

    paths = get_triplets_from_paths(inp_images_dir, segm_annotations_dir,
                                    depth_annotations_dir)
    paths = list(zip(*paths))
    inp_images = list(paths[0])
    segm_annotations = list(paths[1])
    depth_annotations = list(paths[2])

    assert type(inp_images) is list
    assert type(segm_annotations) is list
    assert type(depth_annotations) is list

    tp = np.zeros(model.n_classes)
    fp = np.zeros(model.n_classes)
    fn = np.zeros(model.n_classes)
    n_pixels = np.zeros(model.n_classes)

    mean_abs_err_arr = []
    depth_acc_125_arr = []
    depth_acc_125_2_arr = []
    depth_acc_125_3_arr = []

    for inp, segm_ann, depth_ann in tqdm(
            zip(inp_images, segm_annotations, depth_annotations)):
        segm_pr, depth_pr = model.predict_segmentation(inp)

        segm_gt = get_segmentation_array(segm_ann, model.n_classes,
                                         model.output_width,
                                         model.output_height,
                                         no_reshape=True)
        segm_gt = segm_gt.argmax(-1)
        segm_pr = segm_pr.flatten()
        segm_gt = segm_gt.flatten()

        depth_gt = get_depth_array(depth_ann, model.output_width,
                                   model.output_height)
        depth_pr = depth_pr.flatten()
        depth_gt = depth_gt.flatten()

        # Predictions are inverted, then both prediction and ground truth are
        # clipped to the valid [MIN_DEPTH, MAX_DEPTH] range.
        depth_pr = 1 - depth_pr
        depth_pr[depth_pr > MAX_DEPTH] = MAX_DEPTH
        depth_pr[depth_pr < MIN_DEPTH] = MIN_DEPTH
        depth_gt[depth_gt > MAX_DEPTH] = MAX_DEPTH
        depth_gt[depth_gt < MIN_DEPTH] = MIN_DEPTH

        # Depth threshold accuracies at 1.25, 1.25**2 and 1.25**3.
        depth_acc_125 = depth_accuracy(depth_pr, depth_gt, threshold=1.25)
        depth_acc_125_2 = depth_accuracy(depth_pr, depth_gt,
                                         threshold=1.25**2)
        depth_acc_125_3 = depth_accuracy(depth_pr, depth_gt,
                                         threshold=1.25**3)

        depth_acc_125_arr.append(np.mean(depth_acc_125))
        depth_acc_125_2_arr.append(np.mean(depth_acc_125_2))
        depth_acc_125_3_arr.append(np.mean(depth_acc_125_3))

        mean_abs_err = metrics.mean_absolute_error(depth_pr, depth_gt)
        mean_abs_err_arr.append(mean_abs_err)

        for cl_i in range(model.n_classes):
            tp[cl_i] += np.sum((segm_pr == cl_i) * (segm_gt == cl_i))
            fp[cl_i] += np.sum((segm_pr == cl_i) * (segm_gt != cl_i))
            fn[cl_i] += np.sum((segm_pr != cl_i) * (segm_gt == cl_i))
            n_pixels[cl_i] += np.sum(segm_gt == cl_i)

    mean_abs_err = np.mean(np.array(mean_abs_err_arr))
    depth_acc_125 = np.mean(np.array(depth_acc_125_arr))
    depth_acc_125_2 = np.mean(np.array(depth_acc_125_2_arr))
    depth_acc_125_3 = np.mean(np.array(depth_acc_125_3_arr))

    cl_wise_score = tp / (tp + fp + fn + EPS)
    n_pixels_norm = n_pixels / np.sum(n_pixels)
    frequency_weighted_IU = np.sum(cl_wise_score * n_pixels_norm)
    mean_IU = np.mean(cl_wise_score)

    return {
        "frequency_weighted_IU": frequency_weighted_IU,
        "mean_IU": mean_IU,
        "class_wise_IU": cl_wise_score,
        "depth_acc_125": depth_acc_125,
        "depth_acc_125_2": depth_acc_125_2,
        "depth_acc_125_3": depth_acc_125_3,
        "mean_abs_err": mean_abs_err
    }
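# Usage sketch for evaluate_depth_segm(), assuming a multi-task checkpoint at
# a hypothetical path; the returned dict mixes IoU and depth metrics.
def _example_evaluate_depth_segm():
    results = evaluate_depth_segm(
        inp_images_dir="dataset/images_test/",           # hypothetical path
        segm_annotations_dir="dataset/segs_test/",       # hypothetical path
        depth_annotations_dir="dataset/depths_test/",    # hypothetical path
        checkpoints_path="checkpoints/unet_depth_segm")  # hypothetical path
    print(results["mean_IU"], results["mean_abs_err"],
          results["depth_acc_125"])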