import functools
import glob
import os
from contextlib import closing
from multiprocessing import Pool

import cv2
import numpy as np
import pandas as pd
import tqdm
import yaml
from tabulate import tabulate

# Project-local modules (denseposelib, imageutils, utils, crf, plot_batch,
# make_mask_colors, load_npz, process_batches, get_iuv_files, write_rgb,
# write_labels, list_of_dicts2dict_of_lists) are assumed to be importable
# from the surrounding repository.


def get_example(self, i):
    # Load the input view and its DensePose IUV ground-truth segmentation.
    view0_file_path = self.row_labels["file_path_"][i]
    relative_view0_file_path = self.row_labels["relative_file_path_"][i]
    view0 = self.preprocess_image(view0_file_path)

    gt_segmentation_file_path = self.row_labels["iuv_path_"][i]
    gt_segmentation_relative_file_path = self.row_labels["relative_iuv_path_"][i]
    gt_segmentation = self.preprocess_iuv(gt_segmentation_file_path)

    # Remap the fine-grained DensePose parts to the coarser part list defined
    # in the config, then resize the label map to the working resolution.
    dp_semantic_remap_dict = self.config.get("dp_semantic_remap_dict")
    dp_new_part_list = sorted(list(dp_semantic_remap_dict.keys()))
    dp_remap_dict = denseposelib.semantic_remap_dict2remap_dict(
        dp_semantic_remap_dict, dp_new_part_list
    )

    spatial_size = self.config.get("spatial_size")
    gt_segmentation_resized = denseposelib.resize_labels(
        gt_segmentation, (spatial_size, spatial_size)
    )
    remapped_gt_segmentation = denseposelib.remap_parts(
        gt_segmentation_resized, dp_remap_dict
    )

    # Both views are identical here; the remapped ground truth is exposed as
    # an external mask for evaluation.
    return {
        "view0": view0,
        "view1": view0,
        "file_path_": view0_file_path,
        "relative_file_path_": relative_view0_file_path,
        "external_mask": remapped_gt_segmentation,
        "gt_segmentation_file_path": gt_segmentation_file_path,
        "gt_segmentation_relative_file_path": gt_segmentation_relative_file_path,
    }
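# Hedged sketch (not from the repository): a minimal config illustrating the
# two fields that get_example reads. The part names and DensePose id groupings
# below are made up for illustration; the real mapping lives in the experiment
# config.
_EXAMPLE_DATASET_CONFIG = {
    "spatial_size": 128,
    "dp_semantic_remap_dict": {
        # new coarse part name -> list of original DensePose part ids to merge
        "background": [0],
        "torso": [1, 2],
        "head": [23, 24],
    },
}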
def make_figure_1(data: dict, root: str, config: dict, global_step: int):
    figure01_options = config.get("figure01_options")
    dp_semantic_remap_dict = config.get("dp_semantic_remap_dict")
    dp_new_part_list = sorted(list(dp_semantic_remap_dict.keys()))
    dp_remap_dict = denseposelib.semantic_remap_dict2remap_dict(
        dp_semantic_remap_dict, dp_new_part_list
    )

    inferred_segmentation = (
        data["outputs"][figure01_options["inferred_segmentation_key"]] + 1
    )  # +1 because the visualizer code uses +1
    sampled_segmentation = data["outputs"][figure01_options["sampled_mask_key"]]
    images = data["inputs"][figure01_options["input_view_key"]]
    generated = data["outputs"][figure01_options["generated_image_key"]]

    # Resize and remap the DensePose ground truth, then find the assignment of
    # inferred parts to ground-truth parts that maximizes IoU.
    groundtruth_segmentation = data["batches"][figure01_options["gt_segmentation_key"]]
    groundtruth_segmentation = denseposelib.resize_labels(
        groundtruth_segmentation, (128, 128)
    )
    remapped_gt_segmentation = denseposelib.remap_parts(
        groundtruth_segmentation, dp_remap_dict
    )
    best_remapping = denseposelib.compute_best_iou_remapping(
        inferred_segmentation, remapped_gt_segmentation
    )
    remapped_inferred = denseposelib.remap_parts(inferred_segmentation, best_remapping)

    ncols = 7
    n_inferred_parts = config.get("n_inferred_parts", 10)
    colors = make_mask_colors(len(set(dp_new_part_list)), background_id=1)

    df = pd.DataFrame(columns=["global_step", "batch_idx"] + dp_new_part_list)

    for i in range(
        len(inferred_segmentation)
    ):  # TODO: maybe replace this amount of plots by parameters in the config file
        image_container = []

        # Unremapped inferred segmentation and the sampled (appearance) mask.
        old_inferred = inferred_segmentation[i]
        current_sampled_segmentation = np.argmax(sampled_segmentation[i], -1)
        old_inferred_colors = make_mask_colors(n_inferred_parts, background_id=1)
        image_container.append(old_inferred_colors[old_inferred - 1])
        image_container.append(old_inferred_colors[current_sampled_segmentation])

        new_inferred = remapped_inferred[i]
        current_gt_segmentation = remapped_gt_segmentation[i]

        # Per-part IoU between remapped inferred and ground-truth parts,
        # excluding the background from the mean.
        iou, iou_labels = denseposelib.compute_iou(new_inferred, current_gt_segmentation)
        iou_filter = np.ones_like(iou, dtype=bool)
        iou_filter[iou_labels == dp_new_part_list.index("background")] = False

        df_update = {p: -1.0 for p in dp_new_part_list}
        df_update.update({
            p: float(np.squeeze(iou[pi == iou_labels]))
            for pi, p in enumerate(dp_new_part_list)
            if pi in iou_labels
        })
        df_update.update({"batch_idx": i, "global_step": global_step})
        df = df.append(df_update, ignore_index=True)

        filtered_iou = iou[iou_filter]
        mean_iou = np.mean(filtered_iou)

        image_container.append(colors[new_inferred])
        image_container.append(colors[current_gt_segmentation])

        # Build a legend with per-part IoU and the background-free mean IoU.
        legend_labels = []
        for pi, p in enumerate(dp_new_part_list):
            if pi in iou_labels:
                p_iou = np.squeeze(iou[np.argwhere(iou_labels == pi)])
            else:
                p_iou = 0.0
            legend_labels.append(p + " - IOU : {:.03f}".format(p_iou))
        legend_labels.append("mIOU (no BG) : {:.03f}".format(mean_iou))
        # Use a separate color table for the legend so `colors` does not grow
        # by an extra black row on every loop iteration.
        legend_colors = np.concatenate([colors, np.reshape([0, 0, 0], (1, 3))], axis=0)
        text_colors = [1, 1, 1] * len(legend_colors)
        legend_image = utils.make_legend_image(
            legend_labels, legend_colors, text_colors, (128, 128), 1
        )
        image_container.append(legend_image)

        current_image = images[i]
        current_generated = generated[i]
        image_container.append(
            imageutils.convert_range(current_image, [-1, 1], [0, 1])
        )
        image_container.append(
            imageutils.convert_range(current_generated, [-1, 1], [0, 1])
        )

        # write files
        out_path = os.path.join(root, "figure_01")
        os.makedirs(out_path, exist_ok=True)
        out_image = np.stack(image_container)
        out_image = imageutils.convert_range(out_image, [0, 1], [-1, 1])
        plot_batch(
            out_image, os.path.join(out_path, "{:06d}.png".format(i)), cols=ncols
        )

    df.to_csv(os.path.join(root, "part_ious.csv"), index=False, header=True)
    # Treat -1 entries (parts absent from an image) as missing when averaging.
    df_mean = df[df != -1].mean().to_frame().transpose()
    with open(os.path.join(root, "mean_part_ios.csv"), "w") as f:
        print(
            tabulate(df_mean, headers="keys", tablefmt="psql", showindex="never"),
            file=f,
        )
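# Hedged usage sketch (not part of the repository): make_figure_1 expects the
# evaluation results grouped into "inputs", "outputs" and "batches", with the
# concrete key names supplied via config["figure01_options"]. All key names,
# shapes, part groupings and dummy arrays below are illustrative assumptions.
def _example_make_figure_1_call(root, global_step):
    n, h, w, parts = 4, 128, 128, 10
    config = {
        "n_inferred_parts": parts,
        "dp_semantic_remap_dict": {"background": [0], "torso": [1], "head": [2]},
        "figure01_options": {
            "inferred_segmentation_key": "part_labels",
            "sampled_mask_key": "sampled_mask",
            "input_view_key": "view0",
            "generated_image_key": "generated",
            "gt_segmentation_key": "external_mask",
        },
    }
    data = {
        "inputs": {"view0": np.random.uniform(-1, 1, (n, h, w, 3))},
        "outputs": {
            "part_labels": np.random.randint(0, parts, (n, h, w)),      # label map
            "sampled_mask": np.random.uniform(0, 1, (n, h, w, parts)),  # soft masks
            "generated": np.random.uniform(-1, 1, (n, h, w, 3)),
        },
        "batches": {"external_mask": np.random.randint(0, 3, (n, h, w))},
    }
    make_figure_1(data, root, config, global_step)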
def main(infer_dir, output_folder, run_crf_config, n_processes):
    os.makedirs(output_folder, exist_ok=True)
    with open(run_crf_config, "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    segmentation_algorithm_args = config["segmentation_algorithm_args"]

    npz_files = glob.glob(os.path.join(infer_dir, "*.npz"))
    npz_files = sorted(npz_files)
    print("Using files :")
    print(npz_files)

    segmentation_algorithm = crf.SegmentationFromKeypoints(**segmentation_algorithm_args)

    # Load all inference outputs in parallel and merge them into single arrays.
    data = []
    with closing(Pool(n_processes)) as p:
        for outputs in tqdm.tqdm(p.imap(load_npz, npz_files)):
            data.append(outputs)
    data = list_of_dicts2dict_of_lists(data)
    data = {k: np.concatenate(data[k]) for k in ["image", "gauss_yx"]}
    data["gauss_yx"] = data["gauss_yx"][..., ::-1]

    # Run the CRF-based segmentation from keypoints on n_processes splits.
    process_func = functools.partial(
        process_batches, **{"segmentation_algorithm": segmentation_algorithm}
    )
    tuples = list(
        zip(
            np.array_split(data["image"], n_processes, 0),
            np.array_split(data["gauss_yx"], n_processes, 0),
        )
    )
    processed_data = []
    with closing(Pool(n_processes)) as p:
        for outputs in tqdm.tqdm(p.imap(process_func, tuples)):
            processed_data.append(outputs)

    labels = np.concatenate([p["labels"] for p in processed_data], 0)
    labels_rgb = np.concatenate([p["labels_rgb"] for p in processed_data], 0)
    heatmaps = np.concatenate([p["heatmaps"] for p in processed_data], 0)
    ims_with_keypoints = np.concatenate(
        [p["ims_with_keypoints"] for p in processed_data], 0
    )

    # Dump intermediate visualizations.
    target_dir = os.path.join(output_folder, "01_keypoints")
    os.makedirs(target_dir, exist_ok=True)
    write_rgb(ims_with_keypoints, target_dir, n_processes)

    target_dir = os.path.join(output_folder, "02_heatmaps")
    os.makedirs(target_dir, exist_ok=True)
    write_rgb(heatmaps, target_dir, n_processes)

    target_dir = os.path.join(output_folder, "03_labels_rgb")
    os.makedirs(target_dir, exist_ok=True)
    write_rgb(labels_rgb, target_dir, n_processes)

    # Load the DensePose IUV ground truth and remap it to the coarse part list.
    densepose_csv_path = config["densepose_csv_path"]
    data_root = config["data_root"]
    fname_col = config["data_fname_col"]
    iuv_files = get_iuv_files(densepose_csv_path, data_root, len(labels), fname_col)
    iuvs = [cv2.imread(x, -1) for x in iuv_files]
    iuvs = [denseposelib.resize_labels(i[..., 0], labels.shape[1:]) for i in iuvs]
    iuvs = np.stack(iuvs, axis=0)

    dp_semantic_remap_dict = config["dp_semantic_remap_dict"]
    dp_new_part_list = sorted(list(dp_semantic_remap_dict.keys()))
    dp_remap_dict = denseposelib.semantic_remap_dict2remap_dict(
        dp_semantic_remap_dict, dp_new_part_list
    )

    remapped_gt_segmentation, remapped_inferred = denseposelib.get_best_segmentation(
        iuvs, labels, dp_remap_dict
    )

    # Per-image, per-part IoU table and its overall mean.
    df = denseposelib.calculate_iou_df(
        remapped_inferred, remapped_gt_segmentation, dp_new_part_list
    )
    df.to_csv(os.path.join(output_folder, "part_ious.csv"), index=False, header=True)
    df_mean = denseposelib.calculate_overall_iou_from_df(df)
    with open(os.path.join(output_folder, "mean_part_ios.csv"), "w") as f:
        print(
            tabulate(df_mean, headers="keys", tablefmt="psql", showindex="never"),
            file=f,
        )

    # Side-by-side comparison of raw inferred labels, remapped inferred parts
    # and the remapped ground truth.
    target_dir = os.path.join(output_folder, "04_compare")
    os.makedirs(target_dir, exist_ok=True)
    background_color = np.array([1, 1, 1])
    colors1 = imageutils.make_colors(
        config["n_inferred_parts"] + 1, with_background=True, background_id=0
    )
    colors2 = imageutils.make_colors(
        len(dp_new_part_list),
        with_background=True,
        background_id=dp_new_part_list.index("background"),
    )
    for i, (im1, im2, im3) in enumerate(
        zip(labels, remapped_inferred, remapped_gt_segmentation)
    ):
        canvas = np.concatenate(
            [colors1[im1], colors2[im2], colors2[im3]], 1
        ).astype(np.float32)
        canvas = cv2.cvtColor(canvas, cv2.COLOR_RGB2BGR)
        fname = os.path.join(target_dir, "{:06d}.png".format(i))
        cv2.imwrite(fname, imageutils.convert_range(canvas, [0, 1], [0, 255]))
        # batches.plot_batch(
        #     imageutils.convert_range(canvas, [0, 1], [-1, 1]), fname, cols=3
        # )

    target_dir = os.path.join(output_folder, "05_remapped_inferred")
    os.makedirs(target_dir, exist_ok=True)
    write_labels(remapped_inferred, target_dir, colors2, n_processes)

    target_dir = os.path.join(output_folder, "06_remapped_labels")
    os.makedirs(target_dir, exist_ok=True)
    write_labels(remapped_gt_segmentation, target_dir, colors2, n_processes)
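# Hedged sketch (not from the repository): the fields main() reads from the
# run-CRF yaml config, written out and passed in programmatically. The paths,
# column name, part groupings and segmentation arguments below are
# illustrative assumptions; segmentation_algorithm_args must match whatever
# crf.SegmentationFromKeypoints accepts.
if __name__ == "__main__":
    example_config = {
        "segmentation_algorithm_args": {},  # kwargs for crf.SegmentationFromKeypoints
        "densepose_csv_path": "data/densepose.csv",
        "data_root": "data/images",
        "data_fname_col": "im1",
        "n_inferred_parts": 10,
        "dp_semantic_remap_dict": {"background": [0], "torso": [1, 2]},
    }
    with open("run_crf_config.yaml", "w") as f:
        yaml.safe_dump(example_config, f)
    main(
        infer_dir="inference_outputs",  # directory containing the *.npz inference dumps
        output_folder="crf_evaluation",
        run_crf_config="run_crf_config.yaml",
        n_processes=8,
    )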