Example #1
    def __getitem__(self, i):
        keypoints = self.joints[i]
        image_path = os.path.join(self.data_dir, self.imgs[i])
        image = cv2.imread(image_path, -1)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = imageutils.convert_range(image, [0, 255], [-1, 1])

        image_stickman = make_joint_img(
            self.img_shape,
            self.joint_order,
            keypoints * np.array(self.img_shape).reshape((-1, 2)),
        )
        image_stickman = imageutils.convert_range(image_stickman, [0, 255], [-1, 1])

        part_img, part_stickman = stickman.VUNetStickman.normalize(
            image,
            keypoints * np.array(self.img_shape).reshape((-1, 2)),
            image_stickman,
            self.joint_order,
            self.box_factor,
        )
        # part_img = imageutils.convert_range(part_img, [0, 255], [0, 1])
        # part_stickman = imageutils.convert_range(part_stickman, [0, 255], [0, 1])

        # build example for output
        example = {
            "image_path": image_path,
            "image": image,
            "image_stickman": image_stickman,
            "part_image": part_img,
            "part_stickman": part_stickman,
            "keypoints": keypoints,
            "joint_order" : self.joint_order
        }
        return example
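The expression keypoints * np.array(self.img_shape).reshape((-1, 2)) broadcasts the image shape against every keypoint row, i.e. it rescales relative keypoint coordinates to pixel coordinates. A toy illustration of that broadcasting; the (height, width) img_shape and the [0, 1] keypoint convention are assumptions, not stated in the snippet:

import numpy as np

# Hypothetical values for illustration only.
img_shape = (128, 128)
keypoints = np.array([[0.5, 0.5],
                      [0.25, 0.75]])  # assumed relative coordinates in [0, 1]
pixel_keypoints = keypoints * np.array(img_shape).reshape((-1, 2))
print(pixel_keypoints)  # [[64. 64.] [32. 96.]]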
Example #2
    def test_convert_range(self):
        from supermariopy.imageutils import convert_range

        a = np.array([0, 1])
        with pytest.raises(ValueError):
            convert_range(a, [1, 0], [0, 1])

        with pytest.raises(ValueError):
            convert_range(a, [0, 1], [1, 0])
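Judging from this test and from how it is called throughout these examples, convert_range is a linear rescaling from one value range to another that rejects descending ranges. A minimal sketch of such a function, written here purely for illustration and not taken from supermariopy:

import numpy as np


def convert_range_sketch(array, input_range, target_range):
    # Hypothetical re-implementation: linearly map values from input_range
    # to target_range; both ranges must be ascending, e.g. [0, 255].
    if input_range[1] <= input_range[0] or target_range[1] <= target_range[0]:
        raise ValueError("ranges must be ascending, e.g. [0, 255]")
    a, b = input_range
    c, d = target_range
    return (np.asarray(array, dtype=np.float64) - a) / (b - a) * (d - c) + c


print(convert_range_sketch(np.array([0, 127.5, 255]), [0, 255], [-1, 1]))  # [-1.  0.  1.]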
Example #3
def batched_keypoints_to_segments(
    img: np.ndarray,
    keypoints: np.ndarray,
    segmentation_algorithm: SegmentationAlgorithm,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    n_keypoints = keypoints.shape[0]
    MAP = segmentation_algorithm(img, keypoints)
    MAP_colorized = imageutils.make_colors(n_keypoints + 1,
                                           with_background=True,
                                           background_id=0)[MAP]
    heatmaps = imageutils.keypoints_to_heatmaps(img.shape[:2], keypoints,
                                                segmentation_algorithm.var)
    heatmaps *= heatmaps > 0.8
    heatmaps_rgb = imageutils.colorize_heatmaps(
        heatmaps[np.newaxis, ...], imageutils.make_colors(n_keypoints))

    img_resized = cv2.resize(img, (256, 256), interpolation=cv2.INTER_LINEAR)
    img_resized = imageutils.convert_range(img_resized, [0, 255], [0, 1])
    im_with_keypoints = imageutils.draw_keypoint_markers(
        img_resized,
        keypoints,
        marker_list=[str(i)
                     for i in range(10)] + ["x", "o", "v", "<", ">", "*"],
        font_scale=1,
        thickness=4,
    )
    im_with_keypoints = cv2.resize(im_with_keypoints,
                                   (img.shape[1], img.shape[1]),
                                   interpolation=cv2.INTER_LINEAR)
    return MAP, MAP_colorized, heatmaps_rgb, im_with_keypoints
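A usage sketch for the function above, pieced together from the test in Example #7 further down; the keypoint convention ([-1, 1] relative coordinates) and the [0, 255] image range are assumptions inferred from how the helpers are called in these examples:

import numpy as np
from skimage import data
from supermariopy.crf import SegmentationFromKeypoints

n_keypoints = 10
keypoints = np.stack(
    [np.linspace(-1, 1, n_keypoints), np.linspace(-1, 1, n_keypoints)], axis=1
)
img = data.astronaut()  # uint8 image in [0, 255]
segmentation_algorithm = SegmentationFromKeypoints(0.05)  # var as in Example #7
MAP, MAP_colorized, heatmaps_rgb, im_with_keypoints = batched_keypoints_to_segments(
    img, keypoints, segmentation_algorithm
)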
Example #4
def write_rgb(img_rgb, target_dir, n_processes=8):
    """
    img_rgb : [N, H, W, 3] shaped array
    target_dir : str
    """

    img_rgb = imageutils.convert_range(img_rgb, [0, 1], [0, 255])

    arg_tuples = list(
        zip(img_rgb, range(len(img_rgb)), [target_dir] * len(img_rgb)))
    with closing(Pool(n_processes)) as p:
        p.map(_write_rgb, arg_tuples)
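The worker _write_rgb is not shown in this collection. A plausible minimal counterpart, assuming each tuple is (image, index, target_dir) as built above and that frames are written as zero-padded PNGs; this is a hypothetical sketch, not the library's actual code:

import os

import cv2
import numpy as np


def _write_rgb(arg_tuple):
    # Hypothetical worker: unpack the tuple built in write_rgb and save the
    # RGB frame as an 8-bit PNG (OpenCV expects BGR channel order).
    img, idx, target_dir = arg_tuple
    img_bgr = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_RGB2BGR)
    cv2.imwrite(os.path.join(target_dir, "{:06d}.png".format(idx)), img_bgr)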
Example #5
def main(iuv_path):
    I, u, v = denseposelib.load_iuv(iuv_path)
    base_dir = os.path.dirname(iuv_path)
    out_name = "{}_IRGB.png".format(os.path.splitext(iuv_path)[0])
    colors = imageutils.make_colors(
        len(denseposelib.PART_LIST),
        cmap=plt.cm.coolwarm,
        with_background=True,
        background_id=0,
    )
    I_colors = imageutils.convert_range(colors[I], [0, 1], [0, 255])
    cv2.imwrite(os.path.join(base_dir, out_name), I_colors)
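Note that cv2.imwrite interprets the last axis as BGR while make_colors produces RGB rows, so the part colors are saved channel-swapped here. Example #11 below converts explicitly before writing; the same could be done in this script (sketch reusing the variables above):

# Continuation of the example above: convert RGB -> BGR before writing.
I_colors_bgr = cv2.cvtColor(I_colors.astype(np.uint8), cv2.COLOR_RGB2BGR)
cv2.imwrite(os.path.join(base_dir, out_name), I_colors_bgr)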
Example #6
def write_labels(labels, output_dir, colors, n_processes=8):
    """
    labels: [N, H, W] shaped array
    output_dir : str
    colors : [n_classes, 3] shaped array
    """
    labels = imageutils.convert_range(colors[labels].astype(np.float32),
                                      [0, 1], [0, 255])

    arg_tuples = list(
        zip(labels, range(len(labels)), [output_dir] * len(labels)))
    with closing(Pool(n_processes)) as p:
        p.map(_write_labels, arg_tuples)
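The colorization in this and the surrounding examples relies on NumPy fancy indexing: indexing the [n_classes, 3] color table with an integer label map returns a per-pixel RGB lookup. A self-contained toy example of the pattern:

import numpy as np

colors = np.array([[0.0, 0.0, 0.0],   # class 0: background
                   [1.0, 0.0, 0.0],   # class 1
                   [0.0, 1.0, 0.0]])  # class 2
labels = np.array([[0, 1],
                   [2, 1]])
rgb = colors[labels]
print(rgb.shape)  # (2, 2, 3) -- one color row per pixel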
Example #7
    def test_segmentationFromKeypoints_lowRangeError(self):
        from supermariopy.crf import SegmentationFromKeypoints
        from supermariopy import imageutils
        from skimage import data

        n_keypoints = 10
        var = 0.05
        keypoints = np.stack(
            [np.linspace(-1, 1, n_keypoints), np.linspace(-1, 1, n_keypoints)], axis=1
        )

        img = data.astronaut()
        img = imageutils.convert_range(img, [0, 255], [0, 1])
        segmentation_algorithm = SegmentationFromKeypoints(var)
        with pytest.warns(Warning):
            labels = segmentation_algorithm(img, keypoints)
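The test expects SegmentationFromKeypoints to warn when the image only spans [0, 1] instead of the [0, 255] range used elsewhere in these examples. The actual check inside the class is not shown here; a guard of roughly this shape would satisfy the test (sketch under that assumption):

import warnings

import numpy as np


def warn_if_low_range(img: np.ndarray):
    # Hypothetical guard: an image whose values all fall inside [0, 1] was
    # probably not converted to the expected [0, 255] range.
    if img.min() >= 0 and img.max() <= 1:
        warnings.warn("img has a low dynamic range; expected values in [0, 255]")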
Example #8
def batched_keypoints_to_segments(img, keypoints, segmentation_algorithm):
    n_keypoints = keypoints.shape[0]
    MAP = segmentation_algorithm(img, keypoints)
    MAP_colorized = imageutils.make_colors(n_keypoints + 1,
                                           with_background=True,
                                           background_id=0)[MAP]
    heatmaps = imageutils.keypoints_to_heatmaps(img.shape[:2], keypoints,
                                                segmentation_algorithm.var)
    heatmaps *= heatmaps > 0.8
    heatmaps_rgb = imageutils.colorize_heatmaps(
        heatmaps[np.newaxis, ...], imageutils.make_colors(n_keypoints))
    im_with_keypoints = imageutils.draw_keypoint_markers(
        imageutils.convert_range(img, [0, 255], [0, 1]),
        keypoints,
        marker_list=[str(i)
                     for i in range(10)] + ["x", "o", "v", "<", ">", "*"],
        font_scale=0.5,
    )
    return MAP, MAP_colorized, heatmaps_rgb, im_with_keypoints
Example #9
    def test_draw_keypoint_markers(self):
        from skimage import data
        from supermariopy.imageutils import draw_keypoint_markers, convert_range
        from matplotlib import pyplot as plt

        astronaut = data.astronaut()
        astronaut = convert_range(astronaut, [0, 255], [0, 1])
        keypoints = np.stack([np.linspace(-1, 1, 10),
                              np.linspace(-1, 1, 10)],
                             axis=1)
        img_marked = draw_keypoint_markers(
            astronaut,
            keypoints,
            font_scale=2,
            thickness=3,
            marker_list=["1", "2", "3", "x", "o", "v"],
        )
        fig, ax = plt.subplots(1, 1, figsize=(6, 6))
        ax.imshow(img_marked)
        ax.plot(np.arange(512), np.arange(512))
        return fig
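The line ax.plot(np.arange(512), np.arange(512)) draws the image diagonal, which is exactly where the evenly spaced keypoints land, so marker placement can be verified visually. The returned figure can then be saved or closed as usual, for example:

# Continuation of the test above.
fig.savefig("keypoint_markers.png", dpi=150, bbox_inches="tight")
plt.close(fig)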
Example #10
def make_figure_1(data: dict, root: str, config: dict, global_step: int):
    figure01_options = config.get("figure01_options")
    dp_semantic_remap_dict = config.get("dp_semantic_remap_dict")
    dp_new_part_list = sorted(list(dp_semantic_remap_dict.keys()))
    dp_remap_dict = denseposelib.semantic_remap_dict2remap_dict(
        dp_semantic_remap_dict, dp_new_part_list)

    inferred_segmentation = (
        data["outputs"][figure01_options["inferred_segmentation_key"]] + 1
    )  # +1 because the visualizer code uses + 1
    sampled_segmentation = data["outputs"][
        figure01_options["sampled_mask_key"]]
    images = data["inputs"][figure01_options["input_view_key"]]
    generated = data["outputs"][figure01_options["generated_image_key"]]
    groundtruth_segmentation = data["batches"][
        figure01_options["gt_segmentation_key"]]
    groundtruth_segmentation = denseposelib.resize_labels(
        groundtruth_segmentation, (128, 128))
    remapped_gt_segmentation = denseposelib.remap_parts(
        groundtruth_segmentation, dp_remap_dict)
    best_remapping = denseposelib.compute_best_iou_remapping(
        inferred_segmentation, remapped_gt_segmentation)
    remapped_inferred = denseposelib.remap_parts(inferred_segmentation,
                                                 best_remapping)

    ncols = 7
    n_inferred_parts = config.get("n_inferred_parts", 10)
    colors = make_mask_colors(len(set(dp_new_part_list)), background_id=1)

    df = pd.DataFrame(columns=["global_step", "batch_idx"] + dp_new_part_list)

    for i in range(
            len(inferred_segmentation)
    ):  # TODO: maybe replace this amount of plots by parameters in the config file
        image_container = []

        # remap inferred segmentation
        old_inferred = inferred_segmentation[i]
        current_sampled_segmentation = np.argmax(sampled_segmentation[i], -1)
        old_inferred_colors = make_mask_colors(n_inferred_parts,
                                               background_id=1)

        image_container.append(old_inferred_colors[old_inferred - 1])
        image_container.append(
            old_inferred_colors[current_sampled_segmentation])

        new_inferred = remapped_inferred[i]
        current_gt_segmentation = remapped_gt_segmentation[i]

        # remap GT segmentation
        iou, iou_labels = denseposelib.compute_iou(new_inferred,
                                                   current_gt_segmentation)

        # filter out background
        iou_filter = np.ones_like(iou) == 1.0
        iou_filter[iou_labels == dp_new_part_list.index("background")] = False

        df_update = {p: -1.0 for p in dp_new_part_list}
        df_update.update({
            p: float(np.squeeze(iou[pi == iou_labels]))
            for pi, p in enumerate(dp_new_part_list) if pi in iou_labels
        })
        df_update.update({"batch_idx": i, "global_step": global_step})

        df = pd.concat([df, pd.DataFrame([df_update])], ignore_index=True)

        filtered_iou = iou[iou_filter]
        mean_iou = np.mean(filtered_iou)

        image_container.append(colors[new_inferred])
        image_container.append(colors[current_gt_segmentation])

        legend_labels = []
        for pi, p in enumerate(dp_new_part_list):
            if pi in iou_labels:
                p_iou = np.squeeze(iou[np.argwhere(iou_labels == pi)])
            else:
                p_iou = 0.0
            legend_labels.append(p + " - IOU : {:.03f}".format(p_iou))
        legend_labels.append("mIOU (no BG) : {:.03f}".format(mean_iou))
        colors = np.concatenate([colors, np.reshape([0, 0, 0], (1, 3))],
                                axis=0)
        text_colors = [1, 1, 1] * len(colors)
        legend_image = utils.make_legend_image(legend_labels, colors,
                                               text_colors, (128, 128), 1)
        image_container.append(legend_image)

        current_image = images[i]
        current_generated = generated[i]

        image_container.append(
            imageutils.convert_range(current_image, [-1, 1], [0, 1]))
        image_container.append(
            imageutils.convert_range(current_generated, [-1, 1], [0, 1]))

        # write files
        out_path = os.path.join(root, "figure_01")
        os.makedirs(out_path, exist_ok=True)
        out_image = np.stack(image_container)
        out_image = imageutils.convert_range(out_image, [0, 1], [-1, 1])
        plot_batch(out_image,
                   os.path.join(out_path, "{:06d}.png".format(i)),
                   cols=ncols)

    df.to_csv(os.path.join(root, "part_ious.csv"), index=False, header=True)

    df_mean = df[df != -1].mean().to_frame().transpose()
    with open(os.path.join(root, "mean_part_ios.csv"), "w") as f:
        print(
            tabulate(df_mean,
                     headers="keys",
                     tablefmt="psql",
                     showindex="never"),
            file=f,
        )
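The per-part numbers collected in the DataFrame are intersection-over-union scores of one label between the remapped inferred and ground-truth segmentations; the exact return format of denseposelib.compute_iou is only inferred from its usage above. The metric itself can be illustrated with a tiny self-contained example:

import numpy as np


def iou_for_label(pred, gt, label):
    # Intersection over union of one integer label between two label maps.
    pred_mask = pred == label
    gt_mask = gt == label
    union = np.logical_or(pred_mask, gt_mask).sum()
    if union == 0:
        return 0.0
    return np.logical_and(pred_mask, gt_mask).sum() / union


pred = np.array([[0, 1],
                 [1, 1]])
gt = np.array([[0, 1],
               [0, 1]])
print(iou_for_label(pred, gt, 1))  # 2 overlapping pixels / 3 in the union = 0.666...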
Example #11
def main(infer_dir, output_folder, run_crf_config, n_processes):

    os.makedirs(output_folder, exist_ok=True)

    with open(run_crf_config, "r") as f:
        config = yaml.safe_load(f)

    segmentation_algorithm_args = config["segmentation_algorithm_args"]
    npz_files = glob.glob(os.path.join(infer_dir, "*.npz"))
    npz_files = sorted(npz_files)

    print("Using files :")
    print(npz_files)

    segmentation_algorithm = crf.SegmentationFromKeypoints(
        **segmentation_algorithm_args)

    data = []
    with closing(Pool(n_processes)) as p:
        for outputs in tqdm.tqdm(p.imap(load_npz, npz_files)):
            data.append(outputs)
    data = list_of_dicts2dict_of_lists(data)
    data = {k: np.concatenate(data[k]) for k in ["image", "gauss_yx"]}
    data["gauss_yx"] = data["gauss_yx"][..., ::-1]

    process_func = functools.partial(
        process_batches, **{
            "segmentation_algorithm": segmentation_algorithm,
        })
    tuples = list(
        zip(np.array_split(data["image"], n_processes, 0),
            np.array_split(data["gauss_yx"], n_processes, 0)))
    processed_data = []
    with closing(Pool(n_processes)) as p:
        for outputs in tqdm.tqdm(p.imap(process_func, tuples)):
            processed_data.append(outputs)

    labels = np.concatenate([p["labels"] for p in processed_data], 0)
    labels_rgb = np.concatenate([p["labels_rgb"] for p in processed_data], 0)
    heatmaps = np.concatenate([p["heatmaps"] for p in processed_data], 0)
    ims_with_keypoints = np.concatenate(
        [p["ims_with_keypoints"] for p in processed_data], 0)

    target_dir = os.path.join(output_folder, "01_keypoints")
    os.makedirs(target_dir, exist_ok=True)
    write_rgb(ims_with_keypoints, target_dir, n_processes)

    target_dir = os.path.join(output_folder, "02_heatmaps")
    os.makedirs(target_dir, exist_ok=True)
    write_rgb(heatmaps, target_dir, n_processes)

    target_dir = os.path.join(output_folder, "03_labels_rgb")
    os.makedirs(target_dir, exist_ok=True)
    write_rgb(labels_rgb, target_dir, n_processes)

    densepose_csv_path = config["densepose_csv_path"]
    data_root = config["data_root"]
    fname_col = config["data_fname_col"]

    iuv_files = get_iuv_files(densepose_csv_path, data_root, len(labels),
                              fname_col)
    iuvs = [cv2.imread(x, -1) for x in iuv_files]
    iuvs = [
        denseposelib.resize_labels(i[..., 0], labels.shape[1:]) for i in iuvs
    ]
    iuvs = np.stack(iuvs, axis=0)

    dp_semantic_remap_dict = config["dp_semantic_remap_dict"]
    dp_new_part_list = sorted(list(dp_semantic_remap_dict.keys()))
    dp_remap_dict = denseposelib.semantic_remap_dict2remap_dict(
        dp_semantic_remap_dict, dp_new_part_list)

    remapped_gt_segmentation, remapped_inferred = denseposelib.get_best_segmentation(
        iuvs, labels, dp_remap_dict)

    df = pd.DataFrame(columns=["batch_idx"] + dp_new_part_list)

    df = denseposelib.calculate_iou_df(remapped_inferred,
                                       remapped_gt_segmentation,
                                       dp_new_part_list)
    df.to_csv(os.path.join(output_folder, "part_ious.csv"),
              index=False,
              header=True)
    df_mean = denseposelib.calculate_overall_iou_from_df(df)
    with open(os.path.join(output_folder, "mean_part_ios.csv"), "w") as f:
        print(
            tabulate(df_mean,
                     headers="keys",
                     tablefmt="psql",
                     showindex="never"),
            file=f,
        )

    target_dir = os.path.join(output_folder, "04_compare")
    os.makedirs(target_dir, exist_ok=True)

    background_color = np.array([1, 1, 1])
    colors1 = imageutils.make_colors(config["n_inferred_parts"] + 1,
                                     with_background=True,
                                     background_id=0)
    colors2 = imageutils.make_colors(
        len(dp_new_part_list),
        with_background=True,
        background_id=dp_new_part_list.index("background"))
    for i, (im1, im2, im3) in enumerate(
            zip(labels, remapped_inferred, remapped_gt_segmentation)):
        canvas = np.concatenate([colors1[im1], colors2[im2], colors2[im3]],
                                1).astype(np.float32)
        canvas = cv2.cvtColor(canvas, cv2.COLOR_RGB2BGR)
        fname = os.path.join(target_dir, "{:06d}.png".format(i))
        cv2.imwrite(fname, imageutils.convert_range(canvas, [0, 1], [0, 255]))
        # batches.plot_batch(
        #     imageutils.convert_range(canvas, [0, 1], [-1, 1]), fname, cols=3
        # )

    target_dir = os.path.join(output_folder, "05_remapped_inferred")
    os.makedirs(target_dir, exist_ok=True)
    write_labels(remapped_inferred, target_dir, colors2, n_processes)

    target_dir = os.path.join(output_folder, "06_remapped_labels")
    os.makedirs(target_dir, exist_ok=True)
    write_labels(remapped_gt_segmentation, target_dir, colors2, n_processes)
            "data_root": "/mnt/comp/code/nips19/data/exercise_data/exercise_dataset",
            "data_csv": "/mnt/comp/code/nips19/data/exercise_data/exercise_dataset/csvs/instance_level_train_split.csv",
            "data_avoid_identity": False,
            "data_flip": True,
            "data_csv_columns": ["character_id", "relative_file_path_"],
            "data_csv_has_header": True,
        },
        "dset2": "weakly_supervised_dataset.CSVSegmentationDataset",  # supervised set
        "dset2_config": {
            "spatial_size": 256,
            "data_root": "/mnt/comp/code/nips19/data/exercise_data/exercise_dataset",
            "data_csv": "/mnt/comp/code/nips19/data/exercise_data/exercise_dataset/denseposed_csv/denseposed_instance_level_train_split.csv",
            "pandas_kwargs": {},
            "image_colname": "im1",
            "labels_colname": "denseposed_im1",
        },
        "dset2_n_samples": 10,
        "example_mapping": {
            "dset1_view0": "image_unsupervised",
            "dset2_image": "image_supervised",
            "dset2_labels": "labels_supervised",
        },
    }

    dset = WeaklySupervisedDataset(config)
    example = dset.get_example(0)
    example.keys()
    plt.imshow(imageutils.convert_range(example["image_unsupervised"], [-1, 1], [0, 1]))
    plt.imshow(imageutils.convert_range(example["image_supervised"], [-1, 1], [0, 1]))
    plt.imshow(example["labels_supervised"])
Example #13
def draw_keypoint_markers(
    img: np.ndarray,
    keypoints: np.ndarray,
    font_scale: float = 0.5,
    thickness: int = 2,
    font=cv2.FONT_HERSHEY_SIMPLEX,
    marker_list=["o", "v", "x", "+", "<", "-", ">", "c"],
) -> np.ndarray:
    """ Draw keypoints on image with markers
    
    Parameters
    ----------
    img : np.ndarray
        shaped [H, W, 3] array  in range [0, 1]
    keypoints : np.ndarray
        shaped [kp, 2] - array giving keypoint positions in range [-1, 1] for x and y. keypoints[:, 0] is x-coordinate (horizontal).
    font_scale : float, optional
        openCV font scale passed to 'cv2.putText', by default 0.5
    thickness : int, optional
        openCV font thickness passed to 'cv2.putText', by default 2
    font : cv2.FONT_xxx, optional
        openCV font, by default cv2.FONT_HERSHEY_SIMPLEX
    marker_list : list, optional
        marker characters to cycle through, one per keypoint,
        by default ["o", "v", "x", "+", "<", "-", ">", "c"]

    Returns
    -------
    np.ndarray
        copy of img with keypoint markers drawn onto it

    Examples
    --------

        from skimage import data
        from supermariopy.imageutils import convert_range
        astronaut = convert_range(data.astronaut(), [0, 255], [0, 1])
        keypoints = np.stack([np.linspace(-1, 1, 10), np.linspace(-1, 1, 10)], axis=1)
        img_marked = draw_keypoint_markers(astronaut, keypoints, font_scale=2, thickness=3)
        plt.imshow(img_marked)
    """
    if not imageutils.is_in_range(img, [0, 1]):
        raise RangeError(img, "img", [0, 1])
    if img.shape[0] != img.shape[1]:
        raise ValueError("only square images are supported currently")

    img_marked = img.copy()
    keypoints = imageutils.convert_range(keypoints, [-1, 1], [0, img.shape[0] - 1])
    colors = imageutils.make_colors(
        keypoints.shape[0], bytes=False, cmap=plt.cm.inferno
    )
    for i, kp in enumerate(keypoints):
        text = marker_list[i % len(marker_list)]
        (label_width, label_height), baseline = cv2.getTextSize(
            text, font, font_scale, thickness
        )
        textX = kp[0]
        textY = kp[1]
        font_color = colors[i]
        text_position = (
            textX - label_width / 2.0 - baseline,
            textY - label_height / 2.0 + baseline,
        )
        text_position = tuple([int(x) for x in text_position])
        img_marked = cv2.putText(
            img_marked,
            text,
            text_position,
            font,
            font_scale,
            font_color,
            thickness=thickness,
        )
    return img_marked