Code example #1
    def test_segmentationFromKeypoints(self):
        from supermariopy.crf import SegmentationFromKeypoints
        from supermariopy import imageutils
        from skimage import data

        n_keypoints = 10
        var = 0.05
        keypoints = np.stack(
            [np.linspace(-1, 1, n_keypoints),
             np.linspace(-1, 1, n_keypoints)],
            axis=1)

        img = data.astronaut()
        segmentation_algorithm = SegmentationFromKeypoints(var)
        labels = segmentation_algorithm(img, keypoints)
        labels_rgb = imageutils.make_colors(n_keypoints + 1)[labels]
        heatmaps = imageutils.keypoints_to_heatmaps(img.shape[:2], keypoints,
                                                    var)
        heatmaps_rgb = imageutils.colorize_heatmaps(
            heatmaps[np.newaxis, ...], imageutils.make_colors(n_keypoints))
        fig, axes = plt.subplots(1, 2, figsize=(8, 4))
        axes[0].imshow(labels_rgb)
        axes[0].set_axis_off()

        axes[1].imshow(np.squeeze(heatmaps_rgb))
        axes[1].set_axis_off()
        return fig
Code example #2
def batched_keypoints_to_segments(
    img: np.ndarray,
    keypoints: np.ndarray,
    segmentation_algorithm: SegmentationAlgorithm,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    n_keypoints = keypoints.shape[0]
    MAP = segmentation_algorithm(img, keypoints)
    MAP_colorized = imageutils.make_colors(n_keypoints + 1,
                                           with_background=True,
                                           background_id=0)[MAP]
    heatmaps = imageutils.keypoints_to_heatmaps(img.shape[:2], keypoints,
                                                segmentation_algorithm.var)
    heatmaps *= heatmaps > 0.8
    heatmaps_rgb = imageutils.colorize_heatmaps(
        heatmaps[np.newaxis, ...], imageutils.make_colors(n_keypoints))

    img_resized = cv2.resize(img, (256, 256), interpolation=cv2.INTER_LINEAR)
    img_resized = imageutils.convert_range(img_resized, [0, 255], [0, 1])
    im_with_keypoints = imageutils.draw_keypoint_markers(
        img_resized,
        keypoints,
        marker_list=[str(i)
                     for i in range(10)] + ["x", "o", "v", "<", ">", "*"],
        font_scale=1,
        thickness=4,
    )
    im_with_keypoints = cv2.resize(im_with_keypoints,
                                   (img.shape[1], img.shape[1]),
                                   interpolation=cv2.INTER_LINEAR)
    return MAP, MAP_colorized, heatmaps_rgb, im_with_keypoints
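A minimal usage sketch for the function above, assuming supermariopy and its crf module are installed; the variance 0.05, the astronaut test image, and the diagonal keypoints are illustrative values, with SegmentationFromKeypoints constructed positionally as in code example #1:

import numpy as np
from skimage import data
from supermariopy.crf import SegmentationFromKeypoints

# Illustrative inputs: a square test image and 10 keypoints along the
# diagonal, given in the [-1, 1] coordinate convention used above.
img = data.astronaut()
keypoints = np.stack([np.linspace(-1, 1, 10), np.linspace(-1, 1, 10)], axis=1)

segmentation_algorithm = SegmentationFromKeypoints(0.05)
MAP, MAP_colorized, heatmaps_rgb, im_with_keypoints = batched_keypoints_to_segments(
    img, keypoints, segmentation_algorithm
)
print(MAP.shape, MAP_colorized.shape)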
Code example #3
def argmax_rgb(m, cmap=plt.cm.viridis):
    """Take argmax of m along dimension 1 and apply RGB colorcode on it

    Parameters
    ----------
    m : torch.Tensor
        part map tensor shaped [B, P, H, W]

    Returns
    -------
    torch.Tensor
        RGB mask tensor shaped [B, 3, H, W]
    """
    B, P, H, W = ptutils.nn.shape_as_list(m)
    max_values, argmax_map = torch.max(m, dim=1)
    colors = imageutils.make_colors(P, cmap=cmap)
    colors = colors.astype(np.float32)
    colors = torch.from_numpy(colors)
    colors = colors.to(m.device)
    m_one_hot = ptnn.to_one_hot(argmax_map, P)
    m_one_hot = m_one_hot.permute(0, 3, 1, 2)
    mask_rgb = torch.einsum("bphw,pc->bchw", m_one_hot, colors)
    return mask_rgb
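A hedged usage sketch for the PyTorch variant above; the tensor shape [2, 5, 32, 32] is arbitrary, and the call assumes the modules used inside the function (torch, ptutils/ptnn, imageutils) are importable:

import torch

# Random part scores in the [B, P, H, W] layout expected by argmax_rgb.
m = torch.rand(2, 5, 32, 32)

mask_rgb = argmax_rgb(m)  # argmax over the 5 parts, then colorized
print(mask_rgb.shape)     # torch.Size([2, 3, 32, 32])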
Code example #4
    def test_make_colors_bytes(self):
        from supermariopy.imageutils import make_colors

        colors = make_colors(10, bytes=True)
        expected = [0, 255]
        assert isinstance(colors.flatten()[0], np.integer)
        assert colors.min() >= expected[0] and colors.max() <= expected[1]
        assert colors.shape == (10, 3)
Code example #5
    def test_make_colors_01(self):
        from supermariopy.imageutils import make_colors

        colors = make_colors(10)
        expected = [0, 1]
        assert isinstance(colors.flatten()[0], np.floating)
        assert colors.min() >= expected[0] and colors.max() <= expected[1]
        assert colors.shape == (10, 3)
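The two tests above pin down the contract of make_colors: an (n, 3) array of colors, floats in [0, 1] by default or integers in [0, 255] with bytes=True. The other examples use that array as a lookup table indexed by an integer label map; a minimal sketch of that pattern, with a synthetic label map:

import numpy as np
from supermariopy.imageutils import make_colors

# Synthetic label map with values in {0, ..., 9}; in the other examples it
# comes from a segmentation algorithm or an argmax over part maps.
labels = np.random.randint(0, 10, size=(64, 64))

colors = make_colors(10)     # (10, 3) array of floats in [0, 1]
labels_rgb = colors[labels]  # fancy indexing -> (64, 64, 3) RGB image
assert labels_rgb.shape == (64, 64, 3)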
Code example #6
def batched_keypoints_to_segments(img, keypoints, segmentation_algorithm):
    n_keypoints = keypoints.shape[0]
    MAP = segmentation_algorithm(img, keypoints)
    MAP_colorized = imageutils.make_colors(n_keypoints + 1,
                                           with_background=True,
                                           background_id=0)[MAP]
    heatmaps = imageutils.keypoints_to_heatmaps(img.shape[:2], keypoints,
                                                segmentation_algorithm.var)
    heatmaps *= heatmaps > 0.8
    heatmaps_rgb = imageutils.colorize_heatmaps(
        heatmaps[np.newaxis, ...], imageutils.make_colors(n_keypoints))
    im_with_keypoints = imageutils.draw_keypoint_markers(
        imageutils.convert_range(img, [0, 255], [0, 1]),
        keypoints,
        marker_list=[str(i)
                     for i in range(10)] + ["x", "o", "v", "<", ">", "*"],
        font_scale=0.5,
    )
    return MAP, MAP_colorized, heatmaps_rgb, im_with_keypoints
Code example #7
def main(iuv_path):
    I, u, v = denseposelib.load_iuv(iuv_path)
    base_dir = os.path.dirname(iuv_path)
    out_name = "{}_IRGB.png".format(os.path.splitext(iuv_path)[0])
    colors = imageutils.make_colors(
        len(denseposelib.PART_LIST),
        cmap=plt.cm.coolwarm,
        with_background=True,
        background_id=0,
    )
    I_colors = imageutils.convert_range(colors[I], [0, 1], [0, 255])
    cv2.imwrite(os.path.join(base_dir, out_name), I_colors)
Code example #8
    def test_colorize_heatmaps(self):
        from supermariopy.imageutils import (
            keypoints_to_heatmaps,
            make_colors,
            colorize_heatmaps,
        )
        from matplotlib import pyplot as plt

        keypoints = np.stack([np.linspace(-1, 1, 10),
                              np.linspace(-1, 1, 10)],
                             axis=1)
        heatmaps = keypoints_to_heatmaps((512, 512), keypoints)
        colors = make_colors(keypoints.shape[0])
        img_marked = colorize_heatmaps(heatmaps[np.newaxis, ...], colors)
        fig, ax = plt.subplots(1, 1, figsize=(6, 6))
        ax.imshow(np.squeeze(img_marked))
        return fig
Code example #9
File: viz.py  Project: hyeon95y/supermariopy
def argmax_rgb(m, cmap=plt.cm.viridis):
    """Take argmax of m along dimension 1 and apply RGB colorcode on it
    
    Parameters
    ----------
    m : tf.Tensor
        TensorFlow tensor or numpy array (eager execution result) shaped [B, H, W, P]

    Returns
    -------
    tf.Tensor
        RGB mask tensor shaped [B, H, W, 3]
    """
    B, H, W, P = nn.shape_as_list(m)
    argmax_map = tf.argmax(m, axis=-1)
    colors = imageutils.make_colors(P, cmap=cmap)
    colors = colors.astype(np.float32)
    colors = tf.convert_to_tensor(colors)
    m_one_hot = tf.one_hot(argmax_map, P, axis=-1)
    mask_rgb = tf.einsum("bhwp,pc->bhwc", m_one_hot, colors)
    return mask_rgb
Code example #10
def show_active_parts(
        parts,
        active_idx,
        inactive_parts_color=np.array([0.8, 0.8, 0.8]),
        background_color=np.array([1.0, 1.0, 1.0]),
        cmap=plt.cm.viridis,
):
    """Highlight active parts in color and set color of other parts to a gray value
    
    Parameters
    ----------
    parts : np.ndarray
        tensor of binary masks indicating mutually exclusive parts or segments of a segmentation.
        Shaped [N, H, W, P]
    active_idx : np.ndarray or list
        list of indices indicating which parts (indexing into the last dimension of parts) are active.
    inactive_parts_color : np.ndarray, default np.array([0.8, 0.8, 0.8])
        Color value in range [0, 1] for inactive parts.
    background_color : np.ndarray, default np.array([1.0, 1.0, 1.0])
        Color value in range [0, 1] for the background. Note that this only makes sense if parts does not already contain a background part;
        if it does, that part is assigned a color from the color map instead.
    """
    if not isinstance(active_idx, (list, np.ndarray)):
        raise TypeError("active_idx has to be a list or np.ndarray")
    N, H, W, P = parts.shape
    colors = imageutils.make_colors(P, cmap)
    all_part_idx = set(range(P))
    inactive_part_idx = all_part_idx.difference(set(active_idx))
    colors[
        np.array(list(inactive_part_idx)), :] = inactive_parts_color.reshape(
            (1, 3))

    background_part = 1.0 - np.clip(np.sum(parts, axis=-1), 0, 1)
    colors = np.insert(colors, 0, background_color.reshape(1, 3), axis=0)
    parts = np.insert(parts, 0, background_part, axis=-1)
    parts_rgb = np.einsum("nhwp,pc->nhwc", parts, colors)
    return parts_rgb
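A hedged usage sketch for show_active_parts; the binary part masks are synthetic one-hot slices built only to match the [N, H, W, P] layout documented above, and imageutils.make_colors is assumed to be importable as in the other examples:

import numpy as np

# Build mutually exclusive binary part masks from a random label map.
label_map = np.random.randint(0, 4, size=(1, 64, 64))  # [N, H, W]
parts = np.stack(
    [(label_map == p).astype(np.float32) for p in range(4)], axis=-1
)  # [N, H, W, 4]

# Parts 1 and 2 keep their colormap colors; parts 0 and 3 are drawn in gray.
parts_rgb = show_active_parts(parts, active_idx=[1, 2])
print(parts_rgb.shape)  # (1, 64, 64, 3)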
Code example #11
def argmax_rgb(m, cmap=plt.cm.viridis):
    """Take argmax of m along dimension 1 and apply RGB colorcode on it
    
    Parameters
    ----------
    m : np.ndarray
        tensor representing list of binary masks, shaped [B, H, W, P]

    Returns
    -------
    np.array
        RGB mask tensor shaped [B, H, W, 3]

    See also
    --------
    tfutils.viz.argmax_rgb
    """
    B, H, W, P = m.shape
    argmax_map = np.argmax(m, axis=-1)
    colors = imageutils.make_colors(P, cmap=cmap)
    colors = colors.astype(np.float32)
    m_one_hot = npu.one_hot(argmax_map, P, axis=-1)
    mask_rgb = np.einsum("bhwp,pc->bhwc", m_one_hot, colors)
    return mask_rgb
Code example #12
def main(infer_dir, output_folder, run_crf_config, n_processes):

    os.makedirs(output_folder, exist_ok=True)

    with open(run_crf_config, "r") as f:
        config = yaml.safe_load(f)

    segmentation_algorithm_args = config["segmentation_algorithm_args"]
    npz_files = glob.glob(os.path.join(infer_dir, "*.npz"))
    npz_files = sorted(npz_files)

    print("Using files :")
    print(npz_files)

    segmentation_algorithm = crf.SegmentationFromKeypoints(
        **segmentation_algorithm_args)

    data = []
    with closing(Pool(n_processes)) as p:
        for outputs in tqdm.tqdm(p.imap(load_npz, npz_files)):
            data.append(outputs)
    data = list_of_dicts2dict_of_lists(data)
    data = {k: np.concatenate(data[k]) for k in ["image", "gauss_yx"]}
    data["gauss_yx"] = data["gauss_yx"][..., ::-1]

    process_func = functools.partial(
        process_batches, **{
            "segmentation_algorithm": segmentation_algorithm,
        })
    tuples = list(
        zip(np.array_split(data["image"], n_processes, 0),
            np.array_split(data["gauss_yx"], n_processes, 0)))
    processed_data = []
    with closing(Pool(n_processes)) as p:
        for outputs in tqdm.tqdm(p.imap(process_func, tuples)):
            processed_data.append(outputs)

    labels = np.concatenate([p["labels"] for p in processed_data], 0)
    labels_rgb = np.concatenate([p["labels_rgb"] for p in processed_data], 0)
    heatmaps = np.concatenate([p["heatmaps"] for p in processed_data], 0)
    ims_with_keypoints = np.concatenate(
        [p["ims_with_keypoints"] for p in processed_data], 0)

    target_dir = os.path.join(output_folder, "01_keypoints")
    os.makedirs(target_dir, exist_ok=True)
    write_rgb(ims_with_keypoints, target_dir, n_processes)

    target_dir = os.path.join(output_folder, "02_heatmaps")
    os.makedirs(target_dir, exist_ok=True)
    write_rgb(heatmaps, target_dir, n_processes)

    target_dir = os.path.join(output_folder, "03_labels_rgb")
    os.makedirs(target_dir, exist_ok=True)
    write_rgb(labels_rgb, target_dir, n_processes)

    densepose_csv_path = config["densepose_csv_path"]
    data_root = config["data_root"]
    fname_col = config["data_fname_col"]

    iuv_files = get_iuv_files(densepose_csv_path, data_root, len(labels),
                              fname_col)
    iuvs = [cv2.imread(x, -1) for x in iuv_files]
    iuvs = [
        denseposelib.resize_labels(i[..., 0], labels.shape[1:]) for i in iuvs
    ]
    iuvs = np.stack(iuvs, axis=0)

    dp_semantic_remap_dict = config["dp_semantic_remap_dict"]
    dp_new_part_list = sorted(list(dp_semantic_remap_dict.keys()))
    dp_remap_dict = denseposelib.semantic_remap_dict2remap_dict(
        dp_semantic_remap_dict, dp_new_part_list)

    remapped_gt_segmentation, remapped_inferred = denseposelib.get_best_segmentation(
        iuvs, labels, dp_remap_dict)

    df = denseposelib.calculate_iou_df(remapped_inferred,
                                       remapped_gt_segmentation,
                                       dp_new_part_list)
    df.to_csv(os.path.join(output_folder, "part_ious.csv"),
              index=False,
              header=True)
    df_mean = denseposelib.calculate_overall_iou_from_df(df)
    with open(os.path.join(output_folder, "mean_part_ios.csv"), "w") as f:
        print(
            tabulate(df_mean,
                     headers="keys",
                     tablefmt="psql",
                     showindex="never"),
            file=f,
        )

    target_dir = os.path.join(output_folder, "04_compare")
    os.makedirs(target_dir, exist_ok=True)

    background_color = np.array([1, 1, 1])
    colors1 = imageutils.make_colors(config["n_inferred_parts"] + 1,
                                     with_background=True,
                                     background_id=0)
    colors2 = imageutils.make_colors(
        len(dp_new_part_list),
        with_background=True,
        background_id=dp_new_part_list.index("background"))
    for i, (im1, im2, im3) in enumerate(
            zip(labels, remapped_inferred, remapped_gt_segmentation)):
        canvas = np.concatenate([colors1[im1], colors2[im2], colors2[im3]],
                                1).astype(np.float32)
        canvas = cv2.cvtColor(canvas, cv2.COLOR_RGB2BGR)
        fname = os.path.join(target_dir, "{:06d}.png".format(i))
        cv2.imwrite(fname, imageutils.convert_range(canvas, [0, 1], [0, 255]))
        # batches.plot_batch(
        #     imageutils.convert_range(canvas, [0, 1], [-1, 1]), fname, cols=3
        # )

    target_dir = os.path.join(output_folder, "05_remapped_inferred")
    os.makedirs(target_dir, exist_ok=True)
    write_labels(remapped_inferred, target_dir, colors2, n_processes)

    target_dir = os.path.join(output_folder, "06_remapped_labels")
    os.makedirs(target_dir, exist_ok=True)
    write_labels(remapped_gt_segmentation, target_dir, colors2, n_processes)
Code example #13
def draw_keypoint_markers(
    img: np.ndarray,
    keypoints: np.ndarray,
    font_scale: float = 0.5,
    thickness: int = 2,
    font=cv2.FONT_HERSHEY_SIMPLEX,
    marker_list=["o", "v", "x", "+", "<", "-", ">", "c"],
) -> np.ndarray:
    """ Draw keypoints on image with markers
    
    Parameters
    ----------
    img : np.ndarray
        shaped [H, W, 3] array  in range [0, 1]
    keypoints : np.ndarray
        shaped [kp, 2] - array giving keypoint positions in range [-1, 1] for x and y. keypoints[:, 0] is x-coordinate (horizontal).
    font_scale : float, optional
        openCV font scale passed to 'cv2.putText', by default 0.5
    thickness : int, optional
        openCV font thickness passed to 'cv2.putText', by default 2
    font : cv2.FONT_xxx, optional
        openCV font, by default cv2.FONT_HERSHEY_SIMPLEX
    marker_list : list of str, optional
        markers to cycle through when drawing the keypoints, by default ["o", "v", "x", "+", "<", "-", ">", "c"]
    
    Examples
    --------

        import numpy as np
        from skimage import data
        from matplotlib import pyplot as plt

        astronaut = data.astronaut() / 255.0  # img has to be in range [0, 1]
        keypoints = np.stack([np.linspace(-1, 1, 10), np.linspace(-1, 1, 10)], axis=1)
        img_marked = draw_keypoint_markers(astronaut, keypoints, font_scale=2, thickness=3)
        plt.imshow(img_marked)
    """
    if not imageutils.is_in_range(img, [0, 1]):
        raise RangeError(img, "img", [0, 1])
    if img.shape[0] != img.shape[1]:
        raise ValueError("only square images are supported currently")

    img_marked = img.copy()
    keypoints = imageutils.convert_range(keypoints, [-1, 1], [0, img.shape[0] - 1])
    colors = imageutils.make_colors(
        keypoints.shape[0], bytes=False, cmap=plt.cm.inferno
    )
    for i, kp in enumerate(keypoints):
        text = marker_list[i % len(marker_list)]
        (label_width, label_height), baseline = cv2.getTextSize(
            text, font, font_scale, thickness
        )
        textX = kp[0]
        textY = kp[1]
        font_color = colors[i]
        text_position = (
            textX - label_width / 2.0 - baseline,
            textY - label_height / 2.0 + baseline,
        )
        text_position = tuple([int(x) for x in text_position])
        img_marked = cv2.putText(
            img_marked,
            text,
            text_position,
            font,
            font_scale,
            font_color,
            thickness=thickness,
        )
    return img_marked