Example #1
def display_sample(rgb_obs, semantic_obs=np.array([]), depth_obs=np.array([])):
    from habitat_sim.utils.common import d3_40_colors_rgb

    rgb_img = Image.fromarray(rgb_obs, mode="RGBA")

    arr = [rgb_img]
    titles = ["rgb"]
    if semantic_obs.size != 0:
        semantic_img = Image.new("P", (semantic_obs.shape[1], semantic_obs.shape[0]))
        semantic_img.putpalette(d3_40_colors_rgb.flatten())
        semantic_img.putdata((semantic_obs.flatten() % 40).astype(np.uint8))
        semantic_img = semantic_img.convert("RGBA")
        arr.append(semantic_img)
        titles.append("semantic")

    if depth_obs.size != 0:
        depth_img = Image.fromarray((depth_obs / 10 * 255).astype(np.uint8), mode="L")
        arr.append(depth_img)
        titles.append("depth")

    plt.figure(figsize=(12, 8))
    for i, data in enumerate(arr):
        ax = plt.subplot(1, 3, i + 1)
        ax.axis("off")
        ax.set_title(titles[i])
        plt.imshow(data)
    plt.show(block=False)
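A minimal usage sketch (not from any of the listed sources) with synthetic arrays, showing the shapes and dtypes display_sample expects; in practice the frames come from a habitat-sim Simulator's color, semantic and depth sensors.

import numpy as np

h, w = 240, 320
rgb = np.zeros((h, w, 4), dtype=np.uint8)       # RGBA frame, as a color sensor returns
rgb[..., 3] = 255                               # opaque alpha
semantic = np.random.randint(0, 80, size=(h, w), dtype=np.uint32)  # instance ids
depth = np.random.rand(h, w).astype(np.float32) * 10.0             # depth in meters

display_sample(rgb, semantic_obs=semantic, depth_obs=depth)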
Example #2
def display_sample(rgb_obs, semantic_obs, depth_obs, visualize=False):
    rgb_img = Image.fromarray(rgb_obs, mode="RGBA")

    semantic_img = Image.new("P",
                             (semantic_obs.shape[1], semantic_obs.shape[0]))
    semantic_img.putpalette(d3_40_colors_rgb.flatten())
    semantic_img.putdata((semantic_obs.flatten() % 40).astype(np.uint8))
    semantic_img = semantic_img.convert("RGBA")

    depth_img = Image.fromarray((depth_obs / 10 * 255).astype(np.uint8),
                                mode="L")

    # Convert the RGBA PIL frame to OpenCV's BGR channel order for display.
    display_img = cv2.cvtColor(np.asarray(rgb_img), cv2.COLOR_RGB2BGR)

    cv2.imshow('img', display_img)
    cv2.waitKey(1)  # give the HighGUI event loop a chance to draw the window
    if visualize:
        arr = [rgb_img, semantic_img, depth_img]
        titles = ['rgb', 'semantic', 'depth']
        plt.figure(figsize=(12, 8))
        for i, data in enumerate(arr):
            ax = plt.subplot(1, 3, i + 1)
            ax.axis('off')
            ax.set_title(titles[i])
            plt.imshow(data)
        plt.show()
        plt.pause(0.5)
        plt.close()
Example #3
def display_sample2(rgb_obs, depth_obs, semantic_obs, savepath=None):
    depth_obs = depth_obs / np.amax(depth_obs)  # normalize for visualization
    rgb_img = Image.fromarray(rgb_obs, mode="RGB")
    depth_img = Image.fromarray((depth_obs * 255).astype(np.uint8), mode="L")

    semantic_img = Image.new("P",
                             (semantic_obs.shape[1], semantic_obs.shape[0]))
    semantic_img.putpalette(d3_40_colors_rgb.flatten())
    semantic_img.putdata((semantic_obs.flatten() % 40).astype(np.uint8))
    semantic_img = semantic_img.convert("RGBA")

    arr = [rgb_img, depth_img, semantic_img]
    titles = ['rgb', 'depth', 'sseg']

    plt.figure(figsize=(12, 8))
    for i, data in enumerate(arr):
        ax = plt.subplot(1, 3, i + 1)
        ax.axis('off')
        ax.set_title(titles[i])
        plt.imshow(data)
    if savepath is None:
        plt.show()
    else:
        plt.savefig(savepath, bbox_inches='tight', pad_inches=0, dpi=100)
    plt.close()
Example #4
    def save_semantic_observation(self, obs, total_frames):
        semantic_obs = obs["semantic_sensor"]
        semantic_img = Image.new(
            "P", (semantic_obs.shape[1], semantic_obs.shape[0]))
        semantic_img.putpalette(d3_40_colors_rgb.flatten())
        semantic_img.putdata((semantic_obs.flatten() % 40).astype(np.uint8))
        semantic_img.save("test.sem.%05d.png" % total_frames)
Example #5
def display_sample(observations):
    """Plot RGB, Semantic and Depth images"""
    rgb_obs = observations["color_sensor"]
    semantic_obs = observations["semantic_sensor"]
    depth_obs = observations["depth_sensor"]

    rgb_img = Image.fromarray(rgb_obs, mode="RGBA")

    semantic_img = Image.new("P",
                             (semantic_obs.shape[1], semantic_obs.shape[0]))
    semantic_img.putpalette(d3_40_colors_rgb.flatten())
    semantic_img.putdata((semantic_obs.flatten() % 40).astype(np.uint8))
    semantic_img = semantic_img.convert("RGBA")

    depth_img = Image.fromarray((depth_obs / 10 * 255).astype(np.uint8),
                                mode="L")

    arr = [rgb_img, semantic_img, depth_img]
    titles = ['rgb', 'semantic', 'depth']
    plt.figure(figsize=(12, 8))
    for i, data in enumerate(arr):
        ax = plt.subplot(1, 3, i + 1)
        ax.axis('off')
        ax.set_title(titles[i])
        plt.imshow(data)
    plt.show()
Example #6
def get_semantic_image(semantic_labels):
    semantic_img = Image.new(
        "P", (semantic_labels.shape[1], semantic_labels.shape[0]))
    semantic_img.putpalette(d3_40_colors_rgb.flatten())
    semantic_img.putdata((semantic_labels.flatten() % 40).astype(np.uint8))
    semantic_img = semantic_img.convert("RGB")
    return semantic_img
Example #7
def color_semantic_image(semantic_obs, to_numpy=True):
    semantic_img = Image.new("P",
                             (semantic_obs.shape[1], semantic_obs.shape[0]))
    semantic_img.putpalette(d3_40_colors_rgb.flatten())
    semantic_img.putdata((semantic_obs.flatten() % 40).astype(np.uint8))
    semantic_img = semantic_img.convert("RGBA")

    if to_numpy:
        semantic_img = np.array(semantic_img)[:, :, :3]

    return semantic_img
Example #8
    def save_semantic_observation(self, obs, total_frames):
        semantic_obs = obs["semantic_sensor"]
        semantic_img = Image.new(
            "P", (semantic_obs.shape[1], semantic_obs.shape[0]))
        semantic_img.putpalette(d3_40_colors_rgb.flatten())
        semantic_img.putdata((semantic_obs.flatten() % 40).astype(np.uint8))
        if self._demo_type == DemoRunnerType.AB_TEST:
            if self._group_id == ABTestGroup.CONTROL:
                semantic_img.save("test.sem.control.%05d.png" % total_frames)
            else:
                semantic_img.save("test.sem.test.%05d.png" % total_frames)
        else:
            semantic_img.save("test.sem.%05d.png" % total_frames)
Example #9
def semantic_to_rgb(semantic_image: np.ndarray) -> np.ndarray:
    """Map semantic ids to colors and genereate an rgb image

    :param semantic_image: Raw semantic observation image from sensor output.

    :return: rgb semantic image data.
    """
    semantic_image_rgb = Image.new(
        "P", (semantic_image.shape[1], semantic_image.shape[0])
    )
    semantic_image_rgb.putpalette(d3_40_colors_rgb.flatten())
    semantic_image_rgb.putdata((semantic_image.flatten() % 40).astype(np.uint8))
    semantic_image_rgb = semantic_image_rgb.convert("RGBA")
    return semantic_image_rgb
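A quick usage sketch (synthetic ids, not from the original source): note that the function as written returns a PIL image despite the np.ndarray annotation, so wrap the result in np.array when an array is needed.

import numpy as np

ids = np.arange(64 * 64, dtype=np.uint32).reshape(64, 64)  # stand-in for a semantic frame

rgb = semantic_to_rgb(ids)            # PIL image in RGBA mode
rgb_arr = np.array(rgb)[..., :3]      # uint8 (64, 64, 3) array, if an ndarray is needed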
Example #10
def get_visuals(observations):
    """Returns PIL versions of RGB, Semantic and Depth images, also returns Depth array"""
    rgb_img = observations["color_sensor"]
    rgb_img = Image.fromarray(rgb_img, mode="RGBA")

    sem = observations["semantic_sensor"]
    sem_img = Image.new("P", (sem.shape[1], sem.shape[0]))
    sem_img.putpalette(d3_40_colors_rgb.flatten())
    sem_img.putdata((sem.flatten() % 40).astype(np.uint8))
    sem_img = sem_img.convert("RGBA")

    dep_arr = observations["depth_sensor"]
    dep_img = Image.fromarray((dep_arr / 10 * 255).astype(np.uint8), mode="L")

    return rgb_img, sem_img, dep_img, dep_arr
Example #11
    def display_sample(self,
                       rgb_obs,
                       semantic_obs,
                       depth_obs,
                       mainobj=None,
                       visualize=False):
        rgb_img = Image.fromarray(rgb_obs, mode="RGBA")

        semantic_img = Image.new(
            "P", (semantic_obs.shape[1], semantic_obs.shape[0]))
        semantic_img.putpalette(d3_40_colors_rgb.flatten())
        semantic_img.putdata((semantic_obs.flatten() % 40).astype(np.uint8))
        semantic_img = semantic_img.convert("RGBA")

        depth_img = Image.fromarray((depth_obs / 10 * 255).astype(np.uint8),
                                    mode="L")

        # Convert the RGBA PIL frame to OpenCV's BGR channel order.
        display_img = cv2.cvtColor(np.asarray(rgb_img), cv2.COLOR_RGB2BGR)

        # Optional: highlight the pixels belonging to mainobj in the display image.
        # if mainobj is not None:
        #     main_id = int(mainobj.id[1:])
        #     display_img[semantic_obs == main_id] = [1, 0, 1]

        plt.imshow(display_img)
        plt.show()
        if visualize:
            arr = [rgb_img, semantic_img, depth_img]
            titles = ['rgb', 'semantic', 'depth']
            plt.figure(figsize=(12, 8))
            for i, data in enumerate(arr):
                ax = plt.subplot(1, 3, i + 1)
                ax.axis('off')
                ax.set_title(titles[i])
                plt.imshow(data)
                # plt.pause()
            plt.show()
Example #12
def display_sample(rgb_obs, semantic_obs, depth_obs, save_file=None):
    rgb_img = Image.fromarray(rgb_obs, mode="RGBA")

    semantic_img = Image.new("P",
                             (semantic_obs.shape[1], semantic_obs.shape[0]))
    semantic_img.putpalette(d3_40_colors_rgb.flatten())
    semantic_img.putdata((semantic_obs.flatten() % 40).astype(np.uint8))
    semantic_img = semantic_img.convert("RGBA")

    depth_img = Image.fromarray((depth_obs / 10 * 255).astype(np.uint8),
                                mode="L")

    arr = [rgb_img, semantic_img, depth_img]
    titles = ['rgb', 'semantic', 'depth']
    plt.figure(figsize=(12, 8))
    for i, data in enumerate(arr):
        ax = plt.subplot(1, 3, i + 1)
        ax.axis('off')
        ax.set_title(titles[i])
        plt.imshow(data)
    if not save_file:
        plt.show()
    else:
        plt.savefig(save_file)
Example #13
    def display_sample(self,
                       rgb_obs,
                       semantic_obs,
                       depth_obs,
                       mainobj=None,
                       visualize=False):
        rgb_img = Image.fromarray(rgb_obs, mode="RGBA")

        semantic_img = Image.new(
            "P", (semantic_obs.shape[1], semantic_obs.shape[0]))
        semantic_img.putpalette(d3_40_colors_rgb.flatten())
        semantic_img.putdata((semantic_obs.flatten() % 40).astype(np.uint8))
        semantic_img = semantic_img.convert("RGBA")

        depth_img = Image.fromarray((depth_obs / 10 * 255).astype(np.uint8),
                                    mode="L")

        # Convert the RGBA PIL frame to OpenCV's BGR channel order.
        display_img = cv2.cvtColor(np.asarray(rgb_img), cv2.COLOR_RGB2BGR)

        # Optional: highlight the pixels belonging to mainobj in the display image.
        # if mainobj is not None:
        #     main_id = int(mainobj.id[1:])
        #     display_img[semantic_obs == main_id] = [1, 0, 1]

        plt.imshow(display_img)
        plt.show()

        # Drop the alpha channel and reverse to BGR channel order for the detector.
        im = np.asarray(rgb_img)[..., :3]
        im = im[:, :, ::-1]
        outputs = self.maskrcnn(im)

        pred_masks = outputs['instances'].pred_masks
        pred_boxes = outputs['instances'].pred_boxes.tensor
        pred_classes = outputs['instances'].pred_classes
        pred_scores = outputs['instances'].scores

        # converts instance segmentation to individual masks and bbox
        # visualisations
        v = Visualizer(im[:, :, ::-1],
                       MetadataCatalog.get(self.cfg.DATASETS.TRAIN[0]),
                       scale=1.2)
        out = v.draw_instance_predictions(outputs['instances'].to("cpu"))
        seg_im = out.get_image()

        if visualize:
            arr = [rgb_img, semantic_img, depth_img, seg_im]
            titles = ['rgb', 'semantic', 'depth', 'seg_im']
            plt.figure(figsize=(12, 8))
            for i, data in enumerate(arr):
                # Four panels, so size the grid by len(arr) rather than 3.
                ax = plt.subplot(1, len(arr), i + 1)
                ax.axis('off')
                ax.set_title(titles[i])
                plt.imshow(data)
            plt.show()
Example #14
def convert_semantic_object_to_rgb(x):
    semantic_img = Image.new("P", (x.shape[1], x.shape[0]))
    semantic_img.putpalette(d3_40_colors_rgb.flatten())
    semantic_img.putdata((x.flatten() % 40).astype(np.uint8))
    semantic_img = np.array(semantic_img.convert("RGB"))
    return semantic_img
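As a cross-check (a hedged sketch with synthetic ids, not from the original source), indexing the 40-entry d3_40_colors_rgb palette directly with NumPy should give the same colors as the PIL putpalette/putdata route used throughout these examples.

import numpy as np
from habitat_sim.utils.common import d3_40_colors_rgb

# Synthetic instance-id map standing in for a semantic sensor frame.
x = np.random.randint(0, 200, size=(120, 160), dtype=np.uint32)

# d3_40_colors_rgb is a (40, 3) uint8 palette; fancy indexing maps every id
# (modulo 40) straight to its color without going through PIL.
rgb_direct = d3_40_colors_rgb[x % 40]        # (120, 160, 3), uint8

rgb_pil = convert_semantic_object_to_rgb(x)  # palette route from the example above
assert np.array_equal(rgb_direct, rgb_pil)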