Example #1
    def get_segmentation(self, sd_token: str) -> Tuple[np.ndarray, np.ndarray]:
        """
        Produces two segmentation masks as numpy arrays of size H x W each, where H and W are the height and width
        of the camera image respectively:
            - semantic mask: A mask in which each pixel is an integer value between 0 and C (inclusive),
                             where C is the number of categories in nuImages. Each integer corresponds to
                             the index of the class in category.json.
            - instance mask: A mask in which each pixel is an integer value between 0 and N (inclusive), where N is the
                             number of objects in a given camera sample_data. Each integer corresponds to
                             the order in which the object was drawn into the mask.
        :param sd_token: The token of the sample_data to be rendered.
        :return: Two 2D numpy arrays (one semantic mask <int32: H, W>, and one instance mask <int32: H, W>).
        """
        # Validate inputs.
        sample_data = self.get('sample_data', sd_token)
        assert sample_data['is_key_frame'], 'Error: Cannot render annotations for non-keyframes!'

        name_to_index = name_to_index_mapping(self.category)

        # Get image data.
        self.check_sweeps(sample_data['filename'])
        im_path = osp.join(self.dataroot, sample_data['filename'])
        im = Image.open(im_path)

        (width, height) = im.size
        semseg_mask = np.zeros((height, width), dtype=np.int32)
        instanceseg_mask = np.zeros((height, width), dtype=np.int32)

        # Load stuff / surface regions.
        surface_anns = [
            o for o in self.surface_ann if o['sample_data_token'] == sd_token
        ]

        # Draw stuff / surface regions.
        for ann in surface_anns:
            # Get color and mask.
            category_token = ann['category_token']
            category_name = self.get('category', category_token)['name']
            if ann['mask'] is None:
                continue
            mask = mask_decode(ann['mask'])

            # Draw mask for semantic segmentation.
            semseg_mask[mask == 1] = name_to_index[category_name]

        # Load object instances.
        object_anns = [
            o for o in self.object_ann if o['sample_data_token'] == sd_token
        ]

        # Sort by token to ensure that objects always appear in the instance mask in the same order.
        object_anns = sorted(object_anns, key=lambda k: k['token'])

        # Draw object instances.
        # The 0 index is reserved for background; thus, the instances should start from index 1.
        for i, ann in enumerate(object_anns, start=1):
            # Get color, box, mask and name.
            category_token = ann['category_token']
            category_name = self.get('category', category_token)['name']
            if ann['mask'] is None:
                continue
            mask = mask_decode(ann['mask'])

            # Draw masks for semantic segmentation and instance segmentation.
            semseg_mask[mask == 1] = name_to_index[category_name]
            instanceseg_mask[mask == 1] = i

        # Ensure that the number of instances in the instance segmentation mask is the same as the number of objects.
        assert len(object_anns) == np.max(instanceseg_mask), \
            'Error: There are {} objects but only {} instances ' \
            'were drawn into the instance segmentation mask.'.format(len(object_anns), np.max(instanceseg_mask))

        return semseg_mask, instanceseg_mask
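A minimal usage sketch for get_segmentation, assuming a local nuImages installation (the dataroot, version, and sample index below are placeholders; adjust them to your setup):

    from nuimages import NuImages
    import numpy as np

    # Initialize the devkit; dataroot and version are placeholders.
    nuim = NuImages(dataroot='/data/sets/nuimages', version='v1.0-mini', verbose=False)

    # Pick the key camera frame of the first sample (keyframes are required).
    sd_token = nuim.sample[0]['key_camera_token']

    semseg_mask, instanceseg_mask = nuim.get_segmentation(sd_token)
    print(semseg_mask.shape, semseg_mask.dtype)            # e.g. (900, 1600) int32
    print('class indices present:', np.unique(semseg_mask))
    print('number of instances:', instanceseg_mask.max())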
Example #2
    def render_image(self,
                     sd_token: str,
                     annotation_type: str = 'all',
                     with_category: bool = False,
                     with_attributes: bool = False,
                     object_tokens: List[str] = None,
                     surface_tokens: List[str] = None,
                     render_scale: float = 1.0,
                     box_line_width: int = -1,
                     font_size: int = None,
                     out_path: str = None) -> None:
        """
        Renders an image (sample_data), optionally with annotations overlaid.
        :param sd_token: The token of the sample_data to be rendered.
        :param annotation_type: The types of annotations to draw on the image; there are four options:
            'all': Draw surfaces and objects, subject to any filtering done by object_tokens and surface_tokens.
            'surfaces': Draw only surfaces, subject to any filtering done by surface_tokens.
            'objects': Draw objects, subject to any filtering done by object_tokens.
            'none': Neither surfaces nor objects will be drawn.
        :param with_category: Whether to include the category name at the top of a box.
        :param with_attributes: Whether to include attributes in the label tags. Note that with_attributes=True
            will only work if with_category=True.
        :param object_tokens: List of object annotation tokens. If given, only these annotations are drawn.
        :param surface_tokens: List of surface annotation tokens. If given, only these annotations are drawn.
        :param render_scale: The scale at which the image will be rendered. Use 1.0 for the original image size.
        :param box_line_width: The box line width in pixels. The default is -1.
            If set to -1, box_line_width is set to round(render_scale), so boxes are drawn thicker in larger renderings.
        :param font_size: Size of the text in the rendered image. Use None for the default size.
        :param out_path: The path where we save the rendered image, or otherwise None.
            If a path is provided, the plot is not shown to the user.
        """
        # Validate inputs.
        sample_data = self.get('sample_data', sd_token)
        if not sample_data['is_key_frame']:
            assert annotation_type != 'none', 'Error: Cannot render annotations for non keyframes!'
            assert not with_attributes, 'Error: Cannot render attributes for non keyframes!'
        if with_attributes:
            assert with_category, 'In order to set with_attributes=True, with_category must be True.'
        assert isinstance(box_line_width, int), 'Error: box_line_width must be an integer!'
        if box_line_width == -1:
            box_line_width = int(round(render_scale))

        # Get image data.
        self.check_sweeps(sample_data['filename'])
        im_path = osp.join(self.dataroot, sample_data['filename'])
        im = Image.open(im_path)

        # Initialize drawing.
        if with_category and font_size is not None:
            font = get_font(font_size=font_size)
        else:
            font = None
        im = im.convert('RGBA')
        draw = ImageDraw.Draw(im, 'RGBA')

        annotation_types = ['all', 'surfaces', 'objects', 'none']
        assert annotation_type in annotation_types, \
            'Error: {} is not a valid option for annotation_type. ' \
            'Only {} are allowed.'.format(annotation_type, annotation_types)
        if annotation_type != 'none':
            if annotation_type in ('all', 'surfaces'):
                # Load stuff / surface regions.
                surface_anns = [
                    o for o in self.surface_ann
                    if o['sample_data_token'] == sd_token
                ]
                if surface_tokens is not None:
                    sd_surface_tokens = {s['token'] for s in surface_anns if s['token']}
                    assert set(surface_tokens).issubset(sd_surface_tokens), \
                        'Error: The provided surface_tokens do not belong to the sd_token!'
                    surface_anns = [
                        o for o in surface_anns if o['token'] in surface_tokens
                    ]

                # Draw stuff / surface regions.
                for ann in surface_anns:
                    # Get color and mask.
                    category_token = ann['category_token']
                    category_name = self.get('category',
                                             category_token)['name']
                    color = self.color_map[category_name]
                    if ann['mask'] is None:
                        continue
                    mask = mask_decode(ann['mask'])

                    # Draw mask. The label is obvious from the color.
                    draw.bitmap((0, 0),
                                Image.fromarray(mask * 128),
                                fill=tuple(color + (128, )))

            if annotation_type in ('all', 'objects'):
                # Load object instances.
                object_anns = [
                    o for o in self.object_ann
                    if o['sample_data_token'] == sd_token
                ]
                if object_tokens is not None:
                    sd_object_tokens = {o['token'] for o in object_anns if o['token']}
                    assert set(object_tokens).issubset(sd_object_tokens), \
                        'Error: The provided object_tokens do not belong to the sd_token!'
                    object_anns = [
                        o for o in object_anns if o['token'] in object_tokens
                    ]

                # Draw object instances.
                for ann in object_anns:
                    # Get color, box, mask and name.
                    category_token = ann['category_token']
                    category_name = self.get('category',
                                             category_token)['name']
                    color = self.color_map[category_name]
                    bbox = ann['bbox']
                    attr_tokens = ann['attribute_tokens']
                    attributes = [
                        self.get('attribute', at) for at in attr_tokens
                    ]
                    name = annotation_name(attributes,
                                           category_name,
                                           with_attributes=with_attributes)
                    if ann['mask'] is not None:
                        mask = mask_decode(ann['mask'])

                        # Draw mask, rectangle and text.
                        draw.bitmap((0, 0),
                                    Image.fromarray(mask * 128),
                                    fill=tuple(color + (128, )))
                        draw.rectangle(bbox,
                                       outline=color,
                                       width=box_line_width)
                        if with_category:
                            draw.text((bbox[0], bbox[1]), name, font=font)

        # Plot the image.
        (width, height) = im.size
        pix_to_inch = 100 / render_scale
        figsize = (width / pix_to_inch, height / pix_to_inch)
        plt.figure(figsize=figsize)
        plt.axis('off')
        plt.imshow(im)

        # Save to disk.
        if out_path is not None:
            plt.savefig(out_path,
                        bbox_inches='tight',
                        dpi=2.295 * pix_to_inch,
                        pad_inches=0)
            plt.close()
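A usage sketch for render_image, continuing from the example above (out_path is a placeholder; leave it as None to display the plot instead of saving it):

    # Render the keyframe with all annotations, category labels and attributes.
    nuim.render_image(sd_token,
                      annotation_type='all',
                      with_category=True,
                      with_attributes=True,
                      render_scale=2.0,
                      out_path='./rendered_sample.png')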
def get_img_annos(nuim, img_info, cat2id, out_dir, data_root, seg_root):
    """Get semantic segmentation map for an image.

    Args:
        nuim (obj:`NuImages`): NuImages dataset object
        img_info (dict): Meta information of img

    Returns:
        np.ndarray: Semantic segmentation map of the image
    """
    sd_token = img_info['token']
    image_id = img_info['id']
    name_to_index = name_to_index_mapping(nuim.category)

    # Get image data.
    width, height = img_info['width'], img_info['height']
    semseg_mask = np.zeros((height, width), dtype=np.uint8)

    # Load stuff / surface regions.
    surface_anns = [
        o for o in nuim.surface_ann if o['sample_data_token'] == sd_token
    ]

    # Draw stuff / surface regions.
    for ann in surface_anns:
        # Get color and mask.
        category_token = ann['category_token']
        category_name = nuim.get('category', category_token)['name']
        if ann['mask'] is None:
            continue
        mask = mask_decode(ann['mask'])

        # Draw mask for semantic segmentation.
        semseg_mask[mask == 1] = name_to_index[category_name]

    # Load object instances.
    object_anns = [
        o for o in nuim.object_ann if o['sample_data_token'] == sd_token
    ]

    # Sort by token to ensure that objects always appear in the
    # instance mask in the same order.
    object_anns = sorted(object_anns, key=lambda k: k['token'])

    # Draw object instances.
    # The 0 index is reserved for background; thus, the instances
    # should start from index 1.
    annotations = []
    for i, ann in enumerate(object_anns, start=1):
        # Get color, box, mask and name.
        category_token = ann['category_token']
        category_name = nuim.get('category', category_token)['name']
        if ann['mask'] is None:
            continue
        mask = mask_decode(ann['mask'])

        # Draw masks for semantic segmentation and instance segmentation.
        semseg_mask[mask == 1] = name_to_index[category_name]

        if category_name in NAME_MAPPING:
            cat_name = NAME_MAPPING[category_name]
            cat_id = cat2id[cat_name]

            x_min, y_min, x_max, y_max = ann['bbox']
            # Re-encode the RLE instance mask so that `counts` is a plain
            # string, as expected in COCO-style annotation files.
            mask_anno = dict()
            mask_anno['counts'] = base64.b64decode(
                ann['mask']['counts']).decode()
            mask_anno['size'] = ann['mask']['size']

            data_anno = dict(image_id=image_id,
                             category_id=cat_id,
                             bbox=[x_min, y_min, x_max - x_min, y_max - y_min],
                             area=(x_max - x_min) * (y_max - y_min),
                             segmentation=mask_anno,
                             iscrowd=0)
            annotations.append(data_anno)

    # after process, save semantic masks
    img_filename = img_info['file_name']
    seg_filename = img_filename.replace('.jpg', '.png')
    seg_filename = osp.join(seg_root, seg_filename)
    mmcv.imwrite(semseg_mask, seg_filename)
    return annotations, np.max(semseg_mask)
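A hypothetical driver for get_img_annos, sketching how the per-image annotations could be collected into a single COCO-style file. It continues from the sketches above; all paths are placeholders, and cat2id is assumed to map the mapped category names (the values of the converter's NAME_MAPPING dict) to contiguous ids:

    import os.path as osp
    import mmcv

    out_dir = data_root = '/data/sets/nuimages'        # placeholder paths
    seg_root = osp.join(out_dir, 'semantic_masks')
    cat2id = {name: i for i, name in enumerate(sorted(set(NAME_MAPPING.values())))}

    images, annotations = [], []
    for img_id, sample in enumerate(nuim.sample):
        # Build the meta dict expected by get_img_annos from the sample_data record.
        sd = nuim.get('sample_data', sample['key_camera_token'])
        img_info = dict(id=img_id,
                        token=sd['token'],
                        file_name=sd['filename'],
                        width=sd['width'],
                        height=sd['height'])
        images.append(img_info)
        annos, _ = get_img_annos(nuim, img_info, cat2id, out_dir, data_root, seg_root)
        annotations.extend(annos)

    # COCO requires a unique integer id per annotation.
    for ann_id, ann in enumerate(annotations):
        ann['id'] = ann_id

    categories = [dict(id=i, name=n) for n, i in cat2id.items()]
    mmcv.dump(dict(images=images, annotations=annotations, categories=categories),
              osp.join(out_dir, 'nuimages_coco.json'))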