def verify_data(orig_ann: Annotation, classes_matching: dict, res_project_meta: ProjectMeta) -> Annotation:
    ann = orig_ann.clone()
    imsize = ann.img_size

    for first_class, second_class in classes_matching.items():
        mask1 = np.zeros(imsize, dtype=bool)
        mask2 = np.zeros(imsize, dtype=bool)
        for label in ann.labels:
            if label.obj_class.name == first_class:
                label.geometry.draw(mask1, True)
            elif label.obj_class.name == second_class:
                label.geometry.draw(mask2, True)

        iou_value = _compute_masks_iou(mask1, mask2)

        tag_meta = res_project_meta.img_tag_metas.get(make_iou_tag_name(first_class))
        tag = Tag(tag_meta, iou_value)
        ann = ann.add_tag(tag)

        fp_mask = _create_fp_mask(mask1, mask2)
        if fp_mask.sum() != 0:
            fp_object_cls = res_project_meta.obj_classes.get(make_false_positive_name(first_class))
            fp_geom = Bitmap(data=fp_mask)
            fp_label = Label(fp_geom, fp_object_cls)
            ann = ann.add_label(fp_label)

        fn_mask = _create_fn_mask(mask1, mask2)
        if fn_mask.sum() != 0:
            fn_object_cls = res_project_meta.obj_classes.get(make_false_negative_name(first_class))
            fn_geom = Bitmap(data=fn_mask)
            fn_label = Label(fn_geom, fn_object_cls)
            ann = ann.add_label(fn_label)
    return ann
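# The helpers called above are not shown in this excerpt. A minimal NumPy sketch of what they
# are assumed to compute (names follow the calls above; the ground-truth/prediction argument
# convention is an assumption):
import numpy as np

def _compute_masks_iou(mask_gt: np.ndarray, mask_pred: np.ndarray) -> float:
    # Intersection-over-union of two boolean masks; defined as 1.0 when both are empty.
    intersection = np.logical_and(mask_gt, mask_pred).sum()
    union = np.logical_or(mask_gt, mask_pred).sum()
    return float(intersection) / union if union > 0 else 1.0


def _create_fp_mask(mask_gt: np.ndarray, mask_pred: np.ndarray) -> np.ndarray:
    # False positives: pixels marked in the prediction but absent from the ground truth.
    return np.logical_and(mask_pred, np.logical_not(mask_gt))


def _create_fn_mask(mask_gt: np.ndarray, mask_pred: np.ndarray) -> np.ndarray:
    # False negatives: ground-truth pixels that the prediction missed.
    return np.logical_and(mask_gt, np.logical_not(mask_pred))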
Example #2
def geometry_to_bitmap(geometry,
                       radius: int = 0,
                       crop_image_shape: tuple = None) -> list:
    """
    Args:
        geometry: geometry object implementing the 'draw', 'translate' and 'to_bbox' methods
        radius: half the thickness of drawn vector elements
        crop_image_shape: if not None, crop the resulting bitmap by this shape (HxW)
    Returns:
        A list with the resulting Bitmap geometry (may be empty after cropping)
    """

    thickness = radius + 1

    bbox = geometry.to_bbox()
    extended_bbox = Rectangle(top=bbox.top - radius,
                              left=bbox.left - radius,
                              bottom=bbox.bottom + radius,
                              right=bbox.right + radius)
    bitmap_data = np.full(shape=(extended_bbox.height, extended_bbox.width),
                          fill_value=False)
    geometry = geometry.translate(-extended_bbox.top, -extended_bbox.left)
    geometry.draw(bitmap_data, color=True, thickness=thickness)

    origin = PointLocation(extended_bbox.top, extended_bbox.left)
    bitmap_geometry = Bitmap(data=bitmap_data, origin=origin)
    if crop_image_shape is not None:
        crop_rect = Rectangle.from_size(*crop_image_shape)
        return bitmap_geometry.crop(crop_rect)
    return [bitmap_geometry]
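# Usage sketch (not part of the original source): convert a plain rectangle geometry into a
# Bitmap via the helper above. Assumes the usual top-level exports of the supervisely SDK
# (`import supervisely_lib as sly`); the coordinates and radius are illustrative only.
import supervisely_lib as sly

rect = sly.Rectangle(top=2, left=2, bottom=6, right=9)
[rect_bitmap] = geometry_to_bitmap(rect, radius=1)                     # a single Bitmap wrapped in a list
cropped = geometry_to_bitmap(rect, radius=1, crop_image_shape=(5, 5))  # a list; may be empty after cropping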
Example #3
    def test_to_contours(self):
        bitmap = Bitmap(data=np.array([[1, 1, 0, 1, 1, 1],
                                       [1, 1, 0, 1, 0, 1],
                                       [0, 0, 0, 1, 1, 1],
                                       [1, 0, 0, 1, 0, 1],
                                       [1, 0, 0, 1, 1, 1],
                                       [1, 0, 0, 0, 0, 0],
                                       [1, 0, 0, 1, 1, 1]], dtype=bool),
                        origin=PointLocation(10, 110))
        polygons = bitmap.to_contours()

        exteriors_points = [np.array([[10, 113], [14, 113], [14, 115], [10, 115]]),
                            np.array([[10, 110], [11, 110], [11, 111], [10, 111]])]

        interiors_points = [[],
                            [np.array([[13, 113], [12, 114], [13, 115], [14, 114]]),
                             np.array([[11, 113], [10, 114], [11, 115], [12, 114]])],
                           []]

        self.assertEqual(len(polygons), 2)
        for polygon, target_exterior, target_interiors in zip(polygons, exteriors_points, interiors_points):
            self.assertTrue(np.equal(polygon.exterior_np, target_exterior).all())
            self.assertTrue(all(np.equal(p_inter, t_inter).all()
                                for p_inter, t_inter in zip(polygon.interior_np, target_interiors)))
            json.dumps(polygon.to_json())
            self.assertIsInstance(polygon, Polygon)
Example #4
    def setUp(self):
        self.origin = PointLocation(0, 4)
        self.mask = np.array([[0, 0, 0, 1, 0, 0, 0],
                              [0, 0, 1, 1, 1, 0, 0],
                              [0, 1, 0, 1, 0, 1, 0],
                              [0, 0, 0, 1, 0, 0, 0],
                              [0, 0, 0, 1, 0, 0, 0],
                              [0, 0, 0, 1, 0, 0, 0],
                              [0, 0, 0, 1, 0, 0, 0]], dtype=bool)
        self.mask_no_margins = self.mask[:, 1:-1]
        self.bitmap = Bitmap(data=self.mask, origin=self.origin)
Example #5
def get_effective_nonoverlapping_masks(geometries, img_size=None):
    '''
    Resolve overlaps between the given geometries: each geometry keeps only the pixels not covered by any later one
    :param geometries: list of geometry objects (Point, Polygon, Polyline, Bitmap etc.)
    :param img_size: (height, width) tuple or list of integers; inferred from the geometries if None
    :return: list of Bitmap objects (None where a geometry is fully covered), and the numpy canvas of winner indices
    '''
    if img_size is None:
        if len(geometries) > 0:
            common_bbox = Rectangle.from_geometries_list(geometries)
            img_size = (common_bbox.bottom + 1, common_bbox.right + 1)
        else:
            img_size = (0, 0)
    canvas = np.full(shape=img_size,
                     fill_value=len(geometries),
                     dtype=np.int32)

    for idx, geometry in enumerate(geometries):
        geometry.draw(canvas, color=idx)
    result_masks = []
    for idx, geometry in enumerate(geometries):
        effective_indicator = (canvas == idx)
        if np.any(effective_indicator):
            result_masks.append(Bitmap(effective_indicator))
        else:
            result_masks.append(None)
    return result_masks, canvas
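# Usage sketch (not part of the original source): with two overlapping rectangles the later
# geometry is drawn last and therefore "wins" the shared pixels, so the earlier mask keeps
# only its uncovered part. Assumes `import supervisely_lib as sly` top-level exports.
import supervisely_lib as sly

geoms = [sly.Rectangle(0, 0, 5, 5), sly.Rectangle(3, 3, 8, 8)]
masks, canvas = get_effective_nonoverlapping_masks(geoms, img_size=(10, 10))
# canvas stores, per pixel, the index of the topmost geometry (len(geoms) marks background);
# masks[0] is the first rectangle minus the overlap, masks[1] is the second rectangle in full.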
Example #6
def infer_on_image(image,
                   graph,
                   model,
                   idx_to_class_title,
                   project_meta,
                   confidence_tag_meta,
                   min_confidence=0):
    with graph.as_default():
        [results] = model.detect([image], verbose=0)

    res_labels = []
    for mask_idx, class_id in enumerate(results['class_ids']):
        confidence = results['scores'][mask_idx]
        if confidence < min_confidence:
            continue
        bool_mask = results['masks'][:, :, mask_idx] != 0
        class_geometry = Bitmap(data=bool_mask)
        cls_title = idx_to_class_title[class_id]
        label = Label(geometry=class_geometry,
                      obj_class=project_meta.get_obj_class(cls_title))

        confidence_tag = Tag(confidence_tag_meta,
                             value=round(float(confidence), 4))
        label = label.add_tag(confidence_tag)
        res_labels.append(label)
    return res_labels
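# Sketch of the detection output this loop expects (Matterport-style Mask R-CNN, where
# model.detect() returns one dict per image). The shapes and values below are made up
# purely for illustration.
import numpy as np

h, w, n_instances = 4, 5, 2
fake_results = {
    'class_ids': np.array([1, 2]),                       # one class id per detected instance
    'scores': np.array([0.91, 0.42]),                    # per-instance confidence
    'masks': np.zeros((h, w, n_instances), dtype=bool),  # H x W x N stack of instance masks
}
fake_results['masks'][1:3, 1:4, 0] = True                # first instance occupies a small block
bool_mask = fake_results['masks'][:, :, 0] != 0          # same slicing as in the loop above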
Example #7
def extract_labels_from_mask(mask: np.ndarray, color_id_to_obj_class: collections.abc.Mapping) -> list:
    """
    Extract multiclass instances from a grayscale mask and return them as a list of labels.
    Args:
        mask: multiclass grayscale mask
        color_id_to_obj_class: dict mapping color ids to object classes (e.g. {1: ObjClass('cat'), ...})
    Returns:
        list of labels with bitmap geometry
    """
    zero_offset = 1 if 0 in color_id_to_obj_class else 0
    if zero_offset > 0:
        mask = mask + zero_offset

    labeled, labels_count = measure.label(mask, connectivity=1, return_num=True)
    objects_slices = ndimage.find_objects(labeled)
    labels = []

    for object_index, slices in enumerate(objects_slices, start=1):
        crop = mask[slices]
        sub_mask = crop * (labeled[slices] == object_index).astype(int)

        class_index = np.max(sub_mask) - zero_offset

        if class_index in color_id_to_obj_class:
            bitmap = Bitmap(data=sub_mask.astype(bool), origin=PointLocation(slices[0].start, slices[1].start))
            label = Label(geometry=bitmap, obj_class=color_id_to_obj_class.get(class_index))
            labels.append(label)
    return labels
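# Usage sketch (not part of the original source): a tiny two-class grayscale mask and the
# id -> ObjClass mapping the function expects. Assumes `import supervisely_lib as sly`
# top-level exports; class names are illustrative.
import numpy as np
import supervisely_lib as sly

toy_mask = np.array([[0, 1, 1, 0],
                     [0, 1, 1, 0],
                     [0, 0, 0, 2],
                     [0, 0, 0, 2]], dtype=np.int32)
color_id_to_obj_class = {1: sly.ObjClass('cat', sly.Bitmap), 2: sly.ObjClass('dog', sly.Bitmap)}
toy_labels = extract_labels_from_mask(toy_mask, color_id_to_obj_class)  # one Label per connected instance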
Example #8
    def test_from_json(self):
        packed_obj = {
            BITMAP: {
                DATA: 'eJzrDPBz5+WS4mJgYOD19HAJAtLsIMzIDCT/zTk6AUixBfiEuALp////L705/y6QxVgS5BfM4PDsRhqQI+j'
                      'p4hhSMSdZIGFDAkeiQIMDA7sVw125xatvACUZPF39XNY5JTQBADRqHJQ=',
                ORIGIN: [0, 1]
            }
        }
        bitmap = Bitmap.from_json(packed_obj)
        self.assertBitmapEquals(bitmap, 1, 1, self.mask_no_margins)
Example #9
    def to_segmentation_task(self):
        class_mask = {}
        for label in self.labels:
            if label.obj_class not in class_mask:
                class_mask[label.obj_class] = np.zeros(self.img_size, np.uint8)
            label.draw(class_mask[label.obj_class], color=255)
        new_labels = []
        for obj_class, white_mask in class_mask.items():
            mask = white_mask == 255
            bitmap = Bitmap(data=mask)
            new_labels.append(Label(geometry=bitmap, obj_class=obj_class))
        return self.clone(labels=new_labels)
Example #10
    def to_nonoverlapping_masks(self, mapping):
        common_img = np.zeros(self.img_size, np.int32)  # size is (h, w)
        for idx, lbl in enumerate(self.labels, start=1):
            if mapping[lbl.obj_class] is not None:
                lbl.draw(common_img, color=idx)
        (unique, counts) = np.unique(common_img, return_counts=True)
        new_labels = []
        for idx, lbl in enumerate(self.labels, start=1):
            dest_class = mapping[lbl.obj_class]
            if dest_class is None:
                continue  # skip labels
            mask = common_img == idx
            if np.any(mask):  # figure may be entirely covered by others
                g = lbl.geometry
                new_mask = Bitmap(data=mask)
                new_lbl = lbl.clone(geometry=new_mask, obj_class=dest_class)
                new_labels.append(new_lbl)
        new_ann = self.clone(labels=new_labels)
        return new_ann
Example #11
def segmentation_array_to_sly_bitmaps(idx_to_class: dict,
                                      pred: np.ndarray,
                                      origin: PointLocation = None) -> list:
    """
    Converts array with segmentation results to Labels with Bitmap geometry according to idx_to_class mapping.

    Args:
        idx_to_class: Dict matching values in prediction array with appropriate ObjClass.
        pred: Array containing raw segmentation results.
        origin: Origin point for all output Bitmaps.
    Returns:
        A list containing result labels.
    """
    labels = []
    for cls_idx, cls_obj in idx_to_class.items():
        predicted_class_pixels = (pred == cls_idx)
        if np.any(predicted_class_pixels):
            class_geometry = Bitmap(data=predicted_class_pixels, origin=origin)
            labels.append(Label(geometry=class_geometry, obj_class=cls_obj))
    return labels
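# Usage sketch (not part of the original source): turn a small class-index map into one
# Bitmap label per class that actually occurs. Assumes `import supervisely_lib as sly`
# top-level exports; class names are illustrative.
import numpy as np
import supervisely_lib as sly

pred = np.array([[0, 0, 1],
                 [0, 1, 1],
                 [2, 2, 0]])
idx_to_class = {1: sly.ObjClass('person', sly.Bitmap), 2: sly.ObjClass('car', sly.Bitmap)}
pred_labels = segmentation_array_to_sly_bitmaps(idx_to_class, pred)  # two labels: 'person' and 'car'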
Example #12
    def from_imgaug(cls,
                    img,
                    ia_boxes=None,
                    ia_masks=None,
                    index_to_class=None,
                    meta: ProjectMeta = None):
        if ((ia_boxes is not None) or (ia_masks is not None)) and meta is None:
            raise ValueError("Project meta has to be provided")

        labels = []
        if ia_boxes is not None:
            for ia_box in ia_boxes:
                obj_class = meta.get_obj_class(ia_box.label)
                if obj_class is None:
                    raise KeyError(
                        "Class {!r} not found in project meta".format(
                            ia_box.label))
                lbl = Label(
                    Rectangle(top=ia_box.y1,
                              left=ia_box.x1,
                              bottom=ia_box.y2,
                              right=ia_box.x2), obj_class)
                labels.append(lbl)

        if ia_masks is not None:
            if index_to_class is None:
                raise ValueError(
                    "mapping from index to class name is needed to transform masks to SLY format"
                )
            class_mask = ia_masks.get_arr()
            (unique, counts) = np.unique(class_mask, return_counts=True)
            for index, count in zip(unique, counts):
                if index == 0:
                    continue
                mask = class_mask == index
                bitmap = Bitmap(data=mask[:, :, 0])
                restore_class = meta.get_obj_class(index_to_class[index])
                labels.append(Label(geometry=bitmap, obj_class=restore_class))

        return cls(img_size=img.shape[:2], labels=labels)
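# Usage sketch (not part of the original source): build the inputs this classmethod expects
# from imgaug bounding boxes. Assumes the modern SDK layout (`import supervisely as sly`)
# where this classmethod is attached to Annotation, and that the project meta already
# contains a matching Rectangle class; all names and values are illustrative.
import numpy as np
import supervisely as sly
from imgaug.augmentables.bbs import BoundingBox

meta = sly.ProjectMeta(obj_classes=sly.ObjClassCollection([sly.ObjClass('car', sly.Rectangle)]))
img = np.zeros((100, 200, 3), dtype=np.uint8)
ia_boxes = [BoundingBox(x1=10, y1=20, x2=60, y2=80, label='car')]
ann = sly.Annotation.from_imgaug(img, ia_boxes=ia_boxes, meta=meta)  # one Rectangle label named 'car'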
Example #14
class BitmapTest(unittest.TestCase):
    def setUp(self):
        self.origin = PointLocation(0, 4)
        self.mask = np.array([[0, 0, 0, 1, 0, 0, 0],
                              [0, 0, 1, 1, 1, 0, 0],
                              [0, 1, 0, 1, 0, 1, 0],
                              [0, 0, 0, 1, 0, 0, 0],
                              [0, 0, 0, 1, 0, 0, 0],
                              [0, 0, 0, 1, 0, 0, 0],
                              [0, 0, 0, 1, 0, 0, 0]], dtype=bool)
        self.mask_no_margins = self.mask[:, 1:-1]
        self.bitmap = Bitmap(data=self.mask, origin=self.origin)

    def assertBitmapEquals(self, bitmap, origin_row, origin_col, mask):
        self.assertIsInstance(bitmap, Bitmap)
        self.assertEqual(bitmap.origin.row, origin_row)
        self.assertEqual(bitmap.origin.col, origin_col)
        self.assertListEqual(bitmap.data.tolist(), mask.tolist())

    def test_rotate(self):
        in_size = (15, 15)
        rotator = ImageRotator(imsize=in_size, angle_degrees_ccw=90)
        res_bitmap = self.bitmap.rotate(rotator)
        expected_mask = np.array([[0, 0, 1, 0, 0, 0, 0],
                                  [0, 1, 0, 0, 0, 0, 0],
                                  [1, 1, 1, 1, 1, 1, 1],
                                  [0, 1, 0, 0, 0, 0, 0],
                                  [0, 0, 1, 0, 0, 0, 0]], dtype=bool)
        self.assertListEqual(res_bitmap.data.tolist(), expected_mask.tolist())

    def test_empty_crop(self):
        crop_rect = Rectangle(0, 0, 4, 4)
        res_geoms = self.bitmap.crop(crop_rect)
        self.assertEqual(len(res_geoms), 0)

    def test_crop(self):  # @TODO: maybe drop the compress step during cropping
        crop_rect = Rectangle(0, 0, 8, 8)
        res_geoms = self.bitmap.crop(crop_rect)
        self.assertEqual(len(res_geoms), 1)
        res_bitmap = res_geoms[0]
        res_mask = np.array([[0, 0, 1, 0],
                             [0, 1, 1, 1],
                             [1, 0, 1, 0],
                             [0, 0, 1, 0],
                             [0, 0, 1, 0],
                             [0, 0, 1, 0],
                             [0, 0, 1, 0]], dtype=bool)
        self.assertBitmapEquals(res_bitmap, 0, 5, res_mask)

    def test_translate(self):
        drows = 10
        dcols = 7
        res_bitmap = self.bitmap.translate(drows, dcols)
        self.assertBitmapEquals(res_bitmap, 10, 12, self.mask_no_margins)

    def test_area(self):
        area = self.bitmap.area
        self.assertIsInstance(area, float)
        self.assertEqual(area, 11)

    def test_to_bbox(self):
        res_rect = self.bitmap.to_bbox()
        self.assertIsInstance(res_rect, Rectangle)
        self.assertEqual(res_rect.top, 0)
        self.assertEqual(res_rect.left, 5)
        self.assertEqual(res_rect.right, 9)
        self.assertEqual(res_rect.bottom, 6)

    def test_from_json(self):
        packed_obj = {
            BITMAP: {
                DATA: 'eJzrDPBz5+WS4mJgYOD19HAJAtLsIMzIDCT/zTk6AUixBfiEuALp////L705/y6QxVgS5BfM4PDsRhqQI+j'
                      'p4hhSMSdZIGFDAkeiQIMDA7sVw125xatvACUZPF39XNY5JTQBADRqHJQ=',
                ORIGIN: [0, 1]
            }
        }
        bitmap = Bitmap.from_json(packed_obj)
        self.assertBitmapEquals(bitmap, 1, 1, self.mask_no_margins)

    def test_resize(self):
        in_size = (20, 20)
        out_size = (40, 40)
        res_bitmap = self.bitmap.resize(in_size, out_size)
        self.assertIsInstance(res_bitmap, Bitmap)

    def test_flipud(self):
        im_size = (20, 20)
        res_bitmap = self.bitmap.flipud(im_size)
        expected_mask = np.array([[0, 0, 1, 0, 0],
                                  [0, 0, 1, 0, 0],
                                  [0, 0, 1, 0, 0],
                                  [0, 0, 1, 0, 0],
                                  [1, 0, 1, 0, 1],
                                  [0, 1, 1, 1, 0],
                                  [0, 0, 1, 0, 0]], dtype=bool)
        self.assertBitmapEquals(res_bitmap, 13, 5, expected_mask)

    def test_to_json(self):
        obj = self.bitmap.to_json()
        expected_dict = {
            BITMAP: {
                DATA: 'eJzrDPBz5+WS4mJgYOD19HAJAtKsQMzOyAwkf2WKrgVSbAE+Ia5A+v///0tvzr8LZDGWBPkFMzg8u5EG5Ah'
                      '6ujiGVMxJDkjQSIg4uIChkYEvjXHncvfAUqAkg6ern8s6p4QmAAVvHAE=',
                ORIGIN: [5, 0]
            }
        }
        self.assertDictEqual(obj, expected_dict)