Example no. 1
def geometry_to_bitmap(geometry,
                       radius: int = 0,
                       crop_image_shape: tuple = None) -> list:
    """
    Args:
        geometry: Geometry type which implemented 'draw', 'translate' and 'to_bbox` methods
        radius: half of thickness of drawed vector elements
        crop_image_shape: if not None - crop bitmap object by this shape (HxW)
    Returns:
        Bitmap (geometry) object
    """

    thickness = radius + 1

    bbox = geometry.to_bbox()
    extended_bbox = Rectangle(top=bbox.top - radius,
                              left=bbox.left - radius,
                              bottom=bbox.bottom + radius,
                              right=bbox.right + radius)
    bitmap_data = np.full(shape=(extended_bbox.height, extended_bbox.width),
                          fill_value=False)
    geometry = geometry.translate(-extended_bbox.top, -extended_bbox.left)
    geometry.draw(bitmap_data, color=True, thickness=thickness)

    origin = PointLocation(extended_bbox.top, extended_bbox.left)
    bitmap_geometry = Bitmap(data=bitmap_data, origin=origin)
    if crop_image_shape is not None:
        crop_rect = Rectangle.from_size(*crop_image_shape)
        return bitmap_geometry.crop(crop_rect)
    return [bitmap_geometry]
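For orientation, a minimal usage sketch with hypothetical inputs (any geometry that exposes draw, translate and to_bbox should work the same way):

geom = Rectangle(top=10, left=10, bottom=10, right=50)     # stands in for any supported geometry
results = geometry_to_bitmap(geom, radius=2, crop_image_shape=(480, 640))
# results is a list with (at most) one Bitmap covering the rectangle thickened by `radius` pixels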
Example no. 2
def crop(img: np.ndarray, rect: Rectangle) -> np.ndarray:
    '''
    Crops out the part of the image covered by the given rectangle. Raises a ValueError if the
    rectangle lies outside of the image area.
    :param img: image (numpy array) to be cropped
    :param rect: Rectangle object defining the crop area
    :return: cropped image
    '''
    img_rect = Rectangle.from_array(img)
    if not img_rect.contains(rect):
        raise ValueError('Rectangle for crop out of image area!')
    return rect.get_cropped_numpy_slice(img)
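A quick hedged sketch with synthetic data (the array shape and rectangle are made up for illustration):

import numpy as np

img = np.zeros((100, 200, 3), dtype=np.uint8)
patch = crop(img, Rectangle(top=10, left=20, bottom=49, right=99))
# Rectangle bounds are inclusive, so patch.shape == (40, 80, 3)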
Example no. 3
    def get_add_padding(self, source_shape):
        source_rect = Rectangle.from_size(source_shape)
        window_rect = Rectangle.from_size(self.window_shape)
        if not source_rect.contains(window_rect):
            raise RuntimeError(
                'Sliding window: window is larger than source (image).')

        hw_limit = tuple(source_shape[i] - self.window_shape[i]
                         for i in (0, 1))
        for wind_top in range(0, hw_limit[0] + self.stride[0], self.stride[0]):
            for wind_left in range(0, hw_limit[1] + self.stride[1],
                                   self.stride[1]):
                roi = window_rect.translate(drow=wind_top, dcol=wind_left)
                yield roi
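As a sanity check, the offsets the generator above would produce for a 6x6 source, a 4x4 window and a stride of 2 (a hypothetical walk-through; window_shape and stride are assumed to be (height, width) tuples on the owning class):

source_shape, window_shape, stride = (6, 6), (4, 4), (2, 2)
hw_limit = (source_shape[0] - window_shape[0], source_shape[1] - window_shape[1])   # (2, 2)
tops = list(range(0, hw_limit[0] + stride[0], stride[0]))    # [0, 2]
lefts = list(range(0, hw_limit[1] + stride[1], stride[1]))   # [0, 2]
# Four 4x4 ROIs with top-left corners (0, 0), (0, 2), (2, 0), (2, 2);
# Rectangle coordinates are inclusive, so each ROI spans Rectangle(top, left, top + 3, left + 3).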
Example no. 4
 def test_crop(self):
     crop_rect = Rectangle(0, 0, 100, 100)
     res_geoms = self.rect.crop(crop_rect)
     self.assertEqual(len(res_geoms), 1)
     res_rect = res_geoms[0]
     self.assertRectEquals(res_rect, self.rect.top, self.rect.left,
                           self.rect.bottom, self.rect.right)
Example no. 5
 def to_bbox(self):
     '''
     Creates a Rectangle bounding box from the current Bitmap object.
     :return: Rectangle object
     '''
     return Rectangle.from_array(self._data).translate(
         drow=self._origin.row, dcol=self._origin.col)
Example no. 6
def detection_preds_to_sly_rects(
        idx_to_class, network_prediction: DetectionNetworkPrediction,
        img_shape, min_score_threshold, score_tag_meta) -> list:
    """
    Converts network detection results to Supervisely Labels with Rectangle geometry.

    Args:
        idx_to_class: Dict mapping predicted class indices to the corresponding ObjClass.
        network_prediction: Network predictions packed into a DetectionNetworkPrediction instance.
        img_shape: Size (height, width) of the image that was used for inference.
        min_score_threshold: Detections with lower scores will be dropped.
        score_tag_meta: TagMeta instance for score tags.
    Returns:
        A list containing labels with detection rectangles.
    """
    labels = []
    thr_mask = np.squeeze(network_prediction.scores) > min_score_threshold
    for box, class_id, score in zip(
            np.squeeze(network_prediction.boxes)[thr_mask],
            np.squeeze(network_prediction.classes)[thr_mask],
            np.squeeze(network_prediction.scores)[thr_mask]):

        xmin = round(float(box[1] * img_shape[1]))
        ymin = round(float(box[0] * img_shape[0]))
        xmax = round(float(box[3] * img_shape[1]))
        ymax = round(float(box[2] * img_shape[0]))

        rect = Rectangle(top=ymin, left=xmin, bottom=ymax, right=xmax)
        class_obj = idx_to_class[int(class_id)]
        label = Label(geometry=rect, obj_class=class_obj)

        score_tag = Tag(score_tag_meta, value=round(float(score), 4))
        label = label.add_tag(score_tag)
        labels.append(label)
    return labels
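A hypothetical walk-through of the coordinate conversion above, assuming boxes are normalized [ymin, xmin, ymax, xmax] and img_shape is (height, width):

img_shape = (480, 640)
box = [0.10, 0.25, 0.50, 0.75]
xmin = round(box[1] * img_shape[1])   # 160
ymin = round(box[0] * img_shape[0])   # 48
xmax = round(box[3] * img_shape[1])   # 480
ymax = round(box[2] * img_shape[0])   # 240
# -> Rectangle(top=48, left=160, bottom=240, right=480)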
Example no. 7
def get_effective_nonoverlapping_masks(geometries, img_size=None):
    '''
    Finds the non-overlapping parts of the objects in the given list of geometries.
    :param geometries: list of geometry objects (Point, Polygon, PolyLine, Bitmap, etc.)
    :param img_size: tuple or list of integers (height, width)
    :return: list of bitmaps (None where an object is completely covered) and a numpy array holding the effective object index for every pixel
    '''
    if img_size is None:
        if len(geometries) > 0:
            common_bbox = Rectangle.from_geometries_list(geometries)
            img_size = (common_bbox.bottom + 1, common_bbox.right + 1)
        else:
            img_size = (0, 0)
    canvas = np.full(shape=img_size,
                     fill_value=len(geometries),
                     dtype=np.int32)

    for idx, geometry in enumerate(geometries):
        geometry.draw(canvas, color=idx)
    result_masks = []
    for idx, geometry in enumerate(geometries):
        effective_indicator = (canvas == idx)
        if np.any(effective_indicator):
            result_masks.append(Bitmap(effective_indicator))
        else:
            result_masks.append(None)
    return result_masks, canvas
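A small hedged usage sketch with two overlapping rectangles (assuming Rectangle.draw fills the rectangle area, as the other examples suggest):

geoms = [Rectangle(0, 0, 4, 4), Rectangle(2, 2, 7, 7)]
masks, canvas = get_effective_nonoverlapping_masks(geoms, img_size=(10, 10))
# masks[0] covers rows/cols 0..4 minus the 2..4 overlap, masks[1] covers rows/cols 2..7,
# and canvas holds 0, 1 or 2 (= len(geoms), the background value) for every pixel.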
Example no. 8
def _rectangle_from_cropping_or_padding_bounds(img_shape, crop_config,
                                               do_crop: bool):
    def get_crop_pixels(raw_side, dim_name):
        side_crop_config = crop_config.get(dim_name)
        if side_crop_config is None:
            crop_pixels = 0
        elif side_crop_config.endswith(PX):
            crop_pixels = int(side_crop_config[:-len(PX)])
        elif side_crop_config.endswith(PERCENT):
            padding_fraction = float(side_crop_config[:-len(PERCENT)])
            crop_pixels = int(raw_side * padding_fraction / 100.0)
        else:
            raise ValueError(
                'Unknown padding size format: {}. Expected absolute values as "5px" or relative as "5%"'
                .format(side_crop_config))
        if not do_crop:
            crop_pixels *= -1  # Pad instead of crop.
        return crop_pixels

    # TODO more informative error message.
    return Rectangle(
        top=get_crop_pixels(img_shape[0], TOP),
        left=get_crop_pixels(img_shape[1], LEFT),
        bottom=img_shape[0] - get_crop_pixels(img_shape[0], BOTTOM) - 1,
        right=img_shape[1] - get_crop_pixels(img_shape[1], RIGHT) - 1)
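A hypothetical walk-through of the size parsing above, assuming TOP/LEFT/BOTTOM/RIGHT, PX and PERCENT are the plain strings used in the crop config (e.g. 'top', 'px', '%'):

img_shape = (100, 200)                           # H x W
# crop_config = {'top': '10px', 'left': '25%'}, do_crop=True
top_pixels = 10                                  # '10px' -> 10 rows cropped from the top
left_pixels = int(200 * 25 / 100.0)              # '25%' of the width -> 50 columns
# bottom/right not given -> 0, so the result is
# Rectangle(top=10, left=50, bottom=100 - 0 - 1, right=200 - 0 - 1) == Rectangle(10, 50, 99, 199)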
Example no. 9
 def test_crop(self):
     crop_rect = Rectangle(top=1, left=0, bottom=10, right=4)
     res_geoms = self.bitmap.crop(crop_rect)
     self.assertEqual(len(res_geoms), 1)
     res_bitmap = res_geoms[0]
     res_data = np.array([[[0.6, 0.7]]], dtype=np.float64)
     self.assertMultichannelBitmapEquals(res_bitmap, 1, 4, res_data)
Example no. 10
 def test_crop(self):
     crop_rect = Rectangle(25, 0, 200, 200)
     res_geoms = self.poly.crop(crop_rect)
     self.assertEqual(len(res_geoms), 1)
     crop = res_geoms[0]
     self.assertPolyEquals(crop, [[10, 25], [20, 25], [20, 30], [30, 30],
                                  [30, 25], [35, 25], [30, 40], [10, 30]],
                           [])
Example no. 11
 def test_relative_crop(self):
     crop_rect = Rectangle(25, 0, 200, 200)
     res_geoms = self.poly.relative_crop(crop_rect)
     self.assertEqual(len(res_geoms), 1)
     crop = res_geoms[0]
     self.assertPolyEquals(crop,
                           [[10, 0], [20, 0], [20, 5], [30, 5], [30, 0],
                            [35, 0], [30, 15], [10, 5]], [])
Example no. 12
 def to_bbox(self):
     '''
     Creates a Rectangle bounding box from the current GraphNodes object.
     :return: Rectangle object
     '''
     return Rectangle.from_geometries_list([
         Point.from_point_location(node.location)
         for node in self._nodes.values()
     ])
Example no. 13
 def to_bbox(self):
     '''
     Creates a Rectangle bounding box from the current Point object.
     :return: Rectangle object
     '''
     return Rectangle(top=self.row,
                      left=self.col,
                      bottom=self.row,
                      right=self.col)
Example no. 14
 def test_from_json(self):
     packed_obj = {
         'some_stuff': 'aaa',
         POINTS: {
             EXTERIOR: [[17, 3], [34, 45]],
             INTERIOR: []
         }
     }
     res_rect = Rectangle.from_json(packed_obj)
     self.assertRectEquals(res_rect, 3, 17, 45, 34)
Example no. 15
 def crop(self, rect: Rectangle):
     '''
     Returns a list containing the graph if all of its nodes lie inside the given rectangle,
     and an empty list otherwise.
     :param rect: Rectangle object
     :return: list containing the GraphNodes object, or an empty list
     '''
     is_all_nodes_inside = all(
         rect.contains_point_location(node.location)
         for node in self._nodes.values())
     return [self] if is_all_nodes_inside else []
Example no. 16
 def to_bbox(self):
     '''
     Creates a Rectangle bounding box from the current Cuboid object.
     :return: Rectangle object
     '''
     points_np = np.array([[self._points[p].row, self._points[p].col]
                           for face in self._faces for p in face.tolist()])
     rows, cols = points_np[:, 0], points_np[:, 1]
     return Rectangle(top=round(min(rows).item()), left=round(min(cols).item()), bottom=round(max(rows).item()),
                      right=round(max(cols).item()))
Example no. 17
 def test_crop(self):  # @TODO: mb delete compress while cropping
     crop_rect = Rectangle(0, 0, 8, 8)
     res_geoms = self.bitmap.crop(crop_rect)
     self.assertEqual(len(res_geoms), 1)
     res_bitmap = res_geoms[0]
     res_mask = np.array(
         [[0, 0, 1, 0], [0, 1, 1, 1], [1, 0, 1, 0], [0, 0, 1, 0],
          [0, 0, 1, 0], [0, 0, 1, 0], [0, 0, 1, 0]],
         dtype=bool)  # np.bool was removed from NumPy; the built-in bool is equivalent
     self.assertBitmapEquals(res_bitmap, 0, 5, res_mask)
Example no. 18
    def get_change_size(self, source_shape):
        source_rect = Rectangle.from_size(source_shape)
        window_rect = Rectangle.from_size(self.window_shape)
        if not source_rect.contains(window_rect):
            raise RuntimeError(
                'Sliding window: window is larger than source (image).')

        hw_limit = tuple(source_shape[i] - self.window_shape[i]
                         for i in (0, 1))
        for wind_top in range(0, hw_limit[0] + self.stride[0], self.stride[0]):
            for wind_left in range(0, hw_limit[1] + self.stride[1],
                                   self.stride[1]):
                wind_bottom = min(wind_top + self.stride[0], source_shape[0])
                wind_right = min(wind_left + self.stride[1], source_shape[1])
                roi = Rectangle(wind_top, wind_left, wind_bottom - 1,
                                wind_right - 1)
                if not source_rect.contains(roi):
                    raise RuntimeError(
                        'Sliding window: result crop bounds are invalid.')
                yield roi
Example no. 19
 def to_bbox(self):
     '''
     Creates a Rectangle bounding box from the current VectorGeometry object.
     :return: Rectangle object
     '''
     exterior_np = self.exterior_np
     rows, cols = exterior_np[:, 0], exterior_np[:, 1]
     return Rectangle(top=round(min(rows).item()),
                      left=round(min(cols).item()),
                      bottom=round(max(rows).item()),
                      right=round(max(cols).item()))
Example no. 20
 def _add_labels_impl(self, dest, labels):
     '''
     Extends the destination list with the given labels, cropping each label to the canvas
     (image size) of the current Annotation object.
     :param dest: destination list of Label objects
     :param labels: list of Label objects to be added to the destination list
     '''
     for label in labels:
         # TODO Reconsider silent automatic normalization, reimplement
         canvas_rect = Rectangle.from_size(self.img_size)
         dest.extend(label.crop(canvas_rect))
Example no. 21
def _find_mask_tight_bbox(raw_mask: np.ndarray) -> Rectangle:
    # Redundant conversion to list to help PyCharm static analysis.
    rows = list(np.any(raw_mask, axis=1).tolist())
    cols = list(np.any(raw_mask, axis=0).tolist())
    top_margin = rows.index(True)
    bottom_margin = rows[::-1].index(True)
    left_margin = cols.index(True)
    right_margin = cols[::-1].index(True)
    return Rectangle(top=top_margin,
                     left=left_margin,
                     bottom=len(rows) - 1 - bottom_margin,
                     right=len(cols) - 1 - right_margin)
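A small check of the tight-bbox logic above, with a hypothetical mask:

import numpy as np

mask = np.zeros((5, 6), dtype=bool)
mask[1:3, 2:5] = True
# Rows 1-2 and columns 2-4 contain True pixels, so
# _find_mask_tight_bbox(mask) == Rectangle(top=1, left=2, bottom=2, right=4)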
Example no. 22
    def _calc_inner_crop(self):
        """
        Given a rectangle of self.src_imsize HxW that has been rotated by
        self.angle_degrees_ccw (in degrees), computes the location of the
        largest possible axis-aligned rectangle within the rotated rectangle.
        """

        # TODO This needs significant streamlining.
        a_ccw = np.deg2rad(self.angle_degrees_ccw)
        quadrant = math.floor(a_ccw / (math.pi / 2)) & 3
        sign_alpha = a_ccw if ((quadrant & 1) == 0) else math.pi - a_ccw
        alpha = (sign_alpha % math.pi + math.pi) % math.pi

        h, w = self.src_imsize
        bb_w = w * math.cos(alpha) + h * math.sin(alpha)
        bb_h = w * math.sin(alpha) + h * math.cos(alpha)

        # Note: both branches compute atan2(bb_w, bb_w), which is always pi/4 for bb_w > 0.
        gamma = math.atan2(bb_w, bb_w) if (w < h) else math.atan2(bb_w, bb_w)

        delta = math.pi - alpha - gamma

        length = h if (w < h) else w

        d = length * math.cos(alpha)
        a = d * math.sin(alpha) / math.sin(delta)

        y = a * math.cos(gamma)
        x = y * math.tan(gamma)

        largest_w, largest_h = bb_w - 2 * x, bb_h - 2 * y

        new_h, new_w = self.new_imsize
        left = round((new_w - largest_w) * 0.5)
        right = round((new_w + largest_w) * 0.5)
        top = round((new_h - largest_h) * 0.5)
        bottom = round((new_h + largest_h) * 0.5)
        some_inner_crop = Rectangle(top, left, bottom, right)
        new_img_bbox = Rectangle(0, 0, self.new_imsize[0] - 1,
                                 self.new_imsize[1] - 1)
        self.inner_crop = new_img_bbox.crop(some_inner_crop)[0]
Example no. 23
 def _all_filtered_bbox_rois(ann: Annotation, included_classes,
                             crop_config: dict):
     for src_label in ann.labels:
         effective_roi = None
         if is_name_included(src_label.obj_class.name, included_classes):
             bbox = src_label.geometry.to_bbox()
             roi = _make_padded_rectangle((bbox.height, bbox.width),
                                          crop_config)
             maybe_effective_roi = roi.translate(
                 drow=bbox.top,
                 dcol=bbox.left).crop(Rectangle.from_size(ann.img_size))
             if len(maybe_effective_roi) > 0:
                 [effective_roi] = maybe_effective_roi
         yield src_label, effective_roi
Example no. 24
def get_geometries_iou(geometry_1: Geometry, geometry_2: Geometry):
    if isinstance(geometry_1, Rectangle):
        return get_iou_rect(geometry_1, geometry_2)
    elif isinstance(geometry_2, Rectangle):
        return get_iou_rect(geometry_2, geometry_1)
    else:
        common_bbox = Rectangle.from_geometries_list((geometry_1, geometry_2))
        g1 = geometry_1.relative_crop(common_bbox)[0]
        g2 = geometry_2.relative_crop(common_bbox)[0]
        mask_1 = np.full(common_bbox.to_size(), False)
        g1.draw(mask_1, color=True)
        mask_2 = np.full(common_bbox.to_size(), False)
        g2.draw(mask_2, color=True)
        return safe_ratio((mask_1 & mask_2).sum(), (mask_1 | mask_2).sum())
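For intuition on the mask-based branch above, here is the same IoU arithmetic in plain NumPy, independent of the library types:

import numpy as np

mask_1 = np.zeros((10, 10), dtype=bool); mask_1[0:6, 0:6] = True   # 36 pixels
mask_2 = np.zeros((10, 10), dtype=bool); mask_2[3:9, 3:9] = True   # 36 pixels
iou = (mask_1 & mask_2).sum() / (mask_1 | mask_2).sum()            # 9 / 63 ≈ 0.143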
Example no. 25
def add_background(ann: Annotation, bg_class: ObjClass) -> Annotation:
    """
    Adds a background rectangle (with size equal to the image size) to the annotation.

    Args:
        ann: Input annotation.
        bg_class: ObjClass instance for background class label.
    Returns:
        Annotation with added background rectangle.
    """
    img_size = ann.img_size
    rect = Rectangle(0, 0, img_size[0] - 1, img_size[1] - 1)
    new_label = Label(rect, bg_class)
    return ann.add_label(new_label)
Example no. 26
    def test_crop_by_border(self):
        exterior = [[10, 10], [40, 10], [40, 40], [10, 40]]
        interiors = [[[11, 11], [11, 20], [20, 11]],
                     [[20, 20], [21, 20], [20, 21]]]
        poly = Polygon(exterior=row_col_list_to_points(
            exterior, flip_row_col_order=True),
                       interior=[
                           row_col_list_to_points(interior,
                                                  flip_row_col_order=True)
                           for interior in interiors
                       ])

        crop_rect = Rectangle(0, 0, 100, 10)
        res_geoms = poly.crop(crop_rect)
        self.assertEqual(len(res_geoms), 0)
Example no. 27
def crop_with_padding(img: np.ndarray, rect: Rectangle) -> np.ndarray:
    '''
    Crops out the part of the image covered by the given rectangle. If the rectangle extends
    beyond the image area, the image is padded with a constant border before cropping.
    :param img: image (numpy array) to be cropped
    :param rect: Rectangle object defining the crop area
    :return: cropped image
    '''
    img_rect = Rectangle.from_array(img)
    if not img_rect.contains(rect):
        row, col = img.shape[:2]
        new_img = cv2.copyMakeBorder(
            img,
            top=rect.height,
            bottom=rect.height,
            left=rect.width,
            right=rect.width,
            borderType=cv2.BORDER_CONSTANT
        )
        new_rect = rect.translate(drow=rect.height, dcol=rect.width)
        return new_rect.get_cropped_numpy_slice(new_img)

    else:
        return rect.get_cropped_numpy_slice(img)
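A hedged walk-through of the padding branch above with hypothetical sizes (negative coordinates are assumed to be valid Rectangle bounds):

import numpy as np

img = np.zeros((100, 200, 3), dtype=np.uint8)
rect = Rectangle(top=-10, left=-10, bottom=109, right=209)   # 120 x 220, sticks out on all sides
# copyMakeBorder adds rect.height (120) rows on top/bottom and rect.width (220) columns on
# the left/right, giving a 340 x 640 canvas; the translated Rectangle(110, 210, 229, 429)
# then fits, so crop_with_padding(img, rect).shape == (120, 220, 3).
patch = crop_with_padding(img, rect)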
Example no. 28
def _get_annotation_for_bbox(img: np.ndarray, roi: Rectangle,
                             model) -> Annotation:
    """Runs inference within the given roi; moves resulting figures to global reference frame."""
    img_cropped = roi.get_cropped_numpy_slice(img)
    # TODO pass through image and parent figure tags via roi_ann.
    roi_ann = Annotation(img_size=(roi.height, roi.width))
    raw_result_ann = model.inference(img_cropped, roi_ann)
    return Annotation(img_size=img.shape[:2],
                      labels=[
                          label.translate(drow=roi.top, dcol=roi.left)
                          for label in raw_result_ann.labels
                      ],
                      img_tags=raw_result_ann.img_tags,
                      img_description=raw_result_ann.img_description,
                      pixelwise_scores_labels=[
                          label.translate(drow=roi.top, dcol=roi.left)
                          for label in raw_result_ann.pixelwise_scores_labels
                      ])
Example no. 29
    def validate_bounds(self, img_size, _auto_correct=False):
        '''
        Checks that the figure fits inside an image of the given size. Raises an error if the
        figure is out of the image bounds.
        :param img_size: tuple or list of integers (height, width)
        :param _auto_correct: bool; if True, replaces the geometry with its crop to the image canvas
        '''
        canvas_rect = Rectangle.from_size(img_size)
        if canvas_rect.contains(self.geometry.to_bbox()) is False:
            raise OutOfImageBoundsExtension("Figure is out of image bounds")

        if _auto_correct is True:
            geometries_after_crop = [
                cropped_geometry
                for cropped_geometry in self.geometry.crop(canvas_rect)
            ]
            if len(geometries_after_crop) != 1:
                raise OutOfImageBoundsExtension(
                    "Several geometries after crop")
            self._set_geometry_inplace(geometries_after_crop[0])
Example no. 30
    def test_complex_crop(self):
        # Crop generates a GeometryCollection here
        exterior = [[0, 0], [0, 3], [5, 8], [5, 9], [5, 10], [0, 15], [10, 20],
                    [0, 25], [20, 25], [20, 0]]
        interiors = [[[2, 2], [4, 4], [4, 2]]]

        poly = Polygon(exterior=row_col_list_to_points(
            exterior, flip_row_col_order=True),
                       interior=[
                           row_col_list_to_points(interior,
                                                  flip_row_col_order=True)
                           for interior in interiors
                       ])

        crop_rect = Rectangle(0, 0, 30, 5)
        res_geoms = poly.crop(crop_rect)
        self.assertEqual(len(res_geoms), 3)
        self.assertPolyEquals(res_geoms[0], [[0, 0], [5, 0], [5, 8], [0, 3]],
                              interiors)