Example #1
    def _make_final_ann(self, result_ann):
        # Strip labels that only make sense to the backend before the
        # annotation is handed back to the frontend; everything else is kept.
        frontend_compatible_labels = _remove_backend_only_labels(
            result_ann.labels)
        return Annotation(
            img_size=result_ann.img_size,
            labels=frontend_compatible_labels,
            img_tags=result_ann.img_tags,
            img_description=result_ann.img_description,
            pixelwise_scores_labels=result_ann.pixelwise_scores_labels)
Example #2
def _get_annotation_for_bbox(img: np.ndarray, roi: Rectangle,
                             model) -> Annotation:
    """Runs inference within the given roi; moves resulting figures to global reference frame."""
    img_cropped = roi.get_cropped_numpy_slice(img)
    # TODO pass through image and parent figure tags via roi_ann.
    roi_ann = Annotation(img_size=(roi.height, roi.width))
    raw_result_ann = model.inference(img_cropped, roi_ann)
    return Annotation(img_size=img.shape[:2],
                      labels=[
                          label.translate(drow=roi.top, dcol=roi.left)
                          for label in raw_result_ann.labels
                      ],
                      img_tags=raw_result_ann.img_tags,
                      img_description=raw_result_ann.img_description,
                      pixelwise_scores_labels=[
                          label.translate(drow=roi.top, dcol=roi.left)
                          for label in raw_result_ann.pixelwise_scores_labels
                      ])
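
A minimal sketch of how Example #2 could be exercised, assuming the SDK is importable as supervisely_lib, that _get_annotation_for_bbox from above is in scope, and that a model is anything exposing inference(img, ann); the _IdentityModel stub below is hypothetical and simply returns an empty result:

import numpy as np
import supervisely_lib as sly

class _IdentityModel:
    # Hypothetical stand-in for a real model: returns an empty annotation
    # sized to whatever crop it receives.
    def inference(self, img, ann):
        return sly.Annotation(img_size=img.shape[:2])

img = np.zeros((480, 640, 3), dtype=np.uint8)   # dummy full-size image
roi = sly.Rectangle(100, 200, 299, 399)         # top, left, bottom, right -> 200x200 ROI
result = _get_annotation_for_bbox(img, roi, _IdentityModel())
# Any labels (none here) come back translated into full-image coordinates.
assert result.img_size == (480, 640)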
Example #3
    def _set_item(self,
                  name,
                  title,
                  image_url,
                  ann: Union[Annotation, dict] = None):
        setattr(self, f"_{name}_title", title)
        setattr(self, f"_{name}_image_url", image_url)
        # Default to an empty 1x1 placeholder annotation when none is given.
        res_ann = Annotation((1, 1))
        if ann is not None:
            # Accept either serialized JSON or an Annotation instance.
            if type(ann) is dict:
                res_ann = Annotation.from_json(ann, self._project_meta)
            else:
                res_ann = ann.clone()
        setattr(self, f"_{name}_ann", res_ann)
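
Example #3 (and Example #5 below) uses the same normalization idiom: accept either a serialized dict or an Annotation instance and always end up holding an independent Annotation. A stand-alone sketch of that idiom, assuming the SDK is importable as supervisely_lib; the helper name is made up:

import supervisely_lib as sly

def _normalize_ann(ann, project_meta):
    # Hypothetical helper mirroring _set_item above and add_item below.
    if ann is None:
        return sly.Annotation((1, 1))      # 1x1 placeholder, as in the examples
    if isinstance(ann, dict):
        return sly.Annotation.from_json(ann, project_meta)
    return ann.clone()                     # copy so the caller's object stays untouched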
Example #4
    def _do_single_img_inference(self, img, in_msg):
        in_project_meta = self._in_project_meta_from_msg(in_msg)
        ann_json = in_msg.get('annotation')
        if ann_json is not None:
            if in_project_meta is None:
                raise ValueError('In order to perform inference with an annotation you must specify the appropriate'
                                 ' project meta.')
            ann = Annotation.from_json(ann_json, in_project_meta)
        else:
            in_project_meta = in_project_meta or ProjectMeta()
            ann = Annotation(img.shape[:2])

        inference_mode = self._make_inference_mode(in_msg.get(MODE, {}), in_project_meta)
        inference_result = inference_mode.infer_annotate(img, ann)
        return inference_result.to_json()
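
Example #4 leans on the Annotation JSON round-trip: from_json on the way in, to_json on the way out. A minimal sketch of that round-trip for an annotation without labels, assuming the SDK is importable as supervisely_lib:

import numpy as np
import supervisely_lib as sly

img = np.zeros((300, 400, 3), dtype=np.uint8)
meta = sly.ProjectMeta()                    # empty meta suffices when there are no labels
ann = sly.Annotation(img.shape[:2])         # empty annotation for this image
ann_json = ann.to_json()                    # what the handler would hand back to the caller
restored = sly.Annotation.from_json(ann_json, meta)
assert tuple(restored.img_size) == tuple(ann.img_size)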
Example #5
    def add_item(self,
                 title,
                 image_url,
                 ann: Union[Annotation, dict] = None,
                 col_index=None,
                 custom_info: dict = None,
                 zoom_to_figure=None,
                 title_url=None):

        if col_index is not None:
            if col_index <= 0 or col_index > self.col_number:
                raise ValueError(
                    "col_index is out of range: it must be between 1 and col_number")

        res_ann = Annotation((1, 1))
        if ann is not None:
            if type(ann) is dict:
                res_ann = Annotation.from_json(ann, self._project_meta)
            else:
                res_ann = ann.clone()

        self._data[title] = {
            "image_url": image_url,
            "ann": res_ann,
            "col_index": col_index
        }

        if zoom_to_figure is not None:
            self._data[title]["zoom_to_figure"] = zoom_to_figure
            self._need_zoom = True

        if title_url is not None:
            self.preview_info = True
            self._with_title_url = True
            self._data[title]["labelingUrl"] = title_url

        if self.preview_info:
            if custom_info is not None:
                self._data[title]["info"] = custom_info
            else:
                self._data[title]["info"] = None
Example #6
    def test_with_matches(self):
        ann = Annotation(
            img_size=[100, 100],
            labels=[self._gt_obj_1, self._gt_obj_2, self._gt_obj_3,
                    self._pred_obj_1, self._pred_obj_2, self._pred_obj_3] + self._pred_objs_fp)
        self._metric_calculator.add_pair(ann, ann)

        # Sorted matches by confidence (+ marks a true positive, - a false positive):
        # 0.75 - recall 0   precision 0
        # 0.7  + recall 1/3 precision 1/2
        # 0.65 - recall 1/3 precision 1/3
        # 0.6  - recall 1/3 precision 1/4
        # 0.55 - recall 1/3 precision 1/5
        # 0.45 - recall 1/3 precision 1/6
        # 0.35 - recall 1/3 precision 1/7
        # 0.25 - recall 1/3 precision 1/8
        # 0.15 - recall 1/3 precision 1/9
        # 0.1  + recall 2/3 precision 2/10

        # Recalls 0.7, 0.8, 0.9, 1.0 -> max precision 0.
        # Recalls 0.6, 0.5, 0.4      -> max precision 2/10
        # Recalls 0.3, 0.2, 0.1, 0.0 -> max precision 1/2
        expected_map = (4 * 0.0 + 3 * (2/10) + 4 * 1/2) / 11
        self.assertEqual(self._metric_calculator.get_total_metrics()[AP], expected_map)
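
The 11-point interpolation spelled out in the comments above can be checked by hand, independent of the SDK's metric implementation; the (recall, precision) pairs below are copied from the comment:

# Ten (recall, precision) pairs in order of descending confidence.
recall_precision = [
    (0/3, 0/1), (1/3, 1/2), (1/3, 1/3), (1/3, 1/4), (1/3, 1/5),
    (1/3, 1/6), (1/3, 1/7), (1/3, 1/8), (1/3, 1/9), (2/3, 2/10),
]

def interpolated_precision(r):
    # Maximum precision over all points whose recall is at least r (0 if none).
    return max((p for rec, p in recall_precision if rec >= r), default=0.0)

# Average the interpolated precision over recall levels 0.0, 0.1, ..., 1.0.
ap = sum(interpolated_precision(level / 10) for level in range(11)) / 11
assert abs(ap - (4 * 0.0 + 3 * (2 / 10) + 4 * 1 / 2) / 11) < 1e-9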
Example #7
    def test_empty_predictions(self):
        ann = Annotation(
            img_size=[100, 100], labels=[self._gt_obj_1, self._gt_obj_2, self._gt_obj_3])
        self._metric_calculator.add_pair(ann, ann)
        self.assertEqual(self._metric_calculator.get_total_metrics()[AP], 0)