Example #1
    def test_dataset_item_repr(self):
        """
        <b>Description:</b>
        Check DatasetItemEntity class __repr__ method

        <b>Input data:</b>
        DatasetItemEntity class objects with specified "media", "annotation_scene", "roi", "metadata" and "subset"
        parameters

        <b>Expected results:</b>
        Test passes if value returned by __repr__ method is equal to expected

        <b>Steps</b>
        1. Check value returned by __repr__ method for DatasetItemEntity object with default optional parameters
        2. Check value returned by __repr__ method for DatasetItemEntity object with specified optional parameters
        """
        media = DatasetItemParameters.generate_random_image()
        annotation_scene = DatasetItemParameters().annotations_entity()
        # Checking __repr__ method for DatasetItemEntity object initialized with default optional parameters
        default_values_dataset_item = DatasetItemEntity(
            media, annotation_scene)
        generated_roi = default_values_dataset_item.roi
        assert repr(default_values_dataset_item) == (
            f"DatasetItemEntity(media=Image(with data, width=16, height=10), "
            f"annotation_scene={annotation_scene}, roi={generated_roi}, "
            f"subset=NONE)")
        # Checking __repr__ method for DatasetItemEntity object initialized with specified optional parameters
        roi = DatasetItemParameters().roi()
        metadata = DatasetItemParameters.metadata()
        subset = Subset.TESTING
        specified_values_dataset_item = DatasetItemEntity(
            media, annotation_scene, roi, metadata, subset)
        assert repr(specified_values_dataset_item) == (
            f"DatasetItemEntity(media=Image(with data, width=16, height=10), annotation_scene={annotation_scene}, "
            f"roi={roi}, subset=TESTING)")
Example #2
    def test_dataset_item_eq(self):
        """
        <b>Description:</b>
        Check DatasetItemEntity class __eq__ method

        <b>Input data:</b>
        DatasetItemEntity class objects with specified "media", "annotation_scene", "roi", "metadata" and "subset"
        parameters

        <b>Expected results:</b>
        Test passes if value returned by __eq__ method is equal to expected

        <b>Steps</b>
        1. Check value returned by __eq__ method for equal DatasetItemEntity objects
        2. Check value returned by __eq__ method for DatasetItemEntity objects with unequal "media", "annotation_scene",
        "roi" or "subset"  parameters
        3. Check value returned by __eq__ method for DatasetItemEntity objects with unequal "metadata" parameters
        4. Check value returned by __eq__ method for DatasetItemEntity object compared to different type object
        """
        media = DatasetItemParameters.generate_random_image()
        annotation_scene = DatasetItemParameters().annotations_entity()
        roi = DatasetItemParameters().roi()
        metadata = DatasetItemParameters.metadata()
        dataset_parameters = {
            "media": media,
            "annotation_scene": annotation_scene,
            "roi": roi,
            "metadata": metadata,
            "subset": Subset.TESTING,
        }
        dataset_item = DatasetItemEntity(**dataset_parameters)
        # Checking value returned by __eq__ method for equal DatasetItemEntity objects
        equal_dataset_item = DatasetItemEntity(**dataset_parameters)
        assert dataset_item == equal_dataset_item
        # Checking inequality of DatasetItemEntity objects with unequal initialization parameters
        unequal_annotation_scene = DatasetItemParameters().annotations_entity()
        unequal_annotation_scene.annotations.pop(0)
        unequal_values = [
            ("media", DatasetItemParameters.generate_random_image()),
            ("annotation_scene", unequal_annotation_scene),
            ("roi", None),
            ("subset", Subset.VALIDATION),
        ]
        for key, value in unequal_values:
            unequal_parameters = dict(dataset_parameters)
            unequal_parameters[key] = value
            unequal_dataset_item = DatasetItemEntity(**unequal_parameters)
            assert dataset_item != unequal_dataset_item, (
                f"Expected False returned for DatasetItemEntity objects with "
                f"unequal {key} parameters")
        # Checking value returned by __eq__ method for DatasetItemEntity objects with unequal "metadata" parameters
        # expected equality
        unequal_metadata_parameters = dict(dataset_parameters)
        unequal_metadata_parameters["metadata"] = None
        unequal_metadata_dataset_item = DatasetItemEntity(
            **unequal_metadata_parameters)
        assert dataset_item == unequal_metadata_dataset_item
        # Checking value returned by __eq__ method for DatasetItemEntity object compared to different type object
        assert not dataset_item == str
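
The behaviour exercised above (equality ignores "metadata", and comparing against an unrelated type returns False) could be implemented along the following lines; this is a hedged sketch, not necessarily the SDK's exact code.

    def __eq__(self, other):
        # Sketch only: "metadata" is deliberately left out of the comparison,
        # matching the expectation that unequal metadata still compares equal.
        if not isinstance(other, DatasetItemEntity):
            return False
        return (self.media == other.media
                and self.annotation_scene == other.annotation_scene
                and self.roi == other.roi
                and self.subset == other.subset)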
Example #3
    def test_dataset_entity_append(self):
        """
        <b>Description:</b>
        Check DatasetEntity class "append" method

        <b>Input data:</b>
        DatasetEntity class object with specified "items" and "purpose" parameters

        <b>Expected results:</b>
        Test passes if "items" attribute of DatasetEntity object is equal to expected after using "append" method

        <b>Steps</b>
        1. Check "items" attribute of DatasetEntity object after adding new DatasetEntity object
        2. Check "items" attribute of DatasetEntity object after adding existing DatasetEntity object
        3. Check that ValueError exception is raised when appending DatasetEntity with "media" attribute is equal to
        "None"
        """
        dataset = self.dataset()
        expected_items = list(dataset._items)
        # Checking "items" attribute of DatasetEntity object after adding new DatasetEntity object
        item_to_add = self.dataset_item()
        dataset.append(item_to_add)
        expected_items.append(item_to_add)
        assert dataset._items == expected_items
        # Checking "items" attribute of DatasetEntity object after adding existing DatasetEntity object
        dataset.append(item_to_add)
        expected_items.append(item_to_add)
        assert dataset._items == expected_items
        # Checking that ValueError exception is raised when appending a DatasetItemEntity with "media" equal to "None"
        no_media_item = DatasetItemEntity(None, self.annotations_entity())
        with pytest.raises(ValueError):
            dataset.append(no_media_item)
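
The ValueError expected in step 3 suggests that "append" validates the item's media before storing it. A minimal sketch, assuming self._items is the backing list used by the test and the error message is illustrative:

    def append(self, item: DatasetItemEntity) -> None:
        # Sketch: reject items without media, otherwise store the item; the test
        # above shows that appending the same item twice is allowed.
        if item.media is None:
            raise ValueError("Media in a dataset item cannot be None")
        self._items.append(item)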
Example #4
    def generate(self) -> DatasetEntity:
        """
        Generate OTE Anomaly Dataset

        Returns:
            DatasetEntity: Output OTE Anomaly Dataset generated from an MVTec dataset
        """
        samples = self.get_samples()
        dataset_items: List[DatasetItemEntity] = []
        for _, sample in tqdm(samples.iterrows()):
            # Create image
            image = Image(file_path=sample.image_path)

            # Create annotation
            shape = Rectangle(x1=0, y1=0, x2=1, y2=1)
            labels = [ScoredLabel(sample.label)]
            annotations = [Annotation(shape=shape, labels=labels)]
            annotation_scene = AnnotationSceneEntity(annotations=annotations, kind=AnnotationSceneKind.ANNOTATION)

            # Create dataset item
            dataset_item = DatasetItemEntity(media=image, annotation_scene=annotation_scene, subset=sample.subset)

            # Add to dataset items
            dataset_items.append(dataset_item)

        dataset = DatasetEntity(items=dataset_items)
        return dataset
Example #5
    def dataset_item(self) -> DatasetItemEntity:
        return DatasetItemEntity(
            media=self.generate_random_image(),
            annotation_scene=self.annotations_entity(),
            roi=self.roi(),
            metadata=self.metadata(),
            subset=Subset.TESTING,
        )
Example #6
    def test_dataset_item_roi(self):
        """
        <b>Description:</b>
        Check DatasetItemEntity class "roi" property

        <b>Input data:</b>
        DatasetItemEntity class object with specified "media", "annotation_scene", "roi", "metadata" and "subset"
        parameters

        <b>Expected results:</b>
        Test passes if value returned by "roi" property is equal to expected

        <b>Steps</b>
        1. Check value returned by "roi" property for DatasetItemEntity with specified "roi" parameter
        2. Check value returned by "roi" property for DatasetItemEntity with not specified "roi" parameter
        3. Check value returned by "roi" property for DatasetItemEntity with not specified "roi" parameter but one
        of annotation objects in annotation_scene is equal to full Rectangle
        """
        media = DatasetItemParameters.generate_random_image()
        annotations = DatasetItemParameters().annotations()
        annotation_scene = DatasetItemParameters().annotations_entity()
        roi = DatasetItemParameters().roi()
        metadata = DatasetItemParameters.metadata()
        # Checking "roi" property for DatasetItemEntity with specified "roi" parameter
        specified_roi_dataset_item = DatasetItemParameters().dataset_item()
        assert specified_roi_dataset_item.roi == roi
        # Checking that "roi" property is equal to full_box for DatasetItemEntity with not specified "roi" parameter
        non_specified_roi_dataset_item = DatasetItemEntity(media,
                                                           annotation_scene,
                                                           metadata=metadata)
        default_roi = non_specified_roi_dataset_item.roi.shape
        assert isinstance(default_roi, Rectangle)
        assert Rectangle.is_full_box(default_roi)
        # Checking that "roi" property will be equal to full_box for DatasetItemEntity with not specified "roi" but one
        # of Annotation objects in annotation_scene is equal to full Rectangle
        full_box_label = LabelEntity("Full-box label",
                                     Domain.DETECTION,
                                     id=ID("full_box_label"))
        full_box_annotation = Annotation(Rectangle.generate_full_box(),
                                         [ScoredLabel(full_box_label)])
        annotations.append(full_box_annotation)
        annotation_scene.annotations.append(full_box_annotation)
        full_box_label_dataset_item = DatasetItemEntity(media,
                                                        annotation_scene,
                                                        metadata=metadata)
        assert full_box_label_dataset_item.roi is full_box_annotation
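
The last assertion is an identity check, which implies that when no ROI is passed the property reuses an existing full-box Annotation from the scene and only otherwise generates a fresh full-box ROI. A hedged sketch of that fallback (the real property may additionally cache the result and guard it with a lock):

    @property
    def roi(self) -> Annotation:
        # Sketch of the fallback behaviour the assertions above rely on.
        if self._roi is None:
            for annotation in self.annotation_scene.annotations:
                if Rectangle.is_full_box(annotation.shape):
                    self._roi = annotation  # reuse the scene's full-box annotation
                    break
            else:
                self._roi = Annotation(Rectangle.generate_full_box(), labels=[])
        return self._roi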
Example #7
def init_environment(params, model_template, number_of_images=10):
    resolution = (224, 224)
    colors = [(0, 255, 0), (0, 0, 255)]
    cls_names = ['b', 'g']
    texts = ['Blue', 'Green']
    env_labels = [
        LabelEntity(name=name,
                    domain=Domain.CLASSIFICATION,
                    is_empty=False,
                    id=ID(i)) for i, name in enumerate(cls_names)
    ]

    items = []

    for _ in range(0, number_of_images):
        for j, lbl in enumerate(env_labels):
            class_img = np.zeros((*resolution, 3), dtype=np.uint8)
            class_img[:] = colors[j]
            class_img = cv.putText(class_img, texts[j], (50, 50),
                                   cv.FONT_HERSHEY_SIMPLEX, .8 + j * .2,
                                   colors[j - 1], 2, cv.LINE_AA)

            image = Image(data=class_img)
            labels = [ScoredLabel(label=lbl, probability=1.0)]
            shapes = [Annotation(Rectangle.generate_full_box(), labels)]
            annotation_scene = AnnotationSceneEntity(
                kind=AnnotationSceneKind.ANNOTATION, annotations=shapes)
            items.append(
                DatasetItemEntity(media=image,
                                  annotation_scene=annotation_scene))

    rng = random.Random()
    rng.seed(100)
    rng.shuffle(items)
    for i, _ in enumerate(items):
        subset_region = i / number_of_images
        if subset_region >= 0.9:
            subset = Subset.TESTING
        elif subset_region >= 0.6:
            subset = Subset.VALIDATION
        else:
            subset = Subset.TRAINING
        items[i].subset = subset

    dataset = DatasetEntity(items)
    labels_schema = generate_label_schema(dataset.get_labels(),
                                          multilabel=False)
    environment = TaskEnvironment(model=None,
                                  hyper_parameters=params,
                                  label_schema=labels_schema,
                                  model_template=model_template)
    return environment, dataset
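
Note that subset_region is computed against number_of_images while the loop runs over all generated items (number_of_images * len(env_labels) of them), so the split is skewed towards TESTING. A small self-contained illustration of the resulting proportions for the defaults above:

# With number_of_images=10 and two labels there are 20 shuffled items:
# i / number_of_images reaches 1.9, so indices 0-5 end up in TRAINING,
# 6-8 in VALIDATION and 9-19 in TESTING (6 / 3 / 11 items).
number_of_images, total_items = 10, 20
split = {"TRAINING": 0, "VALIDATION": 0, "TESTING": 0}
for i in range(total_items):
    subset_region = i / number_of_images
    if subset_region >= 0.9:
        split["TESTING"] += 1
    elif subset_region >= 0.6:
        split["VALIDATION"] += 1
    else:
        split["TRAINING"] += 1
print(split)  # {'TRAINING': 6, 'VALIDATION': 3, 'TESTING': 11}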
Example #8
    def dataset(self) -> DatasetEntity:
        other_dataset_item = DatasetItemEntity(
            media=self.generate_random_image(),
            annotation_scene=self.annotations_entity(),
            metadata=self.metadata(),
            subset=Subset.VALIDATION,
        )
        items = [
            self.default_values_dataset_item(),
            self.dataset_item(),
            other_dataset_item,
        ]
        return DatasetEntity(items, DatasetPurpose.TEMPORARY_DATASET)
Example #9
    def test_dataset_item_initialization(self):
        """
        <b>Description:</b>
        Check DatasetItemEntity class object initialization

        <b>Input data:</b>
        DatasetItemEntity class objects with specified "media", "annotation_scene", "roi", "metadata" and "subset"
        parameters

        <b>Expected results:</b>
        Test passes if attributes of DatasetItemEntity class object are equal to expected

        <b>Steps</b>
        1. Check attributes of DatasetItemEntity object initialized with default optional parameters
        2. Check attributes of DatasetItemEntity object initialized with specified optional parameters
        """
        media = DatasetItemParameters.generate_random_image()
        annotations_scene = DatasetItemParameters().annotations_entity()
        # Checking attributes of DatasetItemEntity object initialized with default optional parameters
        default_values_dataset_item = DatasetItemEntity(
            media, annotations_scene)
        assert default_values_dataset_item.media == media
        assert default_values_dataset_item.annotation_scene == annotations_scene
        assert not default_values_dataset_item.metadata
        assert default_values_dataset_item.subset == Subset.NONE
        # Checking attributes of DatasetItemEntity object initialized with specified optional parameters
        roi = DatasetItemParameters().roi()
        metadata = DatasetItemParameters.metadata()
        subset = Subset.TESTING
        specified_values_dataset_item = DatasetItemEntity(
            media, annotations_scene, roi, metadata, subset)
        assert specified_values_dataset_item.media == media
        assert specified_values_dataset_item.annotation_scene == annotations_scene
        assert specified_values_dataset_item.roi == roi
        assert specified_values_dataset_item.metadata == metadata
        assert specified_values_dataset_item.subset == subset
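
The defaults asserted above (falsy metadata, Subset.NONE, auto-generated full-box ROI) point to a constructor signature roughly like the sketch below; the parameter defaults are inferred from this test rather than taken from the SDK source.

    def __init__(self, media, annotation_scene, roi=None, metadata=None,
                 subset=Subset.NONE):
        # Sketch: the ROI stays None here and is resolved lazily to a full box
        # by the "roi" property; metadata defaults to an empty collection.
        self.media = media
        self.annotation_scene = annotation_scene
        self._roi = roi
        self.metadata = metadata if metadata is not None else []
        self.subset = subset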
Example #10
    def with_empty_annotations(
        self,
        annotation_kind: AnnotationSceneKind = AnnotationSceneKind.PREDICTION
    ) -> "DatasetEntity":
        """
        Produces a new dataset with empty annotation objects (no shapes or labels).
        This is a convenience function to generate a dataset with empty annotations from another dataset.
        This is particularly useful for evaluation on validation data and to build resultsets.

        Assume a dataset containing user annotations.

        >>> labeled_dataset = Dataset()  # user annotated dataset

        Then, we want to see the performance of our task on this labeled_dataset,
        which means we need to create a new dataset to be passed for analysis.

        >>> prediction_dataset = labeled_dataset.with_empty_annotations()

        Later, we can pass this prediction_dataset to the task analysis function.
        By pairing the labeled_dataset and the prediction_dataset, the resultset can then be constructed.
        Refer to :class:`~ote_sdk.entities.resultset.ResultSetEntity` for more info.

        :param annotation_kind: Sets the empty annotation to this kind. Default value: AnnotationSceneKind.PREDICTION
        :return: a new dataset containing the same items, with empty annotation objects.
        """
        new_dataset = DatasetEntity(purpose=self.purpose)
        for dataset_item in self:
            if isinstance(dataset_item, DatasetItemEntity):
                empty_annotation = AnnotationSceneEntity(annotations=[],
                                                         kind=annotation_kind)

                # reset ROI
                roi = copy.copy(dataset_item.roi)
                roi.id = ID(ObjectId())
                roi.set_labels([])

                new_dataset_item = DatasetItemEntity(
                    media=dataset_item.media,
                    annotation_scene=empty_annotation,
                    roi=roi,
                    subset=dataset_item.subset,
                )
                new_dataset.append(new_dataset_item)
        return new_dataset
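
As the docstring notes, the emptied dataset is usually paired with the labelled one in a result set. A usage sketch, assuming ResultSetEntity accepts model, ground_truth_dataset and prediction_dataset keyword arguments as the docstring reference suggests; "trained_model" and the analysis step are placeholders:

prediction_dataset = labeled_dataset.with_empty_annotations()
# ... run the task on prediction_dataset so its annotation scenes get filled ...
result_set = ResultSetEntity(model=trained_model,
                             ground_truth_dataset=labeled_dataset,
                             prediction_dataset=prediction_dataset)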
Example #11
    def get_dataset_items(self, ann_file_path: Path, data_root_dir: Path,
                          subset: Subset) -> List[DatasetItemEntity]:
        """Loads dataset based on the image path in annotation file.

        Args:
            ann_file_path (Path): Path to json containing the annotations.
                For an example of the annotation format, see `data/anomaly/[train, test, val].json`.
            data_root_dir (Path): Path to folder containing images.
            subset (Subset): Subset of the dataset.

        Returns:
            List[DatasetItemEntity]: List containing subset dataset.
        """
        # read annotation file
        samples = pd.read_json(ann_file_path)

        dataset_items = []
        for _, sample in samples.iterrows():
            # Create image
            # convert path to str as PosixPath is not supported by Image
            image = Image(file_path=str(data_root_dir / sample.image_path))
            # Create annotation
            shape = Rectangle(x1=0, y1=0, x2=1, y2=1)
            label: LabelEntity = (self.normal_label if sample.label == "good"
                                  else self.abnormal_label)
            labels = [ScoredLabel(label)]
            annotations = [Annotation(shape=shape, labels=labels)]
            annotation_scene = AnnotationSceneEntity(
                annotations=annotations, kind=AnnotationSceneKind.ANNOTATION)

            # Create dataset item
            dataset_item = DatasetItemEntity(media=image,
                                             annotation_scene=annotation_scene,
                                             subset=subset)
            # Add to dataset items
            dataset_items.append(dataset_item)

        return dataset_items
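
For reference, a hypothetical minimal annotation file this loader would accept; the column names are inferred from the sample.image_path and sample.label accesses above, and the real data/anomaly/*.json files may carry additional fields.

from io import StringIO

import pandas as pd

# Two records: the "good" label maps to self.normal_label, anything else to
# self.abnormal_label in the loop above.
samples = pd.read_json(StringIO(
    '[{"image_path": "good/000.png", "label": "good"},'
    ' {"image_path": "broken_small/001.png", "label": "bad"}]'
))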
Example #12
def mask_from_dataset_item(dataset_item: DatasetItemEntity,
                           labels: List[LabelEntity]) -> np.ndarray:
    """
    Creates a mask from a dataset item. The mask will be two-dimensional,
    and the value of each pixel matches the class index with offset 1. The background
    class index is zero. labels[0] matches pixel value 1, etc. The class index is
    determined by the order of the ``labels`` parameter.

    :param dataset_item: Item to make mask for
    :param labels: The labels to use for creating the mask. The order of the labels
                   determines the class index.

    :return: Numpy array of mask
    """
    # todo: cache this so that it does not have to be redone for all the same media
    mask = mask_from_annotation(
        dataset_item.get_annotations(),
        labels,
        dataset_item.width,
        dataset_item.height,
    )

    return mask
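
To make the pixel-value convention concrete, here is a self-contained toy version that only rasterises axis-aligned boxes given in normalized coordinates; it is not the real mask_from_annotation, which works on ote_sdk annotation and shape objects.

import numpy as np

def toy_rectangle_mask(boxes, width, height):
    """boxes: list of (label_index, x1, y1, x2, y2) in normalized coordinates."""
    mask = np.zeros((height, width), dtype=np.uint8)
    for label_index, x1, y1, x2, y2 in boxes:
        # background stays 0; labels[0] paints value 1, labels[1] value 2, ...
        mask[int(y1 * height):int(y2 * height),
             int(x1 * width):int(x2 * width)] = label_index + 1
    return mask

# A full box of labels[0] fills the whole mask with value 1.
assert toy_rectangle_mask([(0, 0.0, 0.0, 1.0, 1.0)], width=4, height=3).min() == 1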
Example #13
    def __init__(self,
                 train_ann_file=None,
                 train_data_root=None,
                 val_ann_file=None,
                 val_data_root=None,
                 test_ann_file=None,
                 test_data_root=None,
                 **kwargs):
        self.data_roots = {}
        self.ann_files = {}
        self.multilabel = False
        if train_data_root:
            self.data_roots[Subset.TRAINING] = train_data_root
            self.ann_files[Subset.TRAINING] = train_ann_file
        if val_data_root:
            self.data_roots[Subset.VALIDATION] = val_data_root
            self.ann_files[Subset.VALIDATION] = val_ann_file
        if test_data_root:
            self.data_roots[Subset.TESTING] = test_data_root
            self.ann_files[Subset.TESTING] = test_ann_file
        self.annotations = {}
        for k, v in self.data_roots.items():
            if v:
                self.data_roots[k] = osp.abspath(v)
                if (self.ann_files[k] and '.json' in self.ann_files[k]
                        and osp.isfile(self.ann_files[k])):
                    self.data_roots[k] = osp.dirname(self.ann_files[k])
                    self.multilabel = True
                    self.annotations[k] = self._load_annotation_multilabel(
                        self.ann_files[k], self.data_roots[k])
                else:
                    self.annotations[k] = self._load_annotation(
                        self.data_roots[k])
                    assert not self.multilabel

        self.label_map = None
        self.labels = None
        self._set_labels_obtained_from_annotation()
        self.project_labels = [
            LabelEntity(name=name,
                        domain=Domain.CLASSIFICATION,
                        is_empty=False,
                        id=ID(i)) for i, name in enumerate(self.labels)
        ]

        dataset_items = []
        for subset, subset_data in self.annotations.items():
            for data_info in subset_data[0]:
                image = Image(file_path=data_info[0])
                labels = [
                    ScoredLabel(
                        label=self._label_name_to_project_label(label_name),
                        probability=1.0) for label_name in data_info[1]
                ]
                shapes = [Annotation(Rectangle.generate_full_box(), labels)]
                annotation_scene = AnnotationSceneEntity(
                    kind=AnnotationSceneKind.ANNOTATION, annotations=shapes)
                dataset_item = DatasetItemEntity(image,
                                                 annotation_scene,
                                                 subset=subset)
                dataset_items.append(dataset_item)

        super().__init__(items=dataset_items, **kwargs)
Example #14
    def generate_random_image(self):
        with generate_random_single_image() as path:
            image = Image(file_path=path)
            return DatasetItemEntity(
                media=image, annotation_scene=NullAnnotationSceneEntity())
Example #15
    def test_dataset_item_append_labels(self):
        """
        <b>Description:</b>
        Check DatasetItemEntity class "append_labels" method

        <b>Input data:</b>
        DatasetItemEntity class object with specified "media", "annotation_scene", "roi", "metadata" and "subset"
        parameters

        <b>Expected results:</b>
        Test passes if annotations list returned after using "append_labels" method is equal to expected

        <b>Steps</b>
        1. Check annotations list after "append_labels" method for DatasetItemEntity object with ROI-annotation
        included in annotation_scene.annotations
        2. Check annotations list after "append_labels" method for DatasetItemEntity object without ROI-annotation
        included in annotation_scene.annotations
        3. Check annotations list after "append_labels" method call with empty "labels" parameter
        """
        annotation_labels = DatasetItemParameters.labels()
        labels_to_add = self.labels_to_add()
        scored_labels_to_add = [
            ScoredLabel(labels_to_add[0]),
            ScoredLabel(labels_to_add[1]),
        ]
        media = DatasetItemParameters.generate_random_image()
        roi_labels = DatasetItemParameters.roi_labels()
        roi_scored_labels = DatasetItemParameters().roi_scored_labels()
        roi = DatasetItemParameters().roi()
        equal_roi = DatasetItemParameters().roi()
        annotations = DatasetItemParameters().annotations()
        annotations_with_roi = annotations + [equal_roi]
        annotations_scene = AnnotationSceneEntity(
            annotations_with_roi, AnnotationSceneKind.ANNOTATION)
        # Scenario for checking "append_labels" method for DatasetItemEntity object with ROI-annotation specified in
        # annotation_scene.annotations object
        roi_label_dataset_item = DatasetItemEntity(media, annotations_scene,
                                                   roi)
        roi_label_dataset_item.append_labels(scored_labels_to_add)
        # Check with "include_empty" set to "False"
        expected_labels = [
            annotation_labels[0], roi_labels[0], labels_to_add[0]
        ]
        assert roi_label_dataset_item.annotation_scene.get_labels(
        ) == expected_labels
        expected_labels = [roi_scored_labels[0], scored_labels_to_add[0]]
        self.check_roi_equal_annotation(roi_label_dataset_item,
                                        expected_labels)
        # Check with "include_empty" set to "True"
        expected_labels = annotation_labels + roi_labels + labels_to_add
        assert (roi_label_dataset_item.annotation_scene.get_labels(True) ==
                expected_labels)
        expected_labels = roi_scored_labels + scored_labels_to_add
        self.check_roi_equal_annotation(roi_label_dataset_item,
                                        expected_labels, True)
        # Scenario for checking "append_labels" method for DatasetItemEntity object with non-specified ROI-annotation in
        # annotation_scene.annotations object
        non_roi_dataset_item = DatasetItemParameters().dataset_item()
        non_roi_dataset_item.append_labels(scored_labels_to_add)
        # Check for "include_empty" is "False"
        expected_labels = [
            annotation_labels[0], roi_labels[0], labels_to_add[0]
        ]
        assert non_roi_dataset_item.annotation_scene.get_labels(
        ) == expected_labels
        expected_labels = [roi_scored_labels[0], scored_labels_to_add[0]]
        self.check_roi_equal_annotation(non_roi_dataset_item, expected_labels)
        # Check for "include_empty" is "True"
        expected_labels = annotation_labels + roi_labels + labels_to_add
        assert non_roi_dataset_item.annotation_scene.get_labels(
            True) == expected_labels
        expected_labels = roi_scored_labels + scored_labels_to_add
        self.check_roi_equal_annotation(non_roi_dataset_item, expected_labels,
                                        True)
        # Scenario for "labels" parameter is equal to []
        dataset_item = DatasetItemParameters().dataset_item()
        dataset_item.append_labels([])
        assert dataset_item.annotation_scene.get_labels() == [
            annotation_labels[0]
        ]
        assert (dataset_item.annotation_scene.get_labels(
            include_empty=True) == annotation_labels)
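
The expected label lists above are consistent with "append_labels" adding the new scored labels both to the ROI and to the matching ROI annotation inside the scene, inserting the ROI into the scene when it is not already there. A hedged sketch; the helper names append_annotation and append_label are assumptions about the SDK API:

    def append_labels(self, labels: List[ScoredLabel]) -> None:
        # Sketch only: mirror the behaviour exercised by the checks above.
        if not labels:
            return
        roi_annotation = None
        for annotation in self.annotation_scene.annotations:
            if annotation == self.roi:
                roi_annotation = annotation  # ROI already present in the scene
                break
        if roi_annotation is None:
            roi_annotation = self.roi
            self.annotation_scene.append_annotation(roi_annotation)  # assumed API
        for label in labels:
            self.roi.append_label(label)            # assumed API
            if roi_annotation is not self.roi:
                roi_annotation.append_label(label)  # assumed API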
Example #16
    def test_dataset_item_roi_numpy(self):
        """
        <b>Description:</b>
        Check DatasetItemEntity class "roi_numpy" method

        <b>Input data:</b>
        DatasetItemEntity class object with specified "media", "annotation_scene", "roi", "metadata" and "subset"
        parameters

        <b>Expected results:</b>
        Test passes if array returned by "roi_numpy" method is equal to expected

        <b>Steps</b>
        1. Check array returned by roi_numpy method with non-specified "roi" parameter for DatasetItemEntity with
        "roi" attribute equal to "None"
        2. Check array returned by roi_numpy method with Rectangle-shape "roi" parameter
        3. Check array returned by roi_numpy method with Ellipse-shape "roi" parameter
        4. Check array returned by roi_numpy method with Polygon-shape "roi" parameter
        5. Check array returned by roi_numpy method with non-specified "roi" parameter for DatasetItemEntity with
        specified "roi" attribute
        """
        media = DatasetItemParameters.generate_random_image()
        annotation_scene = DatasetItemParameters().annotations_entity()
        roi_label = LabelEntity("ROI label",
                                Domain.DETECTION,
                                id=ID("roi_label"))
        dataset_item = DatasetItemEntity(media, annotation_scene)
        # Checking array returned by "roi_numpy" method with non-specified "roi" parameter for DatasetItemEntity with
        # "roi" attribute equal to "None"
        assert np.array_equal(dataset_item.roi_numpy(), media.numpy)
        # Checking array returned by "roi_numpy" method with specified Rectangle-shape "roi" parameter
        rectangle_roi = Annotation(
            Rectangle(x1=0.2, y1=0.1, x2=0.8, y2=0.9),
            [ScoredLabel(roi_label)],
            ID("rectangle_roi"),
        )
        assert np.array_equal(dataset_item.roi_numpy(rectangle_roi),
                              media.numpy[1:9, 3:13])
        # Checking array returned by "roi_numpy" method with specified Ellipse-shape "roi" parameter
        ellipse_roi = Annotation(
            Ellipse(x1=0.1, y1=0.0, x2=0.9, y2=0.8),
            [ScoredLabel(roi_label)],
            ID("ellipse_roi"),
        )
        assert np.array_equal(dataset_item.roi_numpy(ellipse_roi),
                              media.numpy[0:8, 2:14])
        # Checking array returned by "roi_numpy" method with specified Polygon-shape "roi" parameter
        polygon_roi = Annotation(
            shape=Polygon([
                Point(0.3, 0.4),
                Point(0.3, 0.7),
                Point(0.5, 0.75),
                Point(0.8, 0.7),
                Point(0.8, 0.4),
            ]),
            labels=[],
            id=ID("polygon_roi"),
        )
        assert np.array_equal(dataset_item.roi_numpy(polygon_roi),
                              media.numpy[4:8, 5:13])
        # Checking array returned by "roi_numpy" method with non-specified "roi" parameter for DatasetItemEntity with
        # specified "roi" attribute
        roi_specified_dataset_item = DatasetItemEntity(
            media, annotation_scene,
            DatasetItemParameters().roi())
        roi_specified_dataset_item.roi_numpy()
        assert np.array_equal(roi_specified_dataset_item.roi_numpy(),
                              media.numpy[1:9, 2:14])
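
The index pairs asserted above follow from scaling the ROI's normalized coordinates to pixel indices of the 16x10 test image and rounding; a small worked example for the Rectangle case (the rounding rule is inferred from the asserted slices):

# media is 16 pixels wide and 10 pixels high in these tests
width, height = 16, 10
x1, y1, x2, y2 = 0.2, 0.1, 0.8, 0.9                    # Rectangle ROI above
rows = slice(round(y1 * height), round(y2 * height))   # slice(1, 9)
cols = slice(round(x1 * width), round(x2 * width))     # slice(3, 13)
# dataset_item.roi_numpy(rectangle_roi) then matches media.numpy[1:9, 3:13];
# the Ellipse and Polygon cases crop to the shape's enclosing rectangle the
# same way, giving media.numpy[0:8, 2:14] and media.numpy[4:8, 5:13].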
Example #17
    def default_values_dataset_item(self) -> DatasetItemEntity:
        return DatasetItemEntity(self.generate_random_image(),
                                 self.annotations_entity())