def optional_result_media_parameters(self) -> dict:
    optional_result_media_parameters = self.default_result_media_parameters()
    roi_label = LabelEntity(
        "ROI label",
        Domain.DETECTION,
        Color(10, 200, 40),
        creation_date=datetime.datetime(year=2021, month=12, day=18),
        id=ID("roi_label_1"),
    )
    roi = Annotation(
        shape=Rectangle(x1=0.3, y1=0.2, x2=0.7, y2=0.6),
        labels=[ScoredLabel(roi_label)],
        id=ID("roi_annotation"),
    )
    result_media_label = LabelEntity(
        "ResultMedia label",
        Domain.CLASSIFICATION,
        Color(200, 60, 100),
        creation_date=datetime.datetime(year=2021, month=12, day=20),
        id=ID("result_media_1"),
    )
    optional_result_media_parameters["roi"] = roi
    optional_result_media_parameters["label"] = result_media_label
    return optional_result_media_parameters
def generate_labels_list(include_empty: bool = True) -> list:
    classification_label = ScoredLabel(
        LabelEntity(
            name="classification",
            domain=Domain.CLASSIFICATION,
            color=Color(red=187, green=28, blue=28),
            creation_date=datetime(year=2021, month=10, day=25),
        )
    )
    detection_label = ScoredLabel(
        LabelEntity(
            name="detection",
            domain=Domain.DETECTION,
            color=Color(red=180, green=30, blue=24),
            creation_date=datetime(year=2021, month=9, day=24),
        )
    )
    empty_label = ScoredLabel(
        LabelEntity(
            name="empty_rectangle_label",
            domain=Domain.CLASSIFICATION,
            color=Color(red=178, green=25, blue=30),
            creation_date=datetime(year=2021, month=7, day=26),
            is_empty=True,
        )
    )
    labels_list = [classification_label, detection_label]
    if include_empty:
        labels_list.append(empty_label)
    return labels_list
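# Illustrative usage sketch (not part of the original suite): the helper above returns
# ScoredLabel objects, and the empty label is only included when explicitly requested.
def example_generate_labels_list_usage():
    all_labels = generate_labels_list(include_empty=True)
    non_empty_labels = generate_labels_list(include_empty=False)
    # Exactly one label (the empty one) is filtered out when include_empty is False.
    assert len(all_labels) == len(non_empty_labels) + 1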
def environment():
    """
    Return TaskEnvironment
    """
    car = LabelEntity(id=ID(123456789), name="car", domain=Domain.DETECTION, is_empty=True)
    person = LabelEntity(id=ID(987654321), name="person", domain=Domain.DETECTION, is_empty=True)
    labels_list = [car, person]
    dummy_template = __get_path_to_file("./dummy_template.yaml")
    model_template = parse_model_template(dummy_template)
    hyper_parameters = model_template.hyper_parameters.data
    params = ote_config_helper.create(hyper_parameters)
    labels_schema = LabelSchemaEntity.from_labels(labels_list)
    environment = TaskEnvironment(
        model=None,
        hyper_parameters=params,
        label_schema=labels_schema,
        model_template=model_template,
    )
    return environment
def test_detection_to_annotation_convert_invalid_input(self):
    """
    <b>Description:</b>
    Check that DetectionToAnnotationConverter raises an error if invalid inputs are provided

    <b>Input data:</b>
    Array of size [1203, 5]
    Array of size [3, 8]

    <b>Expected results:</b>
    Test passes if a ValueError is raised for both inputs

    <b>Steps</b>
    1. Create DetectionToAnnotationConverter
    2. Attempt to convert array of [1203, 5] to annotations
    3. Attempt to convert array of [3, 8] to annotations
    """
    labels = [
        LabelEntity("Zero", domain=Domain.DETECTION),
        LabelEntity("One", domain=Domain.DETECTION),
    ]
    converter = DetectionToAnnotationConverter(labels)

    with pytest.raises(ValueError):
        converter.convert_to_annotation(np.ndarray((1203, 5)))

    with pytest.raises(ValueError):
        converter.convert_to_annotation(np.ndarray((3, 8)))
def test_shape_entity_not_implemented_methods(self):
    """
    <b>Description:</b>
    Check not implemented methods of ShapeEntity class

    <b>Expected results:</b>
    Test passes if NotImplementedError exception is raised when using not implemented methods
    on a ShapeEntity instance
    """
    rectangle_entity = Rectangle(x1=0.2, y1=0.2, x2=0.6, y2=0.7)
    ellipse_entity = Ellipse(x1=0.4, y1=0.1, x2=0.9, y2=0.8)
    polygon_entity = Polygon(
        [
            Point(0.3, 0.4),
            Point(0.3, 0.7),
            Point(0.5, 0.75),
            Point(0.8, 0.7),
            Point(0.8, 0.4),
        ]
    )
    for shape in [rectangle_entity, ellipse_entity, polygon_entity]:
        with pytest.raises(NotImplementedError):
            ShapeEntity.get_area(shape)
        with pytest.raises(NotImplementedError):
            ShapeEntity.intersects(shape, shape)
        with pytest.raises(NotImplementedError):
            ShapeEntity.intersect_percentage(shape, shape)
        with pytest.raises(NotImplementedError):
            ShapeEntity.get_labels(shape)
        with pytest.raises(NotImplementedError):
            ShapeEntity.append_label(
                shape,
                ScoredLabel(LabelEntity(name="classification", domain=Domain.CLASSIFICATION)),
            )
        with pytest.raises(NotImplementedError):
            ShapeEntity.set_labels(
                shape,
                [ScoredLabel(LabelEntity(name="detection", domain=Domain.DETECTION))],
            )
        with pytest.raises(NotImplementedError):
            ShapeEntity.normalize_wrt_roi_shape(shape, rectangle_entity)
        with pytest.raises(NotImplementedError):
            ShapeEntity.denormalize_wrt_roi_shape(shape, rectangle_entity)
        with pytest.raises(NotImplementedError):
            ShapeEntity._as_shapely_polygon(shape)
def default_result_media_parameters() -> dict:
    rectangle_label = LabelEntity(
        name="Rectangle Annotation Label",
        domain=Domain.DETECTION,
        color=Color(100, 200, 60),
        creation_date=datetime.datetime(year=2021, month=12, day=16),
        id=ID("rectangle_label_1"),
    )
    rectangle_annotation = Annotation(
        shape=Rectangle(x1=0.1, y1=0.4, x2=0.4, y2=0.9),
        labels=[ScoredLabel(rectangle_label)],
        id=ID("rectangle_annotation"),
    )
    annotation_scene = AnnotationSceneEntity(
        annotations=[rectangle_annotation],
        kind=AnnotationSceneKind.ANNOTATION,
        creation_date=datetime.datetime(year=2021, month=12, day=16),
        id=ID("annotation_scene"),
    )
    return {
        "name": "ResultMedia name",
        "type": "Test ResultMedia",
        "annotation_scene": annotation_scene,
        "numpy": RANDOM_IMAGE,
    }
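# Illustrative sketch only: the parameter dictionaries built above are assumed to be
# unpackable into ResultMediaEntity, as the equality test further below does with the
# same dictionaries. The example function name is hypothetical.
def example_build_default_result_media():
    return ResultMediaEntity(**default_result_media_parameters())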
def test_label_entity_serialization(self):
    """
    This test serializes a LabelEntity and checks the serialized representation.
    Then it compares the deserialized LabelEntity with the original one.
    """
    cur_date = now()
    red = randint(0, 255)  # nosec
    green = randint(0, 255)  # nosec
    blue = randint(0, 255)  # nosec
    alpha = randint(0, 255)  # nosec

    label = LabelEntity(
        name="my_label",
        domain=Domain.DETECTION,
        color=Color(red, green, blue, alpha),
        hotkey="ctrl+1",
        creation_date=cur_date,
        is_empty=False,
        id=ID("0000213"),
    )
    serialized = LabelMapper.forward(label)

    assert serialized == {
        "_id": "0000213",
        "name": "my_label",
        "color": {"red": red, "green": green, "blue": blue, "alpha": alpha},
        "hotkey": "ctrl+1",
        "domain": "DETECTION",
        "creation_date": DatetimeMapper.forward(cur_date),
        "is_empty": False,
    }

    deserialized = LabelMapper.backward(serialized)
    assert label == deserialized
def test_rectangle_generate_full_box(self):
    """
    <b>Description:</b>
    Check Rectangle generate_full_box method

    <b>Input data:</b>
    Labels specified for full_box instance of Rectangle class

    <b>Expected results:</b>
    Test passes if generate_full_box method returns instance of Rectangle class with coordinates
    (x1=0.0, y1=0.0, x2=1.0, y2=1.0)

    <b>Steps</b>
    1. Check generate_full_box method for Rectangle instance with no labels specified
    2. Check generate_full_box method for Rectangle instance with labels specified
    """
    detection_label = ScoredLabel(LabelEntity(name="detection", domain=Domain.DETECTION))
    for label_actual, label_expected in [
        (None, []),
        ([detection_label], [detection_label]),
    ]:
        full_box = Rectangle.generate_full_box(label_actual)
        assert full_box.type == ShapeType.RECTANGLE
        assert full_box.x1 == full_box.y1 == 0.0
        assert full_box.x2 == full_box.y2 == 1.0
        assert full_box._labels == label_expected
def generate_label_schema(not_empty_labels, multilabel=False):
    assert len(not_empty_labels) > 1

    label_schema = LabelSchemaEntity()
    if multilabel:
        empty_label = LabelEntity(name="Empty label", is_empty=True, domain=Domain.CLASSIFICATION)
        empty_group = LabelGroup(name="empty", labels=[empty_label], group_type=LabelGroupType.EMPTY_LABEL)
        single_groups = []
        for label in not_empty_labels:
            single_groups.append(
                LabelGroup(name=label.name, labels=[label], group_type=LabelGroupType.EXCLUSIVE)
            )
            label_schema.add_group(single_groups[-1])
        label_schema.add_group(empty_group, exclusive_with=single_groups)
    else:
        main_group = LabelGroup(name="labels", labels=not_empty_labels, group_type=LabelGroupType.EXCLUSIVE)
        label_schema.add_group(main_group)
    return label_schema
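# Usage sketch with hypothetical labels (defined only for this example): build a multilabel
# schema in which every label gets its own exclusive group plus a shared empty-label group.
def example_generate_label_schema_usage():
    cat = LabelEntity(name="cat", domain=Domain.CLASSIFICATION)
    dog = LabelEntity(name="dog", domain=Domain.CLASSIFICATION)
    return generate_label_schema([cat, dog], multilabel=True)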
def test_annotation_scene_entity_append_annotation(self):
    """
    <b>Description:</b>
    Check AnnotationSceneEntity append_annotation method

    <b>Input data:</b>
    Initialized instance of AnnotationSceneEntity

    <b>Expected results:</b>
    Test passes if AnnotationSceneEntity append_annotation method returns correct values

    <b>Steps</b>
    1. Create AnnotationSceneEntity instances
    2. Check returning value of append_annotation method
    """
    annotation_scene_entity = self.annotation_scene_entity

    tree = LabelEntity(name="tree", domain=Domain.DETECTION)
    tree_label = ScoredLabel(tree)
    labels = [tree_label]
    annotation = Annotation(shape=self.rectangle, labels=labels)

    assert len(annotation_scene_entity.annotations) == 2

    annotation_scene_entity.append_annotation(annotation)
    assert len(annotation_scene_entity.annotations) == 3
def test_annotation_scene_entity_get_label_ids(self):
    """
    <b>Description:</b>
    Check AnnotationSceneEntity get_label_ids method

    <b>Input data:</b>
    Initialized instance of AnnotationSceneEntity

    <b>Expected results:</b>
    Test passes if AnnotationSceneEntity get_label_ids method returns correct values

    <b>Steps</b>
    1. Create AnnotationSceneEntity instances
    2. Check returning value of get_label_ids method
    """
    annotation_scene_entity = self.annotation_scene_entity

    assert annotation_scene_entity.get_label_ids() == {ID()}

    bus = LabelEntity(id=ID(123456789), name="bus", domain=Domain.DETECTION)
    bus_label = ScoredLabel(bus)
    labels = [bus_label]
    annotation = Annotation(shape=self.rectangle, labels=labels)
    annotation_scene_entity.append_annotation(annotation)

    assert annotation_scene_entity.get_label_ids() == {ID(), ID(123456789)}
def test_detection_to_annotation_convert(self):
    """
    <b>Description:</b>
    Check that DetectionToAnnotationConverter correctly converts network output to a list of Annotations

    <b>Input data:</b>
    Array of network output with shape [4, 6]

    <b>Expected results:</b>
    Test passes if each converted annotation has the same values as the network output

    <b>Steps</b>
    1. Create mock network output
    2. Convert network output to Annotation
    3. Check Annotations
    """
    test_boxes = np.array(
        (
            (0, 0.6, 0.1, 0.1, 0.2, 0.3),
            (1, 0.2, 0.2, 0.1, 0.3, 0.4),
            (1, 0.7, 0.3, 0.2, 0.5, 0.6),
            (0, 0.1, 0.1, 0.1, 0.2, 0.3),
        )
    )
    labels = [
        LabelEntity("Zero", domain=Domain.DETECTION),
        LabelEntity("One", domain=Domain.DETECTION),
    ]
    converter = DetectionToAnnotationConverter(labels)

    annotation_scene = converter.convert_to_annotation(test_boxes)

    for i, annotation in enumerate(annotation_scene.annotations):
        label: ScoredLabel = next(iter(annotation.get_labels()))
        test_label = labels[int(test_boxes[i][0])]
        assert test_label.name == label.name
        # Compare the predicted probability and box coordinates with the raw network output
        assert test_boxes[i][1] == label.probability
        assert test_boxes[i][2] == annotation.shape.x1
        assert test_boxes[i][3] == annotation.shape.y1
        assert test_boxes[i][4] == annotation.shape.x2
        assert test_boxes[i][5] == annotation.shape.y2

    annotation_scene = converter.convert_to_annotation(np.ndarray((0, 6)))
    assert 0 == len(annotation_scene.shapes)
def test_scored_label(self):
    """
    <b>Description:</b>
    Check that ScoredLabel correctly returns its values

    <b>Input data:</b>
    LabelEntity

    <b>Expected results:</b>
    Test passes if the results match
    """
    car = LabelEntity(id=ID(123456789), name="car", domain=Domain.DETECTION, is_empty=True)
    person = LabelEntity(id=ID(987654321), name="person", domain=Domain.DETECTION, is_empty=True)
    car_label = ScoredLabel(car)
    person_label = ScoredLabel(person)

    for attr in ["id", "name", "color", "hotkey", "creation_date", "is_empty"]:
        assert getattr(car_label, attr) == getattr(car, attr)

    assert car_label.get_label() == car
    assert car_label == ScoredLabel(car)
    assert car_label != car
    assert car_label != person_label
    assert hash(car_label) == hash(str(car_label))

    probability = 0.0
    assert car_label.probability == probability
    delta_probability = 0.4
    probability += delta_probability
    car_label.probability += delta_probability
    assert car_label.probability == probability

    car.color = Color(red=16, green=15, blue=56, alpha=255)
    assert (
        "ScoredLabel(123456789, name=car, probability=0.4, domain=DETECTION, color="
        in repr(car_label)
    )
    assert "Color(red=16, green=15, blue=56, alpha=255), hotkey=ctrl+0)" in repr(car_label)
def labels_to_add() -> List[LabelEntity]:
    label_to_add = LabelEntity(
        name="Label which will be added",
        domain=Domain.DETECTION,
        color=Color(red=60, green=120, blue=70),
        creation_date=datetime.datetime(year=2021, month=12, day=12),
        id=ID("label_to_add_1"),
    )
    other_label_to_add = LabelEntity(
        name="Other label to add",
        domain=Domain.SEGMENTATION,
        color=Color(red=80, green=70, blue=100),
        creation_date=datetime.datetime(year=2021, month=12, day=11),
        is_empty=True,
        id=ID("label_to_add_2"),
    )
    return [label_to_add, other_label_to_add]
def rectangle_labels() -> list:
    rectangle_label = LabelEntity(
        name="Rectangle label",
        domain=Domain.DETECTION,
        color=Color(red=100, green=50, blue=200),
        id=ID("rectangle_label_1"),
    )
    other_rectangle_label = LabelEntity(
        name="Other rectangle label",
        domain=Domain.SEGMENTATION,
        color=Color(red=200, green=80, blue=100),
        id=ID("rectangle_label_2"),
    )
    return [
        ScoredLabel(label=rectangle_label),
        ScoredLabel(label=other_rectangle_label),
    ]
def test_shape_set_labels(self):
    """
    <b>Description:</b>
    Check Shape set_labels method for Rectangle, Ellipse and Polygon objects

    <b>Expected results:</b>
    Test passes if set_labels method returns expected values

    <b>Steps</b>
    1. Check set_labels method to add labels list to Shape object with no labels specified
    2. Check set_labels method to add empty labels list to Shape object with no labels specified
    3. Check set_labels method to add labels list to Shape object with labels specified
    4. Check set_labels method to add empty labels list to Shape object with labels specified
    """
    not_empty_label = self.appendable_label()
    new_labels_list = [
        not_empty_label,
        ScoredLabel(
            LabelEntity(
                name="new_label",
                domain=Domain.CLASSIFICATION,
                color=Color(red=183, green=31, blue=28),
                creation_date=datetime(year=2021, month=9, day=25),
                is_empty=True,
            )
        ),
    ]
    expected_not_empty_labels_list = [not_empty_label]
    # Check for adding labels list to Shape with no labels specified
    for no_labels_shape in [
        self.fully_covering_rectangle(),
        self.fully_covering_ellipse(),
        self.fully_covering_polygon(),
    ]:
        no_labels_shape.set_labels(new_labels_list)
        assert no_labels_shape.get_labels() == expected_not_empty_labels_list
        assert no_labels_shape.get_labels(include_empty=True) == new_labels_list
    # Check for adding empty labels list to Shape with no labels specified
    for no_labels_shape in [
        self.fully_covering_rectangle(),
        self.fully_covering_ellipse(),
        self.fully_covering_polygon(),
    ]:
        no_labels_shape.set_labels([])
        assert no_labels_shape.get_labels() == []
        assert no_labels_shape.get_labels(include_empty=True) == []
    # Check for adding labels list to Shape with labels specified
    for shape in [self.rectangle(), self.ellipse(), self.polygon()]:
        shape.set_labels(new_labels_list)
        assert shape.get_labels() == expected_not_empty_labels_list
        assert shape.get_labels(include_empty=True) == new_labels_list
    # Check for adding empty labels list to Shape with labels specified
    for shape in [self.rectangle(), self.ellipse(), self.polygon()]:
        shape.set_labels([])
        assert shape.get_labels() == []
        assert shape.get_labels(include_empty=True) == []
def __init__(
    self,
    path: Union[str, Path] = "./datasets/MVTec",
    split_ratio: float = 0.5,
    seed: int = 0,
    create_validation_set: bool = True,
):
    self.path = path if isinstance(path, Path) else Path(path)
    self.split_ratio = split_ratio
    self.seed = seed
    self.create_validation_set = create_validation_set

    self.normal_label = LabelEntity(
        name=LabelNames.normal, domain=Domain.ANOMALY_CLASSIFICATION, id=LabelNames.normal
    )
    self.abnormal_label = LabelEntity(
        name=LabelNames.anomalous, domain=Domain.ANOMALY_CLASSIFICATION, id=LabelNames.anomalous
    )
def roi_labels() -> List[LabelEntity]:
    creation_date = datetime.datetime(year=2021, month=12, day=9)
    roi_label = LabelEntity(
        name="ROI label",
        domain=Domain.DETECTION,
        color=Color(red=40, green=180, blue=80),
        creation_date=creation_date,
        id=ID("roi_label_1"),
    )
    other_roi_label = LabelEntity(
        name="Second ROI label",
        domain=Domain.SEGMENTATION,
        color=Color(red=80, green=90, blue=70),
        creation_date=creation_date,
        is_empty=True,
        id=ID("roi_label_2"),
    )
    return [roi_label, other_roi_label]
def test_dataset_item_append_annotations(self):
    """
    <b>Description:</b>
    Check DatasetItemEntity class "append_annotations" method

    <b>Input data:</b>
    DatasetItemEntity class object with specified "media", "annotation_scene", "roi", "metadata"
    and "subset" parameters

    <b>Expected results:</b>
    Test passes if annotations list returned after "append_annotations" method is equal to expected

    <b>Steps</b>
    1. Check annotations list returned after "append_annotations" method with specified non-included annotations
    2. Check annotations list returned after "append_annotations" method with incorrect shape annotation
    """
    # Checking annotations list returned after "append_annotations" method with specified non-included annotations
    dataset_item = DatasetItemParameters().default_values_dataset_item()
    full_box_annotations = list(dataset_item.annotation_scene.annotations)
    annotations_to_add = self.annotations_to_add()
    normalized_annotations = []
    for annotation in annotations_to_add:
        normalized_annotations.append(
            Annotation(
                shape=annotation.shape.normalize_wrt_roi_shape(dataset_item.roi.shape),
                labels=annotation.get_labels(),
            )
        )
    dataset_item.append_annotations(annotations_to_add)
    # Random id is generated for normalized annotations
    normalized_annotations[0].id = dataset_item.annotation_scene.annotations[2].id
    normalized_annotations[1].id = dataset_item.annotation_scene.annotations[3].id
    assert dataset_item.annotation_scene.annotations == full_box_annotations + normalized_annotations
    # Checking annotations list returned after "append_annotations" method with incorrect shape annotation
    incorrect_shape_label = LabelEntity(
        name="Label for incorrect shape",
        domain=Domain.CLASSIFICATION,
        color=Color(red=80, green=70, blue=155),
        id=ID("incorrect_shape_label"),
    )
    incorrect_polygon = Polygon([Point(x=0.01, y=0.1), Point(x=0.35, y=0.1), Point(x=0.35, y=0.1)])
    incorrect_shape_annotation = Annotation(
        shape=incorrect_polygon,
        labels=[ScoredLabel(incorrect_shape_label)],
        id=ID("incorrect_shape_annotation"),
    )
    dataset_item.append_annotations([incorrect_shape_annotation])
    assert dataset_item.annotation_scene.annotations == full_box_annotations + normalized_annotations
def labels() -> List[LabelEntity]:
    creation_date = datetime.datetime(year=2021, month=12, day=9)
    detection_label = LabelEntity(
        name="Label for Detection",
        domain=Domain.DETECTION,
        color=Color(red=100, green=200, blue=150),
        creation_date=creation_date,
        id=ID("detection_label"),
    )
    segmentation_label = LabelEntity(
        name="Label for Segmentation",
        domain=Domain.DETECTION,
        color=Color(red=50, green=80, blue=200),
        creation_date=creation_date,
        is_empty=True,
        id=ID("segmentation_label"),
    )
    return [detection_label, segmentation_label]
def appendable_label(empty=False) -> ScoredLabel:
    return ScoredLabel(
        LabelEntity(
            name="appended_label",
            domain=Domain.CLASSIFICATION,
            color=Color(red=181, green=28, blue=31),
            creation_date=datetime(year=2021, month=11, day=22),
            is_empty=empty,
        )
    )
def __init__(
    self,
    train_subset: Optional[Dict[str, str]] = None,
    val_subset: Optional[Dict[str, str]] = None,
    test_subset: Optional[Dict[str, str]] = None,
):
    items: List[DatasetItemEntity] = []

    self.normal_label = LabelEntity(name="Normal", domain=Domain.ANOMALY_CLASSIFICATION)
    self.abnormal_label = LabelEntity(name="Anomalous", domain=Domain.ANOMALY_CLASSIFICATION)

    if train_subset is not None:
        train_ann_file = Path(train_subset["ann_file"])
        train_data_root = Path(train_subset["data_root"])
        items.extend(
            self.get_dataset_items(
                ann_file_path=train_ann_file,
                data_root_dir=train_data_root,
                subset=Subset.TRAINING,
            )
        )
    if val_subset is not None:
        val_ann_file = Path(val_subset["ann_file"])
        val_data_root = Path(val_subset["data_root"])
        items.extend(
            self.get_dataset_items(
                ann_file_path=val_ann_file,
                data_root_dir=val_data_root,
                subset=Subset.VALIDATION,
            )
        )
    if test_subset is not None:
        test_ann_file = Path(test_subset["ann_file"])
        test_data_root = Path(test_subset["data_root"])
        items.extend(
            self.get_dataset_items(
                ann_file_path=test_ann_file,
                data_root_dir=test_data_root,
                subset=Subset.TESTING,
            )
        )

    super().__init__(items=items)
def test_result_media_eq(self):
    """
    <b>Description:</b>
    Check ResultMediaEntity class object __eq__ method

    <b>Input data:</b>
    ResultMediaEntity class objects with specified "name", "type", "annotation_scene", "numpy", "roi"
    and "label" parameters

    <b>Expected results:</b>
    Test passes if value returned by __eq__ method is equal to expected

    <b>Steps</b>
    1. Check value returned by __eq__ method for comparing equal ResultMediaEntity objects
    2. Check value returned by __eq__ method for comparing ResultMediaEntity objects with unequal
    "name", "type", "label" and "numpy" parameters - expected equality
    3. Check value returned by __eq__ method for comparing ResultMediaEntity objects with unequal
    "annotation_scene" and "roi" parameters - expected inequality
    4. Check value returned by __eq__ method for comparing ResultMediaEntity with different type object
    """
    initialization_params = self.optional_result_media_parameters()
    result_media = ResultMediaEntity(**initialization_params)
    # Comparing equal ResultMediaEntity objects
    equal_result_media = ResultMediaEntity(**initialization_params)
    assert result_media == equal_result_media
    # Comparing ResultMediaEntity objects with unequal "name", "type", "label" and "numpy" parameters,
    # expected equality
    unequal_values = {
        "name": "Unequal name",
        "type": "Unequal type",
        "label": LabelEntity("Unequal label", Domain.CLASSIFICATION),
        "numpy": np.random.uniform(low=0.0, high=255.0, size=(1, 2, 3)),
    }
    for key in unequal_values:
        unequal_params = dict(initialization_params)
        unequal_params[key] = unequal_values.get(key)
        equal_result_media = ResultMediaEntity(**unequal_params)
        assert result_media == equal_result_media
    # Comparing ResultMediaEntity objects with unequal "annotation_scene" and "roi" parameters, expected inequality
    unequal_values = {
        "annotation_scene": AnnotationSceneEntity(annotations=[], kind=AnnotationSceneKind.NONE),
        "roi": Rectangle.generate_full_box(),
    }
    for key in unequal_values:
        unequal_params = dict(initialization_params)
        unequal_params[key] = unequal_values.get(key)
        unequal_result_media = ResultMediaEntity(**unequal_params)
        assert result_media != unequal_result_media
    # Comparing ResultMediaEntity with different type object
    assert result_media != str
def init_environment(params, model_template, number_of_images=10):
    resolution = (224, 224)
    colors = [(0, 255, 0), (0, 0, 255)]
    cls_names = ['b', 'g']
    texts = ['Blue', 'Green']
    env_labels = [
        LabelEntity(name=name, domain=Domain.CLASSIFICATION, is_empty=False, id=ID(i))
        for i, name in enumerate(cls_names)
    ]

    items = []

    for _ in range(0, number_of_images):
        for j, lbl in enumerate(env_labels):
            class_img = np.zeros((*resolution, 3), dtype=np.uint8)
            class_img[:] = colors[j]
            class_img = cv.putText(class_img, texts[j], (50, 50),
                                   cv.FONT_HERSHEY_SIMPLEX, .8 + j * .2,
                                   colors[j - 1], 2, cv.LINE_AA)

            image = Image(data=class_img)
            labels = [ScoredLabel(label=lbl, probability=1.0)]
            shapes = [Annotation(Rectangle.generate_full_box(), labels)]
            annotation_scene = AnnotationSceneEntity(kind=AnnotationSceneKind.ANNOTATION,
                                                     annotations=shapes)
            items.append(DatasetItemEntity(media=image, annotation_scene=annotation_scene))

    rng = random.Random()
    rng.seed(100)
    rng.shuffle(items)
    for i, _ in enumerate(items):
        subset_region = i / number_of_images
        if subset_region >= 0.9:
            subset = Subset.TESTING
        elif subset_region >= 0.6:
            subset = Subset.VALIDATION
        else:
            subset = Subset.TRAINING
        items[i].subset = subset

    dataset = DatasetEntity(items)
    labels_schema = generate_label_schema(dataset.get_labels(), multilabel=False)
    environment = TaskEnvironment(model=None, hyper_parameters=params,
                                  label_schema=labels_schema, model_template=model_template)
    return environment, dataset
def generate_label_schema(dataset, task_type):
    """
    Generates label schema depending on task type.
    """
    if task_type == TaskType.CLASSIFICATION and dataset.is_multilabel():
        not_empty_labels = dataset.get_labels()
        assert len(not_empty_labels) > 1
        label_schema = LabelSchemaEntity()
        empty_label = LabelEntity(name="Empty label", is_empty=True, domain=Domain.CLASSIFICATION)
        empty_group = LabelGroup(name="empty", labels=[empty_label], group_type=LabelGroupType.EMPTY_LABEL)
        single_groups = []
        for label in not_empty_labels:
            single_groups.append(
                LabelGroup(name=label.name, labels=[label], group_type=LabelGroupType.EXCLUSIVE)
            )
            label_schema.add_group(single_groups[-1])
        label_schema.add_group(empty_group, exclusive_with=single_groups)
        return label_schema

    if task_type == TaskType.ANOMALY_CLASSIFICATION:
        return LabelSchemaEntity.from_labels(
            [
                LabelEntity(name="Normal", domain=Domain.ANOMALY_CLASSIFICATION, id=ID(0)),
                LabelEntity(name="Anomalous", domain=Domain.ANOMALY_CLASSIFICATION, id=ID(1)),
            ]
        )

    return LabelSchemaEntity.from_labels(dataset.get_labels())
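# Usage sketch (illustrative only): on the anomaly-classification branch the dataset argument
# is never inspected, so a placeholder such as None can be passed for this example.
def example_anomaly_classification_schema():
    return generate_label_schema(dataset=None, task_type=TaskType.ANOMALY_CLASSIFICATION)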
def test_dataset_item_setters(self):
    """
    <b>Description:</b>
    Check DatasetItemEntity class "roi", "subset" and "annotation_scene" setters

    <b>Input data:</b>
    DatasetItemEntity class object with specified "media", "annotation_scene", "roi", "metadata"
    and "subset" parameters

    <b>Expected results:</b>
    Test passes if assigned values of "roi", "subset" and "annotation_scene" properties are equal to expected

    <b>Steps</b>
    1. Check value returned by "roi" property after using @roi.setter
    2. Check value returned by "subset" property after using @subset.setter
    3. Check value returned by "annotation_scene" property after using @annotation_scene.setter
    """
    dataset_item = DatasetItemParameters().dataset_item()
    # Checking value returned by "roi" property after using @roi.setter
    new_roi_label = ScoredLabel(LabelEntity("new ROI label", Domain.DETECTION))
    new_dataset_roi = Annotation(Rectangle(x1=0.2, y1=0.2, x2=1.0, y2=1.0), [new_roi_label])
    dataset_item.roi = new_dataset_roi
    assert dataset_item.roi == new_dataset_roi
    # Checking value returned by "subset" property after using @subset.setter
    new_subset = Subset.TRAINING
    dataset_item.subset = new_subset
    assert dataset_item.subset == new_subset
    # Checking value returned by "annotation_scene" property after using @annotation_scene.setter
    new_annotation_label = ScoredLabel(LabelEntity("new annotation label", Domain.CLASSIFICATION))
    new_annotation = Annotation(Rectangle(x1=0.1, y1=0, x2=0.9, y2=1.0), [new_annotation_label])
    new_annotation_scene = AnnotationSceneEntity([new_annotation], AnnotationSceneKind.PREDICTION)
    dataset_item.annotation_scene = new_annotation_scene
    assert dataset_item.annotation_scene == new_annotation_scene
def test_annotation_scene_entity_contains_any(self):
    """
    <b>Description:</b>
    Check AnnotationSceneEntity contains_any method

    <b>Input data:</b>
    Initialized instance of AnnotationSceneEntity

    <b>Expected results:</b>
    Test passes if AnnotationSceneEntity contains_any method returns correct values

    <b>Steps</b>
    1. Create AnnotationSceneEntity instances
    2. Check returning value of contains_any method
    """
    annotation_scene_entity = self.annotation_scene_entity
    annotation_scene_entity.annotations = self.annotations

    car = LabelEntity(name="car", domain=Domain.DETECTION, is_empty=True)
    person = LabelEntity(name="person", domain=Domain.DETECTION)
    tree = LabelEntity(name="tree", domain=Domain.DETECTION)
    car_label = ScoredLabel(car)
    person_label = ScoredLabel(person)
    tree_label = ScoredLabel(tree)
    labels = [car_label]
    labels2 = [car_label, person_label]

    annotation = Annotation(shape=self.rectangle, labels=labels2)
    annotations = [annotation]
    annotation_scene_entity2 = AnnotationSceneEntity(annotations=annotations,
                                                     kind=AnnotationSceneKind.ANNOTATION)

    assert annotation_scene_entity.contains_any(labels=labels) is False
    assert annotation_scene_entity2.contains_any(labels=labels2) is True
    assert annotation_scene_entity2.contains_any(labels=[tree_label]) is False
def test_dataset_item_roi(self):
    """
    <b>Description:</b>
    Check DatasetItemEntity class "roi" property

    <b>Input data:</b>
    DatasetItemEntity class object with specified "media", "annotation_scene", "roi", "metadata"
    and "subset" parameters

    <b>Expected results:</b>
    Test passes if value returned by "roi" property is equal to expected

    <b>Steps</b>
    1. Check value returned by "roi" property for DatasetItemEntity with specified "roi" parameter
    2. Check value returned by "roi" property for DatasetItemEntity with not specified "roi" parameter
    3. Check value returned by "roi" property for DatasetItemEntity with not specified "roi" parameter but one
    of annotation objects in annotation_scene is equal to full Rectangle
    """
    media = DatasetItemParameters.generate_random_image()
    annotations = DatasetItemParameters().annotations()
    annotation_scene = DatasetItemParameters().annotations_entity()
    roi = DatasetItemParameters().roi()
    metadata = DatasetItemParameters.metadata()
    # Checking "roi" property for DatasetItemEntity with specified "roi" parameter
    specified_roi_dataset_item = DatasetItemParameters().dataset_item()
    assert specified_roi_dataset_item.roi == roi
    # Checking that "roi" property is equal to full_box for DatasetItemEntity with not specified "roi" parameter
    non_specified_roi_dataset_item = DatasetItemEntity(media, annotation_scene, metadata=metadata)
    default_roi = non_specified_roi_dataset_item.roi.shape
    assert isinstance(default_roi, Rectangle)
    assert Rectangle.is_full_box(default_roi)
    # Checking that "roi" property will be equal to full_box for DatasetItemEntity with not specified "roi" but one
    # of Annotation objects in annotation_scene is equal to full Rectangle
    full_box_label = LabelEntity("Full-box label", Domain.DETECTION, id=ID("full_box_label"))
    full_box_annotation = Annotation(Rectangle.generate_full_box(), [ScoredLabel(full_box_label)])
    annotations.append(full_box_annotation)
    annotation_scene.annotations.append(full_box_annotation)
    full_box_label_dataset_item = DatasetItemEntity(media, annotation_scene, metadata=metadata)
    assert full_box_label_dataset_item.roi is full_box_annotation
def backward(instance: dict) -> LabelEntity:
    """Deserializes from dict."""
    label_id = IDMapper().backward(instance["_id"])

    domain = str(instance.get("domain"))
    label_domain = Domain[domain]

    label = LabelEntity(
        id=label_id,
        name=instance["name"],
        color=ColorMapper().backward(instance["color"]),
        hotkey=instance.get("hotkey", ""),
        domain=label_domain,
        creation_date=DatetimeMapper.backward(instance["creation_date"]),
        is_empty=instance.get("is_empty", False),
    )
    return label
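# Round-trip sketch (mirrors test_label_entity_serialization above): forward() followed by
# backward() is expected to reproduce an equal LabelEntity. The example function is hypothetical.
def example_label_mapper_round_trip(label: LabelEntity) -> bool:
    return LabelMapper.backward(LabelMapper.forward(label)) == label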
def test_dataset_item_get_shapes_labels(self):
    """
    <b>Description:</b>
    Check DatasetItemEntity class "get_shapes_labels" method

    <b>Input data:</b>
    DatasetItemEntity class object with specified "media", "annotation_scene", "roi", "metadata"
    and "subset" parameters

    <b>Expected results:</b>
    Test passes if labels list returned by "get_shapes_labels" method is equal to expected

    <b>Steps</b>
    1. Check labels list returned by "get_shapes_labels" for non-specified "labels" parameter
    2. Check labels list returned by "get_shapes_labels" for specified "labels" parameter
    """
    dataset_item = DatasetItemParameters().default_values_dataset_item()
    labels = DatasetItemParameters.labels()
    detection_label = labels[0]
    segmentation_label = labels[1]
    # Checking labels list returned by "get_shapes_labels" method with non-specified "labels" parameter
    # Scenario for "include_empty" is "False"
    assert dataset_item.get_shapes_labels() == [detection_label]
    # Scenario for "include_empty" is "True"
    shapes_labels_actual = dataset_item.get_shapes_labels(include_empty=True)
    assert len(shapes_labels_actual) == 2
    assert isinstance(shapes_labels_actual, list)
    assert detection_label in shapes_labels_actual
    assert segmentation_label in shapes_labels_actual
    # Checking labels list returned by "get_shapes_labels" method with specified "labels" parameter
    # Scenario for "include_empty" is "False"
    non_included_label = LabelEntity("Non-included label", Domain.CLASSIFICATION)
    list_labels = [segmentation_label, non_included_label]
    assert dataset_item.get_shapes_labels(labels=list_labels) == []
    # Scenario for "include_empty" is "True"; the non-included label is expected to be absent from the result
    assert dataset_item.get_shapes_labels(list_labels, True) == [segmentation_label]