def test_dataset_item_append_annotations(self):
    """
    <b>Description:</b>
    Check DatasetItemEntity class "append_annotations" method

    <b>Input data:</b>
    DatasetItemEntity class object with specified "media", "annotation_scene", "roi",
    "metadata" and "subset" parameters

    <b>Expected results:</b>
    Test passes if annotations list returned after "append_annotations" method is equal to expected

    <b>Steps</b>
    1. Check annotations list returned after "append_annotations" method with specified
    non-included annotations
    2. Check annotations list returned after "append_annotations" method with incorrect
    shape annotation
    """
    # Checking annotations list returned after "append_annotations" method with specified non-included annotations
    dataset_item = DatasetItemParameters().default_values_dataset_item()
    full_box_annotations = list(dataset_item.annotation_scene.annotations)
    annotations_to_add = self.annotations_to_add()
    normalized_annotations = []
    for annotation in annotations_to_add:
        # append_annotations stores shapes normalized w.r.t. the item ROI,
        # so the expected annotations are built the same way
        normalized_annotations.append(
            Annotation(
                shape=annotation.shape.normalize_wrt_roi_shape(
                    dataset_item.roi.shape),
                labels=annotation.get_labels(),
            ))
    dataset_item.append_annotations(annotations_to_add)
    # Random id is generated for normalized annotations; copy the actual ids
    # so that the equality assertion below can hold
    normalized_annotations[
        0].id = dataset_item.annotation_scene.annotations[2].id
    normalized_annotations[
        1].id = dataset_item.annotation_scene.annotations[3].id
    assert (
        dataset_item.annotation_scene.annotations == full_box_annotations +
        normalized_annotations)
    # Checking annotations list returned after "append_annotations" method with incorrect shape annotation
    incorrect_shape_label = LabelEntity(
        name="Label for incorrect shape",
        domain=Domain.CLASSIFICATION,
        color=Color(red=80, green=70, blue=155),
        id=ID("incorrect_shape_label"),
    )
    # Degenerate polygon (second and third points coincide): expected to produce
    # an invalid crop and therefore be dropped by append_annotations
    incorrect_polygon = Polygon(
        [Point(x=0.01, y=0.1), Point(x=0.35, y=0.1), Point(x=0.35, y=0.1)])
    incorrect_shape_annotation = Annotation(
        shape=incorrect_polygon,
        labels=[ScoredLabel(incorrect_shape_label)],
        id=ID("incorrect_shape_annotation"),
    )
    dataset_item.append_annotations([incorrect_shape_annotation])
    # The invalid-shape annotation must not have been added
    assert (
        dataset_item.annotation_scene.annotations == full_box_annotations +
        normalized_annotations)
def annotations_to_add(self) -> List[Annotation]:
    """Return two rectangle annotations that are not part of the default dataset item."""
    first_label, second_label = self.labels_to_add()[:2]
    first_annotation = Annotation(
        shape=Rectangle(x1=0.1, y1=0.1, x2=0.7, y2=0.8),
        labels=[ScoredLabel(label=first_label)],
        id=ID("added_annotation_1"),
    )
    second_annotation = Annotation(
        shape=Rectangle(x1=0.2, y1=0.3, x2=0.8, y2=0.9),
        labels=[ScoredLabel(label=second_label)],
        id=ID("added_annotation_2"),
    )
    return [first_annotation, second_annotation]
def test_annotation_get_labels(self):
    """
    <b>Description:</b>
    Check Annotation get_labels method

    <b>Input data:</b>
    Initialized instance of Annotation

    <b>Expected results:</b>
    Test passes if Annotation get_labels method returns correct values

    <b>Steps</b>
    1. Create Annotation instances
    2. Check returning value of get_labels method
    3. Check returning value of get_labels method with include_empty=True
    """
    annotation = Annotation(shape=self.rectangle, labels=self.labels2)

    # Without include_empty only the non-empty "person" label is returned
    non_empty_repr = str(annotation.get_labels())
    assert ("[ScoredLabel(987654321, name=person, probability=0.0, domain=DETECTION,"
            in non_empty_repr)
    assert "color=Color(red=11, green=18, blue=38, alpha=200), hotkey=)]" in non_empty_repr

    # With include_empty=True the empty "car" label appears as well
    all_labels_repr = str(annotation.get_labels(include_empty=True))
    for expected_fragment in (
            "[ScoredLabel(123456789, name=car",
            ", probability=0.0, domain=DETECTION,",
            "color=Color(red=16, green=15,",
            "blue=56, alpha=255), hotkey=ctrl+0),",
    ):
        assert expected_fragment in all_labels_repr
def annotations(self) -> List[Annotation]:
    """Return one detection and one segmentation rectangle annotation fixture."""
    first_label, second_label = self.labels()[:2]
    detection_annotation = Annotation(
        shape=Rectangle(x1=0.2, y1=0.2, x2=0.6, y2=0.7),
        labels=[ScoredLabel(label=first_label)],
        id=ID("detection_annotation_1"),
    )
    segmentation_annotation = Annotation(
        shape=Rectangle(x1=0.3, y1=0.2, x2=0.9, y2=0.9),
        labels=[ScoredLabel(label=second_label)],
        id=ID("segmentation_annotation_1"),
    )
    return [detection_annotation, segmentation_annotation]
def on_predict_epoch_end(self, _trainer: pl.Trainer, _pl_module: AnomalyModule,
                         outputs: List[Any]):
    """Called when the predict epoch ends.

    Gathers the per-batch prediction outputs, assigns an anomalous/normal label to
    every dataset item and attaches the colorized anomaly map as result media.
    """
    # Lightning nests outputs per dataloader; only the first dataloader is used here
    outputs = outputs[0]
    pred_scores = np.hstack(
        [output["pred_scores"].cpu() for output in outputs])
    pred_labels = np.hstack(
        [output["pred_labels"].cpu() for output in outputs])
    anomaly_maps = np.vstack(
        [output["anomaly_maps"].cpu() for output in outputs])

    # Loop over dataset again to assign predictions
    for dataset_item, pred_score, pred_label, anomaly_map in zip(
            self.ote_dataset, pred_scores, pred_labels, anomaly_maps):
        assigned_label = self.anomalous_label if pred_label else self.normal_label
        # Full-image box: anomaly classification labels the whole image
        shape = Annotation(
            Rectangle(x1=0, y1=0, x2=1, y2=1),
            labels=[
                ScoredLabel(assigned_label, probability=float(pred_score))
            ],
        )
        dataset_item.append_annotations([shape])
        heatmap = anomaly_map_to_color_map(anomaly_map.squeeze())
        heatmap_media = ResultMediaEntity(
            name="Anomaly Map",
            type="anomaly_map",
            annotation_scene=dataset_item.annotation_scene,
            numpy=heatmap,
        )
        dataset_item.append_metadata_item(heatmap_media)
def test_annotation_magic_methods(self):
    """
    <b>Description:</b>
    Check Annotation __repr__, __eq__ methods

    <b>Input data:</b>
    Initialized instance of Annotation

    <b>Expected results:</b>
    Test passes if Annotation magic methods returns correct values

    <b>Steps</b>
    1. Create Annotation instances
    2. Check returning value of magic methods
    """
    annotation = self.annotation
    other_annotation = self.annotation
    polygon_points = [Point(0.3, 0.1), Point(0.8, 0.3), Point(0.6, 0.2)]
    third_annotation = Annotation(shape=Polygon(points=polygon_points),
                                  labels=self.labels)

    expected_repr = ("Annotation(shape=Ellipse(x1=0.5, y1=0.1, x2=0.8, y2=0.3), "
                     "labels=[], id=123456789)")
    assert repr(annotation) == expected_repr
    assert annotation == other_annotation
    assert annotation != third_annotation
    assert annotation != str
def optional_result_media_parameters(self) -> dict:
    """Return default ResultMedia parameters extended with optional "roi" and "label"."""
    parameters = self.default_result_media_parameters()
    roi_label = LabelEntity(
        "ROI label",
        Domain.DETECTION,
        Color(10, 200, 40),
        creation_date=datetime.datetime(year=2021, month=12, day=18),
        id=ID("roi_label_1"),
    )
    parameters["roi"] = Annotation(
        shape=Rectangle(x1=0.3, y1=0.2, x2=0.7, y2=0.6),
        labels=[ScoredLabel(roi_label)],
        id=ID("roi_annotation"),
    )
    parameters["label"] = LabelEntity(
        "ResultMedia label",
        Domain.CLASSIFICATION,
        Color(200, 60, 100),
        creation_date=datetime.datetime(year=2021, month=12, day=20),
        id=ID("result_media_1"),
    )
    return parameters
def default_result_media_parameters() -> dict:
    """Return the required keyword arguments for constructing a ResultMediaEntity."""
    rectangle_label = LabelEntity(
        name="Rectangle Annotation Label",
        domain=Domain.DETECTION,
        color=Color(100, 200, 60),
        creation_date=datetime.datetime(year=2021, month=12, day=16),
        id=ID("rectangle_label_1"),
    )
    rectangle_annotation = Annotation(
        shape=Rectangle(x1=0.1, y1=0.4, x2=0.4, y2=0.9),
        labels=[ScoredLabel(rectangle_label)],
        id=ID("rectangle_annotation"),
    )
    annotation_scene = AnnotationSceneEntity(
        annotations=[rectangle_annotation],
        kind=AnnotationSceneKind.ANNOTATION,
        creation_date=datetime.datetime(year=2021, month=12, day=16),
        id=ID("annotation_scene"),
    )
    media_parameters = {
        "name": "ResultMedia name",
        "type": "Test ResultMedia",
        "annotation_scene": annotation_scene,
        "numpy": RANDOM_IMAGE,
    }
    return media_parameters
def test_annotation_scene_entity_append_annotation(self):
    """
    <b>Description:</b>
    Check Annotation append_annotation method

    <b>Input data:</b>
    Initialized instance of AnnotationSceneEntity

    <b>Expected results:</b>
    Test passes if AnnotationSceneEntity append_annotation method returns correct values

    <b>Steps</b>
    1. Create AnnotationSceneEntity instances
    2. Check returning value of append_annotation method
    """
    annotation_scene_entity = self.annotation_scene_entity
    tree_label = ScoredLabel(LabelEntity(name="tree", domain=Domain.DETECTION))
    new_annotation = Annotation(shape=self.rectangle, labels=[tree_label])

    count_before = len(annotation_scene_entity.annotations)
    assert count_before == 2
    annotation_scene_entity.append_annotation(new_annotation)
    assert len(annotation_scene_entity.annotations) == count_before + 1
def test_annotation_scene_entity_get_label_ids(self):
    """
    <b>Description:</b>
    Check Annotation get_label_ids method

    <b>Input data:</b>
    Initialized instance of AnnotationSceneEntity

    <b>Expected results:</b>
    Test passes if AnnotationSceneEntity get_label_ids method returns correct values

    <b>Steps</b>
    1. Create AnnotationSceneEntity instances
    2. Check returning value of get_label_ids method
    """
    annotation_scene_entity = self.annotation_scene_entity

    # The default scene only carries the empty ID
    assert annotation_scene_entity.get_label_ids() == {ID()}

    # Appending an annotation with a labeled ID makes it visible in the result
    bus_label = ScoredLabel(
        LabelEntity(id=ID(123456789), name="bus", domain=Domain.DETECTION))
    annotation_scene_entity.append_annotation(
        Annotation(shape=self.rectangle, labels=[bus_label]))
    assert annotation_scene_entity.get_label_ids() == {ID(), ID(123456789)}
def generate(self) -> DatasetEntity:
    """
    Generate OTE Anomaly Dataset

    Each sample becomes one dataset item whose annotation is a single full-image
    box carrying the sample's label.

    Returns:
        DatasetEntity: Output OTE Anomaly Dataset from an MVTec
    """
    samples = self.get_samples()
    dataset_items: List[DatasetItemEntity] = []
    for _, sample in tqdm(samples.iterrows()):
        # Create image
        image = Image(file_path=sample.image_path)

        # Create annotation: a full-image box with the sample's label
        shape = Rectangle(x1=0, y1=0, x2=1, y2=1)
        labels = [ScoredLabel(sample.label)]
        annotations = [Annotation(shape=shape, labels=labels)]
        annotation_scene = AnnotationSceneEntity(
            annotations=annotations, kind=AnnotationSceneKind.ANNOTATION)

        # Create dataset item
        dataset_item = DatasetItemEntity(media=image,
                                         annotation_scene=annotation_scene,
                                         subset=sample.subset)

        # Add to dataset items
        dataset_items.append(dataset_item)

    dataset = DatasetEntity(items=dataset_items)
    return dataset
def append_annotations(self, annotations: Sequence[Annotation]):
    """
    Adds a list of shapes to the annotation

    Each shape is normalized with respect to this item's ROI before being appended
    to the annotation scene. Annotations whose shapes would produce an invalid crop
    for this media are dropped; a summary count of dropped shapes is logged.

    :param annotations: annotations whose shapes should be added to this item
    """
    roi_as_box = ShapeFactory.shape_as_rectangle(self.roi.shape)

    # Keep only the annotations whose shape yields a valid crop for this media,
    # normalizing the kept shapes w.r.t. the ROI box
    validated_annotations = [
        Annotation(
            shape=annotation.shape.normalize_wrt_roi_shape(roi_as_box),
            labels=annotation.get_labels(),
        ) for annotation in annotations
        if ShapeFactory().shape_produces_valid_crop(
            shape=annotation.shape,
            media_width=self.media.width,
            media_height=self.media.height,
        )
    ]

    n_invalid_shapes = len(annotations) - len(validated_annotations)
    if n_invalid_shapes > 0:
        logger.info(
            "%d shapes will not be added to the dataset item as they "
            "would produce invalid crops (this is expected for some tasks, "
            "such as segmentation).",
            n_invalid_shapes,
        )

    self.annotation_scene.append_annotations(validated_annotations)
def draw(self, image: np.ndarray, entity: Annotation,
         labels: List[ScoredLabel]) -> np.ndarray:
    """
    Draw the labels of a shape in the image top left corner

    :param image: Image
    :param entity: Annotation
    :param labels: labels to be drawn on the image (currently unused, see note below)
    :return: the image with the entity's labels drawn onto it
    """
    # NOTE(review): the `labels` parameter is ignored — the labels drawn are taken
    # from `entity.get_labels()` instead. Confirm whether this is intended.
    return self.draw_labels(image, entity.get_labels())
def roi(self) -> Annotation:
    """Region Of Interest.

    Lazily initialized: if no ROI has been set yet, a full-box rectangle annotation
    is created and cached. Creation and read both happen under the ROI lock so that
    concurrent readers observe a single, consistent ROI object.
    """
    with self.__roi_lock:
        if self.__roi is None:
            requested_roi = Annotation(Rectangle.generate_full_box(),
                                       labels=[])
            self.__roi = requested_roi
        else:
            requested_roi = self.__roi
        return requested_roi
def get_annotations(
    self,
    labels: Optional[List[LabelEntity]] = None,
    include_empty: bool = False,
    ios_threshold: float = 0.0,
) -> List[Annotation]:
    """
    Returns a list of annotations that exist in the dataset item (wrt. ROI)

    :param labels: Subset of input labels to filter with; if ``None``, all the shapes
        within the ROI are returned
    :param include_empty: if True, returns both empty and non-empty labels
    :param ios_threshold: Only return shapes where Area(self.roi ∩ shape)/ Area(shape)
        > ios_threshold.
    :return: The intersection of the input label set and those present within the ROI
    """
    is_full_box = Rectangle.is_full_box(self.roi.shape)
    annotations = []
    if is_full_box and labels is None and not include_empty:
        # Fast path for the case where we do not need to change the shapes
        annotations = self.annotation_scene.annotations
    else:
        # Todo: improve speed. This is O(n) for n shapes.
        roi_as_box = ShapeFactory.shape_as_rectangle(self.roi.shape)

        # Fix: fall back to an empty *set* — the original used `{}`, which is an
        # empty dict, not an empty set (membership tests happen to work either way,
        # but the type was wrong and would break any future set operation)
        labels_set = ({label.name for label in labels}
                      if labels is not None else set())

        for annotation in self.annotation_scene.annotations:
            # Skip shapes that do not sufficiently overlap the ROI
            if (not is_full_box and self.roi.shape.intersect_percentage(
                    annotation.shape) <= ios_threshold):
                continue

            shape_labels = annotation.get_labels(include_empty)

            if labels is not None:
                # Keep only the labels the caller asked for (matched by name)
                shape_labels = [
                    label for label in shape_labels
                    if label.name in labels_set
                ]
                if len(shape_labels) == 0:
                    continue

            if not is_full_box:
                # Create a denormalized copy of the shape.
                shape = annotation.shape.denormalize_wrt_roi_shape(roi_as_box)
            else:
                # Also create a copy of the shape, so that we can safely modify the labels
                # without tampering with the original shape.
                shape = copy.deepcopy(annotation.shape)

            annotations.append(Annotation(shape=shape, labels=shape_labels))
    return annotations
def init_environment(params, model_template, number_of_images=10):
    """Build a synthetic two-class classification TaskEnvironment and dataset.

    Generates `number_of_images` solid-color images per class ("b"/blue, "g"/green)
    with the class text rendered on them, shuffles the items deterministically and
    splits them into TRAINING / VALIDATION / TESTING subsets (~60/30/10).

    :param params: hyper parameters for the TaskEnvironment
    :param model_template: model template for the TaskEnvironment
    :param number_of_images: number of images generated per class
    :return: (TaskEnvironment, DatasetEntity) pair
    """
    resolution = (224, 224)
    colors = [(0, 255, 0), (0, 0, 255)]
    cls_names = ['b', 'g']
    texts = ['Blue', 'Green']
    env_labels = [
        LabelEntity(name=name,
                    domain=Domain.CLASSIFICATION,
                    is_empty=False,
                    id=ID(i)) for i, name in enumerate(cls_names)
    ]

    items = []

    for _ in range(0, number_of_images):
        for j, lbl in enumerate(env_labels):
            # Solid-color image with the class text drawn in the *other* class color
            class_img = np.zeros((*resolution, 3), dtype=np.uint8)
            class_img[:] = colors[j]
            class_img = cv.putText(class_img, texts[j], (50, 50),
                                   cv.FONT_HERSHEY_SIMPLEX, .8 + j * .2,
                                   colors[j - 1], 2, cv.LINE_AA)

            image = Image(data=class_img)
            labels = [ScoredLabel(label=lbl, probability=1.0)]
            shapes = [Annotation(Rectangle.generate_full_box(), labels)]
            annotation_scene = AnnotationSceneEntity(
                kind=AnnotationSceneKind.ANNOTATION, annotations=shapes)
            items.append(
                DatasetItemEntity(media=image,
                                  annotation_scene=annotation_scene))

    # Deterministic shuffle so subset assignment is reproducible
    rng = random.Random()
    rng.seed(100)
    rng.shuffle(items)
    for i, _ in enumerate(items):
        subset_region = i / number_of_images
        if subset_region >= 0.9:
            subset = Subset.TESTING
        elif subset_region >= 0.6:
            subset = Subset.VALIDATION
        else:
            subset = Subset.TRAINING
        items[i].subset = subset

    dataset = DatasetEntity(items)
    labels_schema = generate_label_schema(dataset.get_labels(),
                                          multilabel=False)
    environment = TaskEnvironment(model=None,
                                  hyper_parameters=params,
                                  label_schema=labels_schema,
                                  model_template=model_template)
    return environment, dataset
def test_dataset_item_setters(self):
    """
    <b>Description:</b>
    Check DatasetItemEntity class "roi", "subset" and "annotation_scene" setters

    <b>Input data:</b>
    DatasetItemEntity class object with specified "media", "annotation_scene", "roi",
    "metadata" and "subset" parameters

    <b>Expected results:</b>
    Test passes if assigned values of "roi", "subset" and "annotation_scene" properties
    are equal to expected

    <b>Steps</b>
    1. Check value returned by "roi" property after using @roi.setter
    2. Check value returned by "subset" property after using @subset.setter
    3. Check value returned by "annotation_scene" property after using @subset.annotation_scene
    """
    dataset_item = DatasetItemParameters().dataset_item()

    # "roi" setter
    new_dataset_roi = Annotation(
        Rectangle(x1=0.2, y1=0.2, x2=1.0, y2=1.0),
        [ScoredLabel(LabelEntity("new ROI label", Domain.DETECTION))])
    dataset_item.roi = new_dataset_roi
    assert dataset_item.roi == new_dataset_roi

    # "subset" setter
    dataset_item.subset = Subset.TRAINING
    assert dataset_item.subset == Subset.TRAINING

    # "annotation_scene" setter
    new_annotation = Annotation(
        Rectangle(x1=0.1, y1=0, x2=0.9, y2=1.0),
        [ScoredLabel(LabelEntity("new annotation label", Domain.CLASSIFICATION))])
    new_annotation_scene = AnnotationSceneEntity([new_annotation],
                                                 AnnotationSceneKind.PREDICTION)
    dataset_item.annotation_scene = new_annotation_scene
    assert dataset_item.annotation_scene == new_annotation_scene
def roi(self):
    """Return an ROI annotation fixture: a fixed rectangle with scored labels."""
    roi_shape = Rectangle(
        x1=0.1,
        y1=0.1,
        x2=0.9,
        y2=0.9,
        modification_date=datetime.datetime(year=2021, month=12, day=9),
    )
    return Annotation(shape=roi_shape,
                      labels=self.roi_scored_labels(),
                      id=ID("roi_annotation"))
def test_annotation_get_label_ids(self):
    """
    <b>Description:</b>
    Check Annotation get_label_ids method

    <b>Input data:</b>
    Initialized instance of Annotation

    <b>Expected results:</b>
    Test passes if Annotation get_label_ids method returns correct values

    <b>Steps</b>
    1. Create Annotation instances
    2. Check returning value of get_label_ids method
    3. Check returning value of get_label_ids method with include_empty=True
    """
    annotation = Annotation(shape=self.rectangle, labels=self.labels2)

    # Only the non-empty "person" label id is returned by default
    assert annotation.get_label_ids() == {ID(987654321)}

    # The empty "car" label id is included when include_empty=True
    expected_ids = {ID(987654321), ID(123456789)}
    assert annotation.get_label_ids(include_empty=True) == expected_ids
def __init__(
    self,
    name: str,
    type: str,
    annotation_scene: AnnotationSceneEntity,
    numpy: np.ndarray,
    roi: Optional[Annotation] = None,
    label: Optional[LabelEntity] = None,
):
    """
    Initialize a result media entity.

    :param name: Name of the media
    :param type: Type of the media (parameter shadows the `type` builtin; name kept
        for API compatibility)
    :param annotation_scene: Annotation scene this media relates to
    :param numpy: Media content as a numpy array
    :param roi: Region of interest; defaults to a full-box annotation when omitted
    :param label: Optional label associated with the media
    """
    self.name = name
    self.type = type
    self.annotation_scene = annotation_scene
    # Default ROI is the full image
    self.roi = (Annotation(Rectangle.generate_full_box(), labels=[])
                if roi is None else roi)
    self.label = label
    self.numpy = numpy
def convert_to_annotation(
        self, predictions: np.ndarray,
        metadata: Dict[str, Any]) -> AnnotationSceneEntity:
    """
    Convert anomaly predictions to a PREDICTION annotation scene with one
    full-box annotation labeled anomalous or normal.

    :param predictions: raw prediction array; its maximum value is used as the
        image-level anomaly score
    :param metadata: may contain "threshold" for the anomalous/normal decision
    :return: AnnotationSceneEntity of kind PREDICTION
    """
    pred_score = predictions.reshape(-1).max()
    # NOTE(review): default threshold of 20 assumes unnormalized anomaly scores —
    # confirm against the model's score range
    pred_label = pred_score >= metadata.get("threshold", 20)
    assigned_label = self.anomalous_label if pred_label else self.normal_label
    annotations = [
        Annotation(
            Rectangle.generate_full_box(),
            labels=[
                ScoredLabel(assigned_label, probability=float(pred_score))
            ],
        )
    ]
    return AnnotationSceneEntity(kind=AnnotationSceneKind.PREDICTION,
                                 annotations=annotations)
def test_dataset_item_roi(self):
    """
    <b>Description:</b>
    Check DatasetItemEntity class "roi" property

    <b>Input data:</b>
    DatasetItemEntity class object with specified "media", "annotation_scene", "roi",
    "metadata" and "subset" parameters

    <b>Expected results:</b>
    Test passes if value returned by "roi" property is equal to expected

    <b>Steps</b>
    1. Check value returned by "roi" property for DatasetItemEntity with specified
    "roi" parameter
    2. Check value returned by "roi" property for DatasetItemEntity with not specified
    "roi" parameter
    3. Check value returned by "roi" property for DatasetItemEntity with not specified
    "roi" parameter but one of annotation objects in annotation_scene is equal to full
    Rectangle
    """
    media = DatasetItemParameters.generate_random_image()
    annotations = DatasetItemParameters().annotations()
    annotation_scene = DatasetItemParameters().annotations_entity()
    roi = DatasetItemParameters().roi()
    metadata = DatasetItemParameters.metadata()
    # Checking "roi" property for DatasetItemEntity with specified "roi" parameter
    specified_roi_dataset_item = DatasetItemParameters().dataset_item()
    assert specified_roi_dataset_item.roi == roi
    # Checking that "roi" property is equal to full_box for DatasetItemEntity with not specified "roi" parameter
    non_specified_roi_dataset_item = DatasetItemEntity(media,
                                                       annotation_scene,
                                                       metadata=metadata)
    default_roi = non_specified_roi_dataset_item.roi.shape
    assert isinstance(default_roi, Rectangle)
    assert Rectangle.is_full_box(default_roi)
    # Checking that "roi" property will be equal to full_box for DatasetItemEntity with not specified "roi" but one
    # of Annotation objects in annotation_scene is equal to full Rectangle
    full_box_label = LabelEntity("Full-box label",
                                 Domain.DETECTION,
                                 id=ID("full_box_label"))
    full_box_annotation = Annotation(Rectangle.generate_full_box(),
                                     [ScoredLabel(full_box_label)])
    annotations.append(full_box_annotation)
    annotation_scene.annotations.append(full_box_annotation)
    full_box_label_dataset_item = DatasetItemEntity(media,
                                                    annotation_scene,
                                                    metadata=metadata)
    # Identity check: the ROI must be the *same object* as the full-box annotation
    assert full_box_label_dataset_item.roi is full_box_annotation
def convert_to_annotation(
        self,
        predictions: List[Tuple[int, float]],
        metadata: Optional[Dict] = None) -> AnnotationSceneEntity:
    """
    Convert classification predictions to a PREDICTION annotation scene with one
    full-box annotation carrying all predicted labels.

    :param predictions: (label index, score) pairs
    :param metadata: unused; kept for interface compatibility
    :return: AnnotationSceneEntity of kind PREDICTION
    """
    labels = []
    for index, score in predictions:
        labels.append(ScoredLabel(self.labels[index], float(score)))

    if not labels and self.empty_label:
        # No prediction at all: fall back to the empty label with full confidence
        labels = [ScoredLabel(self.empty_label, probability=1.0)]
    elif self.hierarchical and labels:
        # Fix: added `and labels` guard — the original indexed labels[0] here and
        # raised IndexError when predictions were empty and no empty_label was set
        labels.extend(
            get_ancestors_by_prediction(self.label_schema, labels[0]))

    annotations = [
        Annotation(Rectangle.generate_full_box(), labels=labels)
    ]
    return AnnotationSceneEntity(kind=AnnotationSceneKind.PREDICTION,
                                 annotations=annotations)
def __get_boxes_from_dataset_as_list(dataset: DatasetEntity,
                                     labels: List[LabelEntity]) -> List:
    """
    Explanation of output shape:
        a box: [x1: float, y1, x2, y2, class: str, score: float]
        boxes_per_image: [box1, box2, …]
        ground_truth_boxes_per_image: [boxes_per_image_1, boxes_per_image_2,
        boxes_per_image_3, …]

    :param dataset: dataset to extract boxes from
    :param labels: significant labels for detection task
    :return: returns list with shape: List[List[List[Optional[float, str]]]]
    """
    boxes_per_image = []
    # Track the non-rectangle shape types that were converted, to warn once at the end
    converted_types_to_box = set()
    label_names = {label.name for label in labels}
    for item in dataset:
        boxes: List[List[Union[float, str]]] = []
        roi_as_box = Annotation(ShapeFactory.shape_as_rectangle(
            item.roi.shape),
                                labels=[])
        for annotation in item.annotation_scene.annotations:
            # Every shape is reduced to its enclosing rectangle, normalized to the ROI
            shape_as_box = ShapeFactory.shape_as_rectangle(
                annotation.shape)
            box = shape_as_box.normalize_wrt_roi_shape(roi_as_box.shape)
            n_boxes_before = len(boxes)
            # Only labels from the significant set are kept
            boxes.extend([[
                box.x1, box.y1, box.x2, box.y2, label.name, label.probability
            ] for label in annotation.get_labels()
                          if label.name in label_names])
            if (not isinstance(annotation.shape, Rectangle)
                    and len(boxes) > n_boxes_before):
                converted_types_to_box.add(
                    annotation.shape.__class__.__name__)
        boxes_per_image.append(boxes)
    if len(converted_types_to_box) > 0:
        logger.warning(
            f"The shapes of types {tuple(converted_types_to_box)} have been converted to their "
            f"full enclosing Box representation in order to compute the f-measure"
        )
    return boxes_per_image
def convert_to_annotation(
        self, predictions: List[utils.Detection],
        metadata: Dict[str, Any]) -> AnnotationSceneEntity:
    """Convert raw detections into a PREDICTION annotation scene with normalized boxes."""
    # (width, height) of the original image, used to normalize pixel coordinates
    image_size = metadata["original_shape"][1::-1]
    normalizer = np.tile(image_size, 2)

    annotations = []
    for detection in predictions:
        scored_label = ScoredLabel(self.labels[int(detection.id)],
                                   float(detection.score))
        x1, y1, x2, y2 = np.array(detection.get_coords(),
                                  dtype=float) / normalizer
        annotations.append(
            Annotation(Rectangle(x1, y1, x2, y2), labels=[scored_label]))

    return AnnotationSceneEntity(
        kind=AnnotationSceneKind.PREDICTION,
        annotations=annotations,
    )
def get_dataset_items(self, ann_file_path: Path, data_root_dir: Path, subset: Subset) -> List[DatasetItemEntity]: """Loads dataset based on the image path in annotation file. Args: ann_file_path (Path): Path to json containing the annotations. For example of annotation look at `data/anomaly/[train, test,val].json. data_root_dir (Path): Path to folder containing images. subset (Subset): Subset of the dataset. Returns: List[DatasetItemEntity]: List containing subset dataset. """ # read annotation file samples = pd.read_json(ann_file_path) dataset_items = [] for _, sample in samples.iterrows(): # Create image # convert path to str as PosixPath is not supported by Image image = Image(file_path=str(data_root_dir / sample.image_path)) # Create annotation shape = Rectangle(x1=0, y1=0, x2=1, y2=1) label: LabelEntity = (self.normal_label if sample.label == "good" else self.abnormal_label) labels = [ScoredLabel(label)] annotations = [Annotation(shape=shape, labels=labels)] annotation_scene = AnnotationSceneEntity( annotations=annotations, kind=AnnotationSceneKind.ANNOTATION) # Create dataset item dataset_item = DatasetItemEntity(media=image, annotation_scene=annotation_scene, subset=subset) # Add to dataset items dataset_items.append(dataset_item) return dataset_items
def test_annotation_scene_entity_contains_any(self):
    """
    <b>Description:</b>
    Check Annotation contains_any method

    <b>Input data:</b>
    Initialized instance of AnnotationSceneEntity

    <b>Expected results:</b>
    Test passes if AnnotationSceneEntity contains_any method returns correct values

    <b>Steps</b>
    1. Create AnnotationSceneEntity instances
    2. Check returning value of contains_any method
    """
    annotation_scene_entity = self.annotation_scene_entity
    annotation_scene_entity.annotations = self.annotations

    car_label = ScoredLabel(
        LabelEntity(name="car", domain=Domain.DETECTION, is_empty=True))
    person_label = ScoredLabel(
        LabelEntity(name="person", domain=Domain.DETECTION))
    tree_label = ScoredLabel(LabelEntity(name="tree", domain=Domain.DETECTION))

    second_scene = AnnotationSceneEntity(
        annotations=[
            Annotation(shape=self.rectangle,
                       labels=[car_label, person_label])
        ],
        kind=AnnotationSceneKind.ANNOTATION)

    # The default scene contains neither the empty "car" label nor "person"
    assert annotation_scene_entity.contains_any(labels=[car_label]) is False
    # The second scene carries both labels of its single annotation
    assert second_scene.contains_any(labels=[car_label, person_label]) is True
    # A label that appears nowhere is not contained
    assert second_scene.contains_any(labels=[tree_label]) is False
def __convert_to_annotations(self,
                             predictions: np.ndarray) -> List[Annotation]:
    """
    Converts a list of Detections to OTE SDK Annotation objects

    :param predictions: A list of predictions with shape [num_prediction, 6] or
        [num_predictions, 7]
    :returns: A list of Annotation objects with Rectangle shapes
    :raises ValueError: This error is raised if the shape of prediction is not
        (n, 7) or (n, 6)
    """
    annotations = []
    # Tuple comparison accepts only a trailing dimension of exactly 6 or 7
    if predictions.shape[1:] < (6, ) or predictions.shape[1:] > (7, ):
        raise ValueError(
            f"Shape of prediction is not expected, expected (n, 7) or (n, 6) "
            f"got {predictions.shape}")

    for prediction in predictions:
        if prediction.shape == (7, ):
            # Some OpenVINO models use an output shape of [7,]
            # If this is the case, skip the first value as it is not used
            prediction = prediction[1:]

        label = int(prediction[0])
        confidence = prediction[1]
        scored_label = ScoredLabel(self.label_map[label], confidence)
        # prediction[2:6] hold the (x1, y1, x2, y2) box coordinates
        annotations.append(
            Annotation(
                Rectangle(prediction[2], prediction[3], prediction[4],
                          prediction[5]),
                labels=[scored_label],
            ))

    return annotations
def generate_random_annotated_image(
    image_width: int,
    image_height: int,
    labels: Sequence[LabelEntity],
    min_size=50,
    max_size=250,
    shape: Optional[str] = None,
    max_shapes: int = 10,
    intensity_range: List[Tuple[int, int]] = None,
    random_seed: Optional[int] = None,
) -> Tuple[np.ndarray, List[Annotation]]:
    """
    Generate a random image with the corresponding annotation entities.

    :param intensity_range: Intensity range for RGB channels ((r_min, r_max), (g_min, g_max), (b_min, b_max))
    :param max_shapes: Maximum amount of shapes in the image
    :param shape: {"rectangle", "ellipse", "triangle"}
    :param image_height: Height of the image
    :param image_width: Width of the image
    :param labels: Task Labels that should be applied to the respective shape
    :param min_size: Minimum size of the shape(s)
    :param max_size: Maximum size of the shape(s)
    :param random_seed: Seed to initialize the random number generator
    :return: uint8 array, list of shapes
    """
    from skimage.draw import random_shapes, rectangle

    if intensity_range is None:
        intensity_range = [(100, 200)]

    image1: Optional[np.ndarray] = None
    sc_labels = []
    # Sporadically, it might happen there is no shape in the image, especially on low-res images.
    # It'll retry max 5 times until we see a shape, and otherwise raise a runtime error
    if (
        shape == "ellipse"
    ):  # ellipse shape is not available in random_shapes function. use circle instead
        shape = "circle"
    for _ in range(5):
        rand_image, sc_labels = random_shapes(
            (image_height, image_width),
            min_shapes=1,
            max_shapes=max_shapes,
            intensity_range=intensity_range,
            min_size=min_size,
            max_size=max_size,
            shape=shape,
            random_seed=random_seed,
        )
        num_shapes = len(sc_labels)
        if num_shapes > 0:
            image1 = rand_image
            break

    if image1 is None:
        raise RuntimeError(
            "Was not able to generate a random image that contains any shapes")

    annotations: List[Annotation] = []
    for sc_label in sc_labels:
        sc_label_name = sc_label[0]
        sc_label_shape_r = sc_label[1][0]
        sc_label_shape_c = sc_label[1][1]
        # Convert pixel (row, col) bounds to normalized [0, 1] box coordinates,
        # clamped to the image
        y_min, y_max = max(0.0, float(sc_label_shape_r[0] / image_height)), min(
            1.0, float(sc_label_shape_r[1] / image_height))
        x_min, x_max = max(0.0, float(sc_label_shape_c[0] / image_width)), min(
            1.0, float(sc_label_shape_c[1] / image_width))

        if sc_label_name == "ellipse":
            # Fix issue with newer scikit-image libraries that generate ellipses.
            # For now we render a rectangle on top of it
            sc_label_name = "rectangle"
            rr, cc = rectangle(
                start=(sc_label_shape_r[0], sc_label_shape_c[0]),
                end=(sc_label_shape_r[1] - 1, sc_label_shape_c[1] - 1),
                shape=image1.shape,
            )
            image1[rr, cc] = (
                random.randint(0, 200),  # nosec
                random.randint(0, 200),  # nosec
                random.randint(0, 200),  # nosec
            )
        if sc_label_name == "circle":
            sc_label_name = "ellipse"

        label_matches = [
            label for label in labels if sc_label_name == label.name
        ]
        if len(label_matches) > 0:
            label = label_matches[0]
            box_annotation = Annotation(
                Rectangle(x1=x_min, y1=y_min, x2=x_max, y2=y_max),
                labels=[ScoredLabel(label, probability=1.0)],
            )

            annotation: Annotation

            if label.name == "ellipse":
                annotation = Annotation(
                    Ellipse(
                        x1=box_annotation.shape.x1,
                        y1=box_annotation.shape.y1,
                        x2=box_annotation.shape.x2,
                        y2=box_annotation.shape.y2,
                    ),
                    labels=box_annotation.get_labels(include_empty=True),
                )
            elif label.name == "triangle":
                # Triangle: apex centered on the top edge, base along the bottom edge
                points = [
                    Point(
                        x=(box_annotation.shape.x1 + box_annotation.shape.x2)
                        / 2,
                        y=box_annotation.shape.y1,
                    ),
                    Point(x=box_annotation.shape.x1,
                          y=box_annotation.shape.y2),
                    Point(x=box_annotation.shape.x2,
                          y=box_annotation.shape.y2),
                ]
                annotation = Annotation(
                    Polygon(points=points),
                    labels=box_annotation.get_labels(include_empty=True),
                )
            else:
                annotation = box_annotation

            annotations.append(annotation)
        else:
            logger.warning(
                "Generated a random image, but was not able to associate a label with a shape. "
                f"The name of the shape was `{sc_label_name}`. ")

    return image1, annotations
class TestAnnotation:
    """Unit tests for the Annotation entity (a shape plus its scored labels)."""

    # Shared, class-level fixture state.
    # NOTE(review): ``annotation`` is a single mutable instance shared by all
    # tests below; test_annotation_setters rebinds its shape/id and
    # test_annotation_magic_methods asserts those mutated values, and
    # test_annotation_set_labels relies on labels appended by
    # test_annotation_append_label — so the tests are execution-order
    # dependent. Confirm this ordering is intentional.
    rectangle = Rectangle(x1=0.5, x2=1.0, y1=0.0, y2=0.5)
    labels: List[ScoredLabel] = []
    annotation = Annotation(shape=rectangle, labels=labels)
    # "car" is created as an empty label (is_empty=True): get_labels() /
    # get_label_ids() omit it unless include_empty=True is passed.
    car = LabelEntity(
        id=ID(123456789),
        name="car",
        domain=Domain.DETECTION,
        color=Color(red=16, green=15, blue=56, alpha=255),
        is_empty=True,
    )
    person = LabelEntity(
        id=ID(987654321),
        name="person",
        domain=Domain.DETECTION,
        color=Color(red=11, green=18, blue=38, alpha=200),
        is_empty=False,
    )
    car_label = ScoredLabel(car)
    person_label = ScoredLabel(person)
    labels2 = [car_label, person_label]

    @pytest.mark.priority_medium
    @pytest.mark.component
    @pytest.mark.reqids(Requirements.REQ_1)
    def test_annotation_default_property(self) -> None:
        """
        <b>Description:</b>
        Check that Annotation can correctly return default property value

        <b>Input data:</b>
        Annotation class

        <b>Expected results:</b>
        Test passes if the Annotation return correct values

        <b>Steps</b>
        1. Create Annotation instances
        2. Check default values
        """
        annotation = self.annotation
        # A freshly built Annotation carries an auto-assigned id of type ID.
        assert type(annotation.id) == ID
        assert annotation.id is not None
        assert str(annotation.shape
                   ) == "Rectangle(x=0.5, y=0.0, width=0.5, height=0.5)"
        assert annotation.get_labels() == []

    @pytest.mark.priority_medium
    @pytest.mark.component
    @pytest.mark.reqids(Requirements.REQ_1)
    def test_annotation_setters(self) -> None:
        """
        <b>Description:</b>
        Check that Annotation can correctly return modified property value

        <b>Input data:</b>
        Annotation class

        <b>Expected results:</b>
        Test passes if the Annotation return correct values

        <b>Steps</b>
        1. Create Annotation instances
        2. Set another values
        3. Check changed values
        """
        annotation = self.annotation
        ellipse = Ellipse(x1=0.5, y1=0.1, x2=0.8, y2=0.3)
        # Mutates the shared class-level annotation; later tests read these
        # values back (see class-level NOTE).
        annotation.shape = ellipse
        annotation.id = ID(123456789)
        assert annotation.id == ID(123456789)
        assert annotation.shape == ellipse

    @pytest.mark.priority_medium
    @pytest.mark.component
    @pytest.mark.reqids(Requirements.REQ_1)
    def test_annotation_magic_methods(self) -> None:
        """
        <b>Description:</b>
        Check Annotation __repr__, __eq__ methods

        <b>Input data:</b>
        Initialized instance of Annotation

        <b>Expected results:</b>
        Test passes if Annotation magic methods returns correct values

        <b>Steps</b>
        1. Create Annotation instances
        2. Check returning value of magic methods
        """
        annotation = self.annotation
        # NOTE(review): other_annotation aliases the very same object, so the
        # equality assertion below only exercises the identity case of __eq__;
        # a distinct-but-equal instance would test it more strongly.
        other_annotation = self.annotation
        point1 = Point(0.3, 0.1)
        point2 = Point(0.8, 0.3)
        point3 = Point(0.6, 0.2)
        points = [point1, point2, point3]
        third_annotation = Annotation(shape=Polygon(points=points),
                                      labels=self.labels)
        # The expected repr reflects the Ellipse shape and the id assigned in
        # test_annotation_setters, not the Rectangle used at class definition
        # time (order dependency — see class-level NOTE).
        assert (
            repr(annotation) ==
            "Annotation(shape=Ellipse(x1=0.5, y1=0.1, x2=0.8, y2=0.3), labels=[], id=123456789)"
        )
        assert annotation == other_annotation
        assert annotation != third_annotation
        assert annotation != str

    @pytest.mark.priority_medium
    @pytest.mark.component
    @pytest.mark.reqids(Requirements.REQ_1)
    def test_annotation_get_labels(self) -> None:
        """
        <b>Description:</b>
        Check Annotation get_labels method

        <b>Input data:</b>
        Initialized instance of Annotation

        <b>Expected results:</b>
        Test passes if Annotation get_labels method returns correct values

        <b>Steps</b>
        1. Create Annotation instances
        2. Check returning value of get_labels method
        3. Check returning value of get_labels method with include_empty=True
        """
        annotation = Annotation(shape=self.rectangle, labels=self.labels2)
        # Default call: only the non-empty "person" label is returned.
        assert (
            "[ScoredLabel(987654321, name=person, probability=0.0, domain=DETECTION,"
            in str(annotation.get_labels()))
        assert "color=Color(red=11, green=18, blue=38, alpha=200), hotkey=)]" in str(
            annotation.get_labels())
        # include_empty=True: the empty "car" label is reported as well.
        assert "[ScoredLabel(123456789, name=car" in str(
            annotation.get_labels(include_empty=True))
        assert ", probability=0.0, domain=DETECTION," in str(
            annotation.get_labels(include_empty=True))
        assert "color=Color(red=16, green=15," in str(
            annotation.get_labels(include_empty=True))
        assert "blue=56, alpha=255), hotkey=ctrl+0)," in str(
            annotation.get_labels(include_empty=True))

    @pytest.mark.priority_medium
    @pytest.mark.component
    @pytest.mark.reqids(Requirements.REQ_1)
    def test_annotation_get_label_ids(self) -> None:
        """
        <b>Description:</b>
        Check Annotation get_label_ids method

        <b>Input data:</b>
        Initialized instance of Annotation

        <b>Expected results:</b>
        Test passes if Annotation get_label_ids method returns correct values

        <b>Steps</b>
        1. Create Annotation instances
        2. Check returning value of get_label_ids method
        3. Check returning value of get_label_ids method with include_empty=True
        """
        annotation = Annotation(shape=self.rectangle, labels=self.labels2)
        # Default: the empty "car" label's id is excluded.
        assert annotation.get_label_ids() == {ID(987654321)}
        assert annotation.get_label_ids(include_empty=True) == {
            ID(987654321),
            ID(123456789),
        }

    @pytest.mark.priority_medium
    @pytest.mark.component
    @pytest.mark.reqids(Requirements.REQ_1)
    def test_annotation_append_label(self) -> None:
        """
        <b>Description:</b>
        Check Annotation append_label method

        <b>Input data:</b>
        Initialized instance of Annotation

        <b>Expected results:</b>
        Test passes if Annotation append_label method correct appending label

        <b>Steps</b>
        1. Create Annotation instances
        2. Append label
        3. Check labels
        """
        annotation = self.annotation
        annotation.append_label(label=self.car_label)
        # An appended empty label is still hidden from the default view.
        assert annotation.get_labels() == []  # car_label is empty
        annotation.append_label(label=self.person_label)
        assert "name=person" in str(annotation.get_labels())

    @pytest.mark.priority_medium
    @pytest.mark.component
    @pytest.mark.reqids(Requirements.REQ_1)
    def test_annotation_set_labels(self) -> None:
        """
        <b>Description:</b>
        Check Annotation set_labels method

        <b>Input data:</b>
        Initialized instance of Annotation

        <b>Expected results:</b>
        Test passes if Annotation set_labels method correct setting label

        <b>Steps</b>
        1. Create Annotation instances
        2. Set labels
        3. Check labels
        """
        annotation = self.annotation
        # Non-empty precondition relies on labels appended to the shared
        # annotation by test_annotation_append_label (order dependency).
        assert annotation.get_labels() != []
        annotation.set_labels(labels=[])
        assert annotation.get_labels() == []
        annotation.set_labels(labels=self.labels2)
        assert "name=person" in str(annotation.get_labels())
        assert "name=car" not in str(
            annotation.get_labels())  # car_label is empty