def test_bounding_box_annotator_crop():
    """Check BoundingBoxAnnotatorCrop output shape with a named ("mtcnn")
    annotator and with a custom annotator instance."""
    # read input
    image = _image()
    # Only the bounding-box annotation is used by this test; the original
    # code also read "testimage.json" and immediately discarded it.
    bbox_annotation = read_annotation_file(
        pkg_resources.resource_filename(
            "bob.bio.face.test", "data/testimage_bbox.json"
        ),
        "json",
    )

    final_image_size = (112, 112)
    reference_eyes_location = {
        "leye": (55, 72),
        "reye": (55, 40),
    }
    eyes_cropper = FaceEyesNorm(reference_eyes_location, final_image_size)
    face_cropper = BoundingBoxAnnotatorCrop(
        eyes_cropper=eyes_cropper, annotator="mtcnn"
    )

    # Cropping and checking
    crops = face_cropper.transform([image], [bbox_annotation])[0]
    assert crops.shape == (3, 112, 112)

    # Testing with face annotator
    face_cropper = BoundingBoxAnnotatorCrop(
        eyes_cropper=eyes_cropper, annotator=FakeAnnotator()
    )

    # Cropping and checking
    crops = face_cropper.transform([image], [bbox_annotation])[0]
    assert crops.shape == (3, 112, 112)
def read_frame_annotation_file_replaymobile(file_name, frame, annotations_type="json"):
    """Returns the bounding-box for one frame of a video file of replay-mobile.

    Given an annnotation file location and a frame number, returns the bounding
    box coordinates corresponding to the frame.

    The replay-mobile annotation files are composed of 4 columns and N rows for
    N frames of the video:

    120 230 40 40
    125 230 40 40
    ...
    <x> <y> <w> <h>

    Parameters
    ----------
    file_name: str
        The annotation file name (relative to annotations_path).
    frame: int
        The video frame index.
    annotations_type: str
        Format of the annotation file passed to ``read_annotation_file``
        (``"json"`` by default).
    """
    logger.debug(f"Reading annotation file '{file_name}', frame {frame}.")

    all_frames = read_annotation_file(file_name, annotation_type=annotations_type)

    # read_annotation_file returns an ordered dict with str keys as frame number
    current = all_frames[str(frame)]
    if current is None:
        logger.warning(
            f"Annotation for file '{file_name}' at frame {frame} was 'null'.")
    return current
def test_multi_face_crop():
    """MultiFaceCrop must dispatch to the cropper matching the annotation type,
    raise on unmatched annotations, and prefer the first cropper on ties."""
    # read input
    image = _image()
    eye_annotation, bbox_annotation = [
        read_annotation_file(
            pkg_resources.resource_filename(
                "bob.bio.face.test", "data/" + filename + ".json"
            ),
            "json",
        )
        for filename in ["testimage", "testimage_bbox"]
    ]

    # define the preprocessor
    eyes_cropper = bob.bio.face.preprocessor.FaceCrop(
        cropped_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH),
        cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
        dtype=int,
    )

    face_cropper = bob.bio.face.preprocessor.FaceCrop(
        cropped_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH),
        cropper=FaceCropBoundingBox(
            final_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
        ),
        dtype=int,
    )

    cropper = bob.bio.face.preprocessor.MultiFaceCrop(
        croppers_list=[eyes_cropper, face_cropper]
    )

    # execute face cropper
    eye_reference, bbox_reference = [
        pkg_resources.resource_filename(
            "bob.bio.face.test", "data/" + filename + ".hdf5"
        )
        for filename in ["cropped", "cropped_bbox"]
    ]

    eye_cropped, bbox_cropped = cropper.transform(
        [image, image], [eye_annotation, bbox_annotation]
    )

    # Compare the cropped results to the reference.
    # NOTE: the previous version saved ``bbox_cropped`` over the packaged
    # reference file right before comparing, which made the comparison
    # trivially pass and mutated the test data; the save was removed.
    _compare(eye_cropped, eye_reference)
    _compare(bbox_cropped.astype("uint8"), bbox_reference)

    # test a ValueError is raised if the annotations don't match any cropper
    with pytest.raises(ValueError):
        annot = dict(landmark_A=(60, 60), landmark_B=(120, 120))
        cropper.transform([image], [annot])

    # test that the first annotator is taken when multiple exist
    annot = {**eye_annotation, **bbox_annotation}
    eye_cropped = cropper.transform([image], [annot])[0]
    _compare(eye_cropped, eye_reference)
def test_annotate_samples():
    """Run the ``annotate_samples`` CLI on the dummy database and check that
    every produced annotation file contains the fixed dummy annotation."""
    # TemporaryDirectory replaces the manual mkdtemp/try/finally/rmtree dance
    # and guarantees cleanup even on assertion failure.
    with tempfile.TemporaryDirectory(prefix="bobtest_") as tmp_dir:
        runner = CliRunner()
        result = runner.invoke(
            annotate_samples,
            args=("dummy_samples", "-a", "dummy", "-o", tmp_dir),
        )
        assert_click_runner_result(result)

        # test if annotations exist
        for dirpath, _dirnames, filenames in os.walk(tmp_dir):
            for filename in filenames:
                path = os.path.join(dirpath, filename)
                annot = read_annotation_file(path, "json")
                assert annot["topleft"] == [0, 0]
                # size of atnt images
                assert annot["bottomright"] == [112, 92]
def annotations(self, file):
    """Reads the annotations for the given file id from file and returns them
    in a dictionary.

    Parameters
    ----------
    file : BioFile
        The BioFile object for which the annotations should be read.

    Returns
    -------
    dict
        The annotations as a dictionary, e.g.:
        ``{'reye':(re_y,re_x), 'leye':(le_y,le_x)}``
    """
    # No annotation directory configured -> no annotations available.
    if self.annotation_directory is None:
        return None

    # The file id doubles as the file name, so it addresses the annotation
    # file directly.
    path = os.path.join(
        self.annotation_directory, file.id + self.annotation_extension
    )
    return read_annotation_file(path, self.annotation_type)
def annotations(self):
    """Load and return this sample's annotations from its annotation file."""
    directory = self.annotation_directory or ""
    extension = self.annotation_extension or ""
    annotation_path = self.make_path(directory, extension)
    return read_annotation_file(annotation_path, annotation_type=self.annotation_type)
def _annotation():
    """Load the reference eye annotations shipped with the test data."""
    annotation_path = pkg_resources.resource_filename(
        "bob.bio.face.test", "data/testimage.json"
    )
    return read_annotation_file(annotation_path, "json")
def display_face_annotations(
    database,
    is_video,
    annotations_dir,
    annotations_extension,
    annotations_type,
    marker_style,
    marker_size,
    display_names,
    font_color,
    font_size,
    output_dir,
    keep_all,
    self_test,
    groups,
    **kwargs,
):
    """
    Plots annotations on the corresponding face picture.

    Annotations are read either from ``annotations_dir`` (one file per sample
    key) or, when that is ``None``, from ``database.annotations``.  Point
    annotations (2-element list/tuple) are drawn as markers; everything else
    is collected and printed at the top center of the image.  Figures are
    shown interactively unless ``output_dir`` is given, in which case they
    are saved (one file per sample when ``keep_all`` is set).
    """
    logger.debug("Retrieving samples from database.")
    samples = database.all_samples(groups)
    logger.debug(f"{len(samples)} samples loaded from database.")

    # open figure
    from matplotlib import pyplot

    if not self_test and not output_dir:
        pyplot.ion()
        pyplot.show()
    else:
        pyplot.ioff()
    pyplot.figure()

    for sample in samples:
        # load image
        logger.info("loading image for sample %s", sample.key)
        image = sample.data
        if is_video:
            frame_id, image, _ = image[0]
        # convert to color if it is not
        if image.ndim == 2:
            image = gray_to_rgb(image)

        # get annotations
        annotations = {}
        if annotations_dir is not None:
            # Loads the corresponding annotations file
            annotations_file = os.path.join(
                annotations_dir, sample.key + annotations_extension
            )
            if os.path.exists(annotations_file):
                logger.info("Loading annotations from file %s", annotations_file)
                annotations = read_annotation_file(
                    annotations_file, annotations_type
                )
            else:
                # logger.warn is deprecated (removed in Python 3.13);
                # use logger.warning instead.
                logger.warning(
                    "Could not find annotation file %s", annotations_file
                )
        else:
            # get annotations from database
            annotations = database.annotations(sample)

        if not annotations:
            logger.warning("Could not find annotations for file %s", sample.key)
            continue

        if is_video:
            assert frame_id in annotations, annotations
            annotations = annotations[frame_id]

        pyplot.clf()
        pyplot.imshow(image.transpose(1, 2, 0))

        global_annotation = []
        for n, a in annotations.items():
            if isinstance(a, (list, tuple)) and len(a) == 2:
                # point annotation: annotations are (y, x), pyplot wants (x, y)
                pyplot.plot(
                    a[1],
                    a[0],
                    marker_style,
                    ms=marker_size,
                    mew=marker_size / 5.0,
                )
                if display_names:
                    pyplot.annotate(
                        n, (a[1], a[0]), color=font_color, fontsize=font_size
                    )
            else:
                global_annotation.append("%s=%s" % (n, a))

        # plot all global annotations, at the top center of the image
        pyplot.annotate(
            ";".join(global_annotation),
            (image.shape[-1] / 2, 0),
            color=font_color,
            fontsize=font_size,
            ha="center",
            va="baseline",
        )

        pyplot.gca().set_aspect("equal")
        pyplot.gca().autoscale(tight=True)

        if output_dir is None:
            if self_test:
                raise RuntimeError("Do not run self_test without --output_dir.")
            pyplot.pause(0.001)
        else:
            if keep_all:
                output_path = os.path.join(output_dir, sample.key + ".png")
            else:
                output_path = os.path.join(output_dir, "annotated.png")
            os.makedirs(os.path.dirname(output_path), exist_ok=True)
            pyplot.savefig(output_path)

        if not self_test:
            input_text = (
                "Press Enter to continue to the next image (or Ctrl-C to exit)"
            )
            input(input_text)