def test_instance_confidence_map_generator(min_labels):
    """Confidence maps are generated for the centered instance only."""
    reader = providers.LabelsReader(min_labels)
    centroid_finder = instance_centroids.InstanceCentroidFinder(
        center_on_anchor_part=True,
        anchor_part_names="A",
        skeletons=reader.labels.skeletons,
    )
    cropper = instance_cropping.InstanceCropper(crop_width=160, crop_height=160)
    confmap_gen = InstanceConfidenceMapGenerator(
        sigma=5, output_stride=2, all_instances=False
    )

    # Chain the transformers over the base dataset.
    ds = reader.make_dataset()
    ds = centroid_finder.transform_dataset(ds)
    ds = cropper.transform_dataset(ds)
    ds = confmap_gen.transform_dataset(ds)
    example = next(iter(ds))

    # Output is subsampled by the stride (160 / 2 = 80) with one channel per node.
    assert example["instance_confidence_maps"].shape == (80, 80, 2)
    assert example["instance_confidence_maps"].dtype == tf.float32
    # all_instances=False must not emit the all-instance maps.
    assert "all_instance_confidence_maps" not in example

    # Sample the maps at the (stride-scaled) landmark coordinates and check
    # that each node's channel peaks there.
    pts = example["center_instance"].numpy() / confmap_gen.output_stride
    cms = example["instance_confidence_maps"].numpy()
    np.testing.assert_allclose(
        cms[pts[:, 1].astype(int), pts[:, 0].astype(int), :],
        [[0.9139312, 0.0], [0.0, 0.94459903]],
    )
def test_instance_cropper_keeping_full_image(min_labels):
    """With keep_full_image=True the uncropped frame stays in the example."""
    reader = providers.LabelsReader(min_labels)
    centroid_finder = instance_centroids.InstanceCentroidFinder(
        center_on_anchor_part=True,
        anchor_part_names="A",
        skeletons=reader.labels.skeletons,
    )
    cropper = instance_cropping.InstanceCropper(
        crop_width=160, crop_height=160, keep_full_image=True
    )

    ds = centroid_finder.transform_dataset(reader.make_dataset())
    ds = cropper.transform_dataset(ds)
    example = next(iter(ds))

    # The per-instance crop.
    assert example["instance_image"].shape == (160, 160, 1)
    assert example["instance_image"].dtype == tf.uint8

    # Original frame dimensions are carried through as int32 scalars.
    assert example["full_image_height"] == 384
    assert example["full_image_height"].dtype == tf.int32
    assert example["full_image_width"] == 384
    assert example["full_image_width"].dtype == tf.int32

    # The full frame itself is retained under the "image" key.
    assert example["image"].shape == (384, 384, 1)
    assert example["image"].dtype == tf.uint8
def test_multi_confidence_map_generator(min_labels):
    """Multi-instance confmaps combine all instances into one map per node."""
    reader = providers.LabelsReader(min_labels)
    confmap_gen = MultiConfidenceMapGenerator(
        sigma=3, output_stride=2, centroids=False
    )

    ds = confmap_gen.transform_dataset(reader.make_dataset())
    example = next(iter(ds))

    # 384 / stride 2 = 192; one channel per skeleton node.
    assert example["confidence_maps"].shape == (192, 192, 2)
    assert example["confidence_maps"].dtype == tf.float32

    # Check the map value at each (instance, node) location against the
    # expected per-node peaks (other node's channel should be ~0 there).
    instances = example["instances"].numpy() / confmap_gen.output_stride
    cms = example["confidence_maps"].numpy()
    expected_peaks = [
        ((0, 0), [0.948463, 0.0]),
        ((1, 0), [0.66676116, 0.0]),
        ((0, 1), [0.0, 0.9836702]),
        ((1, 1), [0.0, 0.8815618]),
    ]
    for (inst, node), want in expected_peaks:
        row = int(instances[inst, node, 1])
        col = int(instances[inst, node, 0])
        np.testing.assert_allclose(cms[row, col, :], want)
def test_resizer(min_labels):
    """Resizer scales and/or pads images and rescales instance points to match."""
    reader = providers.LabelsReader(min_labels)
    base_ds = reader.make_dataset()
    reference = next(iter(base_ds))

    # Scale only: 384 * 0.25 = 96, points scaled by the same factor.
    resizer = resizing.Resizer(image_key="image", scale=0.25)
    example = next(iter(resizer.transform_dataset(base_ds)))
    assert example["image"].shape == (96, 96, 1)
    np.testing.assert_array_equal(example["scale"], (0.25, 0.25))
    np.testing.assert_allclose(example["instances"], reference["instances"] * 0.25)

    # Pad only: 384 padded up to the next multiple of 100 -> 400; points unchanged.
    resizer = resizing.Resizer(image_key="image", pad_to_stride=100)
    example = next(iter(resizer.transform_dataset(base_ds)))
    assert example["image"].shape == (400, 400, 1)
    np.testing.assert_array_equal(example["scale"], (1.0, 1.0))
    np.testing.assert_allclose(example["instances"], reference["instances"])

    # Scale then pad: 96 padded up to 100; points reflect only the scaling.
    resizer = resizing.Resizer(image_key="image", scale=0.25, pad_to_stride=100)
    example = next(iter(resizer.transform_dataset(base_ds)))
    assert example["image"].shape == (100, 100, 1)
    np.testing.assert_array_equal(example["scale"], (0.25, 0.25))
    np.testing.assert_allclose(example["instances"], reference["instances"] * 0.25)
def test_multi_confidence_map_generator_centroids(min_labels):
    """centroids=True produces a single-channel centroid confidence map."""
    reader = providers.LabelsReader(min_labels)
    centroid_finder = instance_centroids.InstanceCentroidFinder(
        center_on_anchor_part=True,
        anchor_part_names="A",
        skeletons=reader.labels.skeletons,
    )
    confmap_gen = MultiConfidenceMapGenerator(
        sigma=5, output_stride=2, centroids=True
    )

    ds = reader.make_dataset()
    ds = centroid_finder.transform_dataset(ds)
    ds = confmap_gen.transform_dataset(ds)
    example = next(iter(ds))

    # 384 / stride 2 = 192; one channel shared by all centroids.
    assert example["centroid_confidence_maps"].shape == (192, 192, 1)
    assert example["centroid_confidence_maps"].dtype == tf.float32

    # Each instance's centroid location should be near the map's peak.
    centroids = example["centroids"].numpy() / confmap_gen.output_stride
    centroid_cms = example["centroid_confidence_maps"].numpy()
    for inst, want in enumerate(([0.9811318], [0.8642299])):
        row = int(centroids[inst, 1])
        col = int(centroids[inst, 0])
        np.testing.assert_allclose(centroid_cms[row, col, :], want)
def test_instance_confidence_map_generator_with_all_instances(min_labels):
    """all_instances=True adds confmaps covering every instance in the crop."""
    reader = providers.LabelsReader(min_labels)
    centroid_finder = instance_centroids.InstanceCentroidFinder(
        center_on_anchor_part=True,
        anchor_part_names="A",
        skeletons=reader.labels.skeletons,
    )
    cropper = instance_cropping.InstanceCropper(crop_width=160, crop_height=160)
    confmap_gen = InstanceConfidenceMapGenerator(
        sigma=5 / 2, output_stride=2, all_instances=True
    )

    ds = reader.make_dataset()
    ds = centroid_finder.transform_dataset(ds)
    ds = cropper.transform_dataset(ds)
    ds = confmap_gen.transform_dataset(ds)
    example = next(iter(ds))

    # Both the center-instance and the all-instance maps are present.
    assert "instance_confidence_maps" in example
    assert example["all_instance_confidence_maps"].shape == (80, 80, 2)
    assert example["all_instance_confidence_maps"].dtype == tf.float32

    instances = example["all_instances"].numpy() / confmap_gen.output_stride
    all_cms = example["all_instance_confidence_maps"].numpy()

    # Points from the non-centered instance can fall outside the crop; clamp
    # their indices to 0 so sampling stays in-bounds (the expected values for
    # those entries are 0 accordingly).
    cols = instances[..., 0].astype(int)
    rows = instances[..., 1].astype(int)
    cols = np.where((cols < 0) | (cols >= all_cms.shape[1]), 0, cols)
    rows = np.where((rows < 0) | (rows >= all_cms.shape[0]), 0, rows)

    np.testing.assert_allclose(
        all_cms[rows, cols, :],
        [[[0.91393119, 0.0], [0.0, 0.94459903]], [[0.0, 0.0], [0.0, 0.0]]],
        atol=1e-6,
    )
def test_normalizer(min_labels):
    """Exercise Normalizer channel/dtype options and their interactions.

    Covers grayscale conversion, float conversion, RGB conversion, and the
    precedence when both grayscale and RGB are requested (grayscale wins).

    NOTE(review): this function shares its name with a later duplicate
    definition in this module; only the last definition is collected by
    pytest unless the duplicate is renamed.
    """
    # Removed dead commented-out call to tf.executing_eagerly() — it had no
    # effect even when live (its return value was discarded).
    labels_reader = providers.LabelsReader(min_labels)
    ds_img = labels_reader.make_dataset()

    # Grayscale only: single channel out.
    normalizer = normalization.Normalizer(ensure_grayscale=True)
    example = next(iter(normalizer.transform_dataset(ds_img)))
    assert example["image"].shape[-1] == 1

    # Float + grayscale.
    normalizer = normalization.Normalizer(ensure_float=True, ensure_grayscale=True)
    example = next(iter(normalizer.transform_dataset(ds_img)))
    assert example["image"].dtype == tf.float32
    assert example["image"].shape[-1] == 1

    # Float + RGB: three channels out.
    normalizer = normalization.Normalizer(ensure_float=True, ensure_rgb=True)
    example = next(iter(normalizer.transform_dataset(ds_img)))
    assert example["image"].dtype == tf.float32
    assert example["image"].shape[-1] == 3

    # Both grayscale and RGB requested: grayscale takes precedence.
    normalizer = normalization.Normalizer(ensure_grayscale=True, ensure_rgb=True)
    example = next(iter(normalizer.transform_dataset(ds_img)))
    assert example["image"].shape[-1] == 1
def test_labels_reader_subset(min_labels):
    """example_indices selects a subset of frames and preserves its order."""
    # Three copies of the same labeled frame.
    frame = min_labels[0]
    labels = sleap.Labels([frame, frame, frame])
    assert len(labels) == 3

    # Ask for frames 2 and 1, in that order.
    reader = providers.LabelsReader(labels, example_indices=[2, 1])
    assert len(reader) == 2

    examples = list(iter(reader.make_dataset()))
    assert len(examples) == 2
    assert examples[0]["example_ind"] == 2
    assert examples[1]["example_ind"] == 1
def test_instance_cropper(min_labels):
    """InstanceCropper emits a fully-specified per-instance example."""
    reader = providers.LabelsReader(min_labels)
    centroid_finder = instance_centroids.InstanceCentroidFinder(
        center_on_anchor_part=True,
        anchor_part_names="A",
        skeletons=reader.labels.skeletons,
    )
    cropper = instance_cropping.InstanceCropper(
        crop_width=160, crop_height=160, keep_full_image=False
    )

    ds = centroid_finder.transform_dataset(reader.make_dataset())
    ds = cropper.transform_dataset(ds)
    example = next(iter(ds))

    # Cropped image and its bounding box.
    assert example["instance_image"].shape == (160, 160, 1)
    assert example["instance_image"].dtype == tf.uint8
    assert example["bbox"].shape == (4,)
    assert example["bbox"].dtype == tf.float32

    # The centered instance's points and its index among all instances.
    assert example["center_instance"].shape == (2, 2)
    assert example["center_instance"].dtype == tf.float32
    assert example["center_instance_ind"] == 0
    assert example["center_instance_ind"].dtype == tf.int32

    # All instances' points (in crop coordinates) and the crop's centroid.
    assert example["all_instances"].shape == (2, 2, 2)
    assert example["all_instances"].dtype == tf.float32
    assert example["centroid"].shape == (2,)
    assert example["centroid"].dtype == tf.float32

    # Provenance metadata for the source frame.
    assert example["full_image_height"] == 384
    assert example["full_image_height"].dtype == tf.int32
    assert example["full_image_width"] == 384
    assert example["full_image_width"].dtype == tf.int32
    assert example["video_ind"] == 0
    assert example["video_ind"].dtype == tf.int32
    assert example["frame_ind"] == 0
    assert example["frame_ind"].dtype == tf.int64
    np.testing.assert_array_equal(example["scale"], (1.0, 1.0))
    assert example["scale"].dtype == tf.float32
    assert example["skeleton_inds"].shape == (2,)
    assert example["skeleton_inds"].dtype == tf.int32

    # keep_full_image=False drops the full frame.
    assert "image" not in example
def test_instance_centroid_finder(min_labels):
    """Without anchoring, centroids are computed from the instances' points."""
    reader = providers.LabelsReader(min_labels)
    finder = instance_centroids.InstanceCentroidFinder(center_on_anchor_part=False)

    ds = finder.transform_dataset(reader.make_dataset())
    example = next(iter(ds))

    assert example["centroids"].dtype == tf.float32
    np.testing.assert_allclose(
        example["centroids"],
        [[122.49705, 180.57481], [242.28264, 195.62775]],
    )
def test_normalizer_ensure_float(min_labels):
    """Normalizer with ensure_float=True converts images to float32.

    Renamed from ``test_normalizer``: this module defines another test with
    that exact name earlier in the file, and a duplicate name means only the
    last definition is collected by pytest — the other test silently never
    runs. Also dropped the stray ``tf.executing_eagerly()`` call, which only
    queries eager mode and discarded its result (a no-op).
    """
    labels_reader = providers.LabelsReader(min_labels)
    normalizer = normalization.Normalizer(
        image_key="image", ensure_float=True, ensure_grayscale=True
    )
    ds = labels_reader.make_dataset()
    ds = normalizer.transform_dataset(ds)
    example = next(iter(ds))
    assert example["image"].dtype == tf.float32
def test_instance_centroid_finder_anchored(min_labels):
    """Anchored centroids snap to the named anchor part ("A")."""
    reader = providers.LabelsReader(min_labels)
    finder = instance_centroids.InstanceCentroidFinder(
        center_on_anchor_part=True,
        anchor_part_names="A",
        skeletons=reader.labels.skeletons,
    )

    ds = finder.transform_dataset(reader.make_dataset())
    example = next(iter(ds))

    assert example["centroids"].dtype == tf.float32
    np.testing.assert_allclose(
        example["centroids"],
        [[92.65221, 202.72598], [205.93005, 187.88963]],
    )
def test_part_affinity_fields_generator(min_labels):
    """PAFs have shape (H/stride, W/stride, n_edges, 2) with unit-ish vectors."""
    reader = providers.LabelsReader(min_labels)
    paf_gen = edge_maps.PartAffinityFieldsGenerator(
        sigma=8, output_stride=2, skeletons=reader.labels.skeletons
    )

    ds = paf_gen.transform_dataset(reader.make_dataset())
    example = next(iter(ds))

    # 384 / stride 2 = 192; one edge, two vector components.
    assert example["part_affinity_fields"].shape == (192, 192, 1, 2)
    assert example["part_affinity_fields"].dtype == tf.float32

    # Sample the field near the edge at full-resolution point (250, 196),
    # converted to map coordinates by the stride.
    np.testing.assert_allclose(
        example["part_affinity_fields"][196 // 2, 250 // 2, :, :],
        [[0.9600351, 0.20435576]],
    )
def test_single_instance_confidence_map_generator(min_labels_robot):
    """Single-instance confmaps; with_offsets adds 2 offset channels per node."""
    reader = providers.LabelsReader(min_labels_robot)

    # Without offsets: just the confidence maps.
    generator = SingleInstanceConfidenceMapGenerator(
        sigma=5, output_stride=2, with_offsets=False
    )
    example = next(iter(generator.transform_dataset(reader.make_dataset())))
    assert example["confidence_maps"].shape == (320 // 2, 560 // 2, 2)
    assert example["confidence_maps"].dtype == tf.float32

    # With offsets: an extra "offsets" tensor with 2 channels per node.
    generator = SingleInstanceConfidenceMapGenerator(
        sigma=5, output_stride=2, with_offsets=True
    )
    example = next(iter(generator.transform_dataset(reader.make_dataset())))
    assert example["confidence_maps"].shape == (320 // 2, 560 // 2, 2)
    assert example["confidence_maps"].dtype == tf.float32
    assert example["offsets"].shape == (320 // 2, 560 // 2, 4)
    assert example["offsets"].dtype == tf.float32
def test_labels_reader_multi_size():
    """LabelsReader handles videos of different sizes in the same dataset."""
    # Build labels spanning two videos with different frame sizes.
    skeleton = sleap.Skeleton.from_names_and_edge_inds(["A"])
    one_point = sleap.Instance.from_pointsarray(
        np.array([[128, 128]]), skeleton=skeleton
    )
    robot_frame = sleap.LabeledFrame(
        frame_idx=0,
        video=sleap.Video.from_filename(TEST_SMALL_ROBOT_MP4_FILE, grayscale=True),
        instances=[one_point],
    )
    h5_frame = sleap.LabeledFrame(
        frame_idx=0,
        video=sleap.Video.from_filename(
            TEST_H5_FILE, dataset="/box", input_format="channels_first"
        ),
        instances=[
            sleap.Instance.from_pointsarray(
                np.array([[128, 128]]), skeleton=skeleton
            )
        ],
    )
    labels = sleap.Labels([robot_frame, h5_frame])

    # The reader yields each frame at its native size.
    reader = providers.LabelsReader(labels)
    ds_iter = iter(reader.make_dataset())
    assert next(ds_iter)["image"].shape == (320, 560, 1)
    assert next(ds_iter)["image"].shape == (512, 512, 1)

    # Size utilities reflect the per-axis maxima across videos.
    height, width = reader.max_height_and_width
    assert height == 512
    assert width == 560
    assert reader.is_from_multi_size_videos
def test_size_matcher():
    """SizeMatcher resizes mixed-size frames to one target size, padding with
    zeros on the bottom and/or right as needed.

    Uses two videos of different sizes (320x560 robot video, 512x512 HDF5
    dataset) and checks the output shape plus where the zero padding lands
    for several target-size configurations.
    """
    # Create some fake data using two different size videos.
    skeleton = sleap.Skeleton.from_names_and_edge_inds(["A"])
    labels = sleap.Labels([
        sleap.LabeledFrame(
            frame_idx=0,
            video=sleap.Video.from_filename(
                TEST_SMALL_ROBOT_MP4_FILE, grayscale=True
            ),
            instances=[
                sleap.Instance.from_pointsarray(
                    np.array([[128, 128]]), skeleton=skeleton
                )
            ],
        ),
        sleap.LabeledFrame(
            frame_idx=0,
            video=sleap.Video.from_filename(
                TEST_H5_FILE, dataset="/box", input_format="channels_first"
            ),
            instances=[
                sleap.Instance.from_pointsarray(
                    np.array([[128, 128]]), skeleton=skeleton
                )
            ],
        ),
    ])
    # Create a loader for those labels.
    labels_reader = providers.LabelsReader(labels)
    ds = labels_reader.make_dataset()
    ds_iter = iter(ds)
    # Sanity check: the reader yields each frame at its native size.
    assert next(ds_iter)["image"].shape == (320, 560, 1)
    assert next(ds_iter)["image"].shape == (512, 512, 1)

    def check_padding(image, from_y, to_y, from_x, to_x):
        # The given region must be entirely zero (i.e. padding, not content).
        assert (image.numpy()[from_y:to_y, from_x:to_x] == 0).all()

    # Check SizeMatcher when target dims is not strictly larger than actual
    # image dims.
    size_matcher = SizeMatcher(max_image_height=560, max_image_width=560)
    transform_iter = iter(size_matcher.transform_dataset(ds))
    im1 = next(transform_iter)["image"]
    assert im1.shape == (560, 560, 1)
    # padding should be on the bottom
    check_padding(im1, 321, 560, 0, 560)
    im2 = next(transform_iter)["image"]
    assert im2.shape == (560, 560, 1)
    # Variant 2: target matches the first video's size exactly; the second
    # (square) video is scaled down and padded on the right.
    size_matcher = SizeMatcher(max_image_height=320, max_image_width=560)
    transform_iter = iter(size_matcher.transform_dataset(ds))
    im1 = next(transform_iter)["image"]
    assert im1.shape == (320, 560, 1)
    im2 = next(transform_iter)["image"]
    assert im2.shape == (320, 560, 1)
    # padding should be on the right
    check_padding(im2, 0, 320, 321, 560)
    # Check SizeMatcher when target is 'max' in both dimensions
    size_matcher = SizeMatcher(max_image_height=512, max_image_width=560)
    transform_iter = iter(size_matcher.transform_dataset(ds))
    im1 = next(transform_iter)["image"]
    assert im1.shape == (512, 560, 1)
    # Check padding is on the bottom
    check_padding(im1, 320, 512, 0, 560)
    im2 = next(transform_iter)["image"]
    assert im2.shape == (512, 560, 1)
    # Check padding is on the right
    check_padding(im2, 0, 512, 512, 560)
    # Check SizeMatcher when target is larger in both dimensions
    size_matcher = SizeMatcher(max_image_height=750, max_image_width=750)
    transform_iter = iter(size_matcher.transform_dataset(ds))
    im1 = next(transform_iter)["image"]
    assert im1.shape == (750, 750, 1)
    # Check padding is on the bottom
    check_padding(im1, 700, 750, 0, 750)
    im2 = next(transform_iter)["image"]
    assert im2.shape == (750, 750, 1)