def test_iterate_directories():
    directory1 = data_path("files/d1")
    directory2 = data_path("files/d2")

    files = list(iterate_directories((directory1, directory2), "foo"))
    assert len(files) == 5

    files = list(iterate_directories((directory1, directory2), "foo", "e"))
    assert len(files) == 3

    files = list(iterate_directories((directory1, directory2), "baz"))
    assert len(files) == 0
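
# A minimal sketch of how iterate_directories could be built on top of
# iterate_files, consistent with the test above: it simply chains the
# per-directory iteration. The name and body are hypothetical; the real
# implementation and signature may differ.
def _iterate_directories_sketch(directories, extension, name_filter=None):
    for directory in directories:
        if name_filter is None:
            yield from iterate_files(directory, extension)
        else:
            yield from iterate_files(directory, extension, name_filter)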
def test_iterate_files():
    directory = data_path("files/d1")

    assert len(list(iterate_files(directory, "foo"))) == 3
    assert len(list(iterate_files(directory, ".foo"))) == 3
    assert len(list(iterate_files(directory, "foo", "e"))) == 2
    assert len(list(iterate_files(directory, "foo", "a"))) == 1
    assert len(list(iterate_files(directory, "bar"))) == 0
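
# A minimal sketch of the behaviour test_iterate_files exercises, assuming
# iterate_files matches by extension (given with or without a leading dot)
# and optionally filters on a substring of the file name. Hypothetical helper;
# the real implementation may differ.
def _iterate_files_sketch(directory, extension, name_filter=None):
    import pathlib

    suffix = extension if extension.startswith(".") else f".{extension}"
    for path in sorted(pathlib.Path(directory).iterdir()):
        if path.suffix != suffix:
            continue
        if name_filter is not None and name_filter not in path.stem:
            continue
        yield str(path)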
def test_load_dataset():
    dataset_path = os.path.abspath(data_path("dataset"))

    df = load_dataset(dataset_path, ".jpeg")
    assert sorted(df["path"]) == [
        os.path.join(dataset_path, f"{p}.jpeg") for p in ("1", "2", "3")
    ]

    df = load_dataset(dataset_path, ".jpeg", "1")
    assert sorted(df["path"]) == [os.path.join(dataset_path, "1.jpeg")]
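
# A minimal sketch of the DataFrame shape test_load_dataset relies on: a
# single "path" column holding the absolute file paths found for the given
# extension and optional name filter. Hypothetical; the real load_dataset may
# return additional columns or metadata.
def _load_dataset_sketch(dataset_path, extension, name_filter=None):
    import pandas as pd

    if name_filter is None:
        paths = iterate_files(dataset_path, extension)
    else:
        paths = iterate_files(dataset_path, extension, name_filter)
    return pd.DataFrame({"path": sorted(paths)})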
def test_overlay_masks():
    masks = [
        load_image(data_path(f"segmentation/color_masks/mask-{i}.png"))
        for i in range(4)
    ]
    background = np.zeros((100, 100, 3), dtype=np.uint8)
    background[:, :] = (50, 50, 50)

    check_image_equality(
        overlay_masks(background, masks, alpha=1.0),
        data_path("segmentation/color_masks/overlay-alpha-1.0.png"),
        delta=1,
    )
    check_image_equality(
        overlay_masks(background, masks, alpha=0.5),
        data_path("segmentation/color_masks/overlay-alpha-0.5.png"),
        delta=1,
    )
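
# A minimal sketch of an alpha blend consistent with what test_overlay_masks
# expects: each coloured mask is blended over the background with the given
# opacity, but only where the mask has non-zero pixels. Hypothetical; the real
# overlay_masks may blend differently (e.g. via cv2.addWeighted).
def _overlay_masks_sketch(background, masks, alpha=0.5):
    result = background.astype(np.float32)
    for mask in masks:
        region = mask.any(axis=-1)  # pixels covered by this coloured mask
        blended = (1.0 - alpha) * result[region] + alpha * mask[region].astype(np.float32)
        result[region] = blended
    return result.astype(np.uint8)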
def test_load_voc_xml():
    annotated = voc_to_annotated_image(data_path("example.xml"))
    width, height = annotated.width, annotated.height

    assert width == 500
    assert height == 375

    assert annotated.annotations[0].class_name == "dog"
    assert annotated.annotations[0].confidence is None
    assert annotated.annotations[0].type == AnnotationType.GROUND_TRUTH
    assert annotated.annotations[0].bbox.denormalize(
        width, height).to_int().as_tuple() == (144, 255, 90, 201)

    assert annotated.annotations[1].class_name == "dog"
    assert annotated.annotations[1].bbox.denormalize(
        width, height).to_int().as_tuple() == (264, 380, 73, 180)
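
# For reference, a hypothetical reading of a Pascal VOC annotation file with
# the standard library, showing the fields the test checks (image size, object
# class names, bounding boxes). voc_to_annotated_image presumably parses
# something similar, but its actual internals and bbox conventions may differ.
def _read_voc_sketch(xml_path):
    import xml.etree.ElementTree as ET

    root = ET.parse(xml_path).getroot()
    size = root.find("size")
    width = int(size.find("width").text)
    height = int(size.find("height").text)
    objects = []
    for obj in root.findall("object"):
        box = obj.find("bndbox")
        objects.append({
            "class_name": obj.find("name").text,
            "bbox": tuple(int(box.find(k).text)
                          for k in ("xmin", "ymin", "xmax", "ymax")),
        })
    return width, height, objects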
def test_color_bitmap_masks():
    mask1 = np.zeros((100, 100), dtype=np.uint8)
    mask1[10:30, 20:40] = 255
    mask2 = np.zeros((100, 100), dtype=np.uint8)
    mask2[35:50, 40:60] = 255
    mask3 = np.zeros((100, 100), dtype=np.uint8)
    mask3[35:50, 70:90] = 255
    mask4 = np.zeros((100, 100), dtype=np.uint8)
    mask4[70:95, 10:80] = 255

    palette = [(1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0)]
    colored_masks = color_bitmap_masks((mask1, mask2, mask3, mask4), palette)

    for index, mask in enumerate(colored_masks):
        check_image_equality(
            mask, data_path(f"segmentation/color_masks/mask-{index}.png"))
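
# A minimal sketch of the colouring test_color_bitmap_masks seems to rely on:
# each binary mask becomes an RGB image painted with its palette colour
# (scaled from 0-1 floats to 0-255), with the palette cycling when there are
# more masks than colours. Hypothetical; the real color_bitmap_masks may
# differ in detail.
def _color_bitmap_masks_sketch(masks, palette):
    colored = []
    for index, mask in enumerate(masks):
        color = (np.array(palette[index % len(palette)]) * 255).astype(np.uint8)
        image = np.zeros((*mask.shape, 3), dtype=np.uint8)
        image[mask > 0] = color
        colored.append(image)
    return colored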
def test_load_image_rgb():
    img = load_image(data_path("example.jpeg"))
    assert img.shape == (375, 500, 3)
    assert np.max(img) > 1.0
def test_load_image_normalize():
    img = load_image(data_path("example.jpeg"), normalize=True)
    assert np.max(img) <= 1.0
def test_load_image_resize():
    img = load_image(data_path("example.jpeg"), target_size=(224, 224))
    assert img.shape == (224, 224, 3)
def test_load_image_grayscale():
    img = load_image(data_path("example.jpeg"), color_mode="grayscale")
    assert img.shape == (375, 500)
def test_load_image_bgr():
    img = load_image(data_path("example.jpeg"), color_mode="bgr")
    check_image_equality(cv2.cvtColor(img, cv2.COLOR_BGR2RGB),
                         data_path("example.jpeg"))
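
# A minimal sketch of the behaviour the load_image tests exercise, built on
# OpenCV: BGR/RGB/grayscale colour modes, optional resizing, and optional
# scaling to the 0-1 range. Hypothetical; the real load_image may use a
# different backend or handle its arguments differently.
def _load_image_sketch(path, color_mode="rgb", target_size=None, normalize=False):
    flag = cv2.IMREAD_GRAYSCALE if color_mode == "grayscale" else cv2.IMREAD_COLOR
    img = cv2.imread(path, flag)  # OpenCV loads colour images as BGR
    if color_mode == "rgb":
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if target_size is not None:
        img = cv2.resize(img, target_size)
    if normalize:
        img = img.astype(np.float32) / 255.0
    return img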