def test_random_sharpness_py(degrees=(0.7, 0.7), plot=False):
    """
    Test RandomSharpness python op
    """
    logger.info("Test RandomSharpness python op")

    # Baseline pipeline: decode + resize only.
    data = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False)
    transforms_original = F.ComposeOp([F.Decode(),
                                       F.Resize((224, 224)),
                                       F.ToTensor()])
    ds_original = data.map(input_columns="image",
                           operations=transforms_original())
    ds_original = ds_original.batch(512)

    original_batches = []
    for image, _ in ds_original:
        # NCHW -> NHWC for comparison/plotting
        original_batches.append(np.transpose(image, (0, 2, 3, 1)))
    images_original = np.concatenate(original_batches, axis=0)

    # Same pipeline with RandomSharpness inserted before ToTensor.
    data = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False)
    py_op = F.RandomSharpness() if degrees is None else F.RandomSharpness(degrees)
    transforms_random_sharpness = F.ComposeOp([F.Decode(),
                                               F.Resize((224, 224)),
                                               py_op,
                                               F.ToTensor()])
    ds_random_sharpness = data.map(input_columns="image",
                                   operations=transforms_random_sharpness())
    ds_random_sharpness = ds_random_sharpness.batch(512)

    sharpness_batches = []
    for image, _ in ds_random_sharpness:
        sharpness_batches.append(np.transpose(image, (0, 2, 3, 1)))
    images_random_sharpness = np.concatenate(sharpness_batches, axis=0)

    # Per-sample MSE against the unmodified images.
    mse = np.array([diff_mse(adjusted, original)
                    for adjusted, original in zip(images_random_sharpness, images_original)])
    logger.info("MSE= {}".format(str(np.mean(mse))))

    if plot:
        visualize_list(images_original, images_random_sharpness)
def test_random_perspective_op(plot=False):
    """
    Test RandomPerspective in python transformations
    """
    logger.info("test_random_perspective_op")

    # Map operations: one pipeline with RandomPerspective, one without.
    transform1 = py_vision.ComposeOp([py_vision.Decode(),
                                      py_vision.RandomPerspective(),
                                      py_vision.ToTensor()])
    transform2 = py_vision.ComposeOp([py_vision.Decode(),
                                      py_vision.ToTensor()])

    # First dataset: perspective-transformed images
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data1 = data1.map(input_columns=["image"], operations=transform1())

    # Second dataset: untouched originals for visual comparison
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=transform2())

    image_perspective = []
    image_original = []
    for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        # CHW float tensors back to HWC uint8 images
        image_perspective.append((item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8))
        image_original.append((item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8))

    if plot:
        visualize(image_original, image_perspective)
def test_auto_contrast_invalid_cutoff_param_py():
    """
    Test AutoContrast python Op with invalid cutoff parameter
    """
    logger.info("Test AutoContrast python Op with invalid cutoff parameter")
    # NOTE(fix): the previous try/except pattern passed silently when no
    # exception was raised; pytest.raises makes a missing ValueError fail.

    # cutoff below the valid (0, 100) interval
    with pytest.raises(ValueError) as error_info:
        data = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False)
        data = data.map(input_columns=["image"],
                        operations=[F.ComposeOp([F.Decode(),
                                                 F.Resize((224, 224)),
                                                 F.AutoContrast(cutoff=-10.0),
                                                 F.ToTensor()])])
    logger.info("Got an exception in DE: {}".format(str(error_info.value)))
    assert "Input cutoff is not within the required interval of (0 to 100)." in str(error_info.value)

    # cutoff above the valid (0, 100) interval
    with pytest.raises(ValueError) as error_info:
        data = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False)
        data = data.map(input_columns=["image"],
                        operations=[F.ComposeOp([F.Decode(),
                                                 F.Resize((224, 224)),
                                                 F.AutoContrast(cutoff=120.0),
                                                 F.ToTensor()])])
    logger.info("Got an exception in DE: {}".format(str(error_info.value)))
    assert "Input cutoff is not within the required interval of (0 to 100)." in str(error_info.value)
def test_random_grayscale_valid_prob(plot=False):
    """
    Test RandomGrayscale Op: valid input, expect to pass
    """
    logger.info("test_random_grayscale_valid_prob")

    # First dataset: grayscale pipeline.
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transform1 = py_vision.ComposeOp([
        py_vision.Decode(),
        # Note: prob is 1 so the output should always be grayscale images
        py_vision.RandomGrayscale(1),
        py_vision.ToTensor()
    ])
    data1 = data1.map(input_columns=["image"], operations=transform1())

    # Second dataset: unmodified reference images.
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transform2 = py_vision.ComposeOp([py_vision.Decode(),
                                      py_vision.ToTensor()])
    data2 = data2.map(input_columns=["image"], operations=transform2())

    image_gray = []
    image = []
    for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        image_gray.append((item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8))
        image.append((item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8))

    if plot:
        visualize_list(image, image_gray)
def test_auto_contrast_invalid_ignore_param_py():
    """
    Test AutoContrast python Op with invalid ignore parameter
    """
    logger.info("Test AutoContrast python Op with invalid ignore parameter")
    # NOTE(fix): the previous try/except pattern passed silently when no
    # exception was raised; pytest.raises makes a missing TypeError fail.

    # float ignore value is invalid
    with pytest.raises(TypeError) as error_info:
        data = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False)
        data = data.map(input_columns=["image"],
                        operations=[F.ComposeOp([F.Decode(),
                                                 F.Resize((224, 224)),
                                                 F.AutoContrast(ignore=255.5),
                                                 F.ToTensor()])])
    logger.info("Got an exception in DE: {}".format(str(error_info.value)))
    assert "Argument ignore with value 255.5 is not of type" in str(error_info.value)

    # tuple ignore value is invalid
    with pytest.raises(TypeError) as error_info:
        data = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False)
        data = data.map(input_columns=["image"],
                        operations=[F.ComposeOp([F.Decode(),
                                                 F.Resize((224, 224)),
                                                 F.AutoContrast(ignore=(10, 100)),
                                                 F.ToTensor()])])
    logger.info("Got an exception in DE: {}".format(str(error_info.value)))
    assert "Argument ignore with value (10,100) is not of type" in str(error_info.value)
def test_rgb_hsv_pipeline():
    """
    Test that an RGB -> HSV -> RGB round trip in the pipeline reproduces the input.
    """
    # Reference pipeline: decode + tensor only.
    compose_plain = vision.ComposeOp([vision.Decode(), vision.ToTensor()])
    ds1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    ds1 = ds1.map(input_columns=["image"], operations=compose_plain())

    # Round-trip pipeline: convert to HSV and straight back to RGB.
    compose_roundtrip = vision.ComposeOp([
        vision.Decode(),
        vision.ToTensor(),
        vision.RgbToHsv(),
        vision.HsvToRgb()
    ])
    ds2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    ds2 = ds2.map(input_columns=["image"], operations=compose_roundtrip())

    num_iter = 0
    for data1, data2 in zip(ds1.create_dict_iterator(), ds2.create_dict_iterator()):
        num_iter += 1
        ori_img = data1["image"]
        cvt_img = data2["image"]
        # The round trip should be numerically close to the original.
        assert_allclose(ori_img.flatten(), cvt_img.flatten(), rtol=1e-5, atol=0)
        assert ori_img.shape == cvt_img.shape
def test_random_crop_op_py(plot=False):
    """
    Test RandomCrop op in py transforms
    """
    logger.info("test_random_crop_op_py")

    # First dataset: 512x512 random crop with 200-pixel padding on each side.
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transform1 = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.RandomCrop([512, 512], [200, 200, 200, 200]),
        py_vision.ToTensor()
    ])
    data1 = data1.map(input_columns=["image"], operations=transform1())

    # Second dataset: untouched originals for comparison.
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transform2 = py_vision.ComposeOp([py_vision.Decode(),
                                      py_vision.ToTensor()])
    data2 = data2.map(input_columns=["image"], operations=transform2())

    crop_images = []
    original_images = []
    for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        crop_images.append((item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8))
        original_images.append((item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8))

    if plot:
        visualize(original_images, crop_images)
def test_ten_crop_invalid_size_error_msg():
    """
    Tests TenCrop error message when the size arg is not positive
    """
    logger.info("test_ten_crop_invalid_size_error_msg")
    error_msg = "Input is not within the required range"

    # A zero size must be rejected at construction time.
    with pytest.raises(ValueError) as info:
        _ = [
            vision.Decode(),
            vision.TenCrop(0),
            lambda images: np.stack([vision.ToTensor()(image) for image in images])  # 4D stack of 10 images
        ]
    assert error_msg == str(info.value)

    # A negative size must be rejected the same way.
    with pytest.raises(ValueError) as info:
        _ = [
            vision.Decode(),
            vision.TenCrop(-10),
            lambda images: np.stack([vision.ToTensor()(image) for image in images])  # 4D stack of 10 images
        ]
    assert error_msg == str(info.value)
def test_cpp_uniform_augment(plot=False, num_ops=2):
    """
    Test UniformAugment
    """
    logger.info("Test CPP UniformAugment")

    # Original images (decode + resize only).
    # Local dataset renamed to avoid shadowing any 'ds' module alias.
    data = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False)
    transforms_original = [C.Decode(), C.Resize(size=[224, 224]), F.ToTensor()]
    ds_original = data.map(input_columns="image", operations=transforms_original)
    ds_original = ds_original.batch(512)

    original_batches = []
    for image, _ in ds_original:
        original_batches.append(np.transpose(image, (0, 2, 3, 1)))
    images_original = np.concatenate(original_batches, axis=0)

    # UniformAugment picks num_ops of these candidate transforms per image.
    data = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False)
    transforms_ua = [C.RandomCrop(size=[224, 224], padding=[32, 32, 32, 32]),
                     C.RandomHorizontalFlip(),
                     C.RandomVerticalFlip(),
                     C.RandomColorAdjust(),
                     C.RandomRotation(degrees=45)]
    uni_aug = C.UniformAugment(operations=transforms_ua, num_ops=num_ops)
    transforms_all = [C.Decode(), C.Resize(size=[224, 224]), uni_aug, F.ToTensor()]
    ds_ua = data.map(input_columns="image",
                     operations=transforms_all,
                     num_parallel_workers=1)
    ds_ua = ds_ua.batch(512)

    ua_batches = []
    for image, _ in ds_ua:
        ua_batches.append(np.transpose(image, (0, 2, 3, 1)))
    images_ua = np.concatenate(ua_batches, axis=0)

    if plot:
        visualize_list(images_original, images_ua)

    # Per-sample MSE against the unaugmented images.
    mse = np.array([diff_mse(augmented, original)
                    for augmented, original in zip(images_ua, images_original)])
    logger.info("MSE= {}".format(str(np.mean(mse))))
def test_random_color(degrees=(0.1, 1.9), plot=False):
    """
    Test RandomColor
    """
    logger.info("Test RandomColor")

    # Original images (decode + resize only).
    data = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False)
    transforms_original = F.ComposeOp([F.Decode(),
                                       F.Resize((224, 224)),
                                       F.ToTensor()])
    ds_original = data.map(input_columns="image",
                           operations=transforms_original())
    ds_original = ds_original.batch(512)

    original_batches = []
    for image, _ in ds_original:
        original_batches.append(np.transpose(image, (0, 2, 3, 1)))
    images_original = np.concatenate(original_batches, axis=0)

    # Same pipeline with RandomColor inserted.
    data = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False)
    transforms_random_color = F.ComposeOp([
        F.Decode(),
        F.Resize((224, 224)),
        F.RandomColor(degrees=degrees),
        F.ToTensor()
    ])
    ds_random_color = data.map(input_columns="image",
                               operations=transforms_random_color())
    ds_random_color = ds_random_color.batch(512)

    color_batches = []
    for image, _ in ds_random_color:
        color_batches.append(np.transpose(image, (0, 2, 3, 1)))
    images_random_color = np.concatenate(color_batches, axis=0)

    # Per-sample mean squared error against the originals.
    mse = np.array([np.mean((adjusted - original) ** 2)
                    for adjusted, original in zip(images_random_color, images_original)])
    logger.info("MSE= {}".format(str(np.mean(mse))))

    if plot:
        visualize(images_original, images_random_color)
def test_auto_contrast(plot=False):
    """
    Test AutoContrast
    """
    logger.info("Test AutoContrast")

    # Original images (decode + resize only).
    data = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False)
    transforms_original = F.ComposeOp([F.Decode(),
                                       F.Resize((224, 224)),
                                       F.ToTensor()])
    ds_original = data.map(input_columns="image",
                           operations=transforms_original())
    ds_original = ds_original.batch(512)

    original_batches = []
    for image, _ in ds_original:
        original_batches.append(np.transpose(image, (0, 2, 3, 1)))
    images_original = np.concatenate(original_batches, axis=0)

    # Same pipeline with AutoContrast inserted.
    data = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False)
    transforms_auto_contrast = F.ComposeOp([F.Decode(),
                                            F.Resize((224, 224)),
                                            F.AutoContrast(),
                                            F.ToTensor()])
    ds_auto_contrast = data.map(input_columns="image",
                                operations=transforms_auto_contrast())
    ds_auto_contrast = ds_auto_contrast.batch(512)

    contrast_batches = []
    for image, _ in ds_auto_contrast:
        contrast_batches.append(np.transpose(image, (0, 2, 3, 1)))
    images_auto_contrast = np.concatenate(contrast_batches, axis=0)

    # Per-sample mean squared error against the originals.
    mse = np.array([np.mean((adjusted - original) ** 2)
                    for adjusted, original in zip(images_auto_contrast, images_original)])
    logger.info("MSE= {}".format(str(np.mean(mse))))

    if plot:
        visualize(images_original, images_auto_contrast)
def test_equalize_py(plot=False):
    """
    Test Equalize py op
    """
    logger.info("Test Equalize")

    # Original images (decode + resize only).
    data = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False)
    transforms_original = F.ComposeOp([F.Decode(),
                                       F.Resize((224, 224)),
                                       F.ToTensor()])
    ds_original = data.map(input_columns="image",
                           operations=transforms_original())
    ds_original = ds_original.batch(512)

    original_batches = []
    for image, _ in ds_original:
        original_batches.append(np.transpose(image, (0, 2, 3, 1)))
    images_original = np.concatenate(original_batches, axis=0)

    # Same pipeline with Equalize inserted.
    data = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False)
    transforms_equalize = F.ComposeOp([F.Decode(),
                                       F.Resize((224, 224)),
                                       F.Equalize(),
                                       F.ToTensor()])
    ds_equalize = data.map(input_columns="image",
                           operations=transforms_equalize())
    ds_equalize = ds_equalize.batch(512)

    equalize_batches = []
    for image, _ in ds_equalize:
        equalize_batches.append(np.transpose(image, (0, 2, 3, 1)))
    images_equalize = np.concatenate(equalize_batches, axis=0)

    # Per-sample MSE against the originals.
    mse = np.array([diff_mse(equalized, original)
                    for equalized, original in zip(images_equalize, images_original)])
    logger.info("MSE= {}".format(str(np.mean(mse))))

    if plot:
        visualize_list(images_original, images_equalize)
def test_five_crop_op(plot=False):
    """
    Test FiveCrop
    """
    logger.info("test_five_crop")

    # First dataset: plain decode + tensor reference.
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transform_1 = vision.ComposeOp([
        vision.Decode(),
        vision.ToTensor(),
    ])
    data1 = data1.map(input_columns=["image"], operations=transform_1())

    # Second dataset: FiveCrop then stack the 5 crops into one 4D array.
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transform_2 = vision.ComposeOp([
        vision.Decode(),
        vision.FiveCrop(200),
        lambda images: np.stack([vision.ToTensor()(image) for image in images])  # 4D stack of 5 images
    ])
    data2 = data2.map(input_columns=["image"], operations=transform_2())

    num_iter = 0
    for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        num_iter += 1
        image_1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_2 = item2["image"]
        logger.info("shape of image_1: {}".format(image_1.shape))
        logger.info("shape of image_2: {}".format(image_2.shape))
        logger.info("dtype of image_1: {}".format(image_1.dtype))
        logger.info("dtype of image_2: {}".format(image_2.dtype))
        if plot:
            visualize_list(np.array([image_1] * 5),
                           (image_2 * 255).astype(np.uint8).transpose(0, 2, 3, 1))

        # The output data should be of a 4D tensor shape, a stack of 5 images.
        assert len(image_2.shape) == 4
        assert image_2.shape[0] == 5
def util_test_ten_crop(crop_size, vertical_flip=False, plot=False):
    """
    Utility function for testing TenCrop. Input arguments are given by other tests
    """
    # First dataset: plain decode + tensor reference.
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transform_1 = vision.ComposeOp([
        vision.Decode(),
        vision.ToTensor(),
    ])
    data1 = data1.map(input_columns=["image"], operations=transform_1())

    # Second dataset: TenCrop then stack the 10 crops into one 4D array.
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transform_2 = vision.ComposeOp([
        vision.Decode(),
        vision.TenCrop(crop_size, use_vertical_flip=vertical_flip),
        lambda images: np.stack([vision.ToTensor()(image) for image in images])  # 4D stack of 10 images
    ])
    data2 = data2.map(input_columns=["image"], operations=transform_2())

    num_iter = 0
    for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        num_iter += 1
        image_1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_2 = item2["image"]
        logger.info("shape of image_1: {}".format(image_1.shape))
        logger.info("shape of image_2: {}".format(image_2.shape))
        logger.info("dtype of image_1: {}".format(image_1.dtype))
        logger.info("dtype of image_2: {}".format(image_2.dtype))
        if plot:
            visualize_list(np.array([image_1] * 10),
                           (image_2 * 255).astype(np.uint8).transpose(0, 2, 3, 1))

        # The output data should be of a 4D tensor shape, a stack of 10 images.
        assert len(image_2.shape) == 4
        assert image_2.shape[0] == 10
def test_random_affine_md5():
    """
    Test RandomAffine with md5 comparison
    """
    logger.info("test_random_affine_md5")
    # Fix seed and worker count so the md5 golden file is reproducible.
    original_seed = config_get_set_seed(55)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # define map operations
    transform = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.RandomAffine(degrees=(-5, 15), translate=(0.1, 0.3),
                               scale=(0.9, 1.1), shear=(-10, 10, -5, 5)),
        py_vision.ToTensor()
    ])

    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data = data.map(input_columns=["image"], operations=transform())

    # check results with md5 comparison
    filename = "random_affine_01_result.npz"
    save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)

    # Restore configuration
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_08_py():
    """
    Test RandomCrop op with py_transforms: padding_mode is Border.EDGE,
    expected to pass
    """
    logger.info("test_random_crop_08_py")
    # Fix seed and worker count so the md5 golden file is reproducible.
    original_seed = config_get_set_seed(0)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    # Note: The padding_mode is Border.EDGE.
    transform = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.RandomCrop(512, [200, 200, 200, 200],
                             padding_mode=mode.Border.EDGE),
        py_vision.ToTensor()
    ])
    data = data.map(input_columns=["image"], operations=transform())

    filename = "random_crop_08_py_result.npz"
    save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)

    # Restore config setting
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_05_py():
    """
    Test RandomCrop op with py_transforms:
    input image size < crop size but pad_if_needed is enabled,
    expected to pass
    """
    logger.info("test_random_crop_05_py")
    # Fix seed and worker count so the md5 golden file is reproducible.
    original_seed = config_get_set_seed(0)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    # Note: The size of the image is 4032*2268
    transform = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.RandomCrop([2268, 4033], [200, 200, 200, 200],
                             pad_if_needed=True),
        py_vision.ToTensor()
    ])
    data = data.map(input_columns=["image"], operations=transform())

    filename = "random_crop_05_py_result.npz"
    save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)

    # Restore config setting
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_04_py():
    """
    Test RandomCrop op with py_transforms:
    input image size < crop size, expected to fail
    """
    logger.info("test_random_crop_04_py")

    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    # Note: The size of the image is 4032*2268
    transforms = [
        py_vision.Decode(),
        py_vision.RandomCrop([2268, 4033]),
        py_vision.ToTensor()
    ]
    transform = py_vision.ComposeOp(transforms)
    data = data.map(input_columns=["image"], operations=transform())
    # NOTE(fix): the previous try/except pattern passed silently when no
    # exception was raised; pytest.raises asserts the failure really occurs.
    with pytest.raises(RuntimeError) as error_info:
        data.create_dict_iterator().get_next()
    logger.info("Got an exception in DE: {}".format(str(error_info.value)))
    assert "Crop size" in str(error_info.value)
def test_random_crop_02_py():
    """
    Test RandomCrop op with py_transforms: size is a list/tuple with length 2,
    expected to pass
    """
    logger.info("test_random_crop_02_py")
    # Fix seed and worker count so the md5 golden file is reproducible.
    original_seed = config_get_set_seed(0)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    # Note: If size is a sequence of length 2, it should be (height, width).
    transform = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.RandomCrop([512, 375]),
        py_vision.ToTensor()
    ])
    data = data.map(input_columns=["image"], operations=transform())

    filename = "random_crop_02_py_result.npz"
    save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)

    # Restore config setting
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_crop_grayscale(height=375, width=375):
    """
    Test that centercrop works with pad and grayscale images
    """

    def channel_swap(image):
        """
        Py func hack for our pytransforms to work with c transforms
        """
        return (image.transpose(1, 2, 0) * 255).astype(np.uint8)

    # Decode to grayscale, then convert back to HWC uint8 for the C op.
    transform = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.Grayscale(1),
        py_vision.ToTensor(),
        channel_swap
    ])
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data1 = data1.map(input_columns=["image"], operations=transform())

    # If input is grayscale, the output dimensions should be single channel
    crop_gray = vision.CenterCrop([height, width])
    data1 = data1.map(input_columns=["image"], operations=crop_gray)
    for item1 in data1.create_dict_iterator():
        c_image = item1["image"]
        # Check that the image is grayscale
        assert c_image.ndim == 3 and c_image.shape[2] == 1
def test_random_horizontal_invalid_prob_py():
    """
    Test RandomHorizontalFlip op in py_transforms: invalid input, expect to raise error
    """
    logger.info("test_random_horizontal_invalid_prob_py")

    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    # NOTE(fix): the previous try/except pattern passed silently when no
    # exception was raised; pytest.raises makes a missing ValueError fail.
    with pytest.raises(ValueError) as error_info:
        transforms = [
            py_vision.Decode(),
            # Note: Valid range of prob should be [0.0, 1.0]
            py_vision.RandomHorizontalFlip(1.5),
            py_vision.ToTensor()
        ]
        transform = py_vision.ComposeOp(transforms)
        data = data.map(input_columns=["image"], operations=transform())
    logger.info("Got an exception in DE: {}".format(str(error_info.value)))
    assert "Input prob is not within the required interval of (0.0 to 1.0)." in str(error_info.value)
def test_random_crop_06_py():
    """
    Test RandomCrop op with py_transforms:
    invalid size, expected to raise TypeError
    """
    logger.info("test_random_crop_06_py")
    # NOTE(fix): save and restore the config (the previous version set the
    # seed/workers and never restored them, leaking state into later tests).
    original_seed = config_get_set_seed(0)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # NOTE(fix): the previous try/except pattern passed silently when no
    # exception was raised; pytest.raises makes a missing TypeError fail.
    with pytest.raises(TypeError) as error_info:
        # Generate dataset
        data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
        # Note: if size is neither an int nor a list of length 2, an exception will raise
        transforms = [
            py_vision.Decode(),
            py_vision.RandomCrop([512, 512, 375]),
            py_vision.ToTensor()
        ]
        transform = py_vision.ComposeOp(transforms)
        data = data.map(input_columns=["image"], operations=transform())
        for item in data.create_dict_iterator():
            _ = (item["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
    logger.info("Got an exception in DE: {}".format(str(error_info.value)))
    assert "Size" in str(error_info.value)

    # Restore config setting
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_and_resize_05_py():
    """
    Test RandomCropAndResize with py_transforms: invalid range of ratio (max<min),
    expected to raise ValueError
    """
    logger.info("test_random_crop_and_resize_05_py")
    # NOTE(fix): save and restore the config (the previous version set the
    # seed/workers and never restored them, leaking state into later tests).
    original_seed = config_get_set_seed(0)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # NOTE(fix): the previous try/except pattern passed silently when no
    # exception was raised; pytest.raises makes a missing ValueError fail.
    with pytest.raises(ValueError) as error_info:
        # Generate dataset
        data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
        transforms = [
            py_vision.Decode(),
            # If input range of ratio is not in the order of (min, max), ValueError will be raised.
            py_vision.RandomResizedCrop((256, 512), (1, 1), (1, 0.5)),
            py_vision.ToTensor()
        ]
        transform = py_vision.ComposeOp(transforms)
        data = data.map(input_columns=["image"], operations=transform())
        for item in data.create_dict_iterator():
            _ = item["image"]
    logger.info("Got an exception in DE: {}".format(str(error_info.value)))
    assert "Input range is not valid" in str(error_info.value)

    # Restore config setting
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_07_py():
    """
    Test RandomCrop op with py_transforms:
    padding_mode is Border.CONSTANT and fill_value is 255 (White),
    expected to pass
    """
    logger.info("test_random_crop_07_py")
    # NOTE(fix): save and restore the config like the sibling md5 tests do
    # (the previous version set the seed/workers and never restored them).
    original_seed = config_get_set_seed(0)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    # Note: The padding_mode is default as Border.CONSTANT and set filling color to be white.
    transforms = [
        py_vision.Decode(),
        py_vision.RandomCrop(512, [200, 200, 200, 200], fill_value=(255, 255, 255)),
        py_vision.ToTensor()
    ]
    transform = py_vision.ComposeOp(transforms)
    data = data.map(input_columns=["image"], operations=transform())

    filename = "random_crop_07_py_result.npz"
    save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)

    # Restore config setting
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_sharpness_md5():
    """
    Test RandomSharpness with md5 comparison
    """
    logger.info("Test RandomSharpness with md5 comparison")
    # Fix seed and worker count so the md5 golden file is reproducible.
    original_seed = config_get_set_seed(5)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # define map operations
    transform = F.ComposeOp([
        F.Decode(),
        F.RandomSharpness((0.1, 1.9)),
        F.ToTensor()
    ])

    # Generate dataset
    data = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False)
    data = data.map(input_columns=["image"], operations=transform())

    # check results with md5 comparison
    filename = "random_sharpness_01_result.npz"
    save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)

    # Restore configuration
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_choice_exception_random_crop_badinput():
    """
    Test RandomChoice: hit error in RandomCrop with greater crop size,
    expected to raise error
    """
    logger.info("test_random_choice_exception_random_crop_badinput")

    # define map operations
    # note: crop size[5000, 5000] > image size[4032, 2268]
    transforms_list = [py_vision.RandomCrop(5000)]
    transforms = [
        py_vision.Decode(),
        py_vision.RandomChoice(transforms_list),
        py_vision.ToTensor()
    ]
    transform = py_vision.ComposeOp(transforms)

    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data = data.map(input_columns=["image"], operations=transform())
    # NOTE(fix): the previous try/except pattern passed silently when no
    # exception was raised; pytest.raises asserts the failure really occurs.
    with pytest.raises(RuntimeError) as error_info:
        _ = data.create_dict_iterator().get_next()
    logger.info("Got an exception in DE: {}".format(str(error_info.value)))
    assert "Crop size" in str(error_info.value)
def test_random_horizontal_valid_prob_py():
    """
    Test RandomHorizontalFlip op with py_transforms: valid non-default input,
    expect to pass
    """
    logger.info("test_random_horizontal_valid_prob_py")
    # Fix seed and worker count so the md5 golden file is reproducible.
    original_seed = config_get_set_seed(0)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transform = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.RandomHorizontalFlip(0.8),
        py_vision.ToTensor()
    ])
    data = data.map(input_columns=["image"], operations=transform())

    filename = "random_horizontal_01_py_result.npz"
    save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)

    # Restore config setting
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
def skip_test_random_perspective_md5():
    """
    Test RandomPerspective with md5 comparison
    """
    logger.info("test_random_perspective_md5")
    # Fix seed and worker count so the md5 golden file is reproducible.
    original_seed = config_get_set_seed(5)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # define map operations
    transform = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.RandomPerspective(distortion_scale=0.3, prob=0.7,
                                    interpolation=Inter.BILINEAR),
        py_vision.ToTensor()
    ])

    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data = data.map(input_columns=["image"], operations=transform())

    # check results with md5 comparison
    filename = "random_perspective_01_result.npz"
    save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)

    # Restore configuration
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_01_py():
    """
    Test RandomCrop op with py_transforms: size is a single integer,
    expected to pass
    """
    logger.info("test_random_crop_01_py")
    # Fix seed and worker count so the md5 golden file is reproducible.
    original_seed = config_get_set_seed(0)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    # Note: If size is an int, a square crop of size (size, size) is returned.
    transform = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.RandomCrop(512),
        py_vision.ToTensor()
    ])
    data = data.map(input_columns=["image"], operations=transform())

    filename = "random_crop_01_py_result.npz"
    save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)

    # Restore config setting
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_09():
    """
    Test RandomCrop op: invalid type of input image (not PIL), expected to raise TypeError
    """
    logger.info("test_random_crop_09")
    # NOTE(fix): save and restore the config (the previous version set the
    # seed/workers and never restored them, leaking state into later tests).
    original_seed = config_get_set_seed(0)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transforms = [
        py_vision.Decode(),
        py_vision.ToTensor(),
        # Note: if input is not PIL image, TypeError will raise
        py_vision.RandomCrop(512)
    ]
    transform = py_vision.ComposeOp(transforms)
    # NOTE(fix): the previous try/except pattern passed silently when no
    # exception was raised. Exception is kept broad deliberately: the
    # TypeError surfaces wrapped by the pipeline at iteration time.
    with pytest.raises(Exception) as error_info:
        data = data.map(input_columns=["image"], operations=transform())
        for item in data.create_dict_iterator():
            _ = item["image"]
    logger.info("Got an exception in DE: {}".format(str(error_info.value)))
    assert "should be PIL Image" in str(error_info.value)

    # Restore config setting
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)