# Ejemplo n.º 1
def test_random_crop_and_resize_op_py(plot=False):
    """
    Test RandomCropAndResize op in py transforms
    """
    logger.info("test_random_crop_and_resize_op_py")
    # Dataset with crop-and-resize applied
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    # A scale of (2, 2) with ratio (1, 3) forces the crop to cover the whole image
    transform1 = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.RandomResizedCrop((256, 512), (2, 2), (1, 3)),
        py_vision.ToTensor()
    ])
    data1 = data1.map(input_columns=["image"], operations=transform1())
    # Baseline dataset for comparison: decode only
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    transform2 = py_vision.ComposeOp([py_vision.Decode(), py_vision.ToTensor()])
    data2 = data2.map(input_columns=["image"], operations=transform2())

    crop_and_resize_images = []
    original_images = []
    for num_iter, (item1, item2) in enumerate(
            zip(data1.create_dict_iterator(), data2.create_dict_iterator())):
        cropped = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        baseline = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        baseline = cv2.resize(baseline, (512, 256))
        mse = diff_mse(cropped, baseline)
        # Rounding in the Python pipeline keeps mse slightly above exact zero
        assert mse <= 0.05
        logger.info("random_crop_and_resize_op_{}, mse: {}".format(
            num_iter + 1, mse))
        crop_and_resize_images.append(cropped)
        original_images.append(baseline)
    if plot:
        visualize_list(original_images, crop_and_resize_images)
def test_random_color_adjust_op_brightness(plot=False):
    """
    Test RandomColorAdjust op
    """
    logger.info("test_random_color_adjust_op")

    # C++ pipeline: decode, then a brightness-only colour adjust
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data1 = data1.map(input_columns=["image"],
                      operations=[
                          c_vision.Decode(),
                          c_vision.RandomColorAdjust((0.8, 0.8), (1, 1), (1, 1),
                                                     (0, 0)),
                      ])

    # Python pipeline with the same fixed brightness factor
    transform = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.RandomColorAdjust((0.8, 0.8), (1, 1), (1, 1), (0, 0)),
        py_vision.ToTensor(),
    ])
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=transform())

    for num_iter, (item1, item2) in enumerate(
            zip(data1.create_dict_iterator(), data2.create_dict_iterator()),
            start=1):
        c_image = item1["image"]
        # Python output is CHW float in [0, 1]; convert to HWC uint8 to compare
        py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)

        logger.info("shape of c_image: {}".format(c_image.shape))
        logger.info("shape of py_image: {}".format(py_image.shape))

        logger.info("dtype of c_image: {}".format(c_image.dtype))
        logger.info("dtype of py_image: {}".format(py_image.dtype))

        mse = diff_mse(c_image, py_image)
        logger.info("mse is {}".format(mse))

        logger.info("random_rotation_op_{}, mse: {}".format(num_iter + 1, mse))
        assert mse < 0.01
        if plot:
            visualize(c_image, mse, py_image)
# Ejemplo n.º 3
def test_random_color_jitter_op_hue():
    """
    Test RandomColorAdjust op: apply a hue-only adjust in both the C++ and
    Python pipelines and require the outputs to match (mse below 0.01).
    """
    logger.info("test_random_color_jitter_op")

    # First dataset: C++ transforms
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    decode_op = c_vision.Decode()

    # Fixed hue factor (0.2, 0.2); brightness/contrast/saturation left at 1
    random_jitter_op = c_vision.RandomColorAdjust((1, 1), (1, 1), (1, 1),
                                                  (0.2, 0.2))

    ctrans = [
        decode_op,
        random_jitter_op,
    ]

    data1 = data1.map(input_columns=["image"], operations=ctrans)

    # Second dataset: equivalent Python transforms
    transforms = [
        py_vision.Decode(),
        py_vision.RandomColorAdjust((1, 1), (1, 1), (1, 1), (0.2, 0.2)),
        py_vision.ToTensor(),
    ]
    transform = py_vision.ComposeOp(transforms)
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=transform())

    num_iter = 0
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        num_iter += 1
        c_image = item1["image"]
        # Python output is CHW float in [0, 1]; convert to HWC uint8 to compare
        py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)

        logger.info("shape of c_image: {}".format(c_image.shape))
        logger.info("shape of py_image: {}".format(py_image.shape))

        logger.info("dtype of c_image: {}".format(c_image.dtype))
        logger.info("dtype of py_image: {}".format(py_image.dtype))

        # Removed an unused `diff = c_image - py_image` (never read; uint8
        # subtraction could also wrap around). diff_mse does the comparison.
        mse = diff_mse(c_image, py_image)
        logger.info("mse is {}".format(mse))
        assert mse < 0.01
# Ejemplo n.º 4
def test_random_grayscale_input_grayscale_images():
    """
    Test RandomGrayscale Op: valid parameter with grayscale images as input, expect to pass
    """
    logger.info("test_random_grayscale_input_grayscale_images")
    original_seed = config_get_set_seed(0)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # Pipeline 1: convert to single-channel grayscale before RandomGrayscale
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transform1 = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.Grayscale(1),
        # Note: If the input images is grayscale image with 1 channel.
        py_vision.RandomGrayscale(0.5),
        py_vision.ToTensor()
    ])
    data1 = data1.map(input_columns=["image"], operations=transform1())

    # Pipeline 2: decode only, keeps three channels
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transform2 = py_vision.ComposeOp([py_vision.Decode(), py_vision.ToTensor()])
    data2 = data2.map(input_columns=["image"], operations=transform2())

    image_gray = []
    image = []
    for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        gray_img = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        rgb_img = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_gray.append(gray_img)
        image.append(rgb_img)

        # Grayscale branch stays single-channel; baseline stays three-channel
        assert gray_img.ndim == 3
        assert gray_img.shape[2] == 1
        assert rgb_img.ndim == 3
        assert rgb_img.shape[2] == 3

    # Restore config
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
# Ejemplo n.º 5
def test_deterministic_python_seed_multi_thread():
    """
    Test deterministic execution with seed in python, this fails with multi-thread pyfunc run
    """
    logger.info("test_deterministic_python_seed_multi_thread")

    # Save original configuration values
    saved_workers = ds.config.get_num_parallel_workers()
    saved_seed = ds.config.get_seed()
    ds.config.set_num_parallel_workers(3)
    ds.config.set_seed(0)
    # when we set the seed all operations within our dataset should be deterministic
    # First run through the pipeline
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    transform = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.RandomCrop([512, 512], [200, 200, 200, 200]),
        py_vision.ToTensor(),
    ])
    data1 = data1.map(input_columns=["image"],
                      operations=transform(),
                      python_multiprocessing=True)
    # config.set_seed() calls random.seed()
    data1_output = [row["image"] for row in data1.create_dict_iterator()]

    # Second run through the same pipeline
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    # If seed is set up on constructor
    data2 = data2.map(input_columns=["image"],
                      operations=transform(),
                      python_multiprocessing=True)
    # config.set_seed() calls random.seed()
    ds.config.set_seed(0)

    data2_output = [row["image"] for row in data2.create_dict_iterator()]

    try:
        np.testing.assert_equal(data1_output, data2_output)
    except Exception as e:
        # expect output to not match during multi-threaded excution
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert "Array" in str(e)

    # Restore original configuration values
    ds.config.set_num_parallel_workers(saved_workers)
    ds.config.set_seed(saved_seed)
# Ejemplo n.º 6
def test_random_erasing_op(plot=False):
    """
    Test RandomErasing and Cutout
    """
    logger.info("test_random_erasing")

    # Pipeline 1: RandomErasing with random fill values
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    transform_1 = vision.ComposeOp([
        vision.Decode(),
        vision.ToTensor(),
        vision.RandomErasing(value='random')
    ])
    data1 = data1.map(input_columns=["image"], operations=transform_1())

    # Pipeline 2: Cutout with an 80-pixel patch
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    transform_2 = vision.ComposeOp(
        [vision.Decode(), vision.ToTensor(), vision.Cutout(80)])
    data2 = data2.map(input_columns=["image"], operations=transform_2())

    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        image_1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_2 = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)

        logger.info("shape of image_1: {}".format(image_1.shape))
        logger.info("shape of image_2: {}".format(image_2.shape))

        logger.info("dtype of image_1: {}".format(image_1.dtype))
        logger.info("dtype of image_2: {}".format(image_2.dtype))

        # NOTE(review): mse is only computed for visualization; the two random
        # ops are not expected to match, so there is deliberately no assert here.
        mse = diff_mse(image_1, image_2)
        if plot:
            visualize_image(image_1, image_2, mse)
# Ejemplo n.º 7
def test_random_affine_op(plot=False):
    """
    Test RandomAffine in python transformations
    """
    logger.info("test_random_affine_op")
    # Affine pipeline: small rotation, translation and scale jitter
    affine_transform = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.RandomAffine(degrees=15,
                               translate=(0.1, 0.1),
                               scale=(0.9, 1.1)),
        py_vision.ToTensor()
    ])

    # Baseline pipeline: decode only
    baseline_transform = py_vision.ComposeOp(
        [py_vision.Decode(), py_vision.ToTensor()])

    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data1 = data1.map(input_columns=["image"], operations=affine_transform())
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=baseline_transform())

    image_affine = []
    image_original = []
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        image_affine.append(
            (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8))
        image_original.append(
            (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8))
    if plot:
        visualize_list(image_original, image_affine)
# Ejemplo n.º 8
def test_random_rotation_op_py(plot=False):
    """
    Test RandomRotation in python transformations op
    """
    logger.info("test_random_rotation_op_py")

    # Rotation pipeline
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)
    # use [90, 90] to force rotate 90 degrees, expand is set to be True to match output size
    transform1 = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.RandomRotation((90, 90), expand=True),
        py_vision.ToTensor()
    ])
    data1 = data1.map(input_columns=["image"], operations=transform1())

    # Baseline pipeline: decode only
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    transform2 = py_vision.ComposeOp(
        [py_vision.Decode(), py_vision.ToTensor()])
    data2 = data2.map(input_columns=["image"], operations=transform2())

    # Only the first image pair is checked
    for num_iter, (item1, item2) in enumerate(
            zip(data1.create_dict_iterator(), data2.create_dict_iterator())):
        if num_iter > 0:
            break
        rotation_de = (item1["image"].transpose(1, 2, 0) * 255).astype(
            np.uint8)
        original = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        logger.info("shape before rotate: {}".format(original.shape))
        # OpenCV 90-degree counter-clockwise rotation is the reference result
        rotation_cv = cv2.rotate(original, cv2.ROTATE_90_COUNTERCLOCKWISE)
        mse = diff_mse(rotation_de, rotation_cv)
        logger.info("random_rotation_op_{}, mse: {}".format(num_iter + 1, mse))
        assert mse == 0
        if plot:
            visualize_image(original, rotation_de, mse, rotation_cv)
# Ejemplo n.º 9
def test_linear_transformation_op(plot=False):
    """
    Test LinearTransformation op: verify if images transform correctly
    """
    logger.info("test_linear_transformation_01")

    # Identity setup: eye matrix with zero mean should leave images unchanged
    height = 50
    width = 50
    dim = 3 * height * width
    transformation_matrix = np.eye(dim)
    mean_vector = np.zeros(dim)

    transform = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.CenterCrop([height, width]),
        py_vision.ToTensor()
    ])

    # Dataset with LinearTransformation applied after the crop
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data1 = data1.map(input_columns=["image"], operations=transform())
    # Note: if transformation matrix is diagonal matrix with all 1 in diagonal,
    #       the output matrix in expected to be the same as the input matrix.
    data1 = data1.map(input_columns=["image"],
                      operations=py_vision.LinearTransformation(
                          transformation_matrix, mean_vector))

    # Baseline dataset without the linear transformation
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=transform())

    image_transformed = []
    image = []
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        transformed = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        baseline = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_transformed.append(transformed)
        image.append(baseline)

        # Identity transformation must reproduce the input exactly
        mse = diff_mse(transformed, baseline)
        assert mse == 0
    if plot:
        visualize_list(image, image_transformed)
# Ejemplo n.º 10
def test_random_apply_op(plot=False):
    """
    Test RandomApply in python transformations
    """
    logger.info("test_random_apply_op")
    # RandomApply wraps a crop+rotation pair, applied with probability 0.6
    transform1 = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.RandomApply(
            [py_vision.CenterCrop(64), py_vision.RandomRotation(30)],
            prob=0.6),
        py_vision.ToTensor()
    ])

    # Baseline: decode only
    transform2 = py_vision.ComposeOp(
        [py_vision.Decode(), py_vision.ToTensor()])

    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data1 = data1.map(input_columns=["image"], operations=transform1())
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=transform2())

    image_apply = []
    image_original = []
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        image_apply.append(
            (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8))
        image_original.append(
            (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8))
    if plot:
        visualize(image_original, image_apply)
# Ejemplo n.º 11
def test_pad_grayscale():
    """
    Tests that the pad works for grayscale images.

    Pads a single-channel (grayscale) image and a 3-channel image with the
    same Pad op and checks that the padded spatial dimensions agree.
    """
    def channel_swap(image):
        """
        Py func hack for our pytransforms to work with c transforms
        """
        return (image.transpose(1, 2, 0) * 255).astype(np.uint8)

    transforms = [
        py_vision.Decode(),
        py_vision.Grayscale(1),
        py_vision.ToTensor(), (lambda image: channel_swap(image))
    ]

    transform = py_vision.ComposeOp(transforms)
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data1 = data1.map(input_columns=["image"], operations=transform())

    # if input is grayscale, the output dimensions should be single channel
    pad_gray = c_vision.Pad(100, fill_value=(20, 20, 20))
    data1 = data1.map(input_columns=["image"], operations=pad_gray)
    dataset_shape_1 = []
    for item1 in data1.create_dict_iterator():
        c_image = item1["image"]
        dataset_shape_1.append(c_image.shape)

    # Dataset for comparison
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    decode_op = c_vision.Decode()

    # we use the same padding logic
    ctrans = [decode_op, pad_gray]
    dataset_shape_2 = []

    data2 = data2.map(input_columns=["image"], operations=ctrans)

    for item2 in data2.create_dict_iterator():
        c_image = item2["image"]
        dataset_shape_2.append(c_image.shape)

    for shape1, shape2 in zip(dataset_shape_1, dataset_shape_2):
        # validate that the first two (spatial) dimensions are the same;
        # the channel dimension differs because py_vision.Grayscale outputs 1 channel.
        # Fixed: the old slice [0:1] compared only the FIRST dimension,
        # contradicting the stated "first two dimensions" intent — use [0:2].
        assert shape1[0:2] == shape2[0:2]
# Ejemplo n.º 12
def test_to_type_op():
    """
    Test ToType Op
    """
    logger.info("test_to_type_op")

    # Pipeline with ToType: float32 tensor cast down to int16
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    transform1 = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.ToTensor(),
        # Note: Convert the datatype from float32 to int16
        py_vision.ToType(np.int16)
    ])
    data1 = data1.map(input_columns=["image"], operations=transform1())

    # Baseline pipeline: ToTensor only, stays float32
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    transform2 = py_vision.ComposeOp(
        [py_vision.Decode(), py_vision.ToTensor()])
    data2 = data2.map(input_columns=["image"], operations=transform2())

    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        image1 = item1["image"]
        image2 = item2["image"]

        # Only the dtype should differ between the two pipelines
        assert isinstance(image1, np.ndarray)
        assert isinstance(image2, np.ndarray)
        assert image1.dtype == np.int16
        assert image2.dtype == np.float32
        assert image1.shape == image2.shape
# Ejemplo n.º 13
def test_pad_op():
    """
    Test Pad op: a 100-pixel C++ Pad and a Python Pad with the same border
    must produce matching images (mse below 0.01).
    """
    # Fixed: the log tag previously said "test_random_color_jitter_op",
    # a copy-paste leftover from another test.
    logger.info("test_pad_op")

    # First dataset: C++ transforms
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    decode_op = c_vision.Decode()

    pad_op = c_vision.Pad((100, 100, 100, 100))
    ctrans = [
        decode_op,
        pad_op,
    ]

    data1 = data1.map(input_columns=["image"], operations=ctrans)

    # Second dataset: equivalent Python transforms
    transforms = [
        py_vision.Decode(),
        py_vision.Pad(100),
        py_vision.ToTensor(),
    ]
    transform = py_vision.ComposeOp(transforms)
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=transform())

    num_iter = 0
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        num_iter += 1
        c_image = item1["image"]
        # Python output is CHW float in [0, 1]; convert to HWC uint8 to compare
        py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)

        logger.info("shape of c_image: {}".format(c_image.shape))
        logger.info("shape of py_image: {}".format(py_image.shape))

        logger.info("dtype of c_image: {}".format(c_image.dtype))
        logger.info("dtype of py_image: {}".format(py_image.dtype))

        # Removed an unused `diff = c_image - py_image` (never read; uint8
        # subtraction could also wrap around). diff_mse does the comparison.
        mse = diff_mse(c_image, py_image)
        logger.info("mse is {}".format(mse))
        assert mse < 0.01
# Ejemplo n.º 14
def test_rotation_diff(plot=False):
    """
    Test RandomRotation op
    """
    logger.info("test_random_rotation_op")

    # C++ pipeline with a fixed 45-degree rotation
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data1 = data1.map(input_columns=["image"],
                      operations=[c_vision.Decode(),
                                  c_vision.RandomRotation((45, 45))])

    # Python pipeline with the same fixed rotation
    transform = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.RandomRotation((45, 45)),
        py_vision.ToTensor(),
    ])
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=transform())

    image_list_c, image_list_py = [], []
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        c_image = item1["image"]
        py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_list_c.append(c_image)
        image_list_py.append(py_image)

        logger.info("shape of c_image: {}".format(c_image.shape))
        logger.info("shape of py_image: {}".format(py_image.shape))

        logger.info("dtype of c_image: {}".format(c_image.dtype))
        logger.info("dtype of py_image: {}".format(py_image.dtype))

        mse = diff_mse(c_image, py_image)
        assert mse < 0.001  # Rounding error
    if plot:
        visualize_list(image_list_c, image_list_py, visualize_mode=2)
def create_imagenet_dataset(imagenet_dir):
    """
    Build an ImageNet-style training pipeline: decode, random horizontal flip,
    normalize and random-erase, then shuffle and repeat three times.

    Args:
        imagenet_dir: directory of an ImageFolder-style dataset.

    Returns:
        The mapped, shuffled and repeated dataset.
    """
    # Use a distinct local name: the old code bound the result to `ds`,
    # shadowing the dataset-module alias used by the rest of this file.
    dataset = de.ImageFolderDatasetV2(imagenet_dir)

    transform = F.ComposeOp([
        F.Decode(),
        F.RandomHorizontalFlip(0.5),
        F.ToTensor(),
        F.Normalize((0.491, 0.482, 0.447), (0.247, 0.243, 0.262)),
        F.RandomErasing()
    ])
    dataset = dataset.map(input_columns="image", operations=transform())
    dataset = dataset.shuffle(buffer_size=5)
    dataset = dataset.repeat(3)
    return dataset
# Ejemplo n.º 16
def test_equalize_md5_py():
    """
    Test Equalize py op with md5 check
    """
    logger.info("Test Equalize")

    # Build the dataset with Equalize applied
    data1 = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False)
    equalize_compose = F.ComposeOp([F.Decode(), F.Equalize(), F.ToTensor()])
    data1 = data1.map(input_columns="image", operations=equalize_compose())

    # Compare with expected md5 from images
    filename = "equalize_01_result.npz"
    save_and_check_md5(data1, filename, generate_golden=GENERATE_GOLDEN)
# Ejemplo n.º 17
def test_random_crop_op_py(plot=False):
    """
    Test RandomCrop op in py transforms
    """
    logger.info("test_random_crop_op_py")
    # Cropping pipeline: pad by 200 on each side, then take a 512x512 crop
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    transform1 = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.RandomCrop([512, 512], [200, 200, 200, 200]),
        py_vision.ToTensor()
    ])
    data1 = data1.map(input_columns=["image"], operations=transform1())
    # Baseline pipeline for comparison: decode only
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    transform2 = py_vision.ComposeOp(
        [py_vision.Decode(), py_vision.ToTensor()])
    data2 = data2.map(input_columns=["image"], operations=transform2())

    crop_images = []
    original_images = []
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        crop_images.append(
            (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8))
        original_images.append(
            (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8))
    if plot:
        visualize(original_images, crop_images)
# Ejemplo n.º 18
def test_deterministic_python_seed():
    """
    Test deterministic execution with seed in python
    """
    logger.info("test_deterministic_python_seed")

    # Save original configuration values
    saved_workers = ds.config.get_num_parallel_workers()
    saved_seed = ds.config.get_seed()

    ds.config.set_seed(0)
    ds.config.set_num_parallel_workers(1)

    # First run through the pipeline
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)

    transform = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.RandomCrop([512, 512], [200, 200, 200, 200]),
        py_vision.ToTensor(),
    ])
    data1 = data1.map(input_columns=["image"], operations=transform())
    # config.set_seed() calls random.seed()
    data1_output = [row["image"] for row in data1.create_dict_iterator()]

    # Second run with the seed reset to the same value
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=transform())
    # config.set_seed() calls random.seed(), resets seed for next dataset iterator
    ds.config.set_seed(0)

    data2_output = [row["image"] for row in data2.create_dict_iterator()]

    # With a single worker and identical seeds both runs must match exactly
    np.testing.assert_equal(data1_output, data2_output)

    # Restore original configuration values
    ds.config.set_num_parallel_workers(saved_workers)
    ds.config.set_seed(saved_seed)
# Ejemplo n.º 19
def test_cut_out_op_multicut(plot=False):
    """
    Test Cutout
    """
    logger.info("test_cut_out")

    # Python pipeline: decode + ToTensor only (no cutout applied)
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    transform_1 = f.ComposeOp([
        f.Decode(),
        f.ToTensor(),
    ])
    data1 = data1.map(input_columns=["image"], operations=transform_1())

    # C++ pipeline: CutOut with ten 80-pixel patches
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data2 = data2.map(input_columns=["image"],
                      operations=[c.Decode(),
                                  c.CutOut(80, num_patches=10)])

    image_list_1, image_list_2 = [], []
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        image_1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        # C image doesn't require transpose
        image_2 = item2["image"]
        image_list_1.append(image_1)
        image_list_2.append(image_2)

        logger.info("shape of image_1: {}".format(image_1.shape))
        logger.info("shape of image_2: {}".format(image_2.shape))

        logger.info("dtype of image_1: {}".format(image_1.dtype))
        logger.info("dtype of image_2: {}".format(image_2.dtype))
    if plot:
        visualize_list(image_list_1, image_list_2)
# Ejemplo n.º 20
def util_test_normalize_grayscale(num_output_channels, mean, std):
    """
    Utility function for testing Normalize. Input arguments are given by other tests
    """
    compose = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.Grayscale(num_output_channels),
        py_vision.ToTensor(),
        py_vision.Normalize(mean, std)
    ])
    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    return data.map(input_columns=["image"], operations=compose())
# Ejemplo n.º 21
def test_rgb_hsv_pipeline():
    """
    Test that an RgbToHsv followed by HsvToRgb round-trip leaves images
    (numerically) unchanged compared to a plain Decode/Resize/ToTensor pipeline.
    """
    # First dataset: reference pipeline without the color-space round-trip.
    # NOTE: the original rebound `transforms1` from the op list to the
    # ComposeOp object; use distinct names to match the transform2 convention.
    transforms1 = [vision.Decode(), vision.Resize([64, 64]), vision.ToTensor()]
    transform1 = vision.ComposeOp(transforms1)
    ds1 = ds.TFRecordDataset(DATA_DIR,
                             SCHEMA_DIR,
                             columns_list=["image"],
                             shuffle=False)
    ds1 = ds1.map(input_columns=["image"], operations=transform1())

    # Second dataset: same pipeline plus RGB -> HSV -> RGB round-trip
    transforms2 = [
        vision.Decode(),
        vision.Resize([64, 64]),
        vision.ToTensor(),
        vision.RgbToHsv(),
        vision.HsvToRgb()
    ]
    transform2 = vision.ComposeOp(transforms2)
    ds2 = ds.TFRecordDataset(DATA_DIR,
                             SCHEMA_DIR,
                             columns_list=["image"],
                             shuffle=False)
    ds2 = ds2.map(input_columns=["image"], operations=transform2())

    for data1, data2 in zip(ds1.create_dict_iterator(),
                            ds2.create_dict_iterator()):
        ori_img = data1["image"]
        cvt_img = data2["image"]
        # Round-trip conversion is lossy only up to float rounding error
        assert_allclose(ori_img.flatten(),
                        cvt_img.flatten(),
                        rtol=1e-5,
                        atol=0)
        assert ori_img.shape == cvt_img.shape
Ejemplo n.º 22
0
def test_invert_md5_py():
    """
    Test Invert python op with md5 check
    """
    logger.info("Test Invert python op with md5 check")

    # Generate dataset.
    # NOTE: the original named this local `ds`, shadowing the dataset module
    # alias used elsewhere in the file (e.g. ds.TFRecordDataset); renamed.
    data_set = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False)

    transforms_invert = F.ComposeOp([F.Decode(), F.Invert(), F.ToTensor()])

    data = data_set.map(input_columns="image", operations=transforms_invert())
    # Compare with expected md5 from images
    filename = "invert_01_result_py.npz"
    save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
def test_random_color_adjust_op_contrast():
    """
    Compare RandomColorAdjust contrast behavior between the C and Python ops.

    NOTE(review): this test only logs the per-pixel difference and the mse;
    it never asserts on them — confirm whether an assertion was intended.
    """
    logger.info("test_random_color_adjust_op")

    # First dataset: C transform pipeline with a fixed contrast factor of 0.5
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    c_ops = [
        c_vision.Decode(),
        c_vision.RandomColorAdjust((1, 1), (0.5, 0.5), (1, 1), (0, 0)),
    ]
    data1 = data1.map(input_columns=["image"], operations=c_ops)

    # Second dataset: equivalent Python transform pipeline
    py_transform = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.RandomColorAdjust((1, 1), (0.5, 0.5), (1, 1), (0, 0)),
        py_vision.ToTensor(),
    ])
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=py_transform())

    num_iter = 0
    for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        num_iter += 1
        c_image = item1["image"]
        # Python output is CHW float in [0, 1]; convert back to HWC uint8
        py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)

        logger.info("shape of c_image: {}".format(c_image.shape))
        logger.info("shape of py_image: {}".format(py_image.shape))

        logger.info("dtype of c_image: {}".format(c_image.dtype))
        logger.info("dtype of py_image: {}".format(py_image.dtype))

        diff = c_image - py_image
        logger.info("contrast difference c is : {}".format(c_image[0][0]))
        logger.info("contrast difference  py is : {}".format(py_image[0][0]))

        logger.info("contrast difference is : {}".format(diff[0][0]))
        mse = diff_mse(c_image, py_image)
        logger.info("mse is {}".format(mse))
Ejemplo n.º 24
0
def test_ten_crop_list_size_error_msg():
    """
    Tests TenCrop error message when the size arg has more than 2 elements
    """
    logger.info("test_ten_crop_list_size_error_msg")

    # Constructing TenCrop with a 3-element size must raise a TypeError
    with pytest.raises(TypeError) as info:
        _ = [
            vision.Decode(),
            vision.TenCrop([200, 200, 200]),
            # 4D stack of 10 images
            lambda images: np.stack(
                [vision.ToTensor()(image) for image in images])
        ]
    error_msg = "Size should be a single integer or a list/tuple (h, w) of length 2."
    assert error_msg == str(info.value)
Ejemplo n.º 25
0
def test_five_crop_error_msg():
    """
    Test FiveCrop error message.
    """
    logger.info("test_five_crop_error_msg")

    data = ds.TFRecordDataset(DATA_DIR,
                              SCHEMA_DIR,
                              columns_list=["image"],
                              shuffle=False)
    # FiveCrop yields a tuple of 5 images; feeding that straight into ToTensor
    # is invalid and should fail at iteration time.
    transform = vision.ComposeOp(
        [vision.Decode(), vision.FiveCrop(200), vision.ToTensor()])
    data = data.map(input_columns=["image"], operations=transform())

    with pytest.raises(RuntimeError):
        data.create_tuple_iterator().__next__()
Ejemplo n.º 26
0
def test_HWC2CHW_comp(plot=False):
    """
    Test HWC2CHW between python and c image augmentation
    """
    logger.info("Test HWC2CHW with c_transform and py_transform comparison")

    # First dataset: decode then transpose via the C ops
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data1 = data1.map(input_columns=["image"], operations=c_vision.Decode())
    data1 = data1.map(input_columns=["image"], operations=c_vision.HWC2CHW())

    # Second dataset: equivalent Python transform pipeline
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    py_transform = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.ToTensor(),
        py_vision.HWC2CHW()
    ])
    data2 = data2.map(input_columns=["image"], operations=py_transform())

    image_c_transposed = []
    image_py_transposed = []
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        c_image = item1["image"]
        # Python output is CHW float in [0, 1]; convert back to HWC uint8
        py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)

        # The images aren't exactly the same due to rounding error
        mse = diff_mse(py_image, c_image)
        assert mse < 0.001

        image_c_transposed.append(item1["image"].copy())
        image_py_transposed.append(item2["image"].copy())

    if plot:
        # NOTE(review): sibling tests call visualize_list here — confirm
        # that `visualize` is the intended helper.
        visualize(image_c_transposed, image_py_transposed)
Ejemplo n.º 27
0
def test_random_vertical_comp(plot=False):
    """
    Test test_random_vertical_flip and compare between python and c image augmentation ops
    """
    logger.info("test_random_vertical_comp")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    decode_op = c_vision.Decode()
    # Note: The image must be flipped if prob is set to be 1
    # (renamed from the misleading `random_horizontal_op` in the original)
    random_vertical_op = c_vision.RandomVerticalFlip(1)
    data1 = data1.map(input_columns=["image"], operations=decode_op)
    data1 = data1.map(input_columns=["image"], operations=random_vertical_op)

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    transforms = [
        py_vision.Decode(),
        # Note: The image must be flipped if prob is set to be 1
        py_vision.RandomVerticalFlip(1),
        py_vision.ToTensor()
    ]
    transform = py_vision.ComposeOp(transforms)
    data2 = data2.map(input_columns=["image"], operations=transform())

    images_list_c = []
    images_list_py = []
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        image_c = item1["image"]
        # Python output is CHW float in [0, 1]; convert back to HWC uint8
        image_py = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        images_list_c.append(image_c)
        images_list_py.append(image_py)

        # Check if the output images are the same
        mse = diff_mse(image_c, image_py)
        assert mse < 0.001
    if plot:
        visualize_list(images_list_c, images_list_py, visualize_mode=2)
Ejemplo n.º 28
0
def test_normalize_op_py(plot=False):
    """
    Test Normalize in python transformations
    """
    logger.info("Test Normalize in python")
    mean = [0.475, 0.45, 0.392]
    std = [0.275, 0.267, 0.278]
    # Shared decode + tensorize pipeline; normalization is applied only to data1
    base_transform = py_vision.ComposeOp([py_vision.Decode(), py_vision.ToTensor()])
    normalize_op = py_vision.Normalize(mean, std)

    # First dataset: normalized by the dataset op under test
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data1 = data1.map(input_columns=["image"], operations=base_transform())
    data1 = data1.map(input_columns=["image"], operations=normalize_op)

    # Second dataset: un-normalized reference
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=base_transform())

    num_iter = 0
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        # Convert CHW float [0, 1] output back to HWC uint8 for comparison
        image_de_normalized = (item1["image"].transpose(1, 2, 0) * 255).astype(
            np.uint8)
        # Golden result: normalize the raw image with numpy
        image_np_normalized = (
            normalize_np(item2["image"].transpose(1, 2, 0), mean, std) *
            255).astype(np.uint8)
        image_original = (item2["image"].transpose(1, 2, 0) * 255).astype(
            np.uint8)
        mse = diff_mse(image_de_normalized, image_np_normalized)
        logger.info("image_{}, mse: {}".format(num_iter + 1, mse))
        # Rounding error keeps the mse small but nonzero
        assert mse < 0.01
        if plot:
            visualize_image(image_original, image_de_normalized, mse,
                            image_np_normalized)
        num_iter += 1
Ejemplo n.º 29
0
def test_type_cast():
    """
    Test TypeCast op
    """
    logger.info("test_type_cast")

    # First dataset: decode then cast to float32 via the C pipeline
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    c_ops = [
        c_vision.Decode(),
        data_util.TypeCast(mstype.float32),
    ]
    data1 = data1.map(input_columns=["image"], operations=c_ops)

    # Second dataset: Python reference pipeline (no cast)
    py_transform = py_vision.ComposeOp([py_vision.Decode(), py_vision.ToTensor()])
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=py_transform())

    num_iter = 0
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        num_iter += 1
        c_image = item1["image"]
        # Python output is CHW float in [0, 1]; convert back to HWC uint8
        py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)

        logger.info("shape of c_image: {}".format(c_image.shape))
        logger.info("shape of py_image: {}".format(py_image.shape))

        logger.info("dtype of c_image: {}".format(c_image.dtype))
        logger.info("dtype of py_image: {}".format(py_image.dtype))
        # The cast must have produced float32 data
        assert c_image.dtype == "float32"
Ejemplo n.º 30
0
def test_random_rotation_md5():
    """
    Test RandomRotation with md5 check
    """
    logger.info("Test RandomRotation with md5 check")
    # Fix seed and worker count so the random rotation is reproducible
    original_seed = config_get_set_seed(5)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # First dataset: C implementation
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    rotation_op = c_vision.RandomRotation((0, 90),
                                          expand=True,
                                          resample=Inter.BILINEAR,
                                          center=(50, 50),
                                          fill_value=150)
    data1 = data1.map(input_columns=["image"], operations=c_vision.Decode())
    data1 = data1.map(input_columns=["image"], operations=rotation_op)

    # Second dataset: Python implementation with identical parameters
    # NOTE(review): unlike data1, this dataset omits columns_list — confirm
    # the omission is intentional.
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)
    transform2 = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.RandomRotation((0, 90),
                                 expand=True,
                                 resample=Inter.BILINEAR,
                                 center=(50, 50),
                                 fill_value=150),
        py_vision.ToTensor()
    ])
    data2 = data2.map(input_columns=["image"], operations=transform2())

    # Compare with expected md5 from images
    filename1 = "random_rotation_01_c_result.npz"
    save_and_check_md5(data1, filename1, generate_golden=GENERATE_GOLDEN)
    filename2 = "random_rotation_01_py_result.npz"
    save_and_check_md5(data2, filename2, generate_golden=GENERATE_GOLDEN)

    # Restore configuration
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)