Example #1
def test_random_order_op(plot=False):
    """
    Test RandomOrder in python transformations
    """
    logger.info("test_random_order_op")
    # define map operations
    transforms_list = [py_vision.CenterCrop(64), py_vision.RandomRotation(30)]
    transforms1 = [
        py_vision.Decode(),
        py_vision.RandomOrder(transforms_list),
        py_vision.ToTensor()
    ]
    transform1 = py_vision.ComposeOp(transforms1)

    transforms2 = [
        py_vision.Decode(),
        py_vision.ToTensor()
    ]
    transform2 = py_vision.ComposeOp(transforms2)

    #  First dataset
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data1 = data1.map(input_columns=["image"], operations=transform1())
    #  Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=transform2())

    image_order = []
    image_original = []
    for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        image1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image2 = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_order.append(image1)
        image_original.append(image2)
    if plot:
        visualize(image_original, image_order)
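Most of the comparisons in these examples convert the py_vision output, a CHW float array in [0, 1], back to an HWC uint8 image via the inline expression (img.transpose(1, 2, 0) * 255).astype(np.uint8). A small helper capturing that pattern could look like the sketch below; the name to_hwc_uint8 is hypothetical and not part of the original test suite.

import numpy as np

def to_hwc_uint8(chw_float_image):
    # CHW float image in [0, 1] -> HWC uint8 image in [0, 255],
    # matching the inline conversion used throughout these examples
    return (chw_float_image.transpose(1, 2, 0) * 255).astype(np.uint8)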
Example #2
def test_rgb_hsv_pipeline():
    # First dataset
    transforms1 = [vision.Decode(), vision.Resize([64, 64]), vision.ToTensor()]
    transform1 = vision.ComposeOp(transforms1)
    ds1 = ds.TFRecordDataset(DATA_DIR,
                             SCHEMA_DIR,
                             columns_list=["image"],
                             shuffle=False)
    ds1 = ds1.map(input_columns=["image"], operations=transform1())

    # Second dataset
    transforms2 = [
        vision.Decode(),
        vision.Resize([64, 64]),
        vision.ToTensor(),
        vision.RgbToHsv(),
        vision.HsvToRgb()
    ]
    transform2 = vision.ComposeOp(transforms2)
    ds2 = ds.TFRecordDataset(DATA_DIR,
                             SCHEMA_DIR,
                             columns_list=["image"],
                             shuffle=False)
    ds2 = ds2.map(input_columns=["image"], operations=transform2())

    num_iter = 0
    for data1, data2 in zip(ds1.create_dict_iterator(),
                            ds2.create_dict_iterator()):
        num_iter += 1
        ori_img = data1["image"]
        cvt_img = data2["image"]
        assert_allclose(ori_img.flatten(),
                        cvt_img.flatten(),
                        rtol=1e-5,
                        atol=0)
        assert ori_img.shape == cvt_img.shape
def test_random_color_adjust_op_contrast():
    """
    Test RandomColorAdjust op
    """
    logger.info("test_random_color_adjust_op")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    decode_op = c_vision.Decode()

    random_adjust_op = c_vision.RandomColorAdjust((1, 1), (0.5, 0.5), (1, 1), (0, 0))

    ctrans = [decode_op,
              random_adjust_op
              ]

    data1 = data1.map(input_columns=["image"], operations=ctrans)

    # Second dataset
    transforms = [
        py_vision.Decode(),
        py_vision.RandomColorAdjust((1, 1), (0.5, 0.5), (1, 1), (0, 0)),
        py_vision.ToTensor(),
    ]
    transform = py_vision.ComposeOp(transforms)
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=transform())

    num_iter = 0
    for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        num_iter += 1
        c_image = item1["image"]
        py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)

        logger.info("shape of c_image: {}".format(c_image.shape))
        logger.info("shape of py_image: {}".format(py_image.shape))

        logger.info("dtype of c_image: {}".format(c_image.dtype))
        logger.info("dtype of py_image: {}".format(py_image.dtype))

        diff = c_image.astype(np.int32) - py_image.astype(np.int32)
        logger.info("c_image pixel [0][0] is: {}".format(c_image[0][0]))
        logger.info("py_image pixel [0][0] is: {}".format(py_image[0][0]))

        logger.info("contrast difference at [0][0] is: {}".format(diff[0][0]))
        # mse = (np.sum(np.power(diff, 2))) / (c_image.shape[0] * c_image.shape[1])
        mse = diff_mse(c_image, py_image)
        logger.info("mse is {}".format(mse))
Example #4
def test_deterministic_python_seed_multi_thread():
    """
    Test deterministic execution with seed in Python; this fails with multi-threaded pyfunc execution
    """
    logger.info("test_deterministic_python_seed_multi_thread")
    ds.config.set_seed(0)
    # when we set the seed all operations within our dataset should be deterministic
    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    transforms = [
        py_vision.Decode(),
        py_vision.RandomCrop([512, 512], [200, 200, 200, 200]),
        py_vision.ToTensor(),
    ]
    transform = py_vision.ComposeOp(transforms)
    data1 = data1.map(input_columns=["image"],
                      operations=transform(),
                      python_multiprocessing=True)
    data1_output = []
    # config.set_seed() calls random.seed()
    for data_one in data1.create_dict_iterator():
        data1_output.append(data_one["image"])

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    # If seed is set up on constructor
    data2 = data2.map(input_columns=["image"],
                      operations=transform(),
                      python_multiprocessing=True)
    # config.set_seed() calls random.seed()
    ds.config.set_seed(0)

    data2_output = []
    for data_two in data2.create_dict_iterator():
        data2_output.append(data_two["image"])

    try:
        np.testing.assert_equal(data1_output, data2_output)
    except BaseException as e:
        # expect output to not match during multi-threaded execution
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert "Array" in str(e)
Example #5
def test_pad_op():
    """
    Test Pad op
    """
    logger.info("test_random_color_jitter_op")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    decode_op = c_vision.Decode()

    pad_op = c_vision.Pad((100, 100, 100, 100))
    ctrans = [
        decode_op,
        pad_op,
    ]

    data1 = data1.map(input_columns=["image"], operations=ctrans)

    # Second dataset
    transforms = [
        py_vision.Decode(),
        py_vision.Pad(100),
        py_vision.ToTensor(),
    ]
    transform = py_vision.ComposeOp(transforms)
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=transform())

    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        c_image = item1["image"]
        py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)

        logger.info("shape of c_image: {}".format(c_image.shape))
        logger.info("shape of py_image: {}".format(py_image.shape))

        logger.info("dtype of c_image: {}".format(c_image.dtype))
        logger.info("dtype of py_image: {}".format(py_image.dtype))

        mse = diff_mse(c_image, py_image)
        logger.info("mse is {}".format(mse))
        assert mse < 0.01
Example #6
def test_random_affine_py_exception_non_pil_images():
    """
    Test RandomAffine: input img is ndarray and not PIL, expected to raise RuntimeError
    """
    logger.info("test_random_affine_exception_negative_degrees")
    dataset = ds.MnistDataset(MNIST_DATA_DIR, num_parallel_workers=3)
    try:
        transform = py_vision.ComposeOp([py_vision.ToTensor(),
                                         py_vision.RandomAffine(degrees=(15, 15))])
        dataset = dataset.map(input_columns=["image"], operations=transform(), num_parallel_workers=3,
                              python_multiprocessing=True)
        for _ in dataset.create_dict_iterator():
            break
    except RuntimeError as e:
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert "Pillow image" in str(e)
Example #7
def test_equalize_md5():
    """
    Test Equalize with md5 check
    """
    logger.info("Test Equalize")

    # First dataset
    data1 = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False)
    transforms = F.ComposeOp([F.Decode(),
                              F.Equalize(),
                              F.ToTensor()])

    data1 = data1.map(input_columns="image", operations=transforms())
    # Compare with expected md5 from images
    filename = "equalize_01_result.npz"
    save_and_check_md5(data1, filename, generate_golden=GENERATE_GOLDEN)
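save_and_check_md5 is another utility these md5 tests assume. The idea is to hash the pipeline output and compare it against a stored golden digest, or regenerate the golden file when generate_golden is True; the sketch below is a rough approximation, and the on-disk format is an assumption rather than the project's actual layout.

import hashlib
import numpy as np

def save_and_check_md5(data, filename, generate_golden=False):
    # Hypothetical sketch: accumulate an md5 over every column of every row the pipeline yields
    digest = hashlib.md5()
    for item in data.create_dict_iterator():
        for column in item.values():
            digest.update(np.ascontiguousarray(column).tobytes())
    if generate_golden:
        np.savez(filename, md5=digest.hexdigest())
    else:
        golden = str(np.load(filename)["md5"])
        assert digest.hexdigest() == golden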
Example #8
def test_five_crop_error_msg():
    """
    Test FiveCrop error message.
    """
    logger.info("test_five_crop_error_msg")

    data = ds.TFRecordDataset(DATA_DIR,
                              SCHEMA_DIR,
                              columns_list=["image"],
                              shuffle=False)
    transforms = [vision.Decode(), vision.FiveCrop(200), vision.ToTensor()]
    transform = vision.ComposeOp(transforms)
    data = data.map(input_columns=["image"], operations=transform())

    with pytest.raises(RuntimeError):
        data.create_tuple_iterator().__next__()
Example #9
def test_HWC2CHW_comp(plot=False):
    """
    Test HWC2CHW between python and c image augmentation
    """
    logger.info("Test HWC2CHW with c_transform and py_transform comparison")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    decode_op = c_vision.Decode()
    hwc2chw_op = c_vision.HWC2CHW()
    data1 = data1.map(input_columns=["image"], operations=decode_op)
    data1 = data1.map(input_columns=["image"], operations=hwc2chw_op)

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    transforms = [
        py_vision.Decode(),
        py_vision.ToTensor(),
        py_vision.HWC2CHW()
    ]
    transform = py_vision.ComposeOp(transforms)
    data2 = data2.map(input_columns=["image"], operations=transform())

    image_c_transposed = []
    image_py_transposed = []
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        c_image = item1["image"]
        py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)

        # compare the images produced by the c_transform and py_transform pipelines
        mse = diff_mse(py_image, c_image)
        # the images aren't exactly the same due to rounding error
        assert mse < 0.001

        image_c_transposed.append(item1["image"].copy())
        image_py_transposed.append(item2["image"].copy())

    if plot:
        visualize(image_c_transposed, image_py_transposed)
Example #10
def test_compare_random_color_op(degrees=None, plot=False):
    """
    Compare Random Color op in Python and Cpp
    """

    logger.info("test_random_color_op")

    original_seed = config_get_set_seed(5)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # Decode with rgb format set to True
    data1 = ds.TFRecordDataset(C_DATA_DIR, C_SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data2 = ds.TFRecordDataset(C_DATA_DIR, C_SCHEMA_DIR, columns_list=["image"], shuffle=False)

    if degrees is None:
        c_op = vision.RandomColor()
        p_op = F.RandomColor()
    else:
        c_op = vision.RandomColor(degrees)
        p_op = F.RandomColor(degrees)

    transforms_random_color_py = F.ComposeOp([lambda img: img.astype(np.uint8), F.ToPIL(),
                                              p_op, np.array])

    data1 = data1.map(input_columns=["image"], operations=[vision.Decode(), c_op])
    data2 = data2.map(input_columns=["image"], operations=[vision.Decode()])
    data2 = data2.map(input_columns=["image"], operations=transforms_random_color_py())

    image_random_color_op = []
    image = []

    for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        actual = item1["image"]
        expected = item2["image"]
        image_random_color_op.append(actual)
        image.append(expected)
        assert actual.shape == expected.shape
        mse = diff_mse(actual, expected)
        logger.info("MSE= {}".format(str(np.mean(mse))))

    # Restore configuration
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)

    if plot:
        visualize_list(image, image_random_color_op)
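config_get_set_seed and config_get_set_num_parallel_workers swap a global dataset setting and return the previous value so the test can restore it afterwards. A minimal sketch consistent with how they are used in these examples:

import mindspore.dataset as ds

def config_get_set_seed(new_seed):
    # Hypothetical sketch: remember the current global seed, then install the new one
    original_seed = ds.config.get_seed()
    ds.config.set_seed(new_seed)
    return original_seed

def config_get_set_num_parallel_workers(new_num_workers):
    # Hypothetical sketch: same pattern for the parallel-worker count
    original_workers = ds.config.get_num_parallel_workers()
    ds.config.set_num_parallel_workers(new_num_workers)
    return original_workers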
Example #11
def test_random_vertical_comp(plot=False):
    """
    Test test_random_vertical_flip and compare between python and c image augmentation ops
    """
    logger.info("test_random_vertical_comp")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    decode_op = c_vision.Decode()
    # Note: The image must be flipped if prob is set to 1
    random_vertical_op = c_vision.RandomVerticalFlip(1)
    data1 = data1.map(input_columns=["image"], operations=decode_op)
    data1 = data1.map(input_columns=["image"], operations=random_vertical_op)

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    transforms = [
        py_vision.Decode(),
        # Note: The image must be flipped if prob is set to 1
        py_vision.RandomVerticalFlip(1),
        py_vision.ToTensor()
    ]
    transform = py_vision.ComposeOp(transforms)
    data2 = data2.map(input_columns=["image"], operations=transform())

    images_list_c = []
    images_list_py = []
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        image_c = item1["image"]
        image_py = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        images_list_c.append(image_c)
        images_list_py.append(image_py)

        # Check if the output images are the same
        mse = diff_mse(image_c, image_py)
        assert mse < 0.001
    if plot:
        visualize_list(images_list_c, images_list_py, visualize_mode=2)
Example #12
def test_cut_out_op(plot=False):
    """
    Test Cutout
    """
    logger.info("test_cut_out")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)

    transforms_1 = [f.Decode(), f.ToTensor(), f.RandomErasing(value='random')]
    transform_1 = f.ComposeOp(transforms_1)
    data1 = data1.map(input_columns=["image"], operations=transform_1())

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    decode_op = c.Decode()
    cut_out_op = c.CutOut(80)

    transforms_2 = [decode_op, cut_out_op]

    data2 = data2.map(input_columns=["image"], operations=transforms_2)

    num_iter = 0
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        num_iter += 1
        image_1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        # C image doesn't require transpose
        image_2 = item2["image"]

        logger.info("shape of image_1: {}".format(image_1.shape))
        logger.info("shape of image_2: {}".format(image_2.shape))

        logger.info("dtype of image_1: {}".format(image_1.dtype))
        logger.info("dtype of image_2: {}".format(image_2.dtype))

        mse = diff_mse(image_1, image_2)
        if plot:
            visualize_image(image_1, image_2, mse)
Example #13
def test_linear_transformation_op(plot=False):
    """
    Test LinearTransformation op: verify if images transform correctly
    """
    logger.info("test_linear_transformation_01")

    # Initialize parameters
    height = 50
    width = 50
    dim = 3 * height * width
    transformation_matrix = np.eye(dim)
    mean_vector = np.zeros(dim)

    # Define operations
    transforms = [
        py_vision.Decode(),
        py_vision.CenterCrop([height, width]),
        py_vision.ToTensor()
    ]
    transform = py_vision.ComposeOp(transforms)

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data1 = data1.map(input_columns=["image"], operations=transform())
    # Note: if the transformation matrix is an identity matrix (all 1s on the diagonal),
    #       the output image is expected to be the same as the input image.
    data1 = data1.map(input_columns=["image"],
                      operations=py_vision.LinearTransformation(transformation_matrix, mean_vector))

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=transform())

    image_transformed = []
    image = []
    for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        image1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image2 = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_transformed.append(image1)
        image.append(image2)

        mse = diff_mse(image1, image2)
        assert mse == 0
    if plot:
        visualize_list(image, image_transformed)
Example #14
def util_test_normalize_grayscale(num_output_channels, mean, std):
    """
    Utility function for testing Normalize. Input arguments are given by other tests
    """
    transforms = [
        py_vision.Decode(),
        py_vision.Grayscale(num_output_channels),
        py_vision.ToTensor(),
        py_vision.Normalize(mean, std)
    ]
    transform = py_vision.ComposeOp(transforms)
    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR,
                              SCHEMA_DIR,
                              columns_list=["image"],
                              shuffle=False)
    data = data.map(input_columns=["image"], operations=transform())
    return data
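Since util_test_normalize_grayscale only builds the pipeline, a caller still has to iterate it. A hypothetical invocation with single-channel statistics (the mean and std values are illustrative, not taken from the original tests):

data = util_test_normalize_grayscale(1, [0.5], [0.25])
for item in data.create_dict_iterator():
    # after Grayscale(1) and ToTensor the image is CHW with a single channel
    assert item["image"].shape[0] == 1
    break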
Example #15
def test_random_rotation_md5():
    """
    Test RandomRotation with md5 check
    """
    logger.info("Test RandomRotation with md5 check")
    original_seed = config_get_set_seed(5)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    decode_op = c_vision.Decode()
    rotation_op = c_vision.RandomRotation((0, 90),
                                        expand=True,
                                        resample=Inter.BILINEAR,
                                        center=(50, 50),
                                        fill_value=150)
    data1 = data1.map(input_columns=["image"], operations=decode_op)
    data1 = data1.map(input_columns=["image"], operations=resize_op)

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)
    transform2 = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.RandomRotation((0, 90),
                                 expand=True,
                                 resample=Inter.BILINEAR,
                                 center=(50, 50),
                                 fill_value=150),
        py_vision.ToTensor()
    ])
    data2 = data2.map(input_columns=["image"], operations=transform2())

    # Compare with expected md5 from images
    filename1 = "random_rotation_01_c_result.npz"
    save_and_check_md5(data1, filename1, generate_golden=GENERATE_GOLDEN)
    filename2 = "random_rotation_01_py_result.npz"
    save_and_check_md5(data2, filename2, generate_golden=GENERATE_GOLDEN)

    # Restore configuration
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
Example #16
def test_pad_grayscale():
    """
    Tests that the pad works for grayscale images
    """

    # Note: image.transpose performs channel swap to allow py transforms to
    # work with c transforms
    transforms = [
        py_vision.Decode(),
        py_vision.Grayscale(1),
        py_vision.ToTensor(),
        (lambda image: (image.transpose(1, 2, 0) * 255).astype(np.uint8))
    ]

    transform = py_vision.ComposeOp(transforms)
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data1 = data1.map(input_columns=["image"], operations=transform())

    # if input is grayscale, the output dimensions should be single channel
    pad_gray = c_vision.Pad(100, fill_value=(20, 20, 20))
    data1 = data1.map(input_columns=["image"], operations=pad_gray)
    dataset_shape_1 = []
    for item1 in data1.create_dict_iterator():
        c_image = item1["image"]
        dataset_shape_1.append(c_image.shape)

    # Dataset for comparison
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    decode_op = c_vision.Decode()

    # we use the same padding logic
    ctrans = [decode_op, pad_gray]
    dataset_shape_2 = []

    data2 = data2.map(input_columns=["image"], operations=ctrans)

    for item2 in data2.create_dict_iterator():
        c_image = item2["image"]
        dataset_shape_2.append(c_image.shape)

    for shape1, shape2 in zip(dataset_shape_1, dataset_shape_2):
        # validate that the first two dimensions (height and width) are the same;
        # the third dimension differs because py_vision.Grayscale outputs a single channel
        assert shape1[0:2] == shape2[0:2]
Example #17
def test_type_cast():
    """
    Test TypeCast op
    """
    logger.info("test_type_cast")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    decode_op = c_vision.Decode()

    type_cast_op = data_util.TypeCast(mstype.float32)

    ctrans = [
        decode_op,
        type_cast_op,
    ]

    data1 = data1.map(input_columns=["image"], operations=ctrans)

    # Second dataset
    transforms = [py_vision.Decode(), py_vision.ToTensor()]
    transform = py_vision.ComposeOp(transforms)
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=transform())

    num_iter = 0
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        num_iter += 1
        c_image = item1["image"]
        py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)

        logger.info("shape of c_image: {}".format(c_image.shape))
        logger.info("shape of py_image: {}".format(py_image.shape))

        logger.info("dtype of c_image: {}".format(c_image.dtype))
        logger.info("dtype of py_image: {}".format(py_image.dtype))
        assert c_image.dtype == "float32"
Example #18
def test_cut_out_comp(plot=False):
    """
    Test Cutout with c++ and python op comparison
    """
    logger.info("test_cut_out_comp")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)

    transforms_1 = [f.Decode(), f.ToTensor(), f.Cutout(200)]
    transform_1 = f.ComposeOp(transforms_1)
    data1 = data1.map(input_columns=["image"], operations=transform_1())

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)

    transforms_2 = [c.Decode(), c.CutOut(200)]

    data2 = data2.map(input_columns=["image"], operations=transforms_2)

    num_iter = 0
    image_list_1, image_list_2 = [], []
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        num_iter += 1
        image_1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        # C image doesn't require transpose
        image_2 = item2["image"]
        image_list_1.append(image_1)
        image_list_2.append(image_2)

        logger.info("shape of image_1: {}".format(image_1.shape))
        logger.info("shape of image_2: {}".format(image_2.shape))

        logger.info("dtype of image_1: {}".format(image_1.dtype))
        logger.info("dtype of image_2: {}".format(image_2.dtype))
    if plot:
        visualize_list(image_list_1, image_list_2, visualize_mode=2)
Example #19
def test_normalize_op_py(plot=False):
    """
    Test Normalize in python transformations
    """
    logger.info("Test Normalize in python")
    mean = [0.475, 0.45, 0.392]
    std = [0.275, 0.267, 0.278]
    # define map operations
    transforms = [py_vision.Decode(), py_vision.ToTensor()]
    transform = py_vision.ComposeOp(transforms)
    normalize_op = py_vision.Normalize(mean, std)

    #  First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data1 = data1.map(input_columns=["image"], operations=transform())
    data1 = data1.map(input_columns=["image"], operations=normalize_op)

    #  Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=transform())

    num_iter = 0
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        image_de_normalized = (item1["image"].transpose(1, 2, 0) * 255).astype(
            np.uint8)
        image_np_normalized = (
            normalize_np(item2["image"].transpose(1, 2, 0), mean, std) *
            255).astype(np.uint8)
        image_original = (item2["image"].transpose(1, 2, 0) * 255).astype(
            np.uint8)
        mse = diff_mse(image_de_normalized, image_np_normalized)
        logger.info("image_{}, mse: {}".format(num_iter + 1, mse))
        assert mse < 0.01
        if plot:
            visualize_image(image_original, image_de_normalized, mse,
                            image_np_normalized)
        num_iter += 1
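normalize_np is the NumPy reference the pipeline output is checked against. Assuming it applies the standard per-channel (x - mean) / std to an HWC float image, a sketch could be:

import numpy as np

def normalize_np(image, mean, std):
    # Hypothetical sketch: per-channel normalization of an HWC float image in [0, 1]
    return (np.asarray(image) - np.array(mean)) / np.array(std)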
Example #20
def test_deterministic_python_seed():
    """
    Test deterministic execution with seed in python
    """
    logger.info("test_deterministic_python_seed")

    # Save original configuration values
    num_parallel_workers_original = ds.config.get_num_parallel_workers()
    seed_original = ds.config.get_seed()

    ds.config.set_seed(0)
    ds.config.set_num_parallel_workers(1)

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)

    transforms = [
        py_vision.Decode(),
        py_vision.RandomCrop([512, 512], [200, 200, 200, 200]),
        py_vision.ToTensor(),
    ]
    transform = py_vision.ComposeOp(transforms)
    data1 = data1.map(input_columns=["image"], operations=transform())
    data1_output = []
    # config.set_seed() calls random.seed()
    for data_one in data1.create_dict_iterator():
        data1_output.append(data_one["image"])

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=transform())
    # config.set_seed() calls random.seed(), resets seed for next dataset iterator
    ds.config.set_seed(0)

    data2_output = []
    for data_two in data2.create_dict_iterator():
        data2_output.append(data_two["image"])

    np.testing.assert_equal(data1_output, data2_output)

    # Restore original configuration values
    ds.config.set_num_parallel_workers(num_parallel_workers_original)
    ds.config.set_seed(seed_original)
def test_random_color_adjust_op_brightness():
    """
    Test RandomColorAdjust op
    """
    logger.info("test_random_color_adjust_op")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    decode_op = c_vision.Decode()

    random_adjust_op = c_vision.RandomColorAdjust((0.8, 0.8), (1, 1), (1, 1), (0, 0))

    ctrans = [decode_op,
              random_adjust_op,
              ]

    data1 = data1.map(input_columns=["image"], operations=ctrans)

    # Second dataset
    transforms = [
        py_vision.Decode(),
        py_vision.RandomColorAdjust((0.8, 0.8), (1, 1), (1, 1), (0, 0)),
        py_vision.ToTensor(),
    ]
    transform = py_vision.ComposeOp(transforms)
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=transform())

    num_iter = 0
    for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        num_iter += 1
        c_image = item1["image"]
        py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)

        logger.info("shape of c_image: {}".format(c_image.shape))
        logger.info("shape of py_image: {}".format(py_image.shape))

        logger.info("dtype of c_image: {}".format(c_image.dtype))
        logger.info("dtype of py_image: {}".format(py_image.dtype))

        mse = diff_mse(c_image, py_image)
        logger.info("mse is {}".format(mse))
        assert mse < 0.01
Example #22
def test_random_grayscale_invalid_param():
    """
    Test RandomGrayscale: invalid parameter given, expect to raise error
    """
    logger.info("test_random_grayscale_invalid_param")

    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    try:
        transforms = [
            py_vision.Decode(),
            py_vision.RandomGrayscale(1.5),
            py_vision.ToTensor()
        ]
        transform = py_vision.ComposeOp(transforms)
        data = data.map(input_columns=["image"], operations=transform())
    except ValueError as e:
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert "Input is not within the required range" in str(e)
Example #23
def create_icdar_train_dataset(img_path, gt_path, batch_size=32, repeat_num=10,
                               is_training=True, num_parallel_workers=1, length=512, scale=0.25):
    dataloader = ds.GeneratorDataset(source=custom_dataset(img_path, gt_path, scale=scale, length=length),
                                     column_names=["img", "score_map", "geo_map", "ignored_map"],
                                     shuffle=is_training,
                                     num_parallel_workers=num_parallel_workers)
    dataloader.set_dataset_size(1000)
    transform = py_transforms.ComposeOp([py_transforms.RandomColorAdjust(brightness=0.5, contrast=0.5,
                                                                         saturation=0.5, hue=0.25),
                                         py_transforms.ToTensor(),
                                         py_transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
    dataloader = dataloader.map(input_columns="img",
                                operations=transform,
                                num_parallel_workers=num_parallel_workers,
                                python_multiprocessing=is_training)
    dataloader = dataloader.batch(batch_size, drop_remainder=True)

    return dataloader
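For context, a hypothetical call to create_icdar_train_dataset from a training script might look like this; the paths are placeholders and custom_dataset is assumed to be defined elsewhere in the project.

train_loader = create_icdar_train_dataset(img_path="/path/to/icdar/train_img",
                                          gt_path="/path/to/icdar/train_gt",
                                          batch_size=8,
                                          num_parallel_workers=4)
for batch in train_loader.create_dict_iterator():
    # each batch carries the image plus its score, geometry and ignored maps
    print(batch["img"].shape, batch["score_map"].shape)
    break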
Example #24
def test_random_crop_and_resize_comp(plot=False):
    """
    Test RandomCropAndResize and compare between python and c image augmentation
    """
    logger.info("test_random_crop_and_resize_comp")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    decode_op = c_vision.Decode()
    random_crop_and_resize_op = c_vision.RandomResizedCrop(
        512, (1, 1), (0.5, 0.5))
    data1 = data1.map(input_columns=["image"], operations=decode_op)
    data1 = data1.map(input_columns=["image"],
                      operations=random_crop_and_resize_op)

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    transforms = [
        py_vision.Decode(),
        py_vision.RandomResizedCrop(512, (1, 1), (0.5, 0.5)),
        py_vision.ToTensor()
    ]
    transform = py_vision.ComposeOp(transforms)
    data2 = data2.map(input_columns=["image"], operations=transform())

    image_c_cropped = []
    image_py_cropped = []
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        c_image = item1["image"]
        py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_c_cropped.append(c_image)
        image_py_cropped.append(py_image)
        mse = diff_mse(c_image, py_image)
        assert mse < 0.02  # rounding error
    if plot:
        visualize_list(image_c_cropped, image_py_cropped, visualize_mode=2)
def test_random_crop_and_resize_02():
    """
    Test RandomCropAndResize with md5 check: image interpolation mode is Inter.NEAREST,
    expected to pass
    """
    logger.info("test_random_crop_and_resize_02")
    original_seed = config_get_set_seed(0)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    decode_op = c_vision.Decode()
    random_crop_and_resize_op = c_vision.RandomResizedCrop(
        (256, 512), interpolation=mode.Inter.NEAREST)
    data1 = data1.map(input_columns=["image"], operations=decode_op)
    data1 = data1.map(input_columns=["image"],
                      operations=random_crop_and_resize_op)

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    transforms = [
        py_vision.Decode(),
        py_vision.RandomResizedCrop((256, 512),
                                    interpolation=mode.Inter.NEAREST),
        py_vision.ToTensor()
    ]
    transform = py_vision.ComposeOp(transforms)
    data2 = data2.map(input_columns=["image"], operations=transform())

    filename1 = "random_crop_and_resize_02_c_result.npz"
    filename2 = "random_crop_and_resize_02_py_result.npz"
    save_and_check_md5(data1, filename1, generate_golden=GENERATE_GOLDEN)
    save_and_check_md5(data2, filename2, generate_golden=GENERATE_GOLDEN)

    # Restore config setting
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
Example #26
def test_rotation_diff():
    """
    Test Rotation op
    """
    logger.info("test_random_rotation_op")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    decode_op = c_vision.Decode()

    rotation_op = c_vision.RandomRotation((45, 45), expand=True)
    ctrans = [decode_op, rotation_op]

    data1 = data1.map(input_columns=["image"], operations=ctrans)

    # Second dataset
    transforms = [
        py_vision.Decode(),
        py_vision.RandomRotation((45, 45), expand=True),
        py_vision.ToTensor(),
    ]
    transform = py_vision.ComposeOp(transforms)
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=transform())

    num_iter = 0
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        num_iter += 1
        c_image = item1["image"]
        py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)

        logger.info("shape of c_image: {}".format(c_image.shape))
        logger.info("shape of py_image: {}".format(py_image.shape))

        logger.info("dtype of c_image: {}".format(c_image.dtype))
        logger.info("dtype of py_image: {}".format(py_image.dtype))
Example #27
def test_random_crop_comp(plot=False):
    """
    Test RandomCrop and compare between python and c image augmentation
    """
    logger.info("Test RandomCrop with c_transform and py_transform comparison")
    ds.config.set_seed(0)
    ds.config.set_num_parallel_workers(1)
    cropped_size = 512

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    random_crop_op = c_vision.RandomCrop(cropped_size)
    decode_op = c_vision.Decode()
    data1 = data1.map(input_columns=["image"], operations=decode_op)
    data1 = data1.map(input_columns=["image"], operations=random_crop_op)

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    transforms = [
        py_vision.Decode(),
        py_vision.RandomCrop(cropped_size),
        py_vision.ToTensor()
    ]
    transform = py_vision.ComposeOp(transforms)
    data2 = data2.map(input_columns=["image"], operations=transform())

    image_c_cropped = []
    image_py_cropped = []
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        c_image = item1["image"]
        py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_c_cropped.append(c_image)
        image_py_cropped.append(py_image)
    if plot:
        visualize(image_c_cropped, image_py_cropped)
Example #28
def test_random_vertical_invalid_prob_py():
    """
    Test RandomVerticalFlip op in py_transforms: invalid input, expect to raise error
    """
    logger.info("test_random_vertical_invalid_prob_py")

    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    try:
        transforms = [
            py_vision.Decode(),
            # Note: Valid range of prob should be [0.0, 1.0]
            py_vision.RandomVerticalFlip(1.5),
            py_vision.ToTensor()
        ]
        transform = py_vision.ComposeOp(transforms)
        data = data.map(input_columns=["image"], operations=transform())
    except ValueError as e:
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert 'Input prob is not within the required interval of (0.0 to 1.0).' in str(e)
Example #29
def test_five_crop_error_msg():
    """
    Test FiveCrop error message.
    """
    logger.info("test_five_crop_error_msg")

    data = ds.TFRecordDataset(DATA_DIR,
                              SCHEMA_DIR,
                              columns_list=["image"],
                              shuffle=False)
    transforms = [vision.Decode(), vision.FiveCrop(200), vision.ToTensor()]
    transform = vision.ComposeOp(transforms)
    data = data.map(input_columns=["image"], operations=transform())

    with pytest.raises(RuntimeError) as info:
        data.create_tuple_iterator().get_next()
    error_msg = "TypeError: img should be PIL Image or Numpy array. Got <class 'tuple'>"

    # error msg comes from ToTensor()
    assert error_msg in str(info.value)
Example #30
def test_center_crop_comp(height=375, width=375, plot=False):
    """
    Test CenterCrop between python and c image augmentation
    """
    logger.info("Test CenterCrop")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    decode_op = vision.Decode()
    center_crop_op = vision.CenterCrop([height, width])
    data1 = data1.map(input_columns=["image"], operations=decode_op)
    data1 = data1.map(input_columns=["image"], operations=center_crop_op)

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    transforms = [
        py_vision.Decode(),
        py_vision.CenterCrop([height, width]),
        py_vision.ToTensor()
    ]
    transform = py_vision.ComposeOp(transforms)
    data2 = data2.map(input_columns=["image"], operations=transform())

    image_cropped = []
    image = []
    for item1, item2 in zip(data1.create_dict_iterator(),
                            data2.create_dict_iterator()):
        c_image = item1["image"]
        py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        # Note: The images aren't exactly the same due to rounding error
        assert diff_mse(py_image, c_image) < 0.001
        image_cropped.append(item1["image"].copy())
        image.append(item2["image"].copy())
    if plot:
        visualize(image, image_cropped)