Example #1
def test_random_apply_md5():
    """
    Test RandomApply op with md5 check
    """
    logger.info("test_random_apply_md5")
    original_seed = config_get_set_seed(10)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)
    # define map operations
    transforms_list = [py_vision.CenterCrop(64), py_vision.RandomRotation(30)]
    transforms = [
        py_vision.Decode(),
        # Note: using default value "prob=0.5"
        py_transforms.RandomApply(transforms_list),
        py_vision.ToTensor()
    ]
    transform = py_transforms.Compose(transforms)

    #  Generate dataset
    data = ds.TFRecordDataset(DATA_DIR,
                              SCHEMA_DIR,
                              columns_list=["image"],
                              shuffle=False)
    data = data.map(operations=transform, input_columns=["image"])

    # check results with md5 comparison
    filename = "random_apply_01_result.npz"
    save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)

    # Restore configuration
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
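
# A minimal sketch of what a golden-md5 check in the spirit of save_and_check_md5
# could look like (an illustration under assumed behavior, not the actual test
# helper): hash the raw bytes of every row the pipeline produces and compare the
# digest against a stored golden value.
import hashlib

def check_md5_sketch(data, expected_md5):
    md5 = hashlib.md5()
    for row in data.create_dict_iterator(num_epochs=1, output_numpy=True):
        md5.update(row["image"].tobytes())
    assert md5.hexdigest() == expected_md5
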
Example #2
def test_random_apply_exception_random_crop_badinput():
    """
    Test RandomApply with an invalid input for one of the transform functions;
    an error is expected to be raised
    """
    logger.info("test_random_apply_exception_random_crop_badinput")
    original_seed = config_get_set_seed(200)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)
    # define map operations
    transforms_list = [
        py_vision.Resize([32, 32]),
        py_vision.RandomCrop(100),  # crop size > image size
        py_vision.RandomRotation(30)
    ]
    transforms = [
        py_vision.Decode(),
        py_transforms.RandomApply(transforms_list, prob=0.6),
        py_vision.ToTensor()
    ]
    transform = py_transforms.Compose(transforms)
    #  Generate dataset
    data = ds.TFRecordDataset(DATA_DIR,
                              SCHEMA_DIR,
                              columns_list=["image"],
                              shuffle=False)
    data = data.map(operations=transform, input_columns=["image"])
    try:
        _ = data.create_dict_iterator(num_epochs=1).get_next()
    except RuntimeError as e:
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert "Crop size" in str(e)
    # Restore configuration
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
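
# An equivalent way to assert the failure, sketched with pytest.raises instead of
# try/except (assumes pytest is available to this test suite; the pipeline is the
# same one built above):
import pytest

def random_crop_badinput_sketch(data):
    with pytest.raises(RuntimeError) as error_info:
        for _ in data.create_dict_iterator(num_epochs=1, output_numpy=True):
            pass
    assert "Crop size" in str(error_info.value)
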
def test_rotation_diff(plot=False):
    """
    Test that the C++ and Python RandomRotation ops produce matching results
    """
    logger.info("test_random_rotation_op")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    decode_op = c_vision.Decode()

    rotation_op = c_vision.RandomRotation((45, 45))
    ctrans = [decode_op, rotation_op]

    data1 = data1.map(operations=ctrans, input_columns=["image"])

    # Second dataset
    transforms = [
        py_vision.Decode(),
        py_vision.RandomRotation((45, 45)),
        py_vision.ToTensor(),
    ]
    transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data2 = data2.map(operations=transform, input_columns=["image"])

    num_iter = 0
    image_list_c, image_list_py = [], []
    for item1, item2 in zip(
            data1.create_dict_iterator(num_epochs=1, output_numpy=True),
            data2.create_dict_iterator(num_epochs=1, output_numpy=True)):
        num_iter += 1
        c_image = item1["image"]
        py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_list_c.append(c_image)
        image_list_py.append(py_image)

        logger.info("shape of c_image: {}".format(c_image.shape))
        logger.info("shape of py_image: {}".format(py_image.shape))

        logger.info("dtype of c_image: {}".format(c_image.dtype))
        logger.info("dtype of py_image: {}".format(py_image.dtype))

        mse = diff_mse(c_image, py_image)
        assert mse < 0.001  # Rounding error
    if plot:
        visualize_list(image_list_c, image_list_py, visualize_mode=2)
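
# diff_mse comes from the shared test utilities; a plausible sketch of the idea
# (an assumption, not the actual helper): the mean squared error over the pixel
# values of the two images, which is near zero only when the C++ and Python
# pipelines produce matching results.
def diff_mse_sketch(image1, image2):
    diff = image1.astype(float) - image2.astype(float)
    return np.square(diff).mean()
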
def test_random_rotation_md5():
    """
    Test RandomRotation with md5 check
    """
    logger.info("Test RandomRotation with md5 check")
    original_seed = config_get_set_seed(5)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    decode_op = c_vision.Decode()
    rotation_op = c_vision.RandomRotation((0, 90),
                                          expand=True,
                                          resample=Inter.BILINEAR,
                                          center=(50, 50),
                                          fill_value=150)
    data1 = data1.map(operations=decode_op, input_columns=["image"])
    data1 = data1.map(operations=rotation_op, input_columns=["image"])

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)
    transform2 = mindspore.dataset.transforms.py_transforms.Compose([
        py_vision.Decode(),
        py_vision.RandomRotation((0, 90),
                                 expand=True,
                                 resample=Inter.BILINEAR,
                                 center=(50, 50),
                                 fill_value=150),
        py_vision.ToTensor()
    ])
    data2 = data2.map(operations=transform2, input_columns=["image"])

    # Compare with expected md5 from images
    filename1 = "random_rotation_01_c_result.npz"
    save_and_check_md5(data1, filename1, generate_golden=GENERATE_GOLDEN)
    filename2 = "random_rotation_01_py_result.npz"
    save_and_check_md5(data2, filename2, generate_golden=GENERATE_GOLDEN)

    # Restore configuration
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
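
# config_get_set_seed and config_get_set_num_parallel_workers are small test
# helpers; a minimal sketch of the assumed behavior (save the current value, set
# the new one, and return the original so the test can restore it afterwards):
def config_get_set_seed_sketch(new_seed):
    original_seed = ds.config.get_seed()
    ds.config.set_seed(new_seed)
    return original_seed
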
def test_random_rotation_op_py(plot=False):
    """
    Test RandomRotation in Python transformations
    """
    logger.info("test_random_rotation_op_py")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)
    # use [90, 90] to force a 90-degree rotation; expand=True enlarges the output to fit the rotated image
    transform1 = mindspore.dataset.transforms.py_transforms.Compose([
        py_vision.Decode(),
        py_vision.RandomRotation((90, 90), expand=True),
        py_vision.ToTensor()
    ])
    data1 = data1.map(operations=transform1, input_columns=["image"])

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    transform2 = mindspore.dataset.transforms.py_transforms.Compose(
        [py_vision.Decode(), py_vision.ToTensor()])
    data2 = data2.map(operations=transform2, input_columns=["image"])

    num_iter = 0
    for item1, item2 in zip(
            data1.create_dict_iterator(num_epochs=1, output_numpy=True),
            data2.create_dict_iterator(num_epochs=1, output_numpy=True)):
        if num_iter > 0:
            break
        rotation_de = (item1["image"].transpose(1, 2, 0) * 255).astype(
            np.uint8)
        original = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        logger.info("shape before rotate: {}".format(original.shape))
        rotation_cv = cv2.rotate(original, cv2.ROTATE_90_COUNTERCLOCKWISE)
        mse = diff_mse(rotation_de, rotation_cv)
        logger.info("random_rotation_op_{}, mse: {}".format(num_iter + 1, mse))
        assert mse == 0
        num_iter += 1
        if plot:
            visualize_image(original, rotation_de, mse, rotation_cv)
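
# The same 90-degree check can also be sketched with plain NumPy: np.rot90 rotates
# counter-clockwise by default, matching cv2.ROTATE_90_COUNTERCLOCKWISE above.
def rotate90_check_sketch(rotation_de, original):
    rotation_np = np.rot90(original)
    assert diff_mse(rotation_de, rotation_np) == 0
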
Example #6
def test_random_apply_op(plot=False):
    """
    Test RandomApply in python transformations
    """
    logger.info("test_random_apply_op")
    # define map operations
    transforms_list = [py_vision.CenterCrop(64), py_vision.RandomRotation(30)]
    transforms1 = [
        py_vision.Decode(),
        py_transforms.RandomApply(transforms_list, prob=0.6),
        py_vision.ToTensor()
    ]
    transform1 = py_transforms.Compose(transforms1)

    transforms2 = [py_vision.Decode(), py_vision.ToTensor()]
    transform2 = py_transforms.Compose(transforms2)

    #  First dataset
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data1 = data1.map(operations=transform1, input_columns=["image"])
    #  Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    data2 = data2.map(operations=transform2, input_columns=["image"])

    image_apply = []
    image_original = []
    for item1, item2 in zip(
            data1.create_dict_iterator(num_epochs=1, output_numpy=True),
            data2.create_dict_iterator(num_epochs=1, output_numpy=True)):
        image1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image2 = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_apply.append(image1)
        image_original.append(image2)
    if plot:
        visualize_list(image_original, image_apply)
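
# Conceptually, RandomApply either applies the whole transforms_list (with
# probability prob) or passes the image through unchanged; a rough sketch of that
# behavior for illustration, not MindSpore's implementation:
import random

def random_apply_sketch(image, transforms_list, prob=0.5):
    if random.random() >= prob:
        return image
    for op in transforms_list:
        image = op(image)
    return image
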
Example #7
def test_random_rotation_op_py_ANTIALIAS():
    """
    Test RandomRotation in Python transformations with Inter.ANTIALIAS resample
    """
    logger.info("test_random_rotation_op_py_ANTIALIAS")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)
    # use [90, 90] to force a 90-degree rotation; expand=True enlarges the output to fit the rotated image
    transform1 = mindspore.dataset.transforms.py_transforms.Compose([
        py_vision.Decode(),
        py_vision.RandomRotation((90, 90),
                                 expand=True,
                                 resample=Inter.ANTIALIAS),
        py_vision.ToTensor()
    ])
    data1 = data1.map(operations=transform1, input_columns=["image"])

    num_iter = 0
    for _ in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
        num_iter += 1
    logger.info(
        "RandomRotation with Inter.ANTIALIAS processed {} images.".format(
            num_iter))
Example #8
def test_uniform_augment(plot=False, num_ops=2):
    """
    Test UniformAugment
    """
    logger.info("Test UniformAugment")

    # Original Images
    data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)

    transforms_original = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(),
                                                                              F.Resize((224, 224)),
                                                                              F.ToTensor()])

    ds_original = data_set.map(operations=transforms_original, input_columns="image")

    ds_original = ds_original.batch(512)

    for idx, (image, _) in enumerate(ds_original):
        if idx == 0:
            images_original = np.transpose(image.asnumpy(), (0, 2, 3, 1))
        else:
            images_original = np.append(images_original,
                                        np.transpose(image.asnumpy(), (0, 2, 3, 1)),
                                        axis=0)

    # UniformAugment Images
    data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)

    transform_list = [F.RandomRotation(45),
                      F.RandomColor(),
                      F.RandomSharpness(),
                      F.Invert(),
                      F.AutoContrast(),
                      F.Equalize()]

    transforms_ua = \
        mindspore.dataset.transforms.py_transforms.Compose([F.Decode(),
                                                            F.Resize((224, 224)),
                                                            F.UniformAugment(transforms=transform_list,
                                                                             num_ops=num_ops),
                                                            F.ToTensor()])

    ds_ua = data_set.map(operations=transforms_ua, input_columns="image")

    ds_ua = ds_ua.batch(512)

    for idx, (image, _) in enumerate(ds_ua):
        if idx == 0:
            images_ua = np.transpose(image.asnumpy(), (0, 2, 3, 1))
        else:
            images_ua = np.append(images_ua,
                                  np.transpose(image.asnumpy(), (0, 2, 3, 1)),
                                  axis=0)

    num_samples = images_original.shape[0]
    mse = np.zeros(num_samples)
    for i in range(num_samples):
        mse[i] = diff_mse(images_ua[i], images_original[i])
    logger.info("MSE= {}".format(str(np.mean(mse))))

    if plot:
        visualize_list(images_original, images_ua)
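
# Conceptually, UniformAugment samples num_ops distinct transforms uniformly from
# transform_list and applies them in sequence to each image; a simplified sketch
# of that idea for illustration, not MindSpore's exact implementation (the real op
# may additionally skip individual selected transforms at random):
import random

def uniform_augment_sketch(image, transform_list, num_ops=2):
    for op in random.sample(transform_list, num_ops):
        image = op(image)
    return image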