Example #1
def test_py_transforms_with_c_vision():
    """
    These examples will fail, as py_transforms.Random(Apply/Choice/Order) expect callable functions
    """

    ds.config.set_seed(0)

    def test_config(op_list):
        data_dir = "../data/dataset/testImageNetData/train/"
        data = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
        data = data.map(operations=op_list)
        res = []
        output_cols = ["image", "label"]  # columns produced by ImageFolderDataset
        for i in data.create_dict_iterator(output_numpy=True):
            for col_name in output_cols:
                res.append(i[col_name].tolist())
        return res

    with pytest.raises(ValueError) as error_info:
        test_config(py_transforms.RandomApply([c_vision.RandomResizedCrop(200)]))
    assert "transforms[0] is not callable." in str(error_info.value)

    with pytest.raises(ValueError) as error_info:
        test_config(py_transforms.RandomChoice([c_vision.RandomResizedCrop(200)]))
    assert "transforms[0] is not callable." in str(error_info.value)

    with pytest.raises(ValueError) as error_info:
        test_config(py_transforms.RandomOrder([np.array, c_vision.RandomResizedCrop(200)]))
    assert "transforms[1] is not callable." in str(error_info.value)

    with pytest.raises(RuntimeError) as error_info:
        test_config([py_transforms.OneHotOp(20, 0.1)])
    assert "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()" in str(
        error_info.value)
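By contrast, the C++-backed RandomApply/RandomChoice do accept c_vision operations. A minimal sketch under the same data layout as above (the import aliases are assumptions matching the ones used throughout these examples):

import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as c_transforms
import mindspore.dataset.vision.c_transforms as c_vision

data = ds.ImageFolderDataset(dataset_dir="../data/dataset/testImageNetData/train/", shuffle=False)
# c_transforms.RandomApply wraps C++ vision ops directly, so no "not callable" error is raised
data = data.map(operations=[c_vision.Decode(),
                            c_transforms.RandomApply([c_vision.RandomResizedCrop(200)], prob=0.5)])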
Example #2
def test_random_crop_and_resize_06():
    """
    Test RandomCropAndResize with c_transforms: invalid values for scale,
    expected to raise TypeError
    """
    logger.info("test_random_crop_and_resize_05_c")

    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR,
                              SCHEMA_DIR,
                              columns_list=["image"],
                              shuffle=False)
    decode_op = c_vision.Decode()
    try:
        random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512),
                                                               scale="",
                                                               ratio=(1, 0.5))
        data = data.map(operations=decode_op, input_columns=["image"])
        data.map(operations=random_crop_and_resize_op, input_columns=["image"])
    except TypeError as e:
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert "Argument scale with value \"\" is not of type (<class 'tuple'>,)" in str(
            e)

    try:
        random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512),
                                                               scale=(1, "2"),
                                                               ratio=(1, 0.5))
        data = data.map(operations=decode_op, input_columns=["image"])
        data.map(operations=random_crop_and_resize_op, input_columns=["image"])
    except TypeError as e:
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert "Argument scale[1] with value 2 is not of type (<class 'float'>, <class 'int'>)." in str(
            e)
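For reference, a valid construction passes (min, max) tuples of numbers for both scale and ratio; the values below are the documented defaults, written out explicitly:

valid_op = c_vision.RandomResizedCrop((256, 512), scale=(0.08, 1.0), ratio=(0.75, 1.333))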
Example #3
def test_random_crop_and_resize_op_c(plot=False):
    """
    Test RandomCropAndResize op in c transforms
    """
    logger.info("test_random_crop_and_resize_op_c")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    decode_op = c_vision.Decode()
    # With these inputs we expect the code to crop the whole image
    random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), (2, 2), (1, 3))
    data1 = data1.map(operations=decode_op, input_columns=["image"])
    data1 = data1.map(operations=random_crop_and_resize_op, input_columns=["image"])

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data2 = data2.map(operations=decode_op, input_columns=["image"])
    num_iter = 0
    crop_and_resize_images = []
    original_images = []
    for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True),
                            data2.create_dict_iterator(num_epochs=1, output_numpy=True)):
        crop_and_resize = item1["image"]
        original = item2["image"]
        # Note: resize the original image to the same output size as RandomResizedCrop();
        # cv2.resize takes (width, height), hence (512, 256)
        original = cv2.resize(original, (512, 256))
        mse = diff_mse(crop_and_resize, original)
        assert mse == 0
        logger.info("random_crop_and_resize_op_{}, mse: {}".format(num_iter + 1, mse))
        num_iter += 1
        crop_and_resize_images.append(crop_and_resize)
        original_images.append(original)
    if plot:
        visualize_list(original_images, crop_and_resize_images)
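The whole-image fallback can also be checked eagerly, without a dataset pipeline; a minimal sketch on a synthetic HWC image (the synthetic shape is an arbitrary assumption):

import numpy as np
import mindspore.dataset.vision.c_transforms as c_vision

img = np.random.randint(0, 255, (300, 400, 3), dtype=np.uint8)
# a scale of (2, 2) asks for twice the source area, which can never be satisfied,
# so the op falls back to cropping the whole image before resizing
op = c_vision.RandomResizedCrop((256, 512), (2, 2), (1, 3))
assert op(img).shape == (256, 512, 3)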
Example #4
def test_random_crop_and_resize_03():
    """
    Test RandomCropAndResize with md5 check: max_attempts is 1, expected to pass
    """
    logger.info("test_random_crop_and_resize_03")
    original_seed = config_get_set_seed(0)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    decode_op = c_vision.Decode()
    random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), max_attempts=1)
    data1 = data1.map(operations=decode_op, input_columns=["image"])
    data1 = data1.map(operations=random_crop_and_resize_op, input_columns=["image"])

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transforms = [
        py_vision.Decode(),
        py_vision.RandomResizedCrop((256, 512), max_attempts=1),
        py_vision.ToTensor()
    ]
    transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
    data2 = data2.map(operations=transform, input_columns=["image"])

    filename1 = "random_crop_and_resize_03_c_result.npz"
    filename2 = "random_crop_and_resize_03_py_result.npz"
    save_and_check_md5(data1, filename1, generate_golden=GENERATE_GOLDEN)
    save_and_check_md5(data2, filename2, generate_golden=GENERATE_GOLDEN)

    # Restore config setting
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
Example #5
def test_random_crop_and_resize_comp(plot=False):
    """
    Test RandomCropAndResize and compare between python and c image augmentation
    """
    logger.info("test_random_crop_and_resize_comp")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    decode_op = c_vision.Decode()
    random_crop_and_resize_op = c_vision.RandomResizedCrop(512, (1, 1), (0.5, 0.5))
    data1 = data1.map(operations=decode_op, input_columns=["image"])
    data1 = data1.map(operations=random_crop_and_resize_op, input_columns=["image"])

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transforms = [
        py_vision.Decode(),
        py_vision.RandomResizedCrop(512, (1, 1), (0.5, 0.5)),
        py_vision.ToTensor()
    ]
    transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
    data2 = data2.map(operations=transform, input_columns=["image"])

    image_c_cropped = []
    image_py_cropped = []
    for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True),
                            data2.create_dict_iterator(num_epochs=1, output_numpy=True)):
        c_image = item1["image"]
        py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_c_cropped.append(c_image)
        image_py_cropped.append(py_image)
        mse = diff_mse(c_image, py_image)
        assert mse < 0.02  # rounding error
    if plot:
        visualize_list(image_c_cropped, image_py_cropped, visualize_mode=2)
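The mse < 0.02 tolerance comes from the uint8 -> float32 -> uint8 round-trip on the python side: ToTensor() divides by 255, and the comparison above multiplies back and truncates. A minimal standalone sketch of that rounding error:

import numpy as np

x = np.arange(256, dtype=np.uint8)
roundtrip = ((x / 255.0) * 255).astype(np.uint8)  # mirrors ToTensor() plus the test's rescaling
print(np.abs(x.astype(int) - roundtrip.astype(int)).max())  # off by at most 1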
Example #6
def test_random_crop_decode_resize_op(plot=False):
    """
    Test RandomCropDecodeResize op
    """
    logger.info("test_random_decode_resize_op")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    decode_op = vision.Decode()
    random_crop_decode_resize_op = vision.RandomCropDecodeResize((256, 512), (1, 1), (0.5, 0.5))
    data1 = data1.map(operations=random_crop_decode_resize_op, input_columns=["image"])

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    random_crop_resize_op = vision.RandomResizedCrop((256, 512), (1, 1), (0.5, 0.5))
    data2 = data2.map(operations=decode_op, input_columns=["image"])
    data2 = data2.map(operations=random_crop_resize_op, input_columns=["image"])

    num_iter = 0
    for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True),
                            data2.create_dict_iterator(num_epochs=1, output_numpy=True)):
        if num_iter > 0:
            break
        image1 = item1["image"]
        image2 = item2["image"]
        mse = diff_mse(image1, image2)
        assert mse == 0
        logger.info("random_crop_decode_resize_op_{}, mse: {}".format(num_iter + 1, mse))
        if plot:
            visualize_image(image1, image2, mse)
        num_iter += 1
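RandomCropDecodeResize consumes the raw encoded buffer directly, which is why data1 above skips the Decode() map. A minimal eager sketch, assuming the op accepts a 1-D encoded array the way Decode() does in Example #8:

import numpy as np
import mindspore.dataset.vision.c_transforms as vision

raw = np.fromfile("../data/dataset/apple.jpg", dtype=np.uint8)
fused_op = vision.RandomCropDecodeResize((256, 512), (1, 1), (0.5, 0.5))
img = fused_op(raw)  # decode, crop, and resize in a single C++ call
assert np.shape(img) == (256, 512, 3)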
Example #7
def create_dataset(args, shuffle=True, max_dataset_size=float("inf")):
    """Create dataset"""
    dataroot = args.dataroot
    phase = args.phase
    batch_size = args.batch_size
    device_num = args.device_num
    rank = args.rank
    cores = multiprocessing.cpu_count()
    num_parallel_workers = min(8, int(cores / device_num))
    image_size = args.image_size
    mean = [0.5 * 255] * 3
    std = [0.5 * 255] * 3
    if phase == "train":
        dataset = UnalignedDataset(dataroot,
                                   phase,
                                   max_dataset_size=max_dataset_size)
        distributed_sampler = DistributedSampler(len(dataset),
                                                 device_num,
                                                 rank,
                                                 shuffle=shuffle)
        ds = de.GeneratorDataset(dataset,
                                 column_names=["image_A", "image_B"],
                                 sampler=distributed_sampler,
                                 num_parallel_workers=num_parallel_workers)
        trans = [
            C.RandomResizedCrop(image_size,
                                scale=(0.5, 1.0),
                                ratio=(0.75, 1.333)),
            C.RandomHorizontalFlip(prob=0.5),
            C.Normalize(mean=mean, std=std),
            C.HWC2CHW()
        ]
        ds = ds.map(operations=trans,
                    input_columns=["image_A"],
                    num_parallel_workers=num_parallel_workers)
        ds = ds.map(operations=trans,
                    input_columns=["image_B"],
                    num_parallel_workers=num_parallel_workers)
        ds = ds.batch(batch_size, drop_remainder=True)
        ds = ds.repeat(1)
    else:
        datadir = os.path.join(dataroot, args.data_dir)
        dataset = ImageFolderDataset(datadir,
                                     max_dataset_size=max_dataset_size)
        ds = de.GeneratorDataset(dataset,
                                 column_names=["image", "image_name"],
                                 num_parallel_workers=num_parallel_workers)
        trans = [
            C.Resize((image_size, image_size)),
            C.Normalize(mean=mean, std=std),
            C.HWC2CHW()
        ]
        ds = ds.map(operations=trans,
                    input_columns=["image"],
                    num_parallel_workers=num_parallel_workers)
        ds = ds.batch(1, drop_remainder=True)
        ds = ds.repeat(1)
    args.dataset_size = len(dataset)
    return ds
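A hypothetical invocation sketch: the args fields below are exactly the ones create_dataset() reads, but their values (and the SimpleNamespace stand-in for the project's argparse object) are assumptions for illustration only:

from types import SimpleNamespace

args = SimpleNamespace(dataroot="./data/horse2zebra", phase="train",
                       batch_size=1, device_num=1, rank=0,
                       image_size=256, data_dir="testA")
train_ds = create_dataset(args, shuffle=True)
print("dataset size:", args.dataset_size)  # create_dataset() stores this on args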
Example #8
def test_random_crop_and_resize_callable():
    """
    Test RandomCropAndResize op is callable
    """
    logger.info("test_random_crop_and_resize_callable")
    img = np.fromfile("../data/dataset/apple.jpg", dtype=np.uint8)
    logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape))

    decode_op = c_vision.Decode()
    img = decode_op(img)

    random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), (2, 2),
                                                           (1, 3))
    img = random_crop_and_resize_op(img)
    assert np.shape(img) == (256, 512, 3)
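The same eager call works on any HWC uint8 array, so no test data file is required; a minimal sketch with a synthetic image (shape and scale/ratio values are illustrative assumptions):

import numpy as np
import mindspore.dataset.vision.c_transforms as c_vision

fake_img = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
op = c_vision.RandomResizedCrop((256, 512), scale=(0.08, 1.0), ratio=(0.75, 1.333))
assert np.shape(op(fake_img)) == (256, 512, 3)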
Example #9
def test_random_crop_and_resize_05_c():
    """
    Test RandomCropAndResize with c_transforms: invalid range of ratio (max<min),
    expected to raise ValueError
    """
    logger.info("test_random_crop_and_resize_05_c")

    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    decode_op = c_vision.Decode()
    try:
        random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), (1, 1), (1, 0.5))
        # If input range of ratio is not in the order of (min, max), ValueError will be raised.
        data = data.map(operations=decode_op, input_columns=["image"])
        data = data.map(operations=random_crop_and_resize_op, input_columns=["image"])
    except ValueError as e:
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert "ratio should be in (min,max) format. Got (max,min)." in str(e)
Example #10
def create_dataset(batch_size,
                   train_data_url='',
                   workers=8,
                   distributed=False):
    if not os.path.exists(train_data_url):
        raise ValueError('Path not exists')
    interpolation = str2MsInter(inter_str)

    c_decode_op = C.Decode()
    type_cast_op = C2.TypeCast(mstype.int32)
    random_resize_crop_op = C.RandomResizedCrop(size=(resize_value,
                                                      resize_value),
                                                scale=scale,
                                                ratio=ratio,
                                                interpolation=interpolation)
    random_horizontal_flip_op = C.RandomHorizontalFlip(0.5)

    efficient_rand_augment = RandAugment()

    image_ops = [c_decode_op, random_resize_crop_op, random_horizontal_flip_op]

    rank_id = get_rank() if distributed else 0
    rank_size = get_group_size() if distributed else 1

    dataset_train = ds.ImageFolderDataset(train_data_url,
                                          num_parallel_workers=workers,
                                          shuffle=True,
                                          num_shards=rank_size,
                                          shard_id=rank_id)
    dataset_train = dataset_train.map(input_columns=["image"],
                                      operations=image_ops,
                                      num_parallel_workers=workers)
    dataset_train = dataset_train.map(input_columns=["label"],
                                      operations=type_cast_op,
                                      num_parallel_workers=workers)
    ds_train = dataset_train.batch(batch_size,
                                   per_batch_map=efficient_rand_augment,
                                   input_columns=["image", "label"],
                                   num_parallel_workers=2,
                                   drop_remainder=True)
    ds_train = ds_train.repeat(1)
    return ds_train
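create_dataset() above leans on module-level configuration (inter_str, resize_value, scale, ratio, str2MsInter, RandAugment) defined elsewhere in the original project. A minimal sketch of what those definitions might look like; every name and value here is an assumption for illustration, not taken from the original source:

from mindspore.dataset.vision.utils import Inter

inter_str = "bilinear"    # hypothetical config values
resize_value = 224
scale = (0.08, 1.0)
ratio = (0.75, 1.333)

def str2MsInter(inter):
    # hypothetical helper: map a config string onto a mindspore Inter enum
    if inter == "bicubic":
        return Inter.BICUBIC
    if inter == "nearest":
        return Inter.NEAREST
    return Inter.BILINEAR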
Example #11
def test_random_crop_and_resize_04_c():
    """
    Test RandomCropAndResize with c_transforms: invalid range of scale (max<min),
    expected to raise ValueError
    """
    logger.info("test_random_crop_and_resize_04_c")

    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR,
                              SCHEMA_DIR,
                              columns_list=["image"],
                              shuffle=False)
    decode_op = c_vision.Decode()
    try:
        # If input range of scale is not in the order of (min, max), ValueError will be raised.
        random_crop_and_resize_op = c_vision.RandomResizedCrop(
            (256, 512), (1, 0.5), (0.5, 0.5))
        data = data.map(operations=decode_op, input_columns=["image"])
        data = data.map(operations=random_crop_and_resize_op,
                        input_columns=["image"])
    except ValueError as e:
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert "Input is not within the required interval of (0 to 16777216)." in str(
            e)
Example #12
def create_dataset(args, dataset_mode, repeat_num=1):
    """
    create a train or evaluate cifar10 dataset for SimCLR
    """
    if args.dataset_name != "cifar10":
        raise ValueError("Unsupported dataset.")
    if dataset_mode in ("train_endcoder", "train_classifier"):
        dataset_path = args.train_dataset_path
    else:
        dataset_path = args.eval_dataset_path
    if args.run_distribute and args.device_target == "Ascend":
        data_set = ds.Cifar10Dataset(dataset_path,
                                     num_parallel_workers=8,
                                     shuffle=True,
                                     num_shards=args.device_num,
                                     shard_id=args.device_id)
    else:
        data_set = ds.Cifar10Dataset(dataset_path,
                                     num_parallel_workers=8,
                                     shuffle=True)
    # define map operations
    trans = []
    if dataset_mode == "train_endcoder":
        if args.use_crop:
            trans += [C.Resize(256, interpolation=Inter.BICUBIC)]
            trans += [
                C.RandomResizedCrop(size=(32, 32),
                                    scale=(0.31, 1),
                                    interpolation=Inter.BICUBIC,
                                    max_attempts=100)
            ]
        if args.use_flip:
            trans += [C.RandomHorizontalFlip(prob=0.5)]
        if args.use_color_jitter:
            scale = 0.6
            color_jitter = C.RandomColorAdjust(0.8 * scale, 0.8 * scale,
                                               0.8 * scale, 0.2 * scale)
            trans += [C2.RandomApply([color_jitter], prob=0.8)]
        if args.use_color_gray:
            trans += [
                py_vision.ToPIL(),
                py_vision.RandomGrayscale(prob=0.2), np.array
            ]  # need to convert PIL image to a NumPy array to pass it to C++ operation
        if args.use_blur:
            trans += [C2.RandomApply([gaussian_blur], prob=0.8)]  # gaussian_blur: op defined at module level in the original source
        if args.use_norm:
            trans += [
                C.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
            ]
        trans += [C2.TypeCast(mstype.float32), C.HWC2CHW()]
    else:
        trans += [C.Resize(32)]
        trans += [C2.TypeCast(mstype.float32)]
        if args.use_norm:
            trans += [
                C.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
            ]
        trans += [C.HWC2CHW()]
    type_cast_op = C2.TypeCast(mstype.int32)
    data_set = data_set.map(operations=type_cast_op,
                            input_columns="label",
                            num_parallel_workers=8)
    data_set = data_set.map(operations=copy_column,
                            input_columns=["image", "label"],
                            output_columns=["image1", "image2", "label"],
                            column_order=["image1", "image2", "label"],
                            num_parallel_workers=8)
    data_set = data_set.map(operations=trans,
                            input_columns=["image1"],
                            num_parallel_workers=8)
    data_set = data_set.map(operations=trans,
                            input_columns=["image2"],
                            num_parallel_workers=8)
    # apply batch operations
    data_set = data_set.batch(args.batch_size, drop_remainder=True)
    # apply dataset repeat operation
    data_set = data_set.repeat(repeat_num)
    return data_set
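The copy_column map above assumes a helper that duplicates the image column so the two SimCLR views can be augmented independently; a minimal sketch (the original project's implementation may differ):

def copy_column(image, label):
    # duplicate the image so ["image", "label"] -> ["image1", "image2", "label"]
    return image.copy(), image.copy(), label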