Code example #1
0
def test_random_grayscale_md5_no_param():
    """
    Verify RandomGrayscale output via md5 checksum when called with defaults.
    """
    logger.info("test_random_grayscale_md5_no_param")
    # Pin seed and worker count so the md5 golden file is reproducible.
    original_seed = config_get_set_seed(0)
    original_workers = config_get_set_num_parallel_workers(1)

    # Build the pipeline: decode -> default RandomGrayscale -> tensor.
    dataset = ds.TFRecordDataset(DATA_DIR,
                                 SCHEMA_DIR,
                                 columns_list=["image"],
                                 shuffle=False)
    compose = mindspore.dataset.transforms.py_transforms.Compose([
        py_vision.Decode(),
        py_vision.RandomGrayscale(),
        py_vision.ToTensor(),
    ])
    dataset = dataset.map(operations=compose, input_columns=["image"])

    # Compare pipeline output against the stored golden md5.
    save_and_check_md5(dataset, "random_grayscale_02_result.npz",
                       generate_golden=GENERATE_GOLDEN)

    # Restore the global dataset config.
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_workers)
Code example #2
0
File: datasets.py  Project: Outliers1106/AVA_cifar
    def __init__(self,
                 data_dir,
                 training=True,
                 use_third_trsfm=False,
                 use_auto_augment=False,
                 num_parallel_workers=8,
                 device_num=1,
                 device_id=0):
        """Build the CIFAR transform pipeline and store dataset settings.

        The pipeline depends on the mode: evaluation uses no augmentation;
        training uses the standard contrastive chain, or (for the third
        view) either the AutoAugment CIFAR-10 policy or RandAugment.
        """
        # CIFAR-10 channel statistics shared by every branch.
        normalize = transforms.Normalize((0.4914, 0.4822, 0.4465),
                                         (0.2023, 0.1994, 0.2010))

        if not training:
            # Evaluation: tensor conversion + normalization only.
            pipeline = [transforms.ToTensor(), normalize]
        elif not use_third_trsfm:
            # Standard contrastive-learning augmentation chain.
            pipeline = [
                transforms.ToPIL(),
                transforms.RandomResizedCrop(size=32, scale=(0.2, 1.)),
                transforms.RandomColorAdjust(0.4, 0.4, 0.4, 0.4),
                transforms.RandomGrayscale(prob=0.2),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ]
        else:
            # Third view: AutoAugment's CIFAR-10 policy or RandAugment.
            if use_auto_augment:
                augment = CIFAR10Policy()
            else:
                augment = RandAugment(n=2, m=10)
            pipeline = [
                transforms.ToPIL(),
                transforms.RandomResizedCrop(size=32, scale=(0.2, 1.)),
                transforms.RandomHorizontalFlip(),
                augment,
                transforms.ToTensor(),
                normalize,
            ]

        self.trsfm = Compose(pipeline)
        self.data_dir = data_dir
        self.num_parallel_workers = num_parallel_workers
        self.device_num = device_num
        self.device_id = device_id
Code example #3
0
def test_random_grayscale_input_grayscale_images():
    """
    Feed 1-channel grayscale images into RandomGrayscale and verify the
    output keeps a single channel while the RGB reference keeps three.
    """
    logger.info("test_random_grayscale_input_grayscale_images")
    original_seed = config_get_set_seed(0)
    original_workers = config_get_set_num_parallel_workers(1)

    def build_pipeline(ops):
        # Helper: fresh TFRecord dataset mapped through the given transforms.
        dataset = ds.TFRecordDataset(DATA_DIR,
                                     SCHEMA_DIR,
                                     columns_list=["image"],
                                     shuffle=False)
        compose = mindspore.dataset.transforms.py_transforms.Compose(ops)
        return dataset.map(operations=compose, input_columns=["image"])

    # Pipeline 1: convert to 1-channel grayscale, then apply RandomGrayscale.
    data1 = build_pipeline([
        py_vision.Decode(),
        py_vision.Grayscale(1),
        # RandomGrayscale receives a grayscale (single-channel) input here.
        py_vision.RandomGrayscale(0.5),
        py_vision.ToTensor(),
    ])

    # Pipeline 2: untouched RGB reference images.
    data2 = build_pipeline([py_vision.Decode(), py_vision.ToTensor()])

    image_gray = []
    image = []
    iter1 = data1.create_dict_iterator(num_epochs=1, output_numpy=True)
    iter2 = data2.create_dict_iterator(num_epochs=1, output_numpy=True)
    for item1, item2 in zip(iter1, iter2):
        gray = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        rgb = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_gray.append(gray)
        image.append(rgb)

        # Grayscale branch keeps one channel; the reference stays RGB.
        assert len(gray.shape) == 3
        assert gray.shape[2] == 1
        assert len(rgb.shape) == 3
        assert rgb.shape[2] == 3

    # Restore the global dataset config.
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_workers)
Code example #4
0
def test_random_grayscale_valid_prob(plot=False):
    """
    Apply RandomGrayscale with prob=1 and collect both the grayscale output
    and the untouched RGB originals (optionally plotting them side by side).
    """
    logger.info("test_random_grayscale_valid_prob")

    # Grayscale pipeline: prob=1 forces every image to be converted.
    data1 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    gray_ops = mindspore.dataset.transforms.py_transforms.Compose([
        py_vision.Decode(),
        py_vision.RandomGrayscale(1),
        py_vision.ToTensor(),
    ])
    data1 = data1.map(operations=gray_ops, input_columns=["image"])

    # Reference pipeline: decode + tensor only, no augmentation.
    data2 = ds.TFRecordDataset(DATA_DIR,
                               SCHEMA_DIR,
                               columns_list=["image"],
                               shuffle=False)
    ref_ops = mindspore.dataset.transforms.py_transforms.Compose(
        [py_vision.Decode(), py_vision.ToTensor()])
    data2 = data2.map(operations=ref_ops, input_columns=["image"])

    image_gray = []
    image = []
    iter1 = data1.create_dict_iterator(num_epochs=1, output_numpy=True)
    iter2 = data2.create_dict_iterator(num_epochs=1, output_numpy=True)
    for item1, item2 in zip(iter1, iter2):
        # CHW float tensors back to HWC uint8 images for visualization.
        image_gray.append(
            (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8))
        image.append(
            (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8))
    if plot:
        visualize_list(image, image_gray)
Code example #5
0
def test_random_grayscale_invalid_param():
    """
    Test RandomGrayscale: invalid parameter given, expect to raise error.

    RandomGrayscale(1.5) is outside the valid [0.0, 1.0] probability range,
    so building the transform must raise ValueError with the documented
    message.
    """
    logger.info("test_random_grayscale_invalid_param")

    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR,
                              SCHEMA_DIR,
                              columns_list=["image"],
                              shuffle=False)
    try:
        transforms = [
            py_vision.Decode(),
            py_vision.RandomGrayscale(1.5),
            py_vision.ToTensor()
        ]
        transform = mindspore.dataset.transforms.py_transforms.Compose(
            transforms)
        data = data.map(operations=transform, input_columns=["image"])
    except ValueError as e:
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert "Input prob is not within the required interval of (0.0 to 1.0)." in str(
            e)
    else:
        # Bug fix: previously the test passed silently when no exception was
        # raised; an invalid prob that goes undetected must fail the test.
        raise AssertionError(
            "RandomGrayscale(1.5) should have raised ValueError")
Code example #6
0
def create_dataset(args, dataset_mode, repeat_num=1):
    """Build a train or evaluate CIFAR-10 pipeline for SimCLR.

    Each record's image is duplicated into two columns (image1/image2) and
    each copy receives an independent pass through the augmentation chain,
    producing the two views SimCLR contrasts.
    """
    if args.dataset_name != "cifar10":
        raise ValueError("Unsupported dataset.")

    # NOTE(review): "train_endcoder" spelling kept as-is — callers pass it.
    if dataset_mode in ("train_endcoder", "train_classifier"):
        dataset_path = args.train_dataset_path
    else:
        dataset_path = args.eval_dataset_path

    # Shard the dataset only for distributed Ascend runs.
    if args.run_distribute and args.device_target == "Ascend":
        data_set = ds.Cifar10Dataset(dataset_path,
                                     num_parallel_workers=8,
                                     shuffle=True,
                                     num_shards=args.device_num,
                                     shard_id=args.device_id)
    else:
        data_set = ds.Cifar10Dataset(dataset_path,
                                     num_parallel_workers=8,
                                     shuffle=True)

    # Assemble the per-image transform chain.
    trans = []
    if dataset_mode == "train_endcoder":
        # Encoder training: optional augmentations toggled by args flags.
        if args.use_crop:
            trans.append(C.Resize(256, interpolation=Inter.BICUBIC))
            trans.append(
                C.RandomResizedCrop(size=(32, 32),
                                    scale=(0.31, 1),
                                    interpolation=Inter.BICUBIC,
                                    max_attempts=100))
        if args.use_flip:
            trans.append(C.RandomHorizontalFlip(prob=0.5))
        if args.use_color_jitter:
            scale = 0.6
            color_jitter = C.RandomColorAdjust(0.8 * scale, 0.8 * scale,
                                               0.8 * scale, 0.2 * scale)
            trans.append(C2.RandomApply([color_jitter], prob=0.8))
        if args.use_color_gray:
            # np.array converts the PIL image back to a NumPy array so the
            # subsequent C++ operations can consume it.
            trans.extend([
                py_vision.ToPIL(),
                py_vision.RandomGrayscale(prob=0.2), np.array
            ])
        if args.use_blur:
            trans.append(C2.RandomApply([gaussian_blur], prob=0.8))
        if args.use_norm:
            trans.append(
                C.Normalize([0.4914, 0.4822, 0.4465],
                            [0.2023, 0.1994, 0.2010]))
        trans.extend([C2.TypeCast(mstype.float32), C.HWC2CHW()])
    else:
        # Classifier training / evaluation: deterministic resize + cast.
        trans.append(C.Resize(32))
        trans.append(C2.TypeCast(mstype.float32))
        if args.use_norm:
            trans.append(
                C.Normalize([0.4914, 0.4822, 0.4465],
                            [0.2023, 0.1994, 0.2010]))
        trans.append(C.HWC2CHW())

    # Labels to int32 for the loss computation.
    data_set = data_set.map(operations=C2.TypeCast(mstype.int32),
                            input_columns="label",
                            num_parallel_workers=8)
    # Duplicate the image column so the two views can be augmented separately.
    data_set = data_set.map(operations=copy_column,
                            input_columns=["image", "label"],
                            output_columns=["image1", "image2", "label"],
                            column_order=["image1", "image2", "label"],
                            num_parallel_workers=8)
    for view_column in ("image1", "image2"):
        data_set = data_set.map(operations=trans,
                                input_columns=[view_column],
                                num_parallel_workers=8)

    # Batch, then repeat for the requested number of epochs.
    data_set = data_set.batch(args.batch_size, drop_remainder=True)
    return data_set.repeat(repeat_num)