Code Example #1
def test_auto_contrast_invalid_cutoff_param_py():
    """
    Test AutoContrast python Op with invalid cutoff parameter
    """
    logger.info("Test AutoContrast python Op with invalid cutoff parameter")
    try:
        ds = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
        ds = ds.map(operations=[
            mindspore.dataset.transforms.py_transforms.Compose([
                F.Decode(),
                F.Resize((224, 224)),
                F.AutoContrast(cutoff=-10.0),
                F.ToTensor()
            ])
        ],
                    input_columns=["image"])
    except ValueError as error:
        logger.info("Got an exception in DE: {}".format(str(error)))
        assert "Input cutoff is not within the required interval of (0 to 100)." in str(
            error)
    try:
        ds = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
        ds = ds.map(operations=[
            mindspore.dataset.transforms.py_transforms.Compose([
                F.Decode(),
                F.Resize((224, 224)),
                F.AutoContrast(cutoff=120.0),
                F.ToTensor()
            ])
        ],
                    input_columns=["image"])
    except ValueError as error:
        logger.info("Got an exception in DE: {}".format(str(error)))
        assert "Input cutoff is not within the required interval of (0 to 100)." in str(
            error)
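These snippets all assume a shared test harness that this listing omits. The sketch below reconstructs the minimum setup implied by the code; the module aliases are inferred from usage, and DATA_DIR / GENERATE_GOLDEN are placeholders rather than the suite's actual values.

# Minimal setup assumed by the snippets in this listing (aliases inferred from usage).
import numpy as np

import mindspore
import mindspore.dataset as ds                            # module-level config (ds.config.*)
import mindspore.dataset.engine as de                     # dataset constructors (de.ImageFolderDataset)
import mindspore.dataset.transforms.py_transforms         # Compose for Python ops
import mindspore.dataset.transforms.c_transforms as C2    # generic C ops (TypeCast)
import mindspore.dataset.vision.c_transforms as C         # C++ vision ops
import mindspore.dataset.vision.py_transforms as F        # Python vision ops
from mindspore import log as logger

DATA_DIR = "../data/dataset/testImageNetData/train/"      # placeholder test-image folder
GENERATE_GOLDEN = False                                   # regenerate golden files when True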
Code Example #2
def test_auto_contrast_invalid_cutoff_param_c():
    """
    Test AutoContrast C Op with invalid cutoff parameter
    """
    logger.info("Test AutoContrast C Op with invalid cutoff parameter")
    try:
        ds = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
        ds = ds.map(operations=[
            C.Decode(),
            C.Resize((224, 224)), lambda img: np.array(img[:, :, 0])
        ],
                    input_columns=["image"])
        # invalid cutoff
        ds = ds.map(operations=C.AutoContrast(cutoff=-10.0),
                    input_columns="image")
    except ValueError as error:
        logger.info("Got an exception in DE: {}".format(str(error)))
        assert "Input cutoff is not within the required interval of (0 to 100)." in str(
            error)
    try:
        ds = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
        ds = ds.map(operations=[
            C.Decode(),
            C.Resize((224, 224)), lambda img: np.array(img[:, :, 0])
        ],
                    input_columns=["image"])
        # invalid cutoff
        ds = ds.map(operations=C.AutoContrast(cutoff=120.0),
                    input_columns="image")
    except ValueError as error:
        logger.info("Got an exception in DE: {}".format(str(error)))
        assert "Input cutoff is not within the required interval of (0 to 100)." in str(
            error)
Code Example #3
def test_auto_contrast_invalid_ignore_param_py():
    """
    Test AutoContrast python Op with invalid ignore parameter
    """
    logger.info("Test AutoContrast python Op with invalid ignore parameter")
    try:
        ds = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
        ds = ds.map(operations=[
            mindspore.dataset.transforms.py_transforms.Compose([
                F.Decode(),
                F.Resize((224, 224)),
                F.AutoContrast(ignore=255.5),
                F.ToTensor()
            ])
        ],
                    input_columns=["image"])
    except TypeError as error:
        logger.info("Got an exception in DE: {}".format(str(error)))
        assert "Argument ignore with value 255.5 is not of type" in str(error)
    try:
        ds = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
        ds = ds.map(operations=[
            mindspore.dataset.transforms.py_transforms.Compose([
                F.Decode(),
                F.Resize((224, 224)),
                F.AutoContrast(ignore=(10, 100)),
                F.ToTensor()
            ])
        ],
                    input_columns=["image"])
    except TypeError as error:
        logger.info("Got an exception in DE: {}".format(str(error)))
        assert "Argument ignore with value (10,100) is not of type" in str(
            error)
Code Example #4
def test_auto_contrast_invalid_ignore_param_c():
    """
    Test AutoContrast C Op with invalid ignore parameter
    """
    logger.info("Test AutoContrast C Op with invalid ignore parameter")
    try:
        ds = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
        ds = ds.map(operations=[
            C.Decode(),
            C.Resize((224, 224)), lambda img: np.array(img[:, :, 0])
        ],
                    input_columns=["image"])
        # invalid ignore
        ds = ds.map(operations=C.AutoContrast(ignore=255.5),
                    input_columns="image")
    except TypeError as error:
        logger.info("Got an exception in DE: {}".format(str(error)))
        assert "Argument ignore with value 255.5 is not of type" in str(error)
    try:
        ds = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
        ds = ds.map(operations=[
            C.Decode(),
            C.Resize((224, 224)), lambda img: np.array(img[:, :, 0])
        ],
                    input_columns=["image"])
        # invalid ignore
        ds = ds.map(operations=C.AutoContrast(ignore=(10, 100)),
                    input_columns="image")
    except TypeError as error:
        logger.info("Got an exception in DE: {}".format(str(error)))
        assert "Argument ignore with value (10,100) is not of type" in str(
            error)
Code Example #5
def test_random_color_py(degrees=(0.1, 1.9), plot=False):
    """
    Test Python RandomColor
    """
    logger.info("Test RandomColor")

    # Original Images
    data = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)

    transforms_original = mindspore.dataset.transforms.py_transforms.Compose(
        [F.Decode(), F.Resize((224, 224)),
         F.ToTensor()])

    ds_original = data.map(operations=transforms_original,
                           input_columns="image")

    ds_original = ds_original.batch(512)

    for idx, (image, _) in enumerate(ds_original):
        if idx == 0:
            images_original = np.transpose(image.asnumpy(), (0, 2, 3, 1))
        else:
            images_original = np.append(images_original,
                                        np.transpose(image.asnumpy(),
                                                     (0, 2, 3, 1)),
                                        axis=0)

    # Random Color Adjusted Images
    data = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)

    transforms_random_color = mindspore.dataset.transforms.py_transforms.Compose(
        [
            F.Decode(),
            F.Resize((224, 224)),
            F.RandomColor(degrees=degrees),
            F.ToTensor()
        ])

    ds_random_color = data.map(operations=transforms_random_color,
                               input_columns="image")

    ds_random_color = ds_random_color.batch(512)

    for idx, (image, _) in enumerate(ds_random_color):
        if idx == 0:
            images_random_color = np.transpose(image.asnumpy(), (0, 2, 3, 1))
        else:
            images_random_color = np.append(images_random_color,
                                            np.transpose(
                                                image.asnumpy(), (0, 2, 3, 1)),
                                            axis=0)

    num_samples = images_original.shape[0]
    mse = np.zeros(num_samples)
    for i in range(num_samples):
        mse[i] = diff_mse(images_random_color[i], images_original[i])
    logger.info("MSE= {}".format(str(np.mean(mse))))

    if plot:
        visualize_list(images_original, images_random_color)
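diff_mse and visualize_list come from the suite's shared utilities, which this listing does not show. A minimal sketch of their behavior, assuming uint8 or normalized float image arrays; the real helpers may differ in detail:

import matplotlib.pyplot as plt

def diff_mse(in1, in2):
    # Sketch: mean squared error between two images, scaled so identical
    # images give 0.0 (the actual test utility may differ).
    mse = (np.square(in1.astype(float) / 255 - in2.astype(float) / 255)).mean()
    return mse * 100

def visualize_list(image_list_1, image_list_2, visualize_mode=1):
    # Sketch: show the two image lists in two rows for manual inspection;
    # visualize_mode is accepted for API compatibility but not interpreted here.
    num = len(image_list_1)
    for i in range(num):
        plt.subplot(2, num, i + 1)
        plt.imshow(image_list_1[i])
        plt.subplot(2, num, num + i + 1)
        plt.imshow(image_list_2[i])
    plt.show()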
Code Example #6
File: dataset.py Project: zhangjinrong/mindspore
def create_dataset_py(dataset_path, do_train, repeat_num=1, batch_size=32, target="Ascend"):
    """
    create a train or eval dataset

    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): whether dataset is used for train or eval.
        repeat_num(int): the repeat times of dataset. Default: 1
        batch_size(int): the batch size of dataset. Default: 32
        target(str): the device target. Default: Ascend

    Returns:
        dataset
    """
    if target == "Ascend":
        device_num = int(os.getenv("RANK_SIZE"))
        rank_id = int(os.getenv("RANK_ID"))
    else:
        init()
        rank_id = get_rank()
        device_num = get_group_size()

    if do_train:
        if device_num == 1:
            ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True)
        else:
            ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True,
                                       num_shards=device_num, shard_id=rank_id)
    else:
        ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=False)

    image_size = 224

    # define map operations
    decode_op = P.Decode()
    resize_crop_op = P.RandomResizedCrop(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333))
    horizontal_flip_op = P.RandomHorizontalFlip(prob=0.5)

    resize_op = P.Resize(256)
    center_crop = P.CenterCrop(image_size)
    to_tensor = P.ToTensor()
    normalize_op = P.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    # select transforms for train or eval
    if do_train:
        trans = [decode_op, resize_crop_op, horizontal_flip_op, to_tensor, normalize_op]
    else:
        trans = [decode_op, resize_op, center_crop, to_tensor, normalize_op]

    compose = P2.Compose(trans)
    ds = ds.map(operations=compose, input_columns="image", num_parallel_workers=8, python_multiprocessing=True)

    # apply batch operations
    ds = ds.batch(batch_size, drop_remainder=True)

    # apply dataset repeat operation
    ds = ds.repeat(repeat_num)

    return ds
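A hypothetical invocation of create_dataset_py; the path and environment values are placeholders, and the snippet assumes the module-level imports the function relies on (os, de, and init/get_rank/get_group_size from mindspore.communication.management):

# Hypothetical usage: single-card Ascend run (placeholder path and env values).
os.environ["RANK_SIZE"] = "1"
os.environ["RANK_ID"] = "0"
train_ds = create_dataset_py("/path/to/imagenet/train", do_train=True,
                             repeat_num=1, batch_size=32, target="Ascend")
print(train_ds.get_dataset_size())  # number of batches per epoch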
Code Example #7
File: test_invert.py Project: stjordanis/mindspore
def test_invert_py(plot=False):
    """
    Test Invert python op
    """
    logger.info("Test Invert Python op")

    # Original Images
    ds = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)

    transforms_original = mindspore.dataset.transforms.py_transforms.Compose(
        [F.Decode(), F.Resize((224, 224)),
         F.ToTensor()])

    ds_original = ds.map(operations=transforms_original, input_columns="image")

    ds_original = ds_original.batch(512)

    for idx, (image, _) in enumerate(ds_original):
        if idx == 0:
            images_original = np.transpose(image.asnumpy(), (0, 2, 3, 1))
        else:
            images_original = np.append(images_original,
                                        np.transpose(image.asnumpy(),
                                                     (0, 2, 3, 1)),
                                        axis=0)

    # Color Inverted Images
    ds = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)

    transforms_invert = mindspore.dataset.transforms.py_transforms.Compose(
        [F.Decode(),
         F.Resize((224, 224)),
         F.Invert(),
         F.ToTensor()])

    ds_invert = ds.map(operations=transforms_invert, input_columns="image")

    ds_invert = ds_invert.batch(512)

    for idx, (image, _) in enumerate(ds_invert):
        if idx == 0:
            images_invert = np.transpose(image.asnumpy(), (0, 2, 3, 1))
        else:
            images_invert = np.append(images_invert,
                                      np.transpose(image.asnumpy(),
                                                   (0, 2, 3, 1)),
                                      axis=0)

    num_samples = images_original.shape[0]
    mse = np.zeros(num_samples)
    for i in range(num_samples):
        mse[i] = np.mean((images_invert[i] - images_original[i])**2)
    logger.info("MSE= {}".format(str(np.mean(mse))))

    if plot:
        visualize_list(images_original, images_invert)
Code Example #8
def test_random_sharpness_c(degrees=(1.6, 1.6), plot=False):
    """
    Test RandomSharpness cpp op
    """
    print(degrees)
    logger.info("Test RandomSharpness cpp op")

    # Original Images
    data = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)

    transforms_original = [C.Decode(), C.Resize((224, 224))]

    ds_original = data.map(operations=transforms_original,
                           input_columns="image")

    ds_original = ds_original.batch(512)

    for idx, (image, _) in enumerate(
            ds_original.create_tuple_iterator(output_numpy=True)):
        if idx == 0:
            images_original = image
        else:
            images_original = np.append(images_original, image, axis=0)

    # Random Sharpness Adjusted Images
    data = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)

    c_op = C.RandomSharpness()
    if degrees is not None:
        c_op = C.RandomSharpness(degrees)

    transforms_random_sharpness = [C.Decode(), C.Resize((224, 224)), c_op]

    ds_random_sharpness = data.map(operations=transforms_random_sharpness,
                                   input_columns="image")

    ds_random_sharpness = ds_random_sharpness.batch(512)

    for idx, (image, _) in enumerate(
            ds_random_sharpness.create_tuple_iterator(output_numpy=True)):
        if idx == 0:
            images_random_sharpness = image
        else:
            images_random_sharpness = np.append(images_random_sharpness,
                                                image,
                                                axis=0)

    num_samples = images_original.shape[0]
    mse = np.zeros(num_samples)
    for i in range(num_samples):
        mse[i] = diff_mse(images_random_sharpness[i], images_original[i])

    logger.info("MSE= {}".format(str(np.mean(mse))))

    if plot:
        visualize_list(images_original, images_random_sharpness)
Code Example #9
def create_dataset(dataset_path, config, do_train, repeat_num=1):
    """
    create a train or eval dataset

    Args:
        dataset_path(string): the path of dataset.
        config(dict): config of dataset.
        do_train(bool): whether dataset is used for train or eval.
        repeat_num(int): the repeat times of dataset. Default: 1.

    Returns:
        dataset
    """
    rank = config.rank
    group_size = config.group_size
    if group_size == 1:
        ds = de.ImageFolderDataset(dataset_path,
                                   num_parallel_workers=config.work_nums,
                                   shuffle=True)
    else:
        ds = de.ImageFolderDataset(dataset_path,
                                   num_parallel_workers=config.work_nums,
                                   shuffle=True,
                                   num_shards=group_size,
                                   shard_id=rank)
    # define map operations
    if do_train:
        trans = [
            C.RandomCropDecodeResize(config.image_size),
            C.RandomHorizontalFlip(prob=0.5),
            C.RandomColorAdjust(brightness=0.4, saturation=0.5)  # fast mode
        ]
    else:
        trans = [
            C.Decode(),
            C.Resize(int(config.image_size / 0.875)),
            C.CenterCrop(config.image_size)
        ]
    trans += [
        C.Rescale(1.0 / 255.0, 0.0),
        C.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
        C.HWC2CHW()
    ]
    type_cast_op = C2.TypeCast(mstype.int32)
    ds = ds.map(operations=trans,
                input_columns="image",
                num_parallel_workers=config.work_nums)
    ds = ds.map(operations=type_cast_op,
                input_columns="label",
                num_parallel_workers=config.work_nums)
    # apply batch operations
    ds = ds.batch(config.batch_size, drop_remainder=True)
    # apply dataset repeat operation
    ds = ds.repeat(repeat_num)
    return ds
Code Example #10
File: dataset.py Project: zhangjinrong/mindspore
def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32):
    """
    Create a train or eval dataset.

    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): whether dataset is used for train or eval.
        repeat_num(int): the repeat times of dataset. Default: 1
        batch_size(int): the batch size of dataset. Default: 32

    Returns:
        dataset
    """

    device_num = int(os.getenv("RANK_SIZE"))
    rank_id = int(os.getenv("RANK_ID"))
    if device_num == 1:
        ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True)
    else:
        ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True,
                                   num_shards=device_num, shard_id=rank_id)

    image_size = 224
    mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
    std = [0.229 * 255, 0.224 * 255, 0.225 * 255]

    # define map operations
    if do_train:
        trans = [
            C.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
            C.RandomHorizontalFlip(prob=0.5),
            C.Normalize(mean=mean, std=std),
            C.HWC2CHW()
        ]
    else:
        trans = [
            C.Decode(),
            C.Resize((256, 256)),
            C.CenterCrop(image_size),
            C.Normalize(mean=mean, std=std),
            C.HWC2CHW()
        ]

    type_cast_op = C2.TypeCast(mstype.int32)

    ds = ds.map(operations=trans, input_columns="image", num_parallel_workers=8)
    ds = ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=8)

    # apply batch operations
    ds = ds.batch(batch_size, drop_remainder=True)

    # apply dataset repeat operation
    ds = ds.repeat(repeat_num)
    return ds
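Note that int(os.getenv("RANK_SIZE")) raises a TypeError when the variable is unset. Code Example #30 below guards against this with defaults; the same defensive pattern (an adjustment, not part of this snippet) would be:

# Defensive variant: fall back to a single device when the env vars are absent.
device_num = int(os.getenv("RANK_SIZE", "1"))
rank_id = int(os.getenv("RANK_ID", "0"))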
Code Example #11
File: test_invert.py Project: stjordanis/mindspore
def test_invert_py_c(plot=False):
    """
    Test Invert Cpp op and python op
    """
    logger.info("Test Invert cpp and python op")

    # Invert Images in cpp
    ds = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
    ds = ds.map(operations=[C.Decode(), C.Resize((224, 224))],
                input_columns=["image"])

    ds_c_invert = ds.map(operations=C.Invert(), input_columns="image")

    ds_c_invert = ds_c_invert.batch(512)

    for idx, (image, _) in enumerate(ds_c_invert):
        if idx == 0:
            images_c_invert = image.asnumpy()
        else:
            images_c_invert = np.append(images_c_invert,
                                        image.asnumpy(),
                                        axis=0)

    # invert images in python
    ds = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
    ds = ds.map(operations=[C.Decode(), C.Resize((224, 224))],
                input_columns=["image"])

    transforms_p_invert = mindspore.dataset.transforms.py_transforms.Compose(
        [lambda img: img.astype(np.uint8),
         F.ToPIL(),
         F.Invert(), np.array])

    ds_p_invert = ds.map(operations=transforms_p_invert, input_columns="image")

    ds_p_invert = ds_p_invert.batch(512)

    for idx, (image, _) in enumerate(ds_p_invert):
        if idx == 0:
            images_p_invert = image.asnumpy()
        else:
            images_p_invert = np.append(images_p_invert,
                                        image.asnumpy(),
                                        axis=0)

    num_samples = images_c_invert.shape[0]
    mse = np.zeros(num_samples)
    for i in range(num_samples):
        mse[i] = diff_mse(images_p_invert[i], images_c_invert[i])
    logger.info("MSE= {}".format(str(np.mean(mse))))

    if plot:
        visualize_list(images_c_invert, images_p_invert, visualize_mode=2)
Code Example #12
File: dataset.py Project: peng-zhihui/mindspore
def create_dataset(dataset_path,
                   do_train,
                   batch_size=16,
                   device_num=1,
                   rank=0):
    """
    create a train or eval dataset

    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): whether dataset is used for train or eval.
        batch_size(int): the batch size of dataset. Default: 16.
        device_num (int): Number of shards that the dataset should be divided into (default=1).
        rank (int): The shard ID within num_shards (default=0).

    Returns:
        dataset
    """
    if device_num == 1:
        ds = de.ImageFolderDataset(dataset_path,
                                   num_parallel_workers=8,
                                   shuffle=True)
    else:
        ds = de.ImageFolderDataset(dataset_path,
                                   num_parallel_workers=8,
                                   shuffle=True,
                                   num_shards=device_num,
                                   shard_id=rank)
    # define map operations
    if do_train:
        trans = [
            C.RandomCropDecodeResize(299),
            C.RandomHorizontalFlip(prob=0.5),
            C.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4)
        ]
    else:
        trans = [C.Decode(), C.Resize(320), C.CenterCrop(299)]
    trans += [
        C.Normalize(mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5]),
        C.HWC2CHW(),
        C2.TypeCast(mstype.float32)
    ]

    type_cast_op = C2.TypeCast(mstype.int32)
    ds = ds.map(input_columns="image",
                operations=trans,
                num_parallel_workers=8)
    ds = ds.map(input_columns="label",
                operations=type_cast_op,
                num_parallel_workers=8)
    # apply batch operations
    ds = ds.batch(batch_size, drop_remainder=True)
    return ds
Code Example #13
def create_dataset4(dataset_path, do_train, repeat_num=1, batch_size=32, target="Ascend"):
    """
    create a train or eval imagenet2012 dataset for se-resnet50

    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): whether dataset is used for train or eval.
        repeat_num(int): the repeat times of dataset. Default: 1
        batch_size(int): the batch size of dataset. Default: 32
        target(str): the device target. Default: Ascend

    Returns:
        dataset
    """
    if target == "Ascend":
        device_num, rank_id = _get_rank_info()
    else:
        # fall back to single-device values when not running on Ascend
        device_num, rank_id = 1, 0
    if device_num == 1:
        ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=12, shuffle=True)
    else:
        ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=12, shuffle=True,
                                   num_shards=device_num, shard_id=rank_id)
    image_size = 224
    mean = [123.68, 116.78, 103.94]
    std = [1.0, 1.0, 1.0]

    # define map operations
    if do_train:
        trans = [
            C.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
            C.RandomHorizontalFlip(prob=0.5),
            C.Normalize(mean=mean, std=std),
            C.HWC2CHW()
        ]
    else:
        trans = [
            C.Decode(),
            C.Resize(292),
            C.CenterCrop(256),
            C.Normalize(mean=mean, std=std),
            C.HWC2CHW()
        ]

    type_cast_op = C2.TypeCast(mstype.int32)
    ds = ds.map(operations=trans, input_columns="image", num_parallel_workers=12)
    ds = ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=12)

    # apply batch operations
    ds = ds.batch(batch_size, drop_remainder=True)

    # apply dataset repeat operation
    ds = ds.repeat(repeat_num)

    return ds
Code Example #14
File: dataset.py Project: zhangjinrong/mindspore
def create_dataset(dataset_path, do_train, rank, group_size, repeat_num=1):
    """
    create a train or eval dataset

    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): whether dataset is used for train or eval.
        rank (int): The shard ID within num_shards (default=None).
        group_size (int): Number of shards that the dataset should be divided into (default=None).
        repeat_num(int): the repeat times of dataset. Default: 1.

    Returns:
        dataset
    """
    if group_size == 1:
        ds = de.ImageFolderDataset(dataset_path,
                                   num_parallel_workers=cfg.work_nums,
                                   shuffle=True)
    else:
        ds = de.ImageFolderDataset(dataset_path,
                                   num_parallel_workers=cfg.work_nums,
                                   shuffle=True,
                                   num_shards=group_size,
                                   shard_id=rank)
    # define map operations
    if do_train:
        trans = [
            C.RandomCropDecodeResize(299,
                                     scale=(0.08, 1.0),
                                     ratio=(0.75, 1.333)),
            C.RandomHorizontalFlip(prob=0.5),
            C.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4)
        ]
    else:
        trans = [C.Decode(), C.Resize(299), C.CenterCrop(299)]
    trans += [
        C.Rescale(1.0 / 255.0, 0.0),
        C.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        C.HWC2CHW()
    ]
    type_cast_op = C2.TypeCast(mstype.int32)
    ds = ds.map(operations=trans,
                input_columns="image",
                num_parallel_workers=cfg.work_nums)
    ds = ds.map(operations=type_cast_op,
                input_columns="label",
                num_parallel_workers=cfg.work_nums)
    # apply batch operations
    ds = ds.batch(cfg.batch_size, drop_remainder=True)
    # apply dataset repeat operation
    ds = ds.repeat(repeat_num)
    return ds
Code Example #15
def create_dataset(dataset_path,
                   do_train,
                   repeat_num=1,
                   batch_size=32,
                   target="GPU",
                   dtype="fp16",
                   device_num=1):
    if device_num == 1:
        ds = de.ImageFolderDataset(dataset_path,
                                   num_parallel_workers=4,
                                   shuffle=True)
    else:
        ds = de.ImageFolderDataset(dataset_path,
                                   num_parallel_workers=4,
                                   shuffle=True,
                                   num_shards=device_num,
                                   shard_id=get_rank())
    image_size = 224
    mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
    std = [0.229 * 255, 0.224 * 255, 0.225 * 255]

    # define map operations
    normalize_op = C.Normalize(mean=mean, std=std)
    if dtype == "fp16":
        normalize_op = C.NormalizePad(mean=mean, std=std, dtype="float16")
    if do_train:
        trans = [
            C.RandomCropDecodeResize(image_size,
                                     scale=(0.08, 1.0),
                                     ratio=(0.75, 1.333)),
            C.RandomHorizontalFlip(prob=0.5),
            normalize_op,
        ]
    else:
        trans = [
            C.Decode(),
            C.Resize(256),
            C.CenterCrop(image_size),
            normalize_op,
        ]
    if dtype == "fp32":
        trans.append(C.HWC2CHW())
    ds = ds.map(operations=trans,
                input_columns="image",
                num_parallel_workers=8)
    # apply batch operations
    ds = ds.batch(batch_size, drop_remainder=True)
    # apply dataset repeat operation
    if repeat_num > 1:
        ds = ds.repeat(repeat_num)

    return ds
Code Example #16
def test_equalize_c(plot=False):
    """
    Test Equalize Cpp op
    """
    logger.info("Test Equalize cpp op")

    # Original Images
    ds = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)

    transforms_original = [C.Decode(), C.Resize(size=[224, 224])]

    ds_original = ds.map(operations=transforms_original, input_columns="image")

    ds_original = ds_original.batch(512)

    for idx, (image, _) in enumerate(ds_original):
        if idx == 0:
            images_original = image.asnumpy()
        else:
            images_original = np.append(images_original,
                                        image.asnumpy(),
                                        axis=0)

    # Equalize Images
    ds = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)

    transform_equalize = [C.Decode(), C.Resize(size=[224, 224]),
                          C.Equalize()]

    ds_equalize = ds.map(operations=transform_equalize, input_columns="image")

    ds_equalize = ds_equalize.batch(512)

    for idx, (image, _) in enumerate(ds_equalize):
        if idx == 0:
            images_equalize = image.asnumpy()
        else:
            images_equalize = np.append(images_equalize,
                                        image.asnumpy(),
                                        axis=0)
    if plot:
        visualize_list(images_original, images_equalize)

    num_samples = images_original.shape[0]
    mse = np.zeros(num_samples)
    for i in range(num_samples):
        mse[i] = diff_mse(images_equalize[i], images_original[i])
    logger.info("MSE= {}".format(str(np.mean(mse))))
Code Example #17
def test_random_sharpness_invalid_params():
    """
    Test RandomSharpness with invalid input parameters.
    """
    logger.info("Test RandomSharpness with invalid input parameters.")
    try:
        data = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
        data = data.map(operations=[
            C.Decode(),
            C.Resize((224, 224)),
            C.RandomSharpness(10)
        ],
                        input_columns=["image"])
    except TypeError as error:
        logger.info("Got an exception in DE: {}".format(str(error)))
        assert "tuple" in str(error)

    try:
        data = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
        data = data.map(operations=[
            C.Decode(),
            C.Resize((224, 224)),
            C.RandomSharpness((-10, 10))
        ],
                        input_columns=["image"])
    except ValueError as error:
        logger.info("Got an exception in DE: {}".format(str(error)))
        assert "interval" in str(error)

    try:
        data = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
        data = data.map(operations=[
            C.Decode(),
            C.Resize((224, 224)),
            C.RandomSharpness((10, 5))
        ],
                        input_columns=["image"])
    except ValueError as error:
        logger.info("Got an exception in DE: {}".format(str(error)))
        assert "(min,max)" in str(error)
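These negative tests pass silently when no exception is raised, because the assertion lives inside the except block. A stricter sketch using pytest.raises, assuming (as the c_transforms generally do) that argument validation happens at construction time:

import pytest

def test_random_sharpness_invalid_params_strict():
    # Sketch: pytest.raises fails the test if the expected error never fires.
    with pytest.raises(TypeError) as error_info:
        C.RandomSharpness(10)
    assert "tuple" in str(error_info.value)

    with pytest.raises(ValueError) as error_info:
        C.RandomSharpness((-10, 10))
    assert "interval" in str(error_info.value)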
Code Example #18
def create_dataset(data_path, is_train=True, batch_size=32):
    # import
    import mindspore.common.dtype as mstype
    import mindspore.dataset.engine as de
    import mindspore.dataset.transforms.c_transforms as C2
    import mindspore.dataset.vision.c_transforms as C
    from mindspore.common import set_seed

    set_seed(1)

    # shard
    num_shards = shard_id = None
    rank_size = os.getenv("RANK_SIZE")
    rank_id = os.getenv("RANK_ID")
    if rank_size is not None and rank_id is not None:
        num_shards = int(rank_size)
        shard_id = int(rank_id)

    # define dataset
    data_path = os.path.join(data_path, "train" if is_train else "val")
    ds = de.ImageFolderDataset(data_path, shuffle=True, num_parallel_workers=8,
                               num_shards=num_shards, shard_id=shard_id, num_samples=None)

    # define ops
    comps_ops = list()

    # train or val
    if is_train:
        comps_ops.append(C.RandomCropDecodeResize(224, scale=(0.08, 1.0), ratio=(0.75, 1.333)))
        comps_ops.append(C.RandomHorizontalFlip(prob=0.5))
        comps_ops.append(C.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4))
    else:
        comps_ops.append(C.Decode())
        comps_ops.append(C.Resize(224))
        comps_ops.append(C.CenterCrop(224))

    comps_ops.append(C.Rescale(1 / 255.0, 0.))
    comps_ops.append(C.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
    comps_ops.append(C.HWC2CHW())

    # map ops
    ds = ds.map(input_columns=["image"], operations=comps_ops, num_parallel_workers=8)
    ds = ds.map(input_columns=["label"], operations=C2.TypeCast(mstype.int32), num_parallel_workers=8)

    # batch & repeat
    ds = ds.batch(batch_size=batch_size, drop_remainder=is_train)
    ds = ds.repeat(count=1)

    return ds
Code Example #19
File: test_invert.py Project: stjordanis/mindspore
def test_invert_md5_py():
    """
    Test Invert python op with md5 check
    """
    logger.info("Test Invert python op with md5 check")

    # Generate dataset
    ds = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)

    transforms_invert = mindspore.dataset.transforms.py_transforms.Compose(
        [F.Decode(), F.Invert(), F.ToTensor()])

    data = ds.map(operations=transforms_invert, input_columns="image")
    # Compare with expected md5 from images
    filename = "invert_01_result_py.npz"
    save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
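save_and_check_md5 is another shared test utility: it runs the pipeline, hashes the produced tensors, and compares the digest against a golden file (regenerating it when generate_golden is True). A simplified sketch under those assumptions; the real helper stores its results differently:

import hashlib

def save_and_check_md5(data, filename, generate_golden=False):
    # Simplified sketch of the golden-file check (details differ from the real utility).
    digest = hashlib.md5()
    for item in data.create_dict_iterator(num_epochs=1, output_numpy=True):
        digest.update(item["image"].tobytes())
    golden_path = filename + ".md5"  # placeholder location for the golden digest
    if generate_golden:
        with open(golden_path, "w") as f:
            f.write(digest.hexdigest())
    with open(golden_path) as f:
        assert digest.hexdigest() == f.read()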
Code Example #20
def test_equalize_md5_py():
    """
    Test Equalize py op with md5 check
    """
    logger.info("Test Equalize")

    # First dataset
    data1 = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
    transforms = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(),
                                                                     F.Equalize(),
                                                                     F.ToTensor()])

    data1 = data1.map(operations=transforms, input_columns="image")
    # Compare with expected md5 from images
    filename = "equalize_01_result.npz"
    save_and_check_md5(data1, filename, generate_golden=GENERATE_GOLDEN)
Code Example #21
def test_equalize_one_channel():
    """
     Test Equalize cpp op with one channel image
     """
    logger.info("Test Equalize C Op With One Channel Images")

    c_op = C.Equalize()

    try:
        ds = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
        ds = ds.map(operations=[C.Decode(), C.Resize((224, 224)),
                                lambda img: np.array(img[:, :, 0])], input_columns=["image"])

        ds.map(operations=c_op, input_columns="image")

    except RuntimeError as e:
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert "The shape" in str(e)
Code Example #22
def test_equalize_md5_c():
    """
    Test Equalize cpp op with md5 check
    """
    logger.info("Test Equalize cpp op with md5 check")

    # Generate dataset
    ds = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)

    transforms_equalize = [C.Decode(),
                           C.Resize(size=[224, 224]),
                           C.Equalize(),
                           F.ToTensor()]

    data = ds.map(operations=transforms_equalize, input_columns="image")
    # Compare with expected md5 from images
    filename = "equalize_01_result_c.npz"
    save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
Code Example #23
def create_dataset(dataset_path,
                   do_train,
                   repeat_num=1,
                   batch_size=32,
                   target="GPU"):
    ds = de.ImageFolderDataset(dataset_path,
                               num_parallel_workers=4,
                               shuffle=True)

    image_size = 224
    mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
    std = [0.229 * 255, 0.224 * 255, 0.225 * 255]

    # define map operations
    if do_train:
        trans = [
            C.RandomCropDecodeResize(image_size,
                                     scale=(0.08, 1.0),
                                     ratio=(0.75, 1.333)),
            C.RandomHorizontalFlip(prob=0.5),
            C.Normalize(mean=mean, std=std),
        ]
    else:
        trans = [
            C.Decode(),
            C.Resize(256),
            C.CenterCrop(image_size),
            C.Normalize(mean=mean, std=std),
        ]

    ds = ds.map(operations=trans,
                input_columns="image",
                num_parallel_workers=4)
    ds = ds.map(operations=pad, input_columns="image", num_parallel_workers=4)
    # apply batch operations
    ds = ds.batch(batch_size, drop_remainder=True)
    # apply dataset repeat operation
    if repeat_num > 1:
        ds = ds.repeat(repeat_num)

    return ds
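The second map call above references a pad op that this snippet never defines. A hypothetical definition, assuming it appends a zero fourth channel to the HWC image (e.g. for channel-padded GPU kernels):

def pad(image):
    # Hypothetical helper (not shown in the original snippet): append a zero
    # fourth channel to the HWC image.
    zeros = np.zeros((image.shape[0], image.shape[1], 1), dtype=image.dtype)
    return np.concatenate((image, zeros), axis=2)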
Code Example #24
def test_random_sharpness_c_md5():
    """
    Test RandomSharpness cpp op with md5 comparison
    """
    logger.info("Test RandomSharpness cpp op with md5 comparison")
    original_seed = config_get_set_seed(200)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # define map operations
    transforms = [C.Decode(), C.RandomSharpness((10.0, 15.0))]

    # Generate dataset
    data = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
    data = data.map(operations=transforms, input_columns=["image"])

    # check results with md5 comparison
    filename = "random_sharpness_cpp_01_result.npz"
    save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)

    # Restore configuration
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
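config_get_set_seed and config_get_set_num_parallel_workers are small utilities that install a new global value and hand back the old one so the test can restore it afterwards; a sketch of the presumed implementation:

def config_get_set_seed(seed_new):
    # Sketch: record the current global seed, install the new one, return the old.
    seed_original = ds.config.get_seed()
    ds.config.set_seed(seed_new)
    return seed_original

def config_get_set_num_parallel_workers(num_parallel_workers_new):
    # Sketch: same pattern for the global parallel-worker count.
    num_parallel_workers_original = ds.config.get_num_parallel_workers()
    ds.config.set_num_parallel_workers(num_parallel_workers_new)
    return num_parallel_workers_original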
Code Example #25
def test_random_sharpness_py_md5():
    """
    Test RandomSharpness python op with md5 comparison
    """
    logger.info("Test RandomSharpness python op with md5 comparison")
    original_seed = config_get_set_seed(5)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # define map operations
    transforms = [F.Decode(), F.RandomSharpness((20.0, 25.0)), F.ToTensor()]
    transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)

    # Generate dataset
    data = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
    data = data.map(operations=transform, input_columns=["image"])

    # check results with md5 comparison
    filename = "random_sharpness_py_01_result.npz"
    save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)

    # Restore configuration
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
Code Example #26
def test_random_color_py_md5():
    """
    Test Python RandomColor with md5 check
    """
    logger.info("Test RandomColor with md5 check")
    original_seed = config_get_set_seed(10)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # Generate dataset
    data = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)

    transforms = mindspore.dataset.transforms.py_transforms.Compose(
        [F.Decode(), F.RandomColor((2.0, 2.5)),
         F.ToTensor()])

    data = data.map(operations=transforms, input_columns="image")
    # Compare with expected md5 from images
    filename = "random_color_01_result.npz"
    save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)

    # Restore configuration
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
Code Example #27
def create_dataset(dataset_path,
                   do_train,
                   config,
                   device_target,
                   repeat_num=1,
                   batch_size=32):
    """
    create a train or eval dataset

    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): whether dataset is used for train or eval.
        config(struct): dataset config; image_height and data_load_mode are read here.
        device_target(str): the target device, "Ascend" or "GPU".
        repeat_num(int): the repeat times of dataset. Default: 1.
        batch_size(int): the batch size of dataset. Default: 32.

    Returns:
        dataset
    """
    if device_target == "Ascend":
        rank_size = int(os.getenv("RANK_SIZE"))
        rank_id = int(os.getenv("RANK_ID"))
        columns_list = ['image', 'label']
        if config.data_load_mode == "mindrecord":
            load_func = partial(de.MindDataset, dataset_path, columns_list)
        else:
            load_func = partial(de.ImageFolderDataset, dataset_path)
        if do_train:
            if rank_size == 1:
                ds = load_func(num_parallel_workers=8, shuffle=True)
            else:
                ds = load_func(num_parallel_workers=8,
                               shuffle=True,
                               num_shards=rank_size,
                               shard_id=rank_id)
        else:
            ds = load_func(num_parallel_workers=8, shuffle=False)
    elif device_target == "GPU":
        if do_train:
            from mindspore.communication.management import get_rank, get_group_size
            ds = de.ImageFolderDataset(dataset_path,
                                       num_parallel_workers=8,
                                       shuffle=True,
                                       num_shards=get_group_size(),
                                       shard_id=get_rank())
        else:
            ds = de.ImageFolderDataset(dataset_path,
                                       num_parallel_workers=8,
                                       shuffle=True)
    else:
        raise ValueError("Unsupported device_target.")

    resize_height = config.image_height

    if do_train:
        buffer_size = 20480
        # apply shuffle operations
        ds = ds.shuffle(buffer_size=buffer_size)

    # define map operations
    decode_op = C.Decode()
    resize_crop_decode_op = C.RandomCropDecodeResize(resize_height,
                                                     scale=(0.08, 1.0),
                                                     ratio=(0.75, 1.333))
    horizontal_flip_op = C.RandomHorizontalFlip(prob=0.5)

    resize_op = C.Resize(256)
    center_crop = C.CenterCrop(resize_height)
    normalize_op = C.Normalize(mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
                               std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
    change_swap_op = C.HWC2CHW()

    if do_train:
        trans = [
            resize_crop_decode_op, horizontal_flip_op, normalize_op,
            change_swap_op
        ]
    else:
        trans = [
            decode_op, resize_op, center_crop, normalize_op, change_swap_op
        ]

    type_cast_op = C2.TypeCast(mstype.int32)

    ds = ds.map(operations=trans,
                input_columns="image",
                num_parallel_workers=16)
    ds = ds.map(operations=type_cast_op,
                input_columns="label",
                num_parallel_workers=8)

    # apply batch operations
    ds = ds.batch(batch_size, drop_remainder=True)

    # apply dataset repeat operation
    ds = ds.repeat(repeat_num)

    return ds
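A hypothetical call; config can be any object exposing the fields the function reads (image_height always, data_load_mode on Ascend), and the path is a placeholder:

from types import SimpleNamespace

# Hypothetical eval-side invocation on GPU.
cfg = SimpleNamespace(image_height=224, data_load_mode="imagefolder")
eval_ds = create_dataset("/path/to/imagenet/val", do_train=False,
                         config=cfg, device_target="GPU", batch_size=64)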
Code Example #28
def test_random_sharpness_py(degrees=(0.7, 0.7), plot=False):
    """
    Test RandomSharpness python op
    """
    logger.info("Test RandomSharpness python op")

    # Original Images
    data = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)

    transforms_original = mindspore.dataset.transforms.py_transforms.Compose(
        [F.Decode(), F.Resize((224, 224)),
         F.ToTensor()])

    ds_original = data.map(operations=transforms_original,
                           input_columns="image")

    ds_original = ds_original.batch(512)

    for idx, (image, _) in enumerate(
            ds_original.create_tuple_iterator(output_numpy=True)):
        if idx == 0:
            images_original = np.transpose(image, (0, 2, 3, 1))
        else:
            images_original = np.append(images_original,
                                        np.transpose(image, (0, 2, 3, 1)),
                                        axis=0)

    # Random Sharpness Adjusted Images
    data = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)

    py_op = F.RandomSharpness()
    if degrees is not None:
        py_op = F.RandomSharpness(degrees)

    transforms_random_sharpness = mindspore.dataset.transforms.py_transforms.Compose(
        [F.Decode(), F.Resize((224, 224)), py_op,
         F.ToTensor()])

    ds_random_sharpness = data.map(operations=transforms_random_sharpness,
                                   input_columns="image")

    ds_random_sharpness = ds_random_sharpness.batch(512)

    for idx, (image, _) in enumerate(
            ds_random_sharpness.create_tuple_iterator(output_numpy=True)):
        if idx == 0:
            images_random_sharpness = np.transpose(image, (0, 2, 3, 1))
        else:
            images_random_sharpness = np.append(images_random_sharpness,
                                                np.transpose(
                                                    image, (0, 2, 3, 1)),
                                                axis=0)

    num_samples = images_original.shape[0]
    mse = np.zeros(num_samples)
    for i in range(num_samples):
        mse[i] = diff_mse(images_random_sharpness[i], images_original[i])

    logger.info("MSE= {}".format(str(np.mean(mse))))

    if plot:
        visualize_list(images_original, images_random_sharpness)
Code Example #29
def test_random_sharpness_c_py(degrees=(1.0, 1.0), plot=False):
    """
    Test Random Sharpness C and python Op
    """
    logger.info("Test RandomSharpness C and python Op")

    # RandomSharpness Images
    data = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
    data = data.map(operations=[C.Decode(), C.Resize((200, 300))],
                    input_columns=["image"])

    python_op = F.RandomSharpness(degrees)
    c_op = C.RandomSharpness(degrees)

    transforms_op = mindspore.dataset.transforms.py_transforms.Compose(
        [lambda img: F.ToPIL()(img.astype(np.uint8)), python_op, np.array])

    ds_random_sharpness_py = data.map(operations=transforms_op,
                                      input_columns="image")

    ds_random_sharpness_py = ds_random_sharpness_py.batch(512)

    for idx, (image, _) in enumerate(
            ds_random_sharpness_py.create_tuple_iterator(output_numpy=True)):
        if idx == 0:
            images_random_sharpness_py = image

        else:
            images_random_sharpness_py = np.append(images_random_sharpness_py,
                                                   image,
                                                   axis=0)

    data = de.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
    data = data.map(operations=[C.Decode(), C.Resize((200, 300))],
                    input_columns=["image"])

    ds_images_random_sharpness_c = data.map(operations=c_op,
                                            input_columns="image")

    ds_images_random_sharpness_c = ds_images_random_sharpness_c.batch(512)

    for idx, (image, _) in enumerate(
            ds_images_random_sharpness_c.create_tuple_iterator(
                output_numpy=True)):
        if idx == 0:
            images_random_sharpness_c = image

        else:
            images_random_sharpness_c = np.append(images_random_sharpness_c,
                                                  image,
                                                  axis=0)

    num_samples = images_random_sharpness_c.shape[0]
    mse = np.zeros(num_samples)
    for i in range(num_samples):
        mse[i] = diff_mse(images_random_sharpness_c[i],
                          images_random_sharpness_py[i])
    logger.info("MSE= {}".format(str(np.mean(mse))))
    if plot:
        visualize_list(images_random_sharpness_c,
                       images_random_sharpness_py,
                       visualize_mode=2)
Code Example #30
def create_dataset(dataset_path, do_train, config, repeat_num=1):
    """
    create a train or eval dataset

    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): whether dataset is used for train or eval.
        config(struct): the config of train and eval on different platforms.
        repeat_num(int): the repeat times of dataset. Default: 1.

    Returns:
        dataset
    """
    if config.platform == "Ascend":
        rank_size = int(os.getenv("RANK_SIZE", '1'))
        rank_id = int(os.getenv("RANK_ID", '0'))
        if rank_size == 1:
            ds = de.ImageFolderDataset(dataset_path,
                                       num_parallel_workers=8,
                                       shuffle=True)
        else:
            ds = de.ImageFolderDataset(dataset_path,
                                       num_parallel_workers=8,
                                       shuffle=True,
                                       num_shards=rank_size,
                                       shard_id=rank_id)
    elif config.platform == "GPU":
        if do_train:
            if config.run_distribute:
                from mindspore.communication.management import get_rank, get_group_size
                ds = de.ImageFolderDataset(dataset_path,
                                           num_parallel_workers=8,
                                           shuffle=True,
                                           num_shards=get_group_size(),
                                           shard_id=get_rank())
            else:
                ds = de.ImageFolderDataset(dataset_path,
                                           num_parallel_workers=8,
                                           shuffle=True)
        else:
            ds = de.ImageFolderDataset(dataset_path,
                                       num_parallel_workers=8,
                                       shuffle=True)
    elif config.platform == "CPU":
        ds = de.ImageFolderDataset(dataset_path,
                                   num_parallel_workers=8,
                                   shuffle=True)
    else:
        raise ValueError("Unsupported platform.")

    resize_height = config.image_height
    resize_width = config.image_width
    buffer_size = 1000

    # define map operations
    decode_op = C.Decode()
    resize_crop_op = C.RandomCropDecodeResize(resize_height,
                                              scale=(0.08, 1.0),
                                              ratio=(0.75, 1.333))
    horizontal_flip_op = C.RandomHorizontalFlip(prob=0.5)

    resize_op = C.Resize((256, 256))
    center_crop = C.CenterCrop(resize_width)
    color_adjust_op = C.RandomColorAdjust(brightness=0.4,
                                          contrast=0.4,
                                          saturation=0.4)
    normalize_op = C.Normalize(mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
                               std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
    change_swap_op = C.HWC2CHW()

    if do_train:
        trans = [
            resize_crop_op, horizontal_flip_op, color_adjust_op, normalize_op,
            change_swap_op
        ]
    else:
        trans = [
            decode_op, resize_op, center_crop, normalize_op, change_swap_op
        ]

    type_cast_op = C2.TypeCast(mstype.int32)

    ds = ds.map(operations=trans,
                input_columns="image",
                num_parallel_workers=8)
    ds = ds.map(operations=type_cast_op,
                input_columns="label",
                num_parallel_workers=8)

    # apply shuffle operations
    ds = ds.shuffle(buffer_size=buffer_size)

    # apply batch operations
    ds = ds.batch(config.batch_size, drop_remainder=True)

    # apply dataset repeat operation
    ds = ds.repeat(repeat_num)

    return ds