Code example #1
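The snippets below are excerpts from MindSpore's CIFAR dataset tests, so they rely on module-level imports and constants that the excerpts omit. The following is a minimal sketch of that shared setup, assuming the MindSpore 1.x module layout; the two data directories match the literal paths in code example #9, while the directory aliases are assumptions. The load_cifar and visualize_dataset helpers (used in code examples #6 and #12) are defined elsewhere in the test module and are not reproduced here.

import os

import numpy as np
import pytest

import mindspore.common.dtype as mstype
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as C
import mindspore.dataset.vision.c_transforms as CV
from mindspore import log as logger

# Small CIFAR test binaries; these paths appear verbatim in code example #9.
DATA_DIR_10 = "../data/dataset/testCifar10Data"
DATA_DIR_100 = "../data/dataset/testCifar100Data"

# The dataset-size snippets refer to what appears to be the same data under
# other names; treating these as aliases is an assumption.
CIFAR10_DATA_DIR = DATA_DIR_10
CIFAR100_DATA_DIR = DATA_DIR_100
CIFAR100_DIR = DATA_DIR_100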
def test_cifar_exception_file_path():
    def exception_func(item):
        raise Exception("Error occurred!")

    # An exception raised inside the Python callable should surface as a
    # RuntimeError once the pipeline is iterated; exercise every column of
    # both CIFAR datasets.
    cases = [
        (ds.Cifar10Dataset, DATA_DIR_10, "image"),
        (ds.Cifar10Dataset, DATA_DIR_10, "label"),
        (ds.Cifar100Dataset, DATA_DIR_100, "image"),
        (ds.Cifar100Dataset, DATA_DIR_100, "coarse_label"),
        (ds.Cifar100Dataset, DATA_DIR_100, "fine_label"),
    ]
    for dataset_cls, data_dir, column in cases:
        data = dataset_cls(data_dir)
        data = data.map(operations=exception_func, input_columns=[column],
                        num_parallel_workers=1)
        with pytest.raises(RuntimeError) as error_info:
            for _ in data.create_dict_iterator():
                pass
        assert "map operation: [PyFunc] failed. The corresponding data files" in str(error_info.value)
Code example #2
def test_cifar_usage():
    """
    test usage of cifar
    """
    logger.info("Test Cifar100Dataset usage flag")

    # flag, if True, test cifar10 else test cifar100
    def test_config(usage, flag=True, cifar_path=None):
        if cifar_path is None:
            cifar_path = DATA_DIR_10 if flag else DATA_DIR_100
        try:
            data = ds.Cifar10Dataset(cifar_path, usage=usage) if flag else ds.Cifar100Dataset(cifar_path, usage=usage)
            num_rows = 0
            for _ in data.create_dict_iterator(num_epochs=1, output_numpy=True):
                num_rows += 1
        except (ValueError, TypeError, RuntimeError) as e:
            return str(e)
        return num_rows

    # test the usage of CIFAR10 (flag defaults to True)
    assert test_config("train") == 10000
    assert test_config("all") == 10000
    assert "usage is not within the valid set of ['train', 'test', 'all']" in test_config("invalid")
    assert "Argument usage with value ['list'] is not of type (<class 'str'>,)" in test_config(["list"])
    assert "no valid data matching the dataset API Cifar10Dataset" in test_config("test")

    # test the usage of CIFAR100
    assert test_config("test", False) == 10000
    assert test_config("all", False) == 10000
    assert "no valid data matching the dataset API Cifar100Dataset" in test_config("train", False)
    assert "usage is not within the valid set of ['train', 'test', 'all']" in test_config("invalid", False)

    # change this directory to the folder that contains all cifar10 files
    all_cifar10 = None
    if all_cifar10 is not None:
        assert test_config("train", True, all_cifar10) == 50000
        assert test_config("test", True, all_cifar10) == 10000
        assert test_config("all", True, all_cifar10) == 60000
        assert ds.Cifar10Dataset(all_cifar10, usage="train").get_dataset_size() == 50000
        assert ds.Cifar10Dataset(all_cifar10, usage="test").get_dataset_size() == 10000
        assert ds.Cifar10Dataset(all_cifar10, usage="all").get_dataset_size() == 60000

    # change this directory to the folder that contains all cifar100 files
    all_cifar100 = None
    if all_cifar100 is not None:
        assert test_config("train", False, all_cifar100) == 50000
        assert test_config("test", False, all_cifar100) == 10000
        assert test_config("all", False, all_cifar100) == 60000
        assert ds.Cifar100Dataset(all_cifar100, usage="train").get_dataset_size() == 50000
        assert ds.Cifar100Dataset(all_cifar100, usage="test").get_dataset_size() == 10000
        assert ds.Cifar100Dataset(all_cifar100, usage="all").get_dataset_size() == 60000
Code example #3
def test_cifar100_basic():
    """
    Test Cifar100Dataset
    """
    logger.info("Test Cifar100Dataset")

    # case 1: test num_samples
    data1 = ds.Cifar100Dataset(DATA_DIR_100, num_samples=100)
    num_iter1 = 0
    for _ in data1.create_dict_iterator(num_epochs=1):
        num_iter1 += 1
    assert num_iter1 == 100

    # case 2: test repeat
    data1 = data1.repeat(2)
    num_iter2 = 0
    for _ in data1.create_dict_iterator(num_epochs=1):
        num_iter2 += 1
    assert num_iter2 == 200

    # case 3: test num_parallel_workers
    data2 = ds.Cifar100Dataset(DATA_DIR_100,
                               num_samples=100,
                               num_parallel_workers=1)
    num_iter3 = 0
    for _ in data2.create_dict_iterator(num_epochs=1):
        num_iter3 += 1
    assert num_iter3 == 100

    # case 4: test batch with drop_remainder=False
    data3 = ds.Cifar100Dataset(DATA_DIR_100, num_samples=100)
    assert data3.get_dataset_size() == 100
    assert data3.get_batch_size() == 1
    data3 = data3.batch(batch_size=3)
    assert data3.get_dataset_size() == 34
    assert data3.get_batch_size() == 3
    num_iter4 = 0
    for _ in data3.create_dict_iterator(num_epochs=1):
        num_iter4 += 1
    assert num_iter4 == 34

    # case 5: test batch with drop_remainder=True
    data4 = ds.Cifar100Dataset(DATA_DIR_100, num_samples=100)
    data4 = data4.batch(batch_size=3, drop_remainder=True)
    assert data4.get_dataset_size() == 33
    assert data4.get_batch_size() == 3
    num_iter5 = 0
    for _ in data4.create_dict_iterator(num_epochs=1):
        num_iter5 += 1
    assert num_iter5 == 33
Code example #4
def vgg_create_dataset100(data_home, image_size, batch_size, rank_id=0, rank_size=1, repeat_num=1,
                          training=True, num_samples=None, shuffle=True):
    """Data operations."""
    ds.config.set_seed(1)
    data_dir = os.path.join(data_home, "train")
    if not training:
        data_dir = os.path.join(data_home, "test")

    if num_samples is not None:
        data_set = ds.Cifar100Dataset(data_dir, num_shards=rank_size, shard_id=rank_id,
                                      num_samples=num_samples, shuffle=shuffle)
    else:
        data_set = ds.Cifar100Dataset(data_dir, num_shards=rank_size, shard_id=rank_id)

    input_columns = ["fine_label"]
    output_columns = ["label"]
    data_set = data_set.rename(input_columns=input_columns, output_columns=output_columns)
    data_set = data_set.project(["image", "label"])

    rescale = 1.0 / 255.0
    shift = 0.0

    # define map operations
    random_crop_op = CV.RandomCrop((32, 32), (4, 4, 4, 4))  # padding_mode default CONSTANT
    random_horizontal_op = CV.RandomHorizontalFlip()
    resize_op = CV.Resize(image_size)  # interpolation default BILINEAR
    rescale_op = CV.Rescale(rescale, shift)
    normalize_op = CV.Normalize((0.4465, 0.4822, 0.4914), (0.2010, 0.1994, 0.2023))
    changeswap_op = CV.HWC2CHW()
    type_cast_op = C.TypeCast(mstype.int32)

    c_trans = []
    if training:
        c_trans = [random_crop_op, random_horizontal_op]
    c_trans += [resize_op, rescale_op, normalize_op,
                changeswap_op]

    # apply map operations on images
    data_set = data_set.map(input_columns="label", operations=type_cast_op)
    data_set = data_set.map(input_columns="image", operations=c_trans)

    # apply shuffle operations
    data_set = data_set.shuffle(buffer_size=1000)

    # apply batch operations
    data_set = data_set.batch(batch_size=batch_size, drop_remainder=True)

    # apply repeat operations
    data_set = data_set.repeat(repeat_num)
    return data_set
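For context, a hypothetical call of the loader above (the train/ and test/ subdirectory layout is implied by the os.path.join calls; the path and parameter values here are assumptions):

# Build a training pipeline from CIFAR-100 binaries under ./cifar-100-binary/train
train_ds = vgg_create_dataset100("./cifar-100-binary", image_size=(224, 224),
                                 batch_size=32, training=True)
print(train_ds.get_dataset_size())  # number of batches per epoch (drop_remainder=True)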
Code example #5
def test_cifar10_dataset_size():
    ds_total = ds.Cifar10Dataset(CIFAR10_DATA_DIR)
    assert ds_total.get_dataset_size() == 10000

    # test get_dataset_size with usage flag; the test CIFAR-100 directory holds
    # only test-split files (see code example #2), so usage="train" matches no data
    train_size = ds.Cifar100Dataset(CIFAR100_DATA_DIR,
                                    usage="train").get_dataset_size()
    assert train_size == 0
    train_size = ds.Cifar10Dataset(CIFAR10_DATA_DIR,
                                   usage="train").get_dataset_size()
    assert train_size == 10000

    all_size = ds.Cifar10Dataset(CIFAR10_DATA_DIR,
                                 usage="all").get_dataset_size()
    assert all_size == 10000

    ds_shard_1_0 = ds.Cifar10Dataset(CIFAR10_DATA_DIR,
                                     num_shards=1,
                                     shard_id=0)
    assert ds_shard_1_0.get_dataset_size() == 10000

    ds_shard_2_0 = ds.Cifar10Dataset(CIFAR10_DATA_DIR,
                                     num_shards=2,
                                     shard_id=0)
    assert ds_shard_2_0.get_dataset_size() == 5000

    ds_shard_3_0 = ds.Cifar10Dataset(CIFAR10_DATA_DIR,
                                     num_shards=3,
                                     shard_id=0)
    assert ds_shard_3_0.get_dataset_size() == 3334

    ds_shard_7_0 = ds.Cifar10Dataset(CIFAR10_DATA_DIR,
                                     num_shards=7,
                                     shard_id=0)
    assert ds_shard_7_0.get_dataset_size() == 1429
Code example #6
def test_cifar100_visualize(plot=False):
    """
    Visualize Cifar100Dataset results
    """
    logger.info("Test Cifar100Dataset visualization")

    data1 = ds.Cifar100Dataset(DATA_DIR_100, num_samples=10, shuffle=False)
    num_iter = 0
    image_list, label_list = [], []
    for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
        image = item["image"]
        coarse_label = item["coarse_label"]
        fine_label = item["fine_label"]
        image_list.append(image)
        label_list.append("coarse_label {}\nfine_label {}".format(
            coarse_label, fine_label))
        assert isinstance(image, np.ndarray)
        assert image.shape == (32, 32, 3)
        assert image.dtype == np.uint8
        assert coarse_label.dtype == np.uint32
        assert fine_label.dtype == np.uint32
        num_iter += 1
    assert num_iter == 10
    if plot:
        visualize_dataset(image_list, label_list)
Code example #7
def test_cifar100_exception():
    """
    Test error cases for Cifar100Dataset
    """
    logger.info("Test error cases for Cifar100Dataset")
    error_msg_1 = "sampler and shuffle cannot be specified at the same time"
    with pytest.raises(RuntimeError, match=error_msg_1):
        ds.Cifar100Dataset(DATA_DIR_100, shuffle=False, sampler=ds.PKSampler(3))

    error_msg_2 = "sampler and sharding cannot be specified at the same time"
    with pytest.raises(RuntimeError, match=error_msg_2):
        ds.Cifar100Dataset(DATA_DIR_100, sampler=ds.PKSampler(3), num_shards=2, shard_id=0)

    error_msg_3 = "num_shards is specified and currently requires shard_id as well"
    with pytest.raises(RuntimeError, match=error_msg_3):
        ds.Cifar100Dataset(DATA_DIR_100, num_shards=10)

    error_msg_4 = "shard_id is specified but num_shards is not"
    with pytest.raises(RuntimeError, match=error_msg_4):
        ds.Cifar100Dataset(DATA_DIR_100, shard_id=0)

    error_msg_5 = "Input shard_id is not within the required interval"
    with pytest.raises(ValueError, match=error_msg_5):
        ds.Cifar100Dataset(DATA_DIR_100, num_shards=2, shard_id=-1)
    with pytest.raises(ValueError, match=error_msg_5):
        ds.Cifar100Dataset(DATA_DIR_100, num_shards=2, shard_id=5)

    error_msg_6 = "num_parallel_workers exceeds"
    with pytest.raises(ValueError, match=error_msg_6):
        ds.Cifar100Dataset(DATA_DIR_100, shuffle=False, num_parallel_workers=0)
    with pytest.raises(ValueError, match=error_msg_6):
        ds.Cifar100Dataset(DATA_DIR_100, shuffle=False, num_parallel_workers=88)
Code example #8
def test_cifar100_dataset_size():
    ds_total = ds.Cifar100Dataset(CIFAR100_DATA_DIR)
    assert ds_total.get_dataset_size() == 10000

    ds_shard_1_0 = ds.Cifar100Dataset(CIFAR100_DATA_DIR,
                                      num_shards=1,
                                      shard_id=0)
    assert ds_shard_1_0.get_dataset_size() == 10000

    ds_shard_2_0 = ds.Cifar100Dataset(CIFAR100_DATA_DIR,
                                      num_shards=2,
                                      shard_id=0)
    assert ds_shard_2_0.get_dataset_size() == 5000

    ds_shard_3_0 = ds.Cifar100Dataset(CIFAR100_DATA_DIR,
                                      num_shards=3,
                                      shard_id=0)
    assert ds_shard_3_0.get_dataset_size() == 3334
Code example #9
def test_cifar():
    data = ds.Cifar10Dataset("../data/dataset/testCifar10Data")
    assert data.get_dataset_size() == 10000

    data = ds.Cifar10Dataset("../data/dataset/testCifar10Data", num_samples=10)
    assert data.get_dataset_size() == 10

    data = ds.Cifar10Dataset("../data/dataset/testCifar10Data", num_samples=90000)
    assert data.get_dataset_size() == 10000

    data = ds.Cifar100Dataset("../data/dataset/testCifar100Data")
    assert data.get_dataset_size() == 10000

    data = ds.Cifar100Dataset("../data/dataset/testCifar100Data", num_samples=10)
    assert data.get_dataset_size() == 10

    data = ds.Cifar100Dataset("../data/dataset/testCifar100Data", num_samples=20000)
    assert data.get_dataset_size() == 10000
Code example #10
File: test_cifarop.py / Project: xyg320/mindspore
def test_case_dataset_cifar100():
    """
    dataset parameter
    """
    logger.info("Test dataset parameter")
    # apply dataset operations
    data1 = ds.Cifar100Dataset(DATA_DIR_100, num_samples=100)

    num_iter = 0
    for _ in data1.create_dict_iterator():
        # in this example, each dictionary has keys "image", "coarse_label" and "fine_label"
        num_iter += 1
    assert num_iter == 100
Code example #11
def test_cifar100_pk_sampler():
    """
    Test Cifar100Dataset with PKSampler
    """
    logger.info("Test Cifar100Dataset with PKSampler")
    golden = list(range(20))
    sampler = ds.PKSampler(1)
    data = ds.Cifar100Dataset(DATA_DIR_100, sampler=sampler)
    num_iter = 0
    label_list = []
    for item in data.create_dict_iterator():
        label_list.append(item["coarse_label"])
        num_iter += 1
    np.testing.assert_array_equal(golden, label_list)
    assert num_iter == 20
Code example #12
def test_cifar100_content_check():
    """
    Validate Cifar100Dataset image readings
    """
    logger.info("Test Cifar100Dataset with content check")
    data1 = ds.Cifar100Dataset(DATA_DIR_100, num_samples=100, shuffle=False)
    images, labels = load_cifar(DATA_DIR_100, kind="cifar100")
    num_iter = 0
    # in this example, each dictionary has keys "image", "coarse_label" and "fine_label"
    for i, d in enumerate(data1.create_dict_iterator()):
        np.testing.assert_array_equal(d["image"], images[i])
        np.testing.assert_array_equal(d["coarse_label"], labels[i][0])
        np.testing.assert_array_equal(d["fine_label"], labels[i][1])
        num_iter += 1
    assert num_iter == 100
Code example #13
# Note: this helper is a closure taken from an enclosing sharding test;
# cifar100_dir and print_res are defined in that enclosing scope.
def sharding_config(num_shards,
                    shard_id,
                    num_samples,
                    shuffle,
                    repeat_cnt=1):
    data1 = ds.Cifar100Dataset(cifar100_dir,
                               num_shards=num_shards,
                               shard_id=shard_id,
                               num_samples=num_samples,
                               shuffle=shuffle)
    data1 = data1.repeat(repeat_cnt)
    res = []
    for item in data1.create_dict_iterator():  # each data is a dictionary
        res.append(item["coarse_label"].item())
    if print_res:
        logger.info("labels of dataset: {}".format(res))
    return res
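A hypothetical driver for the helper above (cifar100_dir and print_res must already be in scope, as noted; the values here are assumptions):

cifar100_dir = DATA_DIR_100
print_res = True
# Read shard 0 of 2, capped at 3 samples, without shuffling.
shard0_labels = sharding_config(num_shards=2, shard_id=0, num_samples=3, shuffle=False)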
Code example #14
def test_get_column_name_cifar100():
    data = ds.Cifar100Dataset(CIFAR100_DIR)
    assert data.get_col_names() == ["image", "coarse_label", "fine_label"]