Code example #1
File: test_csv.py  Project: yngtodd/datasets
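These excerpts omit the module-level imports of test_csv.py. A minimal sketch of the header they assume follows; the exact import paths (especially for the Arrow-memory assertion helpers) vary between versions of the repository and are an assumption here.

import os

import pyarrow as pa
import pytest

from datasets import Dataset, DatasetDict, Features, Value
from datasets.io.csv import CsvDatasetReader, CsvDatasetWriter

# Assumption: the Arrow-memory context managers live in the test utilities
# of the datasets repository (tests/utils.py in recent checkouts).
from tests.utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
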
def test_csv_dataset_reader(path_type, split, features, keep_in_memory, csv_path, tmp_path):
    if issubclass(path_type, str):
        path = csv_path
    elif issubclass(path_type, list):
        path = [csv_path]
    cache_dir = tmp_path / "cache"

    expected_split = str(split) if split else "train"

    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features else None
    )
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = CsvDatasetReader(
            path, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    assert dataset.split == expected_split
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
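
The arguments path_type, split, features, and keep_in_memory are pytest parameters, and csv_path is a fixture; neither appears in these excerpts. A hypothetical sketch of the decorators and fixture that would drive example #1 (the parameter values and the fixture body are illustrative, not copied from the repository):

@pytest.fixture
def csv_path(tmp_path_factory):
    # Hypothetical fixture: writes the 4-row, 3-column CSV the assertions expect.
    path = tmp_path_factory.mktemp("data") / "dataset.csv"
    path.write_text("col_1,col_2,col_3\n0,0,0.0\n1,1,1.0\n2,2,2.0\n3,3,3.0\n")
    return str(path)


@pytest.mark.parametrize("keep_in_memory", [False, True])
@pytest.mark.parametrize(
    "features", [None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}]
)
@pytest.mark.parametrize("split", [None, "train", "test"])
@pytest.mark.parametrize("path_type", [str, list])
def test_csv_dataset_reader(path_type, split, features, keep_in_memory, csv_path, tmp_path):
    ...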
Code example #2
def test_dataset_from_csv_path_type(path_type, csv_path, tmp_path):
    if issubclass(path_type, str):
        path = csv_path
    elif issubclass(path_type, list):
        path = [csv_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
    dataset = CsvDatasetReader(path, cache_dir=cache_dir).read()
    _check_csv_dataset(dataset, expected_features)
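
_check_csv_dataset is a local helper that the remaining examples call but that is not part of these excerpts. A minimal sketch consistent with the inline assertions of example #1:

def _check_csv_dataset(dataset, expected_features):
    # Mirrors the inline assertions from example #1.
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype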
Code example #3
def test_dataset_from_csv_features(features, csv_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = CsvDatasetReader(csv_path, features=features, cache_dir=cache_dir).read()
    _check_csv_dataset(dataset, expected_features)
Code example #4
def test_dataset_from_csv_split(split, csv_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
    dataset = CsvDatasetReader(csv_path, cache_dir=cache_dir, split=split).read()
    _check_csv_dataset(dataset, expected_features)
    # Parenthesize the conditional: without the parentheses this parses as
    # `(dataset.split == str(split)) if split else "train"`, which is always
    # truthy when split is None, so the assertion could never fail.
    assert dataset.split == (str(split) if split else "train")
Code example #5
def test_dataset_to_csv_multiproc(csv_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_csv = os.path.join(cache_dir, "tmp.csv")
    dataset = CsvDatasetReader({"train": csv_path}, cache_dir=cache_dir).read()
    CsvDatasetWriter(dataset["train"], output_csv, index=False, num_proc=2).write()

    original_csv = iter_csv_file(csv_path)
    expected_csv = iter_csv_file(output_csv)

    for row1, row2 in zip(original_csv, expected_csv):
        assert row1 == row2
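
iter_csv_file is another omitted local helper; the comparison loop only requires that it yield the file's rows in order. One plausible sketch:

import csv

def iter_csv_file(csv_path):
    # Assumed helper: yields each parsed CSV row, header included.
    with open(csv_path, newline="", encoding="utf-8") as f:
        yield from csv.reader(f)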
Code example #6
def test_csv_datasetdict_reader_split(split, csv_path, tmp_path):
    if split:
        path = {split: csv_path}
    else:
        split = "train"
        path = {"train": csv_path, "test": csv_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
    dataset = CsvDatasetReader(path, cache_dir=cache_dir).read()
    _check_csv_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
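
_check_csv_datasetdict is likewise not shown. A sketch that matches the per-split assertions spelled out inline in example #8 below:

def _check_csv_datasetdict(dataset_dict, expected_features, splits=("train",)):
    # Mirrors the DatasetDict assertions from example #8.
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype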
Code example #7
def test_dataset_from_csv_keep_in_memory(keep_in_memory, csv_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = CsvDatasetReader(csv_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_csv_dataset(dataset, expected_features)
Code example #8
File: test_csv.py  Project: songfeng/datasets
def test_csv_datasetdict_reader(split, features, keep_in_memory, csv_path, tmp_path):
    if split:
        path = {split: csv_path}
    else:
        split = "train"
        path = {"train": csv_path, "test": csv_path}
    cache_dir = tmp_path / "cache"

    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features else None
    )
    previous_allocated_memory = pa.total_allocated_bytes()
    dataset = CsvDatasetReader(
        path, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory
    ).read()
    increased_allocated_memory = (pa.total_allocated_bytes() - previous_allocated_memory) > 0
    assert isinstance(dataset, DatasetDict)
    dataset = dataset[split]
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    assert dataset.split == split
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
    assert increased_allocated_memory == keep_in_memory
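
Example #8 performs the memory check by hand, comparing pa.total_allocated_bytes() before and after the read; this is the same bookkeeping that assert_arrow_memory_increases and assert_arrow_memory_doesnt_increase wrap as context managers in examples #1 and #7. Either way, the expectation is that keep_in_memory=True allocates Arrow memory while reading from the on-disk cache does not.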
Code example #9
def test_dataset_to_csv_invalidproc(csv_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_csv = os.path.join(cache_dir, "tmp.csv")
    dataset = CsvDatasetReader({"train": csv_path}, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        CsvDatasetWriter(dataset["train"], output_csv, index=False, num_proc=0)
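
Note that no .write() call appears inside the pytest.raises block: the ValueError for num_proc=0 is expected from the CsvDatasetWriter constructor itself, before any output is produced.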