# Imports the tests below rely on (the `mocker` fixture additionally
# requires the pytest-mock plugin):
import re
from pathlib import Path

import pandas as pd
import pytest
import s3fs
from pandas.testing import assert_frame_equal

from kedro.io import DataSetError, PartitionedDataSet


def test_save_invalidates_cache(self, dataset, local_csvs):
    pds = PartitionedDataSet(str(local_csvs), dataset)
    first_load = pds.load()

    data = pd.DataFrame({"foo": 42, "bar": ["a", "b", None]})
    part_id = "new/data.csv"
    pds.save({part_id: data})

    assert part_id not in first_load
    assert part_id in pds.load()

def test_release(self, dataset, local_csvs):
    partition_to_remove = "p2.csv"
    pds = PartitionedDataSet(str(local_csvs), dataset)
    initial_load = pds.load()
    assert partition_to_remove in initial_load

    (local_csvs / partition_to_remove).unlink()
    cached_load = pds.load()
    assert initial_load.keys() == cached_load.keys()

    pds.release()
    load_after_release = pds.load()
    assert initial_load.keys() ^ load_after_release.keys() == {partition_to_remove}

def test_release_instance_cache(self, local_csvs):
    """Test that cache invalidation does not affect other instances"""
    ds_a = PartitionedDataSet(str(local_csvs), "pandas.CSVDataSet")
    ds_a.load()
    ds_b = PartitionedDataSet(str(local_csvs), "pandas.CSVDataSet")
    ds_b.load()
    assert ds_a._partition_cache.currsize == 1
    assert ds_b._partition_cache.currsize == 1

    # invalidate the cache of dataset A
    ds_a.release()
    assert ds_a._partition_cache.currsize == 0
    # the cache of dataset B is unaffected
    assert ds_b._partition_cache.currsize == 1

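# The `local_csvs` and `partitioned_data_pandas` fixtures used above are
# defined elsewhere in the suite. A minimal sketch of plausible definitions,
# assuming five flat `pN.csv` partitions (the exact partition ids and frame
# contents are assumptions, not taken from this file):
@pytest.fixture
def partitioned_data_pandas():
    # keys double as partition ids, i.e. paths relative to the dataset root
    return {
        f"p{i}.csv": pd.DataFrame({"part": i, "col": ["a", "b", "c"]})
        for i in range(1, 6)
    }


@pytest.fixture
def local_csvs(tmp_path, partitioned_data_pandas):
    local_dir = tmp_path / "csvs"
    for partition_id, df in partitioned_data_pandas.items():
        destination = local_dir / partition_id
        destination.parent.mkdir(parents=True, exist_ok=True)
        df.to_csv(str(destination), index=False)
    return local_dir
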
def test_release(self, dataset, mocked_csvs_in_s3):
    partition_to_remove = "p2.csv"
    pds = PartitionedDataSet(mocked_csvs_in_s3, dataset)
    initial_load = pds.load()
    assert partition_to_remove in initial_load

    s3 = s3fs.S3FileSystem()
    s3.rm("/".join([mocked_csvs_in_s3, partition_to_remove]))
    cached_load = pds.load()
    assert initial_load.keys() == cached_load.keys()

    pds.release()
    load_after_release = pds.load()
    assert initial_load.keys() ^ load_after_release.keys() == {partition_to_remove}

def test_load(self, dataset, mocked_csvs_in_s3, partitioned_data_pandas):
    pds = PartitionedDataSet(mocked_csvs_in_s3, dataset)
    loaded_partitions = pds.load()

    assert loaded_partitions.keys() == partitioned_data_pandas.keys()
    for partition_id, load_func in loaded_partitions.items():
        df = load_func()
        assert_frame_equal(df, partitioned_data_pandas[partition_id])

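# `mocked_csvs_in_s3` is likewise assumed to come from the suite's conftest.
# One plausible sketch using moto (pre-5.x `mock_s3` API) and s3fs; the bucket
# name, prefix, and credential handling are illustrative assumptions:
from moto import mock_s3


@pytest.fixture
def mocked_csvs_in_s3(partitioned_data_pandas):
    bucket, prefix = "fake_bucket_name", "csvs"
    with mock_s3():
        s3 = s3fs.S3FileSystem()
        s3.mkdir(bucket)
        for partition_id, df in partitioned_data_pandas.items():
            with s3.open(f"{bucket}/{prefix}/{partition_id}", "wb") as fh:
                fh.write(df.to_csv(index=False).encode())
        yield f"s3://{bucket}/{prefix}"
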
# parametrization values below are an assumption consistent with the
# five-partition fixture sketched above; the originals are not shown here
@pytest.mark.parametrize("overwrite,expected_num_parts", [(False, 6), (True, 1)])
def test_overwrite(self, local_csvs, overwrite, expected_num_parts):
    pds = PartitionedDataSet(
        str(local_csvs), "pandas.CSVDataSet", overwrite=overwrite
    )
    original_data = pd.DataFrame({"foo": 42, "bar": ["a", "b", None]})
    part_id = "new/data"
    pds.save({part_id: original_data})
    loaded_partitions = pds.load()

    assert part_id in loaded_partitions
    assert len(loaded_partitions.keys()) == expected_num_parts

def test_save(self, dataset, mocked_csvs_in_s3):
    pds = PartitionedDataSet(mocked_csvs_in_s3, dataset)
    original_data = pd.DataFrame({"foo": 42, "bar": ["a", "b", None]})
    part_id = "new/data.csv"
    pds.save({part_id: original_data})

    s3 = s3fs.S3FileSystem()
    assert s3.exists("/".join([mocked_csvs_in_s3, part_id]))

    loaded_partitions = pds.load()
    assert part_id in loaded_partitions
    reloaded_data = loaded_partitions[part_id]()
    assert_frame_equal(reloaded_data, original_data)

def test_invalid_dataset(self, dataset, local_csvs):
    pds = PartitionedDataSet(str(local_csvs), dataset)
    loaded_partitions = pds.load()

    for partition, df_loader in loaded_partitions.items():
        pattern = r"Failed while loading data from data set ParquetDataSet(.*)"
        with pytest.raises(DataSetError, match=pattern) as exc_info:
            df_loader()
        error_message = str(exc_info.value)
        assert (
            "Either the file is corrupted or this is not a parquet file"
            in error_message
        )
        assert str(partition) in error_message

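# `test_invalid_dataset` presupposes a `dataset` fixture resolving to a type
# that cannot parse the CSV partitions, e.g. pandas.ParquetDataSet. A
# hypothetical sketch (the real fixture is not part of this excerpt; in the
# full suite the valid and invalid `dataset` fixtures would live on separate
# test classes so the names do not collide):
@pytest.fixture
def dataset():
    return "pandas.ParquetDataSet"
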
@pytest.mark.parametrize("suffix", ["", ".csv"])  # values assumed
def test_save(self, dataset, local_csvs, suffix):
    pds = PartitionedDataSet(str(local_csvs), dataset, filename_suffix=suffix)
    original_data = pd.DataFrame({"foo": 42, "bar": ["a", "b", None]})
    part_id = "new/data"
    pds.save({part_id: original_data})

    assert (local_csvs / "new" / ("data" + suffix)).is_file()
    loaded_partitions = pds.load()
    assert part_id in loaded_partitions
    reloaded_data = loaded_partitions[part_id]()
    assert_frame_equal(reloaded_data, original_data)

@pytest.mark.parametrize(
    "suffix,expected_num_parts", [("", 5), (".csv", 5)]  # values assumed
)
def test_load(
    self, dataset, local_csvs, partitioned_data_pandas, suffix, expected_num_parts
):
    pds = PartitionedDataSet(str(local_csvs), dataset, filename_suffix=suffix)
    loaded_partitions = pds.load()
    assert len(loaded_partitions.keys()) == expected_num_parts

    for partition_id, load_func in loaded_partitions.items():
        df = load_func()
        assert_frame_equal(df, partitioned_data_pandas[partition_id + suffix])
        if suffix:
            assert not partition_id.endswith(suffix)

def test_save_invalidates_cache(self, local_csvs, mocker):
    """Test that `save` invalidates the partition cache"""
    pds = PartitionedDataSet(str(local_csvs), "pandas.CSVDataSet")
    mocked_fs_invalidate = mocker.patch.object(pds._filesystem, "invalidate_cache")
    first_load = pds.load()
    assert pds._partition_cache.currsize == 1
    mocked_fs_invalidate.assert_not_called()

    # save clears the cache
    data = pd.DataFrame({"foo": 42, "bar": ["a", "b", None]})
    new_partition = "new/data.csv"
    pds.save({new_partition: data})
    assert pds._partition_cache.currsize == 0
    # `_filesystem.invalidate_cache` calls itself internally, resulting in
    # two mock calls rather than one, hence `assert_any_call` instead of
    # `assert_called_once_with`
    mocked_fs_invalidate.assert_any_call(pds._normalized_path)

    # a fresh load picks up the new partition
    second_load = pds.load()
    assert new_partition not in first_load
    assert new_partition in second_load

def test_load_args(self, mocker):
    fake_partition_name = "fake_partition"
    mocked_filesystem = mocker.patch("fsspec.filesystem")
    mocked_find = mocked_filesystem.return_value.find
    mocked_find.return_value = [fake_partition_name]

    path = str(Path.cwd())
    load_args = {"maxdepth": 42, "withdirs": True}
    # "pandas.CSVDataSet" used for consistency with the rest of the module;
    # the original referenced the older CSVLocalDataSet name
    pds = PartitionedDataSet(path, "pandas.CSVDataSet", load_args=load_args)
    mocker.patch.object(pds, "_path_to_partition", return_value=fake_partition_name)

    assert pds.load().keys() == {fake_partition_name}
    mocked_find.assert_called_once_with(path, **load_args)

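# For reference, the `load_args` above map one-to-one onto the keyword
# arguments of fsspec's `find`; a standalone illustration of the call the
# test asserts on:
import fsspec

fs = fsspec.filesystem("file")
# lists entries under the current directory, capped at depth 42 and
# including directories, mirroring load_args={"maxdepth": 42, "withdirs": True}
entries = fs.find(str(Path.cwd()), maxdepth=42, withdirs=True)
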
def test_load_s3a(self, mocked_csvs_in_s3, partitioned_data_pandas, mocker):
    s3a_path = "s3a://{}".format(mocked_csvs_in_s3.split("://", 1)[1])
    # any dataset type is fine as long as it passes the isinstance check,
    # since `_dataset_type` is mocked later anyway
    pds = PartitionedDataSet(s3a_path, "pandas.CSVDataSet")
    assert pds._protocol == "s3a"

    mocked_ds = mocker.patch.object(pds, "_dataset_type")
    mocked_ds.__name__ = "mocked"
    loaded_partitions = pds.load()

    assert loaded_partitions.keys() == partitioned_data_pandas.keys()
    assert mocked_ds.call_count == len(loaded_partitions)
    expected = [
        mocker.call(filepath="{}/{}".format(s3a_path, partition_id))
        for partition_id in loaded_partitions
    ]
    mocked_ds.assert_has_calls(expected, any_order=True)

def test_no_partitions(self, tmpdir):
    pds = PartitionedDataSet(str(tmpdir), "pandas.CSVDataSet")

    pattern = re.escape(f"No partitions found in `{tmpdir}`")
    with pytest.raises(DataSetError, match=pattern):
        pds.load()