def _resolve_paths_and_filesystem(
    paths: Union[str, List[str]],
    filesystem: "pyarrow.fs.FileSystem" = None,
) -> Tuple[List[str], "pyarrow.fs.FileSystem"]:
    """
    Resolves and normalizes all provided paths, infers a filesystem from the
    paths and ensures that all paths use the same filesystem.

    Args:
        paths: A single file/directory path or a list of file/directory paths.
            A list of paths can contain both files and directories.
        filesystem: The filesystem implementation that should be used for
            reading these files. If None, a filesystem will be inferred. If not
            None, the provided filesystem will still be validated against all
            filesystems inferred from the provided paths to ensure
            compatibility.
    """
    import pyarrow as pa
    from pyarrow.fs import (FileSystem, PyFileSystem, FSSpecHandler,
                            _resolve_filesystem_and_path)
    import fsspec

    if isinstance(paths, str):
        paths = [paths]
    elif (not isinstance(paths, list)
          or any(not isinstance(p, str) for p in paths)):
        raise ValueError(
            "paths must be a path string or a list of path strings.")
    elif len(paths) == 0:
        raise ValueError("Must provide at least one path.")

    if filesystem and not isinstance(filesystem, FileSystem):
        if not isinstance(filesystem, fsspec.spec.AbstractFileSystem):
            raise TypeError(f"The filesystem passed must either conform to "
                            f"pyarrow.fs.FileSystem, or "
                            f"fsspec.spec.AbstractFileSystem. The provided "
                            f"filesystem was: {filesystem}")
        filesystem = PyFileSystem(FSSpecHandler(filesystem))

    resolved_paths = []
    for path in paths:
        try:
            resolved_filesystem, resolved_path = _resolve_filesystem_and_path(
                path, filesystem)
        except pa.lib.ArrowInvalid as e:
            if "Cannot parse URI" in str(e):
                resolved_filesystem, resolved_path = (
                    _resolve_filesystem_and_path(_encode_url(path),
                                                 filesystem))
                resolved_path = _decode_url(resolved_path)
            else:
                raise
        if filesystem is None:
            filesystem = resolved_filesystem
        else:
            # A user-provided filesystem keeps the protocol/scheme on the
            # resolved path, so strip it before normalizing.
            resolved_path = _unwrap_protocol(resolved_path)
        resolved_path = filesystem.normalize_path(resolved_path)
        resolved_paths.append(resolved_path)

    return resolved_paths, filesystem
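A minimal usage sketch of the helper above, assuming it is defined alongside the _unwrap_protocol, _encode_url, and _decode_url helpers it references; the paths are hypothetical and pyarrow infers a local filesystem for them.

# Usage sketch (hypothetical local paths).
import pyarrow.fs

resolved, fs = _resolve_paths_and_filesystem(
    ["/tmp/data/a.csv", "/tmp/data/b.csv"])
print(type(fs))   # e.g. pyarrow.fs.LocalFileSystem, inferred from the paths
print(resolved)   # normalized paths, all on the same filesystem

# An explicit pyarrow (or fsspec) filesystem may also be passed; it is then
# used for every path instead of being inferred.
resolved, fs = _resolve_paths_and_filesystem(
    "/tmp/data/a.csv", filesystem=pyarrow.fs.LocalFileSystem())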
Example #2
def read_table(source, columns=None, filesystem=None):
    """
    Read a Table from an ORC file.

    Parameters
    ----------
    source : str, pyarrow.NativeFile, or file-like object
        If a string is passed, it can be a single file name. For file-like
        objects, only a single file is read. Use pyarrow.BufferReader to read
        a file contained in a bytes or buffer-like object.
    columns : list
        If not None, only these columns will be read from the file. A column
        name may be a prefix of a nested field, e.g. 'a' will select 'a.b',
        'a.c', and 'a.d.e'. If empty, no columns will be read. Note
        that the table will still have the correct num_rows set despite having
        no columns.
    filesystem : FileSystem, default None
        If nothing is passed, paths are assumed to be found in the local
        on-disk filesystem.
    """

    filesystem, path = _resolve_filesystem_and_path(source, filesystem)
    if filesystem is not None:
        source = filesystem.open_input_file(path)

    if columns is not None and len(columns) == 0:
        result = ORCFile(source).read().select(columns)
    else:
        result = ORCFile(source).read(columns=columns)

    return result
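Assuming this is the ORC reader exposed as pyarrow.orc.read_table, a short usage sketch follows; the file path is hypothetical.

# Usage sketch for the ORC reader (hypothetical path).
import pyarrow.orc as orc

# Read all columns from a local ORC file.
table = orc.read_table("/tmp/data/example.orc")

# Read a subset of columns; a name may be the prefix of a nested field.
subset = orc.read_table("/tmp/data/example.orc", columns=["a", "b"])
print(subset.num_rows, subset.column_names)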
def _resolve_paths_and_filesystem(
    paths: Union[str, List[str]],
    filesystem: "pyarrow.fs.FileSystem" = None
) -> Tuple[List[str], "pyarrow.fs.FileSystem"]:
    """
    Resolves and normalizes all provided paths, infers a filesystem from the
    paths and ensures that all paths use the same filesystem, and expands all
    directory paths to the underlying file paths.

    Args:
        paths: A single file/directory path or a list of file/directory paths.
            A list of paths can contain both files and directories.
        filesystem: The filesystem implementation that should be used for
            reading these files. If None, a filesystem will be inferred. If not
            None, the provided filesystem will still be validated against all
            filesystems inferred from the provided paths to ensure
            compatibility.
    """
    from pyarrow.fs import FileType, _resolve_filesystem_and_path

    if isinstance(paths, str):
        paths = [paths]
    elif (not isinstance(paths, list)
          or any(not isinstance(p, str) for p in paths)):
        raise ValueError(
            "paths must be a path string or a list of path strings.")
    elif len(paths) == 0:
        raise ValueError("Must provide at least one path.")

    resolved_paths = []
    for path in paths:
        if filesystem is not None:
            # If we provide a filesystem, _resolve_filesystem_and_path will not
            # slice off the protocol from the provided URI/path when resolved.
            path = _unwrap_protocol(path)
        resolved_filesystem, resolved_path = _resolve_filesystem_and_path(
            path, filesystem)
        if filesystem is None:
            filesystem = resolved_filesystem
        elif type(resolved_filesystem) != type(filesystem):
            raise ValueError("All paths must use same filesystem.")
        resolved_path = filesystem.normalize_path(resolved_path)
        resolved_paths.append(resolved_path)

    expanded_paths = []
    file_infos = []
    for path in resolved_paths:
        file_info = filesystem.get_file_info(path)
        if file_info.type == FileType.Directory:
            paths, file_infos_ = _expand_directory(path, filesystem)
            expanded_paths.extend(paths)
            file_infos.extend(file_infos_)
        elif file_info.type == FileType.File:
            expanded_paths.append(path)
            file_infos.append(file_info)
        else:
            raise FileNotFoundError(path)
    return expanded_paths, file_infos, filesystem
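A usage sketch for this directory-expanding variant, assuming the _expand_directory and _unwrap_protocol helpers are defined in the same module; the dataset directory is hypothetical.

# Usage sketch (hypothetical directory of files under /tmp/dataset).
file_paths, file_infos, fs = _resolve_paths_and_filesystem("/tmp/dataset")

# Directories have been expanded into their files, and each path is paired
# with a pyarrow.fs.FileInfo carrying its size and type.
for path, info in zip(file_paths, file_infos):
    print(path, info.size)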
Example #5
def _ensure_single_source(path, filesystem=None):
    """
    Treat path as either a recursively traversable directory or a single file.

    Parameters
    ----------
    path : path-like
    filesystem : FileSystem or str, optional
        If a URI is passed, then its path component will act as a prefix for
        the file paths.

    Returns
    -------
    (FileSystem, list of str or fs.Selector)
        File system object and either a single item list pointing to a file or
        an fs.Selector object pointing to a directory.

    Raises
    ------
    TypeError
        If the passed filesystem has wrong type.
    FileNotFoundError
        If the referenced file or directory doesn't exist.
    """
    from pyarrow.fs import FileType, FileSelector, _resolve_filesystem_and_path

    # at this point we already checked that `path` is a path-like
    filesystem, path = _resolve_filesystem_and_path(path, filesystem)

    # ensure that the path is normalized before passing to dataset discovery
    path = filesystem.normalize_path(path)

    # retrieve the file descriptor
    file_info = filesystem.get_file_info(path)

    # depending on the path type either return with a recursive
    # directory selector or as a list containing a single file
    if file_info.type == FileType.Directory:
        paths_or_selector = FileSelector(path, recursive=True)
    elif file_info.type == FileType.File:
        paths_or_selector = [path]
    else:
        raise FileNotFoundError(path)

    return filesystem, paths_or_selector
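The same directory-versus-file dispatch can be reproduced with the public pyarrow.fs API; a small sketch under the assumption of a local, hypothetical path.

# Sketch of the dispatch in _ensure_single_source using public APIs only.
from pyarrow.fs import FileSelector, FileType, LocalFileSystem

fs = LocalFileSystem()
path = fs.normalize_path("/tmp/dataset")   # hypothetical path
info = fs.get_file_info(path)

if info.type == FileType.Directory:
    # Recursive selector, as returned for directories above.
    children = fs.get_file_info(FileSelector(path, recursive=True))
    print([c.path for c in children])
elif info.type == FileType.File:
    print([path])
else:
    raise FileNotFoundError(path)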
Example #6
def write_dataset(data,
                  base_dir,
                  basename_template=None,
                  format=None,
                  partitioning=None,
                  schema=None,
                  filesystem=None,
                  file_options=None,
                  use_threads=True,
                  use_async=False,
                  max_partitions=None,
                  file_visitor=None):
    """
    Write a dataset to a given format and partitioning.

    Parameters
    ----------
    data : Dataset, Table/RecordBatch, RecordBatchReader, list of
           Table/RecordBatch, or iterable of RecordBatch
        The data to write. This can be a Dataset instance or
        in-memory Arrow data. If an iterable is given, the schema must
        also be given.
    base_dir : str
        The root directory where to write the dataset.
    basename_template : str, optional
        A template string used to generate basenames of written data files.
        The token '{i}' will be replaced with an automatically incremented
        integer. If not specified, it defaults to
        "part-{i}." + format.default_extname
    format : FileFormat or str
        The format in which to write the dataset. Currently supported:
        "parquet", "ipc"/"feather". If a FileSystemDataset is being written
        and `format` is not specified, it defaults to the same format as the
        specified FileSystemDataset. When writing a Table or RecordBatch, this
        keyword is required.
    partitioning : Partitioning, optional
        The partitioning scheme specified with the ``partitioning()``
        function.
    schema : Schema, optional
    filesystem : FileSystem, optional
    file_options : FileWriteOptions, optional
        FileFormat specific write options, created using the
        ``FileFormat.make_write_options()`` function.
    use_threads : bool, default True
        Write files in parallel. If enabled, the maximum parallelism
        determined by the number of available CPU cores will be used.
    use_async : bool, default False
        If enabled, an async scanner will be used that should offer
        better performance with high-latency/highly-parallel filesystems
        (e.g. S3)
    max_partitions : int, default 1024
        Maximum number of partitions any batch may be written into.
    file_visitor : Function
        If set, this function will be called with a WrittenFile instance
        for each file created during the call.  This object will have both
        a path attribute and a metadata attribute.

        The path attribute will be a string containing the path to
        the created file.

        The metadata attribute will be the parquet metadata of the file.
        This metadata will have the file path attribute set and can be used
        to build a _metadata file.  The metadata attribute will be None if
        the format is not parquet.

        Example visitor which simply collects the filenames created::

            visited_paths = []

            def file_visitor(written_file):
                visited_paths.append(written_file.path)
    """
    from pyarrow.fs import _resolve_filesystem_and_path

    if isinstance(data, (list, tuple)):
        schema = schema or data[0].schema
        data = InMemoryDataset(data, schema=schema)
    elif isinstance(data, (pa.RecordBatch, pa.Table)):
        schema = schema or data.schema
        data = InMemoryDataset(data, schema=schema)
    elif isinstance(data, pa.ipc.RecordBatchReader) or _is_iterable(data):
        data = Scanner.from_batches(data, schema=schema)
        schema = None
    elif not isinstance(data, (Dataset, Scanner)):
        raise ValueError(
            "Only Dataset, Scanner, Table/RecordBatch, RecordBatchReader, "
            "a list of Tables/RecordBatches, or iterable of batches are "
            "supported.")

    if format is None and isinstance(data, FileSystemDataset):
        format = data.format
    else:
        format = _ensure_format(format)

    if file_options is None:
        file_options = format.make_write_options()

    if format != file_options.format:
        raise TypeError("Supplied FileWriteOptions have format {}, "
                        "which doesn't match supplied FileFormat {}".format(
                            file_options.format, format))

    if basename_template is None:
        basename_template = "part-{i}." + format.default_extname

    if max_partitions is None:
        max_partitions = 1024

    partitioning = _ensure_write_partitioning(partitioning)

    filesystem, base_dir = _resolve_filesystem_and_path(base_dir, filesystem)

    if isinstance(data, Dataset):
        scanner = data.scanner(use_threads=use_threads, use_async=use_async)
    else:
        # scanner was passed directly by the user, in which case a schema
        # cannot be passed
        if schema is not None:
            raise ValueError("Cannot specify a schema when writing a Scanner")
        scanner = data

    _filesystemdataset_write(scanner, base_dir, basename_template, filesystem,
                             partitioning, file_options, max_partitions,
                             file_visitor)
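A short, hedged sketch of calling the public pyarrow.dataset.write_dataset to persist an in-memory Table as a partitioned Parquet dataset; the table contents and output directory are illustrative.

# Usage sketch: write a Table as a hive-partitioned Parquet dataset.
import pyarrow as pa
import pyarrow.dataset as ds

table = pa.table({"year": [2020, 2020, 2021], "value": [1.0, 2.0, 3.0]})

ds.write_dataset(
    table,
    "/tmp/out",                      # hypothetical base_dir
    format="parquet",
    partitioning=ds.partitioning(
        pa.schema([("year", pa.int64())]), flavor="hive"),
)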
Example #7
def write_dataset(data,
                  base_dir,
                  basename_template=None,
                  format=None,
                  partitioning=None,
                  schema=None,
                  filesystem=None,
                  file_options=None,
                  use_threads=True,
                  max_partitions=None):
    """
    Write a dataset to a given format and partitioning.

    Parameters
    ----------
    data : Dataset, Table/RecordBatch, RecordBatchReader, list of
           Table/RecordBatch, or iterable of RecordBatch
        The data to write. This can be a Dataset instance or
        in-memory Arrow data. If an iterable is given, the schema must
        also be given.
    base_dir : str
        The root directory where to write the dataset.
    basename_template : str, optional
        A template string used to generate basenames of written data files.
        The token '{i}' will be replaced with an automatically incremented
        integer. If not specified, it defaults to
        "part-{i}." + format.default_extname
    format : FileFormat or str
        The format in which to write the dataset. Currently supported:
        "parquet", "ipc"/"feather". If a FileSystemDataset is being written
        and `format` is not specified, it defaults to the same format as the
        specified FileSystemDataset. When writing a Table or RecordBatch, this
        keyword is required.
    partitioning : Partitioning, optional
        The partitioning scheme specified with the ``partitioning()``
        function.
    schema : Schema, optional
    filesystem : FileSystem, optional
    file_options : FileWriteOptions, optional
        FileFormat specific write options, created using the
        ``FileFormat.make_write_options()`` function.
    use_threads : bool, default True
        Write files in parallel. If enabled, the maximum parallelism
        determined by the number of available CPU cores will be used.
    max_partitions : int, default 1024
        Maximum number of partitions any batch may be written into.
    """
    from pyarrow.fs import _resolve_filesystem_and_path

    if isinstance(data, Dataset):
        schema = schema or data.schema
    elif isinstance(data, (list, tuple)):
        schema = schema or data[0].schema
        data = InMemoryDataset(data, schema=schema)
    elif isinstance(data, (pa.RecordBatch, pa.ipc.RecordBatchReader,
                           pa.Table)) or _is_iterable(data):
        data = InMemoryDataset(data, schema=schema)
        schema = schema or data.schema
    else:
        raise ValueError(
            "Only Dataset, Table/RecordBatch, RecordBatchReader, a list "
            "of Tables/RecordBatches, or iterable of batches are supported.")

    if format is None and isinstance(data, FileSystemDataset):
        format = data.format
    else:
        format = _ensure_format(format)

    if file_options is None:
        file_options = format.make_write_options()

    if format != file_options.format:
        raise TypeError("Supplied FileWriteOptions have format {}, "
                        "which doesn't match supplied FileFormat {}".format(
                            file_options.format, format))

    if basename_template is None:
        basename_template = "part-{i}." + format.default_extname

    if max_partitions is None:
        max_partitions = 1024

    partitioning = _ensure_write_partitioning(partitioning)

    filesystem, base_dir = _resolve_filesystem_and_path(base_dir, filesystem)

    _filesystemdataset_write(data, base_dir, basename_template, schema,
                             filesystem, partitioning, file_options,
                             use_threads, max_partitions)
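As the docstring notes, an iterable of RecordBatch is also accepted as long as a schema is given; a hedged sketch of streaming batches into the writer follows (batch contents and the output directory are made up).

# Usage sketch: write from an iterable of RecordBatches with an explicit schema.
import pyarrow as pa
import pyarrow.dataset as ds

schema = pa.schema([("id", pa.int64()), ("value", pa.float64())])

def batches():
    # Yield a few small batches instead of materializing one big Table.
    for start in range(0, 9, 3):
        ids = list(range(start, start + 3))
        yield pa.record_batch(
            [pa.array(ids), pa.array([float(i) for i in ids])], schema=schema)

ds.write_dataset(batches(), "/tmp/out_stream",   # hypothetical base_dir
                 format="parquet", schema=schema)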
Example #8
def write_dataset(data, base_dir, basename_template=None, format=None,
                  partitioning=None, partitioning_flavor=None, schema=None,
                  filesystem=None, file_options=None, use_threads=True,
                  max_partitions=None, max_open_files=None,
                  max_rows_per_file=None, min_rows_per_group=None,
                  max_rows_per_group=None, file_visitor=None,
                  existing_data_behavior='error', create_dir=True):
    """
    Write a dataset to a given format and partitioning.

    Parameters
    ----------
    data : Dataset, Table/RecordBatch, RecordBatchReader, list of \
Table/RecordBatch, or iterable of RecordBatch
        The data to write. This can be a Dataset instance or
        in-memory Arrow data. If an iterable is given, the schema must
        also be given.
    base_dir : str
        The root directory where to write the dataset.
    basename_template : str, optional
        A template string used to generate basenames of written data files.
        The token '{i}' will be replaced with an automatically incremented
        integer. If not specified, it defaults to
        "part-{i}." + format.default_extname
    format : FileFormat or str
        The format in which to write the dataset. Currently supported:
        "parquet", "ipc"/"arrow"/"feather", and "csv". If a FileSystemDataset
        is being written and `format` is not specified, it defaults to the
        same format as the specified FileSystemDataset. When writing a
        Table or RecordBatch, this keyword is required.
    partitioning : Partitioning or list[str], optional
        The partitioning scheme specified with the ``partitioning()``
        function or a list of field names. When providing a list of
        field names, you can use ``partitioning_flavor`` to drive which
        partitioning type should be used.
    partitioning_flavor : str, optional
        One of the partitioning flavors supported by
        ``pyarrow.dataset.partitioning``. If omitted will use the
        default of ``partitioning()`` which is directory partitioning.
    schema : Schema, optional
    filesystem : FileSystem, optional
    file_options : pyarrow.dataset.FileWriteOptions, optional
        FileFormat specific write options, created using the
        ``FileFormat.make_write_options()`` function.
    use_threads : bool, default True
        Write files in parallel. If enabled, the maximum parallelism
        determined by the number of available CPU cores will be used.
    max_partitions : int, default 1024
        Maximum number of partitions any batch may be written into.
    max_open_files : int, default 1024
        If greater than 0 then this will limit the maximum number of
        files that can be left open. If an attempt is made to open
        too many files then the least recently used file will be closed.
        If this setting is set too low you may end up fragmenting your
        data into many small files.
    max_rows_per_file : int, default 0
        Maximum number of rows per file. If greater than 0 then this will
        limit how many rows are placed in any single file. Otherwise there
        will be no limit and one file will be created in each output
        directory unless files need to be closed to respect max_open_files.
    min_rows_per_group : int, default 0
        Minimum number of rows per group. When the value is greater than 0,
        the dataset writer will batch incoming data and only write the row
        groups to the disk when sufficient rows have accumulated.
    max_rows_per_group : int, default 1024 * 1024
        Maximum number of rows per group. If the value is greater than 0,
        then the dataset writer may split up large incoming batches into
        multiple row groups.  If this value is set, then min_rows_per_group
        should also be set; otherwise the writer could end up producing very
        small row groups.
    file_visitor : function
        If set, this function will be called with a WrittenFile instance
        for each file created during the call.  This object will have both
        a path attribute and a metadata attribute.

        The path attribute will be a string containing the path to
        the created file.

        The metadata attribute will be the parquet metadata of the file.
        This metadata will have the file path attribute set and can be used
        to build a _metadata file.  The metadata attribute will be None if
        the format is not parquet.

        Example visitor which simply collects the filenames created::

            visited_paths = []

            def file_visitor(written_file):
                visited_paths.append(written_file.path)
    existing_data_behavior : 'error' | 'overwrite_or_ignore' | \
'delete_matching'
        Controls how the dataset will handle data that already exists in
        the destination.  The default behavior ('error') is to raise an error
        if any data exists in the destination.

        'overwrite_or_ignore' will ignore any existing data and will
        overwrite files with the same name as an output file.  Other
        existing files will be ignored.  This behavior, in combination
        with a unique basename_template for each write, will allow for
        an append workflow.

        'delete_matching' is useful when you are writing a partitioned
        dataset.  The first time each partition directory is encountered
        the entire directory will be deleted.  This allows you to overwrite
        old partitions completely.
    create_dir : bool, default True
        If False, directories will not be created.  This can be useful for
        filesystems that do not require directories.
    """
    from pyarrow.fs import _resolve_filesystem_and_path

    if isinstance(data, (list, tuple)):
        schema = schema or data[0].schema
        data = InMemoryDataset(data, schema=schema)
    elif isinstance(data, (pa.RecordBatch, pa.Table)):
        schema = schema or data.schema
        data = InMemoryDataset(data, schema=schema)
    elif isinstance(data, pa.ipc.RecordBatchReader) or _is_iterable(data):
        data = Scanner.from_batches(data, schema=schema)
        schema = None
    elif not isinstance(data, (Dataset, Scanner)):
        raise ValueError(
            "Only Dataset, Scanner, Table/RecordBatch, RecordBatchReader, "
            "a list of Tables/RecordBatches, or iterable of batches are "
            "supported."
        )

    if format is None and isinstance(data, FileSystemDataset):
        format = data.format
    else:
        format = _ensure_format(format)

    if file_options is None:
        file_options = format.make_write_options()

    if format != file_options.format:
        raise TypeError("Supplied FileWriteOptions have format {}, "
                        "which doesn't match supplied FileFormat {}".format(
                            file_options.format, format))

    if basename_template is None:
        basename_template = "part-{i}." + format.default_extname

    if max_partitions is None:
        max_partitions = 1024

    if max_open_files is None:
        max_open_files = 1024

    if max_rows_per_file is None:
        max_rows_per_file = 0

    if max_rows_per_group is None:
        max_rows_per_group = 1 << 20

    if min_rows_per_group is None:
        min_rows_per_group = 0

    # at this point data is a Scanner or a Dataset, anything else
    # was converted to one of those two. So we can grab the schema
    # to build the partitioning object from Dataset.
    if isinstance(data, Scanner):
        partitioning_schema = data.dataset_schema
    else:
        partitioning_schema = data.schema
    partitioning = _ensure_write_partitioning(partitioning,
                                              schema=partitioning_schema,
                                              flavor=partitioning_flavor)

    filesystem, base_dir = _resolve_filesystem_and_path(base_dir, filesystem)

    if isinstance(data, Dataset):
        scanner = data.scanner(use_threads=use_threads)
    else:
        # scanner was passed directly by the user, in which case a schema
        # cannot be passed
        if schema is not None:
            raise ValueError("Cannot specify a schema when writing a Scanner")
        scanner = data

    _filesystemdataset_write(
        scanner, base_dir, basename_template, filesystem, partitioning,
        file_options, max_partitions, file_visitor, existing_data_behavior,
        max_open_files, max_rows_per_file,
        min_rows_per_group, max_rows_per_group, create_dir
    )
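A hedged sketch of the append-style workflow described above: a unique basename_template per write, 'overwrite_or_ignore' for existing data, and a file_visitor collecting the created paths. The table, directory, and run token are illustrative.

# Usage sketch: append-style writes with a file_visitor.
import uuid

import pyarrow as pa
import pyarrow.dataset as ds

table = pa.table({"year": [2021, 2022], "value": [1.0, 2.0]})

visited_paths = []

def file_visitor(written_file):
    # written_file.path is the created file; .metadata is Parquet metadata.
    visited_paths.append(written_file.path)

run_id = uuid.uuid4().hex            # illustrative unique token per write
ds.write_dataset(
    table,
    "/tmp/out",                      # hypothetical base_dir
    format="parquet",
    partitioning=["year"],
    partitioning_flavor="hive",
    basename_template="part-" + run_id + "-{i}.parquet",
    existing_data_behavior="overwrite_or_ignore",
    file_visitor=file_visitor,
)
print(visited_paths)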
Example #9
def _resolve_paths_and_filesystem(
    paths: Union[str, List[str]],
    filesystem: "pyarrow.fs.FileSystem" = None,
) -> Tuple[List[str], "pyarrow.fs.FileSystem"]:
    """
    Resolves and normalizes all provided paths, infers a filesystem from the
    paths and ensures that all paths use the same filesystem.

    Args:
        paths: A single file/directory path or a list of file/directory paths.
            A list of paths can contain both files and directories.
        filesystem: The filesystem implementation that should be used for
            reading these files. If None, a filesystem will be inferred. If not
            None, the provided filesystem will still be validated against all
            filesystems inferred from the provided paths to ensure
            compatibility.
    """
    import urllib.parse

    import pyarrow as pa
    from pyarrow.fs import (
        FileSystem,
        FSSpecHandler,
        PyFileSystem,
        _resolve_filesystem_and_path,
    )

    if isinstance(paths, str):
        paths = [paths]
    elif not isinstance(paths, list) or any(not isinstance(p, str)
                                            for p in paths):
        raise ValueError(
            "paths must be a path string or a list of path strings.")
    elif len(paths) == 0:
        raise ValueError("Must provide at least one path.")

    need_unwrap_path_protocol = True
    if filesystem and not isinstance(filesystem, FileSystem):
        err_msg = (f"The filesystem passed must either conform to "
                   f"pyarrow.fs.FileSystem, or "
                   f"fsspec.spec.AbstractFileSystem. The provided "
                   f"filesystem was: {filesystem}")
        try:
            import fsspec
            from fsspec.implementations.http import HTTPFileSystem
        except ModuleNotFoundError:
            # If filesystem is not a pyarrow filesystem and fsspec isn't
            # installed, then filesystem is neither a pyarrow filesystem nor
            # an fsspec filesystem, so we raise a TypeError.
            raise TypeError(err_msg) from None
        if not isinstance(filesystem, fsspec.spec.AbstractFileSystem):
            raise TypeError(err_msg) from None
        if isinstance(filesystem, HTTPFileSystem):
            # If filesystem is fsspec HTTPFileSystem, the protocol/scheme of paths
            # should not be unwrapped/removed, because HTTPFileSystem expects full file
            # paths including protocol/scheme. This is different behavior compared to
            # file systems implementation in pyarrow.fs.FileSystem.
            need_unwrap_path_protocol = False

        filesystem = PyFileSystem(FSSpecHandler(filesystem))

    resolved_paths = []
    for path in paths:
        path = _resolve_example_path(path)
        try:
            resolved_filesystem, resolved_path = _resolve_filesystem_and_path(
                path, filesystem)
        except pa.lib.ArrowInvalid as e:
            if "Cannot parse URI" in str(e):
                resolved_filesystem, resolved_path = _resolve_filesystem_and_path(
                    _encode_url(path), filesystem)
                resolved_path = _decode_url(resolved_path)
            elif "Unrecognized filesystem type in URI" in str(e):
                scheme = urllib.parse.urlparse(path,
                                               allow_fragments=False).scheme
                if scheme in ["http", "https"]:
                    # If scheme of path is HTTP and filesystem is not resolved,
                    # try to use fsspec HTTPFileSystem. This expects fsspec is
                    # installed.
                    try:
                        from fsspec.implementations.http import HTTPFileSystem
                    except ModuleNotFoundError:
                        raise ImportError(
                            "Please install fsspec to read files from HTTP."
                        ) from None

                    resolved_filesystem = PyFileSystem(
                        FSSpecHandler(HTTPFileSystem()))
                    resolved_path = path
                    need_unwrap_path_protocol = False
                else:
                    raise
            else:
                raise
        if filesystem is None:
            filesystem = resolved_filesystem
        elif need_unwrap_path_protocol:
            resolved_path = _unwrap_protocol(resolved_path)
        resolved_path = filesystem.normalize_path(resolved_path)
        resolved_paths.append(resolved_path)

    return resolved_paths, filesystem
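A sketch of the two ways HTTP(S) paths can reach this helper, assuming fsspec (with its HTTP implementation) is installed, the module's other helpers (_resolve_example_path, _unwrap_protocol, _encode_url, _decode_url) are available, and that the URL is only illustrative.

# Usage sketch for HTTP paths (requires fsspec; URL is illustrative).
from fsspec.implementations.http import HTTPFileSystem

url = "https://example.com/data/file.csv"

# 1. Let the helper fall back to fsspec's HTTPFileSystem when pyarrow
#    cannot resolve the http(s) scheme itself.
paths, fs = _resolve_paths_and_filesystem([url])

# 2. Or pass an fsspec HTTPFileSystem explicitly; the scheme is kept on the
#    resolved paths because HTTPFileSystem expects full URLs.
paths, fs = _resolve_paths_and_filesystem([url], filesystem=HTTPFileSystem())
print(paths)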