Example #1
def _read_text_full(
    parser_func: Callable,
    path_root: str,
    path: str,
    boto3_session: Union[boto3.Session, Dict[str, Optional[str]]],
    pandas_args: Dict[str, Any],
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
    dataset: bool = False,
) -> pd.DataFrame:
    fs: s3fs.S3FileSystem = _utils.get_fs(
        session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
    if pandas_args.get("compression", "infer") == "infer":
        pandas_args["compression"] = infer_compression(path,
                                                       compression="infer")
    mode: str = "r" if pandas_args.get("compression") is None else "rb"
    encoding: Optional[str] = pandas_args.get("encoding", None)
    newline: Optional[str] = pandas_args.get("lineterminator", None)
    with fs.open(path=path, mode=mode, encoding=encoding,
                 newline=newline) as f:
        df: pd.DataFrame = parser_func(f, **pandas_args)
    if dataset is True:
        partitions: Dict[str, Any] = _utils.extract_partitions_from_path(
            path_root=path_root, path=path)
        for column_name, value in partitions.items():
            df[column_name] = value
    return df
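The `_utils.extract_partitions_from_path` helper above is internal to the library and not shown here; below is a minimal, hypothetical sketch of the Hive-style key=value parsing it presumably performs (this implementation is an assumption, not the library's code).

from typing import Dict

def extract_partitions_from_path_sketch(path_root: str, path: str) -> Dict[str, str]:
    # Assumes a Hive-style layout: <path_root>/col1=val1/col2=val2/.../file.csv
    relative = path[len(path_root):] if path.startswith(path_root) else path
    partitions: Dict[str, str] = {}
    for part in relative.split("/"):
        if "=" in part:
            key, _, value = part.partition("=")
            partitions[key] = value
    return partitions

# extract_partitions_from_path_sketch("s3://bucket/root/",
#                                     "s3://bucket/root/year=2020/month=01/data.csv")
# -> {"year": "2020", "month": "01"}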
Example #2
def _get_write_details(path: str, pandas_kwargs: Dict[str, Any]) -> Tuple[str, Optional[str], Optional[str]]:
    if pandas_kwargs.get("compression", "infer") == "infer":
        pandas_kwargs["compression"] = infer_compression(path, compression="infer")
    mode: str = "w" if pandas_kwargs.get("compression") is None else "wb"
    encoding: Optional[str] = pandas_kwargs.get("encoding", None)
    newline: Optional[str] = pandas_kwargs.get("lineterminator", "")
    return mode, encoding, newline
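A hedged illustration of what the helper returns for a gzip-suffixed path; the `pandas.io.common` import path is an assumption and may differ across pandas versions.

from pandas.io.common import infer_compression  # needed by _get_write_details above

mode, encoding, newline = _get_write_details(
    path="s3://bucket/key.csv.gz",
    pandas_kwargs={"encoding": "utf-8"},
)
# ".gz" infers gzip, so the target must be opened in binary mode:
# mode == "wb", encoding == "utf-8", newline == ""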
Example #3
def _read_text_chunksize(
    parser_func: Callable,
    path_root: str,
    paths: List[str],
    boto3_session: boto3.Session,
    chunksize: int,
    pandas_args: Dict[str, Any],
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
    dataset: bool = False,
) -> Iterator[pd.DataFrame]:
    fs: s3fs.S3FileSystem = _utils.get_fs(
        session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
    for path in paths:
        _logger.debug("path: %s", path)
        partitions: Dict[str, Any] = {}
        if dataset is True:
            partitions = _utils.extract_partitions_from_path(
                path_root=path_root, path=path)
        if pandas_args.get("compression", "infer") == "infer":
            pandas_args["compression"] = infer_compression(path,
                                                           compression="infer")
        mode: str = "r" if pandas_args.get("compression") is None else "rb"
        with fs.open(path, mode) as f:
            reader: pandas.io.parsers.TextFileReader = parser_func(
                f, chunksize=chunksize, **pandas_args)
            for df in reader:
                if dataset is True:
                    for column_name, value in partitions.items():
                        df[column_name] = value
                yield df
Example #4
def read_json(
    path_or_buf=None,
    orient=None,
    typ="frame",
    dtype=None,
    convert_axes=None,
    convert_dates=True,
    keep_default_dates=True,
    numpy=False,
    precise_float=False,
    date_unit=None,
    encoding=None,
    lines=False,
    chunksize=None,
    compression="infer",
):
    """
    Convert a JSON string to pandas object.

    Parameters
    ----------
    path_or_buf : a valid JSON str, path object or file-like object
        Any valid string path is acceptable. The string could be a URL. Valid
        URL schemes include http, ftp, s3, and file. For file URLs, a host is
        expected. A local file could be:
        ``file://localhost/path/to/table.json``.

        If you want to pass in a path object, pandas accepts any
        ``os.PathLike``.

        By file-like object, we refer to objects with a ``read()`` method,
        such as a file handler (e.g. via builtin ``open`` function)
        or ``StringIO``.
    orient : str
        Indication of expected JSON string format.
        Compatible JSON strings can be produced by ``to_json()`` with a
        corresponding orient value.
        The set of possible orients is:

        - ``'split'`` : dict like
          ``{index -> [index], columns -> [columns], data -> [values]}``
        - ``'records'`` : list like
          ``[{column -> value}, ... , {column -> value}]``
        - ``'index'`` : dict like ``{index -> {column -> value}}``
        - ``'columns'`` : dict like ``{column -> {index -> value}}``
        - ``'values'`` : just the values array

        The allowed and default values depend on the value
        of the `typ` parameter.

        * when ``typ == 'series'``,

          - allowed orients are ``{'split','records','index'}``
          - default is ``'index'``
          - The Series index must be unique for orient ``'index'``.

        * when ``typ == 'frame'``,

          - allowed orients are ``{'split','records','index',
            'columns','values', 'table'}``
          - default is ``'columns'``
          - The DataFrame index must be unique for orients ``'index'`` and
            ``'columns'``.
          - The DataFrame columns must be unique for orients ``'index'``,
            ``'columns'``, and ``'records'``.

        .. versionadded:: 0.23.0
           'table' as an allowed value for the ``orient`` argument

    typ : {'frame', 'series'}, default 'frame'
        The type of object to recover.

    dtype : bool or dict, default None
        If True, infer dtypes; if a dict of column to dtype, then use those;
        if False, then don't infer dtypes at all, applies only to the data.

        For all ``orient`` values except ``'table'``, default is True.

        .. versionchanged:: 0.25.0

           Not applicable for ``orient='table'``.

    convert_axes : bool, default None
        Try to convert the axes to the proper dtypes.

        For all ``orient`` values except ``'table'``, default is True.

        .. versionchanged:: 0.25.0

           Not applicable for ``orient='table'``.

    convert_dates : bool or list of str, default True
        If True then default datelike columns may be converted (depending on
        keep_default_dates).
        If False, no dates will be converted.
        If a list of column names, then those columns will be converted and
        default datelike columns may also be converted (depending on
        keep_default_dates).

    keep_default_dates : bool, default True
        If parsing dates (convert_dates is not False), then try to parse the
        default datelike columns.
        A column label is datelike if

        * it ends with ``'_at'``,

        * it ends with ``'_time'``,

        * it begins with ``'timestamp'``,

        * it is ``'modified'``, or

        * it is ``'date'``.

    numpy : bool, default False
        Direct decoding to numpy arrays. Supports numeric data only, but
        non-numeric column and index labels are supported. Note also that the
        JSON ordering MUST be the same for each term if numpy=True.

        .. deprecated:: 1.0.0

    precise_float : bool, default False
        Set to enable usage of higher precision (strtod) function when
        decoding string to double values. Default (False) is to use fast but
        less precise builtin functionality.

    date_unit : str, default None
        The timestamp unit to detect if converting dates. The default behaviour
        is to try and detect the correct precision, but if this is not desired
        then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds,
        milliseconds, microseconds or nanoseconds respectively.

    encoding : str, default is 'utf-8'
        The encoding to use to decode py3 bytes.

    lines : bool, default False
        Read the file as a json object per line.

    chunksize : int, optional
        Return JsonReader object for iteration.
        See the `line-delimited json docs
        <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#line-delimited-json>`_
        for more information on ``chunksize``.
        This can only be passed if `lines=True`.
        If this is None, the file will be read into memory all at once.
    compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
        For on-the-fly decompression of on-disk data. If 'infer', then use
        gzip, bz2, zip or xz if path_or_buf is a string ending in
        '.gz', '.bz2', '.zip', or '.xz', respectively, and no decompression
        otherwise. If using 'zip', the ZIP file must contain only one data
        file to be read in. Set to None for no decompression.

    Returns
    -------
    Series or DataFrame
        The type returned depends on the value of `typ`.

    See Also
    --------
    DataFrame.to_json : Convert a DataFrame to a JSON string.
    Series.to_json : Convert a Series to a JSON string.

    Notes
    -----
    Specific to ``orient='table'``, if a :class:`DataFrame` with a literal
    :class:`Index` name of `index` gets written with :func:`to_json`, the
    subsequent read operation will incorrectly set the :class:`Index` name to
    ``None``. This is because `index` is also used by :func:`DataFrame.to_json`
    to denote a missing :class:`Index` name, and the subsequent
    :func:`read_json` operation cannot distinguish between the two. The same
    limitation is encountered with a :class:`MultiIndex` and any names
    beginning with ``'level_'``.

    Examples
    --------
    >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
    ...                   index=['row 1', 'row 2'],
    ...                   columns=['col 1', 'col 2'])

    Encoding/decoding a Dataframe using ``'split'`` formatted JSON:

    >>> df.to_json(orient='split')
    '{"columns":["col 1","col 2"],
      "index":["row 1","row 2"],
      "data":[["a","b"],["c","d"]]}'
    >>> pd.read_json(_, orient='split')
          col 1 col 2
    row 1     a     b
    row 2     c     d

    Encoding/decoding a Dataframe using ``'index'`` formatted JSON:

    >>> df.to_json(orient='index')
    '{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
    >>> pd.read_json(_, orient='index')
          col 1 col 2
    row 1     a     b
    row 2     c     d

    Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
    Note that index labels are not preserved with this encoding.

    >>> df.to_json(orient='records')
    '[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
    >>> pd.read_json(_, orient='records')
      col 1 col 2
    0     a     b
    1     c     d

    Encoding with Table Schema

    >>> df.to_json(orient='table')
    '{"schema": {"fields": [{"name": "index", "type": "string"},
                            {"name": "col 1", "type": "string"},
                            {"name": "col 2", "type": "string"}],
                    "primaryKey": "index",
                    "pandas_version": "0.20.0"},
        "data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
                {"index": "row 2", "col 1": "c", "col 2": "d"}]}'
    """
    if orient == "table" and dtype:
        raise ValueError("cannot pass both dtype and orient='table'")
    if orient == "table" and convert_axes:
        raise ValueError("cannot pass both convert_axes and orient='table'")

    if dtype is None and orient != "table":
        dtype = True
    if convert_axes is None and orient != "table":
        convert_axes = True
    if encoding is None:
        encoding = "utf-8"

    compression = infer_compression(path_or_buf, compression)
    filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer(
        path_or_buf, encoding=encoding, compression=compression)

    json_reader = JsonReader(
        filepath_or_buffer,
        orient=orient,
        typ=typ,
        dtype=dtype,
        convert_axes=convert_axes,
        convert_dates=convert_dates,
        keep_default_dates=keep_default_dates,
        numpy=numpy,
        precise_float=precise_float,
        date_unit=date_unit,
        encoding=encoding,
        lines=lines,
        chunksize=chunksize,
        compression=compression,
    )

    if chunksize:
        return json_reader

    result = json_reader.read()
    if should_close:
        filepath_or_buffer.close()

    return result
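A short usage illustration of the `lines`/`chunksize` path documented above (the sample data and `StringIO` wrapper are illustrative only):

import pandas as pd
from io import StringIO

jsonl = '{"col 1":"a","col 2":"b"}\n{"col 1":"c","col 2":"d"}\n'
reader = pd.read_json(StringIO(jsonl), lines=True, chunksize=1)  # returns a JsonReader
for chunk in reader:
    print(chunk)  # each chunk is a one-row DataFrame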
Example #5
    def test_infer_compression_from_path(self, extension, expected, path_type):
        path = path_type("foo/bar.csv" + extension)
        compression = icom.infer_compression(path, compression="infer")
        assert compression == expected
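For reference, the same behavior exercised outside the test harness (import path as used by pandas' own tests; it may differ in other versions):

import pandas.io.common as icom

assert icom.infer_compression("foo/bar.csv.gz", compression="infer") == "gzip"
assert icom.infer_compression("foo/bar.csv.bz2", compression="infer") == "bz2"
assert icom.infer_compression("foo/bar.csv", compression="infer") is None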
Example #6
    def __init__(
        self,
        obj,
        path_or_buf: Optional[FilePathOrBuffer[str]] = None,
        sep: str = ",",
        na_rep: str = "",
        float_format: Optional[str] = None,
        cols=None,
        header: Union[bool, Sequence[Hashable]] = True,
        index: bool = True,
        index_label: Optional[Union[bool, Hashable,
                                    Sequence[Hashable]]] = None,
        mode: str = "w",
        encoding: Optional[str] = None,
        compression: Union[str, Mapping[str, str], None] = "infer",
        quoting: Optional[int] = None,
        line_terminator="\n",
        chunksize: Optional[int] = None,
        quotechar='"',
        date_format: Optional[str] = None,
        doublequote: bool = True,
        escapechar: Optional[str] = None,
        decimal=".",
    ):
        self.obj = obj

        if path_or_buf is None:
            path_or_buf = StringIO()

        # Extract compression mode as given, if dict
        compression, self.compression_args = get_compression_method(
            compression)

        self.path_or_buf, _, _, self.should_close = get_filepath_or_buffer(
            path_or_buf, encoding=encoding, compression=compression, mode=mode)
        self.sep = sep
        self.na_rep = na_rep
        self.float_format = float_format
        self.decimal = decimal

        self.header = header
        self.index = index
        self.index_label = index_label
        self.mode = mode
        if encoding is None:
            encoding = "utf-8"
        self.encoding = encoding
        self.compression = infer_compression(self.path_or_buf, compression)

        if quoting is None:
            quoting = csvlib.QUOTE_MINIMAL
        self.quoting = quoting

        if quoting == csvlib.QUOTE_NONE:
            # prevents crash in _csv
            quotechar = None
        self.quotechar = quotechar

        self.doublequote = doublequote
        self.escapechar = escapechar

        self.line_terminator = line_terminator or os.linesep

        self.date_format = date_format

        self.has_mi_columns = isinstance(obj.columns, ABCMultiIndex)

        # validate mi options
        if self.has_mi_columns:
            if cols is not None:
                raise TypeError(
                    "cannot specify cols with a MultiIndex on the columns")

        if cols is not None:
            if isinstance(cols, ABCIndexClass):
                cols = cols.to_native_types(
                    na_rep=na_rep,
                    float_format=float_format,
                    date_format=date_format,
                    quoting=self.quoting,
                )
            else:
                cols = list(cols)
            self.obj = self.obj.loc[:, cols]

        # update columns to include possible multiplicity of dupes
        # and make sure cols is just a list of labels
        cols = self.obj.columns
        if isinstance(cols, ABCIndexClass):
            cols = cols.to_native_types(
                na_rep=na_rep,
                float_format=float_format,
                date_format=date_format,
                quoting=self.quoting,
            )
        else:
            cols = list(cols)

        # save it
        self.cols = cols

        # preallocate data 2d list
        self.blocks = self.obj._data.blocks
        ncols = sum(b.shape[0] for b in self.blocks)
        self.data = [None] * ncols

        if chunksize is None:
            chunksize = (100000 // (len(self.cols) or 1)) or 1
        self.chunksize = int(chunksize)

        self.data_index = obj.index
        if (isinstance(self.data_index, (ABCDatetimeIndex, ABCPeriodIndex))
                and date_format is not None):
            from pandas import Index

            self.data_index = Index([
                x.strftime(date_format) if notna(x) else ""
                for x in self.data_index
            ])

        self.nlevels = getattr(self.data_index, "nlevels", 1)
        if not index:
            self.nlevels = 0
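The `get_compression_method` call above accepts a dict-shaped `compression`; a hedged public-API illustration via `DataFrame.to_csv` (available in recent pandas versions):

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
# "method" selects the codec; any remaining keys become compression_args
# (forwarded to the underlying compressor in newer pandas versions).
df.to_csv("out.csv.gz", compression={"method": "gzip"})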
Example #7
File: xml.py  Project: tnir/pandas
    def _iterparse_nodes(self, iterparse: Callable) -> list[dict[str, str | None]]:
        """
        Iterparse xml nodes.

        This method will read in local disk, decompressed XML files for elements
        and underlying descendants using iterparse, a method to iterate through
        an XML tree without holding entire XML tree in memory.

        Raises
        ------
        TypeError
            * If `iterparse` is not a dict or its dict value is not list-like.
        ParserError
            * If `path_or_buffer` is not a physical, decompressed file on disk.
            * If no data is returned from selected items in `iterparse`.

        Notes
        -----
        Namespace URIs will be removed from return node values. Also,
        elements with missing children or attributes in submitted list
        will have optional keys filled with None values.
        """

        dicts: list[dict[str, str | None]] = []
        row: dict[str, str | None] | None = None

        if not isinstance(self.iterparse, dict):
            raise TypeError(
                f"{type(self.iterparse).__name__} is not a valid type for iterparse"
            )

        row_node = next(iter(self.iterparse.keys())) if self.iterparse else ""
        if not is_list_like(self.iterparse[row_node]):
            raise TypeError(
                f"{type(self.iterparse[row_node])} is not a valid type "
                "for value in iterparse"
            )

        if (
            not isinstance(self.path_or_buffer, str)
            or is_url(self.path_or_buffer)
            or is_fsspec_url(self.path_or_buffer)
            or self.path_or_buffer.startswith(("<?xml", "<"))
            or infer_compression(self.path_or_buffer, "infer") is not None
        ):
            raise ParserError(
                "iterparse is designed for large XML files that are fully extracted on "
                "local disk and not as compressed files or online sources."
            )

        for event, elem in iterparse(self.path_or_buffer, events=("start", "end")):
            curr_elem = elem.tag.split("}")[1] if "}" in elem.tag else elem.tag

            if event == "start":
                if curr_elem == row_node:
                    row = {}

            if row is not None:
                if self.names:
                    for col, nm in zip(self.iterparse[row_node], self.names):
                        if curr_elem == col:
                            elem_val = elem.text.strip() if elem.text else None
                            if row.get(nm) != elem_val and nm not in row:
                                row[nm] = elem_val
                        if col in elem.attrib:
                            if elem.attrib[col] not in row.values() and nm not in row:
                                row[nm] = elem.attrib[col]
                else:
                    for col in self.iterparse[row_node]:
                        if curr_elem == col:
                            row[col] = elem.text.strip() if elem.text else None
                        if col in elem.attrib:
                            row[col] = elem.attrib[col]

            if event == "end":
                if curr_elem == row_node and row is not None:
                    dicts.append(row)
                    row = None

                elem.clear()
                if hasattr(elem, "getprevious"):
                    while (
                        elem.getprevious() is not None and elem.getparent() is not None
                    ):
                        del elem.getparent()[0]

        if dicts == []:
            raise ParserError("No result from selected items in iterparse.")

        keys = list(dict.fromkeys([k for d in dicts for k in d.keys()]))
        dicts = [{k: d[k] if k in d.keys() else None for k in keys} for d in dicts]

        if self.names:
            dicts = [{nm: v for nm, v in zip(self.names, d.values())} for d in dicts]

        return dicts
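The public entry point for this code path is `pandas.read_xml` with the `iterparse` argument (pandas >= 1.5); a hedged usage sketch with placeholder file and element names:

import pandas as pd

df = pd.read_xml(
    "books.xml",                              # must be an uncompressed file on local disk
    iterparse={"book": ["title", "author"]},  # {repeating row element: [descendants/attributes]}
)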
Example #8
def read_into_dataframe(file: IO,
                        filename: str = "",
                        nrows: int = 100,
                        max_characters: int = 50) -> pd.DataFrame:
    """Reads a file into a DataFrame.
    Infers the file encoding and whether a header row exists.
    Args:
        file (IO): file buffer.
        filename (str): filename. Used to infer compression.
        nrows (int, optional): number of rows to peek. Default: 100.
        max_characters (int, optional): max characters a column name can have
            to be distinguished from a real text value. Default: 50.
    Returns:
        A pandas.DataFrame.
    """
    detector = UniversalDetector()
    for line, text in enumerate(file):
        detector.feed(text)
        if detector.done or line > nrows:
            break
    detector.close()
    encoding = detector.result.get("encoding")

    compression = infer_compression(filename, "infer")

    file.seek(0, SEEK_SET)
    contents = file.read()

    with BytesIO(contents) as file:
        df0 = pd.read_csv(
            file,
            encoding=encoding,
            compression=compression,
            sep=None,
            engine="python",
            header="infer",
            nrows=nrows,
        )

    df0_cols = list(df0.columns)

    # Check that all column names are strings and reasonably short
    # (real text values tend to be long)
    column_names_checker = all(isinstance(item, str) for item in df0_cols)
    if column_names_checker:
        column_names_checker = all(
            len(item) < max_characters for item in df0_cols)

    # Check whether any column name can be converted to a float
    # (numeric "column names" suggest the first row is data, not a header)
    conversion_checker = True
    for item in df0_cols:
        try:
            float(item)
            conversion_checker = False
            break
        except (TypeError, ValueError):
            pass

    # Prefix and header
    final_checker = column_names_checker and conversion_checker
    header = "infer" if final_checker else None
    prefix = None if header else "col"

    with BytesIO(contents) as file:
        df = pd.read_csv(
            file,
            encoding=encoding,
            compression=compression,
            sep=None,
            engine="python",
            header=header,
            prefix=prefix,
        )
    return df
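A hypothetical call sketch: the function expects a binary file buffer (chardet's detector is fed raw bytes) plus the original filename so compression can be inferred from its extension.

with open("data.csv.gz", "rb") as f:  # placeholder path
    df = read_into_dataframe(f, filename="data.csv.gz", nrows=100)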
Example #9
def read_csv(
    filepath_or_buffer,
    sep=",",
    delimiter=None,
    header="infer",
    names=None,
    index_col=None,
    usecols=None,
    prefix=None,
    mangle_dupe_cols=True,
    dtype=None,
    true_values=None,
    false_values=None,
    skiprows=None,
    skipfooter=0,
    nrows=None,
    na_values=None,
    skip_blank_lines=True,
    parse_dates=False,
    compression="infer",
    quotechar='"',
    quoting=0,
    doublequote=True,
    verify_header=False,
    **kwargs,
    # TODO: Put back these options once we figure out how to support them
    #       with the Arrows CSV reader.
    # skipinitialspace=False,  # GPU only
    # keep_default_na=True,  # GPU only
    # na_filter=True,  # GPU only
    # dayfirst=False, # GPU only
    # thousands=None,  # GPU only
    # decimal=".",  # GPU only
    # lineterminator=None, # GPU only
    # comment=None,  # GPU only
    # delim_whitespace=False,  # GPU only
):

    # Checks on filepath_or_buffer
    paths = util.to_list_if_scalar(filepath_or_buffer)

    if any(not isinstance(path, str) for path in paths):
        raise err._unsupported_error(
            "'filepath_or_buffer' must be a string or a list of strings")
    if len(paths) == 0:
        raise ValueError("'filepath_or_buffer' must be a non-empty list")

    for path in paths:
        if not os.path.exists(path):
            raise ValueError(f"{path} does not exist")

    if not isinstance(compression, str):
        raise err._unsupported_error("compression", compression)
    compressions = [
        _parse_compression(infer_compression(path, compression))
        for path in paths
    ]

    # Checks on sep and delimiter
    if sep is None and delimiter is None:
        raise ValueError("at least one of 'sep' or 'delimiter' must be given")
    sep = delimiter if delimiter is not None else sep
    if len(sep) > 1:
        raise ValueError("'sep' must be a 1-character string")

    # Checks on header
    if header == "infer":
        header = 0 if names is None else None

    if header not in (
            0,
            None,
    ):
        raise err._unsupported_error("header", header)

    # Checks on skiprows, skipfooter, and nrows
    skiprows = 0 if skiprows is None else skiprows
    if not is_integer(skiprows):
        raise ValueError("'skiprows' must be an integer")
    if not is_integer(skipfooter):
        raise ValueError("'skipfooter' must be an integer")
    if not (nrows is None or is_integer(nrows)):
        raise ValueError("'nrows' must be None or an integer")

    # If either column names or dtype is missing, infer them by parsing
    # the first few lines using Pandas
    # FIXME: We should use cuDF for this
    if names is None or dtype is None:
        engine = "python" if skipfooter > 0 else "c"
        column_names, dtypes = _extract_header_using_pandas(
            paths[0],
            sep,
            header,
            names,
            dtype,
            true_values,
            false_values,
            skiprows,
            na_values,
            skip_blank_lines,
            parse_dates,
            compression,
            quotechar,
            quoting,
            doublequote,
            engine,
            peek_rows=3,
        )
        if verify_header:
            for path in paths[1:]:
                result = _extract_header_using_pandas(
                    path,
                    sep,
                    header,
                    names,
                    dtype,
                    true_values,
                    false_values,
                    skiprows,
                    na_values,
                    skip_blank_lines,
                    parse_dates,
                    compression,
                    quotechar,
                    quoting,
                    doublequote,
                    engine,
                    peek_rows=3,
                )
                if not column_names.equals(result[0]):
                    raise ValueError(
                        f"{paths[0]} and {path} have different headers")

    else:
        column_names = pandas.Index(names)

        if is_dict_like(dtype):
            dtypes = []
            for name in names:
                if name not in dtype:
                    raise ValueError(f"'dtype' has no entry for '{name}'")
                dtypes.append(_ensure_dtype(dtype[name]))
        elif is_list_like(dtype):
            raise err._unsupported_error(
                "'dtype' must be a string, a dtype, or a dictionary")
        else:
            dtype = _ensure_dtype(dtype)
            dtypes = [dtype] * len(names)

    if column_names.has_duplicates:
        raise ValueError("Header must not have any duplicates")

    # Checks on unsupported options
    if prefix is not None:
        raise err._unsupported_error("prefix", prefix)
    if mangle_dupe_cols not in (True, ):
        raise err._unsupported_error("mangle_dupe_cols", mangle_dupe_cols)

    # If there was a header in the file, we should skip that line as well
    if header == 0:
        skiprows += 1

    # Checks on parse_dates
    _ERR_MSG_PARSE_DATES = (
        "'parse_dates' must be a list of integers or strings for now")

    if is_dict_like(parse_dates):
        raise err._unsupported_error(_ERR_MSG_PARSE_DATES)

    parse_dates = parse_dates if parse_dates is not False else []
    if not is_list_like(parse_dates):
        raise err._unsupported_error(_ERR_MSG_PARSE_DATES)

    date_cols = _get_indexer(column_names, parse_dates, "parse_dates")

    # Override dtypes for the datetime columns
    for idx in date_cols:
        dtypes[idx] = ty.ts_ns

    # If a column is given a datetime dtype but not added to the parse_dates,
    # we should record it
    for idx, dtype in enumerate(dtypes):
        if dtype == ty.ts_ns and idx not in parse_dates:
            parse_dates.append(idx)

    # Checks on quoting
    if quoting != 0:
        raise err._unsupported_error("quoting", quoting)
    if len(quotechar) > 1:
        raise ValueError("'quotechar' must be a 1-character string")

    # Checks on index_col
    index_col = None if index_col is False else index_col
    if index_col is not None:
        if is_integer(index_col) or isinstance(index_col, str):
            index_col = [index_col]
        if not is_list_like(index_col):
            raise err._unsupported_error("index_col", index_col)
        index_col = _get_indexer(column_names, index_col, "index_col")

    # Checks on true_values, false_values, and na_values
    _check_string_list(true_values, "true_values")
    _check_string_list(false_values, "false_values")
    _check_string_list(na_values, "na_values")

    # Checks on nrows
    if skipfooter != 0 and nrows is not None:
        raise ValueError("'skipfooter' not supported with 'nrows'")

    df = DataFrame(
        frame=io.read_csv(
            paths,
            sep=sep,
            usecols=usecols,
            dtypes=dtypes,
            true_values=true_values,
            false_values=false_values,
            skiprows=skiprows,
            skipfooter=skipfooter,
            nrows=nrows,
            na_values=na_values,
            skip_blank_lines=skip_blank_lines,
            date_cols=date_cols,
            compressions=compressions,
            quotechar=quotechar,
            quoting=quoting,
            doublequote=doublequote,
        ),
        columns=column_names,
    )

    if index_col is not None:
        df = df.set_index(column_names[index_col])
        # Make sure we reset the names for unnamed indices
        names = df._raw_index.names
        names = [
            None if name.startswith("Unnamed") else name for name in names
        ]
        df._raw_index.names = names

    return df
Example #10
def read_into_dataframe(file, filename=None, nrows=100, max_characters=50):
    """
    Reads a file into a DataFrame.
    Infers the file encoding and whether a header row exists.
    The file can be in any format (.csv, .txt, .zip, .gif, ...).
    If it is not a .csv file, a pandas.errors.EmptyDataError is raised.
    A one-column .csv falls into the csv.Error branch of the try...except below.

    Parameters
    ----------
    file : IO
        File buffer.
    filename : str
        Filename. Used to infer compression. Defaults to None.
    nrows : int
        Number of rows to peek. Defaults to 100.
    max_characters : int
        Max characters a column name can have to be distinguished from a real text value. Defaults to 50.

    Returns
    -------
    pd.DataFrame
        The dataframe content.

    Raises
    ------
    pandas.errors.EmptyDataError

    Notes
    -----
    If no filename is given, a hex uuid will be used as the file name.
    """

    detector = UniversalDetector()
    for line, text in enumerate(file):
        detector.feed(text)
        if detector.done or line > nrows:
            break
    detector.close()
    encoding = detector.result.get("encoding")

    if filename is None:
        filename = uuid4().hex

    compression = infer_compression(filename, "infer")

    file.seek(0, SEEK_SET)

    pdread = TextIOWrapper(file, encoding=encoding)

    try:
        # check if the file has header.
        sniffer = csv.Sniffer()
        pdread.seek(0, SEEK_SET)
        pdreadline = pdread.readline()
        pdreadline += pdread.readline()
        has_header = sniffer.has_header(pdreadline)
        sep = None

    except csv.Error:
        sep = ","
        has_header = True

    # Prefix and header
    header = "infer" if has_header else None
    prefix = None if header else "col"

    pdread.seek(0, SEEK_SET)
    df = pd.read_csv(
        pdread,
        encoding=encoding,
        compression=compression,
        sep=sep,
        engine="python",
        header=header,
        nrows=nrows,
        prefix=prefix,
    )
    return df
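For reference, a standalone illustration of the stdlib header sniffing used above (the sample text is made up):

import csv

sample = "id,score\n1,0.5\n2,0.75\n"
print(csv.Sniffer().has_header(sample))       # True: numeric rows under non-numeric labels
print(csv.Sniffer().sniff(sample).delimiter)  # ","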