Example #1
def read_clipboard(sep=r"\s+", **kwargs):  # pragma: no cover
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
    kwargs.update(kwargs.pop("kwargs", {}))

    from modin.data_management.factories import BaseFactory

    return DataFrame(query_compiler=BaseFactory.read_clipboard(**kwargs))
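
Most of the snippets on this page rely on `inspect.getargvalues(inspect.currentframe())` to gather every named argument into one dict that can then be forwarded with `**kwargs`. A minimal, standalone sketch of that pattern (the function name and arguments below are illustrative, not part of Modin):

import inspect

def collect_args(a, b=2, **extra):
    # getargvalues returns (args, varargs, varkw, locals); the locals dict
    # holds every bound parameter, so it works as a "gather all arguments" map.
    _, _, _, bound = inspect.getargvalues(inspect.currentframe())
    bound.update(bound.pop("extra", {}))  # flatten **extra into the same dict
    return bound

print(collect_args(1, c=3))  # {'a': 1, 'b': 2, 'c': 3}
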
Example #2
def read_fwf(filepath_or_buffer,
             colspecs="infer",
             widths=None,
             infer_nrows=100,
             **kwds):
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
    return DataFrame(query_compiler=BaseFactory.read_fwf(**kwargs))
Example #3
def to_pickle(obj, path, compression="infer", protocol=4):
    if isinstance(obj, DataFrame):
        obj = obj._query_compiler
    return BaseFactory.to_pickle(obj,
                                 path,
                                 compression=compression,
                                 protocol=protocol)
Example #4
File: io.py Project: wzhang1/modin
def read_excel(
    io,
    sheet_name=0,
    header=0,
    names=None,
    index_col=None,
    parse_cols=None,
    usecols=None,
    squeeze=False,
    dtype=None,
    engine=None,
    converters=None,
    true_values=None,
    false_values=None,
    skiprows=None,
    nrows=None,
    na_values=None,
    keep_default_na=True,
    verbose=False,
    parse_dates=False,
    date_parser=None,
    thousands=None,
    comment=None,
    skip_footer=0,
    skipfooter=0,
    convert_float=True,
    mangle_dupe_cols=True,
    **kwds
):
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
    kwargs.update(kwargs.pop("kwds", {}))
    return DataFrame(query_compiler=BaseFactory.read_excel(**kwargs))
Example #5
def read_spss(
    path: Union[str, pathlib.Path],
    usecols: Union[Sequence[str], type(None)] = None,
    convert_categoricals: bool = True,
):
    return DataFrame(query_compiler=BaseFactory.read_spss(
        path, usecols, convert_categoricals))
Example #6
def read_pickle(filepath_or_buffer: FilePathOrBuffer,
                compression: Optional[str] = "infer"):
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())

    from modin.data_management.factories import BaseFactory

    return DataFrame(query_compiler=BaseFactory.read_pickle(**kwargs))
Example #7
def read_sql(
    sql,
    con,
    index_col=None,
    coerce_float=True,
    params=None,
    parse_dates=None,
    columns=None,
    chunksize=None,
):
    """ Read SQL query or database table into a DataFrame.

    Args:
        sql: SQL query to be executed (string or SQLAlchemy Selectable, i.e. a select or text object), or a table name.
        con: SQLAlchemy connectable (engine/connection), database string URI, or DBAPI2 connection (fallback mode).
        index_col: Column(s) to set as index (MultiIndex).
        coerce_float: Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to
                      floating point, useful for SQL result sets.
        params: List of parameters to pass to execute method. The syntax used
                to pass parameters is database driver dependent. Check your
                database driver documentation for which of the five syntax styles,
                described in PEP 249's paramstyle, is supported.
        parse_dates:
                     - List of column names to parse as dates.
                     - Dict of ``{column_name: format string}`` where format string is
                       strftime compatible in case of parsing string times, or is one of
                       (D, s, ns, ms, us) in case of parsing integer timestamps.
                     - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
                       to the keyword arguments of :func:`pandas.to_datetime`
                       Especially useful with databases without native Datetime support,
                       such as SQLite.
        columns: List of column names to select from SQL table (only used when reading a table).
        chunksize: If specified, return an iterator where `chunksize` is the number of rows to include in each chunk.

    Returns:
        Modin Dataframe
    """
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())

    from modin.data_management.factories import BaseFactory

    if kwargs.get("chunksize") is not None:
        ErrorMessage.default_to_pandas("Parameters provided [chunksize]")
        df_gen = pandas.read_sql(**kwargs)
        return (DataFrame(query_compiler=BaseFactory.from_pandas(df))
                for df in df_gen)
    return DataFrame(query_compiler=BaseFactory.read_sql(**kwargs))
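
As the docstring notes, passing `chunksize` makes this version default to pandas and return a generator of Modin DataFrames rather than a single frame. A hedged usage sketch (the connection URI and table name are illustrative, and a SQLAlchemy-compatible driver is assumed):

import modin.pandas as pd

# Whole result set in one Modin DataFrame.
df = pd.read_sql("SELECT * FROM events", "sqlite:///example.db")

# With chunksize, each yielded item is a Modin DataFrame of up to 10_000 rows.
for chunk in pd.read_sql("SELECT * FROM events", "sqlite:///example.db",
                         chunksize=10_000):
    print(len(chunk))
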
Example #8
def read_fwf(filepath_or_buffer: Union[str, pathlib.Path, IO[AnyStr]],
             colspecs="infer",
             widths=None,
             infer_nrows=100,
             **kwds):
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
    kwargs.update(kwargs.pop("kwds", {}))
    return DataFrame(query_compiler=BaseFactory.read_fwf(**kwargs))
Example #9
def from_non_pandas(df, index, columns, dtype):
    from modin.data_management.factories import BaseFactory

    new_qc = BaseFactory.from_non_pandas(df, index, columns, dtype)
    if new_qc is not None:
        from .dataframe import DataFrame

        return DataFrame(query_compiler=new_qc)
    return new_qc
Example #10
def read_spss(
    path: Union[str, pathlib.Path],
    usecols: Union[Sequence[str], type(None)] = None,
    convert_categoricals: bool = True,
):
    from modin.data_management.factories import BaseFactory

    return DataFrame(query_compiler=BaseFactory.read_spss(
        path, usecols, convert_categoricals))
Example #11
def read_sas(
    filepath_or_buffer,
    format=None,
    index=None,
    encoding=None,
    chunksize=None,
    iterator=False,
):  # pragma: no cover
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
    return DataFrame(query_compiler=BaseFactory.read_sas(**kwargs))
Example #12
def query_iterator():
    offset = 0
    while True:
        kwargs["sql"] = "SELECT * FROM ({}) LIMIT {} OFFSET {}".format(sql, chunksize, offset)
        offset += chunksize
        df = DataFrame(query_compiler=BaseFactory.read_sql(**kwargs))
        if df.empty:
            break
        else:
            yield df
Example #13
File: io.py Project: alclol/modin
def to_pickle(
    obj: Any,
    filepath_or_buffer: Union[str, pathlib.Path],
    compression: Optional[str] = "infer",
    protocol: int = 4,
):
    if isinstance(obj, DataFrame):
        obj = obj._query_compiler
    return BaseFactory.to_pickle(
        obj, filepath_or_buffer, compression=compression, protocol=protocol
    )
Example #14
def read_gbq(query,
             project_id=None,
             index_col=None,
             col_order=None,
             reauth=False,
             verbose=None,
             private_key=None,
             dialect="legacy",
             **kwargs):
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
    return DataFrame(query_compiler=BaseFactory.read_gbq(**kwargs))
Example #15
def read_sql(
    sql,
    con,
    index_col=None,
    coerce_float=True,
    params=None,
    parse_dates=None,
    columns=None,
    chunksize=None,
):
    """ Read SQL query or database table into a DataFrame.

    Args:
        sql: SQL query to be executed (string or SQLAlchemy Selectable, i.e. a select or text object), or a table name.
        con: SQLAlchemy connectable (engine/connection), database string URI, or DBAPI2 connection (fallback mode).
        index_col: Column(s) to set as index (MultiIndex).
        coerce_float: Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to
                      floating point, useful for SQL result sets.
        params: List of parameters to pass to execute method. The syntax used
                to pass parameters is database driver dependent. Check your
                database driver documentation for which of the five syntax styles,
                described in PEP 249's paramstyle, is supported.
        parse_dates:
                     - List of column names to parse as dates.
                     - Dict of ``{column_name: format string}`` where format string is
                       strftime compatible in case of parsing string times, or is one of
                       (D, s, ns, ms, us) in case of parsing integer timestamps.
                     - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
                       to the keyword arguments of :func:`pandas.to_datetime`
                       Especially useful with databases without native Datetime support,
                       such as SQLite.
        columns: List of column names to select from SQL table (only used when reading a table).
        chunksize: If specified, return an iterator where `chunksize` is the number of rows to include in each chunk.

    Returns:
        Modin Dataframe
    """
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
    if kwargs.get("chunksize", None) is not None:
        kwargs["chunksize"] = None
        def query_iterator():
            offset = 0
            while True:
                kwargs["sql"] = "SELECT * FROM ({}) LIMIT {} OFFSET {}".format(sql, chunksize, offset)
                offset += chunksize
                df = DataFrame(query_compiler=BaseFactory.read_sql(**kwargs))
                if df.empty:
                    break
                else:
                    yield df

        return query_iterator()
    
    return DataFrame(query_compiler=BaseFactory.read_sql(**kwargs))
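
This variant keeps chunked reads inside Modin by paginating the query with LIMIT/OFFSET instead of defaulting to pandas. The same idea in a self-contained sketch, written against plain pandas and sqlite3 so it runs without Modin (table and column names are illustrative):

import sqlite3
import pandas

def paginate(sql, con, chunksize):
    # Wrap the original query and page through it until an empty page appears.
    offset = 0
    while True:
        page = pandas.read_sql(
            "SELECT * FROM ({}) LIMIT {} OFFSET {}".format(sql, chunksize, offset), con
        )
        if page.empty:
            break
        offset += chunksize
        yield page

con = sqlite3.connect(":memory:")
pandas.DataFrame({"x": range(10)}).to_sql("t", con, index=False)
for page in paginate("SELECT * FROM t", con, chunksize=4):
    print(len(page))  # 4, 4, 2
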
Example #16
def read_parquet(path, engine: str = "auto", columns=None, **kwargs):
    """Load a parquet object from the file path, returning a DataFrame.

    Args:
        path: The filepath of the parquet file.
              We only support local files for now.
        engine: This argument doesn't do anything for now.
        columns: Subset of columns to load; None loads all columns.
        kwargs: Passed on to parquet's read_pandas function.
    """
    return DataFrame(query_compiler=BaseFactory.read_parquet(
        path=path, columns=columns, engine=engine, **kwargs))
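
A hedged usage sketch; the file path and column names are illustrative, and a parquet engine such as pyarrow is assumed to be installed:

import modin.pandas as pd

# Read only two columns from a local parquet file into a Modin DataFrame.
df = pd.read_parquet("data.parquet", columns=["a", "b"])
print(df.shape)
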
Example #17
def read_sql_query(
    sql,
    con,
    index_col=None,
    coerce_float=True,
    params=None,
    parse_dates=None,
    chunksize=None,
):
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
    return DataFrame(query_compiler=BaseFactory.read_sql_query(**kwargs))
Example #18
def from_pandas(df):
    """Converts a pandas DataFrame to a Ray DataFrame.
    Args:
        df (pandas.DataFrame): The pandas DataFrame to convert.

    Returns:
        A new Ray DataFrame object.
    """
    from modin.data_management.factories import BaseFactory
    from .dataframe import DataFrame

    return DataFrame(query_compiler=BaseFactory.from_pandas(df))
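
A hedged usage sketch of the conversion this helper performs. The import path below assumes `from_pandas` is re-exported from `modin.pandas.utils`, which can differ between Modin versions:

import pandas
from modin.pandas.utils import from_pandas  # assumed re-export location

pandas_df = pandas.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})
modin_df = from_pandas(pandas_df)  # a Modin DataFrame backed by the selected engine
print(type(modin_df), modin_df.shape)
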
Example #19
def read_stata(
    filepath_or_buffer,
    convert_dates=True,
    convert_categoricals=True,
    index_col=None,
    convert_missing=False,
    preserve_dtypes=True,
    columns=None,
    order_categoricals=True,
    chunksize=None,
    iterator=False,
):
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
    return DataFrame(query_compiler=BaseFactory.read_stata(**kwargs))
Example #20
def read_sql_table(
    table_name,
    con,
    schema=None,
    index_col=None,
    coerce_float=True,
    parse_dates=None,
    columns=None,
    chunksize=None,
):
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())

    from modin.data_management.factories import BaseFactory

    return DataFrame(query_compiler=BaseFactory.read_sql_table(**kwargs))
Example #21
def _read(**kwargs):
    """Read csv file from local disk.
    Args:
        filepath_or_buffer:
              The filepath of the csv file.
              We only support local files for now.
        kwargs: Keyword arguments in pandas.read_csv
    """
    pd_obj = BaseFactory.read_csv(**kwargs)
    # `read_csv` returns a TextFileReader when chunksize/iterator is requested;
    # rebind its read() so each chunk comes back wrapped in a Modin DataFrame.
    if isinstance(pd_obj, pandas.io.parsers.TextFileReader):
        reader = pd_obj.read
        pd_obj.read = lambda *args, **kwargs: DataFrame(query_compiler=reader(
            *args, **kwargs))
        return pd_obj
    return DataFrame(query_compiler=pd_obj)
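
A hedged usage sketch of the two return shapes; "data.csv" is an illustrative path:

import modin.pandas as pd

# Without chunksize the helper returns a Modin DataFrame directly.
df = pd.read_csv("data.csv")

# With chunksize a TextFileReader comes back; get_chunk() goes through the
# rebound read() internally, so each chunk should be a Modin DataFrame.
reader = pd.read_csv("data.csv", chunksize=1000)
first_chunk = reader.get_chunk()
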
Example #22
def read_hdf(
    path_or_buf,
    key=None,
    mode: str = "r",
    errors: str = "strict",
    where=None,
    start: Optional[int] = None,
    stop: Optional[int] = None,
    columns=None,
    iterator=False,
    chunksize: Optional[int] = None,
    **kwargs,
):
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
    kwargs.update(kwargs.pop("kwargs", {}))
    return DataFrame(query_compiler=BaseFactory.read_hdf(**kwargs))
Example #23
File: io.py Project: wzhang1/modin
def read_gbq(
    query,
    project_id=None,
    index_col=None,
    col_order=None,
    reauth=False,
    auth_local_webserver=False,
    dialect=None,
    location=None,
    configuration=None,
    credentials=None,
    private_key=None,
    verbose=None,
):
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
    kwargs.update(kwargs.pop("kwargs", {}))
    return DataFrame(query_compiler=BaseFactory.read_gbq(**kwargs))
Example #24
def read_json(
    path_or_buf=None,
    orient=None,
    typ="frame",
    dtype=None,
    convert_axes=None,
    convert_dates=True,
    keep_default_dates=True,
    numpy=False,
    precise_float=False,
    date_unit=None,
    encoding=None,
    lines=False,
    chunksize=None,
    compression="infer",
):
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
    return DataFrame(query_compiler=BaseFactory.read_json(**kwargs))
Example #25
def read_fwf(
    filepath_or_buffer: Union[str, pathlib.Path, IO[AnyStr]],
    colspecs="infer",
    widths=None,
    infer_nrows=100,
    **kwds,
):
    from modin.data_management.factories import BaseFactory

    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
    kwargs.update(kwargs.pop("kwds", {}))
    pd_obj = BaseFactory.read_fwf(**kwargs)
    # `read_fwf` returns a TextFileReader when chunksize/iterator is requested;
    # rebind its read() so each chunk comes back wrapped in a Modin DataFrame.
    if isinstance(pd_obj, pandas.io.parsers.TextFileReader):
        reader = pd_obj.read
        pd_obj.read = lambda *args, **kwargs: DataFrame(query_compiler=reader(
            *args, **kwargs))
        return pd_obj
    return DataFrame(query_compiler=pd_obj)
Example #26
def read_html(
    io,
    match=".+",
    flavor=None,
    header=None,
    index_col=None,
    skiprows=None,
    attrs=None,
    parse_dates=False,
    thousands=",",
    encoding=None,
    decimal=".",
    converters=None,
    na_values=None,
    keep_default_na=True,
    displayed_only=True,
):
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
    return DataFrame(query_compiler=BaseFactory.read_html(**kwargs))
Example #27
def read_gbq(
    query: str,
    project_id: Optional[str] = None,
    index_col: Optional[str] = None,
    col_order: Optional[List[str]] = None,
    reauth: bool = False,
    auth_local_webserver: bool = False,
    dialect: Optional[str] = None,
    location: Optional[str] = None,
    configuration: Optional[Dict[str, Any]] = None,
    credentials=None,
    use_bqstorage_api: Optional[bool] = None,
    private_key=None,
    verbose=None,
    progress_bar_type: Optional[str] = None,
) -> DataFrame:
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
    kwargs.update(kwargs.pop("kwargs", {}))
    return DataFrame(query_compiler=BaseFactory.read_gbq(**kwargs))
Example #28
def read_excel(
    io,
    sheet_name=0,
    header=0,
    names=None,
    index_col=None,
    usecols=None,
    squeeze=False,
    dtype=None,
    engine=None,
    converters=None,
    true_values=None,
    false_values=None,
    skiprows=None,
    nrows=None,
    na_values=None,
    keep_default_na=True,
    verbose=False,
    parse_dates=False,
    date_parser=None,
    thousands=None,
    comment=None,
    skipfooter=0,
    convert_float=True,
    mangle_dupe_cols=True,
    **kwds,
):

    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
    kwargs.update(kwargs.pop("kwds", {}))

    from modin.data_management.factories import BaseFactory

    intermediate = BaseFactory.read_excel(**kwargs)
    if isinstance(intermediate, (OrderedDict, dict)):
        parsed = type(intermediate)()
        for key in intermediate.keys():
            parsed[key] = DataFrame(query_compiler=intermediate.get(key))
        return parsed
    else:
        return DataFrame(query_compiler=intermediate)
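
A hedged usage sketch of the two return shapes handled above; "book.xlsx" is an illustrative path and an Excel engine such as openpyxl is assumed to be installed:

import modin.pandas as pd

# A single sheet comes back as one Modin DataFrame.
first = pd.read_excel("book.xlsx", sheet_name=0)

# sheet_name=None yields a dict of sheet name -> frame, which the loop above
# rewraps into Modin DataFrames one sheet at a time.
sheets = pd.read_excel("book.xlsx", sheet_name=None)
for name, frame in sheets.items():
    print(name, frame.shape)
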
Example #29
def read_excel(
    io,
    sheet_name=0,
    header=0,
    skiprows=None,
    index_col=None,
    names=None,
    usecols=None,
    parse_dates=False,
    date_parser=None,
    na_values=None,
    thousands=None,
    convert_float=True,
    converters=None,
    dtype=None,
    true_values=None,
    false_values=None,
    engine=None,
    squeeze=False,
):
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
    return DataFrame(query_compiler=BaseFactory.read_excel(**kwargs))
Example #30
def read_feather(path, columns=None, use_threads: bool = True):
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
    return DataFrame(query_compiler=BaseFactory.read_feather(**kwargs))