def _sanitize(
    df: pd.DataFrame, dtype: Dict[str, str], partition_cols: List[str]
) -> Tuple[pd.DataFrame, Dict[str, str], List[str]]:
    df = catalog.sanitize_dataframe_columns_names(df=df)
    partition_cols = [catalog.sanitize_column_name(p) for p in partition_cols]
    dtype = {catalog.sanitize_column_name(k): v.lower() for k, v in dtype.items()}
    _utils.check_duplicated_columns(df=df)
    return df, dtype, partition_cols
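# Illustrative sketch (not part of the module): the intended effect of the
# sanitization step above, assuming wr.catalog.sanitize_column_name lower-cases
# names and replaces spaces/special characters with underscores (column values
# below are hypothetical):
#
#   df columns   ['My Col', 'Other-Col']   ->  ['my_col', 'other_col']
#   dtype keys   {'My Col': 'BIGINT'}      ->  {'my_col': 'bigint'}
#   partitions   ['Other-Col']             ->  ['other_col']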
def to_parquet(  # pylint: disable=too-many-arguments,too-many-locals
    df: pd.DataFrame,
    path: str,
    index: bool = False,
    compression: Optional[str] = "snappy",
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
    sanitize_columns: bool = False,
    dataset: bool = False,
    partition_cols: Optional[List[str]] = None,
    mode: Optional[str] = None,
    catalog_versioning: bool = False,
    database: Optional[str] = None,
    table: Optional[str] = None,
    dtype: Optional[Dict[str, str]] = None,
    description: Optional[str] = None,
    parameters: Optional[Dict[str, str]] = None,
    columns_comments: Optional[Dict[str, str]] = None,
    regular_partitions: bool = True,
    projection_enabled: bool = False,
    projection_types: Optional[Dict[str, str]] = None,
    projection_ranges: Optional[Dict[str, str]] = None,
    projection_values: Optional[Dict[str, str]] = None,
    projection_intervals: Optional[Dict[str, str]] = None,
    projection_digits: Optional[Dict[str, str]] = None,
) -> Dict[str, Union[List[str], Dict[str, List[str]]]]:
    """Write Parquet file or dataset on Amazon S3.

    The concept of Dataset goes beyond the simple idea of files and enables more
    complex features like partitioning, casting and catalog integration
    (Amazon Athena/AWS Glue Catalog).

    Note
    ----
    If `dataset=True`, the table name and all column names will be automatically sanitized using
    `wr.catalog.sanitize_table_name` and `wr.catalog.sanitize_column_name`.
    Please, pass `sanitize_columns=True` to force the same behaviour for `dataset=False`.

    Note
    ----
    On `append` mode, the `parameters` will be upserted on an existing table.

    Note
    ----
    In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().

    Parameters
    ----------
    df : pandas.DataFrame
        Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
    path : str
        S3 path (for file e.g. ``s3://bucket/prefix/filename.parquet``)
        (for dataset e.g. ``s3://bucket/prefix``).
    index : bool
        True to store the DataFrame index in the file, otherwise False to ignore it.
    compression : str, optional
        Compression style (``None``, ``snappy``, ``gzip``).
    use_threads : bool
        True to enable concurrent requests, False to disable multiple threads.
        If enabled, os.cpu_count() will be used as the max number of threads.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receives None.
    s3_additional_kwargs :
        Forwarded to s3fs, useful for server side encryption
        https://s3fs.readthedocs.io/en/latest/#serverside-encryption
    sanitize_columns : bool
        True to sanitize column names or False to keep them as is.
        True value is forced if `dataset=True`.
    dataset : bool
        If True store a parquet dataset instead of a single file.
        If True, enables all the following arguments: partition_cols, mode, database, table,
        description, parameters, columns_comments.
    partition_cols : List[str], optional
        List of column names that will be used to create partitions. Only takes effect if dataset=True.
    mode : str, optional
        ``append`` (Default), ``overwrite``, ``overwrite_partitions``. Only takes effect if dataset=True.
    catalog_versioning : bool
        If True and `mode="overwrite"`, creates an archived version of the table catalog before updating it.
    database : str, optional
        Glue/Athena catalog: Database name.
    table : str, optional
        Glue/Athena catalog: Table name.
    dtype : Dict[str, str], optional
        Dictionary of column names and Athena/Glue types to be cast.
        Useful when you have columns with undetermined or mixed data types.
        (e.g. {'col name': 'bigint', 'col2 name': 'int'})
    description : str, optional
        Glue/Athena catalog: Table description.
    parameters : Dict[str, str], optional
        Glue/Athena catalog: Key/value pairs to tag the table.
    columns_comments : Dict[str, str], optional
        Glue/Athena catalog: Column names and the related comments
        (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).
    regular_partitions : bool
        Create regular partitions (non projected partitions) on Glue Catalog.
        Disable when you will work only with Partition Projection.
        Keeping it enabled even when working with projections is useful to keep
        Redshift Spectrum working with the regular partitions.
    projection_enabled : bool
        Enable Partition Projection on Athena (https://docs.aws.amazon.com/athena/latest/ug/partition-projection.html)
    projection_types : Optional[Dict[str, str]]
        Dictionary of partition names and Athena projection types.
        Valid types: "enum", "integer", "date", "injected"
        https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
        (e.g. {'col_name': 'enum', 'col2_name': 'integer'})
    projection_ranges : Optional[Dict[str, str]]
        Dictionary of partition names and Athena projection ranges.
        https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
        (e.g. {'col_name': '0,10', 'col2_name': '-1,8675309'})
    projection_values : Optional[Dict[str, str]]
        Dictionary of partition names and Athena projection values.
        https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
        (e.g. {'col_name': 'A,B,Unknown', 'col2_name': 'foo,boo,bar'})
    projection_intervals : Optional[Dict[str, str]]
        Dictionary of partition names and Athena projection intervals.
        https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
        (e.g. {'col_name': '1', 'col2_name': '5'})
    projection_digits : Optional[Dict[str, str]]
        Dictionary of partition names and Athena projection digits.
        https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
        (e.g. {'col_name': '1', 'col2_name': '2'})

    Returns
    -------
    Dict[str, Union[List[str], Dict[str, List[str]]]]
        Dictionary with:
        'paths': List of all stored file paths on S3.
        'partitions_values': Dictionary of partitions added with keys as S3 path locations
        and values as a list of partitions values as str.

    Examples
    --------
    Writing single file

    >>> import awswrangler as wr
    >>> import pandas as pd
    >>> wr.s3.to_parquet(
    ...     df=pd.DataFrame({'col': [1, 2, 3]}),
    ...     path='s3://bucket/prefix/my_file.parquet',
    ... )
    {
        'paths': ['s3://bucket/prefix/my_file.parquet'],
        'partitions_values': {}
    }

    Writing single file encrypted with a KMS key

    >>> import awswrangler as wr
    >>> import pandas as pd
    >>> wr.s3.to_parquet(
    ...     df=pd.DataFrame({'col': [1, 2, 3]}),
    ...     path='s3://bucket/prefix/my_file.parquet',
    ...     s3_additional_kwargs={
    ...         'ServerSideEncryption': 'aws:kms',
    ...         'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
    ...     }
    ... )
    {
        'paths': ['s3://bucket/prefix/my_file.parquet'],
        'partitions_values': {}
    }

    Writing partitioned dataset

    >>> import awswrangler as wr
    >>> import pandas as pd
    >>> wr.s3.to_parquet(
    ...     df=pd.DataFrame({
    ...         'col': [1, 2, 3],
    ...         'col2': ['A', 'A', 'B']
    ...     }),
    ...     path='s3://bucket/prefix',
    ...     dataset=True,
    ...     partition_cols=['col2']
    ... )
    {
        'paths': ['s3://.../col2=A/x.parquet', 's3://.../col2=B/y.parquet'],
        'partitions_values': {
            's3://.../col2=A/': ['A'],
            's3://.../col2=B/': ['B']
        }
    }

    Writing dataset to S3 with metadata on Athena/Glue Catalog.

    >>> import awswrangler as wr
    >>> import pandas as pd
    >>> wr.s3.to_parquet(
    ...     df=pd.DataFrame({
    ...         'col': [1, 2, 3],
    ...         'col2': ['A', 'A', 'B']
    ...     }),
    ...     path='s3://bucket/prefix',
    ...     dataset=True,
    ...     partition_cols=['col2'],
    ...     database='default',  # Athena/Glue database
    ...     table='my_table'  # Athena/Glue table
    ... )
    {
        'paths': ['s3://.../col2=A/x.parquet', 's3://.../col2=B/y.parquet'],
        'partitions_values': {
            's3://.../col2=A/': ['A'],
            's3://.../col2=B/': ['B']
        }
    }

    Writing dataset casting empty column data type

    >>> import awswrangler as wr
    >>> import pandas as pd
    >>> wr.s3.to_parquet(
    ...     df=pd.DataFrame({
    ...         'col': [1, 2, 3],
    ...         'col2': ['A', 'A', 'B'],
    ...         'col3': [None, None, None]
    ...     }),
    ...     path='s3://bucket/prefix',
    ...     dataset=True,
    ...     database='default',  # Athena/Glue database
    ...     table='my_table',  # Athena/Glue table
    ...     dtype={'col3': 'date'}
    ... )
    {
        'paths': ['s3://.../x.parquet'],
        'partitions_values': {}
    }

    """
    if (database is None) ^ (table is None):
        raise exceptions.InvalidArgumentCombination(
            "Please pass database and table arguments to be able to store the metadata into the Athena/Glue Catalog."
        )
    if df.empty is True:
        raise exceptions.EmptyDataFrame()
    partition_cols = partition_cols if partition_cols else []
    dtype = dtype if dtype else {}
    partitions_values: Dict[str, List[str]] = {}

    # Sanitize table to respect Athena's standards
    if (sanitize_columns is True) or (dataset is True):
        df = catalog.sanitize_dataframe_columns_names(df=df)
        partition_cols = [catalog.sanitize_column_name(p) for p in partition_cols]
        dtype = {catalog.sanitize_column_name(k): v.lower() for k, v in dtype.items()}
        catalog.drop_duplicated_columns(df=df)

    session: boto3.Session = _utils.ensure_session(session=boto3_session)
    cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
    fs: s3fs.S3FileSystem = _utils.get_fs(session=session, s3_additional_kwargs=s3_additional_kwargs)
    compression_ext: Optional[str] = _COMPRESSION_2_EXT.get(compression, None)
    if compression_ext is None:
        raise exceptions.InvalidCompression(f"{compression} is invalid, please use None, snappy or gzip.")
    if dataset is False:
        if path.endswith("/"):  # pragma: no cover
            raise exceptions.InvalidArgumentValue(
                "If <dataset=False>, the argument <path> should be an object path, not a directory."
            )
        if partition_cols:
            raise exceptions.InvalidArgumentCombination("Please, pass dataset=True to be able to use partition_cols.")
        if mode is not None:
            raise exceptions.InvalidArgumentCombination("Please pass dataset=True to be able to use mode.")
        if any(arg is not None for arg in (database, table, description, parameters)):
            raise exceptions.InvalidArgumentCombination(
                "Please pass dataset=True to be able to use any one of these "
                "arguments: database, table, description, parameters, "
                "columns_comments."
            )
        df = _data_types.cast_pandas_with_athena_types(df=df, dtype=dtype)
        schema: pa.Schema = _data_types.pyarrow_schema_from_pandas(
            df=df, index=index, ignore_cols=partition_cols, dtype=dtype
        )
        _logger.debug("schema: \n%s", schema)
        paths = [
            _to_parquet_file(
                df=df, path=path, schema=schema, index=index, compression=compression, cpus=cpus, fs=fs, dtype=dtype
            )
        ]
    else:
        mode = "append" if mode is None else mode
        if (mode in ("append", "overwrite_partitions")) and (database is not None) and (table is not None):
            # Fetching Catalog Types
            catalog_types: Optional[Dict[str, str]] = catalog.get_table_types(
                database=database, table=table, boto3_session=session
            )
            if catalog_types is not None:
                for k, v in catalog_types.items():
                    dtype[k] = v
        paths, partitions_values = _to_parquet_dataset(
            df=df,
            path=path,
            index=index,
            compression=compression,
            compression_ext=compression_ext,
            cpus=cpus,
            fs=fs,
            use_threads=use_threads,
            partition_cols=partition_cols,
            dtype=dtype,
            mode=mode,
            boto3_session=session,
        )
        if (database is not None) and (table is not None):
            columns_types, partitions_types = _data_types.athena_types_from_pandas_partitioned(
                df=df, index=index, partition_cols=partition_cols, dtype=dtype
            )
            catalog.create_parquet_table(
                database=database,
                table=table,
                path=path,
                columns_types=columns_types,
                partitions_types=partitions_types,
                compression=compression,
                description=description,
                parameters=parameters,
                columns_comments=columns_comments,
                boto3_session=session,
                mode=mode,
                catalog_versioning=catalog_versioning,
                projection_enabled=projection_enabled,
                projection_types=projection_types,
                projection_ranges=projection_ranges,
                projection_values=projection_values,
                projection_intervals=projection_intervals,
                projection_digits=projection_digits,
            )
            if partitions_values and (regular_partitions is True):
                _logger.debug("partitions_values:\n%s", partitions_values)
                catalog.add_parquet_partitions(
                    database=database,
                    table=table,
                    partitions_values=partitions_values,
                    compression=compression,
                    boto3_session=session,
                )
    return {"paths": paths, "partitions_values": partitions_values}
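# Hedged usage sketch (illustrative only; bucket, table and values are
# hypothetical): replacing the data of a single partition with
# mode="overwrite_partitions" while keeping the Glue Catalog entry in sync.
# Only the files under the affected partition prefix are replaced; other
# partitions are left untouched.
#
#   import awswrangler as wr
#   import pandas as pd
#   wr.s3.to_parquet(
#       df=pd.DataFrame({"col": [9, 10], "col2": ["B", "B"]}),
#       path="s3://bucket/prefix",
#       dataset=True,
#       mode="overwrite_partitions",
#       partition_cols=["col2"],
#       database="default",
#       table="my_table",
#   )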
def to_csv(  # pylint: disable=too-many-arguments,too-many-locals
    df: pd.DataFrame,
    path: str,
    sep: str = ",",
    index: bool = True,
    columns: Optional[List[str]] = None,
    use_threads: bool = True,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
    sanitize_columns: bool = False,
    dataset: bool = False,
    partition_cols: Optional[List[str]] = None,
    mode: Optional[str] = None,
    catalog_versioning: bool = False,
    database: Optional[str] = None,
    table: Optional[str] = None,
    dtype: Optional[Dict[str, str]] = None,
    description: Optional[str] = None,
    parameters: Optional[Dict[str, str]] = None,
    columns_comments: Optional[Dict[str, str]] = None,
    regular_partitions: bool = True,
    projection_enabled: bool = False,
    projection_types: Optional[Dict[str, str]] = None,
    projection_ranges: Optional[Dict[str, str]] = None,
    projection_values: Optional[Dict[str, str]] = None,
    projection_intervals: Optional[Dict[str, str]] = None,
    projection_digits: Optional[Dict[str, str]] = None,
    **pandas_kwargs,
) -> Dict[str, Union[List[str], Dict[str, List[str]]]]:
    """Write CSV file or dataset on Amazon S3.

    The concept of Dataset goes beyond the simple idea of files and enables more
    complex features like partitioning, casting and catalog integration
    (Amazon Athena/AWS Glue Catalog).

    Note
    ----
    If `dataset=True`, the table name and all column names will be automatically sanitized using
    `wr.catalog.sanitize_table_name` and `wr.catalog.sanitize_column_name`.
    Please, pass `sanitize_columns=True` to force the same behaviour for `dataset=False`.

    Note
    ----
    If `dataset=True`, `pandas_kwargs` will be ignored due to the restrictive
    quoting, date_format, escapechar, encoding, etc. required by Athena/Glue Catalog.

    Note
    ----
    For now, Pandas does not support in-memory CSV compression.
    https://github.com/pandas-dev/pandas/issues/22555
    So `compression` is not supported by Wrangler either.

    Note
    ----
    On `append` mode, the `parameters` will be upserted on an existing table.

    Note
    ----
    In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().

    Parameters
    ----------
    df : pandas.DataFrame
        Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
    path : str
        Amazon S3 path (e.g. s3://bucket/filename.csv).
    sep : str
        String of length 1. Field delimiter for the output file.
    index : bool
        Write row names (index).
    columns : List[str], optional
        Columns to write.
    use_threads : bool
        True to enable concurrent requests, False to disable multiple threads.
        If enabled, os.cpu_count() will be used as the max number of threads.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 Session will be used if boto3_session receives None.
    s3_additional_kwargs :
        Forwarded to s3fs, useful for server side encryption
        https://s3fs.readthedocs.io/en/latest/#serverside-encryption
    sanitize_columns : bool
        True to sanitize column names or False to keep them as is.
        True value is forced if `dataset=True`.
    dataset : bool
        If True store a CSV dataset instead of a single file.
        If True, enables all the following arguments: partition_cols, mode, database, table,
        description, parameters, columns_comments.
    partition_cols : List[str], optional
        List of column names that will be used to create partitions. Only takes effect if dataset=True.
    mode : str, optional
        ``append`` (Default), ``overwrite``, ``overwrite_partitions``. Only takes effect if dataset=True.
    catalog_versioning : bool
        If True and `mode="overwrite"`, creates an archived version of the table catalog before updating it.
    database : str, optional
        Glue/Athena catalog: Database name.
    table : str, optional
        Glue/Athena catalog: Table name.
    dtype : Dict[str, str], optional
        Dictionary of column names and Athena/Glue types to be cast.
        Useful when you have columns with undetermined or mixed data types.
        (e.g. {'col name': 'bigint', 'col2 name': 'int'})
    description : str, optional
        Glue/Athena catalog: Table description.
    parameters : Dict[str, str], optional
        Glue/Athena catalog: Key/value pairs to tag the table.
    columns_comments : Dict[str, str], optional
        Glue/Athena catalog: Column names and the related comments
        (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).
    regular_partitions : bool
        Create regular partitions (non projected partitions) on Glue Catalog.
        Disable when you will work only with Partition Projection.
        Keeping it enabled even when working with projections is useful to keep
        Redshift Spectrum working with the regular partitions.
    projection_enabled : bool
        Enable Partition Projection on Athena (https://docs.aws.amazon.com/athena/latest/ug/partition-projection.html)
    projection_types : Optional[Dict[str, str]]
        Dictionary of partition names and Athena projection types.
        Valid types: "enum", "integer", "date", "injected"
        https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
        (e.g. {'col_name': 'enum', 'col2_name': 'integer'})
    projection_ranges : Optional[Dict[str, str]]
        Dictionary of partition names and Athena projection ranges.
        https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
        (e.g. {'col_name': '0,10', 'col2_name': '-1,8675309'})
    projection_values : Optional[Dict[str, str]]
        Dictionary of partition names and Athena projection values.
        https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
        (e.g. {'col_name': 'A,B,Unknown', 'col2_name': 'foo,boo,bar'})
    projection_intervals : Optional[Dict[str, str]]
        Dictionary of partition names and Athena projection intervals.
        https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
        (e.g. {'col_name': '1', 'col2_name': '5'})
    projection_digits : Optional[Dict[str, str]]
        Dictionary of partition names and Athena projection digits.
        https://docs.aws.amazon.com/athena/latest/ug/partition-projection-supported-types.html
        (e.g. {'col_name': '1', 'col2_name': '2'})
    pandas_kwargs :
        Keyword arguments forwarded to pandas.DataFrame.to_csv()
        https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_csv.html

    Returns
    -------
    Dict[str, Union[List[str], Dict[str, List[str]]]]
        Dictionary with:
        'paths': List of all stored file paths on S3.
        'partitions_values': Dictionary of partitions added with keys as S3 path locations
        and values as a list of partitions values as str.

    Examples
    --------
    Writing single file

    >>> import awswrangler as wr
    >>> import pandas as pd
    >>> wr.s3.to_csv(
    ...     df=pd.DataFrame({'col': [1, 2, 3]}),
    ...     path='s3://bucket/prefix/my_file.csv',
    ... )
    {
        'paths': ['s3://bucket/prefix/my_file.csv'],
        'partitions_values': {}
    }

    Writing single file encrypted with a KMS key

    >>> import awswrangler as wr
    >>> import pandas as pd
    >>> wr.s3.to_csv(
    ...     df=pd.DataFrame({'col': [1, 2, 3]}),
    ...     path='s3://bucket/prefix/my_file.csv',
    ...     s3_additional_kwargs={
    ...         'ServerSideEncryption': 'aws:kms',
    ...         'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
    ...     }
    ... )
    {
        'paths': ['s3://bucket/prefix/my_file.csv'],
        'partitions_values': {}
    }

    Writing partitioned dataset

    >>> import awswrangler as wr
    >>> import pandas as pd
    >>> wr.s3.to_csv(
    ...     df=pd.DataFrame({
    ...         'col': [1, 2, 3],
    ...         'col2': ['A', 'A', 'B']
    ...     }),
    ...     path='s3://bucket/prefix',
    ...     dataset=True,
    ...     partition_cols=['col2']
    ... )
    {
        'paths': ['s3://.../col2=A/x.csv', 's3://.../col2=B/y.csv'],
        'partitions_values': {
            's3://.../col2=A/': ['A'],
            's3://.../col2=B/': ['B']
        }
    }

    Writing dataset to S3 with metadata on Athena/Glue Catalog.

    >>> import awswrangler as wr
    >>> import pandas as pd
    >>> wr.s3.to_csv(
    ...     df=pd.DataFrame({
    ...         'col': [1, 2, 3],
    ...         'col2': ['A', 'A', 'B']
    ...     }),
    ...     path='s3://bucket/prefix',
    ...     dataset=True,
    ...     partition_cols=['col2'],
    ...     database='default',  # Athena/Glue database
    ...     table='my_table'  # Athena/Glue table
    ... )
    {
        'paths': ['s3://.../col2=A/x.csv', 's3://.../col2=B/y.csv'],
        'partitions_values': {
            's3://.../col2=A/': ['A'],
            's3://.../col2=B/': ['B']
        }
    }

    Writing dataset casting empty column data type

    >>> import awswrangler as wr
    >>> import pandas as pd
    >>> wr.s3.to_csv(
    ...     df=pd.DataFrame({
    ...         'col': [1, 2, 3],
    ...         'col2': ['A', 'A', 'B'],
    ...         'col3': [None, None, None]
    ...     }),
    ...     path='s3://bucket/prefix',
    ...     dataset=True,
    ...     database='default',  # Athena/Glue database
    ...     table='my_table',  # Athena/Glue table
    ...     dtype={'col3': 'date'}
    ... )
    {
        'paths': ['s3://.../x.csv'],
        'partitions_values': {}
    }

    """
    if (database is None) ^ (table is None):
        raise exceptions.InvalidArgumentCombination(
            "Please pass database and table arguments to be able to store the metadata into the Athena/Glue Catalog."
        )
    if df.empty is True:
        raise exceptions.EmptyDataFrame()
    partition_cols = partition_cols if partition_cols else []
    dtype = dtype if dtype else {}
    partitions_values: Dict[str, List[str]] = {}

    # Sanitize table to respect Athena's standards
    if (sanitize_columns is True) or (dataset is True):
        df = catalog.sanitize_dataframe_columns_names(df=df)
        partition_cols = [catalog.sanitize_column_name(p) for p in partition_cols]
        dtype = {catalog.sanitize_column_name(k): v.lower() for k, v in dtype.items()}
        catalog.drop_duplicated_columns(df=df)

    session: boto3.Session = _utils.ensure_session(session=boto3_session)
    fs: s3fs.S3FileSystem = _utils.get_fs(session=session, s3_additional_kwargs=s3_additional_kwargs)
    if dataset is False:
        if partition_cols:
            raise exceptions.InvalidArgumentCombination("Please, pass dataset=True to be able to use partition_cols.")
        if mode is not None:
            raise exceptions.InvalidArgumentCombination("Please pass dataset=True to be able to use mode.")
        if columns_comments:
            raise exceptions.InvalidArgumentCombination("Please pass dataset=True to be able to use columns_comments.")
        if any(arg is not None for arg in (database, table, description, parameters)):
            raise exceptions.InvalidArgumentCombination(
                "Please pass dataset=True to be able to use any one of these "
                "arguments: database, table, description, parameters, "
                "columns_comments."
            )
        pandas_kwargs["sep"] = sep
        pandas_kwargs["index"] = index
        pandas_kwargs["columns"] = columns
        _to_text(file_format="csv", df=df, path=path, fs=fs, **pandas_kwargs)
        paths = [path]
    else:
        mode = "append" if mode is None else mode
        if columns:
            df = df[columns]
        if (mode in ("append", "overwrite_partitions")) and (database is not None) and (table is not None):
            # Fetching Catalog Types
            catalog_types: Optional[Dict[str, str]] = catalog.get_table_types(
                database=database, table=table, boto3_session=session
            )
            if catalog_types is not None:
                for k, v in catalog_types.items():
                    dtype[k] = v
        paths, partitions_values = _to_csv_dataset(
            df=df,
            path=path,
            index=index,
            sep=sep,
            fs=fs,
            use_threads=use_threads,
            partition_cols=partition_cols,
            dtype=dtype,
            mode=mode,
            boto3_session=session,
        )
        if (database is not None) and (table is not None):
            columns_types, partitions_types = _data_types.athena_types_from_pandas_partitioned(
                df=df, index=index, partition_cols=partition_cols, dtype=dtype, index_left=True
            )
            catalog.create_csv_table(
                database=database,
                table=table,
                path=path,
                columns_types=columns_types,
                partitions_types=partitions_types,
                description=description,
                parameters=parameters,
                columns_comments=columns_comments,
                boto3_session=session,
                mode=mode,
                catalog_versioning=catalog_versioning,
                sep=sep,
                projection_enabled=projection_enabled,
                projection_types=projection_types,
                projection_ranges=projection_ranges,
                projection_values=projection_values,
                projection_intervals=projection_intervals,
                projection_digits=projection_digits,
            )
            if partitions_values and (regular_partitions is True):
                _logger.debug("partitions_values:\n%s", partitions_values)
                catalog.add_csv_partitions(
                    database=database, table=table, partitions_values=partitions_values, boto3_session=session, sep=sep
                )
    return {"paths": paths, "partitions_values": partitions_values}
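# Hedged usage sketch (illustrative only; bucket and prefix are hypothetical):
# forwarding pandas_kwargs to pandas.DataFrame.to_csv for a single-file write.
# These extra kwargs only apply when dataset=False; with dataset=True they are
# ignored, as noted in the docstring above.
#
#   import awswrangler as wr
#   import pandas as pd
#   wr.s3.to_csv(
#       df=pd.DataFrame({"col": [1, 2, 3]}),
#       path="s3://bucket/prefix/my_file.csv",
#       index=False,
#       sep=";",
#       date_format="%Y-%m-%d",  # forwarded to pandas.DataFrame.to_csv
#   )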