def _create_from_pandas_with_arrow(self, pdf, schema, timezone):
    """
    Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting
    to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the
    data types will be used to coerce the data in Pandas to Arrow conversion.
    """
    from pyspark.serializers import ArrowStreamSerializer, _create_batch
    from pyspark.sql.types import from_arrow_schema, to_arrow_type, TimestampType
    from pyspark.sql.utils import require_minimum_pandas_version, \
        require_minimum_pyarrow_version

    require_minimum_pandas_version()
    require_minimum_pyarrow_version()

    from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype

    # Determine arrow types to coerce data when creating batches
    if isinstance(schema, StructType):
        arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
    elif isinstance(schema, DataType):
        raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
    else:
        # Any timestamps must be coerced to be compatible with Spark
        arrow_types = [to_arrow_type(TimestampType())
                       if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None
                       for t in pdf.dtypes]

    # Slice the DataFrame to be batched
    step = -(-len(pdf) // self.sparkContext.defaultParallelism)  # round int up
    pdf_slices = (pdf[start:start + step] for start in xrange(0, len(pdf), step))

    # Create Arrow record batches
    safecheck = self._wrapped._conf.arrowSafeTypeConversion()
    batches = [_create_batch([(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)],
                             timezone, safecheck)
               for pdf_slice in pdf_slices]

    # Create the Spark schema from the first Arrow batch (always at least 1 batch after slicing)
    if isinstance(schema, (list, tuple)):
        struct = from_arrow_schema(batches[0].schema)
        for i, name in enumerate(schema):
            struct.fields[i].name = name
            struct.names[i] = name
        schema = struct

    jsqlContext = self._wrapped._jsqlContext

    def reader_func(temp_filename):
        return self._jvm.PythonSQLUtils.readArrowStreamFromFile(jsqlContext, temp_filename)

    def create_RDD_server():
        return self._jvm.ArrowRDDServer(jsqlContext)

    # Create Spark DataFrame from Arrow stream file, using one batch per partition
    jrdd = self._sc._serialize_to_jvm(batches, ArrowStreamSerializer(), reader_func,
                                      create_RDD_server)
    jdf = self._jvm.PythonSQLUtils.toDataFrame(jrdd, schema.json(), jsqlContext)
    df = DataFrame(jdf, self._wrapped)
    df._schema = schema
    return df

def _create_from_pandas_with_arrow(self, pdf, schema, timezone):
    """
    Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting
    to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the
    data types will be used to coerce the data in Pandas to Arrow conversion.
    """
    from pyspark.serializers import ArrowSerializer, _create_batch
    from pyspark.sql.types import from_arrow_schema, to_arrow_type, \
        _old_pandas_exception_message, TimestampType
    from pyspark.sql.utils import _require_minimum_pyarrow_version

    try:
        from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
    except ImportError as e:
        raise ImportError(_old_pandas_exception_message(e))

    _require_minimum_pyarrow_version()

    # Determine arrow types to coerce data when creating batches
    if isinstance(schema, StructType):
        arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
    elif isinstance(schema, DataType):
        raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
    else:
        # Any timestamps must be coerced to be compatible with Spark
        arrow_types = [to_arrow_type(TimestampType())
                       if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None
                       for t in pdf.dtypes]

    # Slice the DataFrame to be batched
    step = -(-len(pdf) // self.sparkContext.defaultParallelism)  # round int up
    pdf_slices = (pdf[start:start + step] for start in xrange(0, len(pdf), step))

    # Create Arrow record batches
    batches = [_create_batch([(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)],
                             timezone)
               for pdf_slice in pdf_slices]

    # Create the Spark schema from the first Arrow batch (always at least 1 batch after slicing)
    if isinstance(schema, (list, tuple)):
        struct = from_arrow_schema(batches[0].schema)
        for i, name in enumerate(schema):
            struct.fields[i].name = name
            struct.names[i] = name
        schema = struct

    # Create the Spark DataFrame directly from the Arrow data and schema
    jrdd = self._sc._serialize_to_jvm(batches, len(batches), ArrowSerializer())
    jdf = self._jvm.PythonSQLUtils.arrowPayloadToDataFrame(
        jrdd, schema.json(), self._wrapped._jsqlContext)
    df = DataFrame(jdf, self._wrapped)
    df._schema = schema
    return df

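# Illustrative usage sketch (not part of either variant above; assumes a local Spark
# 2.3/2.4 installation with pandas and pyarrow available). Both versions of
# _create_from_pandas_with_arrow back SparkSession.createDataFrame(pandas_df) once
# Arrow conversion is switched on:
import pandas as pd
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[2]").getOrCreate()
spark.conf.set("spark.sql.execution.arrow.enabled", "true")  # Spark 2.3/2.4 config key
pdf = pd.DataFrame({"ts": pd.to_datetime(["2018-01-01", "2018-01-02"]), "x": [1.0, 2.0]})
df = spark.createDataFrame(pdf)  # routed through _create_from_pandas_with_arrow
df.printSchema()
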
def infer_pd_series_spark_type(s: pd.Series) -> types.DataType:
    """Infer Spark DataType from pandas Series dtype.

    :param s: :class:`pandas.Series` to be inferred
    :return: the inferred Spark data type
    """
    dt = s.dtype
    if dt == np.dtype('object'):
        if len(s) == 0 or s.isnull().all():
            raise ValueError("can not infer schema from empty or null dataset")
        return types.from_arrow_type(pa.Array.from_pandas(s).type)
    elif is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt):
        return types.TimestampType()
    else:
        return types.from_arrow_type(pa.from_numpy_dtype(dt))

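# Illustrative usage sketch (not part of the function above; relies on the module-level
# imports it assumes: pandas as pd, numpy as np, pyarrow as pa, and the Spark `types`
# module). Plain NumPy dtypes map through Arrow, datetime64 columns are forced to
# TimestampType, and object columns are inferred from their actual values:
def _demo_infer_pd_series_spark_type():
    print(infer_pd_series_spark_type(pd.Series([1.0, 2.0])))                      # DoubleType
    print(infer_pd_series_spark_type(pd.Series(pd.to_datetime(["2020-01-01"]))))  # TimestampType
    print(infer_pd_series_spark_type(pd.Series(["a", "b"])))                      # StringType
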
def from_pandas(pdf: pd.DataFrame) -> '_InternalFrame':
    """ Create an immutable DataFrame from pandas DataFrame.

    :param pdf: :class:`pd.DataFrame`
    :return: the created immutable DataFrame
    """
    columns = pdf.columns
    data_columns = [str(col) for col in columns]
    if isinstance(columns, pd.MultiIndex):
        column_index = columns.tolist()
    else:
        column_index = None
    column_index_names = columns.names

    index = pdf.index

    index_map = []  # type: List[IndexMap]
    if isinstance(index, pd.MultiIndex):
        if index.names is None:
            index_map = [(SPARK_INDEX_NAME_FORMAT(i), None)
                         for i in range(len(index.levels))]
        else:
            index_map = [(SPARK_INDEX_NAME_FORMAT(i) if name is None else name,
                          name if name is None or isinstance(name, tuple) else (name,))
                         for i, name in enumerate(index.names)]
    else:
        name = index.name
        index_map = [(name_like_string(name)
                      if name is not None else SPARK_INDEX_NAME_FORMAT(0),
                      name if name is None or isinstance(name, tuple) else (name,))]

    index_columns = [index_column for index_column, _ in index_map]

    reset_index = pdf.reset_index()
    reset_index.columns = index_columns + data_columns
    schema = StructType([StructField(name_like_string(name), infer_pd_series_spark_type(col),
                                     nullable=bool(col.isnull().any()))
                         for name, col in reset_index.iteritems()])
    for name, col in reset_index.iteritems():
        dt = col.dtype
        if is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt):
            continue
        reset_index[name] = col.replace({np.nan: None})
    sdf = default_session().createDataFrame(reset_index, schema=schema)
    return _InternalFrame(sdf=sdf, index_map=index_map, data_columns=data_columns,
                          column_index=column_index,
                          column_index_names=column_index_names)

def _get_columns_info(self, stats):
    column_info = {}
    column_info[self.TYPE_CONSTANT] = stats['uniques'][stats['uniques'] == 1].index
    column_info[self.TYPE_BOOL] = stats['uniques'][stats['uniques'] == 2].index
    rest_columns = self.get_columns(self.df,
                                    self.EXCLUDE,
                                    column_info['constant'].union(column_info['bool']))
    column_info[self.TYPE_NUMERIC] = pd.Index([c for c in rest_columns
                                               if types.is_numeric_dtype(self.df[c])])
    rest_columns = self.get_columns(
        self.df[rest_columns], self.EXCLUDE, column_info['numeric'])
    column_info[self.TYPE_DATE] = pd.Index([c for c in rest_columns
                                            if types.is_datetime64_dtype(self.df[c])])
    rest_columns = self.get_columns(
        self.df[rest_columns], self.EXCLUDE, column_info['date'])
    unique_columns = stats['uniques'][rest_columns] == stats['counts'][rest_columns]
    column_info[self.TYPE_UNIQUE] = stats['uniques'][rest_columns][unique_columns].index
    column_info[self.TYPE_CATEGORICAL] = stats['uniques'][rest_columns][~unique_columns].index
    return column_info

def _read_data(data_file, **kwargs):
    if data_file is None:
        raise ValueError("Data file must be specified.")
    elif not isinstance(data_file, str):
        raise TypeError("Data file must be a string.")
    elif (data_file.endswith('pkl') or data_file.endswith('pickle')):
        data = pd.read_pickle(data_file, **kwargs)
    elif data_file.endswith('csv'):
        data = pd.read_csv(data_file, **kwargs)
    elif data_file.endswith('txt'):
        data = pd.read_table(data_file, **kwargs)
    else:
        raise ValueError(
            "Data file must have extension pkl, pickle, csv, or txt.")
    if not is_datetime64_dtype(data.index):
        raise TypeError("Index must be of type datetime64.")
    return data

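# Illustrative usage sketch (hypothetical file name; relies on the module importing
# pandas as pd and is_datetime64_dtype). The reader dispatches on the file extension
# and then requires a datetime64 index, so CSV input typically needs index_col and
# parse_dates so that the index parses as datetimes:
def _demo_read_data():
    return _read_data("prices.csv", index_col=0, parse_dates=True)
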
def get_type(series):
    if series.name == 'geometry' and isinstance(series, gpd.GeoSeries):
        return constants.TYPE_GEO
    try:
        distinct_count = series.nunique()
        value_count = series.nunique(dropna=False)
        if value_count == 1 and distinct_count == 0:
            return constants.TYPE_EMPTY
        elif pd_types.is_bool_dtype(series):
            return constants.TYPE_BOOL
        elif pd_types.is_datetime64_dtype(series):
            return constants.TYPE_DATE
        elif pd_types.is_numeric_dtype(series):
            return constants.TYPE_NUM
        else:
            return constants.TYPE_STR
    except:  # eg. 2D series
        return constants.TYPE_UNSUPPORTED

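# Illustrative usage sketch (not part of the function above; relies on the module
# importing pandas as pd, pandas.api.types as pd_types, geopandas as gpd, and the
# project's `constants`). Booleans win over the generic numeric check because
# is_bool_dtype is tested first:
def _demo_get_type():
    print(get_type(pd.Series([True, False])))                   # constants.TYPE_BOOL
    print(get_type(pd.Series(pd.to_datetime(["2020-01-01"]))))  # constants.TYPE_DATE
    print(get_type(pd.Series([1, 2, 3])))                       # constants.TYPE_NUM
    print(get_type(pd.Series(["a", "b"])))                      # constants.TYPE_STR
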
def _create_from_pandas_with_arrow(self, pdf, schema, timezone):
    """
    Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting
    to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the
    data types will be used to coerce the data in Pandas to Arrow conversion.
    """
    from distutils.version import LooseVersion
    from pyspark.serializers import ArrowStreamPandasSerializer
    from pyspark.sql.types import from_arrow_type, to_arrow_type, TimestampType
    from pyspark.sql.utils import require_minimum_pandas_version, \
        require_minimum_pyarrow_version

    require_minimum_pandas_version()
    require_minimum_pyarrow_version()

    from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
    import pyarrow as pa

    # Create the Spark schema from list of names passed in with Arrow types
    if isinstance(schema, (list, tuple)):
        if LooseVersion(pa.__version__) < LooseVersion("0.12.0"):
            temp_batch = pa.RecordBatch.from_pandas(pdf[0:100], preserve_index=False)
            arrow_schema = temp_batch.schema
        else:
            arrow_schema = pa.Schema.from_pandas(pdf, preserve_index=False)
        struct = StructType()
        for name, field in zip(schema, arrow_schema):
            struct.add(name, from_arrow_type(field.type), nullable=field.nullable)
        schema = struct

    # Determine arrow types to coerce data when creating batches
    if isinstance(schema, StructType):
        arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
    elif isinstance(schema, DataType):
        raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
    else:
        # Any timestamps must be coerced to be compatible with Spark
        arrow_types = [to_arrow_type(TimestampType())
                       if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None
                       for t in pdf.dtypes]

    # Slice the DataFrame to be batched
    step = -(-len(pdf) // self.sparkContext.defaultParallelism)  # round int up
    pdf_slices = (pdf[start:start + step] for start in xrange(0, len(pdf), step))

    # Create list of Arrow (columns, type) for serializer dump_stream
    arrow_data = [[(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)]
                  for pdf_slice in pdf_slices]

    jsqlContext = self._wrapped._jsqlContext

    safecheck = self._wrapped._conf.arrowSafeTypeConversion()
    col_by_name = True  # col by name only applies to StructType columns, can't happen here
    ser = ArrowStreamPandasSerializer(timezone, safecheck, col_by_name)

    def reader_func(temp_filename):
        return self._jvm.PythonSQLUtils.readArrowStreamFromFile(jsqlContext, temp_filename)

    def create_RDD_server():
        return self._jvm.ArrowRDDServer(jsqlContext)

    # Create Spark DataFrame from Arrow stream file, using one batch per partition
    jrdd = self._sc._serialize_to_jvm(arrow_data, ser, reader_func, create_RDD_server)
    jdf = self._jvm.PythonSQLUtils.toDataFrame(jrdd, schema.json(), jsqlContext)
    df = DataFrame(jdf, self._wrapped)
    df._schema = schema
    return df

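# Illustrative usage note (assumption, not stated in the source above): this later
# variant streams (column, type) pairs through ArrowStreamPandasSerializer instead of
# pre-built record batches, but the public entry point is unchanged; on Spark 3.x the
# Arrow switch is spelled differently:
#
#   spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")  # Spark 3.x key
#   df = spark.createDataFrame(pdf)  # still routed through _create_from_pandas_with_arrow
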
def test_prod_data_convert():
    prod_data_converted = preprocess.prod_date_convert(prod_data, prod_col_dict)
    assert ptypes.is_datetime64_dtype(
        prod_data_converted[prod_col_dict['timestamp']])

def test_om_data_convert_e():
    om_data_converted = preprocess.om_date_convert(om_data, om_col_dict)
    assert ptypes.is_datetime64_dtype(
        om_data_converted[om_col_dict['dateend']])

def test_default_sample_creation():
    default_sample = create_arma_sample()
    assert isinstance(default_sample, pd.Series)
    assert default_sample.shape == (100,)
    assert is_datetime64_dtype(default_sample.index)

def test_specified_sample_creation():
    default_sample = create_arma_sample(ar_order=2, ma_order=2, size=200)
    assert isinstance(default_sample, pd.Series)
    assert default_sample.shape == (200,)
    assert is_datetime64_dtype(default_sample.index)