def _inferSchemaFromList(
    self, data: Iterable[Any], names: Optional[List[str]] = None
) -> StructType:
    """
    Infer schema from list of Row, dict, or tuple.

    Parameters
    ----------
    data : iterable
        list of Row, dict, or tuple
    names : list, optional
        list of column names

    Returns
    -------
    :class:`pyspark.sql.types.StructType`
    """
    if not data:
        raise ValueError("can not infer schema from empty dataset")
    infer_dict_as_struct = self._wrapped._conf.inferDictAsStruct()  # type: ignore[attr-defined]
    prefer_timestamp_ntz = is_timestamp_ntz_preferred()
    schema = reduce(
        _merge_type,
        (_infer_schema(row, names, infer_dict_as_struct, prefer_timestamp_ntz) for row in data),
    )
    if _has_nulltype(schema):
        raise ValueError("Some of types cannot be determined after inferring")
    return schema
def _convert_from_pandas(
    self, pdf: "PandasDataFrameLike", schema: Union[StructType, str, List[str]], timezone: str
) -> List:
    """
    Convert a pandas.DataFrame to a list of records that can be used to make a DataFrame

    Returns
    -------
    list
        list of records
    """
    from pyspark.sql import SparkSession

    assert isinstance(self, SparkSession)

    if timezone is not None:
        from pyspark.sql.pandas.types import _check_series_convert_timestamps_tz_local
        from pandas.core.dtypes.common import is_datetime64tz_dtype

        copied = False
        if isinstance(schema, StructType):
            for field in schema:
                # TODO: handle nested timestamps, such as ArrayType(TimestampType())?
                if isinstance(field.dataType, TimestampType):
                    s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone)
                    if s is not pdf[field.name]:
                        if not copied:
                            # Copy once if the series is modified to prevent the original
                            # Pandas DataFrame from being updated
                            pdf = pdf.copy()
                            copied = True
                        pdf[field.name] = s
        else:
            should_localize = not is_timestamp_ntz_preferred()
            for column, series in pdf.iteritems():
                s = series
                if should_localize and is_datetime64tz_dtype(s.dtype) and s.dt.tz is not None:
                    s = _check_series_convert_timestamps_tz_local(series, timezone)
                if s is not series:
                    if not copied:
                        # Copy once if the series is modified to prevent the original
                        # Pandas DataFrame from being updated
                        pdf = pdf.copy()
                        copied = True
                    pdf[column] = s

    # Convert pandas.DataFrame to a list of numpy records
    np_records = pdf.to_records(index=False)

    # Check if any columns need to be fixed for Spark to infer properly
    if len(np_records) > 0:
        record_dtype = self._get_numpy_record_dtype(np_records[0])
        if record_dtype is not None:
            return [r.astype(record_dtype).tolist() for r in np_records]

    # Convert list of numpy records to python lists
    return [r.tolist() for r in np_records]
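
# Hedged usage sketch (not part of the original source): _convert_from_pandas backs the
# non-Arrow createDataFrame path for pandas input, e.g. when Arrow conversion is disabled
# or falls back. The session, config value, and sample frame below are illustrative
# assumptions; the expected dtypes assume default session settings.
#
# >>> import pandas as pd
# >>> from pyspark.sql import SparkSession
# >>> spark = SparkSession.builder.getOrCreate()
# >>> spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "false")
# >>> pdf = pd.DataFrame({"id": [1, 2], "ts": pd.to_datetime(["2023-01-01", "2023-01-02"])})
# >>> spark.createDataFrame(pdf).dtypes  # timestamps localized per the session timezone
# [('id', 'bigint'), ('ts', 'timestamp')]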
def _inferSchema(
    self,
    rdd: "RDD[Any]",
    samplingRatio: Optional[float] = None,
    names: Optional[List[str]] = None,
) -> StructType:
    """
    Infer schema from an RDD of Row, dict, or tuple.

    Parameters
    ----------
    rdd : :class:`RDD`
        an RDD of Row, dict, or tuple
    samplingRatio : float, optional
        sampling ratio, or no sampling (default)
    names : list, optional

    Returns
    -------
    :class:`pyspark.sql.types.StructType`
    """
    first = rdd.first()
    if not first:
        raise ValueError("The first row in RDD is empty, can not infer schema")

    infer_dict_as_struct = self._wrapped._conf.inferDictAsStruct()  # type: ignore[attr-defined]
    prefer_timestamp_ntz = is_timestamp_ntz_preferred()
    if samplingRatio is None:
        schema = _infer_schema(
            first,
            names=names,
            infer_dict_as_struct=infer_dict_as_struct,
            prefer_timestamp_ntz=prefer_timestamp_ntz,
        )
        if _has_nulltype(schema):
            for row in rdd.take(100)[1:]:
                schema = _merge_type(
                    schema,
                    _infer_schema(
                        row,
                        names=names,
                        infer_dict_as_struct=infer_dict_as_struct,
                        prefer_timestamp_ntz=prefer_timestamp_ntz,
                    ),
                )
                if not _has_nulltype(schema):
                    break
            else:
                raise ValueError(
                    "Some of types cannot be determined by the "
                    "first 100 rows, please try again with sampling"
                )
    else:
        if samplingRatio < 0.99:
            rdd = rdd.sample(False, float(samplingRatio))
        schema = rdd.map(
            lambda row: _infer_schema(
                row,
                names,
                infer_dict_as_struct=infer_dict_as_struct,
                prefer_timestamp_ntz=prefer_timestamp_ntz,
            )
        ).reduce(_merge_type)
    return schema
def _inferSchemaFromList(
    self, data: Iterable[Any], names: Optional[List[str]] = None
) -> StructType:
    """
    Infer schema from list of Row, dict, or tuple.

    Parameters
    ----------
    data : iterable
        list of Row, dict, or tuple
    names : list, optional
        list of column names

    Returns
    -------
    :class:`pyspark.sql.types.StructType`
    """
    if not data:
        raise ValueError("can not infer schema from empty dataset")
    infer_dict_as_struct = self._jconf.inferDictAsStruct()
    infer_array_from_first_element = self._jconf.legacyInferArrayTypeFromFirstElement()
    prefer_timestamp_ntz = is_timestamp_ntz_preferred()
    schema = reduce(
        _merge_type,
        (
            _infer_schema(
                row,
                names,
                infer_dict_as_struct=infer_dict_as_struct,
                infer_array_from_first_element=infer_array_from_first_element,
                prefer_timestamp_ntz=prefer_timestamp_ntz,
            )
            for row in data
        ),
    )
    if _has_nulltype(schema):
        raise ValueError("Some of types cannot be determined after inferring")
    return schema
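
# Hedged doctest-style sketch (not part of the original source): illustrates the kind of
# schema this helper infers from a small list of Rows when driven directly from a live
# SparkSession; the `spark` session and the sample rows are assumptions.
#
# >>> from pyspark.sql import Row, SparkSession
# >>> spark = SparkSession.builder.getOrCreate()
# >>> spark._inferSchemaFromList([Row(id=1, name="a"), Row(id=2, name="b")]).simpleString()
# 'struct<id:bigint,name:string>'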
def _create_from_pandas_with_arrow(self, pdf, schema, timezone):
    """
    Create a DataFrame from a given pandas.DataFrame by slicing it into partitions,
    converting to Arrow data, then sending to the JVM to parallelize. If a schema is
    passed in, the data types will be used to coerce the data in Pandas to Arrow
    conversion.
    """
    from pyspark.sql import SparkSession
    from pyspark.sql.dataframe import DataFrame

    assert isinstance(self, SparkSession)

    from pyspark.sql.pandas.serializers import ArrowStreamPandasSerializer
    from pyspark.sql.types import TimestampType
    from pyspark.sql.pandas.types import from_arrow_type, to_arrow_type
    from pyspark.sql.pandas.utils import require_minimum_pandas_version, \
        require_minimum_pyarrow_version

    require_minimum_pandas_version()
    require_minimum_pyarrow_version()

    from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
    import pyarrow as pa

    # Create the Spark schema from list of names passed in with Arrow types
    if isinstance(schema, (list, tuple)):
        arrow_schema = pa.Schema.from_pandas(pdf, preserve_index=False)
        struct = StructType()
        prefer_timestamp_ntz = is_timestamp_ntz_preferred()
        for name, field in zip(schema, arrow_schema):
            struct.add(
                name, from_arrow_type(field.type, prefer_timestamp_ntz), nullable=field.nullable
            )
        schema = struct

    # Determine arrow types to coerce data when creating batches
    if isinstance(schema, StructType):
        arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
    elif isinstance(schema, DataType):
        raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
    else:
        # Any timestamps must be coerced to be compatible with Spark
        arrow_types = [
            to_arrow_type(TimestampType())
            if is_datetime64_dtype(t) or is_datetime64tz_dtype(t)
            else None
            for t in pdf.dtypes
        ]

    # Slice the DataFrame to be batched
    step = -(-len(pdf) // self.sparkContext.defaultParallelism)  # round int up
    pdf_slices = (pdf.iloc[start:start + step] for start in range(0, len(pdf), step))

    # Create list of Arrow (columns, type) for serializer dump_stream
    arrow_data = [
        [(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)]
        for pdf_slice in pdf_slices
    ]

    jsqlContext = self._wrapped._jsqlContext

    safecheck = self._wrapped._conf.arrowSafeTypeConversion()
    col_by_name = True  # col by name only applies to StructType columns, can't happen here
    ser = ArrowStreamPandasSerializer(timezone, safecheck, col_by_name)

    def reader_func(temp_filename):
        return self._jvm.PythonSQLUtils.readArrowStreamFromFile(jsqlContext, temp_filename)

    def create_RDD_server():
        return self._jvm.ArrowRDDServer(jsqlContext)

    # Create Spark DataFrame from Arrow stream file, using one batch per partition
    jrdd = self._sc._serialize_to_jvm(arrow_data, ser, reader_func, create_RDD_server)
    jdf = self._jvm.PythonSQLUtils.toDataFrame(jrdd, schema.json(), jsqlContext)
    df = DataFrame(jdf, self._wrapped)
    df._schema = schema
    return df
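
# Hedged usage sketch (not part of the original source): this is the path createDataFrame
# takes for pandas input when Arrow conversion is enabled; each slice of the frame becomes
# one Arrow batch and one partition. The session and sample frame are illustrative
# assumptions.
#
# >>> import pandas as pd
# >>> from pyspark.sql import SparkSession
# >>> spark = SparkSession.builder.getOrCreate()
# >>> spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
# >>> pdf = pd.DataFrame({"id": range(4), "v": [0.1, 0.2, 0.3, 0.4]})
# >>> spark.createDataFrame(pdf).count()
# 4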
def _inferSchema(
    self,
    rdd: RDD[Any],
    samplingRatio: Optional[float] = None,
    names: Optional[List[str]] = None,
) -> StructType:
    """
    Infer schema from an RDD of Row, dict, or tuple.

    Parameters
    ----------
    rdd : :class:`RDD`
        an RDD of Row, dict, or tuple
    samplingRatio : float, optional
        sampling ratio, or no sampling (default)
    names : list, optional

    Returns
    -------
    :class:`pyspark.sql.types.StructType`
    """
    first = rdd.first()
    if isinstance(first, Sized) and len(first) == 0:
        raise ValueError("The first row in RDD is empty, can not infer schema")

    infer_dict_as_struct = self._jconf.inferDictAsStruct()
    infer_array_from_first_element = self._jconf.legacyInferArrayTypeFromFirstElement()
    prefer_timestamp_ntz = is_timestamp_ntz_preferred()
    if samplingRatio is None:
        schema = _infer_schema(
            first,
            names=names,
            infer_dict_as_struct=infer_dict_as_struct,
            prefer_timestamp_ntz=prefer_timestamp_ntz,
        )
        if _has_nulltype(schema):
            for row in rdd.take(100)[1:]:
                schema = _merge_type(
                    schema,
                    _infer_schema(
                        row,
                        names=names,
                        infer_dict_as_struct=infer_dict_as_struct,
                        infer_array_from_first_element=infer_array_from_first_element,
                        prefer_timestamp_ntz=prefer_timestamp_ntz,
                    ),
                )
                if not _has_nulltype(schema):
                    break
            else:
                raise ValueError(
                    "Some of types cannot be determined by the "
                    "first 100 rows, please try again with sampling"
                )
    else:
        if samplingRatio < 0.99:
            rdd = rdd.sample(False, float(samplingRatio))
        schema = rdd.map(
            lambda row: _infer_schema(
                row,
                names,
                infer_dict_as_struct=infer_dict_as_struct,
                infer_array_from_first_element=infer_array_from_first_element,
                prefer_timestamp_ntz=prefer_timestamp_ntz,
            )
        ).reduce(_merge_type)
    return schema
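
# Hedged usage sketch (not part of the original source): samplingRatio is surfaced through
# createDataFrame on RDD input. When the first rows leave a column as NullType, a pass over
# the RDD (samplingRatio >= 0.99 skips the sample step entirely) can still resolve the type.
# The session and data below are illustrative assumptions.
#
# >>> from pyspark.sql import SparkSession
# >>> spark = SparkSession.builder.getOrCreate()
# >>> rdd = spark.sparkContext.parallelize([{"a": None}] * 200 + [{"a": 1}])
# >>> spark.createDataFrame(rdd, samplingRatio=1.0).schema.simpleString()
# 'struct<a:bigint>'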