def _reassign_jdf(target_df: DataFrame, new_df: DataFrame) -> None:
    """
    Reassigns the Java DataFrame content of a DataFrame.
    """
    target_df._jdf = new_df._jdf
    target_df._metadata = new_df._metadata
    # Reset the cached variables so they are recomputed from the new _jdf on next access
    target_df._schema = None
    target_df._lazy_rdd = None
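
# Usage sketch (illustrative only, not called anywhere in this module): `_reassign_jdf`
# swaps the JVM-side plan under an existing Python wrapper in place, so any references
# to `target_df` held elsewhere see the new data. The sketch assumes a live local
# SparkSession and this module's DataFrame wrapper (which carries `_metadata`).
def _example_reassign_jdf_usage() -> None:
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.getOrCreate()
    df1 = spark.range(3)
    df2 = spark.range(3).selectExpr("id", "id * 2 AS doubled")
    _reassign_jdf(df1, df2)
    # df1 is now backed by df2's plan; its cached schema/RDD are re-derived lazily.
    assert df1.columns == ["id", "doubled"]
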
def _create_from_pandas_with_arrow(
    self, pdf: "PandasDataFrameLike", schema: Union[StructType, List[str]], timezone: str
) -> "DataFrame":
    """
    Create a DataFrame from a given pandas.DataFrame by slicing it into partitions,
    converting to Arrow data, then sending it to the JVM to parallelize. If a schema
    is passed in, its data types will be used to coerce the data in the
    pandas-to-Arrow conversion.
    """
    from pyspark.sql import SparkSession
    from pyspark.sql.dataframe import DataFrame

    assert isinstance(self, SparkSession)

    from pyspark.sql.pandas.serializers import ArrowStreamPandasSerializer
    from pyspark.sql.types import TimestampType
    from pyspark.sql.pandas.types import from_arrow_type, to_arrow_type
    from pyspark.sql.pandas.utils import (
        require_minimum_pandas_version,
        require_minimum_pyarrow_version,
    )
    from pyspark.sql.utils import is_timestamp_ntz_preferred

    require_minimum_pandas_version()
    require_minimum_pyarrow_version()

    from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
    import pyarrow as pa

    # Create the Spark schema from the list of names passed in, with Arrow types
    if isinstance(schema, (list, tuple)):
        arrow_schema = pa.Schema.from_pandas(pdf, preserve_index=False)
        struct = StructType()
        prefer_timestamp_ntz = is_timestamp_ntz_preferred()
        for name, field in zip(schema, arrow_schema):
            struct.add(
                name,
                from_arrow_type(field.type, prefer_timestamp_ntz),
                nullable=field.nullable,
            )
        schema = struct

    # Determine the Arrow types to coerce the data to when creating batches
    if isinstance(schema, StructType):
        arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
    elif isinstance(schema, DataType):
        raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
    else:
        # Any timestamps must be coerced to be compatible with Spark
        arrow_types = [
            to_arrow_type(TimestampType())
            if is_datetime64_dtype(t) or is_datetime64tz_dtype(t)
            else None
            for t in pdf.dtypes
        ]

    # Slice the DataFrame to be batched
    step = -(-len(pdf) // self.sparkContext.defaultParallelism)  # round int up
    pdf_slices = (pdf.iloc[start : start + step] for start in range(0, len(pdf), step))

    # Create a list of Arrow (columns, type) pairs for the serializer's dump_stream
    arrow_data = [
        [(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)]
        for pdf_slice in pdf_slices
    ]

    jsqlContext = self._wrapped._jsqlContext  # type: ignore[attr-defined]

    safecheck = self._wrapped._conf.arrowSafeTypeConversion()  # type: ignore[attr-defined]
    col_by_name = True  # col by name only applies to StructType columns; can't happen here
    ser = ArrowStreamPandasSerializer(timezone, safecheck, col_by_name)

    @no_type_check
    def reader_func(temp_filename):
        return self._jvm.PythonSQLUtils.readArrowStreamFromFile(jsqlContext, temp_filename)

    @no_type_check
    def create_RDD_server():
        return self._jvm.ArrowRDDServer(jsqlContext)

    # Create the Spark DataFrame from the Arrow stream file, using one batch per partition
    jrdd = self._sc._serialize_to_jvm(  # type: ignore[attr-defined]
        arrow_data, ser, reader_func, create_RDD_server
    )
    jdf = self._jvm.PythonSQLUtils.toDataFrame(  # type: ignore[attr-defined]
        jrdd, schema.json(), jsqlContext
    )
    df = DataFrame(jdf, self._wrapped)
    df._schema = schema
    return df
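
# Usage sketch (illustrative only, not called anywhere in this module): this helper is
# the Arrow fast path behind SparkSession.createDataFrame(pandas_df) when
# "spark.sql.execution.arrow.pyspark.enabled" is true. The session built below is an
# assumption for the example; pandas and pyarrow must be installed.
def _example_create_from_pandas_with_arrow() -> None:
    import pandas as pd
    from pyspark.sql import SparkSession

    spark = (
        SparkSession.builder
        .config("spark.sql.execution.arrow.pyspark.enabled", "true")
        .getOrCreate()
    )
    pdf = pd.DataFrame(
        {
            "id": [1, 2, 3],
            # datetime64 columns are coerced to Spark timestamps by this path
            "ts": pd.to_datetime(["2021-01-01", "2021-01-02", "2021-01-03"]),
        }
    )
    sdf = spark.createDataFrame(pdf)  # dispatches to _create_from_pandas_with_arrow
    sdf.show()
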