def _is_monotonic_decreasing(self) -> Series:
    window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(-1, -1)

    cond = SF.lit(True)
    has_not_null = SF.lit(True)
    for scol in self._internal.index_spark_columns[::-1]:
        data_type = self._internal.spark_type_for(scol)
        prev = F.lag(scol, 1).over(window)
        compare = MultiIndex._comparator_for_monotonic_increasing(data_type)
        # Since pandas 1.1.4, null values are not allowed at any level of a MultiIndex.
        # Therefore, we should check `has_not_null` over all levels.
        has_not_null = has_not_null & scol.isNotNull()
        cond = F.when(scol.eqNullSafe(prev), cond).otherwise(compare(scol, prev, Column.__lt__))

    cond = has_not_null & (prev.isNull() | cond)

    cond_name = verify_temp_column_name(
        self._internal.spark_frame.select(self._internal.index_spark_columns),
        "__is_monotonic_decreasing_cond__",
    )

    sdf = self._internal.spark_frame.select(
        self._internal.index_spark_columns + [cond.alias(cond_name)]
    )

    internal = InternalFrame(
        spark_frame=sdf,
        index_spark_columns=[
            scol_for(sdf, col) for col in self._internal.index_spark_column_names
        ],
        index_names=self._internal.index_names,
        index_fields=self._internal.index_fields,
    )

    return first_series(DataFrame(internal))
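# A minimal usage sketch for the helper above (illustrative only; assumes a
# running Spark session and `import pyspark.pandas as ps`). The public
# `is_monotonic_decreasing` property aggregates the per-row condition that
# `_is_monotonic_decreasing` computes with the lag window above.
import pyspark.pandas as ps

psidx = ps.Index([5, 4, 4, 1])
print(psidx.is_monotonic_decreasing)  # True: each value is <= its predecessor

# For a MultiIndex, levels are compared lexicographically, and a null at any
# level makes the result False (pandas >= 1.1.4 semantics, per the comment in
# the loop above).
psmidx = ps.MultiIndex.from_tuples([("z", 2), ("y", 1), ("x", 1)])
print(psmidx.is_monotonic_decreasing)  # True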
def indexer_between_time(
    self,
    start_time: Union[datetime.time, str],
    end_time: Union[datetime.time, str],
    include_start: bool = True,
    include_end: bool = True,
) -> Index:
    """
    Return index locations of values between particular times of day
    (example: 9:00-9:30AM).

    Parameters
    ----------
    start_time, end_time : datetime.time, str
        Time passed either as object (datetime.time) or as string in
        appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
        "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p").
    include_start : bool, default True
    include_end : bool, default True

    Returns
    -------
    values_between_time : Index of integers

    Examples
    --------
    >>> psidx = ps.date_range("2000-01-01", periods=3, freq="T")
    >>> psidx  # doctest: +NORMALIZE_WHITESPACE
    DatetimeIndex(['2000-01-01 00:00:00', '2000-01-01 00:01:00',
                   '2000-01-01 00:02:00'],
                  dtype='datetime64[ns]', freq=None)

    >>> psidx.indexer_between_time("00:01", "00:02").sort_values()
    Int64Index([1, 2], dtype='int64')

    >>> psidx.indexer_between_time("00:01", "00:02", include_end=False)
    Int64Index([1], dtype='int64')

    >>> psidx.indexer_between_time("00:01", "00:02", include_start=False)
    Int64Index([2], dtype='int64')
    """

    @no_type_check
    def pandas_between_time(pdf) -> ps.DataFrame[int]:
        return pdf.between_time(start_time, end_time, include_start, include_end)

    psdf = self.to_frame()[[]]
    id_column_name = verify_temp_column_name(psdf, "__id_column__")
    psdf = psdf.pandas_on_spark.attach_id_column("distributed-sequence", id_column_name)
    with ps.option_context("compute.default_index_type", "distributed"):
        # The attached index in the statement below will be dropped soon,
        # so we enforce the "distributed" default index type.
        psdf = psdf.pandas_on_spark.apply_batch(pandas_between_time)
    return ps.Index(first_series(psdf).rename(self.name))
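# A hedged sketch of what each batch-level call above does (pure pandas,
# illustrative names): `apply_batch` hands `pandas_between_time` a pandas
# DataFrame indexed by this DatetimeIndex, pandas' `between_time` filters the
# rows by time of day, and the attached distributed-sequence ids of the
# surviving rows become the returned integer locations.
import pandas as pd

pdf = pd.DataFrame(
    {"__id_column__": [0, 1, 2]},
    index=pd.date_range("2000-01-01", periods=3, freq="T"),
)
print(pdf.between_time("00:01", "00:02"))  # rows with ids 1 and 2 survive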
def indexer_at_time(self, time: Union[datetime.time, str], asof: bool = False) -> Index:
    """
    Return index locations of values at particular time of day
    (example: 9:30AM).

    Parameters
    ----------
    time : datetime.time or str
        Time passed in either as object (datetime.time) or as string in
        appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
        "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p").

    Returns
    -------
    values_at_time : Index of integers

    Examples
    --------
    >>> psidx = ps.date_range("2000-01-01", periods=3, freq="T")
    >>> psidx  # doctest: +NORMALIZE_WHITESPACE
    DatetimeIndex(['2000-01-01 00:00:00', '2000-01-01 00:01:00',
                   '2000-01-01 00:02:00'],
                  dtype='datetime64[ns]', freq=None)

    >>> psidx.indexer_at_time("00:00")
    Int64Index([0], dtype='int64')

    >>> psidx.indexer_at_time("00:01")
    Int64Index([1], dtype='int64')
    """
    if asof:
        raise NotImplementedError("'asof' argument is not supported")

    @no_type_check
    def pandas_at_time(pdf) -> ps.DataFrame[int]:
        return pdf.at_time(time, asof)

    psdf = self.to_frame()[[]]
    id_column_name = verify_temp_column_name(psdf, "__id_column__")
    psdf = psdf.pandas_on_spark.attach_id_column("distributed-sequence", id_column_name)
    with ps.option_context("compute.default_index_type", "distributed"):
        # The attached index in the statement below will be dropped soon,
        # so we enforce the "distributed" default index type.
        psdf = psdf.pandas_on_spark.apply_batch(pandas_at_time)
    return ps.Index(first_series(psdf).rename(self.name))
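# Minimal sketch of the id-column trick shared by `indexer_at_time` and
# `indexer_between_time` (assumes a Spark session; `__id__` is an illustrative
# name): positional ids are attached before filtering, so whichever rows pandas
# keeps map back to integer locations in the original index.
import pyspark.pandas as ps

psidx = ps.date_range("2000-01-01", periods=3, freq="T")
psdf = psidx.to_frame()[[]]  # an empty-column frame that keeps only the index
psdf = psdf.pandas_on_spark.attach_id_column("distributed-sequence", "__id__")
print(psdf)  # each index value paired with its positional id in "__id__"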
def transform_batch(
    self, func: Callable[..., Union[pd.DataFrame, pd.Series]], *args: Any, **kwargs: Any
) -> DataFrameOrSeries:
    """
    Transform chunks with a function that takes a pandas DataFrame and outputs a pandas
    DataFrame. The pandas DataFrame given to the function is a batch used internally. The
    length of each input and output should be the same.

    See also `Transform and apply a function
    <https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_.

    .. note:: the `func` is unable to access the whole input frame. pandas-on-Spark
        internally splits the input frame into multiple batches and calls `func` with each
        batch multiple times. Therefore, operations such as global aggregations are
        impossible. See the example below.

        >>> # This case does not return the length of whole frame but of the batch internally
        ... # used.
        ... def length(pdf) -> ps.DataFrame[int]:
        ...     return pd.DataFrame([len(pdf)] * len(pdf))
        ...
        >>> df = ps.DataFrame({'A': range(1000)})
        >>> df.pandas_on_spark.transform_batch(length)  # doctest: +SKIP
            c0
        0   83
        1   83
        2   83
        ...

    .. note:: this API executes the function once to infer the type, which is potentially
        expensive, for instance, when the dataset is created after aggregations or sorting.

        To avoid this, specify the return type in ``func``, for instance, as below:

        >>> def plus_one(x) -> ps.DataFrame[int, [float, float]]:
        ...     return x + 1

        If the return type is specified, the output column names become
        `c0, c1, c2 ... cn`. These names are positionally mapped to the returned
        DataFrame in ``func``.

        To specify the column names, you can assign them in a NumPy compound type style
        as below:

        >>> def plus_one(x) -> ps.DataFrame[("index", int), [("a", float), ("b", float)]]:
        ...     return x + 1

        >>> pdf = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
        >>> def plus_one(x) -> ps.DataFrame[
        ...         (pdf.index.name, pdf.index.dtype), zip(pdf.dtypes, pdf.columns)]:
        ...     return x + 1

    Parameters
    ----------
    func : function
        Function to transform each pandas frame.
    *args
        Positional arguments to pass to func.
    **kwargs
        Keyword arguments to pass to func.

    Returns
    -------
    DataFrame or Series

    See Also
    --------
    DataFrame.pandas_on_spark.apply_batch: For row/columnwise operations.
    Series.pandas_on_spark.transform_batch: Transform the Series as each pandas chunk.

    Examples
    --------
    >>> df = ps.DataFrame([(1, 2), (3, 4), (5, 6)], columns=['A', 'B'])
    >>> df
       A  B
    0  1  2
    1  3  4
    2  5  6

    >>> def plus_one_func(pdf) -> ps.DataFrame[int, [int, int]]:
    ...     return pdf + 1
    >>> df.pandas_on_spark.transform_batch(plus_one_func)
       c0  c1
    0   2   3
    1   4   5
    2   6   7

    >>> def plus_one_func(pdf) -> ps.DataFrame[("index", int), [('A', int), ('B', int)]]:
    ...     return pdf + 1
    >>> df.pandas_on_spark.transform_batch(plus_one_func)  # doctest: +NORMALIZE_WHITESPACE
           A  B
    index
    0      2  3
    1      4  5
    2      6  7

    >>> def plus_one_func(pdf) -> ps.Series[int]:
    ...     return pdf.B + 1
    >>> df.pandas_on_spark.transform_batch(plus_one_func)
    0    3
    1    5
    2    7
    dtype: int64

    You can also omit the type hints so pandas-on-Spark infers the return schema as below:

    >>> df.pandas_on_spark.transform_batch(lambda pdf: pdf + 1)
       A  B
    0  2  3
    1  4  5
    2  6  7

    >>> (df * -1).pandas_on_spark.transform_batch(abs)
       A  B
    0  1  2
    1  3  4
    2  5  6

    Note that you should not transform the index. The index information will not change.

    >>> df.pandas_on_spark.transform_batch(lambda pdf: pdf.B + 1)
    0    3
    1    5
    2    7
    Name: B, dtype: int64

    You can also specify extra arguments as below.

    >>> df.pandas_on_spark.transform_batch(lambda pdf, a, b, c: pdf.B + a + b + c, 1, 2, c=3)
    0     8
    1    10
    2    12
    Name: B, dtype: int64
    """
    from pyspark.pandas.groupby import GroupBy
    from pyspark.pandas.frame import DataFrame
    from pyspark.pandas.series import first_series
    from pyspark import pandas as ps

    assert callable(func), "the first argument should be a callable function."
    spec = inspect.getfullargspec(func)
    return_sig = spec.annotations.get("return", None)
    should_infer_schema = return_sig is None
    should_retain_index = should_infer_schema
    original_func = func
    func = lambda o: original_func(o, *args, **kwargs)

    def apply_func(pdf: pd.DataFrame) -> pd.DataFrame:
        return func(pdf).to_frame()

    def pandas_series_func(
        f: Callable[[pd.DataFrame], pd.DataFrame], return_type: DataType
    ) -> "UserDefinedFunctionLike":
        ff = f

        @pandas_udf(returnType=return_type)  # type: ignore[call-overload]
        def udf(pdf: pd.DataFrame) -> pd.Series:
            return first_series(ff(pdf))

        return udf

    if should_infer_schema:
        # Here we execute with the first 1000 records to get the return type.
        # If there are fewer records than 1000, it uses the pandas API directly as a shortcut.
        log_advice(
            "If the type hints are not specified for `transform_batch`, "
            "it is expensive to infer the data type internally."
        )
        limit = ps.get_option("compute.shortcut_limit")
        pdf = self._psdf.head(limit + 1)._to_internal_pandas()
        transformed = func(pdf)
        if not isinstance(transformed, (pd.DataFrame, pd.Series)):
            raise ValueError(
                "The given function should return a frame; however, "
                "the return type was %s." % type(transformed)
            )
        if len(transformed) != len(pdf):
            raise ValueError("transform_batch cannot produce aggregated results")
        psdf_or_psser = ps.from_pandas(transformed)

        if isinstance(psdf_or_psser, ps.Series):
            psser = cast(ps.Series, psdf_or_psser)

            field = psser._internal.data_fields[0].normalize_spark_type()
            return_schema = StructType([field.struct_field])

            output_func = GroupBy._make_pandas_df_builder_func(
                self._psdf, apply_func, return_schema, retain_index=False
            )
            pudf = pandas_series_func(output_func, return_type=field.spark_type)
            columns = self._psdf._internal.spark_columns
            # TODO: Index will be lost in this case.
            internal = self._psdf._internal.copy(
                column_labels=psser._internal.column_labels,
                data_spark_columns=[pudf(F.struct(*columns)).alias(field.name)],
                data_fields=[field],
                column_label_names=psser._internal.column_label_names,
            )
            return first_series(DataFrame(internal))
        else:
            psdf = cast(DataFrame, psdf_or_psser)
            if len(pdf) <= limit:
                # only do the shortcut when it returns a frame, to avoid
                # operations on different dataframes in case of series.
                return psdf

            index_fields = [
                field.normalize_spark_type() for field in psdf._internal.index_fields
            ]
            data_fields = [
                field.normalize_spark_type() for field in psdf._internal.data_fields
            ]
            return_schema = StructType(
                [field.struct_field for field in index_fields + data_fields]
            )

            self_applied: DataFrame = DataFrame(self._psdf._internal.resolved_copy)

            output_func = GroupBy._make_pandas_df_builder_func(
                self_applied, func, return_schema, retain_index=True  # type: ignore[arg-type]
            )
            columns = self_applied._internal.spark_columns

            pudf = pandas_udf(  # type: ignore[call-overload]
                output_func, returnType=return_schema
            )
            temp_struct_column = verify_temp_column_name(
                self_applied._internal.spark_frame, "__temp_struct__"
            )
            applied = pudf(F.struct(*columns)).alias(temp_struct_column)
            sdf = self_applied._internal.spark_frame.select(applied)
            sdf = sdf.selectExpr("%s.*" % temp_struct_column)
            return DataFrame(
                psdf._internal.with_new_sdf(
                    spark_frame=sdf, index_fields=index_fields, data_fields=data_fields
                )
            )
    else:
        return_type = infer_return_type(original_func)
        is_return_series = isinstance(return_type, SeriesType)
        is_return_dataframe = isinstance(return_type, DataFrameType)
        if not is_return_dataframe and not is_return_series:
            raise TypeError(
                "The given function should specify a frame or series as its type "
                "hints; however, the return type was %s." % return_sig
            )
        if is_return_series:
            field = InternalField(
                dtype=cast(SeriesType, return_type).dtype,
                struct_field=StructField(
                    name=SPARK_DEFAULT_SERIES_NAME,
                    dataType=cast(SeriesType, return_type).spark_type,
                ),
            ).normalize_spark_type()
            return_schema = StructType([field.struct_field])

            output_func = GroupBy._make_pandas_df_builder_func(
                self._psdf, apply_func, return_schema, retain_index=False
            )
            pudf = pandas_series_func(output_func, return_type=field.spark_type)
            columns = self._psdf._internal.spark_columns
            internal = self._psdf._internal.copy(
                column_labels=[None],
                data_spark_columns=[pudf(F.struct(*columns)).alias(field.name)],
                data_fields=[field],
                column_label_names=None,
            )
            return first_series(DataFrame(internal))
        else:
            index_fields = cast(DataFrameType, return_type).index_fields
            index_fields = [
                index_field.normalize_spark_type() for index_field in index_fields
            ]
            data_fields = [
                field.normalize_spark_type()
                for field in cast(DataFrameType, return_type).data_fields
            ]
            normalized_fields = index_fields + data_fields
            return_schema = StructType([field.struct_field for field in normalized_fields])
            should_retain_index = len(index_fields) > 0

            self_applied = DataFrame(self._psdf._internal.resolved_copy)

            output_func = GroupBy._make_pandas_df_builder_func(
                self_applied, func, return_schema, retain_index=should_retain_index  # type: ignore[arg-type]
            )
            columns = self_applied._internal.spark_columns

            pudf = pandas_udf(  # type: ignore[call-overload]
                output_func, returnType=return_schema
            )
            temp_struct_column = verify_temp_column_name(
                self_applied._internal.spark_frame, "__temp_struct__"
            )
            applied = pudf(F.struct(*columns)).alias(temp_struct_column)
            sdf = self_applied._internal.spark_frame.select(applied)
            sdf = sdf.selectExpr("%s.*" % temp_struct_column)

            index_spark_columns = None
            index_names: Optional[List[Optional[Tuple[Any, ...]]]] = None

            if should_retain_index:
                index_spark_columns = [
                    scol_for(sdf, index_field.struct_field.name)
                    for index_field in index_fields
                ]

                if not any(
                    [
                        SPARK_INDEX_NAME_PATTERN.match(index_field.struct_field.name)
                        for index_field in index_fields
                    ]
                ):
                    index_names = [
                        (index_field.struct_field.name,) for index_field in index_fields
                    ]
            internal = InternalFrame(
                spark_frame=sdf,
                index_names=index_names,
                index_spark_columns=index_spark_columns,
                index_fields=index_fields,
                data_fields=data_fields,
            )
            return DataFrame(internal)
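# A small end-to-end sketch of the two paths above (assumes a Spark session and
# `import pyspark.pandas as ps`; `doubled` is an illustrative name). Without a
# return annotation, the function is first run on `compute.shortcut_limit + 1`
# rows to infer the schema, and the index is retained; with an annotation, the
# schema comes from the hint, and the index is retained only when the hint
# declares index fields.
import pyspark.pandas as ps

df = ps.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})

inferred = df.pandas_on_spark.transform_batch(lambda pdf: pdf * 2)  # schema inferred

def doubled(pdf) -> ps.DataFrame[int, [int, int]]:  # schema from the type hint
    return pdf * 2

annotated = df.pandas_on_spark.transform_batch(doubled)  # columns become c0, c1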
def transform_batch(self, func, *args, **kwargs) -> Union["DataFrame", "Series"]:
    """
    Transform chunks with a function that takes a pandas DataFrame and outputs a pandas
    DataFrame. The pandas DataFrame given to the function is a batch used internally. The
    length of each input and output should be the same.

    See also `Transform and apply a function
    <https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_.

    .. note:: the `func` is unable to access the whole input frame. pandas-on-Spark
        internally splits the input frame into multiple batches and calls `func` with each
        batch multiple times. Therefore, operations such as global aggregations are
        impossible. See the example below.

        >>> # This case does not return the length of whole frame but of the batch internally
        ... # used.
        ... def length(pdf) -> ps.DataFrame[int]:
        ...     return pd.DataFrame([len(pdf)] * len(pdf))
        ...
        >>> df = ps.DataFrame({'A': range(1000)})
        >>> df.pandas_on_spark.transform_batch(length)  # doctest: +SKIP
            c0
        0   83
        1   83
        2   83
        ...

    .. note:: this API executes the function once to infer the type, which is potentially
        expensive, for instance, when the dataset is created after aggregations or sorting.

        To avoid this, specify the return type in ``func``, for instance, as below:

        >>> def plus_one(x) -> ps.DataFrame[float, float]:
        ...     return x + 1

        If the return type is specified, the output column names become
        `c0, c1, c2 ... cn`. These names are positionally mapped to the returned
        DataFrame in ``func``.

        To specify the column names, you can assign them in a pandas-friendly style as below:

        >>> def plus_one(x) -> ps.DataFrame['a': float, 'b': float]:
        ...     return x + 1

        >>> pdf = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
        >>> def plus_one(x) -> ps.DataFrame[zip(pdf.dtypes, pdf.columns)]:
        ...     return x + 1

        When the given function returns DataFrame and has the return type annotated, the
        original index of the DataFrame will be lost, and then a default index will be
        attached to the result. Please be careful about configuring the default index. See
        also `Default Index Type
        <https://koalas.readthedocs.io/en/latest/user_guide/options.html#default-index-type>`_.

    Parameters
    ----------
    func : function
        Function to transform each pandas frame.
    *args
        Positional arguments to pass to func.
    **kwargs
        Keyword arguments to pass to func.

    Returns
    -------
    DataFrame or Series

    See Also
    --------
    DataFrame.pandas_on_spark.apply_batch: For row/columnwise operations.
    Series.pandas_on_spark.transform_batch: Transform the Series as each pandas chunk.

    Examples
    --------
    >>> df = ps.DataFrame([(1, 2), (3, 4), (5, 6)], columns=['A', 'B'])
    >>> df
       A  B
    0  1  2
    1  3  4
    2  5  6

    >>> def plus_one_func(pdf) -> ps.DataFrame[int, int]:
    ...     return pdf + 1
    >>> df.pandas_on_spark.transform_batch(plus_one_func)
       c0  c1
    0   2   3
    1   4   5
    2   6   7

    >>> def plus_one_func(pdf) -> ps.DataFrame['A': int, 'B': int]:
    ...     return pdf + 1
    >>> df.pandas_on_spark.transform_batch(plus_one_func)
       A  B
    0  2  3
    1  4  5
    2  6  7

    >>> def plus_one_func(pdf) -> ps.Series[int]:
    ...     return pdf.B + 1
    >>> df.pandas_on_spark.transform_batch(plus_one_func)
    0    3
    1    5
    2    7
    dtype: int64

    You can also omit the type hints so pandas-on-Spark infers the return schema as below:

    >>> df.pandas_on_spark.transform_batch(lambda pdf: pdf + 1)
       A  B
    0  2  3
    1  4  5
    2  6  7

    >>> (df * -1).pandas_on_spark.transform_batch(abs)
       A  B
    0  1  2
    1  3  4
    2  5  6

    Note that you should not transform the index. The index information will not change.

    >>> df.pandas_on_spark.transform_batch(lambda pdf: pdf.B + 1)
    0    3
    1    5
    2    7
    Name: B, dtype: int64

    You can also specify extra arguments as below.

    >>> df.pandas_on_spark.transform_batch(lambda pdf, a, b, c: pdf.B + a + b + c, 1, 2, c=3)
    0     8
    1    10
    2    12
    Name: B, dtype: int64
    """
    from pyspark.pandas.groupby import GroupBy
    from pyspark.pandas.frame import DataFrame
    from pyspark.pandas.series import first_series
    from pyspark import pandas as ps

    assert callable(func), "the first argument should be a callable function."
    spec = inspect.getfullargspec(func)
    return_sig = spec.annotations.get("return", None)
    should_infer_schema = return_sig is None
    original_func = func
    func = lambda o: original_func(o, *args, **kwargs)

    names = self._psdf._internal.to_internal_spark_frame.schema.names

    def pandas_concat(series):
        # From Spark 3.0, the input for a struct can only be a DataFrame.
        # This works around that by reconstructing the input as a frame. See SPARK-27240
        pdf = pd.concat(series, axis=1)
        pdf.columns = names
        return pdf

    def apply_func(pdf):
        return func(pdf).to_frame()

    def pandas_extract(pdf, name):
        # This works around the same struct-as-DataFrame behavior on the
        # output side. See SPARK-23836
        return pdf[name]

    def pandas_series_func(f):
        ff = f
        return lambda *series: first_series(ff(*series))

    def pandas_frame_func(f, field_name):
        ff = f
        return lambda *series: pandas_extract(ff(pandas_concat(series)), field_name)

    if should_infer_schema:
        # Here we execute with the first 1000 records to get the return type.
        # If there are fewer records than 1000, it uses the pandas API directly as a shortcut.
        limit = ps.get_option("compute.shortcut_limit")
        pdf = self._psdf.head(limit + 1)._to_internal_pandas()
        transformed = func(pdf)
        if not isinstance(transformed, (pd.DataFrame, pd.Series)):
            raise ValueError(
                "The given function should return a frame; however, "
                "the return type was %s." % type(transformed)
            )
        if len(transformed) != len(pdf):
            raise ValueError("transform_batch cannot produce aggregated results")
        psdf_or_psser = ps.from_pandas(transformed)
        if isinstance(psdf_or_psser, ps.Series):
            psser = cast(ps.Series, psdf_or_psser)

            spark_return_type = force_decimal_precision_scale(
                as_nullable_spark_type(psser.spark.data_type)
            )
            return_schema = StructType(
                [StructField(SPARK_DEFAULT_SERIES_NAME, spark_return_type)]
            )
            output_func = GroupBy._make_pandas_df_builder_func(
                self._psdf, apply_func, return_schema, retain_index=False
            )

            pudf = pandas_udf(returnType=spark_return_type, functionType=PandasUDFType.SCALAR)(
                pandas_series_func(output_func)
            )
            columns = self._psdf._internal.spark_columns
            # TODO: Index will be lost in this case.
            internal = self._psdf._internal.copy(
                column_labels=psser._internal.column_labels,
                data_spark_columns=[
                    pudf(F.struct(*columns)).alias(psser._internal.data_spark_column_names[0])
                ],
                data_dtypes=psser._internal.data_dtypes,
                column_label_names=psser._internal.column_label_names,
            )
            return first_series(DataFrame(internal))
        else:
            psdf = cast(DataFrame, psdf_or_psser)
            if len(pdf) <= limit:
                # only do the shortcut when it returns a frame, to avoid
                # operations on different dataframes in case of series.
                return psdf

            # Force nullability.
            return_schema = force_decimal_precision_scale(
                as_nullable_spark_type(psdf._internal.to_internal_spark_frame.schema)
            )

            self_applied = DataFrame(self._psdf._internal.resolved_copy)  # type: DataFrame

            output_func = GroupBy._make_pandas_df_builder_func(
                self_applied, func, return_schema, retain_index=True
            )
            columns = self_applied._internal.spark_columns

            pudf = pandas_udf(returnType=return_schema, functionType=PandasUDFType.SCALAR)(
                output_func
            )
            temp_struct_column = verify_temp_column_name(
                self_applied._internal.spark_frame, "__temp_struct__"
            )
            applied = pudf(F.struct(*columns)).alias(temp_struct_column)
            sdf = self_applied._internal.spark_frame.select(applied)
            sdf = sdf.selectExpr("%s.*" % temp_struct_column)
            return DataFrame(psdf._internal.with_new_sdf(sdf))
    else:
        return_type = infer_return_type(original_func)
        is_return_series = isinstance(return_type, SeriesType)
        is_return_dataframe = isinstance(return_type, DataFrameType)
        if not is_return_dataframe and not is_return_series:
            raise TypeError(
                "The given function should specify a frame or series as its type "
                "hints; however, the return type was %s." % return_sig
            )
        if is_return_series:
            spark_return_type = force_decimal_precision_scale(
                as_nullable_spark_type(cast(SeriesType, return_type).spark_type)
            )
            return_schema = StructType(
                [StructField(SPARK_DEFAULT_SERIES_NAME, spark_return_type)]
            )
            output_func = GroupBy._make_pandas_df_builder_func(
                self._psdf, apply_func, return_schema, retain_index=False
            )
            pudf = pandas_udf(returnType=spark_return_type, functionType=PandasUDFType.SCALAR)(
                pandas_series_func(output_func)
            )
            columns = self._psdf._internal.spark_columns
            internal = self._psdf._internal.copy(
                column_labels=[None],
                data_spark_columns=[pudf(F.struct(*columns)).alias(SPARK_DEFAULT_SERIES_NAME)],
                data_dtypes=[cast(SeriesType, return_type).dtype],
                column_label_names=None,
            )
            return first_series(DataFrame(internal))
        else:
            return_schema = cast(DataFrameType, return_type).spark_type

            self_applied = DataFrame(self._psdf._internal.resolved_copy)

            output_func = GroupBy._make_pandas_df_builder_func(
                self_applied, func, return_schema, retain_index=False
            )
            columns = self_applied._internal.spark_columns

            pudf = pandas_udf(returnType=return_schema, functionType=PandasUDFType.SCALAR)(
                output_func
            )
            temp_struct_column = verify_temp_column_name(
                self_applied._internal.spark_frame, "__temp_struct__"
            )
            applied = pudf(F.struct(*columns)).alias(temp_struct_column)
            sdf = self_applied._internal.spark_frame.select(applied)
            sdf = sdf.selectExpr("%s.*" % temp_struct_column)
            internal = InternalFrame(
                spark_frame=sdf,
                index_spark_columns=None,
                data_dtypes=cast(DataFrameType, return_type).dtypes,
            )
            return DataFrame(internal)
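# A hedged, self-contained sketch of the struct round-trip both versions of
# `transform_batch` rely on: pack all columns into a single struct column, run
# a scalar pandas UDF that returns a struct, then explode it back with
# `selectExpr("<col>.*")`. Names here are illustrative.
import pandas as pd
from pyspark.sql import SparkSession
from pyspark.sql import functions as F
from pyspark.sql.functions import pandas_udf

spark = SparkSession.builder.getOrCreate()
sdf = spark.createDataFrame([(1, 2), (3, 4)], ["A", "B"])

@pandas_udf("A long, B long")
def plus_one(pdf: pd.DataFrame) -> pd.DataFrame:
    # Spark passes a struct column to a pandas UDF as a pandas DataFrame of
    # its fields (Spark 3.0+), and a StructType return comes back the same way.
    return pdf + 1

packed = sdf.select(plus_one(F.struct("A", "B")).alias("__temp_struct__"))
packed.selectExpr("__temp_struct__.*").show()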
def _downsample(self, f: str) -> DataFrame:
    """
    Downsample the DataFrame by applying the given aggregation function per bin.

    Parameters
    ----------
    f : str
        The name of the aggregation function to apply to each bin, e.g. "max".
    """
    # a simple example to illustrate the computation:
    # dates = [
    #     datetime.datetime(2012, 1, 2),
    #     datetime.datetime(2012, 5, 3),
    #     datetime.datetime(2022, 5, 3),
    # ]
    # index = pd.DatetimeIndex(dates)
    # pdf = pd.DataFrame(np.array([1, 2, 3]), index=index, columns=['A'])
    # pdf.resample('3Y').max()
    #                 A
    # 2012-12-31    2.0
    # 2015-12-31    NaN
    # 2018-12-31    NaN
    # 2021-12-31    NaN
    # 2024-12-31    3.0
    #
    # in this case:
    # 1. obtain one origin point to bin all timestamps; we can get one (2009-12-31)
    #    from the minimum timestamp (2012-01-02);
    # 2. the default intervals for 'Y' are right-closed, so the intervals are:
    #    (2009-12-31, 2012-12-31], (2012-12-31, 2015-12-31], (2015-12-31, 2018-12-31], ...
    # 3. bin all timestamps; for example, 2022-05-03 belongs to the interval
    #    (2021-12-31, 2024-12-31]; since the default label is 'right', label it with the
    #    right edge 2024-12-31;
    # 4. some intervals may be too large for this downsampling, so we need to pad the
    #    dataframe to avoid missing some results, like: 2015-12-31, 2018-12-31 and
    #    2021-12-31;
    # 5. union the binned dataframe and the padded dataframe, and apply the aggregation
    #    'max' to get the final results;

    # one action to obtain the range; in the future we may cache it in the index.
    ts_min, ts_max = (
        self._psdf._internal.spark_frame.select(
            F.min(self._resamplekey_scol), F.max(self._resamplekey_scol)
        )
        .toPandas()
        .iloc[0]
    )

    # the logic to obtain an origin point to bin the timestamps is too complex to follow,
    # so here we just use pandas' resample on a length-1 series to get it.
    ts_origin = (
        pd.Series([0], index=[ts_min])
        .resample(rule=self._offset.freqstr, closed=self._closed, label="left")
        .sum()
        .index[0]
    )
    assert ts_origin <= ts_min

    bin_col_name = "__tmp_resample_bin_col__"
    bin_col_label = verify_temp_column_name(self._psdf, bin_col_name)
    bin_col_field = InternalField(
        dtype=np.dtype("datetime64[ns]"),
        struct_field=StructField(bin_col_name, TimestampType(), True),
    )
    bin_scol = self._bin_time_stamp(
        ts_origin,
        self._resamplekey_scol,
    )

    agg_columns = [
        psser
        for psser in self._agg_columns
        if isinstance(psser.spark.data_type, NumericType)
    ]
    assert len(agg_columns) > 0

    # on the binning side, label the timestamps according to the origin and the freq (rule)
    bin_sdf = self._psdf._internal.spark_frame.select(
        F.col(SPARK_DEFAULT_INDEX_NAME),
        bin_scol.alias(bin_col_name),
        *[psser.spark.column for psser in agg_columns],
    )

    # on the padding side, insert the necessary points; again, directly apply
    # pandas' resample on a length-2 series to obtain the indices
    pad_sdf = (
        ps.from_pandas(
            pd.Series([0, 0], index=[ts_min, ts_max])
            .resample(rule=self._offset.freqstr, closed=self._closed, label=self._label)
            .sum()
            .index
        )
        ._internal.spark_frame.select(F.col(SPARK_DEFAULT_INDEX_NAME).alias(bin_col_name))
        .where((ts_min <= F.col(bin_col_name)) & (F.col(bin_col_name) <= ts_max))
    )

    # union the above two spark dataframes.
    sdf = bin_sdf.unionByName(pad_sdf, allowMissingColumns=True).where(
        ~F.isnull(F.col(bin_col_name))
    )

    internal = InternalFrame(
        spark_frame=sdf,
        index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)],
        data_spark_columns=[F.col(bin_col_name)]
        + [
            scol_for(sdf, psser._internal.data_spark_column_names[0])
            for psser in agg_columns
        ],
        column_labels=[bin_col_label] + [psser._column_label for psser in agg_columns],
        data_fields=[bin_col_field]
        + [psser._internal.data_fields[0].copy(nullable=True) for psser in agg_columns],
        column_label_names=self._psdf._internal.column_label_names,
    )
    psdf: DataFrame = DataFrame(internal)

    groupby = psdf.groupby(psdf._psser_for(bin_col_label), dropna=False)
    downsampled = getattr(groupby, f)()
    downsampled.index.name = None

    return downsampled
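# A pandas-only sketch of step 4 above (mirroring the worked example in the
# comments): binning alone only produces labels for bins that contain data,
# so the padded rows are what supply the empty bins before aggregation.
import datetime
import numpy as np
import pandas as pd

index = pd.DatetimeIndex(
    [
        datetime.datetime(2012, 1, 2),
        datetime.datetime(2012, 5, 3),
        datetime.datetime(2022, 5, 3),
    ]
)
pdf = pd.DataFrame(np.array([1, 2, 3]), index=index, columns=["A"])
# 2015-12-31, 2018-12-31 and 2021-12-31 hold no data; they appear in the
# result only because resample emits the intervening empty bins.
print(pdf.resample("3Y").max())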