def test_flush(self, feature_set_dataframe, mocker):
    # given
    spark_client = SparkClient()
    writer = [
        HistoricalFeatureStoreWriter(),
        OnlineFeatureStoreWriter(),
    ]

    for w in writer:
        w.write = mocker.stub("write")

    feature_set = mocker.stub("feature_set")
    feature_set.entity = "house"
    feature_set.name = "test"

    # when
    sink = Sink(writers=writer)
    sink.flush(
        dataframe=feature_set_dataframe,
        feature_set=feature_set,
        spark_client=spark_client,
    )

    # then
    for w in writer:
        w.write.assert_called_once()
def test_sink(input_dataframe, feature_set):
    # arrange
    client = SparkClient()
    client.conn.conf.set("spark.sql.sources.partitionOverwriteMode", "dynamic")
    feature_set_df = feature_set.construct(input_dataframe, client)
    target_latest_df = OnlineFeatureStoreWriter.filter_latest(
        feature_set_df, id_columns=[key.name for key in feature_set.keys]
    )
    columns_sort = feature_set_df.schema.fieldNames()

    # setup historical writer
    s3config = Mock()
    s3config.mode = "overwrite"
    s3config.format_ = "parquet"
    s3config.get_options = Mock(
        return_value={"path": "test_folder/historical/entity/feature_set"}
    )
    s3config.get_path_with_partitions = Mock(
        return_value="test_folder/historical/entity/feature_set"
    )
    historical_writer = HistoricalFeatureStoreWriter(
        db_config=s3config, interval_mode=True
    )

    # setup online writer
    # TODO: Change for CassandraConfig when Cassandra for test is ready
    online_config = Mock()
    online_config.mode = "overwrite"
    online_config.format_ = "parquet"
    online_config.get_options = Mock(
        return_value={"path": "test_folder/online/entity/feature_set"}
    )
    online_writer = OnlineFeatureStoreWriter(db_config=online_config)

    writers = [historical_writer, online_writer]
    sink = Sink(writers)

    # act
    client.sql(
        "CREATE DATABASE IF NOT EXISTS {}".format(historical_writer.database)
    )
    sink.flush(feature_set, feature_set_df, client)

    # get historical results
    historical_result_df = client.read(
        s3config.format_,
        path=s3config.get_path_with_partitions(feature_set.name, feature_set_df),
    )

    # get online results
    online_result_df = client.read(
        online_config.format_, **online_config.get_options(feature_set.name)
    )

    # assert historical results
    assert sorted(feature_set_df.select(*columns_sort).collect()) == sorted(
        historical_result_df.select(*columns_sort).collect()
    )

    # assert online results
    assert sorted(target_latest_df.select(*columns_sort).collect()) == sorted(
        online_result_df.select(*columns_sort).collect()
    )

    # tear down
    shutil.rmtree("test_folder")
def test_flush_with_writers_list_empty(self):
    # given
    writer = []

    # then
    with pytest.raises(ValueError):
        Sink(writers=writer)
def __init__(self):
    super(FirstPipeline, self).__init__(
        source=Source(
            readers=[TableReader(id="t", database="db", table="table")],
            query="select * from t",
        ),
        feature_set=FeatureSet(
            name="first",
            entity="entity",
            description="description",
            features=[
                Feature(name="feature1", description="test", dtype=DataType.FLOAT),
                Feature(
                    name="feature2",
                    description="another test",
                    dtype=DataType.STRING,
                ),
            ],
            keys=[
                KeyFeature(
                    name="id",
                    description="identifier",
                    dtype=DataType.BIGINT,
                )
            ],
            timestamp=TimestampFeature(),
        ),
        sink=Sink(
            writers=[HistoricalFeatureStoreWriter(), OnlineFeatureStoreWriter()]
        ),
    )
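# Usage sketch (assumption, not part of the original example): FirstPipeline's
# __init__ mirrors the FeatureSetPipeline constructor arguments, so assuming it
# subclasses FeatureSetPipeline it can be executed with run(), the same way the
# tests in this section run their pipelines. Requires an active SparkSession and
# an existing "db"."table" source.
if __name__ == "__main__":
    FirstPipeline().run()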
def __init__(self):
    super(UserChargebacksPipeline, self).__init__(
        source=Source(
            readers=[
                FileReader(
                    id="chargeback_events",
                    path="data/order_events/input.csv",
                    format="csv",
                    format_options={"header": True},
                )
            ],
            query=(
                """
                select
                    cpf,
                    timestamp(chargeback_timestamp) as timestamp,
                    order_id
                from chargeback_events
                where chargeback_timestamp is not null
                """
            ),
        ),
        feature_set=AggregatedFeatureSet(
            name="user_chargebacks",
            entity="user",
            description="Aggregates the total of chargebacks from users in "
            "different time windows.",
            keys=[
                KeyFeature(
                    name="cpf",
                    description="User unique identifier, entity key.",
                    dtype=DataType.STRING,
                )
            ],
            timestamp=TimestampFeature(),
            features=[
                Feature(
                    name="cpf_chargebacks",
                    description="Total of chargebacks registered on user's CPF",
                    transformation=AggregatedTransform(
                        functions=[Function(functions.count, DataType.INTEGER)]
                    ),
                    from_column="order_id",
                ),
            ],
        )
        .with_windows(definitions=["3 days", "7 days", "30 days"])
        .add_post_hook(ZeroFillHook()),
        sink=Sink(
            writers=[
                LocalHistoricalFSWriter(),
                OnlineFeatureStoreWriter(
                    interval_mode=True,
                    check_schema_hook=NotCheckSchemaHook(),
                    debug_mode=True,
                ),
            ]
        ),
    )
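# Usage sketch (assumption, not part of the original pipeline): besides a full
# run(), FeatureSetPipeline also exposes run_for_date(), used by the interval-run
# test further below, which processes a single reference date. The date here is
# only an illustrative value.
pipeline = UserChargebacksPipeline()
pipeline.run_for_date("2021-01-01")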
def test_pipeline_with_hooks(self, spark_session):
    # arrange
    hook1 = AddHook(value=1)

    spark_session.sql(
        "select 1 as id, timestamp('2020-01-01') as timestamp, 0 as feature"
    ).createOrReplaceTempView("test")

    target_df = spark_session.sql(
        "select 1 as id, timestamp('2020-01-01') as timestamp, 6 as feature, 2020 "
        "as year, 1 as month, 1 as day"
    )

    historical_writer = HistoricalFeatureStoreWriter(debug_mode=True)

    test_pipeline = FeatureSetPipeline(
        source=Source(
            readers=[
                TableReader(
                    id="reader",
                    table="test",
                ).add_post_hook(hook1)
            ],
            query="select * from reader",
        ).add_post_hook(hook1),
        feature_set=FeatureSet(
            name="feature_set",
            entity="entity",
            description="description",
            features=[
                Feature(
                    name="feature",
                    description="test",
                    transformation=SQLExpressionTransform(
                        expression="feature + 1"
                    ),
                    dtype=DataType.INTEGER,
                ),
            ],
            keys=[
                KeyFeature(
                    name="id",
                    description="The user's Main ID or device ID",
                    dtype=DataType.INTEGER,
                )
            ],
            timestamp=TimestampFeature(),
        )
        .add_pre_hook(hook1)
        .add_post_hook(hook1),
        sink=Sink(writers=[historical_writer]).add_pre_hook(hook1),
    )

    # act
    test_pipeline.run()
    output_df = spark_session.table("historical_feature_store__feature_set")

    # assert
    output_df.show()
    assert_dataframe_equality(output_df, target_df)
def test_flush_with_multiple_online_writers(self, feature_set, feature_set_dataframe):
    """Testing the flow of writing to a feature-set table and to an entity table."""
    # arrange
    spark_client = SparkClient()
    spark_client.write_dataframe = Mock()

    feature_set.entity = "my_entity"
    feature_set.name = "my_feature_set"

    online_feature_store_writer = OnlineFeatureStoreWriter()
    online_feature_store_writer_on_entity = OnlineFeatureStoreWriter(
        write_to_entity=True
    )

    sink = Sink(
        writers=[online_feature_store_writer, online_feature_store_writer_on_entity]
    )

    # act
    sink.flush(
        dataframe=feature_set_dataframe,
        feature_set=feature_set,
        spark_client=spark_client,
    )

    # assert
    spark_client.write_dataframe.assert_any_call(
        dataframe=ANY,
        format_=ANY,
        mode=ANY,
        **online_feature_store_writer.db_config.get_options(table="my_entity"),
    )

    spark_client.write_dataframe.assert_any_call(
        dataframe=ANY,
        format_=ANY,
        mode=ANY,
        **online_feature_store_writer.db_config.get_options(table="my_feature_set"),
    )
def test_flush_with_invalid_df(self, not_feature_set_dataframe, mocker):
    # given
    spark_client = SparkClient()
    writer = [
        HistoricalFeatureStoreWriter(),
        OnlineFeatureStoreWriter(),
    ]
    feature_set = mocker.stub("feature_set")
    feature_set.entity = "house"
    feature_set.name = "test"

    # when
    sink = Sink(writers=writer)

    # then
    with pytest.raises(ValueError):
        sink.flush(
            dataframe=not_feature_set_dataframe,
            feature_set=feature_set,
            spark_client=spark_client,
        )
def test_flush_streaming_df(self, feature_set):
    """Testing the return of the streaming handlers by the sink."""
    # arrange
    spark_client = SparkClient()

    mocked_stream_df = Mock()
    mocked_stream_df.isStreaming = True
    mocked_stream_df.writeStream = mocked_stream_df
    mocked_stream_df.trigger.return_value = mocked_stream_df
    mocked_stream_df.outputMode.return_value = mocked_stream_df
    mocked_stream_df.option.return_value = mocked_stream_df
    mocked_stream_df.foreachBatch.return_value = mocked_stream_df
    mocked_stream_df.start.return_value = Mock(spec=StreamingQuery)

    online_feature_store_writer = OnlineFeatureStoreWriter()
    online_feature_store_writer_on_entity = OnlineFeatureStoreWriter(
        write_to_entity=True
    )

    sink = Sink(
        writers=[
            online_feature_store_writer,
            online_feature_store_writer_on_entity,
        ],
        validation=Mock(spec=BasicValidation),
    )

    # act
    handlers = sink.flush(
        dataframe=mocked_stream_df,
        feature_set=feature_set,
        spark_client=spark_client,
    )

    # assert
    for handler in handlers:
        assert isinstance(handler, StreamingQuery)
def test_validate(self, feature_set_dataframe, mocker):
    # given
    spark_client = SparkClient()
    writer = [
        HistoricalFeatureStoreWriter(),
        OnlineFeatureStoreWriter(),
    ]

    for w in writer:
        w.validate = mocker.stub("validate")

    feature_set = mocker.stub("feature_set")

    # when
    sink = Sink(writers=writer)
    sink.validate(
        dataframe=feature_set_dataframe,
        feature_set=feature_set,
        spark_client=spark_client,
    )

    # then
    for w in writer:
        w.validate.assert_called_once()
def test_validate_false(self, feature_set_dataframe, mocker):
    # given
    spark_client = SparkClient()
    writer = [
        HistoricalFeatureStoreWriter(),
        OnlineFeatureStoreWriter(),
    ]

    for w in writer:
        w.validate = mocker.stub("validate")
        w.validate.side_effect = AssertionError("test")

    feature_set = mocker.stub("feature_set")

    # when
    sink = Sink(writers=writer)

    # then
    with pytest.raises(RuntimeError):
        sink.validate(
            dataframe=feature_set_dataframe,
            feature_set=feature_set,
            spark_client=spark_client,
        )
def feature_set_pipeline(
    spark_context,
    spark_session,
):
    feature_set_pipeline = FeatureSetPipeline(
        source=Source(
            readers=[
                TableReader(id="b_source", table="b_table").with_incremental_strategy(
                    incremental_strategy=IncrementalStrategy(column="timestamp")
                ),
            ],
            query="select * from b_source",
        ),
        feature_set=FeatureSet(
            name="feature_set",
            entity="entity",
            description="description",
            features=[
                Feature(
                    name="feature",
                    description="test",
                    transformation=SparkFunctionTransform(
                        functions=[
                            Function(F.avg, DataType.FLOAT),
                            Function(F.stddev_pop, DataType.FLOAT),
                        ],
                    ).with_window(
                        partition_by="id",
                        order_by=TIMESTAMP_COLUMN,
                        mode="fixed_windows",
                        window_definition=["1 day"],
                    ),
                ),
            ],
            keys=[
                KeyFeature(
                    name="id",
                    description="The user's Main ID or device ID",
                    dtype=DataType.INTEGER,
                )
            ],
            timestamp=TimestampFeature(),
        ),
        sink=Sink(writers=[HistoricalFeatureStoreWriter(debug_mode=True)]),
    )

    return feature_set_pipeline
def loader(features_set_df: pyspark.sql.DataFrame) -> Sink:
    db_config = get_config()
    keyspace = "feature_store"
    table_name = "orders_feature_master_table_"
    primary_key = "customer_id"

    create_table(features_set_df, keyspace, table_name, primary_key)

    writers = [
        HistoricalFeatureStoreWriter(debug_mode=True),
        OnlineFeatureStoreWriter(db_config=db_config),
    ]
    # writers = [HistoricalFeatureStoreWriter(debug_mode=True)]
    sink = Sink(writers=writers)

    return sink
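# Usage sketch (assumption, not part of the original loader): wiring the Sink
# returned by loader() into a flush call, mirroring how the tests above invoke
# Sink.flush. The write_features name is hypothetical; feature_set and
# feature_set_df stand for an already-constructed FeatureSet and the dataframe
# built from it.
def write_features(feature_set, feature_set_df: pyspark.sql.DataFrame) -> None:
    sink = loader(feature_set_df)
    sink.flush(
        dataframe=feature_set_df,
        feature_set=feature_set,
        spark_client=SparkClient(),
    )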
def test_feature_set_args(self):
    # arrange and act
    out_columns = [
        "user_id",
        "timestamp",
        "listing_page_viewed__rent_per_month__avg_over_7_days_fixed_windows",
        "listing_page_viewed__rent_per_month__avg_over_2_weeks_fixed_windows",
        "listing_page_viewed__rent_per_month__stddev_pop_over_7_days_fixed_windows",
        "listing_page_viewed__rent_per_month__"
        "stddev_pop_over_2_weeks_fixed_windows",  # noqa
    ]

    pipeline = FeatureSetPipeline(
        source=Source(
            readers=[
                TableReader(
                    id="source_a",
                    database="db",
                    table="table",
                ),
                FileReader(
                    id="source_b",
                    path="path",
                    format="parquet",
                ),
            ],
            query="select a.*, b.specific_feature "
            "from source_a left join source_b on a.id=b.id",
        ),
        feature_set=FeatureSet(
            name="feature_set",
            entity="entity",
            description="description",
            keys=[
                KeyFeature(
                    name="user_id",
                    description="The user's Main ID or device ID",
                    dtype=DataType.INTEGER,
                )
            ],
            timestamp=TimestampFeature(from_column="ts"),
            features=[
                Feature(
                    name="listing_page_viewed__rent_per_month",
                    description="Average of something.",
                    transformation=SparkFunctionTransform(
                        functions=[
                            Function(functions.avg, DataType.FLOAT),
                            Function(functions.stddev_pop, DataType.FLOAT),
                        ],
                    ).with_window(
                        partition_by="user_id",
                        order_by=TIMESTAMP_COLUMN,
                        window_definition=["7 days", "2 weeks"],
                        mode="fixed_windows",
                    ),
                ),
            ],
        ),
        sink=Sink(
            writers=[
                HistoricalFeatureStoreWriter(db_config=None),
                OnlineFeatureStoreWriter(db_config=None),
            ],
        ),
    )

    assert isinstance(pipeline.spark_client, SparkClient)

    assert len(pipeline.source.readers) == 2
    assert all(isinstance(reader, Reader) for reader in pipeline.source.readers)
    assert isinstance(pipeline.source.query, str)

    assert pipeline.feature_set.name == "feature_set"
    assert pipeline.feature_set.entity == "entity"
    assert pipeline.feature_set.description == "description"
    assert isinstance(pipeline.feature_set.timestamp, TimestampFeature)
    assert len(pipeline.feature_set.keys) == 1
    assert all(isinstance(k, KeyFeature) for k in pipeline.feature_set.keys)
    assert len(pipeline.feature_set.features) == 1
    assert all(
        isinstance(feature, Feature) for feature in pipeline.feature_set.features
    )
    assert pipeline.feature_set.columns == out_columns

    assert len(pipeline.sink.writers) == 2
    assert all(isinstance(writer, Writer) for writer in pipeline.sink.writers)
def test_feature_set_pipeline(
    self,
    mocked_df,
    spark_session,
    fixed_windows_output_feature_set_dataframe,
):
    # arrange
    table_reader_id = "a_source"
    table_reader_table = "table"
    table_reader_db = environment.get_variable("FEATURE_STORE_HISTORICAL_DATABASE")

    create_temp_view(dataframe=mocked_df, name=table_reader_id)
    create_db_and_table(
        spark=spark_session,
        table_reader_id=table_reader_id,
        table_reader_db=table_reader_db,
        table_reader_table=table_reader_table,
    )

    dbconfig = Mock()
    dbconfig.mode = "overwrite"
    dbconfig.format_ = "parquet"
    dbconfig.get_options = Mock(
        return_value={"path": "test_folder/historical/entity/feature_set"}
    )

    historical_writer = HistoricalFeatureStoreWriter(db_config=dbconfig)

    # act
    test_pipeline = FeatureSetPipeline(
        source=Source(
            readers=[
                TableReader(
                    id=table_reader_id,
                    database=table_reader_db,
                    table=table_reader_table,
                ),
            ],
            query=f"select * from {table_reader_id} ",  # noqa
        ),
        feature_set=FeatureSet(
            name="feature_set",
            entity="entity",
            description="description",
            features=[
                Feature(
                    name="feature1",
                    description="test",
                    transformation=SparkFunctionTransform(
                        functions=[
                            Function(F.avg, DataType.FLOAT),
                            Function(F.stddev_pop, DataType.FLOAT),
                        ],
                    ).with_window(
                        partition_by="id",
                        order_by=TIMESTAMP_COLUMN,
                        mode="fixed_windows",
                        window_definition=["2 minutes", "15 minutes"],
                    ),
                ),
                Feature(
                    name="divided_feature",
                    description="unit test",
                    dtype=DataType.FLOAT,
                    transformation=CustomTransform(
                        transformer=divide,
                        column1="feature1",
                        column2="feature2",
                    ),
                ),
            ],
            keys=[
                KeyFeature(
                    name="id",
                    description="The user's Main ID or device ID",
                    dtype=DataType.INTEGER,
                )
            ],
            timestamp=TimestampFeature(),
        ),
        sink=Sink(writers=[historical_writer]),
    )
    test_pipeline.run()

    # assert
    path = dbconfig.get_options("historical/entity/feature_set").get("path")
    df = spark_session.read.parquet(path).orderBy(TIMESTAMP_COLUMN)

    target_df = fixed_windows_output_feature_set_dataframe.orderBy(
        test_pipeline.feature_set.timestamp_column
    )

    assert_dataframe_equality(df, target_df)

    # tear down
    shutil.rmtree("test_folder")
def test_pipeline_interval_run(
    self, mocked_date_df, pipeline_interval_run_target_dfs, spark_session
):
    """Testing pipeline's idempotent interval run feature.

    Source data:
    +-------+---+-------------------+-------------------+
    |feature| id|                 ts|          timestamp|
    +-------+---+-------------------+-------------------+
    |    200|  1|2016-04-11 11:31:11|2016-04-11 11:31:11|
    |    300|  1|2016-04-12 11:44:12|2016-04-12 11:44:12|
    |    400|  1|2016-04-13 11:46:24|2016-04-13 11:46:24|
    |    500|  1|2016-04-14 12:03:21|2016-04-14 12:03:21|
    +-------+---+-------------------+-------------------+

    The test executes 3 runs for different time intervals. The input data has 4
    data points: 2016-04-11, 2016-04-12, 2016-04-13 and 2016-04-14. The following
    run specifications are:

    1)  Interval: from 2016-04-11 to 2016-04-13
        Target table result:
        +---+-------+---+-----+------+-------------------+----+
        |day|feature| id|month|run_id|          timestamp|year|
        +---+-------+---+-----+------+-------------------+----+
        | 11|    200|  1|    4|     1|2016-04-11 11:31:11|2016|
        | 12|    300|  1|    4|     1|2016-04-12 11:44:12|2016|
        | 13|    400|  1|    4|     1|2016-04-13 11:46:24|2016|
        +---+-------+---+-----+------+-------------------+----+

    2)  Interval: only 2016-04-14.
        Target table result:
        +---+-------+---+-----+------+-------------------+----+
        |day|feature| id|month|run_id|          timestamp|year|
        +---+-------+---+-----+------+-------------------+----+
        | 11|    200|  1|    4|     1|2016-04-11 11:31:11|2016|
        | 12|    300|  1|    4|     1|2016-04-12 11:44:12|2016|
        | 13|    400|  1|    4|     1|2016-04-13 11:46:24|2016|
        | 14|    500|  1|    4|     2|2016-04-14 12:03:21|2016|
        +---+-------+---+-----+------+-------------------+----+

    3)  Interval: only 2016-04-11.
        Target table result:
        +---+-------+---+-----+------+-------------------+----+
        |day|feature| id|month|run_id|          timestamp|year|
        +---+-------+---+-----+------+-------------------+----+
        | 11|    200|  1|    4|     3|2016-04-11 11:31:11|2016|
        | 12|    300|  1|    4|     1|2016-04-12 11:44:12|2016|
        | 13|    400|  1|    4|     1|2016-04-13 11:46:24|2016|
        | 14|    500|  1|    4|     2|2016-04-14 12:03:21|2016|
        +---+-------+---+-----+------+-------------------+----+

    """
    # arrange
    create_temp_view(dataframe=mocked_date_df, name="input_data")

    db = environment.get_variable("FEATURE_STORE_HISTORICAL_DATABASE")
    path = "test_folder/historical/entity/feature_set"

    spark_session.conf.set("spark.sql.sources.partitionOverwriteMode", "dynamic")
    spark_session.sql(f"create database if not exists {db}")
    spark_session.sql(
        f"create table if not exists {db}.feature_set_interval "
        f"(id int, timestamp timestamp, feature int, "
        f"run_id int, year int, month int, day int);"
    )

    dbconfig = MetastoreConfig()
    dbconfig.get_options = Mock(
        return_value={"mode": "overwrite", "format_": "parquet", "path": path}
    )

    historical_writer = HistoricalFeatureStoreWriter(
        db_config=dbconfig, interval_mode=True
    )

    first_run_hook = RunHook(id=1)
    second_run_hook = RunHook(id=2)
    third_run_hook = RunHook(id=3)

    (
        first_run_target_df,
        second_run_target_df,
        third_run_target_df,
    ) = pipeline_interval_run_target_dfs

    test_pipeline = FeatureSetPipeline(
        source=Source(
            readers=[
                TableReader(
                    id="id",
                    table="input_data",
                ).with_incremental_strategy(IncrementalStrategy("ts")),
            ],
            query="select * from id ",
        ),
        feature_set=FeatureSet(
            name="feature_set_interval",
            entity="entity",
            description="",
            keys=[
                KeyFeature(
                    name="id",
                    description="",
                    dtype=DataType.INTEGER,
                )
            ],
            timestamp=TimestampFeature(from_column="ts"),
            features=[
                Feature(name="feature", description="", dtype=DataType.INTEGER),
                Feature(name="run_id", description="", dtype=DataType.INTEGER),
            ],
        ),
        sink=Sink([historical_writer]),
    )

    # act and assert
    dbconfig.get_path_with_partitions = Mock(
        return_value=[
            "test_folder/historical/entity/feature_set/year=2016/month=4/day=11",
            "test_folder/historical/entity/feature_set/year=2016/month=4/day=12",
            "test_folder/historical/entity/feature_set/year=2016/month=4/day=13",
        ]
    )
    test_pipeline.feature_set.add_pre_hook(first_run_hook)
    test_pipeline.run(end_date="2016-04-13", start_date="2016-04-11")
    first_run_output_df = spark_session.read.parquet(path)
    assert_dataframe_equality(first_run_output_df, first_run_target_df)

    dbconfig.get_path_with_partitions = Mock(
        return_value=[
            "test_folder/historical/entity/feature_set/year=2016/month=4/day=14",
        ]
    )
    test_pipeline.feature_set.add_pre_hook(second_run_hook)
    test_pipeline.run_for_date("2016-04-14")
    second_run_output_df = spark_session.read.parquet(path)
    assert_dataframe_equality(second_run_output_df, second_run_target_df)

    dbconfig.get_path_with_partitions = Mock(
        return_value=[
            "test_folder/historical/entity/feature_set/year=2016/month=4/day=11",
        ]
    )
    test_pipeline.feature_set.add_pre_hook(third_run_hook)
    test_pipeline.run_for_date("2016-04-11")
    third_run_output_df = spark_session.read.parquet(path)
    assert_dataframe_equality(third_run_output_df, third_run_target_df)

    # tear down
    shutil.rmtree("test_folder")
def __init__(self):
    super(AwesomeDatasetPipeline, self).__init__(
        source=Source(
            readers=[
                FileReader(
                    id="order_events",
                    path="data/order_events/input.csv",
                    format="csv",
                    format_options={"header": True},
                ),
                FileReader(
                    id="user_chargebacks",
                    path="data/feature_store/historical/user/user_chargebacks",
                    format="parquet",
                ),
                FileReader(
                    id="user_orders",
                    path="data/feature_store/historical/user/user_orders",
                    format="parquet",
                ),
            ],
            query="""
                with feature_sets_merge as(
                    select
                        user_orders.cpf,
                        user_orders.timestamp,
                        user_chargebacks.timestamp as chargeback_timestamp,
                        cpf_orders__count_over_3_days_rolling_windows,
                        cpf_orders__count_over_7_days_rolling_windows,
                        cpf_orders__count_over_30_days_rolling_windows,
                        cpf_chargebacks__count_over_3_days_rolling_windows,
                        cpf_chargebacks__count_over_7_days_rolling_windows,
                        cpf_chargebacks__count_over_30_days_rolling_windows,
                        row_number() over (
                            partition by (user_orders.cpf, user_orders.timestamp)
                            order by user_chargebacks.timestamp desc
                        ) as rn
                    from user_orders
                    left join user_chargebacks
                        on user_orders.cpf = user_chargebacks.cpf
                        and user_orders.timestamp >= user_chargebacks.timestamp
                ),
                feature_sets_rn_filter as(
                    select *
                    from feature_sets_merge
                    where rn = 1
                ),
                orders_with_feature_sets as(
                    select
                        order_events.order_id,
                        timestamp(order_events.order_timestamp) as timestamp,
                        timestamp(order_events.chargeback_timestamp)
                            as chargeback_timestamp,
                        order_events.cpf,
                        feature_sets_rn_filter.cpf_orders__count_over_3_days_rolling_windows,
                        feature_sets_rn_filter.cpf_orders__count_over_7_days_rolling_windows,
                        feature_sets_rn_filter.cpf_orders__count_over_30_days_rolling_windows,
                        feature_sets_rn_filter.cpf_chargebacks__count_over_3_days_rolling_windows,
                        feature_sets_rn_filter.cpf_chargebacks__count_over_7_days_rolling_windows,
                        feature_sets_rn_filter.cpf_chargebacks__count_over_30_days_rolling_windows,
                        row_number() over (
                            partition by (order_events.cpf, order_events.order_timestamp)
                            order by feature_sets_rn_filter.timestamp desc
                        ) as rn
                    from order_events
                    join feature_sets_rn_filter
                        on order_events.cpf = feature_sets_rn_filter.cpf
                        and timestamp(order_events.order_timestamp)
                            >= feature_sets_rn_filter.timestamp
                )
                select
                    order_id,
                    timestamp,
                    chargeback_timestamp,
                    cpf,
                    cpf_orders__count_over_3_days_rolling_windows,
                    cpf_orders__count_over_7_days_rolling_windows,
                    cpf_orders__count_over_30_days_rolling_windows,
                    coalesce(
                        cpf_chargebacks__count_over_3_days_rolling_windows, 0
                    ) as cpf_chargebacks__count_over_3_days_rolling_windows,
                    coalesce(
                        cpf_chargebacks__count_over_7_days_rolling_windows, 0
                    ) as cpf_chargebacks__count_over_7_days_rolling_windows,
                    coalesce(
                        cpf_chargebacks__count_over_30_days_rolling_windows, 0
                    ) as cpf_chargebacks__count_over_30_days_rolling_windows
                from orders_with_feature_sets
                where rn = 1
            """,
        ),
        feature_set=FeatureSet(
            name="awesome_dataset",
            entity="user",
            description="Dataset enriching order events with aggregated features "
            "on total of orders and chargebacks by user.",
            keys=[
                KeyFeature(
                    name="order_id",
                    description="Orders unique identifier.",
                    dtype=DataType.STRING,
                )
            ],
            timestamp=TimestampFeature(),
            features=[
                Feature(
                    name="chargeback_timestamp",
                    description="Timestamp for the chargeback creation.",
                    dtype=DataType.TIMESTAMP,
                ),
                Feature(
                    name="cpf",
                    description="User unique identifier, user entity key.",
                    dtype=DataType.STRING,
                ),
                Feature(
                    name="cpf_orders__count_over_3_days_rolling_windows",
                    description="Count of orders over 3 days rolling windows group "
                    "by user (identified by CPF)",
                    dtype=DataType.INTEGER,
                ),
                Feature(
                    name="cpf_orders__count_over_7_days_rolling_windows",
                    description="Count of orders over 7 days rolling windows group "
                    "by user (identified by CPF)",
                    dtype=DataType.INTEGER,
                ),
                Feature(
                    name="cpf_orders__count_over_30_days_rolling_windows",
                    description="Count of orders over 30 days rolling windows group"
                    " by user (identified by CPF)",
                    dtype=DataType.INTEGER,
                ),
                Feature(
                    name="cpf_chargebacks__count_over_3_days_rolling_windows",
                    description="Count of chargebacks over 3 days rolling windows "
                    "group by user (identified by CPF)",
                    dtype=DataType.INTEGER,
                ),
                Feature(
                    name="cpf_chargebacks__count_over_7_days_rolling_windows",
                    description="Count of chargebacks over 7 days rolling windows "
                    "group by user (identified by CPF)",
                    dtype=DataType.INTEGER,
                ),
                Feature(
                    name="cpf_chargebacks__count_over_30_days_rolling_windows",
                    description="Count of chargebacks over 30 days rolling windows "
                    "group by user (identified by CPF)",
                    dtype=DataType.INTEGER,
                ),
            ],
        ),
        sink=Sink(writers=[DatasetWriter()]),
    )