Example #1
def test_get_variable_out_of_spec_fails(monkeypatch):
    # given
    not_specified_variable = "not_specified_variable"
    monkeypatch.setenv(not_specified_variable, "anything")
    if not_specified_variable in environment.specification:
        del environment.specification[not_specified_variable]

    # then
    with pytest.raises(environment.UnspecifiedVariableError,
                       match="not listed in the environment"):
        environment.get_variable(not_specified_variable, "anything")
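The tests on this page exercise butterfree's environment helper. Below is a minimal sketch of a module consistent with the behavior these tests pin down; the names match the tests, but the exact butterfree implementation may differ.

import os

# One entry per supported variable; the value is an optional spec-level default.
specification = {
    "KAFKA_CONSUMER_CONNECTION_STRING": None,
}


class UnspecifiedVariableError(RuntimeError):
    def __init__(self, variable_name: str):
        super().__init__(
            f"Variable '{variable_name}' is not listed in the environment "
            "specification."
        )


def get_variable(variable_name: str, default_value: str = None) -> str:
    # Lookup order implied by the tests on this page: live environment value,
    # then the specification default, then the caller-supplied default.
    if variable_name not in specification:
        raise UnspecifiedVariableError(variable_name)
    return os.getenv(variable_name) or specification[variable_name] or default_value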
Example #2
 def __init__(
     self,
     id: str,
     topic: str,
     value_schema: StructType,
     connection_string: str = None,
     topic_options: dict = None,
     stream: bool = True,
 ):
     super().__init__(id)
     if not isinstance(topic, str):
         raise ValueError("topic must be a string with the topic name")
     if not isinstance(value_schema, StructType):
         raise ValueError(
             "value_schema must be a StructType with the schema "
             'of the JSON presented in "value" Kafka column'
         )
     self.topic = topic
     self.value_schema = value_schema
     self.connection_string = connection_string or environment.get_variable(
         "KAFKA_CONSUMER_CONNECTION_STRING"
     )
     self.options = dict(
         {
             "kafka.bootstrap.servers": self.connection_string,
             "subscribe": self.topic,
         },
         **topic_options if topic_options else {},
     )
     self.stream = stream
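This constructor appears to belong to butterfree's KafkaReader; the class name and import path in the sketch below are assumptions. When connection_string is omitted, it is resolved from the KAFKA_CONSUMER_CONNECTION_STRING environment variable.

import os

from butterfree.extract.readers import KafkaReader  # assumed import path
from pyspark.sql.types import StringType, StructField, StructType

os.environ["KAFKA_CONSUMER_CONNECTION_STRING"] = "broker-1:9092"  # hypothetical broker

value_schema = StructType([StructField("event", StringType())])
reader = KafkaReader(
    id="kafka_events",
    topic="events",
    value_schema=value_schema,  # schema of the JSON in Kafka's "value" column
)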
Example #3
 def __init__(
     self,
     mode: str = None,
     format_: str = None,
     keyspace: str = None,
     stream_processing_time: str = None,
     stream_output_mode: str = None,
     stream_checkpoint_path: str = None,
 ):
     self.mode = mode
     self.format_ = format_
     self.keyspace = keyspace
     self.username = environment.get_variable("CASSANDRA_USERNAME")
     self.password = environment.get_variable("CASSANDRA_PASSWORD")
     self.host = environment.get_variable("CASSANDRA_HOST")
     self.stream_processing_time = stream_processing_time
     self.stream_output_mode = stream_output_mode
     self.stream_checkpoint_path = stream_checkpoint_path
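Note that credentials are never passed to this constructor; they are read from the environment. A hypothetical setup, assuming the enclosing class is a Cassandra config named CassandraConfig:

import os

os.environ["CASSANDRA_USERNAME"] = "cassandra"  # hypothetical credentials
os.environ["CASSANDRA_PASSWORD"] = "secret"
os.environ["CASSANDRA_HOST"] = "127.0.0.1"

config = CassandraConfig(keyspace="feature_store")  # assumed class name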
Example #4
def test_get_variable_success(monkeypatch):
    # given
    specified_variable = "specified_variable"
    effective_value = "effective_value"
    monkeypatch.setenv(specified_variable, effective_value)
    environment.specification[specified_variable] = "spec_default_value"

    # when
    return_value = environment.get_variable(specified_variable, "anything")

    # then
    assert return_value == effective_value
Example #5
def test_get_variable_default(monkeypatch):
    # given
    default = "default_value"
    variable = "environment_variable"
    environment.specification[variable] = None
    monkeypatch.setenv(variable, "overwrite")
    monkeypatch.delenv(variable)

    # when
    return_value = environment.get_variable(variable, default)

    # then
    assert return_value == default
Example #6
def test_get_variable_from_spec_default(monkeypatch):
    # given
    specified_variable = "specified_variable"
    spec_default_value = "default_value"
    monkeypatch.setenv(specified_variable, "overwrite")
    monkeypatch.delenv(specified_variable)
    environment.specification[specified_variable] = spec_default_value

    # when
    return_value = environment.get_variable(specified_variable, "anything")

    # then
    assert return_value == spec_default_value
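Taken together, Examples #4, #5, and #6 pin down the lookup order of environment.get_variable: a value present in the live environment wins, a non-None default from environment.specification comes next, and the default_value argument is only returned when both are missing.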
Example #7
 def __init__(
     self,
     db_config=None,
     database=None,
     num_partitions=None,
     validation_threshold: float = DEFAULT_VALIDATION_THRESHOLD,
     debug_mode: bool = False,
 ):
     self.db_config = db_config or S3Config()
     self.database = database or environment.get_variable(
         "FEATURE_STORE_HISTORICAL_DATABASE"
     )
     self.num_partitions = num_partitions or DEFAULT_NUM_PARTITIONS
     self.validation_threshold = validation_threshold
     self.debug_mode = debug_mode
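This constructor looks like butterfree's HistoricalFeatureStoreWriter; the class name below is an assumption. With FEATURE_STORE_HISTORICAL_DATABASE set, both database and db_config can be omitted:

import os

os.environ["FEATURE_STORE_HISTORICAL_DATABASE"] = "feature_store"  # hypothetical value

# `database` falls back to the variable above; `db_config` falls back
# to a fresh S3Config().
writer = HistoricalFeatureStoreWriter()  # assumed class name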
Example #8
 def keyspace(self, value: str):
     value = value or environment.get_variable("CASSANDRA_KEYSPACE")
     if not value:
         raise ValueError("Config 'keyspace' cannot be empty.")
     self.__keyspace = value
Example #9
 def stream_checkpoint_path(self, value: str):
     self.__stream_checkpoint_path = value or environment.get_variable(
         "STREAM_CHECKPOINT_PATH")
Example #10
 def stream_processing_time(self, value: str):
     self.__stream_processing_time = value or environment.get_variable(
         "STREAM_PROCESSING_TIME")
Example #11
 def test_bucket(self, s3_config):
     # expecting
     default = environment.get_variable("FEATURE_STORE_S3_BUCKET")
     assert s3_config.bucket == default
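The s3_config fixture is not shown on this page; a plausible version (hypothetical, assuming S3Config resolves its default bucket through environment.get_variable):

import pytest


@pytest.fixture
def s3_config():
    # Bucket left unset so the environment default applies.
    return S3Config()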
Example #12
    def test_feature_set_pipeline(self, mocked_df, spark_session,
                                  fixed_windows_output_feature_set_dataframe):
        # arrange
        table_reader_id = "a_source"
        table_reader_table = "table"
        table_reader_db = environment.get_variable(
            "FEATURE_STORE_HISTORICAL_DATABASE")
        create_temp_view(dataframe=mocked_df, name=table_reader_id)
        create_db_and_table(
            spark=spark_session,
            table_reader_id=table_reader_id,
            table_reader_db=table_reader_db,
            table_reader_table=table_reader_table,
        )
        dbconfig = Mock()
        dbconfig.get_options = Mock(
            return_value={
                "mode": "overwrite",
                "format_": "parquet",
                "path": "test_folder/historical/entity/feature_set",
            })

        # act
        test_pipeline = FeatureSetPipeline(
            source=Source(
                readers=[
                    TableReader(
                        id=table_reader_id,
                        database=table_reader_db,
                        table=table_reader_table,
                    ),
                ],
                query=f"select * from {table_reader_id} ",  # noqa
            ),
            feature_set=FeatureSet(
                name="feature_set",
                entity="entity",
                description="description",
                features=[
                    Feature(
                        name="feature1",
                        description="test",
                        transformation=SparkFunctionTransform(functions=[
                            Function(F.avg, DataType.FLOAT),
                            Function(F.stddev_pop, DataType.FLOAT),
                        ], ).with_window(
                            partition_by="id",
                            order_by=TIMESTAMP_COLUMN,
                            mode="fixed_windows",
                            window_definition=["2 minutes", "15 minutes"],
                        ),
                    ),
                    Feature(
                        name="divided_feature",
                        description="unit test",
                        dtype=DataType.FLOAT,
                        transformation=CustomTransform(
                            transformer=divide,
                            column1="feature1",
                            column2="feature2",
                        ),
                    ),
                ],
                keys=[
                    KeyFeature(
                        name="id",
                        description="The user's Main ID or device ID",
                        dtype=DataType.INTEGER,
                    )
                ],
                timestamp=TimestampFeature(),
            ),
            sink=Sink(
                writers=[HistoricalFeatureStoreWriter(db_config=dbconfig)], ),
        )
        test_pipeline.run()

        # assert
        path = dbconfig.get_options("historical/entity/feature_set").get(
            "path")
        df = spark_session.read.parquet(path).orderBy(TIMESTAMP_COLUMN)

        target_df = fixed_windows_output_feature_set_dataframe.orderBy(
            test_pipeline.feature_set.timestamp_column)

        # assert
        assert_dataframe_equality(df, target_df)

        # tear down
        shutil.rmtree("test_folder")
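Example #12 references a divide helper passed to CustomTransform. A sketch consistent with that call site, assuming CustomTransform invokes the transformer with the dataframe, the feature being built, and the kwargs given at construction time (the exact signature in the project's test suite may differ):

import pyspark.sql.functions as F
from pyspark.sql import DataFrame


def divide(df: DataFrame, fs, column1: str, column2: str) -> DataFrame:
    # `fs` is the feature under construction; its single output column
    # receives the ratio of the two input columns.
    name = fs.get_output_columns()[0]
    return df.withColumn(name, F.col(column1) / F.col(column2))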