def test_expectation_configuration_get_evaluation_parameter_dependencies_with_query_store_formatted_urns():
    """A query-store URN yields no evaluation parameter dependencies.

    Query-store formatted URNs carry no "expectation_suite_name" key, so the
    dependency-collection method skips its `nested_update` calls entirely and
    returns an empty mapping.
    """
    config = ExpectationConfiguration(
        expectation_type="expect_column_values_to_be_in_set",
        kwargs={
            "column": "genre_id",
            "value_set": {
                "$PARAMETER": "urn:great_expectations:stores:query_store:get_pet_names"
            },
            "result_format": "COMPLETE",
        },
    )
    assert config.get_evaluation_parameter_dependencies() == {}
def test_expectation_configuration_get_evaluation_parameter_dependencies():
    """Shared upstream metrics are reported once, not duplicated.

    Parsing evaluation parameters relies on pyparsing, but the expectation
    configuration is responsible for returning only one copy of each required
    metric. Here both min_value and max_value reference the same two upstream
    validation metrics; the result must still contain a single entry per metric.
    """
    config = ExpectationConfiguration(
        expectation_type="expect_column_values_to_be_between",
        kwargs={
            "column": "norm",
            "min_value": {
                "$PARAMETER": "(-3 * urn:great_expectations:validations:profile:expect_column_stdev_to_be_between"
                ".result.observed_value:column=norm) + "
                "urn:great_expectations:validations:profile:expect_column_mean_to_be_between.result.observed_value"
                ":column=norm"
            },
            "max_value": {
                "$PARAMETER": "(3 * urn:great_expectations:validations:profile:expect_column_stdev_to_be_between"
                ".result.observed_value:column=norm) + "
                "urn:great_expectations:validations:profile:expect_column_mean_to_be_between.result.observed_value"
                ":column=norm"
            },
        },
    )
    deps = config.get_evaluation_parameter_dependencies()
    # The order of the collected metric names is not deterministic, so
    # normalize the list into a set before comparing against the expectation.
    metric_names = deps["profile"][0]["metric_kwargs_id"]["column=norm"]
    deps["profile"][0]["metric_kwargs_id"]["column=norm"] = set(metric_names)
    assert deps == {
        "profile": [
            {
                "metric_kwargs_id": {
                    "column=norm": {
                        "expect_column_stdev_to_be_between.result.observed_value",
                        "expect_column_mean_to_be_between.result.observed_value",
                    }
                }
            }
        ]
    }