def test_resolve_metric_bundle_with_nonexistent_metric():
    df = pd.DataFrame({"a": [1, 2, 3, None]})

    # Building engine and configurations in an attempt to resolve metrics
    engine = PandasExecutionEngine(batch_data_dict={"made_up_id": df})
    mean = MetricConfiguration(
        metric_name="column.i_don't_exist",
        metric_domain_kwargs={"column": "a"},
        metric_value_kwargs=dict(),
    )
    stdev = MetricConfiguration(
        metric_name="column.nonexistent",
        metric_domain_kwargs={"column": "a"},
        metric_value_kwargs=dict(),
    )
    desired_metrics = (mean, stdev)

    # Attempting to resolve unregistered metric names should raise a MetricProviderError
    with pytest.raises(MetricProviderError) as e:
        metrics = engine.resolve_metrics(metrics_to_resolve=desired_metrics)
def test_resolve_metric_bundle():
    df = pd.DataFrame({"a": [1, 2, 3, None]})

    # Building engine and configurations in an attempt to resolve metrics
    engine = PandasExecutionEngine(batch_data_dict={"made-up-id": df})

    metrics: dict = {}

    table_columns_metric: MetricConfiguration
    results: dict

    # Resolve the "table.columns" metric first, since the column metrics depend on it
    table_columns_metric, results = get_table_columns_metric(engine=engine)
    metrics.update(results)

    mean = MetricConfiguration(
        metric_name="column.mean",
        metric_domain_kwargs={"column": "a"},
        metric_value_kwargs=dict(),
        metric_dependencies={
            "table.columns": table_columns_metric,
        },
    )
    stdev = MetricConfiguration(
        metric_name="column.standard_deviation",
        metric_domain_kwargs={"column": "a"},
        metric_value_kwargs=dict(),
        metric_dependencies={
            "table.columns": table_columns_metric,
        },
    )
    desired_metrics = (mean, stdev)
    results = engine.resolve_metrics(
        metrics_to_resolve=desired_metrics, metrics=metrics
    )
    metrics.update(results)

    # Ensuring metrics have been properly resolved
    assert (
        metrics[("column.mean", "column=a", ())] == 2.0
    ), "mean metric not properly computed"
    assert metrics[("column.standard_deviation", "column=a", ())] == 1.0, (
        "standard deviation metric not properly computed"
    )