import pandas as pd

# Import paths below assume the SDMetrics package layout; adjust them if these
# helpers live elsewhere in this repo.
from sdmetrics import compute_metrics
from sdmetrics.demos import load_timeseries_demo
from sdmetrics.timeseries import TimeSeriesMetric


def test_rank(metric):
    # `metric` is a TimeSeriesMetric subclass, expected to be supplied by a
    # pytest fixture or parametrize decorator (see the sketch after this test).
    real_data, synthetic_data, metadata = load_timeseries_demo()

    real_score = metric.compute(real_data, real_data, metadata)
    synthetic_score = metric.compute(real_data, synthetic_data, metadata)

    normalized_real_score = metric.normalize(real_score)
    normalized_synthetic_score = metric.normalize(synthetic_score)

    # Synthetic data should never outscore the real data, and both the raw and
    # normalized scores must stay within the metric's declared bounds.
    assert metric.min_value <= synthetic_score <= real_score <= metric.max_value
    assert 0.0 <= normalized_synthetic_score <= normalized_real_score <= 1.0
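# A minimal sketch of how `metric` could be supplied to `test_rank`: a pytest
# parametrize decorator over the available TimeSeriesMetric subclasses. This
# assumes `get_subclasses()` returns a mapping of names to metric classes; the
# decorator itself is an assumption and is not part of the original tests.
#
#     import pytest
#
#     @pytest.mark.parametrize('metric', TimeSeriesMetric.get_subclasses().values())
#     def test_rank(metric):
#         ...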
def test_compute_all():
    real_data, synthetic_data, metadata = load_timeseries_demo()

    output = compute_metrics(
        TimeSeriesMetric.get_subclasses(),
        real_data,
        synthetic_data,
        metadata=metadata,
    )

    # At least some metrics must produce a score, and every non-null score
    # must fall within that metric's declared [min_value, max_value] range.
    assert not pd.isnull(output.score.mean())

    scores = output[output.score.notnull()]
    assert scores.score.between(scores.min_value, scores.max_value).all()