def test_benchmark_summary():
    """Verify benchmark_summary stats for the 'libxml' benchmark snapshot."""
    all_data = create_experiment_data()
    libxml_data = all_data[all_data.benchmark == 'libxml']
    snapshot = data_utils.get_benchmark_snapshot(libxml_data)
    actual = data_utils.benchmark_summary(snapshot)

    # Expected per-fuzzer stats at the snapshot time, indexed by (fuzzer, time).
    expected = pd.DataFrame({
        'fuzzer': ['afl', 'libfuzzer'],
        'time': [9, 9],
        'count': [2, 2],
        'min': [1000, 600],
        'median': [1100, 700],
        'max': [1200, 800],
    })
    expected = expected.set_index(['fuzzer', 'time']).astype(float)

    # Compare only the stat columns present in the expected frame.
    assert actual[['count', 'min', 'median', 'max']].equals(expected)
def summary_table(self):
    """Statistical summary of the benchmark snapshot data."""
    snapshot = self._benchmark_snapshot_df
    return data_utils.benchmark_summary(snapshot)
def bug_summary_table(self):
    """Statistical summary of the snapshot, computed over 'bugs_covered'."""
    snapshot = self._benchmark_snapshot_df
    return data_utils.benchmark_summary(snapshot, key='bugs_covered')