def test_report_generation(self):
  """Verify that a full report built from the report_logs directory matches
  the checked-in expected JSON report."""
  analyzer = performance_analyzer.LibFuzzerPerformanceAnalyzer()
  logs_dir = os.path.join(self.libfuzzer_data_directory, 'report_logs')

  # Stats values normally supplied by ClusterFuzz; use defaults here.
  overrides = DEFAULT_STATS_PROVIDED_BY_CF.copy()

  rows = []
  for log_name in sorted(os.listdir(logs_dir)):
    # Give every log a distinct timestamp.
    overrides['timestamp'] += 1
    log_path = os.path.join(logs_dir, log_name)
    rows.append(_get_stats_from_log(log_path, stats_overrides=overrides))

  scores, affected_percents, examples = analyzer.analyze_stats(rows)
  issues = analyzer.get_issues(scores, affected_percents, examples)
  report = performance_analyzer.generate_report(issues, 'fuzzer1', 'job1')

  expected_path = os.path.join(self.libfuzzer_data_directory,
                               'expected_report.json')
  expected = utils.read_data_from_file(expected_path, eval_data=False)

  # Show the full diff on mismatch.
  self.maxDiff = None  # pylint: disable=invalid-name
  self.assertEqual(json.loads(report), json.loads(expected))
def get(self, fuzzer_name=None, job_type=None, logs_date=None):
  """Handle a GET request.

  Validates the request parameters, runs the performance analysis over the
  fuzzer's stats rows, and renders the performance report page.

  Raises:
    helpers.EarlyExitException: on missing parameters or when the requested
        time period cannot be analyzed.
    helpers.AccessDeniedException: if the caller lacks access to the
        fuzzer/job pair.
  """
  # Guard clauses: all three parameters are required.
  if not fuzzer_name:
    raise helpers.EarlyExitException('Fuzzer name cannot be empty.', 400)
  if not job_type:
    raise helpers.EarlyExitException('Job type cannot be empty.', 400)
  if not logs_date:
    raise helpers.EarlyExitException('Logs Date cannot be empty.', 400)

  if not access.has_access(fuzzer_name=fuzzer_name, job_type=job_type):
    raise helpers.AccessDeniedException()

  features, date = _get_performance_report_data(fuzzer_name, job_type,
                                                logs_date)
  rows = features.rows
  analyzer = performance_analyzer.LibFuzzerPerformanceAnalyzer()

  # Outdated stats can be missing fields or malformed, which would break
  # the analysis — translate those failures into a 404.
  try:
    total_time = sum(row['actual_duration'] for row in rows)
    scores, affected_percents, examples = analyzer.analyze_stats(rows)
  except (KeyError, TypeError, ValueError) as e:
    logging.error('Exception during performance analysis: %s\n', str(e))
    raise helpers.EarlyExitException(
        'Cannot analyze performance for the requested time period.', 404)

  # Build performance analysis result.
  issues = analyzer.get_issues(scores, affected_percents, examples)
  report_json = performance_analyzer.generate_report(issues, fuzzer_name,
                                                     job_type)
  report = _get_performance_report(fuzzer_name, job_type, report_json)

  result = {
      'info': {
          'date': str(date),
          'fuzzer_name': report['fuzzer_name'],
          'fuzzer_runs': features.total_count,
          'job_type': report['job_type'],
          'table_data': _build_rows_and_columns(report),
          'total_time': str(datetime.timedelta(seconds=total_time)),
      }
  }
  return self.render('performance-report.html', result)
def setUp(self):
  """Set up test data paths, environment, and the analyzer under test."""
  test_helpers.patch_environ(self)

  # Test fixtures live next to this file under performance_analyzer_data/.
  base_dir = os.path.dirname(__file__)
  self.data_directory = os.path.join(base_dir, 'performance_analyzer_data')
  self.libfuzzer_data_directory = os.path.join(self.data_directory,
                                               'libfuzzer')

  environment.set_value('FAIL_RETRIES', 1)
  self.analyzer = performance_analyzer.LibFuzzerPerformanceAnalyzer()
def get_issues(self, log_filename, stats_overrides=None):
  """Run the analyzer over a single issue log and return its issues.

  Args:
    log_filename: Name of a log file under the issue_logs fixture directory.
    stats_overrides: Optional dict of stat values overriding the defaults
        normally provided by ClusterFuzz.

  Returns:
    The list of performance issues detected for that single log.
  """
  # Start from the CF-provided defaults, then layer on any overrides.
  stats_values = DEFAULT_STATS_PROVIDED_BY_CF.copy()
  if stats_overrides:
    stats_values.update(stats_overrides)

  log_path = os.path.join(self.libfuzzer_data_directory, 'issue_logs',
                          log_filename)
  stats = _get_stats_from_log(log_path, stats_overrides=stats_values)

  analyzer = performance_analyzer.LibFuzzerPerformanceAnalyzer()
  scores, affected_percents, examples = analyzer.analyze_stats([stats])
  return analyzer.get_issues(scores, affected_percents, examples)