def aggregate_flaky_tests(day=None, max_flaky_tests=200):
    if day is None:
        day = datetime.utcnow().date() - timedelta(days=1)

    try:
        projects = Project.query.all()

        for project in projects:
            tests = get_flaky_tests(day, day + timedelta(days=1), [project], max_flaky_tests)

            for test in tests:
                _log_metrics(
                    "flaky_test_reruns",
                    flaky_test_reruns_name=test['name'],
                    flaky_test_reruns_project_id=test['project_id'],
                    flaky_test_reruns_flaky_runs=test['flaky_runs'],
                    flaky_test_reruns_passing_runs=test['passing_runs'],
                )
                try_create(FlakyTestStat, {
                    'name': test['name'],
                    'project_id': test['project_id'],
                    'date': day,
                    'last_flaky_run_id': test['id'],
                    'flaky_runs': test['flaky_runs'],
                    'double_reruns': test['double_reruns'],
                    'passing_runs': test['passing_runs'],
                })

                # Potentially hundreds of commits per project may be a bit excessive,
                # but the metric posting can potentially take seconds, meaning this could be
                # a very long-running transaction otherwise.
                db.session.commit()

        db.session.commit()
    except Exception as err:
        logging.exception(unicode(err))
def aggregate_flaky_tests(day=None, max_flaky_tests=200):
    if day is None:
        day = datetime.utcnow().date() - timedelta(days=1)

    try:
        projects = Project.query.all()

        for project in projects:
            tests = get_flaky_tests(day, day + timedelta(days=1), [project], max_flaky_tests)

            for test in tests:
                # Earliest recorded run of this test, used to track how long it
                # has existed (and therefore how long it may have been flaky).
                first_run = db.session.query(
                    TestCase.date_created
                ).filter(
                    TestCase.project_id == test['project_id'],
                    TestCase.name_sha == test['hash']
                ).order_by(
                    TestCase.date_created
                ).limit(1).scalar()

                log_metrics(
                    "flaky_test_reruns",
                    flaky_test_reruns_name=test['name'],
                    flaky_test_reruns_project_id=test['project_id'],
                    flaky_test_reruns_flaky_runs=test['flaky_runs'],
                    flaky_test_reruns_passing_runs=test['passing_runs'],
                )
                try_create(FlakyTestStat, {
                    'name': test['name'],
                    'project_id': test['project_id'],
                    'date': day,
                    'last_flaky_run_id': test['id'],
                    'flaky_runs': test['flaky_runs'],
                    'double_reruns': test['double_reruns'],
                    'passing_runs': test['passing_runs'],
                    'first_run': first_run,
                })

        # Flush all of the day's stats in a single transaction.
        db.session.commit()
    except Exception as err:
        logging.exception(unicode(err))
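# Usage sketch (not part of the original module): aggregate_flaky_tests() defaults
# to aggregating yesterday's runs, but it also accepts an explicit `day`, so a
# backfill loop like the hypothetical helper below is one way the job could be
# driven for a range of past days.
def backfill_flaky_test_stats(num_days=7):
    # Re-run the aggregation for each of the last `num_days` days, oldest first.
    today = datetime.utcnow().date()
    for offset in range(num_days, 0, -1):
        aggregate_flaky_tests(day=today - timedelta(days=offset))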
def generate(self, days=7):
    end_period = datetime.utcnow()
    days_delta = timedelta(days=days)
    start_period = end_period - days_delta

    # if we're pulling data for a select number of days let's use the
    # previous week as the previous period
    if days < 7:
        previous_end_period = end_period - timedelta(days=7)
    else:
        previous_end_period = start_period
    previous_start_period = previous_end_period - days_delta

    current_results = self.get_project_stats(
        start_period, end_period)

    previous_results = self.get_project_stats(
        previous_start_period, previous_end_period)

    for project, stats in current_results.items():
        # exclude projects that had no builds in this period
        if not stats['total_builds']:
            del current_results[project]
            continue

        previous_stats = previous_results.get(project)
        if not previous_stats:
            green_change = None
            duration_change = None
        elif stats['green_percent'] is None:
            green_change = None
            duration_change = None
        elif previous_stats['green_percent'] is None:
            green_change = None
            duration_change = None
        else:
            green_change = stats['green_percent'] - previous_stats['green_percent']
            duration_change = stats['avg_duration'] - previous_stats['avg_duration']

        if not previous_stats:
            total_change = None
        elif previous_stats['total_builds'] is None:
            total_change = None
        else:
            total_change = stats['total_builds'] - previous_stats['total_builds']

        stats['avg_duration'] = stats['avg_duration']

        stats['total_change'] = total_change
        stats['percent_change'] = green_change
        stats['duration_change'] = duration_change

    project_stats = sorted(current_results.items(), key=lambda x: (
        -(x[1]['total_builds'] or 0),
        abs(x[1]['green_percent'] or 0),
        x[0].name,
    ))

    current_failure_stats = self.get_failure_stats(start_period, end_period)
    previous_failure_stats = self.get_failure_stats(
        previous_start_period, previous_end_period)
    failure_stats = []
    for stat_name, current_stat_value in current_failure_stats['reasons'].iteritems():
        previous_stat_value = previous_failure_stats['reasons'].get(stat_name, 0)
        failure_stats.append({
            'name': stat_name,
            'current': {
                'value': current_stat_value,
                'percent': percent(current_stat_value, current_failure_stats['total'])
            },
            'previous': {
                'value': previous_stat_value,
                'percent': percent(previous_stat_value, previous_failure_stats['total'])
            },
        })

    flaky_tests = get_flaky_tests(start_period, end_period, self.projects, MAX_FLAKY_TESTS)

    slow_tests = self.get_slow_tests(start_period, end_period)

    title = 'Build Report ({0} through {1})'.format(
        start_period.strftime('%b %d, %Y'),
        end_period.strftime('%b %d, %Y'),
    )
    if len(self.projects) == 1:
        title = '[%s] %s' % (iter(self.projects).next().name, title)

    return {
        'title': title,
        'period': [start_period, end_period],
        'failure_stats': failure_stats,
        'project_stats': project_stats,
        'tests': {
            'flaky_list': flaky_tests,
            'slow_list': slow_tests,
        },
    }
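# `percent()` is called above but not defined in this section. A minimal sketch of
# what it presumably does (an assumption, not necessarily the project's actual
# helper) only needs to guard against a zero denominator so that periods with no
# failures don't raise ZeroDivisionError:
def percent(value, total):
    # Express `value` as an integer percentage of `total`; an empty total is 0%.
    if not total:
        return 0
    return int(value / float(total) * 100)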