def test_get_test_failures_with_parent(
    failure_classifications, test_repository, test_job, mock_log_parser, text_log_error_lines
):
    """A failure that also occurred in the parent push is flagged failedInParent."""
    # Mark the fixture job as a test failure on the current push.
    test_job.result = 'testfailed'
    test_job.save()

    # Build a parent push holding a clone of an existing job that carries
    # the same failure line, so the failure can match against the parent.
    parent_push = Push.objects.create(
        revision='abcdef77949168d16c03a4cba167678b7ab65f76',
        repository=test_repository,
        author='*****@*****.**',
        time=datetime.datetime.now(),
    )
    cloned_job = Job.objects.first()
    cloned_job.pk = None  # force an INSERT of a copy on save()
    cloned_job.push = parent_push
    cloned_job.guid = 'wazzon chokey?'
    cloned_job.save()
    TaskclusterMetadata.objects.create(
        job=cloned_job, task_id='V3SVuxO8TFy37En_6HcXLs', retry_id=0
    )
    create_lines(cloned_job, [(test_line, {})])

    jobs = get_test_failure_jobs(test_job.push)
    failures = get_test_failures(test_job.push, jobs, parent_push)
    need_investigation = failures['needInvestigation']

    assert len(need_investigation) == 1
    assert len(jobs[need_investigation[0]['jobName']]) == 1
    assert need_investigation[0]['failedInParent']
def test_get_test_failures(
    failure_classifications, test_repository, test_job, text_log_error_lines, eleven_job_blobs
):
    """A push with exactly one failing job yields result 'fail' and one test to investigate."""
    # Flesh out each raw blob with the denormalized fields that
    # get_test_failure_jobs()/get_test_failures() expect to find.
    for blob in eleven_job_blobs:
        raw = blob['job']
        blob.update(
            {
                'result': 'success',
                'taskcluster_metadata__task_id': 'V3SVuxO8TFy37En_6HcXLs',
                'taskcluster_metadata__retry_id': '0',
                'job_type__name': raw['name'],
                'job_type__symbol': raw['job_symbol'],
                'machine_platform__platform': raw['machine_platform']['platform'],
                'job_group__name': None,
                'job_group__symbol': raw['group_symbol'],
                'start_time': raw['start_timestamp'],
                'option_collection_hash': '32faaecac742100f7753f0c1d0aa0add01b4046b',
            }
        )
    # Exactly one of the blobs fails.
    eleven_job_blobs[0]['result'] = 'testfailed'

    test_job.result = 'testfailed'
    test_job.option_collection_hash = '32faaecac742100f7753f0c1d0aa0add01b4046b'
    test_job.save()

    result_status, jobs = get_test_failure_jobs(test_job.push, eleven_job_blobs)
    result, test_failures = get_test_failures(
        test_job.push, jobs, [test_job.job_type.name], result_status
    )
    need_investigation = test_failures['needInvestigation']['tests']

    assert result == 'fail'
    assert len(need_investigation) == 1
    assert len(jobs[list(need_investigation)[0]['jobName']]) == 1
def health_summary(self, request, project):
    """Return a calculated summary of the health of this push."""
    revision = request.query_params.get('revision')
    try:
        push = Push.objects.get(revision=revision, repository__name=project)
    except Push.DoesNotExist:
        return Response(
            "No push with revision: {0}".format(revision), status=HTTP_404_NOT_FOUND
        )

    jobs = get_test_failure_jobs(push)
    test_failures = get_test_failures(push, jobs)
    lint_failures = get_lint_failures(push)
    build_failures = get_build_failures(push)

    # needInvestigation is the sum of the three per-category counts.
    counts = {
        'testFailureCount': len(test_failures['needInvestigation']),
        'buildFailureCount': len(build_failures),
        'lintFailureCount': len(lint_failures),
    }
    return Response({**counts, 'needInvestigation': sum(counts.values())})
def test_get_test_failures_no_parent(
    failure_classifications, test_repository, test_job, text_log_error_lines
):
    """Without a comparable parent push, a failure is not flagged as failedInParent."""
    test_job.result = 'testfailed'
    test_job.save()

    jobs = get_test_failure_jobs(test_job.push)
    failures = get_test_failures(test_job.push, jobs)
    need_investigation = failures['needInvestigation']

    assert len(need_investigation) == 1
    assert len(jobs[need_investigation[0]['jobName']]) == 1
    assert not need_investigation[0]['failedInParent']
def test_get_test_failures(failure_classifications, test_repository, test_job, text_log_error_lines):
    """A push whose only job failed reports result 'fail' with one failure to investigate."""
    test_job.result = 'testfailed'
    test_job.save()

    result_status, jobs = get_test_failure_jobs(test_job.push)
    result, failures = get_test_failures(test_job.push, jobs, result_status)
    need_investigation = failures['needInvestigation']

    assert result == 'fail'
    assert len(need_investigation) == 1
    assert len(jobs[need_investigation[0]['jobName']]) == 1
def health(self, request, project):
    """
    Return a calculated assessment of the health of this push.

    Responds 404 when no push matches the given revision; otherwise returns
    the per-metric (commit history / linting / tests / builds) results, the
    combined push result and the failed-job mapping.
    """
    revision = request.query_params.get('revision')
    try:
        # NOTE(review): Repository.objects.get can raise
        # Repository.DoesNotExist for an unknown project, which is not
        # caught here — confirm whether a 404 is wanted for that case too.
        repository = Repository.objects.get(name=project)
        push = Push.objects.get(revision=revision, repository=repository)
    except Push.DoesNotExist:
        return Response(
            "No push with revision: {0}".format(revision), status=HTTP_404_NOT_FOUND
        )

    commit_history_details = None
    result_status, jobs = get_test_failure_jobs(push)
    # Parent compare only supported for Hg at this time.
    # Bug https://bugzilla.mozilla.org/show_bug.cgi?id=1612645
    if repository.dvcs_type == 'hg':
        commit_history_details = get_commit_history(repository, revision, push)

    test_result, push_health_test_failures = get_test_failures(
        push,
        jobs,
        result_status,
    )
    build_result, build_failures, _unused = get_build_failures(push)
    lint_result, lint_failures, _unused = get_lint_failures(push)

    # Combine the metric results: 'fail' dominates, while
    # 'indeterminate'/'unknown' only apply when nothing has failed yet.
    # BUGFIX: the previous condition was
    #     metric_result == 'indeterminate' or metric_result == 'unknown' and push_result != 'fail'
    # in which `and` binds tighter than `or`, so a later 'indeterminate'
    # metric silently downgraded an already-'fail' push result.
    push_result = 'pass'
    for metric_result in [test_result, lint_result, build_result]:
        if metric_result in ('indeterminate', 'unknown') and push_result != 'fail':
            push_result = metric_result
        elif metric_result == 'fail':
            push_result = metric_result

    newrelic.agent.record_custom_event(
        'push_health_need_investigation',
        {
            'revision': revision,
            'repo': repository.name,
            'needInvestigation': len(push_health_test_failures['needInvestigation']),
            'author': push.author,
        },
    )

    return Response(
        {
            'revision': revision,
            'id': push.id,
            'result': push_result,
            'jobs': jobs,
            'metrics': {
                'commitHistory': {
                    'name': 'Commit History',
                    'result': 'none',
                    'details': commit_history_details,
                },
                'linting': {
                    'name': 'Linting',
                    'result': lint_result,
                    'details': lint_failures,
                },
                'tests': {
                    'name': 'Tests',
                    'result': test_result,
                    'details': push_health_test_failures,
                },
                'builds': {
                    'name': 'Builds',
                    'result': build_result,
                    'details': build_failures,
                },
            },
            'status': push.get_status(),
        }
    )
def health_summary(self, request, project):
    """
    Return a calculated summary of the health of one or more pushes.

    Selection is either by ``revision`` (comma-separated list, scoped to
    ``project``) or by ``author`` (most recent ``count`` pushes, optionally
    across all repos). Optional flags add commit history and in-progress
    test counts to each entry.
    """
    revision = request.query_params.get('revision')
    author = request.query_params.get('author')
    count = request.query_params.get('count')
    all_repos = request.query_params.get('all_repos')
    with_history = request.query_params.get('with_history')
    with_in_progress_tests = request.query_params.get('with_in_progress_tests', False)

    # BUGFIX: the previous try/except Push.DoesNotExist around these
    # branches was dead code — QuerySet.filter() never raises DoesNotExist;
    # an unmatched revision/author simply yields an empty queryset (and an
    # empty response list below). Removed as behavior-identical.
    if revision:
        pushes = Push.objects.filter(
            revision__in=revision.split(','), repository__name=project
        )
    else:
        pushes = (
            Push.objects.filter(author=author)
            .select_related('repository')
            .prefetch_related('commits')
            .order_by('-time')
        )
        if not all_repos:
            pushes = pushes.filter(repository__name=project)
        # NOTE(review): int(count) raises if ``count`` is missing or
        # non-numeric on the author path — presumably callers always send
        # it; confirm against the client.
        pushes = pushes[: int(count)]

    data = []
    commit_history = None

    for push in list(pushes):
        result_status, jobs = get_test_failure_jobs(push)
        test_result, push_health_test_failures = get_test_failures(
            push,
            jobs,
            result_status,
        )
        build_result, push_health_build_failures, builds_in_progress_count = get_build_failures(
            push
        )
        lint_result, push_health_lint_failures, linting_in_progress_count = get_lint_failures(
            push
        )
        test_failure_count = len(push_health_test_failures['needInvestigation'])
        build_failure_count = len(push_health_build_failures)
        lint_failure_count = len(push_health_lint_failures)
        test_in_progress_count = None

        if with_history:
            serializer = PushSerializer([push], many=True)
            commit_history = serializer.data
        if with_in_progress_tests:
            test_in_progress_count = get_test_in_progress_count(push)

        data.append(
            {
                'revision': push.revision,
                'repository': push.repository.name,
                'testFailureCount': test_failure_count,
                'testInProgressCount': test_in_progress_count,
                'buildFailureCount': build_failure_count,
                'buildInProgressCount': builds_in_progress_count,
                'lintFailureCount': lint_failure_count,
                'lintingInProgressCount': linting_in_progress_count,
                'needInvestigation': test_failure_count
                + build_failure_count
                + lint_failure_count,
                'status': push.get_status(),
                'history': commit_history,
                'metrics': {
                    'linting': {
                        'name': 'Linting',
                        'result': lint_result,
                    },
                    'tests': {
                        'name': 'Tests',
                        'result': test_result,
                    },
                    'builds': {
                        'name': 'Builds',
                        'result': build_result,
                    },
                },
            }
        )

    return Response(data)
def _get_failure_data(self, mozciPush, push):
    """
    Gather test, build and lint failure data for ``push``.

    We need to retrieve both groups and labels because some tasks do not
    have groups, e.g. builds, talos, awsy (so we retrieve the labels) and
    some tasks do not have consistent labels because the labels are defined
    at runtime based on which tests need to run, e.g. mochitest, xpcshell
    (so we need their groups, which we then query the Group table with to
    retrieve the equivalent label names).

    Returns [tests, builds, lints, status, total_failures, jobs].
    """
    likely_regression_labels = []
    likely_regression_groups = []
    # mozci lookups hit an external source; a failure there should degrade
    # to "no likely regressions" rather than break the whole response.
    try:
        likely_regression_labels = list(mozciPush.get_likely_regressions('label'))
    except Exception as e:
        logger.error('Problem retrieving mozci labels for push %s : %s', push, e)
    try:
        likely_regression_groups = list(mozciPush.get_likely_regressions('group'))
    except Exception as e:
        logger.error('Problem retrieving mozci groups for push %s : %s', push, e)

    all_jobs = list(
        Job.objects.filter(
            push=push,
            tier__lte=2,
        )
        .select_related('machine_platform', 'taskcluster_metadata', 'job_type', 'job_group')
        .values(
            'id',
            'option_collection_hash',
            'job_type_id',
            'job_group_id',
            # 'result' was listed twice in the original projection; once is enough.
            'result',
            'state',
            'failure_classification_id',
            'push_id',
            'start_time',
            'tier',
            'guid',
            'job_type__name',
            'job_type__symbol',
            'job_group__name',
            'job_group__symbol',
            'machine_platform__platform',
            'taskcluster_metadata__task_id',
            'taskcluster_metadata__retry_id',
        )
    )
    result_status, jobs = get_test_failure_jobs(push, all_jobs)
    failed_jobs = list(jobs.keys())
    # Only the values are needed here, so don't unpack unused keys.
    job_ids = [job_list[0].get('id') for job_list in jobs.values()]

    # Translate likely-regression groups into their equivalent job-type
    # (label) names via the Group table.
    group_regression_labels = (
        Group.objects.filter(
            name__in=likely_regression_groups, job_logs__job_id__in=job_ids
        )
        .select_related('job', 'job_type')
        .values_list('job_logs__job__job_type__name', flat=True)
    )
    likely_regression_labels.extend(list(group_regression_labels))

    tests = get_test_failures(push, jobs, likely_regression_labels, result_status)
    likely_build_regression_labels = [
        label for label in likely_regression_labels if label not in failed_jobs
    ]
    builds = get_build_failures(push, likely_build_regression_labels, all_jobs)
    lints = get_lint_failures(push, all_jobs)
    status = push.get_status()
    # Override the testfailed value added in push.get_status so that it aligns
    # with how we detect lint, build and test failures for the push health
    # API's (total_failures doesn't include known intermittent failures)
    total_failures = len(tests[1]['needInvestigation']) + len(builds[1]) + len(lints[1])
    status['testfailed'] = total_failures

    return [tests, builds, lints, status, total_failures, jobs]