def get_perf_failures(push):
    """Return a list of job dicts for failed performance jobs on ``push``.

    A job qualifies when it is tier 1 or 2, has result ``testfailed`` and
    belongs to a job group whose name contains ``performance``.
    """
    performance_groups = JobGroup.objects.filter(name__contains='performance')
    failed_perf_jobs = Job.objects.filter(
        push=push,
        tier__lte=2,
        result='testfailed',
        job_group__in=performance_groups,
    ).select_related('machine_platform', 'taskcluster_metadata')
    return list(map(job_to_dict, failed_perf_jobs))
def get_build_failures(push):
    """Return a list of job dicts for build failures on ``push``.

    A build failure is any tier 1 or 2 job whose result is ``busted``.

    ``select_related`` pre-fetches the related rows that ``job_to_dict``
    reads, avoiding one extra query per job; this matches the query
    pattern used by the sibling ``get_*_failures`` helpers in this file.
    """
    build_failures = Job.objects.filter(
        push=push,
        tier__lte=2,
        result='busted',
    ).select_related('machine_platform', 'taskcluster_metadata')
    return [job_to_dict(job) for job in build_failures]
def get_lint_failures(push):
    """Return a list of job dicts for failed lint jobs on ``push``.

    A lint failure is a tier 1 or 2 ``testfailed`` job that ran on the
    ``lint`` machine platform.

    ``taskcluster_metadata`` is added to ``select_related`` for parity
    with the other ``get_*_failures`` helpers, which prefetch it because
    ``job_to_dict`` accesses it; this avoids one extra query per job.
    """
    lint_failures = Job.objects.filter(
        push=push,
        tier__lte=2,
        result='testfailed',
        machine_platform__platform='lint',
    ).select_related('machine_platform', 'taskcluster_metadata')
    return [job_to_dict(job) for job in lint_failures]
def get_perf_failures(push):
    """Return a list of job dicts for failed performance jobs on ``push``.

    A job qualifies when it is tier 1 or 2, has result ``testfailed`` and
    belongs to a job group whose name contains ``performance``.

    ``select_related`` is added (matching the other variant of this
    function earlier in the file) so that ``job_to_dict`` does not issue
    one extra query per job for the related platform/metadata rows.
    """
    perf_groups = JobGroup.objects.filter(name__contains='performance')
    perf_failures = Job.objects.filter(
        push=push,
        tier__lte=2,
        result='testfailed',
        job_group__in=perf_groups,
    ).select_related('machine_platform', 'taskcluster_metadata')
    return [job_to_dict(job) for job in perf_failures]
def get_push_failures(push, option_map):
    """Build a sorted list of per-test failure summaries for ``push``.

    Groups tier-1 ``test_result`` FailureLines by the combination of
    cleaned test name, config, platform and job name.  Each entry in the
    returned list carries the failing jobs and mozlog-formatted log lines
    for that test/config/platform combination.

    :param push: the push whose failures are summarized.
    :param option_map: mapping of option_collection_hash -> option string,
        passed through ``clean_config`` (schema assumed from usage —
        confirm against the caller).
    :return: list of summary dicts sorted by ``testName``.
    """
    # Using .distinct(<fields>) here would help by removing duplicate FailureLines
    # for the same job (with different sub-tests), but it's only supported by
    # postgres. Just using .distinct() has no effect.
    new_failure_lines = FailureLine.objects.filter(
        action='test_result',
        job_log__job__push=push,
        job_log__job__result='testfailed',
        job_log__job__tier=1).exclude(test=None).select_related(
            'job_log__job__job_type', 'job_log__job__machine_platform')
    # using a dict here to avoid duplicates due to multiple failure_lines for
    # each job.
    tests = {}
    for failure_line in new_failure_lines:
        test_name = clean_test(failure_line.test)
        if not test_name:
            # Lines whose test path cannot be cleaned are skipped entirely.
            continue
        job = failure_line.job_log.job
        config = clean_config(option_map[job.option_collection_hash])
        platform = clean_platform(job.machine_platform.platform)
        jobName = job.job_type.name
        jobSymbol = job.job_type.symbol
        # One summary entry per unique test/config/platform/job-name tuple.
        test_key = '{}{}{}{}'.format(test_name, config, platform, jobName)
        if test_key not in tests:
            line = {
                'testName': test_name,
                'jobName': jobName,
                'jobSymbol': jobSymbol,
                'platform': platform,
                'config': config,
                'key': test_key,
                'failJobs': [],
                'passJobs': [],
                'logLines': [],
                'suggestedClassification': 'New Failure',
                'confidence': 0,
            }
            tests[test_key] = line
        # This ``test`` was either just added above, or already existed in the ``tests``
        # list in a previous iteration through ``failure_lines``
        test = tests[test_key]
        test['logLines'].append(failure_line.to_mozlog_format())
        # Only record each failing job once per test entry (lookup by job id).
        if not next(
                (find_job for find_job in test['failJobs'] if find_job['id'] == job.id),
                False):
            test['failJobs'].append(job_to_dict(job))
    # Each line of the sorted list that is returned here represents one test file per platform/
    # config. Each line will have at least one failing job, but may have several
    # passing/failing jobs associated with it.
    return sorted(tests.values(), key=lambda k: k['testName'])
def get_build_failures(push, parent_push=None):
    """Return a list of job dicts for build failures on ``push``.

    A build failure is any tier 1 or 2 job whose result is ``busted``.
    When ``parent_push`` is given, the failures are additionally passed
    through ``mark_failed_in_parent`` together with the parent push's
    build failures (presumably to flag failures already present on the
    parent — confirm against ``mark_failed_in_parent``).
    """
    busted_jobs = Job.objects.filter(
        push=push,
        tier__lte=2,
        result='busted',
    ).select_related('machine_platform', 'taskcluster_metadata')
    failures = list(map(job_to_dict, busted_jobs))
    if parent_push:
        parent_failures = get_build_failures(parent_push)
        mark_failed_in_parent(failures, parent_failures)
    return failures
def get_lint_failures(push, parent_push=None):
    """Return a list of job dicts for failed lint jobs on ``push``.

    A job counts as lint when it either ran on the ``lint`` machine
    platform or carries the ``mozlint`` job-type symbol.  When
    ``parent_push`` is given, the failures are additionally passed
    through ``mark_failed_in_parent`` together with the parent push's
    lint failures.
    """
    is_lint = Q(machine_platform__platform='lint') | Q(job_type__symbol='mozlint')
    lint_jobs = Job.objects.filter(
        is_lint,
        push=push,
        tier__lte=2,
        result='testfailed',
    ).select_related('machine_platform', 'taskcluster_metadata')
    failures = list(map(job_to_dict, lint_jobs))
    if parent_push:
        parent_failures = get_lint_failures(parent_push)
        mark_failed_in_parent(failures, parent_failures)
    return failures
def get_push_failures(push, option_map):
    """Build a sorted list of per-test failure summaries for ``push``.

    Groups tier-1 ``test_result`` FailureLines by cleaned test name,
    config, platform and job name.  Each summary records the failing
    jobs, log lines, and — via a second pass — failed jobs with the same
    config/platform/job-name key in which this particular test did NOT
    fail (``passInFailedJobs``).

    :param push: the push whose failures are summarized.
    :param option_map: mapping of option_collection_hash -> option string,
        passed through ``clean_config`` (schema assumed from usage —
        confirm against the caller).
    :return: list of summary dicts sorted by ``testName``.
    """
    # Using .distinct(<fields>) here would help by removing duplicate FailureLines
    # for the same job (with different sub-tests), but it's only supported by
    # postgres. Just using .distinct() has no effect.
    new_failure_lines = FailureLine.objects.filter(
        action='test_result',
        job_log__job__push=push,
        job_log__job__result='testfailed',
        job_log__job__tier=1
    ).exclude(
        test=None
    ).select_related(
        'job_log__job__job_type', 'job_log__job__machine_platform'
    )
    # using a dict here to avoid duplicates due to multiple failure_lines for
    # each job.
    tests = {}
    # Every failed job seen while iterating, keyed by job id; used by the
    # second pass below to compute ``passInFailedJobs``.
    all_failed_jobs = {}
    for failure_line in new_failure_lines:
        test_name = clean_test(failure_line.test)
        if not test_name:
            # Lines whose test path cannot be cleaned are skipped entirely.
            continue
        job = failure_line.job_log.job
        config = clean_config(option_map[job.option_collection_hash])
        platform = clean_platform(job.machine_platform.platform)
        job_name = job.job_type.name
        job_symbol = job.job_type.symbol
        # job_key identifies the job "slot" (config/platform/job name)
        # independent of which test failed in it.
        job.job_key = '{}{}{}'.format(config, platform, job_name)
        all_failed_jobs[job.id] = job
        # One summary entry per unique test/config/platform/job-name tuple.
        test_key = '{}{}{}{}'.format(test_name, config, platform, job_name)
        if test_key not in tests:
            line = {
                'testName': test_name,
                'jobName': job_name,
                'jobSymbol': job_symbol,
                'platform': platform,
                'config': config,
                'key': test_key,
                'jobKey': job.job_key,
                'failJobs': [],
                'passJobs': [],
                'passInFailedJobs': [],  # This test passed in a job that failed for another test
                'logLines': [],
                'suggestedClassification': 'New Failure',
                'confidence': 0,
            }
            tests[test_key] = line
        # This ``test`` was either just added above, or already existed in the ``tests``
        # list in a previous iteration through ``failure_lines``
        test = tests[test_key]
        test['logLines'].append(failure_line.to_mozlog_format())
        if not has_job(job, test['failJobs']):
            test['failJobs'].append(job_to_dict(job))
    # Check each test to find jobs where it passed, even if the job itself failed due to another test
    for test in tests.values():
        for failed_job in all_failed_jobs.values():
            if not has_job(failed_job, test['failJobs']) \
                    and test['jobKey'] == failed_job.job_key:
                test['passInFailedJobs'].append(job_to_dict(failed_job))
    # Each line of the sorted list that is returned here represents one test file per platform/
    # config. Each line will have at least one failing job, but may have several
    # passing/failing jobs associated with it.
    return sorted(tests.values(), key=lambda k: k['testName'])
def get_current_test_failures(push, option_map):
    """Build per-test failure summaries for ``push`` plus unsupported jobs.

    Groups tier<=2 FailureLines of action ``test_result``/``log``/``crash``
    by a sanitized key of test name, config, platform, job name and job
    group.  A second pass records failed jobs with the same job key in
    which this particular test did NOT fail (``passInFailedJobs``).
    ``testfailed`` jobs (excluding lint) with no FailureLine at all are
    returned separately as "unsupported" jobs.

    :param push: the push whose failures are summarized.
    :param option_map: mapping of option_collection_hash -> option string,
        passed through ``clean_config`` (schema assumed from usage —
        confirm against the caller).
    :return: tuple ``(sorted test summaries, unsupported job dicts)``.
    """
    # All non-lint testfailed jobs; compared against FailureLine-backed
    # jobs at the end to find jobs with no log-parsed failure data.
    all_testfailed = Job.objects.filter(
        push=push,
        tier__lte=2,
        result='testfailed',
    ).exclude(machine_platform__platform='lint')
    # Using .distinct(<fields>) here would help by removing duplicate FailureLines
    # for the same job (with different sub-tests), but it's only supported by
    # postgres. Just using .distinct() has no effect.
    new_failure_lines = FailureLine.objects.filter(
        action__in=['test_result', 'log', 'crash'],
        job_log__job__push=push,
        job_log__job__result='testfailed',
        job_log__job__tier__lte=2).select_related(
            'job_log__job__job_type', 'job_log__job__job_group',
            'job_log__job__machine_platform')
    # using a dict here to avoid duplicates due to multiple failure_lines for
    # each job.
    tests = {}
    # Every failed job seen while iterating, keyed by job id; used for the
    # ``passInFailedJobs`` pass and the unsupported-jobs filter below.
    all_failed_jobs = {}
    for failure_line in new_failure_lines:
        test_name = clean_test(failure_line.action, failure_line.test,
                               failure_line.signature, failure_line.message)
        if not test_name:
            # Lines that cannot be reduced to a test name are skipped.
            continue
        job = failure_line.job_log.job
        config = clean_config(option_map[job.option_collection_hash])
        platform = clean_platform(job.machine_platform.platform)
        job_name = job.job_type.name
        job_symbol = job.job_type.symbol
        job_group = job.job_group.name
        job_group_symbol = job.job_group.symbol
        # job_key identifies the job "slot" (config/platform/name/group)
        # independent of which test failed in it.
        job.job_key = '{}{}{}{}'.format(config, platform, job_name, job_group)
        all_failed_jobs[job.id] = job
        # Strip non-word characters so the key is safe to use verbatim
        # (e.g. as a DOM id / URL fragment downstream — TODO confirm).
        test_key = re.sub(
            r'\W+', '',
            '{}{}{}{}{}'.format(test_name, config, platform, job_name, job_group))
        if test_key not in tests:
            line = {
                'testName': test_name,
                # e.g. 'test_result' -> 'test', 'crash' -> 'crash'.
                'action': failure_line.action.split('_')[0],
                'jobName': job_name,
                'jobSymbol': job_symbol,
                'jobGroup': job_group,
                'jobGroupSymbol': job_group_symbol,
                'platform': platform,
                'config': config,
                'key': test_key,
                'jobKey': job.job_key,
                'inProgressJobs': [],
                'failJobs': [],
                'passJobs': [],
                'passInFailedJobs': [],  # This test passed in a job that failed for another test
                'logLines': [],
                'suggestedClassification': 'New Failure',
                'confidence': 0,
                'tier': job.tier,
            }
            tests[test_key] = line
        # This ``test`` was either just added above, or already existed in the ``tests``
        # list in a previous iteration through ``failure_lines``
        test = tests[test_key]
        if not has_line(failure_line, test['logLines']):
            test['logLines'].append(failure_line.to_mozlog_format())
        if not has_job(job, test['failJobs']):
            test['failJobs'].append(job_to_dict(job))
    # Check each test to find jobs where it passed, even if the job itself failed due to another test
    for test in tests.values():
        for failed_job in all_failed_jobs.values():
            if not has_job(
                    failed_job, test['failJobs']) and test['jobKey'] == failed_job.job_key:
                test['passInFailedJobs'].append(job_to_dict(failed_job))
    # filter out testfailed jobs that are supported by failureline to get unsupported jobs
    supported_job_ids = all_failed_jobs.keys()
    unsupported_jobs = [
        job_to_dict(job) for job in all_testfailed if job.id not in supported_job_ids
    ]
    # Each line of the sorted list that is returned here represents one test file per platform/
    # config. Each line will have at least one failing job, but may have several
    # passing/failing jobs associated with it.
    return (sorted(tests.values(), key=lambda k: k['testName']), unsupported_jobs)