def test_no_topic(self):
    """Test when no topic is specified."""
    # An empty project config means no notification topic is configured.
    self.mock.ProjectConfig.return_value = mock_config.MockConfig({})
    issue_filer.notify_issue_update(self.testcase, 'new')

    # Without a topic, nothing should have been published.
    pulled = self.pubsub_client.pull_from_subscription(
        self.subscription, max_messages=16)
    self.assertEqual(0, len(pulled))
def test_basic(self):
    """Basic test."""
    issue_filer.notify_issue_update(self.testcase, 'new')

    # Exactly one message should have been published with the testcase's
    # crash details as attributes.
    pulled = self.pubsub_client.pull_from_subscription(
        self.subscription, max_messages=16)
    self.assertEqual(1, len(pulled))

    expected_attributes = {
        'crash_address': '0xffff',
        'crash_state': 'CRASH STATE',
        'crash_type': 'CRASH TYPE',
        'issue_id': '123',
        'security': 'true',
        'status': 'new',
        'testcase_id': '1',
    }
    self.assertDictEqual(expected_attributes, pulled[0].attributes)
def mark_issue_as_closed_if_testcase_is_fixed(policy, testcase, issue):
    """Mark an issue as verified once all of its reproducible testcases are fixed.

    Runs through a series of guard clauses and bails out (leaving the issue
    untouched) unless every condition for auto-verification holds. On success,
    adds the tracker's 'verified' label, posts an explanatory comment, and
    (unless the job opts out) moves the issue to the 'verified' status.

    Args:
      policy: Issue tracker policy object mapping logical label/status names
        (e.g. 'verified', 'fixed', 'wrong') to tracker-specific values.
      testcase: The testcase whose fixed state triggered this check.
      issue: The tracker issue associated with the testcase; may be None.
    """
    # If this tracker has no 'verified' label configured, auto-verification is
    # not supported at all.
    verified_label = policy.label('verified')
    if not verified_label:
        return

    # If there is no associated issue, then bail out.
    if not issue or not testcase.bug_information:
        return

    # If the issue is closed in a status other than Fixed, like Duplicate,
    # WontFix or Archived, we shouldn't change it. Bail out.
    if not issue.is_open and issue.status != policy.status('fixed'):
        return

    # Check testcase status, so as to skip unreproducible uploads.
    if testcase.status not in ['Processed', 'Duplicate']:
        return

    # If the testcase is still open, no work needs to be done. Bail out.
    if testcase.open:
        return

    # FIXME: Find a better solution to skip over reproducible tests that are
    # now showing up a flaky (esp when we are unable to reproduce crash in
    # original crash revision).
    if testcase.fixed == 'NA':
        return

    # We can only verify fixed issues for reproducible testcases. If the
    # testcase is unreproducible, bail out. Exception is if we explicitly
    # marked this as fixed.
    if testcase.one_time_crasher_flag and testcase.fixed != 'Yes':
        return

    # Make sure that no other testcases associated with this issue are open.
    similar_testcase = data_types.Testcase.query(
        data_types.Testcase.bug_information == testcase.bug_information,
        ndb_utils.is_true(data_types.Testcase.open),
        ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get()
    if similar_testcase:
        return

    # As a last check, do the expensive call of actually checking all issue
    # comments to make sure we didn't do the verification already and we
    # didn't get called out on issue mistriage.
    if (issue_tracker_utils.was_label_added(issue, verified_label) or
            issue_tracker_utils.was_label_added(issue, policy.label('wrong'))):
        return

    issue.labels.add(verified_label)

    # Build the verification comment, including the fixed revision range when
    # one is available.
    comment = 'ClusterFuzz testcase %d is verified as fixed' % testcase.key.id(
    )
    fixed_range_url = data_handler.get_fixed_range_url(testcase)
    if fixed_range_url:
        comment += ' in ' + fixed_range_url
    else:
        comment += '.'

    # Append instructions for reporting an incorrect verification.
    if utils.is_oss_fuzz():
        comment += OSS_FUZZ_INCORRECT_COMMENT
    else:
        comment = _append_generic_incorrect_comment(comment, policy, issue,
                                                    ' and re-open the issue.')

    # Jobs may opt out of automatic status changes; the comment and label are
    # still applied in that case.
    skip_auto_close = data_handler.get_value_from_job_definition(
        testcase.job_type, 'SKIP_AUTO_CLOSE_ISSUE')
    if not skip_auto_close:
        issue.status = policy.status('verified')

    issue.save(new_comment=comment, notify=True)
    logs.log('Mark issue %d as verified for fixed testcase %d.' %
             (issue.id, testcase.key.id()))
    issue_filer.notify_issue_update(testcase, 'verified')
def get(self):
    """Handle a get request.

    Cron-style handler: first groups similar testcases, then iterates over all
    open testcases and files a tracker issue for each one that passes the
    triage checks below. Testcases that are skipped on this run are simply
    re-evaluated on the next run.
    """
    # NOTE(review): bare except — intentionally catches everything so a
    # grouping failure aborts this run without propagating; confirm before
    # narrowing.
    try:
        grouper.group_testcases()
    except:
        logs.log_error('Error occurred while grouping test cases.')
        return

    # Free up memory after group task run.
    utils.python_gc()

    # Get a list of jobs excluded from bug filing.
    excluded_jobs = _get_excluded_jobs()

    # Get a list of all jobs. This is used to filter testcases whose jobs have
    # been removed.
    all_jobs = data_handler.get_all_job_type_names()

    for testcase_id in data_handler.get_open_testcase_id_iterator():
        try:
            testcase = data_handler.get_testcase_by_id(testcase_id)
        except errors.InvalidTestcaseError:
            # Already deleted.
            continue

        # Skip if testcase's job is removed.
        if testcase.job_type not in all_jobs:
            continue

        # Skip if testcase's job is in exclusions list.
        if testcase.job_type in excluded_jobs:
            continue

        # Skip if we are running progression task at this time.
        if testcase.get_metadata('progression_pending'):
            continue

        # If the testcase has a bug filed already, no triage is needed.
        if _is_bug_filed(testcase):
            continue

        # Check if the crash is important, i.e. it is either a reproducible
        # crash or an unreproducible crash happening frequently.
        if not _is_crash_important(testcase):
            continue

        # Require that all tasks like minimizaton, regression testing, etc
        # have finished.
        if not data_handler.critical_tasks_completed(testcase):
            continue

        # For testcases that are not part of a group, wait an additional time
        # till group task completes.
        # FIXME: In future, grouping might be dependent on regression range,
        # so we would have to add an additional wait time.
        if not testcase.group_id and not dates.time_has_expired(
                testcase.timestamp,
                hours=data_types.MIN_ELAPSED_TIME_SINCE_REPORT):
            continue

        # If this project does not have an associated issue tracker, we cannot
        # file this crash anywhere. Still send the 'new' notification so
        # subscribers hear about the testcase.
        issue_tracker = issue_tracker_utils.get_issue_tracker_for_testcase(
            testcase)
        if not issue_tracker:
            issue_filer.notify_issue_update(testcase, 'new')
            continue

        # If there are similar issues to this test case already filed or
        # recently closed, skip filing a duplicate bug.
        if _check_and_update_similar_bug(testcase, issue_tracker):
            continue

        # Clean up old triage messages that would be not applicable now.
        testcase.delete_metadata(TRIAGE_MESSAGE_KEY, update_testcase=False)

        # File the bug first and then create filed bug metadata.
        try:
            issue_filer.file_issue(testcase, issue_tracker)
        except Exception:
            # Filing failures are logged and the testcase retried on the next
            # triage run.
            logs.log_error('Failed to file issue for testcase %d.' %
                           testcase_id)
            continue

        _create_filed_bug_metadata(testcase)
        issue_filer.notify_issue_update(testcase, 'new')

        logs.log('Filed new issue %s for testcase %d.' %
                 (testcase.bug_information, testcase_id))