def get_revisions_list(bucket_path, testcase=None):
  """Returns a sorted ascending list of revisions from a bucket path, excluding
  bad build revisions and testcase crash revision (if any)."""
  revision_pattern = revisions.revision_pattern_from_build_bucket_path(
      bucket_path)

  revision_urls = get_build_urls_list(bucket_path, reverse=False)
  if not revision_urls:
    return None

  # Parse the revisions out of the build urls.
  revision_list = []
  for url in revision_urls:
    match = re.match(revision_pattern, url)
    if match:
      revision = revisions.convert_revision_to_integer(match.group(1))
      revision_list.append(revision)

  # Remove revisions for bad builds from the revision list.
  job_type = environment.get_value('JOB_NAME')
  bad_builds = ndb_utils.get_all_from_query(
      data_types.BuildMetadata.query(
          ndb_utils.is_true(data_types.BuildMetadata.bad_build),
          data_types.BuildMetadata.job_type == job_type))
  for bad_build in bad_builds:
    # Don't remove the testcase revision even if it is in the bad build list.
    # This usually happens when a flaky bot incorrectly marks a particular
    # revision as bad.
    if testcase and bad_build.revision == testcase.crash_revision:
      continue

    if bad_build.revision in revision_list:
      revision_list.remove(bad_build.revision)

  return revision_list
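# Illustrative sketch, not part of the original module: how the regex-based
# revision extraction above behaves, assuming a hypothetical build URL layout.
# The real pattern comes from
# revisions.revision_pattern_from_build_bucket_path().
def _example_parse_revisions():
  """Hypothetical demo of parsing revisions out of build URLs."""
  example_pattern = r'.*-([0-9]+)\.zip'  # Assumed pattern, for illustration.
  urls = ['gs://builds/app-1000.zip', 'gs://builds/app-1002.zip']
  revision_list = []
  for url in urls:
    match = re.match(example_pattern, url)
    if match:
      revision_list.append(int(match.group(1)))
  return revision_list  # [1000, 1002]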
def cleanup_reports_metadata():
  """Delete ReportMetadata for uploaded reports."""
  uploaded_reports = ndb_utils.get_all_from_query(
      data_types.ReportMetadata.query(
          ndb_utils.is_true(data_types.ReportMetadata.is_uploaded)),
      keys_only=True)
  ndb.delete_multi(uploaded_reports)
def get_open_testcases_with_bugs():
  """Return iterator to open testcases with bugs."""
  return data_types.Testcase.query(
      ndb_utils.is_true(data_types.Testcase.open),
      data_types.Testcase.status == 'Processed',
      data_types.Testcase.bug_information != '').order(  # pylint: disable=g-explicit-bool-comparison
          data_types.Testcase.bug_information, data_types.Testcase.key)
def update_issue_labels_for_flaky_testcase(policy, testcase, issue):
  """Update issue reproducibility label when testcase becomes flaky or
  unreproducible."""
  if not issue or not issue.is_open:
    return

  # If the testcase is reproducible, then no change is needed. Bail out.
  if not testcase.one_time_crasher_flag:
    return

  # Make sure that no other reproducible testcases associated with this issue
  # are open. If any are, no need to update the label.
  similar_reproducible_testcase = data_types.Testcase.query(
      data_types.Testcase.bug_information == testcase.bug_information,
      ndb_utils.is_true(data_types.Testcase.open),
      ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get()
  if similar_reproducible_testcase:
    return

  reproducible_label = policy.label('reproducible')
  unreproducible_label = policy.label('unreproducible')
  if not reproducible_label or not unreproducible_label:
    return

  # Make sure that this issue is not already marked Unreproducible.
  if unreproducible_label in issue.labels:
    return

  issue.labels.remove(reproducible_label)
  issue.labels.add(unreproducible_label)

  comment = ('ClusterFuzz testcase {testcase_id} appears to be flaky, '
             'updating reproducibility {label_type}.'.format(
                 testcase_id=testcase.key.id(),
                 label_type=issue.issue_tracker.label_type))
  issue.save(new_comment=comment)
def _allowed_users_for_entity(name, entity_kind, auto_cc=None):
  """Return a list of users that have permissions for the given entity.

  Args:
    name: The name of the entity.
    entity_kind: The type (data_types.PermissionEntityKind) of the entity.
    auto_cc: The Auto CC type (data_types.AutoCCType) to filter on, or None.

  Returns:
    A list of user emails that have permission to access the given entity.
  """
  if not name:
    return []

  # Easy case: direct matches.
  direct_match_permissions = data_types.ExternalUserPermission.query(
      data_types.ExternalUserPermission.entity_kind == entity_kind,
      data_types.ExternalUserPermission.entity_name == name,
      ndb_utils.is_false(data_types.ExternalUserPermission.is_prefix),
      projection=[data_types.ExternalUserPermission.email])
  if auto_cc is not None:
    direct_match_permissions = direct_match_permissions.filter(
        data_types.ExternalUserPermission.auto_cc == auto_cc)

  allowed_users = [permission.email for permission in direct_match_permissions]

  # Find all permissions where the prefix matches the fuzzer_name.
  # Unfortunately, Datastore doesn't give us an easy way of doing so. To
  # iterate through a smaller set than every single permission, get all
  # permissions whose prefix string is <= the actual fuzzer name and >= its
  # first character.
  prefix_match_permissions = data_types.ExternalUserPermission.query(
      data_types.ExternalUserPermission.entity_kind == entity_kind,
      data_types.ExternalUserPermission.entity_name <= name,
      data_types.ExternalUserPermission.entity_name >= name[0],
      ndb_utils.is_true(data_types.ExternalUserPermission.is_prefix),
      projection=[
          data_types.ExternalUserPermission.email,
          data_types.ExternalUserPermission.entity_name,
      ])
  if auto_cc is not None:
    prefix_match_permissions = prefix_match_permissions.filter(
        data_types.ExternalUserPermission.auto_cc == auto_cc)

  for permission in prefix_match_permissions:
    if not permission.entity_name:
      # No external user should have an empty prefix (access to all
      # fuzzers/jobs).
      continue

    if name.startswith(permission.entity_name):
      allowed_users.append(permission.email)

  return sorted(allowed_users)
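# Illustrative sketch, not part of the original module: the prefix query above
# bounds entity_name with name[0] <= entity_name <= name to shrink the
# candidate set before the definitive startswith() check. Demonstrated here on
# plain strings with hypothetical prefix data.
def _example_prefix_range_filter():
  """Hypothetical demo of the prefix range-query trick."""
  prefixes = ['afl_', 'libfuzzer_', 'libfuzzer_asan_', 'z_other']
  name = 'libfuzzer_asan_zlib'
  # Same bounds as the Datastore filters above; this alone is not exact.
  candidates = [p for p in prefixes if name[0] <= p <= name]
  # startswith() makes the match exact.
  matches = [p for p in candidates if name.startswith(p)]
  return matches  # ['libfuzzer_', 'libfuzzer_asan_']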
def get_open_testcase_id_iterator():
  """Get an iterator for open testcase ids."""
  keys = ndb_utils.get_all_from_query(
      data_types.Testcase.query(
          ndb_utils.is_true(data_types.Testcase.open),
          data_types.Testcase.status == 'Processed'),
      keys_only=True,
      batch_size=data_types.TESTCASE_ENTITY_QUERY_LIMIT)
  for key in keys:
    yield key.id()
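# Illustrative usage, not part of the original module: the generator above
# yields plain integer ids lazily, so callers can fetch entities one at a time
# instead of holding every open testcase in memory.
def _example_process_open_testcases():
  """Hypothetical demo caller for get_open_testcase_id_iterator()."""
  for testcase_id in get_open_testcase_id_iterator():
    testcase = data_handler.get_testcase_by_id(testcase_id)
    logs.log('Processing open testcase %d.' % testcase.key.id())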
def _associate_testcase_with_existing_issue_if_needed(testcase,
                                                      similar_testcase, issue):
  """Associate an interesting testcase with an existing issue which is already
  associated with an uninteresting testcase of similar crash signature if:

  1. The current testcase is interesting as it is:
     a. Fully reproducible AND
     b. No other reproducible testcase is open and attached to issue.
  2. The similar testcase attached to the existing issue is uninteresting as
     it is:
     a. Either unreproducible (but filed since it occurs frequently) OR
     b. Closed due to flakiness, but the developer has re-opened the issue."""
  # Don't change existing bug mapping if any.
  if testcase.bug_information:
    return

  # If this testcase is not reproducible, no need to update bug mapping.
  if testcase.one_time_crasher_flag:
    return

  # If another reproducible testcase is open and attached to this issue, then
  # no need to update bug mapping.
  if data_types.Testcase.query(
      data_types.Testcase.bug_information == str(issue.id),
      ndb_utils.is_true(data_types.Testcase.open),
      ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get():
    return

  # If the similar testcase is reproducible, make sure that it was not closed
  # recently. If it was, we might not have verified this testcase's fix yet,
  # so give this testcase time to close as well.
  if not similar_testcase.open and not similar_testcase.one_time_crasher_flag:
    closed_time = similar_testcase.get_metadata('closed_time')
    if not closed_time:
      return
    if not dates.time_has_expired(
        closed_time, hours=data_types.MIN_ELAPSED_TIME_SINCE_FIXED):
      return

  testcase_id = testcase.key.id()
  report_url = data_handler.TESTCASE_REPORT_URL.format(
      domain=data_handler.get_domain(), testcase_id=testcase_id)
  comment = ('ClusterFuzz found another reproducible variant for this '
             'bug on {job_type} job: {report_url}.').format(
                 job_type=testcase.job_type, report_url=report_url)
  issue.save(new_comment=comment, notify=True)

  testcase = data_handler.get_testcase_by_id(testcase_id)
  testcase.bug_information = str(issue.id)
  testcase.group_bug_information = 0
  testcase.put()
def find_testcase(project_name,
                  crash_type,
                  crash_state,
                  security_flag,
                  testcase_to_exclude=None):
  """Find an open test case matching certain parameters."""
  # Prepare the query.
  query = data_types.Testcase.query(
      data_types.Testcase.project_name == project_name,
      data_types.Testcase.crash_type == crash_type,
      data_types.Testcase.crash_state == crash_state,
      data_types.Testcase.security_flag == security_flag,
      data_types.Testcase.status == 'Processed',
      ndb_utils.is_true(data_types.Testcase.open))

  # Return any open (not fixed) test cases if they exist.
  testcases = ndb_utils.get_all_from_query(query)
  testcase = None
  testcase_quality = -1
  for current_testcase in testcases:
    if (testcase_to_exclude and
        current_testcase.key.id() == testcase_to_exclude.key.id()):
      continue
    if current_testcase.duplicate_of:
      continue

    # Replace the current test case in various situations where we have found
    # a better one to use. Testcase quality is based on the following factors:
    # - Is this test case reproducible? Reproducible tests are preferred.
    # - Is there a bug for this? We prefer showing tests with bugs to point
    #   users to existing bugs.
    # - Is this test case minimized? Minimization confirms that the testcase
    #   is reproducible and more usable for reproduction.
    current_testcase_quality = 0
    if not current_testcase.one_time_crasher_flag:
      current_testcase_quality |= 2**2
    if current_testcase.bug_information:
      current_testcase_quality |= 2**1
    if current_testcase.minimized_keys:
      current_testcase_quality |= 2**0

    if current_testcase_quality > testcase_quality:
      testcase = current_testcase
      testcase_quality = current_testcase_quality

    if testcase_quality == MAX_TESTCASE_QUALITY:
      # Already found the best testcase possible, no more work to do. Bail
      # out.
      break

  return testcase
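# Illustrative sketch, not part of the original module: the bit weights used
# by find_testcase above. Reproducibility (4) dominates having a bug (2),
# which dominates being minimized (1); a testcase with all three scores 7,
# the assumed value of MAX_TESTCASE_QUALITY.
def _example_testcase_quality(reproducible, has_bug, minimized):
  """Hypothetical helper mirroring find_testcase's quality scoring."""
  quality = 0
  if reproducible:
    quality |= 2**2
  if has_bug:
    quality |= 2**1
  if minimized:
    quality |= 2**0
  return quality
# e.g. _example_testcase_quality(True, True, True) == 7 (best possible), while
# an unreproducible but filed and minimized testcase only scores 3.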
def get(self):
  """Handle a GET request."""
  assert self.task

  # Create new tasks for the open reproducible test cases.
  for status in ['Processed', 'Duplicate']:
    testcases = data_types.Testcase.query(
        ndb_utils.is_true(data_types.Testcase.open),
        ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag),
        data_types.Testcase.status == status)

    for testcase in testcases:
      try:
        tasks.add_task(
            self.task,
            testcase.key.id(),
            testcase.job_type,
            queue=tasks.queue_for_testcase(testcase))
      except Exception:
        logs.log_error('Failed to add task.')
        continue
def mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
    testcase, issue):
  """Closes an unreproducible testcase and its associated issue after a
  certain time period."""
  # If the testcase is already closed, no more work to do.
  if not testcase.open:
    return

  # Check testcase status, so as to skip unreproducible uploads.
  if testcase.status not in ['Processed', 'Duplicate']:
    return

  # Make sure that this testcase is an unreproducible bug. If not, bail out.
  if not testcase.one_time_crasher_flag:
    return

  # Make sure that this testcase has an associated bug. If not, bail out.
  if not testcase.bug_information:
    return

  # If this testcase was manually uploaded, don't change issue state as our
  # reproduction result might be incorrect.
  if testcase.uploader_email:
    return

  # Make sure that there is an associated bug and it is in open state.
  if not issue or not issue.open:
    return

  # Check if any reproducible open testcases are associated with this bug.
  # If yes, return.
  similar_testcase = data_types.Testcase.query(
      data_types.Testcase.bug_information == testcase.bug_information,
      ndb_utils.is_true(data_types.Testcase.open),
      ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get()
  if similar_testcase:
    return

  # Make sure that the testcase is at least older than
  # |UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE|, otherwise it will be seen in
  # crash stats anyway.
  if (testcase.timestamp and not dates.time_has_expired(
      testcase.timestamp,
      days=data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE)):
    return

  # Handle testcase that turned from reproducible to unreproducible. Account
  # for the recent progression task run time.
  last_tested_crash_time = testcase.get_metadata('last_tested_crash_time')
  if (last_tested_crash_time and not dates.time_has_expired(
      last_tested_crash_time,
      days=data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE)):
    return

  # Make sure that there is no crash seen in the deadline period.
  if get_crash_occurrence_platforms(
      testcase, data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE):
    return

  # As a last check, do the expensive call of actually checking all issue
  # comments to make sure we didn't get called out on issue mistriage.
  if issue.has_comment_with_label(data_types.ISSUE_MISTRIAGED_LABEL):
    return

  # Close associated issue and testcase.
  comment = ('ClusterFuzz testcase %d is flaky and no longer crashes, '
             'so closing issue.' % testcase.key.id())
  if utils.is_oss_fuzz():
    comment += OSS_FUZZ_INCORRECT_COMMENT
  else:
    comment += INTERNAL_INCORRECT_COMMENT
  comment += ' and re-open the issue.'

  issue.comment = comment
  issue.status = 'WontFix'
  issue.open = False
  issue.save(send_email=True)

  testcase.fixed = 'NA'
  testcase.open = False
  testcase.put()

  logs.log('Closed unreproducible testcase %d and associated issue.' %
           testcase.key.id())
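# Illustrative sketch, not part of the original module: the deadline checks
# above, assuming dates.time_has_expired() compares a stored timestamp against
# the current UTC time. This is a hypothetical reimplementation for
# illustration, not the actual dates API.
import datetime


def _example_time_has_expired(timestamp, days=0, hours=0):
  """Hypothetical stand-in for dates.time_has_expired()."""
  elapsed = datetime.datetime.utcnow() - timestamp
  return elapsed > datetime.timedelta(days=days, hours=hours)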
def mark_issue_as_closed_if_testcase_is_fixed(testcase, issue):
  """Mark an issue as fixed if all of its associated reproducible testcases
  are fixed."""
  # If there is no associated issue, then bail out.
  if not issue or not testcase.bug_information:
    return

  # If the issue is closed in a status other than Fixed, like Duplicate,
  # WontFix or Archived, we shouldn't change it. Bail out.
  if not issue.open and issue.status != 'Fixed':
    return

  # Check testcase status, so as to skip unreproducible uploads.
  if testcase.status not in ['Processed', 'Duplicate']:
    return

  # If the testcase is still open, no work needs to be done. Bail out.
  if testcase.open:
    return

  # FIXME: Find a better solution to skip over reproducible tests that are now
  # showing up as flaky (especially when we are unable to reproduce the crash
  # in the original crash revision).
  if testcase.fixed == 'NA':
    return

  # We can only verify fixed issues for reproducible testcases. If the
  # testcase is unreproducible, bail out. Exception is if we explicitly marked
  # this as fixed.
  if testcase.one_time_crasher_flag and testcase.fixed != 'Yes':
    return

  # Make sure that no other testcases associated with this issue are open.
  similar_testcase = data_types.Testcase.query(
      data_types.Testcase.bug_information == testcase.bug_information,
      ndb_utils.is_true(data_types.Testcase.open),
      ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get()
  if similar_testcase:
    return

  # As a last check, do the expensive call of actually checking all issue
  # comments to make sure we didn't do the verification already and we didn't
  # get called out on issue mistriage.
  if (issue.has_comment_with_label(data_types.ISSUE_VERIFIED_LABEL) or
      issue.has_comment_with_label(data_types.ISSUE_MISTRIAGED_LABEL)):
    return

  issue.add_label(data_types.ISSUE_VERIFIED_LABEL)
  comment = ('ClusterFuzz testcase %d is verified as fixed, '
             'so closing issue as verified.' % testcase.key.id())
  if utils.is_oss_fuzz():
    comment += OSS_FUZZ_INCORRECT_COMMENT
  else:
    comment += INTERNAL_INCORRECT_COMMENT
  comment += ' and re-open the issue.'

  issue.comment = comment
  issue.status = 'Verified'
  issue.open = False
  issue.save(send_email=True)

  logs.log('Closed issue %d for fixed testcase %d.' %
           (issue.id, testcase.key.id()))
def mark_issue_as_closed_if_testcase_is_fixed(policy, testcase, issue):
  """Mark an issue as fixed if all of its associated reproducible testcases
  are fixed."""
  verified_label = policy.label('verified')
  if not verified_label:
    return

  # If there is no associated issue, then bail out.
  if not issue or not testcase.bug_information:
    return

  # If the issue is closed in a status other than Fixed, like Duplicate,
  # WontFix or Archived, we shouldn't change it. Bail out.
  if not issue.is_open and issue.status != policy.status('fixed'):
    return

  # Check testcase status, so as to skip unreproducible uploads.
  if testcase.status not in ['Processed', 'Duplicate']:
    return

  # If the testcase is still open, no work needs to be done. Bail out.
  if testcase.open:
    return

  # FIXME: Find a better solution to skip over reproducible tests that are now
  # showing up as flaky (especially when we are unable to reproduce the crash
  # in the original crash revision).
  if testcase.fixed == 'NA':
    return

  # We can only verify fixed issues for reproducible testcases. If the
  # testcase is unreproducible, bail out. Exception is if we explicitly marked
  # this as fixed.
  if testcase.one_time_crasher_flag and testcase.fixed != 'Yes':
    return

  # Make sure that no other testcases associated with this issue are open.
  similar_testcase = data_types.Testcase.query(
      data_types.Testcase.bug_information == testcase.bug_information,
      ndb_utils.is_true(data_types.Testcase.open),
      ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get()
  if similar_testcase:
    return

  # As a last check, do the expensive call of actually checking all issue
  # comments to make sure we didn't do the verification already and we didn't
  # get called out on issue mistriage.
  if (issue_tracker_utils.was_label_added(issue, verified_label) or
      issue_tracker_utils.was_label_added(issue, policy.label('wrong'))):
    return

  issue.labels.add(verified_label)
  comment = 'ClusterFuzz testcase %d is verified as fixed' % testcase.key.id()

  fixed_range_url = data_handler.get_fixed_range_url(testcase)
  if fixed_range_url:
    comment += ' in ' + fixed_range_url
  else:
    comment += '.'

  if utils.is_oss_fuzz():
    comment += OSS_FUZZ_INCORRECT_COMMENT
  else:
    comment = _append_generic_incorrect_comment(comment, policy, issue,
                                                ' and re-open the issue.')

  skip_auto_close = data_handler.get_value_from_job_definition(
      testcase.job_type, 'SKIP_AUTO_CLOSE_ISSUE')
  if not skip_auto_close:
    issue.status = policy.status('verified')

  issue.save(new_comment=comment, notify=True)
  logs.log('Mark issue %d as verified for fixed testcase %d.' %
           (issue.id, testcase.key.id()))