def can_user_access_testcase(testcase):
  """Checks if the current user can access the testcase.

  Access is granted if the user has fuzzer/job access, uploaded the
  testcase, or is associated with the issue linked to the testcase
  (owner always; CC/reporter too under relaxed restrictions).
  """
  if has_access(
      fuzzer_name=testcase.fuzzer_name,
      job_type=testcase.job_type,
      need_privileged_access=testcase.security_flag):
    return True

  user_email = helpers.get_user_email()
  if testcase.uploader_email and testcase.uploader_email == user_email:
    return True

  # Allow owners of bugs to see associated test cases and test case groups.
  issue_id = testcase.bug_information or testcase.group_bug_information
  if not issue_id:
    return False

  itm = issue_tracker_utils.get_issue_tracker_manager(testcase)
  # Guard against projects without a configured issue tracker; other callers
  # of get_issue_tracker_manager in this file check for a None return too.
  if not itm:
    return False

  issue = itm.get_issue(int(issue_id))
  if not issue:
    return False

  config = db_config.get()
  if config.relax_testcase_restrictions or _is_domain_allowed(user_email):
    # Relaxed mode: CC, owner or reporter on the issue is sufficient.
    return (any(utils.emails_equal(user_email, cc) for cc in issue.cc) or
            utils.emails_equal(user_email, issue.owner) or
            utils.emails_equal(user_email, issue.reporter))

  # Strict mode: only the issue owner gets access.
  return utils.emails_equal(user_email, issue.owner)
def get_issue_tracker_manager(testcase):
  """Return the issue tracker manager for |testcase|.

  Raises:
    EarlyExitException: if the testcase has no associated issue tracker.
  """
  manager = issue_tracker_utils.get_issue_tracker_manager(testcase)
  if manager:
    return manager
  raise EarlyExitException(
      "The testcase doesn't have a corresponding issue tracker", 404)
def _add_issue_comment(testcase, comment):
  """Add a comment to the bug associated with a test case.

  No-op when the testcase has no bug, no issue tracker is available, or the
  referenced issue cannot be fetched.
  """
  if not testcase.bug_information:
    return

  # Populate the full message.
  report = data_handler.get_issue_description(testcase).rstrip('\n')
  full_comment = '%s\n\n%s\n\n%s' % (comment, report, FIXED_REPORT_FOOTER)

  # Update the issue.
  issue_tracker_manager = (
      issue_tracker_utils.get_issue_tracker_manager(testcase))
  # Guard against a missing issue tracker (other callers in this file check
  # for a None return as well).
  if not issue_tracker_manager:
    return

  issue = issue_tracker_manager.get_issue(int(testcase.bug_information))
  # The referenced issue may no longer exist; avoid an attribute error.
  if not issue:
    return

  issue.comment = full_comment
  issue_tracker_manager.save(issue, send_email=True)
def get_issue_for_testcase(testcase):
  """Return issue associated with the testcase (if any)."""
  if not testcase.bug_information:
    return None

  issue_id = int(testcase.bug_information)
  itm = issue_tracker_utils.get_issue_tracker_manager(testcase)
  # Guard against projects with no configured issue tracker (sibling helpers
  # in this file check for a None return as well).
  if not itm:
    return None

  try:
    issue = itm.get_issue(issue_id)
  except Exception:
    # Best effort: treat fetch errors as "no issue" rather than failing.
    logs.log_error('Unable to query issue %d.' % issue_id)
    return None

  return issue
def _process_failures(self, build_status, build_type):
  """Process build failures for each project.

  Tracks consecutive failures per (project, build_type); once the count
  reaches MIN_CONSECUTIVE_BUILD_FAILURES, files a bug, and then sends
  periodic reminders every REMINDER_INTERVAL further failures.

  Args:
    build_status: dict containing a 'failures' list of per-build dicts
      (each with at least 'name' and 'build_id' keys).
    build_type: the build configuration these failures belong to.

  Raises:
    OssFuzzBuildStatusException: if no issue tracker manager is available.
  """
  itm = issue_tracker_utils.get_issue_tracker_manager()
  if not itm:
    raise OssFuzzBuildStatusException('Failed to get issue tracker.')

  for build in build_status['failures']:
    project_name = build['name']

    # Do not file an issue for non-main build types, if there is a main build
    # failure for the same project, as the root cause might be the same.
    if build_type != MAIN_BUILD_TYPE:
      build_failure = get_build_failure(project_name, MAIN_BUILD_TYPE)
      if build_failure:
        continue

    build_failure = get_build_failure(project_name, build_type)

    build_time = get_build_time(build)
    if build_failure:
      if build_time <= build_failure.last_checked_timestamp:
        # No updates since the last time this failure was processed.
        continue
    else:
      # First observed failure for this project/build_type pair.
      build_failure = create_build_failure(build, build_type)

    build_failure.last_checked_timestamp = build_time
    build_failure.consecutive_failures += 1
    if build_failure.consecutive_failures >= MIN_CONSECUTIVE_BUILD_FAILURES:
      if build_failure.issue_id is None:
        oss_fuzz_project = _get_oss_fuzz_project(project_name)
        if not oss_fuzz_project:
          # Disabled projects should not get bugs filed.
          logs.log(
              'Project %s is disabled, skipping bug filing.' % project_name)
          continue

        build_failure.issue_id = file_bug(itm, project_name, build['build_id'],
                                          oss_fuzz_project.ccs, build_type)
      elif (build_failure.consecutive_failures -
            MIN_CONSECUTIVE_BUILD_FAILURES) % REMINDER_INTERVAL == 0:
        # Bug already filed; nudge it every REMINDER_INTERVAL failures.
        send_reminder(itm, build_failure.issue_id, build['build_id'])

    build_failure.put()
def can_user_access_testcase(testcase):
  """Checks if the current user can access the testcase.

  Access is granted if the user has fuzzer/job access, uploaded the
  testcase, or is associated with the issue (or its original issue, if the
  associated one is a duplicate) linked to the testcase.
  """
  config = db_config.get()
  need_privileged_access = (
      testcase.security_flag and not config.relax_security_bug_restrictions)

  if has_access(
      fuzzer_name=testcase.fuzzer_name,
      job_type=testcase.job_type,
      need_privileged_access=need_privileged_access):
    return True

  user_email = helpers.get_user_email()
  if testcase.uploader_email and testcase.uploader_email == user_email:
    return True

  # Allow owners of bugs to see associated test cases and test case groups.
  issue_id = testcase.bug_information or testcase.group_bug_information
  if not issue_id:
    return False

  itm = issue_tracker_utils.get_issue_tracker_manager(testcase)
  # Guard against projects without a configured issue tracker; other callers
  # of get_issue_tracker_manager in this file check for a None return too.
  if not itm:
    return False

  issue_id = int(issue_id)
  associated_issue = itm.get_issue(issue_id)
  if not associated_issue:
    return False

  # Look at both associated issue and original issue (if the associated one
  # is a duplicate of the original issue).
  issues_to_check = [associated_issue]
  if associated_issue.merged_into:
    original_issue = itm.get_original_issue(issue_id)
    if original_issue:
      issues_to_check.append(original_issue)

  relaxed_restrictions = (
      config.relax_testcase_restrictions or _is_domain_allowed(user_email))
  for issue in issues_to_check:
    if relaxed_restrictions:
      # Relaxed mode: CC, owner or reporter on the issue is sufficient.
      if (any(utils.emails_equal(user_email, cc) for cc in issue.cc) or
          utils.emails_equal(user_email, issue.owner) or
          utils.emails_equal(user_email, issue.reporter)):
        return True
    elif utils.emails_equal(user_email, issue.owner):
      # Strict mode: only the issue owner gets access.
      return True

  return False
def get(self):
  """Handle a cron job: CC external users on open issues for their jobs."""

  @memoize.wrap(memoize.FifoInMemory(256))
  def cc_users_for_job(job_type, security_flag):
    """Return users to CC for a job."""
    # Memoized per cron run.
    return external_users.cc_users_for_job(job_type, security_flag)

  for testcase in get_open_testcases_with_bugs():
    issue_tracker_manager = issue_tracker_utils.get_issue_tracker_manager(
        testcase=testcase, use_cache=True)
    if not issue_tracker_manager:
      logging.error('Failed to get issue tracker manager for %s',
                    testcase.key.id())
      continue

    try:
      issue_id = int(testcase.bug_information)
      issue = issue_tracker_manager.get_original_issue(issue_id)
    except Exception:
      # Narrowed from a bare except: still broad so one bad issue fetch does
      # not abort the whole cron run, but no longer swallows SystemExit or
      # KeyboardInterrupt.
      logging.error('Error occurred when fetching issue %s.',
                    testcase.bug_information)
      continue

    if not issue or not issue.open:
      continue

    ccs = cc_users_for_job(testcase.job_type, testcase.security_flag)
    new_ccs = [cc for cc in ccs if not issue.has_cc(cc)]
    if not new_ccs:
      # Nothing to do.
      continue

    for cc in new_ccs:
      logging.info('CCing %s on %s', cc, issue.id)
      issue.add_cc(cc)

    if not issue.has_label_containing('reported-'):
      # Add reported label and deadline comment if necessary.
      issue.add_label(issue_filer.reported_label())

      if issue.has_label_matching('Restrict-View-Commit'):
        logging.info('Adding deadline comment on %s', issue.id)
        issue.comment = issue_filer.DEADLINE_NOTE

    issue.save(send_email=True)
def get_issue_for_testcase(testcase):
  """Return issue object associated with testcase."""
  if not testcase.bug_information:
    return None

  issue_tracker_manager = issue_tracker_utils.get_issue_tracker_manager(
      testcase=testcase, use_cache=True)
  if not issue_tracker_manager:
    return None

  try:
    issue_id = int(testcase.bug_information)
    issue = issue_tracker_manager.get_original_issue(issue_id)
  except Exception:
    # Narrowed from a bare except: treat any fetch/parse error as
    # "issue unavailable" without swallowing SystemExit/KeyboardInterrupt.
    logs.log_error('Error occurred when fetching issue %s.' %
                   testcase.bug_information)
    return None

  return issue
def handle_public_testcase(self, blob_info, testcase, fuzzer_binary_name):
  """Handle public testcase.

  Returns True (after sending the blob) only when the blob is the minimized
  testcase, a bug exists, and its issue is not view-restricted.
  """
  if blob_info.key() != testcase.minimized_keys:
    return False

  if not testcase.bug_information:
    return False

  itm = issue_tracker_utils.get_issue_tracker_manager(testcase)
  # Guard against a missing issue tracker (other callers in this file check
  # for a None return as well).
  if not itm:
    return False

  issue = itm.get_issue(int(testcase.bug_information))
  if not issue:
    return False

  # If the issue is explicitly marked as view restricted to committers only
  # (OSS-Fuzz only), then don't allow public download.
  if issue.has_label('restrict-view-commit'):
    return False

  self._send_blob(
      blob_info,
      testcase.key.id(),
      is_minimized=True,
      fuzzer_binary_name=fuzzer_binary_name)
  return True
def _close_fixed_builds(self, build_status, build_type):
  """Close bugs for fixed builds.

  For every successful build with an outstanding failure record, close the
  associated bug (if one was filed) and clear the failure record, provided
  the success is strictly newer than the last recorded failure.
  """
  tracker = issue_tracker_utils.get_issue_tracker_manager()
  if not tracker:
    raise OssFuzzBuildStatusException('Failed to get issue tracker.')

  for success in build_status['successes']:
    name = success['name']
    failure = get_build_failure(name, build_type)
    if not failure:
      continue

    success_time = get_build_time(success)
    if failure.last_checked_timestamp >= success_time:
      # Stale success: it predates (or ties) the last observed failure.
      logs.log_error(
          'Latest successful build time for %s in %s config is '
          'older than or equal to last failure time.' % (name, build_type))
      continue

    if failure.issue_id is not None:
      close_bug(tracker, failure.issue_id, name)

    close_build_failure(failure)
def group_testcases():
  """Group testcases based on rules like same bug numbers, similar crash
  states, etc.

  First pass collects attributes and the original issue id for each open
  testcase (deleting obvious duplicates on the way), then the grouping
  helpers assign group ids, and a final pass writes any changed group
  mapping back to the datastore.
  """
  testcase_map = {}
  cached_issue_map = {}

  for testcase_id in data_handler.get_open_testcase_id_iterator():
    try:
      testcase = data_handler.get_testcase_by_id(testcase_id)
    except errors.InvalidTestcaseError:
      # Already deleted.
      continue

    # Remove duplicates early on to avoid large groups.
    if (not testcase.bug_information and not testcase.uploader_email and
        has_testcase_with_same_params(testcase, testcase_map)):
      logs.log('Deleting duplicate testcase %d.' % testcase_id)
      testcase.key.delete()
      continue

    # Store needed testcase attributes into |testcase_map|.
    testcase_map[testcase_id] = TestcaseAttributes()
    testcase_attributes = testcase_map[testcase_id]
    for attribute_name in FORWARDED_ATTRIBUTES:
      setattr(testcase_attributes, attribute_name,
              getattr(testcase, attribute_name))

    # Store original issue mappings in the testcase attributes.
    if testcase.bug_information:
      issue_id = int(testcase.bug_information)
      project_name = testcase.project_name

      if (project_name in cached_issue_map and
          issue_id in cached_issue_map[project_name]):
        testcase_attributes.issue_id = (
            cached_issue_map[project_name][issue_id])
      else:
        issue_tracker_manager = issue_tracker_utils.get_issue_tracker_manager(
            testcase=testcase, use_cache=True)
        if not issue_tracker_manager:
          continue

        # Determine the original issue id traversing the list of duplicates.
        try:
          issue = issue_tracker_manager.get_original_issue(issue_id)
          original_issue_id = issue.id
        except Exception:
          # Narrowed from a bare except. If we are unable to access the
          # issue, then we can't determine the original issue id. Assume
          # that it is the same as issue id.
          logs.log_error(
              'Unable to determine original issue for %d.' % issue_id)
          original_issue_id = issue_id

        # Cache the mapping for both the duplicate and the original id, so
        # later testcases in the same project skip the issue tracker lookup.
        if project_name not in cached_issue_map:
          cached_issue_map[project_name] = {}
        cached_issue_map[project_name][issue_id] = original_issue_id
        cached_issue_map[project_name][original_issue_id] = original_issue_id
        testcase_attributes.issue_id = original_issue_id

  # No longer needed. Free up some memory.
  cached_issue_map.clear()

  group_testcases_with_similar_states(testcase_map)
  group_testcases_with_same_issues(testcase_map)
  group_leader.choose(testcase_map)

  # TODO(aarya): Replace with an optimized implementation using dirty flag.
  # Update the group mapping in testcase object.
  for testcase_id in data_handler.get_open_testcase_id_iterator():
    if testcase_id not in testcase_map:
      # A new testcase that was just created. Skip for now, will be grouped
      # in next iteration of group task.
      continue

    # If we are part of a group, then calculate the number of testcases in
    # that group and lowest issue id of issues associated with testcases in
    # that group.
    updated_group_id = testcase_map[testcase_id].group_id
    updated_is_leader = testcase_map[testcase_id].is_leader
    updated_group_id_count = 0
    updated_group_bug_information = 0
    if updated_group_id:
      for other_testcase in six.itervalues(testcase_map):
        if other_testcase.group_id != updated_group_id:
          continue
        updated_group_id_count += 1

        # Update group issue id to be lowest issue id in the entire group.
        if other_testcase.issue_id is None:
          continue
        if (not updated_group_bug_information or
            updated_group_bug_information > other_testcase.issue_id):
          updated_group_bug_information = other_testcase.issue_id

    # If this group id is used by only one testcase, then remove it.
    if updated_group_id_count == 1:
      data_handler.delete_group(updated_group_id, update_testcases=False)
      updated_group_id = 0
      updated_group_bug_information = 0
      updated_is_leader = True

    # If this group has more than the maximum allowed testcases, log an error
    # so that the sheriff can later debug what caused this. Usually, this is
    # a bug in grouping logic OR a ever changing crash signature (e.g.
    # slightly different crash types or crash states). We cannot bail out as
    # otherwise, we will not group the testcase leading to a spam of new
    # filed bugs.
    if updated_group_id_count > GROUP_MAX_TESTCASE_LIMIT:
      logs.log_error(
          'Group %d exceeds maximum allowed testcases.' % updated_group_id)

    try:
      testcase = data_handler.get_testcase_by_id(testcase_id)
    except errors.InvalidTestcaseError:
      # Already deleted.
      continue

    is_changed = (
        (testcase.group_id != updated_group_id) or
        (testcase.group_bug_information != updated_group_bug_information) or
        (testcase.is_leader != updated_is_leader))
    if not is_changed:
      # If nothing is changed, no more work to do. It's faster this way.
      continue

    testcase.group_bug_information = updated_group_bug_information
    testcase.group_id = updated_group_id
    testcase.is_leader = updated_is_leader
    testcase.put()
    logs.log(
        'Updated testcase %d group to %d.' % (testcase_id, updated_group_id))
def get(self):
  """Handle a get request: triage open testcases and file bugs for them."""
  try:
    grouper.group_testcases()
  except Exception:
    # Narrowed from a bare except. Grouping failure means group state is
    # unreliable; bail out rather than filing bugs against it.
    logs.log_error('Error occurred while grouping test cases.')
    return

  # Free up memory after group task run.
  utils.python_gc()

  # Get list of jobs excluded from bug filing.
  excluded_jobs = get_excluded_jobs()

  for testcase_id in data_handler.get_open_testcase_id_iterator():
    try:
      testcase = data_handler.get_testcase_by_id(testcase_id)
    except errors.InvalidTestcaseError:
      # Already deleted.
      continue

    # Skip if testcase's job type is in exclusions list.
    if testcase.job_type in excluded_jobs:
      continue

    # If the testcase has a bug filed already, no triage is needed.
    if is_bug_filed(testcase):
      continue

    # Check if the crash is important, i.e. it is either a reproducible
    # crash or an unreproducible crash happening frequently.
    if not is_crash_important(testcase):
      continue

    # Require that all tasks like minimization, regression testing, etc have
    # finished.
    if not data_handler.critical_tasks_completed(testcase):
      continue

    # For testcases that are not part of a group, wait an additional time
    # till group task completes.
    # FIXME: In future, grouping might be dependent on regression range, so
    # we would have to add an additional wait time.
    if not testcase.group_id and not dates.time_has_expired(
        testcase.timestamp, hours=data_types.MIN_ELAPSED_TIME_SINCE_REPORT):
      continue

    # If this project does not have an associated issue tracker, we cannot
    # file this crash anywhere.
    issue_tracker_manager = issue_tracker_utils.get_issue_tracker_manager(
        testcase, use_cache=True)
    if not issue_tracker_manager:
      continue

    # If there are similar issues to this test case already filed or
    # recently closed, skip filing a duplicate bug.
    if is_similar_bug_open_or_recently_closed(testcase,
                                              issue_tracker_manager):
      continue

    # File the bug first and then create filed bug metadata.
    issue_filer.file_issue(testcase, issue_tracker_manager)
    create_filed_bug_metadata(testcase)
    logs.log('Filed new issue %s for testcase %d.' %
             (testcase.bug_information, testcase_id))