Example 1
def get_issue_tracker_for_testcase(testcase):
  """Get an IssueTracker or raise EarlyExitException."""
  issue_tracker = issue_tracker_utils.get_issue_tracker_for_testcase(testcase)
  if not issue_tracker:
    raise EarlyExitException(
        "The testcase doesn't have a corresponding issue tracker", 404)
  return issue_tracker
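
The helper above turns a missing tracker into an HTTP-style early exit. A minimal standalone sketch of the same pattern follows; EarlyExitException here is a hypothetical stand-in for the ClusterFuzz class of the same name, and get_or_abort is an illustrative generalization, not part of the real API.

class EarlyExitException(Exception):
  """Signals that request handling should stop with an HTTP status code."""

  def __init__(self, message, status_code):
    super().__init__(message)
    self.status_code = status_code


def get_or_abort(lookup, entity, message, status_code=404):
  """Return lookup(entity), raising EarlyExitException when the result is falsy."""
  result = lookup(entity)
  if not result:
    raise EarlyExitException(message, status_code)
  return result
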
Example 2
    def check_public_testcase(self, blob_info, testcase):
        """Check public testcase."""
        if blob_info.key() != testcase.minimized_keys:
            return False

        if not testcase.bug_information:
            return False

        issue_tracker = issue_tracker_utils.get_issue_tracker_for_testcase(
            testcase)
        issue = issue_tracker.get_issue(testcase.bug_information)
        if not issue:
            return False

        # If the issue is explicitly marked as view restricted to committers only
        # (OSS-Fuzz only), then don't allow public download.
        if 'restrict-view-commit' in issue.labels:
            return False

        # For OSS-Fuzz, delay the disclosure of the reproducer by 30 days.
        # If the deadline has already passed, the reproducer is public
        # already, so exclude that case.
        if (utils.is_oss_fuzz() and 'deadline-exceeded' not in issue.labels
                and issue.closed_time and not dates.time_has_expired(
                    issue.closed_time, days=_OSS_FUZZ_REPRODUCER_DELAY)):
            return False

        return True
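
The 30-day delay hinges on dates.time_has_expired(). A rough standalone equivalent using only the standard library, assuming naive UTC datetimes as ClusterFuzz stores them:

import datetime

_OSS_FUZZ_REPRODUCER_DELAY = 30  # Days, as in the example above.


def time_has_expired(timestamp, days=0):
  """Return True if |timestamp| is more than |days| days in the past."""
  delta = datetime.datetime.utcnow() - timestamp
  return delta > datetime.timedelta(days=days)


# An issue closed 40 days ago is past the disclosure delay.
closed_time = datetime.datetime.utcnow() - datetime.timedelta(days=40)
assert time_has_expired(closed_time, days=_OSS_FUZZ_REPRODUCER_DELAY)
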
Example 3
    def get(self):
        """Handle a cron job."""
        @memoize.wrap(memoize.FifoInMemory(256))
        def cc_users_for_job(job_type, security_flag):
            """Return users to CC for a job."""
            # Memoized per cron run.
            return external_users.cc_users_for_job(job_type, security_flag)

        for testcase in get_open_testcases_with_bugs():
            issue_tracker = issue_tracker_utils.get_issue_tracker_for_testcase(
                testcase)
            if not issue_tracker:
                logging.error('Failed to get issue tracker manager for %s',
                              testcase.key.id())
                continue

            policy = issue_tracker_policy.get(issue_tracker.project)
            reported_label = policy.label('reported')
            if not reported_label:
                return

            reported_pattern = issue_filer.get_label_pattern(reported_label)

            try:
                issue = issue_tracker.get_original_issue(
                    testcase.bug_information)
            except Exception:
                logging.error('Error occurred when fetching issue %s.',
                              testcase.bug_information)
                continue

            if not issue or not issue.is_open:
                continue

            ccs = cc_users_for_job(testcase.job_type, testcase.security_flag)
            new_ccs = [cc for cc in ccs if cc not in issue.ccs]
            if not new_ccs:
                # Nothing to do.
                continue

            for cc in new_ccs:
                logging.info('CCing %s on %s', cc, issue.id)
                issue.ccs.add(cc)

            comment = None

            if (not issue.labels.has_with_pattern(reported_pattern)
                    and not data_handler.get_value_from_job_definition(
                        testcase.job_type, 'DISABLE_DISCLOSURE', False)):
                # Add reported label and deadline comment if necessary.
                for result in issue_filer.apply_substitutions(
                        policy, reported_label, testcase):
                    issue.labels.add(result)

                if policy.label('restrict_view') in issue.labels:
                    logging.info('Adding deadline comment on %s', issue.id)
                    comment = policy.deadline_policy_message

            issue.save(new_comment=comment, notify=True)
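
The memoize.wrap(memoize.FifoInMemory(256)) decorator is ClusterFuzz-specific. A rough standalone analogue of per-run memoization can be built with functools.lru_cache, though the eviction policy differs (least-recently-used rather than FIFO); the lookup function below is a hypothetical stand-in for external_users.cc_users_for_job().

import functools


def cc_users_lookup(job_type, security_flag):
  """Hypothetical stand-in for the expensive external_users lookup."""
  return ('dev@example.com',) if security_flag else ()


def run_cron():
  # Defining the cached wrapper inside the handler scopes the cache to a
  # single cron run, like the decorated closure in the example above.
  cc_users_for_job = functools.lru_cache(maxsize=256)(cc_users_lookup)
  return cc_users_for_job('libfuzzer_asan_project', True)
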
Example 4
def can_user_access_testcase(testcase):
    """Checks if the current user can access the testcase."""
    config = db_config.get()
    need_privileged_access = (testcase.security_flag
                              and not config.relax_security_bug_restrictions)

    if has_access(fuzzer_name=testcase.actual_fuzzer_name(),
                  job_type=testcase.job_type,
                  need_privileged_access=need_privileged_access):
        return True

    user_email = helpers.get_user_email()
    if testcase.uploader_email and testcase.uploader_email == user_email:
        return True

    # Allow owners of bugs to see associated test cases and test case groups.
    issue_id = testcase.bug_information or testcase.group_bug_information
    if not issue_id:
        return False

    issue_tracker = issue_tracker_utils.get_issue_tracker_for_testcase(
        testcase)
    associated_issue = issue_tracker.get_issue(issue_id)
    if not associated_issue:
        return False

    # Look at both associated issue and original issue (if the associated one
    # is a duplicate of the original issue).
    issues_to_check = [associated_issue]
    if associated_issue.merged_into:
        original_issue = issue_tracker.get_original_issue(issue_id)
        if original_issue:
            issues_to_check.append(original_issue)

    relaxed_restrictions = (config.relax_testcase_restrictions
                            or _is_domain_allowed(user_email))
    for issue in issues_to_check:
        if relaxed_restrictions:
            if (any(utils.emails_equal(user_email, cc) for cc in issue.ccs)
                    or utils.emails_equal(user_email, issue.assignee)
                    or utils.emails_equal(user_email, issue.reporter)):
                return True

        elif utils.emails_equal(user_email, issue.assignee):
            return True

    return False
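
The per-issue check at the end can be isolated into a small standalone sketch. The Issue class and emails_equal() below are simplified stand-ins for the ClusterFuzz versions:

import dataclasses


def emails_equal(first, second):
  """Case-insensitive email comparison that tolerates missing values."""
  return bool(first) and bool(second) and first.lower() == second.lower()


@dataclasses.dataclass
class Issue:
  assignee: str = ''
  reporter: str = ''
  ccs: tuple = ()


def user_matches_issue(user_email, issue, relaxed):
  """Mirror of the per-issue access check in the loop above."""
  if relaxed:
    return (any(emails_equal(user_email, cc) for cc in issue.ccs)
            or emails_equal(user_email, issue.assignee)
            or emails_equal(user_email, issue.reporter))
  return emails_equal(user_email, issue.assignee)
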
Example 5
  def check_public_testcase(self, blob_info, testcase):
    """Check public testcase."""
    if blob_info.key() != testcase.minimized_keys:
      return False

    if not testcase.bug_information:
      return False

    issue_tracker = issue_tracker_utils.get_issue_tracker_for_testcase(testcase)
    issue = issue_tracker.get_issue(testcase.bug_information)
    if not issue:
      return False

    # If the issue is explicitly marked as view restricted to committers only
    # (OSS-Fuzz only), then don't allow public download.
    if 'restrict-view-commit' in issue.labels:
      return False

    return True
Example 6
    def handle_public_testcase(self, blob_info, testcase, fuzzer_binary_name):
        """Handle public testcase."""
        if blob_info.key() != testcase.minimized_keys:
            return False

        if not testcase.bug_information:
            return False

        issue_tracker = issue_tracker_utils.get_issue_tracker_for_testcase(
            testcase)
        issue = issue_tracker.get_issue(testcase.bug_information)
        if not issue:
            return False

        # If the issue is explicitly marked as view restricted to committers only
        # (OSS-Fuzz only), then don't allow public download.
        if 'restrict-view-commit' in issue.labels:
            return False

        self._send_blob(blob_info,
                        testcase.key.id(),
                        is_minimized=True,
                        fuzzer_binary_name=fuzzer_binary_name)
        return True
Example 7
    def get(self):
        """Handle a get request."""
        try:
            grouper.group_testcases()
        except Exception:
            logs.log_error('Error occurred while grouping test cases.')
            return

        # Free up memory after group task run.
        utils.python_gc()

        # Get a list of jobs excluded from bug filing.
        excluded_jobs = _get_excluded_jobs()

        # Get a list of all jobs. This is used to filter testcases whose jobs have
        # been removed.
        all_jobs = data_handler.get_all_job_type_names()

        for testcase_id in data_handler.get_open_testcase_id_iterator():
            try:
                testcase = data_handler.get_testcase_by_id(testcase_id)
            except errors.InvalidTestcaseError:
                # Already deleted.
                continue

            # Skip if testcase's job is removed.
            if testcase.job_type not in all_jobs:
                continue

            # Skip if testcase's job is in exclusions list.
            if testcase.job_type in excluded_jobs:
                continue

            # Skip if we are running progression task at this time.
            if testcase.get_metadata('progression_pending'):
                continue

            # If the testcase has a bug filed already, no triage is needed.
            if _is_bug_filed(testcase):
                continue

            # Check if the crash is important, i.e. it is either a reproducible crash
            # or an unreproducible crash happening frequently.
            if not _is_crash_important(testcase):
                continue

            # Require that all tasks like minimization, regression testing,
            # etc. have finished.
            if not data_handler.critical_tasks_completed(testcase):
                continue

            # For testcases that are not part of a group, wait additional time
            # until the group task completes.
            # FIXME: In the future, grouping might depend on the regression
            # range, so we would have to add additional wait time.
            if not testcase.group_id and not dates.time_has_expired(
                    testcase.timestamp,
                    hours=data_types.MIN_ELAPSED_TIME_SINCE_REPORT):
                continue

            # If this project does not have an associated issue tracker, we cannot
            # file this crash anywhere.
            issue_tracker = issue_tracker_utils.get_issue_tracker_for_testcase(
                testcase)
            if not issue_tracker:
                continue

            # If there are similar issues to this test case already filed or recently
            # closed, skip filing a duplicate bug.
            if _check_and_update_similar_bug(testcase, issue_tracker):
                continue

            # Clean up old triage messages that would be not applicable now.
            testcase.delete_metadata(TRIAGE_MESSAGE_KEY, update_testcase=False)

            # File the bug first and then create filed bug metadata.
            try:
                issue_filer.file_issue(testcase, issue_tracker)
            except Exception:
                logs.log_error('Failed to file issue for testcase %d.' %
                               testcase_id)
                continue

            _create_filed_bug_metadata(testcase)
            logs.log('Filed new issue %s for testcase %d.' %
                     (testcase.bug_information, testcase_id))
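
The handler above is essentially a chain of guard clauses. As a sketch under hypothetical attribute names (this is not the real ClusterFuzz API), the same gating can be written as a list of named predicates, which also makes the skip reason loggable:

def should_skip_triage(testcase, all_jobs, excluded_jobs):
  """Return the name of the first failing gate, or None to proceed."""
  gates = [
      ('job_removed', lambda t: t.job_type not in all_jobs),
      ('job_excluded', lambda t: t.job_type in excluded_jobs),
      ('progression_pending', lambda t: t.progression_pending),
      ('bug_already_filed', lambda t: bool(t.bug_information)),
  ]
  for name, predicate in gates:
    if predicate(testcase):
      return name
  return None
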
Example 8
def group_testcases():
  """Group testcases based on rules like same bug numbers, similar crash
  states, etc."""
  testcase_map = {}
  cached_issue_map = {}

  for testcase_id in data_handler.get_open_testcase_id_iterator():
    try:
      testcase = data_handler.get_testcase_by_id(testcase_id)
    except errors.InvalidTestcaseError:
      # Already deleted.
      continue

    # Remove duplicates early on to avoid large groups.
    if (not testcase.bug_information and not testcase.uploader_email and
        has_testcase_with_same_params(testcase, testcase_map)):
      logs.log('Deleting duplicate testcase %d.' % testcase_id)
      testcase.key.delete()
      continue

    # Wait for minimization to finish, as it might change crash parameters
    # such as the type and may mark the testcase as duplicate / closed.
    if not testcase.minimized_keys:
      continue

    # Store needed testcase attributes into |testcase_map|.
    testcase_map[testcase_id] = TestcaseAttributes()
    testcase_attributes = testcase_map[testcase_id]
    for attribute_name in FORWARDED_ATTRIBUTES:
      setattr(testcase_attributes, attribute_name,
              getattr(testcase, attribute_name))

    # Store original issue mappings in the testcase attributes.
    if testcase.bug_information:
      issue_id = int(testcase.bug_information)
      project_name = testcase.project_name

      if (project_name in cached_issue_map and
          issue_id in cached_issue_map[project_name]):
        testcase_attributes.issue_id = (
            cached_issue_map[project_name][issue_id])
      else:
        issue_tracker = issue_tracker_utils.get_issue_tracker_for_testcase(
            testcase)
        if not issue_tracker:
          continue

        # Determine the original issue id by traversing the list of
        # duplicates.
        try:
          issue = issue_tracker.get_original_issue(issue_id)
          original_issue_id = issue.id
        except Exception:
          # If we are unable to access the issue, then we can't determine
          # the original issue id. Assume that it is the same as issue id.
          logs.log_error(
              'Unable to determine original issue for %d.' % issue_id)
          original_issue_id = issue_id

        if project_name not in cached_issue_map:
          cached_issue_map[project_name] = {}
        cached_issue_map[project_name][issue_id] = original_issue_id
        cached_issue_map[project_name][original_issue_id] = original_issue_id
        testcase_attributes.issue_id = original_issue_id

  # No longer needed. Free up some memory.
  cached_issue_map.clear()

  group_testcases_with_similar_states(testcase_map)
  group_testcases_with_same_issues(testcase_map)
  group_leader.choose(testcase_map)

  # TODO(aarya): Replace with an optimized implementation using dirty flag.
  # Update the group mapping in testcase object.
  for testcase_id in data_handler.get_open_testcase_id_iterator():
    if testcase_id not in testcase_map:
      # A new testcase that was just created. Skip for now; it will be grouped
      # in the next iteration of the group task.
      continue

    # If we are part of a group, then calculate the number of testcases in that
    # group and lowest issue id of issues associated with testcases in that
    # group.
    updated_group_id = testcase_map[testcase_id].group_id
    updated_is_leader = testcase_map[testcase_id].is_leader
    updated_group_id_count = 0
    updated_group_bug_information = 0
    if updated_group_id:
      for other_testcase in six.itervalues(testcase_map):
        if other_testcase.group_id != updated_group_id:
          continue
        updated_group_id_count += 1

        # Update group issue id to be lowest issue id in the entire group.
        if other_testcase.issue_id is None:
          continue
        if (not updated_group_bug_information or
            updated_group_bug_information > other_testcase.issue_id):
          updated_group_bug_information = other_testcase.issue_id

    # If this group id is used by only one testcase, then remove it.
    if updated_group_id_count == 1:
      data_handler.delete_group(updated_group_id, update_testcases=False)
      updated_group_id = 0
      updated_group_bug_information = 0
      updated_is_leader = True

    # If this group has more than the maximum allowed testcases, log an error
    # so that the sheriff can later debug what caused this. Usually, this is a
    # bug in the grouping logic OR an ever-changing crash signature (e.g.
    # slightly different crash types or crash states). We cannot bail out, as
    # otherwise we would not group the testcase, leading to a spam of newly
    # filed bugs.
    if updated_group_id_count > GROUP_MAX_TESTCASE_LIMIT:
      logs.log_error(
          'Group %d exceeds maximum allowed testcases.' % updated_group_id)

    try:
      testcase = data_handler.get_testcase_by_id(testcase_id)
    except errors.InvalidTestcaseError:
      # Already deleted.
      continue

    is_changed = (
        (testcase.group_id != updated_group_id) or
        (testcase.group_bug_information != updated_group_bug_information) or
        (testcase.is_leader != updated_is_leader))

    if not is_changed:
      # If nothing is changed, no more work to do. It's faster this way.
      continue

    testcase.group_bug_information = updated_group_bug_information
    testcase.group_id = updated_group_id
    testcase.is_leader = updated_is_leader
    testcase.put()
    logs.log(
        'Updated testcase %d group to %d.' % (testcase_id, updated_group_id))
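
The cached_issue_map logic above resolves an issue to the original it was merged into, caching both directions per project so later testcases with either id hit the cache. A standalone sketch, where get_original is a callback standing in for issue_tracker.get_original_issue():

def resolve_original_issue_id(project_name, issue_id, get_original, cache):
  """Map |issue_id| to its original (non-duplicate) issue id, caching both
  directions so later lookups with either id hit the cache."""
  project_cache = cache.setdefault(project_name, {})
  if issue_id in project_cache:
    return project_cache[issue_id]

  try:
    original_issue_id = get_original(issue_id).id
  except Exception:
    # If the issue is inaccessible, fall back to the id itself, just as the
    # example above does.
    original_issue_id = issue_id

  project_cache[issue_id] = original_issue_id
  project_cache[original_issue_id] = original_issue_id
  return original_issue_id
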
Example 9
def group_testcases():
    """Group testcases based on rules like same bug numbers, similar crash
    states, etc."""
    testcase_map = {}
    cached_issue_map = {}

    for testcase_id in data_handler.get_open_testcase_id_iterator():
        try:
            testcase = data_handler.get_testcase_by_id(testcase_id)
        except errors.InvalidTestcaseError:
            # Already deleted.
            continue

        # Remove duplicates early on to avoid large groups.
        if (not testcase.bug_information and not testcase.uploader_email
                and _has_testcase_with_same_params(testcase, testcase_map)):
            logs.log('Deleting duplicate testcase %d.' % testcase_id)
            testcase.key.delete()
            continue

        # Wait for minimization to finish, as it might change crash parameters
        # such as the type and may mark the testcase as duplicate / closed.
        if not testcase.minimized_keys:
            continue

        # Store needed testcase attributes into |testcase_map|.
        testcase_map[testcase_id] = TestcaseAttributes(testcase_id)
        testcase_attributes = testcase_map[testcase_id]
        for attribute_name in FORWARDED_ATTRIBUTES:
            setattr(testcase_attributes, attribute_name,
                    getattr(testcase, attribute_name))

        # Store original issue mappings in the testcase attributes.
        if testcase.bug_information:
            issue_id = int(testcase.bug_information)
            project_name = testcase.project_name

            if (project_name in cached_issue_map
                    and issue_id in cached_issue_map[project_name]):
                testcase_attributes.issue_id = (
                    cached_issue_map[project_name][issue_id])
            else:
                issue_tracker = issue_tracker_utils.get_issue_tracker_for_testcase(
                    testcase)
                if not issue_tracker:
                    logs.log_error(
                        'Unable to access issue tracker for issue %d.' %
                        issue_id)
                    testcase_attributes.issue_id = issue_id
                    continue

                # Determine the original issue id by traversing the list of
                # duplicates.
                try:
                    issue = issue_tracker.get_original_issue(issue_id)
                    original_issue_id = int(issue.id)
                except Exception:
                    # If we are unable to access the issue, then we can't determine
                    # the original issue id. Assume that it is the same as issue id.
                    logs.log_error(
                        'Unable to determine original issue for issue %d.' %
                        issue_id)
                    testcase_attributes.issue_id = issue_id
                    continue

                if project_name not in cached_issue_map:
                    cached_issue_map[project_name] = {}
                cached_issue_map[project_name][issue_id] = original_issue_id
                cached_issue_map[project_name][
                    original_issue_id] = original_issue_id
                testcase_attributes.issue_id = original_issue_id

    # No longer needed. Free up some memory.
    cached_issue_map.clear()

    _group_testcases_with_similar_states(testcase_map)
    _group_testcases_with_same_issues(testcase_map)
    _shrink_large_groups_if_needed(testcase_map)
    group_leader.choose(testcase_map)

    # TODO(aarya): Replace with an optimized implementation using dirty flag.
    # Update the group mapping in testcase object.
    for testcase_id in data_handler.get_open_testcase_id_iterator():
        if testcase_id not in testcase_map:
            # A new testcase that was just created. Skip for now; it will be
            # grouped in the next iteration of the group task.
            continue

        # If we are part of a group, then calculate the number of testcases in that
        # group and lowest issue id of issues associated with testcases in that
        # group.
        updated_group_id = testcase_map[testcase_id].group_id
        updated_is_leader = testcase_map[testcase_id].is_leader
        updated_group_id_count = 0
        updated_group_bug_information = 0
        if updated_group_id:
            for other_testcase in six.itervalues(testcase_map):
                if other_testcase.group_id != updated_group_id:
                    continue
                updated_group_id_count += 1

                # Update group issue id to be lowest issue id in the entire group.
                if other_testcase.issue_id is None:
                    continue
                if (not updated_group_bug_information
                        or updated_group_bug_information >
                        other_testcase.issue_id):
                    updated_group_bug_information = other_testcase.issue_id

        # If this group id is used by only one testcase, then remove it.
        if updated_group_id_count == 1:
            data_handler.delete_group(updated_group_id, update_testcases=False)
            updated_group_id = 0
            updated_group_bug_information = 0
            updated_is_leader = True

        try:
            testcase = data_handler.get_testcase_by_id(testcase_id)
        except errors.InvalidTestcaseError:
            # Already deleted.
            continue

        is_changed = (
            (testcase.group_id != updated_group_id) or
            (testcase.group_bug_information != updated_group_bug_information)
            or (testcase.is_leader != updated_is_leader))

        if not testcase.get_metadata('ran_grouper'):
            testcase.set_metadata('ran_grouper',
                                  True,
                                  update_testcase=not is_changed)

        if not is_changed:
            continue

        testcase.group_bug_information = updated_group_bug_information
        testcase.group_id = updated_group_id
        testcase.is_leader = updated_is_leader
        testcase.put()
        logs.log('Updated testcase %d group to %d.' %
                 (testcase_id, updated_group_id))
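
Both grouping examples end with a write-only-when-changed update that avoids needless datastore puts. A standalone sketch of that pattern, using a plain dataclass and a put callback in place of the real entity:

import dataclasses


@dataclasses.dataclass
class StoredTestcase:
  group_id: int = 0
  group_bug_information: int = 0
  is_leader: bool = False


def apply_group_update(testcase, group_id, group_bug, is_leader, put):
  """Copy the computed fields onto |testcase|, calling put() only on change."""
  is_changed = (testcase.group_id != group_id
                or testcase.group_bug_information != group_bug
                or testcase.is_leader != is_leader)
  if not is_changed:
    # Nothing changed; skipping the write is faster.
    return False

  testcase.group_id = group_id
  testcase.group_bug_information = group_bug
  testcase.is_leader = is_leader
  put(testcase)
  return True
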