Example #1
def mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
        policy, testcase, issue):
    """Closes an unreproducible testcase and its associated issue after a certain
  time period."""
    # If the testcase is already closed, no more work to do.
    if not testcase.open:
        return

    # Check testcase status, so as to skip unreproducible uploads.
    if testcase.status not in ['Processed', 'Duplicate']:
        return

    # Make sure that this testcase is an unreproducible bug. If not, bail out.
    if not testcase.one_time_crasher_flag:
        return

    # Make sure that this testcase has an associated bug. If not, bail out.
    if not testcase.bug_information:
        return

    # If this testcase was manually uploaded, don't change issue state as our
    # reproduction result might be incorrect.
    if testcase.uploader_email:
        return

    # Make sure that there is an associated bug and it is in an open state.
    if not issue or not issue.is_open:
        return

    # Check if there are any reproducible open testcases associated with this
    # bug. If yes, return.
    similar_testcase = data_types.Testcase.query(
        data_types.Testcase.bug_information == testcase.bug_information,
        ndb_utils.is_true(data_types.Testcase.open),
        ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get()
    if similar_testcase:
        return

    # Make sure that the testcase is at least
    # |UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE| days old, otherwise it will be
    # seen in crash stats anyway.
    if (testcase.timestamp and not dates.time_has_expired(
            testcase.timestamp,
            days=data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE)):
        return

    # Handle testcase that turned from reproducible to unreproducible. Account
    # for the recent progression task run time.
    last_tested_crash_time = testcase.get_metadata('last_tested_crash_time')
    if (last_tested_crash_time and not dates.time_has_expired(
            last_tested_crash_time,
            days=data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE)):
        return

    # Make sure that there is no crash seen in the deadline period.
    if get_crash_occurrence_platforms(
            testcase, data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE):
        return

    # As a last check, do the expensive call of actually checking all issue
    # comments to make sure we didn't get called out on issue mistriage.
    if issue_tracker_utils.was_label_added(issue, policy.label('wrong')):
        return

    # Close associated issue and testcase.
    comment = ('ClusterFuzz testcase %d is flaky and no longer crashes, '
               'so closing issue.' % testcase.key.id())
    if utils.is_oss_fuzz():
        comment += OSS_FUZZ_INCORRECT_COMMENT
    else:
        comment = _append_generic_incorrect_comment(comment, policy, issue,
                                                    ' and re-open the issue.')

    issue.status = policy.status('wontfix')
    issue.save(new_comment=comment, notify=True)
    testcase.fixed = 'NA'
    testcase.open = False
    testcase.put()

    logs.log('Closed unreproducible testcase %d and associated issue.' %
             testcase.key.id())
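
The two deadline checks above both go through dates.time_has_expired. Purely as an illustration of the intended semantics (older than a given number of days), a minimal standalone sketch might look like this; the 14-day value is a made-up placeholder, not the real UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE:

import datetime


def time_has_expired(timestamp, days):
    # Illustrative only: True if |timestamp| is older than |days| days.
    deadline = datetime.datetime.utcnow() - datetime.timedelta(days=days)
    return timestamp < deadline


# Mirrors the bail-out above: a 2-day-old testcase is too recent to close.
testcase_timestamp = datetime.datetime.utcnow() - datetime.timedelta(days=2)
if not time_has_expired(testcase_timestamp, days=14):
    print('Testcase is too recent; bailing out.')
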
Example #2
def mark_issue_as_closed_if_testcase_is_fixed(policy, testcase, issue):
    """Mark an issue as fixed if all of its associated reproducible testcase are
  fixed."""
    verified_label = policy.label('verified')
    if not verified_label:
        return

    # If there is no associated issue, then bail out.
    if not issue or not testcase.bug_information:
        return

    # If the issue is closed in a status other than Fixed, like Duplicate, WontFix
    # or Archived, we shouldn't change it. Bail out.
    if not issue.is_open and issue.status != policy.status('fixed'):
        return

    # Check testcase status, so as to skip unreproducible uploads.
    if testcase.status not in ['Processed', 'Duplicate']:
        return

    # If the testcase is still open, no work needs to be done. Bail out.
    if testcase.open:
        return

    # FIXME: Find a better solution to skip over reproducible tests that are now
    # showing up as flaky (especially when we are unable to reproduce the crash
    # in the original crash revision).
    if testcase.fixed == 'NA':
        return

    # We can only verify fixed issues for reproducible testcases. If the testcase
    # is unreproducible, bail out. The exception is if we explicitly marked this
    # as fixed.
    if testcase.one_time_crasher_flag and testcase.fixed != 'Yes':
        return

    # Make sure that no other testcases associated with this issue are open.
    similar_testcase = data_types.Testcase.query(
        data_types.Testcase.bug_information == testcase.bug_information,
        ndb_utils.is_true(data_types.Testcase.open),
        ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get()
    if similar_testcase:
        return

    # As a last check, do the expensive call of actually checking all issue
    # comments to make sure we didn't do the verification already and we didn't
    # get called out on issue mistriage.
    if (issue_tracker_utils.was_label_added(issue, verified_label) or
            issue_tracker_utils.was_label_added(issue, policy.label('wrong'))):
        return

    issue.labels.add(verified_label)
    comment = ('ClusterFuzz testcase %d is verified as fixed' %
               testcase.key.id())

    fixed_range_url = data_handler.get_fixed_range_url(testcase)
    if fixed_range_url:
        comment += ' in ' + fixed_range_url
    else:
        comment += '.'

    if utils.is_oss_fuzz():
        comment += OSS_FUZZ_INCORRECT_COMMENT
    else:
        comment = _append_generic_incorrect_comment(comment, policy, issue,
                                                    ' and re-open the issue.')

    skip_auto_close = data_handler.get_value_from_job_definition(
        testcase.job_type, 'SKIP_AUTO_CLOSE_ISSUE')
    if not skip_auto_close:
        issue.status = policy.status('verified')

    issue.save(new_comment=comment, notify=True)
    logs.log('Mark issue %d as verified for fixed testcase %d.' %
             (issue.id, testcase.key.id()))
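
The SKIP_AUTO_CLOSE_ISSUE check reads a flag from the job definition via data_handler.get_value_from_job_definition, which (as the call above shows) resolves the job by its type name. The sketch below only illustrates the parsing idea on a raw definition string, assuming a simple KEY = VALUE line format; it is not the real helper:

def get_flag_from_job_definition(job_definition, key, default=None):
    # Illustrative parser for a KEY = VALUE style definition string
    # (assumed format; the real helper looks the job up by its type name).
    for line in job_definition.splitlines():
        name, separator, value = line.partition('=')
        if separator and name.strip() == key:
            return value.strip()
    return default


# Example: a job that opts out of automatic issue closing.
job_definition = 'CUSTOM_BINARY = True\nSKIP_AUTO_CLOSE_ISSUE = True\n'
print(get_flag_from_job_definition(job_definition, 'SKIP_AUTO_CLOSE_ISSUE'))
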
Example #3
def mark_issue_as_closed_if_testcase_is_fixed(testcase, issue):
    """Mark an issue as fixed if all of its associated reproducible testcase are
  fixed."""
    # If there is no associated issue, then bail out.
    if not issue or not testcase.bug_information:
        return

    # If the issue is closed in a status other than Fixed, like Duplicate, WontFix
    # or Archived, we shouldn't change it. Bail out.
    if not issue.open and issue.status != 'Fixed':
        return

    # Check testcase status, so as to skip unreproducible uploads.
    if testcase.status not in ['Processed', 'Duplicate']:
        return

    # If the testcase is still open, no work needs to be done. Bail out.
    if testcase.open:
        return

    # FIXME: Find a better solution to skip over reproducible tests that are now
    # showing up as flaky (especially when we are unable to reproduce the crash
    # in the original crash revision).
    if testcase.fixed == 'NA':
        return

    # We can only verify fixed issues for reproducible testcases. If the testcase
    # is unreproducible, bail out. The exception is if we explicitly marked this
    # as fixed.
    if testcase.one_time_crasher_flag and testcase.fixed != 'Yes':
        return

    # Make sure that no other testcases associated with this issue are open.
    similar_testcase = data_types.Testcase.query(
        data_types.Testcase.bug_information == testcase.bug_information,
        ndb_utils.is_true(data_types.Testcase.open),
        ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get()
    if similar_testcase:
        return

    # As a last check, do the expensive call of actually checking all issue
    # comments to make sure we didn't do the verification already and we didn't
    # get called out on issue mistriage.
    if (issue.has_comment_with_label(data_types.ISSUE_VERIFIED_LABEL) or
            issue.has_comment_with_label(data_types.ISSUE_MISTRIAGED_LABEL)):
        return

    issue.add_label(data_types.ISSUE_VERIFIED_LABEL)
    comment = ('ClusterFuzz testcase %d is verified as fixed, '
               'so closing issue as verified.' % testcase.key.id())
    if utils.is_oss_fuzz():
        comment += OSS_FUZZ_INCORRECT_COMMENT
    else:
        comment += INTERNAL_INCORRECT_COMMENT
        comment += ' and re-open the issue.'

    issue.comment = comment
    issue.status = 'Verified'
    issue.open = False
    issue.save(send_email=True)
    logs.log('Closed issue %d for fixed testcase %d.' %
             (issue.id, testcase.key.id()))
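
Compared with Example #3, Example #2 routes every label and status name through an issue-tracker policy object instead of hard-coded strings. To exercise that variant in isolation, a minimal stand-in policy could look like the following; the concrete label and status strings are illustrative assumptions, not taken from a real policy file:

class FakeIssueTrackerPolicy(object):
    # Minimal stand-in for the policy object used in Example #2.
    _LABELS = {'verified': 'ClusterFuzz-Verified', 'wrong': 'ClusterFuzz-Wrong'}
    _STATUSES = {'fixed': 'Fixed', 'verified': 'Verified', 'wontfix': 'WontFix'}

    def label(self, name):
        return self._LABELS.get(name)

    def status(self, name):
        return self._STATUSES.get(name)


policy = FakeIssueTrackerPolicy()
print(policy.label('verified'), policy.status('wontfix'))
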
Example #4
def cleanup_testcases_and_issues():
    """Clean up unneeded open testcases and their associated issues."""
    jobs = data_handler.get_all_job_type_names()
    testcase_keys = ndb_utils.get_all_from_query(
        data_types.Testcase.query(
            ndb_utils.is_false(data_types.Testcase.triaged)),
        keys_only=True)
    top_crashes_by_project_and_platform_map = (
        get_top_crashes_for_all_projects_and_platforms())

    utils.python_gc()

    testcases_processed = 0
    empty_issue_tracker_policy = issue_tracker_policy.get_empty()
    for testcase_key in testcase_keys:
        testcase_id = testcase_key.id()
        try:
            testcase = data_handler.get_testcase_by_id(testcase_id)
        except errors.InvalidTestcaseError:
            # Already deleted.
            continue

        logs.log('Processing testcase %d.' % testcase_id)

        try:
            issue = issue_tracker_utils.get_issue_for_testcase(testcase)
            policy = issue_tracker_utils.get_issue_tracker_policy_for_testcase(
                testcase)
            if not policy:
                policy = empty_issue_tracker_policy

            # Issue updates.
            update_os_labels(policy, testcase, issue)
            update_fuzz_blocker_label(policy, testcase, issue,
                                      top_crashes_by_project_and_platform_map)
            update_component_labels(testcase, issue)
            update_issue_ccs_from_owners_file(policy, testcase, issue)
            update_issue_owner_and_ccs_from_predator_results(
                policy, testcase, issue)
            update_issue_labels_for_flaky_testcase(policy, testcase, issue)

            # Testcase marking rules.
            mark_duplicate_testcase_as_closed_with_no_issue(testcase)
            mark_issue_as_closed_if_testcase_is_fixed(policy, testcase, issue)
            mark_testcase_as_closed_if_issue_is_closed(policy, testcase, issue)
            mark_testcase_as_closed_if_job_is_invalid(testcase, jobs)
            mark_unreproducible_testcase_as_fixed_if_issue_is_closed(
                testcase, issue)
            mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
                policy, testcase, issue)

            # Notification, to be done at end after testcase state is updated from
            # previous rules.
            notify_closed_issue_if_testcase_is_open(policy, testcase, issue)
            notify_issue_if_testcase_is_invalid(policy, testcase, issue)
            notify_uploader_when_testcase_is_processed(policy, testcase, issue)

            # Mark testcase as triage complete if both testcase and associated
            # issue are closed. This also needs to be done before the deletion
            # rules.
            mark_testcase_as_triaged_if_needed(testcase, issue)

            # Testcase deletion rules.
            delete_unreproducible_testcase_with_no_issue(testcase)
        except Exception:
            logs.log_error('Failed to process testcase %d.' % testcase_id)

        testcases_processed += 1
        if testcases_processed % 100 == 0:
            utils.python_gc()
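
The loop above forces a garbage-collection pass every 100 testcases so memory stays bounded over a long keys-only scan. The same pattern can be pulled into a small generic helper; this is just a sketch of the idea, not ClusterFuzz code:

import gc


def with_periodic_gc(items, every=100):
    # Yield items unchanged, collecting garbage after every |every| items.
    for count, item in enumerate(items, start=1):
        yield item
        if count % every == 0:
            gc.collect()


# Example: iterate over a large sequence with periodic collection passes.
for key in with_periodic_gc(range(1000)):
    pass
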
Example #5
def is_crash_important(testcase):
  """Indicate if the crash is important to file."""
  if not testcase.one_time_crasher_flag:
    # A reproducible crash is an important crash.
    return True

  if testcase.status != 'Processed':
    # A duplicate or unreproducible crash is not an important crash.
    return False

  # Testcase is unreproducible. Only crashes that occur frequently are
  # important.

  if testcase.crash_type in UNREPRODUCIBLE_CRASH_IGNORE_CRASH_TYPES:
    return False

  # Ensure that there is no reproducible testcase in our group.
  if testcase.group_id:
    other_reproducible_testcase = data_types.Testcase.query(
        data_types.Testcase.group_id == testcase.group_id,
        ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get()
    if other_reproducible_testcase:
      # There is another reproducible testcase in our group. So, this crash is
      # not important.
      return False

  # Get crash statistics data on this unreproducible crash for the last X days.
  last_hour = crash_stats.get_last_successful_hour()
  if not last_hour:
    # No crash stats available, skip.
    return False

  _, rows = crash_stats.get(
      end=last_hour,
      block='day',
      days=data_types.FILE_CONSISTENT_UNREPRODUCIBLE_TESTCASE_DEADLINE,
      group_by='reproducible_flag',
      where_clause=(
          'crash_type = %s AND crash_state = %s AND security_flag = %s' %
          (json.dumps(testcase.crash_type), json.dumps(testcase.crash_state),
           json.dumps(testcase.security_flag))),
      group_having_clause='',
      sort_by='total_count',
      offset=0,
      limit=1)

  # Calculate total crash count and crash days count.
  crash_days_indices = set()
  total_crash_count = 0
  for row in rows:
    if 'groups' not in row:
      continue

    total_crash_count += row['totalCount']
    for group in row['groups']:
      for index in group['indices']:
        crash_days_indices.add(index['hour'])

  crash_days_count = len(crash_days_indices)

  # An unreproducible testcase is only important if it happened at least once
  # every day for the last X days and its total crash count exceeded our
  # threshold limit.
  return (crash_days_count ==
          data_types.FILE_CONSISTENT_UNREPRODUCIBLE_TESTCASE_DEADLINE and
          total_crash_count >=
          data_types.FILE_UNREPRODUCIBLE_TESTCASE_MIN_CRASH_THRESHOLD)
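
The final aggregation walks the crash-stats rows, counting distinct crash days and summing crash counts. With a hand-made row in the same shape the loop reads ('totalCount', 'groups', 'indices', 'hour'), the counting logic can be exercised on its own; every number below is made up for illustration:

rows = [{
    'totalCount': 120,
    'groups': [{
        'indices': [{'hour': 440000}, {'hour': 440024}, {'hour': 440048}],
    }],
}]

crash_days_indices = set()
total_crash_count = 0
for row in rows:
  if 'groups' not in row:
    continue

  total_crash_count += row['totalCount']
  for group in row['groups']:
    for index in group['indices']:
      crash_days_indices.add(index['hour'])

print(len(crash_days_indices), total_crash_count)  # 3 distinct days, 120 crashes.
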