Example #1
def execute_task(testcase_id, job_type):
    """Execute progression task."""
    try:
        find_fixed_range(testcase_id, job_type)
    except errors.BuildSetupError as error:
        # If we failed to set up a build, it is likely a bot error. We can
        # retry the task in this case.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        error_message = 'Build setup failed r%d' % error.revision
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)
        build_fail_wait = environment.get_value('FAIL_WAIT')
        tasks.add_task('progression',
                       testcase_id,
                       job_type,
                       wait_time=build_fail_wait)
    except errors.BadBuildError:
        # Bad builds hit while narrowing the range are recoverable, but some
        # builds marked as bad are unrecoverable. Recoverable ones should not
        # reach this point.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        error_message = 'Unable to recover from bad build'
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)

    # If there is a fine grained bisection service available, request it. Both
    # regression and fixed ranges are requested once. Regression is also requested
    # here as the bisection service may require details that are not yet available
    # (e.g. issue ID) at the time regress_task completes.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    bisection.request_bisection(testcase)
Example #2
  def test_update_comment_empty(self):
    """Basic test on a testcase with empty comments."""
    data_handler.update_testcase_comment(
        self.testcase, data_types.TaskState.STARTED, 'message')
    self.assertEqual(
        '[2019-01-01 00:00:00] bot: Progression task started: message.\n',
        self.testcase.comments)
Example #3
def validate_regression_range(testcase, testcase_file_path, job_type,
                              revision_list, min_index):
    """Ensure that we found the correct min revision by testing earlier ones."""
    earlier_revisions = revision_list[
        min_index - EARLIER_REVISIONS_TO_CONSIDER_FOR_VALIDATION:min_index]
    revision_count = min(len(earlier_revisions),
                         REVISIONS_TO_TEST_FOR_VALIDATION)

    revisions_to_test = random.sample(earlier_revisions, revision_count)
    for revision in revisions_to_test:
        try:
            if _testcase_reproduces_in_revision(testcase, testcase_file_path,
                                                job_type, revision):
                testcase = data_handler.get_testcase_by_id(testcase.key.id())
                testcase.regression = 'NA'
                error_message = (
                    'Low confidence in regression range. Test case crashes in '
                    'revision r%d but not later revision r%d' %
                    (revision, revision_list[min_index]))
                data_handler.update_testcase_comment(
                    testcase, data_types.TaskState.ERROR, error_message)
                return False
        except errors.BadBuildError:
            pass

    return True
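A minimal standalone sketch of the sampling step above; the two constant values are assumptions for illustration (the real ones live in the regression task module), and the max() clamp is a defensive addition to keep the slice start from going negative:

import random

EARLIER_REVISIONS_TO_CONSIDER_FOR_VALIDATION = 10  # Assumed value.
REVISIONS_TO_TEST_FOR_VALIDATION = 3  # Assumed value.

revision_list = [100, 105, 110, 120, 130, 140, 150]
min_index = 5  # First crashing revision found by the bisect.

start = max(min_index - EARLIER_REVISIONS_TO_CONSIDER_FOR_VALIDATION, 0)
earlier_revisions = revision_list[start:min_index]
revision_count = min(len(earlier_revisions), REVISIONS_TO_TEST_FOR_VALIDATION)
print(random.sample(earlier_revisions, revision_count))  # e.g. [105, 130, 100]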
Example #4
def _testcase_reproduces_in_revision(testcase,
                                     testcase_file_path,
                                     job_type,
                                     revision,
                                     update_metadata=False):
  """Test to see if a test case reproduces in the specified revision."""
  build_manager.setup_build(revision)
  if not build_manager.check_app_path():
    raise errors.BuildSetupError(revision, job_type)

  if testcase_manager.check_for_bad_build(job_type, revision):
    log_message = 'Bad build at r%d. Skipping' % revision
    testcase = data_handler.get_testcase_by_id(testcase.key.id())
    data_handler.update_testcase_comment(testcase, data_types.TaskState.WIP,
                                         log_message)
    raise errors.BadBuildError(revision, job_type)

  test_timeout = environment.get_value('TEST_TIMEOUT', 10)
  result = testcase_manager.test_for_crash_with_retries(
      testcase, testcase_file_path, test_timeout, http_flag=testcase.http_flag)
  _log_output(revision, result)

  if update_metadata:
    _update_issue_metadata(testcase)

  return result
Example #5
def execute_task(testcase_id, job_type):
    """Run regression task and handle potential errors."""
    try:
        find_regression_range(testcase_id, job_type)
    except errors.BuildSetupError as error:
        # If we failed to set up a build, it is likely a bot error. We can
        # retry the task in this case.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        error_message = 'Build setup failed r%d' % error.revision
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)
        build_fail_wait = environment.get_value('FAIL_WAIT')
        tasks.add_task('regression',
                       testcase_id,
                       job_type,
                       wait_time=build_fail_wait)
    except errors.BadBuildError:
        # Bad builds hit while narrowing the range are recoverable, but some
        # builds marked as bad are unrecoverable. Recoverable ones should not
        # reach this point.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        testcase.regression = 'NA'
        error_message = 'Unable to recover from bad build'
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)
Example #6
  def test_update_comment_truncate(self):
    """Test truncating long comments."""
    self.testcase.comments = '\n' * data_types.TESTCASE_COMMENTS_LENGTH_LIMIT
    data_handler.update_testcase_comment(
        self.testcase, data_types.TaskState.STARTED, 'message')

    self.assertEqual(data_types.TESTCASE_COMMENTS_LENGTH_LIMIT,
                     len(self.testcase.comments))
    expected_new = (
        '[2019-01-01 00:00:00] bot: Progression task started: message.\n')
    expected = (
        '\n' * (data_types.TESTCASE_COMMENTS_LENGTH_LIMIT - len(expected_new)) +
        expected_new)
    self.assertEqual(expected, self.testcase.comments)
Example #7
  def test_update_comment_clear(self):
    """Basic test on a testcase with existing comments, and clearing old
    progression messages."""
    self.testcase.comments = (
        '[2018-01-01 00:00:00] bot: Foo.\n'
        '[2018-01-01 00:00:00] bot: Progression task started: message.\n'
        '[2018-01-01 00:00:00] bot: Bar.\n'
        '[2018-01-01 00:00:00] bot: Progression task finished.\n'
        '[2018-01-01 00:00:00] bot: Blah.\n')
    data_handler.update_testcase_comment(
        self.testcase, data_types.TaskState.STARTED, 'message')
    self.assertEqual(
        ('[2018-01-01 00:00:00] bot: Foo.\n'
         '[2018-01-01 00:00:00] bot: Bar.\n'
         '[2018-01-01 00:00:00] bot: Blah.\n'
         '[2019-01-01 00:00:00] bot: Progression task started: message.\n'),
        self.testcase.comments)
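The three tests above pin down update_testcase_comment's behavior: entries are appended as '[timestamp] bot_name: <Task> task <state>: message.\n', older messages from the same task are cleared, and the buffer is truncated from the front to stay within TESTCASE_COMMENTS_LENGTH_LIMIT. A minimal sketch of that behavior, with the format string and limit inferred from the expected test strings rather than taken from the real data_handler internals:

import datetime

TESTCASE_COMMENTS_LENGTH_LIMIT = 4096  # Assumed limit; the real one may differ.

def append_comment(comments, bot_name, task_display_name, state, message, now):
  # Drop older messages from the same task, keeping unrelated lines.
  comments = ''.join(
      line + '\n' for line in comments.splitlines()
      if '%s task' % task_display_name not in line)

  # E.g. '[2019-01-01 00:00:00] bot: Progression task started: message.\n'.
  comments += '[%s] %s: %s task %s: %s.\n' % (
      now.strftime('%Y-%m-%d %H:%M:%S'), bot_name, task_display_name, state,
      message)

  # Truncate from the front so the newest entries are kept.
  return comments[-TESTCASE_COMMENTS_LENGTH_LIMIT:]

print(append_comment('', 'bot', 'Progression', 'started', 'message',
                     datetime.datetime(2019, 1, 1)))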
Example #8
def mark_unreproducible_if_flaky(testcase, potentially_flaky):
    """Check to see if a test case appears to be flaky."""
    task_name = environment.get_value('TASK_NAME')

    # If this run does not suggest that we are flaky, clear the flag and assume
    # that we are reproducible.
    if not potentially_flaky:
        testcase.set_metadata('potentially_flaky', False)
        return

    # If we have not been marked as potentially flaky in the past, don't mark
    # the test case as unreproducible yet. It is now potentially flaky.
    if not testcase.get_metadata('potentially_flaky'):
        testcase.set_metadata('potentially_flaky', True)

        # In this case, the current task will usually be in a state where it cannot
        # be completed. Recreate it.
        tasks.add_task(task_name, testcase.key.id(), testcase.job_type)
        return

    # At this point, this test case has been flagged as potentially flaky
    # twice. Mark it as unreproducible, and set fields that cannot be
    # populated accordingly.
    if task_name == 'minimize' and not testcase.minimized_keys:
        testcase.minimized_keys = 'NA'
    if task_name in ['minimize', 'impact']:
        testcase.set_impacts_as_na()
    if task_name in ['minimize', 'regression']:
        testcase.regression = 'NA'
    if task_name in ['minimize', 'progression']:
        testcase.fixed = 'NA'

    testcase.one_time_crasher_flag = True
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         'Testcase appears to be flaky')

    # The issue update to flip the reproducibility label is done in the App
    # Engine cleanup cron. This avoids calling the issue tracker APIs from GCE.

    # For unreproducible testcases, it is still beneficial to get component
    # information from the blame task.
    create_blame_task_if_needed(testcase)

    # Let bisection service know about flakiness.
    bisection.request_bisection(testcase)
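A condensed sketch of the two-strikes logic above: the first potentially-flaky run only flags the testcase and recreates the task, and the second marks it unreproducible. The dict stands in for the testcase metadata store; the names are illustrative, not the real API:

def observe_run(metadata, potentially_flaky):
  """Return the action mark_unreproducible_if_flaky would take."""
  if not potentially_flaky:
    metadata['potentially_flaky'] = False
    return 'reproducible'
  if not metadata.get('potentially_flaky'):
    metadata['potentially_flaky'] = True
    return 'retry'  # First strike: recreate the current task.
  return 'unreproducible'  # Second strike: set one_time_crasher_flag etc.

metadata = {}
assert observe_run(metadata, True) == 'retry'
assert observe_run(metadata, True) == 'unreproducible'
assert observe_run(metadata, False) == 'reproducible'  # Clears the flag.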
Example #9
def save_regression_range(testcase_id, regression_range_start,
                          regression_range_end):
    """Saves the regression range and creates blame and impact task if needed."""
    testcase = data_handler.get_testcase_by_id(testcase_id)
    testcase.regression = '%d:%d' % (regression_range_start,
                                     regression_range_end)
    data_handler.update_testcase_comment(
        testcase, data_types.TaskState.FINISHED,
        'regressed in range %s' % testcase.regression)

    write_to_big_query(testcase, regression_range_start, regression_range_end)

    # Force impacts update after regression range is updated. In several cases,
    # we might not have a production build to test with, so regression range is
    # used to decide impacts.
    task_creation.create_impact_task_if_needed(testcase)

    # Get blame information using the regression range result.
    task_creation.create_blame_task_if_needed(testcase)
Example #10
def _testcase_reproduces_in_revision(testcase,
                                     testcase_file_path,
                                     job_type,
                                     revision,
                                     should_log=True,
                                     min_revision=None,
                                     max_revision=None):
    """Test to see if a test case reproduces in the specified revision."""
    if should_log:
        log_message = 'Testing r%d' % revision
        if min_revision is not None and max_revision is not None:
            log_message += ' (current range %d:%d)' % (min_revision,
                                                       max_revision)

        testcase = data_handler.get_testcase_by_id(testcase.key.id())
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.WIP,
                                             log_message)

    build_manager.setup_build(revision)
    if not build_manager.check_app_path():
        raise errors.BuildSetupError(revision, job_type)

    if testcase_manager.check_for_bad_build(job_type, revision):
        log_message = 'Bad build at r%d. Skipping' % revision
        testcase = data_handler.get_testcase_by_id(testcase.key.id())
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.WIP,
                                             log_message)
        raise errors.BadBuildError(revision, job_type)

    test_timeout = environment.get_value('TEST_TIMEOUT', 10)
    result = testcase_manager.test_for_crash_with_retries(
        testcase,
        testcase_file_path,
        test_timeout,
        http_flag=testcase.http_flag)
    return result.is_crash()
Example #11
def execute_task(testcase_id, _):
    """Attempt to find the CL introducing the bug associated with testcase_id."""
    # Locate the testcase associated with the id.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    # Make sure that the predator topic is configured. If not, there is
    # nothing to do here.
    topic = db_config.get_value('predator_crash_topic')
    if not topic:
        logs.log('Predator is not configured, skipping blame task.')
        return

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED)

    # Prepare pubsub message to send to predator.
    message = _prepare_predator_message(testcase)
    if not message:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.ERROR,
            'Failed to generate request for Predator')
        return

    # Clear existing results and mark blame result as pending.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    _clear_blame_result_and_set_pending_flag(testcase)

    # Post request to pub sub.
    client = pubsub.PubSubClient()
    message_ids = client.publish(topic, [message])
    logs.log(
        'Successfully published testcase %s to Predator. Message IDs: %s.' %
        (testcase_id, message_ids))
    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.FINISHED)
Example #12
def execute_task(testcase_id, job_type):
    """Execute a symbolize command."""
    # Locate the testcase associated with the id.
    testcase = data_handler.get_testcase_by_id(testcase_id)

    # We should have at least a symbolized debug or release build.
    if not build_manager.has_symbolized_builds():
        return

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED)

    # Set up the testcase and its dependencies.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
    if not file_list:
        return

    # Initialize variables.
    build_fail_wait = environment.get_value('FAIL_WAIT')

    old_crash_stacktrace = data_handler.get_stacktrace(testcase)
    sym_crash_type = testcase.crash_type
    sym_crash_address = testcase.crash_address
    sym_crash_state = testcase.crash_state
    sym_redzone = DEFAULT_REDZONE
    warmup_timeout = environment.get_value('WARMUP_TIMEOUT')

    # Decide which build revision to use.
    if testcase.crash_stacktrace == 'Pending':
        # This usually happens when someone clicks the 'Update stacktrace from
        # trunk' button on the testcase details page. In this case, we are
        # forced to use trunk. No revision -> trunk build.
        build_revision = None
    else:
        build_revision = testcase.crash_revision

    # Set up a custom or regular build based on revision.
    build_manager.setup_build(build_revision)

    # Get crash revision used in setting up build.
    crash_revision = environment.get_value('APP_REVISION')

    if not build_manager.check_app_path():
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             'Build setup failed')
        tasks.add_task('symbolize',
                       testcase_id,
                       job_type,
                       wait_time=build_fail_wait)
        return

    # ASAN tool settings (if the tool is used).
    # See if we can get better stacks with higher redzone sizes.
    # A UAF might actually turn out to be OOB read/write with a bigger redzone.
    if environment.tool_matches('ASAN', job_type) and testcase.security_flag:
        redzone = MAX_REDZONE
        while redzone >= MIN_REDZONE:
            # Use the redzone size currently being tested, not the original.
            environment.reset_current_memory_tool_options(
                redzone_size=redzone, disable_ubsan=testcase.disable_ubsan)

            process_handler.terminate_stale_application_instances()
            command = testcase_manager.get_command_line_for_application(
                testcase_file_path, needs_http=testcase.http_flag)
            return_code, crash_time, output = (process_handler.run_process(
                command, timeout=warmup_timeout, gestures=testcase.gestures))
            crash_result = CrashResult(return_code, crash_time, output)

            if crash_result.is_crash() and 'AddressSanitizer' in output:
                state = crash_result.get_symbolized_data()
                security_flag = crash_result.is_security_issue()

                if (not crash_analyzer.ignore_stacktrace(
                        state.crash_stacktrace)
                        and security_flag == testcase.security_flag
                        and state.crash_type == testcase.crash_type
                        and (state.crash_type != sym_crash_type
                             or state.crash_state != sym_crash_state)):
                    logs.log(
                        'Changing crash parameters.\nOld : %s, %s, %s' %
                        (sym_crash_type, sym_crash_address, sym_crash_state))

                    sym_crash_type = state.crash_type
                    sym_crash_address = state.crash_address
                    sym_crash_state = state.crash_state
                    sym_redzone = redzone
                    old_crash_stacktrace = state.crash_stacktrace

                    logs.log(
                        '\nNew : %s, %s, %s' %
                        (sym_crash_type, sym_crash_address, sym_crash_state))
                    break

            redzone //= 2

    # We should have at least a symbolized debug or a release build.
    symbolized_builds = build_manager.setup_symbolized_builds(crash_revision)
    if (not symbolized_builds
            or (not build_manager.check_app_path()
                and not build_manager.check_app_path('APP_PATH_DEBUG'))):
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             'Build setup failed')
        tasks.add_task('symbolize',
                       testcase_id,
                       job_type,
                       wait_time=build_fail_wait)
        return

    # Increase malloc_context_size to get all stack frames. Default is 30.
    environment.reset_current_memory_tool_options(
        redzone_size=sym_redzone,
        malloc_context_size=STACK_FRAME_COUNT,
        symbolize_inline_frames=True,
        disable_ubsan=testcase.disable_ubsan)

    # TSAN tool settings (if the tool is used).
    if environment.tool_matches('TSAN', job_type):
        environment.set_tsan_max_history_size()

    # Do the symbolization if supported by this application.
    result, sym_crash_stacktrace = (get_symbolized_stacktraces(
        testcase_file_path, testcase, old_crash_stacktrace, sym_crash_state))

    # Update crash parameters.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    testcase.crash_type = sym_crash_type
    testcase.crash_address = sym_crash_address
    testcase.crash_state = sym_crash_state
    testcase.crash_stacktrace = (
        data_handler.filter_stacktrace(sym_crash_stacktrace))

    if not result:
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.ERROR,
            'Unable to reproduce crash, skipping '
            'stacktrace update')
    else:
        # Switch the build URL to the less-optimized symbolized build with the
        # better stacktrace.
        build_url = environment.get_value('BUILD_URL')
        if build_url:
            testcase.set_metadata('build_url',
                                  build_url,
                                  update_testcase=False)

        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.FINISHED)

    testcase.symbolized = True
    testcase.crash_revision = crash_revision
    testcase.put()

    # We might have updated the crash state. See if the testcase needs to be
    # marked as a duplicate based on other testcases.
    data_handler.handle_duplicate_entry(testcase)

    task_creation.create_blame_task_if_needed(testcase)

    # Switch the current directory before cleaning up builds.
    root_directory = environment.get_value('ROOT_DIR')
    os.chdir(root_directory)

    # Clean up symbolized builds, which are space-heavy.
    symbolized_builds.delete()
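The ASAN retry loop above walks redzone sizes downward from MAX_REDZONE to MIN_REDZONE, halving each time. A tiny sketch of that schedule, with assumed values for the two constants (the real ones live in the symbolize task module):

MAX_REDZONE = 512  # Assumed value.
MIN_REDZONE = 16  # Assumed value.

redzone = MAX_REDZONE
schedule = []
while redzone >= MIN_REDZONE:
  schedule.append(redzone)
  redzone //= 2  # Integer division keeps the size an int.
print(schedule)  # [512, 256, 128, 64, 32, 16]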
Example #13
def _check_fixed_for_custom_binary(testcase, job_type, testcase_file_path):
  """Simplified fixed check for test cases using custom binaries."""
  revision = environment.get_value('APP_REVISION')

  # Update comments to reflect bot information and clean up old comments.
  testcase_id = testcase.key.id()
  testcase = data_handler.get_testcase_by_id(testcase_id)
  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

  build_manager.setup_build()
  if not build_manager.check_app_path():
    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(
        testcase, data_types.TaskState.ERROR,
        'Build setup failed for custom binary')
    build_fail_wait = environment.get_value('FAIL_WAIT')
    tasks.add_task(
        'progression', testcase_id, job_type, wait_time=build_fail_wait)
    return

  test_timeout = environment.get_value('TEST_TIMEOUT', 10)
  result = testcase_manager.test_for_crash_with_retries(
      testcase, testcase_file_path, test_timeout, http_flag=testcase.http_flag)
  _log_output(revision, result)

  # Re-fetch to finalize testcase updates in branches below.
  testcase = data_handler.get_testcase_by_id(testcase.key.id())

  # If this still crashes on the most recent build, it's not fixed. The task
  # will be rescheduled by a cron job and re-attempted eventually.
  if result.is_crash():
    app_path = environment.get_value('APP_PATH')
    command = testcase_manager.get_command_line_for_application(
        testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
    symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
    unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
    stacktrace = utils.get_crash_stacktrace_output(
        command, symbolized_crash_stacktrace, unsymbolized_crash_stacktrace)
    testcase.last_tested_crash_stacktrace = data_handler.filter_stacktrace(
        stacktrace)
    data_handler.update_progression_completion_metadata(
        testcase,
        revision,
        is_crash=True,
        message='still crashes on latest custom build')
    return

  if result.unexpected_crash:
    testcase.set_metadata(
        'crashes_on_unexpected_state', True, update_testcase=False)
  else:
    testcase.delete_metadata(
        'crashes_on_unexpected_state', update_testcase=False)

  # Retry once on another bot to confirm our results and in case this bot is in
  # a bad state which we didn't catch through our usual means.
  if data_handler.is_first_retry_for_task(testcase, reset_after_retry=True):
    tasks.add_task('progression', testcase_id, job_type)
    data_handler.update_progression_completion_metadata(testcase, revision)
    return

  # The bug is fixed.
  testcase.fixed = 'Yes'
  testcase.open = False
  data_handler.update_progression_completion_metadata(
      testcase, revision, message='fixed on latest custom build')
Example #14
def find_fixed_range(testcase_id, job_type):
  """Attempt to find the revision range where a testcase was fixed."""
  deadline = tasks.get_task_completion_deadline()
  testcase = data_handler.get_testcase_by_id(testcase_id)
  if not testcase:
    return

  if testcase.fixed:
    logs.log_error('Fixed range is already set as %s, skip.' % testcase.fixed)
    return

  # Set up the testcase and its dependencies.
  file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
  if not file_list:
    return

  # Set a flag to indicate we are running the progression task. This shows a
  # pending status on the testcase report page and avoids conflicting testcase
  # updates by the triage cron.
  testcase.set_metadata('progression_pending', True)

  # Custom binaries are handled as special cases.
  if build_manager.is_custom_binary():
    _check_fixed_for_custom_binary(testcase, job_type, testcase_file_path)
    return

  build_bucket_path = build_manager.get_primary_bucket_path()
  revision_list = build_manager.get_revisions_list(
      build_bucket_path, testcase=testcase)
  if not revision_list:
    data_handler.close_testcase_with_error(testcase_id,
                                           'Failed to fetch revision list')
    return

  # Use min_index and max_index to mark the start and end of the revision list
  # used for bisecting the progression range. Set the start to the revision
  # where we noticed the crash and the end to the trunk revision. Also, reuse
  # min and max from a past run if it timed out.
  min_revision = testcase.get_metadata('last_progression_min')
  max_revision = testcase.get_metadata('last_progression_max')

  if min_revision or max_revision:
    # Clear these to avoid using them in the next run. If this run fails, the
    # next run should try without them to see if it succeeds. If this run
    # succeeds, clearing them avoids capping the max revision in the next run.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    testcase.delete_metadata('last_progression_min', update_testcase=False)
    testcase.delete_metadata('last_progression_max', update_testcase=False)
    testcase.put()

  last_tested_revision = testcase.get_metadata('last_tested_crash_revision')
  known_crash_revision = last_tested_revision or testcase.crash_revision
  if not min_revision:
    min_revision = known_crash_revision
  if not max_revision:
    max_revision = revisions.get_last_revision_in_list(revision_list)

  min_index = revisions.find_min_revision_index(revision_list, min_revision)
  if min_index is None:
    raise errors.BuildNotFoundError(min_revision, job_type)
  max_index = revisions.find_max_revision_index(revision_list, max_revision)
  if max_index is None:
    raise errors.BuildNotFoundError(max_revision, job_type)

  testcase = data_handler.get_testcase_by_id(testcase_id)
  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED,
                                       'r%d' % max_revision)

  # Check to see if this testcase is still crashing now. If it is, then just
  # bail out.
  result = _testcase_reproduces_in_revision(
      testcase,
      testcase_file_path,
      job_type,
      max_revision,
      update_metadata=True)
  if result.is_crash():
    logs.log('Found crash with same signature on latest revision r%d.' %
             max_revision)
    app_path = environment.get_value('APP_PATH')
    command = testcase_manager.get_command_line_for_application(
        testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
    symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
    unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
    stacktrace = utils.get_crash_stacktrace_output(
        command, symbolized_crash_stacktrace, unsymbolized_crash_stacktrace)
    testcase = data_handler.get_testcase_by_id(testcase_id)
    testcase.last_tested_crash_stacktrace = data_handler.filter_stacktrace(
        stacktrace)
    data_handler.update_progression_completion_metadata(
        testcase,
        max_revision,
        is_crash=True,
        message='still crashes on latest revision r%s' % max_revision)

    # Since we've verified that the test case is still crashing, clear out any
    # metadata indicating potential flake from previous runs.
    task_creation.mark_unreproducible_if_flaky(testcase, False)

    # For chromium project, save latest crash information for later upload
    # to chromecrash/.
    state = result.get_symbolized_data()
    crash_uploader.save_crash_info_if_needed(testcase_id, max_revision,
                                             job_type, state.crash_type,
                                             state.crash_address, state.frames)
    return

  if result.unexpected_crash:
    testcase.set_metadata('crashes_on_unexpected_state', True)
  else:
    testcase.delete_metadata('crashes_on_unexpected_state')

  # Don't burden NFS server with caching these random builds.
  environment.set_value('CACHE_STORE', False)

  # Verify that we do crash in the min revision. This is assumed to be true
  # while we are doing the bisect.
  result = _testcase_reproduces_in_revision(testcase, testcase_file_path,
                                            job_type, min_revision)
  if result and not result.is_crash():
    testcase = data_handler.get_testcase_by_id(testcase_id)

    # Retry once on another bot to confirm our result.
    if data_handler.is_first_retry_for_task(testcase, reset_after_retry=True):
      tasks.add_task('progression', testcase_id, job_type)
      error_message = (
          'Known crash revision %d did not crash, will retry on another bot to '
          'confirm result' % known_crash_revision)
      data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                           error_message)
      data_handler.update_progression_completion_metadata(
          testcase, max_revision)
      return

    data_handler.clear_progression_pending(testcase)
    error_message = (
        'Known crash revision %d did not crash' % known_crash_revision)
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         error_message)
    task_creation.mark_unreproducible_if_flaky(testcase, True)
    return

  # Start a binary search to find last non-crashing revision. At this point, we
  # know that we do crash in the min_revision, and do not crash in max_revision.
  while time.time() < deadline:
    min_revision = revision_list[min_index]
    max_revision = revision_list[max_index]

    # If the min and max revisions are one apart this is as much as we can
    # narrow the range.
    if max_index - min_index == 1:
      _save_fixed_range(testcase_id, min_revision, max_revision,
                        testcase_file_path)
      return

    # Occasionally, we get into this bad state. It seems to be related to test
    # cases with flaky stacks, but the exact cause is unknown.
    if max_index - min_index < 1:
      testcase = data_handler.get_testcase_by_id(testcase_id)
      testcase.fixed = 'NA'
      testcase.open = False
      message = ('Fixed testing errored out (min and max revisions '
                 'are both %d)' % min_revision)
      data_handler.update_progression_completion_metadata(
          testcase, max_revision, message=message)

      # Let the bisection service know about the NA status.
      bisection.request_bisection(testcase)
      return

    # Test the middle revision of our range.
    middle_index = (min_index + max_index) // 2
    middle_revision = revision_list[middle_index]

    testcase = data_handler.get_testcase_by_id(testcase_id)
    log_message = 'Testing r%d (current range %d:%d)' % (
        middle_revision, min_revision, max_revision)
    data_handler.update_testcase_comment(testcase, data_types.TaskState.WIP,
                                         log_message)

    try:
      result = _testcase_reproduces_in_revision(testcase, testcase_file_path,
                                                job_type, middle_revision)
    except errors.BadBuildError:
      # Skip this revision.
      del revision_list[middle_index]
      max_index -= 1
      continue

    if result.is_crash():
      min_index = middle_index
    else:
      max_index = middle_index

    _save_current_fixed_range_indices(testcase_id, revision_list[min_index],
                                      revision_list[max_index])

  # If we've broken out of the loop, we've exceeded the deadline. Recreate the
  # task to pick up where we left off.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  error_message = ('Timed out, current range r%d:r%d' %
                   (revision_list[min_index], revision_list[max_index]))
  data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                       error_message)
  tasks.add_task('progression', testcase_id, job_type)
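At its core, the loop above (and its mirror in the regression search of a later example) is a plain bisection over the revision list, with bad builds dropped from the window as they are hit. A self-contained sketch of that core; crashes() and BadBuildError are hypothetical stand-ins for _testcase_reproduces_in_revision and errors.BadBuildError:

class BadBuildError(Exception):
  """Stand-in for errors.BadBuildError."""

def bisect_fixed_range(revision_list, min_index, max_index, crashes):
  """Assuming the testcase crashes at min_index and not at max_index,
  return (last_crashing_revision, first_fixed_revision)."""
  while max_index - min_index > 1:
    middle_index = (min_index + max_index) // 2
    try:
      crashed = crashes(revision_list[middle_index])
    except BadBuildError:
      # Skip unusable builds, keeping the search window consistent.
      del revision_list[middle_index]
      max_index -= 1
      continue
    if crashed:
      min_index = middle_index
    else:
      max_index = middle_index
  return revision_list[min_index], revision_list[max_index]

print(bisect_fixed_range(list(range(100, 110)), 0, 9, lambda r: r < 106))
# -> (105, 106): the crash stops reproducing at r106.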
Example #15
def execute_task(testcase_id, job_type):
    """Run analyze task."""
    # Reset redzones.
    environment.reset_current_memory_tool_options(redzone_size=128)

    # Unset window location size and position properties so the defaults are
    # used.
    environment.set_value('WINDOW_ARG', '')

    # Locate the testcase associated with the id.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED)

    metadata = data_types.TestcaseUploadMetadata.query(
        data_types.TestcaseUploadMetadata.testcase_id == int(
            testcase_id)).get()
    if not metadata:
        logs.log_error('Testcase %s has no associated upload metadata.' %
                       testcase_id)
        testcase.key.delete()
        return

    is_lsan_enabled = environment.get_value('LSAN')
    if is_lsan_enabled:
        # Create an empty local blacklist so all leaks are visible to the
        # uploader.
        leak_blacklist.create_empty_local_blacklist()

    # Store the bot name and timestamp in upload metadata.
    bot_name = environment.get_value('BOT_NAME')
    metadata.bot_name = bot_name
    metadata.timestamp = datetime.datetime.utcnow()
    metadata.put()

    # Adjust the test timeout, if the user has provided one.
    if metadata.timeout:
        environment.set_value('TEST_TIMEOUT', metadata.timeout)

    # Adjust the number of retries, if the user has provided one.
    if metadata.retries is not None:
        environment.set_value('CRASH_RETRIES', metadata.retries)

    # Set up testcase and get absolute testcase path.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
    if not file_list:
        return

    # Set up build.
    setup_build(testcase)

    # Check if we have an application path. If not, our build failed
    # to set up correctly.
    if not build_manager.check_app_path():
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             'Build setup failed')

        if data_handler.is_first_retry_for_task(testcase):
            build_fail_wait = environment.get_value('FAIL_WAIT')
            tasks.add_task('analyze',
                           testcase_id,
                           job_type,
                           wait_time=build_fail_wait)
        else:
            data_handler.close_invalid_uploaded_testcase(
                testcase, metadata, 'Build setup failed')
        return

    # Update initial testcase information.
    testcase.absolute_path = testcase_file_path
    testcase.job_type = job_type
    testcase.binary_flag = utils.is_binary_file(testcase_file_path)
    testcase.queue = tasks.default_queue()
    testcase.crash_state = ''

    # Set initial testcase metadata fields (e.g. build url, etc).
    data_handler.set_initial_testcase_metadata(testcase)

    # Update minimized arguments and use ones provided during user upload.
    if not testcase.minimized_arguments:
        minimized_arguments = environment.get_value('APP_ARGS') or ''
        additional_command_line_flags = testcase.get_metadata(
            'uploaded_additional_args')
        if additional_command_line_flags:
            minimized_arguments += ' %s' % additional_command_line_flags
        environment.set_value('APP_ARGS', minimized_arguments)
        testcase.minimized_arguments = minimized_arguments

    # Update other fields not set at upload time.
    testcase.crash_revision = environment.get_value('APP_REVISION')
    data_handler.set_initial_testcase_metadata(testcase)
    testcase.put()

    # Initialize some variables.
    gestures = testcase.gestures
    http_flag = testcase.http_flag
    test_timeout = environment.get_value('TEST_TIMEOUT')

    # Get the crash output.
    result = testcase_manager.test_for_crash_with_retries(testcase,
                                                          testcase_file_path,
                                                          test_timeout,
                                                          http_flag=http_flag,
                                                          compare_crash=False)

    # If we don't get a crash, try enabling http to see if we can get a crash.
    # Skip engine fuzzer jobs (e.g. libFuzzer, AFL) for which http testcase paths
    # are not applicable.
    if (not result.is_crash() and not http_flag
            and not environment.is_engine_fuzzer_job()):
        result_with_http = testcase_manager.test_for_crash_with_retries(
            testcase,
            testcase_file_path,
            test_timeout,
            http_flag=True,
            compare_crash=False)
        if result_with_http.is_crash():
            logs.log('Testcase needs http flag for crash.')
            http_flag = True
            result = result_with_http

    # Refresh our object.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    # Set application command line with the correct http flag.
    application_command_line = (
        testcase_manager.get_command_line_for_application(
            testcase_file_path, needs_http=http_flag))

    # Get the crash data.
    crashed = result.is_crash()
    crash_time = result.get_crash_time()
    state = result.get_symbolized_data()
    unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)

    # Get crash info object with minidump info. Also, re-generate unsymbolized
    # stacktrace if needed.
    crash_info, _ = (crash_uploader.get_crash_info_and_stacktrace(
        application_command_line, state.crash_stacktrace, gestures))
    if crash_info:
        testcase.minidump_keys = crash_info.store_minidump()

    if not crashed:
        # Could not reproduce the crash.
        log_message = ('Testcase didn\'t crash in %d seconds (with retries)' %
                       test_timeout)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.FINISHED,
                                             log_message)

        # In the general case, we will not attempt to symbolize if we do not detect
        # a crash. For user uploads, we should symbolize anyway to provide more
        # information about what might be happening.
        crash_stacktrace_output = utils.get_crash_stacktrace_output(
            application_command_line, state.crash_stacktrace,
            unsymbolized_crash_stacktrace)
        testcase.crash_stacktrace = data_handler.filter_stacktrace(
            crash_stacktrace_output)

        # For an unreproducible testcase, retry once on another bot to confirm
        # our results and in case this bot is in a bad state which we didn't catch
        # through our usual means.
        if data_handler.is_first_retry_for_task(testcase):
            testcase.status = 'Unreproducible, retrying'
            testcase.put()

            tasks.add_task('analyze', testcase_id, job_type)
            return

        data_handler.close_invalid_uploaded_testcase(testcase, metadata,
                                                     'Unreproducible')

        # A non-reproducing testcase might still impact production branches.
        # Add the impact task to get that information.
        task_creation.create_impact_task_if_needed(testcase)
        return

    # Update testcase crash parameters.
    testcase.http_flag = http_flag
    testcase.crash_type = state.crash_type
    testcase.crash_address = state.crash_address
    testcase.crash_state = state.crash_state
    crash_stacktrace_output = utils.get_crash_stacktrace_output(
        application_command_line, state.crash_stacktrace,
        unsymbolized_crash_stacktrace)
    testcase.crash_stacktrace = data_handler.filter_stacktrace(
        crash_stacktrace_output)

    # Try to guess if the bug is security or not.
    security_flag = crash_analyzer.is_security_issue(state.crash_stacktrace,
                                                     state.crash_type,
                                                     state.crash_address)
    testcase.security_flag = security_flag

    # If it is, guess the severity.
    if security_flag:
        testcase.security_severity = severity_analyzer.get_security_severity(
            state.crash_type, state.crash_stacktrace, job_type, bool(gestures))

    log_message = ('Testcase crashed in %d seconds (r%d)' %
                   (crash_time, testcase.crash_revision))
    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.FINISHED,
                                         log_message)

    # See if we have to ignore this crash.
    if crash_analyzer.ignore_stacktrace(state.crash_stacktrace):
        data_handler.close_invalid_uploaded_testcase(testcase, metadata,
                                                     'Irrelevant')
        return

    # Test for reproducibility.
    one_time_crasher_flag = not testcase_manager.test_for_reproducibility(
        testcase.fuzzer_name, testcase.actual_fuzzer_name(),
        testcase_file_path, state.crash_state, security_flag, test_timeout,
        http_flag, gestures)
    testcase.one_time_crasher_flag = one_time_crasher_flag

    # Check to see if this is a duplicate.
    data_handler.check_uploaded_testcase_duplicate(testcase, metadata)

    # Set testcase and metadata status if not set already.
    if testcase.status == 'Duplicate':
        # For testcases uploaded by bots (with quiet flag), don't create
        # additional tasks.
        if metadata.quiet_flag:
            data_handler.close_invalid_uploaded_testcase(
                testcase, metadata, 'Duplicate')
            return
    else:
        # New testcase.
        testcase.status = 'Processed'
        metadata.status = 'Confirmed'

        # Reset the timestamp as well, to respect
        # data_types.MIN_ELAPSED_TIME_SINCE_REPORT. Otherwise it may get filed
        # by the triage task prematurely without the grouper having a chance to
        # run on this testcase.
        testcase.timestamp = utils.utcnow()

        # Add new leaks to global blacklist to avoid detecting duplicates.
        # Only add if testcase has a direct leak crash and if it's reproducible.
        if is_lsan_enabled:
            leak_blacklist.add_crash_to_global_blacklist_if_needed(testcase)

    # Update the testcase values.
    testcase.put()

    # Update the upload metadata.
    metadata.security_flag = security_flag
    metadata.put()

    _add_default_issue_metadata(testcase)

    # Create tasks to
    # 1. Minimize testcase (minimize).
    # 2. Find regression range (regression).
    # 3. Find testcase impact on production branches (impact).
    # 4. Check whether testcase is fixed (progression).
    # 5. Get second stacktrace from another job in case of
    #    one-time crashes (stack).
    task_creation.create_tasks(testcase)
Example #16
def find_regression_range(testcase_id, job_type):
    """Attempt to find when the testcase regressed."""
    deadline = tasks.get_task_completion_deadline()
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    if testcase.regression:
        logs.log_error('Regression range is already set as %s, skip.' %
                       testcase.regression)
        return

    # This task is not applicable for custom binaries.
    if build_manager.is_custom_binary():
        testcase.regression = 'NA'
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.ERROR,
            'Not applicable for custom binaries')
        return

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED)

    # Set up the testcase and its dependencies.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
    if not file_list:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             'Failed to setup testcase')
        tasks.add_task('regression', testcase_id, job_type)
        return

    build_bucket_path = build_manager.get_primary_bucket_path()
    revision_list = build_manager.get_revisions_list(build_bucket_path,
                                                     testcase=testcase)
    if not revision_list:
        data_handler.close_testcase_with_error(
            testcase_id, 'Failed to fetch revision list')
        return

    # Don't burden NFS server with caching these random builds.
    environment.set_value('CACHE_STORE', False)

    # Pick up where we left off in a previous run, if necessary.
    min_revision = testcase.get_metadata('last_regression_min')
    max_revision = testcase.get_metadata('last_regression_max')
    first_run = not min_revision and not max_revision
    if not min_revision:
        min_revision = revisions.get_first_revision_in_list(revision_list)
    if not max_revision:
        max_revision = testcase.crash_revision

    min_index = revisions.find_min_revision_index(revision_list, min_revision)
    if min_index is None:
        raise errors.BuildNotFoundError(min_revision, job_type)
    max_index = revisions.find_max_revision_index(revision_list, max_revision)
    if max_index is None:
        raise errors.BuildNotFoundError(max_revision, job_type)

    # Make sure the revision where we noticed the crash still crashes at that
    # revision. Otherwise, our binary search algorithm won't work correctly.
    max_revision = revision_list[max_index]
    crashes_in_max_revision = _testcase_reproduces_in_revision(
        testcase, testcase_file_path, job_type, max_revision, should_log=False)
    if not crashes_in_max_revision:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        error_message = ('Known crash revision %d did not crash' %
                         max_revision)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)
        task_creation.mark_unreproducible_if_flaky(testcase, True)
        return

    # If we've made it this far, the test case appears to be reproducible.
    # Clear any potentially-flaky metadata left over from previous runs.
    task_creation.mark_unreproducible_if_flaky(testcase, False)

    # On the first run, check to see if we regressed near either the min or max
    # revision.
    if first_run and found_regression_near_extreme_revisions(
            testcase, testcase_file_path, job_type, revision_list, min_index,
            max_index):
        return

    while time.time() < deadline:
        min_revision = revision_list[min_index]
        max_revision = revision_list[max_index]

        # If the min and max revisions are one apart (or the same, if we only have
        # one build), this is as much as we can narrow the range.
        if max_index - min_index <= 1:
            # Verify that the regression range seems correct, and save it if so.
            if not validate_regression_range(testcase, testcase_file_path,
                                             job_type, revision_list,
                                             min_index):
                return

            save_regression_range(testcase_id, min_revision, max_revision)
            return

        middle_index = (min_index + max_index) // 2
        middle_revision = revision_list[middle_index]
        try:
            is_crash = _testcase_reproduces_in_revision(
                testcase,
                testcase_file_path,
                job_type,
                middle_revision,
                min_revision=min_revision,
                max_revision=max_revision)
        except errors.BadBuildError:
            # Skip this revision.
            del revision_list[middle_index]
            max_index -= 1
            continue

        if is_crash:
            max_index = middle_index
        else:
            min_index = middle_index

        _save_current_regression_range_indices(testcase_id,
                                               revision_list[min_index],
                                               revision_list[max_index])

    # If we've broken out of the above loop, we timed out. We'll finish by
    # running another regression task and picking up from this point.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    error_message = 'Timed out, current range r%d:r%d' % (
        revision_list[min_index], revision_list[max_index])
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         error_message)
    tasks.add_task('regression', testcase_id, job_type)
Example #17
def execute_task(testcase_id, job_type):
    """Run a test case with a different job type to see if they reproduce."""
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    if (environment.is_engine_fuzzer_job(testcase.job_type) !=
            environment.is_engine_fuzzer_job(job_type)):
        # We should never reach this point, but bail out if we do; otherwise
        # we will run into exceptions.
        return

    # Use a cloned testcase entity with different fuzz target parameters for
    # a different fuzzing engine.
    original_job_type = testcase.job_type
    testcase = _get_variant_testcase_for_job(testcase, job_type)

    # Set up the testcase and its dependencies.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
    if not file_list:
        return

    # Set up a custom or regular build. We explicitly omit the crash revision
    # since we want to test against the latest build here.
    try:
        build_manager.setup_build()
    except errors.BuildNotFoundError:
        logs.log_warn('Matching build not found.')
        return

    # Check if we have an application path. If not, our build failed to set up
    # correctly.
    if not build_manager.check_app_path():
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.ERROR,
            'Build setup failed with job: ' + job_type)
        return

    # Disable gestures if we're running on a different platform from that of
    # the original test case.
    use_gestures = testcase.platform == environment.platform().lower()

    # Reproduce the crash.
    app_path = environment.get_value('APP_PATH')
    command = testcase_manager.get_command_line_for_application(
        testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
    test_timeout = environment.get_value('TEST_TIMEOUT', 10)
    revision = environment.get_value('APP_REVISION')
    result = testcase_manager.test_for_crash_with_retries(
        testcase,
        testcase_file_path,
        test_timeout,
        http_flag=testcase.http_flag,
        use_gestures=use_gestures,
        compare_crash=False)

    if result.is_crash() and not result.should_ignore():
        crash_state = result.get_state()
        crash_type = result.get_type()
        security_flag = result.is_security_issue()

        gestures = testcase.gestures if use_gestures else None
        one_time_crasher_flag = not testcase_manager.test_for_reproducibility(
            testcase.fuzzer_name, testcase.actual_fuzzer_name(),
            testcase_file_path, crash_state, security_flag, test_timeout,
            testcase.http_flag, gestures)
        if one_time_crasher_flag:
            status = data_types.TestcaseVariantStatus.FLAKY
        else:
            status = data_types.TestcaseVariantStatus.REPRODUCIBLE

        crash_comparer = CrashComparer(crash_state, testcase.crash_state)
        is_similar = (crash_comparer.is_similar()
                      and security_flag == testcase.security_flag)

        unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
        symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
        crash_stacktrace_output = utils.get_crash_stacktrace_output(
            command, symbolized_crash_stacktrace,
            unsymbolized_crash_stacktrace)
    else:
        status = data_types.TestcaseVariantStatus.UNREPRODUCIBLE
        is_similar = False
        crash_type = None
        crash_state = None
        security_flag = False
        crash_stacktrace_output = 'No crash occurred.'

    if original_job_type == job_type:
        # This case happens when someone clicks the 'Update last tested
        # stacktrace using trunk build' button.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        testcase.last_tested_crash_stacktrace = (
            data_handler.filter_stacktrace(crash_stacktrace_output))
        testcase.set_metadata('last_tested_crash_revision',
                              revision,
                              update_testcase=True)
    else:
        # Regular case of variant analysis.
        variant = data_handler.get_testcase_variant(testcase_id, job_type)
        variant.status = status
        variant.revision = revision
        variant.crash_type = crash_type
        variant.crash_state = crash_state
        variant.security_flag = security_flag
        variant.is_similar = is_similar
        variant.platform = environment.platform().lower()
        # Explicitly skip the crash stacktrace for now, as it makes entities
        # larger and we plan to use only crash parameters in the UI.
        variant.put()
Example #18
def setup_testcase(testcase, job_type, fuzzer_override=None):
    """Sets up the testcase and needed dependencies like fuzzer,
  data bundle, etc."""
    fuzzer_name = fuzzer_override or testcase.fuzzer_name
    task_name = environment.get_value('TASK_NAME')
    testcase_fail_wait = environment.get_value('FAIL_WAIT')
    testcase_id = testcase.key.id()

    # Clear testcase directories.
    shell.clear_testcase_directories()

    # Adjust the test timeout value if this is coming from a user-uploaded
    # testcase.
    if testcase.uploader_email:
        _set_timeout_value_from_user_upload(testcase_id)

    # Update the fuzzer if necessary in order to get the updated data bundle.
    if fuzzer_name:
        try:
            update_successful = update_fuzzer_and_data_bundles(fuzzer_name)
        except errors.InvalidFuzzerError:
            # Close testcase and don't recreate tasks if this fuzzer is invalid.
            testcase.open = False
            testcase.fixed = 'NA'
            testcase.set_metadata('fuzzer_was_deleted', True)
            logs.log_error('Closed testcase %d with invalid fuzzer %s.' %
                           (testcase_id, fuzzer_name))

            error_message = 'Fuzzer %s no longer exists' % fuzzer_name
            data_handler.update_testcase_comment(testcase,
                                                 data_types.TaskState.ERROR,
                                                 error_message)
            return None, None, None

        if not update_successful:
            error_message = 'Unable to setup fuzzer %s' % fuzzer_name
            data_handler.update_testcase_comment(testcase,
                                                 data_types.TaskState.ERROR,
                                                 error_message)
            tasks.add_task(task_name,
                           testcase_id,
                           job_type,
                           wait_time=testcase_fail_wait)
            return None, None, None

    # Extract the testcase and any of its resources to the input directory.
    file_list, input_directory, testcase_file_path = unpack_testcase(testcase)
    if not file_list:
        error_message = 'Unable to setup testcase %s' % testcase_file_path
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)
        tasks.add_task(task_name,
                       testcase_id,
                       job_type,
                       wait_time=testcase_fail_wait)
        return None, None, None

    # For Android, we need to sync our local testcases directory with the one
    # on the device.
    if environment.is_android():
        _copy_testcase_to_device_and_setup_environment(testcase,
                                                       testcase_file_path)

    # Push testcases to worker.
    if environment.is_trusted_host():
        from clusterfuzz._internal.bot.untrusted_runner import file_host
        file_host.push_testcases_to_worker()

    # Copy global blacklist into local blacklist.
    is_lsan_enabled = environment.get_value('LSAN')
    if is_lsan_enabled:
        # Get local blacklist without this testcase's entry.
        leak_blacklist.copy_global_to_local_blacklist(
            excluded_testcase=testcase)

    prepare_environment_for_testcase(testcase, job_type, task_name)

    return file_list, input_directory, testcase_file_path
Example #19
def execute_task(testcase_id, job_type):
    """Attempt to find if the testcase affects release branches on Chromium."""
    # This shouldn't ever get scheduled, but check just in case.
    if not utils.is_chromium():
        return

    # Locate the testcase associated with the id.
    testcase = data_handler.get_testcase_by_id(testcase_id)

    # If this testcase is fixed, we should no longer be doing impact testing.
    if testcase.fixed and testcase.is_impact_set_flag:
        return

    # For testcases with status unreproducible, we do impact analysis just
    # once.
    if testcase.is_status_unreproducible() and testcase.is_impact_set_flag:
        return

    # Update comments only after checking the above bailout conditions.
    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED)

    # This task is not applicable to unreproducible testcases.
    if testcase.one_time_crasher_flag:
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.ERROR,
            'Not applicable for unreproducible testcases')
        return

    # This task is not applicable for custom binaries. We cannot avoid creating
    # such tasks for custom binary testcases in the cron, so exit gracefully.
    if build_manager.is_custom_binary():
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.FINISHED,
            'Not applicable for custom binaries')
        return

    # If we don't have a stable or beta build URL pattern, we try to use the
    # build information URL to make a guess.
    if not build_manager.has_production_builds():
        if not testcase.regression:
            data_handler.update_testcase_comment(
                testcase, data_types.TaskState.FINISHED,
                'Cannot run without regression range, will re-run once regression '
                'task finishes')
            return

        impacts = get_impacts_from_url(testcase.regression, testcase.job_type)
        testcase = data_handler.get_testcase_by_id(testcase_id)
        set_testcase_with_impacts(testcase, impacts)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.FINISHED)
        return

    # Set up the testcase and its dependencies.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
    if not file_list:
        return

    # Set up extended stable, stable and beta builds, and get impact and crash
    # stacktrace.
    try:
        impacts = get_impacts_on_prod_builds(testcase, testcase_file_path)
    except BuildFailedException as error:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             str(error))
        tasks.add_task('impact',
                       testcase_id,
                       job_type,
                       wait_time=environment.get_value('FAIL_WAIT'))
        return

    testcase = data_handler.get_testcase_by_id(testcase_id)
    set_testcase_with_impacts(testcase, impacts)

    # Set the stacktrace in case we have an unreproducible crash on trunk,
    # but it crashes on one of the production builds.
    if testcase.is_status_unreproducible() and impacts.get_extra_trace():
        testcase.crash_stacktrace = data_handler.filter_stacktrace(
            '%s\n\n%s' %
            (data_handler.get_stacktrace(testcase), impacts.get_extra_trace()))

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.FINISHED)