Example #1
def execute_task(testcase_id, job_type):
    """Execute progression task."""
    try:
        find_fixed_range(testcase_id, job_type)
    except errors.BuildSetupError as error:
        # If we failed to set up a build, it is likely a bot error, so we can
        # retry the task in this case.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        error_message = 'Build setup failed r%d' % error.revision
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)
        build_fail_wait = environment.get_value('FAIL_WAIT')
        tasks.add_task('progression',
                       testcase_id,
                       job_type,
                       wait_time=build_fail_wait)
    except errors.BadBuildError:
        # Though bad builds hit while narrowing the range are recoverable,
        # certain builds marked as bad may be unrecoverable. Recoverable ones
        # should not reach this point.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        error_message = 'Unable to recover from bad build'
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)

    # If there is a fine grained bisection service available, request it. Both
    # regression and fixed ranges are requested once. Regression is also requested
    # here as the bisection service may require details that are not yet available
    # (e.g. issue ID) at the time regress_task completes.
    task_creation.request_bisection(testcase_id)
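
The retry path above re-enqueues the same task with a delay instead of failing the testcase. A minimal local sketch of that delayed-retry pattern (TASK_QUEUE, add_task, and lease_task are illustrative stand-ins, not the distributed queue behind tasks.add_task):

import time

TASK_QUEUE = []  # Tuples of (earliest_run_time, task_name, testcase_id, job_type).

def add_task(task_name, testcase_id, job_type, wait_time=None):
  """Enqueue a task that must not run for wait_time seconds (cf. FAIL_WAIT)."""
  earliest_run_time = time.time() + (wait_time or 0)
  TASK_QUEUE.append((earliest_run_time, task_name, testcase_id, job_type))

def lease_task():
  """Return the next task whose delay has elapsed, or None."""
  now = time.time()
  for task in sorted(TASK_QUEUE):
    if task[0] <= now:
      TASK_QUEUE.remove(task)
      return task
  return None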
Example #2
 def test_request_bisection_flaky(self):
   """Test request bisection for flaky testcases."""
   self.testcase.job_type = 'libfuzzer_asan_proj'
   self.testcase.one_time_crasher_flag = True
   self.testcase.put()
   task_creation.request_bisection(self.testcase.key.id())
   self.assertEqual(0, self.mock.publish.call_count)
Example #3
  def _test(self, sanitizer):
    """Test task publication."""
    task_creation.request_bisection(self.testcase.key.id())
    publish_calls = self.mock.publish.call_args_list
    bisect_types = ('regressed', 'fixed')

    self.assertEqual(2, len(publish_calls))
    for bisect_type, publish_call in zip(bisect_types, publish_calls):
      topic = publish_call[0][1]
      message = publish_call[0][2][0]
      self.assertEqual('/projects/project/topics/topic', topic)
      self.assertEqual(b'reproducer', message.data)
      self.assertDictEqual({
          'crash_type': 'crash-type',
          'security': 'True',
          'fuzz_target': 'target',
          'new_commit': 'new',
          'old_commit': 'old',
          'project_name': 'proj',
          'sanitizer': sanitizer,
          'testcase_id': '1',
          'issue_id': '1337',
          'type': bisect_type,
      }, message.attributes)

    testcase = self.testcase.key.get()
    self.assertTrue(testcase.get_metadata('requested_regressed_bisect'))
    self.assertTrue(testcase.get_metadata('requested_fixed_bisect'))
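
Every value asserted in message.attributes above is a string, since Pub/Sub message attributes are string-to-string maps. A minimal sketch of assembling that payload (make_bisect_attributes is a hypothetical helper, not the actual ClusterFuzz code):

def make_bisect_attributes(testcase_id, issue_id, bisect_type, crash_type,
                           security_flag, fuzz_target, old_commit, new_commit,
                           project_name, sanitizer):
  """Build the attribute dict asserted above; non-string values are cast."""
  return {
      'crash_type': crash_type,
      'security': str(security_flag),  # str(True) == 'True', as in the test.
      'fuzz_target': fuzz_target,
      'new_commit': new_commit,
      'old_commit': old_commit,
      'project_name': project_name,
      'sanitizer': sanitizer,
      'testcase_id': str(testcase_id),
      'issue_id': str(issue_id),
      'type': bisect_type,
  }

Calling make_bisect_attributes(1, 1337, 'regressed', 'crash-type', True, 'target', 'old', 'new', 'proj', sanitizer) reproduces the dict checked in the test.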
Example #4
 def test_request_bisection_no_bug(self):
   """Test request bisection for testcases with no bug attached."""
   self.testcase.job_type = 'libfuzzer_asan_proj'
   self.testcase.bug_information = ''
   self.testcase.put()
   task_creation.request_bisection(self.testcase.key.id())
   self.assertEqual(0, self.mock.publish.call_count)
Example #5
 def test_request_bisection_blackbox(self):
   """Test request bisection for blackbox."""
   self.testcase.job_type = 'blackbox'
   self.testcase.overridden_fuzzer_name = None
   self.testcase.put()
   task_creation.request_bisection(self.testcase.key.id())
   self.assertEqual(0, self.mock.publish.call_count)
Example #6
 def test_request_bisection_non_security(self):
   """Test request bisection for non-security testcases."""
   self.testcase.job_type = 'libfuzzer_asan_proj'
   self.testcase.security_flag = False
   self.testcase.put()
   task_creation.request_bisection(self.testcase.key.id())
   self.assertEqual(0, self.mock.publish.call_count)
Example #7
 def test_request_bisection_invalid_range(self):
   """Test request bisection for testcases with invalid ranges."""
   self.testcase.job_type = 'libfuzzer_asan_proj'
   self.testcase.regression = 'NA'
   self.testcase.fixed = 'NA'
   self.testcase.put()
   task_creation.request_bisection(self.testcase.key.id())
   self.assertEqual(0, self.mock.publish.call_count)
Example #8
 def test_request_bisection_once_only(self):
   """Test request bisection for testcases isn't repeated if already
   requested."""
   self.testcase.set_metadata('requested_regressed_bisect', True)
   self.testcase.set_metadata('requested_fixed_bisect', True)
   self.testcase.put()
   task_creation.request_bisection(self.testcase.key.id())
   self.assertEqual(0, self.mock.publish.call_count)
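
The guard this test exercises is a per-type metadata flag: the first request sets it, and later calls publish nothing. A sketch of that idempotency check (should_request_bisection and _FakeTestcase are illustrative; the flag names match the 'requested_regressed_bisect' / 'requested_fixed_bisect' metadata used above):

class _FakeTestcase:
  """Tiny stand-in exposing the metadata API used in these tests."""

  def __init__(self):
    self._metadata = {}

  def get_metadata(self, key):
    return self._metadata.get(key)

  def set_metadata(self, key, value):
    self._metadata[key] = value

def should_request_bisection(testcase, bisect_type):
  # bisect_type is 'regressed' or 'fixed'.
  flag_name = 'requested_%s_bisect' % bisect_type
  if testcase.get_metadata(flag_name):
    return False  # Already requested once; skip publishing.
  testcase.set_metadata(flag_name, True)
  return True

testcase = _FakeTestcase()
assert should_request_bisection(testcase, 'fixed')
assert not should_request_bisection(testcase, 'fixed')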
Example #9
def _save_fixed_range(testcase_id, min_revision, max_revision):
  """Update a test case and other metadata with a fixed range."""
  testcase = data_handler.get_testcase_by_id(testcase_id)
  testcase.fixed = '%d:%d' % (min_revision, max_revision)
  testcase.open = False

  _update_completion_metadata(
      testcase, max_revision, message='fixed in range r%s' % testcase.fixed)
  _write_to_bigquery(testcase, min_revision, max_revision)

  # If there is a fine grained bisection service available, request it.
  task_creation.request_bisection(testcase, 'fixed')
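
The fixed range is persisted as a 'min:max' revision string ('NA' when no usable range exists, as in the invalid-range tests). A small sketch of reading that format back (parse_revision_range is a hypothetical helper):

def parse_revision_range(range_str):
  """Return (min_revision, max_revision), or None for 'NA'/empty values."""
  if not range_str or range_str == 'NA':
    return None
  min_part, _, max_part = range_str.partition(':')
  return int(min_part), int(max_part)

assert parse_revision_range('1234:1240') == (1234, 1240)
assert parse_revision_range('NA') is None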
Example #10
 def test_request_single_commit_range(self):
   """Request bisection with a single commit (invalid range)."""
   self.mock.get_primary_bucket_path.return_value = 'bucket'
   self.mock.get_revisions_list.return_value = list(range(6))
   self.mock.get_component_range_list.return_value = [
       {
           'link_text': 'one',
       },
   ]
   task_creation.request_bisection(self.testcase.key.id())
   self._test('address', old_commit='one', new_commit='one')
   self.mock.get_component_range_list.assert_has_calls([
       mock.call(123, 456, 'libfuzzer_asan_proj'),
       mock.call(0, 3, 'libfuzzer_asan_proj'),
       mock.call(123, 456, 'libfuzzer_asan_proj'),
       mock.call(4, 5, 'libfuzzer_asan_proj'),
   ])
Example #11
 def _test(self, sanitizer, bisect_type):
   """Test task publication."""
   task_creation.request_bisection(self.testcase, bisect_type)
   publish_call = self.mock.publish.call_args[0]
   topic = publish_call[1]
   message = publish_call[2]
   self.assertEqual('/projects/project/topics/topic', topic)
   self.assertEqual(b'reproducer', message.data)
   self.assertDictEqual({
       'fuzz_target': 'target',
       'new_commit': 'new',
       'old_commit': 'old',
       'project_name': 'proj',
       'sanitizer': sanitizer,
       'testcase_id': '1',
       'issue_id': '1337',
       'type': bisect_type,
   }, message.attributes)
Example #12
    def test_request_bisection_invalid_range(self):
        """Test request bisection for testcases with no bug attached."""
        self.testcase.job_type = 'libfuzzer_asan_proj'
        self.testcase.regression = 'NA'
        self.testcase.fixed = 'NA'
        self.testcase.put()
        task_creation.request_bisection(self.testcase.key.id())

        publish_calls = self.mock.publish.call_args_list
        self.assertEqual(1, len(publish_calls))

        publish_call = publish_calls[0]
        topic = publish_call[0][1]
        message = publish_call[0][2][0]
        self.assertEqual('/projects/project/topics/topic', topic)
        self.assertEqual(b'', message.data)
        self.assertDictEqual({
            'testcase_id': '1',
            'type': 'invalid',
        }, message.attributes)
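
Compare this with Example #3: with usable ranges, separate 'regressed' and 'fixed' requests go out, but when both ranges are 'NA' there is nothing to bisect, so a single lightweight 'invalid' notification is published instead. A sketch of that decision (illustrative only, not the actual ClusterFuzz logic):

def bisect_request_types(regression_range, fixed_range):
  """Map the stored 'min:max' / 'NA' range strings to message types."""
  if regression_range == 'NA' and fixed_range == 'NA':
    return ['invalid']
  types = []
  if regression_range and regression_range != 'NA':
    types.append('regressed')
  if fixed_range and fixed_range != 'NA':
    types.append('fixed')
  return types

assert bisect_request_types('NA', 'NA') == ['invalid']
assert bisect_request_types('123:456', '4:5') == ['regressed', 'fixed']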
Example #13
def save_regression_range(testcase_id, regression_range_start,
                          regression_range_end):
    """Saves the regression range and creates blame and impact task if needed."""
    testcase = data_handler.get_testcase_by_id(testcase_id)
    testcase.regression = '%d:%d' % (regression_range_start,
                                     regression_range_end)
    data_handler.update_testcase_comment(
        testcase, data_types.TaskState.FINISHED,
        'regressed in range %s' % testcase.regression)

    write_to_big_query(testcase, regression_range_start, regression_range_end)

    # Force impacts update after regression range is updated. In several cases,
    # we might not have a production build to test with, so regression range is
    # used to decide impacts.
    task_creation.create_impact_task_if_needed(testcase)

    # Get blame information using the regression range result.
    task_creation.create_blame_task_if_needed(testcase)

    # If there is a fine grained bisection service available, request it.
    task_creation.request_bisection(testcase, 'regressed')
Example #14
def find_fixed_range(testcase_id, job_type):
    """Attempt to find the revision range where a testcase was fixed."""
    deadline = tasks.get_task_completion_deadline()
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    if testcase.fixed:
        logs.log_error('Fixed range is already set as %s, skip.' %
                       testcase.fixed)
        return

    # Setup testcase and its dependencies.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
    if not file_list:
        return

    # Set a flag to indicate we are running a progression task. This shows a
    # pending status on the testcase report page and avoids conflicting
    # testcase updates by the triage cron.
    testcase.set_metadata('progression_pending', True)

    # Custom binaries are handled as special cases.
    if build_manager.is_custom_binary():
        _check_fixed_for_custom_binary(testcase, job_type, testcase_file_path)
        return

    build_bucket_path = build_manager.get_primary_bucket_path()
    revision_list = build_manager.get_revisions_list(build_bucket_path,
                                                     testcase=testcase)
    if not revision_list:
        data_handler.close_testcase_with_error(
            testcase_id, 'Failed to fetch revision list')
        return

    # Use min_index and max_index to mark the section of the revision list used
    # for bisecting the progression range. Set the start to the revision where
    # we noticed the crash and the end to the trunk revision. Also, reuse the
    # min and max from a past run if it timed out.
    min_revision = testcase.get_metadata('last_progression_min')
    max_revision = testcase.get_metadata('last_progression_max')

    if min_revision or max_revision:
        # Clear these to avoid using them in the next run. If this run fails,
        # we should try the next run without them to see if it succeeds. If
        # this run succeeds, we should still clear them to avoid capping the
        # max revision in the next run.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        testcase.delete_metadata('last_progression_min', update_testcase=False)
        testcase.delete_metadata('last_progression_max', update_testcase=False)
        testcase.put()

    last_tested_revision = testcase.get_metadata('last_tested_crash_revision')
    known_crash_revision = last_tested_revision or testcase.crash_revision
    if not min_revision:
        min_revision = known_crash_revision
    if not max_revision:
        max_revision = revisions.get_last_revision_in_list(revision_list)

    min_index = revisions.find_min_revision_index(revision_list, min_revision)
    if min_index is None:
        raise errors.BuildNotFoundError(min_revision, job_type)
    max_index = revisions.find_max_revision_index(revision_list, max_revision)
    if max_index is None:
        raise errors.BuildNotFoundError(max_revision, job_type)

    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED,
                                         'r%d' % max_revision)

    # Check to see if this testcase is still crashing now. If it is, then just
    # bail out.
    result = _testcase_reproduces_in_revision(testcase,
                                              testcase_file_path,
                                              job_type,
                                              max_revision,
                                              update_metadata=True)
    if result.is_crash():
        logs.log('Found crash with same signature on latest revision r%d.' %
                 max_revision)
        app_path = environment.get_value('APP_PATH')
        command = testcase_manager.get_command_line_for_application(
            testcase_file_path,
            app_path=app_path,
            needs_http=testcase.http_flag)
        symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
        unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
        stacktrace = utils.get_crash_stacktrace_output(
            command, symbolized_crash_stacktrace,
            unsymbolized_crash_stacktrace)
        testcase = data_handler.get_testcase_by_id(testcase_id)
        testcase.last_tested_crash_stacktrace = data_handler.filter_stacktrace(
            stacktrace)
        _update_completion_metadata(
            testcase,
            max_revision,
            is_crash=True,
            message='still crashes on latest revision r%s' % max_revision)

        # Since we've verified that the test case is still crashing, clear out any
        # metadata indicating potential flake from previous runs.
        task_creation.mark_unreproducible_if_flaky(testcase, False)

        # For chromium project, save latest crash information for later upload
        # to chromecrash/.
        state = result.get_symbolized_data()
        crash_uploader.save_crash_info_if_needed(testcase_id, max_revision,
                                                 job_type, state.crash_type,
                                                 state.crash_address,
                                                 state.frames)
        return

    # Don't burden NFS server with caching these random builds.
    environment.set_value('CACHE_STORE', False)

    # Verify that we do crash in the min revision. This is assumed to be true
    # while we are doing the bisect.
    result = _testcase_reproduces_in_revision(testcase, testcase_file_path,
                                              job_type, min_revision)
    if result and not result.is_crash():
        testcase = data_handler.get_testcase_by_id(testcase_id)

        # Retry once on another bot to confirm our result.
        if data_handler.is_first_retry_for_task(testcase,
                                                reset_after_retry=True):
            tasks.add_task('progression', testcase_id, job_type)
            error_message = (
                'Known crash revision %d did not crash, will retry on another bot to '
                'confirm result' % known_crash_revision)
            data_handler.update_testcase_comment(testcase,
                                                 data_types.TaskState.ERROR,
                                                 error_message)
            _update_completion_metadata(testcase, max_revision)
            return

        _clear_progression_pending(testcase)
        error_message = ('Known crash revision %d did not crash' %
                         known_crash_revision)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)
        task_creation.mark_unreproducible_if_flaky(testcase, True)
        return

    # Start a binary search to find last non-crashing revision. At this point, we
    # know that we do crash in the min_revision, and do not crash in max_revision.
    while time.time() < deadline:
        min_revision = revision_list[min_index]
        max_revision = revision_list[max_index]

        # If the min and max revisions are one apart, this is as much as we can
        # narrow the range.
        if max_index - min_index == 1:
            _save_fixed_range(testcase_id, min_revision, max_revision,
                              testcase_file_path)
            return

        # Occasionally, we get into this bad state. It seems to be related to test
        # cases with flaky stacks, but the exact cause is unknown.
        if max_index - min_index < 1:
            testcase = data_handler.get_testcase_by_id(testcase_id)
            testcase.fixed = 'NA'
            testcase.open = False
            message = ('Fixed testing errored out (min and max revisions '
                       'are both %d)' % min_revision)
            _update_completion_metadata(testcase,
                                        max_revision,
                                        message=message)

            # Let the bisection service know about the NA status.
            task_creation.request_bisection(testcase_id)
            return

        # Test the middle revision of our range.
        middle_index = (min_index + max_index) // 2
        middle_revision = revision_list[middle_index]

        testcase = data_handler.get_testcase_by_id(testcase_id)
        log_message = 'Testing r%d (current range %d:%d)' % (
            middle_revision, min_revision, max_revision)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.WIP,
                                             log_message)

        try:
            result = _testcase_reproduces_in_revision(testcase,
                                                      testcase_file_path,
                                                      job_type,
                                                      middle_revision)
        except errors.BadBuildError:
            # Skip this revision.
            del revision_list[middle_index]
            max_index -= 1
            continue

        if result.is_crash():
            min_index = middle_index
        else:
            max_index = middle_index

        _save_current_fixed_range_indices(testcase_id,
                                          revision_list[min_index],
                                          revision_list[max_index])

    # If we've broken out of the loop, we've exceeded the deadline. Recreate the
    # task to pick up where we left off.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    error_message = ('Timed out, current range r%d:r%d' %
                     (revision_list[min_index], revision_list[max_index]))
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         error_message)
    tasks.add_task('progression', testcase_id, job_type)
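
The binary search above maintains a simple invariant: the testcase crashes at revision_list[min_index] and does not crash at revision_list[max_index], and each step tests the midpoint and keeps the half that preserves the invariant. A self-contained sketch (crashes() is a hypothetical predicate standing in for _testcase_reproduces_in_revision; bad-build skipping, deadlines, and persistence are omitted):

def bisect_fixed_range(revision_list, crashes):
  """Return the (last_crashing, first_fixed) revisions from a sorted list."""
  min_index, max_index = 0, len(revision_list) - 1
  assert crashes(revision_list[min_index])
  assert not crashes(revision_list[max_index])
  while max_index - min_index > 1:
    middle_index = (min_index + max_index) // 2
    if crashes(revision_list[middle_index]):
      min_index = middle_index  # Still crashes here; the fix landed later.
    else:
      max_index = middle_index  # Already fixed here; the fix landed earlier.
  return revision_list[min_index], revision_list[max_index]

# Example: crashes in r0..r3, fixed from r4 onward -> the range narrows to (3, 4).
assert bisect_fixed_range(list(range(6)), lambda r: r < 4) == (3, 4)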