Example #1
def execute_task(testcase_id, job_type):
    """Execute progression task."""
    try:
        find_fixed_range(testcase_id, job_type)
    except errors.BuildSetupError as error:
        # If we failed to set up a build, it is likely a bot error. We can retry
        # the task in this case.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        error_message = 'Build setup failed r%d' % error.revision
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)
        build_fail_wait = environment.get_value('FAIL_WAIT')
        tasks.add_task('progression',
                       testcase_id,
                       job_type,
                       wait_time=build_fail_wait)
    except errors.BadBuildError:
        # Bad builds encountered while narrowing the range are recoverable and
        # should never reach this point; a BadBuildError here means the build is
        # unrecoverable.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        error_message = 'Unable to recover from bad build'
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)

    # If there is a fine grained bisection service available, request it. Both
    # regression and fixed ranges are requested once. Regression is also requested
    # here as the bisection service may require details that are not yet available
    # (e.g. issue ID) at the time regress_task completes.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    bisection.request_bisection(testcase)
Example #2
    def _test(self, sanitizer, old_commit='old', new_commit='new'):
        """Test task publication."""
        bisection.request_bisection(self.testcase)
        publish_calls = self.mock.publish.call_args_list
        bisect_types = ('regressed', 'fixed')

        self.assertEqual(2, len(publish_calls))
        for bisect_type, publish_call in zip(bisect_types, publish_calls):
            topic = publish_call[0][1]
            message = publish_call[0][2][0]
            self.assertEqual('/projects/project/topics/topic', topic)
            self.assertEqual(b'reproducer', message.data)
            self.assertDictEqual(
                {
                    'crash_state': 'A\nB\nC',
                    'crash_type': 'crash-type',
                    'security': 'True',
                    'severity': 'Medium',
                    'fuzz_target': 'target',
                    'new_commit': new_commit,
                    'old_commit': old_commit,
                    'project_name': 'proj',
                    'sanitizer': sanitizer,
                    'testcase_id': '1',
                    'issue_id': '1337',
                    'type': bisect_type,
                    'timestamp': '2021-01-01T00:00:00',
                }, message.attributes)

        testcase = self.testcase.key.get()
        self.assertTrue(testcase.get_metadata('requested_regressed_bisect'))
        self.assertTrue(testcase.get_metadata('requested_fixed_bisect'))
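
The publish_call[0][1] / publish_call[0][2][0] indexing above unpacks mock.call objects: index [0] is the positional-argument tuple. A minimal, self-contained illustration, assuming a publish signature shaped like publish(client, topic, messages) (the signature is inferred from the indices, not taken from the ClusterFuzz source):

from unittest import mock

publish = mock.Mock()
publish('client', '/projects/project/topics/topic', ['message-0'])

call = publish.call_args_list[0]
args = call[0]  # the positional-argument tuple of this call
assert args[1] == '/projects/project/topics/topic'  # topic
assert args[2][0] == 'message-0'                    # first message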
Example #3
 def test_request_bisection_no_bug(self):
     """Test request bisection for testcases with no bug attached."""
     self.testcase.job_type = 'libfuzzer_asan_proj'
     self.testcase.bug_information = ''
     self.testcase.put()
     bisection.request_bisection(self.testcase)
     self.assertEqual(0, self.mock.publish.call_count)
Example #4
 def test_request_bisection_flaky(self):
     """Test request bisection for flaky testcases."""
     self.testcase.job_type = 'libfuzzer_asan_proj'
     self.testcase.one_time_crasher_flag = True
     self.testcase.put()
     bisection.request_bisection(self.testcase)
     self.assertEqual(0, self.mock.publish.call_count)
Example #5
 def test_request_bisection_non_security(self):
     """Test request bisection for non-security testcases."""
     self.testcase.job_type = 'libfuzzer_asan_proj'
     self.testcase.security_flag = False
     self.testcase.put()
     bisection.request_bisection(self.testcase)
     self.assertEqual(0, self.mock.publish.call_count)
Example #6
 def test_request_bisection_blackbox(self):
     """Test request bisection for blackbox."""
     self.testcase.job_type = 'blackbox'
     self.testcase.overridden_fuzzer_name = None
     self.testcase.put()
     bisection.request_bisection(self.testcase)
     self.assertEqual(0, self.mock.publish.call_count)
Example #7
 def test_request_bisection_once_only(self):
     """Test request bisection for testcases isn't repeated if already
 requested."""
     self.testcase.set_metadata('requested_regressed_bisect', True)
     self.testcase.set_metadata('requested_fixed_bisect', True)
     self.testcase.put()
     bisection.request_bisection(self.testcase)
     self.assertEqual(0, self.mock.publish.call_count)
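
Taken together, Examples #3 through #7 pin down the conditions under which request_bisection publishes nothing. A minimal sketch consolidating those guards; the helper name _should_request_bisection is hypothetical, and each check is inferred from one test above rather than taken from the ClusterFuzz source:

def _should_request_bisection(testcase):
    """Hypothetical consolidation of the guards exercised above."""
    if not testcase.bug_information:          # no bug attached (Example #3)
        return False
    if testcase.one_time_crasher_flag:        # flaky testcase (Example #4)
        return False
    if not testcase.security_flag:            # non-security (Example #5)
        return False
    if not testcase.overridden_fuzzer_name:   # blackbox, no fuzz target (Example #6)
        return False
    if (testcase.get_metadata('requested_regressed_bisect') and
            testcase.get_metadata('requested_fixed_bisect')):
        return False                          # already requested (Example #7)
    return True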
Example #8
 def test_request_single_commit_range(self):
     """Request bisection with a single commit (invalid range)."""
     self.mock.get_primary_bucket_path.return_value = 'bucket'
     self.mock.get_revisions_list.return_value = list(range(6))
     self.mock.get_component_range_list.return_value = [
         {
             'link_text': 'one',
         },
     ]
     bisection.request_bisection(self.testcase)
     self._test('address', old_commit='one', new_commit='one')
     self.mock.get_component_range_list.assert_has_calls([
         mock.call(123, 456, 'libfuzzer_asan_proj'),
         mock.call(0, 3, 'libfuzzer_asan_proj'),
         mock.call(123, 456, 'libfuzzer_asan_proj'),
         mock.call(4, 5, 'libfuzzer_asan_proj'),
     ])
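
The expected old_commit == new_commit == 'one' follows from the mocked component range: with a single link_text entry, both ends of the bisect range resolve to the same commit. A tiny illustration of that collapse (the helper is hypothetical):

def commit_range_endpoints(component_range_list):
    # Hypothetical helper mirroring the assertion above: the first and last
    # 'link_text' entries become old_commit and new_commit respectively.
    return (component_range_list[0]['link_text'],
            component_range_list[-1]['link_text'])

# With a single entry, the range collapses to one commit on both ends.
assert commit_range_endpoints([{'link_text': 'one'}]) == ('one', 'one')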
Example #9
def mark_unreproducible_if_flaky(testcase, potentially_flaky):
    """Check to see if a test case appears to be flaky."""
    task_name = environment.get_value('TASK_NAME')

    # If this run does not suggest that we are flaky, clear the flag and assume
    # that we are reproducible.
    if not potentially_flaky:
        testcase.set_metadata('potentially_flaky', False)
        return

    # If we have not been marked as potentially flaky in the past, don't mark
    # the test case as unreproducible yet. It is now potentially flaky.
    if not testcase.get_metadata('potentially_flaky'):
        testcase.set_metadata('potentially_flaky', True)

        # In this case, the current task will usually be in a state where it cannot
        # be completed. Recreate it.
        tasks.add_task(task_name, testcase.key.id(), testcase.job_type)
        return

    # At this point, this test case has been flagged as potentially flaky twice.
    # Mark it as unreproducible, and set the fields that can no longer be
    # populated to 'NA'.
    if task_name == 'minimize' and not testcase.minimized_keys:
        testcase.minimized_keys = 'NA'
    if task_name in ['minimize', 'impact']:
        testcase.set_impacts_as_na()
    if task_name in ['minimize', 'regression']:
        testcase.regression = 'NA'
    if task_name in ['minimize', 'progression']:
        testcase.fixed = 'NA'

    testcase.one_time_crasher_flag = True
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         'Testcase appears to be flaky')

    # The issue update that flips the reproducibility label is done in the App
    # Engine cleanup cron. This avoids calling the issue tracker APIs from GCE.

    # For unreproducible testcases, it is still beneficial to get component
    # information from the blame task.
    create_blame_task_if_needed(testcase)

    # Let bisection service know about flakiness.
    bisection.request_bisection(testcase)
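
Distilled from the function above, the two-strike logic is a small state machine; a self-contained sketch, stripped of the ClusterFuzz plumbing (task re-queuing, testcase comments, blame/bisection requests):

def flaky_state_transition(was_potentially_flaky, potentially_flaky):
    """Return (new_potentially_flaky, mark_unreproducible)."""
    if not potentially_flaky:
        return False, False   # reproducible run: clear the flag
    if not was_potentially_flaky:
        return True, False    # strike one: flag it and retry the task
    return True, True         # strike two: mark as unreproducible

assert flaky_state_transition(False, True) == (True, False)
assert flaky_state_transition(True, True) == (True, True)
assert flaky_state_transition(True, False) == (False, False)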
Example #10
    def test_request_bisection_invalid_range(self):
        """Test request bisection for testcases with no bug attached."""
        self.testcase.job_type = 'libfuzzer_asan_proj'
        self.testcase.regression = 'NA'
        self.testcase.fixed = 'NA'
        self.testcase.put()
        bisection.request_bisection(self.testcase)

        publish_calls = self.mock.publish.call_args_list
        self.assertEqual(1, len(publish_calls))

        publish_call = publish_calls[0]
        topic = publish_call[0][1]
        message = publish_call[0][2][0]
        self.assertEqual('/projects/project/topics/topic', topic)
        self.assertEqual(b'', message.data)
        self.assertDictEqual({
            'testcase_id': '1',
            'type': 'invalid',
        }, message.attributes)
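
The production side of this assertion, as a hedged sketch: when both ranges are 'NA', a single message with empty data and a 'type': 'invalid' attribute is published. The Message container and the publish signature are assumptions, not the ClusterFuzz API:

import collections

Message = collections.namedtuple('Message', ['data', 'attributes'])

def notify_invalid(publish, topic, testcase_id):
    # One 'invalid' notification, matching the attributes asserted above.
    publish(topic, [Message(data=b'',
                            attributes={'testcase_id': str(testcase_id),
                                        'type': 'invalid'})])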
Example #11
def mark(testcase, security, severity):
    """Mark the testcase as security-related."""
    testcase.security_flag = security
    if security:
        if not severity:
            severity = severity_analyzer.get_security_severity(
                testcase.crash_type, testcase.crash_stacktrace,
                testcase.job_type, bool(testcase.gestures))

        testcase.security_severity = severity
        bisection.request_bisection(testcase)
    else:
        # The bisection infrastructure only cares about security bugs. If this was
        # marked as non-security, mark it as invalid.
        bisection.notify_bisection_invalid(testcase)

    testcase.put()
    helpers.log(
        f'Set security flags on testcase {testcase.key.id()} to {security}.',
        helpers.MODIFY_OPERATION)
Example #12
def find_fixed_range(testcase_id, job_type):
    """Attempt to find the revision range where a testcase was fixed."""
    deadline = tasks.get_task_completion_deadline()
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    if testcase.fixed:
        logs.log_error('Fixed range is already set as %s, skip.' %
                       testcase.fixed)
        return

    # Setup testcase and its dependencies.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
    if not file_list:
        return

    # Set a flag to indicate we are running the progression task. This shows a
    # pending status on the testcase report page and avoids conflicting testcase
    # updates by the triage cron.
    testcase.set_metadata('progression_pending', True)

    # Custom binaries are handled as special cases.
    if build_manager.is_custom_binary():
        _check_fixed_for_custom_binary(testcase, job_type, testcase_file_path)
        return

    build_bucket_path = build_manager.get_primary_bucket_path()
    revision_list = build_manager.get_revisions_list(build_bucket_path,
                                                     testcase=testcase)
    if not revision_list:
        data_handler.close_testcase_with_error(
            testcase_id, 'Failed to fetch revision list')
        return

    # Use min/max index to mark the start and end of the revision list used for
    # bisecting the progression range. Set the start to the revision where the
    # crash was noticed and the end to the trunk revision. Also, reuse min/max
    # from a past run if it timed out.
    min_revision = testcase.get_metadata('last_progression_min')
    max_revision = testcase.get_metadata('last_progression_max')

    if min_revision or max_revision:
        # Clear these to avoid using them in the next run. If this run fails, the
        # next run should be tried without them to see if it succeeds. If this
        # run succeeds, they should still be cleared to avoid capping the max
        # revision in the next run.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        testcase.delete_metadata('last_progression_min', update_testcase=False)
        testcase.delete_metadata('last_progression_max', update_testcase=False)
        testcase.put()

    last_tested_revision = testcase.get_metadata('last_tested_crash_revision')
    known_crash_revision = last_tested_revision or testcase.crash_revision
    if not min_revision:
        min_revision = known_crash_revision
    if not max_revision:
        max_revision = revisions.get_last_revision_in_list(revision_list)

    min_index = revisions.find_min_revision_index(revision_list, min_revision)
    if min_index is None:
        raise errors.BuildNotFoundError(min_revision, job_type)
    max_index = revisions.find_max_revision_index(revision_list, max_revision)
    if max_index is None:
        raise errors.BuildNotFoundError(max_revision, job_type)

    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED,
                                         'r%d' % max_revision)

    # Check to see if this testcase is still crashing now. If it is, then just
    # bail out.
    result = _testcase_reproduces_in_revision(testcase,
                                              testcase_file_path,
                                              job_type,
                                              max_revision,
                                              update_metadata=True)
    if result.is_crash():
        logs.log('Found crash with same signature on latest revision r%d.' %
                 max_revision)
        app_path = environment.get_value('APP_PATH')
        command = testcase_manager.get_command_line_for_application(
            testcase_file_path,
            app_path=app_path,
            needs_http=testcase.http_flag)
        symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
        unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
        stacktrace = utils.get_crash_stacktrace_output(
            command, symbolized_crash_stacktrace,
            unsymbolized_crash_stacktrace)
        testcase = data_handler.get_testcase_by_id(testcase_id)
        testcase.last_tested_crash_stacktrace = data_handler.filter_stacktrace(
            stacktrace)
        _update_completion_metadata(
            testcase,
            max_revision,
            is_crash=True,
            message='still crashes on latest revision r%s' % max_revision)

        # Since we've verified that the test case is still crashing, clear out any
        # metadata indicating potential flake from previous runs.
        task_creation.mark_unreproducible_if_flaky(testcase, False)

        # For the chromium project, save the latest crash information for later
        # upload to chromecrash/.
        state = result.get_symbolized_data()
        crash_uploader.save_crash_info_if_needed(testcase_id, max_revision,
                                                 job_type, state.crash_type,
                                                 state.crash_address,
                                                 state.frames)
        return

    # Don't burden the NFS server with caching these random builds.
    environment.set_value('CACHE_STORE', False)

    # Verify that we do crash in the min revision. This is assumed to be true
    # while we are doing the bisect.
    result = _testcase_reproduces_in_revision(testcase, testcase_file_path,
                                              job_type, min_revision)
    if result and not result.is_crash():
        testcase = data_handler.get_testcase_by_id(testcase_id)

        # Retry once on another bot to confirm our result.
        if data_handler.is_first_retry_for_task(testcase,
                                                reset_after_retry=True):
            tasks.add_task('progression', testcase_id, job_type)
            error_message = (
                'Known crash revision %d did not crash, will retry on another bot to '
                'confirm result' % known_crash_revision)
            data_handler.update_testcase_comment(testcase,
                                                 data_types.TaskState.ERROR,
                                                 error_message)
            _update_completion_metadata(testcase, max_revision)
            return

        _clear_progression_pending(testcase)
        error_message = ('Known crash revision %d did not crash' %
                         known_crash_revision)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)
        task_creation.mark_unreproducible_if_flaky(testcase, True)
        return

    # Start a binary search to narrow down the fixed range. At this point, we
    # know that we crash in min_revision and do not crash in max_revision.
    while time.time() < deadline:
        min_revision = revision_list[min_index]
        max_revision = revision_list[max_index]

        # If the min and max revisions are one apart this is as much as we can
        # narrow the range.
        if max_index - min_index == 1:
            _save_fixed_range(testcase_id, min_revision, max_revision,
                              testcase_file_path)
            return

        # Occasionally, we get into this bad state. It seems to be related to test
        # cases with flaky stacks, but the exact cause is unknown.
        if max_index - min_index < 1:
            testcase = data_handler.get_testcase_by_id(testcase_id)
            testcase.fixed = 'NA'
            testcase.open = False
            message = ('Fixed testing errored out (min and max revisions '
                       'are both %d)' % min_revision)
            _update_completion_metadata(testcase,
                                        max_revision,
                                        message=message)

            # Let the bisection service know about the NA status.
            bisection.request_bisection(testcase)
            return

        # Test the middle revision of our range.
        middle_index = (min_index + max_index) // 2
        middle_revision = revision_list[middle_index]

        testcase = data_handler.get_testcase_by_id(testcase_id)
        log_message = 'Testing r%d (current range %d:%d)' % (
            middle_revision, min_revision, max_revision)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.WIP,
                                             log_message)

        try:
            result = _testcase_reproduces_in_revision(testcase,
                                                      testcase_file_path,
                                                      job_type,
                                                      middle_revision)
        except errors.BadBuildError:
            # Skip this revision.
            del revision_list[middle_index]
            max_index -= 1
            continue

        if result.is_crash():
            min_index = middle_index
        else:
            max_index = middle_index

        _save_current_fixed_range_indices(testcase_id,
                                          revision_list[min_index],
                                          revision_list[max_index])

    # If we've broken out of the loop, we've exceeded the deadline. Recreate the
    # task to pick up where we left off.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    error_message = ('Timed out, current range r%d:r%d' %
                     (revision_list[min_index], revision_list[max_index]))
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         error_message)
    tasks.add_task('progression', testcase_id, job_type)
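
The heart of find_fixed_range is an ordinary binary search over the revision list. A self-contained distillation (deadline handling, metadata checkpoints, and bad-build skipping omitted), assuming crashes(revisions[0]) is True and crashes(revisions[-1]) is False, as verified before the loop:

def bisect_fixed_range(revisions, crashes):
    """Return (last_crashing, first_fixed), mirroring the loop above."""
    min_index, max_index = 0, len(revisions) - 1
    while max_index - min_index > 1:
        middle_index = (min_index + max_index) // 2
        if crashes(revisions[middle_index]):
            min_index = middle_index  # crash persists: raise the lower bound
        else:
            max_index = middle_index  # no crash: the fix is at or before here
    return revisions[min_index], revisions[max_index]

# Example: a crash fixed in revision 7 narrows to the range r6:r7.
assert bisect_fixed_range(list(range(10)), lambda r: r < 7) == (6, 7)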