Example #1
def execute_task(testcase_id, job_type):
    """Execute progression task."""
    try:
        find_fixed_range(testcase_id, job_type)
    except errors.BuildSetupError as error:
        # If we failed to setup a build, it is likely a bot error. We can retry
        # the task in this case.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        error_message = 'Build setup failed r%d' % error.revision
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)
        build_fail_wait = environment.get_value('FAIL_WAIT')
        tasks.add_task('progression',
                       testcase_id,
                       job_type,
                       wait_time=build_fail_wait)
    except errors.BadBuildError:
        # Though bad builds when narrowing the range are recoverable, certain builds
        # being marked as bad may be unrecoverable. Recoverable ones should not
        # reach this point.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        error_message = 'Unable to recover from bad build'
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)

    # If there is a fine grained bisection service available, request it. Both
    # regression and fixed ranges are requested once. Regression is also requested
    # here as the bisection service may require details that are not yet available
    # (e.g. issue ID) at the time regress_task completes.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    bisection.request_bisection(testcase)
Example #2
def execute_task(testcase_id, job_type):
    """Run regression task and handle potential errors."""
    try:
        find_regression_range(testcase_id, job_type)
    except errors.BuildSetupError as error:
        # If we failed to setup a build, it is likely a bot error. We can retry
        # the task in this case.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        error_message = 'Build setup failed r%d' % error.revision
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)
        build_fail_wait = environment.get_value('FAIL_WAIT')
        tasks.add_task('regression',
                       testcase_id,
                       job_type,
                       wait_time=build_fail_wait)
    except errors.BadBuildError:
        # Though bad builds when narrowing the range are recoverable, certain builds
        # being marked as bad may be unrecoverable. Recoverable ones should not
        # reach this point.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        testcase.regression = 'NA'
        error_message = 'Unable to recover from bad build'
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)
Example #3
 def test_recovered_exception(self):
     """Test recovered exception."""
     self.mock.file_issue.return_value = 'ID', Exception('recovered')
     self.assertTrue(triage._file_issue(self.testcase, self.issue_tracker))
     testcase = data_handler.get_testcase_by_id(self.testcase.key.id())
     self.assertEqual('Failed to file issue due to exception: recovered',
                      testcase.get_metadata(triage.TRIAGE_MESSAGE_KEY))
Example #4
def validate_regression_range(testcase, testcase_file_path, job_type,
                              revision_list, min_index):
    """Ensure that we found the correct min revision by testing earlier ones."""
    earlier_revisions = revision_list[
        min_index - EARLIER_REVISIONS_TO_CONSIDER_FOR_VALIDATION:min_index]
    revision_count = min(len(earlier_revisions),
                         REVISIONS_TO_TEST_FOR_VALIDATION)

    revisions_to_test = random.sample(earlier_revisions, revision_count)
    for revision in revisions_to_test:
        try:
            if _testcase_reproduces_in_revision(testcase, testcase_file_path,
                                                job_type, revision):
                testcase = data_handler.get_testcase_by_id(testcase.key.id())
                testcase.regression = 'NA'
                error_message = (
                    'Low confidence in regression range. Test case crashes in '
                    'revision r%d but not later revision r%d' %
                    (revision, revision_list[min_index]))
                data_handler.update_testcase_comment(
                    testcase, data_types.TaskState.ERROR, error_message)
                return False
        except errors.BadBuildError:
            pass

    return True
Example #5
    def post(self, message):
        """Handle a post request."""
        testcase_id = message.attributes.get('testcaseId')
        if not testcase_id:
            raise helpers.EarlyExitException('Missing testcaseId.', 400)

        revision = message.attributes.get('revision')
        if not revision or not revision.isdigit():
            raise helpers.EarlyExitException('Missing revision.', 400)

        revision = int(revision)
        testcase = data_handler.get_testcase_by_id(testcase_id)
        job = data_types.Job.query(
            data_types.Job.name == testcase.job_type).get()
        if not job or not job.is_external():
            raise helpers.EarlyExitException('Invalid job.', 400)

        if message.data:
            stacktrace = message.data.decode()
        else:
            logs.log(f'No stacktrace provided (testcase_id={testcase_id}).')
            stacktrace = ''

        error = message.attributes.get('error')
        handle_update(testcase, revision, stacktrace, error)
        return 'OK'
Example #6
def execute_task(*_):
    """Execute the report uploads."""
    logs.log('Uploading pending reports.')

    # Get metadata for reports requiring upload.
    reports_metadata = ndb_utils.get_all_from_query(
        data_types.ReportMetadata.query(
            ndb_utils.is_false(data_types.ReportMetadata.is_uploaded)))
    reports_metadata = list(reports_metadata)
    if not reports_metadata:
        logs.log('No reports that need upload found.')
        return

    environment.set_value('UPLOAD_MODE', 'prod')

    # Otherwise, upload corresponding reports.
    logs.log('Uploading reports for testcases: %s' %
             str([report.testcase_id for report in reports_metadata]))

    report_metadata_to_delete = []
    for report_metadata in reports_metadata:
        # Convert metadata back into actual report.
        crash_info = crash_uploader.crash_report_info_from_metadata(
            report_metadata)
        testcase_id = report_metadata.testcase_id

        try:
            _ = data_handler.get_testcase_by_id(testcase_id)
        except errors.InvalidTestcaseError:
            logs.log_warn('Could not find testcase %s.' % testcase_id)
            report_metadata_to_delete.append(report_metadata.key)
            continue

        # Upload the report and update the corresponding testcase info.
        logs.log('Processing testcase %s for crash upload.' % testcase_id)
        crash_report_id = crash_info.upload()
        if crash_report_id is None:
            logs.log_error(
                'Crash upload for testcase %s failed, retry later.' %
                testcase_id)
            continue

        # Update the report metadata to indicate successful upload.
        report_metadata.crash_report_id = crash_report_id
        report_metadata.is_uploaded = True
        report_metadata.put()

        logs.log('Uploaded testcase %s to crash, got back report id %s.' %
                 (testcase_id, crash_report_id))
        time.sleep(1)

    # Delete report metadata entries where testcase does not exist anymore or
    # upload is not supported.
    if report_metadata_to_delete:
        ndb_utils.delete_multi(report_metadata_to_delete)

    # Log done with uploads.
    # Deletion happens in batches in cleanup_task, so that in case of error there
    # is some buffer for looking at stored ReportMetadata in the meantime.
    logs.log('Finished uploading crash reports.')
Example #7
 def test_unrecovered_exception(self):
     """Test recovered exception."""
     self.mock.file_issue.side_effect = Exception('unrecovered')
     self.assertFalse(triage._file_issue(self.testcase, self.issue_tracker))
     testcase = data_handler.get_testcase_by_id(self.testcase.key.id())
     self.assertEqual('Failed to file issue due to exception: unrecovered',
                      testcase.get_metadata(triage.TRIAGE_MESSAGE_KEY))
Example #8
  def test_similar_testcase_reproducible_and_closed_but_issue_open_1(self):
    """Tests result is true when there is a similar testcase which is
    reproducible and fixed due to flakiness but issue is kept open. Only update
    testcase bug mapping if similar testcase is fixed longer than the grace
    period."""
    self.issue.save()

    similar_testcase = test_utils.create_generic_testcase()
    similar_testcase.one_time_crasher_flag = False
    similar_testcase.open = False
    similar_testcase.bug_information = str(self.issue.id)
    similar_testcase.put()

    self.assertEqual(
        True,
        triage._check_and_update_similar_bug(self.testcase, self.issue_tracker))
    testcase = data_handler.get_testcase_by_id(self.testcase.key.id())
    self.assertEqual(None, testcase.bug_information)
    self.assertEqual('', self.issue._monorail_issue.comment)

    similar_testcase.set_metadata(
        'closed_time',
        test_utils.CURRENT_TIME -
        datetime.timedelta(hours=data_types.MIN_ELAPSED_TIME_SINCE_FIXED + 1))
    self.assertEqual(
        True,
        triage._check_and_update_similar_bug(self.testcase, self.issue_tracker))
Example #9
  def test_similar_testcase_reproducible_and_closed_but_issue_open_2(self):
    """Tests result is true when there is a similar testcase which is
    reproducible and fixed due to flakiness but issue is kept open. Don't update
    testcase bug mapping if another reproducible testcase is open and attached
    to this bug."""
    self.issue.save()

    similar_testcase_1 = test_utils.create_generic_testcase()
    similar_testcase_1.one_time_crasher_flag = False
    similar_testcase_1.open = False
    similar_testcase_1.bug_information = str(self.issue.id)
    similar_testcase_1.put()

    similar_testcase_2 = test_utils.create_generic_testcase()
    similar_testcase_2.one_time_crasher_flag = False
    similar_testcase_2.open = True
    similar_testcase_2.bug_information = str(self.issue.id)
    similar_testcase_2.put()

    self.assertEqual(
        True,
        triage._check_and_update_similar_bug(self.testcase, self.issue_tracker))
    testcase = data_handler.get_testcase_by_id(self.testcase.key.id())
    self.assertEqual(None, testcase.bug_information)
    self.assertEqual('', self.issue._monorail_issue.comment)
Example #10
def _testcase_reproduces_in_revision(testcase,
                                     testcase_file_path,
                                     job_type,
                                     revision,
                                     update_metadata=False):
  """Test to see if a test case reproduces in the specified revision."""
  build_manager.setup_build(revision)
  if not build_manager.check_app_path():
    raise errors.BuildSetupError(revision, job_type)

  if testcase_manager.check_for_bad_build(job_type, revision):
    log_message = 'Bad build at r%d. Skipping' % revision
    testcase = data_handler.get_testcase_by_id(testcase.key.id())
    data_handler.update_testcase_comment(testcase, data_types.TaskState.WIP,
                                         log_message)
    raise errors.BadBuildError(revision, job_type)

  test_timeout = environment.get_value('TEST_TIMEOUT', 10)
  result = testcase_manager.test_for_crash_with_retries(
      testcase, testcase_file_path, test_timeout, http_flag=testcase.http_flag)
  _log_output(revision, result)

  if update_metadata:
    _update_issue_metadata(testcase)

  return result
Example #11
    def test_similar_testcase_with_issue_recently_closed(self):
        """Tests result is true when there is a similar testcase with issue closed
        recently."""
        self.issue.status = 'Fixed'
        self.issue._monorail_issue.open = False
        self.issue._monorail_issue.closed = (
            test_utils.CURRENT_TIME - datetime.timedelta(
                hours=data_types.MIN_ELAPSED_TIME_SINCE_FIXED - 1))
        self.issue.save()

        similar_testcase = test_utils.create_generic_testcase()
        similar_testcase.one_time_crasher_flag = False
        similar_testcase.open = False
        similar_testcase.bug_information = str(self.issue.id)
        similar_testcase.put()

        self.assertEqual(
            True,
            triage._check_and_update_similar_bug(self.testcase,
                                                 self.issue_tracker))

        testcase = data_handler.get_testcase_by_id(self.testcase.key.id())
        self.assertEqual(
            'Delaying filing a bug since similar testcase (2) in issue (1) '
            'was just fixed.',
            testcase.get_metadata(triage.TRIAGE_MESSAGE_KEY))
Example #12
def create_tasks(testcase):
    """Create tasks like minimization, regression, impact, progression, stack
  stack for a newly generated testcase."""
    # No need to create progression task. It is automatically created by the cron
    # handler for reproducible testcases.

    # For a non reproducible crash.
    if testcase.one_time_crasher_flag:
        # For unreproducible testcases, it is still beneficial to get component
        # information from blame task.
        create_blame_task_if_needed(testcase)
        return

    # For a fully reproducible crash.

    # MIN environment variable defined in a job definition indicates if
    # we want to do the heavy weight tasks like minimization, regression,
    # impact, etc on this testcase. These are usually skipped when we have
    # a large timeout and we can't afford to waste more than a couple of hours
    # on these jobs.
    testcase_id = testcase.key.id()
    if environment.get_value('MIN') == 'No':
        testcase = data_handler.get_testcase_by_id(testcase_id)
        testcase.minimized_keys = 'NA'
        testcase.regression = 'NA'
        testcase.set_impacts_as_na()
        testcase.put()
        return

    # Just create the minimize task for now. Once minimization is complete, it
    # automatically creates the rest of the needed tasks.
    create_minimize_task_if_needed(testcase)
Example #13
def _add_triage_message(testcase, message):
  """Add a triage message."""
  if testcase.get_metadata(TRIAGE_MESSAGE_KEY) == message:
    # Message already exists, skip update.
    return
  # Re-fetch testcase to get latest entity and avoid race condition in updates.
  testcase = data_handler.get_testcase_by_id(testcase.key.id())
  testcase.set_metadata(TRIAGE_MESSAGE_KEY, message)
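A short sketch of how a caller might use this helper when bug filing is deferred; the surrounding function is illustrative, and the message text mirrors the one checked in Example #11.

def _note_delayed_filing(testcase, similar_testcase, issue):
    """Illustrative caller: record why filing a bug is being delayed."""
    _add_triage_message(
        testcase,
        'Delaying filing a bug since similar testcase (%d) in issue (%s) '
        'was just fixed.' % (similar_testcase.key.id(), issue.id))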
Example #14
def _save_current_fixed_range_indices(testcase_id, fixed_range_start,
                                      fixed_range_end):
  """Save current fixed range indices in case we die in middle of task."""
  testcase = data_handler.get_testcase_by_id(testcase_id)
  testcase.set_metadata(
      'last_progression_min', fixed_range_start, update_testcase=False)
  testcase.set_metadata(
      'last_progression_max', fixed_range_end, update_testcase=False)
  testcase.put()
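The two metadata writes above are batched with update_testcase=False and persisted by a single put(). Below is a sketch of reading the checkpoint back when a later task run resumes; the function name is illustrative.

def _get_last_progression_range(testcase_id):
    """Illustrative helper: read back the checkpoint saved by the function above."""
    testcase = data_handler.get_testcase_by_id(testcase_id)
    return (testcase.get_metadata('last_progression_min'),
            testcase.get_metadata('last_progression_max'))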
Example #15
    def get(self, resource=None):
        """Handle a get request with resource."""
        testcase = None
        testcase_id = request.args.get('testcase_id')
        if not testcase_id and not resource:
            raise helpers.EarlyExitException('No file requested.', 400)

        if testcase_id:
            try:
                testcase = data_handler.get_testcase_by_id(testcase_id)
            except errors.InvalidTestcaseError:
                raise helpers.EarlyExitException('Invalid testcase.', 400)

            if not resource:
                if testcase.minimized_keys and testcase.minimized_keys != 'NA':
                    resource = testcase.minimized_keys
                else:
                    resource = testcase.fuzzed_keys

        fuzzer_binary_name = None
        if testcase:
            fuzzer_binary_name = testcase.get_metadata('fuzzer_binary_name')

        resource = str(urllib.parse.unquote(resource))
        blob_info = blobs.get_blob_info(resource)
        if not blob_info:
            raise helpers.EarlyExitException('File does not exist.', 400)

        if (testcase and testcase.fuzzed_keys != blob_info.key()
                and testcase.minimized_keys != blob_info.key()):
            raise helpers.EarlyExitException('Invalid testcase.', 400)

        if (utils.is_oss_fuzz() and testcase
                and self.check_public_testcase(blob_info, testcase)):
            # Public OSS-Fuzz testcase.
            return self._send_blob(blob_info,
                                   testcase.key.id(),
                                   is_minimized=True,
                                   fuzzer_binary_name=fuzzer_binary_name)

        is_minimized = testcase and blob_info.key() == testcase.minimized_keys
        if access.has_access():
            # User has general access.
            return self._send_blob(blob_info, testcase_id, is_minimized,
                                   fuzzer_binary_name)

        # If this blobstore file is for a testcase, check if the user has access to
        # the testcase.
        if not testcase:
            raise helpers.AccessDeniedException()

        if access.can_user_access_testcase(testcase):
            return self._send_blob(blob_info, testcase_id, is_minimized,
                                   fuzzer_binary_name)

        raise helpers.AccessDeniedException()
Example #16
    def test_minimize(self):
        """Test minimize."""
        helpers.patch(self, ['clusterfuzz._internal.base.utils.is_oss_fuzz'])
        self.mock.is_oss_fuzz.return_value = True

        testcase_file_path = os.path.join(self.temp_dir, 'testcase')
        with open(testcase_file_path, 'wb') as f:
            f.write(b'EEE')

        with open(testcase_file_path) as f:
            fuzzed_keys = blobs.write_blob(f)

        testcase_path = os.path.join(self.temp_dir, 'testcase')

        testcase = data_types.Testcase(
            crash_type='Null-dereference WRITE',
            crash_address='',
            crash_state='Foo\n',
            crash_stacktrace='',
            crash_revision=1337,
            fuzzed_keys=fuzzed_keys,
            fuzzer_name='libFuzzer',
            overridden_fuzzer_name='libFuzzer_test_fuzzer',
            job_type='libfuzzer_asan_job',
            absolute_path=testcase_path,
            minimized_arguments='%TESTCASE% test_fuzzer')
        testcase.put()

        data_types.FuzzTarget(engine='libFuzzer', binary='test_fuzzer').put()

        fuzzers_init.run()

        self._setup_env(job_type='libfuzzer_asan_job')
        environment.set_value('APP_ARGS', testcase.minimized_arguments)
        environment.set_value('LIBFUZZER_MINIMIZATION_ROUNDS', 3)
        environment.set_value('UBSAN_OPTIONS',
                              'unneeded_option=1:silence_unsigned_overflow=1')
        minimize_task.execute_task(testcase.key.id(), 'libfuzzer_asan_job')

        testcase = data_handler.get_testcase_by_id(testcase.key.id())
        self.assertNotEqual('', testcase.minimized_keys)
        self.assertNotEqual('NA', testcase.minimized_keys)
        self.assertNotEqual(testcase.fuzzed_keys, testcase.minimized_keys)
        self.assertEqual(
            {
                'ASAN_OPTIONS': {},
                'UBSAN_OPTIONS': {
                    'silence_unsigned_overflow': 1
                }
            }, testcase.get_metadata('env'))

        blobs.read_blob_to_disk(testcase.minimized_keys, testcase_path)

        with open(testcase_path, 'rb') as f:
            self.assertEqual(1, len(f.read()))
Example #17
  def test_group_of_one(self):
    """Test that a group id with just one testcase gets removed."""
    self.testcases[0].group_id = 1
    self.testcases[0].put()
    self.testcases[1].key.delete()

    grouper.group_testcases()

    testcase = data_handler.get_testcase_by_id(self.testcases[0].key.id())
    self.assertEqual(testcase.group_id, 0)
    self.assertTrue(testcase.is_leader)
Example #18
def _save_fixed_range(testcase_id, min_revision, max_revision,
                      testcase_file_path):
  """Update a test case and other metadata with a fixed range."""
  testcase = data_handler.get_testcase_by_id(testcase_id)
  testcase.fixed = '%d:%d' % (min_revision, max_revision)
  testcase.open = False

  data_handler.update_progression_completion_metadata(
      testcase, max_revision, message='fixed in range r%s' % testcase.fixed)
  _write_to_bigquery(testcase, min_revision, max_revision)

  _store_testcase_for_regression_testing(testcase, testcase_file_path)
Example #19
    def test_invalid_range(self):
        """Ensure that we handle invalid ranges correctly."""
        testcase = data_types.Testcase()
        testcase.put()

        self.mock._testcase_reproduces_in_revision.return_value = True
        result = regression_task.validate_regression_range(
            testcase, '/a/b', 'job_type', [0, 1, 2, 3, 4], 4)
        self.assertFalse(result)

        testcase = data_handler.get_testcase_by_id(testcase.key.id())
        self.assertEqual(testcase.regression, 'NA')
Example #20
def get_testcase(testcase_id):
    """Get a valid testcase or raise EarlyExitException."""
    testcase = None
    try:
        testcase = data_handler.get_testcase_by_id(testcase_id)
    except errors.InvalidTestcaseError:
        pass

    if not testcase:
        raise EarlyExitException(
            "Testcase (id=%s) doesn't exist" % testcase_id, 404)
    return testcase
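A minimal usage sketch, assuming a Flask-style handler with request.args as in Example #15; the handler body is illustrative.

    def get(self):
        """Illustrative handler: resolve the testcase before doing any work."""
        testcase_id = request.args.get('testcase_id')
        # get_testcase() raises EarlyExitException with a 404 if the id is
        # missing or does not resolve, so no None check is needed afterwards.
        testcase = get_testcase(testcase_id)
        return testcase.crash_state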
Example #21
def _testcase_reproduces_in_revision(testcase,
                                     testcase_file_path,
                                     job_type,
                                     revision,
                                     should_log=True,
                                     min_revision=None,
                                     max_revision=None):
    """Test to see if a test case reproduces in the specified revision."""
    if should_log:
        log_message = 'Testing r%d' % revision
        if min_revision is not None and max_revision is not None:
            log_message += ' (current range %d:%d)' % (min_revision,
                                                       max_revision)

        testcase = data_handler.get_testcase_by_id(testcase.key.id())
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.WIP,
                                             log_message)

    build_manager.setup_build(revision)
    if not build_manager.check_app_path():
        raise errors.BuildSetupError(revision, job_type)

    if testcase_manager.check_for_bad_build(job_type, revision):
        log_message = 'Bad build at r%d. Skipping' % revision
        testcase = data_handler.get_testcase_by_id(testcase.key.id())
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.WIP,
                                             log_message)
        raise errors.BadBuildError(revision, job_type)

    test_timeout = environment.get_value('TEST_TIMEOUT', 10)
    result = testcase_manager.test_for_crash_with_retries(
        testcase,
        testcase_file_path,
        test_timeout,
        http_flag=testcase.http_flag)
    return result.is_crash()
Example #22
def execute_task(testcase_id, _):
    """Attempt to find the CL introducing the bug associated with testcase_id."""
    # Locate the testcase associated with the id.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    # Make sure that predator topic is configured. If not, nothing to do here.
    topic = db_config.get_value('predator_crash_topic')
    if not topic:
        logs.log('Predator is not configured, skipping blame task.')
        return

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED)

    # Prepare pubsub message to send to predator.
    message = _prepare_predator_message(testcase)
    if not message:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.ERROR,
            'Failed to generate request for Predator')
        return

    # Clear existing results and mark blame result as pending.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    _clear_blame_result_and_set_pending_flag(testcase)

    # Post request to pub sub.
    client = pubsub.PubSubClient()
    message_ids = client.publish(topic, [message])
    logs.log(
        'Successfully published testcase %s to Predator. Message IDs: %s.' %
        (testcase_id, message_ids))
    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.FINISHED)
Example #23
def check_access_and_get_testcase(testcase_id):
    """Check the failed attempt count and get the testcase."""
    if not helpers.get_user_email():
        raise helpers.UnauthorizedException()

    if not testcase_id:
        raise helpers.EarlyExitException('No test case specified!', 404)

    try:
        testcase = data_handler.get_testcase_by_id(testcase_id)
    except errors.InvalidTestcaseError:
        raise helpers.EarlyExitException('Invalid test case!', 404)

    if not can_user_access_testcase(testcase):
        raise helpers.AccessDeniedException()

    return testcase
Example #24
  def test_same_crash_different_security(self):
    """Test that crashes with same crash states, but different security
      flags."""
    self.testcases[0].security_flag = False
    self.testcases[0].crash_state = 'abc\ndef'
    self.testcases[1].security_flag = True
    self.testcases[1].crash_state = 'abc\ndef'

    for t in self.testcases:
      t.put()

    grouper.group_testcases()

    for index, t in enumerate(self.testcases):
      self.testcases[index] = data_handler.get_testcase_by_id(t.key.id())
      self.assertEqual(self.testcases[index].group_id, 0)
      self.assertTrue(self.testcases[index].is_leader)
Example #25
  def test_different_crash_same_security(self):
    """Test that crashes with different crash states and same security flags
      don't get grouped together."""
    self.testcases[0].security_flag = True
    self.testcases[0].crash_state = 'abc\ndef'
    self.testcases[1].security_flag = True
    self.testcases[1].crash_state = 'uvw\nxyz'

    for t in self.testcases:
      t.put()

    grouper.group_testcases()

    for index, t in enumerate(self.testcases):
      self.testcases[index] = data_handler.get_testcase_by_id(t.key.id())
      self.assertEqual(self.testcases[index].group_id, 0)
      self.assertTrue(self.testcases[index].is_leader)
Example #26
  def test_same_crash_same_security(self):
    """Test that crashes with same crash states and same security flags get
    de-duplicated with one of them removed."""
    for index, t in enumerate(self.testcases):
      t.security_flag = True
      t.crash_state = 'abc\ndef'
      t.timestamp = datetime.datetime.utcfromtimestamp(index)
      t.put()

    grouper.group_testcases()

    testcases = []
    for testcase_id in data_handler.get_open_testcase_id_iterator():
      testcases.append(data_handler.get_testcase_by_id(testcase_id))

    self.assertEqual(len(testcases), 1)
    self.assertEqual(testcases[0].group_id, 0)
    self.assertTrue(testcases[0].is_leader)
Example #27
def cleanup_global_blacklist():
    """Cleans out closed and deleted testcases from the global blacklist."""
    blacklists_to_delete = []
    global_blacklists = data_types.Blacklist.query(
        data_types.Blacklist.tool_name == LSAN_TOOL_NAME)
    for blacklist in global_blacklists:
        testcase_id = blacklist.testcase_id

        try:
            testcase = data_handler.get_testcase_by_id(testcase_id)
        except errors.InvalidTestcaseError:
            testcase = None

        # Delete entry if testcase is closed, deleted, or unreproducible.
        if not testcase or not testcase.open or testcase.one_time_crasher_flag:
            blacklists_to_delete.append(blacklist.key)

    ndb_utils.delete_multi(blacklists_to_delete)
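Examples #6, #20, and #27 all guard get_testcase_by_id with errors.InvalidTestcaseError, since the referenced testcase may have been deleted. A sketch of that lookup as a standalone helper; the name is illustrative.

def _get_testcase_or_none(testcase_id):
    """Illustrative helper: return the testcase, or None if it no longer exists."""
    try:
        return data_handler.get_testcase_by_id(testcase_id)
    except errors.InvalidTestcaseError:
        # The testcase was deleted or the id is invalid.
        return None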
Example #28
def add_external_task(command, testcase_id, job):
  """Add external task."""
  if command != 'progression':
    # Only progression is supported.
    return

  pubsub_client = pubsub.PubSubClient()
  topic_name = job.external_reproduction_topic
  assert topic_name is not None

  testcase = data_handler.get_testcase_by_id(testcase_id)
  fuzz_target = testcase.get_fuzz_target()

  memory_tool_name = environment.get_memory_tool_name(job.name)
  sanitizer = environment.SANITIZER_NAME_MAP.get(memory_tool_name)
  job_environment = job.get_environment()
  if job_environment.get('CUSTOM_BINARY'):
    raise RuntimeError('External jobs should never have custom binaries.')

  build_path = (
      job_environment.get('RELEASE_BUILD_BUCKET_PATH') or
      job_environment.get('FUZZ_TARGET_BUILD_BUCKET_PATH'))
  if build_path is None:
    raise RuntimeError(f'{job.name} has no build path defined.')

  min_revision = (
      testcase.get_metadata('last_tested_revision') or testcase.crash_revision)

  logs.log(f'Publishing external reproduction task for {testcase_id}.')
  attributes = {
      'project': job.project,
      'target': fuzz_target.binary,
      'fuzzer': testcase.fuzzer_name,
      'sanitizer': sanitizer,
      'job': job.name,
      'testcaseId': str(testcase_id),
      'buildPath': build_path,
      'minRevisionAbove': str(min_revision),
      'numTrials': str(_NUM_TRIALS),
  }

  reproducer = blobs.read_key(testcase.fuzzed_keys)
  message = pubsub.Message(data=reproducer, attributes=attributes)
  pubsub_client.publish(topic_name, [message])
Example #29
def _set_predator_result_with_error(testcase, error_message):
    """Sets predator result with error."""
    predator_result = {
        'result': {
            'found': False,
            'project': '',
            'suspected_components': '',
            'suspected_cls': '',
            'feedback_url': '',
            'error_message': error_message,
        }
    }

    testcase = data_handler.get_testcase_by_id(testcase.key.id())
    testcase.set_metadata('predator_result',
                          predator_result,
                          update_testcase=False)
    testcase.delete_metadata('blame_pending', update_testcase=False)
    testcase.put()
Example #30
def save_regression_range(testcase_id, regression_range_start,
                          regression_range_end):
    """Saves the regression range and creates blame and impact task if needed."""
    testcase = data_handler.get_testcase_by_id(testcase_id)
    testcase.regression = '%d:%d' % (regression_range_start,
                                     regression_range_end)
    data_handler.update_testcase_comment(
        testcase, data_types.TaskState.FINISHED,
        'regressed in range %s' % testcase.regression)

    write_to_big_query(testcase, regression_range_start, regression_range_end)

    # Force impacts update after regression range is updated. In several cases,
    # we might not have a production build to test with, so regression range is
    # used to decide impacts.
    task_creation.create_impact_task_if_needed(testcase)

    # Get blame information using the regression range result.
    task_creation.create_blame_task_if_needed(testcase)