Example 1
    def test_store_minidump(self, mock_write_testcase):
        """Tests (very roughly) minidump upload to blobstore: just check that a
        blobstore ID is returned."""
        self.needs_file_delete = False
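        # Mock the blobstore write to return a fixed key.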
        mock_write_testcase.return_value = '11111'

        sample_report_info = crash_uploader.CrashReportInfo(
            minidump_path=EXPECTED_DMP_PATH,
            product='Chrome_Android',
            version='46.0.2482.0')
        minidump_key = sample_report_info.store_minidump()
        if not minidump_key:
            self.fail('Could not upload minidump to blobstore.')
Example 2
    def test_to_report_metadata_and_back(self):
        """Tests converting report info to report metadata and back."""
        self.needs_file_delete = False

        expected_report_info = crash_uploader.CrashReportInfo()
        expected_report_info.product = 'Chrome_Android'
        expected_report_info.version = '59.0.3035.0'
        expected_report_info.testcase_id = 'CF_TESTSUITE_TEST_UPLOAD'
        expected_report_info.bot_id = 'test_upload'
        with open(EXPECTED_PROCESSED_REPORT_PATH, 'rb') as processed_report:
            expected_report_info.serialized_crash_stack_frames = (
                processed_report.read())

        report_metadata = expected_report_info.to_report_metadata()
        actual_report_info = crash_uploader.crash_report_info_from_metadata(
            report_metadata)

        self._validate_report_fields(expected_report_info, actual_report_info)
Example 3
    def test_upload_processed_report(self):
        """Tests (very roughly) crash report upload with a processed report:
        just check that a report id is returned."""
        self.needs_file_delete = False

        report_info = crash_uploader.CrashReportInfo()
        report_info.product = 'Chrome_Android'
        report_info.version = '59.0.3035.0'
        report_info.testcase_id = 'CF_TESTSUITE_TEST_UPLOAD'
        report_info.bot_id = 'test_upload'
        with open(EXPECTED_PROCESSED_REPORT_PATH, 'rb') as processed_report:
            report_info.serialized_crash_stack_frames = processed_report.read()

        # Attempt upload.
        if self.server_error:
            self.fail(self.server_error)
        report_id = report_info.upload()
        if not report_id:
            self.fail('No report id returned.')
Example 4
# .dmp file, suffixed by the PID to distinguish it from the minidump file
# itself.
SAMPLE_MIME_FILENAME = 'android_mime_minidump'
SAMPLE_MIME_PATH = os.path.join(DATA_DIRECTORY,
                                '%s.mime' % SAMPLE_MIME_FILENAME)
EXPECTED_DMP_PATH = os.path.join(DATA_DIRECTORY,
                                 'android_parsed_minidump_expected.dmp')
ACTUAL_DMP_PATH = os.path.join(DATA_DIRECTORY, '%s.dmp' % SAMPLE_MIME_FILENAME)
EXPECTED_PROCESSED_REPORT_PATH = os.path.join(
    DATA_DIRECTORY, 'expected_processed_report_bytes')
with open(os.path.join(DATA_DIRECTORY, 'android_crash_stack_output')) as handle:
    SAMPLE_OUTPUT = handle.read()
with open(os.path.join(DATA_DIRECTORY, 'crash_output_to_parse')) as handle:
    SAMPLE_OUTPUT_TO_PARSE = handle.read()
EXPECTED_REPORT_INFO = crash_uploader.CrashReportInfo(
    minidump_path=ACTUAL_DMP_PATH,
    product='Chrome_Android',
    version='46.0.2482.0')

# Environment variables.
TEST_JOB_NAME = 'android_asan_chrome_l'
TEST_OS = 'android'
TEST_FAIL_RETRIES = '4'
TEST_BOT_TMPDIR = DATA_DIRECTORY
TEST_CRASH_STACKTRACES_DIR = DATA_DIRECTORY


class CrashBaseTest(unittest.TestCase):
    """Base for setup, teardown of crash report processing tests."""
    def setUp(self):
        test_helpers.patch_environ(self)
Example 5
  def test_create_many_groups(self, project_name):
    """Test creating many groups."""
    self.mock.get_project_name.return_value = project_name

    self.mock.insert.return_value = {'insertErrors': [{'index': 0}]}
    invalid_crash = self._make_crash('e1', state='error1')
    invalid_crash.error = 'error'
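    # e1 carries an error, so it is treated as invalid: it neither forms a
    # group nor gets archived.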

    # TODO(metzman): Add a separate test for strategies.
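    # r2's stacktrace carries a fuzzing strategies marker, which should surface
    # in that testcase's additional_metadata (checked below).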
    r2_stacktrace = ('r2\ncf::fuzzing_strategies: value_profile\n')

    crashes = [
        self._make_crash('r1', state='reproducible1'),
        self._make_crash(r2_stacktrace, state='reproducible1'),
        self._make_crash('r3', state='reproducible1'),
        self._make_crash('r4', state='reproducible2'),
        self._make_crash('u1', state='unreproducible1'),
        self._make_crash('u2', state='unreproducible2'),
        self._make_crash('u3', state='unreproducible2'),
        self._make_crash('u4', state='unreproducible3'),
        invalid_crash,
    ]

    self.mock.test_for_reproducibility.side_effect = [
        False,  # For r1. It returns False. So, r1 is demoted.
        True,  # For r2. It returns True. So, r2 becomes primary for its group.
        True,  # For r4.
        False,  # For u1.
        False,  # For u2.
        False,  # For u3.
        False,  # For u4.
    ]

    new_crash_count, known_crash_count, groups = fuzz_task.process_crashes(
        crashes=crashes,
        context=fuzz_task.Context(
            project_name=project_name,
            bot_name='bot',
            job_type='job',
            fuzz_target=data_types.FuzzTarget(engine='engine', binary='binary'),
            redzone=111,
            platform_id='platform',
            crash_revision=1234,
            fuzzer_name='fuzzer',
            window_argument='win_args',
            fuzzer_metadata={},
            testcases_metadata={},
            timeout_multiplier=1,
            test_timeout=2,
            thread_wait_timeout=3,
            data_directory='/data'))
    self.assertEqual(5, new_crash_count)
    self.assertEqual(3, known_crash_count)

    self.assertEqual(5, len(groups))
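    # Expected groups: {r1, r2, r3}, {r4}, {u1}, {u2, u3}, {u4}.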
    self.assertEqual([
        'reproducible1', 'reproducible2', 'unreproducible1', 'unreproducible2',
        'unreproducible3'
    ], [group.main_crash.crash_state for group in groups])
    self.assertEqual([True, True, True, True, True],
                     [group.is_new() for group in groups])
    self.assertEqual([3, 1, 1, 2, 1], [len(group.crashes) for group in groups])

    testcases = list(data_types.Testcase.query())
    self.assertEqual(5, len(testcases))
    self.assertSetEqual(
        set([r2_stacktrace, 'r4', 'u1', 'u2', 'u4']),
        set(t.crash_stacktrace for t in testcases))

    self.assertSetEqual(
        set([
            '{"fuzzing_strategies": ["value_profile"]}', None, None, None, None
        ]), set(t.additional_metadata for t in testcases))

    # There's one invalid crash (e1). And r2 is a reproducible crash, so r3
    # doesn't trigger archiving. Therefore,
    # archive_testcase_and_dependencies_in_gcs is called `len(crashes) - 2` times.
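    # That is, 9 crashes minus e1 and r3 = 7 expected calls.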
    self.assertEqual(
        len(crashes) - 2,
        self.mock.archive_testcase_and_dependencies_in_gcs.call_count)

    # Check only the desired testcases were saved.
    actual_crash_infos = [group.main_crash.crash_info for group in groups]
    if project_name != 'chromium':
      expected_crash_infos = [None] * len(actual_crash_infos)
    else:
      expected_saved_crash_info = crash_uploader.CrashReportInfo(
          product='Chrome_' + environment.platform().lower().capitalize(),
          version='this.is.fake.ver',
          serialized_crash_stack_frames='f00df00d')
      expected_crash_infos = [
          expected_saved_crash_info,  # r2 is main crash for group r1,r2,r3
          expected_saved_crash_info,  # r4 is main crash for its own group
          None,  # u1 is not reproducible
          None,  # u2, u3 are not reproducible
          None,  # u4 is not reproducible
      ]

    self.assertEqual(len(expected_crash_infos), len(actual_crash_infos))
    for expected, actual in zip(expected_crash_infos, actual_crash_infos):
      if not expected:
        self.assertIsNone(actual)
        continue

      self.assertEqual(expected.product, actual.product)
      self.assertEqual(expected.version, actual.version)
      self.assertEqual(expected.serialized_crash_stack_frames,
                       actual.serialized_crash_stack_frames)

    def _make_big_query_json(crash, reproducible_flag, new_flag, testcase_id):
      return {
          'crash_type': crash.crash_type,
          'crash_state': crash.crash_state,
          'created_at': 987,
          'platform': 'platform',
          'crash_time_in_ms': int(crash.crash_time * 1000),
          'parent_fuzzer_name': 'engine',
          'fuzzer_name': 'engine_binary',
          'job_type': 'job',
          'security_flag': crash.security_flag,
          'reproducible_flag': reproducible_flag,
          'revision': '1234',
          'new_flag': new_flag,
          'project': project_name,
          'testcase_id': testcase_id
      }

    def _get_testcase_id(crash):
      rows = list(
          data_types.Testcase.query(
              data_types.Testcase.crash_type == crash.crash_type,
              data_types.Testcase.crash_state == crash.crash_state,
              data_types.Testcase.security_flag == crash.security_flag))
      if not rows:
        return None
      return str(rows[0].key.id())

    # Calls to write 5 groups of crashes to BigQuery.
    self.assertEqual(5, self.mock.insert.call_count)
    self.mock.insert.assert_has_calls([
        mock.call(mock.ANY, [
            big_query.Insert(
                _make_big_query_json(crashes[0], True, False, None),
                '%s:bot:987:0' % crashes[0].key),
            big_query.Insert(
                _make_big_query_json(crashes[1], True, True,
                                     _get_testcase_id(crashes[1])),
                '%s:bot:987:1' % crashes[0].key),
            big_query.Insert(
                _make_big_query_json(crashes[2], True, False, None),
                '%s:bot:987:2' % crashes[0].key)
        ]),
        mock.call(mock.ANY, [
            big_query.Insert(
                _make_big_query_json(crashes[3], True, True,
                                     _get_testcase_id(crashes[3])),
                '%s:bot:987:0' % crashes[3].key)
        ]),
        mock.call(mock.ANY, [
            big_query.Insert(
                _make_big_query_json(crashes[4], False, True,
                                     _get_testcase_id(crashes[4])),
                '%s:bot:987:0' % crashes[4].key)
        ]),
        mock.call(mock.ANY, [
            big_query.Insert(
                _make_big_query_json(crashes[5], False, True,
                                     _get_testcase_id(crashes[5])),
                '%s:bot:987:0' % crashes[5].key),
            big_query.Insert(
                _make_big_query_json(crashes[6], False, False, None),
                '%s:bot:987:1' % crashes[5].key)
        ]),
        mock.call(mock.ANY, [
            big_query.Insert(
                _make_big_query_json(crashes[7], False, True,
                                     _get_testcase_id(crashes[7])),
                '%s:bot:987:0' % crashes[7].key)
        ]),
    ])