def test_upload_with_timestamp_from_stats(self):
    """Log name should be generated using timestamp value from the stats."""
    mock_gsutil = mock.MagicMock()
    self.mock.write_data.return_value = mock_gsutil

    self.fs.create_file(
        self.testcase_path + '.stats2',
        contents='{"stat": 1000, "timestamp": 1472846341.017923, "kind": '
        '"TestcaseRun", "job": "job", "fuzzer": "fuzzer", '
        '"build_revision": 123}\n')

    environment.set_value('BOT_NAME', 'hostname.company.com')
    crash_result = CrashResult(
        return_code=1, crash_time=5, output='fake output')

    log = testcase_manager.prepare_log_for_upload(crash_result.get_stacktrace(),
                                                  crash_result.return_code)
    log_time = testcase_manager._get_testcase_time(self.testcase_path)
    testcase_manager.upload_log(log, log_time)

    # The date and time below are derived from the 1472846341 timestamp value.
    self.mock.write_data.assert_called_once_with(
        b'Component revisions (build r123):\n'
        b'Component: REVISION\nComponent2: REVISION2\n\n'
        b'Bot name: hostname.company.com\n'
        b'Return code: 1\n\nfake output',
        'gs://fake-gcs-logs/fuzzer/job/2016-09-02/19:59:01:017923.log')
    def test_libfuzzer_skip_minimization_initial_crash_state(self):
        """Test libFuzzer minimization skipping with a valid initial crash state."""
        # TODO(ochang): Fix circular import.
        from crash_analysis.crash_result import CrashResult

        testcase = data_types.Testcase(minimized_keys='',
                                       fuzzed_keys='FUZZED_KEY',
                                       job_type='libfuzzer_asan_job',
                                       security_flag=True)
        testcase.put()

        stacktrace = (
            '==14970==ERROR: AddressSanitizer: heap-buffer-overflow on address '
            '0x61b00001f7d0 at pc 0x00000064801b bp 0x7ffce478dbd0 sp '
            '0x7ffce478dbc8 READ of size 4 at 0x61b00001f7d0 thread T0\n'
            '#0 0x64801a in frame0() src/test.cpp:1819:15\n'
            '#1 0x647ac5 in frame1() src/test.cpp:1954:25\n'
            '#2 0xb1dee7 in frame2() src/test.cpp:160:9\n'
            '#3 0xb1ddd8 in frame3() src/test.cpp:148:34\n')
        self.mock._run_libfuzzer_testcase.return_value = CrashResult(  # pylint: disable=protected-access
            1, 1.0, stacktrace)

        self.mock._run_libfuzzer_tool.return_value = (None, None)  # pylint: disable=protected-access

        minimize_task.do_libfuzzer_minimization(testcase,
                                                '/testcase_file_path')

        testcase = data_handler.get_testcase_by_id(testcase.key.id())
        self.assertEqual('Heap-buffer-overflow', testcase.crash_type)
        self.assertEqual('frame0\nframe1\nframe2\n', testcase.crash_state)
        self.assertEqual('0x61b00001f7d0', testcase.crash_address)
        self.assertEqual(
            '+----------------------------------------Release Build Stacktrace'
            '----------------------------------------+\n%s' % stacktrace,
            testcase.crash_stacktrace)
    def run(self, round_number):
        """Run the testcase once."""
        app_directory = environment.get_value('APP_DIR')
        warmup_timeout = environment.get_value('WARMUP_TIMEOUT')
        run_timeout = warmup_timeout if round_number == 1 else self._test_timeout

        if self._is_black_box:
            return_code, crash_time, output = process_handler.run_process(
                self._command,
                timeout=run_timeout,
                gestures=self._gestures,
                current_working_directory=app_directory)
        else:
            result = engine_reproduce(self._engine_impl,
                                      self._fuzz_target.binary,
                                      self._testcase_path, self._arguments,
                                      run_timeout)
            return_code = result.return_code
            crash_time = result.time_executed

            log_header = engine_common.get_log_header(
                result.command, environment.get_value('BOT_NAME'),
                result.time_executed)
            output = log_header + '\n' + result.output

        process_handler.terminate_stale_application_instances()

        crash_result = CrashResult(return_code, crash_time, output)
        if not crash_result.is_crash():
            logs.log('No crash occurred (round {round_number}).'.format(
                round_number=round_number),
                     output=output)

        return crash_result
def test_for_crash_with_retries(testcase,
                                testcase_path,
                                test_timeout,
                                http_flag=False,
                                use_gestures=True,
                                compare_crash=True,
                                crash_retries=None):
    """Test for a crash and return crash parameters like crash type, crash state,
  crash stacktrace, etc."""
    gestures = testcase.gestures if use_gestures else None
    fuzz_target = testcase.get_fuzz_target()
    try:
        runner = TestcaseRunner(fuzz_target, testcase_path, test_timeout,
                                gestures, http_flag)
    except TargetNotFoundError:
        # If a target isn't found, treat it as not crashing.
        return CrashResult(return_code=0, crash_time=0, output='')

    if crash_retries is None:
        crash_retries = environment.get_value('CRASH_RETRIES')

    if compare_crash:
        expected_state = testcase.crash_state
        expected_security_flag = testcase.security_flag
    else:
        expected_state = None
        expected_security_flag = None

    return runner.reproduce_with_retries(crash_retries, expected_state,
                                         expected_security_flag,
                                         testcase.flaky_stack)
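# A minimal usage sketch (hypothetical call site): `testcase` is a
# data_types.Testcase loaded elsewhere, and TEST_TIMEOUT/CRASH_RETRIES are
# already set in the environment. The path and timeout are illustrative.
crash_result = test_for_crash_with_retries(
    testcase, '/path/to/testcase', test_timeout=30, compare_crash=True)
if crash_result.is_crash():
    state = crash_result.get_symbolized_data()
    print(state.crash_type, state.crash_state)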
  def test_upload_with_hostname_and_serial(self):
    """Log name should be generated using current (mocked) timestamp value."""
    mock_gsutil = mock.MagicMock()
    self.mock.write_data.return_value = mock_gsutil
    environment.set_value('BOT_NAME', 'hostname.company.com')
    environment.set_value('OS_OVERRIDE', 'ANDROID_KERNEL')
    environment.set_value('ANDROID_SERIAL', '123456789')

    self.fs.create_file(
        self.testcase_path + '.stats2',
        contents='{"stat": 1000, "kind": "TestcaseRun", "job": "job", '
        '"fuzzer": "fuzzer", "build_revision": 123}\n')

    crash_result = CrashResult(return_code=None, crash_time=None, output=None)
    log = testcase_manager.prepare_log_for_upload(crash_result.get_stacktrace(),
                                                  crash_result.return_code)
    log_time = testcase_manager._get_testcase_time(self.testcase_path)
    testcase_manager.upload_log(log, log_time)
    self.mock.write_data.assert_called_once_with(
        b'Component revisions (build r123):\n'
        b'Component: REVISION\nComponent2: REVISION2\n\n'
        b'Bot name: hostname.company.com\n'
        b'Device serial: 123456789\n'
        b'Return code: None\n\nNo output!',
        'gs://fake-gcs-logs/fuzzer/job/2017-05-15/16:10:28:374119.log')
    def test_upload_without_component_revisions(self):
        """Log should contain message on empty component revisions."""
        self.mock.get_component_range_list.return_value = []

        mock_gsutil = mock.MagicMock()
        self.mock.write_data.return_value = mock_gsutil

        self.fs.create_file(
            self.testcase_path + '.stats2',
            contents='{"stat": 1000, "timestamp": 1472846341.017923, "kind": '
            '"TestcaseRun", "job": "job", "fuzzer": "fuzzer", '
            '"build_revision": 123}\n')

        crash_result = CrashResult(return_code=1,
                                   crash_time=5,
                                   output='fake output')
        log = testcase_manager.prepare_log_for_upload(
            crash_result.get_stacktrace(), crash_result.return_code)
        log_time = testcase_manager._get_testcase_time(self.testcase_path)
        testcase_manager.upload_log(log, log_time)

        # The date and time below are derived from the 1472846341 timestamp value.
        self.mock.write_data.assert_called_once_with(
            b'Component revisions (build r123):\n'
            b'Not available.\n\n'
            b'Return code: 1\n\nfake output',
            'gs://fake-gcs-logs/fuzzer/job/2016-09-02/19:59:01:017923.log')
  def run(self,
          file_path=None,
          gestures=None,
          arguments=None,
          timeout=None,
          log_command=False,
          use_fresh_profile=False):
    """Run the test."""
    if file_path is None:
      file_path = self.file_path

    if gestures is None:
      gestures = self.gestures

    if arguments is None:
      arguments = self.arguments

    # TODO(mbarbella): Dynamic timeout adjustment.
    if timeout is None:
      timeout = self.timeout

    needs_http = self.testcase.http_flag
    profile_index = self._get_profile_index()

    if use_fresh_profile and environment.get_value('USER_PROFILE_ARG'):
      shell.remove_directory(
          testcase_manager.get_user_profile_directory(profile_index))

    # For Android, we need to sync our local testcases directory with the one on
    # the device.
    if environment.platform() == 'ANDROID':
      android.device.push_testcases_to_device()
    elif environment.is_trusted_host():
      from bot.untrusted_runner import file_host
      file_host.push_testcases_to_worker()

    # If we need to write a command line file, only do so if the arguments have
    # changed.
    arguments_changed = arguments != self._previous_arguments
    self._previous_arguments = arguments

    command = testcase_manager.get_command_line_for_application(
        file_to_run=file_path,
        app_args=arguments,
        needs_http=needs_http,
        user_profile_index=profile_index,
        write_command_line_file=arguments_changed)
    if log_command:
      logs.log('Executing command: %s' % command)

    return_code, crash_time, output = process_handler.run_process(
        command, timeout=timeout, gestures=gestures)

    self._release_profile(profile_index)
    return CrashResult(return_code, crash_time, output)
    def reproduce_with_retries(
        self,
        retries,
        expected_state=None,
        expected_security_flag=None,
        flaky_stacktrace=False,
    ):
        """Try reproducing a crash with retries."""
        self._pre_run_cleanup()
        crash_result = None

        for round_number in range(1, retries + 1):
            crash_result = self.run(round_number)
            state = self._get_crash_state(round_number, crash_result)

            if not crash_result.is_crash():
                continue

            if not expected_state:
                logs.log("Crash stacktrace comparison skipped.")
                return crash_result

            if crash_result.should_ignore():
                logs.log(
                    "Crash stacktrace matched ignore signatures, ignored.")
                continue

            if crash_result.is_security_issue() != expected_security_flag:
                logs.log("Crash security flag does not match, ignored.")
                continue

            if flaky_stacktrace:
                logs.log(
                    "Crash stacktrace is marked flaky, skipping comparison.")
                return crash_result

            crash_comparer = CrashComparer(state.crash_state, expected_state)
            if crash_comparer.is_similar():
                logs.log("Crash stacktrace is similar to original stacktrace.")
                return crash_result
            else:
                logs.log(
                    "Crash stacktrace does not match original stacktrace.")

        logs.log("Didn't crash at all.")
        return CrashResult(return_code=0,
                           crash_time=0,
                           output=crash_result.output)
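# Usage sketch, assuming a TestcaseRunner constructed as in
# test_for_crash_with_retries above; `fuzz_target` and the expected
# state/flag come from the original crash report and are illustrative here.
runner = TestcaseRunner(fuzz_target, '/path/to/testcase', 60, None, False)
result = runner.reproduce_with_retries(
    retries=3,
    expected_state='frame0\nframe1\nframe2\n',
    expected_security_flag=True)
if result.is_crash():
    print('Reproduced: %s' % result.get_type())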
  def test_upload_without_timestamp(self):
    """Log name should be generated using current (mocked) timestamp value."""
    mock_gsutil = mock.MagicMock()
    self.mock.write_data.return_value = mock_gsutil

    self.fs.CreateFile(
        self.testcase_path + '.stats2',
        contents='{"stat": 1000, "kind": "TestcaseRun", "job": "job", '
        '"fuzzer": "fuzzer", "build_revision": 123}\n')

    crash_result = CrashResult(return_code=None, crash_time=None, output=None)
    tests.upload_testcase_output(crash_result, self.testcase_path)
    self.mock.write_data.assert_called_once_with(
        'Revisions:\nComponent: REVISION\nComponent2: REVISION2\n\n'
        'Return code: None\n\nNo output!',
        'gs://fake-gcs-logs/fuzzer/job/2017-05-15/16:10:28:374119.log')
def _run_libfuzzer_testcase(testcase, testcase_file_path):
  """Run libFuzzer testcase, and return the CrashResult."""
  # Cleanup any existing application instances and temp directories.
  process_handler.cleanup_stale_processes()
  shell.clear_temp_directory()

  if environment.is_trusted_host():
    from bot.untrusted_runner import file_host
    file_host.copy_file_to_worker(
        testcase_file_path, file_host.rebase_to_worker_root(testcase_file_path))

  test_timeout = environment.get_value('TEST_TIMEOUT',
                                       process_handler.DEFAULT_TEST_TIMEOUT)
  repro_command = tests.get_command_line_for_application(
      file_to_run=testcase_file_path, needs_http=testcase.http_flag)
  return_code, crash_time, output = process_handler.run_process(
      repro_command, timeout=test_timeout)
  return CrashResult(return_code, crash_time, output)
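# Hypothetical wiring, as mocked in the minimization test above: run the
# saved libFuzzer testcase once and inspect the resulting CrashResult.
result = _run_libfuzzer_testcase(testcase, '/testcase_file_path')
if result.is_crash():
    print(result.get_stacktrace())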
  def test_upload_without_timestamp(self):
    """Log name should be generated using current (mocked) timestamp value."""
    mock_gsutil = mock.MagicMock()
    self.mock.write_data.return_value = mock_gsutil

    self.fs.create_file(
        self.testcase_path + '.stats2',
        contents='{"stat": 1000, "kind": "TestcaseRun", "job": "job", '
        '"fuzzer": "fuzzer", "build_revision": 123}\n')

    crash_result = CrashResult(return_code=None, crash_time=None, output=None)
    log = testcase_manager.prepare_log_for_upload(crash_result.get_stacktrace(),
                                                  crash_result.return_code)
    log_time = testcase_manager._get_testcase_time(self.testcase_path)
    testcase_manager.upload_log(log, log_time)
    self.mock.write_data.assert_called_once_with(
        b'Component revisions (build r123):\n'
        b'Component: REVISION\nComponent2: REVISION2\n\n'
        b'Return code: None\n\nNo output!',
        'gs://fake-gcs-logs/fuzzer/job/2017-05-15/16:10:28:374119.log')
  def test_upload_with_timestamp_from_stats(self):
    """Log name should be generated using timestamp value from the stats."""
    mock_gsutil = mock.MagicMock()
    self.mock.write_data.return_value = mock_gsutil

    self.fs.CreateFile(
        self.testcase_path + '.stats2',
        contents='{"stat": 1000, "timestamp": 1472846341.017923, "kind": '
        '"TestcaseRun", "job": "job", "fuzzer": "fuzzer", '
        '"build_revision": 123}\n')

    crash_result = CrashResult(
        return_code=1, crash_time=5, output='fake output')
    tests.upload_testcase_output(crash_result, self.testcase_path)

    # The date and time below are derived from the 1472846341 timestamp value.
    self.mock.write_data.assert_called_once_with(
        'Revisions:\nComponent: REVISION\nComponent2: REVISION2\n\n'
        'Return code: 1\n\nfake output',
        'gs://fake-gcs-logs/fuzzer/job/2016-09-02/19:59:01:017923.log')
def test_for_reproducibility(testcase_path, expected_state,
                             expected_security_flag, test_timeout, http_flag,
                             gestures):
    """Test to see if a crash is fully reproducible or is a one-time crasher."""
    # Clean up any existing application instances and user profile directories.
    # Cleaning up temp clears user profile directories and should be done before
    # the |get_command_line_for_application| call, since that call creates
    # dependencies in the profile folder.
    process_handler.terminate_stale_application_instances()
    shell.clear_temp_directory()

    app_directory = environment.get_value('APP_DIR')
    command = get_command_line_for_application(testcase_path,
                                               needs_http=http_flag)
    crash_count = 0
    crash_retries = environment.get_value('CRASH_RETRIES')
    reproducible_crash_target_count = crash_retries * REPRODUCIBILITY_FACTOR
    warmup_timeout = environment.get_value('WARMUP_TIMEOUT')

    logs.log('Testing for crash (command="%s").' % command)

    round_number = 0
    for round_number in range(1, crash_retries + 1):
        # Bail out early if there is no hope of finding a reproducible crash.
        if (crash_retries - round_number + crash_count + 1 <
                reproducible_crash_target_count):
            break

        run_timeout = warmup_timeout if round_number == 1 else test_timeout
        return_code, crash_time, output = process_handler.run_process(
            command,
            timeout=run_timeout,
            gestures=gestures,
            current_working_directory=app_directory)
        process_handler.terminate_stale_application_instances()

        crash_result = CrashResult(return_code, crash_time, output)
        if not crash_result.is_crash():
            continue

        state = crash_result.get_symbolized_data()
        crash_state = state.crash_state
        security_flag = crash_result.is_security_issue()

        # If we don't have an expected crash state, set it to the one from initial
        # crash.
        if not expected_state:
            expected_state = crash_state

        if security_flag != expected_security_flag:
            logs.log('Detected a crash without the correct security flag.')
            continue

        crash_comparer = CrashComparer(crash_state, expected_state)
        if not crash_comparer.is_similar():
            logs.log('Detected a crash with an unrelated state: '
                     'Expected(%s), Found(%s).' %
                     (expected_state, crash_state))
            continue

        crash_count += 1
        if crash_count >= reproducible_crash_target_count:
            logs.log('Crash is reproducible.')
            return True

    logs.log('Crash is not reproducible. Crash count: %d/%d.' %
             (crash_count, round_number))
    return False
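# A small self-check of the early-bailout arithmetic above (values are
# illustrative; REPRODUCIBILITY_FACTOR is defined elsewhere in this module).
def can_still_reproduce(crash_retries, round_number, crash_count, target):
    """Remaining rounds plus crashes so far can still reach the target."""
    return crash_retries - round_number + crash_count + 1 >= target

# With CRASH_RETRIES=10 and REPRODUCIBILITY_FACTOR=0.5 the target is 5 crashes;
# entering round 8 with only 1 crash, at most 1 + 3 = 4 are achievable, so the
# loop bails out instead of wasting the remaining rounds.
assert not can_still_reproduce(10, 8, 1, 5)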
def run_testcase_and_return_result_in_queue(crash_queue,
                                            thread_index,
                                            file_path,
                                            gestures,
                                            env_copy,
                                            upload_output=False):
    """Run a single testcase and return crash results in the crash queue."""

    # Since this is running in its own process, initialize the log handler again.
    # This is needed for Windows where instances are not shared across child
    # processes. See:
    # https://stackoverflow.com/questions/34724643/python-logging-with-multiprocessing-root-logger-different-in-windows
    logs.configure('run_testcase', {
        'testcase_path': file_path,
    })

    try:
        # Run testcase and check whether a crash occurred or not.
        return_code, crash_time, output = run_testcase(thread_index, file_path,
                                                       gestures, env_copy)

        # Pull testcase directory to host to get any stats files.
        if environment.is_trusted_host():
            from bot.untrusted_runner import file_host
            file_host.pull_testcases_from_worker()

        # Analyze the crash.
        crash_output = _get_crash_output(output)
        crash_result = CrashResult(return_code, crash_time, crash_output)

        # To provide consistency between stats and logs, we use the timestamp
        # taken from stats when uploading logs and the testcase.
        if upload_output:
            log_time = _get_testcase_time(file_path)

        if crash_result.is_crash():
            # Initialize resource list with the testcase path.
            resource_list = [file_path]
            resource_list += get_resource_paths(crash_output)

            # Store the crash stack file in the crash stacktrace directory
            # with filename as the hash of the testcase path.
            crash_stacks_directory = environment.get_value(
                'CRASH_STACKTRACES_DIR')
            stack_file_path = os.path.join(crash_stacks_directory,
                                           utils.string_hash(file_path))
            utils.write_data_to_file(crash_output, stack_file_path)

            # Put crash/no-crash results in the crash queue.
            crash_queue.put(
                Crash(file_path=file_path,
                      crash_time=crash_time,
                      return_code=return_code,
                      resource_list=resource_list,
                      gestures=gestures,
                      stack_file_path=stack_file_path))

            # Don't upload uninteresting testcases (no crash), and only upload
            # when there is a log to correlate with (upload_output).
            if upload_output:
                upload_testcase(file_path, log_time)

        if upload_output:
            # Include full output for uploaded logs (crash output, merge output, etc).
            crash_result_full = CrashResult(return_code, crash_time, output)
            log = prepare_log_for_upload(crash_result_full.get_stacktrace(),
                                         return_code)
            upload_log(log, log_time)
    except Exception:
        logs.log_error('Exception occurred while running '
                       'run_testcase_and_return_result_in_queue.')
def check_for_bad_build(job_type, crash_revision):
    """Return true if the build is bad, i.e. crashes on startup."""
    # Check the bad build check flag to see if we want to do this.
    if not environment.get_value('BAD_BUILD_CHECK'):
        return False

    # Create a blank command line with no file to run and no http.
    command = get_command_line_for_application(file_to_run='',
                                               needs_http=False)

    # When checking for bad builds, we use the default window size.
    # We don't want to pick a custom size since it can potentially cause a
    # startup crash and cause a build to be detected incorrectly as bad.
    default_window_argument = environment.get_value('WINDOW_ARG', '')
    if default_window_argument:
        command = command.replace(' %s' % default_window_argument, '')

    # TSAN is slow and boots slowly on first startup. Increase the warmup
    # timeout for this case.
    if environment.tool_matches('TSAN', job_type):
        fast_warmup_timeout = environment.get_value('WARMUP_TIMEOUT')
    else:
        fast_warmup_timeout = environment.get_value('FAST_WARMUP_TIMEOUT')

    # Initialize helper variables.
    is_bad_build = False
    build_run_console_output = ''
    app_directory = environment.get_value('APP_DIR')

    # Exit all running instances.
    process_handler.terminate_stale_application_instances()

    # Check if the build is bad.
    return_code, crash_time, output = process_handler.run_process(
        command,
        timeout=fast_warmup_timeout,
        current_working_directory=app_directory)
    crash_result = CrashResult(return_code, crash_time, output)

    # 1. Need to account for startup crashes with no crash state. E.g. failed to
    #    load shared library. So, ignore state for comparison.
    # 2. Ignore leaks as they don't block a build from reporting regular crashes
    #    and also don't impact regression range calculations.
    if (crash_result.is_crash(ignore_state=True) and
            not crash_result.should_ignore() and
            crash_result.get_type() not in ['Direct-leak', 'Indirect-leak']):
        is_bad_build = True
        build_run_console_output = utils.get_crash_stacktrace_output(
            command, crash_result.get_stacktrace(symbolized=True),
            crash_result.get_stacktrace(symbolized=False))
        logs.log('Bad build for %s detected at r%d.' %
                 (job_type, crash_revision),
                 output=build_run_console_output)

    # Exit all running instances.
    process_handler.terminate_stale_application_instances()

    # Any of the conditions below indicate that the bot is in a bad state that
    # is not caused by the build itself. In that case, just exit.
    build_state = data_handler.get_build_state(job_type, crash_revision)
    if is_bad_build and utils.sub_string_exists_in(BAD_STATE_HINTS, output):
        logs.log_fatal_and_exit(
            'Bad bot environment detected, exiting.',
            output=build_run_console_output,
            snapshot=process_handler.get_runtime_snapshot())

    # If none of the other bots have added information about this build,
    # then add it now.
    if (build_state == data_types.BuildState.UNMARKED
            and not crash_result.should_ignore()):
        data_handler.add_build_metadata(job_type, crash_revision, is_bad_build,
                                        build_run_console_output)

    return is_bad_build
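# Hypothetical call site (job name and revision are illustrative): skip
# revisions whose builds crash on startup.
if check_for_bad_build('libfuzzer_asan_job', crash_revision=123456):
    logs.log('Build detected as bad, skipping this revision.')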
    parser.add_argument("--outputfile",
                        type=str,
                        help="crashing program output",
                        required=True)
    parser.add_argument("--exitcode",
                        type=int,
                        help="crashing program exit code",
                        required=True)
    parser.add_argument("--time",
                        type=int,
                        help="crash collection time, Unix timestamp",
                        default=0)

    args = parser.parse_args()

    with open(args.outputfile) as output_file:
        output = output_file.read()

    cr = CrashResult(args.exitcode, args.time, output)

    result = {
        'type': cr.get_type(),
        'is_crash': cr.is_crash(),
        'is_security_issue': cr.is_security_issue(),
        'should_ignore': cr.should_ignore(),
        'stacktrace': cr.get_stacktrace(),
        'output': cr.output,
        'return_code': cr.return_code,
    }

    print(json.dumps(result, indent=4))


if __name__ == '__main__':
    main()
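# Example invocation (the script name is hypothetical); prints a JSON object
# with the keys built above: type, is_crash, is_security_issue, should_ignore,
# stacktrace, output, return_code.
#
#   python classify_crash.py --outputfile /tmp/crash.txt --exitcode 1 \
#       --time 1472846341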
def execute_task(testcase_id, job_type):
  """Execute a symbolize command."""
  # Locate the testcase associated with the id.
  testcase = data_handler.get_testcase_by_id(testcase_id)

  # We should at least have a symbolized debug or release build.
  if not build_manager.has_symbolized_builds():
    return

  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

  # Setup testcase and its dependencies.
  file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
  if not file_list:
    return

  # Initialize variables.
  build_fail_wait = environment.get_value("FAIL_WAIT")

  old_crash_stacktrace = data_handler.get_stacktrace(testcase)
  sym_crash_type = testcase.crash_type
  sym_crash_address = testcase.crash_address
  sym_crash_state = testcase.crash_state
  sym_redzone = DEFAULT_REDZONE
  warmup_timeout = environment.get_value("WARMUP_TIMEOUT")

  # Decide which build revision to use.
  if testcase.crash_stacktrace == "Pending":
    # This usually happens when someone clicks the 'Update stacktrace from
    # trunk' button on the testcase details page. In this case, we are forced
    # to use trunk. No revision -> trunk build.
    build_revision = None
  else:
    build_revision = testcase.crash_revision

  # Set up a custom or regular build based on revision.
  build_manager.setup_build(build_revision)

  # Get crash revision used in setting up build.
  crash_revision = environment.get_value("APP_REVISION")

  if not build_manager.check_app_path():
    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         "Build setup failed")
    tasks.add_task(
        "symbolize", testcase_id, job_type, wait_time=build_fail_wait)
    return

  # ASAN tool settings (if the tool is used).
  # See if we can get better stacks with higher redzone sizes.
  # A UAF might actually turn out to be OOB read/write with a bigger redzone.
  if environment.tool_matches("ASAN", job_type) and testcase.security_flag:
    redzone = MAX_REDZONE
    while redzone >= MIN_REDZONE:
      environment.reset_current_memory_tool_options(
          redzone_size=redzone, disable_ubsan=testcase.disable_ubsan)

      process_handler.terminate_stale_application_instances()
      command = testcase_manager.get_command_line_for_application(
          testcase_file_path, needs_http=testcase.http_flag)
      return_code, crash_time, output = process_handler.run_process(
          command, timeout=warmup_timeout, gestures=testcase.gestures)
      crash_result = CrashResult(return_code, crash_time, output)

      if crash_result.is_crash() and "AddressSanitizer" in output:
        state = crash_result.get_symbolized_data()
        security_flag = crash_result.is_security_issue()

        if (not crash_analyzer.ignore_stacktrace(state.crash_stacktrace) and
            security_flag == testcase.security_flag and
            state.crash_type == testcase.crash_type and
            (state.crash_type != sym_crash_type or
             state.crash_state != sym_crash_state)):
          logs.log("Changing crash parameters.\nOld : %s, %s, %s" %
                   (sym_crash_type, sym_crash_address, sym_crash_state))

          sym_crash_type = state.crash_type
          sym_crash_address = state.crash_address
          sym_crash_state = state.crash_state
          sym_redzone = redzone
          old_crash_stacktrace = state.crash_stacktrace

          logs.log("\nNew : %s, %s, %s" %
                   (sym_crash_type, sym_crash_address, sym_crash_state))
          break

      redzone //= 2

  # We should have at least a symbolized debug or a release build.
  symbolized_builds = build_manager.setup_symbolized_builds(crash_revision)
  if not symbolized_builds or (
      not build_manager.check_app_path() and
      not build_manager.check_app_path("APP_PATH_DEBUG")):
    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         "Build setup failed")
    tasks.add_task(
        "symbolize", testcase_id, job_type, wait_time=build_fail_wait)
    return

  # Increase malloc_context_size to get all stack frames. Default is 30.
  environment.reset_current_memory_tool_options(
      redzone_size=sym_redzone,
      malloc_context_size=STACK_FRAME_COUNT,
      symbolize_inline_frames=True,
      disable_ubsan=testcase.disable_ubsan,
  )

  # TSAN tool settings (if the tool is used).
  if environment.tool_matches("TSAN", job_type):
    environment.set_tsan_max_history_size()

  # Do the symbolization if supported by this application.
  result, sym_crash_stacktrace = get_symbolized_stacktraces(
      testcase_file_path, testcase, old_crash_stacktrace, sym_crash_state)

  # Update crash parameters.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  testcase.crash_type = sym_crash_type
  testcase.crash_address = sym_crash_address
  testcase.crash_state = sym_crash_state
  testcase.crash_stacktrace = data_handler.filter_stacktrace(
      sym_crash_stacktrace)

  if not result:
    data_handler.update_testcase_comment(
        testcase,
        data_types.TaskState.ERROR,
        "Unable to reproduce crash, skipping "
        "stacktrace update",
    )
  else:
    # Switch build url to use the less-optimized symbolized build with better
    # stacktrace.
    build_url = environment.get_value("BUILD_URL")
    if build_url:
      testcase.set_metadata("build_url", build_url, update_testcase=False)

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.FINISHED)

  testcase.symbolized = True
  testcase.crash_revision = crash_revision
  testcase.put()

  # We might have updated the crash state. See if it needs to be marked as a
  # duplicate based on other testcases.
  data_handler.handle_duplicate_entry(testcase)

  task_creation.create_blame_task_if_needed(testcase)

  # Switch current directory before builds cleanup.
  root_directory = environment.get_value("ROOT_DIR")
  os.chdir(root_directory)

  # Cleanup symbolized builds which are space-heavy.
  symbolized_builds.delete()
def get_symbolized_stacktraces(testcase_file_path, testcase,
                               old_crash_stacktrace, expected_state):
  """Use the symbolized builds to generate an updated stacktrace."""
  # Initialize variables.
  app_path = environment.get_value("APP_PATH")
  app_path_debug = environment.get_value("APP_PATH_DEBUG")
  long_test_timeout = environment.get_value("WARMUP_TIMEOUT")
  retry_limit = environment.get_value("FAIL_RETRIES")
  symbolized = False

  debug_build_stacktrace = ""
  release_build_stacktrace = old_crash_stacktrace

  # Symbolize using the debug build first so that the debug build stacktrace
  # comes after the more important release build stacktrace.
  if app_path_debug:
    for _ in range(retry_limit):
      process_handler.terminate_stale_application_instances()
      command = testcase_manager.get_command_line_for_application(
          testcase_file_path,
          app_path=app_path_debug,
          needs_http=testcase.http_flag,
      )
      return_code, crash_time, output = process_handler.run_process(
          command, timeout=long_test_timeout, gestures=testcase.gestures)
      crash_result = CrashResult(return_code, crash_time, output)

      if crash_result.is_crash():
        state = crash_result.get_symbolized_data()

        if crash_analyzer.ignore_stacktrace(state.crash_stacktrace):
          continue

        unsymbolized_crash_stacktrace = crash_result.get_stacktrace(
            symbolized=False)
        debug_build_stacktrace = utils.get_crash_stacktrace_output(
            command,
            state.crash_stacktrace,
            unsymbolized_crash_stacktrace,
            build_type="debug",
        )
        symbolized = True
        break

  # Symbolize using the release build.
  if app_path:
    for _ in range(retry_limit):
      process_handler.terminate_stale_application_instances()
      command = testcase_manager.get_command_line_for_application(
          testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
      return_code, crash_time, output = process_handler.run_process(
          command, timeout=long_test_timeout, gestures=testcase.gestures)
      crash_result = CrashResult(return_code, crash_time, output)

      if crash_result.is_crash():
        state = crash_result.get_symbolized_data()

        if crash_analyzer.ignore_stacktrace(state.crash_stacktrace):
          continue

        if state.crash_state != expected_state:
          continue

        # The security flag of the symbolized release stack has to match the
        # security flag of the original testcase.
        security_flag = crash_result.is_security_issue()
        if security_flag != testcase.security_flag:
          continue

        unsymbolized_crash_stacktrace = crash_result.get_stacktrace(
            symbolized=False)
        release_build_stacktrace = utils.get_crash_stacktrace_output(
            command,
            state.crash_stacktrace,
            unsymbolized_crash_stacktrace,
            build_type="release",
        )
        symbolized = True
        break

  stacktrace = release_build_stacktrace
  if debug_build_stacktrace:
    stacktrace += "\n\n" + debug_build_stacktrace

  return symbolized, stacktrace
def test_for_crash_with_retries(testcase,
                                testcase_path,
                                test_timeout,
                                http_flag=False,
                                compare_crash=True):
    """Test for a crash and return crash parameters like crash type, crash state,
  crash stacktrace, etc."""
    # Clean up any existing application instances and user profile directories.
    # Cleaning up temp clears user profile directories and should be done before
    # the |get_command_line_for_application| call, since that call creates
    # dependencies in the profile folder.
    process_handler.terminate_stale_application_instances()
    shell.clear_temp_directory()

    app_directory = environment.get_value('APP_DIR')
    command = get_command_line_for_application(testcase_path,
                                               needs_http=http_flag)
    crash_retries = environment.get_value('CRASH_RETRIES')
    flaky_stacktrace = testcase.flaky_stack
    warmup_timeout = environment.get_value('WARMUP_TIMEOUT')

    logs.log('Testing for crash (command="%s").' % command)

    for round_number in range(1, crash_retries + 1):
        run_timeout = warmup_timeout if round_number == 1 else test_timeout
        return_code, crash_time, output = process_handler.run_process(
            command,
            timeout=run_timeout,
            gestures=testcase.gestures,
            current_working_directory=app_directory)
        process_handler.terminate_stale_application_instances()

        crash_result = CrashResult(return_code, crash_time, output)
        if not crash_result.is_crash():
            continue

        state = crash_result.get_symbolized_data()
        logs.log('Crash occurred in %d seconds (round %d). State:\n%s' %
                 (crash_time, round_number, state.crash_state))

        if not compare_crash or not testcase.crash_state:
            logs.log('Crash stacktrace comparison skipped.')
            return crash_result

        if flaky_stacktrace:
            logs.log('Crash stacktrace is marked flaky, skipping comparison.')
            return crash_result

        if crash_result.should_ignore():
            logs.log('Crash stacktrace matched ignore signatures, ignored.')
            continue

        if crash_result.is_security_issue() != testcase.security_flag:
            logs.log('Crash security flag does not match, ignored.')
            continue

        crash_comparer = CrashComparer(state.crash_state, testcase.crash_state)
        if crash_comparer.is_similar():
            logs.log('Crash stacktrace is similar to original stacktrace.')
            return crash_result
        else:
            logs.log('Crash stacktrace does not match original stacktrace.')

    logs.log("Didn't crash at all.")
    crash_result = CrashResult(return_code=0, crash_time=0, output=output)
    return crash_result