Example #1
  def test_run_process_non_testcase(self):
    """Test run_process for non-testcase runs."""
    return_code, _, output = process_handler.run_process(
        '/bin/sh -c \'echo $TRUSTED_HOST\'', testcase_run=False)
    self.assertEqual(return_code, 0)
    self.assertEqual(output, 'True')

    return_code, _, output = process_handler.run_process(
        '/bin/sh -c \'echo $UNTRUSTED_WORKER\'', testcase_run=False)
    self.assertEqual(return_code, 0)
    self.assertEqual(output, '')
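For orientation, a minimal sketch of the calling convention every example on this page relies on: process_handler.run_process takes a command-line string plus optional keyword arguments (timeout, gestures, env_copy, current_working_directory, testcase_run, need_shell, ignore_children) and returns a (return_code, execution_time, output) tuple. The import path below is an assumption; adjust it to wherever process_handler lives in your tree.

from system import process_handler  # assumed import path

# run_process returns a (return_code, execution_time, output) tuple.
return_code, execution_time, output = process_handler.run_process(
    '/bin/echo hello',   # full command line to execute
    timeout=10,          # seconds before the process is terminated
    testcase_run=False)  # utility run, not a testcase reproduction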
Example #2
    def run(self, round_number):
        """Run the testcase once."""
        app_directory = environment.get_value('APP_DIR')
        warmup_timeout = environment.get_value('WARMUP_TIMEOUT')
        run_timeout = warmup_timeout if round_number == 1 else self._test_timeout

        if self._is_black_box:
            return_code, crash_time, output = process_handler.run_process(
                self._command,
                timeout=run_timeout,
                gestures=self._gestures,
                current_working_directory=app_directory)
        else:
            result = engine_reproduce(self._engine_impl,
                                      self._fuzz_target.binary,
                                      self._testcase_path, self._arguments,
                                      run_timeout)
            return_code = result.return_code
            crash_time = result.time_executed

            log_header = engine_common.get_log_header(
                result.command, environment.get_value('BOT_NAME'),
                result.time_executed)
            output = log_header + '\n' + result.output

        process_handler.terminate_stale_application_instances()

        crash_result = CrashResult(return_code, crash_time, output)
        if not crash_result.is_crash():
            logs.log('No crash occurred (round {round_number}).'.format(
                round_number=round_number),
                     output=output)

        return crash_result
Example #3
def run_testcase(thread_index, file_path, gestures, env_copy):
    """Run a single testcase and return crash results in the crash queue."""
    try:
        # Update environment with environment copy from parent.
        if env_copy:
            os.environ.update(env_copy)

        # Initialize variables.
        needs_http = '-http-' in file_path
        test_timeout = environment.get_value('TEST_TIMEOUT', 10)
        app_directory = environment.get_value('APP_DIR')
        environment.set_value('PIDS', '[]')

        # Get command line options.
        command = get_command_line_for_application(
            file_path, user_profile_index=thread_index, needs_http=needs_http)

        # Run testcase.
        return process_handler.run_process(
            command,
            timeout=test_timeout,
            gestures=gestures,
            env_copy=env_copy,
            current_working_directory=app_directory)
    except Exception:
        logs.log_error('Exception occurred while running run_testcase.')

        return None, None, None
Example #4
  def run(self,
          file_path=None,
          gestures=None,
          arguments=None,
          timeout=None,
          log_command=False,
          use_fresh_profile=False):
    """Run the test."""
    if file_path is None:
      file_path = self.file_path

    if gestures is None:
      gestures = self.gestures

    if arguments is None:
      arguments = self.arguments

    # TODO(mbarbella): Dynamic timeout adjustment.
    if timeout is None:
      timeout = self.timeout

    needs_http = self.testcase.http_flag
    profile_index = self._get_profile_index()

    if use_fresh_profile and environment.get_value('USER_PROFILE_ARG'):
      shell.remove_directory(
          testcase_manager.get_user_profile_directory(profile_index))

    # For Android, we need to sync our local testcases directory with the one on
    # the device.
    if environment.platform() == 'ANDROID':
      android.device.push_testcases_to_device()
    elif environment.is_trusted_host():
      from bot.untrusted_runner import file_host
      file_host.push_testcases_to_worker()

    # If we need to write a command line file, only do so if the arguments have
    # changed.
    arguments_changed = arguments != self._previous_arguments
    self._previous_arguments = arguments

    command = testcase_manager.get_command_line_for_application(
        file_to_run=file_path,
        app_args=arguments,
        needs_http=needs_http,
        user_profile_index=profile_index,
        write_command_line_file=arguments_changed)
    if log_command:
      logs.log('Executing command: %s' % command)

    return_code, crash_time, output = process_handler.run_process(
        command, timeout=timeout, gestures=gestures)

    self._release_profile(profile_index)
    return CrashResult(return_code, crash_time, output)
Example #5
def run():
    """Run custom platform specific init scripts."""
    platform = environment.platform().lower()
    script_path = os.path.join(environment.get_config_directory(), SCRIPT_DIR,
                               platform + _extension(platform))
    if not os.path.exists(script_path):
        return

    os.chmod(script_path, 0o750)
    if script_path.endswith('.ps1'):
        cmd = 'powershell.exe ' + script_path
    else:
        cmd = script_path

    try:
        process_handler.run_process(cmd,
                                    timeout=1800,
                                    need_shell=True,
                                    testcase_run=False,
                                    ignore_children=True)
    except Exception:
        logs.log_error('Failed to execute platform initialization script.')
Example #6
def combine_ipc_dumps(ipcdumps, original_file_path):
  """Combines a list of ipcdump files into a single dump."""
  input_file_string = ','.join(ipcdumps)
  executable = get_ipc_message_util_executable()
  output_file_path = get_temporary_file_name(original_file_path)
  command_line = shell.get_command_line_from_argument_list(
      [executable, input_file_string, output_file_path])
  return_code, _, output = process_handler.run_process(
      command_line, testcase_run=False, timeout=COMBINED_IPCDUMP_TIMEOUT)

  for ipcdump in ipcdumps:
    shell.remove_file(ipcdump)

  if return_code or not os.path.exists(output_file_path):
    logs.log_error('Failed to create ipc dump file %s.' % output)
    return None

  return output_file_path
Example #7
def _run_libfuzzer_testcase(testcase, testcase_file_path):
  """Run libFuzzer testcase, and return the CrashResult."""
  # Cleanup any existing application instances and temp directories.
  process_handler.cleanup_stale_processes()
  shell.clear_temp_directory()

  if environment.is_trusted_host():
    from bot.untrusted_runner import file_host
    file_host.copy_file_to_worker(
        testcase_file_path, file_host.rebase_to_worker_root(testcase_file_path))

  test_timeout = environment.get_value('TEST_TIMEOUT',
                                       process_handler.DEFAULT_TEST_TIMEOUT)
  repro_command = tests.get_command_line_for_application(
      file_to_run=testcase_file_path, needs_http=testcase.http_flag)
  return_code, crash_time, output = process_handler.run_process(
      repro_command, timeout=test_timeout)
  return CrashResult(return_code, crash_time, output)
Example #8
  def tokenize(current_file_path):
    """Generate a token list for an IPC fuzzer test case."""
    command_line = shell.get_command_line_from_argument_list(
        [get_ipc_message_util_executable(), '--dump', current_file_path])
    _, _, output = process_handler.run_process(
        command_line, testcase_run=False, timeout=IPCDUMP_TIMEOUT)
    output_lines = output.splitlines()
    if not output_lines:
      return []

    # Each output line starts with the message index followed by a ".", but
    # we are only interested in the total number of messages in the file. To
    # find this, we add one to the index of the final message.
    try:
      last_index = int(output_lines[-1].split('.')[0])
    except ValueError:
      return []

    return list(range(last_index + 1))
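As a concrete illustration of the comment above (the sample dump output is invented), the leading index on the last line determines how many messages the file contains, and the resulting token list is simply 0..last_index:

# Worked example of the parsing logic in tokenize(); the output is made up.
output = ('0. AutofillHostMsg_DidFillAutofillFormData\n'
          '1. FooMsg\n'
          '2. BarMsg')
output_lines = output.splitlines()
last_index = int(output_lines[-1].split('.')[0])  # -> 2
tokens = list(range(last_index + 1))              # -> [0, 1, 2]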
Example #9
def create_partial_ipc_dump(tokens, original_file_path):
  """Use the ipc_message_util utility to create a file for up to
     |TOKENS_PER_IPCDUMP| tokens."""
  assert len(tokens) <= TOKENS_PER_IPCDUMP

  token_list = ','.join([str(token) for token in tokens])
  temp_file_path = get_temporary_file_name(original_file_path)

  executable = get_ipc_message_util_executable()
  command_line = shell.get_command_line_from_argument_list(
      [executable,
       '--in=%s' % token_list, original_file_path, temp_file_path])
  return_code, _, output = process_handler.run_process(
      command_line, testcase_run=False, timeout=IPCDUMP_TIMEOUT)
  if return_code or not os.path.exists(temp_file_path):
    # For some reason, generating the new file failed.
    logs.log_error('Failed to create ipc dump file %s.' % output)
    return None

  return temp_file_path
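A hedged sketch of how create_partial_ipc_dump and combine_ipc_dumps plausibly fit together during IPC dump minimization. The driver function below is an assumption; only the two helpers and the TOKENS_PER_IPCDUMP constant come from the surrounding examples.

def _rebuild_dump_from_tokens(tokens, original_file_path):
  """Hypothetical driver tying the two helpers above together."""
  # Write one partial dump per chunk of at most TOKENS_PER_IPCDUMP tokens,
  # then merge the partial dumps back into a single file.
  ipcdumps = []
  for start in range(0, len(tokens), TOKENS_PER_IPCDUMP):
    partial = create_partial_ipc_dump(
        tokens[start:start + TOKENS_PER_IPCDUMP], original_file_path)
    if partial:
      ipcdumps.append(partial)

  if not ipcdumps:
    return None

  # combine_ipc_dumps (shown earlier) removes the partial dump files itself.
  return combine_ipc_dumps(ipcdumps, original_file_path)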
Example #10
def get_crash_info_and_stacktrace(application_command_line, crash_stacktrace,
                                  gestures):
  """Return crash minidump location and updated crash stacktrace."""
  app_name_lower = environment.get_value('APP_NAME').lower()
  platform = environment.platform()
  retry_limit = environment.get_value('FAIL_RETRIES')
  using_android = platform == 'ANDROID'
  using_chrome = 'chrome' in app_name_lower or 'chromium' in app_name_lower
  warmup_timeout = environment.get_value('WARMUP_TIMEOUT', 90)

  # Minidump generation is only applicable to the Chrome application.
  # FIXME: Support minidump generation on platforms other than Android.
  if not using_chrome or not using_android:
    return None, crash_stacktrace

  # Get the crash info from stacktrace.
  crash_info = get_crash_info(crash_stacktrace)

  # If we lost the minidump file, we need to recreate it.
  # Note that because of the way crash_info is generated now, if we have a
  # non-None crash_info, we should also have its minidump path; we insert
  # the check to safeguard against possibly constructing the crash_info in
  # other ways in the future that might potentially lose the minidump path.
  if not crash_info or not crash_info.minidump_info.path:
    for _ in xrange(retry_limit):
      _, _, output = (
          process_handler.run_process(
              application_command_line,
              timeout=warmup_timeout,
              gestures=gestures))

      crash_info = get_crash_info(output)
      if crash_info and crash_info.minidump_info.path:
        crash_stacktrace = utils.decode_to_unicode(output)
        break

    if not crash_info or not crash_info.minidump_info.path:
      # We could not regenerate a minidump for this crash.
      logs.log('Unable to regenerate a minidump for this crash.')

  return crash_info, crash_stacktrace
Example #11
def supports_ipc_minimization(file_path):
  """Check to see if IPC minimization is supported for the current build."""
  executable = get_ipc_message_util_executable()
  if not executable:
    # IPC fuzzer minimization is not supported on this platform.
    return False

  command_line = shell.get_command_line_from_argument_list(
      [executable, '--dump', '--in=0', file_path])
  return_code, _, output = process_handler.run_process(
      command_line, testcase_run=False, timeout=IPCDUMP_TIMEOUT)

  # If --in is not supported by this version of the ipc_message_util binary,
  # it will exit with a nonzero exit status. Also ensure that the first message
  # is printed in case the build is bad for some other reason.
  # Example output: 0. AutofillHostMsg_DidFillAutofillFormData
  if return_code or not output.startswith('0.'):
    return False

  supports_ipc_minimization.is_supported = True
  return True
Example #12
    def test_run_process_env(self):
        """Test environment passing when running processes."""
        environment.set_value('ASAN_OPTIONS', 'host_value')
        runner = remote_process_host.RemoteProcessRunner('/bin/sh', ['-c'])
        result = runner.run_and_wait(['echo $ASAN_OPTIONS'])
        self.assertEqual(result.output, 'host_value\n')

        result = runner.run_and_wait(['echo $ASAN_OPTIONS $UBSAN_OPTIONS'],
                                     env={
                                         'UBSAN_OPTIONS': 'ubsan',
                                         'NOT_PASSED': 'blah'
                                     })
        self.assertEqual(result.output, 'ubsan\n')

        _, _, output = process_handler.run_process(
            '/bin/sh -c \'echo $ASAN_OPTIONS $MSAN_OPTIONS\'',
            testcase_run=True,
            env_copy={
                'MSAN_OPTIONS': 'msan',
                'NOT_PASSED': 'blah'
            })
        self.assertEqual(output, 'host_value msan')
Example #13
def run_process(request, _):
    """Implementation of RunProcess."""
    args = {}
    protobuf_utils.get_protobuf_field(args, request, 'cmdline')
    protobuf_utils.get_protobuf_field(args, request,
                                      'current_working_directory')
    protobuf_utils.get_protobuf_field(args, request, 'timeout')
    protobuf_utils.get_protobuf_field(args, request, 'need_shell')

    if request.gestures:
        args['gestures'] = request.gestures

    if request.env_copy:
        args['env_copy'] = request.env_copy

    protobuf_utils.get_protobuf_field(args, request, 'testcase_run')
    protobuf_utils.get_protobuf_field(args, request, 'ignore_children')

    return_code, execution_time, output = process_handler.run_process(**args)
    response = untrusted_runner_pb2.RunProcessResponse(
        return_code=return_code, execution_time=execution_time, output=output)

    return response
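protobuf_utils.get_protobuf_field is not shown on this page; the hypothetical helper below only illustrates the pattern this handler depends on: copy an optional request field into the kwargs dict when it carries a value, so that process_handler.run_process keeps its own defaults otherwise.

def _copy_field_if_set(args, request, field_name):
    # Hypothetical stand-in for protobuf_utils.get_protobuf_field; the real
    # helper may behave differently. Unset proto3 scalar fields read back as
    # falsy defaults ('', 0, False), so only values the caller actually set
    # are forwarded.
    value = getattr(request, field_name, None)
    if value:
        args[field_name] = value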
Example #14
def test_for_crash_with_retries(testcase,
                                testcase_path,
                                test_timeout,
                                http_flag=False,
                                compare_crash=True):
    """Test for a crash and return crash parameters like crash type, crash state,
  crash stacktrace, etc."""
    # Cleanup any existing application instances and user profile directories.
    # Cleaning up temp clears user profile directories and should be done before
    # calling |get_command_line_for_application|, since that creates
    # dependencies in the profile folder.
    process_handler.terminate_stale_application_instances()
    shell.clear_temp_directory()

    app_directory = environment.get_value('APP_DIR')
    command = get_command_line_for_application(testcase_path,
                                               needs_http=http_flag)
    crash_retries = environment.get_value('CRASH_RETRIES')
    flaky_stacktrace = testcase.flaky_stack
    warmup_timeout = environment.get_value('WARMUP_TIMEOUT')

    logs.log('Testing for crash (command="%s").' % command)

    for round_number in xrange(1, crash_retries + 1):
        run_timeout = warmup_timeout if round_number == 1 else test_timeout
        return_code, crash_time, output = process_handler.run_process(
            command,
            timeout=run_timeout,
            gestures=testcase.gestures,
            current_working_directory=app_directory)
        process_handler.terminate_stale_application_instances()

        crash_result = CrashResult(return_code, crash_time, output)
        if not crash_result.is_crash():
            continue

        state = crash_result.get_symbolized_data()
        logs.log('Crash occurred in %d seconds (round %d). State:\n%s' %
                 (crash_time, round_number, state.crash_state))

        if not compare_crash or not testcase.crash_state:
            logs.log('Crash stacktrace comparison skipped.')
            return crash_result

        if flaky_stacktrace:
            logs.log('Crash stacktrace is marked flaky, skipping comparison.')
            return crash_result

        if crash_result.should_ignore():
            logs.log('Crash stacktrace matched ignore signatures, ignored.')
            continue

        if crash_result.is_security_issue() != testcase.security_flag:
            logs.log('Crash security flag does not match, ignored.')
            continue

        crash_comparer = CrashComparer(state.crash_state, testcase.crash_state)
        if crash_comparer.is_similar():
            logs.log('Crash stacktrace is similar to original stacktrace.')
            return crash_result
        else:
            logs.log('Crash stacktrace does not match original stacktrace.')

    logs.log("Didn't crash at all.")
    crash_result = CrashResult(return_code=0, crash_time=0, output=output)
    return crash_result
Example #15
def _run_libfuzzer_tool(tool_name,
                        testcase,
                        testcase_file_path,
                        timeout,
                        expected_crash_state,
                        set_dedup_flags=False):
  """Run libFuzzer tool to either minimize or cleanse."""

  memory_tool_options_var = environment.get_current_memory_tool_var()
  saved_memory_tool_options = environment.get_value(memory_tool_options_var)

  def _set_dedup_flags():
    """Allow libFuzzer to do its own crash comparison during minimization."""
    memory_tool_options = environment.get_memory_tool_options(
        memory_tool_options_var)

    memory_tool_options['symbolize'] = 1
    memory_tool_options['dedup_token_length'] = 3

    environment.set_memory_tool_options(memory_tool_options_var,
                                        memory_tool_options)

  def _unset_dedup_flags():
    """Reset memory tool options."""
    # This is needed so that when we re-run, we can symbolize ourselves
    # (ignoring inline frames).
    environment.set_value(memory_tool_options_var, saved_memory_tool_options)

  output_file_path = get_temporary_file_name(testcase_file_path)
  rebased_output_file_path = output_file_path

  if environment.is_trusted_host():
    from bot.untrusted_runner import file_host
    file_host.copy_file_to_worker(
        testcase_file_path, file_host.rebase_to_worker_root(testcase_file_path))
    rebased_output_file_path = file_host.rebase_to_worker_root(output_file_path)

  arguments = environment.get_value('APP_ARGS', '')
  arguments += (' --cf-{tool_name}-timeout={timeout} '
                '--cf-{tool_name}-to={output_file_path}').format(
                    tool_name=tool_name,
                    output_file_path=rebased_output_file_path,
                    timeout=timeout)
  command = tests.get_command_line_for_application(
      file_to_run=testcase_file_path,
      app_args=arguments,
      needs_http=testcase.http_flag)
  logs.log('Executing command: %s' % command)

  if set_dedup_flags:
    _set_dedup_flags()

  # A small buffer is added to the timeout to allow the current test to
  # finish, and the file to be written. Since we should terminate beforehand, a
  # long delay only slows fuzzing in cases where it's necessary.
  _, _, output = process_handler.run_process(command, timeout=timeout + 60)

  if environment.is_trusted_host():
    from bot.untrusted_runner import file_host
    file_host.copy_file_from_worker(rebased_output_file_path, output_file_path)

  if set_dedup_flags:
    _unset_dedup_flags()

  if not os.path.exists(output_file_path):
    logs.log_warn('LibFuzzer %s run failed.' % tool_name, output=output)
    return None, None

  # Ensure that the crash parameters match. It's possible that we will
  # minimize/cleanse to an unrelated bug, such as a timeout.
  crash_result = _run_libfuzzer_testcase(testcase, output_file_path)
  state = crash_result.get_symbolized_data()
  security_flag = crash_result.is_security_issue()
  if (security_flag != testcase.security_flag or
      state.crash_state != expected_crash_state):
    logs.log_warn('Ignoring unrelated crash.\n'
                  'State: %s (expected %s)\n'
                  'Security: %s (expected %s)\n'
                  'Output: %s\n' %
                  (state.crash_state, expected_crash_state, security_flag,
                   testcase.security_flag, state.crash_stacktrace))
    return None, None

  with open(output_file_path, 'rb') as file_handle:
    minimized_keys = blobs.write_blob(file_handle)

  testcase.minimized_keys = minimized_keys
  testcase.put()

  return output_file_path, crash_result
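A hedged sketch of a call site for the helper above, e.g. from a minimize task. The variables and the timeout value are assumptions; only the signature of _run_libfuzzer_tool comes from the example.

# Hypothetical invocation for minimization.
minimized_path, crash_result = _run_libfuzzer_tool(
    'minimize',
    testcase,
    testcase_file_path,
    timeout=120,  # assumed per-run timeout in seconds
    expected_crash_state=testcase.crash_state,
    set_dedup_flags=True)
if minimized_path:
  logs.log('Minimized testcase written to %s.' % minimized_path)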
Example #16
def check_for_bad_build(job_type, crash_revision):
    """Return true if the build is bad, i.e. crashes on startup."""
    # Check the bad build check flag to see if we want to do this.
    if not environment.get_value('BAD_BUILD_CHECK'):
        return False

    # Create a blank command line with no file to run and no http.
    command = get_command_line_for_application(file_to_run='',
                                               needs_http=False)

    # When checking for bad builds, we use the default window size.
    # We don't want to pick a custom size since it can potentially cause a
    # startup crash and cause a build to be detected incorrectly as bad.
    default_window_argument = environment.get_value('WINDOW_ARG', '')
    if default_window_argument:
        command = command.replace(' %s' % default_window_argument, '')

    # TSAN is slow, and boots slow on first startup. Increase the warmup
    # timeout for this case.
    if environment.tool_matches('TSAN', job_type):
        fast_warmup_timeout = environment.get_value('WARMUP_TIMEOUT')
    else:
        fast_warmup_timeout = environment.get_value('FAST_WARMUP_TIMEOUT')

    # Initialize helper variables.
    is_bad_build = False
    build_run_console_output = ''
    app_directory = environment.get_value('APP_DIR')

    # Exit all running instances.
    process_handler.terminate_stale_application_instances()

    # Check if the build is bad.
    return_code, crash_time, output = process_handler.run_process(
        command,
        timeout=fast_warmup_timeout,
        current_working_directory=app_directory)
    crash_result = CrashResult(return_code, crash_time, output)

    # 1. Need to account for startup crashes with no crash state. E.g. failed to
    #    load shared library. So, ignore state for comparison.
    # 2. Ignore leaks as they don't block a build from reporting regular crashes
    #    and also don't impact regression range calculations.
    if (crash_result.is_crash(ignore_state=True)
            and not crash_result.should_ignore() and
            not crash_result.get_type() in ['Direct-leak', 'Indirect-leak']):
        is_bad_build = True
        build_run_console_output = utils.get_crash_stacktrace_output(
            command, crash_result.get_stacktrace(symbolized=True),
            crash_result.get_stacktrace(symbolized=False))
        logs.log('Bad build for %s detected at r%d.' %
                 (job_type, crash_revision),
                 output=build_run_console_output)

    # Exit all running instances.
    process_handler.terminate_stale_application_instances()

    # Any of the conditions below indicate that the bot is in a bad state and it is
    # not caused by the build itself. In that case, just exit.
    build_state = data_handler.get_build_state(job_type, crash_revision)
    if is_bad_build and utils.sub_string_exists_in(BAD_STATE_HINTS, output):
        logs.log_fatal_and_exit(
            'Bad bot environment detected, exiting.',
            output=build_run_console_output,
            snapshot=process_handler.get_runtime_snapshot())

    # If none of the other bots have added information about this build,
    # then add it now.
    if (build_state == data_types.BuildState.UNMARKED
            and not crash_result.should_ignore()):
        data_handler.add_build_metadata(job_type, crash_revision, is_bad_build,
                                        build_run_console_output)

    return is_bad_build
Example #17
def execute_task(testcase_id, job_type):
  """Execute a symbolize command."""
  # Locate the testcase associated with the id.
  testcase = data_handler.get_testcase_by_id(testcase_id)

  # We should at least have a symbolized debug or release build.
  if not build_manager.has_symbolized_builds():
    return

  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

  # Setup testcase and its dependencies.
  file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
  if not file_list:
    return

  # Initialize variables.
  build_fail_wait = environment.get_value("FAIL_WAIT")

  old_crash_stacktrace = data_handler.get_stacktrace(testcase)
  sym_crash_type = testcase.crash_type
  sym_crash_address = testcase.crash_address
  sym_crash_state = testcase.crash_state
  sym_redzone = DEFAULT_REDZONE
  warmup_timeout = environment.get_value("WARMUP_TIMEOUT")

  # Decide which build revision to use.
  if testcase.crash_stacktrace == "Pending":
    # This usually happens when someone clicked the 'Update stacktrace from
    # trunk' button on the testcase details page. In this case, we are forced
    # to use trunk. No revision -> trunk build.
    build_revision = None
  else:
    build_revision = testcase.crash_revision

  # Set up a custom or regular build based on revision.
  build_manager.setup_build(build_revision)

  # Get crash revision used in setting up build.
  crash_revision = environment.get_value("APP_REVISION")

  if not build_manager.check_app_path():
    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         "Build setup failed")
    tasks.add_task(
        "symbolize", testcase_id, job_type, wait_time=build_fail_wait)
    return

  # ASAN tool settings (if the tool is used).
  # See if we can get better stacks with higher redzone sizes.
  # A UAF might actually turn out to be OOB read/write with a bigger redzone.
  if environment.tool_matches("ASAN", job_type) and testcase.security_flag:
    redzone = MAX_REDZONE
    while redzone >= MIN_REDZONE:
      environment.reset_current_memory_tool_options(
          redzone_size=testcase.redzone, disable_ubsan=testcase.disable_ubsan)

      process_handler.terminate_stale_application_instances()
      command = testcase_manager.get_command_line_for_application(
          testcase_file_path, needs_http=testcase.http_flag)
      return_code, crash_time, output = process_handler.run_process(
          command, timeout=warmup_timeout, gestures=testcase.gestures)
      crash_result = CrashResult(return_code, crash_time, output)

      if crash_result.is_crash() and "AddressSanitizer" in output:
        state = crash_result.get_symbolized_data()
        security_flag = crash_result.is_security_issue()

        if (not crash_analyzer.ignore_stacktrace(state.crash_stacktrace) and
            security_flag == testcase.security_flag and
            state.crash_type == testcase.crash_type and
            (state.crash_type != sym_crash_type or
             state.crash_state != sym_crash_state)):
          logs.log("Changing crash parameters.\nOld : %s, %s, %s" %
                   (sym_crash_type, sym_crash_address, sym_crash_state))

          sym_crash_type = state.crash_type
          sym_crash_address = state.crash_address
          sym_crash_state = state.crash_state
          sym_redzone = redzone
          old_crash_stacktrace = state.crash_stacktrace

          logs.log("\nNew : %s, %s, %s" %
                   (sym_crash_type, sym_crash_address, sym_crash_state))
          break

      redzone /= 2

  # We should have at least a symbolized debug or a release build.
  symbolized_builds = build_manager.setup_symbolized_builds(crash_revision)
  if not symbolized_builds or (
      not build_manager.check_app_path() and
      not build_manager.check_app_path("APP_PATH_DEBUG")):
    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         "Build setup failed")
    tasks.add_task(
        "symbolize", testcase_id, job_type, wait_time=build_fail_wait)
    return

  # Increase malloc_context_size to get all stack frames. Default is 30.
  environment.reset_current_memory_tool_options(
      redzone_size=sym_redzone,
      malloc_context_size=STACK_FRAME_COUNT,
      symbolize_inline_frames=True,
      disable_ubsan=testcase.disable_ubsan,
  )

  # TSAN tool settings (if the tool is used).
  if environment.tool_matches("TSAN", job_type):
    environment.set_tsan_max_history_size()

  # Do the symbolization if supported by this application.
  result, sym_crash_stacktrace = get_symbolized_stacktraces(
      testcase_file_path, testcase, old_crash_stacktrace, sym_crash_state)

  # Update crash parameters.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  testcase.crash_type = sym_crash_type
  testcase.crash_address = sym_crash_address
  testcase.crash_state = sym_crash_state
  testcase.crash_stacktrace = data_handler.filter_stacktrace(
      sym_crash_stacktrace)

  if not result:
    data_handler.update_testcase_comment(
        testcase,
        data_types.TaskState.ERROR,
        "Unable to reproduce crash, skipping "
        "stacktrace update",
    )
  else:
    # Switch build url to use the less-optimized symbolized build with better
    # stacktrace.
    build_url = environment.get_value("BUILD_URL")
    if build_url:
      testcase.set_metadata("build_url", build_url, update_testcase=False)

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.FINISHED)

  testcase.symbolized = True
  testcase.crash_revision = crash_revision
  testcase.put()

  # We might have updated the crash state. See if we need to mark it as a
  # duplicate based on other testcases.
  data_handler.handle_duplicate_entry(testcase)

  task_creation.create_blame_task_if_needed(testcase)

  # Switch current directory before builds cleanup.
  root_directory = environment.get_value("ROOT_DIR")
  os.chdir(root_directory)

  # Cleanup symbolized builds which are space-heavy.
  symbolized_builds.delete()
Example #18
def get_symbolized_stacktraces(testcase_file_path, testcase,
                               old_crash_stacktrace, expected_state):
  """Use the symbolized builds to generate an updated stacktrace."""
  # Initialize variables.
  app_path = environment.get_value("APP_PATH")
  app_path_debug = environment.get_value("APP_PATH_DEBUG")
  long_test_timeout = environment.get_value("WARMUP_TIMEOUT")
  retry_limit = environment.get_value("FAIL_RETRIES")
  symbolized = False

  debug_build_stacktrace = ""
  release_build_stacktrace = old_crash_stacktrace

  # Symbolize using the debug build first so that the debug build stacktrace
  # comes after the more important release build stacktrace.
  if app_path_debug:
    for _ in range(retry_limit):
      process_handler.terminate_stale_application_instances()
      command = testcase_manager.get_command_line_for_application(
          testcase_file_path,
          app_path=app_path_debug,
          needs_http=testcase.http_flag,
      )
      return_code, crash_time, output = process_handler.run_process(
          command, timeout=long_test_timeout, gestures=testcase.gestures)
      crash_result = CrashResult(return_code, crash_time, output)

      if crash_result.is_crash():
        state = crash_result.get_symbolized_data()

        if crash_analyzer.ignore_stacktrace(state.crash_stacktrace):
          continue

        unsymbolized_crash_stacktrace = crash_result.get_stacktrace(
            symbolized=False)
        debug_build_stacktrace = utils.get_crash_stacktrace_output(
            command,
            state.crash_stacktrace,
            unsymbolized_crash_stacktrace,
            build_type="debug",
        )
        symbolized = True
        break

  # Symbolize using the release build.
  if app_path:
    for _ in range(retry_limit):
      process_handler.terminate_stale_application_instances()
      command = testcase_manager.get_command_line_for_application(
          testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
      return_code, crash_time, output = process_handler.run_process(
          command, timeout=long_test_timeout, gestures=testcase.gestures)
      crash_result = CrashResult(return_code, crash_time, output)

      if crash_result.is_crash():
        state = crash_result.get_symbolized_data()

        if crash_analyzer.ignore_stacktrace(state.crash_stacktrace):
          continue

        if state.crash_state != expected_state:
          continue

        # Release stack's security flag has to match the symbolized release
        # stack's security flag.
        security_flag = crash_result.is_security_issue()
        if security_flag != testcase.security_flag:
          continue

        unsymbolized_crash_stacktrace = crash_result.get_stacktrace(
            symbolized=False)
        release_build_stacktrace = utils.get_crash_stacktrace_output(
            command,
            state.crash_stacktrace,
            unsymbolized_crash_stacktrace,
            build_type="release",
        )
        symbolized = True
        break

  stacktrace = release_build_stacktrace
  if debug_build_stacktrace:
    stacktrace += "\n\n" + debug_build_stacktrace

  return symbolized, stacktrace
Example #19
def check_for_bad_build(job_type, crash_revision):
    """Return true if the build is bad, i.e. crashes on startup."""
    # Check the bad build check flag to see if we want to do this.
    if not environment.get_value('BAD_BUILD_CHECK'):
        return False

    # Do not detect leaks while checking for bad builds.
    environment.reset_current_memory_tool_options(leaks=False)

    # Create a blank command line with no file to run and no http.
    command = get_command_line_for_application(file_to_run='',
                                               needs_http=False)

    # When checking for bad builds, we use the default window size.
    # We don't want to pick a custom size since it can potentially cause a
    # startup crash and cause a build to be detected incorrectly as bad.
    default_window_argument = environment.get_value('WINDOW_ARG', '')
    if default_window_argument:
        command = command.replace(' %s' % default_window_argument, '')

    # Warmup timeout.
    fast_warmup_timeout = environment.get_value('FAST_WARMUP_TIMEOUT')

    # TSAN is slow, and boots slow on first startup. Increase the warmup
    # timeout for this case.
    if environment.tool_matches('TSAN', job_type):
        fast_warmup_timeout = environment.get_value('WARMUP_TIMEOUT')

    # Initialize helper variables.
    is_bad_build = False
    build_run_console_output = ''
    output = ''
    app_directory = environment.get_value('APP_DIR')

    # Check if the build is bad.
    process_handler.terminate_stale_application_instances()
    exit_code, _, output = process_handler.run_process(
        command,
        timeout=fast_warmup_timeout,
        current_working_directory=app_directory)
    output = utils.decode_to_unicode(output)
    if crash_analyzer.is_crash(exit_code, output):
        is_bad_build = True
        build_run_console_output = (
            '%s\n\n%s\n\n%s' %
            (command, stack_symbolizer.symbolize_stacktrace(output), output))
        logs.log('Bad build for %s detected at r%d.' %
                 (job_type, crash_revision),
                 output=build_run_console_output)

    # Exit all running instances.
    process_handler.terminate_stale_application_instances()

    # Any of the conditions below indicate that the bot is in a bad state and it is
    # not caused by the build itself. In that case, just exit.
    build_state = data_handler.get_build_state(job_type, crash_revision)
    if (is_bad_build and ('cannot open display' in output
                          or 'logging service has stopped' in output
                          or 'Maximum number of clients reached' in output)):
        logs.log_fatal_and_exit('Bad bot environment detected, exiting.',
                                output=build_run_console_output)

    # If none of the other bots have added information about this build,
    # then add it now.
    if build_state == data_types.BuildState.UNMARKED:
        data_handler.add_build_metadata(job_type, crash_revision, is_bad_build,
                                        build_run_console_output)

    # Reset memory tool options.
    environment.reset_current_memory_tool_options()

    return is_bad_build
Example #20
def test_for_reproducibility(testcase_path, expected_state,
                             expected_security_flag, test_timeout, http_flag,
                             gestures):
    """Test to see if a crash is fully reproducible or is a one-time crasher."""
    # Cleanup any existing application instances and user profile directories.
    # Cleaning up temp clears user profile directories and should be done before
    # calling |get_command_line_for_application|, since that creates
    # dependencies in the profile folder.
    process_handler.terminate_stale_application_instances()
    shell.clear_temp_directory()

    app_directory = environment.get_value('APP_DIR')
    command = get_command_line_for_application(testcase_path,
                                               needs_http=http_flag)
    crash_count = 0
    crash_retries = environment.get_value('CRASH_RETRIES')
    reproducible_crash_target_count = crash_retries * REPRODUCIBILITY_FACTOR
    warmup_timeout = environment.get_value('WARMUP_TIMEOUT')

    logs.log('Testing for crash (command="%s").' % command)

    round_number = 0
    for round_number in xrange(1, crash_retries + 1):
        # Bail out early if there is no hope of finding a reproducible crash.
        if (crash_retries - round_number + crash_count + 1 <
                reproducible_crash_target_count):
            break

        run_timeout = warmup_timeout if round_number == 1 else test_timeout
        return_code, crash_time, output = process_handler.run_process(
            command,
            timeout=run_timeout,
            gestures=gestures,
            current_working_directory=app_directory)
        process_handler.terminate_stale_application_instances()

        crash_result = CrashResult(return_code, crash_time, output)
        if not crash_result.is_crash():
            continue

        state = crash_result.get_symbolized_data()
        crash_state = state.crash_state
        security_flag = crash_result.is_security_issue()

        # If we don't have an expected crash state, set it to the one from the
        # initial crash.
        if not expected_state:
            expected_state = crash_state

        if security_flag != expected_security_flag:
            logs.log('Detected a crash without the correct security flag.')
            continue

        crash_comparer = CrashComparer(crash_state, expected_state)
        if not crash_comparer.is_similar():
            logs.log('Detected a crash with an unrelated state: '
                     'Expected(%s), Found(%s).' %
                     (expected_state, crash_state))
            continue

        crash_count += 1
        if crash_count >= reproducible_crash_target_count:
            logs.log('Crash is reproducible.')
            return True

    logs.log('Crash is not reproducible. Crash count: %d/%d.' %
             (crash_count, round_number))
    return False
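To make the early bail-out arithmetic above concrete (the numbers are invented): with CRASH_RETRIES = 10 and REPRODUCIBILITY_FACTOR = 0.5 the target is 5 crashes; entering round 8 with only one crash so far, at most 10 - 8 + 1 + 1 = 4 crashes are still achievable, so the loop stops early.

# Worked example of the bail-out check (all values are assumptions).
crash_retries = 10
reproducible_crash_target_count = crash_retries * 0.5  # REPRODUCIBILITY_FACTOR
round_number, crash_count = 8, 1
best_possible = crash_retries - round_number + crash_count + 1  # -> 4
print(best_possible < reproducible_crash_target_count)          # True: bail out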