Example #1
0
  def test_update_comment_empty(self):
    """Basic test on a testcase with empty comments."""
    data_handler.update_testcase_comment(
        self.testcase, data_types.TaskState.STARTED, 'message')
    self.assertEqual(
        '[2019-01-01 00:00:00] bot: Progression task started: message.\n',
        self.testcase.comments)
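For reference, the comment line asserted above follows a simple '[timestamp] bot_name: <Task> task <state>: message.' shape. A minimal, self-contained sketch of that formatting (an illustrative helper, not the data_handler implementation):

import datetime

def format_comment(bot_name, task_capitalized, state, message, now=None):
  """Builds one testcase comment line in the shape the test expects."""
  now = now or datetime.datetime.utcnow()
  timestamp = now.strftime('%Y-%m-%d %H:%M:%S')
  return '[%s] %s: %s task %s: %s.\n' % (
      timestamp, bot_name, task_capitalized, state, message)

line = format_comment('bot', 'Progression', 'started', 'message',
                      now=datetime.datetime(2019, 1, 1))
assert line == '[2019-01-01 00:00:00] bot: Progression task started: message.\n'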
Example #2
0
def _testcase_reproduces_in_revision(testcase,
                                     testcase_file_path,
                                     job_type,
                                     revision,
                                     update_metadata=False):
    """Test to see if a test case reproduces in the specified revision."""
    build_manager.setup_build(revision)
    if not build_manager.check_app_path():
        raise errors.BuildSetupError(revision, job_type)

    if testcase_manager.check_for_bad_build(job_type, revision):
        log_message = 'Bad build at r%d. Skipping' % revision
        testcase = data_handler.get_testcase_by_id(testcase.key.id())
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.WIP,
                                             log_message)
        raise errors.BadBuildError(revision, job_type)

    test_timeout = environment.get_value('TEST_TIMEOUT', 10)
    result = testcase_manager.test_for_crash_with_retries(
        testcase,
        testcase_file_path,
        test_timeout,
        http_flag=testcase.http_flag)
    _log_output(revision, result)

    if update_metadata:
        _update_issue_metadata(testcase)

    return result
Example #3
0
def validate_regression_range(testcase, testcase_file_path, job_type,
                              revision_list, min_index):
    """Ensure that we found the correct min revision by testing earlier ones."""
    earlier_revisions = revision_list[
        min_index - EARLIER_REVISIONS_TO_CONSIDER_FOR_VALIDATION:min_index]
    revision_count = min(len(earlier_revisions),
                         REVISIONS_TO_TEST_FOR_VALIDATION)

    revisions_to_test = random.sample(earlier_revisions, revision_count)
    for revision in revisions_to_test:
        try:
            if _testcase_reproduces_in_revision(testcase, testcase_file_path,
                                                job_type, revision):
                testcase = data_handler.get_testcase_by_id(testcase.key.id())
                testcase.regression = 'NA'
                error_message = (
                    'Low confidence in regression range. Test case crashes in '
                    'revision r%d but not later revision r%d' %
                    (revision, revision_list[min_index]))
                data_handler.update_testcase_comment(
                    testcase, data_types.TaskState.ERROR, error_message)
                return False
        except errors.BadBuildError:
            pass

    return True
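A rough worked example of the slicing above, with illustrative constant values (the real constants are defined alongside the regression task):

import random

EARLIER_REVISIONS_TO_CONSIDER_FOR_VALIDATION = 10  # illustrative value
REVISIONS_TO_TEST_FOR_VALIDATION = 3               # illustrative value

revision_list = [100, 105, 110, 120, 130, 140, 150, 160, 170, 180, 200, 210]
min_index = 10  # suppose r200 is the earliest revision found to crash

earlier_revisions = revision_list[
    min_index - EARLIER_REVISIONS_TO_CONSIDER_FOR_VALIDATION:min_index]
revision_count = min(len(earlier_revisions), REVISIONS_TO_TEST_FOR_VALIDATION)
# Three of the ten revisions before r200 are re-tested, e.g. [110, 140, 170].
print(sorted(random.sample(earlier_revisions, revision_count)))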
Example #4
0
def execute_task(testcase_id, job_type):
    """Execute progression task."""
    try:
        find_fixed_range(testcase_id, job_type)
    except errors.BuildSetupError as error:
        # If we failed to setup a build, it is likely a bot error. We can retry
        # the task in this case.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        error_message = 'Build setup failed r%d' % error.revision
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)
        build_fail_wait = environment.get_value('FAIL_WAIT')
        tasks.add_task('progression',
                       testcase_id,
                       job_type,
                       wait_time=build_fail_wait)
    except errors.BadBuildError:
        # Though bad builds when narrowing the range are recoverable, certain builds
        # being marked as bad may be unrecoverable. Recoverable ones should not
        # reach this point.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        error_message = 'Unable to recover from bad build'
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)

    # If there is a fine grained bisection service available, request it. Both
    # regression and fixed ranges are requested once. Regression is also requested
    # here as the bisection service may require details that are not yet available
    # (e.g. issue ID) at the time regress_task completes.
    task_creation.request_bisection(testcase_id)
Example #5
0
def execute_task(testcase_id, job_type):
    """Run regression task and handle potential errors."""
    try:
        find_regression_range(testcase_id, job_type)
    except errors.BuildSetupError as error:
        # If we failed to setup a build, it is likely a bot error. We can retry
        # the task in this case.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        error_message = 'Build setup failed r%d' % error.revision
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)
        build_fail_wait = environment.get_value('FAIL_WAIT')
        tasks.add_task('regression',
                       testcase_id,
                       job_type,
                       wait_time=build_fail_wait)
    except errors.BadBuildError:
        # Though bad builds when narrowing the range are recoverable, certain builds
        # being marked as bad may be unrecoverable. Recoverable ones should not
        # reach this point.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        testcase.regression = 'NA'
        error_message = 'Unable to recover from bad build'
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)
def _testcase_reproduces_in_revision(testcase,
                                     testcase_file_path,
                                     job_type,
                                     revision,
                                     should_log=True,
                                     min_revision=None,
                                     max_revision=None):
  """Test to see if a test case reproduces in the specified revision."""
  if should_log:
    log_message = 'Testing r%d' % revision
    if min_revision is not None and max_revision is not None:
      log_message += ' (current range %d:%d)' % (min_revision, max_revision)

    testcase = data_handler.get_testcase_by_id(testcase.key.id())
    data_handler.update_testcase_comment(testcase, data_types.TaskState.WIP,
                                         log_message)

  build_manager.setup_regular_build(revision)
  app_path = environment.get_value('APP_PATH')
  if not app_path:
    raise errors.BuildSetupError(revision, job_type)

  if tests.check_for_bad_build(job_type, revision):
    log_message = 'Bad build at r%d. Skipping' % revision
    testcase = data_handler.get_testcase_by_id(testcase.key.id())
    data_handler.update_testcase_comment(testcase, data_types.TaskState.WIP,
                                         log_message)
    raise errors.BadBuildError(revision, job_type)

  test_timeout = environment.get_value('TEST_TIMEOUT', 10)
  result = tests.test_for_crash_with_retries(
      testcase, testcase_file_path, test_timeout, http_flag=testcase.http_flag)
  return result.is_crash()
def _check_fixed_for_custom_binary(testcase, job_type, testcase_file_path):
  """Simplified fixed check for test cases using custom binaries."""
  revision = environment.get_value('APP_REVISION')

  # Update comments to reflect bot information and clean up old comments.
  testcase_id = testcase.key.id()
  testcase = data_handler.get_testcase_by_id(testcase_id)
  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

  build_manager.setup_build()
  if not build_manager.check_app_path():
    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(
        testcase, data_types.TaskState.ERROR,
        'Build setup failed for custom binary')
    build_fail_wait = environment.get_value('FAIL_WAIT')
    tasks.add_task(
        'progression', testcase_id, job_type, wait_time=build_fail_wait)
    return

  test_timeout = environment.get_value('TEST_TIMEOUT', 10)
  result = testcase_manager.test_for_crash_with_retries(
      testcase, testcase_file_path, test_timeout, http_flag=testcase.http_flag)
  _log_output(revision, result)

  # Re-fetch to finalize testcase updates in branches below.
  testcase = data_handler.get_testcase_by_id(testcase.key.id())

  # If this still crashes on the most recent build, it's not fixed. The task
  # will be rescheduled by a cron job and re-attempted eventually.
  if result.is_crash():
    app_path = environment.get_value('APP_PATH')
    command = testcase_manager.get_command_line_for_application(
        testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
    symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
    unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
    stacktrace = utils.get_crash_stacktrace_output(
        command, symbolized_crash_stacktrace, unsymbolized_crash_stacktrace)
    testcase.last_tested_crash_stacktrace = data_handler.filter_stacktrace(
        stacktrace)
    _update_completion_metadata(
        testcase,
        revision,
        is_crash=True,
        message='still crashes on latest custom build')
    return

  # Retry once on another bot to confirm our results and in case this bot is in
  # a bad state which we didn't catch through our usual means.
  if data_handler.is_first_retry_for_task(testcase, reset_after_retry=True):
    tasks.add_task('progression', testcase_id, job_type)
    _update_completion_metadata(testcase, revision)
    return

  # The bug is fixed.
  testcase.fixed = 'Yes'
  testcase.open = False
  _update_completion_metadata(
      testcase, revision, message='fixed on latest custom build')
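data_handler.is_first_retry_for_task (used above) amounts to a one-shot retry gate. A hedged sketch of the idea, with a plain dict standing in for testcase metadata:

def should_retry_once(metadata):
  """Returns True on the first call only, then flips a flag for later calls."""
  if metadata.get('retried_once'):
    return False
  metadata['retried_once'] = True
  return True

meta = {}
assert should_retry_once(meta)      # first non-crashing run: reschedule the task
assert not should_retry_once(meta)  # second run: accept the result as final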
Example #8
0
def _skip_minimization(testcase, message, crash_result=None, command=None):
  """Skip minimization for a testcase."""
  testcase.minimized_keys = testcase.fuzzed_keys

  if crash_result:
    _update_crash_result(testcase, crash_result, command)

  data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                       message)
  create_additional_tasks(testcase)
def _update_completion_metadata(testcase,
                                revision,
                                is_crash=False,
                                message=None):
  """Update metadata the progression task completes."""
  _clear_progression_pending(testcase)
  testcase.set_metadata('last_tested_revision', revision, update_testcase=False)
  if is_crash:
    testcase.set_metadata(
        'last_tested_crash_revision', revision, update_testcase=False)
    testcase.set_metadata(
        'last_tested_crash_time', utils.utcnow(), update_testcase=False)
  data_handler.update_testcase_comment(testcase, data_types.TaskState.FINISHED,
                                       message)
Example #10
0
  def test_update_comment_truncate(self):
    """Test truncating long comments."""
    self.testcase.comments = '\n' * data_types.TESTCASE_COMMENTS_LENGTH_LIMIT
    data_handler.update_testcase_comment(
        self.testcase, data_types.TaskState.STARTED, 'message')

    self.assertEqual(data_types.TESTCASE_COMMENTS_LENGTH_LIMIT,
                     len(self.testcase.comments))
    expected_new = (
        '[2019-01-01 00:00:00] bot: Progression task started: message.\n')
    expected = (
        '\n' * (data_types.TESTCASE_COMMENTS_LENGTH_LIMIT - len(expected_new)) +
        expected_new)
    self.assertEqual(expected, self.testcase.comments)
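The assertions above imply that comments are truncated from the front so the newest lines survive. A small sketch consistent with that behavior, using an illustrative limit:

LIMIT = 80  # illustrative; the real value is data_types.TESTCASE_COMMENTS_LENGTH_LIMIT
existing = '\n' * LIMIT
new_line = '[2019-01-01 00:00:00] bot: Progression task started: message.\n'
combined = existing + new_line
truncated = combined[-LIMIT:]  # keep only the most recent LIMIT characters
assert len(truncated) == LIMIT
assert truncated.endswith(new_line)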
Example #11
0
def mark_unreproducible_if_flaky(testcase, potentially_flaky):
    """Check to see if a test case appears to be flaky."""
    task_name = environment.get_value('TASK_NAME')

    # If this run does not suggest that we are flaky, clear the flag and assume
    # that we are reproducible.
    if not potentially_flaky:
        testcase.set_metadata('potentially_flaky', False)
        return

    # If we have not been marked as potentially flaky in the past, don't mark
    # the test case as unreproducible yet. It is now potentially flaky.
    if not testcase.get_metadata('potentially_flaky'):
        testcase.set_metadata('potentially_flaky', True)

        # In this case, the current task will usually be in a state where it cannot
        # be completed. Recreate it.
        tasks.add_task(task_name, testcase.key.id(), testcase.job_type)
        return

    # At this point, this test case has been flagged as potentially flaky twice.
    # It should be marked as unreproducible. Mark it as unreproducible, and set
    # fields that cannot be populated accordingly.
    if task_name == 'minimize' and not testcase.minimized_keys:
        testcase.minimized_keys = 'NA'
    if task_name in ['minimize', 'impact']:
        testcase.set_impacts_as_na()
    if task_name in ['minimize', 'regression']:
        testcase.regression = 'NA'
    if task_name in ['minimize', 'progression']:
        testcase.fixed = 'NA'

    testcase.one_time_crasher_flag = True
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         'Testcase appears to be flaky')

    issue = data_handler.get_issue_for_testcase(testcase)
    if issue and issue.has_label('Reproducible'):
        issue.remove_label('Reproducible')
        issue.add_label('Unreproducible')
        issue.comment = ('ClusterFuzz testcase %d appears to be flaky, '
                         'updating reproducibility label.' % testcase.key.id())
        issue.save()

    # For unreproducible testcases, it is still beneficial to get second stack
    # information from stack task and component information from blame task.
    create_blame_task_if_needed(testcase)
    create_stack_task_if_needed(testcase)
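The flow above is a two-strike policy: the first flaky run only sets a flag and recreates the task, the second marks the testcase unreproducible. A minimal sketch of just that control flow, with a dict in place of testcase metadata:

def handle_flaky_run(metadata, potentially_flaky):
  if not potentially_flaky:
    metadata['potentially_flaky'] = False
    return 'reproducible'
  if not metadata.get('potentially_flaky'):
    metadata['potentially_flaky'] = True
    return 'retry'
  return 'unreproducible'

meta = {}
assert handle_flaky_run(meta, True) == 'retry'
assert handle_flaky_run(meta, True) == 'unreproducible'
assert handle_flaky_run(meta, False) == 'reproducible'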
def save_regression_range(testcase_id, regression_range_start,
                          regression_range_end):
  """Saves the regression range and creates blame and impact task if needed."""
  testcase = data_handler.get_testcase_by_id(testcase_id)
  testcase.regression = '%d:%d' % (regression_range_start, regression_range_end)
  data_handler.update_testcase_comment(testcase, data_types.TaskState.FINISHED)

  write_to_big_query(testcase, regression_range_start, regression_range_end)

  # Force impacts update after regression range is updated. In several cases,
  # we might not have a production build to test with, so regression range is
  # used to decide impacts.
  task_creation.create_impact_task_if_needed(testcase)

  # Get blame information using the regression range result.
  task_creation.create_blame_task_if_needed(testcase)
Example #13
0
  def test_update_comment_clear(self):
    """Basic test on a testcase with existing comments, and clearing old
    progression messages."""
    self.testcase.comments = (
        '[2018-01-01 00:00:00] bot: Foo.\n'
        '[2018-01-01 00:00:00] bot: Progression task started: message.\n'
        '[2018-01-01 00:00:00] bot: Bar.\n'
        '[2018-01-01 00:00:00] bot: Progression task finished.\n'
        '[2018-01-01 00:00:00] bot: Blah.\n')
    data_handler.update_testcase_comment(
        self.testcase, data_types.TaskState.STARTED, 'message')
    self.assertEqual(
        ('[2018-01-01 00:00:00] bot: Foo.\n'
         '[2018-01-01 00:00:00] bot: Bar.\n'
         '[2018-01-01 00:00:00] bot: Blah.\n'
         '[2019-01-01 00:00:00] bot: Progression task started: message.\n'),
        self.testcase.comments)
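The expected value above shows that older lines for the same task are dropped before the new one is appended. A sketch of that clearing step, assuming old lines are matched by a simple substring (the real matching in data_handler may be stricter):

comments = (
    '[2018-01-01 00:00:00] bot: Foo.\n'
    '[2018-01-01 00:00:00] bot: Progression task started: message.\n'
    '[2018-01-01 00:00:00] bot: Bar.\n'
    '[2018-01-01 00:00:00] bot: Progression task finished.\n'
    '[2018-01-01 00:00:00] bot: Blah.\n')
kept = [line for line in comments.splitlines(True)
        if 'Progression task' not in line]
comments = (''.join(kept) +
            '[2019-01-01 00:00:00] bot: Progression task started: message.\n')
assert 'Foo' in comments and 'finished' not in comments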
Example #14
0
def mark_unreproducible_if_flaky(testcase, potentially_flaky):
  """Check to see if a test case appears to be flaky."""
  task_name = environment.get_value('TASK_NAME')

  # If this run does not suggest that we are flaky, clear the flag and assume
  # that we are reproducible.
  if not potentially_flaky:
    testcase.set_metadata('potentially_flaky', False)
    return

  # If we have not been marked as potentially flaky in the past, don't mark
  # the test case as unreproducible yet. It is now potentially flaky.
  if not testcase.get_metadata('potentially_flaky'):
    testcase.set_metadata('potentially_flaky', True)

    # In this case, the current task will usually be in a state where it cannot
    # be completed. Recreate it.
    tasks.add_task(task_name, testcase.key.id(), testcase.job_type)
    return

  # At this point, this test case has been flagged as potentially flaky twice.
  # It should be marked as unreproducible. Mark it as unreproducible, and set
  # fields that cannot be populated accordingly.
  if task_name == 'minimize' and not testcase.minimized_keys:
    testcase.minimized_keys = 'NA'
  if task_name in ['minimize', 'impact']:
    testcase.set_impacts_as_na()
  if task_name in ['minimize', 'regression']:
    testcase.regression = 'NA'
  if task_name in ['minimize', 'progression']:
    testcase.fixed = 'NA'

  testcase.one_time_crasher_flag = True
  data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                       'Testcase appears to be flaky')

  # Issue update to flip reproducibility label is done in App Engine cleanup
  # cron. This avoids calling the issue tracker apis from GCE.

  # For unreproducible testcases, it is still beneficial to get component
  # information from blame task.
  create_blame_task_if_needed(testcase)

  # Let bisection service know about flakiness.
  request_bisection(testcase.key.id())
Example #15
0
def execute_task(testcase_id, _):
  """Attempt to find the CL introducing the bug associated with testcase_id."""
  # Locate the testcase associated with the id.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  if not testcase:
    return

  # Make sure that predator topic is configured. If not, nothing to do here.
  topic = db_config.get_value('predator_crash_topic')
  if not topic:
    logs.log('Predator is not configured, skipping blame task.')
    return

  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

  # Prepare pubsub message to send to predator.
  message = _prepare_predator_message(testcase)
  if not message:
    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(
        testcase, data_types.TaskState.ERROR,
        'Failed to generate request for Predator.')
    return

  # Clear existing results and mark blame result as pending.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  _clear_blame_result_and_set_pending_flag(testcase)

  # Post request to pub sub.
  client = pubsub.PubSubClient()
  message_ids = client.publish(topic, [message])
  logs.log('Successfully published testcase %s to Predator. Message IDs: %s.' %
           (testcase_id, message_ids))
  data_handler.update_testcase_comment(testcase, data_types.TaskState.FINISHED)
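Most task entry points above share the same comment lifecycle: STARTED on entry, ERROR on any bail-out, FINISHED on success. A generic sketch of that shape with the comment writer stubbed out (real tasks catch specific error types rather than Exception):

def run_task(update_comment, do_work):
  update_comment('started')
  try:
    do_work()
  except Exception as error:
    update_comment('error', str(error))
    return
  update_comment('finished')

run_task(lambda state, message=None: print(state, message or ''),
         lambda: None)  # prints 'started' then 'finished'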
Example #16
0
def finalize_testcase(testcase_id,
                      command,
                      last_crash_result,
                      flaky_stack=False):
  """Perform final updates on a test case and prepare it for other tasks."""
  # Symbolize crash output if we have it.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  if last_crash_result:
    _update_crash_result(testcase, last_crash_result, command)

  # Update remaining test case information.
  testcase.flaky_stack = flaky_stack
  if build_manager.is_custom_binary():
    testcase.set_impacts_as_na()
    testcase.regression = 'NA'
  data_handler.update_testcase_comment(testcase, data_types.TaskState.FINISHED)

  # We might have updated the crash state. See if we need to mark this as a
  # duplicate based on other testcases.
  data_handler.handle_duplicate_entry(testcase)

  create_additional_tasks(testcase)
Example #17
0
def setup_testcase(testcase, fuzzer_override=None):
    """Sets up the testcase and needed dependencies like fuzzer,
  data bundle, etc."""
    fuzzer_name = fuzzer_override or testcase.fuzzer_name
    job_type = testcase.job_type
    task_name = environment.get_value('TASK_NAME')
    testcase_fail_wait = environment.get_value('FAIL_WAIT')
    testcase_id = testcase.key.id()

    # Clear testcase directories.
    shell.clear_testcase_directories()

    # Adjust the test timeout value if this is coming from a user-uploaded
    # testcase.
    _set_timeout_value_from_user_upload(testcase_id)

    # Update the fuzzer if necessary in order to get the updated data bundle.
    if fuzzer_name:
        try:
            update_successful = update_fuzzer_and_data_bundles(fuzzer_name)
        except errors.InvalidFuzzerError:
            # Close testcase and don't recreate tasks if this fuzzer is invalid.
            testcase.open = False
            testcase.fixed = 'NA'
            testcase.set_metadata('fuzzer_was_deleted', True)
            logs.log_error('Closed testcase %d with invalid fuzzer %s.' %
                           (testcase_id, fuzzer_name))

            error_message = 'Fuzzer %s no longer exists' % fuzzer_name
            data_handler.update_testcase_comment(testcase,
                                                 data_types.TaskState.ERROR,
                                                 error_message)
            return None, None, None

        if not update_successful:
            error_message = 'Unable to setup fuzzer %s' % fuzzer_name
            data_handler.update_testcase_comment(testcase,
                                                 data_types.TaskState.ERROR,
                                                 error_message)
            tasks.add_task(task_name,
                           testcase_id,
                           job_type,
                           wait_time=testcase_fail_wait)
            return None, None, None

    # Extract the testcase and any of its resources to the input directory.
    file_list, input_directory, testcase_file_path = unpack_testcase(testcase)
    if not file_list:
        error_message = 'Unable to setup testcase %s' % testcase_file_path
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)
        tasks.add_task(task_name,
                       testcase_id,
                       job_type,
                       wait_time=testcase_fail_wait)
        return None, None, None

    # For Android/Fuchsia, we need to sync our local testcases directory with the
    # one on the device.
    if environment.platform() == 'ANDROID':
        _copy_testcase_to_device_and_setup_environment(testcase,
                                                       testcase_file_path)

    # Push testcases to worker.
    if environment.is_trusted_host():
        from bot.untrusted_runner import file_host
        file_host.push_testcases_to_worker()

    # Copy global blacklist into local blacklist.
    is_lsan_enabled = environment.get_value('LSAN')
    if is_lsan_enabled:
        # Get local blacklist without this testcase's entry.
        leak_blacklist.copy_global_to_local_blacklist(
            excluded_testcase=testcase)

    prepare_environment_for_testcase(testcase)

    return file_list, input_directory, testcase_file_path
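Callers of setup_testcase rely on the tuple contract shown here: a falsy file_list means setup already logged a comment (and, where appropriate, rescheduled the task), so the caller simply bails out. A hedged sketch of that caller-side pattern:

def run_with_setup(setup_testcase, testcase, work):
  file_list, input_directory, testcase_file_path = setup_testcase(testcase)
  if not file_list:
    return None  # setup already handled logging/rescheduling
  return work(file_list, input_directory, testcase_file_path)

# Usage with stand-in callables:
assert run_with_setup(lambda tc: (None, None, None), object(),
                      lambda *args: 'ran') is None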
Example #18
0
def execute_task(testcase_id, job_type):
    """Run a test case with a second job type to generate a second stack trace."""
    # Locate the testcase associated with the id.
    testcase = data_handler.get_testcase_by_id(testcase_id)

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED)

    # Setup testcase and its dependencies.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase)
    if not file_list:
        return

    # Initialize timeout values.
    test_timeout = environment.get_value('TEST_TIMEOUT', 10)

    # Set up a custom or regular build. We explicitly omit the crash revision
    # since we want to test against the latest build here.
    build_manager.setup_build()

    # Check if we have an application path. If not, our build failed to setup
    # correctly.
    app_path = environment.get_value('APP_PATH')
    if not app_path:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             'Build setup failed')
        return

    # TSAN tool settings (if the tool is used).
    if environment.tool_matches('TSAN', job_type):
        environment.set_tsan_max_history_size()

    command = tests.get_command_line_for_application(
        testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
    result = tests.test_for_crash_with_retries(testcase,
                                               testcase_file_path,
                                               test_timeout,
                                               http_flag=testcase.http_flag,
                                               compare_crash=False)

    # Get revision information.
    revision = environment.get_value('APP_REVISION')

    # If a crash occurs, then we add the second stacktrace information.
    if result.is_crash():
        state = result.get_symbolized_data()
        security_flag = result.is_security_issue()
        one_time_crasher_flag = not tests.test_for_reproducibility(
            testcase_file_path, state.crash_state, security_flag, test_timeout,
            testcase.http_flag, testcase.gestures)

        # Attach a header to indicate information on reproducibility flag.
        if one_time_crasher_flag:
            crash_stacktrace_header = 'Unreliable'
        else:
            crash_stacktrace_header = 'Fully reproducible'
        crash_stacktrace_header += (' crash found using %s job.\n\n' %
                                    job_type)

        unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
        stacktrace = utils.get_crash_stacktrace_output(
            command, state.crash_stacktrace, unsymbolized_crash_stacktrace)

        crash_stacktrace = data_handler.filter_stacktrace(
            '%s%s' % (crash_stacktrace_header, stacktrace))
    else:
        crash_stacktrace = 'No crash found using %s job.' % job_type

    # Decide which testcase field to update with this stacktrace.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if testcase.last_tested_crash_stacktrace == 'Pending':
        # This case happens when someone clicks the 'Update last tested
        # stacktrace using trunk build' button.
        testcase.last_tested_crash_stacktrace = crash_stacktrace
        testcase.set_metadata('last_tested_crash_revision',
                              revision,
                              update_testcase=False)
    else:
        # Default case when someone defines |SECOND_STACK_JOB_TYPE| in the job
        # definition. This helps to test an unreproducible crash with a different
        # memory debugging tool to get a second stacktrace (e.g. running TSAN on
        # a flaky crash found in an ASAN build).
        testcase.second_crash_stacktrace = crash_stacktrace
        testcase.set_metadata('second_crash_stacktrace_revision',
                              revision,
                              update_testcase=False)

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.FINISHED)
Example #19
0
def find_regression_range(testcase_id, job_type):
    """Attempt to find when the testcase regressed."""
    deadline = tasks.get_task_completion_deadline()
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    if testcase.regression:
        logs.log_error('Regression range is already set as %s, skip.' %
                       testcase.regression)
        return

    # This task is not applicable for custom binaries.
    if build_manager.is_custom_binary():
        testcase.regression = 'NA'
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.ERROR,
            'Not applicable for custom binaries')
        return

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED)

    # Setup testcase and its dependencies.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase)
    if not file_list:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             'Failed to setup testcase')
        tasks.add_task('regression', testcase_id, job_type)
        return

    release_build_bucket_path = environment.get_value(
        'RELEASE_BUILD_BUCKET_PATH')
    revision_list = build_manager.get_revisions_list(release_build_bucket_path,
                                                     testcase=testcase)
    if not revision_list:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             'Failed to fetch revision list')
        tasks.add_task('regression', testcase_id, job_type)
        return

    # Don't burden NFS server with caching these random builds.
    environment.set_value('CACHE_STORE', False)

    # Pick up where we left off in a previous run if necessary.
    min_revision = testcase.get_metadata('last_regression_min')
    max_revision = testcase.get_metadata('last_regression_max')
    first_run = not min_revision and not max_revision
    if not min_revision:
        min_revision = revisions.get_first_revision_in_list(revision_list)
    if not max_revision:
        max_revision = testcase.crash_revision

    min_index = revisions.find_min_revision_index(revision_list, min_revision)
    if min_index is None:
        raise errors.BuildNotFoundError(min_revision, job_type)
    max_index = revisions.find_max_revision_index(revision_list, max_revision)
    if max_index is None:
        raise errors.BuildNotFoundError(max_revision, job_type)

    # Make sure that the testcase still crashes at the revision where we
    # originally noticed the crash. Otherwise, our binary search algorithm won't
    # work correctly.
    max_revision = revision_list[max_index]
    crashes_in_max_revision = _testcase_reproduces_in_revision(
        testcase, testcase_file_path, job_type, max_revision, should_log=False)
    if not crashes_in_max_revision:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        error_message = ('Known crash revision %d did not crash' %
                         max_revision)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)
        task_creation.mark_unreproducible_if_flaky(testcase, True)
        return

    # If we've made it this far, the test case appears to be reproducible. Clear
    # any potentially-flaky metadata left over from previous runs.
    task_creation.mark_unreproducible_if_flaky(testcase, False)

    # On the first run, check to see if we regressed near either the min or max
    # revision.
    if first_run and found_regression_near_extreme_revisions(
            testcase, testcase_file_path, job_type, revision_list, min_index,
            max_index):
        return

    while time.time() < deadline:
        min_revision = revision_list[min_index]
        max_revision = revision_list[max_index]

        # If the min and max revisions are one apart (or the same, if we only have
        # one build), this is as much as we can narrow the range.
        if max_index - min_index <= 1:
            # Verify that the regression range seems correct, and save it if so.
            if not validate_regression_range(testcase, testcase_file_path,
                                             job_type, revision_list,
                                             min_index):
                return

            save_regression_range(testcase_id, min_revision, max_revision)
            return

        middle_index = (min_index + max_index) // 2
        middle_revision = revision_list[middle_index]
        try:
            is_crash = _testcase_reproduces_in_revision(
                testcase,
                testcase_file_path,
                job_type,
                middle_revision,
                min_revision=min_revision,
                max_revision=max_revision)
        except errors.BadBuildError:
            # Skip this revision.
            del revision_list[middle_index]
            max_index -= 1
            continue

        if is_crash:
            max_index = middle_index
        else:
            min_index = middle_index

        _save_current_regression_range_indices(testcase_id,
                                               revision_list[min_index],
                                               revision_list[max_index])

    # If we've broken out of the above loop, we timed out. We'll finish by
    # running another regression task and picking up from this point.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    error_message = 'Timed out, current range r%d:r%d' % (
        revision_list[min_index], revision_list[max_index])
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         error_message)
    tasks.add_task('regression', testcase_id, job_type)
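The loop above is an ordinary binary search over the revision list, with bad builds removed from consideration as they are found. A self-contained sketch of just that search, with the crash test and bad-build check stubbed out (not the ClusterFuzz implementation):

def bisect_regression(revision_list, reproduces, is_bad_build):
  """Returns (min_revision, max_revision) bracketing where the crash appeared."""
  min_index, max_index = 0, len(revision_list) - 1
  while max_index - min_index > 1:
    middle_index = (min_index + max_index) // 2
    revision = revision_list[middle_index]
    if is_bad_build(revision):
      # Skip unusable builds, mirroring the BadBuildError handling above.
      del revision_list[middle_index]
      max_index -= 1
      continue
    if reproduces(revision):
      max_index = middle_index
    else:
      min_index = middle_index
  return revision_list[min_index], revision_list[max_index]

revs = [100, 110, 120, 130, 140, 150, 160]
# The crash was introduced somewhere in (140, 150]:
assert bisect_regression(revs, lambda r: r >= 150, lambda r: False) == (140, 150)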
Example #20
0
def execute_task(testcase_id, job_type):
  """Attempt to minimize a given testcase."""
  # Get deadline to finish this task.
  deadline = tasks.get_task_completion_deadline()

  # Locate the testcase associated with the id.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  if not testcase:
    return

  # Update comments to reflect bot information.
  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

  # Setup testcase and its dependencies.
  file_list, input_directory, testcase_file_path = setup.setup_testcase(
      testcase)
  if not file_list:
    return

  # Initialize variables.
  max_timeout = environment.get_value('TEST_TIMEOUT', 10)
  app_arguments = environment.get_value('APP_ARGS')

  # Set up a custom or regular build based on revision.
  last_tested_crash_revision = testcase.get_metadata(
      'last_tested_crash_revision')

  crash_revision = last_tested_crash_revision or testcase.crash_revision
  build_manager.setup_build(crash_revision)

  # Check if we have an application path. If not, our build failed
  # to setup correctly.
  app_path = environment.get_value('APP_PATH')
  if not app_path:
    logs.log_error('Unable to setup build for minimization.')
    build_fail_wait = environment.get_value('FAIL_WAIT')

    if environment.get_value('ORIGINAL_JOB_NAME'):
      _skip_minimization(testcase, 'Failed to setup build for overridden job.')
    else:
      # Only recreate the task if this isn't an overridden job. It's possible
      # that a revision exists for the original job, but doesn't exist for the
      # overridden job.
      tasks.add_task(
          'minimize', testcase_id, job_type, wait_time=build_fail_wait)

    return

  if environment.is_libfuzzer_job():
    do_libfuzzer_minimization(testcase, testcase_file_path)
    return

  max_threads = utils.maximum_parallel_processes_allowed()

  # Prepare the test case runner.
  crash_retries = environment.get_value('CRASH_RETRIES')
  warmup_timeout = environment.get_value('WARMUP_TIMEOUT')
  required_arguments = environment.get_value('REQUIRED_APP_ARGS', '')

  # Add any testcase-specific required arguments if needed.
  additional_required_arguments = testcase.get_metadata(
      'additional_required_app_args')
  if additional_required_arguments:
    required_arguments = '%s %s' % (required_arguments,
                                    additional_required_arguments)

  test_runner = TestRunner(testcase, testcase_file_path, file_list,
                           input_directory, app_arguments, required_arguments,
                           max_threads, deadline)

  # Verify the crash with a long timeout.
  warmup_crash_occurred = False
  result = test_runner.run(timeout=warmup_timeout, log_command=True)
  if result.is_crash():
    warmup_crash_occurred = True
    logs.log('Warmup crash occurred in %d seconds.' % result.crash_time)

  saved_unsymbolized_crash_state, flaky_stack, crash_times = (
      check_for_initial_crash(test_runner, crash_retries, testcase))

  # If the warmup crash occurred but we couldn't reproduce it with multiple
  # processes running in parallel, try to minimize single-threaded.
  if (len(crash_times) < tests.REPRODUCIBILITY_FACTOR * crash_retries and
      warmup_crash_occurred and max_threads > 1):
    logs.log('Attempting to continue single-threaded.')

    max_threads = 1
    test_runner = TestRunner(testcase, testcase_file_path, file_list,
                             input_directory, app_arguments, required_arguments,
                             max_threads, deadline)

    saved_unsymbolized_crash_state, flaky_stack, crash_times = (
        check_for_initial_crash(test_runner, crash_retries, testcase))

  if flaky_stack:
    testcase.flaky_stack = flaky_stack
    testcase.put()

  if len(crash_times) < tests.REPRODUCIBILITY_FACTOR * crash_retries:
    if not crash_times:
      # We didn't crash at all, so try again. This might be a legitimately
      # unreproducible test case, so it will get marked as such after being
      # retried on other bots.
      testcase = data_handler.get_testcase_by_id(testcase_id)
      data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                           'Unable to reproduce crash')
      task_creation.mark_unreproducible_if_flaky(testcase, True)
    else:
      # We reproduced this crash at least once. It's too flaky to minimize, but
      # maybe we'll have more luck in the other jobs.
      testcase = data_handler.get_testcase_by_id(testcase_id)
      testcase.minimized_keys = 'NA'
      error_message = (
          'Unable to reproduce crash reliably, skipping '
          'minimization (crashed %d/%d)' % (len(crash_times), crash_retries))
      data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                           error_message)
      create_additional_tasks(testcase)
    return

  # If we've made it this far, the test case appears to be reproducible. Clear
  # any potentially-flaky metadata left over from previous runs.
  task_creation.mark_unreproducible_if_flaky(testcase, False)

  test_runner.set_test_expectations(testcase.security_flag, flaky_stack,
                                    saved_unsymbolized_crash_state)

  # Use the max crash time unless this would be greater than the max timeout.
  test_timeout = min(max(crash_times), max_timeout) + 1
  logs.log('Using timeout %d (was %d)' % (test_timeout, max_timeout))
  test_runner.timeout = test_timeout

  logs.log('Starting minimization.')

  if should_attempt_phase(testcase, MinimizationPhase.GESTURES):
    gestures = minimize_gestures(test_runner, testcase)

    # We can't call check_deadline_exceeded_and_store_partial_minimized_testcase
    # at this point because we do not have a test case to store.
    testcase = data_handler.get_testcase_by_id(testcase.key.id())

    if testcase.security_flag and len(testcase.gestures) != len(gestures):
      # Re-run security severity analysis since gestures affect the severity.
      testcase.security_severity = severity_analyzer.get_security_severity(
          testcase.crash_type, data_handler.get_stacktrace(testcase), job_type,
          bool(gestures))

    testcase.gestures = gestures
    testcase.set_metadata('minimization_phase', MinimizationPhase.MAIN_FILE)

    if time.time() > test_runner.deadline:
      tasks.add_task('minimize', testcase.key.id(), job_type)
      return

  # Minimize the main file.
  data = utils.get_file_contents_with_fatal_error_on_failure(testcase_file_path)
  if should_attempt_phase(testcase, MinimizationPhase.MAIN_FILE):
    data = minimize_main_file(test_runner, testcase_file_path, data)

    if check_deadline_exceeded_and_store_partial_minimized_testcase(
        deadline, testcase_id, job_type, input_directory, file_list, data,
        testcase_file_path):
      return

    testcase.set_metadata('minimization_phase', MinimizationPhase.FILE_LIST)

  # Minimize the file list.
  if should_attempt_phase(testcase, MinimizationPhase.FILE_LIST):
    if environment.get_value('MINIMIZE_FILE_LIST', True):
      file_list = minimize_file_list(test_runner, file_list, input_directory,
                                     testcase_file_path)

      if check_deadline_exceeded_and_store_partial_minimized_testcase(
          deadline, testcase_id, job_type, input_directory, file_list, data,
          testcase_file_path):
        return
    else:
      logs.log('Skipping minimization of file list.')

    testcase.set_metadata('minimization_phase', MinimizationPhase.RESOURCES)

  # Minimize any files remaining in the file list.
  if should_attempt_phase(testcase, MinimizationPhase.RESOURCES):
    if environment.get_value('MINIMIZE_RESOURCES', True):
      for dependency in file_list:
        minimize_resource(test_runner, dependency, input_directory,
                          testcase_file_path)

        if check_deadline_exceeded_and_store_partial_minimized_testcase(
            deadline, testcase_id, job_type, input_directory, file_list, data,
            testcase_file_path):
          return
    else:
      logs.log('Skipping minimization of resources.')

    testcase.set_metadata('minimization_phase', MinimizationPhase.ARGUMENTS)

  if should_attempt_phase(testcase, MinimizationPhase.ARGUMENTS):
    app_arguments = minimize_arguments(test_runner, app_arguments)

    # Arguments must be stored here in case we time out below.
    testcase.minimized_arguments = app_arguments
    testcase.put()

    if check_deadline_exceeded_and_store_partial_minimized_testcase(
        deadline, testcase_id, job_type, input_directory, file_list, data,
        testcase_file_path):
      return

  command = tests.get_command_line_for_application(
      testcase_file_path, app_args=app_arguments, needs_http=testcase.http_flag)
  last_crash_result = test_runner.last_failing_result

  store_minimized_testcase(testcase, input_directory, file_list, data,
                           testcase_file_path)
  finalize_testcase(
      testcase_id, command, last_crash_result, flaky_stack=flaky_stack)
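Minimization above is resumable: the next phase is recorded in testcase metadata so a rescheduled task picks up where the previous one stopped. A sketch of that phase gating with illustrative phase values (not the real MinimizationPhase constants, and a stand-in for should_attempt_phase):

GESTURES, MAIN_FILE, FILE_LIST, RESOURCES, ARGUMENTS = range(5)

def phase_should_run(current_phase, phase):
  """A phase runs only if the testcase has not yet progressed past it."""
  return current_phase <= phase

current = MAIN_FILE  # e.g. restored from metadata after an earlier timeout
assert not phase_should_run(current, GESTURES)
assert phase_should_run(current, MAIN_FILE)
assert phase_should_run(current, RESOURCES)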
Example #21
0
def execute_task(testcase_id, job_type):
  """Run analyze task."""
  # Reset redzones.
  environment.reset_current_memory_tool_options(redzone_size=128)

  # Unset window location size and position properties so as to use default.
  environment.set_value('WINDOW_ARG', '')

  # Locate the testcase associated with the id.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  if not testcase:
    return

  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

  metadata = data_types.TestcaseUploadMetadata.query(
      data_types.TestcaseUploadMetadata.testcase_id == int(testcase_id)).get()
  if not metadata:
    logs.log_error(
        'Testcase %s has no associated upload metadata.' % testcase_id)
    testcase.key.delete()
    return

  is_lsan_enabled = environment.get_value('LSAN')
  if is_lsan_enabled:
    # Create an empty local blacklist so all leaks will be visible to the
    # uploader.
    leak_blacklist.create_empty_local_blacklist()

  # Store the bot name and timestamp in upload metadata.
  bot_name = environment.get_value('BOT_NAME')
  metadata.bot_name = bot_name
  metadata.timestamp = datetime.datetime.utcnow()
  metadata.put()

  # Adjust the test timeout, if the user has provided one.
  if metadata.timeout:
    environment.set_value('TEST_TIMEOUT', metadata.timeout)

  # Adjust the number of retries, if the user has provided one.
  if metadata.retries is not None:
    environment.set_value('CRASH_RETRIES', metadata.retries)

  # Setup testcase and get absolute testcase path.
  file_list, _, testcase_file_path = setup.setup_testcase(testcase)
  if not file_list:
    return

  # Set up a custom or regular build based on revision.
  build_manager.setup_build(testcase.crash_revision)

  # Check if we have an application path. If not, our build failed
  # to setup correctly.
  app_path = environment.get_value('APP_PATH')
  if not app_path:
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         'Build setup failed')

    if data_handler.is_first_retry_for_task(testcase):
      build_fail_wait = environment.get_value('FAIL_WAIT')
      tasks.add_task(
          'analyze', testcase_id, job_type, wait_time=build_fail_wait)
    else:
      close_invalid_testcase_and_update_status(testcase, metadata,
                                               'Build setup failed')
    return

  # Update initial testcase information.
  testcase.absolute_path = testcase_file_path
  testcase.job_type = job_type
  testcase.binary_flag = utils.is_binary_file(testcase_file_path)
  testcase.queue = tasks.default_queue()
  testcase.crash_state = ''

  # Set initial testcase metadata fields (e.g. build url, etc).
  data_handler.set_initial_testcase_metadata(testcase)

  # Update minimized arguments and use ones provided during user upload.
  if not testcase.minimized_arguments:
    minimized_arguments = environment.get_value('APP_ARGS') or ''
    additional_command_line_flags = testcase.get_metadata(
        'uploaded_additional_args')
    if additional_command_line_flags:
      minimized_arguments += ' %s' % additional_command_line_flags
    environment.set_value('APP_ARGS', minimized_arguments)
    testcase.minimized_arguments = minimized_arguments

  # Update other fields not set at upload time.
  testcase.crash_revision = environment.get_value('APP_REVISION')
  data_handler.set_initial_testcase_metadata(testcase)
  testcase.put()

  # Initialize some variables.
  gestures = testcase.gestures
  http_flag = testcase.http_flag
  test_timeout = environment.get_value('TEST_TIMEOUT')

  # Get the crash output.
  result = testcase_manager.test_for_crash_with_retries(
      testcase,
      testcase_file_path,
      test_timeout,
      http_flag=http_flag,
      compare_crash=False)

  # If we don't get a crash, try enabling http to see if we can get a crash.
  # Skip engine fuzzer jobs (e.g. libFuzzer, AFL) for which http testcase paths
  # are not applicable.
  if (not result.is_crash() and not http_flag and
      not environment.is_engine_fuzzer_job()):
    result_with_http = testcase_manager.test_for_crash_with_retries(
        testcase,
        testcase_file_path,
        test_timeout,
        http_flag=True,
        compare_crash=False)
    if result_with_http.is_crash():
      logs.log('Testcase needs http flag for crash.')
      http_flag = True
      result = result_with_http

  # Refresh our object.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  if not testcase:
    return

  # Set application command line with the correct http flag.
  application_command_line = (
      testcase_manager.get_command_line_for_application(
          testcase_file_path, needs_http=http_flag))

  # Get the crash data.
  crashed = result.is_crash()
  crash_time = result.get_crash_time()
  state = result.get_symbolized_data()
  unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)

  # Get crash info object with minidump info. Also, re-generate unsymbolized
  # stacktrace if needed.
  crash_info, _ = (
      crash_uploader.get_crash_info_and_stacktrace(
          application_command_line, state.crash_stacktrace, gestures))
  if crash_info:
    testcase.minidump_keys = crash_info.store_minidump()

  if not crashed:
    # Could not reproduce the crash.
    log_message = (
        'Testcase didn\'t crash in %d seconds (with retries)' % test_timeout)
    data_handler.update_testcase_comment(
        testcase, data_types.TaskState.FINISHED, log_message)

    # For an unreproducible testcase, retry once on another bot to confirm
    # our results and in case this bot is in a bad state which we didn't catch
    # through our usual means.
    if data_handler.is_first_retry_for_task(testcase):
      testcase.status = 'Unreproducible, retrying'
      testcase.put()

      tasks.add_task('analyze', testcase_id, job_type)
      return

    # In the general case, we will not attempt to symbolize if we do not detect
    # a crash. For user uploads, we should symbolize anyway to provide more
    # information about what might be happening.
    crash_stacktrace_output = utils.get_crash_stacktrace_output(
        application_command_line, state.crash_stacktrace,
        unsymbolized_crash_stacktrace)
    testcase.crash_stacktrace = data_handler.filter_stacktrace(
        crash_stacktrace_output)
    close_invalid_testcase_and_update_status(testcase, metadata,
                                             'Unreproducible')

    # A non-reproducing testcase might still impact production branches.
    # Add the impact task to get that information.
    task_creation.create_impact_task_if_needed(testcase)
    return

  # Update http flag and re-run testcase to store dependencies (for bundled
  # archives only).
  testcase.http_flag = http_flag
  if not store_testcase_dependencies_from_bundled_testcase_archive(
      metadata, testcase, testcase_file_path):
    return

  # Update testcase crash parameters.
  testcase.crash_type = state.crash_type
  testcase.crash_address = state.crash_address
  testcase.crash_state = state.crash_state

  # Try to guess if the bug is security or not.
  security_flag = crash_analyzer.is_security_issue(
      state.crash_stacktrace, state.crash_type, state.crash_address)
  testcase.security_flag = security_flag

  # If it is, guess the severity.
  if security_flag:
    testcase.security_severity = severity_analyzer.get_security_severity(
        state.crash_type, state.crash_stacktrace, job_type, bool(gestures))

  log_message = ('Testcase crashed in %d seconds (r%d)' %
                 (crash_time, testcase.crash_revision))
  data_handler.update_testcase_comment(testcase, data_types.TaskState.FINISHED,
                                       log_message)

  # See if we have to ignore this crash.
  if crash_analyzer.ignore_stacktrace(state.crash_stacktrace):
    close_invalid_testcase_and_update_status(testcase, metadata, 'Irrelevant')
    return

  # Test for reproducibility.
  one_time_crasher_flag = not testcase_manager.test_for_reproducibility(
      testcase_file_path, state.crash_state, security_flag, test_timeout,
      http_flag, gestures)
  testcase.one_time_crasher_flag = one_time_crasher_flag

  # Check to see if this is a duplicate.
  project_name = data_handler.get_project_name(job_type)
  existing_testcase = data_handler.find_testcase(
      project_name, state.crash_type, state.crash_state, security_flag)
  if existing_testcase:
    # If the existing test case is unreproducible and ours is reproducible,
    # replace the existing test case with this one.
    if existing_testcase.one_time_crasher_flag and not one_time_crasher_flag:
      duplicate_testcase = existing_testcase
      original_testcase = testcase
    else:
      duplicate_testcase = testcase
      original_testcase = existing_testcase
      metadata.status = 'Duplicate'
      metadata.duplicate_of = existing_testcase.key.id()

    duplicate_testcase.status = 'Duplicate'
    duplicate_testcase.duplicate_of = original_testcase.key.id()
    duplicate_testcase.put()

  # Set testcase and metadata status if not set already.
  if testcase.status != 'Duplicate':
    testcase.status = 'Processed'
    metadata.status = 'Confirmed'

    # Add new leaks to global blacklist to avoid detecting duplicates.
    # Only add if testcase has a direct leak crash and if it's reproducible.
    if is_lsan_enabled:
      leak_blacklist.add_crash_to_global_blacklist_if_needed(testcase)

  # Add application specific information in the trace.
  crash_stacktrace_output = utils.get_crash_stacktrace_output(
      application_command_line, state.crash_stacktrace,
      unsymbolized_crash_stacktrace)
  testcase.crash_stacktrace = data_handler.filter_stacktrace(
      crash_stacktrace_output)

  # Update the testcase values.
  testcase.put()

  # Update the upload metadata.
  metadata.security_flag = security_flag
  metadata.put()

  # Create tasks to
  # 1. Minimize testcase (minimize).
  # 2. Find regression range (regression).
  # 3. Find testcase impact on production branches (impact).
  # 4. Check whether testcase is fixed (progression).
  # 5. Get second stacktrace from another job in case of
  #    one-time crashers (stack).
  task_creation.create_tasks(testcase)
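Duplicate handling above follows one rule: if the existing testcase is flaky and the new one is reproducible, the existing one becomes the duplicate; otherwise the new one does. A tiny sketch of that decision:

def resolve_duplicate(existing_is_flaky, new_is_flaky):
  """Returns which record becomes the duplicate: 'existing' or 'new'."""
  if existing_is_flaky and not new_is_flaky:
    return 'existing'
  return 'new'

assert resolve_duplicate(True, False) == 'existing'  # reproducible one supersedes
assert resolve_duplicate(False, False) == 'new'      # keep the older original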
Example #22
0
def execute_task(testcase_id, job_type):
    """Attempt to find if the testcase affects release branches on Chromium."""
    # This shouldn't ever get scheduled, but check just in case.
    if not utils.is_chromium():
        return

    # Locate the testcase associated with the id.
    testcase = data_handler.get_testcase_by_id(testcase_id)

    # If this testcase is fixed, we should no longer be doing impact testing.
    if testcase.fixed and testcase.is_impact_set_flag:
        return

    # For testcases with an unreproducible status, we do impact analysis just
    # once.
    if testcase.is_status_unreproducible() and testcase.is_impact_set_flag:
        return

    # Update comments only after checking the above bailout conditions.
    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED)

    # This task is not applicable to unreproducible testcases.
    if testcase.one_time_crasher_flag:
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.ERROR,
            'Not applicable for unreproducible testcases')
        return

    # This task is not applicable for custom binaries. We cannot remove the
    # creation of such tasks specifically for custom binary testcases in the
    # cron job, so exit gracefully.
    if build_manager.is_custom_binary():
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.FINISHED,
            'Not applicable for custom binaries')
        return

    # If we don't have a stable or beta build url pattern, we try to use the
    # build information url to make a guess.
    if not build_manager.has_production_builds():
        if not testcase.regression:
            data_handler.update_testcase_comment(
                testcase, data_types.TaskState.FINISHED,
                'Cannot run without regression range, will re-run once regression '
                'task finishes')
            return

        impacts = get_impacts_from_url(testcase.regression, testcase.job_type)
        testcase = data_handler.get_testcase_by_id(testcase_id)
        set_testcase_with_impacts(testcase, impacts)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.FINISHED)
        return

    # Setup testcase and its dependencies.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase)
    if not file_list:
        return

    # Setup stable, beta builds and get impact and crash stacktrace.
    try:
        impacts = get_impacts_on_prod_builds(testcase, testcase_file_path)
    except BuildFailedException as error:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error.message)
        tasks.add_task('impact',
                       testcase_id,
                       job_type,
                       wait_time=environment.get_value('FAIL_WAIT'))
        return

    testcase = data_handler.get_testcase_by_id(testcase_id)
    set_testcase_with_impacts(testcase, impacts)

    # Set the stacktrace in case we have an unreproducible crash on trunk,
    # but it crashes on one of the production builds.
    if testcase.is_status_unreproducible() and impacts.get_extra_trace():
        testcase.crash_stacktrace = data_handler.filter_stacktrace(
            '%s\n\n%s' %
            (data_handler.get_stacktrace(testcase), impacts.get_extra_trace()))

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.FINISHED)
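set_testcase_with_impacts is called twice above but not shown in this example. A minimal sketch of what it plausibly does, assuming the impacts object carries per-channel version info (the impact_* field names below are assumptions, not confirmed by this example):

def set_testcase_with_impacts(testcase, impacts):
    """Sketch only: copy per-channel impact versions onto the testcase and
    mark impact analysis as done (field names are assumed)."""
    testcase.impact_stable_version = impacts.stable.version
    testcase.impact_stable_version_likely = impacts.stable.likely
    testcase.impact_beta_version = impacts.beta.version
    testcase.impact_beta_version_likely = impacts.beta.likely
    testcase.is_impact_set_flag = True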
Example #23
0
def find_fixed_range(testcase_id, job_type):
    """Attempt to find the revision range where a testcase was fixed."""
    deadline = tasks.get_task_completion_deadline()
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    if testcase.fixed:
        logs.log_error('Fixed range is already set as %s, skip.' %
                       testcase.fixed)
        return

    # Setup testcase and its dependencies.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase)
    if not file_list:
        return

    # Set a flag to indicate we are running the progression task. This shows a
    # pending status on the testcase report page and avoids conflicting
    # testcase updates by the triage cron.
    testcase.set_metadata('progression_pending', True)

    # Custom binaries are handled as special cases.
    if build_manager.is_custom_binary():
        _check_fixed_for_custom_binary(testcase, job_type, testcase_file_path)
        return

    release_build_bucket_path = environment.get_value(
        'RELEASE_BUILD_BUCKET_PATH')
    revision_list = build_manager.get_revisions_list(release_build_bucket_path,
                                                     testcase=testcase)
    if not revision_list:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             'Failed to fetch revision list')
        tasks.add_task('progression', testcase_id, job_type)
        return

    # Use min_index and max_index to mark the start and end of the revision
    # list used for bisecting the progression range. Set the start to the
    # revision where we noticed the crash and the end to the trunk revision.
    # Also, reuse min/max from a past run if it timed out.
    min_revision = testcase.get_metadata('last_progression_min')
    max_revision = testcase.get_metadata('last_progression_max')
    last_tested_revision = testcase.get_metadata('last_tested_crash_revision')
    known_crash_revision = last_tested_revision or testcase.crash_revision
    if not min_revision:
        min_revision = known_crash_revision
    if not max_revision:
        max_revision = revisions.get_last_revision_in_list(revision_list)

    min_index = revisions.find_min_revision_index(revision_list, min_revision)
    if min_index is None:
        raise errors.BuildNotFoundError(min_revision, job_type)
    max_index = revisions.find_max_revision_index(revision_list, max_revision)
    if max_index is None:
        raise errors.BuildNotFoundError(max_revision, job_type)

    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED,
                                         'r%d' % max_revision)

    # Check to see if this testcase is still crashing now. If it is, then just
    # bail out.
    result = _testcase_reproduces_in_revision(testcase, testcase_file_path,
                                              job_type, max_revision)
    if result.is_crash():
        logs.log('Found crash with same signature on latest revision r%d.' %
                 max_revision)
        app_path = environment.get_value('APP_PATH')
        command = testcase_manager.get_command_line_for_application(
            testcase_file_path,
            app_path=app_path,
            needs_http=testcase.http_flag)
        symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
        unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
        stacktrace = utils.get_crash_stacktrace_output(
            command, symbolized_crash_stacktrace,
            unsymbolized_crash_stacktrace)
        testcase = data_handler.get_testcase_by_id(testcase_id)
        testcase.last_tested_crash_stacktrace = data_handler.filter_stacktrace(
            stacktrace)
        _update_completion_metadata(
            testcase,
            max_revision,
            is_crash=True,
            message='still crashes on latest revision r%s' % max_revision)

        # Since we've verified that the test case is still crashing, clear out any
        # metadata indicating potential flake from previous runs.
        task_creation.mark_unreproducible_if_flaky(testcase, False)

        # For the Chromium project, save the latest crash information for
        # later upload to chromecrash/.
        state = result.get_symbolized_data()
        crash_uploader.save_crash_info_if_needed(testcase_id, max_revision,
                                                 job_type, state.crash_type,
                                                 state.crash_address,
                                                 state.frames)
        return

    # Don't burden NFS server with caching these random builds.
    environment.set_value('CACHE_STORE', False)

    # Verify that we do crash in the min revision. This is assumed to be true
    # while we are doing the bisect.
    result = _testcase_reproduces_in_revision(testcase, testcase_file_path,
                                              job_type, min_revision)
    if result and not result.is_crash():
        testcase = data_handler.get_testcase_by_id(testcase_id)

        # Retry once on another bot to confirm our result.
        if data_handler.is_first_retry_for_task(testcase,
                                                reset_after_retry=True):
            tasks.add_task('progression', testcase_id, job_type)
            error_message = (
                'Known crash revision %d did not crash, will retry on another bot to '
                'confirm result' % known_crash_revision)
            data_handler.update_testcase_comment(testcase,
                                                 data_types.TaskState.ERROR,
                                                 error_message)
            _update_completion_metadata(testcase, max_revision)
            return

        _clear_progression_pending(testcase)
        error_message = ('Known crash revision %d did not crash' %
                         known_crash_revision)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)
        task_creation.mark_unreproducible_if_flaky(testcase, True)
        return

    # Start a binary search to narrow the fixed range. At this point, we know
    # that we do crash in min_revision and do not crash in max_revision.
    while time.time() < deadline:
        min_revision = revision_list[min_index]
        max_revision = revision_list[max_index]

        # If the min and max revisions are one apart this is as much as we can
        # narrow the range.
        if max_index - min_index == 1:
            _save_fixed_range(testcase_id, min_revision, max_revision)
            return

        # Test the middle revision of our range.
        middle_index = (min_index + max_index) // 2
        middle_revision = revision_list[middle_index]

        testcase = data_handler.get_testcase_by_id(testcase_id)
        log_message = 'Testing r%d (current range %d:%d)' % (
            middle_revision, min_revision, max_revision)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.WIP,
                                             log_message)

        try:
            result = _testcase_reproduces_in_revision(testcase,
                                                      testcase_file_path,
                                                      job_type,
                                                      middle_revision)
        except errors.BadBuildError:
            # Skip this revision.
            del revision_list[middle_index]
            max_index -= 1
            continue

        if result.is_crash():
            min_index = middle_index
        else:
            max_index = middle_index

        _save_current_fixed_range_indices(testcase_id,
                                          revision_list[min_index],
                                          revision_list[max_index])

    # If we've broken out of the loop, we've exceeded the deadline. Recreate the
    # task to pick up where we left off.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    error_message = ('Timed out, current range r%d:r%d' %
                     (revision_list[min_index], revision_list[max_index]))
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         error_message)
    tasks.add_task('progression', testcase_id, job_type)
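For clarity, here is the core of the bisection above stripped of datastore and build-setup plumbing: given a revision list that crashes at the low end and not at the high end, narrow the range to two adjacent revisions, skipping revisions whose builds are bad. This is a self-contained sketch; the reproduces callable and BadBuild exception are stand-ins for the helpers used above:

class BadBuild(Exception):
    """Stand-in for errors.BadBuildError."""


def bisect_fixed_range(revision_list, min_index, max_index, reproduces):
    """Return (last_crashing_revision, first_fixed_revision).

    Assumes reproduces(revision) is True at revision_list[min_index], False at
    revision_list[max_index], and may raise BadBuild for unusable builds.
    """
    while max_index - min_index > 1:
        middle_index = (min_index + max_index) // 2
        try:
            crashed = reproduces(revision_list[middle_index])
        except BadBuild:
            # Skip this revision, mirroring the del/continue logic above.
            del revision_list[middle_index]
            max_index -= 1
            continue
        if crashed:
            min_index = middle_index
        else:
            max_index = middle_index
    return revision_list[min_index], revision_list[max_index]

For example, bisect_fixed_range([10, 12, 15, 20, 22], 0, 4, lambda r: r < 20) returns (15, 20).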
Example #24
0
def execute_task(testcase_id, job_type):
    """Run a test case with a different job type to see if they reproduce."""
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    if environment.is_engine_fuzzer_job(
            testcase.job_type) != environment.is_engine_fuzzer_job(job_type):
        # We should never reach here, but if we do, bail out, since otherwise
        # we will run into exceptions.
        return

    # Use a cloned testcase entity with different fuzz target parameters for
    # a different fuzzing engine.
    original_job_type = testcase.job_type
    testcase = _get_variant_testcase_for_job(testcase, job_type)

    # Setup testcase and its dependencies.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
    if not file_list:
        return

    # Set up a custom or regular build. We explicitly omit the crash revision
    # since we want to test against the latest build here.
    build_manager.setup_build()

    # Check if we have an application path. If not, our build failed to set up
    # correctly.
    if not build_manager.check_app_path():
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(
            testcase,
            data_types.TaskState.ERROR,
            "Build setup failed with job: " + job_type,
        )
        return

    # Disable gestures if we're running on a different platform from that of
    # the original test case.
    use_gestures = testcase.platform == environment.platform().lower()

    # Reproduce the crash.
    app_path = environment.get_value("APP_PATH")
    command = testcase_manager.get_command_line_for_application(
        testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
    test_timeout = environment.get_value("TEST_TIMEOUT", 10)
    revision = environment.get_value("APP_REVISION")
    result = testcase_manager.test_for_crash_with_retries(
        testcase,
        testcase_file_path,
        test_timeout,
        http_flag=testcase.http_flag,
        use_gestures=use_gestures,
        compare_crash=False,
    )

    if result.is_crash() and not result.should_ignore():
        crash_state = result.get_state()
        crash_type = result.get_type()
        security_flag = result.is_security_issue()

        gestures = testcase.gestures if use_gestures else None
        one_time_crasher_flag = not testcase_manager.test_for_reproducibility(
            testcase.fuzzer_name,
            testcase.actual_fuzzer_name(),
            testcase_file_path,
            crash_state,
            security_flag,
            test_timeout,
            testcase.http_flag,
            gestures,
        )
        if one_time_crasher_flag:
            status = data_types.TestcaseVariantStatus.FLAKY
        else:
            status = data_types.TestcaseVariantStatus.REPRODUCIBLE

        crash_comparer = CrashComparer(crash_state, testcase.crash_state)
        is_similar = (crash_comparer.is_similar()
                      and security_flag == testcase.security_flag)

        unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
        symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
        crash_stacktrace_output = utils.get_crash_stacktrace_output(
            command, symbolized_crash_stacktrace,
            unsymbolized_crash_stacktrace)
    else:
        status = data_types.TestcaseVariantStatus.UNREPRODUCIBLE
        is_similar = False
        crash_type = None
        crash_state = None
        security_flag = False
        crash_stacktrace_output = "No crash occurred."

    if original_job_type == job_type:
        # This case happens when someone clicks the 'Update last tested
        # stacktrace using trunk build' button.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        testcase.last_tested_crash_stacktrace = data_handler.filter_stacktrace(
            crash_stacktrace_output)
        testcase.set_metadata("last_tested_crash_revision",
                              revision,
                              update_testcase=True)
    else:
        # Regular case of variant analysis.
        variant = data_handler.get_testcase_variant(testcase_id, job_type)
        variant.status = status
        variant.revision = revision
        variant.crash_type = crash_type
        variant.crash_state = crash_state
        variant.security_flag = security_flag
        variant.is_similar = is_similar
        # Explicitly skipping crash stacktrace for now as it makes entities
        # larger and we plan to use only crash parameters in the UI.
        variant.put()
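_get_variant_testcase_for_job is referenced above but not included here. The rough idea, as a hedged sketch (the copy-based approach and the fields adjusted are assumptions about the real implementation, not a reproduction of it):

import copy


def _get_variant_testcase_for_job(testcase, job_type):
    """Sketch only: return an in-memory copy tailored to the variant job."""
    if testcase.job_type == job_type:
        # Same job; the original entity can be used directly.
        return testcase
    # Work on a detached copy so variant-specific changes (e.g. the job type,
    # or an engine-specific fuzz target name) are never written back.
    variant_testcase = copy.copy(testcase)
    variant_testcase.job_type = job_type
    return variant_testcase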
Example #25
0
def execute_task(testcase_id, job_type):
  """Execute a symbolize command."""
  # Locate the testcase associated with the id.
  testcase = data_handler.get_testcase_by_id(testcase_id)

  # We should at least have a symbolized debug or release build.
  if not build_manager.has_symbolized_builds():
    return

  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

  # Setup testcase and its dependencies.
  file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
  if not file_list:
    return

  # Initialize variables.
  build_fail_wait = environment.get_value("FAIL_WAIT")

  old_crash_stacktrace = data_handler.get_stacktrace(testcase)
  sym_crash_type = testcase.crash_type
  sym_crash_address = testcase.crash_address
  sym_crash_state = testcase.crash_state
  sym_redzone = DEFAULT_REDZONE
  warmup_timeout = environment.get_value("WARMUP_TIMEOUT")

  # Decide which build revision to use.
  if testcase.crash_stacktrace == "Pending":
    # This usually happens when someone clicks the 'Update stacktrace from
    # trunk' button on the testcase details page. In this case, we are forced
    # to use trunk. No revision -> trunk build.
    build_revision = None
  else:
    build_revision = testcase.crash_revision

  # Set up a custom or regular build based on revision.
  build_manager.setup_build(build_revision)

  # Get crash revision used in setting up build.
  crash_revision = environment.get_value("APP_REVISION")

  if not build_manager.check_app_path():
    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         "Build setup failed")
    tasks.add_task(
        "symbolize", testcase_id, job_type, wait_time=build_fail_wait)
    return

  # ASAN tool settings (if the tool is used).
  # See if we can get better stacks with higher redzone sizes.
  # A UAF might actually turn out to be OOB read/write with a bigger redzone.
  if environment.tool_matches("ASAN", job_type) and testcase.security_flag:
    redzone = MAX_REDZONE
    while redzone >= MIN_REDZONE:
      environment.reset_current_memory_tool_options(
          redzone_size=redzone, disable_ubsan=testcase.disable_ubsan)

      process_handler.terminate_stale_application_instances()
      command = testcase_manager.get_command_line_for_application(
          testcase_file_path, needs_http=testcase.http_flag)
      return_code, crash_time, output = process_handler.run_process(
          command, timeout=warmup_timeout, gestures=testcase.gestures)
      crash_result = CrashResult(return_code, crash_time, output)

      if crash_result.is_crash() and "AddressSanitizer" in output:
        state = crash_result.get_symbolized_data()
        security_flag = crash_result.is_security_issue()

        if (not crash_analyzer.ignore_stacktrace(state.crash_stacktrace) and
            security_flag == testcase.security_flag and
            state.crash_type == testcase.crash_type and
            (state.crash_type != sym_crash_type or
             state.crash_state != sym_crash_state)):
          logs.log("Changing crash parameters.\nOld : %s, %s, %s" %
                   (sym_crash_type, sym_crash_address, sym_crash_state))

          sym_crash_type = state.crash_type
          sym_crash_address = state.crash_address
          sym_crash_state = state.crash_state
          sym_redzone = redzone
          old_crash_stacktrace = state.crash_stacktrace

          logs.log("\nNew : %s, %s, %s" %
                   (sym_crash_type, sym_crash_address, sym_crash_state))
          break

      redzone //= 2

  # We should have at least a symbolized debug or a release build.
  symbolized_builds = build_manager.setup_symbolized_builds(crash_revision)
  if not symbolized_builds or (
      not build_manager.check_app_path() and
      not build_manager.check_app_path("APP_PATH_DEBUG")):
    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         "Build setup failed")
    tasks.add_task(
        "symbolize", testcase_id, job_type, wait_time=build_fail_wait)
    return

  # Increase malloc_context_size to get all stack frames. Default is 30.
  environment.reset_current_memory_tool_options(
      redzone_size=sym_redzone,
      malloc_context_size=STACK_FRAME_COUNT,
      symbolize_inline_frames=True,
      disable_ubsan=testcase.disable_ubsan,
  )

  # TSAN tool settings (if the tool is used).
  if environment.tool_matches("TSAN", job_type):
    environment.set_tsan_max_history_size()

  # Do the symbolization if supported by this application.
  result, sym_crash_stacktrace = get_symbolized_stacktraces(
      testcase_file_path, testcase, old_crash_stacktrace, sym_crash_state)

  # Update crash parameters.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  testcase.crash_type = sym_crash_type
  testcase.crash_address = sym_crash_address
  testcase.crash_state = sym_crash_state
  testcase.crash_stacktrace = data_handler.filter_stacktrace(
      sym_crash_stacktrace)

  if not result:
    data_handler.update_testcase_comment(
        testcase,
        data_types.TaskState.ERROR,
        "Unable to reproduce crash, skipping "
        "stacktrace update",
    )
  else:
    # Switch the build URL to the less-optimized symbolized build with a
    # better stacktrace.
    build_url = environment.get_value("BUILD_URL")
    if build_url:
      testcase.set_metadata("build_url", build_url, update_testcase=False)

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.FINISHED)

  testcase.symbolized = True
  testcase.crash_revision = crash_revision
  testcase.put()

  # We might have updated the crash state. See if we need to mark this as a
  # duplicate based on other testcases.
  data_handler.handle_duplicate_entry(testcase)

  task_creation.create_blame_task_if_needed(testcase)

  # Switch current directory before builds cleanup.
  root_directory = environment.get_value("ROOT_DIR")
  os.chdir(root_directory)

  # Cleanup symbolized builds which are space-heavy.
  symbolized_builds.delete()
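The ASan loop above starts at MAX_REDZONE and halves the redzone until a run with a matching security verdict reports different crash parameters. Stripped of the ClusterFuzz plumbing, the control flow is roughly the following sketch (run_with_redzone is a hypothetical stand-in for the reset-options / run-process / parse sequence above):

def find_better_crash_params(max_redzone, min_redzone, baseline,
                             run_with_redzone):
  """Sketch only: halve the redzone until a run reports different parameters.

  baseline and each run result are (crash_type, crash_state) tuples; returns
  (redzone, result) for the first differing result, or None if none differ.
  """
  redzone = max_redzone
  while redzone >= min_redzone:
    result = run_with_redzone(redzone)
    if result is not None and result != baseline:
      return redzone, result
    redzone //= 2
  return None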
Example #26
0
def execute_task(testcase_id, job_type):
    """Run a test case with a different job type to see if they reproduce."""
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    if (environment.is_engine_fuzzer_job(testcase.job_type) !=
            environment.is_engine_fuzzer_job(job_type)):
        # We should never reach here, but if we do, bail out, since otherwise
        # we will run into exceptions.
        return

    # Setup testcase and its dependencies.
    fuzzer_override = builtin_fuzzers.get_fuzzer_for_job(job_type)
    file_list, _, testcase_file_path = setup.setup_testcase(
        testcase, fuzzer_override=fuzzer_override)
    if not file_list:
        return

    # Set up a custom or regular build. We explicitly omit the crash revision
    # since we want to test against the latest build here.
    build_manager.setup_build()

    # Check if we have an application path. If not, our build failed to set up
    # correctly.
    app_path = environment.get_value('APP_PATH')
    if not app_path:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.ERROR,
            'Build setup failed with job: ' + job_type)
        return

    # Reproduce the crash.
    command = testcase_manager.get_command_line_for_application(
        testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
    test_timeout = environment.get_value('TEST_TIMEOUT', 10)
    revision = environment.get_value('APP_REVISION')
    result = testcase_manager.test_for_crash_with_retries(
        testcase,
        testcase_file_path,
        test_timeout,
        http_flag=testcase.http_flag,
        compare_crash=False)

    if result.is_crash() and not result.should_ignore():
        crash_state = result.get_state()
        crash_type = result.get_type()
        security_flag = result.is_security_issue()

        one_time_crasher_flag = not testcase_manager.test_for_reproducibility(
            testcase_file_path, crash_state, security_flag, test_timeout,
            testcase.http_flag, testcase.gestures)
        if one_time_crasher_flag:
            status = data_types.TestcaseVariantStatus.FLAKY
        else:
            status = data_types.TestcaseVariantStatus.REPRODUCIBLE

        crash_comparer = CrashComparer(crash_state, testcase.crash_state)
        is_similar = (crash_comparer.is_similar()
                      and security_flag == testcase.security_flag)

        unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
        symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
        crash_stacktrace_output = utils.get_crash_stacktrace_output(
            command, symbolized_crash_stacktrace,
            unsymbolized_crash_stacktrace)
    else:
        status = data_types.TestcaseVariantStatus.UNREPRODUCIBLE
        is_similar = False
        crash_type = None
        crash_state = None
        security_flag = False
        crash_stacktrace_output = 'No crash occurred.'

    testcase = data_handler.get_testcase_by_id(testcase_id)
    if testcase.job_type == job_type:
        # This case happens when someone clicks the 'Update last tested
        # stacktrace using trunk build' button.
        testcase.last_tested_crash_stacktrace = (
            data_handler.filter_stacktrace(crash_stacktrace_output))
        testcase.set_metadata('last_tested_crash_revision',
                              revision,
                              update_testcase=False)
    else:
        # Regular case of variant analysis.
        variant = data_handler.get_testcase_variant(testcase_id, job_type)
        variant.status = status
        variant.revision = revision
        variant.crash_type = crash_type
        variant.crash_state = crash_state
        variant.security_flag = security_flag
        variant.is_similar = is_similar
        # Explicitly skipping crash stacktrace for now as it makes entities
        # larger and we plan to use only crash parameters in the UI.
        variant.put()
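The status decision above can be restated as a small pure function; this only summarizes the branches already shown (assuming the same data_types import as the examples above):

def variant_status(is_crash, should_ignore, is_reproducible):
    # Mirrors the branches above: non-crashes and ignored crashes are
    # UNREPRODUCIBLE, one-time crashes are FLAKY, everything else REPRODUCIBLE.
    if not is_crash or should_ignore:
        return data_types.TestcaseVariantStatus.UNREPRODUCIBLE
    if not is_reproducible:
        return data_types.TestcaseVariantStatus.FLAKY
    return data_types.TestcaseVariantStatus.REPRODUCIBLE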
Example #27
0
def setup_testcase(testcase):
    """Sets up the testcase and needed dependencies like fuzzer,
  data bundle, etc."""
    fuzzer_name = testcase.fuzzer_name
    job_type = testcase.job_type
    task_name = environment.get_value('TASK_NAME')
    testcase_fail_wait = environment.get_value('FAIL_WAIT')
    testcase_id = testcase.key.id()

    # Clear testcase directories.
    shell.clear_testcase_directories()

    # Setup memory debugging tool environment.
    environment.reset_current_memory_tool_options(
        redzone_size=testcase.redzone)

    # Adjust the test timeout value if this is coming from a user-uploaded
    # testcase.
    _set_timeout_value_from_user_upload(testcase_id)

    if task_name == 'minimize':
        # Allow minimizing with a different fuzzer set up.
        minimize_fuzzer_override = environment.get_value(
            'MINIMIZE_FUZZER_OVERRIDE')
        fuzzer_name = minimize_fuzzer_override or fuzzer_name

    # Update the fuzzer if necessary in order to get the updated data bundle.
    if fuzzer_name:
        try:
            update_successful = update_fuzzer_and_data_bundles(fuzzer_name)
        except errors.InvalidFuzzerError:
            # Close testcase and don't recreate tasks if this fuzzer is invalid.
            testcase.open = False
            testcase.fixed = 'NA'
            testcase.set_metadata('fuzzer_was_deleted', True)
            logs.log_error('Closed testcase %d with invalid fuzzer %s.' %
                           (testcase_id, fuzzer_name))

            error_message = 'Fuzzer %s no longer exists.' % fuzzer_name
            data_handler.update_testcase_comment(testcase,
                                                 data_types.TaskState.ERROR,
                                                 error_message)
            return None, None, None

        if not update_successful:
            error_message = 'Unable to setup fuzzer %s.' % fuzzer_name
            data_handler.update_testcase_comment(testcase,
                                                 data_types.TaskState.ERROR,
                                                 error_message)
            tasks.add_task(task_name,
                           testcase_id,
                           job_type,
                           wait_time=testcase_fail_wait)
            return None, None, None

    # Extract the testcase and any of its resources to the input directory.
    file_list, input_directory, testcase_file_path = unpack_testcase(testcase)
    if not file_list:
        error_message = 'Unable to setup testcase %s.' % testcase_file_path
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)
        tasks.add_task(task_name,
                       testcase_id,
                       job_type,
                       wait_time=testcase_fail_wait)
        return None, None, None

    # For Android/Fuchsia, we need to sync our local testcases directory with the
    # one on the device.
    if environment.platform() == 'ANDROID':
        _copy_testcase_to_device_and_setup_environment(testcase,
                                                       testcase_file_path)

    if environment.platform() == 'FUCHSIA':
        fuchsia.device.copy_testcase_to_device(testcase_file_path)

    # Push testcases to worker.
    if environment.is_trusted_host():
        from bot.untrusted_runner import file_host
        file_host.push_testcases_to_worker()

    # Copy global blacklist into local blacklist.
    is_lsan_enabled = environment.get_value('LSAN')
    if is_lsan_enabled:
        # Get local blacklist without this testcase's entry.
        leak_blacklist.copy_global_to_local_blacklist(
            excluded_testcase=testcase)

    # Set up the environment variable for window size and location properties.
    # Explicitly use an empty string to indicate use of default window
    # properties.
    if hasattr(testcase, 'window_argument'):
        environment.set_value('WINDOW_ARG', testcase.window_argument)

    # Adjust timeout based on the stored multiplier (if available).
    if hasattr(testcase, 'timeout_multiplier') and testcase.timeout_multiplier:
        test_timeout = environment.get_value('TEST_TIMEOUT')
        environment.set_value('TEST_TIMEOUT',
                              int(test_timeout * testcase.timeout_multiplier))

    # Override APP_ARGS with minimized arguments (if available).
    if (hasattr(testcase, 'minimized_arguments')
            and testcase.minimized_arguments):
        environment.set_value('APP_ARGS', testcase.minimized_arguments)

    # Add FUZZ_TARGET to environment if this is a fuzz target testcase.
    fuzz_target = testcase.get_metadata('fuzzer_binary_name')
    if fuzz_target:
        environment.set_value('FUZZ_TARGET', fuzz_target)

    return file_list, input_directory, testcase_file_path
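Every task example above consumes setup_testcase with the same pattern: unpack, bail out when nothing was extracted (a retry has already been scheduled where needed), then operate on the returned file path. A minimal, hypothetical caller for illustration:

def example_task(testcase_id, job_type):
    """Hypothetical caller showing the common calling pattern."""
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    file_list, _, testcase_file_path = setup_testcase(testcase)
    if not file_list:
        # setup_testcase already commented on the testcase and, if needed,
        # re-queued the task, so just bail out here.
        return

    # ... run or analyze the testcase at testcase_file_path ...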