Example #1
def finalize_testcase(testcase_id,
                      command,
                      last_crash_result,
                      flaky_stack=False):
  """Perform final updates on a test case and prepare it for other tasks."""
  # Symbolize crash output if we have it.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  if last_crash_result:
    _update_crash_result(testcase, last_crash_result, command)

  testcase.delete_metadata('redo_minimize', update_testcase=False)

  # Update remaining test case information.
  testcase.flaky_stack = flaky_stack
  if build_manager.is_custom_binary():
    testcase.set_impacts_as_na()
    testcase.regression = 'NA'
  data_handler.update_testcase_comment(testcase, data_types.TaskState.FINISHED)

  # We might have updated the crash state. See if we need to mark this
  # testcase as a duplicate based on other testcases.
  data_handler.handle_duplicate_entry(testcase)

  create_additional_tasks(testcase)
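
A hedged usage sketch: how the helper above might be invoked at the end of a
minimize-style task. The id and command are placeholders, not taken from the
snippet.

# Hypothetical call site; pass the CrashResult from the last repro attempt,
# or None when there is no crash output to symbolize.
finalize_testcase(
    testcase_id=12345,                   # hypothetical testcase id
    command='./app --repro %TESTCASE%',  # hypothetical repro command
    last_crash_result=None,
    flaky_stack=False)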
Example #2
    def test_similar_testcase_with_issue_closed_with_ignore_label(self):
        """Tests result is true when there is a similar testcase with closed issue
    blacklisted with ignore label."""
        self.issue.status = 'WontFix'
        self.issue._monorail_issue.open = False
        self.issue.labels.add('ClusterFuzz-Ignore')
        self.issue.save()

        similar_testcase = test_utils.create_generic_testcase()
        similar_testcase.one_time_crasher_flag = False
        similar_testcase.open = False
        similar_testcase.bug_information = str(self.issue.id)
        similar_testcase.put()

        self.assertEqual(
            True,
            triage._check_and_update_similar_bug(self.testcase,
                                                 self.issue_tracker))

        testcase = data_handler.get_testcase_by_id(self.testcase.key.id())
        self.assertEqual(
            'Skipping filing a bug since similar testcase (2) in issue (1) '
            'is blacklisted with ClusterFuzz-Ignore label.',
            testcase.get_metadata(triage.TRIAGE_MESSAGE_KEY))
Example #3
  def get(self):
    """Handle a GET request."""
    # pylint: disable=unexpected-keyword-arg

    # Memoize all project and job names.
    _ = data_handler.get_all_project_names(__memoize_force__=True)
    _ = data_handler.get_all_job_type_names(__memoize_force__=True)

    # Memoize both variants of get_all_fuzzer_names_including_children.
    _ = data_handler.get_all_fuzzer_names_including_children(
        include_parents=True, __memoize_force__=True)
    _ = data_handler.get_all_fuzzer_names_including_children(
        __memoize_force__=True)

    # Memoize expensive testcase attribute calls.
    for testcase_id in data_handler.get_open_testcase_id_iterator():
      try:
        testcase = data_handler.get_testcase_by_id(testcase_id)
      except errors.InvalidTestcaseError:
        # Already deleted.
        continue

      blobs.get_blob_size(testcase.fuzzed_keys)
      blobs.get_blob_size(testcase.minimized_keys)
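
The `__memoize_force__=True` keyword suggests a memoizing decorator with an
out-of-band refresh flag. A minimal sketch of that pattern, assuming the real
`memoize` wrapper behaves along these lines:

import functools

def memoize(func):
  """Cache results; passing __memoize_force__=True recomputes and refreshes."""
  cache = {}

  @functools.wraps(func)
  def wrapper(*args, **kwargs):
    force = kwargs.pop('__memoize_force__', False)
    key = (args, tuple(sorted(kwargs.items())))
    if force or key not in cache:
      cache[key] = func(*args, **kwargs)
    return cache[key]

  return wrapper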
Example #4
    def test_reproducible_no_regression_range(self):
        """Test reproducible with no regression range."""
        testcase = test_utils.create_generic_testcase()
        testcase.one_time_crasher_flag = False
        testcase.regression = ''
        testcase.put()

        expected_result = {
            'result': {
                'error_message':
                'No regression range, wait for regression task to finish.',
                'feedback_url': '',
                'found': False,
                'project': '',
                'suspected_components': '',
                'suspected_cls': '',
            }
        }

        self.assertIsNone(blame_task._prepare_predator_message(testcase))

        testcase = data_handler.get_testcase_by_id(testcase.key.id())
        actual_result = testcase.get_metadata('predator_result')
        self.assertEqual(actual_result, expected_result)
Example #5
    def test_unminimized(self):
        """Test that unminimized testcase is not processed for grouping."""
        self.testcases[0].security_flag = True
        self.testcases[0].crash_state = 'abc\ndef'
        self.testcases[0].crash_type = 'Heap-buffer-overflow\nREAD {*}'
        self.testcases[0].minimized_keys = None
        self.testcases[1].security_flag = True
        self.testcases[1].crash_state = 'abc\ndef'
        self.testcases[1].crash_type = 'Heap-buffer-overflow\nREAD 3'

        for t in self.testcases:
            t.put()

        grouper.group_testcases()

        testcases = []
        for testcase_id in data_handler.get_open_testcase_id_iterator():
            testcases.append(data_handler.get_testcase_by_id(testcase_id))

        self.assertEqual(len(testcases), 2)
        self.assertEqual(testcases[0].group_id, 0)
        self.assertFalse(testcases[0].is_leader)
        self.assertEqual(testcases[1].group_id, 0)
        self.assertTrue(testcases[1].is_leader)
Example #6
    def test_same_unique_crash_type_with_same_state(self):
        """Test that the crashes with same unique crash type and same state get
    de-duplicated with one of them removed.."""
        self.testcases[0].security_flag = False
        self.testcases[0].crash_type = 'Timeout'
        self.testcases[0].crash_state = 'abcde'
        self.testcases[0].timestamp = datetime.datetime.utcfromtimestamp(0)
        self.testcases[1].security_flag = False
        self.testcases[1].crash_type = 'Timeout'
        self.testcases[1].crash_state = 'abcde'
        self.testcases[1].timestamp = datetime.datetime.utcfromtimestamp(1)

        for t in self.testcases:
            t.put()

        grouper.group_testcases()

        testcases = []
        for testcase_id in data_handler.get_open_testcase_id_iterator():
            testcases.append(data_handler.get_testcase_by_id(testcase_id))

        self.assertEqual(len(testcases), 1)
        self.assertEqual(testcases[0].group_id, 0)
        self.assertTrue(testcases[0].is_leader)
Example #7
def _testcase_reproduces_in_revision(testcase, testcase_file_path, job_type,
                                     revision):
    """Test to see if a test case reproduces in the specified revision."""
    build_manager.setup_regular_build(revision)
    app_path = environment.get_value('APP_PATH')
    if not app_path:
        raise errors.BuildSetupError(revision, job_type)

    if testcase_manager.check_for_bad_build(job_type, revision):
        log_message = 'Bad build at r%d. Skipping' % revision
        testcase = data_handler.get_testcase_by_id(testcase.key.id())
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.WIP,
                                             log_message)
        raise errors.BadBuildError(revision, job_type)

    test_timeout = environment.get_value('TEST_TIMEOUT', 10)
    result = testcase_manager.test_for_crash_with_retries(
        testcase,
        testcase_file_path,
        test_timeout,
        http_flag=testcase.http_flag)
    _log_output(revision, result)
    return result
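
For context, a regression-style bisect could drive this helper as sketched
below. This is an assumption-laden sketch: it takes a pre-fetched revision
list and skips bad builds by moving forward, where a real implementation
would probe neighboring revisions.

def bisect_first_crashing_revision(testcase, testcase_file_path, job_type,
                                   revisions):
    """Binary-search |revisions| for the first revision that still crashes."""
    low, high = 0, len(revisions) - 1
    while low < high:
        mid = (low + high) // 2
        try:
            result = _testcase_reproduces_in_revision(
                testcase, testcase_file_path, job_type, revisions[mid])
        except errors.BadBuildError:
            low = mid + 1  # simplification: treat a bad build as not crashing
            continue
        if result.is_crash():
            high = mid
        else:
            low = mid + 1
    return revisions[low]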
Example #8
    def do_post(self):
        """Upload a testcase."""
        testcase_id = self.request.get('testcaseId')
        uploaded_file = self.get_upload()
        if testcase_id and not uploaded_file:
            testcase = helpers.get_testcase(testcase_id)
            if not access.can_user_access_testcase(testcase):
                raise helpers.AccessDeniedException()

            # Use minimized testcase for upload (if available).
            key = (testcase.minimized_keys if testcase.minimized_keys
                   and testcase.minimized_keys != 'NA' else
                   testcase.fuzzed_keys)

            uploaded_file = blobs.get_blob_info(key)

            # Extract filename part from blob.
            uploaded_file.filename = os.path.basename(
                uploaded_file.filename.replace('\\', os.sep))

        job_type = self.request.get('job')
        if not job_type:
            raise helpers.EarlyExitException('Missing job name.', 400)

        if (not data_types.Job.VALID_NAME_REGEX.match(job_type)
                or not data_types.Job.query(
                    data_types.Job.name == job_type).get()):
            raise helpers.EarlyExitException('Invalid job name.', 400)

        fuzzer_name = ''
        job_type_lowercase = job_type.lower()
        if 'libfuzzer' in job_type_lowercase:
            fuzzer_name = 'libFuzzer'
        elif 'afl' in job_type_lowercase:
            fuzzer_name = 'afl'

        target_name = self.request.get('target')
        if not fuzzer_name and target_name:
            raise helpers.EarlyExitException(
                'Target name is not applicable to non-engine jobs (AFL, libFuzzer).',
                400)

        if fuzzer_name and not target_name:
            raise helpers.EarlyExitException(
                'Missing target name for engine job (AFL, libFuzzer).', 400)

        if (target_name
                and not data_types.Fuzzer.VALID_NAME_REGEX.match(target_name)):
            raise helpers.EarlyExitException('Invalid target name.', 400)

        fully_qualified_fuzzer_name = ''
        if fuzzer_name and target_name:
            fully_qualified_fuzzer_name, target_name = find_fuzz_target(
                fuzzer_name, target_name, job_type)
            if not fully_qualified_fuzzer_name:
                raise helpers.EarlyExitException('Target does not exist.', 400)

        if not access.has_access(need_privileged_access=False,
                                 job_type=job_type,
                                 fuzzer_name=(fully_qualified_fuzzer_name
                                              or fuzzer_name)):
            raise helpers.AccessDeniedException()

        multiple_testcases = bool(self.request.get('multiple'))
        http_flag = bool(self.request.get('http'))
        high_end_job = bool(self.request.get('highEnd'))
        bug_information = self.request.get('issue')
        crash_revision = self.request.get('revision')
        timeout = self.request.get('timeout')
        retries = self.request.get('retries')
        bug_summary_update_flag = bool(self.request.get('updateIssue'))
        additional_arguments = self.request.get('args')
        app_launch_command = self.request.get('cmd')
        platform_id = self.request.get('platform')

        testcase_metadata = self.request.get('metadata')
        if testcase_metadata:
            try:
                testcase_metadata = json.loads(testcase_metadata)
            except Exception:
                raise helpers.EarlyExitException('Invalid metadata JSON.', 400)
            if not isinstance(testcase_metadata, dict):
                raise helpers.EarlyExitException(
                    'Metadata is not a JSON object.', 400)

        archive_state = 0
        bundled = False
        file_path_input = ''
        email = helpers.get_user_email()

        # If we have an AFL or libFuzzer target, use that for arguments.
        # Launch command looks like
        # python launcher.py {testcase_path} {target_name}
        if target_name:
            additional_arguments = '%%TESTCASE%% %s' % target_name

        # Certain modifications, such as the app launch command and issue
        # updates, are only allowed for privileged users.
        privileged_user = access.has_access(need_privileged_access=True)
        if not privileged_user:
            if bug_information or bug_summary_update_flag:
                raise helpers.EarlyExitException(
                    'You are not privileged to update existing issues.', 400)

            need_privileged_access = utils.string_is_true(
                data_handler.get_value_from_job_definition(
                    job_type, 'PRIVILEGED_ACCESS'))
            if need_privileged_access:
                raise helpers.EarlyExitException(
                    'You are not privileged to run this job type.', 400)

            if app_launch_command:
                raise helpers.EarlyExitException(
                    'You are not privileged to run arbitrary launch commands.',
                    400)

            if testcase_metadata:
                raise helpers.EarlyExitException(
                    'You are not privileged to set testcase metadata.', 400)

        if crash_revision and crash_revision.isdigit():
            crash_revision = int(crash_revision)
        else:
            crash_revision = 0

        if bug_information and not bug_information.isdigit():
            raise helpers.EarlyExitException('Bug is not a number.', 400)

        if not timeout:
            timeout = 0
        elif not timeout.isdigit() or timeout == '0':
            raise helpers.EarlyExitException(
                'Testcase timeout must be a number greater than 0.', 400)
        else:
            timeout = int(timeout)
            if timeout > 120:
                raise helpers.EarlyExitException(
                    'Testcase timeout may not be greater than 120 seconds.',
                    400)

        if retries:
            if retries.isdigit():
                retries = int(retries)
            else:
                retries = None

            if retries is None or retries > MAX_RETRIES:
                raise helpers.EarlyExitException(
                    'Testcase retries must be a number less than %d.' %
                    MAX_RETRIES, 400)
        else:
            retries = None

        try:
            gestures = ast.literal_eval(self.request.get('gestures'))
        except Exception:
            gestures = []
        if not gestures:
            gestures = []

        job_queue = tasks.queue_for_job(job_type, is_high_end=high_end_job)

        if uploaded_file is not None:
            filename = ''.join([
                x for x in uploaded_file.filename
                if x not in ' ;/?:@&=+$,{}|<>()\\'
            ])
            key = str(uploaded_file.key())
            if archive.is_archive(filename):
                archive_state = data_types.ArchiveStatus.FUZZED
            if archive_state:
                if multiple_testcases:
                    if testcase_metadata:
                        raise helpers.EarlyExitException(
                            'Testcase metadata not supported with multiple testcases.',
                            400)
                    # Create a job to unpack an archive.
                    metadata = data_types.BundledArchiveMetadata()
                    metadata.blobstore_key = key
                    metadata.timeout = timeout
                    metadata.job_queue = job_queue
                    metadata.job_type = job_type
                    metadata.http_flag = http_flag
                    metadata.archive_filename = filename
                    metadata.uploader_email = email
                    metadata.gestures = gestures
                    metadata.crash_revision = crash_revision
                    metadata.additional_arguments = additional_arguments
                    metadata.bug_information = bug_information
                    metadata.platform_id = platform_id
                    metadata.app_launch_command = app_launch_command
                    metadata.fuzzer_name = fuzzer_name
                    metadata.overridden_fuzzer_name = fully_qualified_fuzzer_name
                    metadata.fuzzer_binary_name = target_name
                    metadata.put()

                    tasks.add_task('unpack',
                                   str(metadata.key.id()),
                                   job_type,
                                   queue=tasks.queue_for_job(job_type))

                    # Create a testcase metadata object to show the user their upload.
                    upload_metadata = data_types.TestcaseUploadMetadata()
                    upload_metadata.timestamp = datetime.datetime.utcnow()
                    upload_metadata.filename = filename
                    upload_metadata.blobstore_key = key
                    upload_metadata.original_blobstore_key = key
                    upload_metadata.status = 'Pending'
                    upload_metadata.bundled = True
                    upload_metadata.uploader_email = email
                    upload_metadata.retries = retries
                    upload_metadata.bug_summary_update_flag = bug_summary_update_flag
                    upload_metadata.put()

                    helpers.log('Uploaded multiple testcases.',
                                helpers.VIEW_OPERATION)
                    self.render_json({'multiple': True})
                    return

                file_path_input = guess_input_file(uploaded_file, filename)
                if not file_path_input:
                    raise helpers.EarlyExitException((
                        "Unable to detect which file to launch. The main file\'s name "
                        'must contain either of %s.' % str(RUN_FILE_PATTERNS)),
                                                     400)

        else:
            raise helpers.EarlyExitException('Please select a file to upload.',
                                             400)

        testcase_id = data_handler.create_user_uploaded_testcase(
            key,
            key,
            archive_state,
            filename,
            file_path_input,
            timeout,
            job_type,
            job_queue,
            http_flag,
            gestures,
            additional_arguments,
            bug_information,
            crash_revision,
            email,
            platform_id,
            app_launch_command,
            fuzzer_name,
            fully_qualified_fuzzer_name,
            target_name,
            bundled,
            retries,
            bug_summary_update_flag,
            additional_metadata=testcase_metadata)

        testcase = data_handler.get_testcase_by_id(testcase_id)
        issue = issue_tracker_utils.get_issue_for_testcase(testcase)
        if issue:
            report_url = data_handler.TESTCASE_REPORT_URL.format(
                domain=data_handler.get_domain(), testcase_id=testcase_id)
            comment = ('ClusterFuzz is analyzing your testcase. '
                       'Developers can follow the progress at %s.' %
                       report_url)
            issue.save(new_comment=comment)

        helpers.log('Uploaded testcase %s' % testcase_id,
                    helpers.VIEW_OPERATION)
        self.render_json({'id': '%s' % testcase_id})
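
Since the handler reads plain form fields, an upload client is just a
multipart POST. A hedged sketch using the `requests` library; the endpoint
URL, job, target and session cookie are assumptions, not part of the handler
above.

import requests

# Hypothetical deployment values; adjust to a real instance.
with open('crash-poc', 'rb') as poc:
    resp = requests.post(
        'https://clusterfuzz.example.com/upload-testcase/upload',
        data={
            'job': 'libfuzzer_asan_my_project',  # must name an existing Job
            'target': 'my_fuzz_target',          # required for engine jobs
            'timeout': '30',                     # seconds; handler caps at 120
        },
        files={'file': poc},
        cookies={'session': 'REDACTED'})         # hypothetical auth cookie
print(resp.json()['id'])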
Example #9
def cleanup_testcases_and_issues():
  """Clean up unneeded open testcases and their associated issues."""
  jobs = data_handler.get_all_job_type_names()
  testcase_keys = ndb_utils.get_all_from_query(
      data_types.Testcase.query(
          ndb_utils.is_false(data_types.Testcase.triaged)),
      keys_only=True)
  top_crashes_by_project_and_platform_map = (
      get_top_crashes_for_all_projects_and_platforms())

  utils.python_gc()

  testcases_processed = 0
  empty_issue_tracker_policy = issue_tracker_policy.get_empty()
  for testcase_key in testcase_keys:
    testcase_id = testcase_key.id()
    try:
      testcase = data_handler.get_testcase_by_id(testcase_id)
    except errors.InvalidTestcaseError:
      # Already deleted.
      continue

    logs.log('Processing testcase %d.' % testcase_id)

    issue = issue_tracker_utils.get_issue_for_testcase(testcase)
    policy = issue_tracker_utils.get_issue_tracker_policy_for_testcase(testcase)
    if not policy:
      policy = empty_issue_tracker_policy

    # Issue updates.
    update_os_labels(policy, testcase, issue)
    update_fuzz_blocker_label(policy, testcase, issue,
                              top_crashes_by_project_and_platform_map)
    update_component_labels(testcase, issue)
    update_issue_ccs_from_owners_file(policy, testcase, issue)
    update_issue_owner_and_ccs_from_predator_results(policy, testcase, issue)
    update_issue_labels_for_flaky_testcase(policy, testcase, issue)

    # Testcase marking rules.
    mark_duplicate_testcase_as_closed_with_no_issue(testcase)
    mark_issue_as_closed_if_testcase_is_fixed(policy, testcase, issue)
    mark_testcase_as_closed_if_issue_is_closed(policy, testcase, issue)
    mark_testcase_as_closed_if_job_is_invalid(testcase, jobs)
    mark_unreproducible_testcase_as_fixed_if_issue_is_closed(testcase, issue)
    mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
        policy, testcase, issue)

    # Notification, to be done at end after testcase state is updated from
    # previous rules.
    notify_closed_issue_if_testcase_is_open(policy, testcase, issue)
    notify_issue_if_testcase_is_invalid(policy, testcase, issue)
    notify_uploader_when_testcase_is_processed(policy, testcase, issue)

    # Mark testcase as triage complete if both testcase and associated issue
    # are closed. This also needs to be done before the deletion rules.
    mark_testcase_as_triaged_if_needed(testcase, issue)

    # Testcase deletion rules.
    delete_unreproducible_testcase_with_no_issue(testcase)

    testcases_processed += 1
    if testcases_processed % 100 == 0:
      utils.python_gc()
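
The loop forces a garbage-collection pass every 100 testcases to cap memory
growth while walking a large query. The same batching pattern in isolation
(generic sketch, not the cron's actual helper):

import gc

def process_in_batches(items, handle, batch_size=100):
  """Apply |handle| to each item, collecting garbage every |batch_size| items."""
  for index, item in enumerate(items, start=1):
    handle(item)
    if index % batch_size == 0:
      gc.collect()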
Example #10
def execute_task(testcase_id, job_type):
  """Execute a symbolize command."""
  # Locate the testcase associated with the id.
  testcase = data_handler.get_testcase_by_id(testcase_id)

  # We should at least have a symbolized debug or release build.
  if not build_manager.has_symbolized_builds():
    return

  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

  # Setup testcase and its dependencies.
  file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
  if not file_list:
    return

  # Initialize variables.
  build_fail_wait = environment.get_value("FAIL_WAIT")

  old_crash_stacktrace = data_handler.get_stacktrace(testcase)
  sym_crash_type = testcase.crash_type
  sym_crash_address = testcase.crash_address
  sym_crash_state = testcase.crash_state
  sym_redzone = DEFAULT_REDZONE
  warmup_timeout = environment.get_value("WARMUP_TIMEOUT")

  # Decide which build revision to use.
  if testcase.crash_stacktrace == "Pending":
    # This usually happens when someone clicked the 'Update stacktrace from
    # trunk' button on the testcase details page. In this case, we are forced
    # to use trunk. No revision -> trunk build.
    build_revision = None
  else:
    build_revision = testcase.crash_revision

  # Set up a custom or regular build based on revision.
  build_manager.setup_build(build_revision)

  # Get crash revision used in setting up build.
  crash_revision = environment.get_value("APP_REVISION")

  if not build_manager.check_app_path():
    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         "Build setup failed")
    tasks.add_task(
        "symbolize", testcase_id, job_type, wait_time=build_fail_wait)
    return

  # ASAN tool settings (if the tool is used).
  # See if we can get better stacks with higher redzone sizes.
  # A UAF might actually turn out to be OOB read/write with a bigger redzone.
  if environment.tool_matches("ASAN", job_type) and testcase.security_flag:
    redzone = MAX_REDZONE
    while redzone >= MIN_REDZONE:
      environment.reset_current_memory_tool_options(
          redzone_size=redzone, disable_ubsan=testcase.disable_ubsan)

      process_handler.terminate_stale_application_instances()
      command = testcase_manager.get_command_line_for_application(
          testcase_file_path, needs_http=testcase.http_flag)
      return_code, crash_time, output = process_handler.run_process(
          command, timeout=warmup_timeout, gestures=testcase.gestures)
      crash_result = CrashResult(return_code, crash_time, output)

      if crash_result.is_crash() and "AddressSanitizer" in output:
        state = crash_result.get_symbolized_data()
        security_flag = crash_result.is_security_issue()

        if (not crash_analyzer.ignore_stacktrace(state.crash_stacktrace) and
            security_flag == testcase.security_flag and
            state.crash_type == testcase.crash_type and
            (state.crash_type != sym_crash_type or
             state.crash_state != sym_crash_state)):
          logs.log("Changing crash parameters.\nOld : %s, %s, %s" %
                   (sym_crash_type, sym_crash_address, sym_crash_state))

          sym_crash_type = state.crash_type
          sym_crash_address = state.crash_address
          sym_crash_state = state.crash_state
          sym_redzone = redzone
          old_crash_stacktrace = state.crash_stacktrace

          logs.log("\nNew : %s, %s, %s" %
                   (sym_crash_type, sym_crash_address, sym_crash_state))
          break

      redzone //= 2

  # We should have at least a symbolized debug or a release build.
  symbolized_builds = build_manager.setup_symbolized_builds(crash_revision)
  if not symbolized_builds or (
      not build_manager.check_app_path() and
      not build_manager.check_app_path("APP_PATH_DEBUG")):
    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         "Build setup failed")
    tasks.add_task(
        "symbolize", testcase_id, job_type, wait_time=build_fail_wait)
    return

  # Increase malloc_context_size to get all stack frames. Default is 30.
  environment.reset_current_memory_tool_options(
      redzone_size=sym_redzone,
      malloc_context_size=STACK_FRAME_COUNT,
      symbolize_inline_frames=True,
      disable_ubsan=testcase.disable_ubsan,
  )

  # TSAN tool settings (if the tool is used).
  if environment.tool_matches("TSAN", job_type):
    environment.set_tsan_max_history_size()

  # Do the symbolization if supported by this application.
  result, sym_crash_stacktrace = get_symbolized_stacktraces(
      testcase_file_path, testcase, old_crash_stacktrace, sym_crash_state)

  # Update crash parameters.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  testcase.crash_type = sym_crash_type
  testcase.crash_address = sym_crash_address
  testcase.crash_state = sym_crash_state
  testcase.crash_stacktrace = data_handler.filter_stacktrace(
      sym_crash_stacktrace)

  if not result:
    data_handler.update_testcase_comment(
        testcase,
        data_types.TaskState.ERROR,
        "Unable to reproduce crash, skipping "
        "stacktrace update",
    )
  else:
    # Switch build url to use the less-optimized symbolized build with better
    # stacktrace.
    build_url = environment.get_value("BUILD_URL")
    if build_url:
      testcase.set_metadata("build_url", build_url, update_testcase=False)

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.FINISHED)

  testcase.symbolized = True
  testcase.crash_revision = crash_revision
  testcase.put()

  # We might have updated the crash state. See if we need to mark this
  # testcase as a duplicate based on other testcases.
  data_handler.handle_duplicate_entry(testcase)

  task_creation.create_blame_task_if_needed(testcase)

  # Switch current directory before builds cleanup.
  root_directory = environment.get_value("ROOT_DIR")
  os.chdir(root_directory)

  # Cleanup symbolized builds which are space-heavy.
  symbolized_builds.delete()
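
The ASAN branch above halves the redzone from MAX_REDZONE down until a
better-symbolized crash appears. The same back-off pattern in isolation; the
constants below are illustrative, not the task's values.

MIN_REDZONE = 16   # illustrative
MAX_REDZONE = 256  # illustrative

def redzone_backoff(try_redzone):
  """Call |try_redzone| with halving redzone sizes; return the first success."""
  redzone = MAX_REDZONE
  while redzone >= MIN_REDZONE:
    if try_redzone(redzone):
      return redzone
    redzone //= 2
  return None

# Example: succeeds once the redzone drops to 64.
assert redzone_backoff(lambda size: size <= 64) == 64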
Example #11
def group_testcases():
  """Group testcases based on rules like same bug numbers, similar crash
  states, etc."""
  testcase_map = {}
  cached_issue_map = {}

  for testcase_id in data_handler.get_open_testcase_id_iterator():
    try:
      testcase = data_handler.get_testcase_by_id(testcase_id)
    except errors.InvalidTestcaseError:
      # Already deleted.
      continue

    # Remove duplicates early on to avoid large groups.
    if (not testcase.bug_information and not testcase.uploader_email and
        has_testcase_with_same_params(testcase, testcase_map)):
      logs.log('Deleting duplicate testcase %d.' % testcase_id)
      testcase.key.delete()
      continue

    # Wait for minimization to finish as this might change crash params such
    # as type and may mark it as duplicate / closed.
    if not testcase.minimized_keys:
      continue

    # Store needed testcase attributes into |testcase_map|.
    testcase_map[testcase_id] = TestcaseAttributes()
    testcase_attributes = testcase_map[testcase_id]
    for attribute_name in FORWARDED_ATTRIBUTES:
      setattr(testcase_attributes, attribute_name,
              getattr(testcase, attribute_name))

    # Store original issue mappings in the testcase attributes.
    if testcase.bug_information:
      issue_id = int(testcase.bug_information)
      project_name = testcase.project_name

      if (project_name in cached_issue_map and
          issue_id in cached_issue_map[project_name]):
        testcase_attributes.issue_id = (
            cached_issue_map[project_name][issue_id])
      else:
        issue_tracker = issue_tracker_utils.get_issue_tracker_for_testcase(
            testcase)
        if not issue_tracker:
          continue

        # Determine the original issue id traversing the list of duplicates.
        try:
          issue = issue_tracker.get_original_issue(issue_id)
          original_issue_id = issue.id
        except Exception:
          # If we are unable to access the issue, then we can't determine
          # the original issue id. Assume that it is the same as issue id.
          logs.log_error(
              'Unable to determine original issue for %d.' % issue_id)
          original_issue_id = issue_id

        if project_name not in cached_issue_map:
          cached_issue_map[project_name] = {}
        cached_issue_map[project_name][issue_id] = original_issue_id
        cached_issue_map[project_name][original_issue_id] = original_issue_id
        testcase_attributes.issue_id = original_issue_id

  # No longer needed. Free up some memory.
  cached_issue_map.clear()

  group_testcases_with_similar_states(testcase_map)
  group_testcases_with_same_issues(testcase_map)
  group_leader.choose(testcase_map)

  # TODO(aarya): Replace with an optimized implementation using dirty flag.
  # Update the group mapping in testcase object.
  for testcase_id in data_handler.get_open_testcase_id_iterator():
    if testcase_id not in testcase_map:
      # A new testcase that was just created. Skip for now, will be grouped in
      # next iteration of group task.
      continue

    # If we are part of a group, then calculate the number of testcases in that
    # group and lowest issue id of issues associated with testcases in that
    # group.
    updated_group_id = testcase_map[testcase_id].group_id
    updated_is_leader = testcase_map[testcase_id].is_leader
    updated_group_id_count = 0
    updated_group_bug_information = 0
    if updated_group_id:
      for other_testcase in six.itervalues(testcase_map):
        if other_testcase.group_id != updated_group_id:
          continue
        updated_group_id_count += 1

        # Update group issue id to be lowest issue id in the entire group.
        if other_testcase.issue_id is None:
          continue
        if (not updated_group_bug_information or
            updated_group_bug_information > other_testcase.issue_id):
          updated_group_bug_information = other_testcase.issue_id

    # If this group id is used by only one testcase, then remove it.
    if updated_group_id_count == 1:
      data_handler.delete_group(updated_group_id, update_testcases=False)
      updated_group_id = 0
      updated_group_bug_information = 0
      updated_is_leader = True

    # If this group has more than the maximum allowed testcases, log an error
    # so that the sheriff can later debug what caused this. Usually, this is a
    # bug in grouping logic OR an ever-changing crash signature (e.g. slightly
    # different crash types or crash states). We cannot bail out, as otherwise
    # we would not group the testcase, leading to a spam of newly filed bugs.
    if updated_group_id_count > GROUP_MAX_TESTCASE_LIMIT:
      logs.log_error(
          'Group %d exceeds maximum allowed testcases.' % updated_group_id)

    try:
      testcase = data_handler.get_testcase_by_id(testcase_id)
    except errors.InvalidTestcaseError:
      # Already deleted.
      continue

    is_changed = (
        (testcase.group_id != updated_group_id) or
        (testcase.group_bug_information != updated_group_bug_information) or
        (testcase.is_leader != updated_is_leader))

    if not is_changed:
      # If nothing is changed, no more work to do. It's faster this way.
      continue

    testcase.group_bug_information = updated_group_bug_information
    testcase.group_id = updated_group_id
    testcase.is_leader = updated_is_leader
    testcase.put()
    logs.log(
        'Updated testcase %d group to %d.' % (testcase_id, updated_group_id))
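
`cached_issue_map` above memoizes duplicate-issue resolution per project,
caching both the alias and the original id. The same two-level pattern in
isolation; the resolver argument stands in for issue_tracker.get_original_issue.

def resolve_original_issue(cache, project, issue_id, resolver):
  """Memoize issue_id -> original issue id per project."""
  project_cache = cache.setdefault(project, {})
  if issue_id in project_cache:
    return project_cache[issue_id]
  original_id = resolver(issue_id)
  project_cache[issue_id] = original_id
  project_cache[original_id] = original_id
  return original_id

cache = {}
# Pretend issue 2 was merged into issue 1.
assert resolve_original_issue(cache, 'proj', 2, lambda _: 1) == 1
assert cache['proj'] == {2: 1, 1: 1}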
Example #12
    def do_post(self):
        """Upload a testcase."""
        email = helpers.get_user_email()
        testcase_id = self.request.get("testcaseId")
        uploaded_file = self.get_upload()
        if testcase_id and not uploaded_file:
            testcase = helpers.get_testcase(testcase_id)
            if not access.can_user_access_testcase(testcase):
                raise helpers.AccessDeniedException()

            # Use minimized testcase for upload (if available).
            key = (testcase.minimized_keys if testcase.minimized_keys
                   and testcase.minimized_keys != "NA" else
                   testcase.fuzzed_keys)

            uploaded_file = blobs.get_blob_info(key)

            # Extract filename part from blob.
            uploaded_file.filename = os.path.basename(
                uploaded_file.filename.replace("\\", os.sep))

        job_type = self.request.get("job")
        if not job_type:
            raise helpers.EarlyExitException("Missing job name.", 400)

        if (not data_types.Job.VALID_NAME_REGEX.match(job_type)
                or not data_types.Job.query(
                    data_types.Job.name == job_type).get()):
            raise helpers.EarlyExitException("Invalid job name.", 400)

        fuzzer_name = ""
        job_type_lowercase = job_type.lower()
        if "libfuzzer" in job_type_lowercase:
            fuzzer_name = "libFuzzer"
        elif "afl" in job_type_lowercase:
            fuzzer_name = "afl"

        target_name = self.request.get("target")
        if not fuzzer_name and target_name:
            raise helpers.EarlyExitException(
                "Target name is not applicable to non-engine jobs (AFL, libFuzzer).",
                400,
            )

        if fuzzer_name and not target_name:
            raise helpers.EarlyExitException(
                "Missing target name for engine job (AFL, libFuzzer).", 400)

        if target_name and not data_types.Fuzzer.VALID_NAME_REGEX.match(
                target_name):
            raise helpers.EarlyExitException("Invalid target name.", 400)

        fully_qualified_fuzzer_name = ""
        if fuzzer_name and target_name:
            fully_qualified_fuzzer_name, target_name = find_fuzz_target(
                fuzzer_name, target_name, job_type)
            if not fully_qualified_fuzzer_name:
                raise helpers.EarlyExitException("Target does not exist.", 400)

        if not access.has_access(
                need_privileged_access=False,
                job_type=job_type,
                fuzzer_name=(fully_qualified_fuzzer_name or fuzzer_name),
        ) and not _is_uploader_allowed(email):
            raise helpers.AccessDeniedException()

        multiple_testcases = bool(self.request.get("multiple"))
        http_flag = bool(self.request.get("http"))
        high_end_job = bool(self.request.get("highEnd"))
        bug_information = self.request.get("issue")
        crash_revision = self.request.get("revision")
        timeout = self.request.get("timeout")
        retries = self.request.get("retries")
        bug_summary_update_flag = bool(self.request.get("updateIssue"))
        quiet_flag = bool(self.request.get("quiet"))
        additional_arguments = self.request.get("args")
        app_launch_command = self.request.get("cmd")
        platform_id = self.request.get("platform")
        issue_labels = self.request.get("issue_labels")
        gestures = self.request.get("gestures") or "[]"

        testcase_metadata = self.request.get("metadata", {})
        if testcase_metadata:
            try:
                testcase_metadata = json.loads(testcase_metadata)
            except Exception:
                raise helpers.EarlyExitException("Invalid metadata JSON.", 400)
            if not isinstance(testcase_metadata, dict):
                raise helpers.EarlyExitException(
                    "Metadata is not a JSON object.", 400)
        if issue_labels:
            testcase_metadata["issue_labels"] = issue_labels

        try:
            gestures = ast.literal_eval(gestures)
        except Exception:
            raise helpers.EarlyExitException("Failed to parse gestures.", 400)

        archive_state = 0
        bundled = False
        file_path_input = ""

        # Certain modifications, such as the app launch command and issue
        # updates, are only allowed for privileged users.
        privileged_user = access.has_access(need_privileged_access=True)
        if not privileged_user:
            if bug_information or bug_summary_update_flag:
                raise helpers.EarlyExitException(
                    "You are not privileged to update existing issues.", 400)

            need_privileged_access = utils.string_is_true(
                data_handler.get_value_from_job_definition(
                    job_type, "PRIVILEGED_ACCESS"))
            if need_privileged_access:
                raise helpers.EarlyExitException(
                    "You are not privileged to run this job type.", 400)

            if app_launch_command:
                raise helpers.EarlyExitException(
                    "You are not privileged to run arbitrary launch commands.",
                    400)

            if testcase_metadata and not _allow_unprivileged_metadata(
                    testcase_metadata):
                raise helpers.EarlyExitException(
                    "You are not privileged to set testcase metadata.", 400)

            if additional_arguments:
                raise helpers.EarlyExitException(
                    "You are not privileged to add command-line arguments.",
                    400)

            if gestures:
                raise helpers.EarlyExitException(
                    "You are not privileged to run arbitrary gestures.", 400)

        # TODO(aarya): Remove once AFL is migrated to engine pipeline.
        if target_name:
            additional_arguments = "%TESTCASE%"

        if crash_revision and crash_revision.isdigit():
            crash_revision = int(crash_revision)
        else:
            crash_revision = 0

        if bug_information == "0":  # Auto-recover from this bad input.
            bug_information = None
        if bug_information and not bug_information.isdigit():
            raise helpers.EarlyExitException("Bug is not a number.", 400)

        if not timeout:
            timeout = 0
        elif not timeout.isdigit() or timeout == "0":
            raise helpers.EarlyExitException(
                "Testcase timeout must be a number greater than 0.", 400)
        else:
            timeout = int(timeout)
            if timeout > 120:
                raise helpers.EarlyExitException(
                    "Testcase timeout may not be greater than 120 seconds.",
                    400)

        if retries:
            if retries.isdigit():
                retries = int(retries)
            else:
                retries = None

            if retries is None or retries > MAX_RETRIES:
                raise helpers.EarlyExitException(
                    "Testcase retries must be a number less than %d." %
                    MAX_RETRIES, 400)
        else:
            retries = None

        job_queue = tasks.queue_for_job(job_type, is_high_end=high_end_job)

        if uploaded_file is not None:
            filename = "".join(x for x in uploaded_file.filename
                               if x not in " ;/?:@&=+$,{}|<>()\\")
            key = str(uploaded_file.key())
            if archive.is_archive(filename):
                archive_state = data_types.ArchiveStatus.FUZZED
            if archive_state:
                if multiple_testcases:
                    # Create a job to unpack an archive.
                    metadata = data_types.BundledArchiveMetadata()
                    metadata.blobstore_key = key
                    metadata.timeout = timeout
                    metadata.job_queue = job_queue
                    metadata.job_type = job_type
                    metadata.http_flag = http_flag
                    metadata.archive_filename = filename
                    metadata.uploader_email = email
                    metadata.gestures = gestures
                    metadata.crash_revision = crash_revision
                    metadata.additional_arguments = additional_arguments
                    metadata.bug_information = bug_information
                    metadata.platform_id = platform_id
                    metadata.app_launch_command = app_launch_command
                    metadata.fuzzer_name = fuzzer_name
                    metadata.overridden_fuzzer_name = fully_qualified_fuzzer_name
                    metadata.fuzzer_binary_name = target_name
                    metadata.put()

                    tasks.add_task(
                        "unpack",
                        str(metadata.key.id()),
                        job_type,
                        queue=tasks.queue_for_job(job_type),
                    )

                    # Create a testcase metadata object to show the user their upload.
                    upload_metadata = data_types.TestcaseUploadMetadata()
                    upload_metadata.timestamp = datetime.datetime.utcnow()
                    upload_metadata.filename = filename
                    upload_metadata.blobstore_key = key
                    upload_metadata.original_blobstore_key = key
                    upload_metadata.status = "Pending"
                    upload_metadata.bundled = True
                    upload_metadata.uploader_email = email
                    upload_metadata.retries = retries
                    upload_metadata.bug_summary_update_flag = bug_summary_update_flag
                    upload_metadata.quiet_flag = quiet_flag
                    upload_metadata.additional_metadata_string = json.dumps(
                        testcase_metadata)
                    upload_metadata.put()

                    helpers.log("Uploaded multiple testcases.",
                                helpers.VIEW_OPERATION)
                    self.render_json({"multiple": True})
                    return

                file_path_input = guess_input_file(uploaded_file, filename)
                if not file_path_input:
                    raise helpers.EarlyExitException(
                        ("Unable to detect which file to launch. The main file's name "
                         "must contain either of %s." %
                         str(RUN_FILE_PATTERNS)),
                        400,
                    )

        else:
            raise helpers.EarlyExitException("Please select a file to upload.",
                                             400)

        testcase_id = data_handler.create_user_uploaded_testcase(
            key,
            key,
            archive_state,
            filename,
            file_path_input,
            timeout,
            job_type,
            job_queue,
            http_flag,
            gestures,
            additional_arguments,
            bug_information,
            crash_revision,
            email,
            platform_id,
            app_launch_command,
            fuzzer_name,
            fully_qualified_fuzzer_name,
            target_name,
            bundled,
            retries,
            bug_summary_update_flag,
            quiet_flag,
            additional_metadata=testcase_metadata,
        )

        if not quiet_flag:
            testcase = data_handler.get_testcase_by_id(testcase_id)
            issue = issue_tracker_utils.get_issue_for_testcase(testcase)
            if issue:
                report_url = data_handler.TESTCASE_REPORT_URL.format(
                    domain=data_handler.get_domain(), testcase_id=testcase_id)

                comment = ("ClusterFuzz is analyzing your testcase. "
                           "Developers can follow the progress at %s." %
                           report_url)
                issue.save(new_comment=comment)

        helpers.log("Uploaded testcase %s" % testcase_id,
                    helpers.VIEW_OPERATION)
        self.render_json({"id": "%s" % testcase_id})
Example #13
def _process_corpus_crashes(context, result):
    """Process crashes found in the corpus."""
    # Default Testcase entity values.
    crash_revision = result.revision
    job_type = environment.get_value("JOB_NAME")
    minimized_arguments = "%TESTCASE% " + context.fuzz_target.binary
    project_name = data_handler.get_project_name(job_type)

    comment = "Fuzzer %s generated corpus testcase crashed (r%s)" % (
        context.fuzz_target.project_qualified_name(),
        crash_revision,
    )

    # Generate crash reports.
    for crash in result.crashes:
        existing_testcase = data_handler.find_testcase(project_name,
                                                       crash.crash_type,
                                                       crash.crash_state,
                                                       crash.security_flag)
        if existing_testcase:
            continue

        # Upload/store testcase.
        if environment.is_trusted_host():
            from bot.untrusted_runner import file_host

            unit_path = os.path.join(context.bad_units_path,
                                     os.path.basename(crash.unit_path))
            # Prevent the worker from escaping out of |context.bad_units_path|.
            if not file_host.is_directory_parent(unit_path,
                                                 context.bad_units_path):
                raise CorpusPruningException("Invalid units path from worker.")

            file_host.copy_file_from_worker(crash.unit_path, unit_path)
        else:
            unit_path = crash.unit_path

        with open(unit_path, "rb") as f:
            key = blobs.write_blob(f)

        # Set the absolute_path property of the Testcase to a file in FUZZ_INPUTS
        # instead of the local quarantine directory.
        absolute_testcase_path = os.path.join(
            environment.get_value("FUZZ_INPUTS"), "testcase")

        testcase_id = data_handler.store_testcase(
            crash=crash,
            fuzzed_keys=key,
            minimized_keys="",
            regression="",
            fixed="",
            one_time_crasher_flag=False,
            crash_revision=crash_revision,
            comment=comment,
            absolute_path=absolute_testcase_path,
            fuzzer_name=context.fuzz_target.engine,
            fully_qualified_fuzzer_name=(
                context.fuzz_target.fully_qualified_name()),
            job_type=job_type,
            archived=False,
            archive_filename="",
            binary_flag=True,
            http_flag=False,
            gestures=None,
            redzone=DEFAULT_REDZONE,
            disable_ubsan=False,
            minidump_keys=None,
            window_argument=None,
            timeout_multiplier=1.0,
            minimized_arguments=minimized_arguments,
        )

        # Set fuzzer_binary_name in testcase metadata.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        testcase.set_metadata("fuzzer_binary_name", result.fuzzer_binary_name)

        issue_metadata = engine_common.get_all_issue_metadata_for_testcase(
            testcase)
        if issue_metadata:
            for key, value in issue_metadata.items():
                testcase.set_metadata(key, value, update_testcase=False)

            testcase.put()

        # Create additional tasks for testcase (starting with minimization).
        testcase = data_handler.get_testcase_by_id(testcase_id)
        task_creation.create_tasks(testcase)
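
The pre-store check keys crashes on (project, crash type, crash state,
security flag). The same de-duplication in isolation (sketch; crash objects
are stand-ins with the attributes used above):

def filter_new_crashes(project_name, crashes, seen=None):
    """Keep only crashes whose (type, state, security) signature is unseen."""
    seen = seen if seen is not None else set()
    new_crashes = []
    for crash in crashes:
        key = (project_name, crash.crash_type, crash.crash_state,
               crash.security_flag)
        if key not in seen:
            seen.add(key)
            new_crashes.append(crash)
    return new_crashes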
Example #14
def execute_task(testcase_id, job_type):
  """Run analyze task."""
  # Reset redzones.
  environment.reset_current_memory_tool_options(redzone_size=128)

  # Unset window location size and position properties so defaults are used.
  environment.set_value('WINDOW_ARG', '')

  # Locate the testcase associated with the id.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  if not testcase:
    return

  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

  metadata = data_types.TestcaseUploadMetadata.query(
      data_types.TestcaseUploadMetadata.testcase_id == int(testcase_id)).get()
  if not metadata:
    logs.log_error(
        'Testcase %s has no associated upload metadata.' % testcase_id)
    testcase.key.delete()
    return

  is_lsan_enabled = environment.get_value('LSAN')
  if is_lsan_enabled:
    # Create an empty local blacklist so all leaks are visible to the uploader.
    leak_blacklist.create_empty_local_blacklist()

  # Store the bot name and timestamp in upload metadata.
  bot_name = environment.get_value('BOT_NAME')
  metadata.bot_name = bot_name
  metadata.timestamp = datetime.datetime.utcnow()
  metadata.put()

  # Adjust the test timeout, if the user has provided one.
  if metadata.timeout:
    environment.set_value('TEST_TIMEOUT', metadata.timeout)

  # Adjust the number of retries, if the user has provided one.
  if metadata.retries is not None:
    environment.set_value('CRASH_RETRIES', metadata.retries)

  # Setup testcase and get absolute testcase path.
  file_list, _, testcase_file_path = setup.setup_testcase(testcase)
  if not file_list:
    return

  # Set up a custom or regular build based on revision.
  build_manager.setup_build(testcase.crash_revision)

  # Check if we have an application path. If not, our build failed
  # to setup correctly.
  app_path = environment.get_value('APP_PATH')
  if not app_path:
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         'Build setup failed')

    if data_handler.is_first_retry_for_task(testcase):
      build_fail_wait = environment.get_value('FAIL_WAIT')
      tasks.add_task(
          'analyze', testcase_id, job_type, wait_time=build_fail_wait)
    else:
      close_invalid_testcase_and_update_status(testcase, metadata,
                                               'Build setup failed')
    return

  # Update initial testcase information.
  testcase.absolute_path = testcase_file_path
  testcase.job_type = job_type
  testcase.binary_flag = utils.is_binary_file(testcase_file_path)
  testcase.queue = tasks.default_queue()
  testcase.crash_state = ''

  # Set initial testcase metadata fields (e.g. build url, etc).
  data_handler.set_initial_testcase_metadata(testcase)

  # Update minimized arguments and use ones provided during user upload.
  if not testcase.minimized_arguments:
    minimized_arguments = environment.get_value('APP_ARGS') or ''
    additional_command_line_flags = testcase.get_metadata(
        'uploaded_additional_args')
    if additional_command_line_flags:
      minimized_arguments += ' %s' % additional_command_line_flags
    environment.set_value('APP_ARGS', minimized_arguments)
    testcase.minimized_arguments = minimized_arguments

  # Update other fields not set at upload time.
  testcase.crash_revision = environment.get_value('APP_REVISION')
  data_handler.set_initial_testcase_metadata(testcase)
  testcase.put()

  # Initialize some variables.
  gestures = testcase.gestures
  http_flag = testcase.http_flag
  test_timeout = environment.get_value('TEST_TIMEOUT')

  # Get the crash output.
  result = testcase_manager.test_for_crash_with_retries(
      testcase,
      testcase_file_path,
      test_timeout,
      http_flag=http_flag,
      compare_crash=False)

  # If we don't get a crash, try enabling http to see if we can get a crash.
  # Skip engine fuzzer jobs (e.g. libFuzzer, AFL) for which http testcase paths
  # are not applicable.
  if (not result.is_crash() and not http_flag and
      not environment.is_engine_fuzzer_job()):
    result_with_http = testcase_manager.test_for_crash_with_retries(
        testcase,
        testcase_file_path,
        test_timeout,
        http_flag=True,
        compare_crash=False)
    if result_with_http.is_crash():
      logs.log('Testcase needs http flag for crash.')
      http_flag = True
      result = result_with_http

  # Refresh our object.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  if not testcase:
    return

  # Set application command line with the correct http flag.
  application_command_line = (
      testcase_manager.get_command_line_for_application(
          testcase_file_path, needs_http=http_flag))

  # Get the crash data.
  crashed = result.is_crash()
  crash_time = result.get_crash_time()
  state = result.get_symbolized_data()
  unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)

  # Get crash info object with minidump info. Also, re-generate unsymbolized
  # stacktrace if needed.
  crash_info, _ = (
      crash_uploader.get_crash_info_and_stacktrace(
          application_command_line, state.crash_stacktrace, gestures))
  if crash_info:
    testcase.minidump_keys = crash_info.store_minidump()

  if not crashed:
    # Could not reproduce the crash.
    log_message = (
        'Testcase didn\'t crash in %d seconds (with retries)' % test_timeout)
    data_handler.update_testcase_comment(
        testcase, data_types.TaskState.FINISHED, log_message)

    # For an unreproducible testcase, retry once on another bot to confirm
    # our results and in case this bot is in a bad state which we didn't catch
    # through our usual means.
    if data_handler.is_first_retry_for_task(testcase):
      testcase.status = 'Unreproducible, retrying'
      testcase.put()

      tasks.add_task('analyze', testcase_id, job_type)
      return

    # In the general case, we will not attempt to symbolize if we do not detect
    # a crash. For user uploads, we should symbolize anyway to provide more
    # information about what might be happening.
    crash_stacktrace_output = utils.get_crash_stacktrace_output(
        application_command_line, state.crash_stacktrace,
        unsymbolized_crash_stacktrace)
    testcase.crash_stacktrace = data_handler.filter_stacktrace(
        crash_stacktrace_output)
    close_invalid_testcase_and_update_status(testcase, metadata,
                                             'Unreproducible')

    # A non-reproducing testcase might still impact production branches.
    # Add the impact task to get that information.
    task_creation.create_impact_task_if_needed(testcase)
    return

  # Update http flag and re-run testcase to store dependencies (for bundled
  # archives only).
  testcase.http_flag = http_flag
  if not store_testcase_dependencies_from_bundled_testcase_archive(
      metadata, testcase, testcase_file_path):
    return

  # Update testcase crash parameters.
  testcase.crash_type = state.crash_type
  testcase.crash_address = state.crash_address
  testcase.crash_state = state.crash_state

  # Try to guess if the bug is security or not.
  security_flag = crash_analyzer.is_security_issue(
      state.crash_stacktrace, state.crash_type, state.crash_address)
  testcase.security_flag = security_flag

  # If it is, guess the severity.
  if security_flag:
    testcase.security_severity = severity_analyzer.get_security_severity(
        state.crash_type, state.crash_stacktrace, job_type, bool(gestures))

  log_message = ('Testcase crashed in %d seconds (r%d)' %
                 (crash_time, testcase.crash_revision))
  data_handler.update_testcase_comment(testcase, data_types.TaskState.FINISHED,
                                       log_message)

  # See if we have to ignore this crash.
  if crash_analyzer.ignore_stacktrace(state.crash_stacktrace):
    close_invalid_testcase_and_update_status(testcase, metadata, 'Irrelevant')
    return

  # Test for reproducibility.
  one_time_crasher_flag = not testcase_manager.test_for_reproducibility(
      testcase_file_path, state.crash_state, security_flag, test_timeout,
      http_flag, gestures)
  testcase.one_time_crasher_flag = one_time_crasher_flag

  # Check to see if this is a duplicate.
  project_name = data_handler.get_project_name(job_type)
  existing_testcase = data_handler.find_testcase(
      project_name, state.crash_type, state.crash_state, security_flag)
  if existing_testcase:
    # If the existing test case is unreproducible and this one is reproducible,
    # replace the existing test case with this one.
    if existing_testcase.one_time_crasher_flag and not one_time_crasher_flag:
      duplicate_testcase = existing_testcase
      original_testcase = testcase
    else:
      duplicate_testcase = testcase
      original_testcase = existing_testcase
      metadata.status = 'Duplicate'
      metadata.duplicate_of = existing_testcase.key.id()

    duplicate_testcase.status = 'Duplicate'
    duplicate_testcase.duplicate_of = original_testcase.key.id()
    duplicate_testcase.put()

  # Set testcase and metadata status if not set already.
  if testcase.status != 'Duplicate':
    testcase.status = 'Processed'
    metadata.status = 'Confirmed'

    # Add new leaks to global blacklist to avoid detecting duplicates.
    # Only add if testcase has a direct leak crash and if it's reproducible.
    if is_lsan_enabled:
      leak_blacklist.add_crash_to_global_blacklist_if_needed(testcase)

  # Add application-specific information to the trace.
  crash_stacktrace_output = utils.get_crash_stacktrace_output(
      application_command_line, state.crash_stacktrace,
      unsymbolized_crash_stacktrace)
  testcase.crash_stacktrace = data_handler.filter_stacktrace(
      crash_stacktrace_output)

  # Update the testcase values.
  testcase.put()

  # Update the upload metadata.
  metadata.security_flag = security_flag
  metadata.put()

  # Create tasks to
  # 1. Minimize testcase (minimize).
  # 2. Find regression range (regression).
  # 3. Find testcase impact on production branches (impact).
  # 4. Check whether testcase is fixed (progression).
  # 5. Get second stacktrace from another job in case of
  #    one-time crashers (stack).
  task_creation.create_tasks(testcase)
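
The duplicate-resolution branch above is easy to misread: a reproducible crash always takes leadership, demoting an existing one-time crasher to a duplicate. A minimal standalone sketch of just that rule, using a hypothetical SimpleTestcase stand-in rather than the real datastore entity:

# Minimal sketch of the duplicate-resolution rule above. SimpleTestcase is a
# hypothetical stand-in for the real datastore entity, not a ClusterFuzz API.
from dataclasses import dataclass
from typing import Optional

@dataclass
class SimpleTestcase:
  key_id: int
  one_time_crasher_flag: bool
  status: str = 'Processed'
  duplicate_of: Optional[int] = None

def resolve_duplicate(new, existing):
  """Return (duplicate, original). A reproducible crash always becomes the
  original, demoting an existing one-time crasher."""
  if existing.one_time_crasher_flag and not new.one_time_crasher_flag:
    duplicate, original = existing, new
  else:
    duplicate, original = new, existing
  duplicate.status = 'Duplicate'
  duplicate.duplicate_of = original.key_id
  return duplicate, original

# The new, reproducible testcase (id=2) demotes the flaky existing one (id=1).
dup, _ = resolve_duplicate(
    SimpleTestcase(2, one_time_crasher_flag=False),
    SimpleTestcase(1, one_time_crasher_flag=True))
assert (dup.key_id, dup.duplicate_of) == (1, 2)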
Example #15
def _check_fixed_for_custom_binary(testcase, job_type, testcase_file_path):
    """Simplified fixed check for test cases using custom binaries."""
    revision = environment.get_value('APP_REVISION')

    # Update comments to reflect bot information and clean up old comments.
    testcase_id = testcase.key.id()
    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED)

    build_manager.setup_build()
    app_path = environment.get_value('APP_PATH')
    if not app_path:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.ERROR,
            'Build setup failed for custom binary')
        build_fail_wait = environment.get_value('FAIL_WAIT')
        tasks.add_task('progression',
                       testcase_id,
                       job_type,
                       wait_time=build_fail_wait)
        return

    test_timeout = environment.get_value('TEST_TIMEOUT', 10)
    result = testcase_manager.test_for_crash_with_retries(
        testcase,
        testcase_file_path,
        test_timeout,
        http_flag=testcase.http_flag)
    _log_output(revision, result)

    # Re-fetch to finalize testcase updates in branches below.
    testcase = data_handler.get_testcase_by_id(testcase.key.id())

    # If this still crashes on the most recent build, it's not fixed. The task
    # will be rescheduled by a cron job and re-attempted eventually.
    if result.is_crash():
        command = testcase_manager.get_command_line_for_application(
            testcase_file_path,
            app_path=app_path,
            needs_http=testcase.http_flag)
        symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
        unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
        stacktrace = utils.get_crash_stacktrace_output(
            command, symbolized_crash_stacktrace,
            unsymbolized_crash_stacktrace)
        testcase.last_tested_crash_stacktrace = data_handler.filter_stacktrace(
            stacktrace)
        _update_completion_metadata(
            testcase,
            revision,
            is_crash=True,
            message='still crashes on latest custom build')
        return

    # Retry once on another bot to confirm our results and in case this bot is in
    # a bad state which we didn't catch through our usual means.
    if data_handler.is_first_retry_for_task(testcase, reset_after_retry=True):
        tasks.add_task('progression', testcase_id, job_type)
        _update_completion_metadata(testcase, revision)
        return

    # The bug is fixed.
    testcase.fixed = 'Yes'
    testcase.open = False
    _update_completion_metadata(testcase,
                                revision,
                                message='fixed on latest custom build')
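
The function above collapses to three outcomes: still crashing, retry on a second bot, or fixed. A pure-function sketch of that ordering, where crashed and is_first_retry are illustrative stand-ins for result.is_crash() and data_handler.is_first_retry_for_task():

# Hedged sketch of the three-way progression outcome for custom binaries.
def custom_binary_progression_outcome(crashed, is_first_retry):
  if crashed:
    return 'still_crashing'        # Will be re-attempted by a cron job later.
  if is_first_retry:
    return 'retry_on_another_bot'  # Confirm on a second bot before closing.
  return 'fixed'                   # No crash and retry already done.

assert custom_binary_progression_outcome(True, True) == 'still_crashing'
assert custom_binary_progression_outcome(False, True) == 'retry_on_another_bot'
assert custom_binary_progression_outcome(False, False) == 'fixed'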
Example #16
def execute_task(testcase_id, job_type):
    """Run a test case with a different job type to see if they reproduce."""
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    if environment.is_engine_fuzzer_job(
            testcase.job_type) != environment.is_engine_fuzzer_job(job_type):
        # We should never reach here, but if we do, bail out as we would
        # otherwise run into exceptions.
        return

    # Use a cloned testcase entity with different fuzz target parameters for
    # a different fuzzing engine.
    original_job_type = testcase.job_type
    testcase = _get_variant_testcase_for_job(testcase, job_type)

    # Setup testcase and its dependencies.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
    if not file_list:
        return

    # Set up a custom or regular build. We explicitly omit the crash revision
    # since we want to test against the latest build here.
    build_manager.setup_build()

    # Check if we have an application path. If not, our build failed to set up
    # correctly.
    if not build_manager.check_app_path():
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(
            testcase,
            data_types.TaskState.ERROR,
            "Build setup failed with job: " + job_type,
        )
        return

    # Disable gestures if we're running on a different platform from that of
    # the original test case.
    use_gestures = testcase.platform == environment.platform().lower()

    # Reproduce the crash.
    app_path = environment.get_value("APP_PATH")
    command = testcase_manager.get_command_line_for_application(
        testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
    test_timeout = environment.get_value("TEST_TIMEOUT", 10)
    revision = environment.get_value("APP_REVISION")
    result = testcase_manager.test_for_crash_with_retries(
        testcase,
        testcase_file_path,
        test_timeout,
        http_flag=testcase.http_flag,
        use_gestures=use_gestures,
        compare_crash=False,
    )

    if result.is_crash() and not result.should_ignore():
        crash_state = result.get_state()
        crash_type = result.get_type()
        security_flag = result.is_security_issue()

        gestures = testcase.gestures if use_gestures else None
        one_time_crasher_flag = not testcase_manager.test_for_reproducibility(
            testcase.fuzzer_name,
            testcase.actual_fuzzer_name(),
            testcase_file_path,
            crash_state,
            security_flag,
            test_timeout,
            testcase.http_flag,
            gestures,
        )
        if one_time_crasher_flag:
            status = data_types.TestcaseVariantStatus.FLAKY
        else:
            status = data_types.TestcaseVariantStatus.REPRODUCIBLE

        crash_comparer = CrashComparer(crash_state, testcase.crash_state)
        is_similar = (crash_comparer.is_similar()
                      and security_flag == testcase.security_flag)

        unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
        symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
        crash_stacktrace_output = utils.get_crash_stacktrace_output(
            command, symbolized_crash_stacktrace,
            unsymbolized_crash_stacktrace)
    else:
        status = data_types.TestcaseVariantStatus.UNREPRODUCIBLE
        is_similar = False
        crash_type = None
        crash_state = None
        security_flag = False
        crash_stacktrace_output = "No crash occurred."

    if original_job_type == job_type:
        # This case happens when someone clicks the 'Update last tested
        # stacktrace using trunk build' button.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        testcase.last_tested_crash_stacktrace = data_handler.filter_stacktrace(
            crash_stacktrace_output)
        testcase.set_metadata("last_tested_crash_revision",
                              revision,
                              update_testcase=True)
    else:
        # Regular case of variant analysis.
        variant = data_handler.get_testcase_variant(testcase_id, job_type)
        variant.status = status
        variant.revision = revision
        variant.crash_type = crash_type
        variant.crash_state = crash_state
        variant.security_flag = security_flag
        variant.is_similar = is_similar
        # Explicitly skipping crash stacktrace for now as it makes entities
        # larger and we plan to use only crash parameters in the UI.
        variant.put()
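
A variant is only marked similar when both the crash state and the security flag agree. The real CrashComparer performs fuzzy frame matching; the sketch below substitutes a naive normalized comparison just to show where both conditions enter the decision:

# Simplified stand-in for the CrashComparer-based similarity check above.
def is_similar_variant(variant_state, variant_security,
                       original_state, original_security):
  def normalize(state):
    return [f.strip() for f in (state or '').splitlines() if f.strip()]
  return (normalize(variant_state) == normalize(original_state) and
          variant_security == original_security)

assert is_similar_variant('a\nb\n', True, 'a\nb', True)
assert not is_similar_variant('a\nb', True, 'a\nb', False)  # Security differs.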
Example #17
def execute_task(testcase_id, job_type):
  """Attempt to minimize a given testcase."""
  # Get deadline to finish this task.
  deadline = tasks.get_task_completion_deadline()

  # Locate the testcase associated with the id.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  if not testcase:
    return

  # Update comments to reflect bot information.
  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

  # Setup testcase and its dependencies.
  file_list, input_directory, testcase_file_path = setup.setup_testcase(
      testcase)
  if not file_list:
    return

  # Initialize variables.
  max_timeout = environment.get_value('TEST_TIMEOUT', 10)
  app_arguments = environment.get_value('APP_ARGS')

  # Set up a custom or regular build based on revision.
  last_tested_crash_revision = testcase.get_metadata(
      'last_tested_crash_revision')

  crash_revision = last_tested_crash_revision or testcase.crash_revision
  build_manager.setup_build(crash_revision)

  # Check if we have an application path. If not, our build failed
  # to set up correctly.
  app_path = environment.get_value('APP_PATH')
  if not app_path:
    logs.log_error('Unable to setup build for minimization.')
    build_fail_wait = environment.get_value('FAIL_WAIT')

    if environment.get_value('ORIGINAL_JOB_NAME'):
      _skip_minimization(testcase, 'Failed to setup build for overridden job.')
    else:
      # Only recreate the task if this isn't an overridden job. It's possible
      # that a revision exists for the original job, but doesn't exist for the
      # overridden job.
      tasks.add_task(
          'minimize', testcase_id, job_type, wait_time=build_fail_wait)

    return

  if environment.is_libfuzzer_job():
    do_libfuzzer_minimization(testcase, testcase_file_path)
    return

  max_threads = utils.maximum_parallel_processes_allowed()

  # Prepare the test case runner.
  crash_retries = environment.get_value('CRASH_RETRIES')
  warmup_timeout = environment.get_value('WARMUP_TIMEOUT')
  required_arguments = environment.get_value('REQUIRED_APP_ARGS', '')

  # Add any testcase-specific required arguments if needed.
  additional_required_arguments = testcase.get_metadata(
      'additional_required_app_args')
  if additional_required_arguments:
    required_arguments = '%s %s' % (required_arguments,
                                    additional_required_arguments)

  test_runner = TestRunner(testcase, testcase_file_path, file_list,
                           input_directory, app_arguments, required_arguments,
                           max_threads, deadline)

  # Verify the crash with a long timeout.
  warmup_crash_occurred = False
  result = test_runner.run(timeout=warmup_timeout, log_command=True)
  if result.is_crash():
    warmup_crash_occurred = True
    logs.log('Warmup crash occurred in %d seconds.' % result.crash_time)

  saved_unsymbolized_crash_state, flaky_stack, crash_times = (
      check_for_initial_crash(test_runner, crash_retries, testcase))

  # If the warmup crash occurred but we couldn't reproduce it with multiple
  # processes running in parallel, try to minimize single-threaded.
  if (len(crash_times) < tests.REPRODUCIBILITY_FACTOR * crash_retries and
      warmup_crash_occurred and max_threads > 1):
    logs.log('Attempting to continue single-threaded.')

    max_threads = 1
    test_runner = TestRunner(testcase, testcase_file_path, file_list,
                             input_directory, app_arguments, required_arguments,
                             max_threads, deadline)

    saved_unsymbolized_crash_state, flaky_stack, crash_times = (
        check_for_initial_crash(test_runner, crash_retries, testcase))

  if flaky_stack:
    testcase.flaky_stack = flaky_stack
    testcase.put()

  if len(crash_times) < tests.REPRODUCIBILITY_FACTOR * crash_retries:
    if not crash_times:
      # We didn't crash at all, so try again. This might be a legitimately
      # unreproducible test case, so it will get marked as such after being
      # retried on other bots.
      testcase = data_handler.get_testcase_by_id(testcase_id)
      data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                           'Unable to reproduce crash')
      task_creation.mark_unreproducible_if_flaky(testcase, True)
    else:
      # We reproduced this crash at least once. It's too flaky to minimize, but
      # maybe we'll have more luck in the other jobs.
      testcase = data_handler.get_testcase_by_id(testcase_id)
      testcase.minimized_keys = 'NA'
      error_message = (
          'Unable to reproduce crash reliably, skipping '
          'minimization (crashed %d/%d)' % (len(crash_times), crash_retries))
      data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                           error_message)
      create_additional_tasks(testcase)
    return

  # If we've made it this far, the test case appears to be reproducible. Clear
  # metadata from previous runs in case it had been marked as potentially flaky.
  task_creation.mark_unreproducible_if_flaky(testcase, False)

  test_runner.set_test_expectations(testcase.security_flag, flaky_stack,
                                    saved_unsymbolized_crash_state)

  # Use the max crash time unless this would be greater than the max timeout.
  test_timeout = min(max(crash_times), max_timeout) + 1
  logs.log('Using timeout %d (was %d)' % (test_timeout, max_timeout))
  test_runner.timeout = test_timeout

  logs.log('Starting minimization.')

  if should_attempt_phase(testcase, MinimizationPhase.GESTURES):
    gestures = minimize_gestures(test_runner, testcase)

    # We can't call check_deadline_exceeded_and_store_partial_minimized_testcase
    # at this point because we do not have a test case to store.
    testcase = data_handler.get_testcase_by_id(testcase.key.id())

    if testcase.security_flag and len(testcase.gestures) != len(gestures):
      # Re-run security severity analysis since gestures affect the severity.
      testcase.security_severity = severity_analyzer.get_security_severity(
          testcase.crash_type, data_handler.get_stacktrace(testcase), job_type,
          bool(gestures))

    testcase.gestures = gestures
    testcase.set_metadata('minimization_phase', MinimizationPhase.MAIN_FILE)

    if time.time() > test_runner.deadline:
      tasks.add_task('minimize', testcase.key.id(), job_type)
      return

  # Minimize the main file.
  data = utils.get_file_contents_with_fatal_error_on_failure(testcase_file_path)
  if should_attempt_phase(testcase, MinimizationPhase.MAIN_FILE):
    data = minimize_main_file(test_runner, testcase_file_path, data)

    if check_deadline_exceeded_and_store_partial_minimized_testcase(
        deadline, testcase_id, job_type, input_directory, file_list, data,
        testcase_file_path):
      return

    testcase.set_metadata('minimization_phase', MinimizationPhase.FILE_LIST)

  # Minimize the file list.
  if should_attempt_phase(testcase, MinimizationPhase.FILE_LIST):
    if environment.get_value('MINIMIZE_FILE_LIST', True):
      file_list = minimize_file_list(test_runner, file_list, input_directory,
                                     testcase_file_path)

      if check_deadline_exceeded_and_store_partial_minimized_testcase(
          deadline, testcase_id, job_type, input_directory, file_list, data,
          testcase_file_path):
        return
    else:
      logs.log('Skipping minimization of file list.')

    testcase.set_metadata('minimization_phase', MinimizationPhase.RESOURCES)

  # Minimize any files remaining in the file list.
  if should_attempt_phase(testcase, MinimizationPhase.RESOURCES):
    if environment.get_value('MINIMIZE_RESOURCES', True):
      for dependency in file_list:
        minimize_resource(test_runner, dependency, input_directory,
                          testcase_file_path)

        if check_deadline_exceeded_and_store_partial_minimized_testcase(
            deadline, testcase_id, job_type, input_directory, file_list, data,
            testcase_file_path):
          return
    else:
      logs.log('Skipping minimization of resources.')

    testcase.set_metadata('minimization_phase', MinimizationPhase.ARGUMENTS)

  if should_attempt_phase(testcase, MinimizationPhase.ARGUMENTS):
    app_arguments = minimize_arguments(test_runner, app_arguments)

    # Arguments must be stored here in case we time out below.
    testcase.minimized_arguments = app_arguments
    testcase.put()

    if check_deadline_exceeded_and_store_partial_minimized_testcase(
        deadline, testcase_id, job_type, input_directory, file_list, data,
        testcase_file_path):
      return

  command = tests.get_command_line_for_application(
      testcase_file_path, app_args=app_arguments, needs_http=testcase.http_flag)
  last_crash_result = test_runner.last_failing_result

  store_minimized_testcase(testcase, input_directory, file_list, data,
                           testcase_file_path)
  finalize_testcase(
      testcase_id, command, last_crash_result, flaky_stack=flaky_stack)
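
One subtle line above is the timeout selection: take the slowest observed crash time, clamp it to the configured maximum, and add a one-second margin. A tiny sketch under those assumptions:

# Sketch of the minimization timeout choice above.
def choose_minimization_timeout(crash_times, max_timeout):
  # The slowest observed crash wins, but never exceed the configured maximum;
  # the +1 leaves a small safety margin.
  return min(max(crash_times), max_timeout) + 1

assert choose_minimization_timeout([3, 7, 5], max_timeout=10) == 8
assert choose_minimization_timeout([30], max_timeout=10) == 11  # Clamped.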
Example #18
def execute_task(testcase_id, job_type):
    """Run a test case with a second job type to generate a second stack trace."""
    # Locate the testcase associated with the id.
    testcase = data_handler.get_testcase_by_id(testcase_id)

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED)

    # Setup testcase and its dependencies.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase)
    if not file_list:
        return

    # Initialize timeout values.
    test_timeout = environment.get_value('TEST_TIMEOUT', 10)

    # Set up a custom or regular build. We explicitly omit the crash revision
    # since we want to test against the latest build here.
    build_manager.setup_build()

    # Check if we have an application path. If not, our build failed to set up
    # correctly.
    app_path = environment.get_value('APP_PATH')
    if not app_path:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             'Build setup failed')
        return

    # TSAN tool settings (if the tool is used).
    if environment.tool_matches('TSAN', job_type):
        environment.set_tsan_max_history_size()

    command = tests.get_command_line_for_application(
        testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
    result = tests.test_for_crash_with_retries(testcase,
                                               testcase_file_path,
                                               test_timeout,
                                               http_flag=testcase.http_flag,
                                               compare_crash=False)

    # Get revision information.
    revision = environment.get_value('APP_REVISION')

    # If a crash occurs, then we add the second stacktrace information.
    if result.is_crash():
        state = result.get_symbolized_data()
        security_flag = result.is_security_issue()
        one_time_crasher_flag = not tests.test_for_reproducibility(
            testcase_file_path, state.crash_state, security_flag, test_timeout,
            testcase.http_flag, testcase.gestures)

        # Attach a header to indicate the reproducibility of the crash.
        if one_time_crasher_flag:
            crash_stacktrace_header = 'Unreliable'
        else:
            crash_stacktrace_header = 'Fully reproducible'
        crash_stacktrace_header += (' crash found using %s job.\n\n' %
                                    job_type)

        unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
        stacktrace = utils.get_crash_stacktrace_output(
            command, state.crash_stacktrace, unsymbolized_crash_stacktrace)

        crash_stacktrace = data_handler.filter_stacktrace(
            '%s%s' % (crash_stacktrace_header, stacktrace))
    else:
        crash_stacktrace = 'No crash found using %s job.' % job_type

    # Decide which stacktrace field to update with this stacktrace.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if testcase.last_tested_crash_stacktrace == 'Pending':
        # This case happens when someone clicks the 'Update last tested
        # stacktrace using trunk build' button.
        testcase.last_tested_crash_stacktrace = crash_stacktrace
        testcase.set_metadata('last_tested_crash_revision',
                              revision,
                              update_testcase=False)
    else:
        # Default case when someone defines |SECOND_STACK_JOB_TYPE| in the job
        # definition. This helps test an unreproducible crash with a different
        # memory debugging tool to get a second stacktrace (e.g. running TSAN
        # on a flaky crash found in an ASAN build).
        testcase.second_crash_stacktrace = crash_stacktrace
        testcase.set_metadata('second_crash_stacktrace_revision',
                              revision,
                              update_testcase=False)

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.FINISHED)
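
The stacktrace header encodes reproducibility in its first line. A small sketch of the composition (the job_type value below is illustrative):

# Sketch of the header composition used above.
def second_stack_header(one_time_crasher_flag, job_type):
  prefix = 'Unreliable' if one_time_crasher_flag else 'Fully reproducible'
  return '%s crash found using %s job.\n\n' % (prefix, job_type)

assert second_stack_header(True, 'linux_tsan').startswith('Unreliable')
assert second_stack_header(False, 'linux_tsan').startswith('Fully reproducible')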
Example #19
def find_fixed_range(testcase_id, job_type):
    """Attempt to find the revision range where a testcase was fixed."""
    deadline = tasks.get_task_completion_deadline()
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    if testcase.fixed:
        logs.log_error('Fixed range is already set as %s, skip.' %
                       testcase.fixed)
        return

    # Setup testcase and its dependencies.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase)
    if not file_list:
        return

    # Set a flag to indicate we are running a progression task. This shows a
    # pending status on the testcase report page and avoids conflicting
    # testcase updates by the triage cron.
    testcase.set_metadata('progression_pending', True)

    # Custom binaries are handled as special cases.
    if build_manager.is_custom_binary():
        _check_fixed_for_custom_binary(testcase, job_type, testcase_file_path)
        return

    release_build_bucket_path = environment.get_value(
        'RELEASE_BUILD_BUCKET_PATH')
    revision_list = build_manager.get_revisions_list(release_build_bucket_path,
                                                     testcase=testcase)
    if not revision_list:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             'Failed to fetch revision list')
        tasks.add_task('progression', testcase_id, job_type)
        return

    # Use min_index and max_index to mark the start and end of the revision
    # list used for bisecting the progression range. Set the start to the
    # revision where we noticed the crash and the end to the trunk revision.
    # Also, reuse the min/max from a past run if it timed out.
    min_revision = testcase.get_metadata('last_progression_min')
    max_revision = testcase.get_metadata('last_progression_max')
    last_tested_revision = testcase.get_metadata('last_tested_crash_revision')
    known_crash_revision = last_tested_revision or testcase.crash_revision
    if not min_revision:
        min_revision = known_crash_revision
    if not max_revision:
        max_revision = revisions.get_last_revision_in_list(revision_list)

    min_index = revisions.find_min_revision_index(revision_list, min_revision)
    if min_index is None:
        raise errors.BuildNotFoundError(min_revision, job_type)
    max_index = revisions.find_max_revision_index(revision_list, max_revision)
    if max_index is None:
        raise errors.BuildNotFoundError(max_revision, job_type)

    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED,
                                         'r%d' % max_revision)

    # Check to see if this testcase is still crashing now. If it is, then just
    # bail out.
    result = _testcase_reproduces_in_revision(testcase, testcase_file_path,
                                              job_type, max_revision)
    if result.is_crash():
        logs.log('Found crash with same signature on latest revision r%d.' %
                 max_revision)
        app_path = environment.get_value('APP_PATH')
        command = testcase_manager.get_command_line_for_application(
            testcase_file_path,
            app_path=app_path,
            needs_http=testcase.http_flag)
        symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
        unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
        stacktrace = utils.get_crash_stacktrace_output(
            command, symbolized_crash_stacktrace,
            unsymbolized_crash_stacktrace)
        testcase = data_handler.get_testcase_by_id(testcase_id)
        testcase.last_tested_crash_stacktrace = data_handler.filter_stacktrace(
            stacktrace)
        _update_completion_metadata(
            testcase,
            max_revision,
            is_crash=True,
            message='still crashes on latest revision r%s' % max_revision)

        # Since we've verified that the test case is still crashing, clear out any
        # metadata indicating potential flake from previous runs.
        task_creation.mark_unreproducible_if_flaky(testcase, False)

        # For the chromium project, save the latest crash information for later
        # upload to chromecrash/.
        state = result.get_symbolized_data()
        crash_uploader.save_crash_info_if_needed(testcase_id, max_revision,
                                                 job_type, state.crash_type,
                                                 state.crash_address,
                                                 state.frames)
        return

    # Don't burden the NFS server with caching these random builds.
    environment.set_value('CACHE_STORE', False)

    # Verify that we do crash in the min revision. This is assumed to be true
    # while we are doing the bisect.
    result = _testcase_reproduces_in_revision(testcase, testcase_file_path,
                                              job_type, min_revision)
    if result and not result.is_crash():
        testcase = data_handler.get_testcase_by_id(testcase_id)

        # Retry once on another bot to confirm our result.
        if data_handler.is_first_retry_for_task(testcase,
                                                reset_after_retry=True):
            tasks.add_task('progression', testcase_id, job_type)
            error_message = (
                'Known crash revision %d did not crash, will retry on another bot to '
                'confirm result' % known_crash_revision)
            data_handler.update_testcase_comment(testcase,
                                                 data_types.TaskState.ERROR,
                                                 error_message)
            _update_completion_metadata(testcase, max_revision)
            return

        _clear_progression_pending(testcase)
        error_message = ('Known crash revision %d did not crash' %
                         known_crash_revision)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)
        task_creation.mark_unreproducible_if_flaky(testcase, True)
        return

    # Start a binary search for the boundary between the last crashing and the
    # first non-crashing revision. At this point, we know that we crash in
    # min_revision and do not crash in max_revision.
    while time.time() < deadline:
        min_revision = revision_list[min_index]
        max_revision = revision_list[max_index]

        # If the min and max revisions are one apart this is as much as we can
        # narrow the range.
        if max_index - min_index == 1:
            _save_fixed_range(testcase_id, min_revision, max_revision)
            return

        # Test the middle revision of our range.
        middle_index = (min_index + max_index) // 2
        middle_revision = revision_list[middle_index]

        testcase = data_handler.get_testcase_by_id(testcase_id)
        log_message = 'Testing r%d (current range %d:%d)' % (
            middle_revision, min_revision, max_revision)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.WIP,
                                             log_message)

        try:
            result = _testcase_reproduces_in_revision(testcase,
                                                      testcase_file_path,
                                                      job_type,
                                                      middle_revision)
        except errors.BadBuildError:
            # Skip this revision.
            del revision_list[middle_index]
            max_index -= 1
            continue

        if result.is_crash():
            min_index = middle_index
        else:
            max_index = middle_index

        _save_current_fixed_range_indices(testcase_id,
                                          revision_list[min_index],
                                          revision_list[max_index])

    # If we've broken out of the loop, we've exceeded the deadline. Recreate the
    # task to pick up where we left off.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    error_message = ('Timed out, current range r%d:r%d' %
                     (revision_list[min_index], revision_list[max_index]))
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         error_message)
    tasks.add_task('progression', testcase_id, job_type)
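
The loop above is a binary search over the revision list for the boundary between the last crashing and first fixed build, with bad builds dropped from the list as they are encountered. A self-contained sketch, where reproduces stands in for _testcase_reproduces_in_revision and BadBuild for errors.BadBuildError:

# Hedged sketch of the fixed-range bisect above.
class BadBuild(Exception):
  pass

def bisect_fixed_range(revision_list, reproduces):
  """Assuming a crash at index 0 and no crash at the last index, return the
  (last_crashing, first_fixed) revision pair."""
  revisions = list(revision_list)
  min_index, max_index = 0, len(revisions) - 1
  while max_index - min_index > 1:
    middle_index = (min_index + max_index) // 2
    try:
      crashed = reproduces(revisions[middle_index])
    except BadBuild:
      del revisions[middle_index]  # Skip the unusable build, shrink the range.
      max_index -= 1
      continue
    if crashed:
      min_index = middle_index
    else:
      max_index = middle_index
  return revisions[min_index], revisions[max_index]

# Example: the crash stops reproducing at r104.
assert bisect_fixed_range(range(100, 110), lambda r: r < 104) == (103, 104)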
Example #20
    def do_post(self):
        """Upload a testcase."""
        email = helpers.get_user_email()
        testcase_id = request.get('testcaseId')
        uploaded_file = self.get_upload()
        if testcase_id and not uploaded_file:
            testcase = helpers.get_testcase(testcase_id)
            if not access.can_user_access_testcase(testcase):
                raise helpers.AccessDeniedException()

            # Use minimized testcase for upload (if available).
            key = (testcase.minimized_keys if testcase.minimized_keys
                   and testcase.minimized_keys != 'NA' else
                   testcase.fuzzed_keys)

            uploaded_file = blobs.get_blob_info(key)

            # Extract filename part from blob.
            uploaded_file.filename = os.path.basename(
                uploaded_file.filename.replace('\\', os.sep))

        job_type = request.get('job')
        if not job_type:
            raise helpers.EarlyExitException('Missing job name.', 400)

        job = data_types.Job.query(data_types.Job.name == job_type).get()
        if not job:
            raise helpers.EarlyExitException('Invalid job name.', 400)

        fuzzer_name = request.get('fuzzer')
        job_type_lowercase = job_type.lower()
        if 'libfuzzer' in job_type_lowercase:
            fuzzer_name = 'libFuzzer'
        elif 'afl' in job_type_lowercase:
            fuzzer_name = 'afl'
        elif 'honggfuzz' in job_type_lowercase:
            fuzzer_name = 'honggfuzz'

        is_engine_job = fuzzer_name and environment.is_engine_fuzzer_job(
            job_type)
        target_name = request.get('target')
        if not is_engine_job and target_name:
            raise helpers.EarlyExitException(
                'Target name is not applicable to non-engine jobs (AFL, libFuzzer).',
                400)

        if is_engine_job and not target_name:
            raise helpers.EarlyExitException(
                'Missing target name for engine job (AFL, libFuzzer).', 400)

        if (target_name
                and not data_types.Fuzzer.VALID_NAME_REGEX.match(target_name)):
            raise helpers.EarlyExitException('Invalid target name.', 400)

        fully_qualified_fuzzer_name = ''
        if is_engine_job and target_name:
            if job.is_external():
                # External jobs don't run fuzz_task and so never set FuzzTarget
                # entities there. Set it here instead.
                fuzz_target = (data_handler.record_fuzz_target(
                    fuzzer_name, target_name, job_type))
                fully_qualified_fuzzer_name = fuzz_target.fully_qualified_name(
                )
                target_name = fuzz_target.binary
            else:
                fully_qualified_fuzzer_name, target_name = find_fuzz_target(
                    fuzzer_name, target_name, job_type)

        if (not access.has_access(need_privileged_access=False,
                                  job_type=job_type,
                                  fuzzer_name=(fully_qualified_fuzzer_name
                                               or fuzzer_name))
                and not _is_uploader_allowed(email)):
            raise helpers.AccessDeniedException()

        multiple_testcases = bool(request.get('multiple'))
        http_flag = bool(request.get('http'))
        high_end_job = bool(request.get('highEnd'))
        bug_information = request.get('issue')
        crash_revision = request.get('revision')
        timeout = request.get('timeout')
        retries = request.get('retries')
        bug_summary_update_flag = bool(request.get('updateIssue'))
        quiet_flag = bool(request.get('quiet'))
        additional_arguments = request.get('args')
        app_launch_command = request.get('cmd')
        platform_id = request.get('platform')
        issue_labels = request.get('issue_labels')
        gestures = request.get('gestures') or '[]'
        stacktrace = request.get('stacktrace')

        crash_data = None
        if job.is_external():
            if not stacktrace:
                raise helpers.EarlyExitException(
                    'Stacktrace required for external jobs.', 400)

            if not crash_revision:
                raise helpers.EarlyExitException(
                    'Revision required for external jobs.', 400)

            crash_data = stack_analyzer.get_crash_data(
                stacktrace,
                fuzz_target=target_name,
                symbolize_flag=False,
                already_symbolized=True,
                detect_ooms_and_hangs=True)
        elif stacktrace:
            raise helpers.EarlyExitException(
                'Should not specify stacktrace for non-external jobs.', 400)

        testcase_metadata = request.get('metadata', {})
        if testcase_metadata:
            try:
                testcase_metadata = json.loads(testcase_metadata)
            except Exception as e:
                raise helpers.EarlyExitException('Invalid metadata JSON.',
                                                 400) from e
            if not isinstance(testcase_metadata, dict):
                raise helpers.EarlyExitException(
                    'Metadata is not a JSON object.', 400)
        if issue_labels:
            testcase_metadata['issue_labels'] = issue_labels

        try:
            gestures = ast.literal_eval(gestures)
        except Exception as e:
            raise helpers.EarlyExitException('Failed to parse gestures.',
                                             400) from e

        archive_state = 0
        bundled = False
        file_path_input = ''

        # Certain modifications, such as app launch commands and issue updates,
        # are only allowed for privileged users.
        privileged_user = access.has_access(need_privileged_access=True)
        if not privileged_user:
            if bug_information or bug_summary_update_flag:
                raise helpers.EarlyExitException(
                    'You are not privileged to update existing issues.', 400)

            need_privileged_access = utils.string_is_true(
                data_handler.get_value_from_job_definition(
                    job_type, 'PRIVILEGED_ACCESS'))
            if need_privileged_access:
                raise helpers.EarlyExitException(
                    'You are not privileged to run this job type.', 400)

            if app_launch_command:
                raise helpers.EarlyExitException(
                    'You are not privileged to run arbitrary launch commands.',
                    400)

            if (testcase_metadata
                    and not _allow_unprivileged_metadata(testcase_metadata)):
                raise helpers.EarlyExitException(
                    'You are not privileged to set testcase metadata.', 400)

            if additional_arguments:
                raise helpers.EarlyExitException(
                    'You are not privileged to add command-line arguments.',
                    400)

            if gestures:
                raise helpers.EarlyExitException(
                    'You are not privileged to run arbitrary gestures.', 400)

        # TODO(aarya): Remove once AFL is migrated to engine pipeline.
        if target_name:
            additional_arguments = '%TESTCASE%'

        if crash_revision and crash_revision.isdigit():
            crash_revision = int(crash_revision)
        else:
            crash_revision = 0

        if bug_information == '0':  # Auto-recover from this bad input.
            bug_information = None
        if bug_information and not bug_information.isdigit():
            raise helpers.EarlyExitException('Bug is not a number.', 400)

        if not timeout:
            timeout = 0
        elif not timeout.isdigit() or timeout == '0':
            raise helpers.EarlyExitException(
                'Testcase timeout must be a number greater than 0.', 400)
        else:
            timeout = int(timeout)
            if timeout > 120:
                raise helpers.EarlyExitException(
                    'Testcase timeout may not be greater than 120 seconds.',
                    400)

        if retries:
            if retries.isdigit():
                retries = int(retries)
            else:
                retries = None

            if retries is None or retries > MAX_RETRIES:
                raise helpers.EarlyExitException(
                    'Testcase retries must be a number less than %d.' %
                    MAX_RETRIES, 400)
        else:
            retries = None

        job_queue = tasks.queue_for_job(job_type, is_high_end=high_end_job)

        if uploaded_file is not None:
            filename = ''.join([
                x for x in uploaded_file.filename
                if x not in ' ;/?:@&=+$,{}|<>()\\'
            ])
            key = str(uploaded_file.key())
            if archive.is_archive(filename):
                archive_state = data_types.ArchiveStatus.FUZZED
            if archive_state:
                if multiple_testcases:
                    # Create a job to unpack an archive.
                    metadata = data_types.BundledArchiveMetadata()
                    metadata.blobstore_key = key
                    metadata.timeout = timeout
                    metadata.job_queue = job_queue
                    metadata.job_type = job_type
                    metadata.http_flag = http_flag
                    metadata.archive_filename = filename
                    metadata.uploader_email = email
                    metadata.gestures = gestures
                    metadata.crash_revision = crash_revision
                    metadata.additional_arguments = additional_arguments
                    metadata.bug_information = bug_information
                    metadata.platform_id = platform_id
                    metadata.app_launch_command = app_launch_command
                    metadata.fuzzer_name = fuzzer_name
                    metadata.overridden_fuzzer_name = fully_qualified_fuzzer_name
                    metadata.fuzzer_binary_name = target_name
                    metadata.put()

                    tasks.add_task('unpack',
                                   str(metadata.key.id()),
                                   job_type,
                                   queue=tasks.queue_for_job(job_type))

                    # Create a testcase metadata object to show the user their upload.
                    upload_metadata = data_types.TestcaseUploadMetadata()
                    upload_metadata.timestamp = datetime.datetime.utcnow()
                    upload_metadata.filename = filename
                    upload_metadata.blobstore_key = key
                    upload_metadata.original_blobstore_key = key
                    upload_metadata.status = 'Pending'
                    upload_metadata.bundled = True
                    upload_metadata.uploader_email = email
                    upload_metadata.retries = retries
                    upload_metadata.bug_summary_update_flag = bug_summary_update_flag
                    upload_metadata.quiet_flag = quiet_flag
                    upload_metadata.additional_metadata_string = json.dumps(
                        testcase_metadata)
                    upload_metadata.bug_information = bug_information
                    upload_metadata.put()

                    helpers.log('Uploaded multiple testcases.',
                                helpers.VIEW_OPERATION)
                    return self.render_json({'multiple': True})

                file_path_input = guess_input_file(uploaded_file, filename)
                if not file_path_input:
                    raise helpers.EarlyExitException(
                        'Unable to detect which file to launch. The main '
                        "file's name must contain one of %s." %
                        str(RUN_FILE_PATTERNS), 400)

        else:
            raise helpers.EarlyExitException('Please select a file to upload.',
                                             400)

        testcase_id = data_handler.create_user_uploaded_testcase(
            key,
            key,
            archive_state,
            filename,
            file_path_input,
            timeout,
            job,
            job_queue,
            http_flag,
            gestures,
            additional_arguments,
            bug_information,
            crash_revision,
            email,
            platform_id,
            app_launch_command,
            fuzzer_name,
            fully_qualified_fuzzer_name,
            target_name,
            bundled,
            retries,
            bug_summary_update_flag,
            quiet_flag,
            additional_metadata=testcase_metadata,
            crash_data=crash_data)

        if not quiet_flag:
            testcase = data_handler.get_testcase_by_id(testcase_id)
            issue = issue_tracker_utils.get_issue_for_testcase(testcase)
            if issue:
                report_url = data_handler.TESTCASE_REPORT_URL.format(
                    domain=data_handler.get_domain(), testcase_id=testcase_id)

                comment = ('ClusterFuzz is analyzing your testcase. '
                           'Developers can follow the progress at %s.' %
                           report_url)
                issue.save(new_comment=comment)

        helpers.log('Uploaded testcase %s' % testcase_id,
                    helpers.VIEW_OPERATION)
        return self.render_json({'id': '%s' % testcase_id})
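
The numeric validation in the handler above is easy to get wrong at the edges. A pure sketch of the same rules, with ValueError standing in for helpers.EarlyExitException and an illustrative MAX_RETRIES value:

# Hedged sketch of the upload form's timeout/retries validation.
MAX_RETRIES = 5

def validate_timeout(timeout):
  if not timeout:
    return 0  # Unset means "use the default".
  if not timeout.isdigit() or timeout == '0':
    raise ValueError('Testcase timeout must be a number greater than 0.')
  value = int(timeout)
  if value > 120:
    raise ValueError('Testcase timeout may not be greater than 120 seconds.')
  return value

def validate_retries(retries):
  if not retries:
    return None
  if not retries.isdigit() or int(retries) > MAX_RETRIES:
    raise ValueError(
        'Testcase retries must be a number less than %d.' % MAX_RETRIES)
  return int(retries)

assert validate_timeout('') == 0 and validate_timeout('60') == 60
assert validate_retries('3') == 3 and validate_retries('') is None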
Example #21
def execute_task(testcase_id, job_type):
    """Attempt to find if the testcase affects release branches on Chromium."""
    # This shouldn't ever get scheduled, but check just in case.
    if not utils.is_chromium():
        return

    # Locate the testcase associated with the id.
    testcase = data_handler.get_testcase_by_id(testcase_id)

    # If this testcase is fixed, we should no longer be doing impact testing.
    if testcase.fixed and testcase.is_impact_set_flag:
        return

    # For testcases with status unreproducible, we only do impact analysis
    # once.
    if testcase.is_status_unreproducible() and testcase.is_impact_set_flag:
        return

    # Update comments only after checking the above bailout conditions.
    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED)

    # This task is not applicable to unreproducible testcases.
    if testcase.one_time_crasher_flag:
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.ERROR,
            'Not applicable for unreproducible testcases')
        return

    # This task is not applicable for custom binaries. We cannot remove the
    # creation of such tasks specifically for custom binary testcases in the
    # cron, so exit gracefully.
    if build_manager.is_custom_binary():
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.FINISHED,
            'Not applicable for custom binaries')
        return

    # If we don't have a stable or beta build URL pattern, we try to use the
    # build information URL to make a guess.
    if not build_manager.has_production_builds():
        if not testcase.regression:
            data_handler.update_testcase_comment(
                testcase, data_types.TaskState.FINISHED,
                'Cannot run without regression range, will re-run once regression '
                'task finishes')
            return

        impacts = get_impacts_from_url(testcase.regression, testcase.job_type)
        testcase = data_handler.get_testcase_by_id(testcase_id)
        set_testcase_with_impacts(testcase, impacts)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.FINISHED)
        return

    # Setup testcase and its dependencies.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase)
    if not file_list:
        return

    # Setup stable, beta builds and get impact and crash stacktrace.
    try:
        impacts = get_impacts_on_prod_builds(testcase, testcase_file_path)
    except BuildFailedException as error:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error.message)
        tasks.add_task('impact',
                       testcase_id,
                       job_type,
                       wait_time=environment.get_value('FAIL_WAIT'))
        return

    testcase = data_handler.get_testcase_by_id(testcase_id)
    set_testcase_with_impacts(testcase, impacts)

    # Set the stacktrace in case we have an unreproducible crash on trunk,
    # but it crashes on one of the production builds.
    if testcase.is_status_unreproducible() and impacts.get_extra_trace():
        testcase.crash_stacktrace = data_handler.filter_stacktrace(
            '%s\n\n%s' %
            (data_handler.get_stacktrace(testcase), impacts.get_extra_trace()))

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.FINISHED)
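
The impact task bails out early in four distinct cases before doing any work. A sketch of that ordering as a pure decision function (field names mirror the testcase entity, but this is not the ClusterFuzz API):

# Hedged sketch of the impact task's bailout ordering.
def impact_task_action(fixed, unreproducible_status, impact_set,
                       one_time_crasher, custom_binary):
  if fixed and impact_set:
    return 'skip'                  # Fixed: no further impact runs needed.
  if unreproducible_status and impact_set:
    return 'skip'                  # Unreproducible: impact analysis runs once.
  if one_time_crasher:
    return 'error_not_applicable'
  if custom_binary:
    return 'finished_not_applicable'
  return 'analyze_impact'

assert impact_task_action(False, False, False, False, False) == 'analyze_impact'
assert impact_task_action(True, False, True, False, False) == 'skip'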
Example #22
  def test_external_upload_uaf(self):
    """Test external upload (uaf)."""
    stacktrace = self._read_test_data('uaf.txt')
    response = self.app.post(
        '/',
        params={
            'job': 'libfuzzer_proj_external',
            'target': 'target',
            'stacktrace': stacktrace,
            'revision': 1337,
        },
        upload_files=[('file', 'file', b'contents')])

    self.assertDictEqual({
        'id': '2',
        'uploadUrl': 'http://localhost//upload-testcase/upload-oauth'
    }, response.json)

    testcase = data_handler.get_testcase_by_id(2)
    self.assert_dict_has_items({
        'absolute_path':
            'input',
        'additional_metadata':
            '{"fuzzer_binary_name": "target", '
            '"uploaded_additional_args": "%TESTCASE%"}',
        'archive_filename':
            None,
        'archive_state':
            0,
        'binary_flag':
            False,
        'bug_information':
            '',
        'comments':
            '[2021-01-01 00:00:00 UTC] uploader@email: '
            'External testcase upload.\n',
        'crash_address':
            '0x60f00003b280',
        'crash_revision':
            1337,
        'crash_stacktrace':
            stacktrace,
        'crash_state': ('blink::InputTypeView::element\n'
                        'blink::TextFieldInputType::didSetValueByUserEdit\n'
                        'blink::TextFieldInputType::subtreeHasChanged\n'),
        'crash_type':
            'Heap-use-after-free\nREAD 8',
        'disable_ubsan':
            False,
        'duplicate_of':
            None,
        'fixed':
            '',
        'flaky_stack':
            False,
        'fuzzed_keys':
            'blob_key',
        'fuzzer_name':
            'libFuzzer',
        'gestures': [],
        'group_bug_information':
            0,
        'group_id':
            0,
        'has_bug_flag':
            False,
        'http_flag':
            False,
        'impact_beta_version':
            None,
        'impact_beta_version_likely':
            None,
        'impact_stable_version':
            None,
        'impact_stable_version_likely':
            None,
        'is_a_duplicate_flag':
            False,
        'is_impact_set_flag':
            None,
        'is_leader':
            False,
        'job_type':
            'libfuzzer_proj_external',
        'last_tested_crash_stacktrace':
            None,
        'minidump_keys':
            None,
        'minimized_arguments':
            '',
        'minimized_keys':
            'NA',
        'one_time_crasher_flag':
            False,
        'open':
            True,
        'overridden_fuzzer_name':
            'libFuzzer_proj_target',
        'platform':
            'linux',
        'platform_id':
            'linux',
        'project_name':
            'proj',
        'queue':
            None,
        'redzone':
            128,
        'regression':
            'NA',
        'security_flag':
            True,
        'security_severity':
            None,
        'status':
            'Processed',
        'symbolized':
            False,
        'timeout_multiplier':
            1.0,
        'timestamp':
            datetime.datetime(2021, 1, 1),
        'triaged':
            False,
        'uploader_email':
            'uploader@email',
        'window_argument':
            ''
    }, testcase._to_dict())

    metadata = data_types.TestcaseUploadMetadata.query(
        data_types.TestcaseUploadMetadata.testcase_id ==
        testcase.key.id()).get()
    self.assertIsNotNone(metadata)
    self.assertDictEqual({
        'additional_metadata_string': None,
        'blobstore_key': 'blob_key',
        'bot_name': None,
        'bug_information': '',
        'bug_summary_update_flag': False,
        'bundled': False,
        'duplicate_of': None,
        'filename': 'input',
        'original_blobstore_key': 'blob_key',
        'path_in_archive': None,
        'quiet_flag': False,
        'retries': None,
        'security_flag': True,
        'status': 'Confirmed',
        'testcase_id': 2,
        'timeout': 0,
        'timestamp': datetime.datetime(2021, 1, 1, 0, 0),
        'uploader_email': 'uploader@email'
    }, metadata._to_dict())
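
The test above relies on an assert_dict_has_items helper. A plausible sketch of its behaviour (assumed: every expected key/value pair must appear in the actual dict; this may differ from the project's real helper):

# Hypothetical subset-assertion helper, sketched for illustration.
def assert_dict_has_items(expected, actual):
  missing = {k: v for k, v in expected.items()
             if k not in actual or actual[k] != v}
  assert not missing, 'Mismatched items: %r' % missing

assert_dict_has_items({'status': 'Processed'},
                      {'status': 'Processed', 'open': True})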
Example #23
def find_regression_range(testcase_id, job_type):
    """Attempt to find when the testcase regressed."""
    deadline = tasks.get_task_completion_deadline()
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    if testcase.regression:
        logs.log_error('Regression range is already set as %s, skip.' %
                       testcase.regression)
        return

    # This task is not applicable for custom binaries.
    if build_manager.is_custom_binary():
        testcase.regression = 'NA'
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.ERROR,
            'Not applicable for custom binaries')
        return

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED)

    # Setup testcase and its dependencies.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase)
    if not file_list:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             'Failed to setup testcase')
        tasks.add_task('regression', testcase_id, job_type)
        return

    release_build_bucket_path = environment.get_value(
        'RELEASE_BUILD_BUCKET_PATH')
    revision_list = build_manager.get_revisions_list(release_build_bucket_path,
                                                     testcase=testcase)
    if not revision_list:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             'Failed to fetch revision list')
        tasks.add_task('regression', testcase_id, job_type)
        return

    # Don't burden the NFS server with caching these random builds.
    environment.set_value('CACHE_STORE', False)

    # Pick up where left off in a previous run if necessary.
    min_revision = testcase.get_metadata('last_regression_min')
    max_revision = testcase.get_metadata('last_regression_max')
    first_run = not min_revision and not max_revision
    if not min_revision:
        min_revision = revisions.get_first_revision_in_list(revision_list)
    if not max_revision:
        max_revision = testcase.crash_revision

    min_index = revisions.find_min_revision_index(revision_list, min_revision)
    if min_index is None:
        raise errors.BuildNotFoundError(min_revision, job_type)
    max_index = revisions.find_max_revision_index(revision_list, max_revision)
    if max_index is None:
        raise errors.BuildNotFoundError(max_revision, job_type)

    # Make sure that the testcase still crashes at the revision where we
    # noticed the crash. Otherwise, our binary search algorithm won't work
    # correctly.
    max_revision = revision_list[max_index]
    crashes_in_max_revision = _testcase_reproduces_in_revision(
        testcase, testcase_file_path, job_type, max_revision, should_log=False)
    if not crashes_in_max_revision:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        error_message = ('Known crash revision %d did not crash' %
                         max_revision)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)
        task_creation.mark_unreproducible_if_flaky(testcase, True)
        return

    # If we've made it this far, the test case appears to be reproducible. Clear
    # any potentially-flaky metadata left over from previous runs.
    task_creation.mark_unreproducible_if_flaky(testcase, False)

    # On the first run, check to see if we regressed near either the min or max
    # revision.
    if first_run and found_regression_near_extreme_revisions(
            testcase, testcase_file_path, job_type, revision_list, min_index,
            max_index):
        return

    while time.time() < deadline:
        min_revision = revision_list[min_index]
        max_revision = revision_list[max_index]

        # If the min and max revisions are one apart (or the same, if we only have
        # one build), this is as much as we can narrow the range.
        if max_index - min_index <= 1:
            # Verify that the regression range seems correct, and save it if so.
            if not validate_regression_range(testcase, testcase_file_path,
                                             job_type, revision_list,
                                             min_index):
                return

            save_regression_range(testcase_id, min_revision, max_revision)
            return

        middle_index = (min_index + max_index) // 2
        middle_revision = revision_list[middle_index]
        try:
            is_crash = _testcase_reproduces_in_revision(
                testcase,
                testcase_file_path,
                job_type,
                middle_revision,
                min_revision=min_revision,
                max_revision=max_revision)
        except errors.BadBuildError:
            # Skip this revision.
            del revision_list[middle_index]
            max_index -= 1
            continue

        if is_crash:
            max_index = middle_index
        else:
            min_index = middle_index

        _save_current_regression_range_indices(testcase_id,
                                               revision_list[min_index],
                                               revision_list[max_index])

    # If we've broken out of the above loop, we timed out. We'll finish by
    # running another regression task and picking up from this point.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    error_message = 'Timed out, current range r%d:r%d' % (
        revision_list[min_index], revision_list[max_index])
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         error_message)
    tasks.add_task('regression', testcase_id, job_type)
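
A minimal standalone sketch of the bisection loop above, with the bad-build
handling folded in. Here `reproduces` and `BadBuild` are hypothetical
stand-ins rather than ClusterFuzz APIs, and the deadline handling, progress
persistence, and range validation are deliberately omitted.

class BadBuild(Exception):
    """Hypothetical error raised when a build cannot be used."""


def bisect_regression_range(revision_list, reproduces):
    """Return (good_revision, bad_revision) bracketing the regressing change.

    Assumes revision_list[0] does not crash and revision_list[-1] does.
    """
    min_index = 0
    max_index = len(revision_list) - 1

    while max_index - min_index > 1:
        # Integer division: list indices must stay ints.
        middle_index = (min_index + max_index) // 2
        try:
            is_crash = reproduces(revision_list[middle_index])
        except BadBuild:
            # Skip unusable builds rather than aborting the whole search. The
            # indices above the deleted entry shift down by one, so max_index
            # keeps pointing at the same (crashing) revision.
            del revision_list[middle_index]
            max_index -= 1
            continue

        if is_crash:
            max_index = middle_index
        else:
            min_index = middle_index

    return revision_list[min_index], revision_list[max_index]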
Example #24
0
  def test_external_duplicate(self):
    """Test uploading a duplicate."""
    existing = data_types.Testcase(
        crash_address='',
        crash_state='target\n',
        crash_type='Out-of-memory',
        project_name='proj',
        minimized_keys='NA',
        security_flag=False)
    existing.put()

    stacktrace = self._read_test_data('oom.txt')
    response = self.app.post(
        '/',
        params={
            'job': 'libfuzzer_proj_external',
            'target': 'target',
            'stacktrace': stacktrace,
            'revision': 1337,
        },
        upload_files=[('file', 'file', b'contents')])

    self.assertDictEqual({
        'id': '3',
        'uploadUrl': 'http://localhost//upload-testcase/upload-oauth'
    }, response.json)

    testcase = data_handler.get_testcase_by_id(3)
    self.assert_dict_has_items({
        'absolute_path': 'input',
        'additional_metadata': '{"fuzzer_binary_name": "target", '
                               '"uploaded_additional_args": "%TESTCASE%"}',
        'archive_filename': None,
        'archive_state': 0,
        'binary_flag': False,
        'bug_information': '',
        'comments': '[2021-01-01 00:00:00 UTC] uploader@email: '
                    'External testcase upload.\n',
        'crash_address': '',
        'crash_revision': 1337,
        'crash_stacktrace': stacktrace,
        'crash_state': 'target\n',
        'crash_type': 'Out-of-memory',
        'disable_ubsan': False,
        'duplicate_of': 2,
        'fixed': 'NA',
        'flaky_stack': False,
        'fuzzed_keys': 'blob_key',
        'fuzzer_name': 'libFuzzer',
        'gestures': [],
        'group_bug_information': 0,
        'group_id': 0,
        'has_bug_flag': False,
        'http_flag': False,
        'impact_beta_version': None,
        'impact_beta_version_likely': False,
        'impact_stable_version': None,
        'impact_stable_version_likely': False,
        'is_a_duplicate_flag': True,
        'is_impact_set_flag': False,
        'is_leader': False,
        'job_type': 'libfuzzer_proj_external',
        'last_tested_crash_stacktrace': None,
        'minidump_keys': None,
        'minimized_arguments': '',
        'minimized_keys': 'NA',
        'one_time_crasher_flag': False,
        'open': False,
        'overridden_fuzzer_name': 'libFuzzer_proj_target',
        'platform': 'linux',
        'platform_id': 'linux',
        'project_name': 'proj',
        'queue': None,
        'redzone': 128,
        'regression': 'NA',
        'security_flag': False,
        'security_severity': None,
        'status': 'Duplicate',
        'symbolized': False,
        'timeout_multiplier': 1.0,
        'timestamp': datetime.datetime(2021, 1, 1),
        'triaged': True,
        'uploader_email': 'uploader@email',
        'window_argument': ''
    }, testcase._to_dict())

    metadata = data_types.TestcaseUploadMetadata.query(
        data_types.TestcaseUploadMetadata.testcase_id ==
        testcase.key.id()).get()
    self.assertIsNotNone(metadata)
    self.assertDictEqual({
        'additional_metadata_string': None,
        'blobstore_key': 'blob_key',
        'bot_name': None,
        'bug_information': '',
        'bug_summary_update_flag': False,
        'bundled': False,
        'duplicate_of': 2,
        'filename': 'input',
        'original_blobstore_key': 'blob_key',
        'path_in_archive': None,
        'quiet_flag': False,
        'retries': None,
        'security_flag': False,
        'status': 'Duplicate',
        'testcase_id': 3,
        'timeout': 0,
        'timestamp': datetime.datetime(2021, 1, 1, 0, 0),
        'uploader_email': 'uploader@email'
    }, metadata._to_dict())
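
`assert_dict_has_items` is a test-utility helper not shown in this listing; a
plausible minimal version is a subset assertion along these lines (the real
helper may differ):

import unittest


class DictAssertionsMixin(unittest.TestCase):
  """Sketch of a subset assertion like assert_dict_has_items above."""

  def assert_dict_has_items(self, expected, actual):
    """Assert that every expected key is present in actual with that value."""
    for key, value in expected.items():
      self.assertIn(key, actual)
      self.assertEqual(value, actual[key], 'Mismatch for key %r.' % key)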
Example #25
0
    def get(self):
        """Handle a get request."""
        try:
            grouper.group_testcases()
        except Exception:
            logs.log_error('Error occurred while grouping test cases.')
            return

        # Free up memory after group task run.
        utils.python_gc()

        # Get a list of jobs excluded from bug filing.
        excluded_jobs = _get_excluded_jobs()

        # Get a list of all jobs. This is used to filter testcases whose jobs have
        # been removed.
        all_jobs = data_handler.get_all_job_type_names()

        for testcase_id in data_handler.get_open_testcase_id_iterator():
            try:
                testcase = data_handler.get_testcase_by_id(testcase_id)
            except errors.InvalidTestcaseError:
                # Already deleted.
                continue

            # Skip if testcase's job is removed.
            if testcase.job_type not in all_jobs:
                continue

            # Skip if testcase's job is in exclusions list.
            if testcase.job_type in excluded_jobs:
                continue

            # Skip if a progression task is currently pending for this testcase.
            if testcase.get_metadata('progression_pending'):
                continue

            # If the testcase has a bug filed already, no triage is needed.
            if _is_bug_filed(testcase):
                continue

            # Check if the crash is important, i.e., it is either a reproducible
            # crash or an unreproducible crash that happens frequently.
            if not _is_crash_important(testcase):
                continue

            # Require that all tasks like minimization, regression testing, etc.
            # have finished.
            if not data_handler.critical_tasks_completed(testcase):
                continue

            # For testcases that are not part of a group, wait additional time
            # until the group task completes.
            # FIXME: In future, grouping might be dependent on regression range, so we
            # would have to add an additional wait time.
            if not testcase.group_id and not dates.time_has_expired(
                    testcase.timestamp,
                    hours=data_types.MIN_ELAPSED_TIME_SINCE_REPORT):
                continue

            # If this project does not have an associated issue tracker, we cannot
            # file this crash anywhere.
            issue_tracker = issue_tracker_utils.get_issue_tracker_for_testcase(
                testcase)
            if not issue_tracker:
                continue

            # If there are similar issues to this test case already filed or recently
            # closed, skip filing a duplicate bug.
            if _check_and_update_similar_bug(testcase, issue_tracker):
                continue

            # Clean up old triage messages that are no longer applicable.
            testcase.delete_metadata(TRIAGE_MESSAGE_KEY, update_testcase=False)

            # File the bug first and then create filed bug metadata.
            try:
                issue_filer.file_issue(testcase, issue_tracker)
            except Exception:
                logs.log_error('Failed to file issue for testcase %d.' %
                               testcase_id)
                continue

            _create_filed_bug_metadata(testcase)
            logs.log('Filed new issue %s for testcase %d.' %
                     (testcase.bug_information, testcase_id))
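
The loop above defers bug filing until enough time has passed, via
`dates.time_has_expired`. A plausible sketch of such a check, assuming the
naive UTC datetimes used elsewhere in this listing (the real helper may
differ):

import datetime


def time_has_expired(timestamp, hours=0):
    """Return True if more than |hours| hours have passed since |timestamp|."""
    return (datetime.datetime.utcnow() - timestamp >
            datetime.timedelta(hours=hours))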
Example #26
0
def execute_task(testcase_id, job_type):
    """Run a test case with a different job type to see if they reproduce."""
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    if (environment.is_engine_fuzzer_job(testcase.job_type) !=
            environment.is_engine_fuzzer_job(job_type)):
        # We should never reach here. But in case we do, we should bail out as
        # otherwise we will run into exceptions.
        return

    # Set up the testcase and its dependencies.
    fuzzer_override = builtin_fuzzers.get_fuzzer_for_job(job_type)
    file_list, _, testcase_file_path = setup.setup_testcase(
        testcase, fuzzer_override=fuzzer_override)
    if not file_list:
        return

    # Set up a custom or regular build. We explicitly omit the crash revision
    # since we want to test against the latest build here.
    build_manager.setup_build()

    # Check if we have an application path. If not, our build failed to set up
    # correctly.
    app_path = environment.get_value('APP_PATH')
    if not app_path:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.ERROR,
            'Build setup failed with job: ' + job_type)
        return

    # Reproduce the crash.
    command = testcase_manager.get_command_line_for_application(
        testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
    test_timeout = environment.get_value('TEST_TIMEOUT', 10)
    revision = environment.get_value('APP_REVISION')
    result = testcase_manager.test_for_crash_with_retries(
        testcase,
        testcase_file_path,
        test_timeout,
        http_flag=testcase.http_flag,
        compare_crash=False)

    if result.is_crash() and not result.should_ignore():
        crash_state = result.get_state()
        crash_type = result.get_type()
        security_flag = result.is_security_issue()

        one_time_crasher_flag = not testcase_manager.test_for_reproducibility(
            testcase_file_path, crash_state, security_flag, test_timeout,
            testcase.http_flag, testcase.gestures)
        if one_time_crasher_flag:
            status = data_types.TestcaseVariantStatus.FLAKY
        else:
            status = data_types.TestcaseVariantStatus.REPRODUCIBLE

        crash_comparer = CrashComparer(crash_state, testcase.crash_state)
        is_similar = (crash_comparer.is_similar()
                      and security_flag == testcase.security_flag)

        unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
        symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
        crash_stacktrace_output = utils.get_crash_stacktrace_output(
            command, symbolized_crash_stacktrace,
            unsymbolized_crash_stacktrace)
    else:
        status = data_types.TestcaseVariantStatus.UNREPRODUCIBLE
        is_similar = False
        crash_type = None
        crash_state = None
        security_flag = False
        crash_stacktrace_output = 'No crash occurred.'

    testcase = data_handler.get_testcase_by_id(testcase_id)
    if testcase.job_type == job_type:
        # This case happens when someone clicks the 'Update last tested
        # stacktrace using trunk build' button.
        testcase.last_tested_crash_stacktrace = (
            data_handler.filter_stacktrace(crash_stacktrace_output))
        testcase.set_metadata('last_tested_crash_revision',
                              revision,
                              update_testcase=False)
    else:
        # Regular case of variant analysis.
        variant = data_handler.get_testcase_variant(testcase_id, job_type)
        variant.status = status
        variant.revision = revision
        variant.crash_type = crash_type
        variant.crash_state = crash_state
        variant.security_flag = security_flag
        variant.is_similar = is_similar
        # Explicitly skip the crash stacktrace for now, as it makes entities
        # larger and we plan to use only crash parameters in the UI.
        variant.put()
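
`CrashComparer`'s actual heuristics are not shown in this listing; as a toy
stand-in, a similarity check might normalize and compare the top stack frames
of the two crash states:

def crash_states_are_similar(state_1, state_2, frames_to_compare=3):
    """Toy comparison of the top frames of two newline-separated crash states.

    This is an illustrative stand-in, not ClusterFuzz's CrashComparer.
    """
    def top_frames(state):
        frames = [frame.strip().lower()
                  for frame in (state or '').splitlines() if frame.strip()]
        return frames[:frames_to_compare]

    return top_frames(state_1) == top_frames(state_2)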