Example #1
def _get_variant_testcase_for_job(testcase, job_type):
    """Return a testcase entity for variant task use. This changes the fuzz
  target params for a particular fuzzing engine."""
    if testcase.job_type == job_type:
        # Update stack operation on same testcase.
        return testcase

    if not environment.is_engine_fuzzer_job(testcase.job_type):
        # For blackbox fuzzer testcases, there is no change of fuzzer required.
        return testcase

    engine_name = environment.get_engine_for_job(job_type)
    project = data_handler.get_project_name(job_type)
    binary_name = testcase.get_metadata('fuzzer_binary_name')
    fully_qualified_fuzzer_name = data_types.fuzz_target_fully_qualified_name(
        engine_name, project, binary_name)

    variant_testcase = data_types.clone_entity(testcase)
    variant_testcase.key = testcase.key
    variant_testcase.fuzzer_name = engine_name
    variant_testcase.overridden_fuzzer_name = fully_qualified_fuzzer_name
    variant_testcase.job_type = job_type

    # Override put() with a no-op to avoid datastore updates. DO NOT REMOVE THIS.
    variant_testcase.put = lambda: None

    return variant_testcase
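The no-op put() override is the point of this helper: the cloned entity can flow through code that calls put() without ever being written back to the datastore. A minimal standalone sketch of that pattern, using a hypothetical Entity class rather than the real ndb model:

import copy


class Entity:
    """Stand-in for a datastore model (hypothetical, illustration only)."""

    def __init__(self, job_type, fuzzer_name):
        self.job_type = job_type
        self.fuzzer_name = fuzzer_name

    def put(self):
        print('Persisted %s / %s' % (self.job_type, self.fuzzer_name))


original = Entity('libfuzzer_asan_project', 'libFuzzer')

# Clone and retarget, as _get_variant_testcase_for_job does for a variant job.
variant = copy.copy(original)
variant.job_type = 'afl_asan_project'
variant.fuzzer_name = 'afl'

# Disable persistence on the clone so accidental put() calls are harmless.
variant.put = lambda: None

original.put()  # Writes (here: prints) as usual.
variant.put()   # Silently does nothing.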
Example #2
def get_components_list(component_revisions_dict, job_type):
    """Return a prioritized order of components based on job type."""
    components = sorted(component_revisions_dict.keys())

    if utils.is_chromium():
        # Components prioritization only applies to non-chromium projects.
        return components

    project_name = data_handler.get_project_name(job_type)
    if not project_name:
        # No project name found in job environment, return list as-is.
        return components

    main_repo = data_handler.get_main_repo(job_type)
    project_src = '/src/' + project_name
    for component in components.copy():
        if component_revisions_dict[component]['url'] == main_repo:
            # Matches recorded main repo.
            components.remove(component)
            components.insert(0, component)
            break

        if component == project_src:
            components.remove(component)
            components.insert(0, component)
            break

        if project_name.lower() in os.path.basename(component).lower():
            components.remove(component)
            components.insert(0, component)
            # Keep trying in case an exact match is found later.

    return components
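To make the move-to-front logic concrete, here is a self-contained sketch with made-up component names and URLs; it mirrors the loop above but drops the data_handler lookups:

import os


def prioritize_components(revisions, project_name, main_repo):
    """Mirror of the loop above, without the data_handler calls."""
    components = sorted(revisions.keys())
    project_src = '/src/' + project_name
    for component in components.copy():
        if (revisions[component]['url'] == main_repo or
                component == project_src):
            # Exact match: move it to the front and stop.
            components.remove(component)
            components.insert(0, component)
            break
        if project_name.lower() in os.path.basename(component).lower():
            # Fuzzy match: move it to the front, but keep scanning for an
            # exact match.
            components.remove(component)
            components.insert(0, component)
    return components


revisions = {
    '/src': {'url': 'https://example.org/oss-repo.git'},
    '/src/foo': {'url': 'https://example.org/foo.git'},
    '/src/libfoo': {'url': 'https://example.org/libfoo.git'},
}
print(prioritize_components(revisions, 'foo', 'https://example.org/foo.git'))
# -> ['/src/foo', '/src', '/src/libfoo']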
Example #3
def save_crash_info_if_needed(testcase_id, crash_revision, job_type,
                              crash_type, crash_address, crash_frames):
    """Saves crash report for chromium project, skip otherwise."""
    if data_handler.get_project_name(job_type) != 'chromium':
        return None

    serialized_crash_stack_frames = get_symbolized_stack_bytes(
        crash_type, crash_address, crash_frames)
    if not serialized_crash_stack_frames:
        return None

    crash_info = CrashReportInfo(
        serialized_crash_stack_frames=serialized_crash_stack_frames)

    # Get product and version (required).
    platform = environment.platform()
    crash_info.product = PRODUCT_MAP[platform]
    crash_info.version = revisions.get_real_revision(crash_revision,
                                                     job_type,
                                                     display=True)

    # Update crash_info object with bot information and testcase id.
    crash_info.bot_id = environment.get_value('BOT_NAME')
    crash_info.testcase_id = int(testcase_id)

    # Store CrashInfo metadata.
    crash_report_metadata = crash_info.to_report_metadata()
    crash_report_metadata.job_type = job_type
    crash_report_metadata.crash_revision = crash_revision
    crash_report_metadata.put()

    logs.log('Created crash report entry for testcase %s.' % testcase_id)
    return crash_info
Example #4
def create_variant_tasks_if_needed(testcase):
  """Creates a variant task if needed."""
  if testcase.duplicate_of:
    # If another testcase exists with same params, no need to spend cycles on
    # calculating variants again.
    return

  testcase_id = testcase.key.id()
  project = data_handler.get_project_name(testcase.job_type)
  jobs = data_types.Job.query(data_types.Job.project == project)
  for job in jobs:
    # The variant needs to be tested in a different job type than the
    # testcase's own.
    job_type = job.name
    if testcase.job_type == job_type:
      continue

    # Don't try to reproduce engine fuzzer testcase with blackbox fuzzer
    # testcases and vice versa.
    if (environment.is_engine_fuzzer_job(testcase.job_type) !=
        environment.is_engine_fuzzer_job(job_type)):
      continue

    # Skip experimental jobs.
    job_environment = job.get_environment()
    if utils.string_is_true(job_environment.get('EXPERIMENTAL')):
      continue

    queue = tasks.queue_for_platform(job.platform)
    tasks.add_task('variant', testcase_id, job_type, queue)

    variant = data_handler.get_testcase_variant(testcase_id, job_type)
    variant.status = data_types.TestcaseVariantStatus.PENDING
    variant.put()
Example #5
  def test_get_from_job(self):
    """Test getting from job."""
    data_types.Job(
        name='job',
        environment_string=('PROJECT_NAME = from_internal_project\n'
                            'HELP_URL = help_url\n')).put()
    self.assertEqual('from_internal_project',
                     data_handler.get_project_name('job'))
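The test implies that get_project_name resolves PROJECT_NAME from the job's environment string. Below is a rough sketch of that lookup, inferred from the test rather than copied from the real implementation:

def parse_environment_string(environment_string):
    """Parse 'KEY = value' lines into a dict (simplified sketch)."""
    environment = {}
    for line in environment_string.splitlines():
        if '=' not in line:
            continue
        key, value = line.split('=', 1)
        environment[key.strip()] = value.strip()
    return environment


env = parse_environment_string('PROJECT_NAME = from_internal_project\n'
                               'HELP_URL = help_url\n')
print(env.get('PROJECT_NAME'))  # -> from_internal_project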
Example #6
def find_fuzz_target(engine, target_name, job_name):
    """Return fuzz target values given the engine, target name (which may or may
  not be prefixed with project), and job."""
    project_name = data_handler.get_project_name(job_name)
    candidate_name = data_types.fuzz_target_fully_qualified_name(
        engine, project_name, target_name)

    target = data_handler.get_fuzz_target(candidate_name)
    if not target:
        raise helpers.EarlyExitException('Fuzz target does not exist.', 400)

    return target.fully_qualified_name(), target.binary
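The candidate name is built from the engine, project and target. The naming rule sketched below is an assumption for illustration (roughly 'engine_project_target'), not the real data_types implementation:

def fuzz_target_fully_qualified_name(engine, project, binary):
    """Assumed convention: engine prefix, then project-qualified binary."""
    if project:
        return '%s_%s_%s' % (engine, project, binary)
    return '%s_%s' % (engine, binary)


print(fuzz_target_fully_qualified_name('libFuzzer', 'myproject', 'mytarget'))
# -> libFuzzer_myproject_mytarget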
Example #7
    def get_coverage_info(self, fuzzer, date=None):
        """Return coverage info of child fuzzers."""
        if fuzzer in data_types.BUILTIN_FUZZERS:
            # Get coverage info for a job (i.e. a project).
            job = self.single_job_or_none()
            project = data_handler.get_project_name(job)
            return get_coverage_info(project, date)

        fuzz_target = data_handler.get_fuzz_target(fuzzer)
        if fuzz_target:
            fuzzer = fuzz_target.project_qualified_name()

        return get_coverage_info(fuzzer, date)
Example #8
def _get_project_report_url(job, date):
    """Return url for the report requested."""
    project = data_handler.get_project_name(job)
    if not project:
        return None

    if date == 'latest':
        date = None
    else:
        try:
            date = datetime.datetime.strptime(date, '%Y-%m-%d').date()
        except ValueError:
            raise helpers.EarlyExitException('Invalid date.', 400)

    info = fuzzer_stats.get_coverage_info(project, date)
    if not info:
        return None

    return info.html_report_url
Example #9
  def get(self):
    """Handle a GET request."""
    project = request.get('project')

    if access.has_access():
      # User is an internal user of ClusterFuzz (eg: ClusterFuzz developer).

      # Show all projects in the list, since this allows user to pick another
      # project as needed.
      projects_list = data_handler.get_all_project_names()

      # Filter fuzzers and job list if a project is provided.
      fuzzers_list = (
          data_handler.get_all_fuzzer_names_including_children(
              include_parents=True, project=project))
      jobs_list = data_handler.get_all_job_type_names(project=project)
    else:
      # User is an external user of ClusterFuzz (eg: non-Chrome dev who
      # submitted a fuzzer or someone with a project in OSS-Fuzz).
      user_email = helpers.get_user_email()

      # TODO(aarya): Filter fuzzer and job if |project| is provided.
      fuzzers_list = sorted(
          external_users.allowed_fuzzers_for_user(
              user_email, include_from_jobs=True, include_parents=True))
      if not fuzzers_list:
        # User doesn't actually have access to any fuzzers.
        raise helpers.AccessDeniedException(
            "You don't have access to any fuzzers.")

      jobs_list = sorted(external_users.allowed_jobs_for_user(user_email))
      projects_list = sorted(
          {data_handler.get_project_name(job) for job in jobs_list})

    result = {
        'projects': projects_list,
        'fuzzers': fuzzers_list,
        'jobs': jobs_list,
    }
    return self.render_json(result)
Example #10
  def test_get_from_default(self):
    """Test getting from local config."""
    self.assertEqual('test-project', data_handler.get_project_name('job'))
Example #11
def _process_corpus_crashes(context, result):
    """Process crashes found in the corpus."""
    # Default Testcase entity values.
    crash_revision = result.revision
    job_type = environment.get_value('JOB_NAME')
    minimized_arguments = '%TESTCASE% ' + context.fuzz_target.binary
    project_name = data_handler.get_project_name(job_type)

    comment = 'Fuzzer %s generated corpus testcase crashed (r%s)' % (
        context.fuzz_target.project_qualified_name(), crash_revision)

    # Generate crash reports.
    for crash in result.crashes:
        existing_testcase = data_handler.find_testcase(project_name,
                                                       crash.crash_type,
                                                       crash.crash_state,
                                                       crash.security_flag)
        if existing_testcase:
            continue

        # Upload/store testcase.
        if environment.is_trusted_host():
            from clusterfuzz._internal.bot.untrusted_runner import file_host
            unit_path = os.path.join(context.bad_units_path,
                                     os.path.basename(crash.unit_path))
            # Prevent the worker from escaping out of |context.bad_units_path|.
            if not file_host.is_directory_parent(unit_path,
                                                 context.bad_units_path):
                raise CorpusPruningException('Invalid units path from worker.')

            file_host.copy_file_from_worker(crash.unit_path, unit_path)
        else:
            unit_path = crash.unit_path

        with open(unit_path, 'rb') as f:
            key = blobs.write_blob(f)

        # Set the absolute_path property of the Testcase to a file in FUZZ_INPUTS
        # instead of the local quarantine directory.
        absolute_testcase_path = os.path.join(
            environment.get_value('FUZZ_INPUTS'), 'testcase')

        testcase_id = data_handler.store_testcase(
            crash=crash,
            fuzzed_keys=key,
            minimized_keys='',
            regression='',
            fixed='',
            one_time_crasher_flag=False,
            crash_revision=crash_revision,
            comment=comment,
            absolute_path=absolute_testcase_path,
            fuzzer_name=context.fuzz_target.engine,
            fully_qualified_fuzzer_name=(
                context.fuzz_target.fully_qualified_name()),
            job_type=job_type,
            archived=False,
            archive_filename='',
            binary_flag=True,
            http_flag=False,
            gestures=None,
            redzone=DEFAULT_REDZONE,
            disable_ubsan=False,
            minidump_keys=None,
            window_argument=None,
            timeout_multiplier=1.0,
            minimized_arguments=minimized_arguments)

        # Set fuzzer_binary_name in testcase metadata.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        testcase.set_metadata('fuzzer_binary_name', result.fuzzer_binary_name)

        issue_metadata = engine_common.get_all_issue_metadata_for_testcase(
            testcase)
        if issue_metadata:
            for key, value in issue_metadata.items():
                testcase.set_metadata(key, value, update_testcase=False)

            testcase.put()

        # Create additional tasks for testcase (starting with minimization).
        testcase = data_handler.get_testcase_by_id(testcase_id)
        task_creation.create_tasks(testcase)
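The is_directory_parent guard above is what stops a worker-supplied filename from escaping |context.bad_units_path|. A minimal standalone version of such a containment check (a hypothetical helper, not the real file_host code) could look like this:

import os


def is_directory_parent(path, directory):
    """Return True if |directory| contains |path| after normalization."""
    path = os.path.abspath(path)
    directory = os.path.abspath(directory)
    return os.path.commonpath([path, directory]) == directory


print(is_directory_parent('/corpus/bad_units/crash-1', '/corpus/bad_units'))
# -> True
print(is_directory_parent('/corpus/bad_units/../etc/passwd',
                          '/corpus/bad_units'))
# -> False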
Example #12
def get_component_revisions_dict(revision, job_type, platform_id=None):
    """Retrieve revision vars dict."""
    if revision == 0 or revision == '0' or revision is None:
        # Return empty dict for zero start revision.
        return {}

    revision_vars_url_format = _get_revision_vars_url_format(
        job_type, platform_id=platform_id)
    if not revision_vars_url_format:
        return None

    project_name = data_handler.get_project_name(job_type)
    revisions_dict = {}

    if utils.is_chromium():
        component = data_handler.get_component_name(job_type)
        repository = data_handler.get_repository_for_component(component)
        if repository and not _is_clank(revision_vars_url_format):
            revision_hash = _git_commit_position_to_git_hash_for_chromium(
                revision, repository)
            if revision_hash is None:
                return None

            # FIXME: While we check for this explicitly appended component in all
            # applicable cases that we know of within this codebase, if the dict
            # is shared with an external service (e.g. Predator) we may need to clean
            # this up beforehand.
            revisions_dict['/src'] = {
                'name': _get_component_display_name(component, project_name),
                'url': _git_url_for_chromium_repository(repository),
                'rev': revision_hash,
                'commit_pos': revision
            }

            # Use revision hash for info url later.
            revision = revision_hash

    revision_vars_url = revision_vars_url_format % revision
    url_content = _get_url_content(revision_vars_url)
    if not url_content:
        logs.log_error('Failed to get component revisions from %s.' %
                       revision_vars_url)
        return None

    # Parse as per DEPS format.
    if _is_deps(revision_vars_url):
        deps_revisions_dict = deps_to_revisions_dict(url_content)
        if not deps_revisions_dict:
            return None

        revisions_dict.update(deps_revisions_dict)
        return revisions_dict

    # Parse as per Clank DEPS format.
    if _is_clank(revision_vars_url):
        return _clank_revision_file_to_revisions_dict(url_content)

    # Default case: parse content as yaml.
    revisions_dict = _to_dict(url_content)
    if not revisions_dict:
        logs.log_error('Failed to parse component revisions from %s.' %
                       revision_vars_url)
        return None

    # Parse as per source map format.
    if revision_vars_url.endswith(SOURCE_MAP_EXTENSION):
        revisions_dict = _src_map_to_revisions_dict(revisions_dict,
                                                    project_name)

    return revisions_dict
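Whichever branch produces it, the returned value maps component paths to revision metadata using the keys seen above (name, url, rev and, for the main Chromium component, commit_pos). A made-up example of the shape:

component_revisions_dict = {
    '/src': {
        'name': 'Chromium',
        'url': 'https://chromium.googlesource.com/chromium/src',
        'rev': 'a1b2c3d4e5f60718293a4b5c6d7e8f9012345678',
        'commit_pos': 1000000,
    },
    '/src/v8': {
        'name': 'V8',
        'url': 'https://chromium.googlesource.com/v8/v8',
        'rev': '0123456789abcdef0123456789abcdef01234567',
    },
}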