Example No. 1
def _process_project(project, bucket):
    """Collects coverage information for all fuzz targets in the given project and
  the total stats for the project."""
    project_name = _basename(project)
    logs.log('Processing coverage for %s project.' % project_name)
    report_path = storage.get_cloud_storage_file_path(bucket, project)
    report_info = _read_json(report_path)
    if not report_info:
        logs.log_warn('Skipping code coverage for %s project.' % project_name)
        return

    # Iterate through report_info['fuzzer_stats_dir'] and prepare
    # CoverageInformation entities for individual fuzz targets.
    entities = []
    for fuzzer in storage.list_blobs(report_info['fuzzer_stats_dir'],
                                     recursive=False):
        entities.append(
            _process_fuzzer_stats(fuzzer, report_info, project_name, bucket))

    logs.log('Processed coverage for %d targets in %s project.' %
             (len(entities), project_name))

    # Prepare CoverageInformation entity for the total project stats.
    entities.append(_process_project_stats(report_info, project_name))

    ndb_utils.put_multi(entities)
Example No. 2
def _process_fuzzer_stats(fuzzer, project_info, project_name, bucket):
    """Processes coverage stats for a single fuzz target."""
    fuzzer_name = data_types.fuzz_target_project_qualified_name(
        project_name, _basename(fuzzer))
    fuzzer_info_path = storage.get_cloud_storage_file_path(bucket, fuzzer)
    logs.log('Processing fuzzer stats for %s (%s).' %
             (fuzzer_name, fuzzer_info_path))
    return _coverage_information(fuzzer_info_path, fuzzer_name, project_info)
Example No. 3
def _limit_corpus_size(corpus_url, size_limit):
  """Limit number of files in a corpus url."""
  files_list = list(storage.list_blobs(corpus_url))
  corpus_size = len(files_list)

  if corpus_size <= size_limit:
    # Corpus directory size is within limit, no more work to do.
    return

  logs.log(
      'Limit corpus at {corpus_url} from {corpus_size} to {size_limit}.'.format(
          corpus_url=corpus_url, corpus_size=corpus_size,
          size_limit=size_limit))
  files_to_delete = random.sample(files_list, corpus_size - size_limit)
  bucket, _ = storage.get_bucket_name_and_path(corpus_url)
  for file_to_delete in files_to_delete:
    path_to_delete = storage.get_cloud_storage_file_path(bucket, file_to_delete)
    storage.delete(path_to_delete)
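
Below is a minimal, self-contained sketch of the same trimming technique applied to a local directory: randomly sample the excess files and delete them. The function name trim_corpus_dir and the local-filesystem setting are illustrative assumptions only; the example above operates on GCS blobs through the storage module.

import os
import random


def trim_corpus_dir(corpus_dir, size_limit):
  """Delete randomly chosen files until at most |size_limit| remain."""
  files_list = [
      os.path.join(corpus_dir, name)
      for name in os.listdir(corpus_dir)
      if os.path.isfile(os.path.join(corpus_dir, name))
  ]
  corpus_size = len(files_list)
  if corpus_size <= size_limit:
    # Already within the limit, nothing to delete.
    return

  # Sample without replacement so each file is deleted at most once.
  for path_to_delete in random.sample(files_list, corpus_size - size_limit):
    os.remove(path_to_delete)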
Example No. 4
def _limit_corpus_size(corpus_url):
    """Limit number of files and size of a corpus."""
    corpus_count = 0
    corpus_size = 0
    deleted_corpus_count = 0
    bucket, _ = storage.get_bucket_name_and_path(corpus_url)
    for corpus_file in storage.get_blobs(corpus_url):
        corpus_count += 1
        corpus_size += corpus_file['size']
        if (corpus_count > CORPUS_FILES_LIMIT_FOR_FAILURES
                or corpus_size > CORPUS_SIZE_LIMIT_FOR_FAILURES):
            path_to_delete = storage.get_cloud_storage_file_path(
                bucket, corpus_file['name'])
            storage.delete(path_to_delete)
            deleted_corpus_count += 1

    if deleted_corpus_count:
        logs.log('Removed %d files from oversized corpus: %s.' %
                 (deleted_corpus_count, corpus_url))
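
For contrast with Example No. 3, this variant streams over the corpus once and starts deleting as soon as either a file-count or a cumulative-size limit is exceeded. The sketch below reproduces that logic against a local directory; the two limit constants and the function name are illustrative assumptions, not the module's real definitions.

import os

# Illustrative limits; the real constants are defined elsewhere in the module.
CORPUS_FILES_LIMIT_FOR_FAILURES = 10000
CORPUS_SIZE_LIMIT_FOR_FAILURES = 2 * 1024 * 1024 * 1024  # 2 GB.


def trim_corpus_dir_by_count_and_size(corpus_dir):
    """Delete every file encountered after either limit has been exceeded."""
    corpus_count = 0
    corpus_size = 0
    deleted_corpus_count = 0
    for name in sorted(os.listdir(corpus_dir)):
        path = os.path.join(corpus_dir, name)
        if not os.path.isfile(path):
            continue
        corpus_count += 1
        corpus_size += os.path.getsize(path)
        if (corpus_count > CORPUS_FILES_LIMIT_FOR_FAILURES
                or corpus_size > CORPUS_SIZE_LIMIT_FOR_FAILURES):
            os.remove(path)
            deleted_corpus_count += 1
    return deleted_corpus_count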
Example No. 5
def upload_testcases_if_needed(fuzzer_name, testcase_list, testcase_directory,
                               data_directory):
    """Upload test cases from the list to a cloud storage bucket."""
    # Since builtin fuzzers have a coverage-minimized corpus, there is no need
    # to upload test case samples for them.
    if fuzzer_name in builtin_fuzzers.BUILTIN_FUZZERS:
        return

    bucket_name = local_config.ProjectConfig().get(
        'coverage.fuzzer-testcases.bucket')
    if not bucket_name:
        return

    files_list = []
    has_testcases_in_testcase_directory = False
    has_testcases_in_data_directory = False
    for testcase_path in testcase_list:
        if testcase_path.startswith(testcase_directory):
            files_list.append(
                os.path.relpath(testcase_path, testcase_directory))
            has_testcases_in_testcase_directory = True
        elif testcase_path.startswith(data_directory):
            files_list.append(os.path.relpath(testcase_path, data_directory))
            has_testcases_in_data_directory = True
    if not files_list:
        return

    formatted_date = str(utils.utcnow().date())
    gcs_base_url = 'gs://{bucket_name}/{date}/{fuzzer_name}/'.format(
        bucket_name=bucket_name, date=formatted_date, fuzzer_name=fuzzer_name)

    runner = gsutil.GSUtilRunner()
    batch_directory_blobs = storage.list_blobs(gcs_base_url)
    total_testcases = 0
    for blob in batch_directory_blobs:
        if not blob.endswith(LIST_FILE_BASENAME):
            continue

        list_gcs_url = storage.get_cloud_storage_file_path(bucket_name, blob)
        data = storage.read_data(list_gcs_url)
        if not data:
            logs.log_error(
                'Read no data from test case list at {gcs_url}'.format(
                    gcs_url=list_gcs_url))
            continue

        total_testcases += len(data.splitlines())

        # If we've already uploaded enough test cases for this fuzzer today, return.
        if total_testcases >= TESTCASES_PER_DAY:
            return

    # Cap the number of files.
    testcases_limit = min(len(files_list), TESTCASES_PER_DAY - total_testcases)
    files_list = files_list[:testcases_limit]

    # Upload each batch of tests to its own unique sub-bucket.
    identifier = environment.get_value('BOT_NAME') + str(utils.utcnow())
    gcs_base_url += utils.string_hash(identifier)

    list_gcs_url = gcs_base_url + '/' + LIST_FILE_BASENAME
    if not storage.write_data('\n'.join(files_list), list_gcs_url):
        return

    if has_testcases_in_testcase_directory:
        # Sync everything in |testcase_directory| since it is fuzzer-generated.
        runner.rsync(testcase_directory, gcs_base_url)

    if has_testcases_in_data_directory:
        # Sync all fuzzer-generated testcases in the data bundle directory.
        runner.rsync(data_directory,
                     gcs_base_url,
                     exclusion_pattern=('(?!.*{fuzz_prefix})'.format(
                         fuzz_prefix=testcase_manager.FUZZ_PREFIX)))

        # Sync all possible resource dependencies as a best effort. This matches
        # the |resources-| prefix that a fuzzer can use to indicate resources, as
        # well as the resources directory that Chromium web_tests use for
        # dependencies.
        runner.rsync(data_directory,
                     gcs_base_url,
                     exclusion_pattern='(?!.*resource)')

    logs.log('Synced {count} test cases to {gcs_url}.'.format(
        count=len(files_list), gcs_url=gcs_base_url))
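
The daily cap and the per-batch sub-path in the example above can be hard to follow amid the GCS plumbing, so here is a hedged, standard-library-only sketch of just that bookkeeping. The value of TESTCASES_PER_DAY, the SHA-1 hash standing in for utils.string_hash, and the function name build_batch_url are assumptions made for illustration.

import datetime
import hashlib

TESTCASES_PER_DAY = 10  # Illustrative value only.


def build_batch_url(bucket_name, fuzzer_name, bot_name, files_list,
                    total_testcases):
    """Return (batch GCS URL, capped file list), or (None, []) if the daily
    cap has already been reached."""
    if total_testcases >= TESTCASES_PER_DAY:
        return None, []

    # Cap the number of files to today's remaining budget.
    testcases_limit = min(len(files_list), TESTCASES_PER_DAY - total_testcases)
    files_list = files_list[:testcases_limit]

    # Derive a unique sub-path from the bot name and current time so that
    # concurrent bots write to separate batches.
    now = datetime.datetime.utcnow()
    identifier = bot_name + str(now)
    batch_hash = hashlib.sha1(identifier.encode('utf-8')).hexdigest()
    gcs_base_url = 'gs://{bucket}/{date}/{fuzzer}/{batch}'.format(
        bucket=bucket_name, date=now.date(), fuzzer=fuzzer_name,
        batch=batch_hash)
    return gcs_base_url, files_list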