Example 1
  def _read_to_bytesio(self, gcs_path):
    """Return a bytesio representing a GCS object."""
    data = storage.read_data(gcs_path)
    if not data:
      raise helpers.EarlyExitException('Failed to read uploaded archive.', 500)

    return io.BytesIO(data)
Example 2
def _read_to_stringio(gcs_path):
    """Return a StringIO representing a GCS object."""
    data = storage.read_data(gcs_path)
    if not data:
        raise helpers.EarlyExitException('Failed to read uploaded archive.')

    return StringIO.StringIO(data)
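Note: Examples 1 and 2 are the same helper at two points in its history. Example 1 is the Python 3 variant (bytes wrapped in io.BytesIO, with an explicit HTTP 500 status on the exception), while Example 2 is the older Python 2 variant built on the StringIO module.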
Example 3
def upload_testcases_if_needed(fuzzer_name, testcase_list, testcase_directory):
    """Upload test cases from the list to a cloud storage bucket."""
    bucket_name = local_config.ProjectConfig().get(
        'coverage.fuzzer-testcases.bucket')
    if not bucket_name:
        return

    # Only consider test cases in the output directory. We might upload too much
    # if we search the data directory as well, or have missing resources.
    # TODO(mbarbella): Support resources in data bundles.
    testcase_list = [
        os.path.relpath(testcase, testcase_directory)
        for testcase in testcase_list
        if testcase.startswith(testcase_directory)
    ]
    if not testcase_list:
        return

    # Bail out if this batch of test cases is too large.
    directory_size = shell.get_directory_size(testcase_directory)
    if directory_size >= MAX_TESTCASE_DIRECTORY_SIZE:
        return

    formatted_date = str(utils.utcnow().date())
    gcs_base_url = 'gs://{bucket_name}/{date}/{fuzzer_name}/'.format(
        bucket_name=bucket_name, date=formatted_date, fuzzer_name=fuzzer_name)

    runner = gsutil.GSUtilRunner()
    batch_directory_blobs = storage.list_blobs(gcs_base_url)
    total_testcases = 0
    for blob in batch_directory_blobs:
        if not blob.endswith(LIST_FILE_BASENAME):
            continue

        list_gcs_url = 'gs://{bucket}/{blob}'.format(bucket=bucket_name,
                                                     blob=blob)
        data = storage.read_data(list_gcs_url)
        if not data:
            logs.log_error(
                'Read no data from test case list at {gcs_url}'.format(
                    gcs_url=list_gcs_url))
            continue

        total_testcases += len(data.splitlines())

        # If we've already uploaded enough test cases for this fuzzer today, return.
        if total_testcases >= TESTCASES_PER_DAY:
            return

    # Upload each batch of tests to its own unique sub-bucket.
    identifier = environment.get_value('BOT_NAME') + str(utils.utcnow())
    gcs_base_url += utils.string_hash(identifier)

    list_gcs_url = gcs_base_url + '/' + LIST_FILE_BASENAME
    if not storage.write_data('\n'.join(testcase_list), list_gcs_url):
        return

    runner.rsync(testcase_directory, gcs_base_url)
    logs.log('Synced {count} test cases to {gcs_url}'.format(
        count=len(testcase_list), gcs_url=gcs_base_url))
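To make the resulting layout concrete, here is a hedged reconstruction of the GCS path scheme the function builds. The bucket, fuzzer, and hash are made-up stand-ins; utils.string_hash is assumed to return a stable hex digest, so hashlib.sha1 fills in for it here.

import datetime
import hashlib

bucket_name = 'my-coverage-bucket'  # stand-in value
fuzzer_name = 'libFuzzer'
formatted_date = str(datetime.datetime.utcnow().date())
gcs_base_url = 'gs://{bucket_name}/{date}/{fuzzer_name}/'.format(
    bucket_name=bucket_name, date=formatted_date, fuzzer_name=fuzzer_name)

# Each batch lands in its own hashed subdirectory, keyed by bot name and time.
identifier = 'bot-1' + str(datetime.datetime.utcnow())
gcs_base_url += hashlib.sha1(identifier.encode('utf-8')).hexdigest()

# e.g. gs://my-coverage-bucket/2024-05-01/libFuzzer/3f2c9a.../files.list
list_gcs_url = gcs_base_url + '/files.list'  # stand-in for LIST_FILE_BASENAME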
Example 4
    def _compare_and_swap_gcs_dictionary(self, old_content, new_content):
        """Compare and swap implementation for dictionary stored in GCS. Of course,
    this function is not atomic, but window for race is acceptably small."""
        current_content = storage.read_data(self.gcs_path)
        if current_content != old_content:
            return False, current_content

        storage.write_data(new_content, self.gcs_path)
        return True, old_content
Example 5
def _read_json(url):
    """Returns a JSON object loaded from the given GCS URL."""
    data = storage.read_data(url)

    result = None
    try:
        result = json.loads(data)
    except Exception as e:
        logs.log_warn('Empty or malformed code coverage JSON (%s): %s.' %
                      (url, str(e)))

    return result
Example 6
def _get_url_content(url):
    """Read a potentially base64-encoded resource from the given URL."""
    if url.startswith(storage.GS_PREFIX):
        # Fetch a GCS path with authentication.
        url_content = storage.read_data(url)
    else:
        # Fetch a regular url without authentication.
        url_content = utils.fetch_url(url)

        # URLs on googlesource.com return file data base64-encoded to avoid
        # cross-site scripting attacks. If the requested URL contains
        # |format=text|, the response is base64-encoded, so decode it first.
        if url_content and url.endswith('format=text'):
            url_content = base64.b64decode(url_content)

    return url_content
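A quick round-trip showing why the decode step exists; the payload is a made-up stand-in for what fetch_url would return from a |format=text| URL on googlesource.com:

import base64

url_content = base64.b64encode(b'vars = {...}\n')  # gitiles-style base64 payload
assert base64.b64decode(url_content) == b'vars = {...}\n'  # the real file bytes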
Example 7
    def update_recommended_dictionary(self, new_dictionary):
        """Update the recommended dictionary stored in GCS with new elements.

    Args:
      new_dictionary: A set of dictionary elements to be added to the dictionary.

    Returns:
      The number of new elements actually added to the dictionary stored in GCS.
    """
        if environment.is_lib():
            return 0

        # If the dictionary does not already exist, then directly update it.
        if not storage.exists(self.gcs_path):
            storage.write_data('\n'.join(new_dictionary).encode('utf-8'),
                               self.gcs_path)
            return len(new_dictionary)

        # Read current version of the dictionary.
        old_dictionary_data = storage.read_data(self.gcs_path).decode('utf-8')

        # Use "Compare-and-swap"-like approach to avoid race conditions and also to
        # avoid having a separate job merging multiple recommended dictionaries.
        succeeded = False
        while not succeeded:
            # If old_dictionary_data is None, there is no dictionary in GCS yet, i.e.
            # it's empty. Otherwise, we parse it and use it.
            old_dictionary = set()
            if old_dictionary_data:
                old_dictionary = set(old_dictionary_data.splitlines())

            # Merge two dictionaries.
            new_dictionary |= old_dictionary
            if new_dictionary == old_dictionary:
                # "New dictionary" elements have been already added to GCS, bail out.
                return 0

            succeeded, old_dictionary_data = self._compare_and_swap_gcs_dictionary(
                old_dictionary_data, '\n'.join(new_dictionary))

        return len(new_dictionary) - len(old_dictionary)
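A toy run of the merge and no-op check above: when every "new" element is already present, the merged set equals the old one and the method returns 0 without writing anything back:

old_dictionary = {'"key1"', '"key2"'}
new_dictionary = {'"key2"'}
new_dictionary |= old_dictionary
assert new_dictionary == old_dictionary  # nothing new to add, so return 0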
Example 8
def upload_testcases_if_needed(fuzzer_name, testcase_list, testcase_directory,
                               data_directory):
    """Upload test cases from the list to a cloud storage bucket."""
    # Since builtin fuzzers have a coverage-minimized corpus, there is no need
    # to upload test case samples for them.
    if fuzzer_name in builtin_fuzzers.BUILTIN_FUZZERS:
        return

    bucket_name = local_config.ProjectConfig().get(
        'coverage.fuzzer-testcases.bucket')
    if not bucket_name:
        return

    files_list = []
    has_testcases_in_testcase_directory = False
    has_testcases_in_data_directory = False
    for testcase_path in testcase_list:
        if testcase_path.startswith(testcase_directory):
            files_list.append(
                os.path.relpath(testcase_path, testcase_directory))
            has_testcases_in_testcase_directory = True
        elif testcase_path.startswith(data_directory):
            files_list.append(os.path.relpath(testcase_path, data_directory))
            has_testcases_in_data_directory = True
    if not files_list:
        return

    formatted_date = str(utils.utcnow().date())
    gcs_base_url = 'gs://{bucket_name}/{date}/{fuzzer_name}/'.format(
        bucket_name=bucket_name, date=formatted_date, fuzzer_name=fuzzer_name)

    runner = gsutil.GSUtilRunner()
    batch_directory_blobs = storage.list_blobs(gcs_base_url)
    total_testcases = 0
    for blob in batch_directory_blobs:
        if not blob.endswith(LIST_FILE_BASENAME):
            continue

        list_gcs_url = 'gs://{bucket}/{blob}'.format(bucket=bucket_name,
                                                     blob=blob)
        data = storage.read_data(list_gcs_url)
        if not data:
            logs.log_error(
                'Read no data from test case list at {gcs_url}'.format(
                    gcs_url=list_gcs_url))
            continue

        total_testcases += len(data.splitlines())

        # If we've already uploaded enough test cases for this fuzzer today, return.
        if total_testcases >= TESTCASES_PER_DAY:
            return

    # Cap the number of files.
    testcases_limit = min(len(files_list), TESTCASES_PER_DAY - total_testcases)
    files_list = files_list[:testcases_limit]

    # Upload each batch of tests to its own unique sub-bucket.
    identifier = environment.get_value('BOT_NAME') + str(utils.utcnow())
    gcs_base_url += utils.string_hash(identifier)

    list_gcs_url = gcs_base_url + '/' + LIST_FILE_BASENAME
    if not storage.write_data('\n'.join(files_list), list_gcs_url):
        return

    if has_testcases_in_testcase_directory:
        # Sync everything in |testcase_directory| since it is fuzzer-generated.
        runner.rsync(testcase_directory, gcs_base_url)

    if has_testcases_in_data_directory:
        # Sync all fuzzer-generated testcases in the data bundle directory.
        runner.rsync(data_directory,
                     gcs_base_url,
                     exclusion_pattern=('(?!.*{fuzz_prefix})'.format(
                         fuzz_prefix=testcase_manager.FUZZ_PREFIX)))

        # Sync all possible resource dependencies as a best effort. The pattern
        # matches the |resources-| prefix that a fuzzer can use to indicate
        # resources, and also the resources directory that Chromium web_tests
        # use for dependencies.
        runner.rsync(data_directory,
                     gcs_base_url,
                     exclusion_pattern='(?!.*resource)')

    logs.log('Synced {count} test cases to {gcs_url}.'.format(
        count=len(files_list), gcs_url=gcs_base_url))
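The exclusion patterns above are negative lookaheads: gsutil excludes whatever the pattern matches, so a pattern matching "everything not containing the prefix" leaves only the wanted files in the sync. A small check, assuming FUZZ_PREFIX is 'fuzz-' (an assumption; the real constant lives in testcase_manager):

import re

pattern = re.compile('(?!.*fuzz-)')
assert pattern.match('data/readme.txt')     # matches, so excluded from the sync
assert not pattern.match('data/fuzz-0001')  # no match, so included in the sync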
Example 9
def get_remote_source_revision(source_manifest_url):
  """Get remote revision. We refactor this method out, so that we can mock
    it."""
  return storage.read_data(source_manifest_url).strip()
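Since the docstring points at testability, here is a minimal mocking sketch using unittest.mock; update_task is a hypothetical module path standing in for wherever the function actually lives:

from unittest import mock

import update_task  # hypothetical module hosting get_remote_source_revision


def test_uses_canned_revision():
  with mock.patch.object(
      update_task, 'get_remote_source_revision', return_value='abc123'):
    # Code under test now sees the canned revision without touching GCS.
    assert update_task.get_remote_source_revision(
        'gs://bucket/MANIFEST') == 'abc123'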
Example 10
def get_projects_from_gcs(gcs_url):
  """Get projects from GCS path."""
  data = json.loads(storage.read_data(gcs_url))
  return [(project['name'], project) for project in data['projects']]
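For reference, a toy manifest mirroring the structure the code indexes into. Only the 'projects' and 'name' keys come from the code above; everything else is invented for illustration:

data = {
    'projects': [
        {'name': 'libpng', 'repo': 'https://example.com/libpng'},
        {'name': 'zlib', 'repo': 'https://example.com/zlib'},
    ]
}
pairs = [(project['name'], project) for project in data['projects']]
assert pairs[0][0] == 'libpng'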
Example 11
def _read_json(url):
  """Returns a JSON obejct loaded from the given GCS url."""
  data = storage.read_data(url)
  return json.loads(data)
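Unlike Example 5, this variant has no guard: if the read fails and storage.read_data returns None, json.loads raises a TypeError instead of logging a warning. A one-line check of that behavior:

import json

try:
  json.loads(None)
except TypeError:
  pass  # a failed read surfaces as an exception here, not a logged warning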