Пример #1
0
def _make_bisection_request(pubsub_topic, testcase, target, bisect_type):
    """Publish a bisection request for |testcase| to the external bisection
    service via |pubsub_topic|. Returns True if a request was published,
    False otherwise."""
    # Pick which commit range to bisect based on the request type.
    if bisect_type == 'fixed':
        commit_range = testcase.fixed
    elif bisect_type == 'regressed':
        commit_range = testcase.regression
    else:
        raise ValueError('Invalid bisection type: ' + bisect_type)

    old_commit, new_commit = _get_commits(commit_range, testcase.job_type)
    if not new_commit:
        # old_commit can be empty (i.e. '0' case), but new_commit should never be.
        return False

    old_commit, new_commit = _check_commits(testcase, bisect_type, old_commit,
                                            new_commit)

    repo_url = data_handler.get_main_repo(testcase.job_type) or ''
    reproducer = blobs.read_key(testcase.minimized_keys or testcase.fuzzed_keys)

    memory_tool = environment.get_memory_tool_name(testcase.job_type)
    attributes = {
        'type': bisect_type,
        'project_name': target.project,
        'sanitizer': environment.SANITIZER_NAME_MAP[memory_tool],
        'fuzz_target': target.binary,
        'old_commit': old_commit,
        'new_commit': new_commit,
        'testcase_id': str(testcase.key.id()),
        'issue_id': testcase.bug_information,
        'crash_type': testcase.crash_type,
        'crash_state': testcase.crash_state,
        'security': str(testcase.security_flag),
        'severity': severity_analyzer.severity_to_string(
            testcase.security_severity),
        'timestamp': testcase.timestamp.isoformat(),
        'repo_url': repo_url,
    }

    pubsub_client = pubsub.PubSubClient()
    pubsub_client.publish(pubsub_topic,
                          [pubsub.Message(reproducer, attributes)])
    return True
Пример #2
0
    def get_file_handle(self):
        """Return a seekable file handle to the metadata contents.

        The blobstore copy (self.key) takes precedence over raw contents
        (self.contents). Returns None when neither is available so the
        caller can decide how to handle the missing data.
        """
        if self.key:
            data = blobs.read_key(self.key)
        elif self.contents:
            data = self.contents
        else:
            # No bot-independent file for which to get a file handle. Let the
            # caller handle any errors.
            return None

        handle = tempfile.TemporaryFile()
        handle.write(data)
        handle.seek(0)  # Rewind so the caller reads from the beginning.
        return handle
Пример #3
0
def add_external_task(command, testcase_id, job):
  """Publish an external reproduction task for the given testcase and job.

  Only 'progression' commands are supported; any other command is a no-op.
  """
  if command != 'progression':
    # Only progression is supported.
    return

  pubsub_client = pubsub.PubSubClient()
  topic_name = job.external_reproduction_topic
  assert topic_name is not None

  testcase = data_handler.get_testcase_by_id(testcase_id)
  fuzz_target = testcase.get_fuzz_target()

  sanitizer = environment.SANITIZER_NAME_MAP.get(
      environment.get_memory_tool_name(job.name))

  job_environment = job.get_environment()
  if job_environment.get('CUSTOM_BINARY'):
    raise RuntimeError('External jobs should never have custom binaries.')

  build_path = (
      job_environment.get('RELEASE_BUILD_BUCKET_PATH') or
      job_environment.get('FUZZ_TARGET_BUILD_BUCKET_PATH'))
  if build_path is None:
    raise RuntimeError(f'{job.name} has no build path defined.')

  # Prefer the revision the testcase was last tested against, falling back to
  # the revision it originally crashed at.
  min_revision = (
      testcase.get_metadata('last_tested_revision') or testcase.crash_revision)

  logs.log(f'Publishing external reproduction task for {testcase_id}.')
  attributes = {
      'project': job.project,
      'target': fuzz_target.binary,
      'fuzzer': testcase.fuzzer_name,
      'sanitizer': sanitizer,
      'job': job.name,
      'testcaseId': str(testcase_id),
      'buildPath': build_path,
      'minRevisionAbove': str(min_revision),
      'numTrials': str(_NUM_TRIALS),
  }

  payload = blobs.read_key(testcase.fuzzed_keys)
  pubsub_client.publish(topic_name,
                        [pubsub.Message(data=payload, attributes=attributes)])
Пример #4
0
  def get(self):
    """Serve the HTML viewer page for the contents of a blob.

    Expects a 'key' request parameter identifying the blob. When a
    'testcase_id' parameter is also provided, access is granted only if the
    user may view that testcase and the key is one of its fuzzed/minimized
    blobs; otherwise a general access check applies.

    Raises:
      helpers.EarlyExitException: on a missing key, oversized content, or a
          failure reading the blob.
      helpers.AccessDeniedException: when the access checks fail.
    """
    key = request.get('key')
    if not key:
      raise helpers.EarlyExitException('No key provided.', 400)

    testcase_id = request.get('testcase_id')
    if testcase_id:
      # Testcase-scoped access: the user must be able to view the testcase,
      # and the key must actually belong to it.
      testcase = helpers.get_testcase(testcase_id)
      if not access.can_user_access_testcase(testcase):
        raise helpers.AccessDeniedException()

      if key not in [testcase.fuzzed_keys, testcase.minimized_keys]:
        raise helpers.AccessDeniedException()
    else:
      if not access.has_access():
        raise helpers.AccessDeniedException()

    blob_size = blobs.get_blob_size(key)
    if blob_size > MAX_ALLOWED_CONTENT_SIZE:
      raise helpers.EarlyExitException('Content exceeds max allowed size.', 400)

    # TODO(mbarbella): Workaround for an issue in the Cloud Storage API. Remove
    # once it is fixed properly upstream:
    # https://github.com/googleapis/google-cloud-python/issues/6572
    if blob_size:
      try:
        content = blobs.read_key(key).decode('utf-8', errors='replace')
      except Exception as e:
        # Chain the cause so the underlying storage error is not lost.
        raise helpers.EarlyExitException('Failed to read content.', 400) from e
    else:
      content = ''

    line_count = len(content.splitlines())
    size = len(content)
    title = (f'{utils.get_line_count_string(line_count)}, '
             f'{utils.get_size_string(size)}')

    return self.render('viewer.html', {'content': content, 'title': title})