예제 #1
0
def _connect():
    """Initial connect to the worker.

    Looks up this host's worker assignment, establishes a TLS-secured gRPC
    channel to the assigned worker, waits for the channel to become READY,
    and finally starts the heartbeat daemon thread.

    Raises:
        AssertionError: If the worker assignment (or its worker/project
            name) is missing.
        untrusted.HostException: If the channel does not reach READY within
            the initial connect timeout.
    """
    worker_assignment = _get_host_worker_assignment()
    # Explicit raises (not `assert`) so these checks survive `python -O`.
    if worker_assignment is None:
        raise AssertionError
    if worker_assignment.worker_name is None:
        raise AssertionError
    if worker_assignment.project_name is None:
        raise AssertionError

    # Certs are provisioned out of band; if not ready yet, wait briefly and
    # exit cleanly — presumably a supervisor restarts the bot to retry
    # (TODO confirm restart behavior).
    root_cert = _get_root_cert(worker_assignment.project_name)
    if not root_cert:
        logs.log_warn("TLS certs not yet generated.")
        time.sleep(WAIT_TLS_CERT_SECONDS)
        sys.exit(0)

    # Route this host's tasks onto the project-specific queue.
    environment.set_value(
        "QUEUE_OVERRIDE",
        untrusted.platform_name(worker_assignment.project_name, "linux"),
    )

    server_name = worker_assignment.worker_name
    if not environment.get_value("LOCAL_DEVELOPMENT"):
        # Non-local workers are addressed via the internal network domain.
        server_name += untrusted.internal_network_domain()

    _host_state.worker_bot_name = worker_assignment.worker_name

    credentials = grpc.ssl_channel_credentials(root_cert)
    _host_state.channel = grpc.secure_channel(
        "%s:%d" % (server_name, config.PORT),
        credentials=credentials,
        options=config.GRPC_OPTIONS,
    )
    _host_state.stub = UntrustedRunnerStub(_host_state.channel)

    logs.log("Connecting to worker %s..." % server_name)
    # Subscribe before polling so connectivity transitions are observed;
    # try_to_connect=True kicks off the actual connection attempt.
    _host_state.channel.subscribe(_channel_connectivity_changed,
                                  try_to_connect=True)

    channel_state = _check_channel_state(
        config.INITIAL_CONNECT_TIMEOUT_SECONDS)
    if channel_state == ChannelState.INCONSISTENT:
        # Worker state doesn't match ours: record the metric and exit
        # (host_exit_no_return does not return).
        logs.log_warn("Worker inconsistent on initial connect.")
        monitoring_metrics.HOST_INCONSISTENT_COUNT.increment()
        host_exit_no_return(return_code=0)

    if channel_state != ChannelState.READY:
        raise untrusted.HostException("Failed to connect to worker.")

    environment.set_value("WORKER_BOT_NAME", worker_assignment.worker_name)

    # Daemon thread so the heartbeat never blocks process shutdown.
    _host_state.heartbeat_thread = threading.Thread(target=_do_heartbeat)
    _host_state.heartbeat_thread.daemon = True
    _host_state.heartbeat_thread.start()
예제 #2
0
def sync_cf_job(project, info, corpus_bucket, quarantine_bucket, logs_bucket,
                backup_bucket, libfuzzer, afl):
  """Sync the config with ClusterFuzz.

  For each job template derived from the project config, create or update
  the corresponding ClusterFuzz Job entity and register newly enabled jobs
  on the owning fuzzer entity.
  """
  # Engines we manage automatically, mapped to their fuzzer entities.
  fuzzers_by_engine = {'libfuzzer': libfuzzer, 'afl': afl}

  # Create/update ClusterFuzz jobs.
  for template in get_jobs_for_project(project, info):
    if template.engine == 'none':
      # Engine-less jobs are not automatically managed.
      continue

    if template.engine not in fuzzers_by_engine:
      raise OssFuzzSetupException('Invalid fuzzing engine.')
    fuzzer_entity = fuzzers_by_engine[template.engine]

    job_name = template.job_name(project)
    job = (data_types.Job.query(data_types.Job.name == job_name).get() or
           data_types.Job())

    if not info.get('disabled', False) and job_name not in fuzzer_entity.jobs:
      # Enable new job.
      fuzzer_entity.jobs.append(job_name)

    job.name = job_name
    job.platform = untrusted.platform_name(project, 'linux')
    job.templates = template.cf_job_templates

    revision_vars_url = REVISION_URL.format(
        project=project,
        bucket=_get_build_bucket_for_engine(template.engine),
        sanitizer=template.memory_tool)

    # Start from the base environment template; optional settings are
    # collected below and appended in one pass.
    environment_parts = [
        JOB_TEMPLATE.format(
            build_bucket_path=get_build_bucket_path(project, template.engine,
                                                    template.memory_tool),
            logs_bucket=logs_bucket,
            corpus_bucket=corpus_bucket,
            quarantine_bucket=quarantine_bucket,
            backup_bucket=backup_bucket,
            engine=template.engine,
            project=project,
            revision_vars_url=revision_vars_url)
    ]

    help_url = info.get('help_url')
    if help_url:
      environment_parts.append('HELP_URL = %s\n' % help_url)

    if template.experimental:
      environment_parts.append('EXPERIMENTAL = True\n')

    if template.minimize_job_override:
      environment_parts.append(
          'MINIMIZE_JOB_OVERRIDE = %s\n' %
          template.minimize_job_override.job_name(project))

    view_restrictions = info.get('view_restrictions')
    if view_restrictions:
      if view_restrictions in ALLOWED_VIEW_RESTRICTIONS:
        environment_parts.append(
            'ISSUE_VIEW_RESTRICTIONS = %s\n' % view_restrictions)
      else:
        logs.log_error('Invalid view restriction setting %s for project %s.' %
                       (view_restrictions, project))

    if info.get('selective_unpack'):
      environment_parts.append('UNPACK_ALL_FUZZ_TARGETS_AND_FILES = False\n')

    job.environment_string = ''.join(environment_parts)
    job.put()
예제 #3
0
  def _sync_job(self, project, info, corpus_bucket_name, quarantine_bucket_name,
                logs_bucket_name, backup_bucket_name):
    """Sync the config with ClusterFuzz.

    Creates or updates one ClusterFuzz Job entity per job template of this
    project and registers newly enabled jobs on the matching fuzzer entity.

    Args:
      project: Name of the project being set up.
      info: Project config dict.
      corpus_bucket_name: Corpus bucket name, or falsy to skip.
      quarantine_bucket_name: Quarantine bucket name, or falsy to skip.
      logs_bucket_name: Fuzz logs bucket name, or falsy to skip.
      backup_bucket_name: Backup bucket name, or falsy to skip.

    Raises:
      ProjectSetupError: If a template specifies an unknown fuzzing engine.
    """
    # Create/update ClusterFuzz jobs.
    for template in get_jobs_for_project(project, info):
      if template.engine == 'none':
        # Engine-less jobs are not automatically managed.
        continue

      fuzzer_entity = self._fuzzer_entities.get(template.engine)
      if not fuzzer_entity:
        raise ProjectSetupError('Invalid fuzzing engine ' + template.engine)

      job_name = template.job_name(project, self._config_suffix)
      job = data_types.Job.query(data_types.Job.name == job_name).get()
      if not job:
        job = data_types.Job()

      if job_name not in fuzzer_entity.jobs and not info.get('disabled', False):
        # Enable new job.
        fuzzer_entity.jobs.append(job_name)

      job.name = job_name
      if self._segregate_projects:
        job.platform = untrusted.platform_name(project, 'linux')
      else:
        # TODO(ochang): Support other platforms?
        job.platform = 'LINUX'

      job.templates = template.cf_job_templates

      # Base environment; optional settings are appended line by line below.
      job.environment_string = JOB_TEMPLATE.format(
          build_type=self._build_type,
          build_bucket_path=self._get_build_bucket_path(
              project, info, template.engine, template.memory_tool,
              template.architecture),
          engine=template.engine,
          project=project)

      if self._add_revision_mappings:
        revision_vars_url = self._revision_url_template.format(
            project=project,
            bucket=self._get_build_bucket(template.engine,
                                          template.architecture),
            sanitizer=template.memory_tool)

        job.environment_string += (
            'REVISION_VARS_URL = {revision_vars_url}\n'.format(
                revision_vars_url=revision_vars_url))

      if logs_bucket_name:
        job.environment_string += 'FUZZ_LOGS_BUCKET = {logs_bucket}\n'.format(
            logs_bucket=logs_bucket_name)

      if corpus_bucket_name:
        job.environment_string += 'CORPUS_BUCKET = {corpus_bucket}\n'.format(
            corpus_bucket=corpus_bucket_name)

      if quarantine_bucket_name:
        job.environment_string += (
            'QUARANTINE_BUCKET = {quarantine_bucket}\n'.format(
                quarantine_bucket=quarantine_bucket_name))

      if backup_bucket_name:
        job.environment_string += 'BACKUP_BUCKET = {backup_bucket}\n'.format(
            backup_bucket=backup_bucket_name)

      if self._add_info_labels:
        job.environment_string += (
            'AUTOMATIC_LABELS = Proj-{project},Engine-{engine}\n'.format(
                project=project,
                engine=template.engine,
            ))

      help_url = info.get('help_url')
      if help_url:
        job.environment_string += 'HELP_URL = %s\n' % help_url

      if template.experimental:
        job.environment_string += 'EXPERIMENTAL = True\n'

      if template.minimize_job_override:
        minimize_job_override = template.minimize_job_override.job_name(
            project, self._config_suffix)
        job.environment_string += (
            'MINIMIZE_JOB_OVERRIDE = %s\n' % minimize_job_override)

      view_restrictions = info.get('view_restrictions')
      if view_restrictions:
        if view_restrictions in ALLOWED_VIEW_RESTRICTIONS:
          job.environment_string += (
              'ISSUE_VIEW_RESTRICTIONS = %s\n' % view_restrictions)
        else:
          logs.log_error('Invalid view restriction setting %s for project %s.' %
                         (view_restrictions, project))

      selective_unpack = info.get('selective_unpack')
      if selective_unpack:
        job.environment_string += 'UNPACK_ALL_FUZZ_TARGETS_AND_FILES = False\n'

      main_repo = info.get('main_repo')
      if main_repo:
        job.environment_string += f'MAIN_REPO = {main_repo}\n'

      if (template.engine == 'libfuzzer' and
          template.architecture == 'x86_64' and
          'dataflow' in info.get('fuzzing_engines', DEFAULT_ENGINES)):
        # Dataflow binaries are built with dataflow sanitizer, but can be used
        # as an auxiliary build with libFuzzer builds (e.g. with ASan or UBSan).
        dataflow_build_bucket_path = self._get_build_bucket_path(
            project_name=project,
            info=info,
            engine='dataflow',
            memory_tool='dataflow',
            architecture=template.architecture)
        job.environment_string += (
            'DATAFLOW_BUILD_BUCKET_PATH = %s\n' % dataflow_build_bucket_path)

      if self._additional_vars:
        # Merge 'all' vars with engine- and sanitizer-specific overrides
        # (the more specific ones win).
        additional_vars = {}
        additional_vars.update(self._additional_vars.get('all', {}))

        engine_vars = self._additional_vars.get(template.engine, {})
        engine_sanitizer_vars = engine_vars.get(template.memory_tool, {})
        additional_vars.update(engine_sanitizer_vars)

        # dict.items() is equivalent to six.iteritems() here — sorted()
        # materializes the items either way — and this method already uses
        # f-strings (Python 3 only), so drop the six dependency.
        for key, value in sorted(additional_vars.items()):
          job.environment_string += ('{} = {}\n'.format(
              key,
              str(value).encode('unicode-escape').decode('utf-8')))

      job.put()
예제 #4
0
    def _sync_job(
        self,
        project,
        info,
        corpus_bucket_name,
        quarantine_bucket_name,
        logs_bucket_name,
        backup_bucket_name,
    ):
        """Sync the config with ClusterFuzz.

        Creates or updates one ClusterFuzz Job entity per job template of
        this project and registers newly enabled jobs on the matching
        fuzzer entity.

        Args:
            project: Name of the project being set up.
            info: Project config dict.
            corpus_bucket_name: Corpus bucket name, or falsy to skip.
            quarantine_bucket_name: Quarantine bucket name, or falsy to
                skip.
            logs_bucket_name: Fuzz logs bucket name, or falsy to skip.
            backup_bucket_name: Backup bucket name, or falsy to skip.

        Raises:
            ProjectSetupError: If a template specifies an unknown fuzzing
                engine.
        """
        # Create/update ClusterFuzz jobs.
        for template in get_jobs_for_project(project, info):
            if template.engine == "none":
                # Engine-less jobs are not automatically managed.
                continue

            fuzzer_entity = self._fuzzer_entities.get(template.engine)
            if not fuzzer_entity:
                raise ProjectSetupError("Invalid fuzzing engine " +
                                        template.engine)

            job_name = template.job_name(project)
            job = data_types.Job.query(data_types.Job.name == job_name).get()
            if not job:
                job = data_types.Job()

            if job_name not in fuzzer_entity.jobs and not info.get(
                    "disabled", False):
                # Enable new job.
                fuzzer_entity.jobs.append(job_name)

            job.name = job_name
            if self._segregate_projects:
                job.platform = untrusted.platform_name(project, "linux")
            else:
                # TODO(ochang): Support other platforms?
                job.platform = "LINUX"

            job.templates = template.cf_job_templates

            # Base environment; optional settings are appended line by
            # line below.
            job.environment_string = JOB_TEMPLATE.format(
                build_type=self._build_type,
                build_bucket_path=self._get_build_bucket_path(
                    project,
                    info,
                    template.engine,
                    template.memory_tool,
                    template.architecture,
                ),
                engine=template.engine,
                project=project,
            )

            if self._add_revision_mappings:
                revision_vars_url = self._revision_url_template.format(
                    project=project,
                    bucket=self._get_build_bucket(template.engine,
                                                  template.architecture),
                    sanitizer=template.memory_tool,
                )

                job.environment_string += "REVISION_VARS_URL = {revision_vars_url}\n".format(
                    revision_vars_url=revision_vars_url)

            if logs_bucket_name:
                job.environment_string += "FUZZ_LOGS_BUCKET = {logs_bucket}\n".format(
                    logs_bucket=logs_bucket_name)

            if corpus_bucket_name:
                job.environment_string += "CORPUS_BUCKET = {corpus_bucket}\n".format(
                    corpus_bucket=corpus_bucket_name)

            if quarantine_bucket_name:
                job.environment_string += "QUARANTINE_BUCKET = {quarantine_bucket}\n".format(
                    quarantine_bucket=quarantine_bucket_name)

            if backup_bucket_name:
                job.environment_string += "BACKUP_BUCKET = {backup_bucket}\n".format(
                    backup_bucket=backup_bucket_name)

            if self._add_info_labels:
                job.environment_string += "AUTOMATIC_LABELS = Proj-{project},Engine-{engine}\n".format(
                    project=project, engine=template.engine)

            help_url = info.get("help_url")
            if help_url:
                job.environment_string += "HELP_URL = %s\n" % help_url

            if template.experimental:
                job.environment_string += "EXPERIMENTAL = True\n"

            if template.minimize_job_override:
                minimize_job_override = template.minimize_job_override.job_name(
                    project)
                job.environment_string += ("MINIMIZE_JOB_OVERRIDE = %s\n" %
                                           minimize_job_override)

            view_restrictions = info.get("view_restrictions")
            if view_restrictions:
                if view_restrictions in ALLOWED_VIEW_RESTRICTIONS:
                    job.environment_string += (
                        "ISSUE_VIEW_RESTRICTIONS = %s\n" % view_restrictions)
                else:
                    logs.log_error(
                        "Invalid view restriction setting %s for project %s." %
                        (view_restrictions, project))

            selective_unpack = info.get("selective_unpack")
            if selective_unpack:
                job.environment_string += "UNPACK_ALL_FUZZ_TARGETS_AND_FILES = False\n"

            if (template.engine == "libfuzzer"
                    and template.architecture == "x86_64" and "dataflow"
                    in info.get("fuzzing_engines", DEFAULT_ENGINES)):
                # Dataflow binaries are built with dataflow sanitizer, but can be used
                # as an auxiliary build with libFuzzer builds (e.g. with ASan or UBSan).
                dataflow_build_bucket_path = self._get_build_bucket_path(
                    project_name=project,
                    info=info,
                    engine="dataflow",
                    memory_tool="dataflow",
                    architecture=template.architecture,
                )
                job.environment_string += (
                    "DATAFLOW_BUILD_BUCKET_PATH = %s\n" %
                    dataflow_build_bucket_path)

            if self._additional_vars:
                # Merge 'all' vars with engine- and sanitizer-specific
                # overrides (the more specific ones win).
                additional_vars = {}
                additional_vars.update(self._additional_vars.get("all", {}))

                engine_vars = self._additional_vars.get(template.engine, {})
                engine_sanitizer_vars = engine_vars.get(
                    template.memory_tool, {})
                additional_vars.update(engine_sanitizer_vars)

                # sorted() materializes the items either way, so
                # dict.items() is equivalent to six.iteritems() on both
                # Python 2 and 3 — drop the six dependency.
                for key, value in sorted(additional_vars.items()):
                    job.environment_string += "{} = {}\n".format(
                        key,
                        str(value).encode("unicode-escape").decode("utf-8"))

            job.put()