Code Example #1
def request_bisection(testcase, bisect_type):
    """Request precise bisection."""
    pubsub_topic = local_config.ProjectConfig().get(
        'bisect_service.pubsub_topic')
    if not pubsub_topic:
        return

    target = testcase.get_fuzz_target()
    if not target:
        return

    if bisect_type == 'fixed':
        old_commit, new_commit = _get_commits(testcase.fixed,
                                              testcase.job_type)
    elif bisect_type == 'regressed':
        old_commit, new_commit = _get_commits(testcase.regression,
                                              testcase.job_type)
    else:
        raise ValueError('Invalid bisection type: ' + bisect_type)

    reproducer = blobs.read_key(testcase.minimized_keys
                                or testcase.fuzzed_keys)
    pubsub_client = pubsub.PubSubClient()
    pubsub_client.publish(
        pubsub_topic,
        pubsub.Message(
            reproducer, {
                'type': bisect_type,
                'project_name': target.project,
                'sanitizer': environment.SANITIZER_NAME_MAP[
                    environment.get_memory_tool_name(testcase.job_type)],
                'fuzz_target': target.binary,
                'old_commit': old_commit,
                'new_commit': new_commit,
                'testcase_id': testcase.key.id(),
                'issue_id': testcase.bug_information,
                'crash_type': testcase.crash_type,
                'security': str(testcase.security_flag),
            }))
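All of these snippets read settings through dotted keys such as 'bisect_service.pubsub_topic'. As a rough, hypothetical sketch of what such a dotted-key lookup over a nested configuration dict could look like (an illustration only, not the actual local_config implementation), consider:

def get_dotted(config, key_path, default=None):
    """Return the value at a dotted key path such as 'bisect_service.pubsub_topic'."""
    value = config
    for part in key_path.split('.'):
        if not isinstance(value, dict) or part not in value:
            return default
        value = value[part]
    return value

# Example with made-up values.
sample_config = {'bisect_service': {'pubsub_topic': 'projects/example/topics/bisection'}}
assert get_dotted(sample_config, 'bisect_service.pubsub_topic') == (
    'projects/example/topics/bisection')
assert get_dotted(sample_config, 'missing.key') is None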
Code Example #2
def _get_revision_vars_url_format(job_type):
    """Return REVISION_VARS_URL from job environment if available. Otherwise,
  default to one set in project.yaml. For custom binary jobs, this is not
  applicable."""
    if job_type is None:
        # Force it to use env attribute in project.yaml.
        return local_config.ProjectConfig().get('env.REVISION_VARS_URL')

    custom_binary = data_handler.get_value_from_job_definition(
        job_type, 'CUSTOM_BINARY')
    if utils.string_is_true(custom_binary):
        return None

    return data_handler.get_value_from_job_definition_or_environment(
        job_type, 'REVISION_VARS_URL')
Code Example #3
File: monitor.py  Project: whiteHat001/clusterfuzz
def initialize():
    """Initialize if monitoring is enabled for this bot."""
    global _monitoring_v3_client
    global _flusher_thread

    if environment.get_value('LOCAL_DEVELOPMENT'):
        return

    if not local_config.ProjectConfig().get('monitoring.enabled'):
        return

    if check_module_loaded(monitoring_v3):
        _initialize_monitored_resource()
        _monitoring_v3_client = monitoring_v3.MetricServiceClient(
            credentials=credentials.get_default()[0])
        _flusher_thread = _FlusherThread()
        _flusher_thread.start()
Code Example #4
def notify_issue_update(testcase, status):
    """Notify that an issue update occurred (i.e. issue was filed or closed)."""
    topic = local_config.ProjectConfig().get('issue_updates.pubsub_topic')
    if not topic:
        return

    pubsub_client = pubsub.PubSubClient()
    pubsub_client.publish(topic, [
        pubsub.Message(
            attributes={
                'crash_address': testcase.crash_address,
                'crash_state': testcase.crash_state,
                'crash_type': testcase.crash_type,
                'issue_id': testcase.bug_information or '',
                'security': str(testcase.security_flag).lower(),
                'status': status,
                'testcase_id': str(testcase.key.id()),
            })
    ])
Code Example #5
def ignore_stacktrace(crash_stacktrace):
  """Return whether the stacktrace needs to be ignored."""
  # Filter crash based on search exclude pattern specified in job definition.
  search_excludes = environment.get_value('SEARCH_EXCLUDES')
  if search_excludes and re.search(search_excludes, crash_stacktrace):
    return True

  # Match stacktrace against custom defined blacklist regexes in project config.
  stack_blacklist_regexes = (
      local_config.ProjectConfig().get('stacktrace.stack_blacklist_regexes'))
  if not stack_blacklist_regexes:
    return False

  stack_blacklist_regex = re.compile(
      r'(%s)' % '|'.join(stack_blacklist_regexes))
  for line in crash_stacktrace.splitlines():
    if stack_blacklist_regex.match(line):
      return True
  return False
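The blacklist handling above joins all configured patterns into one alternation and applies re.match line by line, so each pattern is anchored at the start of the line. A small standalone sketch that mirrors this logic (the patterns and stacktrace lines are made up):

import re

stack_blacklist_regexes = [r'.*known_noise_function.*', r'.*third_party/flaky_lib.*']
stack_blacklist_regex = re.compile(r'(%s)' % '|'.join(stack_blacklist_regexes))

crash_stacktrace = '\n'.join([
    '#0 0x123 in known_noise_function /src/noise.cc:10',
    '#1 0x456 in real_bug /src/app.cc:42',
])

# Because re.match anchors at the start of each line, patterns usually need a
# leading '.*' to hit frames that appear mid-line.
print(any(stack_blacklist_regex.match(line)
          for line in crash_stacktrace.splitlines()))  # True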
Code Example #6
    def get(self):
        """Handles a GET request."""
        libfuzzer = data_types.Fuzzer.query(
            data_types.Fuzzer.name == 'libFuzzer').get()
        if not libfuzzer:
            logs.log_error('Failed to get libFuzzer Fuzzer entity.')
            return

        afl = data_types.Fuzzer.query(data_types.Fuzzer.name == 'afl').get()
        if not afl:
            logs.log_error('Failed to get AFL Fuzzer entity.')
            return

        bucket_config = local_config.ProjectConfig().sub_config(
            'project_setup.build_buckets')

        if not bucket_config:
            raise ProjectSetupError('Project setup buckets not specified.')

        config = ProjectSetup(
            BUILD_BUCKET_PATH_TEMPLATE,
            REVISION_URL,
            segregate_projects=True,
            engine_build_buckets={
                'libfuzzer': bucket_config.get('libfuzzer'),
                'libfuzzer-i386': bucket_config.get('libfuzzer_i386'),
                'afl': bucket_config.get('afl'),
                'none': bucket_config.get('no_engine'),
                'dataflow': bucket_config.get('dataflow'),
            },
            fuzzer_entities={
                'libfuzzer': libfuzzer,
                'afl': afl,
            },
            add_info_labels=True)

        projects = get_projects()
        config.set_up(projects)
Code Example #7
def request_bisection(testcase):
  """Request precise bisection."""
  pubsub_topic = local_config.ProjectConfig().get('bisect_service.pubsub_topic')
  if not pubsub_topic:
    return

  # Only request bisects for reproducible security bugs with a bug filed, found
  # by engine fuzzers.
  if not testcase.security_flag:
    return

  if testcase.one_time_crasher_flag:
    return

  if not testcase.bug_information:
    return

  target = testcase.get_fuzz_target()
  if not target:
    return

  _make_bisection_request(pubsub_topic, testcase, target, 'regressed')
  _make_bisection_request(pubsub_topic, testcase, target, 'fixed')
Code Example #8
def request_bisection(testcase_id):
  """Request precise bisection."""
  pubsub_topic = local_config.ProjectConfig().get('bisect_service.pubsub_topic')
  if not pubsub_topic:
    return

  testcase = data_handler.get_testcase_by_id(testcase_id)

  # Only request bisects for reproducible security bugs with a bug filed, found
  # by engine fuzzers.
  if not testcase.security_flag:
    return

  if testcase.fixed == 'NA':
    # Testcase got into an invalid state.
    _notify_bisection_invalid(pubsub_topic, testcase)
    return

  if testcase.one_time_crasher_flag:
    return

  if not testcase.bug_information:
    return

  target = testcase.get_fuzz_target()
  if not target:
    return

  # Only make 1 request of each type per testcase.
  if (not testcase.get_metadata('requested_regressed_bisect') and
      _make_bisection_request(pubsub_topic, testcase, target, 'regressed')):
    testcase.set_metadata('requested_regressed_bisect', True)

  if (not testcase.get_metadata('requested_fixed_bisect') and
      _make_bisection_request(pubsub_topic, testcase, target, 'fixed')):
    testcase.set_metadata('requested_fixed_bisect', True)
Code Example #9
def upload_testcases_if_needed(fuzzer_name, testcase_list, testcase_directory,
                               data_directory):
    """Upload test cases from the list to a cloud storage bucket."""
    # Since builtin fuzzers have a coverage-minimized corpus, there is no need
    # to upload test case samples for them.
    if fuzzer_name in builtin_fuzzers.BUILTIN_FUZZERS:
        return

    bucket_name = local_config.ProjectConfig().get(
        'coverage.fuzzer-testcases.bucket')
    if not bucket_name:
        return

    files_list = []
    has_testcases_in_testcase_directory = False
    has_testcases_in_data_directory = False
    for testcase_path in testcase_list:
        if testcase_path.startswith(testcase_directory):
            files_list.append(
                os.path.relpath(testcase_path, testcase_directory))
            has_testcases_in_testcase_directory = True
        elif testcase_path.startswith(data_directory):
            files_list.append(os.path.relpath(testcase_path, data_directory))
            has_testcases_in_data_directory = True
    if not files_list:
        return

    formatted_date = str(utils.utcnow().date())
    gcs_base_url = 'gs://{bucket_name}/{date}/{fuzzer_name}/'.format(
        bucket_name=bucket_name, date=formatted_date, fuzzer_name=fuzzer_name)

    runner = gsutil.GSUtilRunner()
    batch_directory_blobs = storage.list_blobs(gcs_base_url)
    total_testcases = 0
    for blob in batch_directory_blobs:
        if not blob.endswith(LIST_FILE_BASENAME):
            continue

        list_gcs_url = 'gs://{bucket}/{blob}'.format(bucket=bucket_name,
                                                     blob=blob)
        data = storage.read_data(list_gcs_url)
        if not data:
            logs.log_error(
                'Read no data from test case list at {gcs_url}'.format(
                    gcs_url=list_gcs_url))
            continue

        total_testcases += len(data.splitlines())

        # If we've already uploaded enough test cases for this fuzzer today, return.
        if total_testcases >= TESTCASES_PER_DAY:
            return

    # Cap the number of files.
    testcases_limit = min(len(files_list), TESTCASES_PER_DAY - total_testcases)
    files_list = files_list[:testcases_limit]

    # Upload each batch of tests to its own unique sub-bucket.
    identifier = environment.get_value('BOT_NAME') + str(utils.utcnow())
    gcs_base_url += utils.string_hash(identifier)

    list_gcs_url = gcs_base_url + '/' + LIST_FILE_BASENAME
    if not storage.write_data('\n'.join(files_list), list_gcs_url):
        return

    if has_testcases_in_testcase_directory:
        # Sync everything in |testcase_directory| since it is fuzzer-generated.
        runner.rsync(testcase_directory, gcs_base_url)

    if has_testcases_in_data_directory:
        # Sync all fuzzer-generated testcases in the data bundle directory.
        runner.rsync(data_directory,
                     gcs_base_url,
                     exclusion_pattern=('(?!.*{fuzz_prefix})'.format(
                         fuzz_prefix=testcase_manager.FUZZ_PREFIX)))

        # Sync all possible resource dependencies as a best effort. This matches
        # the |resources-| prefix that a fuzzer can use to indicate resources, and
        # the resources directory that Chromium web_tests use for dependencies.
        runner.rsync(data_directory,
                     gcs_base_url,
                     exclusion_pattern='(?!.*resource)')

    logs.log('Synced {count} test cases to {gcs_url}.'.format(
        count=len(files_list), gcs_url=gcs_base_url))
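The exclusion_pattern arguments above are negative lookaheads: a path that matches the exclusion pattern is skipped, so '(?!.*resource)' effectively keeps only paths that mention "resource". A quick standalone check of that behavior, assuming the pattern is applied to relative paths the way Python's re.match would be (the file names are made up):

import re

exclusion_pattern = re.compile(r'(?!.*resource)')
paths = ['fuzz-testcase-1', 'resources/dictionary.dict', 'images/resource_icon.png']

# A zero-width match means the lookahead succeeded, i.e. the path does NOT
# contain "resource" and would therefore be excluded from the sync.
excluded = [path for path in paths if exclusion_pattern.match(path)]
print(excluded)  # ['fuzz-testcase-1']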
Code Example #10
def bucket_domain_suffix():
  domain = local_config.ProjectConfig().get('bucket_domain_suffix')
  if not domain:
    domain = '%s.appspot.com' % utils.get_application_id()

  return domain
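When no bucket_domain_suffix is configured, the helper falls back to the App Engine default domain for the application. A tiny standalone illustration of that fallback (the application ID is a placeholder):

def domain_suffix(configured_suffix, application_id):
    # Fall back to '<app-id>.appspot.com' when nothing is configured.
    return configured_suffix or '%s.appspot.com' % application_id

print(domain_suffix(None, 'example-clusterfuzz'))       # example-clusterfuzz.appspot.com
print(domain_suffix('buckets.example.com', 'ignored'))  # buckets.example.com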
Code Example #11
def default_project_name():
    """Return the default project name for this instance of ClusterFuzz."""
    # Do not use |PROJECT_NAME| environment variable as that is the overridden
    # project name from job type and is not the default project name.
    return local_config.ProjectConfig().get('env.PROJECT_NAME')
Code Example #12
File: utils.py  Project: wreck1t/clusterfuzz
def default_backup_bucket():
  """Return the default backup bucket for this instance of ClusterFuzz."""
  # Do not use |BACKUP_BUCKET| environment variable as that is the overridden
  # backup bucket from job type and is not the default backup bucket.
  return local_config.ProjectConfig().get('env.BACKUP_BUCKET')
Code Example #13
    def get(self):
        """Handles a GET request."""
        libfuzzer = data_types.Fuzzer.query(
            data_types.Fuzzer.name == "libFuzzer").get()
        if not libfuzzer:
            logs.log_error("Failed to get libFuzzer Fuzzer entity.")
            return

        afl = data_types.Fuzzer.query(data_types.Fuzzer.name == "afl").get()
        if not afl:
            logs.log_error("Failed to get AFL Fuzzer entity.")
            return

        honggfuzz = data_types.Fuzzer.query(
            data_types.Fuzzer.name == "honggfuzz").get()
        if not honggfuzz:
            logs.log_error("Failed to get honggfuzz Fuzzer entity.")
            return

        project_setup_config = local_config.ProjectConfig().sub_config(
            "project_setup")
        bucket_config = project_setup_config.sub_config("build_buckets")

        if not bucket_config:
            raise ProjectSetupError("Project setup buckets not specified.")

        config = ProjectSetup(
            BUILD_BUCKET_PATH_TEMPLATE,
            REVISION_URL,
            project_setup_config.get("build_type"),
            segregate_projects=project_setup_config.get("segregate_projects",
                                                        default=False),
            engine_build_buckets={
                "libfuzzer": bucket_config.get("libfuzzer"),
                "libfuzzer-i386": bucket_config.get("libfuzzer_i386"),
                "afl": bucket_config.get("afl"),
                "honggfuzz": bucket_config.get("honggfuzz"),
                "none": bucket_config.get("no_engine"),
                "dataflow": bucket_config.get("dataflow"),
            },
            fuzzer_entities={
                "libfuzzer": libfuzzer,
                "honggfuzz": honggfuzz,
                "afl": afl,
            },
            add_info_labels=project_setup_config.get("add_info_labels",
                                                     default=False),
            add_revision_mappings=project_setup_config.get(
                "add_revision_mappings", default=False),
            additional_vars=project_setup_config.get("additional_vars"),
        )

        projects_source = project_setup_config.get("source")
        if projects_source == "oss-fuzz":
            projects = get_oss_fuzz_projects()
        elif projects_source.startswith(storage.GS_PREFIX):
            projects = get_projects_from_gcs(projects_source)
        else:
            raise ProjectSetupError("Invalid projects source: " +
                                    projects_source)

        if not projects:
            raise ProjectSetupError("Missing projects list.")

        config.set_up(projects)
Code Example #14
  def get(self):
    """Handles a GET request."""
    libfuzzer = data_types.Fuzzer.query(
        data_types.Fuzzer.name == 'libFuzzer').get()
    if not libfuzzer:
      logs.log_error('Failed to get libFuzzer Fuzzer entity.')
      return

    afl = data_types.Fuzzer.query(data_types.Fuzzer.name == 'afl').get()
    if not afl:
      logs.log_error('Failed to get AFL Fuzzer entity.')
      return

    honggfuzz = data_types.Fuzzer.query(
        data_types.Fuzzer.name == 'honggfuzz').get()
    if not honggfuzz:
      logs.log_error('Failed to get honggfuzz Fuzzer entity.')
      return

    project_config = local_config.ProjectConfig()
    segregate_projects = project_config.get('segregate_projects')
    project_setup_configs = project_config.get('project_setup')
    project_names = set()

    for setup_config in project_setup_configs:
      bucket_config = setup_config.get('build_buckets')

      if not bucket_config:
        raise ProjectSetupError('Project setup buckets not specified.')

      config = ProjectSetup(
          BUILD_BUCKET_PATH_TEMPLATE,
          REVISION_URL,
          setup_config.get('build_type'),
          config_suffix=setup_config.get('job_suffix', ''),
          segregate_projects=segregate_projects,
          engine_build_buckets={
              'libfuzzer': bucket_config.get('libfuzzer'),
              'libfuzzer-i386': bucket_config.get('libfuzzer_i386'),
              'afl': bucket_config.get('afl'),
              'honggfuzz': bucket_config.get('honggfuzz'),
              'none': bucket_config.get('no_engine'),
              'dataflow': bucket_config.get('dataflow'),
          },
          fuzzer_entities={
              'libfuzzer': libfuzzer,
              'honggfuzz': honggfuzz,
              'afl': afl,
          },
          add_info_labels=setup_config.get('add_info_labels', False),
          add_revision_mappings=setup_config.get('add_revision_mappings',
                                                 False),
          additional_vars=setup_config.get('additional_vars'))

      projects_source = setup_config.get('source')
      if projects_source == 'oss-fuzz':
        projects = get_oss_fuzz_projects()
      elif projects_source.startswith(storage.GS_PREFIX):
        projects = get_projects_from_gcs(projects_source)
      else:
        raise ProjectSetupError('Invalid projects source: ' + projects_source)

      if not projects:
        raise ProjectSetupError('Missing projects list.')

      project_names.update(config.set_up(projects))

    cleanup_stale_projects([libfuzzer, afl, honggfuzz], project_names,
                           segregate_projects)
Code Example #15
File: fuzzer_logs.py  Project: zzdxxd/clusterfuzz
def get_bucket():
    """Return path to fuzzer logs bucket."""
    return local_config.ProjectConfig().get('logs.fuzzer.bucket')
Code Example #16
def upload_testcases_if_needed(fuzzer_name, testcase_list, testcase_directory):
    """Upload test cases from the list to a cloud storage bucket."""
    # Since builtin fuzzers have a coverage-minimized corpus, there is no need
    # to upload test case samples for them.
    if fuzzer_name in builtin_fuzzers.BUILTIN_FUZZERS:
        return

    bucket_name = local_config.ProjectConfig().get(
        'coverage.fuzzer-testcases.bucket')
    if not bucket_name:
        return

    # Only consider test cases in the output directory. We might upload too much
    # if we search the data directory as well, or have missing resources.
    # TODO(mbarbella): Support resources in data bundles.
    testcase_list = [
        os.path.relpath(testcase, testcase_directory)
        for testcase in testcase_list
        if testcase.startswith(testcase_directory)
    ]
    if not testcase_list:
        return

    # Bail out if this batch of test cases is too large.
    directory_size = shell.get_directory_size(testcase_directory)
    if directory_size >= MAX_TESTCASE_DIRECTORY_SIZE:
        return

    formatted_date = str(utils.utcnow().date())
    gcs_base_url = 'gs://{bucket_name}/{date}/{fuzzer_name}/'.format(
        bucket_name=bucket_name, date=formatted_date, fuzzer_name=fuzzer_name)

    runner = gsutil.GSUtilRunner()
    batch_directory_blobs = storage.list_blobs(gcs_base_url)
    total_testcases = 0
    for blob in batch_directory_blobs:
        if not blob.endswith(LIST_FILE_BASENAME):
            continue

        list_gcs_url = 'gs://{bucket}/{blob}'.format(bucket=bucket_name,
                                                     blob=blob)
        data = storage.read_data(list_gcs_url)
        if not data:
            logs.log_error(
                'Read no data from test case list at {gcs_url}'.format(
                    gcs_url=list_gcs_url))
            continue

        total_testcases += len(data.splitlines())

        # If we've already uploaded enough test cases for this fuzzer today, return.
        if total_testcases >= TESTCASES_PER_DAY:
            return

    # Upload each batch of tests to its own unique sub-bucket.
    identifier = environment.get_value('BOT_NAME') + str(utils.utcnow())
    gcs_base_url += utils.string_hash(identifier)

    list_gcs_url = gcs_base_url + '/' + LIST_FILE_BASENAME
    if not storage.write_data('\n'.join(testcase_list), list_gcs_url):
        return

    runner.rsync(testcase_directory, gcs_base_url)
    logs.log('Synced {count} test cases to {gcs_url}'.format(
        count=len(testcase_list), gcs_url=gcs_base_url))
Code Example #17
def get_crash_data(crash_data,
                   symbolize_flag=True,
                   fuzz_target=None,
                   already_symbolized=False,
                   detect_ooms_and_hangs=None):
    """Get crash parameters from crash data.
  Crash parameters include crash type, address, state and stacktrace.
  If the stacktrace is not already symbolized, we will try to symbolize it
  unless |symbolize| flag is set to False. Symbolized stacktrace will contain
  inline frames, but we do exclude them for purposes of crash state generation
  (helps in testcase deduplication)."""
    # Decide whether to symbolize or not symbolize the input stacktrace.
    # Note that Fuchsia logs are always symbolized.
    if symbolize_flag:
        # Defer imports since stack_symbolizer pulls in a lot of things.
        from crash_analysis.stack_parsing import stack_symbolizer
        crash_stacktrace_with_inlines = stack_symbolizer.symbolize_stacktrace(
            crash_data, enable_inline_frames=True)
        crash_stacktrace_without_inlines = stack_symbolizer.symbolize_stacktrace(
            crash_data, enable_inline_frames=False)
    else:
        # We were explicitly told not to symbolize via |symbolize_flag|. There is
        # no distinction between inline and non-inline frames for an unsymbolized
        # stacktrace.
        crash_stacktrace_with_inlines = crash_data
        crash_stacktrace_without_inlines = crash_data

    # Additional stack frame ignore regexes.
    custom_stack_frame_ignore_regexes = (local_config.ProjectConfig().get(
        'stacktrace.stack_frame_ignore_regexes', []))

    if environment.get_value('TASK_NAME') == 'analyze':
        detect_v8_runtime_errors = True
    else:
        detect_v8_runtime_errors = environment.get_value(
            'DETECT_V8_RUNTIME_ERRORS', False)

    fuzz_target = fuzz_target or environment.get_value('FUZZ_TARGET')
    redzone_size = environment.get_value('REDZONE')
    if detect_ooms_and_hangs is None:
        detect_ooms_and_hangs = (
            environment.get_value('REPORT_OOMS_AND_HANGS')
            and (not redzone_size
                 or redzone_size <= MAX_REDZONE_SIZE_FOR_OOMS_AND_HANGS))

    include_ubsan = 'halt_on_error=0' not in environment.get_value(
        'UBSAN_OPTIONS', '')

    stack_parser = stacktraces.StackParser(
        symbolized=symbolize_flag or already_symbolized,
        detect_ooms_and_hangs=detect_ooms_and_hangs,
        detect_v8_runtime_errors=detect_v8_runtime_errors,
        custom_stack_frame_ignore_regexes=custom_stack_frame_ignore_regexes,
        fuzz_target=fuzz_target,
        include_ubsan=include_ubsan)

    result = stack_parser.parse(crash_stacktrace_without_inlines)

    # Use stacktrace with inlines for the result.
    if result.crash_stacktrace:
        result.crash_stacktrace = crash_stacktrace_with_inlines

    # Linkify Android stacktrace.
    if environment.is_android() and (result.found_android_kernel_crash
                                     or result.is_kasan):
        linkify_android_stacktrace(result)

    return result
Code Example #18
def set_bot_environment():
    """Set environment for the bots."""
    root_dir = get_value('ROOT_DIR')

    if not root_dir:
        # Error, bail out.
        return False

    # Reset our current working directory. Our last job might have left us in
    # a non-existent temp directory, or ROOT_DIR might have been deleted and
    # recreated.
    os.chdir(root_dir)

    # Set some default directories. These can be overridden by config files below.
    bot_dir = os.path.join(root_dir, 'bot')
    if is_trusted_host(ensure_connected=False):
        worker_root_dir = os.environ['WORKER_ROOT_DIR']
        os.environ['BUILDS_DIR'] = os.path.join(worker_root_dir, 'bot',
                                                'builds')
    else:
        os.environ['BUILDS_DIR'] = os.path.join(bot_dir, 'builds')

    os.environ['BUILD_URLS_DIR'] = os.path.join(bot_dir, 'build-urls')
    os.environ['LOG_DIR'] = os.path.join(bot_dir, 'logs')
    os.environ['CACHE_DIR'] = os.path.join(bot_dir, 'cache')

    inputs_dir = os.path.join(bot_dir, 'inputs')
    os.environ['INPUT_DIR'] = inputs_dir
    os.environ['CRASH_STACKTRACES_DIR'] = os.path.join(inputs_dir,
                                                       'crash-stacks')
    os.environ['FUZZERS_DIR'] = os.path.join(inputs_dir, 'fuzzers')
    os.environ['DATA_BUNDLES_DIR'] = os.path.join(inputs_dir, 'data-bundles')
    os.environ['FUZZ_INPUTS'] = os.path.join(inputs_dir, 'fuzzer-testcases')
    os.environ['FUZZ_INPUTS_MEMORY'] = os.environ['FUZZ_INPUTS']
    os.environ['FUZZ_INPUTS_DISK'] = os.path.join(inputs_dir,
                                                  'fuzzer-testcases-disk')
    os.environ['MUTATOR_PLUGINS_DIR'] = os.path.join(inputs_dir,
                                                     'mutator-plugins')
    os.environ['FUZZ_DATA'] = os.path.join(inputs_dir,
                                           'fuzzer-common-data-bundles')
    os.environ['IMAGES_DIR'] = os.path.join(inputs_dir, 'images')
    os.environ['SYMBOLS_DIR'] = os.path.join(inputs_dir, 'symbols')
    os.environ['USER_PROFILE_ROOT_DIR'] = os.path.join(inputs_dir,
                                                       'user-profile-dirs')

    # Set bot name.
    if not get_value('BOT_NAME'):
        # If not defined, default to host name.
        os.environ['BOT_NAME'] = socket.gethostname().lower()

    # Local temp directory (non-tmpfs).
    local_tmp_dir = os.path.join(bot_dir, 'tmp')

    # Set BOT_TMPDIR if not already set.
    if not get_value('BOT_TMPDIR'):
        os.environ['BOT_TMPDIR'] = local_tmp_dir

    # Add common environment variables needed by Bazel test runner.
    # See https://docs.bazel.build/versions/master/test-encyclopedia.html.
    # NOTE: Do not use a tmpfs folder as some fuzz targets don't work.
    os.environ['TEST_TMPDIR'] = local_tmp_dir
    os.environ['TZ'] = 'UTC'

    # Sets the default configuration. Can be overridden by job environment.
    set_default_vars()

    # Set environment variable from local project configuration.
    from config import local_config
    local_config.ProjectConfig().set_environment()

    # Success.
    return True
Code Example #19
File: big_query.py  Project: thurday/clusterfuzz
def get_bucket():
    """Return bucket for bigquery stats."""
    return local_config.ProjectConfig().get('bigquery.bucket')
Code Example #20
def set_bot_environment():
    """Set environment for the bots."""
    root_dir = get_value("ROOT_DIR")

    if not root_dir:
        # Error, bail out.
        return False

    # Reset our current working directory. Our last job might have left us in
    # a non-existent temp directory, or ROOT_DIR might have been deleted and
    # recreated.
    os.chdir(root_dir)

    # Set some default directories. These can be overridden by config files below.
    bot_dir = os.path.join(root_dir, "bot")
    if is_trusted_host(ensure_connected=False):
        worker_root_dir = os.environ["WORKER_ROOT_DIR"]
        os.environ["BUILDS_DIR"] = os.path.join(worker_root_dir, "bot",
                                                "builds")
    else:
        os.environ["BUILDS_DIR"] = os.path.join(bot_dir, "builds")

    os.environ["BUILD_URLS_DIR"] = os.path.join(bot_dir, "build-urls")
    os.environ["LOG_DIR"] = os.path.join(bot_dir, "logs")
    os.environ["CACHE_DIR"] = os.path.join(bot_dir, "cache")

    inputs_dir = os.path.join(bot_dir, "inputs")
    os.environ["INPUT_DIR"] = inputs_dir
    os.environ["CRASH_STACKTRACES_DIR"] = os.path.join(inputs_dir,
                                                       "crash-stacks")
    os.environ["FUZZERS_DIR"] = os.path.join(inputs_dir, "fuzzers")
    os.environ["DATA_BUNDLES_DIR"] = os.path.join(inputs_dir, "data-bundles")
    os.environ["FUZZ_INPUTS"] = os.path.join(inputs_dir, "fuzzer-testcases")
    os.environ["FUZZ_INPUTS_MEMORY"] = os.environ["FUZZ_INPUTS"]
    os.environ["FUZZ_INPUTS_DISK"] = os.path.join(inputs_dir,
                                                  "fuzzer-testcases-disk")
    os.environ["MUTATOR_PLUGINS_DIR"] = os.path.join(inputs_dir,
                                                     "mutator-plugins")
    os.environ["FUZZ_DATA"] = os.path.join(inputs_dir,
                                           "fuzzer-common-data-bundles")
    os.environ["IMAGES_DIR"] = os.path.join(inputs_dir, "images")
    os.environ["SYMBOLS_DIR"] = os.path.join(inputs_dir, "symbols")
    os.environ["USER_PROFILE_ROOT_DIR"] = os.path.join(inputs_dir,
                                                       "user-profile-dirs")

    # Set bot name.
    if not get_value("BOT_NAME"):
        # If not defined, default to host name.
        os.environ["BOT_NAME"] = socket.gethostname().lower()

    # Set BOT_TMPDIR if not already set.
    if not get_value("BOT_TMPDIR"):
        os.environ["BOT_TMPDIR"] = os.path.join(bot_dir, "tmp")

    # Add common environment variables needed by Bazel test runner.
    # See https://docs.bazel.build/versions/master/test-encyclopedia.html.
    os.environ["TEST_TMPDIR"] = get_value("BOT_TMPDIR")
    os.environ["TZ"] = "UTC"

    # Sets the default configuration. Can be overridden by job environment.
    set_default_vars()

    # Set environment variable from local project configuration.
    from config import local_config

    local_config.ProjectConfig().set_environment()

    # Success.
    return True
Code Example #21
File: bisection.py  Project: yuanjunh-git/clusterfuzz
def _get_topic():
    """Get the Pub/Sub topic for publishing tasks."""
    return local_config.ProjectConfig().get('bisect_service.pubsub_topic')
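The topics configured in these examples are passed straight to a Pub/Sub client, which typically expects a full Cloud Pub/Sub resource path of the form projects/<project>/topics/<topic>. A hedged sketch of composing and validating such a path (the project and topic names below are placeholders, not values from any real configuration):

import re

def topic_path(project_id, topic_name):
    """Build a full Pub/Sub topic resource path."""
    return 'projects/%s/topics/%s' % (project_id, topic_name)

_TOPIC_PATH_RE = re.compile(r'^projects/[^/]+/topics/[^/]+$')

example = topic_path('my-clusterfuzz-project', 'bisection-requests')
print(example)                              # projects/my-clusterfuzz-project/topics/bisection-requests
print(bool(_TOPIC_PATH_RE.match(example)))  # True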