Exemple #1
0
    def get(self):
        """Handle a GET request.

        Walks every Job entity and publishes corpus backups for the fuzz
        targets of each eligible job (non-experimental, corpus-pruning,
        with a backup bucket configured).
        """
        for job in ndb_utils.get_all_from_model(data_types.Job):
            environment = job.get_environment()

            # Don't use corpus backups from experimental jobs. Skip.
            if utils.string_is_true(environment.get('EXPERIMENTAL')):
                continue

            # There won't be any corpus backups without corpus pruning. Skip.
            if not utils.string_is_true(environment.get('CORPUS_PRUNE')):
                continue

            backup_bucket = environment.get('BACKUP_BUCKET')
            if not backup_bucket:
                # No backup bucket found. Skip.
                continue

            fuzzer_name_override = environment.get(
                'CORPUS_FUZZER_NAME_OVERRIDE')

            target_jobs = list(
                fuzz_target_utils.get_fuzz_target_jobs(job=job.name))
            targets = fuzz_target_utils.get_fuzz_targets_for_target_jobs(
                target_jobs)

            for target in targets:
                _make_corpus_backup_public(target, fuzzer_name_override,
                                           backup_bucket)
Exemple #2
0
    def get(self):
        """Handle a GET request.

        Publishes corpus backups for the fuzz targets of every eligible
        job, falling back to the default backup bucket when a job does not
        configure its own.
        """
        default_bucket = utils.default_backup_bucket()
        for job in ndb_utils.get_all_from_model(data_types.Job):
            environment = job.get_environment()

            # Don't use corpus backups from experimental jobs. Skip.
            if utils.string_is_true(environment.get("EXPERIMENTAL")):
                continue

            # There won't be any corpus backups without corpus pruning. Skip.
            if not utils.string_is_true(environment.get("CORPUS_PRUNE")):
                continue

            backup_bucket = environment.get("BACKUP_BUCKET", default_bucket)
            if not backup_bucket:
                # No backup bucket found. Skip.
                continue

            fuzzer_name_override = environment.get(
                "CORPUS_FUZZER_NAME_OVERRIDE")

            target_jobs = list(
                fuzz_target_utils.get_fuzz_target_jobs(job=job.name))
            targets = fuzz_target_utils.get_fuzz_targets_for_target_jobs(
                target_jobs)

            for target in targets:
                if not target:
                    # Expected if any fuzzer/job combinations become outdated.
                    continue

                _make_corpus_backup_public(target, fuzzer_name_override,
                                           backup_bucket)
Exemple #3
0
def get_jobs_and_platforms_for_project():
  """Return a map of projects to jobs and platforms map to use for picking top
  crashes."""
  projects_to_jobs_and_platforms = {}
  for job in ndb_utils.get_all_from_model(data_types.Job):
    environment = job.get_environment()

    # Skip experimental jobs.
    if utils.string_is_true(environment.get('EXPERIMENTAL')):
      continue

    # Skip custom binary jobs.
    if (utils.string_is_true(environment.get('CUSTOM_BINARY')) or
        environment.get('SYSTEM_BINARY_DIR')):
      continue

    # Skip if explicitly excluded using flag.
    if utils.string_is_true(environment.get('EXCLUDE_FROM_TOP_CRASHES')):
      continue

    # Lazily create the per-project entry, then record this job.
    project_map = projects_to_jobs_and_platforms.setdefault(
        job.project, ProjectMap(set(), set()))
    project_map.jobs.add(job.name)
    project_map.platforms.add(job_platform_to_real_platform(job.platform))

  return projects_to_jobs_and_platforms
Exemple #4
0
def get_jobs_and_platforms_for_top_crashes():
    """Return list of jobs and platforms to use for picking top crashes."""
    jobs = set()
    platforms = set()

    for job in ndb_utils.get_all_from_model(data_types.Job):
        environment = job.get_environment()

        # Skip experimental jobs.
        if utils.string_is_true(environment.get('EXPERIMENTAL')):
            continue

        # Skip custom binary jobs.
        if (utils.string_is_true(environment.get('CUSTOM_BINARY'))
                or environment.get('SYSTEM_BINARY_DIR')):
            continue

        # Skip if explicitly excluded using flag.
        if utils.string_is_true(
                environment.get('EXCLUDE_FROM_TOP_CRASHES')):
            continue

        jobs.add(job.name)
        platforms.add(job_platform_to_real_platform(job.platform))

    return jobs, platforms
Exemple #5
0
def _query_and_upload_strategy_probabilities():
    """Uploads queried data into datastore.

  Calls query functions and uploads query results
  to datastore to use as new probabilities. Probabilities
  are based on new_edges feature."""
    data = _query_multi_armed_bandit_probabilities()

    # TODO(mukundv): Update once we choose a temperature parameter for final
    # implementation.
    strategy_entities = []
    for row in data:
        entity = data_types.FuzzStrategyProbability()
        entity.strategy_name = str(row['strategy'])
        entity.probability_high_temperature = float(
            row['bandit_weight_high_temperature'])
        entity.probability_low_temperature = float(
            row['bandit_weight_low_temperature'])
        entity.probability_medium_temperature = float(
            row['bandit_weight_medium_temperature'])
        strategy_entities.append(entity)

    # Replace all existing probability entities with the freshly queried set.
    old_keys = [
        entity.key for entity in ndb_utils.get_all_from_model(
            data_types.FuzzStrategyProbability)
    ]
    ndb.delete_multi(old_keys)
    ndb.put_multi(strategy_entities)
    _store_probabilities_in_bigquery(data)
Exemple #6
0
def create_variant_tasks_if_needed(testcase):
    """Creates a variant task if needed."""
    testcase_id = testcase.key.id()
    for job in ndb_utils.get_all_from_model(data_types.Job):
        job_type = job.name

        # The variant needs to be tested in a different job type than us.
        if job_type == testcase.job_type:
            continue

        # Don't try to reproduce engine fuzzer testcase with blackbox fuzzer
        # testcases and vice versa.
        if (environment.is_engine_fuzzer_job(job_type) !=
                environment.is_engine_fuzzer_job(testcase.job_type)):
            continue

        # Skip experimental jobs.
        if utils.string_is_true(job.get_environment().get('EXPERIMENTAL')):
            continue

        # Don't look for variants in other projects.
        if data_handler.get_project_name(job_type) != testcase.project_name:
            continue

        # Queue a variant task on the job's platform and mark its status.
        queue = tasks.queue_for_platform(job.platform)
        tasks.add_task('variant', testcase_id, job_type, queue)

        variant = data_handler.get_testcase_variant(testcase_id, job_type)
        variant.status = data_types.TestcaseVariantStatus.PENDING
        variant.put()
Exemple #7
0
def _query_and_upload_strategy_probabilities(engine):
    """Uploads queried data into datastore.

  Calls query functions and uploads query results
  to datastore to use as new probabilities. Probabilities
  are based on new_edges feature."""
    data = _query_multi_armed_bandit_probabilities(engine)
    logs.log('Queried distribution for {}.'.format(engine.name))

    # TODO(mukundv): Update once we choose a temperature parameter for final
    # implementation.
    strategy_entities = []
    for row in data:
        entity = data_types.FuzzStrategyProbability()
        entity.strategy_name = str(row['strategy'])
        entity.probability = float(row['bandit_weight'])
        entity.engine = engine.name
        strategy_entities.append(entity)

    # Replace all existing probability entities with the new distribution.
    old_keys = [
        entity.key for entity in ndb_utils.get_all_from_model(
            data_types.FuzzStrategyProbability)
    ]
    ndb.delete_multi(old_keys)
    ndb.put_multi(strategy_entities)
    logs.log('Uploaded queried distribution to ndb for {}'.format(engine.name))
    _store_probabilities_in_bigquery(engine, data)
    logs.log('Uploaded queried distribution to BigQuery for {}'.format(
        engine.name))
Exemple #8
0
    def get(self):
        """Render the bot list HTML."""
        # OSS-Fuzz bots report through host workers; others heartbeat
        # directly into the datastore.
        if utils.is_oss_fuzz():
            heartbeats = _get_host_workers_heartbeats()
        else:
            heartbeats = ndb_utils.get_all_from_model(data_types.Heartbeat)

        self.render('bots.html', {
            'bots': _convert_heartbeats_to_dicts(heartbeats),
        })
Exemple #9
0
def _get_excluded_jobs():
    """Return list of jobs excluded from bug filing."""
    # Experimental jobs are excluded.
    return [
        job.name
        for job in ndb_utils.get_all_from_model(data_types.Job)
        if utils.string_is_true(job.get_environment().get('EXPERIMENTAL'))
    ]
def store_current_weights_in_bigquery():
    """Update a bigquery table containing the daily stats."""
    # Build one insert row per FuzzTargetJob mapping.
    rows = [
        big_query.Insert(
            row={
                'fuzzer': target_job.fuzz_target_name,
                'job': target_job.job,
                'weight': target_job.weight
            },
            insert_id=None)
        for target_job in ndb_utils.get_all_from_model(
            data_types.FuzzTargetJob)
    ]

    client = big_query.Client(dataset_id='main', table_id='fuzzer_weights')
    client.insert(rows)
Exemple #11
0
  def get(self):
    """Render dead bots as json (used by automated scripts)."""

    # This a publicly exposed chromium-specific page.
    if not utils.is_chromium():
      raise helpers.EarlyExitException('Dead bots list unavailable.', 400)

    heartbeats = ndb_utils.get_all_from_model(data_types.Heartbeat)
    alive_cutoff = _get_alive_cutoff()

    # A bot whose last heartbeat is at or before the cutoff is dead.
    result = {
        heartbeat.bot_name: 'dead'
        for heartbeat in heartbeats
        if heartbeat.last_beat_time <= alive_cutoff
    }

    self.render_json(result)
Exemple #12
0
def cleanup_unused_fuzz_targets_and_jobs():
    """Clean up unused FuzzTarget and FuzzTargetJob entities."""
    cutoff = utils.utcnow() - datetime.timedelta(
        days=FUZZ_TARGET_UNUSED_THRESHOLD)

    stale_target_jobs = data_types.FuzzTargetJob.query(
        data_types.FuzzTargetJob.last_run < cutoff)
    fresh_target_jobs = data_types.FuzzTargetJob.query(
        data_types.FuzzTargetJob.last_run >= cutoff)

    keys_to_delete = [target_job.key for target_job in stale_target_jobs]

    # FuzzTargets no longer referenced by any recently-run FuzzTargetJob
    # are deleted too.
    referenced_names = set(
        target_job.fuzz_target_name for target_job in fresh_target_jobs)
    for fuzz_target in ndb_utils.get_all_from_model(data_types.FuzzTarget):
        if fuzz_target.fully_qualified_name() not in referenced_names:
            keys_to_delete.append(fuzz_target.key)

    ndb.delete_multi(keys_to_delete)
Exemple #13
0
def get_excluded_jobs():
    """Return list of jobs excluded from bug filing."""
    excluded_jobs = []

    for job in ndb_utils.get_all_from_model(data_types.Job):
        environment = job.get_environment()

        # Exclude experimental jobs and custom binary jobs. Short-circuit
        # order matches the original if/elif chain.
        if (utils.string_is_true(environment.get('EXPERIMENTAL'))
                or utils.string_is_true(environment.get('CUSTOM_BINARY'))
                or environment.get('SYSTEM_BINARY_DIR')):
            excluded_jobs.append(job.name)

    return excluded_jobs
Exemple #14
0
    def test_jobs_updated(self):
        """Ensure that we properly update multiple jobs."""
        # Two mappings for the same job, both on the wrong platform.
        for fuzzer_name in ('fuzzer_1', 'fuzzer_2'):
            mapping = data_types.FuzzerJob()
            mapping.fuzzer = fuzzer_name
            mapping.job = 'test_job'
            mapping.platform = 'wrong_platform'
            mapping.put()

        fuzzer_selection.update_platform_for_job('test_job', 'right_platform')

        platforms = [
            mapping.platform
            for mapping in ndb_utils.get_all_from_model(data_types.FuzzerJob)
        ]
        self.assertListEqual(platforms, ['right_platform', 'right_platform'])
Exemple #15
0
def _query_and_upload_strategy_probabilities():
    """Uploads queried data into datastore.

  Calls query functions and uploads query results
  to datastore to use as new probabilities. Probabilities
  are based on new_edges feature."""
    data = _query_multi_armed_bandit_probabilities()

    strategy_entities = []
    for row in data:
        entity = data_types.FuzzStrategyProbability()
        entity.strategy_name = str(row['strategy'])
        entity.probability = float(row['bandit_weight'])
        strategy_entities.append(entity)

    # Replace all existing probability entities with the new set.
    old_keys = [
        entity.key for entity in ndb_utils.get_all_from_model(
            data_types.FuzzStrategyProbability)
    ]
    ndb.delete_multi(old_keys)
    ndb.put_multi(strategy_entities)
    _store_probabilities_in_bigquery(data)
def update_admins(new_admins):
    """Update list of admins."""
    existing_emails = set()
    keys_to_remove = []
    for admin in ndb_utils.get_all_from_model(data_types.Admin):
        existing_emails.add(admin.email)
        # Any existing admin missing from the new list is removed.
        if admin.email not in new_admins:
            logs.log('Removing admin ' + admin.email)
            keys_to_remove.append(admin.key)

    ndb_utils.delete_multi(keys_to_remove)

    # Any new admin not already present is added.
    entities_to_add = []
    for email in new_admins:
        if email not in existing_emails:
            entities_to_add.append(data_types.Admin(id=email, email=email))
            logs.log('Adding admin ' + email)

    ndb_utils.put_multi(entities_to_add)
Exemple #17
0
    def post(self):
        """Handle a post request."""
        key = helpers.get_integer_key(self.request)
        job = ndb.Key(data_types.Job, key).get()
        if not job:
            raise helpers.EarlyExitException('Job not found.', 400)

        # Delete from fuzzers' jobs' list.
        for fuzzer in ndb_utils.get_all_from_model(data_types.Fuzzer):
            if job.name not in fuzzer.jobs:
                continue
            fuzzer.jobs.remove(job.name)
            fuzzer.put()

        # Delete associated fuzzer-job mapping(s).
        mapping_query = data_types.FuzzerJob.query().filter(
            data_types.FuzzerJob.job == job.name)
        for mapping in ndb_utils.get_all_from_query(mapping_query):
            mapping.key.delete()

        # Delete job.
        job.key.delete()

        helpers.log('Deleted job %s' % job.name, helpers.MODIFY_OPERATION)
        self.redirect('/jobs')
Exemple #18
0
def unpack_crash_testcases(crash_testcases_directory):
    """Unpacks the old crash testcases in the provided directory.

    Iterates over all Testcase entities, unpacking each eligible one into a
    per-testcase subdirectory of `crash_testcases_directory`, then prunes
    oversized directories, strips the fuzz prefix from file names, and
    removes empty files/directories.

    Args:
        crash_testcases_directory: Directory path to unpack testcases into.
    """
    for testcase in ndb_utils.get_all_from_model(data_types.Testcase):
        testcase_id = testcase.key.id()

        # 1. If we have already stored the testcase, then just skip.
        if testcase_id in STORED_TESTCASES_LIST:
            continue

        # 2. Make sure that it is a unique crash testcase. Ignore duplicates,
        # uploaded repros.
        if testcase.status != 'Processed':
            continue

        # 3. Check if the testcase is fixed. If not, skip.
        if testcase.open:
            continue

        # 4. Check if the testcase has a minimized repro. If not, skip.
        if not testcase.minimized_keys or testcase.minimized_keys == 'NA':
            continue

        # 5. Only use testcases that have bugs associated with them.
        if not testcase.bug_information:
            continue

        # 6. Existing IPC testcases are un-interesting and unused in furthur
        # mutations. Due to size bloat, ignoring these for now.
        if testcase.absolute_path.endswith(testcase_manager.IPCDUMP_EXTENSION):
            continue

        # 7. Ignore testcases that are archives (e.g. Langfuzz fuzzer tests).
        if archive.get_archive_type(testcase.absolute_path):
            continue

        # 8. Skip in-process fuzzer testcases, since these are only applicable to
        # fuzz targets and don't run with blackbox binaries.
        if testcase.fuzzer_name and testcase.fuzzer_name in [
                'afl', 'libFuzzer'
        ]:
            continue

        # Un-pack testcase. Best-effort: log and move on if unpacking fails.
        try:
            _, input_directory, _ = setup.unpack_testcase(testcase)
        except Exception:
            logs.log_error('Failed to unpack testcase %d.' % testcase.key.id())
            continue

        # Move this to our crash testcases directory.
        crash_testcase_directory = os.path.join(crash_testcases_directory,
                                                str(testcase_id))
        shell.move(input_directory, crash_testcase_directory)

        # Re-create input directory for unpacking testcase in next iteration.
        shell.create_directory(input_directory)

        STORED_TESTCASES_LIST.append(testcase_id)

    # Remove testcase directories that exceed the max size limit.
    for directory_name in os.listdir(crash_testcases_directory):
        directory_path = os.path.join(crash_testcases_directory,
                                      directory_name)
        if not os.path.isdir(directory_path):
            continue

        if shell.get_directory_size(
                directory_path) <= MAX_TESTCASE_DIRECTORY_SIZE:
            continue

        shell.remove_directory(directory_path)

    # Rename all fuzzed testcase files as regular files.
    for root, _, files in os.walk(crash_testcases_directory):
        for filename in files:
            if not filename.startswith(testcase_manager.FUZZ_PREFIX):
                continue

            # Strip the fuzz prefix so the file name matches a regular
            # testcase file.
            file_path = os.path.join(root, filename)
            stripped_file_name = os.path.basename(
                file_path)[len(testcase_manager.FUZZ_PREFIX):]
            stripped_file_path = os.path.join(os.path.dirname(file_path),
                                              stripped_file_name)
            try:
                os.rename(file_path, stripped_file_path)
            except:
                raise Exception('Failed to rename testcase %s.' % file_path)

    # Remove empty files and dirs to avoid the case where a fuzzer randomly
    # chooses an empty dir/file and generates zero testcases.
    shell.remove_empty_files(crash_testcases_directory)
    shell.remove_empty_directories(crash_testcases_directory)