Example #1
def test_get_preempted_trials_new_preempted(mocked_get_preempted_operations,
                                            preempt_exp_conf):
    """Tests that TrialInstanceManager.get_preempted_trials returns trials that
    new preempted trials we don't know about until we query for them and not
    trials that we already knew were preempted."""
    trial_instance_manager = get_trial_instance_manager(preempt_exp_conf)

    # Create trials.
    experiment = preempt_exp_conf['experiment']
    time_started = ARBITRARY_DATETIME
    known_preempted = models.Trial(experiment=experiment,
                                   fuzzer=FUZZER,
                                   benchmark=BENCHMARK,
                                   time_started=time_started)
    unknown_preempted = models.Trial(experiment=experiment,
                                     fuzzer=FUZZER,
                                     benchmark=BENCHMARK,
                                     time_started=time_started)
    trials = [known_preempted, unknown_preempted]
    db_utils.add_all(trials)
    mocked_get_preempted_operations.return_value = [
        _get_preemption_operation(trial.id, preempt_exp_conf)
        for trial in trials
    ]

    trial_instance_manager.preempted_trials = {
        known_preempted.id: known_preempted
    }
    result = trial_instance_manager.get_preempted_trials()
    expected_result = [unknown_preempted]
    assert result == expected_result
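
The filtering this test pins down can be sketched as follows (a minimal
sketch of the expected behavior, not the real implementation;
queried_preempted stands in for whatever the preemption query returns):

# Hypothetical sketch: trials already recorded in preempted_trials are
# filtered out, so only newly discovered preemptions are returned.
new_preempted = [
    trial for trial in queried_preempted
    if trial.id not in trial_instance_manager.preempted_trials
]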
Example #2
def test_initialize_experiment_in_db(dispatcher_experiment):
    """Tests that _initialize_experiment_in_db adds the right things to the
    database."""
    trials_args = itertools.product(dispatcher_experiment.benchmarks,
                                    range(dispatcher_experiment.num_trials),
                                    dispatcher_experiment.fuzzers)
    trials = [
        models.Trial(fuzzer=fuzzer,
                     experiment=dispatcher_experiment.experiment_name,
                     benchmark=benchmark)
        for benchmark, _, fuzzer in trials_args
    ]
    dispatcher._initialize_experiment_in_db(dispatcher_experiment.config,
                                            trials)
    db_experiments = db_utils.query(models.Experiment).all()
    assert len(db_experiments) == 1
    db_experiment = db_experiments[0]
    assert db_experiment.name == os.environ['EXPERIMENT']
    trials = db_utils.query(models.Trial).all()
    fuzzer_and_benchmarks = [(trial.benchmark, trial.fuzzer)
                             for trial in trials]
    assert fuzzer_and_benchmarks == ([('benchmark-1', 'fuzzer-a'),
                                      ('benchmark-1', 'fuzzer-b')] *
                                     4) + [('benchmark-2', 'fuzzer-a'),
                                           ('benchmark-2', 'fuzzer-b')] * 4
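
The expected ordering in the final assertion follows from how
itertools.product iterates: the first argument (benchmarks) varies slowest
and the last (fuzzers) varies fastest. A standalone illustration, assuming
the fixture provides two benchmarks, two fuzzers, and num_trials of 4:

import itertools

benchmarks = ['benchmark-1', 'benchmark-2']  # assumed fixture values
fuzzers = ['fuzzer-a', 'fuzzer-b']
num_trials = 4

pairs = [(benchmark, fuzzer)
         for benchmark, _, fuzzer in itertools.product(
             benchmarks, range(num_trials), fuzzers)]
# Each benchmark contributes num_trials * len(fuzzers) = 8 entries, with
# fuzzers cycling fastest, matching the shape asserted above.
assert pairs == ([('benchmark-1', 'fuzzer-a'),
                  ('benchmark-1', 'fuzzer-b')] * 4 +
                 [('benchmark-2', 'fuzzer-a'),
                  ('benchmark-2', 'fuzzer-b')] * 4)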
Example #3
def build_images_for_trials(fuzzers: List[str],
                            benchmarks: List[str],
                            num_trials: int,
                            preemptible: bool,
                            concurrent_builds=None) -> List[models.Trial]:
    """Builds the images needed to run |experiment| and returns a list of trials
    that can be run for experiment. This is the number of trials specified in
    experiment times each pair of fuzzer+benchmark that builds successfully."""
    # This call will raise an exception if the images can't be built which will
    # halt the experiment.
    builder.build_base_images()

    # Only build fuzzers for benchmarks whose measurers built successfully.
    if concurrent_builds is None:
        benchmarks = builder.build_all_measurers(benchmarks)
        build_successes = builder.build_all_fuzzer_benchmarks(
            fuzzers, benchmarks)
    else:
        benchmarks = builder.build_all_measurers(benchmarks, concurrent_builds)
        build_successes = builder.build_all_fuzzer_benchmarks(
            fuzzers, benchmarks, concurrent_builds)
    experiment_name = experiment_utils.get_experiment_name()
    trials = []
    for fuzzer, benchmark in build_successes:
        fuzzer_benchmark_trials = [
            models.Trial(fuzzer=fuzzer,
                         experiment=experiment_name,
                         benchmark=benchmark,
                         preemptible=preemptible) for _ in range(num_trials)
        ]
        trials.extend(fuzzer_benchmark_trials)
    return trials
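
A hedged usage sketch (the fuzzer and benchmark names below are
placeholders, not values from the source):

# Hypothetical invocation from an experiment's setup phase.
trials = build_images_for_trials(fuzzers=['afl', 'libfuzzer'],
                                 benchmarks=['freetype2-2017'],
                                 num_trials=20,
                                 preemptible=True)
# len(trials) == 20 * the number of fuzzer+benchmark pairs that built.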
Example #4
def create_trial(experiment, time_started=None, time_ended=None):
    """Creates a database trial."""
    return models.Trial(experiment=experiment,
                        benchmark=BENCHMARK,
                        fuzzer=FUZZER,
                        time_started=time_started,
                        time_ended=time_ended)
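
The helper might be used in tests along these lines (a sketch; BENCHMARK,
FUZZER, and ARBITRARY_DATETIME are module-level constants in the test file):

# Hypothetical usage: a trial that has started but not yet finished.
pending = create_trial('experiment-1', time_started=ARBITRARY_DATETIME)
assert pending.time_ended is None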
Example #5
def replace_trial(trial, preemptible):
    """Returns a new trial to replace |trial|. The trial is preemptible if
    |preemptible|. Sets trial.replacement to the replacement trial."""
    replacement = models.Trial(fuzzer=trial.fuzzer,
                               benchmark=trial.benchmark,
                               experiment=trial.experiment,
                               preemptible=preemptible)
    trial.replacement = replacement.id
    return replacement
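
A usage sketch, assuming the caller persists the replacement afterwards:

# Hypothetical usage: replace a preempted trial with a non-preemptible one.
replacement = replace_trial(preempted_trial, preemptible=False)
db_utils.add_all([replacement])

One subtlety worth noting: replacement.id is None until the new trial is
flushed to the database, so trial.replacement only holds a real id once
the replacement has been persisted.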
Example #6
def test_get_last_trial_time_started_called_early(db, experiment_config):
    """Tests that get_last_trial_time_started raises an exception if called
    while there are still pending trials."""
    experiment = experiment_config['experiment']
    db_utils.add_all([
        models.Experiment(name=experiment),
    ])
    trial1 = models.Trial(experiment=experiment,
                          benchmark=BENCHMARK,
                          fuzzer=FUZZER)
    trial2 = models.Trial(experiment=experiment,
                          benchmark=BENCHMARK,
                          fuzzer=FUZZER)
    first_time = datetime.datetime.fromtimestamp(time.mktime(time.gmtime(0)))
    trial1.time_started = first_time
    trials = [trial1, trial2]
    db_utils.add_all(trials)
    with pytest.raises(AssertionError):
        scheduler.get_last_trial_time_started(experiment)
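
A minimal sketch of the precondition this test exercises (not the real
scheduler code): get_last_trial_time_started is expected to assert that no
trial in the experiment is still pending, i.e. has no time_started.

# Hypothetical sketch of the precondition check the test relies on.
pending = [trial for trial in trials if trial.time_started is None]
assert not pending, 'get_last_trial_time_started called with pending trials'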
Example #7
    def test_measure_snapshot_coverage(  # pylint: disable=too-many-locals
            self, mocked_is_cycle_unchanged, db, experiment, tmp_path):
        """Integration test for measure_snapshot_coverage."""
        # WORK is set by experiment to a directory that only makes sense in a
        # fakefs. A directory containing necessary llvm tools is also added to
        # PATH.
        llvm_tools_path = get_test_data_path('llvm_tools')
        os.environ["PATH"] += os.pathsep + llvm_tools_path
        os.environ['WORK'] = str(tmp_path)
        mocked_is_cycle_unchanged.return_value = False
        # Set up the coverage binary.
        benchmark = 'freetype2-2017'
        coverage_binary_src = get_test_data_path(
            'test_measure_snapshot_coverage', benchmark + '-coverage')
        benchmark_cov_binary_dir = os.path.join(
            build_utils.get_coverage_binaries_dir(), benchmark)

        os.makedirs(benchmark_cov_binary_dir)
        coverage_binary_dst_dir = os.path.join(benchmark_cov_binary_dir,
                                               'ftfuzzer')

        shutil.copy(coverage_binary_src, coverage_binary_dst_dir)

        # Set up entities in database so that the snapshot can be created.
        experiment = models.Experiment(name=os.environ['EXPERIMENT'])
        db_utils.add_all([experiment])
        trial = models.Trial(fuzzer=FUZZER,
                             benchmark=benchmark,
                             experiment=os.environ['EXPERIMENT'])
        db_utils.add_all([trial])

        snapshot_measurer = measurer.SnapshotMeasurer(trial.fuzzer,
                                                      trial.benchmark,
                                                      trial.id,
                                                      SNAPSHOT_LOGGER)

        # Set up the snapshot archive.
        cycle = 1
        archive = get_test_data_path('test_measure_snapshot_coverage',
                                     'corpus-archive-%04d.tar.gz' % cycle)
        corpus_dir = os.path.join(snapshot_measurer.trial_dir, 'corpus')
        os.makedirs(corpus_dir)
        shutil.copy(archive, corpus_dir)

        with mock.patch('common.filestore_utils.cp') as mocked_cp:
            mocked_cp.return_value = new_process.ProcessResult(0, '', False)
            # TODO(metzman): Create a system for using actual buckets in
            # integration tests.
            snapshot = measurer.measure_snapshot_coverage(
                snapshot_measurer.fuzzer, snapshot_measurer.benchmark,
                snapshot_measurer.trial_num, cycle)
        assert snapshot
        assert snapshot.time == cycle * experiment_utils.get_snapshot_seconds()
        assert snapshot.edges_covered == 13178
Example #8
def test_get_last_trial_time_started(db, experiment_config):
    """Tests that get_last_trial_time_started returns the time_started of the
    last trial to be started."""
    experiment = experiment_config['experiment']
    db_utils.add_all([
        models.Experiment(name=experiment),
    ])
    trial1 = models.Trial(experiment=experiment,
                          benchmark=BENCHMARK,
                          fuzzer=FUZZER)
    trial2 = models.Trial(experiment=experiment,
                          benchmark=BENCHMARK,
                          fuzzer=FUZZER)
    first_time = datetime.datetime.fromtimestamp(time.mktime(time.gmtime(0)))
    trial1.time_started = first_time
    last_time_started = first_time + datetime.timedelta(days=1)
    trial2.time_started = last_time_started
    trials = [trial1, trial2]
    db_utils.add_all(trials)

    assert scheduler.get_last_trial_time_started(
        experiment) == last_time_started
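
The first_time construction deserves a note: time.gmtime(0) is the Unix
epoch as a UTC struct_time, but time.mktime interprets its argument as
local time, so the resulting datetime is the epoch shifted by the local
UTC offset. That is fine here, since only the relative ordering of the two
timestamps matters:

import datetime
import time

# Epoch-like baseline; the absolute value depends on the local timezone,
# but the one-day offset keeps the ordering deterministic.
first = datetime.datetime.fromtimestamp(time.mktime(time.gmtime(0)))
assert first + datetime.timedelta(days=1) > first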
Example #9
def _initialize_experiment_in_db(experiment: str, benchmarks: List[str],
                                 fuzzers: List[str], num_trials: int):
    """Initializes |experiment| in the database by creating the experiment
    entity and entities for each trial in the experiment."""
    db_utils.add_all(
        [db_utils.get_or_create(models.Experiment, name=experiment)])

    trials_args = itertools.product(sorted(benchmarks), range(num_trials),
                                    sorted(fuzzers))
    trials = [
        models.Trial(fuzzer=fuzzer, experiment=experiment, benchmark=benchmark)
        for benchmark, _, fuzzer in trials_args
    ]
    # TODO(metzman): Consider doing this without SQLAlchemy. This can get
    # slow with SQLAlchemy (it's much worse with add_all).
    db_utils.bulk_save(trials)
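
A hedged usage sketch (experiment, benchmark, and fuzzer names are
placeholders):

# Hypothetical invocation; creates one Experiment row and
# len(benchmarks) * num_trials * len(fuzzers) Trial rows (here 2 * 10 * 2).
_initialize_experiment_in_db('experiment-1', ['libpng', 'freetype2-2017'],
                             ['afl', 'libfuzzer'], 10)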
Example #10
def test_get_preempted_trials_stale_preempted(_, preempt_exp_conf):
    """Tests that TrialInstanceManager.get_preempted_trials doesn't return
    trials that we already know were preempted."""
    trial_instance_manager = get_trial_instance_manager(preempt_exp_conf)
    trial = models.Trial(experiment=preempt_exp_conf['experiment'],
                         fuzzer=FUZZER,
                         benchmark=BENCHMARK)
    db_utils.add_all([trial])
    instance_name = experiment_utils.get_trial_instance_name(
        preempt_exp_conf['experiment'], trial.id)
    trial_instance_manager.preempted_trials = {instance_name: trial}
    with mock.patch(
            'experiment.scheduler.TrialInstanceManager.'
            '_get_started_unfinished_instances',
            return_value=[instance_name]):
        assert trial_instance_manager.get_preempted_trials() == []
Example #11
    def test_measure_snapshot_coverage(  # pylint: disable=too-many-locals
            self, mocked_is_cycle_unchanged, create_measurer, db, experiment):
        """Integration test for measure_snapshot_coverage."""
        mocked_is_cycle_unchanged.return_value = False
        # Set up the coverage binary.
        benchmark = 'freetype2-2017'
        coverage_binary_src = get_test_data_path(
            'test_measure_snapshot_coverage', benchmark + '-coverage')
        benchmark_cov_binary_dir = os.path.join(
            build_utils.get_coverage_binaries_dir(), benchmark)

        os.makedirs(benchmark_cov_binary_dir)
        coverage_binary_dst_dir = os.path.join(benchmark_cov_binary_dir,
                                               'fuzz-target')

        shutil.copy(coverage_binary_src, coverage_binary_dst_dir)

        # Set up entities in database so that the snapshot can be created.
        experiment = models.Experiment(name=os.environ['EXPERIMENT'])
        db_utils.add_all([experiment])
        trial = models.Trial(fuzzer=FUZZER,
                             benchmark=benchmark,
                             experiment=os.environ['EXPERIMENT'])
        db_utils.add_all([trial])

        snapshot_measurer = create_measurer(trial.fuzzer, trial.benchmark,
                                            trial.id)

        # Set up the snapshot archive.
        cycle = 1
        archive = get_test_data_path('test_measure_snapshot_coverage',
                                     'corpus-archive-%04d.tar.gz' % cycle)
        corpus_dir = os.path.join(snapshot_measurer.trial_dir, 'corpus')
        os.makedirs(corpus_dir)
        shutil.copy(archive, corpus_dir)

        with mock.patch('common.gsutil.cp') as mocked_cp:
            mocked_cp.return_value = new_process.ProcessResult(0, '', False)
            # TODO(metzman): Create a system for using actual buckets in
            # integration tests.
            snapshot = measurer.measure_snapshot_coverage(
                snapshot_measurer.fuzzer, snapshot_measurer.benchmark,
                snapshot_measurer.trial_num, cycle)
        assert snapshot
        assert snapshot.time == cycle * experiment_utils.get_snapshot_seconds()
        assert snapshot.edges_covered == 3798
Example #12
def test_get_experiment_data_fuzzer_stats(db):
    """Tests that get_experiment_data handles fuzzer_stats correctly."""
    experiment_name = 'experiment-1'
    db_utils.add_all([
        models.Experiment(name=experiment_name,
                          time_created=ARBITRARY_DATETIME,
                          private=False)
    ])
    trial = models.Trial(fuzzer='afl',
                         experiment=experiment_name,
                         benchmark='libpng')
    db_utils.add_all([trial])
    fuzzer_stats = {'execs_per_sec': 100.0}
    snapshot = models.Snapshot(time=900,
                               trial_id=trial.id,
                               edges_covered=100,
                               fuzzer_stats=fuzzer_stats)
    db_utils.add_all([snapshot])
    experiment_df = queries.get_experiment_data([experiment_name])  # pylint: disable=unused-variable