Example #1
from typing import List, Optional

# models, builder, and experiment_utils are modules from the surrounding
# experiment framework.
def build_images_for_trials(fuzzers: List[str],
                            benchmarks: List[str],
                            num_trials: int,
                            preemptible: bool,
                            concurrent_builds: Optional[int] = None
                            ) -> List[models.Trial]:
    """Builds the images needed to run the experiment and returns the list of
    trials to run: |num_trials| trials for each fuzzer+benchmark pair that
    builds successfully."""
    # This call raises an exception if the base images can't be built, which
    # halts the experiment.
    builder.build_base_images()

    # Only build fuzzers for benchmarks whose measurers built successfully.
    if concurrent_builds is None:
        benchmarks = builder.build_all_measurers(benchmarks)
        build_successes = builder.build_all_fuzzer_benchmarks(
            fuzzers, benchmarks)
    else:
        benchmarks = builder.build_all_measurers(benchmarks, concurrent_builds)
        build_successes = builder.build_all_fuzzer_benchmarks(
            fuzzers, benchmarks, concurrent_builds)
    experiment_name = experiment_utils.get_experiment_name()
    trials = []
    for fuzzer, benchmark in build_successes:
        fuzzer_benchmark_trials = [
            models.Trial(fuzzer=fuzzer,
                         experiment=experiment_name,
                         benchmark=benchmark,
                         preemptible=preemptible) for _ in range(num_trials)
        ]
        trials.extend(fuzzer_benchmark_trials)
    return trials
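
The list comprehension at the end fans out to |num_trials| Trial rows per successfully built fuzzer+benchmark pair. Below is a standalone sketch of just that fan-out, with a plain dataclass standing in for models.Trial and illustrative fuzzer/benchmark names; the builder calls are omitted:

from dataclasses import dataclass
from typing import List, Tuple

@dataclass
class Trial:
    fuzzer: str
    benchmark: str
    experiment: str
    preemptible: bool

def trials_for_successes(build_successes: List[Tuple[str, str]],
                         experiment_name: str,
                         num_trials: int,
                         preemptible: bool) -> List[Trial]:
    # One Trial per (fuzzer, benchmark) pair, repeated num_trials times.
    return [
        Trial(fuzzer, benchmark, experiment_name, preemptible)
        for fuzzer, benchmark in build_successes
        for _ in range(num_trials)
    ]

successes = [('afl', 'libpng'), ('libfuzzer', 'libpng')]
trials = trials_for_successes(successes, 'demo-experiment', 3, False)
assert len(trials) == len(successes) * 3  # 2 pairs x 3 trials = 6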
Example #2
import multiprocessing
import os
import threading
import time

# Interval between completion checks. The constant is defined elsewhere in the
# project; the value here is an assumption for illustration.
LOOP_WAIT_SECONDS = 5 * 60

# logs, db_utils, models, builder, fuzzer_config_utils, Experiment, scheduler,
# measurer, and reporter are modules/classes from the surrounding experiment
# framework.
def dispatcher_main():
    """Do the experiment and report results."""
    logs.info('Starting experiment.')

    # Set the start method here because we get failures if we do it in the
    # measurer for some reason.
    multiprocessing.set_start_method('spawn')
    db_utils.initialize()
    if os.getenv('LOCAL_EXPERIMENT'):
        models.Base.metadata.create_all(db_utils.engine)

    builder.build_base_images()

    experiment_config_file_path = os.path.join(fuzzer_config_utils.get_dir(),
                                               'experiment.yaml')
    experiment = Experiment(experiment_config_file_path)

    # When building, we only care about the underlying fuzzer rather than the
    # display name that we use to identify a specific configuration.
    unique_fuzzers = list({
        fuzzer_config_utils.get_underlying_fuzzer_name(f)
        for f in experiment.fuzzers
    })
    builder.build_all_fuzzer_benchmarks(unique_fuzzers, experiment.benchmarks)

    create_work_subdirs(['experiment-folders', 'measurement-folders'])

    # Start the scheduler in a thread and the measurer in a separate process.
    scheduler_loop_thread = threading.Thread(target=scheduler.schedule_loop,
                                             args=(experiment.config,))
    scheduler_loop_thread.start()
    measurer_loop_process = multiprocessing.Process(
        target=measurer.measure_loop,
        args=(
            experiment.config['experiment'],
            experiment.config['max_total_time'],
        ))
    measurer_loop_process.start()

    while True:
        time.sleep(LOOP_WAIT_SECONDS)
        is_complete = (not scheduler_loop_thread.is_alive()
                       and not measurer_loop_process.is_alive())

        # Generate periodic output reports.
        reporter.output_report(experiment.web_bucket,
                               in_progress=not is_complete)

        if is_complete:
            # Experiment is complete, bail out.
            break
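
The shutdown logic at the bottom generalizes to any mix of threads and processes: poll is_alive() on every worker, do periodic work in between (here, report generation), and exit once all workers have stopped. A self-contained sketch of that polling pattern, with dummy workers and a one-second wait standing in for LOOP_WAIT_SECONDS:

import multiprocessing
import threading
import time

def scheduler_stub():
    time.sleep(2)  # pretend to schedule trials for a while

def measurer_stub():
    time.sleep(3)  # pretend to measure coverage for a while

def main():
    thread = threading.Thread(target=scheduler_stub)
    process = multiprocessing.Process(target=measurer_stub)
    thread.start()
    process.start()

    while True:
        time.sleep(1)  # stands in for LOOP_WAIT_SECONDS
        is_complete = not thread.is_alive() and not process.is_alive()
        print('report: in_progress =', not is_complete)  # periodic report stub
        if is_complete:
            break
    # Tidy up the finished process; dispatcher_main above relies on the
    # is_alive() checks alone.
    process.join()

if __name__ == '__main__':  # guard needed when the start method is 'spawn'
    main()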