Example #1
0
def db():  # pylint: disable=invalid-name
    """Connect to the SQLite database and create all the expected tables.

    Generator fixture: yields with the schema in place, then tears the
    database down after the test body finishes.
    """
    # Set up the engine/session, then create every table declared on Base.
    db_utils.initialize()
    models.Base.metadata.create_all(db_utils.engine)
    # Patch cleanup() for the duration of the test so code under test cannot
    # tear down the database mid-test; the real cleanup runs exactly once
    # below, after the test completes.
    with mock.patch('database.utils.cleanup'):
        yield
    db_utils.cleanup()
Example #2
0
def measure_loop(experiment: str, max_total_time: int):
    """Continuously measure trials for |experiment|.

    Loops until a measurement pass finds nothing to measure AND all trials
    have already ended — at that point no new snapshots can ever appear.

    Args:
        experiment: Name of the experiment whose trials are measured.
        max_total_time: Maximum total experiment time, in seconds.
    """
    db_utils.initialize()
    logs.initialize(default_extras={
        'component': 'dispatcher',
    })
    with multiprocessing.Pool() as pool, multiprocessing.Manager() as manager:
        # Using multiprocessing.Queue will fail with a complaint about
        # inheriting queue.
        q = manager.Queue()  # pytype: disable=attribute-error
        while True:
            try:
                # Get whether all trials have ended before we measure to
                # prevent races: a trial ending between the measurement and
                # this check could otherwise make us exit with snapshots
                # still unmeasured.
                all_trials_ended = scheduler.all_trials_ended(experiment)

                if not measure_all_trials(experiment, max_total_time, pool, q):
                    # We didn't measure any trials.
                    if all_trials_ended:
                        # There are no trials producing snapshots to measure.
                        # Given that we couldn't measure any snapshots, we
                        # won't be able to measure any in the future, so
                        # break now.
                        break
            except Exception:  # pylint: disable=broad-except
                # logger.exception records the traceback; plain logger.error
                # would silently drop it, making failures here undiagnosable.
                logger.exception('Error occurred during measuring.')

            time.sleep(FAIL_WAIT_SECONDS)

    logger.info('Finished measuring.')
Example #3
0
def dispatcher_main():
    """Do the experiment and report results.

    Builds trial images, records the experiment in the database, runs the
    scheduler (thread) and measurer (process) concurrently, and emits a
    report every LOOP_WAIT_SECONDS until both workers have exited.
    """
    logs.info('Starting experiment.')

    # Set this here because we get failures if we do it in measurer for some
    # reason.
    multiprocessing.set_start_method('spawn')
    db_utils.initialize()
    if experiment_utils.is_local_experiment():
        # Local runs have no pre-provisioned database, so create the schema.
        models.Base.metadata.create_all(db_utils.engine)

    experiment_config_file_path = _get_config_file_path()
    experiment = Experiment(experiment_config_file_path)

    _initialize_experiment_in_db(experiment.config)

    # Images must exist before trials can run; only trials whose images
    # built successfully are recorded in the database below.
    trials = build_images_for_trials(experiment.fuzzers, experiment.benchmarks,
                                     experiment.num_trials,
                                     experiment.preemptible,
                                     experiment.concurrent_builds)
    _initialize_trials_in_db(trials)

    create_work_subdirs(['experiment-folders', 'measurement-folders'])

    # Start measurer and scheduler in separate threads/processes.
    scheduler_loop_thread = threading.Thread(target=scheduler.schedule_loop,
                                             args=(experiment.config, ))
    scheduler_loop_thread.start()

    measurer_main_process = multiprocessing.Process(
        target=measure_manager.measure_main, args=(experiment.config, ))

    measurer_main_process.start()

    is_complete = False
    while True:
        time.sleep(LOOP_WAIT_SECONDS)
        # Only check the measurer once the scheduler has finished: the
        # experiment is complete when both workers have exited.
        if not scheduler_loop_thread.is_alive():
            is_complete = not measurer_main_process.is_alive()

        # Generate periodic output reports.
        reporter.output_report(experiment.config,
                               in_progress=not is_complete,
                               coverage_report=is_complete)

        if is_complete:
            # Experiment is complete, bail out.
            break

    # Both workers have already exited, so these joins return immediately;
    # they just reap the thread and child process cleanly.
    scheduler_loop_thread.join()
    measurer_main_process.join()

    _record_experiment_time_ended(experiment.experiment_name)
    logs.info('Experiment ended.')
Example #4
0
def dispatcher_main():
    """Do the experiment and report results.

    Builds trial images, records the experiment in the database, runs the
    scheduler (thread) and measurer (process) concurrently, and emits a
    report every LOOP_WAIT_SECONDS until both workers have exited.
    """
    logs.info('Starting experiment.')

    # Set this here because we get failures if we do it in measurer for some
    # reason.
    multiprocessing.set_start_method('spawn')
    db_utils.initialize()
    if os.getenv('LOCAL_EXPERIMENT'):
        # Local runs have no pre-provisioned database, so create the schema.
        models.Base.metadata.create_all(db_utils.engine)

    experiment_config_file_path = os.path.join(fuzzer_config_utils.get_dir(),
                                               'experiment.yaml')
    experiment = Experiment(experiment_config_file_path)
    preemptible = experiment.preemptible
    # Images must exist before trials can run; only trials whose images
    # built successfully are recorded in the database below.
    trials = build_images_for_trials(experiment.fuzzers, experiment.benchmarks,
                                     experiment.num_trials, preemptible)
    _initialize_experiment_in_db(experiment.experiment_name,
                                 experiment.git_hash, trials)

    create_work_subdirs(['experiment-folders', 'measurement-folders'])

    # Start measurer and scheduler in separate threads/processes.
    scheduler_loop_thread = threading.Thread(target=scheduler.schedule_loop,
                                             args=(experiment.config, ))
    scheduler_loop_thread.start()

    max_total_time = experiment.config['max_total_time']
    measurer_loop_process = multiprocessing.Process(
        target=measurer.measure_loop,
        args=(experiment.experiment_name, max_total_time))

    measurer_loop_process.start()

    is_complete = False
    while True:
        time.sleep(LOOP_WAIT_SECONDS)
        # Only check the measurer once the scheduler has finished: the
        # experiment is complete when both workers have exited.
        if not scheduler_loop_thread.is_alive():
            is_complete = not measurer_loop_process.is_alive()

        # Generate periodic output reports.
        reporter.output_report(experiment.web_bucket,
                               in_progress=not is_complete)

        if is_complete:
            # Experiment is complete, bail out.
            break

    logs.info('Dispatcher finished.')
    # Both workers have already exited, so these joins return immediately;
    # they just reap the thread and child process cleanly.
    scheduler_loop_thread.join()
    measurer_loop_process.join()
Example #5
0
def dispatcher_main():
    """Do the experiment and report results.

    Builds base and fuzzer/benchmark images, runs the scheduler (thread) and
    measurer (process) concurrently, and emits a report every
    LOOP_WAIT_SECONDS until both workers have exited.
    """
    logs.info('Starting experiment.')

    # Set this here because we get failures if we do it in measurer for some
    # reason.
    multiprocessing.set_start_method('spawn')
    db_utils.initialize()
    if os.getenv('LOCAL_EXPERIMENT'):
        # Local runs have no pre-provisioned database, so create the schema.
        models.Base.metadata.create_all(db_utils.engine)

    builder.build_base_images()

    experiment_config_file_path = os.path.join(fuzzer_config_utils.get_dir(),
                                               'experiment.yaml')
    experiment = Experiment(experiment_config_file_path)

    # When building, we only care about the underlying fuzzer rather than the
    # display name that we use to identify a specific configuration.
    unique_fuzzers = list({
        fuzzer_config_utils.get_underlying_fuzzer_name(f)
        for f in experiment.fuzzers
    })
    builder.build_all_fuzzer_benchmarks(unique_fuzzers, experiment.benchmarks)

    create_work_subdirs(['experiment-folders', 'measurement-folders'])

    # Start measurer and scheduler in separate threads/processes.
    scheduler_loop_thread = threading.Thread(target=scheduler.schedule_loop,
                                             args=(experiment.config, ))
    scheduler_loop_thread.start()
    # Renamed from measurer_loop_thread: this is a multiprocessing.Process,
    # not a Thread, matching the naming used by the other dispatcher versions.
    measurer_loop_process = multiprocessing.Process(
        target=measurer.measure_loop,
        args=(
            experiment.config['experiment'],
            experiment.config['max_total_time'],
        ))
    measurer_loop_process.start()

    while True:
        time.sleep(LOOP_WAIT_SECONDS)
        # The experiment is complete when both workers have exited.
        is_complete = (not scheduler_loop_thread.is_alive()
                       and not measurer_loop_process.is_alive())

        # Generate periodic output reports.
        reporter.output_report(experiment.web_bucket,
                               in_progress=not is_complete)

        if is_complete:
            # Experiment is complete, bail out.
            break

    # Both workers have already exited (checked above), so these joins return
    # immediately; joining reaps the child process instead of leaving a
    # zombie, and cleanly retires the scheduler thread.
    scheduler_loop_thread.join()
    measurer_loop_process.join()