def test_measure_all_trials_not_ready(mocked_rsync, mocked_ls, experiment):
    """Test that measure_all_trials behaves correctly before snapshots exist.

    When listing the remote corpus fails (nonzero exit), there is nothing to
    measure yet, so no data should be copied down, but the measure loop should
    still report success so it keeps polling.
    """
    # A nonzero ls exit code simulates the snapshot directory not existing yet.
    mocked_ls.return_value = new_process.ProcessResult(1, '', False)
    experiment_name = experiment_utils.get_experiment_name()
    # The call should succeed (truthy) even though nothing was measurable.
    assert measurer.measure_all_trials(experiment_name, MAX_TOTAL_TIME,
                                       test_utils.MockPool(), queue.Queue())
    # Nothing should have been rsynced since there was nothing to measure.
    assert not mocked_rsync.called
def test_measure_all_trials_no_more(mocked_directories_have_same_files,
                                    mocked_execute):
    """Test measure_all_trials when the experiment is finished.

    Identical local and remote corpus directories signal that there is nothing
    new to measure, so measure_all_trials should return a falsy value.
    """
    # Identical directories mean the experiment has produced no new data.
    mocked_directories_have_same_files.return_value = True
    mocked_execute.return_value = new_process.ProcessResult(0, '', False)
    pool = test_utils.MockPool()
    experiment_name = experiment_utils.get_experiment_name()
    result = measurer.measure_all_trials(experiment_name, MAX_TOTAL_TIME, pool,
                                         queue.Queue())
    # Falsy result tells the caller the measure loop can stop.
    assert not result
def test_measure_all_trials(_, __, mocked_execute, db, fs):
    """Test measure_all_trials under normal conditions.

    Seeds the database with an experiment, marks every pending trial as
    started, and verifies that the measurer dispatches a measurement job for
    each trial exactly once.
    """
    mocked_execute.return_value = new_process.ProcessResult(0, '', False)
    experiment_name = experiment_utils.get_experiment_name()
    dispatcher._initialize_experiment_in_db(experiment_name, BENCHMARKS,
                                            FUZZERS, NUM_TRIALS)
    # Only started trials are measured, so mark each pending trial as started.
    pending = scheduler.get_pending_trials(experiment_name).all()
    for pending_trial in pending:
        pending_trial.time_started = datetime.datetime.utcnow()
    db_utils.add_all(pending)
    # Fake a new corpus unit on the (pyfakefs) filesystem so there is
    # something to measure.
    fs.create_file(measurer.get_experiment_folders_dir() / NEW_UNIT)
    pool = test_utils.MockPool()
    assert measurer.measure_all_trials(experiment_name, MAX_TOTAL_TIME, pool,
                                       queue.Queue())
    measured_trial_ids = [call[2] for call in pool.func_calls]
    # NUM_TRIALS (4) * len(FUZZERS) (2) * len(BENCHMARKS) (2) == 16 trials,
    # with ids assigned sequentially starting at 1.
    assert sorted(measured_trial_ids) == list(range(1, 17))