def test_build_images_for_trials_base_images_fail(dispatcher_experiment):
    """A failure to build the base images must abort the experiment, so
    build_images_for_trials is expected to raise rather than return."""
    experiment = dispatcher_experiment
    with pytest.raises(Exception):
        dispatcher.build_images_for_trials(experiment.fuzzers,
                                           experiment.benchmarks,
                                           experiment.num_trials)
def test_build_images_for_trials_benchmark_fail(_, dispatcher_experiment):
    """build_images_for_trials must neither return trials for, nor attempt
    fuzzer builds on, a benchmark whose coverage build failed."""
    successful_benchmark = 'benchmark-1'

    def fake_build_all_fuzzer_benchmarks(fuzzers, benchmarks):
        # Only the benchmark that survived the measurer build should reach
        # the fuzzer-build stage.
        assert benchmarks == [successful_benchmark]
        return [(fuzzer, benchmark)
                for fuzzer in fuzzers
                for benchmark in benchmarks]

    with mock.patch('experiment.build.builder.build_all_measurers',
                    return_value=[successful_benchmark]), \
         mock.patch('experiment.build.builder.build_all_fuzzer_benchmarks',
                    side_effect=fake_build_all_fuzzer_benchmarks):
        # Sanity check this test so that we know we are actually testing
        # behavior when benchmarks fail.
        assert len(set(dispatcher_experiment.benchmarks)) > 1
        trials = dispatcher.build_images_for_trials(
            dispatcher_experiment.fuzzers, dispatcher_experiment.benchmarks,
            dispatcher_experiment.num_trials)
        assert all(
            trial.benchmark == successful_benchmark for trial in trials)
def test_build_images_for_trials_fuzzer_fail(_, dispatcher_experiment):
    """build_images_for_trials must not return trials for a (fuzzer,
    benchmark) pair whose build failed, while still returning trials for
    that fuzzer's successfully built benchmarks."""
    successful_fuzzer = 'fuzzer-a'
    fail_fuzzer = 'fuzzer-b'
    fuzzers = [successful_fuzzer, fail_fuzzer]
    successful_benchmark_for_fail_fuzzer = 'benchmark-1'
    fail_benchmark_for_fail_fuzzer = 'benchmark-2'
    benchmarks = [
        successful_benchmark_for_fail_fuzzer, fail_benchmark_for_fail_fuzzer
    ]
    # Every pair builds except (fail_fuzzer, fail_benchmark_for_fail_fuzzer).
    successful_builds = [(successful_fuzzer, fail_benchmark_for_fail_fuzzer),
                         (successful_fuzzer,
                          successful_benchmark_for_fail_fuzzer),
                         (fail_fuzzer, successful_benchmark_for_fail_fuzzer)]
    num_trials = 10

    def fake_build_all_fuzzer_benchmarks(fuzzers, benchmarks):
        # Sanity check this test so that we know we are actually testing
        # behavior when fuzzers fail.
        assert sorted(fuzzers) == sorted([successful_fuzzer, fail_fuzzer])
        assert successful_benchmark_for_fail_fuzzer in benchmarks
        return successful_builds

    with mock.patch('experiment.build.builder.build_all_measurers',
                    return_value=benchmarks), \
         mock.patch('experiment.build.builder.build_all_fuzzer_benchmarks',
                    side_effect=fake_build_all_fuzzer_benchmarks):
        trials = dispatcher.build_images_for_trials(fuzzers, benchmarks,
                                                    num_trials, False)

    actual_fuzzer_benchmarks = sorted(
        (trial.fuzzer, trial.benchmark) for trial in trials)
    # Each successfully built pair should appear num_trials times.
    expected_fuzzer_benchmarks = sorted(successful_builds * num_trials)
    assert actual_fuzzer_benchmarks == expected_fuzzer_benchmarks
def test_build_images_for_trials_build_success(_, dispatcher_experiment):
    """When every image build succeeds, build_images_for_trials should
    return num_trials trials for each (fuzzer, benchmark) pair in the
    experiment."""
    fuzzer_benchmarks = list(
        itertools.product(dispatcher_experiment.fuzzers,
                          dispatcher_experiment.benchmarks))

    with mock.patch('experiment.build.builder.build_all_measurers',
                    return_value=dispatcher_experiment.benchmarks), \
         mock.patch('experiment.build.builder.build_all_fuzzer_benchmarks',
                    return_value=fuzzer_benchmarks):
        trials = dispatcher.build_images_for_trials(
            dispatcher_experiment.fuzzers, dispatcher_experiment.benchmarks,
            dispatcher_experiment.num_trials)

    actual_fuzzer_benchmarks = sorted(
        (trial.fuzzer, trial.benchmark) for trial in trials)
    # Each pair should appear once per trial.
    expected_fuzzer_benchmarks = sorted(
        fuzzer_benchmarks * dispatcher_experiment.num_trials)
    assert actual_fuzzer_benchmarks == expected_fuzzer_benchmarks