def do_build(build_type, fuzzer, always_build):
    """Build fuzzer-benchmark pairs for CI."""
    if build_type == 'oss-fuzz':
        benchmarks = benchmark_utils.get_oss_fuzz_coverage_benchmarks()
    elif build_type == 'standard':
        benchmarks = benchmark_utils.get_standard_coverage_benchmarks()
    elif build_type == 'bug':
        benchmarks = benchmark_utils.get_bug_benchmarks()
    else:
        raise Exception('Invalid build_type: %s' % build_type)

    if always_build:
        # Always do a build if always_build is True.
        return make_builds(benchmarks, fuzzer)

    changed_files = diff_utils.get_changed_files()
    changed_fuzzers = change_utils.get_changed_fuzzers(changed_files)
    if fuzzer in changed_fuzzers:
        # If the fuzzer itself has changed, build it with all benchmarks,
        # since the change could have affected any benchmark.
        return make_builds(benchmarks, fuzzer)

    # Otherwise, only build benchmarks that have changed.
    changed_benchmarks = change_utils.get_changed_benchmarks(changed_files)
    benchmarks = set(benchmarks).intersection(changed_benchmarks)
    return make_builds(benchmarks, fuzzer)
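
# A minimal sketch of how a builder like make_builds() could dispatch the
# pairs selected above, assuming each fuzzer-benchmark pair maps to one
# subprocess invocation. The 'make build-<fuzzer>-<benchmark>' command line
# and the _sketch_make_builds name are assumptions for illustration only;
# the module's real make_builds() is defined elsewhere and may work
# differently.
import subprocess


def _sketch_make_builds(benchmarks, fuzzer):
    """Illustrative only: run one build per benchmark, reporting whether
    every build succeeded."""
    for benchmark in sorted(benchmarks):
        # Hypothetical build command; the real build tooling may differ.
        result = subprocess.run(
            ['make', 'build-%s-%s' % (fuzzer, benchmark)], check=False)
        if result.returncode != 0:
            return False
    return True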

def run_requested_experiment(dry_run):
    """Run the oldest requested experiment that hasn't been run yet in
    experiment-requests.yaml."""
    requested_experiments = _get_requested_experiments()

    # TODO(metzman): Look into supporting benchmarks as an optional parameter
    # so that people can add fuzzers that don't support everything.

    if PAUSE_SERVICE_KEYWORD in requested_experiments:
        # Check if automated experiment service is paused.
        logs.warning('Pause service requested, not running experiment.')
        return None

    requested_experiment = None
    for experiment_config in reversed(requested_experiments):
        experiment_name = _get_experiment_name(experiment_config)
        with db_utils.session_scope() as session:
            is_new_experiment = session.query(models.Experiment).filter(
                models.Experiment.name == experiment_name).first() is None
        if is_new_experiment:
            requested_experiment = experiment_config
            break

    if requested_experiment is None:
        logs.info('No new experiment to run. Exiting.')
        return None

    experiment_name = _get_experiment_name(requested_experiment)
    if not validate_experiment_requests([requested_experiment]):
        logs.error('Requested experiment: %s in %s is not valid.',
                   requested_experiment, REQUESTED_EXPERIMENTS_PATH)
        return None

    fuzzers = requested_experiment['fuzzers']
    benchmark_type = requested_experiment.get('type')
    if benchmark_type == benchmark_utils.BenchmarkType.BUG.value:
        benchmarks = benchmark_utils.get_bug_benchmarks()
    else:
        benchmarks = (benchmark_utils.get_oss_fuzz_coverage_benchmarks() +
                      benchmark_utils.get_standard_coverage_benchmarks())

    logs.info('Running experiment: %s with fuzzers: %s.', experiment_name,
              ' '.join(fuzzers))
    description = _get_description(requested_experiment)
    oss_fuzz_corpus = _use_oss_fuzz_corpus(requested_experiment)
    return _run_experiment(experiment_name, fuzzers, benchmarks, description,
                           oss_fuzz_corpus, dry_run)
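
# A minimal sketch of how _get_requested_experiments() could load the request
# list, assuming REQUESTED_EXPERIMENTS_PATH points at a YAML file whose top
# level is a list of request dicts with newest requests first (which is why
# run_requested_experiment() iterates in reverse to find the oldest unrun
# one). The file schema and the _sketch_get_requested_experiments name are
# assumptions for illustration; the module's real helper may differ.
import yaml


def _sketch_get_requested_experiments():
    """Illustrative only: parse experiment-requests.yaml into a list of
    experiment request dicts."""
    with open(REQUESTED_EXPERIMENTS_PATH, encoding='utf-8') as file_handle:
        return yaml.safe_load(file_handle)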