Example 1
def main():
    """Copies files needed to integrate an OSS-Fuzz benchmark and creates the
    benchmark's benchmark.yaml file."""
    parser = argparse.ArgumentParser(description='Integrate a new benchmark.')
    parser.add_argument('-p',
                        '--project',
                        help='Project for benchmark. Example: "zlib"',
                        required=True)
    parser.add_argument(
        '-f',
        '--fuzz-target',
        help='Fuzz target for benchmark. Example: "zlib_uncompress_fuzzer"',
        required=True)
    parser.add_argument(
        '-n',
        '--benchmark-name',
        help='Benchmark name. Defaults to <project>_<fuzz_target>',
        required=False)
    parser.add_argument('-c', '--commit', help='Project commit hash.')
    parser.add_argument(
        '-d',
        '--date',
        help='Date of the commit. Example: 2019-10-19T09:07:25+01:00')

    logs.initialize()
    args = parser.parse_args()
    benchmark = integrate_benchmark(args.project, args.fuzz_target,
                                    args.benchmark_name, args.commit,
                                    args.date)
    logs.info('Successfully integrated benchmark: %s.', benchmark)
    logs.info('Please run "make test-run-afl-%s" to test integration.',
              benchmark)
    return 0
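
Note on the pattern above: argparse converts dashed long options into underscored attribute names, which is why Example 1 can read args.fuzz_target and args.benchmark_name after parsing. A minimal self-contained sketch (the parser here rebuilds just Example 1's required flags, it is not imported from it):

import argparse

# Rebuild just the required flags from Example 1 to show how they parse.
parser = argparse.ArgumentParser(description='Integrate a new benchmark.')
parser.add_argument('-p', '--project', required=True)
parser.add_argument('-f', '--fuzz-target', required=True)

# '--fuzz-target' becomes the attribute 'fuzz_target'.
args = parser.parse_args(['-p', 'zlib', '-f', 'zlib_uncompress_fuzzer'])
assert args.project == 'zlib'
assert args.fuzz_target == 'zlib_uncompress_fuzzer'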
Example 2
def main() -> int:
    """Check that this branch conforms to the standards of fuzzbench."""
    logs.initialize()
    parser = argparse.ArgumentParser(
        description='Presubmit script for fuzzbench.')
    choices = [
        'format', 'lint', 'typecheck', 'licensecheck',
        'test_changed_integrations'
    ]
    parser.add_argument('command', choices=choices, nargs='?')

    args = parser.parse_args()
    os.chdir(_SRC_ROOT)
    changed_files = [Path(path) for path in diff_utils.get_changed_files()]

    if not args.command:
        success = do_checks(changed_files)
        return bool_to_returncode(success)

    command_check_mapping = {
        'format': yapf,
        'lint': lint,
        'typecheck': pytype,
        'licensecheck': license_check,
        'test_changed_integrations': test_changed_integrations
    }

    check = command_check_mapping[args.command]
    if args.command == 'format':
        success = check(changed_files, False)
    else:
        success = check(changed_files)
    if not success:
        print('ERROR: %s failed, see errors above.' % check.__name__)
    return bool_to_returncode(success)
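
bool_to_returncode is used throughout these presubmit examples but never defined in them. Judging from its call sites (a success flag in, a process exit code out), a plausible sketch is the following; the real fuzzbench helper may differ in details:

def bool_to_returncode(success: bool) -> int:
    """Sketch: map a success flag to an exit code, 0 on success, 1 on failure."""
    return 0 if success else 1

assert bool_to_returncode(True) == 0
assert bool_to_returncode(False) == 1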
Example 3
def main():
    """Build fuzzer, benchmark pairs on Google Cloud Build."""
    parser = argparse.ArgumentParser(
        description='Build fuzzer, benchmark pairs on Google Cloud Build.')

    parser.add_argument('-b',
                        '--benchmarks',
                        help='Benchmark names.',
                        nargs='+',
                        required=True)

    parser.add_argument('-f',
                        '--fuzzers',
                        help='Fuzzer names.',
                        nargs='+',
                        required=True)

    parser.add_argument('-n',
                        '--num-concurrent-builds',
                        help='Max concurrent builds allowed.',
                        type=int,
                        default=DEFAULT_MAX_CONCURRENT_BUILDS,
                        required=False)

    logs.initialize()
    args = parser.parse_args()

    build_all_fuzzer_benchmarks(args.fuzzers, args.benchmarks,
                                args.num_concurrent_builds)

    return 0
Example 4
def main():
    """Stop the experiment."""
    if len(sys.argv) != 3:
        print('Usage: {0} <experiment-name> <experiment-config.yaml>'.format(
            sys.argv[0]))
        return 1
    logs.initialize()
    return 0 if stop_experiment(sys.argv[1], sys.argv[2]) else 1
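
Unlike most examples in this collection, Example 4 validates sys.argv by hand. An illustrative argparse equivalent (argument names are hypothetical, chosen to mirror the usage string):

import argparse

# Illustrative argparse equivalent of Example 4's manual sys.argv check.
# argparse prints usage and exits with status 2 when arguments are missing.
parser = argparse.ArgumentParser(description='Stop a running experiment.')
parser.add_argument('experiment_name')
parser.add_argument('experiment_config', help='Path to experiment-config.yaml.')
args = parser.parse_args(['my-experiment', 'experiment-config.yaml'])
assert args.experiment_name == 'my-experiment'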
Example 5
def measure_loop(experiment: str, max_total_time: int):
    """Continuously measure trials for |experiment|."""
    db_utils.initialize()
    logs.initialize(default_extras={
        'component': 'dispatcher',
    })
    with multiprocessing.Pool() as pool, multiprocessing.Manager() as manager:
        # Using multiprocessing.Queue directly will fail with a complaint
        # about inheriting the queue.
        q = manager.Queue()  # pytype: disable=attribute-error
        while True:
            try:
                # Get whether all trials have ended before we measure to prevent
                # races.
                all_trials_ended = scheduler.all_trials_ended(experiment)

                if not measure_all_trials(experiment, max_total_time, pool, q):
                    # We didn't measure any trials.
                    if all_trials_ended:
                        # There are no trials producing snapshots to measure.
                        # Given that we couldn't measure any snapshots, we won't
                        # be able to measure any in the future, so break now.
                        break
            except Exception:  # pylint: disable=broad-except
                logger.error('Error occurred during measuring.')

            time.sleep(FAIL_WAIT_SECONDS)

    logger.info('Finished measuring.')
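
The manager-owned queue in measure_loop is load-bearing: a plain multiprocessing.Queue cannot be passed as an argument to pool workers (it raises RuntimeError, since such queues may only be shared with child processes through inheritance), while Manager().Queue() returns a picklable proxy. A minimal standalone demonstration:

import multiprocessing

def worker(queue):
    queue.put('done')

if __name__ == '__main__':
    with multiprocessing.Pool() as pool, multiprocessing.Manager() as manager:
        queue = manager.Queue()  # Proxy object: picklable, safe as an argument.
        pool.apply_async(worker, (queue,)).get()
        print(queue.get())  # -> 'done'
        # Passing multiprocessing.Queue() instead raises RuntimeError.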
Example 6
def main():
    """Reproduce a specified experiment."""
    logs.initialize()
    parser = argparse.ArgumentParser(
        description='Reproduce an experiment from a full config file.')
    parser.add_argument('-c',
                        '--experiment-config',
                        help='Path to the experiment configuration yaml file.',
                        required=True)

    parser.add_argument('-e',
                        '--experiment-name',
                        help='Experiment name.',
                        required=True)

    parser.add_argument('-d',
                        '--description',
                        help='Description of the experiment.',
                        required=False)

    args = parser.parse_args()
    config = yaml_utils.read(args.experiment_config)
    run_experiment.validate_experiment_name(args.experiment_name)
    if args.experiment_name == config['experiment']:
        raise Exception('Must use a different experiment name.')
    config['experiment'] = args.experiment_name
    config['description'] = args.description
    validate_config(config)
    run_experiment.start_experiment_from_full_config(config)
    return 0
Example 7
def main() -> int:
    """Check that this branch conforms to the standards of fuzzbench."""
    parser = argparse.ArgumentParser(
        description='Presubmit script for fuzzbench.')
    choices = [
        'format', 'lint', 'typecheck', 'licensecheck',
        'test_changed_integrations'
    ]
    parser.add_argument(
        'command',
        choices=choices,
        nargs='?',
        help='The presubmit check to run. Defaults to all of them')
    parser.add_argument('--all-files',
                        action='store_true',
                        help='Run presubmit check(s) on all files',
                        default=False)
    parser.add_argument('-v', '--verbose', action='store_true', default=False)

    args = parser.parse_args()

    os.chdir(_SRC_ROOT)

    if not args.verbose:
        logs.initialize()
    else:
        logs.initialize(log_level=logging.DEBUG)

    if not args.all_files:
        relevant_files = [
            Path(path) for path in diff_utils.get_changed_files()
        ]
    else:
        relevant_files = get_all_files()

    relevant_files = filter_ignored_files(relevant_files)

    logs.debug('Running presubmit check(s) on: %s',
               ' '.join(str(path) for path in relevant_files))

    if not args.command:
        success = do_checks(relevant_files)
        return bool_to_returncode(success)

    command_check_mapping = {
        'format': yapf,
        'lint': lint,
        'typecheck': pytype,
        'licensecheck': license_check,
        'test_changed_integrations': test_changed_integrations
    }

    check = command_check_mapping[args.command]
    if args.command == 'format':
        success = check(relevant_files, False)
    else:
        success = check(relevant_files)
    if not success:
        print('ERROR: %s failed, see errors above.' % check.__name__)
    return bool_to_returncode(success)
Example 8
def _initialize_logs(experiment):
    """Initialize logs. This must be called on process start."""
    logs.initialize(
        default_extras={
            'experiment': experiment,
            'component': 'dispatcher',
            'subcomponent': 'scheduler'
        })
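
The logs module itself is not shown in this collection. As a rough illustration only, default_extras maps naturally onto the standard library's logging.LoggerAdapter; this sketch is an assumption about the idiom, not fuzzbench's actual implementation:

import logging

def initialize(default_extras=None, log_level=logging.INFO):
    """Sketch: return a logger that attaches default fields to every record."""
    logging.basicConfig(level=log_level)
    return logging.LoggerAdapter(logging.getLogger('dispatcher'),
                                 default_extras or {})

logger = initialize(default_extras={'component': 'dispatcher',
                                    'subcomponent': 'scheduler'})
logger.info('scheduler started')  # Record carries the default extras.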
Example 9
def main():
    """Run an experiment in the cloud."""
    logs.initialize()

    parser = argparse.ArgumentParser(
        description='Begin an experiment that evaluates fuzzers on one or '
        'more benchmarks.')

    all_benchmarks = benchmark_utils.get_all_benchmarks()
    all_fuzzers = fuzzer_utils.get_fuzzer_names()

    parser.add_argument('-b',
                        '--benchmarks',
                        help='Benchmark names. All of them by default.',
                        nargs='+',
                        required=False,
                        default=all_benchmarks,
                        choices=all_benchmarks)
    parser.add_argument('-c',
                        '--experiment-config',
                        help='Path to the experiment configuration yaml file.',
                        required=True)
    parser.add_argument('-e',
                        '--experiment-name',
                        help='Experiment name.',
                        required=True)
    parser.add_argument('-f',
                        '--fuzzers',
                        help='Fuzzers to use.',
                        nargs='+',
                        required=False,
                        default=None,
                        choices=all_fuzzers)
    parser.add_argument(
        '-ns',
        '--no-seeds',
        help='Should trials be conducted without seed corpora.',
        required=False,
        default=False,
        action='store_true')
    parser.add_argument(
        '-nd',
        '--no-dictionaries',
        help='Should trials be conducted without dictionaries.',
        required=False,
        default=False,
        action='store_true')

    args = parser.parse_args()
    fuzzers = args.fuzzers or all_fuzzers

    start_experiment(args.experiment_name,
                     args.experiment_config,
                     args.benchmarks,
                     fuzzers,
                     no_seeds=args.no_seeds,
                     no_dictionaries=args.no_dictionaries)
    return 0
Example 10
def main():
    """Generates report."""
    logs.initialize()

    parser = get_arg_parser()
    args = parser.parse_args()

    generate_report(args.experiment, args.report_dir, args.report_name,
                    args.report_type, args.quick, args.from_cached_data)
Example 11
def main():
    """Run an experiment in the cloud."""
    logs.initialize()

    parser = argparse.ArgumentParser(
        description='Begin an experiment that evaluates fuzzers on one or '
        'more benchmarks.')

    all_benchmarks = benchmark_utils.get_all_benchmarks()
    all_fuzzers = fuzzer_utils.get_fuzzer_names()

    parser.add_argument('-b',
                        '--benchmarks',
                        help='Benchmark names. All of them by default.',
                        nargs='+',
                        required=False,
                        default=all_benchmarks,
                        choices=all_benchmarks)
    parser.add_argument('-c',
                        '--experiment-config',
                        help='Path to the experiment configuration yaml file.',
                        required=True)
    parser.add_argument('-e',
                        '--experiment-name',
                        help='Experiment name.',
                        required=True)
    fuzzers_group = parser.add_mutually_exclusive_group()
    fuzzers_group.add_argument('-f',
                               '--fuzzers',
                               help='Fuzzers to use.',
                               nargs='+',
                               required=False,
                               default=None,
                               choices=all_fuzzers)
    fuzzers_group.add_argument('-cf',
                               '--changed-fuzzers',
                               help=('Use fuzzers that have changed since the '
                                     'last experiment. The last experiment is '
                                     'determined by the database your '
                                     'experiment uses, not necessarily the '
                                     'fuzzbench service.'),
                               action='store_true',
                               required=False)

    args = parser.parse_args()

    if args.changed_fuzzers:
        fuzzers = experiment_changes.get_fuzzers_changed_since_last()
        if not fuzzers:
            logs.error('No fuzzers changed since last experiment. Exiting.')
            return 1
    else:
        fuzzers = args.fuzzers or all_fuzzers

    start_experiment(args.experiment_name, args.experiment_config,
                     args.benchmarks, fuzzers)
    return 0
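
Example 11's default=None on --fuzzers (rather than defaulting to all_fuzzers as in Example 9) is what lets the `fuzzers = args.fuzzers or all_fuzzers` fallback coexist with the mutually exclusive group. A minimal sketch of the exclusive-flags behavior:

import argparse

# Sketch of the mutually-exclusive-flags pattern from Example 11.
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument('-f', '--fuzzers', nargs='+', default=None)
group.add_argument('-cf', '--changed-fuzzers', action='store_true')

# Supplying both flags would make argparse exit with a
# "not allowed with argument" error instead of parsing.
args = parser.parse_args(['-cf'])
assert args.fuzzers is None and args.changed_fuzzers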
Example 12
def main():
    """Do the experiment and report results."""
    logs.initialize(default_extras={
        'component': 'dispatcher',
    })

    try:
        dispatcher_main()
    except Exception as error:
        logs.error('Error conducting experiment.')
        raise error
Example 13
def main():
    """Do an experiment on a development machine or on a GCP runner instance."""
    logs.initialize(
        default_extras={
            'benchmark': environment.get('BENCHMARK'),
            'component': 'runner',
            'fuzzer': environment.get('FUZZER'),
            'trial_id': str(environment.get('TRIAL_ID')),
        })
    experiment_main()
    return 0
Example 14
def main():
    """Creates or gets an already created service account key and saves it to
    the provided path."""
    logs.initialize()
    try:
        keyfile = sys.argv[1]
        get_or_create_key(sys.argv[2], keyfile)
        logs.info('Saved key to %s.', keyfile)
    except Exception:  # pylint: disable=broad-except
        logs.error('Failed to get or create key.')
        return 1
    return 0
Example 15
def main():
    """Main function for running scheduler independently."""
    logs.initialize(default_extras={'component': 'dispatcher'})

    if len(sys.argv) != 2:
        print('Usage: {} <experiment_config.yaml>'.format(sys.argv[0]))
        return 1

    experiment_config = yaml_utils.read(sys.argv[1])
    schedule_loop(experiment_config)

    return 0
Example 16
def main():
    """Run an experiment in the cloud."""
    logs.initialize()

    parser = argparse.ArgumentParser(
        description='Begin an experiment that evaluates fuzzers on one or '
        'more benchmarks.')

    all_benchmarks = benchmark_utils.get_all_benchmarks()

    parser.add_argument('-b',
                        '--benchmarks',
                        help='Benchmark names. All of them by default.',
                        nargs='+',
                        required=False,
                        default=all_benchmarks,
                        choices=all_benchmarks)
    parser.add_argument('-c',
                        '--experiment-config',
                        help='Path to the experiment configuration yaml file.',
                        required=True)
    parser.add_argument('-e',
                        '--experiment-name',
                        help='Experiment name.',
                        required=True)
    parser.add_argument('-f',
                        '--fuzzers',
                        help='Fuzzers to use.',
                        nargs='+',
                        required=False,
                        default=[])
    parser.add_argument('-fc',
                        '--fuzzer-configs',
                        help='Fuzzer configurations to use.',
                        nargs='+',
                        required=False,
                        default=[])
    args = parser.parse_args()

    if not args.fuzzer_configs:
        fuzzer_configs = fuzzer_utils.get_fuzzer_configs(fuzzers=args.fuzzers)
    else:
        fuzzer_configs = [
            yaml_utils.read(fuzzer_config)
            for fuzzer_config in args.fuzzer_configs
        ]

    start_experiment(args.experiment_name, args.experiment_config,
                     args.benchmarks, fuzzer_configs)
    if not os.getenv('MANUAL_EXPERIMENT'):
        stop_experiment.stop_experiment(args.experiment_name,
                                        args.experiment_config)
    return 0
Example 17
def main():
    """Run an experiment."""
    logs.initialize()
    parser = argparse.ArgumentParser(
        description='Run a full or diff experiment (if needed).')
    parser.add_argument('experiment_type', choices=['diff', 'full'])
    args = parser.parse_args()
    if args.experiment_type == 'diff':
        run_diff_experiment()
    else:
        run_full_experiment()
    return 0
Example 18
def generate_coverage_reports(experiment_config: dict):
    """Generates coverage reports for each benchmark and fuzzer."""
    logs.initialize()
    logger.info('Start generating coverage reports.')

    benchmarks = experiment_config['benchmarks']
    fuzzers = experiment_config['fuzzers']
    experiment = experiment_config['experiment']

    for benchmark in benchmarks:
        for fuzzer in fuzzers:
            generate_coverage_report(experiment, benchmark, fuzzer)

    logger.info('Finished generating coverage reports.')
Example 19
def main():
    """Run an experiment."""
    logs.initialize()
    parser = argparse.ArgumentParser(description='Run a requested experiment.')
    # TODO(metzman): Add a way to exit immediately if there is already an
    # experiment running. FuzzBench's scheduler isn't smart enough to deal with
    # this properly.
    parser.add_argument('-d',
                        '--dry-run',
                        help='Dry run, don\'t actually run the experiment',
                        default=False,
                        action='store_true')
    args = parser.parse_args()
    run_requested_experiment(args.dry_run)
    return 0
Example 20
def main():
    """Run schedule_measure_workers as a standalone script by calling schedule
    in a loop. Useful for debugging."""
    logs.initialize(
        default_extras={
            'experiment': os.environ['EXPERIMENT'],
            'component': 'dispatcher',
            'subcomponent': 'scheduler'
        })
    gce.initialize()
    config_path = sys.argv[1]
    config = yaml_utils.read(config_path)
    queue = initialize(config)
    while True:
        schedule(config, queue)
        time.sleep(30)
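
The loop above runs until the process is killed, which is fine for its stated debugging purpose. If a clean Ctrl-C exit is wanted, a hedged variant (function and parameter names are illustrative):

import time

def run_schedule_loop(schedule, config, queue, interval_seconds=30):
    """Sketch: the same polling loop, but exiting cleanly on Ctrl-C."""
    try:
        while True:
            schedule(config, queue)
            time.sleep(interval_seconds)
    except KeyboardInterrupt:
        print('Interrupted; stopping schedule loop.')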
Example 21
def main():
    """Generates report."""
    logs.initialize()

    parser = get_arg_parser()
    args = parser.parse_args()

    generate_report(experiment_names=args.experiments,
                    report_directory=args.report_dir,
                    report_name=args.report_name,
                    label_by_experiment=args.label_by_experiment,
                    benchmarks=args.benchmarks,
                    fuzzers=args.fuzzers,
                    report_type=args.report_type,
                    quick=args.quick,
                    from_cached_data=args.from_cached_data,
                    end_time=args.end_time,
                    merge_with_clobber=args.merge_with_clobber)
Example 22
def main():
    """Run an experiment."""
    logs.initialize()
    parser = argparse.ArgumentParser(
        description='Run a full or diff experiment (if needed).')
    # TODO(metzman): Add a way to exit immediately if there is already an
    # experiment running. FuzzBench's scheduler isn't smart enough to deal with
    # this properly.
    parser.add_argument('experiment_type', choices=['diff', 'full'])
    parser.add_argument('-d',
                        '--dry-run',
                        help='Dry run, don\'t actually run the experiment',
                        default=False,
                        action='store_true')
    args = parser.parse_args()
    if args.experiment_type == 'full':
        run_full_experiment()
    else:
        run_diff_experiment(args.dry_run)
    return 0
Example 23
def main():
    """Run an experiment in the cloud."""
    parser = argparse.ArgumentParser(
        description='Begin an experiment that evaluates fuzzers on one or '
        'more benchmarks.')

    parser.add_argument('-b',
                        '--benchmarks',
                        help='Benchmark names.',
                        nargs='+',
                        required=True)
    parser.add_argument('-c',
                        '--experiment-config',
                        help='Path to the experiment configuration yaml file.',
                        required=True)
    parser.add_argument('-e',
                        '--experiment-name',
                        help='Experiment name.',
                        required=True)
    parser.add_argument('-f',
                        '--fuzzers',
                        help='Fuzzers to use.',
                        nargs='+',
                        required=False,
                        default=[])
    parser.add_argument('-fc',
                        '--fuzzer-configs',
                        help='Fuzzer configurations to use.',
                        nargs='+',
                        required=False,
                        default=[])
    args = parser.parse_args()

    logs.initialize()
    start_experiment(args.experiment_name, args.experiment_config,
                     args.benchmarks, args.fuzzers, args.fuzzer_configs)
    if not os.getenv('MANUAL_EXPERIMENT'):
        stop_experiment.stop_experiment(args.experiment_name,
                                        args.experiment_config)
    return 0
Example 24
def main():
    """Do the experiment and report results."""
    logs.initialize(default_extras={
        'component': 'dispatcher',
    })

    try:
        dispatcher_main()
    except Exception as error:
        logs.error('Error conducting experiment.')
        raise error

    if experiment_utils.is_local_experiment():
        return 0

    experiment_config_file_path = _get_config_file_path()

    if stop_experiment.stop_experiment(experiment_utils.get_experiment_name(),
                                       experiment_config_file_path):
        return 0

    return 1
Example 25
def main():
    """Build fuzzer, benchmark pairs on Google Cloud Build."""
    parser = argparse.ArgumentParser(
        description='Build fuzzer, benchmark pairs on Google Cloud Build.')

    parser.add_argument('-b',
                        '--benchmarks',
                        help='Benchmark names.',
                        nargs='+',
                        required=True)

    parser.add_argument('-f',
                        '--fuzzers',
                        help='Fuzzer names.',
                        nargs='+',
                        required=True)
    logs.initialize()
    args = parser.parse_args()

    build_all_fuzzer_benchmarks(args.fuzzers, args.benchmarks)

    return 0
Example 26
def main() -> int:
    """Check that this branch conforms to the standards of fuzzbench."""
    logs.initialize()
    parser = argparse.ArgumentParser(
        description='Presubmit script for fuzzbench.')
    choices = [
        'format', 'lint', 'typecheck', 'licensecheck',
        'test_changed_integrations'
    ]
    parser.add_argument('command', choices=choices, nargs='?')

    args = parser.parse_args()
    os.chdir(_SRC_ROOT)
    changed_files = get_changed_files()

    if args.command == 'format':
        success = yapf(changed_files, False)
        return bool_to_returncode(success)

    if args.command == 'lint':
        success = lint(changed_files)
        return bool_to_returncode(success)

    if args.command == 'typecheck':
        success = pytype(changed_files)
        return bool_to_returncode(success)

    if args.command == 'licensecheck':
        success = license_check(changed_files)
        return bool_to_returncode(success)

    if args.command == 'test_changed_integrations':
        success = test_changed_integrations(changed_files)
        return bool_to_returncode(success)

    success = do_checks(changed_files)
    return bool_to_returncode(success)
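
Example 26 appears to be an earlier revision of the presubmit main shown in Examples 2 and 7: the explicit if/elif chain here was later collapsed into a command-to-function mapping. The mapping variant is shorter but raises KeyError for any choice left out of the dict (hence the licensecheck entry there), whereas the chain simply falls through to do_checks. A self-contained sketch of the dispatch-table idiom:

# Sketch of the dispatch-table idiom from Examples 2 and 7.
def lint(files):
    return bool(files)

command_check_mapping = {'lint': lint}

check = command_check_mapping['lint']  # KeyError for unmapped commands.
assert check(['some_file.py'])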
Example 27
def initialize_logs(verbose: bool):
    """Initialize logging."""
    if not verbose:
        logs.initialize()
    else:
        logs.initialize(log_level=logging.DEBUG)
Example 28
def initialize_logs():
    """Initialize logs. This must be called on process start."""
    logs.initialize(default_extras={
        'component': 'dispatcher',
    })
Example 29
def main():
    """Run an experiment in the cloud."""
    logs.initialize()

    parser = argparse.ArgumentParser(
        description='Begin an experiment that evaluates fuzzers on one or '
        'more benchmarks.')

    all_benchmarks = benchmark_utils.get_all_benchmarks()
    coverage_benchmarks = benchmark_utils.get_coverage_benchmarks()
    parser.add_argument('-b',
                        '--benchmarks',
                        help=('Benchmark names. All code coverage '
                              'benchmarks by default.'),
                        nargs='+',
                        required=False,
                        default=coverage_benchmarks,
                        choices=all_benchmarks)
    parser.add_argument('-c',
                        '--experiment-config',
                        help='Path to the experiment configuration yaml file.',
                        required=True)
    parser.add_argument('-e',
                        '--experiment-name',
                        help='Experiment name.',
                        required=True)
    parser.add_argument('-d',
                        '--description',
                        help='Description of the experiment.',
                        required=False)
    parser.add_argument('-cb',
                        '--concurrent-builds',
                        help='Max concurrent builds allowed.',
                        required=False)

    all_fuzzers = fuzzer_utils.get_fuzzer_names()
    parser.add_argument('-f',
                        '--fuzzers',
                        help='Fuzzers to use.',
                        nargs='+',
                        required=False,
                        default=None,
                        choices=all_fuzzers)
    parser.add_argument('-ns',
                        '--no-seeds',
                        help='Should trials be conducted without seed corpora.',
                        required=False,
                        default=False,
                        action='store_true')
    parser.add_argument('-nd',
                        '--no-dictionaries',
                        help='Should trials be conducted without dictionaries.',
                        required=False,
                        default=False,
                        action='store_true')
    parser.add_argument('-a',
                        '--allow-uncommitted-changes',
                        help='Skip check that no uncommitted changes were made.',
                        required=False,
                        default=False,
                        action='store_true')
    parser.add_argument(
        '-o',
        '--oss-fuzz-corpus',
        help='Should trials be conducted with OSS-Fuzz corpus (if available).',
        required=False,
        default=False,
        action='store_true')
    args = parser.parse_args()
    fuzzers = args.fuzzers or all_fuzzers

    concurrent_builds = args.concurrent_builds
    if concurrent_builds is not None:
        if not concurrent_builds.isdigit():
            parser.error(
                'The --concurrent-builds argument must be a positive number.')
        concurrent_builds = int(concurrent_builds)

    start_experiment(args.experiment_name,
                     args.experiment_config,
                     args.benchmarks,
                     fuzzers,
                     description=args.description,
                     no_seeds=args.no_seeds,
                     no_dictionaries=args.no_dictionaries,
                     oss_fuzz_corpus=args.oss_fuzz_corpus,
                     allow_uncommitted_changes=args.allow_uncommitted_changes,
                     concurrent_builds=concurrent_builds)
    return 0
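
Example 29 validates --concurrent-builds by hand after parsing; note that isdigit() also accepts '0', so the check is slightly looser than its error message claims. An alternative sketch that pushes validation into argparse via a custom type callable (positive_int is illustrative, not part of fuzzbench):

import argparse

def positive_int(value: str) -> int:
    """argparse type callable that accepts only integers >= 1."""
    number = int(value)  # ValueError here becomes an argparse error message.
    if number < 1:
        raise argparse.ArgumentTypeError('%r is not a positive number' % value)
    return number

parser = argparse.ArgumentParser()
parser.add_argument('-cb', '--concurrent-builds', type=positive_int)
args = parser.parse_args(['-cb', '4'])
assert args.concurrent_builds == 4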