Exemplo n.º 1
0
def run_full_experiment():
    """Launch an experiment covering every benchmark and every fuzzer config."""
    name = get_experiment_name()
    configs = fuzzer_utils.get_fuzzer_configs()
    all_benchmarks = benchmark_utils.get_all_benchmarks()
    run_experiment.start_experiment(name, EXPERIMENT_CONFIG_FILE,
                                    all_benchmarks, configs)
Exemplo n.º 2
0
def run_diff_experiment(dry_run):
    """Run a diff experiment: include only the fuzzers that have changed
    since the last experiment."""
    changed_fuzzers = experiment_changes.get_fuzzers_changed_since_last()
    logs.info('Running experiment with fuzzers: %s.', ' '.join(changed_fuzzers))
    configs = fuzzer_utils.get_fuzzer_configs(fuzzers=changed_fuzzers)
    return _run_experiment(configs, dry_run)
Exemplo n.º 3
0
def main():
    """Run an experiment in the cloud."""
    logs.initialize()

    parser = argparse.ArgumentParser(
        description='Begin an experiment that evaluates fuzzers on one or '
        'more benchmarks.')

    all_benchmarks = benchmark_utils.get_all_benchmarks()

    # Benchmarks are optional: by default every known benchmark is included.
    parser.add_argument('-b', '--benchmarks', nargs='+', required=False,
                        default=all_benchmarks, choices=all_benchmarks,
                        help='Benchmark names. All of them by default.')
    parser.add_argument('-c', '--experiment-config', required=True,
                        help='Path to the experiment configuration yaml file.')
    parser.add_argument('-e', '--experiment-name', required=True,
                        help='Experiment name.')
    parser.add_argument('-f', '--fuzzers', nargs='+', required=False,
                        default=[], help='Fuzzers to use.')
    parser.add_argument('-fc', '--fuzzer-configs', nargs='+', required=False,
                        default=[], help='Fuzzer configurations to use.')
    args = parser.parse_args()

    if args.fuzzer_configs:
        # Explicit config files take precedence over fuzzer names.
        configs = [yaml_utils.read(path) for path in args.fuzzer_configs]
    else:
        configs = fuzzer_utils.get_fuzzer_configs(fuzzers=args.fuzzers)

    start_experiment(args.experiment_name, args.experiment_config,
                     args.benchmarks, configs)
    # MANUAL_EXPERIMENT suppresses automatic teardown so the instances can be
    # inspected by hand.
    if not os.getenv('MANUAL_EXPERIMENT'):
        stop_experiment.stop_experiment(args.experiment_name,
                                        args.experiment_config)
    return 0
Exemplo n.º 4
0
def run_requested_experiment(dry_run):
    """Run the oldest requested experiment that hasn't been run yet in
    experiment-requests.yaml.

    Returns None when there is nothing to run (service paused, no new
    request, or an invalid request); otherwise returns the result of
    _run_experiment.
    """
    requested_experiments = _get_requested_experiments()

    # TODO(metzman): Look into supporting benchmarks as an optional parameter so
    # that people can add fuzzers that don't support everything.

    if PAUSE_SERVICE_KEYWORD in requested_experiments:
        # Check if automated experiment service is paused.
        logs.warning('Pause service requested, not running experiment.')
        return None

    # Walk the requests in reverse (presumably the file lists newest first,
    # so reversed() visits oldest first — confirm against the ordering of
    # experiment-requests.yaml) and pick the first one whose name is not
    # already recorded in the database.
    requested_experiment = None
    for experiment_config in reversed(requested_experiments):
        experiment_name = _get_experiment_name(experiment_config)
        is_new_experiment = db_utils.query(models.Experiment).filter(
            models.Experiment.name == experiment_name).first() is None
        if is_new_experiment:
            requested_experiment = experiment_config
            break

    if requested_experiment is None:
        logs.info('No new experiment to run. Exiting.')
        return None

    experiment_name = _get_experiment_name(requested_experiment)
    # Validation happens after selection so only the request about to run is
    # checked; an invalid request aborts without running anything.
    if not validate_experiment_requests([requested_experiment]):
        logs.error('Requested experiment: %s in %s is not valid.',
                   requested_experiment, REQUESTED_EXPERIMENTS_PATH)
        return None
    fuzzers = requested_experiment['fuzzers']

    logs.info('Running experiment: %s with fuzzers: %s.', experiment_name,
              ' '.join(fuzzers))
    fuzzer_configs = fuzzer_utils.get_fuzzer_configs(fuzzers=fuzzers)
    return _run_experiment(experiment_name, fuzzer_configs, dry_run)
Exemplo n.º 5
0
def test_variant_configs_valid():
    """Every variant config (variants.yaml file) must pass validation."""
    for fuzzer_config in fuzzer_utils.get_fuzzer_configs():
        run_experiment.validate_fuzzer_config(fuzzer_config)
Exemplo n.º 6
0
def main():
    """Run an experiment in the cloud."""
    logs.initialize()

    parser = argparse.ArgumentParser(
        description='Begin an experiment that evaluates fuzzers on one or '
        'more benchmarks.')

    all_benchmarks = benchmark_utils.get_all_benchmarks()
    all_fuzzers = fuzzer_utils.get_fuzzer_names()

    parser.add_argument('-b', '--benchmarks', nargs='+', required=False,
                        default=all_benchmarks, choices=all_benchmarks,
                        help='Benchmark names. All of them by default.')
    parser.add_argument('-c', '--experiment-config', required=True,
                        help='Path to the experiment configuration yaml file.')
    parser.add_argument('-e', '--experiment-name', required=True,
                        help='Experiment name.')
    # Only one method of selecting fuzzers may be used at a time.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-f', '--fuzzers', nargs='+', required=False,
                       default=None, choices=all_fuzzers,
                       help='Fuzzers to use.')
    group.add_argument('-fc', '--fuzzer-configs', nargs='+', required=False,
                       default=[],
                       help='Fuzzer configurations to use.')
    group.add_argument('-cf', '--changed-fuzzers', action='store_true',
                       required=False,
                       help=('Use fuzzers that have changed since the '
                             'last experiment. The last experiment is '
                             'determined by the database your '
                             'experiment uses, not necessarily the '
                             'fuzzbench service'))

    args = parser.parse_args()

    if args.fuzzer_configs:
        # Explicit config files take precedence over fuzzer selection flags.
        configs = [yaml_utils.read(path) for path in args.fuzzer_configs]
    elif args.changed_fuzzers:
        selected_fuzzers = experiment_changes.get_fuzzers_changed_since_last()
        if not selected_fuzzers:
            logs.error('No fuzzers changed since last experiment. Exiting.')
            return 1
        configs = fuzzer_utils.get_fuzzer_configs(selected_fuzzers)
    else:
        configs = fuzzer_utils.get_fuzzer_configs(args.fuzzers)

    start_experiment(args.experiment_name, args.experiment_config,
                     args.benchmarks, configs)
    # MANUAL_EXPERIMENT suppresses automatic teardown so the instances can be
    # inspected by hand.
    if not os.getenv('MANUAL_EXPERIMENT'):
        stop_experiment.stop_experiment(args.experiment_name,
                                        args.experiment_config)
    return 0
Exemplo n.º 7
0
def test_get_fuzzer_configs_empty_list():
    """An empty fuzzer list must yield an empty config list. The reason for
    this is subtle, but code can call this function when the caller means no
    fuzzers but end up with many fuzzers. Please check all callers of
    get_fuzzer_configs before removing or altering this test."""
    configs = fuzzer_utils.get_fuzzer_configs([])
    assert configs == []
Exemplo n.º 8
0
def run_full_experiment():
    """Run a full experiment over every configured fuzzer."""
    return _run_experiment(fuzzer_utils.get_fuzzer_configs())
Exemplo n.º 9
0
import sys
import types
from typing import Dict, List, Set

from common import filesystem
from common import fuzzer_utils

# The max depth of dependencies _get_python_dependencies will search.
PY_DEPENDENCIES_MAX_DEPTH = 10

# A cache of Python dependencies for modules. Keys are strings of module paths.
# Values are sets of module paths.
PY_DEPENDENCIES_CACHE: Dict[str, Set[str]] = {}

# Cache these so we don't need to do it every time we call
# get_underlying_fuzzer. Built once at import time; maps each config's fuzzer
# name (as reported by get_fuzzer_from_config) to the config's 'fuzzer' entry.
FUZZER_CONFIGS = fuzzer_utils.get_fuzzer_configs()
FUZZER_NAMES_TO_UNDERLYING = {
    fuzzer_utils.get_fuzzer_from_config(config): config['fuzzer']
    for config in FUZZER_CONFIGS
}


def _get_fuzzer_module_name(fuzzer: str) -> str:
    """Returns the name of the fuzzer.py module of |fuzzer|. Assumes |fuzzer| is
    an underlying fuzzer."""
    return 'fuzzers.{}.fuzzer'.format(fuzzer)


def is_builtin_module(module: types.ModuleType) -> bool:
    """Returns True if |module| is compiled into the interpreter itself
    (i.e. its name appears in sys.builtin_module_names)."""
    name = module.__name__
    return name in sys.builtin_module_names