Example #1
0
def run_benchmarks(control, experiment, benchmark_dir, benchmarks, trials,
                   vcs=None, record_dir=None, profile_dir=None,
                   continue_on_error=False, control_python=sys.executable,
                   experiment_python=sys.executable):
    """Run the benchmark suite against a control and an experiment Django.

    For every benchmark discovered under ``benchmark_dir`` (optionally
    filtered by ``benchmarks``), run it ``trials`` times against each of the
    two Django trees and print a human-readable comparison.  When
    ``record_dir`` is given, per-benchmark JSON results are written there;
    when ``profile_dir`` is given, profiler output is written there.

    ``control``/``experiment`` are branch names when ``vcs`` is given,
    otherwise paths to the two source trees.  Raises ValueError if
    ``record_dir`` or ``profile_dir`` does not exist; a RuntimeError from a
    benchmark run is re-raised unless ``continue_on_error`` is set.
    """
    if benchmarks:
        print("Running benchmarks: %s" % " ".join(benchmarks))
    else:
        print("Running all benchmarks")

    # Normalise and validate the optional output directories up front, so a
    # bad path fails fast instead of midway through the run.
    if record_dir:
        record_dir = os.path.abspath(record_dir)
        if not os.path.isdir(record_dir):
            raise ValueError('Recording directory "%s" does not exist' % record_dir)
        print("Recording data to '%s'" % record_dir)
    if profile_dir:
        profile_dir = os.path.abspath(profile_dir)
        if not os.path.isdir(profile_dir):
            raise ValueError('Profile directory "%s" does not exist' % profile_dir)
        print("Recording profile data to '%s'" % profile_dir)

    control_label = get_django_version(control, vcs=vcs)
    experiment_label = get_django_version(experiment, vcs=vcs)
    if vcs:
        branch_info = "%s branch " % vcs
    else:
        branch_info = ""
    print("Control: Django %s (in %s%s)" % (control_label, branch_info, control))
    print("Experiment: Django %s (in %s%s)" % (experiment_label, branch_info, experiment))
    print('')

    # Build the subshell environments the benchmarks execute in.  With a VCS,
    # both halves run from the current checkout (branches are switched in
    # place below); otherwise each half points at its own source tree.
    if vcs:
        shared_path = '%s:%s' % (os.path.abspath(os.getcwd()), benchmark_dir)
        control_env = {'PYTHONPATH': shared_path}
        experiment_env = control_env.copy()
    else:
        control_env = {
            'PYTHONPATH': '%s:%s' % (os.path.abspath(control), benchmark_dir),
        }
        experiment_env = {
            'PYTHONPATH': '%s:%s' % (os.path.abspath(experiment), benchmark_dir),
        }

    for benchmark in discover_benchmarks(benchmark_dir):
        # A falsy ``benchmarks`` selection means "run everything discovered".
        if benchmarks and benchmark not in benchmarks:
            continue
        print("Running '%s' benchmark ..." % benchmark)
        settings_module = '%s.settings' % benchmark
        for env in (control_env, experiment_env):
            env['DJANGO_SETTINGS_MODULE'] = settings_module
        if profile_dir is not None:
            control_env['DJANGOBENCH_PROFILE_FILE'] = os.path.join(profile_dir, "con-%s" % benchmark)
            experiment_env['DJANGOBENCH_PROFILE_FILE'] = os.path.join(profile_dir, "exp-%s" % benchmark)
        try:
            if vcs:
                switch_to_branch(vcs, control)
            control_data = run_benchmark(benchmark, benchmark_dir, trials,
                                         executable=control_python,
                                         env=control_env)
            if vcs:
                switch_to_branch(vcs, experiment)
            experiment_data = run_benchmark(benchmark, benchmark_dir, trials,
                                            executable=experiment_python,
                                            env=experiment_env)
        except SkipBenchmark as reason:
            print("Skipped: %s\n" % reason)
            continue
        except RuntimeError as error:
            if not continue_on_error:
                raise
            print("Failed: %s\n" % error)
            continue

        comparison = perf.CompareBenchmarkData(
            control_data,
            experiment_data,
            argparse.Namespace(
                track_memory=False,
                diff_instrumentation=False,
                benchmark_name=benchmark,
                disable_timelines=True,
                control_label=control_label,
                experiment_label=experiment_label,
            ),
        )
        if record_dir:
            record_benchmark_results(
                dest=os.path.join(record_dir, '%s.json' % benchmark),
                name=benchmark,
                result=comparison,
                control=control_label,
                experiment=experiment_label,
                control_data=control_data,
                experiment_data=experiment_data,
            )
        print(format_benchmark_result(comparison, len(control_data.runtimes)))
        print('')
Example #2
0
def run_benchmarks(control, experiment, benchmark_dir, benchmarks, trials, vcs=None, record_dir=None, profile_dir=None, continue_on_errror=False):
    """Run each discovered benchmark against the control and experiment
    Django trees and print a comparison of the results.

    Legacy Python 2 variant (print statements, ``except X, e`` syntax).
    ``control``/``experiment`` are branch names when ``vcs`` is given,
    otherwise paths to the two source trees; ``benchmarks`` limits the run
    to the named benchmarks (falsy means run them all).  Raises ValueError
    if ``record_dir`` does not exist; a RuntimeError from a benchmark run
    is re-raised unless ``continue_on_errror`` is set.
    """
    # NOTE(review): the parameter ``continue_on_errror`` is misspelled (extra
    # "r").  It is used consistently below, but renaming it would break any
    # caller passing it by keyword.
    if benchmarks:
        print "Running benchmarks: %s" % " ".join(benchmarks)
    else:
        print "Running all benchmarks"

    if record_dir:
        # ``Path(...).expand().absolute()`` / ``.child()`` -- presumably the
        # third-party ``unipath``-style Path API; verify against the imports.
        record_dir = Path(record_dir).expand().absolute()
        if not record_dir.exists():
            raise ValueError('Recording directory "%s" does not exist' % record_dir)
        print "Recording data to '%s'" % record_dir
    # NOTE(review): unlike ``record_dir``, ``profile_dir`` is never validated
    # or announced here; a nonexistent path only surfaces later, inside the
    # benchmark subprocess.

    control_label = get_django_version(control, vcs=vcs)
    experiment_label = get_django_version(experiment, vcs=vcs)
    branch_info = "%s branch " % vcs if vcs else ""
    print "Control: Django %s (in %s%s)" % (control_label, branch_info, control)
    print "Experiment: Django %s (in %s%s)" % (experiment_label, branch_info, experiment)
    print

    # Calculate the subshell envs that we'll use to execute the
    # benchmarks in.
    # With a VCS both halves run from the current checkout (branches are
    # switched in place below); otherwise each half points at its own tree.
    if vcs:
        control_env = {
            'PYTHONPATH': '%s:%s' % (Path.cwd().absolute(), Path(benchmark_dir)),
        }
        experiment_env = control_env.copy()
    else:
        control_env = {'PYTHONPATH': '%s:%s' % (Path(control).absolute(), Path(benchmark_dir))}
        experiment_env = {'PYTHONPATH': '%s:%s' % (Path(experiment).absolute(), Path(benchmark_dir))}

    for benchmark in discover_benchmarks(benchmark_dir):
        # Here ``benchmark`` is an object exposing ``.name`` (unlike the
        # plain-string benchmarks of the newer variant of this function).
        if not benchmarks or benchmark.name in benchmarks:
            print "Running '%s' benchmark ..." % benchmark.name
            settings_mod = '%s.settings' % benchmark.name
            control_env['DJANGO_SETTINGS_MODULE'] = settings_mod
            experiment_env['DJANGO_SETTINGS_MODULE'] = settings_mod
            if profile_dir is not None:
                # Separate profile files per half: "con-" = control,
                # "exp-" = experiment.
                control_env['DJANGOBENCH_PROFILE_FILE'] = Path(profile_dir, "con-%s" % benchmark.name)
                experiment_env['DJANGOBENCH_PROFILE_FILE'] = Path(profile_dir, "exp-%s" % benchmark.name)
            try:
                if vcs: switch_to_branch(vcs, control)
                control_data = run_benchmark(benchmark, trials, control_env)
                if vcs: switch_to_branch(vcs, experiment)
                experiment_data = run_benchmark(benchmark, trials, experiment_env)
            except SkipBenchmark, reason:
                # A benchmark may opt out of this run; report and move on.
                print "Skipped: %s\n" % reason
                continue
            except RuntimeError, error:
                if continue_on_errror:
                    print "Failed: %s\n" % error
                    continue
                raise

            options = argparse.Namespace(
                track_memory = False,
                diff_instrumentation = False,
                benchmark_name = benchmark.name,
                disable_timelines = True,
                control_label = control_label,
                experiment_label = experiment_label,
            )
            result = perf.CompareBenchmarkData(control_data, experiment_data, options)
            if record_dir:
                record_benchmark_results(
                    dest = record_dir.child('%s.json' % benchmark.name),
                    name = benchmark.name,
                    result = result,
                    control = control_label,
                    experiment = experiment_label,
                    control_data = control_data,
                    experiment_data = experiment_data,
                )
            print format_benchmark_result(result, len(control_data.runtimes))
            print