Example #1
def get_django_version(loc, vcs=None):
    if vcs:
        switch_to_branch(vcs, loc, do_cleanup=True)
        pythonpath = Path.cwd()
    else:
        pythonpath = Path(loc).absolute()
    out, err, _ = perf.CallAndCaptureOutput(
        [sys.executable, "-c", "import django; print django.get_version()"],
        env={"PYTHONPATH": pythonpath},
    )
    return out.strip()
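
The helper shells out to a fresh interpreter so the reported version reflects whatever PYTHONPATH points at, not the copy of the package already imported in the parent process. A minimal stdlib-only sketch of the same pattern, using subprocess in place of djangobench's perf.CallAndCaptureOutput helper (probe_version is an illustrative name, not part of the original code):

import os
import subprocess
import sys

def probe_version(module, pythonpath):
    # A fresh interpreter resolves the import against PYTHONPATH
    # instead of whatever is already loaded in this process.
    env = dict(os.environ, PYTHONPATH=str(pythonpath))
    out = subprocess.check_output(
        [sys.executable, '-c',
         'import {0}; print({0}.get_version())'.format(module)],
        env=env,
    )
    return out.decode().strip()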
Example #2
def get_widgy_version(loc, vcs=None):
    if vcs:
        switch_to_branch(vcs, loc, do_cleanup=True)
        pythonpath = Path.cwd()
    else:
        pythonpath = Path(loc).absolute()
    out, err, _ = perf.CallAndCaptureOutput(
        [sys.executable, '-c',
         'import widgy; print widgy.get_version()'],
        env={'PYTHONPATH': pythonpath})
    return out.strip()
Example #3
def get_widgy_version(loc, vcs=None):
    if vcs:
        switch_to_branch(vcs, loc, do_cleanup=True)
        pythonpath = Path.cwd()
    else:
        pythonpath = Path(loc).absolute()
    out, err, _ = perf.CallAndCaptureOutput(
        [sys.executable, '-c', 'import widgy; print widgy.get_version()'],
        env={'PYTHONPATH': pythonpath}
    )
    return out.strip()
Example #4
def _make_cli(tmpdir, request):
    """
    Set up a CLI instance pointing to a tmpdir for a library.

    This can't be in the body of cli() itself because we want to call it
    both from the cli fixture and from TestBuild.build.
    """
    cli = TestCLI(
        stdout=io.StringIO(),
        stderr=io.StringIO(),
        libpath=str(tmpdir),
    )

    # Chdir to the temp dir, remembering to restore when we're done.
    oldcwd = Path.cwd()
    os.chdir(str(tmpdir))
    request.addfinalizer(lambda: os.chdir(oldcwd))

    return cli
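
Because the setup lives in a plain function, the fixture itself can stay a one-liner. A hedged sketch of what the cli fixture mentioned in the docstring might look like, assuming pytest's built-in tmpdir and request fixtures (TestCLI and _make_cli come from the example above):

import pytest

@pytest.fixture
def cli(tmpdir, request):
    # Delegate to the shared helper so TestBuild.build can reuse the
    # same setup without going through the fixture machinery.
    return _make_cli(tmpdir, request)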
Example #5
def run_benchmarks(control,
                   experiment,
                   benchmark_dir,
                   benchmarks,
                   trials,
                   vcs=None,
                   record_dir=None,
                   profile_dir=None,
                   continue_on_error=False):
    if benchmarks:
        print "Running benchmarks: %s" % " ".join(benchmarks)
    else:
        print "Running all benchmarks"

    if record_dir:
        record_dir = Path(record_dir).expand().absolute()
        if not record_dir.exists():
            raise ValueError('Recording directory "%s" does not exist' %
                             record_dir)
        print "Recording data to '%s'" % record_dir

    control_label = get_widgy_version(control, vcs=vcs)
    experiment_label = get_widgy_version(experiment, vcs=vcs)
    branch_info = "%s branch " % vcs if vcs else ""
    print "Control: Widgy %s (in %s%s)" % (control_label, branch_info, control)
    print "Experiment: Widgy %s (in %s%s)" % (experiment_label, branch_info,
                                              experiment)
    print

    # Calculate the subshell envs that we'll use to execute the
    # benchmarks in.
    if vcs:
        control_env = {
            'PYTHONPATH':
            '%s:%s' % (Path.cwd().absolute(), Path(benchmark_dir)),
        }
        experiment_env = control_env.copy()
    else:
        control_env = {
            'PYTHONPATH':
            '%s:%s' % (Path(control).absolute(), Path(benchmark_dir))
        }
        experiment_env = {
            'PYTHONPATH':
            '%s:%s' % (Path(experiment).absolute(), Path(benchmark_dir))
        }

    for benchmark in discover_benchmarks(benchmark_dir):
        if not benchmarks or benchmark.name in benchmarks:
            print "Running '%s' benchmark ..." % benchmark.name
            settings_mod = '%s.settings' % benchmark.name
            control_env['DJANGO_SETTINGS_MODULE'] = settings_mod
            experiment_env['DJANGO_SETTINGS_MODULE'] = settings_mod
            if profile_dir is not None:
                control_env['DJANGOBENCH_PROFILE_FILE'] = Path(
                    profile_dir, "con-%s" % benchmark.name)
                experiment_env['DJANGOBENCH_PROFILE_FILE'] = Path(
                    profile_dir, "exp-%s" % benchmark.name)
            try:
                if vcs: switch_to_branch(vcs, control)
                control_data = run_benchmark(benchmark, trials, control_env)
                if vcs: switch_to_branch(vcs, experiment)
                experiment_data = run_benchmark(benchmark, trials,
                                                experiment_env)
            except SkipBenchmark as reason:
                print "Skipped: %s\n" % reason
                continue
            except RuntimeError as error:
                if continue_on_error:
                    print "Failed: %s\n" % error
                    continue
                raise

            options = argparse.Namespace(
                track_memory=False,
                diff_instrumentation=False,
                benchmark_name=benchmark.name,
                disable_timelines=True,
                control_label=control_label,
                experiment_label=experiment_label,
            )
            result = perf.CompareBenchmarkData(control_data, experiment_data,
                                               options)
            if record_dir:
                record_benchmark_results(
                    dest=record_dir.child('%s.json' % benchmark.name),
                    name=benchmark.name,
                    result=result,
                    control=control_label,
                    experiment=experiment_label,
                    control_data=control_data,
                    experiment_data=experiment_data,
                )
            print format_benchmark_result(result, len(control_data.runtimes))
            print
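
A hypothetical driver for the function above (flag names and defaults are illustrative, not djangobench's actual CLI):

import argparse

parser = argparse.ArgumentParser(description='Compare two Widgy trees.')
parser.add_argument('--control', default='widgy-control')
parser.add_argument('--experiment', default='widgy-experiment')
parser.add_argument('--trials', type=int, default=5)
parser.add_argument('--vcs', default=None)
args = parser.parse_args()

run_benchmarks(args.control, args.experiment, 'benchmarks',
               benchmarks=None, trials=args.trials, vcs=args.vcs)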
Example #6
def run_benchmarks(control, experiment, benchmark_dir, benchmarks, trials,
                   vcs=None, record_dir=None, profile_dir=None):
    if benchmarks:
        print "Running benchmarks: %s" % " ".join(benchmarks)
    else:
        print "Running all benchmarks"

    if record_dir:
        record_dir = Path(record_dir).expand().absolute()
        if not record_dir.exists():
            raise ValueError('Recording directory "%s" does not exist' % record_dir)
        print "Recording data to '%s'" % record_dir

    control_label = get_django_version(control, vcs=vcs)
    experiment_label = get_django_version(experiment, vcs=vcs)
    branch_info = "%s branch " % vcs if vcs else ""
    print "Control: Django %s (in %s%s)" % (control_label, branch_info, control)
    print "Experiment: Django %s (in %s%s)" % (experiment_label, branch_info, experiment)
    print

    # Calculate the subshell envs that we'll use to execute the
    # benchmarks in.
    if vcs:
        control_env = {
            'PYTHONPATH': '%s:%s' % (Path.cwd().absolute(), Path(benchmark_dir)),
        }
        # Copy rather than alias: the two envs diverge below when the
        # per-env DJANGOBENCH_PROFILE_FILE entries are set.
        experiment_env = control_env.copy()
    else:
        control_env = {'PYTHONPATH': '%s:%s' % (Path(control).absolute(), Path(benchmark_dir))}
        experiment_env = {'PYTHONPATH': '%s:%s' % (Path(experiment).absolute(), Path(benchmark_dir))}

    for benchmark in discover_benchmarks(benchmark_dir):
        if not benchmarks or benchmark.name in benchmarks:
            print "Running '%s' benchmark ..." % benchmark.name
            settings_mod = '%s.settings' % benchmark.name
            control_env['DJANGO_SETTINGS_MODULE'] = settings_mod
            experiment_env['DJANGO_SETTINGS_MODULE'] = settings_mod
            if profile_dir is not None:
                control_env['DJANGOBENCH_PROFILE_FILE'] = Path(profile_dir, "con-%s" % benchmark.name)
                experiment_env['DJANGOBENCH_PROFILE_FILE'] = Path(profile_dir, "exp-%s" % benchmark.name)
            try:
                if vcs: switch_to_branch(vcs, control)
                control_data = run_benchmark(benchmark, trials, control_env)
                if vcs: switch_to_branch(vcs, experiment)
                experiment_data = run_benchmark(benchmark, trials, experiment_env)
            except SkipBenchmark as reason:
                print "Skipped: %s\n" % reason
                continue

            options = argparse.Namespace(
                track_memory=False,
                diff_instrumentation=False,
                benchmark_name=benchmark.name,
                disable_timelines=True,
                control_label=control_label,
                experiment_label=experiment_label,
            )
            result = perf.CompareBenchmarkData(control_data, experiment_data, options)
            if record_dir:
                record_benchmark_results(
                    dest=record_dir.child('%s.json' % benchmark.name),
                    name=benchmark.name,
                    result=result,
                    control=control_label,
                    experiment=experiment_label,
                    control_data=control_data,
                    experiment_data=experiment_data,
                )
            print format_benchmark_result(result, len(control_data.runtimes))
            print
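
The .copy() in the vcs branch matters: binding both names to one dict would make the per-env DJANGOBENCH_PROFILE_FILE assignments later in the loop clobber each other. A tiny self-contained demonstration:

# Aliasing: two names, one dict -- the second write wins for both.
control_env = experiment_env = {'PYTHONPATH': '/tmp/bench'}
control_env['DJANGOBENCH_PROFILE_FILE'] = 'con-query'
experiment_env['DJANGOBENCH_PROFILE_FILE'] = 'exp-query'
assert control_env['DJANGOBENCH_PROFILE_FILE'] == 'exp-query'

# Copying keeps the two environments independent.
control_env = {'PYTHONPATH': '/tmp/bench'}
experiment_env = control_env.copy()
control_env['DJANGOBENCH_PROFILE_FILE'] = 'con-query'
experiment_env['DJANGOBENCH_PROFILE_FILE'] = 'exp-query'
assert control_env['DJANGOBENCH_PROFILE_FILE'] == 'con-query'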
Example #7
def __init__(self, cwd):
    # Remember the previous directory so it can be restored later,
    # and create the target directory if it does not exist yet.
    self.prev_cwd = FSPath.cwd()
    self.cwd = Path(cwd)
    if not self.cwd.exists():
        self.cwd.mkdir(parents=True)
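
This constructor reads like the front half of a working-directory context manager. A hedged sketch of how the full class might look using only the stdlib, with pathlib.Path standing in for both FSPath and Path (ChangeDir is an illustrative name, not the original class):

import os
from pathlib import Path

class ChangeDir(object):
    """Chdir into `cwd` on entry, creating it if needed; restore on exit."""

    def __init__(self, cwd):
        self.prev_cwd = Path.cwd()
        self.cwd = Path(cwd)
        self.cwd.mkdir(parents=True, exist_ok=True)

    def __enter__(self):
        os.chdir(str(self.cwd))
        return self

    def __exit__(self, *exc):
        # Restore the original directory even if the block raised.
        os.chdir(str(self.prev_cwd))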
Example #8
def run_benchmarks(control, experiment, benchmark_dir, benchmarks, trials,
                   record_dir, profile_dir):
    if benchmarks:
        print "Running benchmarks: %s" % " ".join(benchmarks)
    else:
        print "Running all benchmarks"

    if record_dir:
        record_dir = Path(record_dir).expand().absolute()
        if not record_dir.exists():
            raise ValueError('Recording directory "%s" does not exist' % record_dir)
        print "Recording data to '%s'" % record_dir


    print "Control: %s" % control 
    print "Experiment: %s" % experiment
    print
    
    control_env_dir = tempfile.mkdtemp()
    experiment_env_dir = tempfile.mkdtemp()
    
    # create envs
    virtualenv.create_environment(control_env_dir, False)
    virtualenv.create_environment(experiment_env_dir, False)
    
    control_python = setup_env(control_env_dir, control)
    experiment_python = setup_env(experiment_env_dir, experiment)

    control_env = {
        'PYTHONPATH': '%s:%s:%s' % (Path.cwd().absolute(), Path(benchmark_dir),
                                    Path(__file__).parent.parent.absolute()),
    }
    experiment_env = control_env.copy()

    for benchmark in discover_benchmarks(benchmark_dir):
        if not benchmarks or benchmark.name in benchmarks:
            print "Running '%s' benchmark ..." % benchmark.name
            settings_mod = '%s.settings' % benchmark.name
            control_env['DJANGO_SETTINGS_MODULE'] = settings_mod
            experiment_env['DJANGO_SETTINGS_MODULE'] = settings_mod
            if profile_dir is not None:
                control_env['DJANGOBENCH_PROFILE_FILE'] = Path(profile_dir, "con-%s" % benchmark.name)
                experiment_env['DJANGOBENCH_PROFILE_FILE'] = Path(profile_dir, "exp-%s" % benchmark.name)
            try:
                control_data = run_benchmark(benchmark, trials, control_env, control_python)
                experiment_data = run_benchmark(benchmark, trials, experiment_env, experiment_python)
            except SkipBenchmark as reason:
                print "Skipped: %s\n" % reason
                continue

            options = argparse.Namespace(
                track_memory=False,
                diff_instrumentation=False,
                benchmark_name=benchmark.name,
                disable_timelines=True,
                control_label=control,
                experiment_label=experiment,
            )
            result = perf.CompareBenchmarkData(control_data, experiment_data, options)
            if record_dir:
                record_benchmark_results(
                    dest=record_dir.child('%s.json' % benchmark.name),
                    name=benchmark.name,
                    result=result,
                    control=control,
                    experiment=experiment,
                    control_data=control_data,
                    experiment_data=experiment_data,
                )
            print format_benchmark_result(result, len(control_data.runtimes))
            print
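
setup_env is not shown in the example; presumably it installs the given checkout into the freshly created virtualenv and returns the path to that environment's interpreter for run_benchmark to execute. A hypothetical, POSIX-only sketch of such a helper (the pip invocation and return value are assumptions, not the original implementation):

import os
import subprocess

def setup_env(env_dir, checkout):
    # Hypothetical: install the checkout into the virtualenv, then hand
    # back that environment's interpreter (bin/python on POSIX).
    python = os.path.join(env_dir, 'bin', 'python')
    subprocess.check_call([python, '-m', 'pip', 'install', checkout])
    return python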