def run_process():
    runner = BenchmarkRunner(benchmarks, REPO_PATH, REPO_URL, BUILD, DB_PATH,
                             TMP_DIR, PREPARE, always_clean=True,
                             run_option='eod', start_date=START_DATE,
                             module_dependencies=dependencies)
    runner.run()
def run_process():
    runner = BenchmarkRunner(benchmarks, REPO_PATH, REPO_URL, BUILD, DB_PATH,
                             TMP_DIR, PREPARE, always_clean=True,
                             run_option='eod', start_date=START_DATE,
                             module_dependencies=dependencies)
    runner.run()
    generate_rst_files(runner.benchmarks, DB_PATH, RST_BASE, """LONG DESC.""")
def main(args=None):
    import argparse
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=__doc__ +
        "For more information, see the docstring of `BenchmarkRunner`:\n" +
        BenchmarkRunner.__doc__)
    parser.add_argument('--run-option', default='eod',
                        help="one of {'eod', 'all', 'last', integer}")
    ns = parser.parse_args(args)

    run_option = ns.run_option
    if run_option.isdigit():
        run_option = int(run_option)

    runner = BenchmarkRunner(benchmarks, REPO_PATH, REPO_URL, BUILD, DB_PATH,
                             TMP_DIR, PREPARE, run_option=run_option,
                             start_date=START_DATE)
    runner.run()
    return runner
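# Usage sketch (an assumption about how this script is driven, not part of
# the original): `main` takes an argv-style list, so it can be invoked
# programmatically as well as from the command line.
if __name__ == '__main__':
    main()                              # parses sys.argv
    # main(['--run-option', 'last'])    # equivalent programmatic call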
def run_process(run_option='eod'):
    runner = BenchmarkRunner(benchmarks, REPO_PATH, REPO_URL, BUILD, DB_PATH,
                             TMP_DIR, PREPARE, always_clean=True,
                             run_option=run_option, start_date=START_DATE,
                             module_dependencies=dependencies)
    runner.run()
def run_process():
    runner = BenchmarkRunner(benchmarks, REPO_PATH, REPO_URL, BUILD, DB_PATH,
                             TMP_DIR, PREPARE, clean_cmd=PREPARE,
                             run_option='eod', run_order='multires',
                             start_date=START_DATE,
                             module_dependencies=dependencies, verify=True)
    runner.run()
def run_process(existing='min', run_order='multires', run_limit=None):
    runner = BenchmarkRunner(benchmarks, REPO_PATH, REPO_URL, BUILD, DB_PATH,
                             TMP_DIR, PREPARE, branches=BRANCHES,
                             clean_cmd=PREPARE, run_option='all',
                             run_order=run_order, run_limit=run_limit,
                             start_date=START_DATE, existing=existing,
                             module_dependencies=dependencies, verify=True)
    runner.run()
def run_process(existing='min', run_order='multires', run_limit=None):
    runner = BenchmarkRunner(suite.benchmarks, suite.REPO_PATH,
                             suite.REPO_URL, suite.BUILD, suite.DB_PATH,
                             suite.TMP_DIR, suite.PREPARE,
                             branches=suite.BRANCHES,
                             clean_cmd=suite.PREPARE, run_option='all',
                             run_order=run_order, run_limit=run_limit,
                             start_date=suite.START_DATE, existing=existing,
                             module_dependencies=suite.dependencies,
                             verify=True)
    runner.run()
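# Illustrative driver (the run_limit value is hypothetical): cap the sweep
# at ten revisions while keeping the default 'multires' ordering and 'min'
# handling of existing results.
if __name__ == '__main__':
    run_process(run_limit=10)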
def test_benchmarkrunner():
    from suite import *

    # Just to make sure there are no left-overs
    shutil.rmtree(TMP_DIR)
    if exists(DB_PATH):
        os.unlink(DB_PATH)
    ok_(not exists(DB_PATH))

    runner = BenchmarkRunner(benchmarks, REPO_PATH, REPO_URL, BUILD, DB_PATH,
                             TMP_DIR, PREPARE, clean_cmd=CLEAN,
                             run_option='all', run_order='normal',
                             start_date=START_DATE,
                             module_dependencies=DEPENDENCIES)
    revisions_to_run = runner._get_revisions_to_run()
    eq_(len(revisions_to_run), 6)               # we have 6 now
    revisions_ran = runner.run()
    # print "D1: ", revisions_ran

    # For this test we should inject our "failed to build" revision;
    # since no tests were run for it -- it is not reported
    revisions_ran_ = [x[0] for x in revisions_ran]
    revisions_ran_.insert(4, 'e83ffa5')
    assert_array_equal(revisions_ran_, revisions_to_run)

    # First revision
    eq_(revisions_ran[0][1], (False, 3))  # no functions were available at that point
    eq_(revisions_ran[1][1], (True, 3))   # all 3 tests were available in the first rev

    ok_(exists(TMP_DIR))
    ok_(exists(DB_PATH))
    eq_(runner.blacklist, set(['e83ffa5']))  # the one which failed to build

    # Run 2nd time and verify that all are still listed BUT none new succeeds
    revisions_ran = runner.run()
    # print "D2: ", revisions_ran
    for rev, v in revisions_ran:
        eq_(v, (False, 0))

    # What if we expand the list of benchmarks and run a 3rd time
    runner.benchmarks = collect_benchmarks(['vb_sins', 'vb_sins2'])
    revisions_ran = runner.run()
    # for that single added benchmark there still was no function
    eq_(revisions_ran[0][1], (False, 1))
    # all others should have "succeeded" on that single one
    for rev, v in revisions_ran[1:]:
        eq_(v, (True, 1))

    # and on the 4th run -- nothing new
    revisions_ran = runner.run()
    for rev, v in revisions_ran:
        eq_(v, (False, 0))

    # Now let's smoke-test generation of the .rst files
    from vbench.reports import generate_rst_files
    rstdir = pjoin(TMP_DIR, 'sources')
    generate_rst_files(runner.benchmarks, DB_PATH, rstdir,
                       """VERY LONG DESCRIPTION""")

    # Verify that it all looks close to the desired
    image_files = [basename(x)
                   for x in glob(pjoin(rstdir, 'vbench/figures/*.png'))]
    target_image_files = [b.name + '.png' for b in runner.benchmarks]
    eq_(set(image_files), set(target_image_files))

    rst_files = [basename(x) for x in glob(pjoin(rstdir, 'vbench/*.rst'))]
    target_rst_files = [b.name + '.rst' for b in runner.benchmarks]
    eq_(set(rst_files), set(target_rst_files))

    module_files = [basename(x) for x in glob(pjoin(rstdir, '*.rst'))]
    target_module_files = list(
        set(['vb_' + b.module_name + '.rst' for b in runner.benchmarks]))
    eq_(set(module_files), set(target_module_files + ['index.rst']))

    # print TMP_DIR
    shutil.rmtree(TMP_DIR)
    shutil.rmtree(dirname(DB_PATH))
def run_process():
    runner = BenchmarkRunner(benchmarks, REPO_PATH, REPO_URL, '', DB_PATH,
                             TMP_DIR, '', run_option='all',
                             start_date=START_DATE, module_dependencies=[])
    runner.run()
import os
import sys
import tempfile
from datetime import datetime

from vbench.api import BenchmarkRunner, GitRepo

# PATH, DB_PATH and `benchmarks` are assumed to be defined earlier in the
# suite file this fragment belongs to.

SVN_URL = 'https://neuralensemble.org/svn/brian/trunk'
REPO_PATH = os.path.join(PATH, 'brian-trunk')
if not os.path.exists(REPO_PATH):
    # create the repository
    os.makedirs(REPO_PATH)
    os.system('git svn clone %s %s' % (SVN_URL, REPO_PATH))
else:
    # update the repository (git svn can only be run from inside the
    # repository directory...)
    os.chdir(REPO_PATH)
    os.system('git svn rebase')
    os.system('git svn fetch')
REPO_URL = 'file://' + REPO_PATH

TMP_DIR = tempfile.mkdtemp(suffix='vbench')

# Those two are not really needed at the moment as no C extensions are
# compiled by default
# TODO: Does using sys.executable here work on Windows?
PREPARE = "%s setup.py clean" % sys.executable
BUILD = "%s setup.py build_ext" % sys.executable

START_DATE = datetime(2008, 9, 23)  # Brian version 1.0.0

repo = GitRepo(REPO_PATH)

run_option = 'eod'  # assumption: not set elsewhere in this fragment; 'eod'
                    # is the default used by the other runners above
runner = BenchmarkRunner(benchmarks, REPO_PATH, REPO_URL, BUILD, DB_PATH,
                         TMP_DIR, PREPARE, run_option=run_option,
                         start_date=START_DATE)
runner.run()
def test_benchmarkrunner():
    from vbench.api import BenchmarkRunner

    # Just to make sure there are no left-overs
    shutil.rmtree(TMP_DIR)
    if exists(DB_PATH):
        os.unlink(DB_PATH)
    ok_(not exists(DB_PATH))

    runner = BenchmarkRunner(benchmarks, REPO_PATH, REPO_URL, BUILD, DB_PATH,
                             TMP_DIR, PREPARE, clean_cmd=CLEAN,
                             run_option='all', run_order='normal',
                             start_date=START_DATE,
                             module_dependencies=DEPENDENCIES)
    revisions_to_run = runner._get_revisions_to_run()
    eq_(len(revisions_to_run), 4)               # we had 4 so far
    revisions_ran = runner.run()
    # print "D1: ", revisions_ran
    assert_array_equal([x[0] for x in revisions_ran], revisions_to_run)

    # First revision
    eq_(revisions_ran[0][1], (False, 3))  # no functions were available at that point
    eq_(revisions_ran[1][1], (True, 3))   # all 3 tests were available in the first rev

    ok_(exists(TMP_DIR))
    ok_(exists(DB_PATH))
    eq_(len(runner.blacklist), 0)

    # Run 2nd time and verify that all are still listed BUT none new succeeds
    revisions_ran = runner.run()
    # print "D2: ", revisions_ran
    for rev, v in revisions_ran:
        eq_(v, (False, 0))

    # What if we expand the list of benchmarks and run a 3rd time
    runner.benchmarks = collect_benchmarks(['vb_sins', 'vb_sins2'])
    revisions_ran = runner.run()
    # for that single added benchmark there still was no function
    eq_(revisions_ran[0][1], (False, 1))
    # all others should have "succeeded" on that single one
    for rev, v in revisions_ran[1:]:
        eq_(v, (True, 1))

    # and on the 4th run -- nothing new
    revisions_ran = runner.run()
    for rev, v in revisions_ran:
        eq_(v, (False, 0))

    # Now let's smoke-test generation of the .rst files
    from vbench.reports import generate_rst_files
    rstdir = pjoin(TMP_DIR, 'sources')
    generate_rst_files(runner.benchmarks, DB_PATH, rstdir,
                       """VERY LONG DESCRIPTION""")

    # Verify that it all looks close to the desired
    image_files = [basename(x)
                   for x in glob(pjoin(rstdir, 'vbench/figures/*.png'))]
    target_image_files = [b.name + '.png' for b in runner.benchmarks]
    eq_(set(image_files), set(target_image_files))

    rst_files = [basename(x) for x in glob(pjoin(rstdir, 'vbench/*.rst'))]
    target_rst_files = [b.name + '.rst' for b in runner.benchmarks]
    eq_(set(rst_files), set(target_rst_files))

    module_files = [basename(x) for x in glob(pjoin(rstdir, '*.rst'))]
    target_module_files = list(
        set(['vb_' + b.module_name + '.rst' for b in runner.benchmarks]))
    eq_(set(module_files), set(target_module_files + ['index.rst']))

    # print TMP_DIR
    shutil.rmtree(TMP_DIR)
    shutil.rmtree(dirname(DB_PATH))
def run_process(run_option):
    runner = BenchmarkRunner(benchmarks, REPO_PATH, REPO_URL, BUILD, DB_PATH,
                             TMP_DIR, PREPARE, run_option=run_option,
                             start_date=START_DATE,
                             module_dependencies=dependencies)
    runner.run()
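# Example call; the accepted values mirror the --run-option help string
# above: one of 'eod', 'all', 'last', or an integer.
if __name__ == '__main__':
    run_process('eod')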
# Fragment from report generation: `bmk`, `fig_base_path` and `rst_path`
# are provided by an enclosing loop over the collected benchmarks.
fig_full_path = os.path.join(fig_base_path, '%s.png' % bmk.name)

# make the figure
plt.figure(figsize=(10, 6))
ax = plt.gca()
bmk.plot(DB_PATH, ax=ax)
start, end = ax.get_xlim()
plt.xlim([start - 30, end + 30])
plt.savefig(fig_full_path, bbox_inches='tight')
plt.close('all')

fig_rel_path = 'vbench/figures/%s.png' % bmk.name
rst_text = bmk.to_rst(image_path=fig_rel_path)
with open(rst_path, 'w') as f:
    f.write(rst_text)

# Collect every Benchmark instance defined in the `benchmarks` module
ref = __import__('benchmarks')
benchmarks = [v for v in ref.__dict__.values() if isinstance(v, Benchmark)]

runner = BenchmarkRunner(benchmarks, REPO_PATH, REPO_URL, BUILD, DB_PATH,
                         TMP_DIR, PREPARE, always_clean=True,
                         run_option='eod', start_date=START_DATE,
                         module_dependencies=dependencies)

if __name__ == '__main__':
    runner.run()
    generate_rst_files(benchmarks, DB_PATH, RST_BASE, """LONG DESC.""")