# Submission excerpt; import paths are assumed from the mdbenchmark package layout.
import os
import subprocess

import click
import datreant as dtr
import numpy as np

from mdbenchmark import console
from mdbenchmark.mdengines import detect_md_engine
from mdbenchmark.migrations import mds_to_dtr
from mdbenchmark.utils import (
    ConsolidateDataFrame,
    DataFrameFromBundle,
    PrintDataFrame,
    cleanup_before_restart,
    get_batch_command,
)


def do_submit(directory, force_restart, yes):
    """Submit the benchmarks."""
    # Migrate from MDBenchmark<2 to MDBenchmark>=2
    mds_to_dtr.migrate_to_datreant(directory)

    bundle = dtr.discover(directory)

    # Exit if no bundles were found in the current directory.
    if not bundle:
        console.error("No benchmarks found.")

    grouped_bundles = bundle.categories.groupby("started")
    try:
        bundles_not_yet_started = grouped_bundles[False]
    except KeyError:
        bundles_not_yet_started = None

    if not bundles_not_yet_started and not force_restart:
        console.error(
            "All generated benchmarks were already started once. "
            "You can force a restart with {}.",
            "--force",
        )

    # Start all benchmark simulations if a restart was requested. Otherwise
    # only start the ones that were not run yet.
    bundles_to_start = bundle
    if not force_restart:
        bundles_to_start = bundles_not_yet_started

    df = DataFrameFromBundle(bundles_to_start)

    # Reformat NaN values nicely into question marks.
    df_to_print = df.replace(np.nan, "?")
    df_to_print = df_to_print.drop(columns=["ns/day", "ncores"])
    console.info("{}", "Benchmark Summary:")
    df_short = ConsolidateDataFrame(df_to_print)
    PrintDataFrame(df_short)

    # Ask the user to confirm whether they want to submit the benchmarks.
    if yes:
        console.info("The above benchmarks will be submitted.")
    elif not click.confirm("The above benchmarks will be submitted. Continue?"):
        console.error("Exiting. No benchmarks submitted.")

    batch_cmd = get_batch_command()
    console.info("Submitting a total of {} benchmarks.", len(bundles_to_start))
    for sim in bundles_to_start:
        # Remove files generated by a previous MDBenchmark run.
        if force_restart:
            engine = detect_md_engine(sim.categories["module"])
            cleanup_before_restart(engine=engine, sim=sim)
        sim.categories["started"] = True
        os.chdir(sim.abspath)
        subprocess.call([batch_cmd, "bench.job"])
    console.info(
        "Submitted all benchmarks. Run {} once they are finished to get the results.",
        "mdbenchmark analyze",
    )
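# -- Hedged sketch, not part of the module above -------------------------------
# One plausible way to wire do_submit into the click CLI. The option names and
# defaults here are assumptions inferred from the strings inside do_submit
# ("--force", the confirmation prompt); the real command definition may differ.
import click

from mdbenchmark.cli import cli  # assumed location of the click group


@cli.command()
@click.option(
    "-d",
    "--directory",
    default=".",
    show_default=True,
    help="Path in which to look for benchmarks.",
)
@click.option(
    "-f",
    "--force",
    "force_restart",
    is_flag=True,
    help="Resubmit benchmarks that have already been started.",
)
@click.option("-y", "--yes", is_flag=True, help="Skip the submission prompt.")
def submit(directory, force_restart, yes):
    """Submit the benchmarks."""
    do_submit(directory=directory, force_restart=force_restart, yes=yes)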
# Test-suite excerpt; import paths are assumed from the project's test layout.
import datreant as dtr
import numpy as np

from mdbenchmark.cli import cli
from mdbenchmark.utils import DataFrameFromBundle, PrintDataFrame


def test_analyze_with_errors(cli_runner, tmpdir, data):
    """Test that we warn the user of errors in the output files.

    Also test that we show a question mark instead of a float in the
    corresponding cell.
    """
    with tmpdir.as_cwd():
        result = cli_runner.invoke(
            cli, ["analyze", "--directory={}".format(data["analyze-files-w-errors"])]
        )
        bundle = dtr.discover(data["analyze-files-w-errors"])
        df = DataFrameFromBundle(bundle)
        df = df.replace(np.nan, "?")
        test_output = PrintDataFrame(df, False) + "\n"

        assert result.exit_code == 0
        assert result.output == test_output
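# -- Hedged sketch, not part of the test above ----------------------------------
# Minimal versions of the fixtures the test relies on. The real definitions live
# in the project's conftest.py and may differ; the "data" fixture mapping dataset
# names to directories of stored test files is sketched here only to show its
# assumed shape.
import os

import pytest
from click.testing import CliRunner


@pytest.fixture
def cli_runner():
    # click's test runner invokes CLI commands in-process and captures output.
    return CliRunner()


@pytest.fixture
def data():
    # Assumption: a mapping from dataset name to its path on disk.
    base = os.path.join(os.path.dirname(__file__), "data")
    return {name: os.path.join(base, name) for name in os.listdir(base)}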
# Analysis excerpt; import paths are assumed from the mdbenchmark package layout.
import datreant as dtr
import numpy as np
import pandas as pd
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure

from mdbenchmark import console
from mdbenchmark.migrations import mds_to_dtr
from mdbenchmark.plot import plot_over_group
from mdbenchmark.utils import DataFrameFromBundle, PrintDataFrame


def do_analyze(directory, plot, ncores, save_csv):
    """Analyze benchmarks."""
    # Migrate from MDBenchmark<2 to MDBenchmark>=2
    mds_to_dtr.migrate_to_datreant(directory)

    bundle = dtr.discover(directory)
    df = DataFrameFromBundle(bundle)

    if save_csv is not None:
        if not save_csv.endswith(".csv"):
            save_csv = "{}.csv".format(save_csv)
        df.to_csv(save_csv)

    # Warn about missing values before replacing them, then reformat NaN
    # values nicely into question marks.
    # TODO: Move this into the bundle function!
    if df.isnull().values.any():
        console.warn(
            "We were not able to gather information for all systems. "
            "Systems marked with question marks have either crashed or "
            "were not started yet."
        )
    df = df.replace(np.nan, "?")
    PrintDataFrame(df)

    if plot:
        console.warn("'--plot' has been deprecated, use '{}'.", "mdbenchmark plot")

        fig = Figure()
        FigureCanvas(fig)
        ax = fig.add_subplot(111)

        # The deprecated plot path reads the data back from the CSV file,
        # so it needs '--save-csv' to have been given.
        if save_csv is None:
            console.error("'--plot' requires '{}' to be set.", "--save-csv")
        df = pd.read_csv(save_csv)

        if ncores:
            console.warn(
                "Ignoring your value from '{}' and parsing the number of cores "
                "from the log files.",
                "--number-cores/-ncores",
            )

        ax = plot_over_group(df, plot_cores=ncores, fit=True, ax=ax)
        lgd = ax.legend(loc="upper center", bbox_to_anchor=(0.5, -0.175))

        fig.tight_layout()
        fig.savefig(
            "runtimes.pdf", format="pdf", bbox_extra_artists=(lgd,), bbox_inches="tight"
        )
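# -- Hedged sketch, not part of the module above --------------------------------
# Standalone illustration of the object-oriented Matplotlib pattern used in
# do_analyze: constructing a Figure and attaching an Agg canvas directly keeps
# the CLI independent of pyplot and of any interactive GUI backend. The data
# points below are made up for the example.
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure

fig = Figure()
FigureCanvas(fig)  # attaching the canvas lets fig.savefig() render via Agg
ax = fig.add_subplot(111)
ax.plot([1, 2, 4, 8], [40.2, 78.9, 151.3, 290.0], marker="o")  # hypothetical ns/day
ax.set_xlabel("Number of nodes")
ax.set_ylabel("Performance (ns/day)")
fig.tight_layout()
fig.savefig("example.pdf", format="pdf")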