def bench(problem, **kwargs):
    """
    Complete benchmark with multiple simulation and performance parameters.

    Parameters
    ----------
    problem : str
        Problem to benchmark: ``'tti'`` selects ``tti_run``; any other
        value selects ``acoustic_run``.
    **kwargs
        Must contain ``resultsdir`` (directory for result files) and
        ``repeats`` (number of benchmark repetitions). All remaining
        entries are forwarded to opescibench as the parameter sweep.

    Raises
    ------
    ImportError
        If the opescibench utility package is not installed.
    """
    try:
        from opescibench import Benchmark, Executor
    except ImportError as e:
        # Catch only ImportError — a bare ``except`` would also swallow
        # KeyboardInterrupt/SystemExit — and chain the original cause.
        raise ImportError(
            'Could not import opescibench utility package.\n'
            'Please install https://github.com/opesci/opescibench') from e

    run = tti_run if problem == 'tti' else acoustic_run
    resultsdir = kwargs.pop('resultsdir')
    repeats = kwargs.pop('repeats')

    class BenchExecutor(Executor):
        """Executor class that defines how to run the benchmark"""

        def run(self, *args, **kwargs):
            gflopss, oi, timings, _ = run(*args, **kwargs)
            # Register one (gflopss, oi, timings) triple per timed event.
            for key in timings:
                self.register(gflopss[key], measure="gflopss", event=key)
                self.register(oi[key], measure="oi", event=key)
                self.register(timings[key], measure="timings", event=key)
            # clear_cache() is defined elsewhere in this module; presumably
            # it resets cached state between repeated runs — confirm there.
            clear_cache()

    bench = Benchmark(name=problem, resultsdir=resultsdir, parameters=kwargs)
    bench.execute(BenchExecutor(), warmups=0, repeats=repeats)
    bench.save()
class DiffusionExecutor(Executor): """Executor class that defines how to run the benchmark""" def setup(self, **kwargs): self.ui = ring_initial(spacing=kwargs['spacing']) def run(self, *args, **kwargs): u, time = exec_func[kwargs['mode']]( self.ui, spacing=kwargs['spacing'], timesteps=kwargs['timesteps']) self.register(time) # Run benchmark across parameters and save the result bench = Benchmark(name='Diffusion', resultsdir=args.resultsdir, parameters=parameters) bench.execute(DiffusionExecutor(), warmups=0, repeats=1) bench.save() elif args.execmode == 'plot': # Load previously generated benchmark data bench = Benchmark(name='Diffusion', resultsdir=args.resultsdir, parameters=parameters) bench.load() # Generate the plot from loaded benchmark data plotter = Plotter() plotter.plot_comparison('DiffusionModes.pdf', args.mode, bench.lookup())
"Please install https://github.com/opesci/opescibench") class BenchExecutor(Executor): """Executor class that defines how to run the benchmark""" def run(self, *args, **kwargs): gflopss, oi, timings, _ = run(*args, **kwargs) for key in timings.keys(): self.register(gflopss[key], measure="gflopss", event=key) self.register(oi[key], measure="oi", event=key) self.register(timings[key], measure="timings", event=key) clear_cache() bench = Benchmark(name=args.problem, resultsdir=args.resultsdir, parameters=parameters) bench.execute(BenchExecutor(), warmups=0, repeats=args.repeats) bench.save() elif args.execmode == "plot": try: from opescibench import Benchmark, RooflinePlotter except: raise ImportError( "Could not import opescibench utility package.\n" "Please install https://github.com/opesci/opescibench " "and Matplotlib to plot performance results") bench = Benchmark(name=args.problem, resultsdir=args.resultsdir,
elif args.execmode == 'bench': if Benchmark is None: raise ImportError("Could not find opescibench utility package.\n" "Please install from https://github.com/opesci/opescibench") class DiffusionExecutor(Executor): """Executor class that defines how to run the benchmark""" def setup(self, **kwargs): self.ui = ring_initial(spacing=kwargs['spacing']) def run(self, *args, **kwargs): u, time = exec_func[kwargs['mode']](self.ui, spacing=kwargs['spacing'], timesteps=kwargs['timesteps']) self.register(time) # Run benchmark across parameters and save the result bench = Benchmark(name='Diffusion', resultsdir=args.resultsdir, parameters=parameters) bench.execute(DiffusionExecutor(), warmups=0, repeats=1) bench.save() elif args.execmode == 'plot': # Load previously generated benchmark data bench = Benchmark(name='Diffusion', resultsdir=args.resultsdir, parameters=parameters) bench.load() # Generate the plot from loaded benchmark data plotter = Plotter() plotter.plot_comparison('DiffusionModes.pdf', args.mode, bench.lookup())
spacing=kwargs["spacing"], timesteps=kwargs["timesteps"], space_order=kwargs["space_order"], dse=kwargs["dse"]) for key in timings.keys(): self.register(gflopss[key], measure="gflopss", event=key) self.register(oi[key], measure="oi", event=key) self.register(timings[key], measure="timings", event=key) if __name__ == "__main__": bench = Benchmark(name="Diffusion", resultsdir="res", parameters={ 'timesteps': 1000, 'spacing': [0.0001, 0.0004], 'space_order': [4, 8, 12, 16], 'dse': ["aggressive", "advanced"] }) bench.execute(DiffusionExecutor(), warmups=0, repeats=1) """ Plotting mode to generate plots for performance analysis. """ backend = "ops" max_bw = 320 flop_ceils = [(8228, "ideal peak")] resultsdir = "results" for dse in ["aggressive", "advanced"]: for spacing in [0.0001, 0.0004]:
def plot(problem, **kwargs):
    """
    Plotting mode to generate plots for performance analysis.

    Loads previously saved benchmark results and renders a roofline plot
    comparing <DSE,DLE> configurations.

    Parameters
    ----------
    problem : str
        Problem name; must be a key of ``problem_styles``
        (``'acoustic'`` or ``'tti'``).
    **kwargs
        Must contain ``resultsdir``, ``plotdir``, ``arch``, ``max_bw``,
        ``max_flops`` and ``point_runtime``; the remaining entries
        (including ``shape``, ``space_order``, ``time_order``, ``tn``)
        identify the benchmark parameter set to look up.

    Raises
    ------
    ImportError
        If opescibench (or Matplotlib) is not installed.
    """
    try:
        from opescibench import Benchmark, RooflinePlotter
    except ImportError as e:
        # Catch only ImportError and chain the original cause instead of
        # masking it with a bare ``except``.
        raise ImportError(
            "Could not import opescibench utility package.\n"
            "Please install https://github.com/opesci/opescibench "
            "and Matplotlib to plot performance results") from e

    resultsdir = kwargs.pop('resultsdir')
    plotdir = kwargs.pop('plotdir')
    arch = kwargs.pop('arch')
    max_bw = kwargs.pop('max_bw')
    max_flops = kwargs.pop('max_flops')
    point_runtime = kwargs.pop('point_runtime')

    bench = Benchmark(name=problem, resultsdir=resultsdir, parameters=kwargs)
    bench.load()
    if not bench.loaded:
        warning("Could not load any results, nothing to plot. Exiting...")
        sys.exit(0)

    gflopss = bench.lookup(params=kwargs, measure="gflopss", event='main')
    oi = bench.lookup(params=kwargs, measure="oi", event='main')
    time = bench.lookup(params=kwargs, measure="timings", event='main')

    name = "%s_dim%s_so%s_to%s_arch[%s].pdf" % (problem, kwargs['shape'],
                                                kwargs['space_order'],
                                                kwargs['time_order'], arch)
    name = name.replace(' ', '')
    problem_styles = {'acoustic': 'Acoustic', 'tti': 'TTI'}
    title = "%s [grid=%s, TO=%s, duration=%sms], varying <DSE,DLE> on %s" %\
        (problem_styles[problem], list(kwargs['shape']),
         kwargs['time_order'], kwargs['tn'], arch)

    styles = {  # (marker, color)
        # DLE basic
        ('basic', 'basic'): ('D', 'r'),
        ('advanced', 'basic'): ('D', 'g'),
        ('speculative', 'basic'): ('D', 'y'),
        ('aggressive', 'basic'): ('D', 'b'),
        # DLE advanced
        ('basic', 'advanced'): ('o', 'r'),
        ('advanced', 'advanced'): ('o', 'g'),
        ('speculative', 'advanced'): ('o', 'y'),
        ('aggressive', 'advanced'): ('o', 'b'),
        # DLE speculative
        ('basic', 'speculative'): ('s', 'r'),
        ('advanced', 'speculative'): ('s', 'g'),
        ('speculative', 'speculative'): ('s', 'y'),
        ('aggressive', 'speculative'): ('s', 'b')
    }

    # Find min and max runtimes for instances having the same OI
    min_max = {v: [0, sys.maxsize] for v in oi.values()}
    for k, v in time.items():
        i = oi[k]
        min_max[i][0] = v if min_max[i][0] == 0 else min(v, min_max[i][0])
        min_max[i][1] = v if min_max[i][1] == sys.maxsize \
            else max(v, min_max[i][1])

    with RooflinePlotter(title=title, figname=name, plotdir=plotdir,
                         max_bw=max_bw, max_flops=max_flops,
                         fancycolor=True,
                         legend={'fontsize': 5, 'ncol': 4}) as plot:
        # NOTE: renamed the loop variable so it no longer shadows the
        # ``gflopss`` dict it iterates over.
        for key, gflopss_value in gflopss.items():
            oi_value = oi[key]
            time_value = time[key]
            key = dict(key)
            run = (key["dse"], key["dle"])
            label = "<%s,%s>" % run
            oi_loc = 0.05 if len(str(key["space_order"])) == 1 else 0.06
            oi_annotate = {'s': 'SO=%s' % key["space_order"], 'size': 4,
                           'xy': (oi_value, oi_loc)} if run[0] else None
            if time_value in min_max[oi_value] and point_runtime:
                # Only annotate min and max runtimes on each OI line, to
                # avoid polluting the plot too much
                point_annotate = {'s': "%.1f s" % time_value,
                                  'xytext': (0, 5.2), 'size': 3.5,
                                  'weight': 'bold', 'rotation': 0}
            else:
                point_annotate = None
            oi_line = time_value == min_max[oi_value][0]
            # BUG FIX: the original assigned ``perf_annotate`` only when
            # ``oi_line`` was true, so the first non-oi_line iteration hit
            # a NameError and later ones reused a stale dict. Define it
            # unconditionally instead.
            perf_annotate = {'size': 4, 'xytext': (-4, 4)} if oi_line else None
            plot.add_point(gflops=gflopss_value, oi=oi_value,
                           marker=styles[run][0], color=styles[run][1],
                           oi_line=oi_line, label=label,
                           perf_annotate=perf_annotate,
                           oi_annotate=oi_annotate,
                           point_annotate=point_annotate)
elif args.execmode == "bench": class BenchExecutor(Executor): """Executor class that defines how to run the benchmark""" def run(self, *args, **kwargs): gflopss, oi, timings, _ = run(*args, **kwargs) for key in timings.keys(): self.register(gflopss[key], measure="gflopss", event=key) self.register(oi[key], measure="oi", event=key) self.register(timings[key], measure="timings", event=key) clear_cache() bench = Benchmark(name=args.problem, resultsdir=args.resultsdir, parameters=parameters) bench.execute(BenchExecutor(), warmups=0) bench.save() elif args.execmode == "plot": bench = Benchmark(name=args.problem, resultsdir=args.resultsdir, parameters=parameters) bench.load() if not bench.loaded: warning("Could not load any results, nothing to plot. Exiting...") sys.exit(0) gflopss = bench.lookup(params=parameters, measure="gflopss",