def run(self, genspec: str) -> None:
    '''
    Run benchmark test.
    '''
    logger.emlog('# Starting Runs...')

    # Generate run commands for current experiment.
    rcmd = self.config.args.runcmds
    pruns = experiment.runcmds(rcmd[0], rcmd[1], rcmd[2], rcmd[3])

    # Create factor registry, or use provided.
    factors = []
    if len(self.manual_factors) == 0:
        for val in range(rcmd[0], rcmd[1] + 1):
            factors.append(experiment.factorize(val, 3))
    else:
        factors = self.manual_factors

    executable = self.config.args.executable
    for i, prun in enumerate(pruns):
        pex = factors[i][0]
        pey = factors[i][1]
        pez = factors[i][2]

        appargs = genspec.format(executable, pex, pey, pez)
        logger.log('')
        container.prun(
            prun,
            appargs,
            postaction=self.post_action
        )
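# A minimal standalone sketch of the idea behind experiment.factorize(val, 3)
# above: split a process count into three factors (pex, pey, pez) whose
# product equals the count. This is an illustrative assumption, not bueno's
# actual implementation; factorize3() is a hypothetical helper used only here.
def factorize3(n: int) -> tuple:
    def largest_factor(m: int, limit: int) -> int:
        # Largest divisor of m that is <= limit (at least 1).
        best = 1
        for cand in range(1, limit + 1):
            if m % cand == 0:
                best = cand
        return best

    fx = largest_factor(n, round(n ** (1.0 / 3.0)) + 1)
    rem = n // fx
    fy = largest_factor(rem, int(rem ** 0.5) + 1)
    return fx, fy, rem // fy


# For example, factorize3(8) == (2, 2, 2) and factorize3(12) == (3, 2, 2);
# in every case the three factors multiply back to the input count.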
def main(argv):
    experiment.name('nbody')
    logger.log('# Experiment: {}'.format(experiment.name()))

    prun = host.whichl(['srun', 'mpiexec'])
    if prun is None:
        sys.exit('Cannot find a parallel launcher...')

    app = '/nbody/nbody-mpi'
    # The seemingly strange use of {{}} allows us to first format the string
    # with arguments (the {}) and then generate strings with values passed to
    # -n from the output of range() (the {{}}).
    runcmds = experiment.generate('{} -n {{}}'.format(prun), range(1, 3))

    etimes = list()
    for r in runcmds:
        stime = utils.now()
        # TODO(skg) FIXME
        container.prun(r, app)
        etime = utils.now()

        telapsed = etime - stime
        etimes.append(telapsed)
        logger.log(F'# Execution Time: {telapsed}\n')
        # Take a break between runs.
        time.sleep(1)

    logger.log('# Report')
    logger.log('# Command, Execution Time')
    for i in zip(runcmds, etimes):
        logger.log('{}, {}'.format(*i))
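# A small standalone illustration of the brace-escaping trick described in the
# comment above: plain {} is filled on the first call to format(), while {{}}
# survives that call as a literal {} and can be filled later. The names
# launcher, template, and cmds are hypothetical and exist only for this
# example.
launcher = 'mpiexec'
template = '{} -n {{}}'.format(launcher)            # -> 'mpiexec -n {}'
cmds = [template.format(n) for n in range(1, 3)]    # -> ['mpiexec -n 1', 'mpiexec -n 2']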
def run(self):
    def _get_numpe(prun):
        numpe_match = re.search(r'\s+-n\s?(?P<numpe>[0-9]+)', prun)
        if numpe_match is None:
            estr = F"Cannot determine numpe from:'{prun}'"
            raise ValueError(estr)
        return int(numpe_match.group('numpe'))

    # Generate the run commands for the given experiment.
    rcmd = self.config.args.runcmds
    pruns = experiment.runcmds(rcmd[0], rcmd[1], rcmd[2], rcmd[3])
    # Generate list of apps for the given benchmarks.
    apps = [b.strip() for b in self.config.args.benchmarks.split(',')]

    logger.emlog('# Starting Runs...')

    for app in apps:
        if not Benchmark.recognized(app):
            logger.emlog(F'# SKIPPING UNRECOGNIZED BENCHMARK: {app}')
            continue
        for prun in pruns:
            logger.log('')
            container.prun(
                F'{prun}',
                os.path.join(self.config.args.bin_dir, app),
                postaction=self.post_action,
                user_data={
                    'app': app,
                    'numpe': _get_numpe(prun)
                }
            )
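# A quick standalone check of the _get_numpe() pattern above against a sample
# launch command. Only the regular expression comes from the code; the sample
# string is a made-up illustration.
import re

sample = 'mpiexec -n 16 /path/to/benchmark'
match = re.search(r'\s+-n\s?(?P<numpe>[0-9]+)', sample)
assert match is not None
assert int(match.group('numpe')) == 16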
def run(self, genspec):
    logger.emlog('# Starting Runs...')

    # Generate the run commands for the given experiment.
    rcmd = self.config.args.runcmds
    pruns = experiment.runcmds(rcmd[0], rcmd[1], rcmd[2], rcmd[3])

    # The application and its arguments.
    executable = self.config.args.executable
    appargs = genspec.format(executable)

    for prun in pruns:
        logger.log('')
        container.prun(prun, appargs, postaction=self.post_action)
def run(self, genspec: str) -> None:
    '''
    Experiment iterations definition.
    '''
    logger.log('# Starting Runs...')

    # Generate the iterative run commands.
    rcmd = self.config.args.runcmds
    pruns = experiment.runcmds(rcmd[0], rcmd[1], rcmd[2], rcmd[3])

    executable = self.config.args.executable
    appargs = genspec.format(executable)

    # Execute generated run commands.
    for prun in pruns:
        logger.log('')
        container.prun(prun, appargs, postaction=self.post_action)
def run(self, genspec: str) -> None:
    '''
    Run benchmark test.
    '''
    logger.emlog('# Starting Runs...')

    # Generate run commands for current experiment.
    rcmd = self.config.args.runcmds
    pruns = experiment.runcmds(rcmd[0], rcmd[1], rcmd[2], rcmd[3])

    executable = self.config.args.executable
    s_input = self.snap_input
    s_output = self.snap_output
    appargs = genspec.format(executable, s_input, s_output)

    for prun in pruns:
        logger.log('')
        container.prun(
            prun,
            appargs,
            preaction=self.pre_action,
            postaction=self.post_action
        )