def report(self) -> None:
    '''
    Generate report.

    Emits a CSV asset (via metadata) and a terminal table of the collected
    results, one row per run, each tagged with the command that produced it.
    '''
    logger.emlog(F'# {self.config.args.name} Report')
    logger.log('creating report...\n')

    # Setup Table
    table = utils.Table()
    sio = io.StringIO(newline=None)
    dataraw = csv.writer(sio)

    header = ['Time', 'KBytesXchng/Rank-Max', 'MB/S/Rank', 'Command']
    dataraw.writerow(header)
    table.addrow(header)

    # Populate table. Append the command BEFORE adding the row anywhere so
    # the terminal table carries the 'Command' column promised by the header
    # (previously the row was added to the table one column short).
    for index, entry in enumerate(self.data['results']):
        entry.append(self.data['commands'][index])
        table.addrow(entry)
        dataraw.writerow(entry)

    # Write table to csv & display to terminal.
    csvname = self.config.args.csv_output
    metadata.add_asset(metadata.StringIOAsset(sio, csvname))
    table.emit()
    logger.log('')
def report(self):
    '''
    Emit a CSV report and terminal table for the collected run data,
    optionally recording each run into an icaptdb RunDataStore.
    '''
    logger.emlog(F'# {experiment.name()} Report')

    header = ['numpe', 'tottime', 'cgh1', 'cgl2']
    rows = zip(self.data['command'], self.data['starttime'],
               self.data['numpe'], self.data['nthread'],
               self.data['tottime'], self.data['cgh1'],
               self.data['cgl2'])

    # Optional run-data store, only when the icaptdb module is available.
    icapt_rds = None
    if utils.module_imported('icaptdb'):
        icapt_rds = icaptdb.RunDataStore(self.config.args)

    table = utils.Table()
    sio = io.StringIO(newline=None)
    dataw = csv.writer(sio)
    dataw.writerow([F'## {self.config.args.description}'])
    dataw.writerow(header)
    table.addrow(header, withrule=True)

    for cmd, stime, numpe, nthread, tott, cgh1, cgl2 in rows:
        record = [numpe, tott, cgh1, cgl2]
        dataw.writerow(record)
        table.addrow(record)
        if icapt_rds is not None:
            # Persist the run plus its figures of merit.
            icapt_rds.add(
                stime.strftime('%a %b %d %H:%M:%S %Y'),
                tott, numpe, nthread, cmd,
                [FOMFactory.build('cgh1', cgh1),
                 FOMFactory.build('cgl2', cgl2)]
            )

    csvfname = self.config.args.csv_output
    metadata.add_asset(metadata.StringIOAsset(sio, csvfname))
    table.emit()
    logger.log('')
def run(self):
    ''' Execute every recognized benchmark with each generated run command. '''
    def _get_numpe(prun):
        # Pull the process count out of the '-n <N>' flag in the run command.
        match = re.search(r'\s+-n\s?(?P<numpe>[0-9]+)', prun)
        if match is None:
            estr = F"Cannot determine numpe from:'{prun}'"
            raise ValueError(estr)
        return int(match.group('numpe'))

    # Generate the run commands for the given experiment.
    rcmd = self.config.args.runcmds
    pruns = experiment.runcmds(rcmd[0], rcmd[1], rcmd[2], rcmd[3])
    # Benchmarks arrive as a comma-separated list.
    apps = [b.strip() for b in self.config.args.benchmarks.split(',')]

    logger.emlog('# Starting Runs...')
    for app in apps:
        if not Benchmark.recognized(app):
            logger.emlog(F'# SKIPPING UNRECOGNIZED BENCHMARK: {app}')
            continue
        for prun in pruns:
            logger.log('')
            container.prun(
                F'{prun}',
                os.path.join(self.config.args.bin_dir, app),
                postaction=self.post_action,
                user_data={'app': app, 'numpe': _get_numpe(prun)}
            )
def post_action(**kwargs):
    ''' Actions performed after running the experiment (analysis). '''
    logger.emlog('# Entering post_action')

    cmd = kwargs.pop('command')      # Command string
    out = kwargs.pop('output')       # Output gathered from example-app
    stm = kwargs.pop('start_time')   # Timing values
    etm = kwargs.pop('end_time')
    tet = kwargs.pop('exectime')

    logger.log(F'Command: {cmd}')
    logger.log(F'Start time: {stm}')
    logger.log(F'End time: {etm}')
    logger.log(F'Total Execution Time (s): {tet}\n')

    # It is possible to process the many outputs of the example application.
    for i, line in enumerate(x.rstrip() for x in out):
        # Scan application output for "Data" tag; other lines are ignored.
        if not line.startswith('Data'):
            continue
        data = line.split(': ')[1]
        logger.log(F' >> Data {i} is {data}')
def report(self) -> None:
    ''' Generate csv report from run iterations. '''
    logger.emlog(F'# {experiment.name()} Report')

    # Terminal table plus an in-memory CSV destined for the metadata assets.
    table = utils.Table()
    sio = io.StringIO(newline=None)
    dataraw = csv.writer(sio)

    header = ['Cycle', 'Cstop', 'Time', 'Tstop', 'Hydro Cycle', 'Command']
    dataraw.writerow(header)
    table.addrow(header)

    # Each result row is tagged with the command that produced it.
    for idx, row in enumerate(self.data['results']):
        row.append(self.data['commands'][idx])
        dataraw.writerow(row)
        table.addrow(row)

    # Write table to csv & display to terminal.
    csvname = self.config.args.csv_output
    metadata.add_asset(metadata.StringIOAsset(sio, csvname))
    table.emit()
    logger.log('')
def report(self) -> None:
    ''' Generate csv report '''
    logger.emlog(F'# {self.config.args.name} Report')
    logger.log('Creating report...')

    table = utils.Table()
    sio = io.StringIO(newline=None)
    dataraw = csv.writer(sio)

    # Column headers come straight from the configured keywords.
    columns = list(self.keywords)
    dataraw.writerow(columns)
    table.addrow(columns)

    # Terminal rows omit the command; the CSV rows carry it as a trailing
    # column (appended after the table row is emitted).
    for idx, entry in enumerate(self.data['results']):
        table.addrow(entry)
        entry.append(self.data['commands'][idx])
        dataraw.writerow(entry)

    # Write table to csv and display to terminal.
    csvname = self.config.args.csv_output
    metadata.add_asset(metadata.StringIOAsset(sio, csvname))
    table.emit()
    logger.log('')
def run(self, genspec: str) -> None:
    ''' Run benchmark test. '''
    logger.emlog('# Starting Runs...')

    # Generate run commands for current experiment.
    rcmd = self.config.args.runcmds
    pruns = experiment.runcmds(rcmd[0], rcmd[1], rcmd[2], rcmd[3])

    # Use the manually supplied factor registry when present; otherwise
    # derive a 3-way factorization for every value in the run-command range.
    if self.manual_factors:
        factors = self.manual_factors
    else:
        factors = [experiment.factorize(val, 3)
                   for val in range(rcmd[0], rcmd[1] + 1)]

    executable = self.config.args.executable
    for i, prun in enumerate(pruns):
        pex, pey, pez = factors[i][0], factors[i][1], factors[i][2]
        appargs = genspec.format(executable, pex, pey, pez)
        logger.log('')
        container.prun(
            prun,
            appargs,
            postaction=self.post_action
        )
def main(argv):
    ''' Exercise shell-command handling on the host and in the container. '''
    # Name the experiment.
    experiment.name('test-shellcmds')

    fname = 'afile.txt'
    shargs = {'echo': True}

    logger.log('# Testing globbing...')
    # Wildcards don't need to be escaped with a `\' or quoted to protect them
    # from expansion by the host. We take care of that for you.
    container.run('ls *')
    # host and container interfaces should behave as identically as possible.
    host.run('ls *', **shargs)

    logger.emlog('# Testing redirection...')
    logger.log(F'# Adding text to {fname}:')
    host.run(F'echo "Some Text" | tee {fname}', **shargs)
    host.run(F'echo "More \'Text\'" >> {fname}', **shargs)
    logger.emlog(F'# The contents of {fname} are:')
    host.run(F'cat {fname}', **shargs)

    logger.emlog('# Testing quoting...')
    container.run('echo "Some \'Text\'"')

    logger.emlog('# Testing command chaining...')
    container.run('true && echo true!')
    container.run('false || echo false is good && echo true is good')

    logger.emlog('# Testing variable lifetimes within chained commands...')
    container.run(
        'export FOO="bar" && '
        'test ! -z $FOO && '
        'echo "Looks good!" || '
        'exit 1'
    )

    metadata.add_asset(metadata.FileAsset(fname))
def report(self):
    ''' Write the per-solver results as a CSV asset and a terminal table. '''
    logger.emlog(F'# {experiment.name()} Report')

    header = [
        'solver_id', 'numpe', 'tottime', 'nx,ny,nz', 'px,py,pz', 'fom'
    ]
    results = zip(self.data['solver_id'], self.data['numpe'],
                  self.data['tottime'], self.data['nxnynz'],
                  self.data['pxpypz'], self.data['fom'])

    table = utils.Table()
    sio = io.StringIO(newline=None)
    dataw = csv.writer(sio)
    dataw.writerow([F'## {self.config.args.description}'])
    dataw.writerow(header)
    table.addrow(header, withrule=True)

    # Every zipped record becomes one row in both outputs.
    for record in results:
        row = list(record)
        dataw.writerow(row)
        table.addrow(row)

    csvfname = self.config.args.csv_output
    metadata.add_asset(metadata.StringIOAsset(sio, csvfname))
    table.emit()
    logger.log('')
def report(self) -> None:
    ''' Generate csv report. '''
    logger.emlog(F'# {self.config.args.name} Report')
    logger.log('creating report...\n')

    # Terminal table and in-memory CSV buffer.
    table = utils.Table()
    sio = io.StringIO(newline=None)
    dataraw = csv.writer(sio)

    # Column header
    header = self.keywords
    dataraw.writerow(header)
    table.addrow(header)

    # The terminal table shows the raw results; the CSV additionally carries
    # the generating command as a trailing column (appended after the table
    # row is emitted, so it never appears on screen).
    for idx, entry in enumerate(self.data['results']):
        table.addrow(entry)
        entry.append(self.data['commands'][idx])
        dataraw.writerow(entry)

    csvfname = self.csv_output
    metadata.add_asset(metadata.StringIOAsset(sio, csvfname))
    table.emit()
    logger.log('')
def _add_container_metadata(self) -> None:
    ''' Adds container metadata to run metadata assets. '''
    logger.emlog('# Looking for container metadata...')

    # Image activators without build metadata have nothing to contribute.
    if not cntrimg.activator().requires_img_activation():
        iact = self.args.image_activator
        logger.log(F'# Note: the {iact} activator has no metadata\n')
        return

    imgdir = self.inflated_cntrimg_path
    # The subdirectory where container metadata are stored.
    buildl = os.path.join(
        imgdir,
        constants.METADATA_DIR,
        constants.SERVICE_LOG_NAME
    )
    # Don't error out if the image doesn't have our metadata.
    if not os.path.exists(buildl):
        logger.log('# Note: container image provides no metadata\n')
        return

    logger.log(F'# Adding metadata from {imgdir}\n')
    metadata.add_asset(metadata.FileAsset(buildl, 'container'))
def runcmds(start: int, stop: int, spec: str, nfun: str) -> List[str]:
    '''
    Generate a list of concrete run commands from a run specification.

    - start: The start index of nidx.
    - stop: The termination value for nfun(nidx) for some value nidx.
    - spec: The run specification template having the following variables:
        - %n: The number of processes to run.
    - nfun: An arithmetic expression in the variable 'nidx'; it is evaluated
      for nidx = start, start+1, ... and each value <= stop becomes a %n
      substitution in spec.

    Raises ValueError for nonsensical start/stop values and SyntaxError when
    nfun does not reference 'nidx'.
    '''
    # XXX(skg) I wish we could use something like pylint: disable=W0511
    # __name__ for this...
    fname = 'runcmds'
    # Regex string used to find variables in nfun expressions. Compiled with
    # re.X below, so whitespace and '#' comments inside are insignificant.
    vidx_res = '''\
    (        # Start of capture group 1
    \\b      # Start of whole word search
    nidx     # Variable literal
    \\b      # End of whole word search
    )        # End of capture group 1
    '''
    # Make sure that the provided start and stop values make sense.
    if start < 0 or stop < 0:
        estr = F'{__name__}.{fname} start and ' \
               'stop must both be positive values.'
        raise ValueError(estr)
    if start > stop:
        # Fixed message: the original text had the relation inverted
        # ('start cannot be less than stop' for a start > stop failure).
        estr = F'{__name__}.{fname} value error: ' \
               'start cannot be greater than stop.'
        raise ValueError(estr)
    # Find all variables in the provided function specification string. Also
    # enforce that *at least one* variable is provided.
    if _runcmds_nargs(nfun, vidx_res) == 0:
        # We didn't find at least one variable.
        estr = F'{__name__}.{fname} syntax error: ' \
               'At least one variable must be present. ' \
               F"'nidx' was not found in the following expression:\n{nfun}"
        raise SyntaxError(estr)
    # Generate the requisite values: evaluate nfun at successive nidx until
    # the result first exceeds stop.
    nvals = list()
    nidx = start
    regex = re.compile(vidx_res, flags=re.X)
    while True:
        nval = mathex.evaluate(regex.sub(str(nidx), nfun))
        if nval > stop:
            break
        nvals.append(nval)
        nidx += 1
    # Now generate the run commands.
    # Regex string used to find %n variables in spec expressions.
    n_res = '%n'
    if _runcmds_nargs(spec, n_res) == 0:
        # Warn (but proceed): every generated command will be identical.
        wstr = F"# WARNING: '{n_res}' not found in " \
               F'the following expression:\n# {spec}'
        logger.emlog(wstr)
    regex = re.compile(n_res)
    cmds = list()
    for idx in nvals:
        cmds.append(regex.sub(str(idx), spec))
    return cmds
def post_action(self, **kwargs: typing.Dict[str, str]) -> None:
    ''' Custom post action: metadata collection. '''
    logger.emlog('# POST-ACTION')
    logger.log('Retrieving SNAP output...')

    # Remember the command that produced this run, then parse its output.
    self.data['commands'].append(str(kwargs['command']))
    self.parse_snapfile()
def run(self, genspec):
    ''' Launch one container run per generated run command. '''
    logger.emlog('# Starting Runs...')

    # Generate the run commands for the given experiment.
    rcmd = self.config.args.runcmds
    # The application and its arguments.
    appargs = genspec.format(self.config.args.executable)

    for prun in experiment.runcmds(rcmd[0], rcmd[1], rcmd[2], rcmd[3]):
        logger.log('')
        container.prun(prun, appargs, postaction=self.post_action)
def report(self):
    ''' Print a banner per command followed by its tabulated benchmark data. '''
    logger.emlog(F'# {experiment.name()} Report')

    banner = F"#{'#'*79}"
    for cmd, bmdata in zip(self.data['commands'], self.data['bmdata']):
        # Double rule, the command, then a double rule closing with a blank.
        logger.log(banner)
        logger.log(banner)
        logger.log(F'# {cmd}')
        logger.log(banner)
        logger.log(banner + '\n')
        bmdata.tabulate()
def post_action(self, **kwargs: typing.Dict[str, str]) -> None:
    ''' Custom post action: metadata collection for report '''
    logger.emlog('# Post-ACTION')
    logger.log('Retrieving branson output...')

    # Record command used.
    self.data['commands'].append(kwargs.pop('command'))
    # Process snap output.
    self.parse_output(list(kwargs.pop('output')))
def main(argv):
    ''' Exercise escaped shell commands on the host and in the container. '''
    # Name the experiment.
    experiment.name('test-shellcmds')

    fname = 'afile.txt'
    shargs = {'echo': True}

    logger.log('# Testing globbing...')
    # Wildcards need to be escaped with a `\' or quoted to protect them from
    # expansion by the host.
    container.run('ls \\*')
    # shell and container interfaces should behave as identically as possible.
    host.run('ls \\*', **shargs)

    logger.emlog('# Testing redirection...')
    logger.log(F'# Adding text to {fname}:')
    container.run(F'echo "Some Text" | tee {fname}')
    container.run(F'echo "More \'Text\'" >> {fname}')
    logger.emlog(F'# The contents of {fname} are:')
    host.run(F'cat {fname}', **shargs)

    logger.emlog('# Testing quoting...')
    container.run('echo "Some \'Text\'"')

    logger.emlog('# Testing command chaining...')
    container.run('true && echo true!')
    container.run('false || echo false... && echo and done!')

    metadata.add_asset(metadata.FileAsset(fname))
def _build(self) -> None:
    ''' Drive the container build, bracketing its output in the log. '''
    dockerf = self.config['spec']
    # The build context is the directory holding the spec file.
    context = os.path.dirname(self.config['spec'])
    bcmd = F"{self.buildc} -b {self.builder} " \
           F"-t {self.config['tag']} -f {dockerf} {context}"

    logger.emlog('# Begin Build Output')
    # Run the command specified by bcmd.
    host.run(bcmd, echo=True)
    logger.emlog('# End Build Output')
def readgs(
    gspath: str,
    config: Optional[CLIConfiguration] = None
) -> Iterable[str]:
    '''
    A convenience routine for reading generate specification files.

    TODO(skg) Add description of formatting rules, semantics, etc. Don't
    forget about yield!

    We accept the following forms:
    # -a/--aarg [ARG_PARAMS] -b/--bargs [ARG PARAMS]
    # -c/--carg [ARG PARAMS] [positional arguments]

    Yields each non-comment, non-empty logical line of the file. Special
    '# -' comment lines are accumulated into an argument list that, when a
    config is provided, is parsed and applied to config before the next
    specification line is yielded.
    '''
    logger.emlog(F'# Reading Generate Specification File: {gspath}')
    # Emit contents of gs file.
    logger.log('# Begin Generate Specification')
    logger.log(utils.chomp(str().join(utils.cat(gspath))))
    logger.log('# End Generate Specification\n')

    with open(gspath) as file:
        # argv accumulates run-time arguments from '# -' comment lines; it is
        # consumed (and reset) each time a specification line is yielded.
        argv = list()
        lines = [x.strip() for x in utils.read_logical_lines(file)]
        for line in lines:
            # Interpret as special comment used to specify run-time arguments.
            if line.startswith('# -'):
                # Add to argument list.
                if config is not None:
                    argv.extend(shlex.split(line.lstrip('# ')))
                continue
            # Skip comments and empty lines.
            if line.startswith('#') or utils.emptystr(line):
                continue
            # Parse arguments if provided an argument parser.
            gsargs = None
            if config is not None:
                if not isinstance(config, CLIConfiguration):
                    estr = F'{__name__} expects an instance of CLIConfiguration'
                    raise ValueError(estr)
                gsargs = parsedargs(config.argparser, argv)
                config.update(gsargs)
            # Not a comment; yield generate specification string.
            yield line
            # Clear out argument list for next round.
            argv = list()
def start(self) -> None:
    '''
    Run the build end to end, timing it and re-raising any failure as the
    same exception type with added context.
    '''
    logger.emlog(F'# Starting {self.prog} at {utils.nows()}')
    logger.log(F"# $ {' '.join(sys.argv)}\n")
    try:
        stime = utils.now()
        self._emit_config()
        self._do_build()
        etime = utils.now()
        logger.log(F'# {self.prog} Time {etime - stime}')
        logger.log(F'# {self.prog} Done {utils.nows()}')
    except Exception as exception:
        # Wrap the failure with what/why context, preserving the cause chain.
        estr = utils.ehorf()
        estr += F'What: {self.prog} error encountered.\n' \
                F'Why: {exception}'
        estr += utils.ehorf()
        raise type(exception)(estr) from exception
def run(self, genspec: str) -> None:
    ''' Run benchmark test. '''
    logger.emlog('# Starting Runs...')

    # Generate run commands for current experiment.
    rcmd = self.config.args.runcmds
    # Application arguments: executable plus SNAP input/output paths.
    appargs = genspec.format(
        self.config.args.executable,
        self.snap_input,
        self.snap_output
    )

    for prun in experiment.runcmds(rcmd[0], rcmd[1], rcmd[2], rcmd[3]):
        logger.log('')
        container.prun(
            prun,
            appargs,
            preaction=self.pre_action,
            postaction=self.post_action
        )
def _check_env(self) -> None:
    '''
    Build environment verification function.

    Raises OSError if the environment is unsatisfactory.
    '''
    logger.emlog('# Checking your build environment...')

    inyp = 'Is it in your PATH?\n'
    notf = "'{}' not found. " + inyp

    # Check each required tool, collecting every failure into one message so
    # the user sees all missing tools at once.
    errs = str()
    for tool in (self.buildc, self.tarcmd):
        if not host.which(tool):
            errs += notf.format(tool)
    if errs:
        raise OSError(utils.chomp(errs))
def _stage_container_image(self) -> None:
    '''
    TODO(skg) Add proper description.

    Stages container images.

    Three paths: activators needing no activation return immediately;
    --do-not-stage uses the provided image directory in place; otherwise the
    image tarball is inflated via _ImageStager. In the latter two cases
    inflated_cntrimg_path is set and the activator is told the image path.
    Raises RuntimeError when the provided path is unusable.
    '''
    imgp = self.args.image
    # The 'we don't need or want to stage paths.'
    if not cntrimg.activator().requires_img_activation():
        return
    if self.args.do_not_stage:
        # We know that imgp cannot be None.
        hlps = 'Unstaged executions require access to ' \
               'an image directory path.'
        if not os.path.isdir(imgp):
            estr = F'{imgp} is not a directory. Cannot continue.\n{hlps}'
            raise RuntimeError(estr)
        # Use the image directory as-is; no inflation step.
        self.inflated_cntrimg_path = imgp
        logger.log(F'# Image path: {imgp}')
        cntrimg.activator().set_img_path(imgp)
        return
    # The 'stage' path.
    logger.emlog('# Staging container image...')
    hlps = 'Staged executions require access to an image tarball path.'
    istf = False
    try:
        istf = tarfile.is_tarfile(imgp)
    except Exception as exception:
        estr = F'{exception}. Cannot continue.\n{hlps}'
        raise RuntimeError(estr) from exception
    # We do this check here so we can raise an exception that isn't caught
    # above because it produces redundant error messages. is_tarfile() can
    # raise exceptions, so that's what the above try/except block is for.
    if not istf:
        raise RuntimeError(
            F'{imgp} is not a tarball. Cannot continue.\n{hlps}'
        )
    self.inflated_cntrimg_path = _ImageStager().stage(imgp)
    # Let the user and image activator know about the image's path.
    logger.log(F'# Staged image path: {self.inflated_cntrimg_path}')
    cntrimg.activator().set_img_path(self.inflated_cntrimg_path)
def report(self):
    ''' Emit lookup-rate results as a CSV asset and a terminal table. '''
    logger.emlog(F'# {experiment.name()} Report')

    header = ['NUMPE', 'NThread', 'Average Lookups/s', 'Total Lookups/s']
    results = zip(self.data['numpe'], self.data['nthread'],
                  self.data['alups'], self.data['tlups'])

    table = utils.Table()
    sio = io.StringIO(newline=None)
    dataw = csv.writer(sio)
    dataw.writerow([F'## {self.config.args.description}'])
    dataw.writerow(header)
    table.addrow(header, withrule=True)

    # One row per zipped record in both outputs.
    for record in results:
        row = list(record)
        dataw.writerow(row)
        table.addrow(row)

    csvfname = self.config.args.csv_output
    metadata.add_asset(metadata.StringIOAsset(sio, csvfname))
    table.emit()
    logger.log('')
def start(self) -> None:
    '''
    Top-level driver: emit config, prepare the container image, run the
    program, and write metadata, timing the whole sequence.

    Any exception is re-raised as the same type with contextual detail,
    chained to the original cause via 'from'.
    '''
    logger.emlog(F'# Starting {self.prog} at {utils.nows()}')
    logger.log(F"# $ {' '.join(sys.argv)}\n")
    try:
        stime = utils.now()
        self._emit_config()
        self._build_image_activator()
        self._stage_container_image()
        self._add_container_metadata()
        self._run()
        etime = utils.now()
        logger.log(F'# {self.prog} Time {etime - stime}')
        logger.log(F'# {self.prog} Done {utils.nows()}')
        self._write_metadata()
    except Exception as exception:
        estr = utils.ehorf()
        estr += F'What: {self.prog} error encountered.\n' \
                F'Why: {exception}'
        estr += utils.ehorf()
        # Chain to the original exception so the traceback keeps the real
        # cause (previously raised without 'from', losing the chain and
        # inconsistent with the sibling start() implementation).
        raise type(exception)(estr) from exception
def pre_action(self, **kwargs: typing.Dict[str, str]) -> None:
    ''' Custom pre action: update snap input '''
    logger.emlog('# PRE-ACTION')
    logger.log('Updating snap input...')

    # Fetch volume for decomposition from command execution.
    # Perform factor evaluation.
    volume = int(str(kwargs["command"]).split(" ")[2])
    dimensions = experiment.factorize(volume, 2)
    logger.log('Factor calculated!')

    # Read the SNAP input and rewrite the decomposition settings to match
    # the freshly factorized volume; every other line passes through as-is.
    with open(self.snap_input) as in_file:
        lines = in_file.readlines()

    # Prefix -> replacement line, checked in order (first match wins).
    replacements = (
        ('npey', F' npey={dimensions[0]}\n'),
        ('npez', F' npez={dimensions[1]}\n'),
        ('ny', F' ny={dimensions[0]}\n'),
        ('nz', F' nz={dimensions[1]}\n'),
    )
    updated = []
    for row in lines:
        trim = row.strip()
        for prefix, replacement in replacements:
            if trim.startswith(prefix):
                updated.append(replacement)
                break
        else:
            updated.append(row)

    # overwrite SNAP input
    with open(self.snap_input, 'wt') as in_file:
        in_file.writelines(updated)
    logger.log('')
def _run(self) -> None:
    ''' Execute the user program, bracketing its output in the log. '''
    progname = os.path.basename(self.args.program[0])
    logger.emlog(F'# Begin Program Output ({progname})')
    _Runner.run(self.args.program)
    logger.emlog('# End Program Output')
def pre_action(**kwargs):
    ''' Actions performed before running the experiment (setup). '''
    # Nothing to set up yet; just note that the hook fired.
    logger.emlog('# Entering pre_action')