def report(self) -> None:
    '''
    Generate csv report.
    '''
    logger.emlog(F'# {self.config.args.name} Report')
    logger.log('creating report...\n')

    # Setup table.
    table = utils.Table()
    sio = io.StringIO(newline=None)
    dataraw = csv.writer(sio)

    # Column header.
    header = self.keywords
    dataraw.writerow(header)
    table.addrow(header)

    # Populate csv table.
    for index, entry in enumerate(self.data['results']):
        # Terminal table: results only, no command column.
        table.addrow(entry)
        # Add command column to csv file. Build the csv row on a copy:
        # appending to entry in place would mutate self.data['results']
        # (and the list already handed to the terminal table), so calling
        # report() twice would duplicate the command column.
        dataraw.writerow(entry + [self.data['commands'][index]])

    csvfname = self.csv_output
    metadata.add_asset(metadata.StringIOAsset(sio, csvfname))
    table.emit()
    logger.log('')
def report(self):
    '''
    Emit this experiment's results as a csv asset and a terminal table.
    '''
    logger.emlog(F'# {experiment.name()} Report')

    header = [
        'solver_id',
        'numpe',
        'tottime',
        'nx,ny,nz',
        'px,py,pz',
        'fom'
    ]
    records = zip(self.data['solver_id'],
                  self.data['numpe'],
                  self.data['tottime'],
                  self.data['nxnynz'],
                  self.data['pxpypz'],
                  self.data['fom'])

    table = utils.Table()
    sio = io.StringIO(newline=None)
    dataw = csv.writer(sio)
    # The csv starts with a description banner, then the column header.
    dataw.writerow([F'## {self.config.args.description}'])
    dataw.writerow(header)
    table.addrow(header, withrule=True)

    for record in records:
        row = list(record)
        dataw.writerow(row)
        table.addrow(row)

    csvfname = self.config.args.csv_output
    metadata.add_asset(metadata.StringIOAsset(sio, csvfname))
    table.emit()
    logger.log('')
def main(argv):
    '''
    Run the nbody experiment across a range of process counts and report
    per-run execution times.
    '''
    experiment.name('nbody')
    logger.log('# Experiment: {}'.format(experiment.name()))

    launcher = host.whichl(['srun', 'mpiexec'])
    if launcher is None:
        sys.exit('Cannot find a parallel launcher...')

    app = '/nbody/nbody-mpi'

    # The seemingly strange use of {{}} allows us to first format the string
    # with arguments (the {}) and then generate strings with values passed to
    # -n from the output of range() (the {{}}).
    runcmds = experiment.generate('{} -n {{}}'.format(launcher), range(1, 3))

    etimes = []
    for cmd in runcmds:
        stime = utils.now()
        # TODO(skg) FIXME
        container.prun(cmd, app)
        etime = utils.now()

        telapsed = etime - stime
        etimes.append(telapsed)
        logger.log(F'# Execution Time: {telapsed}\n')
        # Take a break between runs.
        time.sleep(1)

    logger.log('# Report')
    logger.log('# Command, Execution Time')
    for cmd, tet in zip(runcmds, etimes):
        logger.log('{}, {}'.format(cmd, tet))
def post_action(**kwargs):
    '''
    Actions performed after running the experiment (analysis).
    '''
    logger.emlog('# Entering post_action')

    cmd = kwargs.pop('command')     # Command string
    out = kwargs.pop('output')      # Output gathered from example-app
    stm = kwargs.pop('start_time')  # Timing values
    etm = kwargs.pop('end_time')
    tet = kwargs.pop('exectime')

    logger.log(F'Command: {cmd}')
    logger.log(F'Start time: {stm}')
    logger.log(F'End time: {etm}')
    logger.log(F'Total Execution Time (s): {tet}\n')

    # It is possible to process the many outputs of the example application.
    for i, line in enumerate(x.rstrip() for x in out):
        # Only lines carrying the "Data" tag are of interest here.
        if not line.startswith('Data'):
            continue
        data = line.split(': ')[1]
        logger.log(F' >> Data {i} is {data}')
def parse_output(self, out1: typing.List[str]) -> None:
    '''
    Parse timing results from app EOR terminal output.
    '''
    # Locate the start of the time table; keep everything from there on.
    timetable = []
    for lineno, text in enumerate(out1):
        if text.startswith('Total cells requested'):
            logger.log(F'Found EOR table on line: {lineno}')
            timetable = out1[lineno:]
            break

    # Collect results from the current iteration.
    iter_results = []
    for record in timetable:
        # Skip decorative (asterisk) lines.
        if '*' in record:
            continue
        label, value = record.split(': ')
        if label in self.keywords:
            # Drop the trailing newline character.
            iter_results.append(value[:-1])

    # Add iteration results to experiment data.
    self.data['results'].append(iter_results)
def run(self, genspec: str) -> None:
    '''
    Run benchmark test.
    '''
    logger.emlog('# Starting Runs...')

    # Generate run commands for current experiment.
    rcmd = self.config.args.runcmds
    pruns = experiment.runcmds(rcmd[0], rcmd[1], rcmd[2], rcmd[3])

    # Use the manually supplied factor registry when present; otherwise
    # factorize each value in the run range into three factors.
    if self.manual_factors:
        factors = self.manual_factors
    else:
        factors = [experiment.factorize(val, 3)
                   for val in range(rcmd[0], rcmd[1] + 1)]

    executable = self.config.args.executable
    for i, prun in enumerate(pruns):
        pex = factors[i][0]
        pey = factors[i][1]
        pez = factors[i][2]
        appargs = genspec.format(executable, pex, pey, pez)
        logger.log('')
        container.prun(prun, appargs, postaction=self.post_action)
def report(self) -> None:
    '''
    Generate csv report.
    '''
    logger.emlog(F'# {self.config.args.name} Report')
    logger.log('Creating report...')

    # Setup table.
    table = utils.Table()
    sio = io.StringIO(newline=None)
    dataraw = csv.writer(sio)

    # Column headers.
    columns = list(self.keywords)
    dataraw.writerow(columns)
    table.addrow(columns)

    # Populate table.
    for index, entry in enumerate(self.data['results']):
        # Terminal table: results only.
        table.addrow(entry)
        # Append the command column on a copy for the csv: mutating entry
        # would alter self.data['results'] (and the row already handed to
        # the terminal table), so a second report() call would duplicate
        # the command column.
        dataraw.writerow(entry + [self.data['commands'][index]])

    # Write table to csv and display to terminal.
    csvname = self.config.args.csv_output
    metadata.add_asset(metadata.StringIOAsset(sio, csvname))
    table.emit()
    logger.log('')
def main(argv):
    '''
    Exercise shell command handling: globbing, redirection, quoting, and
    command chaining.
    '''
    # Name the experiment.
    experiment.name('test-shellcmds')

    fname = 'afile.txt'
    logger.log('# Testing globbing...')
    shargs = {'echo': True}

    # Wildcards need to be escaped with a `\' or quoted to protect them from
    # expansion by the host.
    container.run('ls \\*')
    # shell and container interfaces should behave as identically as possible.
    host.run('ls \\*', **shargs)

    logger.emlog('# Testing redirection...')
    logger.log(F'# Adding text to {fname}:')
    container.run(F'echo "Some Text" | tee {fname}')
    container.run(F'echo "More \'Text\'" >> {fname}')
    logger.emlog(F'# The contents of {fname} are:')
    host.run(F'cat {fname}', **shargs)

    logger.emlog('# Testing quoting...')
    container.run('echo "Some \'Text\'"')

    logger.emlog('# Testing command chaining...')
    container.run('true && echo true!')
    container.run('false || echo false... && echo and done!')

    metadata.add_asset(metadata.FileAsset(fname))
def report(self) -> None:
    '''
    Generate report.
    '''
    logger.emlog(F'# {self.config.args.name} Report')
    logger.log('creating report...\n')

    # Setup Table.
    table = utils.Table()
    sio = io.StringIO(newline=None)
    dataraw = csv.writer(sio)

    header = ['Time', 'KBytesXchng/Rank-Max', 'MB/S/Rank', 'Command']
    dataraw.writerow(header)
    table.addrow(header)

    # Populate table.
    for index, entry in enumerate(self.data['results']):
        # Build the complete row (results plus command) on a copy. The
        # original appended the command to entry after table.addrow, which
        # (a) mutated self.data['results'] so repeated report() calls
        # duplicated the column, and (b) left the terminal row one column
        # short of the header unless Table aliased the list.
        row = entry + [self.data['commands'][index]]
        table.addrow(row)
        dataraw.writerow(row)

    # Write table to csv & display to terminal.
    csvname = self.config.args.csv_output
    metadata.add_asset(metadata.StringIOAsset(sio, csvname))
    table.emit()
    logger.log('')
def report(self) -> None:
    '''
    Generate csv report from run iterations.
    '''
    logger.emlog(F'# {experiment.name()} Report')

    # Setup table.
    table = utils.Table()
    sio = io.StringIO(newline=None)
    dataraw = csv.writer(sio)

    header = ['Cycle', 'Cstop', 'Time', 'Tstop', 'Hydro Cycle', 'Command']
    dataraw.writerow(header)
    table.addrow(header)

    # Populate table.
    for index, entry in enumerate(self.data['results']):
        # Build the row on a copy; appending to entry in place mutated
        # self.data['results'], so a second report() call would append the
        # command column twice. Output content is unchanged.
        row = entry + [self.data['commands'][index]]
        dataraw.writerow(row)
        table.addrow(row)

    # Write table to csv & display to terminal.
    csvname = self.config.args.csv_output
    metadata.add_asset(metadata.StringIOAsset(sio, csvname))
    table.emit()
    logger.log('')
def main(argv):
    '''
    Exercise shell command handling: globbing, redirection, quoting, command
    chaining, and variable lifetimes within chained commands.
    '''
    # Name the experiment.
    experiment.name('test-shellcmds')

    fname = 'afile.txt'
    logger.log('# Testing globbing...')
    shargs = {'echo': True}

    # Wildcards don't need to be escaped with a `\' or quoted to protect them
    # from expansion by the host. We take care of that for you.
    container.run('ls *')
    # host and container interfaces should behave as identically as possible.
    host.run('ls *', **shargs)

    logger.emlog('# Testing redirection...')
    logger.log(F'# Adding text to {fname}:')
    host.run(F'echo "Some Text" | tee {fname}', **shargs)
    host.run(F'echo "More \'Text\'" >> {fname}', **shargs)
    logger.emlog(F'# The contents of {fname} are:')
    host.run(F'cat {fname}', **shargs)

    logger.emlog('# Testing quoting...')
    container.run('echo "Some \'Text\'"')

    logger.emlog('# Testing command chaining...')
    container.run('true && echo true!')
    container.run('false || echo false is good && echo true is good')

    logger.emlog('# Testing variable lifetimes within chained commands...')
    container.run('export FOO="bar" && '
                  'test ! -z $FOO && '
                  'echo "Looks good!" || '
                  'exit 1')

    metadata.add_asset(metadata.FileAsset(fname))
def report(self):
    '''
    Emit a csv report and terminal table for the run; additionally push each
    record to an icaptdb run-data store when that module was imported.
    '''
    logger.emlog(F'# {experiment.name()} Report')

    header = ['numpe', 'tottime', 'cgh1', 'cgl2']
    records = zip(self.data['command'],
                  self.data['starttime'],
                  self.data['numpe'],
                  self.data['nthread'],
                  self.data['tottime'],
                  self.data['cgh1'],
                  self.data['cgl2'])

    # Only use the run-data store when the optional module is present.
    icapt_rds = None
    if utils.module_imported('icaptdb'):
        icapt_rds = icaptdb.RunDataStore(self.config.args)

    table = utils.Table()
    sio = io.StringIO(newline=None)
    dataw = csv.writer(sio)
    dataw.writerow([F'## {self.config.args.description}'])
    dataw.writerow(header)
    table.addrow(header, withrule=True)

    for cmd, stime, numpe, nthread, tott, cgh1, cgl2 in records:
        # Only a subset of each record lands in the csv/terminal output.
        row = [numpe, tott, cgh1, cgl2]
        dataw.writerow(row)
        table.addrow(row)
        if icapt_rds is not None:
            icapt_rds.add(
                stime.strftime('%a %b %d %H:%M:%S %Y'),
                tott, numpe, nthread, cmd,
                [FOMFactory.build('cgh1', cgh1),
                 FOMFactory.build('cgl2', cgl2)]
            )

    csvfname = self.config.args.csv_output
    metadata.add_asset(metadata.StringIOAsset(sio, csvfname))
    table.emit()
    logger.log('')
def run(self):
    '''
    Execute every recognized benchmark under each generated run command.
    '''
    def _get_numpe(prun):
        # Extract the value passed to -n from the launcher command line.
        found = re.search(r'\s+-n\s?(?P<numpe>[0-9]+)', prun)
        if found is None:
            raise ValueError(F"Cannot determine numpe from:'{prun}'")
        return int(found.group('numpe'))

    # Generate the run commands for the given experiment.
    rcmd = self.config.args.runcmds
    pruns = experiment.runcmds(rcmd[0], rcmd[1], rcmd[2], rcmd[3])
    # Generate list of apps for the given benchmarks.
    apps = [b.strip() for b in self.config.args.benchmarks.split(',')]

    logger.emlog('# Starting Runs...')

    for app in apps:
        if not Benchmark.recognized(app):
            logger.emlog(F'# SKIPPING UNRECOGNIZED BENCHMARK: {app}')
            continue
        for prun in pruns:
            logger.log('')
            container.prun(
                F'{prun}',
                os.path.join(self.config.args.bin_dir, app),
                postaction=self.post_action,
                user_data={'app': app, 'numpe': _get_numpe(prun)}
            )
def write(self, basep: str) -> None:
    '''
    Writes metadata contained in assets.
    '''
    logger.log(F'# Writing Metadata Assets at {utils.nows()}')
    for item in self.assets:
        item.write(basep)
def _write_metadata(self) -> None:
    '''
    Write collected metadata beneath the experiment's output path.
    '''
    base = os.path.join(self.args.output_path, str(experiment.name()))
    outp = impl.getmetasubd(base)
    # Log the target before writing so the output log itself records the
    # output directory.
    logger.log(F'# {self.prog} Output Target: {outp}')
    metadata.write(outp)
    logger.log(F'# {self.prog} Output Written to: {outp}')
def emit(self) -> None:
    '''
    Emits the contents of the table using logger.log().
    '''
    # Column widths come from the maximum lengths seen across all rows.
    formatter = Table._RowFormatter(self.maxcollens)
    for line in self.rows:
        logger.log(formatter.format(line))
def main(argv):
    '''
    Minimal example experiment.
    '''
    # Every experiment carries a name; this one is hello-world.
    experiment.name('hello-world')
    # logger.log() writes to the console, and everything logged is also
    # captured into experiment metadata that is written (by default) once
    # the experiment terminates.
    logger.log('hello world')
def main(argv):
    '''
    Demonstrate registering file and YAML-dictionary metadata assets.
    '''
    logger.log('adding a file asset...')
    metadata.add_asset(metadata.FileAsset('some-metadata.txt'))

    logger.log('adding a yaml dict asset...')
    # Collect a little application and system metadata.
    adict = {
        'Application': {'argv': argv},
        'System': {'whoami': host.whoami(), 'hostname': host.hostname()}
    }
    metadata.add_asset(metadata.YAMLDictAsset(adict, 'yaml-metadata'))
def run(  # pylint: disable=too-many-arguments
    cmd: str,
    verbatim: bool = False,
    echo: bool = False,
    capture_output: bool = False,
    verbose: bool = True,
    check_exit_code: bool = True
) -> List[str]:
    '''
    Executes the provided command.

    Returns newline-delimited list of output if capture_output if True.

    Throws ChildProcessError on error if check_exit_code is True.
    '''
    def getrealcmd(cmd: str, verbatim: bool) -> str:
        # The user wants us to run the string exactly as provided.
        if verbatim:
            return cmd
        # Otherwise wrap the command for the shell, quoting it so it reaches
        # the interpreter as a single argument.
        return F'{constants.BASH_MAGIC} {shlex.quote(cmd)}'

    realcmd = getrealcmd(cmd, verbatim)
    # Optionally echo the exact command line that will be executed.
    if echo:
        logger.log(F'# $ {realcmd}')
    # Output list of strings used to (optionally) capture command output.
    olst: List[str] = list()
    with subprocess.Popen(
        realcmd,
        shell=True,  # nosec
        bufsize=1,
        # Enables text mode, making write() et al. happy.
        universal_newlines=True,
        # Merge stderr into stdout so one stream carries all output.
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT
    ) as spo:
        # Show progress and store output to a string (if requested).
        while True:
            stdout = spo.stdout.readline()
            # An empty read means the stream is closed (process finished).
            if not stdout:
                break
            if capture_output:
                olst.append(stdout)
            if verbose:
                logger.log(utils.chomp(stdout))
        # Wait for the child to terminate and collect its exit status.
        wrc = spo.wait()
    if wrc != os.EX_OK and check_exit_code:
        # Surface the failure with the child's exit status attached.
        cpe = ChildProcessError()
        cpe.errno = wrc
        estr = F"Command '{realcmd}' returned non-zero exit status."
        cpe.strerror = estr
        raise cpe
    return olst
def _flatten(self) -> None:
    '''
    Flatten the configured image via the tar command.
    '''
    tcmd = '{} {} {}'.format(
        self.tarcmd,
        self.config['tag'],
        self.config['output_path']
    )
    logger.log('# Begin Flatten Output')
    # The tool discovers the builder through the environment.
    os.environ['CH_BUILDER'] = self.builder
    host.run(tcmd, echo=True)
    logger.log('# End Flatten Output')
def post_action(self, **kwargs: typing.Dict[str, str]) -> None:
    '''
    Custom post action: metadata collection.
    '''
    logger.emlog('# POST-ACTION')
    logger.log('Retrieving SNAP output...')

    # Record the command used, then process SNAP's output file.
    self.data['commands'].append(str(kwargs['command']))
    self.parse_snapfile()
def run(self, genspec):
    '''
    Generate and execute the run commands for this experiment.
    '''
    logger.emlog('# Starting Runs...')

    # Generate the run commands for the given experiment.
    rcmd = self.config.args.runcmds
    pruns = experiment.runcmds(rcmd[0], rcmd[1], rcmd[2], rcmd[3])

    # The application and its arguments.
    appargs = genspec.format(self.config.args.executable)

    for prun in pruns:
        logger.log('')
        container.prun(prun, appargs, postaction=self.post_action)
def report(self):
    '''
    Log a banner and the tabulated benchmark data for each recorded command.
    '''
    logger.emlog(F'# {experiment.name()} Report')

    rule = F"#{'#'*79}"
    for cmd, bmdata in zip(self.data['commands'], self.data['bmdata']):
        # Frame each command with double rules above and below.
        logger.log(rule)
        logger.log(rule)
        logger.log(F'# {cmd}')
        logger.log(rule)
        logger.log(rule + '\n')
        bmdata.tabulate()
def post_action(self, **kwargs: typing.Dict[str, str]) -> None:
    '''
    Post experiment iteration action.
    '''
    logger.log('# Starting Post Action...')
    # Record command used in iteration.
    self.data['commands'].append(kwargs.pop('command'))
    # Record timing data from PENNANT terminal output.
    self.parse_output(kwargs.pop('output'))
def post_action(self, **kwargs: typing.Dict[str, str]) -> None:
    '''
    Custom post action: metadata collection for report.
    '''
    logger.emlog('# Post-ACTION')
    logger.log('Retrieving branson output...')

    # Record the command used, then parse the captured terminal output.
    self.data['commands'].append(kwargs.pop('command'))
    self.parse_output(list(kwargs.pop('output')))
def post_action(self, **kwargs: typing.Dict[str, str]) -> None:
    '''
    Post experiment iteration action.
    '''
    logger.log('# POST-ACTION')
    logger.log('')
    # Record command used in iteration.
    self.data['commands'].append(kwargs.pop('command'))
    # Record iteration output data.
    self.parse_output(list(kwargs.pop('output')))
def main(argv):
    '''
    Demonstrate registering metadata assets, including one in a nested
    subdirectory.
    '''
    logger.log('adding a file asset...')
    # Store an arbitrary metadata file under the subfolder subdir-a/subdir-b.
    metadata.add_asset(
        metadata.FileAsset('some-metadata.txt', 'subdir-a/subdir-b'))

    logger.log('adding a yaml dict asset...')
    # Gather application and system metadata.
    adict = {
        'Application': {'argv': argv},
        'System': {'whoami': host.whoami(), 'hostname': host.hostname()}
    }
    # Save the collected metadata to a YAML file.
    metadata.add_asset(metadata.YAMLDictAsset(adict, 'yaml-metadata'))
def run(self, genspec: str) -> None:
    '''
    Experiment iterations definition.
    '''
    logger.log('# Starting Runs...')

    # Generate the iterative run commands.
    rcmd = self.config.args.runcmds
    pruns = experiment.runcmds(rcmd[0], rcmd[1], rcmd[2], rcmd[3])
    appargs = genspec.format(self.config.args.executable)

    # Execute generated run commands.
    for prun in pruns:
        logger.log('')
        container.prun(prun, appargs, postaction=self.post_action)
def _add_container_metadata(self) -> None:
    '''
    Adds container metadata to run metadata assets.
    '''
    logger.emlog('# Looking for container metadata...')

    # Skip any image activators that do not have build metadata.
    if not cntrimg.activator().requires_img_activation():
        iact = self.args.image_activator
        logger.log(F'# Note: the {iact} activator has no metadata\n')
        return

    imgdir = self.inflated_cntrimg_path
    logger.log(F'# Adding metadata from {imgdir}\n')
    buildl = os.path.join(
        imgdir,
        constants.METADATA_DIR,
        constants.SERVICE_LOG_NAME
    )
    # Container metadata assets live in the 'container' subdirectory.
    metadata.add_asset(metadata.FileAsset(buildl, 'container'))
def run(self, genspec: str) -> None:
    '''
    Run benchmark test.
    '''
    logger.emlog('# Starting Runs...')

    # Generate run commands for current experiment.
    rcmd = self.config.args.runcmds
    pruns = experiment.runcmds(rcmd[0], rcmd[1], rcmd[2], rcmd[3])

    # Fill the generation spec with the executable and SNAP's I/O files.
    appargs = genspec.format(
        self.config.args.executable,
        self.snap_input,
        self.snap_output
    )

    for prun in pruns:
        logger.log('')
        container.prun(prun, appargs,
                       preaction=self.pre_action,
                       postaction=self.post_action)