def test_strace(): inc = Config.find_machine() relpath = os.path.relpath(CURDIR, start=ROOTDIR) outdir = '%s/%s'%(os.path.expandvars(inc.get('variable', 'work_directory')), \ '%s_%s'%(relpath.replace('/', '_'), os.path.basename(__file__)[:-3])) if os.path.exists(outdir): shutil.rmtree(outdir) os.makedirs(outdir) for filename in glob.glob(os.path.join(SRCDIR, '*')): shutil.copy(filename, outdir) # create a kgen command cmds = [] cmds.append(KGEN_APP) if inc.has_section('compiler') and inc.has_option( 'compiler', 'intel') and inc.get('compiler', 'intel'): cmds.extend(['--prerun', 'build="%(cmd)s",run="%(cmd)s",kernel_build="%(cmd)s",kernel_run="%(cmd)s"'%\ {'cmd': inc.get('compiler', 'intel')}]) cmds.append('--cmd-clean "cd %s; make -f Makefile.mpirun clean"' % outdir) cmds.append('--cmd-build "cd %s; make -f Makefile.mpirun build"' % outdir) cmds.append('--cmd-run "cd %s; make -f Makefile.mpirun run"' % outdir) cmds.append('--kernel-option FC=ifort') cmds.append('--outdir %s' % outdir) cmds.append('--source alias=/glade/scratch:/glade2/scratch2') cmds.append('--mpi enable') cmds.append('--openmp enable') cmds.append('-I %s' % outdir) cmds.append('%s/%s' % (outdir, CALLSITE)) # run kgen print 'SHCMD: %s' % ' \\ \n'.join(cmds) out, err, retcode = run_shcmd(' '.join(cmds)) print '\n******* STDOUT KGEN **********\n' print out print '\n******* STDERR KGEN **********\n' print err assert retcode == 0 out, err, retcode = run_shcmd('cd %s/kernel; make' % outdir) print '\n******* STDOUT KERNEL **********\n' print out print '\n******* STDERR KERNEL **********\n' print err # check output if retcode == 0: outlines = out.split('\n') if any(line.find('Verification FAILED') >= 0 for line in outlines): assert False if not any(line.find('Verification PASSED') >= 0 for line in outlines): assert False assert True else: assert False
def download(self, myname, result):
    """Ensure a CESM 1.5 beta07 checkout exists and stage a test working copy."""
    systestdir = result['mkdir_task']['sysdir']
    workdir = result['mkdir_task']['workdir']

    # Shared, pre-existing application checkout (not per-test).
    #appsrc = '%s/cesm_ref'%systestdir
    appsrc = '/glade/u/home/youngsun/apps/cesm/cesm1_5_beta07'
    if not os.path.exists(appsrc):
        os.mkdir(appsrc)

    # Probe for an existing Subversion working copy; if "svn info" does not
    # report a URL line, perform a fresh checkout in place.
    out, err, retcode = run_shcmd('svn info | grep URL', cwd=appsrc)
    have_checkout = retcode == 0 and out and len(out) >= 3 and out.startswith('URL')
    if not have_checkout:
        out, err, retcode = run_shcmd(
            'svn checkout -r 82434 https://svn-ccsm-models.cgd.ucar.edu/cesm1/tags/cesm1_5_beta07 .',
            cwd=appsrc)

    # copy cesm src into test specific src dir (only if not already present)
    tmpsrc = '%s/cesm_work' % systestdir
    if not os.path.exists(tmpsrc):
        shutil.copytree(appsrc, tmpsrc)

    result[myname]['appsrc'] = appsrc
    result[myname]['tmpsrc'] = tmpsrc

    # Stage the optional exclude.ini into the work directory.
    excludepath = os.path.join(self.TEST_DIR, 'exclude.ini')
    if os.path.exists(excludepath):
        shutil.copy(excludepath, workdir)

    self.set_status(result, myname, self.PASSED)
    return result
def config(self, myname, result):
    """Create and configure a CESM case (MG2 microphysics) for extraction.

    Reads directories prepared by mkdir_task/download_task from *result*,
    creates the case if needed, enables MG2 in env_build.xml, and runs
    cesm_setup.  Fills result[myname]['casedir'/'casename'] and returns
    *result* with the task status set.
    """
    workdir = result['mkdir_task']['workdir']
    systestdir = result['mkdir_task']['sysdir']
    tmpsrc = result['download_task']['tmpsrc']

    scriptdir = '%s/cime/scripts'%tmpsrc
    casename = 'KGNUCESM'
    casedir = '%s/%s'%(systestdir, casename)
    datadir = '%s/data'%workdir

    # NOTE: svn co https://svn-ccsm-models.cgd.ucar.edu/cesm1/tags/cesm1_4_beta07/ systestdir/cesm_ref
    # Rebuild when forced, when no state data exists yet, or when the case
    # directory is missing.
    if self.REBUILD or not os.path.exists(datadir) or \
        len([name for name in os.listdir(datadir) if os.path.isfile(os.path.join(datadir, name))])==0 or \
        not os.path.exists(casedir):

        # check if project option exists
        if 'project' not in self.OPTIONS:
            # BUGFIX: corrected "porject" typo in the user-facing message.
            self.set_status(result, myname, self.FAILED,
                errmsg='"project" user option is not provided. Use "-o project=<your project id>"')
            return result

        # create a case
        if not os.path.exists(casedir):
            casecmd = './create_newcase -project %s -mach yellowstone -compset FC5 -res ne16_ne16 -compiler gnu -case %s'%(self.OPTIONS['project'], casedir)
            out, err, retcode = run_shcmd(casecmd, cwd=scriptdir)
            if retcode!=0:
                self.set_status(result, myname, self.FAILED,
                    errmsg='MG2 case generation is failed: %s\n\n%s'%(err, out))
                return result

        # modify env_build.xml to enable MG2 (skip if already configured)
        out, err, retcode = run_shcmd('grep mg2 env_build.xml', cwd=casedir)
        if retcode!=0:
            xmlchange = './xmlchange -f env_build.xml -id CAM_CONFIG_OPTS -val "-microphys mg2 -clubb_sgs" -a'
            out, err, retcode = run_shcmd(xmlchange, cwd=casedir)
            if retcode!=0:
                # BUGFIX: the format string previously had no placeholders
                # ('… is failed: '%(err, out)), which raised TypeError instead
                # of reporting the error. Now matches the sibling messages.
                self.set_status(result, myname, self.FAILED,
                    errmsg='Modification of env_build.xml is failed: %s\n\n%s'%(err, out))
                return result

        # cesm.setup (skip if the case run script already exists)
        if not os.path.exists('%s/%s.run'%(casedir, casename)):
            out, err, retcode = run_shcmd('./cesm_setup', cwd=casedir)
            if retcode!=0:
                self.set_status(result, myname, self.FAILED,
                    errmsg='cesm.setup is failed: %s\n\n%s'%(err, out))
                return result

    # include.ini was created manually
    # result[myname]['srcmods'] = '%s/SourceMods'%casedir
    result[myname]['casedir'] = casedir
    result[myname]['casename'] = casename
    self.set_status(result, myname, self.PASSED)
    return result
def run(self):
    """Build the application under strace and derive include.ini from the log."""
    stracepath = '%s/%s' % (Config.path['outdir'], Config.stracefile)
    includepath = '%s/%s' % (Config.path['outdir'], Config.includefile)

    if not os.path.exists(stracepath) or 'all' in Config.rebuild or 'strace' in Config.rebuild:
        # clean app.
        if Config.cmd_clean['cmds']:
            kgutils.run_shcmd(Config.cmd_clean['cmds'])
        if Config.state_switch['clean']:
            kgutils.run_shcmd(Config.state_switch['clean'])

        # Prefix the build command with user-supplied pre-run commands, if any.
        if Config.prerun['build']:
            cmdstr = '%s;%s' % (Config.prerun['build'], Config.cmd_build['cmds'])
        else:
            cmdstr = Config.cmd_build['cmds']

        # Trace only execve so the log stays manageable while still recording
        # every command invocation made during the build.
        bld_cmd = 'strace -o %s -f -q -s 100000 -e trace=execve -v -- /bin/sh -c "%s"' % (
            stracepath, cmdstr)
        kgutils.logger.info('Creating KGen strace logfile: %s' % stracepath)

        # BUGFIX: pre-initialize out/err so the except-path logging cannot
        # raise NameError (masking the real exception) when run_shcmd itself
        # fails before assigning them.
        out, err = '', ''
        try:
            out, err, retcode = kgutils.run_shcmd(bld_cmd)
            if retcode != 0 and os.path.exists(stracepath):
                # Remove the partial log so a rerun regenerates it.
                os.remove(stracepath)
                kgutils.logger.error('%s\n%s' % (err, out))
        except:
            if os.path.exists(stracepath):
                os.remove(stracepath)
            kgutils.logger.error('%s\n%s' % (err, out))
            raise
    else:
        kgutils.logger.info('Reusing KGen strace logfile: %s' % stracepath)

    # parse strace log and generate include.ini
    if not os.path.exists(includepath) or 'all' in Config.rebuild or 'include' in Config.rebuild:
        if stracepath:
            self._geninclude(stracepath, includepath)
        else:
            kgutils.logger.error('strace logfile is not found at: %s' % stracepath)
            # BUGFIX: corrected "generting" typo in the exit message.
            kgutils.kgenexit('Please retry KGen after generating strace logfile.')
    else:
        kgutils.logger.info('Reusing KGen include file: %s' % includepath)
def genstate(self, myname, result):
    """Wait for the 'KGCALC' LSF job to finish and record pass/fail.

    When state data is being reused no job was submitted, so the task
    passes immediately.
    """
    workdir = result['mkdir_task']['workdir']
    reuse_data = result['mkdir_task']['reuse_data']

    if not reuse_data:
        # find jobid: poll bjobs for up to ~10 minutes for the job to appear
        jobid = None
        for _ in range(120):
            time.sleep(5)
            out, err, retcode = run_shcmd('bjobs')
            for line in out.split('\n'):
                items = line.split()
                if any(item == 'KGCALC' for item in items):
                    #if len(items)>6 and items[6].endswith('KHOMME'):
                    jobid = items[0]
                    break
            if jobid:
                break
        if jobid is None:
            self.set_status(result, myname, self.FAILED, errmsg='Job id is not found.')
            return result

        # Poll until the job reaches a terminal LSF state, or bjobs reports
        # it is no longer found ('FINISHED'), or we give up after maxiter.
        terminal_states = ['DONE', 'PSUSP', 'USUSP', 'SSUSP', 'EXIT',
                           'UNKWN', 'ZOMBI', 'FINISHED']
        status = ''
        maxiter = 3600
        niter = 0
        while status not in terminal_states:
            time.sleep(1)
            out, err, retcode = run_shcmd('bjobs %s' % jobid)
            if retcode == 0:
                for line in out.split('\n'):
                    items = line.split()
                    if len(items) > 3 and items[0] == jobid:
                        status = items[2]
                    elif len(items) > 0 and items[-1] == 'found':
                        # "Job <id> is not found": job already left the queue
                        status = 'FINISHED'
            else:
                print('DEBUG: ', out, err, retcode)
            niter += 1
            if niter >= maxiter:
                break

        # BUGFIX: the original "status=='DONE' or 'FINISHED'" was always true
        # (a non-empty string literal is truthy), so EXIT/ZOMBI/timeout were
        # silently reported as PASSED. Test set membership instead.
        if status in ('DONE', 'FINISHED'):
            self.set_status(result, myname, self.PASSED)
        else:
            self.set_status(result, myname, self.FAILED,
                errmsg='Job completion status is not expected.')
    else:
        self.set_status(result, myname, self.PASSED)
    return result
def extract_kernel(self, target, namepath, *args, **kwargs): cmds = ['%s/bin/kgen' % self.KGEN_HOME] for kw, kwarg in kwargs.iteritems(): flag = kw.replace('_', '-').replace('UNDERSCORE', '_') cmds.append('%s %s' % (flag, kwarg)) if namepath: cmds.append('%s:%s' % (target, namepath)) else: cmds.append(target) out, err, retcode = run_shcmd(' '.join(cmds), cwd=self.TEST_DIR) # debug #print ' '.join(cmds) #print out if self.LEAVE_TEMP: kgenscript = '%s/kgen_cmds.sh' % kwargs["__outdir"] with open(kgenscript, 'w') as f: f.write('#!/bin/bash\n') f.write('\n') for cmd in cmds[:-1]: f.write(' %s \\\n' % cmd) f.write(' %s' % cmds[-1]) os.chmod(kgenscript, 0755) if not out or out.find('ERROR') >= 0 or out.find( 'CRITICAL') >= 0 or retcode != 0: return False, out, err return True, out, err
def generate(self, myname, result):
    """Extract the RRTMG_LWRAD kernel from the WRF radiation driver."""
    workdir = result['mkdir_task']['workdir']
    tmpsrc = result['download_task']['tmpsrc']
    jobscript = result['download_task']['jobscript']

    # Extraction target: callsite name path inside the radiation driver.
    srcfile = '%s/phys/module_radiation_driver.f90' % tmpsrc
    namepath = 'module_radiation_driver:radiation_driver:RRTMG_LWRAD'

    fc = 'ifort'
    fc_flags = '-O3 -ip -fp-model precise -w -ftz -align all -fno-alias -FR -convert big_endian -xHost -fp-model fast=2 -no-heap-arrays -no-prec-div -no-prec-sqrt -fno-common -xCORE-AVX2'
    prerun_cmds = ';'.join(result['config_task']['prerun'])
    # Kept for parity with other tests; not passed to extract_kernel here.
    prerun_kernel_cmds = ';'.join(result['config_task']['prerun_kernel'])

    # Instantiate the submit script template with the actual run directory.
    rundir = "%s/run" % tmpsrc
    src = "%s/../../submit.sh" % here
    dst = "%s/submit.sh" % rundir
    run_shcmd('sed "s,WORKDIR,%s," %s > %s' % (rundir, src, dst), cwd=tmpsrc)

    passed, out, err = self.extract_kernel(srcfile, namepath, \
        _e='"%s"'%os.path.join(here, "exclude.ini"), \
        __cmd_clean='"cd %s; ./clean"'%tmpsrc, \
        __cmd_build='"cd %s; ./compile em_real"'%tmpsrc, \
        __cmd_run='"cd %s; qsub %s/submit.sh"'%(tmpsrc, jobscript), \
        __kernel_option='FC="%s",FC_FLAGS="%s"'%(fc, fc_flags), \
        __prerun='build="%s",run="%s"'%(prerun_cmds, prerun_cmds), \
        __outdir=workdir)

    result[myname]['stdout'] = out
    result[myname]['stderr'] = err
    if passed:
        # Collect the state files produced by the instrumented run.
        result[myname]['statefiles'] = glob.glob('rrtmg_lwrad.*.*.*')
        self.set_status(result, myname, self.PASSED)
    else:
        result[myname]['statefiles'] = []
        self.set_status(result, myname, self.FAILED,
            'STDOUT: %s\nSTDERR: %s' % (out, err))
    return result
def download(self, myname, result):
    """Ensure a HOMME checkout exists and stage a test-local working copy."""
    systestdir = result['mkdir_task']['sysdir']
    workdir = result['mkdir_task']['workdir']

    #appsrc = '%s/homme_ref'%systestdir
    appsrc = '/glade/u/home/youngsun/apps/homme/trunk'
    if not os.path.exists(appsrc):
        os.mkdir(appsrc)

    # Probe for an existing Subversion working copy; check out if missing.
    out, err, retcode = run_shcmd('svn info | grep URL', cwd=appsrc)
    if retcode != 0 or not out or len(out) < 3 or not out.startswith('URL'):
        #out, err, retcode = run_shcmd('svn checkout -r 4971 https://svn-homme-model.cgd.ucar.edu/trunk/ .', cwd=appsrc) # r 4971 has broken pio external link
        #out, err, retcode = run_shcmd('svn checkout -r 5438 https://svn-homme-model.cgd.ucar.edu/trunk/ .', cwd=appsrc)
        out, err, retcode = run_shcmd(
            'svn checkout -r 5704 https://svn-homme-model.cgd.ucar.edu/trunk/ .',
            cwd=appsrc)
        #out, err, retcode = run_shcmd('svn checkout -r 5650 https://svn-homme-model.cgd.ucar.edu/branch_tags/dungeon_tags/dungeon06 .', cwd=appsrc)

    # copy homme src into test specific src dir
    tmpsrc = '%s/homme_work' % systestdir
    if not os.path.exists(tmpsrc):
        # First run: take a full copy of the shared checkout.
        shutil.copytree(appsrc, tmpsrc)
    else:
        # Working copy already exists: restore each '<file>.kgen' backup
        # over its (possibly instrumented) original in src, then src/share.
        for subdir in ('%s/src' % tmpsrc, '%s/src/share' % tmpsrc):
            for fname in os.listdir(subdir):
                if fname.endswith('.kgen'):
                    shutil.copyfile(os.path.join(subdir, fname),
                                    os.path.join(subdir, fname[:-5]))

    result[myname]['appsrc'] = appsrc
    result[myname]['tmpsrc'] = tmpsrc
    self.set_status(result, myname, self.PASSED)
    return result
def recover(self, myname, result):
    """Restore original sources from local backups via the state Makefile."""
    statedir = '%s/state' % result['mkdir_task']['workdir']
    # Best effort: the task is marked PASSED regardless of the make result.
    out, err, retcode = run_shcmd('make recover_from_locals', cwd=statedir)
    self.set_status(result, myname, self.PASSED)
    return result
def preprocess(self, myname, result):
    """Check that squeue output looks like NCAR's Cheyenne (dav/hpss queues)."""
    out, err, retcode = run_shcmd('squeue')
    ok = retcode == 0 and (out.find('dav') >= 0 or out.find('hpss') >= 0)
    if ok:
        self.set_status(result, myname, self.PASSED)
    else:
        self.set_status(result, myname, self.FAILED,
                        'Current system is not Cheyenne of NCAR')
    return result
def preprocess(self, myname, result):
    """Check that sinfo lists the partitions expected on this NCAR system."""
    out, err, retcode = run_shcmd('sinfo -a')
    required = ('caldera', 'geyser', 'casper')
    ok = retcode == 0 and all(out.find(name) >= 0 for name in required)
    if ok:
        self.set_status(result, myname, self.PASSED)
    else:
        self.set_status(result, myname, self.FAILED,
                        'Current system is not Cheyenne of NCAR')
    return result
def preprocess(self, myname, result):
    """Check that sinfo lists the queues expected on NERSC's Edison."""
    out, err, retcode = run_shcmd("sinfo | cut -d ' ' -f 1")
    required = ('debug', 'realtime', 'special', 'shared')
    ok = retcode == 0 and all(out.find(name) >= 0 for name in required)
    if ok:
        self.set_status(result, myname, self.PASSED)
    else:
        self.set_status(result, myname, self.FAILED,
                        'Current system is not Edison of NERSC')
    return result
def preprocess(self, myname, result):
    """Check that bqueues lists the LSF queues expected on Yellowstone."""
    out, err, retcode = run_shcmd('bqueues')
    required = ('caldera', 'geyser', 'regular', 'premium')
    ok = retcode == 0 and all(out.find(name) >= 0 for name in required)
    if ok:
        self.set_status(result, myname, self.PASSED)
    else:
        self.set_status(result, myname, self.FAILED,
                        'Current system is not Yellowstone of NCAR')
    return result
def download(self, myname, result):
    """Ensure a CESM 2.0.0 git clone (with externals) and a test working copy."""
    systestdir = result['mkdir_task']['sysdir']
    workdir = result['mkdir_task']['workdir']

    #appsrc = '%s/cesm_ref'%systestdir
    cesmsrc = '/glade/u/home/youngsun/apps/cesm'
    appsrc = cesmsrc + '/cesm2.0.0'
    if not os.path.exists(appsrc):
        os.mkdir(appsrc)

    # A usable clone is detected by the presence of "cesm" release tags;
    # otherwise clone, check out the release tag, and fetch externals.
    out, err, retcode = run_shcmd('git tag | grep "cesm"', cwd=appsrc)
    if retcode != 0 or not out or len(out) < 3 or not out.startswith('cesm'):
        out, err, retcode = run_shcmd(
            'git clone https://github.com/ESCOMP/cesm.git cesm2.0.0',
            cwd=cesmsrc)
        out, err, retcode = run_shcmd('git checkout release-cesm2.0.0',
                                      cwd=appsrc)
        out, err, retcode = run_shcmd(
            './manage_externals/checkout_externals', cwd=appsrc)

    # copy cesm src into test specific src dir (only if not already present)
    tmpsrc = '%s/cesm_work' % systestdir
    if not os.path.exists(tmpsrc):
        shutil.copytree(appsrc, tmpsrc)

    result[myname]['appsrc'] = appsrc
    result[myname]['tmpsrc'] = tmpsrc

    # Stage the optional exclude.ini into the work directory.
    excludepath = os.path.join(self.TEST_DIR, 'exclude.ini')
    if os.path.exists(excludepath):
        shutil.copy(excludepath, workdir)

    self.set_status(result, myname, self.PASSED)
    return result
def runkernel(self, myname, result):
    """Build and run the extracted kernel; record its stdout/stderr."""
    kerneldir = '%s/kernel' % result['mkdir_task']['workdir']
    out, err, retcode = run_shcmd('make clean; make run', cwd=kerneldir)
    result[myname]['stdout'] = out
    result[myname]['stderr'] = err
    if retcode == 0:
        self.set_status(result, myname, self.PASSED)
    else:
        self.set_status(result, myname, self.FAILED,
                        errmsg='kernel execution is failed: %s'%err)
    return result
def test_run(extractor):
    """Extract a kernel, build it, and assert its self-verification passed."""
    extractor.run()

    # build and run the generated kernel
    kerneldir = '%s/%s'%(Config.path['outdir'], Config.path['kernel'])
    out, err, retcode = run_shcmd('make', cwd=kerneldir)

    # The build/run must succeed and report PASSED without any FAILED line.
    assert retcode == 0
    outlines = out.split('\n')
    assert not any(line.find('Verification FAILED') >= 0 for line in outlines)
    assert any(line.find('Verification PASSED') >= 0 for line in outlines)
def extract_kernel(self, target, namepath, *args, **kwargs): outdir = '.' cmds = ['%s/bin/kgen' % self.KGEN_HOME] for kw, kwarg in kwargs.iteritems(): flag = kw.replace('_', '-').replace('UNDERSCORE', '_') cmds.append('%s %s' % (flag, kwarg)) if flag == '--outdir': outdir = kwarg if namepath: cmds.append('%s:%s' % (target, namepath)) else: cmds.append(target) for arg in args: cmds.append(arg) if self.LEAVE_TEMP: with open('%s/kgen_cmds.sh' % outdir, 'w') as f: f.write('#!/bin/bash\n') f.write('\n') for cmd in cmds[:-1]: f.write(' %s \\\n' % cmd) f.write(' %s' % cmds[-1]) os.chmod('%s/kgen_cmds.sh' % outdir, 0755) out, err, retcode = run_shcmd(' '.join(cmds)) # debug #print 'CMDS: ', ' '.join(cmds) #print 'STDOUT: ', out #print 'STDERR: ', err #print 'RETCODE: ', retcode if not out or out.find('ERROR') >= 0 or out.find( 'CRITICAL') >= 0 or err.lower().find( 'error') >= 0 or retcode != 0: return False, out, err return True, out, err
# NOTE(review): this block's original indentation was lost (whitespace-mangled
# source); the code below is kept byte-identical because the scope of the big
# rebuild guard ('all'/'extract' in Config.rebuild) is ambiguous without it.
# Consult version control before re-indenting.
#
# Purpose, from the visible calls: KExtract driver. It
#   1. creates the kernel/ and state/ output directories,
#   2. builds a kgen_driver program node and instruments every source file
#      that carries state geninfo (genkobj/gensobj pairs),
#   3. runs the 'ext*' plugins through the created/process/finalize/flatten
#      phases over all generated nodes,
#   4. writes the kernel and state source files, kgen_utils, the timing
#      profiler (TPROF) and both Makefiles,
#   5. cleans the app, then runs 'make' and 'make recover' in the state
#      directory to build/run the instrumented application.
def run(self): self._trees = [] self.genfiles = [] kgutils.logger.info('Starting KExtract') # clear shared resources Config.used_srcfiles.clear() # create kernel directory if not os.path.exists('%s/%s' % (Config.path['outdir'], Config.path['kernel'])): os.makedirs('%s/%s' % (Config.path['outdir'], Config.path['kernel'])) # create state directory if not os.path.exists('%s/%s' % (Config.path['outdir'], Config.path['state'])): os.makedirs('%s/%s' % (Config.path['outdir'], Config.path['state'])) # generate kernel and instrumentation if 'all' in Config.rebuild or 'extract' in Config.rebuild or \ not os.path.exists('%s/%s/Makefile'%(Config.path['outdir'], Config.path['state'])) or \ len(glob.glob('%s/%s.*'%(Config.path['outdir'], Config.kernel['name']))) == 0: # generate kgen_driver.f90 in kernel directory driver = create_rootnode(KERNEL_ID_0) self._trees.append(driver) program = create_programnode(driver, KERNEL_ID_0) program.name = Config.kernel_driver['name'] append_program_in_root(driver, program) # generate instrumentation for filepath, (srcobj, mods_used, units_used) in Config.srcfiles.iteritems(): if hasattr(srcobj.tree, 'geninfo') and KGGenType.has_state( srcobj.tree.geninfo): kfile = genkobj(None, srcobj.tree, KERNEL_ID_0) sfile = gensobj(None, srcobj.tree, KERNEL_ID_0) sfile.kgen_stmt.used4genstate = False if kfile is None or sfile is None: raise kgutils.ProgramException( 'Kernel source file is not generated for %s.' 
% filepath) self.genfiles.append((kfile, sfile, filepath)) Config.used_srcfiles[filepath] = (kfile, sfile, mods_used, units_used) # process each nodes in the tree for plugin_name in event_register.keys(): if not plugin_name.startswith('ext'): continue for kfile, sfile, filepath in self.genfiles: kfile.created([plugin_name]) sfile.created([plugin_name]) for tree in self._trees: tree.created([plugin_name]) for plugin_name in event_register.keys(): if not plugin_name.startswith('ext'): continue for kfile, sfile, filepath in self.genfiles: kfile.process([plugin_name]) sfile.process([plugin_name]) for tree in self._trees: tree.process([plugin_name]) for plugin_name in event_register.keys(): if not plugin_name.startswith('ext'): continue for kfile, sfile, filepath in self.genfiles: kfile.finalize([plugin_name]) sfile.finalize([plugin_name]) for tree in self._trees: tree.finalize([plugin_name]) for plugin_name in event_register.keys(): if not plugin_name.startswith('ext'): continue for kfile, sfile, filepath in self.genfiles: kfile.flatten(KERNEL_ID_0, [plugin_name]) sfile.flatten(KERNEL_ID_0, [plugin_name]) for tree in self._trees: tree.flatten(KERNEL_ID_0, [plugin_name]) # generate source files from each node of the tree kernel_files = [] state_files = [] for kfile, sfile, filepath in self.genfiles: filename = os.path.basename(filepath) set_indent('') klines = kfile.tostring() if klines is not None: klines = kgutils.remove_multiblanklines(klines) kernel_files.append(filename) with open( '%s/%s/%s' % (Config.path['outdir'], Config.path['kernel'], filename), 'wb') as fd: fd.write(klines) if sfile.kgen_stmt.used4genstate: set_indent('') slines = sfile.tostring() if slines is not None: slines = kgutils.remove_multiblanklines(slines) state_files.append(filename) with open( '%s/%s/%s' % (Config.path['outdir'], Config.path['state'], filename), 'wb') as fd: fd.write(slines) with open( '%s/%s/%s' % (Config.path['outdir'], Config.path['kernel'], '%s.f90' % 
Config.kernel_driver['name']), 'wb') as fd: set_indent('') lines = driver.tostring() if lines is not None: lines = kgutils.remove_multiblanklines(lines) fd.write(lines) kernel_files.append(Config.kernel['name']) kgutils.logger.info( 'Kernel generation and instrumentation is completed.') # generate kgen_utils.f90 in kernel directory kernel_files.append(KGUTIL) self.generate_kgen_utils() shutil.copyfile('%s/%s'%(os.path.dirname(os.path.realpath(__file__)), TPROF), \ '%s/%s/%s'%(Config.path['outdir'], Config.path['kernel'], TPROF)) kernel_files.append(TPROF) self.generate_kernel_makefile() kernel_files.append('Makefile') self.generate_state_makefile() state_files.append('Makefile') kgutils.logger.info('Makefiles are generated') # TODO: wait until state data generation is completed # use -K option for bsub to wait for job completion # clean app if Config.cmd_clean['cmds']: kgutils.run_shcmd(Config.cmd_clean['cmds']) if Config.state_switch['clean']: kgutils.run_shcmd(Config.state_switch['clean']) # build and run app with state instrumentation kgutils.logger.info( 'Application is being built/run with state generation instrumentation.' ) out, err, retcode = kgutils.run_shcmd( 'make', cwd='%s/%s' % (Config.path['outdir'], Config.path['state'])) out, err, retcode = kgutils.run_shcmd( 'make recover', cwd='%s/%s' % (Config.path['outdir'], Config.path['state'])) if Config.state_switch['clean']: kgutils.run_shcmd(Config.state_switch['clean']) kgutils.logger.info('Application built/run is finished.')
# NOTE(review): original indentation was lost (whitespace-mangled source); the
# code is kept byte-identical because the nesting of the preprocessing
# with/if/else section is ambiguous without it. Consult version control before
# re-indenting.
#
# Purpose, from the visible calls: source-file constructor. It
#   1. resolves per-file isfree/isstrict source-form settings from Config,
#   2. assembles include paths and -D macro definitions (per-file + common,
#      plus the MPI header directory when Config.mpi['header'] is a file),
#   3. runs fpp or cpp over the file contents (lines beginning with '#' in
#      the preprocessor output are tagged with a '!KGEN' prefix), or splits
#      the raw text when preprocess is False,
#   4. parses the result with fparser (api.parse) and then parse_f2003 on
#      every statement,
#   5. registers modules, program units, and this file in Config.modules /
#      Config.program_units / Config.srcfiles, then runs process_directive().
# NOTE(review): 'lineno'/'linediff' are assigned but never used in this block.
def __init__(self, srcpath, preprocess=True): # set default values self.tree = None self.srcpath = srcpath self.realpath = os.path.realpath(self.srcpath) # set source file format isfree = None isstrict = None if self.realpath in Config.source['file'].keys(): if Config.source['file'][self.realpath].has_key('isfree'): isfree = Config.source['file'][self.realpath]['isfree'] if Config.source['file'][self.realpath].has_key('isstrict'): isstrict = Config.source['file'][self.realpath]['isstrict'] else: isstrict = Config.source['isstrict'] isfree = Config.source['isfree'] # prepare include paths and macro definitions path_src = [] macros_src = [] if Config.include['file'].has_key(self.realpath): path_src = Config.include['file'][self.realpath]['path'] + [ os.path.dirname(self.realpath) ] path_src = [path for path in path_src if len(path) > 0] for k, v in Config.include['file'][ self.realpath]['macro'].iteritems(): if v is not None: macros_src.append('-D%s=%s' % (k, v)) else: macros_src.append('-D%s' % k) if os.path.isfile(Config.mpi['header']): includes = [ '-I %s' % incpath for incpath in [os.path.dirname(Config.mpi['header'])] + Config.include['path'] + path_src ] else: includes = [ '-I %s' % incpath for incpath in Config.include['path'] + path_src ] macros_common = [] for k, v in Config.include['macro'].iteritems(): if v: macros_common.append('-D%s=%s' % (k, v)) else: macros_common.append('-D%s' % k) macros = ' '.join(macros_common + macros_src) # execute preprocessing logger.info('Reading %s' % self.srcpath) new_lines = [] with open(self.realpath, 'r') as f: if preprocess: pp = Config.bin['pp'] if pp.endswith('fpp'): if isfree is None or isfree: srcfmt = ' -free' else: srcfmt = ' -fixed' flags = Config.bin['fpp_flags'] + srcfmt elif pp.endswith('cpp'): flags = Config.bin['cpp_flags'] else: raise UserException( 'Preprocessor is not either fpp or cpp') output, err, retcode = kgutils.run_shcmd( '%s %s %s %s' % (pp, flags, ' '.join(includes), macros), input=f.read()) prep 
= map(lambda l: '!KGEN' + l if l.startswith('#') else l, output.split('\n')) new_lines = self.handle_include(prep) else: new_lines = f.read().split('\n') # add include paths include_dirs = Config.include['path'][:] if Config.include['file'].has_key( self.realpath) and Config.include['file'][ self.realpath].has_key('path'): include_dirs.extend(Config.include['file'][self.realpath]['path']) include_dirs.append(os.path.dirname(self.realpath)) # fparse self.tree = api.parse('\n'.join(new_lines), ignore_comments=False, analyze=True, isfree=isfree, \ isstrict=isstrict, include_dirs=include_dirs, source_only=None ) self.tree.prep = new_lines # parse f2003 lineno = 0 linediff = 0 for stmt, depth in api.walk(self.tree, -1): stmt.parse_f2003() # rename reader.id self.tree.reader.id = self.realpath # collect module information for mod_name, mod_stmt in self.tree.a.module.iteritems(): if not Config.modules.has_key(mod_name): Config.modules[mod_name] = collections.OrderedDict() Config.modules[mod_name]['stmt'] = mod_stmt Config.modules[mod_name]['file'] = self Config.modules[mod_name]['path'] = self.realpath # collect program unit information for item in self.tree.content: if item.__class__ not in [Module, Comment, Program]: if item.reader.id not in Config.program_units.keys(): Config.program_units[item.reader.id] = [] Config.program_units[item.reader.id].append(item) # create a tuple for file dependency Config.srcfiles[self.realpath] = (self, [], []) self.process_directive()
def genstate(self, myname, result):
    """Wait for the 'KGCALC' SLURM job to finish and record pass/fail.

    When state data is being reused no job was submitted, so the task
    passes immediately.
    """
    workdir = result['mkdir_task']['workdir']
    reuse_data = result['mkdir_task']['reuse_data']

    if not reuse_data:
        # find jobid: poll squeue for up to ~10 minutes for the job to appear
        jobid = None
        for _ in range(120):
            time.sleep(5)
            out, err, retcode = run_shcmd('squeue -u %s' % getpass.getuser())
            for line in out.split('\n'):
                items = line.split()
                if any(item == 'KGCALC' for item in items):
                    #if len(items)>6 and items[6].endswith('KHOMME'):
                    jobid = items[0]
                    break
            if jobid:
                break
        if jobid is None:
            self.set_status(result, myname, self.FAILED, errmsg='Job id is not found.')
            return result

        # Poll until the job reaches a terminal SLURM state, or it leaves
        # the queue (squeue then prints only its header line).
        terminal_states = ['BF', 'CA', 'CD', 'F', 'NF', 'PR', 'SE', 'ST', 'TO']
        status = ''
        maxiter = 3600
        niter = 0
        while status not in terminal_states:
            time.sleep(1)
            out, err, retcode = run_shcmd('squeue -j %s' % jobid)
            if retcode == 0:
                lines = out.strip().split('\n')
                if len(lines) == 1:
                    # Job no longer listed: it already completed and was
                    # purged from the queue; previously this left status ''.
                    status = 'CD'
                    break
                for line in lines[1:]:
                    items = line.split()
                    if len(items) > 9:
                        if items[0] == jobid:
                            status = items[9]
                    else:
                        print('DEBUG: ', out, err, retcode)
                        break
            niter += 1
            if niter >= maxiter:
                break

        # BUGFIX: the original "status == 'F' or 'CD' or 'CG'" was always true
        # (non-empty string literals are truthy), so every outcome PASSED.
        # NOTE(review): 'F' (failed) is kept in the pass set to match the
        # original author's apparent intent -- TODO confirm.
        if status in ('F', 'CD', 'CG'):
            self.set_status(result, myname, self.PASSED)
        else:
            self.set_status(
                result, myname, self.FAILED,
                errmsg='Job completion status is not expected.')
    else:
        self.set_status(result, myname, self.PASSED)
    return result
def genstate(self, myname, result):
    """Wait for the 'KGENMPAS' LSF job to finish and record pass/fail.

    When state data is being reused no job was submitted, so the task
    passes immediately.
    """
    workdir = result['mkdir_task']['workdir']
    reuse_data = result['mkdir_task']['reuse_data']

    if not reuse_data:
        # find jobid: poll bjobs for up to ~10 minutes for the job to appear
        jobid = None
        for _ in range(120):
            #print('Waiting for "KGENMPAS" job to be initiated.')
            time.sleep(5)
            out, err, retcode = run_shcmd('bjobs')
            for line in out.split('\n'):
                items = line.split()
                if any(item == 'KGENMPAS' for item in items):
                    jobid = items[0]
                    #print('"KGENMPAS" job is initiated.')
                    break
            if jobid:
                break
        if jobid is None:
            self.set_status(result, myname, self.FAILED, errmsg='Job id is not found.')
            return result

        # Poll until the job reaches a terminal LSF state, or bjobs reports
        # it is no longer found ('FINISHED'), or we give up after maxiter.
        terminal_states = ['DONE', 'PSUSP', 'USUSP', 'SSUSP', 'EXIT',
                           'UNKWN', 'ZOMBI', 'FINISHED']
        status = ''
        maxiter = 3600
        niter = 0
        sleep_sec = 1
        while status not in terminal_states:
            #if iter % 10 == 0:
            #    print('Waiting for "KGENMPAS" job to be finished. %d seconds has been passed.'%iter*sleep_sec)
            time.sleep(sleep_sec)
            out, err, retcode = run_shcmd('bjobs %s' % jobid)
            if retcode == 0:
                for line in out.split('\n'):
                    items = line.split()
                    if len(items) > 3 and items[0] == jobid:
                        status = items[2]
                    elif len(items) > 0 and items[-1] == 'found':
                        print('"KGENMPAS" job is finished.')
                        status = 'FINISHED'
            else:
                print('DEBUG: ', out, err, retcode)
            niter += 1
            if niter >= maxiter:
                break

        # BUGFIX: "status == 'DONE' or 'FINISHED'" was always true (non-empty
        # literal), so EXIT/ZOMBI/timeout were reported as PASSED.
        if status in ('DONE', 'FINISHED'):
            self.set_status(result, myname, self.PASSED)
        else:
            self.set_status(
                result, myname, self.FAILED,
                errmsg='Job completion status is not expected.')
    else:
        self.set_status(result, myname, self.PASSED)
    return result
# NOTE(review): original indentation was lost (whitespace-mangled source); the
# code is kept byte-identical because the nesting of the rebuild guard and the
# job-script write is ambiguous without it. Consult version control before
# re-indenting.
#
# Purpose, from the visible calls: HOMME perfTestWACCM case configuration. It
#   1. creates bld/ and run/ work directories and a movies/ subdirectory,
#   2. symlinks the vcoord data and copies the perfTestWACCM-ne8 namelist and
#      exclude.ini from the test's config area,
#   3. records prerun command lists (config/build/run/kernel) and the mpirun
#      launcher in *result*,
#   4. writes an LSF job submit script (homme.submit) from job_script,
#   5. when a rebuild is needed, runs cmake with PGI wrapper compilers and
#      NetCDF/PNetCDF/HDF5/SZIP paths (optionally saving config_cmds.sh when
#      LEAVE_TEMP is set) and fails the task if cmake returns non-zero.
def config(self, myname, result): workdir = result['mkdir_task']['workdir'] systestdir = result['mkdir_task']['sysdir'] tmpsrc = result['download_task']['tmpsrc'] blddir = '%s/bld'%workdir if not os.path.exists(blddir): os.mkdir(blddir) rundir = '%s/run'%workdir if not os.path.exists(rundir): os.mkdir(rundir) result[myname]['blddir'] = blddir result[myname]['rundir'] = rundir # setup if not os.path.exists('%s/movies'%rundir): os.mkdir('%s/movies'%rundir) if os.path.exists('%s/vcoord'%rundir): os.system('rm -f %s/vcoord'%rundir) #os.system('ln -s %s/test/vcoord %s/vcoord'%(tmpsrc, rundir)) os.system('ln -s %s/../../config/perfTestWACCM_64_1_4/vcoord %s/vcoord'%(self.TEST_DIR, rundir)) # create namelist if os.path.exists('%s/perfTestWACCM.nl'%rundir): os.remove('%s/perfTestWACCM.nl'%rundir) #shutil.copy('%s/test/reg_test/namelists/perfTestWACCM.nl'%tmpsrc, rundir) shutil.copy('%s/../../config/perfTestWACCM_64_1_4/perfTestWACCM-ne8.nl'%self.TEST_DIR, rundir) #if os.path.exists('%s/camBench.nl'%rundir): # os.remove('%s/camBench.nl'%rundir) #shutil.copy('%s/test/perftest/camBench.nl'%tmpsrc, rundir) # copy exclude.ini if os.path.exists('%s/exclude.ini'%workdir): os.remove('%s/exclude.ini'%workdir) shutil.copy('%s/exclude.ini'%self.TEST_DIR, workdir) result[myname]['prerun_config'] = self.get_prerun_cmds() + ['rm -rf CMakeFiles CMakeCache.txt'] result[myname]['prerun_build'] = self.get_prerun_cmds() result[myname]['prerun_run'] = self.get_prerun_cmds() + ['export OMP_NUM_THREADS=2', \ 'ulimit -s unlimited', 'export LD_LIBRARY_PATH=${NETCDF}/lib:${LD_LIBRARY_PATH}' ] result[myname]['prerun_kernel'] = self.get_prerun_kernel_cmds() result[myname]['mpirun'] = 'mpirun.lsf' #'export LD_LIBRARY_PATH=$NETCDF/lib:/glade/apps/opt/hdf5/1.8.12/intel/12.1.5/lib:$LD_LIBRARY_PATH', # create job submit script with open('%s/homme.submit'%rundir, 'w') as fd: #fd.write(job_script%('16', '16', '\n'.join(result[myname]['prerun_run']), result[myname]['mpirun'], 
'%s/test_execs/perfTest/perfTest'%blddir, '%s/camBench.nl'%rundir)) fd.write(job_script%('16', '16', '\n'.join(result[myname]['prerun_run']), result[myname]['mpirun'], '%s/test_execs/perfTestWACCM/perfTestWACCM'%blddir, '%s/perfTestWACCM-ne8.nl'%rundir)) if self.REBUILD or not os.path.exists(blddir) or len([name for name in os.listdir(blddir) if os.path.isfile(os.path.join(blddir, name))])==0: # prepare prerun command prerun_cmds = result[myname]['prerun_config'] # prepare cmake command cmake_cmd = ['cmake'] cmake_cmd.append('-DHOMME_PROJID="NTDD0004"') cmake_cmd.append('-DENABLE_PERFTEST=TRUE') cmake_cmd.append('-DENABLE_OPENMP=TRUE') cmake_cmd.append('-DUSE_MPIEXEC="mpirun.lsf"') cmake_cmd.append('-DCMAKE_C_COMPILER="/glade/apps/opt/modulefiles/ys/cmpwrappers/mpipcc"') cmake_cmd.append('-DCMAKE_Fortran_COMPILER="/glade/apps/opt/modulefiles/ys/cmpwrappers/mpipf90"') cmake_cmd.append('-DNETCDF_DIR:PATH=$NETCDF') cmake_cmd.append('-DPNETCDF_DIR:PATH=$PNETCDF') cmake_cmd.append('-DHDF5_DIR:PATH=/glade/apps/opt/hdf5/1.8.12/pgi/13.3') cmake_cmd.append('-DSZIP_DIR:PATH=/glade/apps/opt/szip/2.1/intel/12.1.5') cmake_cmd.append(tmpsrc) if self.LEAVE_TEMP: with open('%s/config_cmds.sh'%blddir, 'w') as f: f.write('#!/bin/bash\n') f.write('\n') for cmd in prerun_cmds: f.write(' %s\n'%cmd) for cmd in cmake_cmd[:-1]: f.write(' %s \\\n'%cmd) f.write(' %s &> config.log'%cmake_cmd[-1]) os.chmod('%s/config_cmds.sh'%blddir, 0755) out, err, retcode = run_shcmd('%s; %s'%('; '.join(prerun_cmds), ' '.join(cmake_cmd)), cwd=blddir) if retcode != 0: self.set_status(result, myname, self.FAILED, errmsg=err) return result # include.ini was created manually self.set_status(result, myname, self.PASSED) return result
def genstate(self, myname, result):
    """Confirm the CESM case job was submitted (PBS) and mark the task passed.

    NOTE(review): the status-polling loop was commented out in the original,
    and the final test "status == 'DONE' or 'FINISHED'" was a tautology
    (a non-empty string literal is always truthy), so this task always
    PASSED once the job id was found.  That behavior is preserved here but
    made explicit instead of relying on the accidental always-true test.
    """
    casedir = result['config_task']['casedir']
    casename = result['config_task']['casename']
    workdir = result['mkdir_task']['workdir']
    reuse_data = result['mkdir_task']['reuse_data']

    if not reuse_data:
        # find jobid: poll qstat for up to ~10 minutes for the case job
        jobid = None
        for _ in range(120):
            time.sleep(5)
            out, err, retcode = run_shcmd('qstat -u %s' % getpass.getuser())
            for line in out.split('\n'):
                items = line.split()
                if any(item == casename for item in items):
                    #if len(items)>6 and items[6].endswith(casename):
                    jobid = items[0]
                    break
            if jobid:
                break
        if jobid is None:
            self.set_status(result, myname, self.FAILED, errmsg='Job id is not found.')
            return result

        # TODO: restore job-completion polling (the original loop is in
        # version control, commented out) and fail the task when the job
        # ends in an error state.
        self.set_status(result, myname, self.PASSED)
    else:
        self.set_status(result, myname, self.PASSED)
    return result
def run(self):
    """Build (or reuse) the KGen code-coverage model.

    Two top-level phases, chosen by whether a coverage model already exists:
      1. No model (or rebuild requested): instrument the source tree with
         coverage probes, build/run it to produce raw data, then aggregate the
         raw data (in parallel worker processes) into the model file and
         annotated ``.coverage`` source listings.
      2. Model exists: parse the model file back and select the set of
         (MPI rank, OpenMP thread, invocation) triples that covers the
         configured percentage of conditional blocks.
    Side effects: writes under Config.path['outdir'], runs `make`, and
    appends the selected triples to Config.invocation['triples'].
    """
    self.genfiles = []
    kgutils.logger.info('Starting KCover')
    model_realpath = os.path.realpath(
        '%s/%s' % (Config.path['outdir'], Config.path['model']))
    coverage_realpath = os.path.realpath(
        '%s/%s' % (Config.path['outdir'], Config.path['coverage']))
    if not os.path.exists(coverage_realpath):
        os.makedirs(coverage_realpath)
    # clear shared resources
    Config.used_srcfiles.clear()
    if not self.hasmodel('coverage') or 'all' in Config.rebuild or 'coverage' in Config.rebuild:
        #if not os.path.exists('%s/%s'%(Config.path['outdir'], Config.modelfile)) or 'all' in Config.rebuild or 'coverage' in Config.rebuild:
        data_coverage_path = '%s/__data__/%s' % (
            model_realpath, Config.model['types']['code']['id'])
        # NOTE(review): "> 1" presumably means more than just bookkeeping files
        # must be present for the raw data to be reusable — confirm.
        if os.path.exists(data_coverage_path) and len(
                glob.glob('%s/*' % data_coverage_path)
                ) > 1 and Config.model['reuse_rawdata']:
            kgutils.logger.info('Reusing coverage raw data.')
        else:
            kgutils.logger.info('Generating coverage raw data.')
            # start from clean data and resource directories
            if os.path.exists(data_coverage_path):
                shutil.rmtree(data_coverage_path)
            os.makedirs(data_coverage_path)
            if os.path.exists(
                    '%s/__data__/__resource__/%s' %
                    (model_realpath, Config.model['types']['code']['id'])):
                shutil.rmtree(
                    '%s/__data__/__resource__/%s' %
                    (model_realpath, Config.model['types']['code']['id']))
            os.makedirs(
                '%s/__data__/__resource__/%s' %
                (model_realpath, Config.model['types']['code']['id']))
            # generate wrapper nodes
            for filepath, (srcobj, mods_used,
                           units_used) in Config.srcfiles.iteritems():
                if hasattr(srcobj.tree, 'geninfo') and KGGenType.has_state(
                        srcobj.tree.geninfo):
                    sfile = gensobj(None, srcobj.tree, KERNEL_ID_0)
                    # only the callsite file is instrumented for coverage output
                    if filepath == Config.callsite['filepath']:
                        sfile.used4coverage = True
                    else:
                        sfile.used4coverage = False
                    # NOTE(review): this None check is unreachable after the
                    # attribute assignments above; kept as-is.
                    if sfile is None:
                        raise kgutils.ProgramException(
                            'Kernel source file is not generated for %s.'
                            % filepath)
                    self.genfiles.append((sfile, filepath))
                    Config.used_srcfiles[filepath] = (sfile, mods_used,
                                                      units_used)
            # process each nodes in the tree: created -> process -> finalize -> flatten,
            # each pass restricted to coverage plugins
            for plugin_name in event_register.keys():
                if not plugin_name.startswith('cover'):
                    continue
                for sfile, filepath in self.genfiles:
                    sfile.created([plugin_name])
            for plugin_name in event_register.keys():
                if not plugin_name.startswith('cover'):
                    continue
                for sfile, filepath in self.genfiles:
                    sfile.process([plugin_name])
            for plugin_name in event_register.keys():
                if not plugin_name.startswith('cover'):
                    continue
                for sfile, filepath in self.genfiles:
                    sfile.finalize([plugin_name])
            for plugin_name in event_register.keys():
                if not plugin_name.startswith('cover'):
                    continue
                for sfile, filepath in self.genfiles:
                    sfile.flatten(KERNEL_ID_0, [plugin_name])
            # generate source files from each node of the tree
            coverage_files = []
            for sfile, filepath in self.genfiles:
                filename = os.path.basename(filepath)
                if sfile.used4coverage:
                    set_indent('')
                    slines = sfile.tostring()
                    if slines is not None:
                        slines = kgutils.remove_multiblanklines(slines)
                        coverage_files.append(filename)
                        with open('%s/%s' % (coverage_realpath, filename),
                                  'wb') as fd:
                            fd.write(slines)
                        # sidecar ".kgen" file holds the preprocessed source
                        with open(
                                '%s/%s.kgen' % (coverage_realpath, filename),
                                'wb') as ft:
                            ft.write('\n'.join(sfile.kgen_stmt.prep))
            self.gen_makefile()
            kgutils.logger.info(
                'Instrumentation for coverage is generated at %s.'
                % coverage_realpath)
            # TODO: wait until coverage data generation is completed
            # use -K option for bsub to wait for job completion
            # clean app
            if Config.cmd_clean['cmds']:
                kgutils.run_shcmd(Config.cmd_clean['cmds'])
            if Config.state_switch['clean']:
                kgutils.run_shcmd(Config.state_switch['clean'])
            # TEMP
            out, err, retcode = kgutils.run_shcmd('make',
                                                  cwd=coverage_realpath)
            if retcode != 0:
                kgutils.logger.warn(
                    'Coverage raw data is not correctly generated.: %s' % err)
        # aggregate the raw data (whether reused or just generated) into the model
        if os.path.exists(data_coverage_path) and len(
                glob.glob('%s/*' % data_coverage_path)
                ) > 1 and Config.model['reuse_rawdata']:
            kgutils.logger.info('Generating model file: %s/%s' %
                                (Config.path['outdir'], Config.modelfile))
            # "files": fileid -> filepath; "lines": fileid -> {lineid: linenum}
            files = None
            with open('%s/files' % data_coverage_path, 'r') as f:
                files = json.load(f)
            lines = None
            with open('%s/lines' % data_coverage_path, 'r') as f:
                lines = json.load(f)
            if not os.path.exists(
                    '%s/mpi' % data_coverage_path) or not os.path.exists(
                        '%s/openmp' % data_coverage_path):
                kgutils.logger.error(
                    'Coverage raw data is not correct. Please rerun KGen after generating coverage raw data correctly.'
                )
            else:
                # first line of "mpi"/"openmp" files is the rank/thread count
                numranks = None
                with open('%s/mpi' % data_coverage_path, 'r') as f:
                    for idx, line in enumerate(f.read().split('\n')):
                        if idx == 0:
                            numranks = int(line)
                numthreads = None
                # NOTE: numthreads could be smaller than actual number of omp threads as it depends on code regions.
                with open('%s/openmp' % data_coverage_path, 'r') as f:
                    for idx, line in enumerate(f.read().split('\n')):
                        if idx == 0:
                            numthreads = int(line)
                # collect data
                kgutils.logger.info('Collecting raw data.')
                usedfiles = []  # fid
                usedlines = {}  # fid=[linenum, ...]
                mpivisits = {}  # fileid:linenum:mpirank=visits
                ompvisits = {}  # fileid:linenum:omptid=visits
                invokes = {
                }  # mpirank:omptid:invoke=[(fileid, linenum, numvisits), ... ]
                # one data subdirectory per MPI rank
                mpipaths = []
                for item in os.listdir(data_coverage_path):
                    if item.isdigit() and os.path.isdir(
                            os.path.join(data_coverage_path, item)):
                        mpipaths.append((data_coverage_path, item))
                nprocs = min(len(mpipaths), multiprocessing.cpu_count() * 1)
                if nprocs == 0:
                    kgutils.logger.warn(
                        'No coverage data files are found.')
                else:
                    # fan rank-directories out to worker processes via queues
                    workload = [
                        chunk for chunk in chunks(
                            mpipaths,
                            int(math.ceil(
                                len(mpipaths) / nprocs)))
                    ]
                    inqs = []
                    outqs = []
                    for _ in range(nprocs):
                        inqs.append(multiprocessing.Queue())
                        outqs.append(multiprocessing.Queue())
                    procs = []
                    for idx in range(nprocs):
                        proc = multiprocessing.Process(
                            target=readdatafiles,
                            args=(inqs[idx], outqs[idx]))
                        procs.append(proc)
                        proc.start()
                    for inq, chunk in zip(inqs, workload):
                        inq.put(chunk)
                    # merge per-worker partial results
                    for outq in outqs:
                        invoke, usedfile, usedline, mpivisit, ompvisit = outq.get(
                        )
                        update(invokes, invoke)
                        for f in usedfile:
                            if f not in usedfiles:
                                usedfiles.append(f)
                        update(usedlines, usedline)
                        update(mpivisits, mpivisit)
                        update(ompvisits, ompvisit)
                    for idx in range(nprocs):
                        procs[idx].join()
                    if len(invokes) == 0:
                        if not _DEBUG:
                            shutil.rmtree(data_coverage_path)
                        kgutils.logger.warn(
                            'Code coverage data is not collected.')
                    else:
                        kgutils.logger.info(
                            'Adding coverage data into the model file.')
                        try:
                            coverage_sections = [
                                'summary', 'file', 'block', 'invoke'
                            ]
                            self.addmodel('coverage', coverage_sections)
                            # summary section
                            summary = []
                            summary.append(
                                ('number_of_files_having_condblocks',
                                 str(len(files))))
                            summary.append(('number_of_files_invoked',
                                            str(len(usedfiles))))
                            summary.append(
                                ('number_of_condblocks_exist',
                                 str(
                                     sum([
                                         len(lmap)
                                         for fid, lmap in lines.items()
                                     ]))))
                            summary.append(
                                ('number_of_condblocks_invoked',
                                 str(
                                     sum([
                                         len(lids)
                                         for fid, lids in usedlines.items()
                                     ]))))
                            self.addsection('coverage', 'summary', summary)
                            # file section
                            file = []
                            # fd.write('; <file number> = <path to file>\n')
                            for fileid, filepath in files.items():
                                file.append((str(fileid), '%s/%s.kgen\n' %
                                             (coverage_realpath,
                                              os.path.basename(filepath))))
                            file.append(
                                ('used_files',
                                 ', '.join([fid for fid in usedfiles])))
                            self.addsection('coverage', 'file', file)
                            # block section
                            block = []
                            #fd.write('; <file number> = <line number> ...\n')
                            for fileid, lmap in lines.items():
                                block.append((str(fileid), ', '.join(
                                    [lnum for lid, lnum in lmap.items()])))
                            used_line_pairs = []
                            for fid, lids in usedlines.items():
                                for lid in lids:
                                    used_line_pairs.append((fid, lid))
                            block.append(('used_lines', ', '.join([
                                '%s:%s' % (fid, lines[fid][lid])
                                for fid, lid in used_line_pairs
                            ])))
                            self.addsection('coverage', 'block', block)
                            # invoke section
                            invoke = []
                            #fd.write('; <MPI rank> < OpenMP Thread> <invocation order> = <file number>:<line number><num of invocations> ...\n')
                            for ranknum, threadnums in invokes.items():
                                for threadnum, invokenums in threadnums.items(
                                ):
                                    for invokenum, triples in invokenums.items(
                                    ):
                                        invoke.append(
                                            ('%s %s %s' % (ranknum, threadnum, invokenum),
                                             ', '.join([
                                                 '%s:%s:%d' % (fid, lines[fid][lid], nivks)
                                                 for fid, lid, nivks in triples
                                             ])))
                            self.addsection('coverage', 'invoke', invoke)
                            # human-readable summary in the log
                            kgutils.logger.info(
                                ' ***** Within "%s" kernel *****:' %
                                Config.kernel['name'])
                            kgutils.logger.info(
                                ' * %d original source files have conditional blocks.'
                                % len(files))
                            kgutils.logger.info(
                                ' * %d original source files are invoked at least once.'
                                % len(usedfiles))
                            kgutils.logger.info(' * %d conditional blocks exist in the original source files.' %
                                                sum([len(lmap) for fid, lmap in lines.items()]))
                            kgutils.logger.info(' * %d conditional blocks are executed at least once among all the conditional blocks.' %
                                                sum([len(lids) for fid, lids in usedlines.items()]))
                            # write annotated ".coverage" copies of the used sources
                            for fid in usedfiles:
                                basefile = '%s/%s.kgen' % (
                                    coverage_realpath,
                                    os.path.basename(files[fid]))
                                with open(basefile, 'r') as fsrc:
                                    srclines = fsrc.readlines()
                                filesummary = [
                                    '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!',
                                    '!! %d conditional blocks exist in this file' % len(lines[fid]),
                                    '!! %d conditional blokcs are executed at least once among all the conditional blocks.' % len(usedlines[fid]),
                                    '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
                                ]
                                srclines[0] = '%s\n%s\n' % (
                                    '\n'.join(filesummary), srclines[0])
                                for lid in usedlines[fid]:
                                    linevisit = [
                                        '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
                                    ]
                                    linevisit.append(
                                        '!! Total number of visits: %d' %
                                        sum([
                                            visits for rank, visits in
                                            mpivisits[fid][lid].items()
                                        ]))
                                    if Config.mpi['enabled']:
                                        linevisit.append('!! MPI rank(visits) : %s' % ' '.join(
                                            ['%s(%d)' % (r, mpivisits[fid][lid][r]) for r in sorted(mpivisits[fid][lid])]))
                                        #['%s(%d)'%(r,i) for r,i in mpivisits[fid][lid].items()]))
                                    if Config.openmp['enabled']:
                                        linevisit.append('!! OpenMP thread(visits) : %s' % ' '.join(
                                            ['%s(%d)' % (t, ompvisits[fid][lid][t]) for t in sorted(ompvisits[fid][lid])]))
                                        #['%s(%d)'%(t,i) for t,i in ompvisits[fid][lid].items()]))
                                    linevisit.append(
                                        '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
                                    )
                                    srclines[int(lines[fid][lid]) - 1] = '%s%s\n' % (
                                        srclines[int(lines[fid][lid]) - 1],
                                        '\n'.join(linevisit))
                                coveragefile = '%s/%s.coverage' % (
                                    coverage_realpath,
                                    os.path.basename(files[fid]))
                                with open(coveragefile, 'w') as fdst:
                                    fdst.write(''.join(srclines))
                        except Exception as e:
                            kgutils.logger.error(str(e))
        else:
            if not _DEBUG:
                shutil.rmtree(data_coverage_path)
            kgutils.logger.info('failed to generate coverage information')
        # restore the application source tree
        out, err, retcode = kgutils.run_shcmd('make recover',
                                              cwd=coverage_realpath)
        if Config.state_switch['clean']:
            kgutils.run_shcmd(Config.state_switch['clean'])
    else:  # check if coverage should be invoked
        kgutils.logger.info('Reusing KGen coverage file: %s/%s' %
                            (Config.path['outdir'], Config.modelfile))
        # check if coverage data exists in model file
        if not os.path.exists('%s/%s' %
                              (Config.path['outdir'], Config.modelfile)):
            kgutils.logger.warn('No coverage file is found.')
        else:
            # read ini file
            kgutils.logger.info('Reading %s/%s' % (Config.path['outdir'],
                                                   Config.modelfile))
            cfg = configparser.ConfigParser()
            cfg.optionxform = str
            cfg.read('%s/%s' % (Config.path['outdir'], Config.modelfile))
            number_of_files_having_condblocks = int(
                cfg.get('coverage.summary',
                        'number_of_files_having_condblocks'))
            number_of_files_invoked = int(
                cfg.get('coverage.summary', 'number_of_files_invoked'))
            number_of_condblocks_exist = int(
                cfg.get('coverage.summary', 'number_of_condblocks_exist'))
            number_of_condblocks_invoked = int(
                cfg.get('coverage.summary', 'number_of_condblocks_invoked'))
            try:
                filemap = {}
                for opt in cfg.options('coverage.file'):
                    if opt.isdigit():
                        filemap[opt] = cfg.get('coverage.file', opt)
                blockmap = {}
                for opt in cfg.options('coverage.block'):
                    if opt.isdigit():
                        blockmap[opt] = tuple(linenum
                                              for linenum in cfg.get(
                                                  'coverage.block',
                                                  opt).split())
                # <MPI rank> < OpenMP Thread> <invocation order> = <file number>:<line number>:<num invokes> ...
                invokemap = {}
                idx = 0
                for opt in cfg.options('coverage.invoke'):
                    idx += 1
                    ranknum, threadnum, invokenum = tuple(
                        num for num in opt.split())
                    optval = cfg.get('coverage.invoke', opt).split(',')
                    triples = tuple(triple.strip().split(':')
                                    for triple in optval)
                    invokenum = int(invokenum)
                    if invokenum not in invokemap:
                        invokemap[invokenum] = {}
                    if ranknum not in invokemap[invokenum]:
                        invokemap[invokenum][ranknum] = {}
                    if threadnum not in invokemap[invokenum][ranknum]:
                        threadnums = {}
                        invokemap[invokenum][ranknum][threadnum] = threadnums
                    #threadnums = invokemap[invokenum][ranknum][threadnum]
                    for fidstr, lnumstr, numinvokes in triples:
                        fileid = fidstr
                        linenum = lnumstr
                        if fileid not in threadnums:
                            threadnums[fileid] = {}
                        if linenum not in threadnums[fileid]:
                            threadnums[fileid][linenum] = 0
                        threadnums[fileid][linenum] += int(numinvokes)
                    if idx % 100000 == 0:
                        print 'Processed %d items: %s' % (
                            idx, datetime.datetime.now().strftime(
                                "%I:%M%p on %B %d, %Y"))
            except Exception as e:
                raise Exception(
                    'Please check the format of coverage file: %s' % str(e))
            # pick triples until the configured coverage threshold and data
            # count are both satisfied
            THREASHOLD = Config.model['types']['code']['percentage'] / 100.0
            THREASHOLD_NUM = int(
                math.ceil(number_of_condblocks_invoked * THREASHOLD))
            collected = []
            triples = {}
            numC = len(collected)
            numD = Config.model['types']['code']['ndata']
            for invokenum in sorted(invokemap.keys()):
                if numC > THREASHOLD_NUM and numC >= numD:
                    break
                ranknums = invokemap[invokenum]
                for ranknum in ranknums.keys():
                    if numC > THREASHOLD_NUM and numC >= numD:
                        break
                    threadnums = invokemap[invokenum][ranknum]
                    for threadnum in threadnums.keys():
                        invokecount = 0
                        if numC > THREASHOLD_NUM and numC >= numD:
                            break
                        fileids = invokemap[invokenum][ranknum][threadnum]
                        for fileid in fileids.keys():
                            if numC > THREASHOLD_NUM and numC >= numD:
                                break
                            if Config.data['maxnuminvokes'] and \
                                invokecount >= Config.data['maxnuminvokes']:
                                break
                            lnums = invokemap[invokenum][ranknum][threadnum][
                                fileid]
                            for lnum, numinvokes in lnums.items():
                                if numC > THREASHOLD_NUM and numC >= numD:
                                    break
                                if Config.data['maxnuminvokes'] and \
                                    invokecount >= Config.data['maxnuminvokes']:
                                    break
                                if (fileid, lnum,
                                        invokenum) not in collected:
                                    collected.append(
                                        (fileid, lnum, invokenum))
                                    numC = len(collected)
                                    if (ranknum, threadnum,
                                            invokenum) not in triples:
                                        invokecount += 1
                                        triples[(ranknum, threadnum,
                                                 invokenum)] = None
            print 'At least, %s of conditional blocks will be excuted by using following (MPI ranks, OpenMP Threads, Invokes) triples:' % '{:.1%}'.format(
                THREASHOLD)
            print ','.join(
                [':'.join([str(n) for n in t]) for t in triples.keys()])
            #print ''
            #print 'Following (File id, line number) pairs are covered by above triples:'
            #print str(collected)
            for ranknum, threadnum, invokenum in triples.keys():
                Config.invocation['triples'].append(
                    ((str(ranknum), str(ranknum)),
                     (str(threadnum), str(threadnum)),
                     (str(invokenum), str(invokenum))))
def config(self, myname, result):
    """Configure the HOMME performance-test build with cmake.

    Populates result[myname] with build directory, prerun command lists, the
    mpirun launcher name, the job script and namelist templates, then (unless
    a populated build directory already exists and REBUILD is off) runs cmake
    with Intel MPI wrappers.  Returns the updated result dict; task status is
    set to PASSED or FAILED (with cmake's stderr as errmsg).
    """
    workdir = result['mkdir_task']['workdir']
    systestdir = result['mkdir_task']['sysdir']   # NOTE(review): assigned but unused here
    tmpsrc = result['download_task']['tmpsrc']
    blddir = '%s/bld' % workdir
    if not os.path.exists(blddir):
        os.mkdir(blddir)
    result[myname]['blddir'] = blddir
    # per-phase shell command prefixes (module loads etc. from the helpers)
    result[myname]['prerun_config'] = self.get_prerun_cmds() + [
        'rm -rf CMakeFiles CMakeCache.txt'
    ]
    result[myname]['prerun_build'] = self.get_prerun_cmds()
    result[myname]['prerun_run'] = self.get_prerun_cmds() + ['export OMP_NUM_THREADS=2', \
        'export LD_LIBRARY_PATH=$NETCDF/lib:/glade/u/apps/contrib/hdf5-mpi/1.8.12/intel/12.1.5/lib:$LD_LIBRARY_PATH', \
        'ulimit -s unlimited' ]
    result[myname]['prerun_kernel'] = self.get_prerun_kernel_cmds()
    result[myname]['mpirun'] = 'mpirun'
    result[myname]['job_script'] = job_script
    result[myname]['namelist'] = namelist
    # (re)configure only when forced or when the build dir has no files yet
    if self.REBUILD or not os.path.exists(blddir) or len([
            name for name in os.listdir(blddir)
            if os.path.isfile(os.path.join(blddir, name))
    ]) == 0:
        # prepare prerun command
        prerun_cmds = result[myname]['prerun_config']
        # prepare cmake command
        cmake_cmd = ['cmake']
        cmake_cmd.append('-DHOMME_PROJID="NTDD0004"')
        cmake_cmd.append('-DENABLE_PERFTEST=TRUE')
        cmake_cmd.append('-DENABLE_OPENMP=TRUE')
        cmake_cmd.append('-DUSE_MPIEXEC="mpirun"')
        cmake_cmd.append('-DCMAKE_C_COMPILER="mpiicc"')
        cmake_cmd.append('-DCMAKE_CXX_COMPILER="mpiicc"')
        cmake_cmd.append('-DCMAKE_Fortran_COMPILER="mpiifort"')
        cmake_cmd.append('-DNETCDF_DIR:PATH=$NETCDF')
        cmake_cmd.append('-DPNETCDF_DIR:PATH=$PNETCDF')
        cmake_cmd.append(
            '-DHDF5_DIR:PATH=/glade/u/apps/contrib/hdf5-mpi/1.8.12/intel/12.1.5'
        )
        cmake_cmd.append(tmpsrc)
        if self.LEAVE_TEMP:
            # save a reproducible configure script next to the build
            with open('%s/config_cmds.sh' % blddir, 'w') as f:
                f.write('#!/bin/bash\n')
                f.write('\n')
                for cmd in prerun_cmds:
                    f.write('    %s\n' % cmd)
                for cmd in cmake_cmd[:-1]:
                    f.write('    %s \\\n' % cmd)
                f.write('    %s &> config.log' % cmake_cmd[-1])
            os.chmod('%s/config_cmds.sh' % blddir, 0755)
        out, err, retcode = run_shcmd(
            '%s; %s' % ('; '.join(prerun_cmds), ' '.join(cmake_cmd)),
            cwd=blddir)
        if retcode != 0:
            self.set_status(result, myname, self.FAILED, errmsg=err)
            return result
    # include.ini was created manually
    self.set_status(result, myname, self.PASSED)
    return result
def config(self, myname, result):
    """Create and configure the KINTCESM CESM case (MG2 microphysics).

    Creates the case with ``create_newcase`` when needed, appends the MG2
    CAM config options to env_build.xml, runs ``case.setup``, and clears any
    stale files out of SourceMods.  Records srcmods/casedir/casename in
    result[myname] and sets PASSED/FAILED.  Returns the updated result dict.
    """
    workdir = result['mkdir_task']['workdir']
    systestdir = result['mkdir_task']['sysdir']
    tmpsrc = result['download_task']['tmpsrc']
    scriptdir = '%s/cime/scripts' % tmpsrc
    casename = 'KINTCESM'
    casedir = '%s/%s' % (systestdir, casename)
    cesmtmpdir = '/glade/scratch/%s/%s' % (getpass.getuser(), casename)
    result[myname]['cesmtmpdir'] = cesmtmpdir
    datadir = '%s/data' % workdir
    # NOTE: svn co https://svn-ccsm-models.cgd.ucar.edu/cesm1/tags/cesm1_4_beta07/ systestdir/cesm_ref
    if self.REBUILD or not os.path.exists(datadir) or len([name for name in os.listdir(datadir) if os.path.isfile(os.path.join(datadir, name))])==0 or \
        not os.path.exists(casedir):
        # check if project option exists
        if 'project' not in self.OPTIONS:
            self.set_status(
                result,
                myname,
                self.FAILED,
                errmsg=
                '"project" user option is not provided. Use "-o project=<your porject id>"'
            )
            return result
        # create a case
        if not os.path.exists(casedir):
            #import pdb; pdb.set_trace()
            # BUG FIX: rmtree raised OSError on a first run when the scratch
            # dir did not exist yet; remove only if present.
            if os.path.exists(cesmtmpdir):
                shutil.rmtree(cesmtmpdir)
            casecmd = './create_newcase --project %s --mach cheyenne --compset B1850 --res f19_g17 --compiler intel --case %s' % (
                self.OPTIONS['project'], casedir)
            out, err, retcode = run_shcmd(casecmd, cwd=scriptdir)
            if retcode != 0:
                # BUG FIX: guard the cleanup — create_newcase may have failed
                # before creating casedir at all.
                if os.path.exists(casedir):
                    shutil.rmtree(casedir)
                self.set_status(
                    result,
                    myname,
                    self.FAILED,
                    errmsg='MG2 case generation is failed: %s\n\n%s' %
                    (err, out))
                return result
        # modify env_build.xml to enable MG2 (only if not already applied)
        out, err, retcode = run_shcmd('grep mg2 env_build.xml', cwd=casedir)
        if retcode != 0:
            xmlchange = './xmlchange --file env_build.xml --id CAM_CONFIG_OPTS --val "-microphys mg2 -clubb_sgs" --append'
            out, err, retcode = run_shcmd(xmlchange, cwd=casedir)
            if retcode != 0:
                self.set_status(
                    result,
                    myname,
                    self.FAILED,
                    errmsg=
                    'Modification of env_build.xml is failed: %s, %s ' %
                    (err, out))
                return result
        # cesm.setup (skip when case.run already exists)
        if not os.path.exists('%s/case.run' % casedir):
            #import pdb; pdb.set_trace()
            out, err, retcode = run_shcmd('./case.setup', cwd=casedir)
            if retcode != 0:
                self.set_status(result,
                                myname,
                                self.FAILED,
                                errmsg='case.setup is failed: %s\n\n%s' %
                                (err, out))
                return result
        # clear stale source overrides from SourceMods/src.*
        for fname in os.listdir('%s/SourceMods' % casedir):
            if fname.startswith('src.') and os.path.isdir(
                    os.path.join('%s/SourceMods' % casedir, fname)):
                for srcfile in os.listdir('%s/SourceMods/%s' %
                                          (casedir, fname)):
                    if os.path.isfile(
                            os.path.join('%s/SourceMods/%s' %
                                         (casedir, fname), srcfile)):
                        os.remove(
                            os.path.join('%s/SourceMods/%s' %
                                         (casedir, fname), srcfile))
    # include.ini was created manually
    result[myname]['srcmods'] = '%s/SourceMods' % casedir
    result[myname]['casedir'] = casedir
    result[myname]['casename'] = casename
    self.set_status(result, myname, self.PASSED)
    return result
def config(self, myname, result):
    """Create and configure the KINTCESM case on cheyenne (premium queue).

    Creates the case with ``create_newcase`` when needed, enables MG2 via
    xmlchange, sets PBS batch flags, and runs ``case.setup``; then clears
    stale files from SourceMods.  Records srcmods/casedir/casename in
    result[myname] and sets PASSED/FAILED.  Returns the updated result dict.
    """
    workdir = result['mkdir_task']['workdir']
    systestdir = result['mkdir_task']['sysdir']
    tmpsrc = result['download_task']['tmpsrc']
    scriptdir = '%s/cime/scripts' % tmpsrc
    casename = 'KINTCESM'
    casedir = '%s/%s' % (systestdir, casename)
    scratchdir = "/glade/scratch/%s" % os.getenv("USER")
    caseworkdir = "%s/%s" % (scratchdir, casename)
    result[myname]['prerun_kernel'] = self.get_prerun_kernel_cmds()
    datadir = '%s/data' % workdir
    if self.REBUILD or not os.path.exists(datadir) or len([
            name for name in os.listdir(datadir)
            if os.path.isfile(os.path.join(datadir, name))
    ]) == 0:
        # check if project option exists; default to NTDD0004
        if 'project' not in self.OPTIONS:
            self.OPTIONS['project'] = 'NTDD0004'
        # create a case
        if not os.path.exists(casedir):
            run_shcmd("rm -rf " + caseworkdir, cwd=scriptdir)
            casecmd = './create_newcase --project %s --mach cheyenne --compset B1850 --res f19_g17 --compiler intel --queue premium --case %s' % (
                self.OPTIONS['project'], casedir)
            out, err, retcode = run_shcmd(casecmd, cwd=scriptdir)
            if retcode != 0:
                self.set_status(
                    result,
                    myname,
                    self.FAILED,
                    errmsg='KINTCESM case generation is failed: %s\n\n%s' %
                    (err, out))
                return result
        # modify env_build.xml to enable MG2 (only if not already applied)
        out, err, retcode = run_shcmd('grep mg2 env_build.xml', cwd=casedir)
        if retcode != 0:
            xmlchange = './xmlchange CAM_CONFIG_OPTS="-microphys mg2 -clubb_sgs" -a'
            out, err, retcode = run_shcmd(xmlchange, cwd=casedir)
            if retcode != 0:
                # BUG FIX: the original errmsg string had no %s placeholders
                # but was %-formatted with (err, out), raising TypeError at
                # the very moment the real error should be reported.  The
                # message now matches the sibling config() implementation.
                self.set_status(
                    result,
                    myname,
                    self.FAILED,
                    errmsg='Modification of env_build.xml is failed: %s\n\n%s'
                    % (err, out))
                return result
        # PBS flags: block until completion, 16 nodes x 36 ranks
        batch_flags = "-W block=true -N KINTCESM.run -r n -j oe -V -S /bin/bash -l select=16:ncpus=36:mpiprocs=36:ompthreads=1 -l walltime=01:15:00 -A NTDD0004 -q premium"
        out, err, retcode = run_shcmd(
            './xmlchange BATCH_COMMAND_FLAGS="%s"' % batch_flags,
            cwd=casedir)
        # cesm.setup
        out, err, retcode = run_shcmd('./case.setup', cwd=casedir)
        if retcode != 0:
            self.set_status(result,
                            myname,
                            self.FAILED,
                            errmsg='case.setup is failed: %s\n\n%s' %
                            (err, out))
            return result
        # clear stale source overrides from SourceMods/src.*
        for fname in os.listdir('%s/SourceMods' % casedir):
            if fname.startswith('src.') and os.path.isdir(
                    os.path.join('%s/SourceMods' % casedir, fname)):
                for srcfile in os.listdir('%s/SourceMods/%s' %
                                          (casedir, fname)):
                    if os.path.isfile(
                            os.path.join('%s/SourceMods/%s' %
                                         (casedir, fname), srcfile)):
                        os.remove(
                            os.path.join('%s/SourceMods/%s' %
                                         (casedir, fname), srcfile))
    # include.ini was created manually
    result[myname]['srcmods'] = '%s/SourceMods' % casedir
    result[myname]['casedir'] = casedir
    result[myname]['casename'] = casename
    self.set_status(result, myname, self.PASSED)
    return result
def download(self, myname, result):
    """Ensure a pristine CESM checkout exists and copy it to a work tree.

    A reference clone lives in <sysdir>/cesm_ref; it is re-cloned whenever it
    is missing or `git status` reports local changes.  The reference is then
    copied to <sysdir>/cesm_work (only if not already present).  Records both
    paths in result[myname], marks the task PASSED, and returns result.
    """
    systestdir = result['mkdir_task']['sysdir']
    workdir = result['mkdir_task']['workdir']
    appsrc = '%s/cesm_ref' % systestdir

    # decide whether the reference clone must be (re)created
    needs_clone = True
    if os.path.exists(appsrc):
        out, err, retcode = run_shcmd(
            'git status | grep "nothing to commit"', cwd=appsrc)
        clean = (retcode == 0 and out
                 and out.startswith('nothing to commit'))
        if clean:
            needs_clone = False
        else:
            # dirty or unreadable checkout: discard it before re-cloning
            run_shcmd('rm -rf ' + appsrc, cwd=systestdir)

    if needs_clone:
        run_shcmd('git clone https://github.com/escomp/cesm.git cesm_ref',
                  cwd=systestdir)
        run_shcmd('git checkout cesm2_1_alpha01d', cwd=appsrc)
        run_shcmd('./manage_externals/checkout_externals', cwd=appsrc)

    # copy cesm src into test specific src dir (kept if already present)
    tmpsrc = '%s/cesm_work' % systestdir
    if not os.path.exists(tmpsrc):
        shutil.copytree(appsrc, tmpsrc)

    result[myname]['appsrc'] = appsrc
    result[myname]['tmpsrc'] = tmpsrc
    self.set_status(result, myname, self.PASSED)
    return result
def run(self):
    """Build (or reuse) the KGen PAPI-counter model.

    Two top-level phases, chosen by whether a papi model already exists:
      1. No model (or rebuild requested): instrument the source tree with
         PAPI probes, build/run it to produce raw counter data, then
         aggregate the per-rank.thread files (in parallel workers) into the
         model file ('counters' and 'summary' sections).
      2. Model exists: parse it back, histogram the counters into nbins,
         and pick representative (rank, thread, invocation) triples from
         each bin in proportion to its share of samples.
    Side effects: writes under Config.path['outdir'], runs `make`, and
    appends the selected triples to Config.invocation['triples'].
    """
    self.genfiles = []
    kgutils.logger.info('Starting PAPI')
    model_realpath = os.path.realpath(
        '%s/%s' % (Config.path['outdir'], Config.path['model']))
    papi_realpath = os.path.realpath(
        '%s/%s' % (Config.path['outdir'], Config.path['papi']))
    if not os.path.exists(papi_realpath):
        os.makedirs(papi_realpath)
    # clear shared resources
    Config.used_srcfiles.clear()
    if not self.hasmodel(
            'papi') or 'all' in Config.rebuild or 'papi' in Config.rebuild:
        #if not os.path.exists('%s/%s'%(Config.path['outdir'], Config.modelfile)) or 'all' in Config.rebuild or 'coverage' in Config.rebuild:
        data_papi_path = '%s/__data__/%s' % (
            model_realpath, Config.model['types']['papi']['id'])
        if os.path.exists(data_papi_path) and len(
                glob.glob('%s/*' % data_papi_path)
                ) > 0 and Config.model['reuse_rawdata']:
            kgutils.logger.info('Reusing papi raw data.')
        else:
            kgutils.logger.info('Generating papi counter raw data.')
            # start from clean data and resource directories
            if os.path.exists(data_papi_path):
                shutil.rmtree(data_papi_path)
            os.makedirs(data_papi_path)
            if os.path.exists(
                    '%s/__data__/__resource__/%s' %
                    (model_realpath, Config.model['types']['papi']['id'])):
                shutil.rmtree(
                    '%s/__data__/__resource__/%s' %
                    (model_realpath, Config.model['types']['papi']['id']))
            os.makedirs(
                '%s/__data__/__resource__/%s' %
                (model_realpath, Config.model['types']['papi']['id']))
            # generate wrapper nodes
            for filepath, (srcobj, mods_used,
                           units_used) in Config.srcfiles.iteritems():
                if hasattr(srcobj.tree, 'geninfo') and KGGenType.has_state(
                        srcobj.tree.geninfo):
                    sfile = gensobj(None, srcobj.tree, KERNEL_ID_0)
                    # only the callsite file is instrumented for papi output
                    if filepath == Config.callsite['filepath']:
                        sfile.used4papi = True
                    else:
                        sfile.used4papi = False
                    # NOTE(review): this None check is unreachable after the
                    # attribute assignments above; kept as-is.
                    if sfile is None:
                        raise kgutils.ProgramException(
                            'Kernel source file is not generated for %s.'
                            % filepath)
                    self.genfiles.append((sfile, filepath))
                    Config.used_srcfiles[filepath] = (sfile, mods_used,
                                                      units_used)
            # process each nodes in the tree: created -> process -> finalize -> flatten,
            # each pass restricted to papi plugins
            for plugin_name in event_register.keys():
                if not plugin_name.startswith('papi'):
                    continue
                for sfile, filepath in self.genfiles:
                    sfile.created([plugin_name])
            for plugin_name in event_register.keys():
                if not plugin_name.startswith('papi'):
                    continue
                for sfile, filepath in self.genfiles:
                    sfile.process([plugin_name])
            for plugin_name in event_register.keys():
                if not plugin_name.startswith('papi'):
                    continue
                for sfile, filepath in self.genfiles:
                    sfile.finalize([plugin_name])
            for plugin_name in event_register.keys():
                if not plugin_name.startswith('papi'):
                    continue
                for sfile, filepath in self.genfiles:
                    sfile.flatten(KERNEL_ID_0, [plugin_name])
            # generate source files from each node of the tree
            papi_files = []
            for sfile, filepath in self.genfiles:
                filename = os.path.basename(filepath)
                if sfile.used4papi:
                    set_indent('')
                    slines = sfile.tostring()
                    if slines is not None:
                        slines = kgutils.remove_multiblanklines(slines)
                        papi_files.append(filename)
                        with open('%s/%s' % (papi_realpath, filename),
                                  'wb') as fd:
                            fd.write(slines)
                        # sidecar ".kgen" file holds the preprocessed source
                        with open('%s/%s.kgen' % (papi_realpath, filename),
                                  'wb') as ft:
                            ft.write('\n'.join(sfile.kgen_stmt.prep))
            self.gen_makefile()
            kgutils.logger.info(
                'Instrumentation for papi is generated at %s.'
                % papi_realpath)
            # TODO: wait until coverage data generation is completed
            # use -K option for bsub to wait for job completion
            # clean app
            if Config.cmd_clean['cmds']:
                kgutils.run_shcmd(Config.cmd_clean['cmds'])
            if Config.state_switch['clean']:
                kgutils.run_shcmd(Config.state_switch['clean'])
            # TEMP
            out, err, retcode = kgutils.run_shcmd('make', cwd=papi_realpath)
            if retcode != 0:
                kgutils.logger.warn(
                    'Papi counter raw data is not correctly generated.: %s'
                    % err)
        # aggregate the raw data (whether reused or just generated) into the model
        if os.path.exists(data_papi_path) and len(
                glob.glob('%s/*' % data_papi_path)
                ) > 0 and Config.model['reuse_rawdata']:
            kgutils.logger.info('Generating model file: %s/%s' %
                                (Config.path['outdir'], Config.modelfile))
            # collect data
            papis = {
            }  # mpirank:omptid:invoke=[(fileid, linenum, numvisits), ... ]
            papimin = 1E100
            papimax = 0
            npapis = 0
            nexcluded_under = 0
            nexcluded_over = 0
            # raw files are named "<mpirank>.<ompthread>"
            mpipaths = []
            for item in os.listdir(data_papi_path):
                try:
                    mpirank, ompthread = item.split('.')
                    if mpirank.isdigit() and ompthread.isdigit():
                        mpipaths.append(
                            (data_papi_path, mpirank, ompthread))
                except:
                    pass
            nprocs = min(len(mpipaths), multiprocessing.cpu_count() * 1)
            if nprocs == 0:
                kgutils.logger.warn('No papi data files are found.')
            else:
                # fan data files out to worker processes via queues
                workload = [
                    chunk for chunk in chunks(
                        mpipaths,
                        int(math.ceil(len(mpipaths) / nprocs)))
                ]
                inqs = []
                outqs = []
                for _ in range(nprocs):
                    inqs.append(multiprocessing.Queue())
                    outqs.append(multiprocessing.Queue())
                procs = []
                for idx in range(nprocs):
                    proc = multiprocessing.Process(target=readdatafiles,
                                                   args=(inqs[idx],
                                                         outqs[idx]))
                    procs.append(proc)
                    proc.start()
                for inq, chunk in zip(inqs, workload):
                    inq.put(chunk)
                # merge per-worker partial results; pmeta carries
                # (min, max, count, excluded_under, excluded_over)
                for outq in outqs:
                    papi, pmeta = outq.get()
                    update(papis, papi)
                    papimin = min(papimin, pmeta[0])
                    papimax = max(papimax, pmeta[1])
                    npapis += pmeta[2]
                    nexcluded_under += pmeta[3]
                    nexcluded_over += pmeta[4]
                for idx in range(nprocs):
                    procs[idx].join()
                kgutils.logger.info(
                    '# of excluded samples: under limit = %d, over limit = %d'
                    % (nexcluded_under, nexcluded_over))
                if len(papis) == 0:
                    if not _DEBUG:
                        shutil.rmtree(data_papi_path)
                    kgutils.logger.warn(
                        'Papi data collection is not right. Deleting corrupted data.'
                    )
                else:
                    try:
                        papi_sections = ['counters', 'summary']
                        self.addmodel('papi', papi_sections)
                        # papi section
                        papi = []
                        #fd.write('; <MPI rank> < OpenMP Thread> <invocation order> = <file number>:<line number><num of invocations> ...\n')
                        for ranknum, threadnums in papis.items():
                            for threadnum, invokenums in threadnums.items():
                                for invokenum, pvalue in invokenums.items():
                                    papi.append(
                                        ('%s %s %s' %
                                         (ranknum, threadnum, invokenum),
                                         str(pvalue)))
                        self.addsection('papi', 'counters', papi)
                        summary = []
                        summary.append(
                            ('minimum_papicounter', str(papimin)))
                        summary.append(
                            ('maximum_papicounter', str(papimax)))
                        summary.append(
                            ('number_papicounters', str(npapis)))
                        self.addsection('papi', 'summary', summary)
                    except Exception as e:
                        kgutils.logger.error(str(e))
        else:
            if not _DEBUG:
                shutil.rmtree(data_papi_path)
            kgutils.logger.info(
                'failed to generate papi counter information')
        # restore the application source tree
        out, err, retcode = kgutils.run_shcmd('make recover',
                                              cwd=papi_realpath)
        if Config.state_switch['clean']:
            kgutils.run_shcmd(Config.state_switch['clean'])
    else:  # check if coverage should be invoked
        kgutils.logger.info('Reusing Papi counter file: %s/%s' %
                            (Config.path['outdir'], Config.modelfile))
        # check if papi data exists in model file
        if not os.path.exists('%s/%s' %
                              (Config.path['outdir'], Config.modelfile)):
            kgutils.logger.warn('No papi counter file is found.')
        else:
            # read ini file
            kgutils.logger.info('Reading %s/%s' % (Config.path['outdir'],
                                                   Config.modelfile))
            cfg = configparser.ConfigParser()
            cfg.optionxform = str
            cfg.read('%s/%s' % (Config.path['outdir'], Config.modelfile))
            try:
                papimin = int(
                    cfg.get('papi.summary', 'minimum_papicounter').strip())
                papimax = int(
                    cfg.get('papi.summary', 'maximum_papicounter').strip())
                npapis = int(
                    cfg.get('papi.summary',
                            'number_papicounters').strip())
                papidiff = papimax - papimin
                # <MPI rank> < OpenMP Thread> <invocation order> = <file number>:<line number>:<num papis> ...
                if papidiff == 0:
                    nbins = 1
                else:
                    nbins = max(
                        min(Config.model['types']['papi']['nbins'],
                            npapis), 2)
                kgutils.logger.info('nbins = %d' % nbins)
                kgutils.logger.info('papimin = %d' % papimin)
                kgutils.logger.info('papimax = %d' % papimax)
                kgutils.logger.info('papidiff = %d' % papidiff)
                kgutils.logger.info('npapis = %d' % npapis)
                if nbins > 1:
                    papibins = [{} for _ in range(nbins)]
                    papicounts = [0 for _ in range(nbins)]
                else:
                    papibins = [{}]
                    papicounts = [0]
                idx = 0
                # TODO: conver to counters
                for opt in cfg.options('papi.counters'):
                    ranknum, threadnum, invokenum = tuple(
                        num for num in opt.split())
                    count = cfg.getint('papi.counters', opt)
                    # NOTE(review): with Python 2 integer division,
                    # (count - papimin) / papidiff truncates to 0 for all but
                    # the max value — confirm the intended binning.
                    if nbins > 1:
                        binnum = int(
                            math.floor(
                                (count - papimin) / papidiff * (nbins - 1)))
                    else:
                        binnum = 0
                    papicounts[binnum] += 1
                    invokenum = int(invokenum)
                    if invokenum not in papibins[binnum]:
                        papibins[binnum][invokenum] = {}
                    if ranknum not in papibins[binnum][invokenum]:
                        papibins[binnum][invokenum][ranknum] = {}
                    if threadnum not in papibins[binnum][invokenum][ranknum]:
                        papibins[binnum][invokenum][ranknum][threadnum] = count
                    else:
                        raise Exception(
                            'Dupulicated data: (%s, %s, %s, %d)' %
                            (invokenum, ranknum, threadnum, count))
                    idx += 1
                    if idx % 100000 == 0:
                        print 'Processed %d items: %s' % (
                            idx, datetime.datetime.now().strftime(
                                "%I:%M%p on %B %d, %Y"))
            except Exception as e:
                raise Exception(
                    'Please check the format of papi counter file: %s' %
                    str(e))
            # types of representation
            # average, median, min/max, n-stratified, distribution
            # bins with histogram
            totalcount = sum(papicounts)
            countdist = [float(c) / float(totalcount) for c in papicounts]
            ndata = Config.model['types']['papi']['ndata']
            # number of samples to draw per bin, proportional to bin mass
            datacollect = [int(round(dist * ndata)) for dist in countdist]
            # TODO: convert to counters
            triples = []
            for binnum, papibin in enumerate(papibins):
                bin_triples = []
                range_begin = int(binnum * (papimax - papimin) / nbins +
                                  papimin) if binnum > 0 else papimin
                range_end = int((binnum + 1) * (papimax - papimin) / nbins
                                + papimin) if binnum < (nbins - 1) else None
                if range_end is None:
                    print 'From bin # %d [ %d ~ ] %f %% of %d'%(binnum, \
                        range_begin, countdist[binnum] * 100, totalcount)
                else:
                    print 'From bin # %d [ %d ~ %d ] %f %% of %d'%(binnum, \
                        range_begin, range_end, countdist[binnum] * 100, totalcount)
                for invokenum in sorted(papibin.keys()):
                    if len(bin_triples) >= datacollect[binnum]:
                        break
                    # select datacollect[binum] under this data tree, rank/thread/invoke
                    bininvokes = papibin[invokenum].keys()
                    random.shuffle(bininvokes)
                    for ranknum in bininvokes:
                        if len(bin_triples) >= datacollect[binnum]:
                            break
                        binranks = papibin[invokenum][ranknum].keys()
                        random.shuffle(binranks)
                        for threadnum in binranks:
                            bin_triples.append((ranknum, threadnum,
                                                invokenum))
                            print ' invocation triple: %s:%s:%s' % (
                                ranknum, threadnum, invokenum)
                triples.extend(bin_triples)
            print 'Number of bins: %d' % nbins
            print 'Minimun papi count: %d' % papimin
            print 'Maximum papi count: %d' % papimax
            #print 'Selected invocation triples:'
            #print ','.join([ ':'.join([ str(n) for n in t ]) for t in triples])
            for ranknum, threadnum, invokenum in triples:
                Config.invocation['triples'].append(
                    ((str(ranknum), str(ranknum)),
                     (str(threadnum), str(threadnum)),
                     (str(invokenum), str(invokenum))))
def download(self, myname, result):
    """Ensure a pristine WRF V3.9.1.1 checkout exists and copy it aside.

    A reference clone lives in <sysdir>/wrf_ref; it is re-cloned whenever it
    is missing or `git status` reports local changes.  The reference is then
    copied to <sysdir>/wrf_work (only if not already present).  Records the
    paths plus the job-script location in result[myname], marks the task
    PASSED, and returns result.
    """
    systestdir = result['mkdir_task']['sysdir']
    workdir = result['mkdir_task']['workdir']
    appsrc = '%s/wrf_ref' % systestdir

    # decide whether the reference clone must be (re)created
    needs_clone = True
    if os.path.exists(appsrc):
        out, err, retcode = run_shcmd(
            'git status | grep "nothing to commit"', cwd=appsrc)
        clean = (retcode == 0 and out
                 and out.startswith('nothing to commit'))
        if clean:
            needs_clone = False
        else:
            # dirty or unreadable checkout: discard it before re-cloning
            run_shcmd('rm -rf ' + appsrc, cwd=systestdir)

    if needs_clone:
        run_shcmd('git clone https://github.com/NCAR/WRFV3.git wrf_ref',
                  cwd=systestdir)
        run_shcmd('git checkout V3.9.1.1', cwd=appsrc)

    # copy wrf src into test specific src dir (kept if already present)
    tmpsrc = '%s/wrf_work' % systestdir
    if not os.path.exists(tmpsrc):
        shutil.copytree(appsrc, tmpsrc)

    result[myname]['appsrc'] = appsrc
    result[myname]['tmpsrc'] = tmpsrc
    result[myname]['jobscript'] = os.path.join(here, "submit.sh")
    self.set_status(result, myname, self.PASSED)
    return result