def processAppKerOutput(appstdout=None,stdout=None,stderr=None,geninfo=None,appKerNResVars=None): #set App Kernel Description if(appKerNResVars!=None and 'app' in appKerNResVars and 'name' in appKerNResVars['app']): akname=appKerNResVars['app']['name'] else: akname='unknown' #initiate parser parser=AppKerOutputParser( name = akname ) #set obligatory parameters and statistics #set common parameters and statistics (App:ExeBinSignature and RunEnv:Nodes) parser.setCommonMustHaveParsAndStats() #set app kernel custom sets #parser.setMustHaveParameter('App:Version') parser.setMustHaveStatistic('Wall Clock Time') #parse common parameters and statistics parser.parseCommonParsAndStats(appstdout,stdout,stderr,geninfo) if hasattr(parser,'appKerWallClockTime'): parser.setStatistic("Wall Clock Time", total_seconds(parser.appKerWallClockTime), "Second") #Here can be custom output parsing # #read output # lines=[] # if os.path.isfile(appstdout): # fin=open(appstdout,"rt") # lines=fin.readlines() # fin.close() # # #process the output # parser.successfulRun=False # j=0 # while j<len(lines): # m=re.search(r'My mega parameter\s+(\d+)',lines[j]) # if m:parser.setParameter("mega parameter",m.group(1)) # # m=re.search(r'My mega parameter\s+(\d+)',lines[j]) # if m:parser.setStatistic("mega statistics",m.group(1),"Seconds") # # m=re.search(r'Done',lines[j]) # if m:parser.successfulRun=True # # j+=1 if __name__ == "__main__": #output for testing purpose print "Parsing complete:",parser.parsingComplete(Verbose=True) print "Following statistics and parameter can be set as obligatory:" parser.printParsNStatsAsMustHave() print "\nResulting XML:" print parser.getXML() #return complete XML otherwise return None return parser.getXML()
def processAppKerOutput(appstdout=None, stdout=None, stderr=None, geninfo=None, appKerNResVars=None): # set App Kernel Description parser = AppKerOutputParser( name="test", version=1, description="test the resource deployment", url="http://xdmod.buffalo.edu", measurement_name="test", ) # set obligatory parameters and statistics # set common parameters and statistics parser.setCommonMustHaveParsAndStats() # set app kernel custom sets parser.setMustHaveStatistic("Wall Clock Time") parser.setMustHaveStatistic("Shell is BASH") # parse common parameters and statistics parser.parseCommonParsAndStats(appstdout, stdout, stderr, geninfo) # set statistics if hasattr(parser, "wallClockTime"): parser.setStatistic("Wall Clock Time", total_seconds(parser.wallClockTime), "Second") # read output lines = [] if os.path.isfile(stdout): fin = open(stdout, "rt") lines = fin.readlines() fin.close() # process the output parser.setStatistic("Shell is BASH", 0) j = 0 while j < len(lines): if lines[j].count("Checking that the shell is BASH") > 0 and lines[j + 1].count("bash") > 0: parser.setStatistic("Shell is BASH", 1) j += 1 if stdout != None: if hasattr(parser, "filesExistance"): for k, v in parser.filesExistance.iteritems(): parser.setStatistic(k + " exists", int(v)) if hasattr(parser, "dirAccess"): for k, v in parser.dirAccess.iteritems(): parser.setStatistic(k + " accessible", int(v)) if __name__ == "__main__": # output for testing purpose print "parsing complete:", parser.parsingComplete() parser.printParsNStatsAsMustHave() print parser.getXML() # return complete XML overwize return None return parser.getXML()
def processAppKerOutput(appstdout=None,stdout=None,stderr=None,geninfo=None,appKerNResVars=None): #initiate parser parser=AppKerOutputParser( name = 'xdmod.benchmark.io.mdtest' ) #set obligatory parameters and statistics #set common parameters and statistics (App:ExeBinSignature and RunEnv:Nodes) parser.setCommonMustHaveParsAndStats() #set app kernel custom sets parser.setMustHaveParameter('RunEnv:Nodes') parser.setMustHaveParameter('Arguments (single directory per process)') parser.setMustHaveParameter('Arguments (single directory)') parser.setMustHaveParameter('Arguments (single tree directory per process)') parser.setMustHaveParameter('Arguments (single tree directory)') parser.setMustHaveParameter('files/directories (single directory per process)') parser.setMustHaveParameter('files/directories (single directory)') parser.setMustHaveParameter('files/directories (single tree directory per process)') parser.setMustHaveParameter('files/directories (single tree directory)') parser.setMustHaveParameter('tasks (single directory per process)') parser.setMustHaveParameter('tasks (single directory)') parser.setMustHaveParameter('tasks (single tree directory per process)') parser.setMustHaveParameter('tasks (single tree directory)') parser.setMustHaveStatistic('Directory creation (single directory per process)') parser.setMustHaveStatistic('Directory creation (single directory)') parser.setMustHaveStatistic('Directory creation (single tree directory per process)') parser.setMustHaveStatistic('Directory creation (single tree directory)') parser.setMustHaveStatistic('Directory removal (single directory per process)') parser.setMustHaveStatistic('Directory removal (single directory)') parser.setMustHaveStatistic('Directory removal (single tree directory per process)') parser.setMustHaveStatistic('Directory removal (single tree directory)') parser.setMustHaveStatistic('Directory stat (single directory per process)') parser.setMustHaveStatistic('Directory stat (single 
directory)') parser.setMustHaveStatistic('Directory stat (single tree directory per process)') parser.setMustHaveStatistic('Directory stat (single tree directory)') parser.setMustHaveStatistic('File creation (single directory per process)') parser.setMustHaveStatistic('File creation (single directory)') parser.setMustHaveStatistic('File creation (single tree directory per process)') parser.setMustHaveStatistic('File creation (single tree directory)') parser.setMustHaveStatistic('File read (single directory per process)') parser.setMustHaveStatistic('File read (single directory)') parser.setMustHaveStatistic('File read (single tree directory per process)') parser.setMustHaveStatistic('File read (single tree directory)') parser.setMustHaveStatistic('File removal (single directory per process)') parser.setMustHaveStatistic('File removal (single directory)') parser.setMustHaveStatistic('File removal (single tree directory per process)') parser.setMustHaveStatistic('File removal (single tree directory)') parser.setMustHaveStatistic('File stat (single directory per process)') parser.setMustHaveStatistic('File stat (single directory)') parser.setMustHaveStatistic('File stat (single tree directory per process)') parser.setMustHaveStatistic('File stat (single tree directory)') parser.setMustHaveStatistic('Tree creation (single directory per process)') parser.setMustHaveStatistic('Tree creation (single directory)') parser.setMustHaveStatistic('Tree creation (single tree directory per process)') parser.setMustHaveStatistic('Tree creation (single tree directory)') parser.setMustHaveStatistic('Tree removal (single directory per process)') parser.setMustHaveStatistic('Tree removal (single directory)') parser.setMustHaveStatistic('Tree removal (single tree directory per process)') parser.setMustHaveStatistic('Tree removal (single tree directory)') parser.setMustHaveStatistic('Wall Clock Time') #parse common parameters and statistics 
parser.parseCommonParsAndStats(appstdout,stdout,stderr,geninfo) if hasattr(parser,'appKerWallClockTime'): parser.setStatistic("Wall Clock Time", total_seconds(parser.appKerWallClockTime), "Second") #Here can be custom output parsing #read output lines=[] if os.path.isfile(appstdout): fin=open(appstdout,"rt") lines=fin.readlines() fin.close() #process the output testname="" parser.successfulRun=False j=0 while j<len(lines): m=re.match(r'^#Testing (.+)',lines[j]) if m: testname=" ("+m.group(1).strip()+")" m=re.match(r'^SUMMARY:',lines[j]) if m: j=j+3 while j<len(lines): m=re.match(r'([A-Za-z0-9 ]+):\s+[0-9.]+\s+[0-9.]+\s+([0-9.]+)\s+([0-9.]+)',lines[j]) if m: parser.setStatistic(m.group(1).strip()+testname,m.group(2),"Operations/Second") else: break j=j+1 m=re.search(r'finished at',lines[j]) if m:parser.successfulRun=True m=re.match(r'^Command line used:.+mdtest\s+(.+)',lines[j]) if m: parser.setParameter("Arguments"+testname,m.group(1).strip()) m=re.search(r'([0-9]+) tasks, ([0-9]+) files/directories',lines[j]) if m: parser.setParameter("tasks"+testname,m.group(1).strip()) parser.setParameter("files/directories"+testname,m.group(2).strip()) j=j+1 #parser.setParameter("mega parameter",m.group(1)) # # m=re.search(r'My mega parameter\s+(\d+)',lines[j]) # if m:parser.setStatistic("mega statistics",m.group(1),"Seconds") # # m=re.search(r'Done',lines[j]) # if m:parser.successfulRun=True # # j+=1 if __name__ == "__main__": #output for testing purpose print "Parsing complete:",parser.parsingComplete(Verbose=True) print "Following statistics and parameter can be set as obligatory:" parser.printParsNStatsAsMustHave() print "\nResulting XML:" print parser.getXML() #return complete XML otherwise return None return parser.getXML()
def processAppKerOutput(appstdout=None,stdout=None,stderr=None,geninfo=None,appKerNResVars=None): #set App Kernel Description parser=AppKerOutputParser( name = 'xdmod.benchmark.graph.graph500', version = 1, description = "Graph500 Benchmark", url = 'http://www.Graph500.org', measurement_name = 'Graph500' ) #set obligatory parameters and statistics #set common parameters and statistics parser.setCommonMustHaveParsAndStats() #set app kernel custom sets parser.setMustHaveParameter('App:Version') parser.setMustHaveParameter('Edge Factor') parser.setMustHaveParameter('Input File') parser.setMustHaveParameter('Number of Roots to Check') parser.setMustHaveParameter('Number of Edges') parser.setMustHaveParameter('Number of Vertices') parser.setMustHaveParameter('Scale') parser.setMustHaveStatistic('Harmonic Mean TEPS') parser.setMustHaveStatistic('Harmonic Standard Deviation TEPS') parser.setMustHaveStatistic('Median TEPS') parser.setMustHaveStatistic('Wall Clock Time') #parse common parameters and statistics parser.parseCommonParsAndStats(appstdout,stdout,stderr,geninfo) if hasattr(parser,'appKerWallClockTime'): parser.setStatistic("Wall Clock Time", total_seconds(parser.appKerWallClockTime), "Second") elif hasattr(parser,'wallClockTime'): parser.setStatistic("Wall Clock Time", total_seconds(parser.wallClockTime), "Second") #read output lines=[] if os.path.isfile(appstdout): fin=open(appstdout,"rt") lines=fin.readlines() fin.close() #process the output parser.successfulRun=True Nerrors=0 j=0 while j<len(lines): m=re.match(r'^Graph500 version:\s+(.+)',lines[j]) if m:parser.setParameter("App:Version",m.group(1).strip()) m=re.match(r'ERROR:\s+(.+)',lines[j]) if m:Nerrors+=1 m=re.match(r'^Reading input from\s+(.+)',lines[j]) if m:parser.setParameter("Input File",m.group(1)) m=re.match(r'^SCALE:\s+(\d+)',lines[j]) if m:parser.setParameter("Scale",m.group(1)) m=re.match(r'^edgefactor:\s+(\d+)',lines[j]) if m:parser.setParameter("Edge Factor",m.group(1)) 
m=re.match(r'^NBFS:\s+(\d+)',lines[j]) if m:parser.setParameter("Number of Roots to Check",m.group(1)) m=re.match(r'^median_TEPS:\s+(\d[0-9.e\+]+)',lines[j]) if m:parser.setStatistic("Median TEPS", m.group(1), "Traversed Edges Per Second" ) m=re.match(r'^harmonic_mean_TEPS:\s+(\d[0-9.e\+]+)',lines[j]) if m: parser.successfulRun=True parser.setStatistic("Harmonic Mean TEPS", m.group(1), "Traversed Edges Per Second" ) m=re.match(r'^harmonic_stddev_TEPS:\s+(\d[0-9.e\+]+)',lines[j]) if m:parser.setStatistic("Harmonic Standard Deviation TEPS", m.group(1), "Traversed Edges Per Second" ) m=re.match(r'^median_validate:\s+([\d.]+)\s+s',lines[j]) if m:parser.setStatistic("Median Validation Time", m.group(1), "Second" ) m=re.match(r'^mean_validate:\s+([\d.]+)\s+s',lines[j]) if m:parser.setStatistic("Mean Validation Time", m.group(1), "Second" ) m=re.match(r'^stddev_validate:\s+([\d.]+)\s+s',lines[j]) if m:parser.setStatistic("Standard Deviation Validation Time", m.group(1), "Second" ) j+=1 if Nerrors>0: parser.successfulRun=False if parser.getParameter('Scale')!=None and parser.getParameter('Edge Factor')!=None : SCALE=int(parser.getParameter('Scale')) edgefactor=int(parser.getParameter('Edge Factor')) parser.setParameter("Number of Vertices",2**SCALE) parser.setParameter("Number of Edges",edgefactor*2**SCALE) if __name__ == "__main__": #output for testing purpose parser.parsingComplete(True) print "parsing complete:",parser.parsingComplete() parser.printParsNStatsAsMustHave() print parser.getXML() #return complete XML overwize return None return parser.getXML()
def processAppKerOutput(appstdout=None,stdout=None,stderr=None,geninfo=None,appKerNResVars=None):
    """Parse Intel MPI Benchmarks (IMB) output and return the result XML.

    appstdout/stdout/stderr -- paths to the captured job output files
    geninfo                 -- generic job information blob
    appKerNResVars          -- per-resource variables (unused here)

    Returns parser.getXML(), or None when parsing did not complete.
    """
    #set App Kernel Description
    parser=AppKerOutputParser(
        name = 'xdmod.benchmark.mpi.imb',
        version = 1,
        description = "Intel MPI Benchmarks",
        url = 'http://www.intel.com/software/imb',
        measurement_name = 'Intel MPI Benchmarks'
    )
    #set obligatory parameters and statistics
    #set common parameters and statistics
    parser.setCommonMustHaveParsAndStats()
    #set app kernel custom sets
    parser.setMustHaveParameter('App:MPI Thread Environment')
    parser.setMustHaveParameter('App:MPI Version')
    parser.setMustHaveParameter('App:Max Message Size')
    parser.setMustHaveStatistic('Max Exchange Bandwidth')
    parser.setMustHaveStatistic("Max MPI-2 Bidirectional 'Get' Bandwidth (aggregate)")
    parser.setMustHaveStatistic("Max MPI-2 Bidirectional 'Get' Bandwidth (non-aggregate)")
    parser.setMustHaveStatistic("Max MPI-2 Bidirectional 'Put' Bandwidth (aggregate)")
    parser.setMustHaveStatistic("Max MPI-2 Bidirectional 'Put' Bandwidth (non-aggregate)")
    parser.setMustHaveStatistic("Max MPI-2 Unidirectional 'Get' Bandwidth (aggregate)")
    parser.setMustHaveStatistic("Max MPI-2 Unidirectional 'Get' Bandwidth (non-aggregate)")
    parser.setMustHaveStatistic("Max MPI-2 Unidirectional 'Put' Bandwidth (aggregate)")
    parser.setMustHaveStatistic("Max MPI-2 Unidirectional 'Put' Bandwidth (non-aggregate)")
    parser.setMustHaveStatistic('Max PingPing Bandwidth')
    parser.setMustHaveStatistic('Max PingPong Bandwidth')
    parser.setMustHaveStatistic('Max SendRecv Bandwidth')
    parser.setMustHaveStatistic('Min AllGather Latency')
    parser.setMustHaveStatistic('Min AllGatherV Latency')
    parser.setMustHaveStatistic('Min AllReduce Latency')
    parser.setMustHaveStatistic('Min AllToAll Latency')
    parser.setMustHaveStatistic('Min AllToAllV Latency')
    parser.setMustHaveStatistic('Min Barrier Latency')
    parser.setMustHaveStatistic('Min Broadcast Latency')
    parser.setMustHaveStatistic('Min Gather Latency')
    parser.setMustHaveStatistic('Min GatherV Latency')
    #parser.setMustHaveStatistic("Min MPI-2 'Accumulate' Latency (aggregate)")
    #parser.setMustHaveStatistic("Min MPI-2 'Accumulate' Latency (non-aggregate)")
    parser.setMustHaveStatistic('Min MPI-2 Window Creation Latency')
    parser.setMustHaveStatistic('Min Reduce Latency')
    parser.setMustHaveStatistic('Min ReduceScatter Latency')
    parser.setMustHaveStatistic('Min Scatter Latency')
    parser.setMustHaveStatistic('Min ScatterV Latency')
    parser.setMustHaveStatistic('Wall Clock Time')
    #parse common parameters and statistics
    parser.parseCommonParsAndStats(appstdout,stdout,stderr,geninfo)
    if hasattr(parser,'appKerWallClockTime'):
        parser.setStatistic("Wall Clock Time", total_seconds(parser.appKerWallClockTime), "Second")
    # Intel MPI benchmark suite contains three classes of benchmarks:
    #
    #  Single-transfer, which needs only 2 processes
    #  Parallel-transfer, which can use as many processes that are available
    #  Collective, which can use as many processes that are available

    # The parameters mapping table:
    #   header text -> [parameter name, unit, optional conversion formula
    #                   where '<val>' is replaced by the parsed value]
    Params = {
        "MPI Thread Environment" : [ "MPI Thread Environment", "", "" ],
        "MPI Version" : [ "MPI Version", "", "" ],
        "Maximum message length in bytes" : [ "Max Message Size", "MByte", "<val>/1024/1024" ]
    }

    # The result mapping table:
    #   benchmark name -> [statistic name, unit, aggregation ('max'/'min')]
    Metrics = {
        "PingPing" : [ "PingPing Bandwidth", "MByte per Second", "max" ],
        "PingPong" : [ "PingPong Bandwidth", "MByte per Second", "max" ],
        "Multi-PingPing" : [ "PingPing Bandwidth", "MByte per Second", "max" ],
        "Multi-PingPong" : [ "PingPong Bandwidth", "MByte per Second", "max" ],
        "Sendrecv" : [ "SendRecv Bandwidth", "MByte per Second", "max" ],
        "Exchange" : [ "Exchange Bandwidth", "MByte per Second", "max" ],
        "Allreduce" : [ "AllReduce Latency", "us", "min" ],
        "Reduce" : [ "Reduce Latency", "us", "min" ],
        "Reduce_scatter" : [ "ReduceScatter Latency", "us", "min" ],
        "Allgather" : [ "AllGather Latency", "us", "min" ],
        "Allgatherv" : [ "AllGatherV Latency", "us", "min" ],
        "Gather" : [ "Gather Latency", "us", "min" ],
        "Gatherv" : [ "GatherV Latency", "us", "min" ],
        "Scatter" : [ "Scatter Latency", "us", "min" ],
        "Scatterv" : [ "ScatterV Latency", "us", "min" ],
        "Alltoall" : [ "AllToAll Latency", "us", "min" ],
        "Alltoallv" : [ "AllToAllV Latency", "us", "min" ],
        "Bcast" : [ "Broadcast Latency", "us", "min" ],
        "Barrier" : [ "Barrier Latency", "us", "min" ],
        "Window" : [ "MPI-2 Window Creation Latency", "us", "min" ],
        "Multi-Unidir_Get" : [ "MPI-2 Unidirectional 'Get' Bandwidth", "MByte per Second", "max" ],
        "Multi-Unidir_Put" : [ "MPI-2 Unidirectional 'Put' Bandwidth", "MByte per Second", "max" ],
        "Multi-Bidir_Get" : [ "MPI-2 Bidirectional 'Get' Bandwidth", "MByte per Second", "max" ],
        "Multi-Bidir_Put" : [ "MPI-2 Bidirectional 'Put' Bandwidth", "MByte per Second", "max" ],
        "Unidir_Get" : [ "MPI-2 Unidirectional 'Get' Bandwidth", "MByte per Second", "max" ],
        "Unidir_Put" : [ "MPI-2 Unidirectional 'Put' Bandwidth", "MByte per Second", "max" ],
        "Bidir_Get" : [ "MPI-2 Bidirectional 'Get' Bandwidth", "MByte per Second", "max" ],
        "Bidir_Put" : [ "MPI-2 Bidirectional 'Put' Bandwidth", "MByte per Second", "max" ],
        "Accumulate" : [ "MPI-2 'Accumulate' Latency", "us", "min" ]
    }
    #read output
    lines=[]
    if os.path.isfile(appstdout):
        fin=open(appstdout,"rt")
        lines=fin.readlines()
        fin.close()
    #process the output
    parser.successfulRun=False
    aggregateMode=None
    metric=None
    j=-1
    while j<len(lines)-1:
        j+=1
        m=re.search(r'All processes entering MPI_Finalize',lines[j])
        if m:parser.successfulRun=True
        m=re.match(r'^# Benchmarking\s+(\S+)',lines[j])
        if m:
            #start of a benchmark section; remember it if we know how to map it
            if m.group(1) in Metrics:
                metric=m.group(1)
            continue
        m=re.match(r'^#\s+MODE:\s+(\S+)',lines[j])
        if m and metric and aggregateMode==None:
            #MPI-2 one-sided benchmarks report AGGREGATE/NON-AGGREGATE mode
            aggregateMode=m.group(1)
            continue
        m=re.match(r'^# (.+): (.+)',lines[j])
        if m:# benchmark parameters
            param=m.group(1).strip()
            if param in Params:
                val=m.group(2).strip()
                v=Params[param][2]
                if v.find('<val>')>=0:
                    #apply the conversion formula from the mapping table;
                    #eval is over the internal constant table only, never
                    #over benchmark output
                    val=float(val)
                    val=eval(v.replace('<val>','val'))
                parser.setParameter( "App:" + Params[param][0], str(val) + " ", Params[param][1] )
            continue
        m=re.match(r'^\s+([1-9]\d*)\s+\d+',lines[j])
        if m and metric:# this effectively skips the first line of result, which has #bytes = 0
            #collect the last column (bandwidth or latency) of every result row
            results=[]
            while m:
                numbers=lines[j].split()
                results.append(float(numbers[-1]))# tokenize the line, and extract the last column
                j+=1
                if j<len(lines):
                    m=re.match(r'^\s+([1-9]\d*)\s+\d+',lines[j])
                else:
                    break
            metricName=Metrics[metric][0]
            if aggregateMode:
                metricName+=" ("+aggregateMode.lower()+ ")"
            if len(results)>0:
                if Metrics[metric][1]=='us':
                    #latencies are reported in microseconds; store as seconds
                    #statistic name is e.g. "Min AllReduce Latency"
                    statname=Metrics[metric][2][0].upper()+Metrics[metric][2][1:]+" "+metricName
                    statval=eval(Metrics[metric][2]+"(results)")
                    parser.setStatistic(statname,statval*1e-6, "Second" )
                else:
                    #bandwidths keep their native unit (MByte per Second)
                    statname=Metrics[metric][2][0].upper()+Metrics[metric][2][1:]+" "+metricName
                    statval=eval(Metrics[metric][2]+"(results)")
                    parser.setStatistic(statname,statval, Metrics[metric][1])
            #section finished; reset state for the next benchmark
            aggregateMode=None
            metric=None
    if parser.getParameter("App:MPI Thread Environment")==None:
        #older IMB builds do not print the thread environment line
        parser.setParameter("App:MPI Thread Environment","")
    if __name__ == "__main__":
        #output for testing purpose
        print "parsing complete:",parser.parsingComplete(Verbose=True)
        parser.printParsNStatsAsMustHave()
        print parser.getXML()
        #Print out missing parameters for debug purpose
        parser.parsingComplete(Verbose=True)
    #return complete XML overwize return None
    return parser.getXML()
def processAppKerOutput(appstdout=None,stdout=None,stderr=None,geninfo=None,appKerNResVars=None):
    """Parse IOR benchmark output (v2 and v3 formats) and return the result XML.

    appstdout/stdout/stderr -- paths to the captured job output files
    geninfo                 -- generic job information blob
    appKerNResVars          -- per-resource variables; the boolean flags
                               testHDF5/testMPIIO/testPOSIX/testNetCDF select
                               which API sections are obligatory

    Returns parser.getXML(), or None when parsing did not complete.
    """
    #set App Kernel Description
    parser=AppKerOutputParser(
        name = 'xdmod.benchmark.io.ior',
        version = 1,
        description = "IOR (Interleaved-Or-Random) Benchmark",
        url = 'http://freshmeat.net/projects/ior',
        measurement_name = 'IOR'
    )
    #set obligatory parameters and statistics
    #set common parameters and statistics
    parser.setCommonMustHaveParsAndStats()
    #set app kernel custom sets; each API family is obligatory only when its
    #flag is set (or when no flags are supplied at all)
    parser.setMustHaveParameter('App:Version')
    if appKerNResVars==None or (appKerNResVars!=None and 'testHDF5' in appKerNResVars and appKerNResVars['testHDF5']==True):
        parser.setMustHaveParameter('HDF Version')
        parser.setMustHaveParameter('HDF5 Collective N-to-1 Test File System')
        parser.setMustHaveParameter('HDF5 Independent N-to-1 Test File System')
        parser.setMustHaveParameter('HDF5 N-to-N Test File System')
    if appKerNResVars==None or (appKerNResVars!=None and 'testMPIIO' in appKerNResVars and appKerNResVars['testMPIIO']==True):
        parser.setMustHaveParameter('MPIIO Collective N-to-1 Test File System')
        parser.setMustHaveParameter('MPIIO Independent N-to-1 Test File System')
        parser.setMustHaveParameter('MPIIO N-to-N Test File System')
    if appKerNResVars==None or (appKerNResVars!=None and 'testPOSIX' in appKerNResVars and appKerNResVars['testPOSIX']==True):
        parser.setMustHaveParameter('POSIX N-to-1 Test File System')
        parser.setMustHaveParameter('POSIX N-to-N Test File System')
    if appKerNResVars==None or (appKerNResVars!=None and 'testNetCDF' in appKerNResVars and appKerNResVars['testNetCDF']==True):
        parser.setMustHaveParameter('Parallel NetCDF Collective N-to-1 Test File System')
        parser.setMustHaveParameter('Parallel NetCDF Independent N-to-1 Test File System')
        parser.setMustHaveParameter('Parallel NetCDF Version')
    parser.setMustHaveParameter('Per-Process Data Size')
    parser.setMustHaveParameter('Per-Process I/O Block Size')
    parser.setMustHaveParameter('RunEnv:Nodes')
    parser.setMustHaveParameter('Transfer Size Per I/O')
    if appKerNResVars==None or (appKerNResVars!=None and 'testHDF5' in appKerNResVars and appKerNResVars['testHDF5']==True):
        parser.setMustHaveStatistic('HDF5 Collective N-to-1 Read Aggregate Throughput')
        parser.setMustHaveStatistic('HDF5 Collective N-to-1 Write Aggregate Throughput')
        parser.setMustHaveStatistic('HDF5 Independent N-to-1 Read Aggregate Throughput')
        parser.setMustHaveStatistic('HDF5 Independent N-to-1 Write Aggregate Throughput')
        parser.setMustHaveStatistic('HDF5 N-to-N Read Aggregate Throughput')
        parser.setMustHaveStatistic('HDF5 N-to-N Write Aggregate Throughput')
    if appKerNResVars==None or (appKerNResVars!=None and 'testMPIIO' in appKerNResVars and appKerNResVars['testMPIIO']==True):
        parser.setMustHaveStatistic('MPIIO Collective N-to-1 Read Aggregate Throughput')
        parser.setMustHaveStatistic('MPIIO Collective N-to-1 Write Aggregate Throughput')
        parser.setMustHaveStatistic('MPIIO Independent N-to-1 Read Aggregate Throughput')
        parser.setMustHaveStatistic('MPIIO Independent N-to-1 Write Aggregate Throughput')
        parser.setMustHaveStatistic('MPIIO N-to-N Read Aggregate Throughput')
        parser.setMustHaveStatistic('MPIIO N-to-N Write Aggregate Throughput')
    if appKerNResVars==None or (appKerNResVars!=None and 'testPOSIX' in appKerNResVars and appKerNResVars['testPOSIX']==True):
        parser.setMustHaveStatistic('POSIX N-to-1 Read Aggregate Throughput')
        parser.setMustHaveStatistic('POSIX N-to-1 Write Aggregate Throughput')
        parser.setMustHaveStatistic('POSIX N-to-N Read Aggregate Throughput')
        parser.setMustHaveStatistic('POSIX N-to-N Write Aggregate Throughput')
    if appKerNResVars==None or (appKerNResVars!=None and 'testNetCDF' in appKerNResVars and appKerNResVars['testNetCDF']==True):
        parser.setMustHaveStatistic('Parallel NetCDF Collective N-to-1 Read Aggregate Throughput')
        parser.setMustHaveStatistic('Parallel NetCDF Collective N-to-1 Write Aggregate Throughput')
        parser.setMustHaveStatistic('Parallel NetCDF Independent N-to-1 Read Aggregate Throughput')
        parser.setMustHaveStatistic('Parallel NetCDF Independent N-to-1 Write Aggregate Throughput')
    parser.setMustHaveStatistic('Number of Tests Passed')
    parser.setMustHaveStatistic('Number of Tests Started')
    parser.setMustHaveStatistic('Wall Clock Time')
    #a partially complete must-have set is acceptable for IOR
    parser.completeOnPartialMustHaveStatistics=True
    #parse common parameters and statistics
    parser.parseCommonParsAndStats(appstdout,stdout,stderr,geninfo)
    if hasattr(parser,'appKerWallClockTime'):
        parser.setStatistic("Wall Clock Time", total_seconds(parser.appKerWallClockTime), "Second")
    #read output
    lines=[]
    if os.path.isfile(appstdout):
        fin=open(appstdout,"rt")
        lines=fin.readlines()
        fin.close()
    #process the output
    #find which version of IOR was used
    ior_output_version=None
    j=0
    while j<len(lines)-1:
        #IOR RELEASE: IOR-2.10.3
        m=re.match(r'^#\s+IOR RELEASE:\s(.+)',lines[j])
        if m:ior_output_version=2
        m=re.match(r'^IOR-[3-9]\.[0-9]+\.[0-9]: MPI Coordinated Test of Parallel I/O',lines[j])
        if m:ior_output_version=3
        j+=1
    if ior_output_version==None:
        print "ERROR: unknown version of IOR output!!!"
    testsPassed=0
    totalNumberOfTests=0
    parser.successfulRun=False
    if ior_output_version==2:
        #v2 format: '# key: value' header lines accumulate in METRICS until a
        #complete test (write+read bandwidth and run metadata) is seen
        METRICS={}
        #NOTE(review): j starts at -1 with j+=1 at the BOTTOM of the loop, so
        #the first iteration reads lines[-1] (the last line) — looks like an
        #off-by-one; confirm against the IMB parser which increments at the top
        j=-1
        while j<len(lines)-1:
            m=re.match(r'^# (.+?):(.+)',lines[j])
            if m:
                METRICS[m.group(1).strip()]=m.group(2).strip()
                if m.group(1).strip()=="segmentCount":
                    #segmentCount line carries extra text after the number
                    METRICS[m.group(1).strip()]=m.group(2).strip().split()[0]
            m=re.match(r'^# IOR command line used:',lines[j])
            if m:totalNumberOfTests+=1
            if "IOR RELEASE" in METRICS:
                parser.setParameter( "App:Version", METRICS["IOR RELEASE"])
            if "Compile-time HDF Version" in METRICS:
                parser.setParameter( "HDF Version", METRICS["Compile-time HDF Version"])
            if "Compile-time PNETCDF Version" in METRICS:
                parser.setParameter( "Parallel NetCDF Version", METRICS["Compile-time PNETCDF Version"])
            if "blockSize" in METRICS and "segmentCount" in METRICS:
                #print METRICS["blockSize"],METRICS["segmentCount"]
                parser.setParameter( "Per-Process Data Size", ( float(METRICS["blockSize"]) / 1024.0 / 1024.0 ) * int(METRICS["segmentCount"]), "MByte" )
                parser.setParameter( "Per-Process I/O Block Size", ( float(METRICS["blockSize"]) / 1024.0 / 1024.0 ), "MByte" )
            if "reorderTasks" in METRICS:
                if int(METRICS["reorderTasks"])!=0:
                    parser.setParameter( "Reorder Tasks for Read-back Tests", "Yes" )
            if "repetitions" in METRICS:
                if 1 < int(METRICS["repetitions"]):
                    parser.setParameter( "Test Repetitions", METRICS["repetitions"])
            if "transferSize" in METRICS:
                parser.setParameter( "Transfer Size Per I/O", ( float(METRICS["transferSize"]) / 1024.0 / 1024.0 ), "MByte" )
            if "mpiio hints passed to MPI_File_open" in METRICS:
                parser.setParameter( "MPI-IO Hints", METRICS["mpiio hints passed to MPI_File_open"])
            if "Write bandwidth" in METRICS and "Read bandwidth" in METRICS and \
               "api" in METRICS and "filePerProc" in METRICS and "collective" in METRICS and \
               "randomOffset" in METRICS and "Run finished" in METRICS:
                #a complete test section was collected; build the human-readable
                #label, e.g. "MPIIO Collective N-to-1"
                testsPassed+=1
                label = METRICS["api"];
                m=re.search(r'NCMPI',label,re.I)
                if m:label = "Parallel NetCDF"
                m=re.search(r'POSIX',label,re.I)
                if m and "useO_DIRECT" in METRICS:
                    if int(METRICS["useO_DIRECT"]) != 0:
                        label += ' (O_DIRECT)'
                if m==None:
                    # POSIX doesn't have collective I/O
                    if int(METRICS["collective"]) == 0:
                        label += ' Independent'
                    else:
                        label += ' Collective'
                if int(METRICS["randomOffset"]) == 0:
                    label += ''
                else:
                    label += ' Random'
                if int(METRICS["filePerProc"]) == 0:
                    label += ' N-to-1'
                else:
                    label += ' N-to-N'
                # for N-to-N (each process writes to its own file), it must be
                # Independent, so we can remove the redundant words such as
                # "Independent" and "Collective"
                m=re.search(r' (Independent|Collective).+N-to-N',label,re.I)
                if m:
                    label=label.replace(" Independent","").replace(" Collective","")
                # now we have the label, get test-specific parameters
                m=re.search(r'MPIIO',label,re.I)
                if m:
                    if "useFileView" in METRICS:
                        if 0 != int(METRICS["useFileView"]):
                            parser.setParameter( label + " Uses MPI_File_set_view", "Yes" )
                    if "useSharedFilePointer" in METRICS:
                        if 0 != int(METRICS["useSharedFilePointer"]):
                            parser.setParameter( label + " Uses Shared File Pointer", "Yes" )
                m=re.search(r'POSIX',label,re.I)
                if m:
                    if "fsyncPerWrite" in METRICS:
                        if 0 != int(METRICS["fsyncPerWrite"]):
                            parser.setParameter( label + " Uses fsync per Write", "Yes" )
                #write statistics; bandwidths are reported in bytes/s, stored as MByte/s
                m=re.search(r'mean=(\S+)',METRICS["Write bandwidth"])
                if m:
                    metric = m.group(1).strip()
                    writeLabel = label
                    # writes are always sequential
                    writeLabel = writeLabel.replace(" Random","")
                    parser.setStatistic( writeLabel+" Write Aggregate Throughput", "%.2f"%(float(metric) / 1024.0 / 1024.0, ), "MByte per Second" );
                    #parser.setParameter( "${writeLabel} Test File", METRICS["testFileName"} ) if ( exists METRICS["testFileName"} );
                    if "fileSystem" in METRICS:
                        parser.setParameter( writeLabel+" Test File System", METRICS["fileSystem"])
                    parser.successfulRun=True
                    if "File Open Time (Write)" in METRICS:
                        m2=re.search(r'mean=(\S+)',METRICS["File Open Time (Write)"])
                        if m2:
                            parser.setStatistic( writeLabel+" File Open Time (Write)", m2.group(1).strip(), "Second" );
                    if "File Close Time (Write)" in METRICS:
                        m2=re.search(r'mean=(\S+)',METRICS["File Close Time (Write)"])
                        if m2:
                            parser.setStatistic( writeLabel+" File Close Time (Write)", m2.group(1).strip(), "Second" );
                #read statistics
                m=re.search(r'mean=(\S+)',METRICS["Read bandwidth"])
                if m:
                    parser.setStatistic( label+" Read Aggregate Throughput", "%.2f"%(float(m.group(1).strip()) / 1024.0 / 1024.0, ), "MByte per Second" );
                    parser.successfulRun=True
                    #NOTE(review): the read open/close times below are stored
                    #under writeLabel while the read throughput uses label —
                    #possibly intentional (writeLabel strips " Random"), confirm
                    if "File Open Time (Read)" in METRICS:
                        m2=re.search(r'mean=(\S+)',METRICS["File Open Time (Read)"])
                        if m2:
                            parser.setStatistic( writeLabel+" File Open Time (Read)", m2.group(1).strip(), "Second" );
                    if "File Close Time (Read)" in METRICS:
                        m2=re.search(r'mean=(\S+)',METRICS["File Close Time (Read)"])
                        if m2:
                            parser.setStatistic( writeLabel+" File Close Time (Read)", m2.group(1).strip(), "Second" );
                #test fully processed; start accumulating the next one
                METRICS={}
            j+=1
    if ior_output_version==3:
        #v3 format: a 'Summary:' section with tab-indented 'key=value' lines
        #followed by a results table
        i=0
        input_summary={}
        rsl_w={}
        rsl_r={}
        filesystem=None
        while i<len(lines)-1:
            m=re.match(r'^IOR-([3-9]\.[0-9]+\.[0-9]+): MPI Coordinated Test of Parallel I/O',lines[i])
            if m:parser.setParameter( "App:Version", m.group(1).strip())
            m=re.match(r'^File System To Test:(.+)',lines[i])
            if m:filesystem=m.group(1).strip()
            m=re.match(r'^# Starting Test:',lines[i])
            if m:totalNumberOfTests+=1
            m0=re.match(r'^Summary:$',lines[i])
            if m0:
                #input summary section
                input_summary={}
                i+=1
                while i<len(lines):
                    m1=re.match(r'^\t([^=\n\r\f\v]+)=(.+)',lines[i])
                    if m1:
                        input_summary[m1.group(1).strip()]=m1.group(2).strip()
                    else:
                        break
                    i+=1
                #process input_summary: derive API family and version
                input_summary['filesystem']=filesystem
                input_summary['API']=input_summary['api']
                if input_summary['api'].count("MPIIO")>0:
                    input_summary['API']="MPIIO"
                    input_summary['API_Version']=input_summary['api'].replace("MPIIO","").strip()
                    parser.setParameter( "MPIIO Version", input_summary['API_Version'])
                if input_summary['api'].count("HDF5")>0:
                    input_summary['API']="HDF5"
                    input_summary['API_Version']=input_summary['api'].replace("HDF5-","").replace("HDF5","").strip()
                    parser.setParameter( "HDF Version", input_summary['API_Version'])
                if input_summary['api'].count("NCMPI")>0:
                    input_summary['API']="Parallel NetCDF"
                    input_summary['API_Version']=input_summary['api'].replace("NCMPI","").strip()
                    parser.setParameter( "Parallel NetCDF Version", input_summary['API_Version'])
                #derive the access-pattern part of the test label
                input_summary['fileAccessPattern']=""
                input_summary['collectiveOrIndependent']=""
                if input_summary['access'].count('single-shared-file')>0:
                    input_summary['fileAccessPattern']="N-to-1"
                if input_summary['access'].count('file-per-process')>0:
                    input_summary['fileAccessPattern']="N-to-N"
                if input_summary['access'].count('independent')>0:
                    input_summary['collectiveOrIndependent']="Independent"
                if input_summary['access'].count('collective')>0:
                    input_summary['collectiveOrIndependent']="Collective"
                #N-to-N is implicitly independent; drop the redundant word
                if input_summary['fileAccessPattern']=="N-to-N" and input_summary['collectiveOrIndependent']=="Independent":
                    input_summary['collectiveOrIndependent']=""
                if input_summary['collectiveOrIndependent']!="":
                    input_summary['method']=" ".join((input_summary['API'], input_summary['collectiveOrIndependent'], input_summary['fileAccessPattern']))
                else:
                    input_summary['method']=" ".join((input_summary['API'], input_summary['fileAccessPattern']))
                if input_summary['filesystem']!=None:
                    parser.setParameter(input_summary['method']+' Test File System',input_summary['filesystem'])
                if "pattern" in input_summary:
                    m1=re.match(r'^segmented \(([0-9]+) segment',input_summary["pattern"])
                    if m1:input_summary["segmentCount"]=int(m1.group(1).strip())
                if "blocksize" in input_summary and "segmentCount" in input_summary:
                    #sizes come as '<value> <unit>' pairs; normalize to MiB
                    val,unit=input_summary["blocksize"].split()
                    blockSize=getMiB(float(val),unit)
                    segmentCount=input_summary["segmentCount"]
                    parser.setParameter( "Per-Process Data Size", blockSize*segmentCount, "MByte" )
                    parser.setParameter( "Per-Process I/O Block Size", blockSize, "MByte" )
                if "xfersize" in input_summary:
                    val,unit=input_summary["xfersize"].split()
                    transferSize=getMiB(float(val),unit)
                    parser.setParameter( "Transfer Size Per I/O", transferSize, "MByte" )
            m0=re.match(r'^access\s+bw\(MiB/s\)\s+block\(KiB\)\s+xfer\(KiB\)\s+open\(s\)\s+wr/rd\(s\)\s+close\(s\)\s+total\(s\)\s+iter',lines[i])
            if m0:
                #results table: one 'write'/'read' row per iteration
                i+=1
                while i<len(lines):
                    m1=re.match(r'^write\s+([0-9\.]+)+\s+([0-9\.]+)+\s+([0-9\.]+)+\s+([0-9\.]+)+\s+([0-9\.]+)+\s+([0-9\.]+)+\s+([0-9\.]+)+\s+([0-9\.]+)+',lines[i])
                    m2=re.match(r'^read\s+([0-9\.]+)+\s+([0-9\.]+)+\s+([0-9\.]+)+\s+([0-9\.]+)+\s+([0-9\.]+)+\s+([0-9\.]+)+\s+([0-9\.]+)+\s+([0-9\.]+)+',lines[i])
                    if m1 or m2:
                        if m1:
                            access="Write"
                            bw,block,xfer,open_s,wrrd_s,close_s,total_s,iter=m1.groups()
                        else:
                            access="Read"
                            bw,block,xfer,open_s,wrrd_s,close_s,total_s,iter=m2.groups()
                        testsPassed+=1
                        parser.successfulRun=True
                        parser.setStatistic( input_summary['method']+" %s Aggregate Throughput"%access, bw, "MByte per Second" );
                        parser.setStatistic( input_summary['method']+" File Open Time (%s)"%access, open_s, "Second" );
                        parser.setStatistic( input_summary['method']+" File Close Time (%s)"%access, close_s, "Second" );
                    m1=re.match(r'^Summary of all tests:',lines[i])
                    if m1:break
                    i+=1
                #reset variables
                input_summary={}
                rsl_w={}
                rsl_r={}
                #filesystem=None
            i+=1
    parser.setStatistic('Number of Tests Passed',testsPassed )
    parser.setStatistic('Number of Tests Started',totalNumberOfTests )
    if __name__ == "__main__":
        #output for testing purpose
        print "parsing complete:",parser.parsingComplete(Verbose=True)
        parser.printParsNStatsAsMustHave()
        print parser.getXML()
    #return complete XML overwize return None
    return parser.getXML()
def processAppKerOutput(appstdout=None,stdout=None,stderr=None,geninfo=None,appKerNResVars=None): #set App Kernel Description parser=AppKerOutputParser( name = 'xdmod.benchmark.hpcc', version = 1, description = "HPC Challenge Benchmarks", url = 'http://icl.cs.utk.edu/hpcc/', measurement_name = 'xdmod.benchmark.hpcc' ) #set obligatory parameters and statistics #set common parameters and statistics parser.setCommonMustHaveParsAndStats() #set app kernel custom sets parser.setMustHaveParameter('App:Version') parser.setMustHaveParameter('Input:DGEMM Problem Size') parser.setMustHaveParameter('Input:High Performance LINPACK Grid Cols') parser.setMustHaveParameter('Input:High Performance LINPACK Grid Rows') parser.setMustHaveParameter('Input:High Performance LINPACK Problem Size') parser.setMustHaveParameter('Input:MPI Ranks') parser.setMustHaveParameter('Input:MPIRandom Problem Size') parser.setMustHaveParameter('Input:OpenMP Threads') parser.setMustHaveParameter('Input:PTRANS Problem Size') parser.setMustHaveParameter('Input:STREAM Array Size') parser.setMustHaveParameter('RunEnv:CPU Speed') parser.setMustHaveParameter('RunEnv:Nodes') parser.setMustHaveStatistic('Average Double-Precision General Matrix Multiplication (DGEMM) Floating-Point Performance') parser.setMustHaveStatistic("Average STREAM 'Add' Memory Bandwidth") parser.setMustHaveStatistic("Average STREAM 'Copy' Memory Bandwidth") parser.setMustHaveStatistic("Average STREAM 'Scale' Memory Bandwidth") parser.setMustHaveStatistic("Average STREAM 'Triad' Memory Bandwidth") parser.setMustHaveStatistic('Fast Fourier Transform (FFTW) Floating-Point Performance') parser.setMustHaveStatistic('High Performance LINPACK Efficiency') parser.setMustHaveStatistic('High Performance LINPACK Floating-Point Performance') parser.setMustHaveStatistic('High Performance LINPACK Run Time') parser.setMustHaveStatistic('MPI Random Access') parser.setMustHaveStatistic('Parallel Matrix Transpose (PTRANS)') 
parser.setMustHaveStatistic('Wall Clock Time') #parse common parameters and statistics parser.parseCommonParsAndStats(appstdout,stdout,stderr,geninfo) if hasattr(parser,'appKerWallClockTime'): parser.setStatistic("Wall Clock Time", total_seconds(parser.appKerWallClockTime), "Second") # Intel MPI benchmark suite contains three classes of benchmarks: # # Single-transfer, which needs only 2 processes # Parallel-transfer, which can use as many processes that are available # Collective, which can use as many processes that are available # The parameters mapping table Params = { "CommWorldProcs" : [ "MPI Ranks", "", "" ], "HPL_N" : [ "High Performance LINPACK Problem Size", "", "" ], "HPL_nprow" : [ "High Performance LINPACK Grid Rows", "", "" ], "HPL_npcol" : [ "High Performance LINPACK Grid Cols", "", "" ], "PTRANS_n" : [ "PTRANS Problem Size", "", "" ], "MPIRandomAccess_N" : [ "MPIRandom Problem Size", "MWord", "val/1024/1024" ], "STREAM_VectorSize" : [ "STREAM Array Size", "MWord", "" ], "DGEMM_N" : [ "DGEMM Problem Size", "", "" ], "omp_get_num_threads" : [ "OpenMP Threads", "", "" ], } # The result mapping table Metrics = { "HPL_Tflops" : [ "High Performance LINPACK Floating-Point Performance", "MFLOP per Second", "val*1e6" ], "HPL_time" : [ "High Performance LINPACK Run Time", "Second", "" ], "PTRANS_GBs" : [ "Parallel Matrix Transpose (PTRANS)", "MByte per Second", "val*1024" ], "MPIRandomAccess_GUPs" : [ "MPI Random Access", "MUpdate per Second", "val*1000" ], "MPIFFT_Gflops" : [ "Fast Fourier Transform (FFTW) Floating-Point Performance", "MFLOP per Second", "val*1000" ], "StarDGEMM_Gflops" : [ "Average Double-Precision General Matrix Multiplication (DGEMM) Floating-Point Performance", "MFLOP per Second", "val*1000" ], "StarSTREAM_Copy" : [ "Average STREAM 'Copy' Memory Bandwidth", "MByte per Second", "val*1024" ], "StarSTREAM_Scale" : [ "Average STREAM 'Scale' Memory Bandwidth", "MByte per Second", "val*1024" ], "StarSTREAM_Add" : [ "Average STREAM 'Add' Memory 
Bandwidth", "MByte per Second", "val*1024" ], "StarSTREAM_Triad" : [ "Average STREAM 'Triad' Memory Bandwidth", "MByte per Second", "val*1024" ] } #read output lines=[] if os.path.isfile(appstdout): fin=open(appstdout,"rt") lines=fin.readlines() fin.close() #process the output parser.successfulRun=False resultBegin=None hpl_tflops=None numCores=None values={} j=-1 while j<len(lines)-1: j+=1 m=re.search(r'End of HPC Challenge tests',lines[j]) if m:parser.successfulRun=True m=re.match(r'^Begin of Summary section',lines[j]) if m: resultBegin=1 continue m=re.match(r'^(\w+)=([\w\.]+)',lines[j]) if m and resultBegin: metricName=m.group(1).strip() values[metricName] = m.group(2).strip() if metricName=="HPL_Tflops":hpl_tflops = float(values[metricName]) if metricName=="CommWorldProcs":numCores = int(values[metricName]) m=re.match(r'^Running on ([0-9\.]+) processors',lines[j]) if m: numCores = int(m.group(1).strip()) if hpl_tflops==None or numCores==None: parser.successfulRun=False hpccVersion=None MHz=None theoreticalGFlops=None if "VersionMajor" in values and "VersionMinor" in values and "VersionMicro" in values: hpccVersion=values["VersionMajor"]+"."+values["VersionMinor"]+"."+values["VersionMicro"] if "VersionRelease" in values: hpccVersion+=values["VersionRelease"] if hpccVersion: parser.setParameter("App:Version", hpccVersion) for k,v in Params.iteritems(): if not k in values: continue val=values[k] if v[2].find('val')>=0: val=float(val) val=eval(v[2]) if v[1]=="": v[1]=None parser.setParameter( "Input:" + v[0],val, v[1]) for k,v in Metrics.iteritems(): if not k in values: continue val=values[k] if v[2].find('val')>=0: val=float(val) val=eval(v[2]) if v[1]=="": v[1]=None parser.setStatistic(v[0],val, v[1]) if "cpuSpeed" in parser.geninfo: ll=parser.geninfo["cpuSpeed"].splitlines() cpuSpeedMax=0.0 for l in ll: m=re.search(r'([\d\.]+)$',l) if m: v=float(m.group(1).strip()) if v>cpuSpeedMax:cpuSpeedMax=v if cpuSpeedMax>0.0: parser.setParameter("RunEnv:CPU 
Speed",cpuSpeedMax, "MHz" ) MHz=cpuSpeedMax #print appKerNResVars #print MHz #print numCores if appKerNResVars !=None: if 'resource' in appKerNResVars and 'app' in appKerNResVars: if 'theoreticalGFlopsPerCore' in appKerNResVars['app']: resname=appKerNResVars['resource']['name'] if resname in appKerNResVars['app']['theoreticalGFlopsPerCore']: theoreticalGFlops=appKerNResVars['app']['theoreticalGFlopsPerCore'][resname] * numCores print "theoreticalGFlops",resname,theoreticalGFlops if theoreticalGFlops==None and MHz!=None: # Most modern x86 & POWER processors are superscale and can issue 4 instructions per cycle theoreticalGFlops = MHz * numCores * 4 / 1000.0 if theoreticalGFlops and hpl_tflops: # Convert both to GFlops and derive the Efficiency percent = ( 1000.0 * hpl_tflops / theoreticalGFlops ) * 100.0; parser.setStatistic("High Performance LINPACK Efficiency", "%.3f"%percent, "Percent") if __name__ == "__main__": #output for testing purpose print "parsing complete:",parser.parsingComplete(Verbose=True) parser.printParsNStatsAsMustHave() print parser.getXML() #return complete XML overwize return None return parser.getXML()
def processAppKerOutput(appstdout=None,stdout=None,stderr=None,geninfo=None,appKerNResVars=None): #set App Kernel Description parser=AppKerOutputParser( name = 'xdmod.app.chem.gamess', version = 1, description = "Gamess: General Atomic and Molecular Electronic Structure System", url = 'http://www.msg.ameslab.gov', measurement_name = 'Gamess' ) #set obligatory parameters and statistics #set common parameters and statistics parser.setCommonMustHaveParsAndStats() #set app kernel custom sets parser.setMustHaveParameter('App:Version') parser.setMustHaveStatistic('Wall Clock Time') parser.setMustHaveStatistic('User Time') parser.setMustHaveStatistic('Time Spent in MP2 Energy Calculation') parser.setMustHaveStatistic('Time Spent in Restricted Hartree-Fock Calculation') #parse common parameters and statistics parser.parseCommonParsAndStats(appstdout,stdout,stderr,geninfo) #read output lines=[] if os.path.isfile(appstdout): fin=open(appstdout,"rt") lines=fin.readlines() fin.close() #process the output startTime=None endTime=None MP2EnergyCalculationTime=0.0 RHFCalculationTime=0.0 efficiency=None j=0 while j<len(lines): m=re.search(r'GAMESS VERSION = ([^*]+)',lines[j]) if m:parser.setParameter("App:Version",m.group(1).strip()) m=re.search(r'PARALLEL VERSION RUNNING ON\s*([\d\.]+) PROCESSORS IN\s*([\d\.]+) NODE',lines[j]) if m: parser.setParameter("App:NCores",m.group(1).strip()) parser.setParameter("App:NNodes",m.group(2).strip()) m=re.search(r'EXECUTION OF GAMESS BEGUN (.+)',lines[j]) if m:startTime=parser.getDateTimeLocal(m.group(1).strip()) m=re.search(r'EXECUTION OF GAMESS TERMINATED NORMALLY (.+)',lines[j]) if m:endTime=parser.getDateTimeLocal(m.group(1).strip()) if re.search(r'DONE WITH MP2 ENERGY',lines[j]): j+=1 m=re.search(r'STEP CPU TIME=\s*([\d\.]+)',lines[j]) if m:MP2EnergyCalculationTime+=float(m.group(1).strip()) if re.search(r'END OF RHF CALCULATION',lines[j]): j+=1 m=re.search(r'STEP CPU TIME=\s*([\d\.]+)',lines[j]) if 
m:RHFCalculationTime+=float(m.group(1).strip()) m=re.search(r'TOTAL WALL CLOCK TIME.+CPU UTILIZATION IS\s+([\d\.]+)',lines[j]) if m:efficiency=float(m.group(1).strip()) j+=1 if startTime and endTime: wallTime=total_seconds(endTime-startTime) if wallTime >= 0.0: parser.setStatistic('Wall Clock Time', str(wallTime), "Second" ) if efficiency: parser.setStatistic( "User Time", str((0.01 * efficiency * wallTime)), "Second" ) parser.setStatistic("Time Spent in MP2 Energy Calculation", str(MP2EnergyCalculationTime), "Second" ) parser.setStatistic("Time Spent in Restricted Hartree-Fock Calculation", str(RHFCalculationTime),"Second" ) if "attemptsToLaunch" in parser.geninfo: parser.setStatistic("Attempts to Launch", parser.geninfo['attemptsToLaunch'] ) else: parser.setStatistic("Attempts to Launch", 1 ) if __name__ == "__main__": #output for testing purpose print "parsing complete:",parser.parsingComplete() parser.printParsNStatsAsMustHave() print parser.getXML() #return complete XML overwize return None return parser.getXML()
def processAppKerOutput(appstdout=None, stdout=None, stderr=None, geninfo=None, appKerNResVars=None): # set App Kernel Description parser = AppKerOutputParser( name="xdmod.bundle", version=1, description="bundled tasks", url="https://xdmod.ccr.buffalo.edu", measurement_name="BUNDLE", ) parser.setMustHaveParameter("RunEnv:Nodes") parser.setMustHaveStatistic("Wall Clock Time") parser.setMustHaveStatistic("Success Rate") parser.setMustHaveStatistic("Successful Subtasks") parser.setMustHaveStatistic("Total Number of Subtasks") # set obligatory parameters and statistics # set common parameters and statistics parser.parseCommonParsAndStats(appstdout, stdout, stderr, geninfo) if hasattr(parser, "wallClockTime"): parser.setStatistic("Wall Clock Time", total_seconds(parser.wallClockTime), "Second") # check the status of subtasks # appKerNResVars['taskId']=self.task_id # appKerNResVars['subTasksId']=self.subTasksId successRate = 0.0 totalSubtasks = 0 successfulSubtasks = 0 try: db = MySQLdb.connect( host=akrr.export_db_host, user=akrr.export_db_user, passwd=akrr.export_db_passwd, db=akrr.export_db_name ) cur = db.cursor() for subTaskId in appKerNResVars["subTasksId"]: cur.execute( """SELECT instance_id,status FROM akrr_xdmod_instanceinfo WHERE instance_id=%s ;""", (subTaskId,), ) raw = cur.fetchall() instance_id, status = raw[0] successRate += status successfulSubtasks += status successRate /= len(appKerNResVars["subTasksId"]) totalSubtasks = len(appKerNResVars["subTasksId"]) cur.close() del db except: print traceback.format_exc() parser.setStatistic("Success Rate", successRate) parser.setStatistic("Successful Subtasks", successfulSubtasks) parser.setStatistic("Total Number of Subtasks", totalSubtasks) # if successfulSubtasks==totalSubtasks: if __name__ == "__main__": # output for testing purpose print "parsing complete:", parser.parsingComplete(Verbose=True) parser.printParsNStatsAsMustHave() print parser.getXML() # return complete XML overwize return None return 
parser.getXML()