def doMain(self, name='h1f', file='fillrandom', dir='/home/fine/panda/pandamon/static/root', width=600, height=400, log=False, options='H', block='pyrootfile'):
    # Run an external "processor" script that draws a ROOT histogram, capture its
    # single-line stdout from a temp file, and publish the resulting plot payload.
    # (No docstring added on purpose: this framework publishes __doc__ as Web API help.)
    #
    #   name    - histogram name passed to the pyrootfile processor
    #   file    - ROOT file name (".root" appended if missing)
    #   dir     - directory holding the ROOT files
    #   width/height - plot size forwarded to the client
    #   log     - truthy / "true" / "yes" enables log scale
    #   options - ROOT draw options string
    #   block   - which processor script to invoke ('pyrootfile' or 'pandsusers')
    connectionTime = Stopwatch.Stopwatch()
    rootout = os.tmpnam()  # NOTE(review): os.tmpnam is deprecated/race-prone; Python 2 only
    print "-------------- rootout=", rootout
    # print ' RedirectOutput=' , gSystem.RedirectOutput(rootout,'a')
    if dir == None or dir == '':
        dir = '/home/fine/panda/pandamon/static/root'
    if file == None or file == '':
        file = 'fillrandom.root'
    timer = Stopwatch.Stopwatch()
    #
    # Open ROOT file
    #
    if options == None:
        options = ''
    print utils.lineInfo(), block
    if block == 'pyrootfile':
        # Generic histogram processor: needs the histogram name.
        if file[-5:] != ".root":
            file += '.root'
        call(['/home/fine/panda/pandamon/pmModules/processor/%s.py' % block, rootout, name, file, dir, options])
    elif block == 'pandsusers':
        # User-activity processor: fixed data dir, default file swapped for the 180-day set.
        dir = '/home/fine/panda/pandamon/static/data'
        if file == 'fillrandom':
            file = 'useact180days'
        call(['/home/fine/panda/pandamon/pmModules/processor/%s.py' % block, rootout, file, dir, options])
    # The processor writes one line of Python-literal output; read it and clean up.
    r4panda = open(rootout)
    txt = r4panda.readlines()[0][:-1]  # strip the trailing newline
    r4panda.close()
    os.remove(rootout)
    main = {}
    main['header'] = ["Params", "values"]
    # NOTE(review): eval() fully trusts the processor's stdout — safe only because
    # the script path is hard-coded; do not point it at untrusted output.
    main['info'] = eval(txt)
    main['width'] = width
    main['height'] = height
    # 'log' may arrive as a bool or as a string from the URL query.
    if log == True or isinstance(log, str) and (log.lower() == "true" or log.lower() == "yes"):
        main['log'] = True
    main['time'] = {}
    self.publish(main)
def doJson(self, script=None):
    # NOTE: the docstring below is runtime data — the web server publishes it as
    # the "?" help widget — so it is kept verbatim.
    """ doJson(self,script=None) defines the Web API list of parametes and its default values
    <p>This Python DocString is to automatically published by Web Server the Web API Help widget
    The Web Browser user will be able to access it by clicking the "?" button onto the page menu bar.
    The string may contain the regular HTML markups.
    script - the file name to download and execute . <br> It should contain the Javascript function `
    <pre> function _fun_(tag,content) { . . . . } </pre>
    """
    self.publishTitle('Hello JavaScript')
    # short cut for
    self.publishNav("Load and Execute the '%s' Javascript " % (script if script != None else 'Embedded'))
    if script != None:
        """ publish the reference to the external JavaScript if any """
        # Builds "<scriptURL>/<script>.js" and publishes it with the 'script' role.
        self.publish("%s/%s" % (self.server().fileScriptURL(), "%s.js" % script), role=pmRoles.script())
    """ Create the custom content to be published """
    content = {"Hello": "This is my own content to be published from %s " % utils.lineInfo()}
    self.publish(content)
    # HTTP cache-control hints for the proxy (s-maxage) and the browser (max-age).
    self.publish({'s-maxage': 340, 'max-age': 200}, role=pmRoles.cache())
def param2Args(self, params, keyonly=True):
    """ Convert the list of the URL ampersand separated parameteres to the python argument list """
    # Returns (values, keys, syskeys, extraValue):
    #   values     - always empty here (kept for interface compatibility)
    #   keys       - ordinary parameters
    #   syskeys    - parameters whose name starts with "_"
    #   extraValue - parameters pre-declared by self.extraParams()
    # 'keyonly' is currently unused by this body.
    values = []
    keys = {}
    syskeys = {}
    extraValue = {}
    extra = self.extraParams()
    if extra:
        extraValue = extra.copy()
    semicolon = False
    if ";" in params and '&' in params:
        # http://stackoverflow.com/questions/5158565/why-does-pythons-urlparse-parse-qs-split-arguments-on-semicolon
        w3c = """ ';' is equivalent to '&'
        W3C recommends that all web servers support semicolon separators in the place of ampersand separators.
        http://www.w3.org/TR/1999/REC-html401-19991224/appendix/notes.html#h-B.2.2
        """
        # Hide ';' from parse_qs (which would split on it) behind a ',,,' marker;
        # it is restored per-value below when 'semicolon' is set.
        params = params.replace(';', ',,,')
        print utils.lineInfo(), "params <%s> contains the ';', %s" % (params, w3c)
        semicolon = True
    try:
        # NOTE(review): 'v' is the parameter NAME and 'k' its value list here —
        # the names are swapped relative to the usual convention.
        for (v, k) in cgi.parse_qs(params, True).iteritems():
            # print " 163 ", v,k,params
            if len(k) != 1:
                raise SyntaxError("Ambiguous values %s of the '%s' URL parameters" % (k, v))
            kpar = k[0]
            if semicolon:
                kpar = k[0].replace(",,,", ";")
            var = v.strip()
            if var[0] == "_":
                syskeys[var] = self.param(kpar)
            elif extra and extra.has_key(var):
                extraValue[var] = self.param(kpar)
            else:
                keys[var] = self.param(kpar)
    except:
        raise
        pass  # NOTE(review): unreachable — 'raise' above always exits this handler
    self.debug("%s from %s " % ((values, keys, syskeys), params))
    self.debug(" keys=<%s> syskeys=<%s>" % (urllib.urlencode(keys), urllib.urlencode(syskeys)))
    return (values, keys, syskeys, extraValue)
def param2Args(self, params, keyonly=True): """ Convert the list of the URL ampersand separated parameteres to the python argument list """ values = [] keys = {} syskeys = {} extraValue = {} extra = self.extraParams() if extra: extraValue = extra.copy() semicolon = False if ";" in params and "&" in params: # http://stackoverflow.com/questions/5158565/why-does-pythons-urlparse-parse-qs-split-arguments-on-semicolon w3c = """ ';' is equivalent to '&' W3C recommends that all web servers support semicolon separators in the place of ampersand separators. http://www.w3.org/TR/1999/REC-html401-19991224/appendix/notes.html#h-B.2.2 """ params = params.replace(";", ",,,") print utils.lineInfo(), "params <%s> contains the ';', %s" % (params, w3c) semicolon = True try: for (v, k) in cgi.parse_qs(params, True).iteritems(): # print " 163 ", v,k,params if len(k) != 1: raise SyntaxError("Ambiguous values %s of the '%s' URL parameters" % (k, v)) kpar = k[0] if semicolon: kpar = k[0].replace(",,,", ";") var = v.strip() if var[0] == "_": syskeys[var] = self.param(kpar) elif extra and extra.has_key(var): extraValue[var] = self.param(kpar) else: keys[var] = self.param(kpar) except: raise pass self.debug("%s from %s " % ((values, keys, syskeys), params)) self.debug(" keys=<%s> syskeys=<%s>" % (urllib.urlencode(keys), urllib.urlencode(syskeys))) return (values, keys, syskeys, extraValue)
def timeValue(cls, str):
    """Parse a time string into a datetime value.

    Accepted formats, selected by string length:
      'YYYY-MM-DD' (10), 'YYYY-MM-DD HH' (13), 'YYYY-MM-DD HH:MM' (16),
      'YYYY-MM-DD HH:MM:SS' (>= 19, extra characters ignored).
    Raises ValueError for any other length.
    """
    # BUGFIX: the body referenced 'tstr', which was never defined (the parameter
    # is named 'str') — every call raised NameError. The parameter name is kept
    # for keyword-calling compatibility; alias it instead.
    tstr = str
    res = None
    if len(tstr) == 10:
        res = datetime.strptime(tstr, "%Y-%m-%d")
    elif len(tstr) == 13:
        res = datetime.strptime(tstr, "%Y-%m-%d %H")
    elif len(tstr) == 16:
        res = datetime.strptime(tstr, "%Y-%m-%d %H:%M")
    elif len(tstr) >= 19:
        # BUGFIX: was tstr[:18], which chopped the last digit of the seconds
        # field ('...:05' parsed as 0 seconds); a full timestamp is 19 chars.
        res = datetime.strptime(tstr[:19], "%Y-%m-%d %H:%M:%S")
    else:
        raise ValueError("%s: time length %s not parseable: %s" % (utils.lineInfo(), len(tstr), tstr))
    return res
def timeValue(cls, str):
    """Parse a time string into a datetime value.

    Accepted formats, selected by string length:
      'YYYY-MM-DD' (10), 'YYYY-MM-DD HH' (13), 'YYYY-MM-DD HH:MM' (16),
      'YYYY-MM-DD HH:MM:SS' (>= 19, extra characters ignored).
    Raises ValueError for any other length.
    """
    # BUGFIX: the body referenced 'tstr', which was never defined (the parameter
    # is named 'str') — every call raised NameError. The parameter name is kept
    # for keyword-calling compatibility; alias it instead.
    tstr = str
    res = None
    if len(tstr) == 10:
        res = datetime.strptime(tstr, '%Y-%m-%d')
    elif len(tstr) == 13:
        res = datetime.strptime(tstr, '%Y-%m-%d %H')
    elif len(tstr) == 16:
        res = datetime.strptime(tstr, '%Y-%m-%d %H:%M')
    elif len(tstr) >= 19:
        # BUGFIX: was tstr[:18], which chopped the last digit of the seconds
        # field ('...:05' parsed as 0 seconds); a full timestamp is 19 chars.
        res = datetime.strptime(tstr[:19], '%Y-%m-%d %H:%M:%S')
    else:
        raise ValueError('%s: time length %s not parseable: %s' % (utils.lineInfo(), len(tstr), tstr))
    return res
def doJson(self, hello=None, pattern=None):
    # NOTE: this docstring is published verbatim as the Web API help widget —
    # kept byte-identical (including its typos) to preserve the rendered help.
    """ List the Panda Monutor Modules: <ul> <li><code>hello</code> = "yes" to list the hello appplication <li><code>pattern</code> = regexp pattern to list the matched modules only </ul> """
    # The modules directory is derived from this very file's location.
    modulesDir = os.path.dirname(utils.lineInfo(False, '%(filename)s'))
    pat = None
    if pattern != None:
        pat = re.compile(pattern)
    filtered = self.listFiles(modulesDir, hello, pat)
    # htmlList='<IMG SRC="http://www.mikesfreegifs.com/main4/underconstruction/coolconstr.gif" ALIGN=LEFT BORDER=0 HSPACE=10>'
    htmlList = "<table><thead><tr><th>#</th><th>Module Name</th><th>Module Description</th><th>SVN rev</th></tr></thead><tbody>"
    usr = self.server().user()
    version = self.server().version().get('version')
    if version == None:
        version = ''
    else:
        version = "/~%s" % version
    script = version
    i = 0
    for f in sorted(filtered):
        # 'taskBufferList' is hidden from anonymous (not-logged-in) users.
        if f == 'taskBufferList' and usr == None:
            continue
        else:
            try:
                # Instantiate the module just to read its docstring for the table.
                object = pmModule.factory(f.replace(".py", ""), fileonly=True)
                doc = object.__doc__
                if doc == None:
                    doc = "The module is under development. It has not been documented yet"
                i = i + 1
                htmlList += "<tr><td>%d</td><td>" % i
                htmlList += "<a title='Click to start the %(module)s application and see its API doc' href='%(version)s/%(module)s'>%(module)s</a>\n" % {'version': version, 'module': f}
                htmlList += "</td><td>%s</td></tr>" % doc
            except:
                # Modules that fail to load are silently skipped.
                # raise
                # htmlList += "<tr><td>-</td><td>"
                # htmlList +="<a title='Click to start the %(module)s application and see its API doc' href='%{script}s/%(module)s'>%(module)s</a>\n" % {'script':script, 'module':f}
                # htmlList +="</td><td>%s</td></tr>" % f.replace(".py","").replace('/','.')
                pass
    htmlList += "</tbody></table>"
    self.publishTitle("The list of the <a href='https://twiki.cern.ch/twiki/bin/view/PanDA/PandaPlatform#API'>Panda Monitor Modules</a>")
    self.publishMain(htmlList)
    # Long cache: the module list changes only on deployment.
    self.publish({'s-maxage': 9000, 'max-age': 9000}, role=pmRoles.cache())
def doJson(self, script=None):
    """ doJson(self,script=None) defines the Web API list of parametes and its default values
    <p>This Python DocString is to automatically published by Web Server the Web API Help widget
    The Web Browser user will be able to access it by clicking the "?" button onto the page menu bar.
    The string may contain the regular HTML markups.
    script - the file name to download and execute . <br> It should contain the Javascript function `
    <pre> function _fun_(tag,content) { . . . . } </pre>
    """
    # (Docstring above is runtime help text — reproduced verbatim.)
    self.publishTitle('Hello JavaScript')
    # Navigation label shows which script will run, or 'Embedded' when none given.
    label = script if script != None else 'Embedded'
    self.publishNav("Load and Execute the '%s' Javascript " % label)
    if script != None:
        # Publish the reference to the external JavaScript file.
        js_ref = "%s/%s" % (self.server().fileScriptURL(), "%s.js" % script)
        self.publish(js_ref, role=pmRoles.script())
    # Publish a small custom payload for the client-side function to render.
    content = {"Hello": "This is my own content to be published from %s " % utils.lineInfo()}
    self.publish(content)
    # Cache hints: proxy 340 s, browser 200 s.
    self.publish({'s-maxage': 340, 'max-age': 200}, role=pmRoles.cache())
def doQuery(self, site='all', jobtype='all', days=1, hostonly='yes', plot='finished,failed', style='SB', details=None, deep='no'):
    # NOTE: this docstring is published verbatim as the Web API help widget — kept byte-identical.
    """ Get the list of the jobs' status per worker node for selected site for upto 3 last days
    <ol>
    <li><code>site</code> - site to get the information about or "all"
    <li><code>jobtype</code> = 'production" or " analysis" or "all"
    <li><code>days</code> - number of days we want to fetch the information for. This parameter must be less or equal 3.
    <li><code>hostonly</code> - "yes" . "no" - means provide the information 'per CPU' rather 'per host'
    <li><code>plot</code> - the comma separated list of the <a href='https://twiki.cern.ch/twiki/bin/viewauth/Atlas/PandaShiftGuide#Job_state_definitions_in_Panda'>job status names</a> to generate the plot for
    <li><code>style</code> the combination of the symbols: 'H' - histogram, 'B' - bar , 'L' - line, 'P' - points, 'S' - stacked<br> = 'BS' - 'by default'
    <li><code>details</code> - 'yes' - provide the status per host<br> <br> = 'summary' - provide the summary only <br> default = 'yes' if not site == 'all' else 'no'
    <li><code>deep</code> - do not look up the "long job table" . It can be very slow. default = 'no'
    <li> <b><a title='Click to see the full list of the parameters available' href='http://pandamon.cern.ch/describe?table=^JOBSARCHIVED4&doc=*'>PARAM</a>=value</b> - comma-separated list of the value <a href ='https://twiki.cern.ch/twiki/bin/view/PanDA/PandaPlatform#API_parameter_value_format'> patterns</a> to filter the job records.
    <li><code>jobStatus</code> - it can be any comma-separated list of the Panda Jobs states patterns or / and <br> two 'meta' states: "factory" to list the "factory" nodes and "wn" to list the the Worker Nodes
    </ol>
    """
    vals = self.extraValuesFilled()
    if site == None:
        site = 'all'
    factory = None
    wn = None
    titletype = "PanDA"
    jobStatus = None
    if vals != None:
        # Expand the 'factory' / 'wn' meta-states into their concrete state lists.
        jstat = vals.get('jobStatus')
        jobStatus = jstat
        if jstat != None:
            jstat = [x.lower() for x in utils.parseArray(jstat)]
            print utils.lineInfo(), jstat, 'factory' in jstat
            if 'factory' in jstat:
                factory = self.factory()
                jstat += factory
                titletype = "Factory"
            if 'wn' in jstat:
                wn = self.wn()
                jstat += wn
                titletype = "Worker"
            jstat = list(set(jstat))  # de-duplicate
            vals['jobStatus'] = ','.join(jstat)
    # Normalize 'deep' to a boolean: anything but 'no' (case-insensitive) is deep.
    if deep == None or deep == False:
        deep = 'no'
    deep = not deep.lower() == 'no'
    title = ''
    hours = (float(days)) * 24
    Deep = 'Deep' if deep else ''
    # NOTE(review): 'and False' makes this branch permanently dead — the 3-day
    # cap is currently disabled; the else branch always runs.
    if float(days) > 3 and False and not deep:
        days = 3.0
        hours = 24 * days
        title = 'The List of the %s Nodes Activity for the Last %s Hours Only.' % (titletype, int(hours))
    else:
        title = 'The %s List of the %s Nodes Activity for the %s Hours' % (Deep, titletype, int(hours))
    days = float(days)
    try:
        if jobtype.lower() == 'all':
            jobtype = None  # None means "no jobtype filter" downstream
    except:
        pass
    self.publishTitle(title)
    # Query the four live job tables; the slow archive table only for deep queries.
    modes = ('Defined', 'Waiting', 'Active', 'Archived')
    tables = []
    for mode in modes:
        tables += ['ATLAS_PANDA.jobs%s4' % mode]
    if days > 3 and deep:
        tables += ['ATLAS_PANDAARCH.JOBSARCHIVED']
    q = pmt.wnList(site, jobtype, table=tables, days=days, conditions=vals)
    try:
        # Default: per-host details only when a single concrete site was selected.
        if details == None:
            details = not (site == None or site == 'None' or site.lower() == 'all' or site == '*')
        # else:
        #     details = isinstance(details,str) and details.lower() == 'yes'
    except:
        details = False
    ## Publish the result
    main = {}
    main['data'] = self.do(q['header'], q['rows'], site, hostonly, details, factory, wn)
    main['params'] = {'jobtype': jobtype, 'days': days, 'site': site, 'hostonly': hostonly, 'jobStatus': jobStatus}
    plotpars = []
    if plot:
        # Translate the requested plot aliases into concrete job-state lists.
        for p in plot.split(","):
            if p.lower() == 'prerun' or factory != None:
                if wn == None:
                    plotpars += self.factory()
                if factory == None:
                    plotpars += ['pending', 'sent', 'starting']
            elif p.lower() == 'finishing' and factory == None:
                plotpars += ['holding', 'transferring']
            else:
                plotpars += [p]
        main['params']['plot'] = list(set(plotpars))  # remove duplicates
    if style:
        main['params']['style'] = style
    self.publish(main)
    self.publishNav(' type=%s days= last %s ' % (jobtype, days))
    self.publish("%s/%s" % (self.server().fileScriptURL(), "monitor/%s.js" % "wnlist"), role=pmRoles.script())
    self.publish({'s-maxage': 600, 'max-age': 600}, role=pmRoles.cache())
def doMain(self, app='prun,pathena,ganga', plots='twall,tcpu,twait,trun', year='last', month='last', dir=None, layout=None, width=600, height=400, log=False, options='H', block='pyrootfile'):
    # Publish timing-distribution histograms for ATLAS analysis jobs taken from
    # monthly ROOT files named 'analysis_14_ALL_ALL_YYYYMM.root'.
    # (No docstring added on purpose: this framework publishes __doc__ as Web API help.)
    #
    #   app    - comma-separated application list (one plot column each)
    #   plots  - comma-separated timing quantities (one plot row each)
    #   year/month - integers, or 'last' to pick the newest matching file
    #   layout - "CxR" string overriding the computed grid
    connectionTime = Stopwatch.Stopwatch()
    rootout = os.tmpnam()
    # print ' RedirectOutput=' , gSystem.RedirectOutput(rootout,'a')
    if dir == None or dir == '':
        dir = '/home/fine/panda/pandamon/static/root'
    if app == None or app == '':
        app = self._app
    if plots == None or plots == '':
        plots = self._plots
    if layout == None:
        layout = {'columns': len(app.split(',')), 'rows': len(plots.split(','))}
    else:
        lt = layout.split('x', 1)
        layout = {'columns': lt[0].strip(), 'rows': lt[1].strip()}
    # 'last' is only meaningful for string inputs; ints fall through via except.
    lastMonth = False
    lastYear = False
    try:
        if month.lower() == 'last':
            lastMonth = True
    except:
        pass
    try:
        if year.lower() == 'last':
            lastYear = True
    except:
        pass
    if lastMonth or lastYear:
        # Scan the directory and take the newest file matching the year/month pattern.
        dirlist = sorted(os.listdir(dir))
        filtered = []
        pattern = 'analysis_14_ALL_ALL_(%(year)s)(%(month)s)' % {'year': '\\d{4}' if lastYear else '%04d' % year, 'month': '\\d{2}' if lastMonth else '%02d' % month}
        lut = re.compile(pattern)
        for f in dirlist:
            if lut.match(f):
                filtered.append(f)
        file = filtered[-1]
        note = file.replace('analysis_14_ALL_ALL_', '').replace('.root', '')
        self.publishTitle('Timing distribution for ATLAS analysis jobs in the %s.%s month' % (note[:4], note[4:]))
        self.publishNav('The histogram for %s %s %s.%s' % (app, plots, note[:4], note[4:]))
    else:
        file = 'analysis_14_ALL_ALL_%(year)04d%(month)02d' % {'year': year, 'month': month}
        # BUGFIX: format string was '%(year}04d.%(month)02d onth' — the mismatched
        # '}' raised ValueError("incomplete format key") whenever an explicit
        # year/month was requested, and 'onth' was a typo for 'month'.
        self.publishTitle('Timing distribution for ATLAS analysis jobs in the %(year)04d.%(month)02d month' % {'year': year, 'month': month})
        self.publishNav('The histogram for %s %s %04d.%02d' % (app, plots, year, month))
    if file == None or file == '':
        file = 'fillrandom.root'
    # Build the comma-separated histogram-name list: <dim>_<plot>_<app>,...
    name = ''
    for p in plots.split(','):
        for a in app.split(','):
            name += "%(dim)s_%(plot)s_%(app)s," % {'dim': self._dimension, 'plot': p, 'app': a}
    timer = Stopwatch.Stopwatch()
    #
    # Open ROOT file
    #
    if options == None:
        options = ''
    if file[-5:] != ".root":
        file += '.root'
    main = {}
    main['header'] = ["Params", "values"]
    # The processor scripts live next to this module in ./processor.
    processor = os.path.join(os.path.dirname(utils.lineInfo(False, '%(filename)s')), 'processor')
    main['info'] = pmt.getPopularity(name, file, dir, options, block, path=processor)  # eval(txt)
    main['width'] = width
    main['height'] = height
    main['layout'] = layout
    if log == True or isinstance(log, str) and (log.lower() == "true" or log.lower() == "yes"):
        main['log'] = True
    main['time'] = {}
    self.publish(main)
def doMain(self, plots='mc_data,user', year='last', month='last', dir='/data4/work/analysis_15/ALL/ALL', layout=None, width=440, height=240, log=False, options='H', block='pyrootfile'):
    # Publish project/data-type popularity histograms for ATLAS analysis jobs
    # from monthly ROOT files named 'analysis_15_ALL_ALL_YYYYMM.root'.
    # (No docstring added on purpose: this framework publishes __doc__ as Web API help.)
    #
    #   plots  - comma-separated histogram names (one column each, single row)
    #   year/month - integers, or 'last' to pick the newest matching file
    #   layout - "CxR" string overriding the computed grid
    connectionTime = Stopwatch.Stopwatch()
    rootout = os.tmpnam()
    # print ' RedirectOutput=' , gSystem.RedirectOutput(rootout,'a')
    if dir == None or dir == '':
        dir = '/home/fine/panda/pandamon/static/root'
    if plots == None or plots == '':
        plots = self._plots
    if layout == None:
        layout = {'columns': len(plots.split(',')), 'rows': 1}
    else:
        lt = layout.split('x', 1)
        layout = {'columns': int(lt[0].strip()), 'rows': int(lt[1].strip())}
    # 'last' only makes sense for string inputs; ints fall through via except.
    lastMonth = False
    lastYear = False
    try:
        if month.lower() == 'last':
            lastMonth = True
    except:
        pass
    try:
        if year.lower() == 'last':
            lastYear = True
    except:
        pass
    # One pass over the directory: 'filtered' collects files matching the
    # requested period, 'filteredAll'/'years' index every available month.
    dirlist = sorted(os.listdir(dir))
    filtered = []
    filteredAll = []
    pattern = 'analysis_15_ALL_ALL_(%(year)s)(%(month)s)(.root)$' % {'year': '\\d{4}' if lastYear else '%04d' % year, 'month': '\\d{2}' if lastMonth else '%02d' % month}
    patternAll = 'analysis_15_ALL_ALL_(%(year)s)(%(month)s)(.root)$' % {'year': '\\d{4}', 'month': '\\d{2}'}
    lut = re.compile(pattern)
    lutAll = re.compile(patternAll)
    years = {}
    for f in dirlist:
        if lut.match(f):
            filtered.append(f)
        yearmm = lutAll.match(f)
        if yearmm:
            filteredAll.append(f)
            yymm = yearmm.groups()
            if not years.has_key(yymm[0]):
                years[yymm[0]] = []
            years[yymm[0]].append(yymm[1])
    if lastMonth or lastYear:
        file = filtered[-1]  # newest match (dirlist is sorted)
        note = file.replace('analysis_15_ALL_ALL_', '').replace('.root', '')
        self.publishTitle('Project and Data Type Popularity for ATLAS Analysis Jobs in the %s.%s Month' % (note[:4], note[4:]))
        self.publishNav('The Projects for %s %s.%s' % (plots, note[:4], note[4:]))
    else:
        file = 'analysis_15_ALL_ALL_%(year)04d%(month)02d' % {'year': year, 'month': month}
        self.publishTitle('Project and Data Type Popularity for ATLAS Analysis Jobs in the %04d.%02d Month' % (year, month))
        self.publishNav('The Projects for "%s" in %04d.%02d' % (plots, year, month))
    name = plots
    timer = Stopwatch.Stopwatch()
    #
    # Open ROOT file
    #
    if options == None:
        options = ''
    if file[-5:] != ".root":
        file += '.root'
    main = {}
    main['header'] = ["Params", "values"]
    processor = os.path.join(os.path.dirname(utils.lineInfo(False, '%(filename)s')), 'processor')
    info = pmt.getPopularity(name, file, dir, options, block, path=processor)
    # Post-process the returned histogram descriptors: cap the x-axis bins and
    # set human-readable axis titles. Malformed entries are skipped silently.
    for i in info:
        if not 'Histogram' in i[0]:
            continue
        try:
            hists = i[1]
            for h in hists:
                property = h['data']['property']
                property['xbound'] = 20
                title = h['attr']['title']
                if 'type' in title:
                    h['attr']['xaxis']['title'] = 'Project and Type'
                else:
                    h['attr']['xaxis']['title'] = 'Project'
                h['attr']['yaxis']['title'] = 'Jobs'
        except:
            pass
    main['info'] = info
    main['files'] = filteredAll
    main['months'] = years
    main['width'] = width
    main['height'] = height
    main['layout'] = layout
    if log == True or isinstance(log, str) and (log.lower() == "true" or log.lower() == "yes"):
        main['log'] = True
    main['time'] = {}
    self.publish(main)
def doQuery(self, jobtype='production,test', hours=12, cores=2, vo=None, computingSite=None, processingType=None, workingGroup=None, prodUserName=None, specialHandling=None, region=False):
    # Summarize job counts per state for MultiCore sites (coreCount >= 'cores')
    # over the last 'hours' (capped at 72) and publish the summary for mcore.js.
    # (No docstring added on purpose: this framework publishes __doc__ as Web API help.)
    if int(hours) > 24 * 3:
        hours = 24 * 3
        title = 'Job summary from the MultiCore sites for the last %s hours only.' % hours
        # Use <a href="%s/?%s">"stats"</a> page to see all %s hours' % ( 24 * 3, utils.monURL, 'mode=sitestats', hours )
    else:
        title = 'Job summary from the MultiCore sites for the last %s hours' % hours
    self.publishTitle(title)
    # Collect rows from the four live job tables; the header comes from the first query.
    modes = ('Defined', 'Waiting', 'Active', 'Archived')
    rows = []
    cores = cores if cores != None else 0
    for mode in modes:
        q = pmt.getCloudSummary(jobtype, vo, hours, selectionFields='computingSite,processingType,workingGroup, prodUserName,specialHandling,coreCount', tablename='ATLAS_PANDA.jobs%s4' % mode)
        if len(rows) == 0:
            header = q['header']
        rows += q['rows']
    ### Build the jobsumd dictionary
    jobsumd = {}
    proctyped = {}
    workgroupd = {}
    userd = {}
    spechandled = {}
    for c in self.clouds:
        jobsumd[c] = {}
        jobsumd[c]['ALL'] = [0] * (self.lstates + 1)  # per-state counters + core count slot
        if c != 'ALL':
            jobsumd[c]['status'] = self.cloudList[c]['status']
    # Resolve column positions once, by name.
    iCloud = utils.name2Index(header, 'cloud')
    print utils.lineInfo(), header
    iComputingSite = utils.name2Index(header, 'computingSite')
    iStatus = utils.name2Index(header, 'jobStatus')
    iProctype = utils.name2Index(header, 'processingType')
    iSpechandle = utils.name2Index(header, 'specialHandling')
    iWorkgroup = utils.name2Index(header, 'workingGroup')
    iUser = utils.name2Index(header, 'prodUserName')
    iCore = utils.name2Index(header, 'coreCount')
    iCount = utils.name2Index(header, 'COUNT')
    for r in rows:
        # Skip rows below the requested core threshold.
        ncores = r[iCore] if r[iCore] != None else 0
        if cores != 0 and ncores < cores:
            continue
        site = r[iComputingSite]
        if site == None:
            # print "Db record doesn't define any site: %s " % r
            if region:
                continue
            site = "Unknown"
            # continue
        cloud = r[iCloud]  # utils.getCloud4Site(r)
        status = r[iStatus]
        istatus = utils.name2Index(self.jobstates, status)
        proctype = r[iProctype]
        spechandle = r[iSpechandle]
        workgroup = r[iWorkgroup]
        user = r[iUser]
        count = r[iCount]
        if not cloud in jobsumd:
            print 'Unknown cloud: %s for site %s ' % (cloud, site)
            continue
        # NOTE(review): this assignment folds every remaining row into the 'ALL'
        # cloud bucket, which makes the "if cloud != 'ALL'" branch below dead —
        # confirm whether per-cloud accounting was meant to stay enabled.
        cloud = 'ALL'
        if not status in self.jobstates:
            print 'Unknown jobstate:', status
            continue
        if not site in jobsumd[cloud]:
            jobsumd[cloud][site] = {}
        jobsumd[cloud][site] = [0] * (self.lstates + 1)
        jobsumd[cloud][site][self.lstates] = ncores if ncores != None else 0
        jobsumd[cloud][site][istatus] += count
        if cloud != 'ALL':
            jobsumd[cloud]['ALL'][istatus] += count
        jobsumd['ALL']['ALL'][istatus] += count
        # Side tallies per processing type / working group / user / special handling.
        proctyped[proctype] = proctyped.get(proctype, 0) + count
        workgroupd[workgroup] = workgroupd.get(workgroup, 0) + count
        userd[user] = userd.get(user, 0) + count
        if spechandle != None:
            spechandled[spechandle] = spechandled.get(spechandle, 0) + count
    ## Get cloud/site status
    main = {}
    main['states'] = self.jobstates
    main['jobsumd'] = jobsumd
    main['params'] = {'jobtype': jobtype, 'hours': hours, 'vo': vo, 'computingSite': computingSite, 'processingType': processingType, 'workingGroup': workingGroup, 'prodUserName': prodUserName, 'specialHandling': specialHandling, 'cores': cores, 'region': region}
    self.publish(main)
    self.publishNav(' type=%s hours=%s' % (jobtype, hours))
    self.publish("%s/%s" % (self.server().fileScriptURL(), "monitor/%s.js" % "mcore"), role="script")
def doQuery(self, jobtype='production,test', hours=12, cores=2, vo=None, computingSite=None, processingType=None, workingGroup=None, prodUserName=None, specialHandling=None, region=False):
    # Summarize job counts per state for MultiCore sites (coreCount >= 'cores')
    # over the last 'hours' (capped at 72) and publish the summary for mcore.js.
    # (No docstring added on purpose: this framework publishes __doc__ as Web API help.)
    if int(hours) > 24 * 3:
        hours = 24 * 3
        title = 'Job summary from the MultiCore sites for the last %s hours only.' % hours
        # Use <a href="%s/?%s">"stats"</a> page to see all %s hours' % ( 24 * 3, utils.monURL, 'mode=sitestats', hours )
    else:
        title = 'Job summary from the MultiCore sites for the last %s hours' % hours
    self.publishTitle(title)
    # Collect rows from the four live job tables; the header comes from the first query.
    modes = ('Defined', 'Waiting', 'Active', 'Archived')
    rows = []
    cores = cores if cores != None else 0
    for mode in modes:
        q = pmt.getCloudSummary(
            jobtype,
            vo,
            hours,
            selectionFields=
            'computingSite,processingType,workingGroup, prodUserName,specialHandling,coreCount',
            tablename='ATLAS_PANDA.jobs%s4' % mode)
        if len(rows) == 0:
            header = q['header']
        rows += q['rows']
    ### Build the jobsumd dictionary
    jobsumd = {}
    proctyped = {}
    workgroupd = {}
    userd = {}
    spechandled = {}
    for c in self.clouds:
        jobsumd[c] = {}
        jobsumd[c]['ALL'] = [0] * (self.lstates + 1)  # per-state counters + core count slot
        if c != 'ALL':
            jobsumd[c]['status'] = self.cloudList[c]['status']
    # Resolve column positions once, by name.
    iCloud = utils.name2Index(header, 'cloud')
    print utils.lineInfo(), header
    iComputingSite = utils.name2Index(header, 'computingSite')
    iStatus = utils.name2Index(header, 'jobStatus')
    iProctype = utils.name2Index(header, 'processingType')
    iSpechandle = utils.name2Index(header, 'specialHandling')
    iWorkgroup = utils.name2Index(header, 'workingGroup')
    iUser = utils.name2Index(header, 'prodUserName')
    iCore = utils.name2Index(header, 'coreCount')
    iCount = utils.name2Index(header, 'COUNT')
    for r in rows:
        # Skip rows below the requested core threshold.
        ncores = r[iCore] if r[iCore] != None else 0
        if cores != 0 and ncores < cores:
            continue
        site = r[iComputingSite]
        if site == None:
            # print "Db record doesn't define any site: %s " % r
            if region:
                continue
            site = "Unknown"
            # continue
        cloud = r[iCloud]  # utils.getCloud4Site(r)
        status = r[iStatus]
        istatus = utils.name2Index(self.jobstates, status)
        proctype = r[iProctype]
        spechandle = r[iSpechandle]
        workgroup = r[iWorkgroup]
        user = r[iUser]
        count = r[iCount]
        if not cloud in jobsumd:
            print 'Unknown cloud: %s for site %s ' % (cloud, site)
            continue
        # NOTE(review): this assignment folds every remaining row into the 'ALL'
        # cloud bucket, which makes the "if cloud != 'ALL'" branch below dead —
        # confirm whether per-cloud accounting was meant to stay enabled.
        cloud = 'ALL'
        if not status in self.jobstates:
            print 'Unknown jobstate:', status
            continue
        if not site in jobsumd[cloud]:
            jobsumd[cloud][site] = {}
        jobsumd[cloud][site] = [0] * (self.lstates + 1)
        jobsumd[cloud][site][
            self.lstates] = ncores if ncores != None else 0
        jobsumd[cloud][site][istatus] += count
        if cloud != 'ALL':
            jobsumd[cloud]['ALL'][istatus] += count
        jobsumd['ALL']['ALL'][istatus] += count
        # Side tallies per processing type / working group / user / special handling.
        proctyped[proctype] = proctyped.get(proctype, 0) + count
        workgroupd[workgroup] = workgroupd.get(workgroup, 0) + count
        userd[user] = userd.get(user, 0) + count
        if spechandle != None:
            spechandled[spechandle] = spechandled.get(spechandle, 0) + count
    ## Get cloud/site status
    main = {}
    main['states'] = self.jobstates
    main['jobsumd'] = jobsumd
    main['params'] = {
        'jobtype': jobtype,
        'hours': hours,
        'vo': vo,
        'computingSite': computingSite,
        'processingType': processingType,
        'workingGroup': workingGroup,
        'prodUserName': prodUserName,
        'specialHandling': specialHandling,
        'cores': cores,
        'region': region
    }
    self.publish(main)
    self.publishNav(' type=%s hours=%s' % (jobtype, hours))
    self.publish(
        "%s/%s" % (self.server().fileScriptURL(), "monitor/%s.js" % "mcore"),
        role="script")
def doMain(self, app='prun,pathena,ganga', plots='twall,tcpu,twait,trun', year='last', month='last', dir=None, layout=None, width=600, height=400, log=False, options='H', block='pyrootfile'):
    # Publish timing-distribution histograms for ATLAS analysis jobs taken from
    # monthly ROOT files named 'analysis_14_ALL_ALL_YYYYMM.root'.
    # (No docstring added on purpose: this framework publishes __doc__ as Web API help.)
    #
    #   app    - comma-separated application list (one plot column each)
    #   plots  - comma-separated timing quantities (one plot row each)
    #   year/month - integers, or 'last' to pick the newest matching file
    #   layout - "CxR" string overriding the computed grid
    connectionTime = Stopwatch.Stopwatch()
    rootout = os.tmpnam()
    # print ' RedirectOutput=' , gSystem.RedirectOutput(rootout,'a')
    if dir == None or dir == '':
        dir = '/home/fine/panda/pandamon/static/root'
    if app == None or app == '':
        app = self._app
    if plots == None or plots == '':
        plots = self._plots
    if layout == None:
        layout = {'columns': len(app.split(',')), 'rows': len(plots.split(','))}
    else:
        lt = layout.split('x', 1)
        layout = {'columns': lt[0].strip(), 'rows': lt[1].strip()}
    # 'last' is only meaningful for string inputs; ints fall through via except.
    lastMonth = False
    lastYear = False
    try:
        if month.lower() == 'last':
            lastMonth = True
    except:
        pass
    try:
        if year.lower() == 'last':
            lastYear = True
    except:
        pass
    if lastMonth or lastYear:
        # Scan the directory and take the newest file matching the year/month pattern.
        dirlist = sorted(os.listdir(dir))
        filtered = []
        pattern = 'analysis_14_ALL_ALL_(%(year)s)(%(month)s)' % {'year': '\\d{4}' if lastYear else '%04d' % year, 'month': '\\d{2}' if lastMonth else '%02d' % month}
        lut = re.compile(pattern)
        for f in dirlist:
            if lut.match(f):
                filtered.append(f)
        file = filtered[-1]
        note = file.replace('analysis_14_ALL_ALL_', '').replace('.root', '')
        self.publishTitle('Timing distribution for ATLAS analysis jobs in the %s.%s month' % (note[:4], note[4:]))
        self.publishNav('The histogram for %s %s %s.%s' % (app, plots, note[:4], note[4:]))
    else:
        file = 'analysis_14_ALL_ALL_%(year)04d%(month)02d' % {'year': year, 'month': month}
        # BUGFIX: format string was '%(year}04d.%(month)02d onth' — the mismatched
        # '}' raised ValueError("incomplete format key") whenever an explicit
        # year/month was requested, and 'onth' was a typo for 'month'.
        self.publishTitle('Timing distribution for ATLAS analysis jobs in the %(year)04d.%(month)02d month' % {'year': year, 'month': month})
        self.publishNav('The histogram for %s %s %04d.%02d' % (app, plots, year, month))
    if file == None or file == '':
        file = 'fillrandom.root'
    # Build the comma-separated histogram-name list: <dim>_<plot>_<app>,...
    name = ''
    for p in plots.split(','):
        for a in app.split(','):
            name += "%(dim)s_%(plot)s_%(app)s," % {'dim': self._dimension, 'plot': p, 'app': a}
    timer = Stopwatch.Stopwatch()
    #
    # Open ROOT file
    #
    if options == None:
        options = ''
    if file[-5:] != ".root":
        file += '.root'
    main = {}
    main['header'] = ["Params", "values"]
    # The processor scripts live next to this module in ./processor.
    processor = os.path.join(os.path.dirname(utils.lineInfo(False, '%(filename)s')), 'processor')
    main['info'] = pmt.getPopularity(name, file, dir, options, block, path=processor)  # eval(txt)
    main['width'] = width
    main['height'] = height
    main['layout'] = layout
    if log == True or isinstance(log, str) and (log.lower() == "true" or log.lower() == "yes"):
        main['log'] = True
    main['time'] = {}
    self.publish(main)