def doMain(self, nJobs, siteName, prodSourceLabel, cpu, mem, diskSpace,
           modificationHost, timeout, computingElement, atlasRelease,
           prodUserID, getProxyKey, countryGroup, workingGroup,
           allowOtherCountry):
    connectionTime = Stopwatch.Stopwatch()
    p = pmTaskBuffer.pmTaskBuffer(dbhost=None, dbpasswd=None, dbuser=None,
                                  dbname=None)
    timer = Stopwatch.Stopwatch()
    jobs = p.getJobs(nJobs, siteName, prodSourceLabel, cpu, mem, diskSpace,
                     modificationHost, timeout, computingElement,
                     atlasRelease, prodUserID, getProxyKey, countryGroup,
                     workingGroup, allowOtherCountry)
    main = {}
    main["buffer"] = {}
    main["buffer"]["method"] = 'getJobs'
    main["buffer"]["params"] = (nJobs, siteName, prodSourceLabel, cpu, mem,
                                diskSpace, modificationHost, timeout,
                                computingElement, atlasRelease, prodUserID,
                                getProxyKey, countryGroup, workingGroup,
                                allowOtherCountry)
    main["buffer"]["data"] = jobs
    main['time'] = {}
    main['time']['fetch'] = "%s" % timer
    main['time']['query'] = "%s" % connectionTime
    self.publish(main)

def doMain(self, name='h1f', file='fillrandom',
           dir='/home/fine/panda/pandamon/static/root', width=600,
           height=400, log=False, options='H', block='pyrootfile'):
    connectionTime = Stopwatch.Stopwatch()
    rootout = os.tmpnam()
    print "-------------- rootout=", rootout
    # print ' RedirectOutput=' , gSystem.RedirectOutput(rootout,'a')
    if dir == None or dir == '':
        dir = '/home/fine/panda/pandamon/static/root'
    if file == None or file == '':
        file = 'fillrandom.root'
    timer = Stopwatch.Stopwatch()
    #
    # Open ROOT file
    #
    if options == None:
        options = ''
    print utils.lineInfo(), block
    if block == 'pyrootfile':
        if file[-5:] != ".root":
            file += '.root'
        call(['/home/fine/panda/pandamon/pmModules/processor/%s.py' % block,
              rootout, name, file, dir, options])
    elif block == 'pandsusers':
        dir = '/home/fine/panda/pandamon/static/data'
        if file == 'fillrandom':
            file = 'useact180days'
        call(['/home/fine/panda/pandamon/pmModules/processor/%s.py' % block,
              rootout, file, dir, options])
    r4panda = open(rootout)
    txt = r4panda.readlines()[0][:-1]
    r4panda.close()
    os.remove(rootout)
    main = {}
    main['header'] = ["Params", "values"]
    main['info'] = eval(txt)
    main['width'] = width
    main['height'] = height
    if log == True or isinstance(log, str) and (log.lower() == "true" or
                                                log.lower() == "yes"):
        main['log'] = True
    main['time'] = {}
    self.publish(main)

def doJson(self, name='h1f', file='fillrandom',
           dir='/home/fine/panda/pandamon/static/root', width=600,
           height=400, log=False, options='H', block='pyrootfile'):
    """ Histogram Object Rendering Example <br><ul>
    <li>name - the name of the ROOT histogram
    <li>file - the name of the ROOT file the histogram 'name' should be read from
    <li>dir - the directory on the file server
    <li>width - the width of the plots in px
    <li>height - the height of the plots in px
    <li>log - use logarithmic scale for the Y-axis
    <li>options - some ROOT <a href='http://root.cern.ch/root/html534/THistPainter.html#HP01b'>Paint options</a>
    </ul>
    """
    self.publishTitle('Histogram object example')
    timer = Stopwatch.Stopwatch()
    self.doMain(name, file, dir, width, height, log, options, block)
    if file == 'fillrandom':
        file = 'useact180days.json'
    self.publishNav('The histogram from "%s/%s". "%s"' % (dir, file, timer))
    # I know the cyber security will be mad ;-) VF.
    self.publish("%s/%s" % (self.server().fileScriptURL(),
                            "hello/%s.js" % "pyroot"),
                 role="script")

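# A minimal usage sketch (hypothetical module name and URL layout; the actual
# routing depends on the pandamon server configuration). The doJson entry
# point above is normally reached with HTTP query parameters that map onto
# its keyword arguments, for example:
#
#   .../pyroot?name=h1f&file=fillrandom&width=600&height=400&log=True
#
# which is equivalent to the direct call
#
#   module.doJson(name='h1f', file='fillrandom', width=600, height=400,
#                 log=True, options='H', block='pyrootfile')
#
# and renders the 'h1f' histogram read from 'fillrandom.root' via the
# "hello/pyroot.js" client-side script published above.
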
def doMain(self, jobtype, select, timefield, tstart, tend, days, width,
           height, options, plot, log, file):
    main = {}
    params = "getUserActivity"
    header = ['time', 'Users']
    rows = []
    timer = Stopwatch.Stopwatch()
    dateformat = '%y%m' if 'M' in options else '%y%m%d'
    if file and file.strip() != '':
        pop = True
        q = self.doMergePush(file, jobtype, select, timefield, tstart, tend,
                             days, pop, dateformat)
    else:
        if tstart == None and tend == None and days == None:
            days = 3
        q = self.doTaskBuffer(jobtype, select, timefield, tstart, tend, days)
    if plot:
        title = "The PANDA users activity for the last %s days " % days
        self.publishPlot(self.makePlotPoints(q['rows'], dateformat), "h1d",
                         title, width, height, options, log)
    else:
        main["buffer"] = {}
        main["buffer"]["method"] = params
        main["buffer"]["params"] = (jobtype, select, timefield, tstart, tend,
                                    days)
        main["buffer"]["type"] = False
        main["buffer"]["data"] = q
        main['time'] = {'fetch': "%s" % timer}
        self.publish(main)

def doJson(self, nJobs=10, siteName='ANALY_VICTORIA-WG1',
           prodSourceLabel='user', cpu=0, mem=0, diskSpace=0,
           modificationHost='', timeout=0, computingElement=None,
           atlasRelease='no use', prodUserID=None, getProxyKey=None,
           countryGroup='', workingGroup='', allowOtherCountry=True):
    """ Invoke the TaskBuffer.getJobs method of <a href="https://svnweb.cern.ch/trac/panda/browser/panda-server/current/pandaserver/taskbuffer/TaskBuffer.py">TaskBuffer</a><br>
    """
    self.publishTitle('Get jobs from Task Buffer from pid: %s !!!' %
                      self.server().getpid())
    timer = Stopwatch.Stopwatch()
    self.doMain(nJobs, siteName, prodSourceLabel, cpu, mem, diskSpace,
                modificationHost, timeout, computingElement, atlasRelease,
                prodUserID, getProxyKey, countryGroup, workingGroup,
                allowOtherCountry)
    self.publishNav('The TaskBuffer.getJobs from CERN: "%s". "%s"' %
                    ((nJobs, siteName, prodSourceLabel, cpu, mem, diskSpace,
                      modificationHost, timeout, computingElement,
                      atlasRelease, prodUserID, getProxyKey, countryGroup,
                      workingGroup, allowOtherCountry), timer))
    # I know the cyber security will be mad ;-) VF.

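# A minimal usage sketch (the keyword values below are simply the defaults of
# the doJson signature above, not a recommendation): requesting ten 'user'
# jobs for the ANALY_VICTORIA-WG1 queue is equivalent to
#
#   module.doJson(nJobs=10, siteName='ANALY_VICTORIA-WG1',
#                 prodSourceLabel='user')
#
# which forwards all fifteen arguments to doMain and, through it, to
# TaskBuffer.getJobs on the Panda server.
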
def doMain(self, query):
    connectionTime = Stopwatch.Stopwatch()
    sql = pdb.pmDb()
    if query == None or query == '':
        query = "select distinct jobstatus from JOBSARCHIVED4"
    timer = Stopwatch.Stopwatch()
    s = sql.fetchallh(query)
    header = s['header']
    rows = s['rows']
    stime = "%s" % timer
    sql.close()
    main = {}
    main['header'] = header if len(rows) > 0 else []
    main['info'] = rows
    main['time'] = {}
    main['time']["fetch"] = stime
    self.publish(main)

def doMain(self):
    main = {}
    timer = Stopwatch.Stopwatch()
    tasks = pmt.schedCfg()
    main["buffer"] = {}
    main["buffer"]["method"] = 'schedcfg'
    main["buffer"]["params"] = ''
    main["buffer"]["type"] = isinstance(tasks, str)
    main["buffer"]["data"] = tasks
    main['time'] = {'fetch': "%s" % timer}
    self.publish(main)

def doMain(self, hours):
    timer = Stopwatch.Stopwatch()
    tasks = self.getLastDefinedDatasets(hours)
    main = {}
    main["method"] = 'reqtask1'
    main["tasks"] = tasks
    main["taskDef"] = {
        'name': sorted(taskDef_conf.projects['name']),
        'jobs': taskDef_conf.trfs['jobs'],
        'version': taskDef_conf.swversion['version']
    }
    main['time'] = {'fetch': "%s" % timer}
    self.publish(main)

def doJson(self):
    """ Show the non-zero MAXTIME values from schedconfig for each site
    ( see <a href='http://atlas-agis-api.cern.ch/request/pandaqueue/query/list/?json&preset=schedconf.all'>AGIS schedconf</a> also )
    """
    self.publishTitle(
        'The schedconfig MAXTIME <font size=-2> ( see <a href="http://atlas-agis-api.cern.ch/request/pandaqueue/query/list/?json&preset=schedconf.all">AGIS schedconf</a> also)</font>'
    )
    timer = Stopwatch.Stopwatch()
    self.doMain()
    self.publishNav('The maxtime from schedconfig: "%s"' % (timer))
    self.publish("%s/%s" % (self.server().fileScriptURL(),
                            "taskBuffer/%s.js" % 'tbTable'),
                 role="script")
    self.publish({'s-maxage': 600, 'max-age': 600}, role=pmRoles.cache())

def doJson(self, jobtype='analysis', timefield='MODIFICATIONTIME',
           tstart=None, tend=None, days=None, width=600, height=400,
           options='', plot=False, log=False, file='useract365daysTotal'):
    """ User activity plots <ul>
    <li>jobtype - the type of the jobs to count the user activity for
    <li>file - the name of the file to get the data from rather than from the Db
    <li>width - the width of the plots in px
    <li>height - the height of the plots in px
    <li>plot - 'True' to create the plot; 'False' to use the table view to render the data
    <li>log - use logarithmic scale for the Y-axis
    <li>options - some ROOT <a href='http://root.cern.ch/root/html534/THistPainter.html#HP01b'>Paint options</a> or <br>
        'M' = to get the "per month" rather than the "per day" data
    </ul>
    """
    self.publishTitle('Panda User activities for %(jobtype)s jobs' %
                      {'jobtype': jobtype})
    timer = Stopwatch.Stopwatch()
    select = "to_char(MODIFICATIONTIME ,'YYMMDD') as time,PRODUSERID"
    if 'M' in options:
        select = "to_char(MODIFICATIONTIME ,'YYMM') as time,PRODUSERID"
    self.doMain(jobtype, select, timefield, tstart, tend, days, width,
                height, options, plot, log, file)
    self.publishNav('The User Activities for %s from CERN ( %s)' %
                    (jobtype, timer))
    render = "taskBuffer/%s.js" % "tbTable" if not plot else "hello/%s.js" % "pyroot"
    self.publish("%s/%s" % (self.server().fileScriptURL(), render),
                 role=pmRoles.script())
    self.publish({'s-maxage': 86400, 'max-age': 86400},
                 role=pmRoles.cache())

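# A brief illustration of the 'options' switch handled above (values taken
# from the code; the invocation path is assumed). With the default options
# the activity is grouped per day,
#
#   select = "to_char(MODIFICATIONTIME ,'YYMMDD') as time,PRODUSERID"
#
# while options='M' switches both the select and the doMain date format to
# per-month grouping ('YYMM' / '%y%m'), e.g.
#
#   module.doJson(jobtype='analysis', days=30, options='M', plot=True)
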
def doJson(self, app='prun,pathena,ganga', plots='trun,tcpu,twall,twait',
           year='last', month='last', dir=None, layout=None, width=320,
           height=160, log=True, options='H', block='pyrootfile'):
    """ Timing distribution for ATLAS analysis jobs <br><ul>
    <li>app - the comma-separated list of the applications: prun,pathena,ganga
    <li>plots - the comma-separated list of the plots: 'twall,tcpu,twait,trun'
    <li>year - the 4-digit year to publish the plots for
    <li>month - the 2-digit month number 1-12
    <li>dir - the local directory name to pick the input ROOT files from
    <li>layout - the [columns x rows] layout to display the multiple plots<br>
        by default:<br>
        <code>columns</code> = the number of the 'app' applications<br>
        <code>rows</code> = the number of the 'plots'
    <li>width - the width of the plots in px
    <li>height - the height of the plots in px
    <li>log - use logarithmic scale for the Y-axis
    <li>options - some ROOT <a href='http://root.cern.ch/root/html534/THistPainter.html#HP01b'>Paint options</a>
    </ul>
    """
    timer = Stopwatch.Stopwatch()
    if dir == None:
        try:
            dir = self.server().config().analytics['times']
        except:
            dir = '/data4/work/analysis_14/ALL/ALL'
    self.doMain(app, plots, year, month, dir, layout, width, height, log,
                options, block)
    self.publish("%s/%s" % (self.server().fileScriptURL(),
                            "hello/%s.js" % "ptimes"),
                 role="script")

def doJson(self, plots='mc_data,user', year='last', month='last', dir=None,
           layout=None, width=440, height=240, log=True, options='H',
           block='pyrootfile'):
    """ Project and Data Type Popularity for ATLAS Analysis Jobs <br><ul>
    <li>plots - the comma-separated list of the plots: 'mc_data,user'
    <li>year - the 4-digit year to publish the plots for
    <li>month - the 2-digit month number 1-12
    <li>dir - the local directory name to pick the input ROOT files from
    <li>layout - the [columns x rows] layout to display the multiple plots<br>
        by default:<br>
        <code>columns</code> = the number of the 'plots'<br>
        <code>rows</code> = 1
    <li>width - the width of the plots in px
    <li>height - the height of the plots in px
    <li>log - use logarithmic scale for the Y-axis
    <li>options - some ROOT <a href='http://root.cern.ch/root/html534/THistPainter.html#HP01b'>Paint options</a>
    </ul>
    """
    if dir == None:
        try:
            dir = self.server().config().analytics['popularity']
        except:
            dir = '/data4/work/analysis_15/ALL/ALL'
    timer = Stopwatch.Stopwatch()
    self.doMain(plots, year, month, dir, layout, width, height, log,
                options, block)
    self.publish("%s/%s" % (self.server().fileScriptURL(),
                            "hello/%s.js" % "ppop"),
                 role="script")

def doMain(self, method, params, db):
    main = {}
    method = method.strip()
    api = method.replace("()", "")
    if params == None:
        params = ''
    elif params != '':
        if method == 'getFullJobStatus':
            api = 'fullJobStatus'  # rename to eliminate the 'side effect'
            if not isinstance(params, int):
                params = params.lstrip('[').rstrip(']')
                params = "[%s]" % params
        elif method == 'getScriptOfflineRunning':
            params = int(params)
        elif method == 'describe':
            params = "'%s'" % params
        else:
            params = "%s" % params
    if method.find('(') >= 0 and method[-1] == ')':
        api = method
        self._doc = method[:method.find('(')]
    else:
        self._doc = api
        api = "%s(%s)" % (api, params)
    timer = Stopwatch.Stopwatch()
    tasks = eval('%s.%s' % (db, api))
    main["buffer"] = {}
    main["buffer"]["method"] = method.replace("()", "")
    main["buffer"]["params"] = params
    main["buffer"]["type"] = isinstance(tasks, str)
    main["buffer"]["data"] = tasks
    main['time'] = {'fetch': "%s" % timer}
    self.publish(main)

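# An illustration of the dispatch above with assumed inputs (not part of the
# module): for method='getFullJobStatus', params='123456,123457' and the
# default db='pmt', the api name is rewritten to 'fullJobStatus', the params
# are wrapped into a list literal, and the evaluated call becomes
#
#   pmt.fullJobStatus([123456,123457])
#
# whose result is published under main["buffer"]["data"].
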
def doMain(self, plots='mc_data,user', year='last', month='last',
           dir='/data4/work/analysis_15/ALL/ALL', layout=None, width=440,
           height=240, log=False, options='H', block='pyrootfile'):
    connectionTime = Stopwatch.Stopwatch()
    rootout = os.tmpnam()
    # print ' RedirectOutput=' , gSystem.RedirectOutput(rootout,'a')
    if dir == None or dir == '':
        dir = '/home/fine/panda/pandamon/static/root'
    if plots == None or plots == '':
        plots = self._plots
    if layout == None:
        layout = {'columns': len(plots.split(',')), 'rows': 1}
    else:
        lt = layout.split('x', 1)
        layout = {'columns': int(lt[0].strip()), 'rows': int(lt[1].strip())}
    lastMonth = False
    lastYear = False
    try:
        if month.lower() == 'last':
            lastMonth = True
    except:
        pass
    try:
        if year.lower() == 'last':
            lastYear = True
    except:
        pass
    dirlist = sorted(os.listdir(dir))
    filtered = []
    filteredAll = []
    pattern = 'analysis_15_ALL_ALL_(%(year)s)(%(month)s)(.root)$' % {
        'year': '\\d{4}' if lastYear else '%04d' % year,
        'month': '\\d{2}' if lastMonth else '%02d' % month
    }
    patternAll = 'analysis_15_ALL_ALL_(%(year)s)(%(month)s)(.root)$' % {
        'year': '\\d{4}',
        'month': '\\d{2}'
    }
    lut = re.compile(pattern)
    lutAll = re.compile(patternAll)
    years = {}
    for f in dirlist:
        if lut.match(f):
            filtered.append(f)
        yearmm = lutAll.match(f)
        if yearmm:
            filteredAll.append(f)
            yymm = yearmm.groups()
            if not years.has_key(yymm[0]):
                years[yymm[0]] = []
            years[yymm[0]].append(yymm[1])
    if lastMonth or lastYear:
        file = filtered[-1]
        note = file.replace('analysis_15_ALL_ALL_', '').replace('.root', '')
        self.publishTitle(
            'Project and Data Type Popularity for ATLAS Analysis Jobs in the %s.%s Month'
            % (note[:4], note[4:]))
        self.publishNav('The Projects for %s %s.%s' %
                        (plots, note[:4], note[4:]))
    else:
        file = 'analysis_15_ALL_ALL_%(year)04d%(month)02d' % {
            'year': year,
            'month': month
        }
        self.publishTitle(
            'Project and Data Type Popularity for ATLAS Analysis Jobs in the %04d.%02d Month'
            % (year, month))
        self.publishNav('The Projects for "%s" in %04d.%02d' %
                        (plots, year, month))
    name = plots
    timer = Stopwatch.Stopwatch()
    #
    # Open ROOT file
    #
    if options == None:
        options = ''
    if file[-5:] != ".root":
        file += '.root'
    main = {}
    main['header'] = ["Params", "values"]
    processor = os.path.join(
        os.path.dirname(utils.lineInfo(False, '%(filename)s')), 'processor')
    info = pmt.getPopularity(name, file, dir, options, block, path=processor)
    for i in info:
        if not 'Histogram' in i[0]:
            continue
        try:
            hists = i[1]
            for h in hists:
                property = h['data']['property']
                property['xbound'] = 20
                title = h['attr']['title']
                if 'type' in title:
                    h['attr']['xaxis']['title'] = 'Project and Type'
                else:
                    h['attr']['xaxis']['title'] = 'Project'
                h['attr']['yaxis']['title'] = 'Jobs'
        except:
            pass
    main['info'] = info
    main['files'] = filteredAll
    main['months'] = years
    main['width'] = width
    main['height'] = height
    main['layout'] = layout
    if log == True or isinstance(log, str) and (log.lower() == "true" or
                                                log.lower() == "yes"):
        main['log'] = True
    main['time'] = {}
    self.publish(main)

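# A standalone sketch of the 'layout' string handling above (hypothetical
# value; the real call passes the HTTP parameter through unchanged):
#
#   layout = '2x1'
#   lt = layout.split('x', 1)
#   layout = {'columns': int(lt[0].strip()), 'rows': int(lt[1].strip())}
#   # -> {'columns': 2, 'rows': 1}
#
# When layout is omitted, the default grid is one row with one column per
# entry of the comma-separated 'plots' argument.
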
def doJson(self, table=None, column=None, db='pmt', doc=None):
    """ Describe the selected Panda table / column <br>
    <code>table = the comma-separated list of the regexp patterns of the Db table names to check</code><br>
    <code>column = the comma-separated list of the regexp patterns of the Db table column names to find</code><br>
    <code>doc = '*|all' - document all the selected columns if available<br>
        = 'comma-separated list of the regexp' to display the columns with the comments matching the regexp provided
    </code><br>
    """
    title = 'Describe the Panda Db '
    inp = False
    if table != None and table != 'None':
        title += "'%s*'" % table
        inp = True
        title += " table"
    else:
        title += " tables"
    if column != None and column != 'None':
        title += " with '%s*' column " % column
        inp = True
    main = {}
    if inp:
        self.publishTitle(title)
        timer = Stopwatch.Stopwatch()
        main["buffer"] = {}
        main["buffer"]["db"] = db
        dbAccess = self._db.get(db)
        if dbAccess != None:
            main["buffer"]["method"] = "describe"
            main["buffer"]["params"] = (table, column, doc)
            main["buffer"]["type"] = False
            if not utils.isFilled(doc):
                tdes = dbAccess.describe(table, column)
                iclmn = utils.name2Index(tdes['header'], 'COLUMN_NAME')
                rows = tdes['rows']
                for h in rows:
                    h[iclmn] = nmap.get(h[iclmn])
                main["buffer"]["data"] = tdes
            else:
                docrows = []
                if utils.isFilled(table):
                    tdoc = dbAccess.pandatabs(
                        table,
                        comments=doc,
                        select="table_name,'Table Description' as COLUMN_NAME,comments"
                    )
                    iclmn = utils.name2Index(tdoc['header'], 'COLUMN_NAME')
                    docrows = tdoc['rows']
                    for h in docrows:
                        h[iclmn] = nmap.get(h[iclmn])
                tcol = dbAccess.pandacols(
                    column,
                    table,
                    select='TABLE_NAME,COLUMN_NAME,COMMENTS',
                    comments=doc)
                iclmn = utils.name2Index(tcol['header'], 'COLUMN_NAME')
                for h in tcol['rows']:
                    h[iclmn] = nmap.get(h[iclmn])
                tcol['rows'] += docrows
                main["buffer"]["data"] = tcol
            self.publish({'s-maxage': 60000, 'max-age': 6000},
                         role=pmRoles.cache())
        else:
            self.publishTitle(
                "Error: 'describe' - unknown Db name '%s' has been provided"
                % db)
            self.publish({'s-maxage': 0, 'max-age': 0}, role=pmRoles.cache())
    else:
        self.publishTitle(
            "Error: 'describe' - no parameter has been provided. Define at least one: either 'column' or 'table'"
        )
        self.publish({'s-maxage': 0, 'max-age': 0}, role=pmRoles.cache())
    self.publish(main)
    self.publish("%s/%s" % (self.server().fileScriptURL(),
                            "taskBuffer/%s.js" % "describe"),
                 role=pmRoles.script())

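# A minimal usage sketch (the table and column values are illustrative only):
#
#   module.doJson(table='JOBSARCHIVED4', column='JOBSTATUS', db='pmt')
#
# drives the dbAccess.describe(table, column) branch above, while adding
# doc='*' switches to the pandatabs/pandacols branch that also returns the
# table and column comments.
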
def doJson(self,
           query='select distinct jobstatus from ATLAS_PANDA.JOBSARCHIVED4',
           start='2012-03-01',
           end='',
           job=None,
           fields="NINPUTDATAFILES,INPUTFILEBYTES,NOUTPUTDATAFILES,OUTPUTFILEBYTES",
           limit=10000,
           describe=None):
    """ Place any query with the CERN Oracle Db <br><ul>
    <li><code>query = "select distinct jobstatus from ATLAS_PANDA.JOBSARCHIVED4"</code><br>
    <li><code>query = "us" - to select the US users who were using panda within the start/end time range</code><br>
    <li><code>query = "releaseinfo" - to select the release info for all Panda sites</code><br>
    <li><code>job = "*" - to show the 'fields' for the first 'limit' (10k by default) jobs</code><br>
        <code> = 'PandaIDs' - the comma-separated list of Panda IDs to show the 'fields' for</code><br>
    <li><code>fields = the comma-separated list of the Db columns to show for the jobs selected with the 'job' parameter</code><br>
    <li><code>start = the start date for the "us" query (default 2012-03-01)</code><br>
    <li><code>end = the end date for the "us" query (the default is the start date + 30 days)</code>
    </ul>
    """
    releaseinfo = False
    if describe != None:
        query = "select distinct column_name,data_type from all_tab_columns where table_name='%s'" % describe.upper()
    if query == "us":
        if start == '':
            tstart = datetime.now() - timedelta(days=3 * 30) + timedelta(days=1)
        else:
            tstart = datetime.strptime(start, '%Y-%m-%d')
        starttime = " and MODIFICATIONTIME >= to_date('%s','YYYY-MM-DD') " % tstart.strftime('%Y-%m-%d')
        if end == '':
            tend = tstart + timedelta(days=30)
        else:
            tend = datetime.strptime(end, '%Y-%m-%d')
        table = 'ATLAS_PANDAARCH.JOBSARCHIVED' if tstart < datetime.now() - timedelta(days=3 * 30) else 'ATLAS_PANDA.JOBSARCHIVED4'
        endtime = " and MODIFICATIONTIME < to_date('%s','YYYY-MM-DD') " % tend.strftime('%Y-%m-%d')
        query = "select distinct PRODUSERNAME,PRODUSERID from %(table)s where regexp_like(PRODUSERID,'(/C=US/)|(/DC=doegrids/)') and JOBSTATUS='finished' %(start)s %(end)s" % {
            "table": table,
            "start": starttime,
            "end": endtime
        }
    elif query == "releaseinfo":
        query = "select * from ATLAS_PANDAMETA.installedsw order by release desc, siteid, cache"
    elif job != None:
        if fields == '' or fields == None:
            fields = "*"
        if isinstance(job, str) and job.strip() == "*":
            where = ' where ROWNUM <= %d ' % limit
        else:
            where = " where pandaid in (%s) " % job
        query = "select pandaid, %s from ATLAS_PANDA.JOBSARCHIVED4 %s " % (fields, where)
    self.publishTitle('Hello Oracle Db from pid: %s !!!' %
                      self.server().getpid())
    timer = Stopwatch.Stopwatch()
    self.doMain(query)
    self.publishNav('The Oracle Db table from CERN: "%s". "%s"' %
                    (query, timer))
    # I know the cyber security will be mad ;-) VF.
    self.publish("%s/%s" % (self.server().fileScriptURL(),
                            "hello/%s.js" % "helloora"),
                 role="script")

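# An illustration of the query built for the 'job' branch above (assumed
# inputs): with job='*', the default 'fields' and limit=10000 the generated
# statement is
#
#   select pandaid, NINPUTDATAFILES,INPUTFILEBYTES,NOUTPUTDATAFILES,OUTPUTFILEBYTES
#   from ATLAS_PANDA.JOBSARCHIVED4 where ROWNUM <= 10000
#
# while job='1234,5678' replaces the ROWNUM clause with
# "where pandaid in (1234,5678)".
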
def doJson(self, method="getAssigningTask", params='', db='pmt'): """ Invoke the arbitrary method of <a href="https://svnweb.cern.ch/trac/panda/browser/panda-server/current/pandaserver/taskbuffer/TaskBuffer.py">TaskBuffer</a> class <br> <code>method = "getAssigningTask"</code><br> <code>method = "getNumUserSubscriptions"</code><br> <code>method = "getSiteList"</code> ... etc <br> <code>params = the comma separated list of the parameters to be passed to the method defined by 'method' parameter</code> """ self._doc = None self._db = db self.publishTitle('Panda TaskBuffer') timer = Stopwatch.Stopwatch() self.doMain(method, params, db) self.publishNav( 'The TaskBuffer from CERN: "%s:%s". "%s"' % (method, params, timer)) # I know the cyber security will be mad ;-) VF. sc = { "getAssigningTask": "getAssigningTask", "getPledgeResourceRatio": "getPledgeResourceRatio", "getScriptOfflineRunning": "getScriptOfflineRunning", "getFullJobStatus": "getFullJobStatus", "fullJobStatus": "getFullJobStatus", "getUsers": "getUsers", 'getMembers': "tbTable", 'getSiteInfo': "getSiteInfo", 'getSnapshot': "tbTable", 'getCloudSummary': "tbTable", 'getSiteSummary': "tbTable", 'getCloudList': "getCloudList", "getMCShares": "getMCShares", 'getCloudConfig': "tbTable", 'getCloudSites': "tbTable", 'getjobparam': "getjobparam", 'getUserActivity': "tbTable", 'wnList': "tbTable", 'diskUsage': "tbTable", 'debugInfo': "tbTable", 'corruptFiles': "tbTable", 'getVCloudSummary': "tbTable", 'jobParams': "tbTable", 'schedCfg': "tbTable", 'getLastDefinedDataset': "tbTable", 'getTaskMaxLength': "tbTable", 'listTaskRequests': "tbTable", 'getUserSubs': "tbTable", 'pandatabs': "tbTable", 'pandacols': "tbTable", 'countReleases': "tbTable", 'getLogInfo': "tbTable", 'getJobsAtt': "tbTable", 'getErrorCount': "tbTable", 'getJediTaskAtt': "tbTable", 'getJediDSAtt': "tbTable", 'describe': "describe" } f = 'default' for m in sc: if m in method: f = sc[m] break self.publish("%s/%s" % (self.server().fileScriptURL(), "taskBuffer/%s.js" % f), role="script") cache = { "getMCShares": { 's-maxage': 90000, 'max-age': 90000 }, "getJediTaskAtt": { 's-maxage': 900, 'max-age': 900 } } c = cache.get(method) if c: self.publish(c, role=pmRoles.cache())
def doMain(self, app='prun,pathena,ganga', plots='twall,tcpu,twait,trun',
           year='last', month='last', dir=None, layout=None, width=600,
           height=400, log=False, options='H', block='pyrootfile'):
    connectionTime = Stopwatch.Stopwatch()
    rootout = os.tmpnam()
    # print ' RedirectOutput=' , gSystem.RedirectOutput(rootout,'a')
    if dir == None or dir == '':
        dir = '/home/fine/panda/pandamon/static/root'
    if app == None or app == '':
        app = self._app
    if plots == None or plots == '':
        plots = self._plots
    if layout == None:
        layout = {
            'columns': len(app.split(',')),
            'rows': len(plots.split(','))
        }
    else:
        lt = layout.split('x', 1)
        layout = {'columns': lt[0].strip(), 'rows': lt[1].strip()}
    lastMonth = False
    lastYear = False
    try:
        if month.lower() == 'last':
            lastMonth = True
    except:
        pass
    try:
        if year.lower() == 'last':
            lastYear = True
    except:
        pass
    if lastMonth or lastYear:
        dirlist = sorted(os.listdir(dir))
        filtered = []
        pattern = 'analysis_14_ALL_ALL_(%(year)s)(%(month)s)' % {
            'year': '\\d{4}' if lastYear else '%04d' % year,
            'month': '\\d{2}' if lastMonth else '%02d' % month
        }
        lut = re.compile(pattern)
        for f in dirlist:
            if lut.match(f):
                filtered.append(f)
        file = filtered[-1]
        note = file.replace('analysis_14_ALL_ALL_', '').replace('.root', '')
        self.publishTitle(
            'Timing distribution for ATLAS analysis jobs in the %s.%s month'
            % (note[:4], note[4:]))
        self.publishNav('The histogram for %s %s %s.%s' %
                        (app, plots, note[:4], note[4:]))
    else:
        file = 'analysis_14_ALL_ALL_%(year)04d%(month)02d' % {
            'year': year,
            'month': month
        }
        self.publishTitle(
            'Timing distribution for ATLAS analysis jobs in the %(year)04d.%(month)02d month'
            % {
                'year': year,
                'month': month
            })
        self.publishNav('The histogram for %s %s %04d.%02d' %
                        (app, plots, year, month))
    if file == None or file == '':
        file = 'fillrandom.root'
    name = ''
    for p in plots.split(','):
        for a in app.split(','):
            name += "%(dim)s_%(plot)s_%(app)s," % {
                'dim': self._dimension,
                'plot': p,
                'app': a
            }
    timer = Stopwatch.Stopwatch()
    #
    # Open ROOT file
    #
    if options == None:
        options = ''
    if file[-5:] != ".root":
        file += '.root'
    main = {}
    main['header'] = ["Params", "values"]
    processor = os.path.join(
        os.path.dirname(utils.lineInfo(False, '%(filename)s')), 'processor')
    main['info'] = pmt.getPopularity(name, file, dir, options, block,
                                     path=processor)  # eval(txt)
    main['width'] = width
    main['height'] = height
    main['layout'] = layout
    if log == True or isinstance(log, str) and (log.lower() == "true" or
                                                log.lower() == "yes"):
        main['log'] = True
    main['time'] = {}
    self.publish(main)
