def parseInputFiles(inputfilename):
    '''
    output ({run:[cmsls,cmsls,...]},[[resultlines]])
    '''
    fileparser=inputFilesetParser.inputFilesetParser(inputfilename)
    runlsbyfile=fileparser.runsandls()
    # query both processed and non-processed selections from the parser
    selectedProcessedRuns=fileparser.selectedRunsWithresult()
    selectedNonProcessedRuns=fileparser.selectedRunsWithoutresult()
    resultlines=fileparser.resultlines()
    # keep only runs that still need processing, with their ls selections
    selectedrunlsInDB={}
    for pendingrun in selectedNonProcessedRuns:
        selectedrunlsInDB[pendingrun]=runlsbyfile[pendingrun]
    return (selectedrunlsInDB,resultlines)
def parseInputFiles(inputfilename):
    '''
    output ({run:[cmsls,cmsls,...]},[[resultlines]])
    '''
    p=inputFilesetParser.inputFilesetParser(inputfilename)
    runlsbyfile=p.runsandls()
    # both accessors are invoked as in the original flow
    selectedProcessedRuns=p.selectedRunsWithresult()
    selectedNonProcessedRuns=p.selectedRunsWithoutresult()
    resultlines=p.resultlines()
    # map each not-yet-processed run to its lumisection list from the file
    selectedrunlsInDB=dict((r,runlsbyfile[r]) for r in selectedNonProcessedRuns)
    return (selectedrunlsInDB,resultlines)
def parseInputFiles(inputfilename,dbrunlist,optaction):
    '''
    output ({run:[cmsls,cmsls,...]},[[resultlines]])
    '''
    fparser=inputFilesetParser.inputFilesetParser(inputfilename)
    runlsbyfile=fparser.runsandls()
    selectedProcessedRuns=fparser.selectedRunsWithresult()
    selectedNonProcessedRuns=fparser.selectedRunsWithoutresult()
    resultlines=fparser.resultlines()
    selectedrunlsInDB={}
    for candidaterun in selectedNonProcessedRuns:
        # skip runs the database does not know about
        if candidaterun not in dbrunlist:
            continue
        if optaction=='delivered':
            # for delivered we care only about selected runs, not their ls lists
            selectedrunlsInDB[candidaterun]=None
        else:
            selectedrunlsInDB[candidaterun]=runlsbyfile[candidaterun]
    return (selectedrunlsInDB,resultlines)
def parseInputFiles(inputfilename):
    '''Parse the given selection file and return its {run:[cmsls,...]} map.'''
    return inputFilesetParser.inputFilesetParser(inputfilename).runsandls()
def main():
    '''
    Lumi Validation command-line driver.

    Supports three actions against lumiDB:
      batchupdate - bulk-load run/ls validation flags from a csv file (-i)
      update      - set a flag interactively for a run (-r) or run/ls selection (-runls)
      dump        - print or csv-export (-o) the stored validation data
    NOTE(review): Python 2 code (print statements, has_key); the bare
    `raise` statements below have no active exception, so they surface as a
    TypeError/RuntimeError rather than a meaningful error — confirm intent.
    '''
    parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),description = "Lumi Validation",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    allowedActions = ['batchupdate','update','dump']
    allowedFlags = ['UNKNOWN','GOOD','BAD','SUSPECT']
    # parse arguments
    parser.add_argument('action',choices=allowedActions,help='command actions')
    parser.add_argument('-c',dest='connect',action='store',required=True,help='connect string to lumiDB')
    parser.add_argument('-P',dest='authpath',action='store',required=True,help='path to authentication file')
    parser.add_argument('-i',dest='inputfile',action='store',help='lumi range selection file,required for batchupdate action')
    parser.add_argument('-o',dest='outputfile',action='store',help='output to csv file')
    parser.add_argument('-r',dest='runnumber',action='store',type=int,help='run number,optional')
    parser.add_argument('-runls',dest='runls',action='store',help='selection string,optional. Example [1234:[],4569:[1,1],[2,100]]')
    parser.add_argument('-flag',dest='flag',action='store',default='UNKNOWN',help='flag string,optional')
    parser.add_argument('--verbose',dest='verbose',action='store_true',help='verbose mode for printing' )
    parser.add_argument('--debug',dest='debug',action='store_true',help='debug')
    options=parser.parse_args()
    # reject unknown flags up front, before touching the database
    if options.flag.upper() not in allowedFlags:
        print 'unrecognised flag ',options.flag.upper()
        raise
    os.environ['CORAL_AUTH_PATH'] = options.authpath
    connectstring=options.connect
    # open a writable CORAL session to lumiDB
    svc = coral.ConnectionService()
    msg=coral.MessageStream('')
    if options.debug:
        msg.setMsgVerbosity(coral.message_Level_Debug)
    else:
        msg.setMsgVerbosity(coral.message_Level_Error)
    session=svc.connect(connectstring,accessMode=coral.access_Update)
    session.typeConverter().setCppTypeForSqlType("unsigned int","NUMBER(10)")
    session.typeConverter().setCppTypeForSqlType("unsigned long long","NUMBER(20)")
    result={}#parsing result {run:[[ls,status,comment]]}
    if options.debug :
        msg=coral.MessageStream('')
        msg.setMsgVerbosity(coral.message_Level_Debug)
    if options.action=='batchupdate':
        #populate from csv file, require -i argument
        if not options.inputfile:
            print 'inputfile -i option is required for batchupdate'
            raise
        # csv rows: run,ls,status,comment — rows shorter than 4 fields will raise
        csvReader=csv.reader(open(options.inputfile),delimiter=',')
        for row in csvReader:
            if len(row)==0:
                continue
            fieldrun=str(row[0]).strip()
            fieldls=str(row[1]).strip()
            fieldstatus=row[2]
            fieldcomment=row[3]
            if not result.has_key(int(fieldrun)):
                result[int(fieldrun)]=[]
            result[int(fieldrun)].append([int(fieldls),fieldstatus,fieldcomment])
        insertupdateValidationData(session,result)
    if options.action=='update':
        #update flag interactively, require -runls argument
        #runls={run:[]} populate all CMSLSNUM found in LUMISUMMARY
        #runls={run:[[1,1],[2,5]],run:[[1,1],[2,5]]}
        #default value
        if not options.runls and not options.runnumber:
            print 'option -runls or -r is required for update'
            raise
        if not options.flag:
            print 'option -flag is required for update'
            raise
        if options.flag.upper() not in allowedFlags:
            print 'unrecognised flag ',options.flag
            raise
        # -r takes precedence: an empty ls list means "all lumisections of the run"
        if options.runnumber:
            runlsjson='{"'+str(options.runnumber)+'":[]}'
        elif options.runls:
            runlsjson=CommonUtil.tolegalJSON(options.runls)
        sparser=selectionParser.selectionParser(runlsjson)
        runsandls=sparser.runsandls()
        commentStr='NA'
        statusStr=options.flag
        for run,lslist in runsandls.items():
            if not result.has_key(run):
                result[run]=[]
            for ls in lslist:
                result[run].append([ls,statusStr,commentStr])
        insertupdateValidationData(session,result)
    if options.action=='dump':
        if options.runls or options.inputfile:
            # build the run/ls selection either from the -runls string
            # or from the input file (-i wins if both are given)
            if options.runls:
                runlsjson=CommonUtil.tolegalJSON(options.runls)
                sparser=selectionParser.selectionParser(runlsjson)
                runsandls=sparser.runsandls()
            if options.inputfile:
                p=inputFilesetParser.inputFilesetParser(options.inputfile)
                runsandls=p.runsandls()
            for runnum,lslist in runsandls.items():
                dataperrun=getValidationData(session,run=runnum,cmsls=lslist)
                if dataperrun.has_key(runnum):
                    result[runnum]=dataperrun[runnum]
                else:
                    result[runnum]=[]
        else:
            # no selection: dump a single run (or everything if -r unset)
            result=getValidationData(session,run=options.runnumber)
        runs=result.keys()
        runs.sort()
        if options.outputfile:
            # write run,ls,status,comment rows to the csv report
            r=csvReporter.csvReporter(options.outputfile)
            for run in runs:
                for perrundata in result[run]:
                    r.writeRow([str(run),str(perrundata[0]),perrundata[1],perrundata[2]])
        else:
            # print to stdout, one section per run
            for run in runs:
                print '== ='
                if len(result[run])==0:
                    print str(run),'no validation data'
                    continue
                for lsdata in result[run]:
                    print str(run)+','+str(lsdata[0])+','+lsdata[1]+','+lsdata[2]
    # drop references so the CORAL session/service are released
    del session
    del svc
def jsonToIFP(json) :
    '''
    Write the given object to a temporary file as JSON-ish text (single
    quotes normalized to double quotes) and build an inputFilesetParser
    from that file.

    NOTE: the parameter name shadows the stdlib `json` module; it is kept
    unchanged for backward compatibility with existing callers.
    '''
    with tempfile.NamedTemporaryFile() as tmpf :
        # normalize python-repr single quotes so the payload is legal JSON
        print >> tmpf, str(json).replace("'",'"')
        # flush before handing the path to the parser: it reads the file
        # by name while the NamedTemporaryFile is still alive
        tmpf.flush()
        return inputFilesetParser.inputFilesetParser(tmpf.name)
def main():
    '''
    Command-line driver: plot integrated luminosity versus the chosen
    x-axis variable ('run', 'fill', 'time' or 'perday'), optionally
    exporting a csv report (-o) and a PNG (-batch) or interactive plot.
    NOTE(review): Python 2 code throughout (print statements, has_key,
    `raise Exception, msg`); `exit` in the unsupported-action branch is a
    bare name, not a call, so it does not actually terminate — confirm.
    '''
    allowedscales=['linear','log','both']
    c=constants()
    parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),description="Plot integrated luminosity as function of the time variable of choice",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # add required arguments
    parser.add_argument('-c',dest='connect',action='store',required=True,help='connect string to lumiDB')
    # add optional arguments
    parser.add_argument('-P',dest='authpath',action='store',help='path to authentication file')
    parser.add_argument('-n',dest='normfactor',action='store',help='normalization factor (optional, default to 1.0)')
    parser.add_argument('-i',dest='inputfile',action='store',help='lumi range selection file (optional)')
    parser.add_argument('-o',dest='outputfile',action='store',help='csv outputfile name (optional)')
    parser.add_argument('-lumiversion',dest='lumiversion',default='0001',action='store',required=False,help='lumi data version')
    parser.add_argument('-begin',dest='begin',action='store',help='begin value of x-axi (required)')
    parser.add_argument('-end',dest='end',action='store',help='end value of x-axi (optional). Default to the maximum exists DB')
    parser.add_argument('-beamenergy',dest='beamenergy',action='store',type=float,required=False,help='beamenergy (in GeV) selection criteria,e.g. 3.5e3')
    parser.add_argument('-beamfluctuation',dest='beamfluctuation',action='store',type=float,required=False,help='allowed fraction of beamenergy to fluctuate, e.g. 0.1')
    parser.add_argument('-beamstatus',dest='beamstatus',action='store',required=False,help='selection criteria beam status,e.g. STABLE BEAMS')
    parser.add_argument('-yscale',dest='yscale',action='store',required=False,default='linear',help='y_scale')
    parser.add_argument('-hltpath',dest='hltpath',action='store',help='specific hltpath to calculate the recorded luminosity. If specified aoverlays the recorded luminosity for the hltpath on the plot')
    parser.add_argument('-batch',dest='batch',action='store',help='graphical mode to produce PNG file. Specify graphical file here. Default to lumiSum.png')
    parser.add_argument('--annotateboundary',dest='annotateboundary',action='store_true',help='annotate boundary run numbers')
    parser.add_argument('--interactive',dest='interactive',action='store_true',help='graphical mode to draw plot in a TK pannel.')
    parser.add_argument('-timeformat',dest='timeformat',action='store',help='specific python timeformat string (optional). Default mm/dd/yy hh:min:ss.00')
    parser.add_argument('-siteconfpath',dest='siteconfpath',action='store',help='specific path to site-local-config.xml file, default to $CMS_PATH/SITECONF/local/JobConfig, if path undefined, fallback to cern proxy&server')
    parser.add_argument('action',choices=['run','fill','time','perday'],help='x-axis data type of choice')
    #graphical mode options
    parser.add_argument('--with-correction',dest='withFineCorrection',action='store_true',help='with fine correction')
    parser.add_argument('--verbose',dest='verbose',action='store_true',help='verbose mode, print result also to screen')
    parser.add_argument('--debug',dest='debug',action='store_true',help='debug')
    # parse arguments
    batchmode=True
    args=parser.parse_args()
    connectstring=args.connect
    begvalue=args.begin
    endvalue=args.end
    beamstatus=args.beamstatus
    beamenergy=args.beamenergy
    beamfluctuation=args.beamfluctuation
    xaxitype='run'
    # resolve the frontier/site-local configuration if the connect string needs it
    connectparser=connectstrParser.connectstrParser(connectstring)
    connectparser.parse()
    usedefaultfrontierconfig=False
    cacheconfigpath=''
    if connectparser.needsitelocalinfo():
        if not args.siteconfpath:
            cacheconfigpath=os.environ['CMS_PATH']
            if cacheconfigpath:
                cacheconfigpath=os.path.join(cacheconfigpath,'SITECONF','local','JobConfig','site-local-config.xml')
            else:
                # no CMS_PATH: fall back to the built-in frontier config string
                usedefaultfrontierconfig=True
        else:
            cacheconfigpath=args.siteconfpath
            cacheconfigpath=os.path.join(cacheconfigpath,'site-local-config.xml')
        p=cacheconfigParser.cacheconfigParser()
        if usedefaultfrontierconfig:
            p.parseString(c.defaultfrontierConfigString)
        else:
            p.parse(cacheconfigpath)
        connectstring=connectparser.fullfrontierStr(connectparser.schemaname(),p.parameterdict())
    #print 'connectstring',connectstring
    runnumber=0
    svc = coral.ConnectionService()
    hltpath=''
    if args.hltpath:
        hltpath=args.hltpath
    if args.debug :
        msg=coral.MessageStream('')
        msg.setMsgVerbosity(coral.message_Level_Debug)
    ifilename=''
    ofilename='integratedlumi.png'
    timeformat=''
    # apply optional overrides to the shared constants object
    if args.authpath and len(args.authpath)!=0:
        os.environ['CORAL_AUTH_PATH']=args.authpath
    if args.normfactor:
        c.NORM=float(args.normfactor)
    if args.lumiversion:
        c.LUMIVERSION=args.lumiversion
    if args.verbose:
        c.VERBOSE=True
    if args.inputfile:
        ifilename=args.inputfile
    if args.batch:
        opicname=args.batch
    if args.outputfile:
        ofilename=args.outputfile
    if args.timeformat:
        timeformat=args.timeformat
    # open the lumiDB session
    session=svc.connect(connectstring,accessMode=coral.access_Update)
    session.typeConverter().setCppTypeForSqlType("unsigned int","NUMBER(10)")
    session.typeConverter().setCppTypeForSqlType("unsigned long long","NUMBER(20)")
    inputfilecontent=''
    fileparsingResult=''
    runList=[]
    runDict={}
    fillDict={}
    selectionDict={}
    minTime=''
    maxTime=''
    startRunTime=''
    stopRunTime=''
    #if len(ifilename)!=0 :
    #    f=open(ifilename,'r')
    #    inputfilecontent=f.read()
    #    sparser=selectionParser.selectionParser(inputfilecontent)
    #    runsandls=sparser.runsandls()
    #    keylist=runsandls.keys()
    #    keylist.sort()
    #    for run in keylist:
    #        if selectionDict.has_key(run):
    #            lslist=runsandls[run]
    #            lslist.sort()
    #            selectionDict[run]=lslist
    # build selectionDict {run:[sorted ls list]} from the input selection file
    if len(ifilename)!=0:
        ifparser=inputFilesetParser.inputFilesetParser(ifilename)
        runsandls=ifparser.runsandls()
        keylist=runsandls.keys()
        keylist.sort()
        for run in keylist:
            if not selectionDict.has_key(run):
                lslist=runsandls[run]
                lslist.sort()
                selectionDict[run]=lslist
    # build runList for the chosen x-axis type
    if args.action == 'run':
        if not args.end:
            # no explicit end run: query the DB for the last complete run
            session.transaction().start(True)
            schema=session.nominalSchema()
            lastrun=max(lumiQueryAPI.allruns(schema,requireRunsummary=True,requireLumisummary=True,requireTrg=True,requireHlt=False))
            session.transaction().commit()
        else:
            lastrun=int(args.end)
        for r in range(int(args.begin),lastrun+1):
            runList.append(r)
        runList.sort()
    elif args.action == 'fill':
        session.transaction().start(True)
        maxfill=None
        if not args.end:
            qHandle=session.nominalSchema().newQuery()
            maxfill=max(lumiQueryAPI.allfills(qHandle,filtercrazy=True))
            del qHandle
        else:
            maxfill=int(args.end)
        qHandle=session.nominalSchema().newQuery()
        fillDict=lumiQueryAPI.runsByfillrange(qHandle,int(args.begin),maxfill)
        del qHandle
        session.transaction().commit()
        #print 'fillDict ',fillDict
        for fill in range(int(args.begin),maxfill+1):
            if fillDict.has_key(fill): #fill exists
                for run in fillDict[fill]:
                    runList.append(run)
        runList.sort()
    elif args.action == 'time' or args.action == 'perday':
        session.transaction().start(True)
        t=lumiTime.lumiTime()
        minTime=t.StrToDatetime(args.begin,timeformat)
        if not args.end:
            maxTime=datetime.datetime.utcnow() #to now
        else:
            maxTime=t.StrToDatetime(args.end,timeformat)
        #print minTime,maxTime
        qHandle=session.nominalSchema().newQuery()
        runDict=lumiQueryAPI.runsByTimerange(qHandle,minTime,maxTime)#xrawdata
        session.transaction().commit()
        runList=runDict.keys()
        del qHandle
        runList.sort()
        if len(runList)!=0:
            # boundary run start/stop times annotate the time axis
            runmin=min(runList)
            runmax=max(runList)
            startRunTime=runDict[runmin][0]
            stopRunTime=runDict[runmax][1]
        #print 'run list: ',runDict
    else:
        print 'unsupported action ',args.action
        exit
    # optional fine corrections for the selected run range
    finecorrections=None
    if args.withFineCorrection:
        schema=session.nominalSchema()
        session.transaction().start(True)
        finecorrections=lumiCorrections.correctionsForRange(schema,runList)
        session.transaction().commit()
    #print 'runList ',runList
    #print 'runDict ', runDict
    # one figure per y-scale: m is linear, mlog is log
    fig=Figure(figsize=(7.2,5.4),dpi=120)
    m=matplotRender.matplotRender(fig)
    logfig=Figure(figsize=(7.5,5.7),dpi=135)
    mlog=matplotRender.matplotRender(logfig)
    if args.action == 'run':
        result={}
        result=getLumiInfoForRuns(session,c,runList,selectionDict,hltpath,beamstatus=beamstatus,beamenergy=beamenergy,beamfluctuation=beamfluctuation,finecorrections=finecorrections)
        xdata=[]
        ydata={}
        ydata['Delivered']=[]
        ydata['Recorded']=[]
        keylist=result.keys()
        keylist.sort() #must be sorted in order
        if args.outputfile:
            reporter=csvReporter.csvReporter(ofilename)
            fieldnames=['run','delivered','recorded']
            reporter.writeRow(fieldnames)
        for run in keylist:
            xdata.append(run)
            delivered=result[run][0]
            recorded=result[run][1]
            ydata['Delivered'].append(delivered)
            ydata['Recorded'].append(recorded)
            # skip all-zero rows in the csv report
            if args.outputfile and (delivered!=0 or recorded!=0):
                reporter.writeRow([run,result[run][0],result[run][1]])
        m.plotSumX_Run(xdata,ydata,yscale='linear')
        mlog.plotSumX_Run(xdata,ydata,yscale='log')
    elif args.action == 'fill':
        lumiDict={}
        lumiDict=getLumiInfoForRuns(session,c,runList,selectionDict,hltpath,beamstatus=beamstatus,beamenergy=beamenergy,beamfluctuation=beamfluctuation,finecorrections=finecorrections)
        xdata=[]
        ydata={}
        ydata['Delivered']=[]
        ydata['Recorded']=[]
        #keylist=lumiDict.keys()
        #keylist.sort()
        if args.outputfile:
            reporter=csvReporter.csvReporter(ofilename)
            fieldnames=['fill','run','delivered','recorded']
            reporter.writeRow(fieldnames)
        # iterate fills in order; x-axis points are the runs inside each fill
        fills=fillDict.keys()
        fills.sort()
        for fill in fills:
            runs=fillDict[fill]
            runs.sort()
            for run in runs:
                xdata.append(run)
                ydata['Delivered'].append(lumiDict[run][0])
                ydata['Recorded'].append(lumiDict[run][1])
                if args.outputfile :
                    reporter.writeRow([fill,run,lumiDict[run][0],lumiDict[run][1]])
        #print 'input fillDict ',len(fillDict.keys()),fillDict
        m.plotSumX_Fill(xdata,ydata,fillDict,yscale='linear')
        mlog.plotSumX_Fill(xdata,ydata,fillDict,yscale='log')
    elif args.action == 'time' :
        lumiDict={}
        lumiDict=getLumiInfoForRuns(session,c,runList,selectionDict,hltpath,beamstatus=beamstatus,beamenergy=beamenergy,beamfluctuation=beamfluctuation,finecorrections=finecorrections)
        #lumiDict=getLumiInfoForRuns(session,c,runList,selectionDict,hltpath,beamstatus='STABLE BEAMS')
        xdata={}#{run:[starttime,stoptime]}
        ydata={}
        ydata['Delivered']=[]
        ydata['Recorded']=[]
        keylist=lumiDict.keys()
        keylist.sort()
        if args.outputfile:
            reporter=csvReporter.csvReporter(ofilename)
            fieldnames=['run','starttime','stoptime','delivered','recorded']
            reporter.writeRow(fieldnames)
        for run in keylist:
            ydata['Delivered'].append(lumiDict[run][0])
            ydata['Recorded'].append(lumiDict[run][1])
            # run time boundaries come from the earlier runsByTimerange query
            starttime=runDict[run][0]
            stoptime=runDict[run][1]
            xdata[run]=[starttime,stoptime]
            if args.outputfile :
                reporter.writeRow([run,starttime,stoptime,lumiDict[run][0],lumiDict[run][1]])
        m.plotSumX_Time(xdata,ydata,startRunTime,stopRunTime,hltpath=hltpath,annotateBoundaryRunnum=args.annotateboundary,yscale='linear')
        mlog.plotSumX_Time(xdata,ydata,startRunTime,stopRunTime,hltpath=hltpath,annotateBoundaryRunnum=args.annotateboundary,yscale='log')
    elif args.action == 'perday':
        daydict={}#{day:[[run,cmslsnum,lsstarttime,delivered,recorded]]}
        lumibyls=getLumiOrderByLS(session,c,runList,selectionDict,hltpath,beamstatus=beamstatus,beamenergy=beamenergy,beamfluctuation=beamfluctuation,finecorrections=finecorrections)
        #lumibyls [[runnumber,runstarttime,lsnum,lsstarttime,delivered,recorded,recordedinpath]]
        if args.outputfile:
            reporter=csvReporter.csvReporter(ofilename)
            fieldnames=['day','begrunls','endrunls','delivered','recorded']
            reporter.writeRow(fieldnames)
        # first/last lumisection info labels the plot boundaries as run:ls
        beginfo=[lumibyls[0][3],str(lumibyls[0][0])+':'+str(lumibyls[0][2])]
        endinfo=[lumibyls[-1][3],str(lumibyls[-1][0])+':'+str(lumibyls[-1][2])]
        # bucket per-ls delivered/recorded by calendar day (ordinal)
        for perlsdata in lumibyls:
            lsstarttime=perlsdata[3]
            delivered=perlsdata[4]
            recorded=perlsdata[5]
            day=lsstarttime.toordinal()
            if not daydict.has_key(day):
                daydict[day]=[]
            daydict[day].append([delivered,recorded])
        days=daydict.keys()
        days.sort()
        daymin=days[0]
        daymax=days[-1]
        #alldays=range(daymin,daymax+1)
        resultbyday={}
        resultbyday['Delivered']=[]
        resultbyday['Recorded']=[]
        #for day in days:
        #print 'day min ',daymin
        #print 'day max ',daymax
        # fill every day in the range; days without data contribute zeros
        for day in range(daymin,daymax+1):
            if not daydict.has_key(day):
                delivered=0.0
                recorded=0.0
            else:
                daydata=daydict[day]
                mytransposed=CommonUtil.transposed(daydata,defaultval=0.0)
                delivered=sum(mytransposed[0])
                recorded=sum(mytransposed[1])
            resultbyday['Delivered'].append(delivered)
            resultbyday['Recorded'].append(recorded)
            if args.outputfile:
                reporter.writeRow([day,beginfo[1],endinfo[1],delivered,recorded])
        #print 'beginfo ',beginfo
        #print 'endinfo ',endinfo
        #print resultbyday
        m.plotPerdayX_Time( range(daymin,daymax+1) ,resultbyday,startRunTime,stopRunTime,boundaryInfo=[beginfo,endinfo],annotateBoundaryRunnum=args.annotateboundary,yscale='linear')
        mlog.plotPerdayX_Time( range(daymin,daymax+1),resultbyday,startRunTime,stopRunTime,boundaryInfo=[beginfo,endinfo],annotateBoundaryRunnum=args.annotateboundary,yscale='log')
    else:
        raise Exception,'must specify the type of x-axi'
    # release the DB session before rendering output
    del session
    del svc
    # emit PNG(s) in batch mode, otherwise draw interactively if requested
    if args.batch and args.yscale=='linear':
        m.drawPNG(args.batch)
    elif args.batch and args.yscale=='log':
        mlog.drawPNG(args.batch)
    elif args.batch and args.yscale=='both':
        m.drawPNG(args.batch)
        basename,extension=os.path.splitext(args.batch)
        logfilename=basename+'_log'+extension
        mlog.drawPNG(logfilename)
    else:
        if not args.interactive:
            return
        if args.interactive is True and args.yscale=='linear':
            m.drawInteractive()
        elif args.interactive is True and args.yscale=='log':
            mlog.drawInteractive()
        else:
            raise Exception('unsupported yscale for interactive mode : '+args.yscale)