def __init__(self,inputfilename):
    """Parse a '+'-joined input spec.

    All fields before the last are CSV result files; the last field is a
    run/LS selection file (empty string means no selection file).

    Fills:
      self.__inputresultfiles   -- list of result-file names (may be empty)
      self.__inputselectionfile -- selection-file name, '' if absent
      self.__inputResultHeader  -- header row of the result files, if any
      self.__inputResult        -- data rows accumulated over all result files
      self.__inputSelectionFileparsingResult -- parsed selection (csv or json
                                   parser result), or None if no selection file
    """
    filelist=inputfilename.split('+')
    self.__inputresultfiles=filelist[0:-1]
    self.__inputselectionfile=filelist[-1]
    self.__inputResultHeader=[]
    self.__inputResult=[]
    self.__inputSelectionFileparsingResult=None
    if len(self.__inputselectionfile)!=0:
        basename,extension=os.path.splitext(self.__inputselectionfile)
        if extension=='.csv':
            #if file ends with .csv,use csv parser,else parse as json file
            self.__inputSelectionFileparsingResult=csvSelectionParser.csvSelectionParser(self.__inputselectionfile)
        else:
            # FIX: the selection file was opened and never closed; 'with'
            # guarantees it is closed even if reading raises
            with open(self.__inputselectionfile,'r') as selectf:
                inputfilecontent=selectf.read()
            self.__inputSelectionFileparsingResult=selectionParser.selectionParser(inputfilecontent)
    if len(self.__inputresultfiles)!=0:
        for f in self.__inputresultfiles:
            # FIX: close each result file even if csv parsing raises
            with open(f) as ifile:
                # csv.Sniffer().has_header proved unreliable on these files;
                # filehasHeader is the custom replacement
                hasHeader=filehasHeader(ifile)
                ifile.seek(0)
                csvReader=csv.reader(ifile,delimiter=',')
                for irow,row in enumerate(csvReader):
                    if hasHeader and irow==0:
                        self.__inputResultHeader=row
                    else:
                        self.__inputResult.append(row)
def __init__(self, inputfilename):
    """Parse a '+'-joined input spec.

    Every field but the last names a CSV result file; the final field is a
    run/LS selection file ('' when no selection file was given).

    Fills:
      self.__inputresultfiles   -- result-file names (possibly empty list)
      self.__inputselectionfile -- selection-file name or ''
      self.__inputResultHeader  -- header row from the result files, if present
      self.__inputResult        -- all data rows from every result file
      self.__inputSelectionFileparsingResult -- csv/json parser result, or None
    """
    filelist = inputfilename.split('+')
    self.__inputresultfiles = filelist[0:-1]
    self.__inputselectionfile = filelist[-1]
    self.__inputResultHeader = []
    self.__inputResult = []
    self.__inputSelectionFileparsingResult = None
    if len(self.__inputselectionfile) != 0:
        basename, extension = os.path.splitext(self.__inputselectionfile)
        if extension == '.csv':
            #if file ends with .csv,use csv parser,else parse as json file
            self.__inputSelectionFileparsingResult = csvSelectionParser.csvSelectionParser(
                self.__inputselectionfile)
        else:
            # FIX: resource leak — the selection file handle was never closed
            with open(self.__inputselectionfile, 'r') as selectf:
                inputfilecontent = selectf.read()
            self.__inputSelectionFileparsingResult = selectionParser.selectionParser(
                inputfilecontent)
    if len(self.__inputresultfiles) != 0:
        for f in self.__inputresultfiles:
            # FIX: 'with' closes the file even when csv parsing raises
            with open(f) as ifile:
                # csv.Sniffer().has_header is unreliable here; use the
                # project's custom header detector instead
                hasHeader = filehasHeader(ifile)
                ifile.seek(0)
                csvReader = csv.reader(ifile, delimiter=',')
                for irow, row in enumerate(csvReader):
                    if hasHeader and irow == 0:
                        self.__inputResultHeader = row
                    else:
                        self.__inputResult.append(row)
def main(): c=constants() parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),description="Patch LumiData") parser.add_argument('-c',dest='destination',action='store',required=True,help='destination lumi db (required)') parser.add_argument('-s',dest='source',action='store',required=False,help='source db (required except for lumicalib)') parser.add_argument('-P',dest='authpath',action='store',required=True,help='path to authentication file (required)') parser.add_argument('-r',dest='runnumber',action='store',required=False,help='run number (optional)') parser.add_argument('-i',dest='inputfile',action='store',required=False,help='run selection file(optional)') parser.add_argument('-delta',dest='delta',action='store',required=False,help='calibration factor wrt old data in lumiDB (required for lumicalib)') parser.add_argument('action',choices=['deadtimeGT','deadtimeWBM','lumicalib','runtimestamp'],help='deadtimeGT: patch deadtime to deadtimebeamactive,\ndeadtimeWBM: patch deadtimeWBM to deadtimebeamactive,\nlumicalib: recalibrate inst lumi by delta where delta>1\n runtimestamp: add start,stop run timestamp where empty') parser.add_argument('--dryrun',dest='dryrun',action='store_true',help='only print datasource query result, do not update destination') parser.add_argument('--debug',dest='debug',action='store_true',help='debug') args=parser.parse_args() runnumber=args.runnumber destConnect=args.destination sourceConnect=args.source if args.authpath and len(args.authpath)!=0: os.environ['CORAL_AUTH_PATH']=args.authpath svc=coral.ConnectionService() sourcesession=None if sourceConnect: sourcesession=svc.connect(sourceConnect,accessMode=coral.access_ReadOnly) sourcesession.typeConverter().setCppTypeForSqlType("unsigned int","NUMBER(10)") sourcesession.typeConverter().setCppTypeForSqlType("unsigned long long","NUMBER(20)") destsession=svc.connect(destConnect,accessMode=coral.access_Update) destsession.typeConverter().setCppTypeForSqlType("unsigned 
int","NUMBER(10)") destsession.typeConverter().setCppTypeForSqlType("unsigned long long","NUMBER(20)") if args.debug: msg=coral.MessageStream('') msg.setMsgVerbosity(coral.message_Level_Debug) if args.dryrun: c.isdryrun=True else: c.isdryrun=False deadresult={} if args.action == 'deadtimeGT': if not sourceConnect: raise Exception('deadtimeGT action requies -s option for source connection string') deadresult=GTdeadtimeBeamActiveForRun(sourcesession,c,runnumber) print 'reading from ',sourceConnect print 'run : ',runnumber print 'LS:deadtimebeamactive' #print deadresult if deadresult and len(deadresult)!=0: for cmsls,deadtimebeamactive in deadresult.items(): print cmsls,deadtimebeamactive else: print 'no deadtime found for run ',runnumber print 'exit' return print 'total LS: ',len(deadresult) # if len(deadresult)!=max( [ (deadresult[x],x) for x in deadresult] )[1]: if len(deadresult)!=max( [ x for x in deadresult.keys() ] ): print 'total ls: ',len(deadresult) #print 'max key: ',max( [ x for x in deadresult.keys()]) print 'alert: missing Lumi Sections in the middle' for x in range(1,max( [ x for x in deadresult.keys()] ) ): if x not in deadresult: print 'filling up LS deadtime with 0: LS : ',x deadresult[x]=0 #print deadresult if not args.dryrun: print 'updating ',destConnect nupdated=patchDeadtimeForRun(destsession,c,int(runnumber),deadresult) print 'number of updated rows ',nupdated elif args.action == 'deadtimeWBM': if not sourceConnect: raise Exception('deadtimeWBM action requies -s option for source connection string') deadresult=WBMdeadtimeBeamActiveForRun(sourcesession,c,runnumber) print 'reading from ',sourceConnect print 'run : ',runnumber print 'LS:deadtimebeamactive' #print deadresult if deadresult and len(deadresult)!=0: for cmsls,deadtimebeamactive in deadresult.items(): print cmsls,deadtimebeamactive else: print 'no deadtime found for run ',runnumber print 'exit' return print 'total LS: ',len(deadresult) if len(deadresult)!=max( [ (deadresult[x],x) for x 
in deadresult])[1]: print 'alert: missing Lumi Sections in the middle' for x in range(1,max( [ (deadresult[x],x) for x in deadresult])[1]): if x not in deadresult: print 'filling up LS deadtime with 0: LS : ',x deadresult[x]=0 print deadresult if not args.dryrun: print 'updating ',destConnect nupdated=patchDeadtimeForRun(destsession,c,int(runnumber),deadresult) print 'number of updated rows ',nupdated elif args.action == 'lumicalib': if not args.delta or args.delta==0: raise Exception('Must provide non-zero -delta argument') runnums=[] if args.runnumber: runnums.append(args.runnumber) elif args.inputfile: basename,extension=os.path.splitext(args.inputfile) if extension=='.csv':#if file ends with .csv,use csv parser,else parse as json file fileparsingResult=csvSelectionParser.csvSelectionParser(args.inputfile) else: f=open(args.inputfile,'r') inputfilecontent=f.read() fileparsingResult=selectionParser.selectionParser(inputfilecontent) if not fileparsingResult: raise Exception('failed to parse the input file '+ifilename) #print fileparsingResult.runsandls() runnums=fileparsingResult.runs() #print runnums else: raise Exception('Must provide -r or -i argument as input') nupdated=recalibrateLumiForRun(destsession,c,args.delta,runnums) elif args.action == 'runtimestamp': if not sourceConnect: raise Exception('runtimestamp action requies -s option for source connection string') if not args.runnumber and not args.inputfile: #if no runnumber nor input file specified, check all runnums=missingTimeRuns(destsession,c) print 'these runs miss start/stop time: ',runnums print 'total : ',len(runnums) elif args.runnumber: runnums=[int(args.runnumber)] elif args.inputfile: basename,extension=os.path.splitext(args.inputfile) if extension=='.csv':#if file ends with .csv,use csv parser,else parse as json file fileparsingResult=csvSelectionParser.csvSelectionParser(args.inputfile) else: f=open(args.inputfile,'r') inputfilecontent=f.read() 
fileparsingResult=selectionParser.selectionParser(inputfilecontent) if not fileparsingResult: raise Exception('failed to parse the input file '+ifilename) runnums=fileparsingResult.runs() result=getTimeForRun(sourcesession,c,runnums) #for run,(startTimeT,stopTimeT) in result.items(): #print 'run: ',run #if not startTimeT or not stopTimeT: #print 'None' #else: #print 'start: ',startTimeT #print 'stop: ',stopTimeT addTimeForRun(destsession,c,result) if sourcesession: del sourcesession del destsession del svc
# Fragment of a pileup-histogram script (enclosing function not visible here).
# Echo the configuration, then book the output histogram and parse the inputs.
print '\tinput selection file: ',options.inputfile
print '\tMinBiasXsec: ',options.minBiasXsec
print '\tmaxPileupBin: ',options.maxPileupBin
print '\tnumPileupBins: ',options.numPileupBins
import ROOT
# One TH1D spanning [0, maxPileupBin) with numPileupBins bins
pileupHist = ROOT.TH1D (options.pileupHistName,options.pileupHistName, options.numPileupBins, 0., options.maxPileupBin)
nbins = options.numPileupBins
upper = options.maxPileupBin
# NOTE(review): file handle is never closed in this fragment — confirm a close
# happens outside the visible chunk
inpf = open (options.inputfile, 'r')
inputfilecontent = inpf.read()
# Selection file is read as JSON; result maps run -> list of lumi sections
inputRange = selectionParser.selectionParser (inputfilecontent).runsandls()
#inputRange=inputFilesetParser.inputFilesetParser(options.inputfile)
if options.calcMode in ['true','observed']:
    inputPileupRange=parseInputFile(options.inputLumiJSON)
    # now, we have to find the information for the input runs and LumiSections
    # in the Lumi/Pileup list. First, loop over inputs
    for (run, lslist) in sorted (inputRange.iteritems() ):
        # now, look for matching run, then match lumi sections
        # print "searching for run %d" % (run)
        if run in inputPileupRange.keys():
            # print run
            # fragment ends mid-loop; processing of LSPUlist continues beyond view
            LSPUlist = inputPileupRange[run]
print '\tinput selection file: ',options.inputfile print '\tMinBiasXsec: ',options.minBiasXsec print '\tmaxPileupBin: ',options.maxPileupBin print '\tnumPileupBins: ',options.numPileupBins import ROOT pileupHist = ROOT.TH1D (options.pileupHistName,options.pileupHistName, options.numPileupBins, 0., options.maxPileupBin) nbins = options.numPileupBins upper = options.maxPileupBin inpf = open (options.inputfile, 'r') inputfilecontent = inpf.read() inputRange = selectionParser.selectionParser (inputfilecontent).runsandls() #inputRange=inputFilesetParser.inputFilesetParser(options.inputfile) if options.calcMode in ['true','observed']: inputPileupRange=parseInputFile(options.inputLumiJSON) # now, we have to find the information for the input runs and LumiSections # in the Lumi/Pileup list. First, loop over inputs for (run, lslist) in sorted (six.iteritems(inputRange)) ): # now, look for matching run, then match lumi sections # print "searching for run %d" % (run) if run in inputPileupRange.keys(): #print run LSPUlist = inputPileupRange[run]
histFile.Close() # pprint (csvDict) sys.exit() ## Get input source if options.runnumber: inputRange = options.runnumber else: basename, extension = os.path.splitext(options.inputfile) if extension == ".csv": # if file ends with .csv, use csv # parser, else parse as json file fileparsingResult = csvSelectionParser.csvSelectionParser(options.inputfile) else: f = open(options.inputfile, "r") inputfilecontent = f.read() inputRange = selectionParser.selectionParser(inputfilecontent) if not inputRange: print "failed to parse the input file", options.inputfile raise recordedData = LumiQueryAPI.recordedLumiForRange(session, parameters, inputRange) ## pprint (recordedData) for runDTarray in recordedData: runNumber = runDTarray[0] deadTable = runDTarray[2] if options.saveRuns: hist = fillPileupHistogram(deadTable, parameters, runNumber=runNumber, debug=options.debugLumi) pileupHist.Add(hist) histList.append(hist) else: fillPileupHistogram(deadTable, parameters, hist=pileupHist, debug=options.debugLumi)
#pprint (csvDict) sys.exit() ## Get input source if options.runnumber: inputRange = options.runnumber else: basename, extension = os.path.splitext (options.inputfile) if extension == '.csv': # if file ends with .csv, use csv # parser, else parse as json file fileparsingResult = csvSelectionParser.csvSelectionParser (options.inputfile) else: f = open (options.inputfile, 'r') inputfilecontent = f.read() inputRange = selectionParser.selectionParser (inputfilecontent) if not inputRange: print('failed to parse the input file', options.inputfile) raise recordedData = LumiQueryAPI.recordedLumiForRange (session, parameters, inputRange) ## pprint (recordedData) for runDTarray in recordedData: runNumber = runDTarray[0] deadTable = runDTarray[2] if options.saveRuns: hist = fillPileupHistogram (deadTable, parameters, runNumber = runNumber, debug = options.debugLumi) pileupHist.Add (hist) histList.append (hist)
def main():
    """Lumi Validation command-line tool.

    Actions:
      batchupdate -- load run/LS validation flags from a CSV file (-i)
      update      -- set one flag interactively for -r or -runls selection
      dump        -- print or export (-o) validation data for a selection
    """
    parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),description = "Lumi Validation",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    allowedActions = ['batchupdate','update','dump']
    allowedFlags = ['UNKNOWN','GOOD','BAD','SUSPECT']
    # parse arguments
    parser.add_argument('action',choices=allowedActions,help='command actions')
    parser.add_argument('-c',dest='connect',action='store',required=True,help='connect string to lumiDB')
    parser.add_argument('-P',dest='authpath',action='store',required=True,help='path to authentication file')
    parser.add_argument('-i',dest='inputfile',action='store',help='lumi range selection file,required for batchupdate action')
    parser.add_argument('-o',dest='outputfile',action='store',help='output to csv file')
    parser.add_argument('-r',dest='runnumber',action='store',type=int,help='run number,optional')
    parser.add_argument('-runls',dest='runls',action='store',help='selection string,optional. Example [1234:[],4569:[1,1],[2,100]]')
    parser.add_argument('-flag',dest='flag',action='store',default='UNKNOWN',help='flag string,optional')
    parser.add_argument('--verbose',dest='verbose',action='store_true',help='verbose mode for printing' )
    parser.add_argument('--debug',dest='debug',action='store_true',help='debug')
    options=parser.parse_args()
    # reject unknown flags before touching the database
    if options.flag.upper() not in allowedFlags:
        print 'unrecognised flag ',options.flag.upper()
        # NOTE(review): bare 'raise' with no active exception — acts as a
        # hard abort; a real exception type would be clearer
        raise
    os.environ['CORAL_AUTH_PATH'] = options.authpath
    connectstring=options.connect
    svc = coral.ConnectionService()
    msg=coral.MessageStream('')
    if options.debug:
        msg.setMsgVerbosity(coral.message_Level_Debug)
    else:
        msg.setMsgVerbosity(coral.message_Level_Error)
    session=svc.connect(connectstring,accessMode=coral.access_Update)
    session.typeConverter().setCppTypeForSqlType("unsigned int","NUMBER(10)")
    session.typeConverter().setCppTypeForSqlType("unsigned long long","NUMBER(20)")
    result={}#parsing result {run:[[ls,status,comment]]}
    # NOTE(review): duplicates the debug verbosity setup above — harmless
    if options.debug :
        msg=coral.MessageStream('')
        msg.setMsgVerbosity(coral.message_Level_Debug)
    if options.action=='batchupdate':
        #populate from csv file, require -i argument
        if not options.inputfile:
            print 'inputfile -i option is required for batchupdate'
            raise
        # NOTE(review): this file handle is never closed explicitly
        csvReader=csv.reader(open(options.inputfile),delimiter=',')
        for row in csvReader:
            if len(row)==0:
                continue
            # expected CSV layout: run, ls, status, comment
            fieldrun=str(row[0]).strip()
            fieldls=str(row[1]).strip()
            fieldstatus=row[2]
            fieldcomment=row[3]
            if not result.has_key(int(fieldrun)):
                result[int(fieldrun)]=[]
            result[int(fieldrun)].append([int(fieldls),fieldstatus,fieldcomment])
        insertupdateValidationData(session,result)
    if options.action=='update':
        #update flag interactively, require -runls argument
        #runls={run:[]} populate all CMSLSNUM found in LUMISUMMARY
        #runls={run:[[1,1],[2,5]],run:[[1,1],[2,5]]}
        #default value
        if not options.runls and not options.runnumber:
            print 'option -runls or -r is required for update'
            raise
        if not options.flag:
            print 'option -flag is required for update'
            raise
        if options.flag.upper() not in allowedFlags:
            print 'unrecognised flag ',options.flag
            raise
        # -r means "all lumi sections of that run" (empty LS list)
        if options.runnumber:
            runlsjson='{"'+str(options.runnumber)+'":[]}'
        elif options.runls:
            runlsjson=CommonUtil.tolegalJSON(options.runls)
        sparser=selectionParser.selectionParser(runlsjson)
        runsandls=sparser.runsandls()
        commentStr='NA'
        statusStr=options.flag
        for run,lslist in runsandls.items():
            if not result.has_key(run):
                result[run]=[]
            for ls in lslist:
                result[run].append([ls,statusStr,commentStr])
        insertupdateValidationData(session,result)
    if options.action=='dump':
        if options.runls or options.inputfile:
            # -runls and -i both produce a {run: [ls,...]} selection;
            # -i takes precedence when both are given
            if options.runls:
                runlsjson=CommonUtil.tolegalJSON(options.runls)
                sparser=selectionParser.selectionParser(runlsjson)
                runsandls=sparser.runsandls()
            if options.inputfile:
                p=inputFilesetParser.inputFilesetParser(options.inputfile)
                runsandls=p.runsandls()
            for runnum,lslist in runsandls.items():
                dataperrun=getValidationData(session,run=runnum,cmsls=lslist)
                if dataperrun.has_key(runnum):
                    result[runnum]=dataperrun[runnum]
                else:
                    result[runnum]=[]
        else:
            # no selection: dump a single run (or everything if -r omitted)
            result=getValidationData(session,run=options.runnumber)
        # Python 2: keys() is a list, sorted in place
        runs=result.keys()
        runs.sort()
        if options.outputfile:
            r=csvReporter.csvReporter(options.outputfile)
            for run in runs:
                for perrundata in result[run]:
                    r.writeRow([str(run),str(perrundata[0]),perrundata[1],perrundata[2]])
        else:
            for run in runs:
                print '== ='
                if len(result[run])==0:
                    print str(run),'no validation data'
                    continue
                for lsdata in result[run]:
                    print str(run)+','+str(lsdata[0])+','+lsdata[1]+','+lsdata[2]
    # release CORAL handles
    del session
    del svc
ctr += 1 nextString = pathString[beginString:-1].find(",") endString = beginString + nextString if endString < beginString: endString = len(pathString) newPath = pathString[beginString:endString] beginString = endString + 1 paths.append(newPath) if options.json != "None": jfile = open(options.json, "r") filteredRunList = [] goodLS = "" parsingResult = "" goodLS = jfile.read() parsingResult = selectionParser.selectionParser(goodLS) if not parsingResult: print "Failed to parse the input JSON file", ifilename raise goodRuns = parsingResult.runs() for run in runs: key = runKeys[run] for anotherRun in goodRuns: if int(run) == anotherRun: filteredRunList.append(run) runs = filteredRunList if options.perKey: runsPerKey = {} for run in runs: key = runKeys[run]