def __toStd(self):
    ## Translate string and list to std::string and std::vector
    self._stdDir = {}
    for key, value in self.iteritems():
        if type(value) == str:
            self._stdDir[key] = string(value)
        elif type(value) == int:
            self._stdDir[key] = ctypes.c_int(value)
        elif type(value) == list:
            if type(value[0]) == str:
                self._stdDir[key] = vector("string")()
            elif type(value[0]) == float:
                self._stdDir[key] = vector("float")()
            elif type(value[0]) == int:
                self._stdDir[key] = vector("int")()
            for v in value:
                self._stdDir[key].push_back(v)
        else:
            pass
def GetSignalSignificanceFiveSigma(doSignificanceDir, fileList, modelName):
    if len(fileList) == 0:
        return -666
    poiSigDict = {}
    for fileName in fileList:
        if modelName in fileName and fileEnd in fileName:
            if 'asymptotic' in fileName:
                if 'N' in fileName and not 'N' in modelName:
                    continue
                if 'f5' in fileName and not 'f5' in modelName:
                    continue
                medianSignificance = GetExpDiscSignif(doSignificanceDir + fileName)
                #print 'modelName=', modelName
                #print 'fileName=', fileName
                #print 'poi=', fileName[fileName.find(modelName)+len(modelName)+1:fileName.rfind("asymptotic")-1]
                poi = float(fileName[fileName.find(modelName) + len(modelName) + 1:fileName.rfind("asymptotic") - 1])
                if medianSignificance > 0:
                    poiSigDict[poi] = medianSignificance
    medianSignifs = vector("double")()
    poiValues = vector("double")()
    for poi, sig in sorted(poiSigDict.items()):
        poiValues.push_back(poi)
        medianSignifs.push_back(sig)
    if len(medianSignifs) < 3:
        print 'ERROR: Not enough (nonzero) points to do interpolation!'
        return -666
    # x and y are swapped on purpose: evaluate the poi at which the
    # expected significance reaches 5 sigma
    interp = Math.Interpolator(medianSignifs, poiValues)
    return interp.Eval(5)
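# A self-contained sketch of the inverted-interpolation trick used above:
# building the ROOT::Math::Interpolator with (significance, poi) swapped makes
# Eval(5) return the poi at the 5-sigma crossing. The three (sig, poi) points
# below are toy numbers for illustration only.
from ROOT import Math, vector
sig = vector('double')()
poi = vector('double')()
for s, p in [(2., 1.), (4., 2.), (6., 3.)]:
    sig.push_back(s)
    poi.push_back(p)
interp = Math.Interpolator(sig, poi)  # default cubic spline needs >= 3 points
print interp.Eval(5)  # ~2.5 for these collinear toy points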
def getLumisToRun(JSON):
    if JSON == "":
        return vector('edm::LuminosityBlockRange')()
    vlumis = vector('edm::LuminosityBlockRange')()
    # build the CMSSW-style lumi string from the certification JSON
    myList = LumiList.LumiList(filename="/".join(["utils/cert_data", JSON])).getCMSSWString().split(',')
    lumisToProcess = cms.untracked.VLuminosityBlockRange(myList)
    for BlockRange in lumisToProcess:
        Block = BlockRange.split('-')
        startRun = int(Block[0].split(':')[0])
        startLumiBlock = int(Block[0].split(':')[1])
        if len(Block) > 1:
            endRun = int(Block[1].split(':')[0])
            endLumiBlock = int(Block[1].split(':')[1])
        else:
            endRun = startRun
            endLumiBlock = startLumiBlock
        vlumis.push_back(
            edm.LuminosityBlockRange(
                edm.LuminosityBlockID(startRun, startLumiBlock),
                edm.LuminosityBlockID(endRun, endLumiBlock)))
    return vlumis
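# Hedged usage sketch for getLumisToRun above; the certification file name is
# a hypothetical placeholder, and the "utils/cert_data" layout is assumed from
# the snippet itself.
good_lumis = getLumisToRun("Cert_Collisions_example.json")
print "Selected %d luminosity-block ranges" % good_lumis.size()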
def GetPDFPairInfo(tree):
    xfx_pair_dict = {}
    for pdf_name in PDF_NAMES:
        print pdf_name
        xfx_pair_dict[pdf_name] = PDFPairStruct(vector('double')(), vector('double')())
        tree.SetBranchAddress('xfx_first_' + pdf_name, xfx_pair_dict[pdf_name][0])
        tree.SetBranchAddress('xfx_second_' + pdf_name, xfx_pair_dict[pdf_name][1])
    return xfx_pair_dict
def toolTip(self):
    tip = "Hits: \t\t " + str(self._params.N_Hits) + "\n"
    tip += "Start: \t\t (" + "{:.2f}".format(self._params.start_point.w) + ", "
    tip += "{:.2f}".format(self._params.start_point.t) + ")\n"
    tip += ("Shower Start \t ("
            + "{:.2f}".format(self._params.showering_point.w) + ", ")
    tip += "{:.2f}".format(self._params.showering_point.t) + ")\n"
    tip += "End: \t\t (" + "{:.2f}".format(self._params.end_point.w) + ", "
    tip += "{:.2f}".format(self._params.end_point.t) + ")\n"
    if self._params.principal_dir[0] != 0:
        slope = self._params.principal_dir[1] / self._params.principal_dir[0]
        tip += "Slope: \t\t " + "{:.2f}".format(slope) + "\n"
    else:
        tip += "Slope: \t\t inf\n"
    if self._params.start_dir[0] != 0:
        tip += "Start Slope: \t " + \
            "{:.2f}".format(self._params.start_dir[1] / self._params.start_dir[0]) + "\n"
    else:
        tip += "Start Slope: \t inf\n"
    tip += "Angle: \t\t " + "{:.2f}".format(self._params.angle_2d) + "\n"
    tip += "\n"
    fannVec = vector(float)()
    self._params.GetFANNVector(fannVec)
    fannTitle = self._params.GetFANNVectorTitle()
    for title, value in zip(fannTitle, fannVec):
        tip += "{:.2f}: ".format(value) + "{title}\n".format(title=title)
    tip += "\nAdd more in python/datatypes/cluster.py:clusterParams:toolTip!"
    return tip
def copyObjectParameters( obj, copyObj ):
    ''' Copies the parameters of an LCObject to another LCObject '''
    parameters = obj.getParameters()
    copyParameters = copyObj.parameters()
    keyVec = vector('string')()
    for intKey in parameters.getIntKeys( keyVec ):
        intVec = vector('int')()
        copyParameters.setValues( intKey, parameters.getIntVals( intKey, intVec ) )
    keyVec.clear()
    for floatKey in parameters.getFloatKeys( keyVec ):
        floatVec = vector('float')()
        copyParameters.setValues( floatKey, parameters.getFloatVals( floatKey, floatVec ) )
    keyVec.clear()
    for stringKey in parameters.getStringKeys( keyVec ):
        stringVec = vector('string')()
        copyParameters.setValues( stringKey, parameters.getStringVals( stringKey, stringVec ) )
def get_collection_name(raw_data_words):
    '''Use Python bindings of the C++ StringSerializer to deserialise collection names'''
    from ROOT import string, vector, StringSerializer
    nw = raw_data_words[NameLengthOffset]
    name_raw = raw_data_words[NameOffset:NameOffset + nw]
    name_raw_vec = vector('unsigned int')()
    for w in name_raw:
        name_raw_vec.push_back(w)
    name_str_vec = vector(string)()
    ss = StringSerializer()
    ss.deserialize(name_raw_vec, name_str_vec)
    name_list = []
    for s in name_str_vec:
        name_list.append(s)
    return name_list
def UpdateFile(fil, tree_names, y_pred, var_name='score'):
    print('FileName to be read: %s' % fil)
    tfile = TFile(fil, 'update')
    trees = []
    if len(tree_names) != len(y_pred):
        print('Number of trees and number of predictions must be equal')
        exit()
    for t in tree_names:
        print('Tree will be updated: %s' % t)
        trees.append(tfile.Get(t))
    score = vector('float')()
    print('\nUpdating File --------------------------')
    for t in trees:
        t.Branch(var_name, score)
    for i in range(len(trees)):
        for x in y_pred[i]:
            score.clear()
            if not np.isscalar(x):
                for e in x:
                    score.push_back(e)
            else:
                score.push_back(x)
            trees[i].GetBranch(var_name).Fill()
        trees[i].Write()
    tfile.Close()
    del trees[:]
    del tfile
    print('Closing File --------------------------\n')
def printParameters( parameters ):
    ''' Helper method to print the content of an LCParameters object '''
    from ROOT import vector
    keyVec = vector( 'string' )()
    for intKey in parameters.getIntKeys( keyVec ):
        intVec = vector( 'int' )()
        parameters.getIntVals( intKey, intVec )
        print ' parameter %s [int]: %s' % ( intKey, formatVecValues( intVec ) )
    keyVec.clear()
    for floatKey in parameters.getFloatKeys( keyVec ):
        floatVec = vector( 'float' )()
        parameters.getFloatVals( floatKey, floatVec )
        print ' parameter %s [float]: %s' % ( floatKey, formatVecValues( floatVec ) )
    keyVec.clear()
    for stringKey in parameters.getStringKeys( keyVec ):
        stringVec = vector( 'string' )()
        parameters.getStringVals( stringKey, stringVec )
        print ' parameter %s [string]: %s' % ( stringKey, formatVecValues( stringVec ) )
def GetTStringVectorSamples(path, samples):
    from ROOT import vector, TString
    # Add the input data files
    v = vector(TString)()
    for s in samples:
        t = TString(path + s)
        v.push_back(t)
    return v
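# Minimal usage sketch for GetTStringVectorSamples; the path and sample names
# are illustrative placeholders, not taken from the original code.
samples = ["TTbar.root", "WJets.root"]
v = GetTStringVectorSamples("/eos/data/ntuples/", samples)
for s in v:
    print s  # prints the full TString paths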
def copyObjectParameters(obj, copyObj):
    ''' Copies the parameters of an LCObject to another LCObject '''
    parameters = obj.getParameters()
    copyParameters = copyObj.parameters()
    keyVec = vector('string')()
    for intKey in parameters.getIntKeys(keyVec):
        intVec = vector('int')()
        copyParameters.setValues(intKey, parameters.getIntVals(intKey, intVec))
    keyVec.clear()
    for floatKey in parameters.getFloatKeys(keyVec):
        floatVec = vector('float')()
        copyParameters.setValues(floatKey, parameters.getFloatVals(floatKey, floatVec))
    keyVec.clear()
    for stringKey in parameters.getStringKeys(keyVec):
        stringVec = vector('string')()
        copyParameters.setValues(stringKey, parameters.getStringVals(stringKey, stringVec))
def _tags(self):
    """
    Read available tags in the file.

    This method is used internally by the `tags()` method which turns the
    `std::vector<std::string>` into a python list of python strings.
    """
    from ROOT import vector
    tags = vector('string')()
    if not self.ttool.tags(tags):
        raise CouldNotLoadTagsException()
    return tags
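# The docstring above refers to a public `tags()` wrapper; a minimal sketch of
# what such a wrapper could look like (assumed for illustration, not taken
# from the source):
def tags(self):
    """Return the available tags as a plain Python list of strings."""
    return [str(t) for t in self._tags()]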
def pu_weight(data_pu, mc_pu, return_std_vector=True):
    print "sum(data_pu) - {}".format(sum(data_pu))
    print "sum(mc_pu) - {}".format(sum(mc_pu))
    weight = []
    for i in range(min(len(data_pu), len(mc_pu))):
        weight.append(data_pu[i] / mc_pu[i])
    if return_std_vector:
        v = vector('double')()
        v += weight
        return v
    else:
        return weight
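# Hypothetical call of pu_weight with toy pile-up profiles; the numbers are
# made up for illustration only.
data_pu = [10., 20., 30.]
mc_pu = [20., 20., 20.]
w_vec = pu_weight(data_pu, mc_pu)  # std::vector<double> of per-bin ratios
w_list = pu_weight(data_pu, mc_pu, return_std_vector=False)  # plain Python list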
def __init__(self, ptBins, etaBins, taggers, out_dir="../output/"):
    # pointer to selector
    self.jetCounter = JetCounter()
    # pt bins
    vec_ptBins = vector('double')()
    vec_ptBins += ptBins
    self.jetCounter.setPtBins(vec_ptBins)
    # eta bins
    vec_etaBins = vector('double')()
    vec_etaBins += etaBins
    self.jetCounter.setEtaBins(vec_etaBins)
    # one set of working points per tagger
    for k, v in taggers.items():
        workPoints = vector('double')()
        workPoints += v
        self.jetCounter.addTagger(k, workPoints)
    self.out_dir = out_dir
def get_bins_vec_2pt(bin1, bin2, xmin, xmax, xmid):
    # evaluate binning with bin1 width below xmid and bin2 width above xmid
    bins = vector(rt.double)()
    bins.push_back(xmin)
    while True:
        if bins[bins.size() - 1] < xmid:
            increment = bin1
        else:
            increment = bin2
        bins.push_back(bins[bins.size() - 1] + increment)
        if bins[bins.size() - 1] > xmax:
            break
    return bins
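# Example use of get_bins_vec_2pt, assuming the same `rt` (ROOT) alias as the
# snippet above: 0.1-wide bins below 1.0 and 0.5-wide bins above, on [0, 3].
# Note that the last edge may overshoot xmax by construction.
bins = get_bins_vec_2pt(0.1, 0.5, 0., 3., 1.)
print [bins[i] for i in xrange(bins.size())]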
def __init__(self, filename, plane, config, save_dir=None, first_run=None):
    Converter.__init__(self, filename, config)
    self.SaveDir = save_dir
    self.CalibrationDir = join(self.SaveDir, 'calibrations')
    self.RunNumber = basename(filename).strip('.root').split('_')[-1]
    self.FirstRun = first_run if first_run is not None else self.RunNumber
    self.OldFile = read_root_file(filename)
    self.OldTree = self.OldFile.Get('Plane{}'.format(6 + plane)).Get('Hits')
    self.EventTree = self.OldFile.Get('Event')
    self.NewFile = self.create_new_file(filename, save_dir)
    self.NewTree = self.OldTree.CloneTree(0)
    # New Branches
    self.ScalarBranches = OrderedDict([('NCluster', array([0], 'u2')),
                                       ('TimeStamp', array([0], 'f8'))])
    self.VectorBranches = OrderedDict([('VCal', vector('float')()),
                                       ('ClusterSize', vector('unsigned short')()),
                                       ('ClusterX', vector('float')()),
                                       ('ClusterY', vector('float')()),
                                       ('ClusterVcal', vector('float')())])
    # Charge Fits
    self.FitParameters = zeros((52, 80, 4))
    self.Fit = TF1('ErFit', '[3] * (TMath::Erf((x - [0]) / [1]) + [2])', -500, 255 * 7)
    self.get_fits()
    # Vars
    self.Hits = []
    self.ClusteredHits = []
    self.Clusters = []
    self.ProgressBar = None
def getMedian(th1d):
    n = th1d.GetXaxis().GetNbins()
    xVect = vector('double')(n)
    print xVect
    xVect = np.array(xVect)
    print xVect
    th1d.GetXaxis().GetCenter(xVect)  # fills the bin centers
    print xVect
    yVect = th1d.GetArray()
    print yVect
    yVect.SetSize(n)
    print yVect
    yVect = np.array(yVect)
    print yVect
    print np.median([xVect, yVect])
    print TMath.Median(n, xVect, yVect)
def CloneFile(path, fil, tree_names, y_pred, pref='', var_name='score', ntup_opt='recreate', same_path=False):
    print('FileName to be read: %s' % fil)
    tfile = TFile(fil)
    trees = []
    if len(tree_names) != len(y_pred):
        print('Number of trees and number of predictions must be equal')
        exit()
    for t in tree_names:
        print('Tree will be cloned: %s' % t)
        trees.append(tfile.Get(t))
    score = vector('float')()
    print('\nUpdating File --------------------------')
    if ntup_opt == 'recreate' and pref == '':
        print('Attention :: you are trying to create a new ntuple with same file name.')
        print('Please specify a prefix.')
        exit()
    fil_new = fil.replace('.root', pref + '.root')
    if not same_path:
        # keep the prefixed name when moving to the new path
        fil_new = path + fil_new[fil_new.rfind('/') + 1:]
    print('FileName to be recorded: %s' % fil_new)
    trees_new = []
    tfile_new = TFile(fil_new, ntup_opt)
    for t in trees:
        trees_new.append(t.CloneTree())
        trees_new[-1].Branch(var_name, score)
    for i in range(len(trees_new)):
        for x in y_pred[i]:
            score.clear()
            if not np.isscalar(x):
                for e in x:
                    score.push_back(e)
            else:
                score.push_back(x)
            trees_new[i].GetBranch(var_name).Fill()
        trees_new[i].Write()
    tfile_new.Write()
    tfile_new.Close()
    tfile.Close()
    del trees[:]
    del trees_new[:]
    del tfile
    del tfile_new
    print('Closing File --------------------------\n')
    return fil_new
def anajob(fileNames):
    ''' Method to loop over all events of the given slcio files
    and print information about all collections'''
    from ROOT import vector
    from pyLCIO import IOIMPL
    reader = IOIMPL.LCFactory.getInstance().createLCReader()
    # first loop over all files: open them and print the total number of runs and events
    fileNameVec = vector('string')()
    print('anajob: will open and read from files:')
    for fileName in fileNames:
        fileNameVec.push_back(fileName)
        reader.open(fileName)
        print(' %s [ number of runs: %d, number of events: %d ]\n' % (fileName, reader.getNumberOfRuns(), reader.getNumberOfEvents()))
        reader.close()
    # second loop: print information on all run headers
    # this time we open all files in one stream by passing them as a std::vector<std::string>
    reader.open(fileNameVec)
    runHeader = reader.readNextRunHeader()
    while runHeader:
        dumpRunHeader(runHeader)
        runHeader = reader.readNextRunHeader()
    print('')
    reader.close()
    print(' will reopen and read from files:')
    for fileName in fileNames:
        print(' %s' % (fileName))
    # final loop: print information on all events
    reader.open(fileNameVec)
    nEvents = 0
    for event in reader:
        # use the c++ method or the one defined above for printing
        # from pyLCIO import UTIL
        # UTIL.LCTOOLS.dumpEvent( event )
        dumpEvent(event)
        nEvents += 1
    reader.close()
    print(' %s events read from files:' % (nEvents))
    for fileName in fileNames:
        print(' %s' % (fileName))
def anajob( fileNames ):
    ''' Method to loop over all events of the given slcio files
    and print information about all collections'''
    from ROOT import vector
    from pyLCIO import IOIMPL
    reader = IOIMPL.LCFactory.getInstance().createLCReader()
    # first loop over all files: open them and print the total number of runs and events
    fileNameVec = vector( 'string' )()
    print 'anajob: will open and read from files:'
    for fileName in fileNames:
        fileNameVec.push_back( fileName )
        reader.open( fileName )
        print ' %s [ number of runs: %d, number of events: %d ]\n' % ( fileName, reader.getNumberOfRuns(), reader.getNumberOfEvents() )
        reader.close()
    # second loop: print information on all run headers
    # this time we open all files in one stream by passing them as a std::vector<std::string>
    reader.open( fileNameVec )
    runHeader = reader.readNextRunHeader()
    while runHeader:
        dumpRunHeader( runHeader )
        runHeader = reader.readNextRunHeader()
    print ''
    reader.close()
    print ' will reopen and read from files:'
    for fileName in fileNames:
        print ' %s' % ( fileName )
    # final loop: print information on all events
    reader.open( fileNameVec )
    nEvents = 0
    for event in reader:
        # use the c++ method or the one defined above for printing
        # from pyLCIO import UTIL
        # UTIL.LCTOOLS.dumpEvent( event )
        dumpEvent( event )
        nEvents += 1
    reader.close()
    print ' %s events read from files:' % ( nEvents )
    for fileName in fileNames:
        print ' %s' % ( fileName )
def get_bins(tree, bnam, bmatch, prec, ons, delt):
    # load tracks momenta to lists for all and matched tracks
    tree.SetBranchStatus("*", 0)
    tree.SetBranchStatus(bnam[0], 1)
    tree.SetBranchStatus(bnam[1], 1)
    tree.SetBranchStatus(bmatch[0], 1)
    tree.SetBranchStatus(bmatch[1], 1)
    # C++ structure for tree entry
    gROOT.ProcessLine("struct Entry {Double_t p0, p1; Bool_t match0, match1;};")
    entry = rt.Entry()
    tree.SetBranchAddress(bnam[0], AddressOf(entry, "p0"))
    tree.SetBranchAddress(bnam[1], AddressOf(entry, "p1"))
    tree.SetBranchAddress(bmatch[0], AddressOf(entry, "match0"))
    tree.SetBranchAddress(bmatch[1], AddressOf(entry, "match1"))
    # momenta values for all and matched tracks
    valAll = rt.list(rt.double)()
    valSel = rt.list(rt.double)()
    for i in xrange(tree.GetEntriesFast()):
        tree.GetEntry(i)
        valAll.push_back(entry.p0)
        valAll.push_back(entry.p1)
        if entry.match0 == 1:
            valSel.push_back(entry.p0)
        if entry.match1 == 1:
            valSel.push_back(entry.p1)
    tree.ResetBranchAddresses()
    # bin edges
    bins = vector(rt.double)()
    t0 = time()
    # runs faster when the algorithm is in plain ROOT
    gROOT.LoadMacro("get_bins.C")
    rt.get_bins(bins, valAll, valSel, prec, ons, delt)
    t1 = time()
    print "Time to calculate the bins (sec):", t1 - t0
    return bins
def get_bins_vec_3pt(bin1, bin2, bin3, xmin, xmax, xmid1, xmid2):
    # evaluate binning with bin1 below xmid1, bin2 between xmid1 and xmid2
    # and bin3 above xmid2
    bins = vector(rt.double)()
    bins.push_back(xmin)
    while True:
        xpos = bins[bins.size() - 1]
        increment = bin1
        if xpos > xmid1:
            increment = bin2
        if xpos > xmid2:
            increment = bin3
        bins.push_back(xpos + increment)
        if bins[bins.size() - 1] > xmax:
            break
    return bins
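# The edge vector from get_bins_vec_3pt can feed a variable-bin histogram; a
# small sketch under the assumption that the histogram name and binning are
# illustrative. The edges are copied into an array('d') since the TH1D
# constructor takes a C array of doubles.
from array import array
import ROOT
bins = get_bins_vec_3pt(0.1, 0.2, 0.5, 0., 5., 1., 2.)
edges = array('d', [bins[i] for i in xrange(bins.size())])
h = ROOT.TH1D("h_pt", "p_{T}", len(edges) - 1, edges)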
def _drawPDF(self, pdfType, pdfW, var, rng, global_weight, weights, totCut,
             sampleName, trees, columns, doFold, cutName, variableName,
             sample, fixZeros):
    '''
    pdfType       : alphaS,...
    pdfW          : pdfW
    var           : the variable to plot
    rng           : the binning of the variable to plot
    global_weight : sample global_weight
    weights       : the weights, 'root file' dependent
    totCut        : the selection
    trees         : the list of input files for this particular sample
    '''
    self._logger.info('Yields by process')
    print 'pdfW treeName', pdfW
    numTree = 0

    if pdfW == 'PDFWeights_AlphaS':
        bigNameUp = 'histo_' + sampleName + 'Up_' + cutName + '_' + variableName
        bigNameDo = 'histo_' + sampleName + 'Do_' + cutName + '_' + variableName
        hTotalAlphaUp = self._makeshape(bigNameUp, rng)
        hTotalAlphaDo = self._makeshape(bigNameDo, rng)
    elif pdfW == 'PDFWeights_Scale':
        bigNameAUp = 'histo_' + sampleName + 'AUp_' + cutName + '_' + variableName
        bigNameADo = 'histo_' + sampleName + 'ADo_' + cutName + '_' + variableName
        hTotalAUp = self._makeshape(bigNameAUp, rng)
        hTotalADo = self._makeshape(bigNameADo, rng)
        bigNameBUp = 'histo_' + sampleName + 'BUp_' + cutName + '_' + variableName
        bigNameBDo = 'histo_' + sampleName + 'BDo_' + cutName + '_' + variableName
        hTotalBUp = self._makeshape(bigNameBUp, rng)
        hTotalBDo = self._makeshape(bigNameBDo, rng)
        bigNameABUp = 'histo_' + sampleName + 'ABUp_' + cutName + '_' + variableName
        bigNameABDo = 'histo_' + sampleName + 'ABDo_' + cutName + '_' + variableName
        hTotalABUp = self._makeshape(bigNameABUp, rng)
        hTotalABDo = self._makeshape(bigNameABDo, rng)
    elif pdfW == 'PDFWeights_Error':
        hTotal_Err = [None] * 101
        for idx in xrange(101):
            bigName = 'histo_' + sampleName + str(idx) + '_' + cutName + '_' + variableName
            hTotal_Err[idx] = self._makeshape(bigName, rng)
    else:
        print 'This pdfW', pdfW, 'is not ready, exiting...'
        exit()

    for tree in trees:
        #myBr = tree.GetBranch(pdfW)
        #myBr = tree.GetBranch(pdfW).GetListOfLeaves()
        print ' {0:<20} : {1:^9}'.format(sampleName, tree.GetEntries())
        RDF = RDataFrame
        if ('ALL' in columns) or (len(columns) == 0):
            Dtree = RDF(tree)
        else:
            v_columns = vector('string')()
            for column in columns:
                v_columns.push_back(column)
            Dtree = RDF(tree, v_columns)
        for key in self._definitions:
            #print key, 'crspdto', self._definitions[key]
            Dtree = Dtree.Define(key, self._definitions[key])

        totalWeight = global_weight
        # if weights vector is not given, do not apply file dependent weights
        if len(weights) != 0:
            # if weight is not given for a given root file, '-', do not apply
            # the file dependent weight for that root file
            if weights[numTree] != '-':
                totalWeight = "(" + totalWeight + ") * (" + weights[numTree] + ")"

        ################################################
        # PDFWeights_AlphaS
        ################################################
        if pdfW == 'PDFWeights_AlphaS':
            print 'checking size of PDFWeights_AlphaS'
            size = 0
            for event in tree:
                size = event.PDFWeights_AlphaS.size()
                break
            if size == 2:
                print 'PDFWeights_AlphaS size is 2, let us evaluate'
                totalWeightDo = "(" + totalWeight + ") * (PDFWeights_AlphaS[0])"
                totalWeightUp = "(" + totalWeight + ") * (PDFWeights_AlphaS[1])"
            else:
                print 'PDFWeights_AlphaS size is not 2, keeping Up and Down the same as nominal'
                totalWeightDo = totalWeight
                totalWeightUp = totalWeight
            Dtree = Dtree.Define('totwDo', totalWeightDo) \
                         .Define('totwUp', totalWeightUp)

            # New histogram
            shapeNameUp = 'histo_' + sampleName + 'Up' + str(numTree)
            shapeNameDo = 'histo_' + sampleName + 'Do' + str(numTree)

            # prepare a dummy to fill
            hclass, hargs, ndim = self._bins2hclass(rng)
            hModelUp = (shapeNameUp, shapeNameUp) + hargs
            hModelDo = (shapeNameDo, shapeNameDo) + hargs

            if ndim == 1:
                shapeUp = Dtree.Filter(totCut).Histo1D(hModelUp, var, 'totwUp')
                shapeDo = Dtree.Filter(totCut).Histo1D(hModelDo, var, 'totwDo')
            elif ndim == 2:
                shapeUp = Dtree.Filter(totCut).Histo2D(hModelUp, var, 'totwUp')
                shapeDo = Dtree.Filter(totCut).Histo2D(hModelDo, var, 'totwDo')

            nTriesUp = shapeUp.Integral()
            nTriesDo = shapeDo.Integral()
            print 'integral Up and Do', nTriesUp, nTriesDo
            if nTriesUp == 0:
                print 'Warning : entries is 0 for', hModelUp
            if nTriesDo == 0:
                print 'Warning : entries is 0 for', hModelDo
            if math.isnan(nTriesUp):
                print 'ERROR : entries is nan for', hModelUp
            if math.isnan(nTriesDo):
                print 'ERROR : entries is nan for', hModelDo

            if numTree == 0:
                shapeUp.SetTitle(bigNameUp)
                shapeUp.SetName(bigNameUp)
                hTotalAlphaUp = shapeUp
                shapeDo.SetTitle(bigNameDo)
                shapeDo.SetName(bigNameDo)
                hTotalAlphaDo = shapeDo
            else:
                cloneUp = shapeUp.Clone()
                cloneDo = shapeDo.Clone()
                hTotalAlphaUp.Add(cloneUp)
                hTotalAlphaDo.Add(cloneDo)
                cloneUp.Delete()
                cloneDo.Delete()
                shapeUp.Delete()
                shapeDo.Delete()

        #####################################################
        # PDFWeights_Scale
        #####################################################
        if pdfW == 'PDFWeights_Scale':
            # Using three nuisances for muR up/do, muF up/do, correlated up/do
            # as recommended: https://indico.cern.ch/event/494682/contributions/1172505/attachments/1223578/1800218/mcaod-Feb15-2016.pdf
            print 'checking size of PDFWeights_Scale'
            size = 0
            for event in tree:
                size = event.PDFWeights_Scale.size()
                break
            #tree.GetEntry(0)
            #size = len( tree.PDFWeights_Scale )
            #for idx in xrange(size):
            #    if idx == 5 or idx == 7:
            #        continue
            #    print idx, tree.PDFWeights_Scale[idx]
            #print 'scale size', size
            if size == 9:
                print 'saving scale nuisance variation: renormalization and factorization scale changing'
                totalW_muAUp = "(" + totalWeight + ") * (PDFWeights_Scale[1])"
                totalW_muADo = "(" + totalWeight + ") * (PDFWeights_Scale[2])"
                totalW_muBUp = "(" + totalWeight + ") * (PDFWeights_Scale[3])"
                totalW_muBDo = "(" + totalWeight + ") * (PDFWeights_Scale[6])"
                totalW_muABUp = "(" + totalWeight + ") * (PDFWeights_Scale[4])"
                totalW_muABDo = "(" + totalWeight + ") * (PDFWeights_Scale[8])"
            else:
                totalW_muAUp = totalWeight
                totalW_muADo = totalWeight
                totalW_muBUp = totalWeight
                totalW_muBDo = totalWeight
                totalW_muABUp = totalWeight
                totalW_muABDo = totalWeight
            Dtree = Dtree.Define('totalW_muAUp', totalW_muAUp) \
                         .Define('totalW_muADo', totalW_muADo) \
                         .Define('totalW_muBUp', totalW_muBUp) \
                         .Define('totalW_muBDo', totalW_muBDo) \
                         .Define('totalW_muABUp', totalW_muABUp) \
                         .Define('totalW_muABDo', totalW_muABDo)

            # New histogram
            shapeNameAUp = 'histo_' + sampleName + 'AUp' + str(numTree)
            shapeNameADo = 'histo_' + sampleName + 'ADo' + str(numTree)
            shapeNameBUp = 'histo_' + sampleName + 'BUp' + str(numTree)
            shapeNameBDo = 'histo_' + sampleName + 'BDo' + str(numTree)
            shapeNameABUp = 'histo_' + sampleName + 'ABUp' + str(numTree)
            shapeNameABDo = 'histo_' + sampleName + 'ABDo' + str(numTree)

            # prepare a dummy to fill
            hclass, hargs, ndim = self._bins2hclass(rng)
            hModelAUp = (shapeNameAUp, shapeNameAUp) + hargs
            hModelADo = (shapeNameADo, shapeNameADo) + hargs
            hModelBUp = (shapeNameBUp, shapeNameBUp) + hargs
            hModelBDo = (shapeNameBDo, shapeNameBDo) + hargs
            hModelABUp = (shapeNameABUp, shapeNameABUp) + hargs
            hModelABDo = (shapeNameABDo, shapeNameABDo) + hargs

            if ndim == 1:
                shapeAUp = Dtree.Filter(totCut).Histo1D(hModelAUp, var, 'totalW_muAUp')
                shapeADo = Dtree.Filter(totCut).Histo1D(hModelADo, var, 'totalW_muADo')
                shapeBUp = Dtree.Filter(totCut).Histo1D(hModelBUp, var, 'totalW_muBUp')
                shapeBDo = Dtree.Filter(totCut).Histo1D(hModelBDo, var, 'totalW_muBDo')
                shapeABUp = Dtree.Filter(totCut).Histo1D(hModelABUp, var, 'totalW_muABUp')
                shapeABDo = Dtree.Filter(totCut).Histo1D(hModelABDo, var, 'totalW_muABDo')
            elif ndim == 2:
                shapeAUp = Dtree.Filter(totCut).Histo2D(hModelAUp, var, 'totalW_muAUp')
                shapeADo = Dtree.Filter(totCut).Histo2D(hModelADo, var, 'totalW_muADo')
                shapeBUp = Dtree.Filter(totCut).Histo2D(hModelBUp, var, 'totalW_muBUp')
                shapeBDo = Dtree.Filter(totCut).Histo2D(hModelBDo, var, 'totalW_muBDo')
                shapeABUp = Dtree.Filter(totCut).Histo2D(hModelABUp, var, 'totalW_muABUp')
                shapeABDo = Dtree.Filter(totCut).Histo2D(hModelABDo, var, 'totalW_muABDo')

            nTriesAUp = shapeAUp.Integral()
            nTriesADo = shapeADo.Integral()
            nTriesBUp = shapeBUp.Integral()
            nTriesBDo = shapeBDo.Integral()
            nTriesABUp = shapeABUp.Integral()
            nTriesABDo = shapeABDo.Integral()
            print 'integral AUp and ADo', nTriesAUp, nTriesADo
            print 'integral BUp and BDo', nTriesBUp, nTriesBDo
            print 'integral ABUp and ABDo', nTriesABUp, nTriesABDo
            if nTriesAUp == 0:
                print 'Warning : entries is 0 for', hModelAUp
            if nTriesADo == 0:
                print 'Warning : entries is 0 for', hModelADo
            if nTriesBUp == 0:
                print 'Warning : entries is 0 for', hModelBUp
            if nTriesBDo == 0:
                print 'Warning : entries is 0 for', hModelBDo
            if nTriesABUp == 0:
                print 'Warning : entries is 0 for', hModelABUp
            if nTriesABDo == 0:
                print 'Warning : entries is 0 for', hModelABDo
            if math.isnan(nTriesAUp):
                print 'ERROR : entries is nan for', hModelAUp
            if math.isnan(nTriesADo):
                print 'ERROR : entries is nan for', hModelADo
            if math.isnan(nTriesBUp):
                print 'ERROR : entries is nan for', hModelBUp
            if math.isnan(nTriesBDo):
                print 'ERROR : entries is nan for', hModelBDo
            if math.isnan(nTriesABUp):
                print 'ERROR : entries is nan for', hModelABUp
            if math.isnan(nTriesABDo):
                print 'ERROR : entries is nan for', hModelABDo

            if numTree == 0:
                shapeAUp.SetTitle(bigNameAUp)
                shapeAUp.SetName(bigNameAUp)
                hTotalAUp = shapeAUp
                shapeADo.SetTitle(bigNameADo)
                shapeADo.SetName(bigNameADo)
                hTotalADo = shapeADo
                shapeBUp.SetTitle(bigNameBUp)
                shapeBUp.SetName(bigNameBUp)
                hTotalBUp = shapeBUp
                shapeBDo.SetTitle(bigNameBDo)
                shapeBDo.SetName(bigNameBDo)
                hTotalBDo = shapeBDo
                shapeABUp.SetTitle(bigNameABUp)
                shapeABUp.SetName(bigNameABUp)
                hTotalABUp = shapeABUp
                shapeABDo.SetTitle(bigNameABDo)
                shapeABDo.SetName(bigNameABDo)
                hTotalABDo = shapeABDo
            else:
                cloneAUp = shapeAUp.Clone()
                cloneADo = shapeADo.Clone()
                cloneBUp = shapeBUp.Clone()
                cloneBDo = shapeBDo.Clone()
                cloneABUp = shapeABUp.Clone()
                cloneABDo = shapeABDo.Clone()
                hTotalAUp.Add(cloneAUp)
                hTotalADo.Add(cloneADo)
                hTotalBUp.Add(cloneBUp)
                hTotalBDo.Add(cloneBDo)
                hTotalABUp.Add(cloneABUp)
                hTotalABDo.Add(cloneABDo)
                cloneAUp.Delete()
                cloneADo.Delete()
                cloneBUp.Delete()
                cloneBDo.Delete()
                cloneABUp.Delete()
                cloneABDo.Delete()
                shapeAUp.Delete()
                shapeADo.Delete()
                shapeBUp.Delete()
                shapeBDo.Delete()
                shapeABUp.Delete()
                shapeABDo.Delete()

        #########################################
        # PDFWeights_Error
        #########################################
        # 0-100, 0 => nominal?
        if pdfW == 'PDFWeights_Error':
            totalW_pdfErr = []
            size = 0
            for event in tree:
                size = event.PDFWeights_Error.size()
                break
            #tree.GetEntry(0)
            #size = len( tree.PDFWeights_Error )
            print 'PDFWeights_Error size', size
            if size > 101:
                print 'size of PDFWeights_Error is gt 101, exiting...'
                exit()
            for idx in xrange(size):
                #print idx, tree.PDFWeights_Error[idx]
                totalW_pdfErr.append("(" + totalWeight + ") * (PDFWeights_Error[" + str(idx) + "])")
            for idx in xrange(101 - size):
                totalW_pdfErr.append("1")
            for idx in xrange(101):
                #print idx, totalW_pdfErr[idx]
                Dtree = Dtree.Define('totalW_pdfErr' + str(idx), totalW_pdfErr[idx])
                # new histogram
                shapeName = 'histo_' + sampleName + '_' + str(idx) + '_' + str(numTree)
                # prepare a dummy to fill
                hclass, hargs, ndim = self._bins2hclass(rng)
                hModel = (shapeName, shapeName) + hargs
                if ndim == 1:
                    shape = Dtree.Filter(totCut).Histo1D(hModel, var, 'totalW_pdfErr' + str(idx))
                elif ndim == 2:
                    shape = Dtree.Filter(totCut).Histo2D(hModel, var, 'totalW_pdfErr' + str(idx))
                else:
                    print 'this dim of hist not ready', ndim, 'exiting'
                    exit()
                nTries = shape.Integral()
                #print idx, 'integral ', nTries
                if nTries == 0:
                    print 'Warning : entries is 0 for', hModel
                if math.isnan(nTries):
                    print 'ERROR : entries is nan for', hModel
                if numTree == 0:
                    bigName = 'histo_' + sampleName + str(idx) + '_' + cutName + '_' + variableName
                    shape.SetTitle(bigName)
                    shape.SetName(bigName)
                    hTotal_Err[idx] = shape
                else:
                    cloneH = shape.Clone()
                    hTotal_Err[idx].Add(cloneH)
                    cloneH.Delete()
                    shape.Delete()

        numTree += 1
        #Dtree.Delete()
        #RDF.Delete()

    # fold if needed
    if doFold == 1 or doFold == 3:
        if pdfW == 'PDFWeights_AlphaS':
            self._FoldOverflow(hTotalAlphaUp)
            self._FoldOverflow(hTotalAlphaDo)
        if pdfW == 'PDFWeights_Scale':
            self._FoldOverflow(hTotalAUp)
            self._FoldOverflow(hTotalADo)
            self._FoldOverflow(hTotalBUp)
            self._FoldOverflow(hTotalBDo)
            self._FoldOverflow(hTotalABUp)
            self._FoldOverflow(hTotalABDo)
        if pdfW == 'PDFWeights_Error':
            for idx in xrange(101):
                self._FoldOverflow(hTotal_Err[idx])
    if doFold == 2 or doFold == 3:
        if pdfW == 'PDFWeights_AlphaS':
            self._FoldUnderflow(hTotalAlphaUp)
            self._FoldUnderflow(hTotalAlphaDo)
        if pdfW == 'PDFWeights_Scale':
            self._FoldUnderflow(hTotalAUp)
            self._FoldUnderflow(hTotalADo)
            self._FoldUnderflow(hTotalBUp)
            self._FoldUnderflow(hTotalBDo)
            self._FoldUnderflow(hTotalABUp)
            self._FoldUnderflow(hTotalABDo)
        if pdfW == 'PDFWeights_Error':
            for idx in xrange(101):
                self._FoldUnderflow(hTotal_Err[idx])

    # go 1d
    if pdfW == 'PDFWeights_AlphaS':
        hTotalFinalAlphaUp = self._h2toh1(hTotalAlphaUp)
        hTotalFinalAlphaDo = self._h2toh1(hTotalAlphaDo)
        hTotalFinalAlphaUp.SetTitle('histo_' + sampleName + 'Up')
        hTotalFinalAlphaDo.SetTitle('histo_' + sampleName + 'Do')
        hTotalFinalAlphaUp.SetName('histo_' + sampleName + 'Up')
        hTotalFinalAlphaDo.SetName('histo_' + sampleName + 'Do')
    if pdfW == 'PDFWeights_Scale':
        hTotalFinalAUp = self._h2toh1(hTotalAUp)
        hTotalFinalADo = self._h2toh1(hTotalADo)
        hTotalFinalAUp.SetTitle('histo_' + sampleName + 'AUp')
        hTotalFinalADo.SetTitle('histo_' + sampleName + 'ADo')
        hTotalFinalAUp.SetName('histo_' + sampleName + 'AUp')
        hTotalFinalADo.SetName('histo_' + sampleName + 'ADo')
        hTotalFinalBUp = self._h2toh1(hTotalBUp)
        hTotalFinalBDo = self._h2toh1(hTotalBDo)
        hTotalFinalBUp.SetTitle('histo_' + sampleName + 'BUp')
        hTotalFinalBDo.SetTitle('histo_' + sampleName + 'BDo')
        hTotalFinalBUp.SetName('histo_' + sampleName + 'BUp')
        hTotalFinalBDo.SetName('histo_' + sampleName + 'BDo')
        hTotalFinalABUp = self._h2toh1(hTotalABUp)
        hTotalFinalABDo = self._h2toh1(hTotalABDo)
        hTotalFinalABUp.SetTitle('histo_' + sampleName + 'ABUp')
        hTotalFinalABDo.SetTitle('histo_' + sampleName + 'ABDo')
        hTotalFinalABUp.SetName('histo_' + sampleName + 'ABUp')
        hTotalFinalABDo.SetName('histo_' + sampleName + 'ABDo')
    if pdfW == 'PDFWeights_Error':
        hTotalFinalErr = [None] * 101
        for idx in xrange(101):
            hTotalFinalErr[idx] = self._h2toh1(hTotal_Err[idx])
            hTotalFinalErr[idx].SetTitle('histo_' + sampleName + str(idx).zfill(3))
            hTotalFinalErr[idx].SetName('histo_' + sampleName + str(idx).zfill(3))

    # fix negative (almost never happening)
    # don't do it here by default, because you may have interference that is actually negative!
    # do this only if triggered: use with caution!
    # This also checks that only in specific phase spaces this is activated, "cutName"
    #
    # To be used with caution -> do not use this option if you don't know what you are playing with
    #
    if fixZeros and 'suppressNegative' in sample.keys() and (cutName in sample['suppressNegative'] or 'all' in sample['suppressNegative']):
        if pdfW == 'PDFWeights_AlphaS':
            self._fixNegativeBinAndError(hTotalFinalAlphaUp)
            self._fixNegativeBinAndError(hTotalFinalAlphaDo)
        if pdfW == 'PDFWeights_Scale':
            self._fixNegativeBinAndError(hTotalFinalAUp)
            self._fixNegativeBinAndError(hTotalFinalADo)
            self._fixNegativeBinAndError(hTotalFinalBUp)
            self._fixNegativeBinAndError(hTotalFinalBDo)
            self._fixNegativeBinAndError(hTotalFinalABUp)
            self._fixNegativeBinAndError(hTotalFinalABDo)
        if pdfW == 'PDFWeights_Error':
            for idx in xrange(101):
                self._fixNegativeBinAndError(hTotalFinalErr[idx])

    histoList = []
    if pdfW == 'PDFWeights_AlphaS':
        histoList.append(hTotalFinalAlphaUp)
        histoList.append(hTotalFinalAlphaDo)
    if pdfW == 'PDFWeights_Scale':
        histoList.append(hTotalFinalAUp)
        histoList.append(hTotalFinalADo)
        histoList.append(hTotalFinalBUp)
        histoList.append(hTotalFinalBDo)
        histoList.append(hTotalFinalABUp)
        histoList.append(hTotalFinalABDo)
    if pdfW == 'PDFWeights_Error':
        return hTotalFinalErr
    else:
        return histoList
# exe parameters
numEvents = -1  # -1 to process all (10000)
samList = {'signals'}  # list of samples to be processed - append multiple lists: 'data', 'mainbkg', 'datall', 'minortt', 'dibosons', 'bosons', 'trigger'
trgList = 'def_2016'  # trigger paths - remove TriggerOperator to not apply trigger
iDir = '/lustre/cmswork/hh/alpha_ntuples/'
ntuplesVer = 'v0_20161004'  # equal to ntuple's folder
oDir = './output/v0_AccTrg_sig'  # output dir ('./test')

# ---------------
if not os.path.exists(oDir):
    os.mkdir(oDir)

trg_names = triggerlists[trgList]
trg_names_v = vector("string")()
if not trg_names:
    print "### WARNING: empty hlt_names ###"
for trg_name in trg_names:
    trg_names_v.push_back(trg_name)

# to parse variables to the analyzer
config = {"jets_branch_name": "Jets",
          "hlt_names": trg_names,
          "n_gen_events": 0}

snames = []
for s in samList:
    snames.extend(samlists[s])
data_path = "{}/src/Analysis/alp_analysis/data/".format(os.environ["CMSSW_BASE"]) weights = {'PUWeight', 'GenWeight', 'BTagWeight'} #weights to be applied # --------------- if not os.path.exists(oDir): os.mkdir(oDir) if args.btag == 'cmva': btagAlgo = "pfCombinedMVAV2BJetTags" btag_wp = wps['CMVAv2_moriond'] elif args.btag == 'csv': btagAlgo = "pfCombinedInclusiveSecondaryVertexV2BJetTags" btag_wp = wps['CSVv2_moriond'] # to convert weights weights_v = vector("string")() for w in weights: weights_v.push_back(w) # to parse variables to the anlyzer config = {"eventInfo_branch_name" : "EventInfo", "jets_branch_name": "Jets", "dijets_branch_name": "DiJets", "genbfromhs_branch_name" : "GenBFromHs", "genhs_branch_name" : "GenHs", "n_gen_events":0, "xsec_br" : 0, "matcheff": 0, "kfactor" : 0, "isData" : False, "lumiFb" : intLumi_fb, }
intLumi_fb = 12.6  # 36.26, 12.6

iDir = "/lustre/cmswork/hh/alpha_ntuples/" + args.iDir
oDir = '/lustre/cmswork/hh/alp_baseSelector/' + args.oDir
if args.jesUp:
    oDir += "_JESup"
elif args.jesDown:
    oDir += "_JESdown"

data_path = "{}/src/Analysis/alp_analysis/data/".format(os.environ["CMSSW_BASE"])

# weights to be applied
weights = {'PUWeight', 'GenWeight', 'BTagWeight'}
weights_nobTag = {'PUWeight', 'GenWeight'}

# to convert weights
weights_v = vector("string")()
for w in weights:
    weights_v.push_back(w)
w_nobTag_v = vector("string")()
for w in weights_nobTag:
    w_nobTag_v.push_back(w)

# ---------------
if not os.path.exists(oDir):
    os.mkdir(oDir)

if args.doMixed:
    config = {"jets_branch_name": "Jets",
              }
else:
    config = {"eventInfo_branch_name": "EventInfo",
#parser.add_argument("-s", "--samList", help="sample list", default="") args = parser.parse_args() # exe parameters numEvents = args.numEvts oname = "QCD500_tt_SM100k" samList = ['qcd_500toInf_m', 'tt', 'SM' ] #,'SM' 'qcd_200to500_m','tt' # debug - qcd never as last sample times_sm = 100000. #, args.hh_times_sm intLumi_fb = 12.6 lumi_factor = args.lumi_factor lumi = intLumi_fb * lumi_factor nn_vars = ["thrustMayor", "thrustMinor", "sumPz", "invMass"] nn_vars_v = vector("string")() for v in nn_vars: nn_vars_v.push_back(v) mult = args.mult extra_cut = args.extra_cut extra_cut_name = args.extra_cut_name iDir = "/lustre/cmswork/hh/alp_baseSelector/" ntuplesVer = args.ntuplesVer oDir = args.oDir data_path = "{}/src/Analysis/alp_analysis/data/".format( os.environ["CMSSW_BASE"]) weights = {} #weights to be applied 'PUWeight', 'GenWeight', 'BTagWeight' # ---------------
def readEvents(filename, nevents):
    inp = open(filename)
    # skip header
    while 1:
        record = split(inp.readline())
        token = record[0]
        if token == '</init>':
            break
    events = []
    nn = 0
    metx = 0.0
    mety = 0.0
    ppx = vector('double')()
    ppy = vector('double')()
    ppz = vector('double')()
    pE = vector('double')()
    jetpx = vector('double')()
    jetpy = vector('double')()
    jetpz = vector('double')()
    jetE = vector('double')()
    while nn < nevents:
        record = split(inp.readline())
        token = record[0]
        if token == '<event>':
            nn += 1
            if nn % 1000 == 0:
                print nn
            partons = []
            nonpartons = []
            recobjs = []
            record = split(inp.readline())
            token = record[0]
            nparticles = atoi(token)
            if nn < 5:
                print "Event: %d" % nn
                print "\tnumber of particles: %d" % nparticles
            # initialize missing ET sums
            metx = 0.0
            mety = 0.0
            continue
        elif token == '</event>':
            # missing ET
            energy = sqrt(metx * metx + mety * mety)
            particles = [(12, metx, mety, 0, energy)]
            # parton jets
            ppx.clear()
            ppy.clear()
            ppz.clear()
            pE.clear()
            for pid, px, py, pz, E in partons:
                ppx.push_back(px)
                ppy.push_back(py)
                ppz.push_back(pz)
                pE.push_back(E)
            findJets(ppx, ppy, ppz, pE, jetpx, jetpy, jetpz, jetE)
            for i in xrange(jetpx.size()):
                particles.append((21, jetpx[i], jetpy[i], jetpz[i], jetE[i]))
            for p in nonpartons:
                particles.append(p)
            events.append((particles, recobjs))
            continue
        else:
            pid = atoi(token)
            ptype = record[1]
            px, py, pz, energy, mass = map(atof, record[6:11])
            # truth-level code = 3
            # reco-level code = 1
            if ptype == '3':
                # exclude beam particles
                if px == py == 0:
                    continue
                # temporary hack: exclude t/W/Z/H
                ID = abs(pid)
                if not PARTONID.has_key(ID):
                    continue
                # compute true missing ET
                if MISSINGETID.has_key(ID):
                    metx += px
                    mety += py
                elif (ID < 6) or (ID == 21):  # quarks or gluons
                    partons.append((pid, px, py, pz, energy))
                else:
                    nonpartons.append((pid, px, py, pz, energy))
            else:
                recobjs.append((pid, px, py, pz, energy))
            if nn < 5:
                if ptype == '3':
                    level = 'truth'
                else:
                    level = 'reco'
                rec = "%6s: %12s %10d\t%10.3f %10.3f %10.3f %10.3f" % \
                    (level, nic.particleName(pid), pid, px, py, pz, energy)
                print rec
    return events
data_path = "{}/src/Analysis/alp_analysis/data/".format( os.environ["CMSSW_BASE"]) # --------------- if not os.path.exists(oDir): os.mkdir(oDir) if args.btag == 'cmva': btagAlgo = "pfCombinedMVAV2BJetTags" btag_wp = wps['CMVAv2_moriond'] elif args.btag == 'csv': btagAlgo = "pfCombinedInclusiveSecondaryVertexV2BJetTags" btag_wp = wps['CSVv2_moriond'] # variables to check nearest-neightbour nn_vars = ["thrustMayor", "thrustMinor", "sumPz", "invMass"] nn_vars_v = vector("string")() for v in nn_vars: nn_vars_v.push_back(v) # to parse variables to the anlyzer config = { "eventInfo_branch_name": "EventInfo", "jets_branch_name": "Jets", "dijets_branch_name": "DiJets", #"dihiggs_branch_name": "DiHiggs", #"muons_branch_name" : "", #"electrons_branch_name" : "", #"met_branch_name" : "", "n_gen_events": 0, "xsec_br": 0, "matcheff": 0,
def init_vector_branches():
    dic = OrderedDict([('plane', vector('unsigned short')()),
                       ('col', vector('unsigned short')()),
                       ('row', vector('unsigned short')()),
                       ('adc', vector('short')()),
                       ('header', vector('unsigned int')()),
                       ('trailer', vector('unsigned int')()),
                       ('pkam', vector('unsigned short')()),
                       ('token_pass', vector('unsigned short')()),
                       ('reset_tbm', vector('unsigned short')()),
                       ('reset_roc', vector('unsigned short')()),
                       ('auto_reset', vector('unsigned short')()),
                       ('cal_trigger', vector('unsigned short')()),
                       ('trigger_count', vector('unsigned short')()),
                       ('trigger_phase', vector('unsigned short')()),
                       ('stack_count', vector('unsigned short')()),
                       ('invalid_address', vector('bool')()),
                       ('invalid_pulse_height', vector('bool')()),
                       ('buffer_corruption', vector('bool')()),
                       ('incomplete_data', vector('bool')()),
                       ('missing_roc_headers', vector('bool')()),
                       ('roc_readback', vector('bool')()),
                       ('no_data', vector('bool')()),
                       ('eventid_mismatch', vector('bool')())])
    return dic
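# Sketch of attaching the vectors above to a TTree; the tree name and setup
# are assumptions for illustration. Each vector should be cleared and
# refilled before every Fill() call.
from ROOT import TTree
tree = TTree('Hits', 'pixel hit data')
branches = init_vector_branches()
for name, vec in branches.iteritems():
    tree.Branch(name, vec)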
def setUp(self):
    gROOT.SetBatch(True)
    self.runOnMC = True
    self.tfile = TFile.Open(self.infile)
    self.ttree = self.tfile.Get("ntupler/tree")
    self.nevents = self.ttree.GetEntries()
    self.fvec1 = vector('float')()
    self.fvec2 = vector('float')()
    self.fvec3 = vector('float')()
    self.fvec4 = vector('float')()
    self.fvec5 = vector('float')()
    self.fvec6 = vector('float')()
    self.ivec1 = vector('int')()
    self.ivec2 = vector('int')()
    self.ivec3 = vector('int')()
    self.uvec1 = vector('unsigned int')()
    self.uvec2 = vector('unsigned int')()
    self.uvec3 = vector('unsigned int')()
    self.uvec4 = vector('unsigned int')()
    self.uvec5 = vector('unsigned int')()
    self.uvec6 = vector('unsigned int')()
    self.uvec7 = vector('unsigned int')()
    self.uvec8 = vector('unsigned int')()
    self.uvec9 = vector('unsigned int')()
    self.bvec1 = vector('bool')()
    self.bvec2 = vector('bool')()
    self.bvec3 = vector('bool')()
    self.bvec4 = vector('bool')()
intLumi_fb = 12.6

iDir = "/lustre/cmswork/hh/alpha_ntuples/"
ntuplesVer = "v1_20161028"
if not args.oDir:
    oDir = "/lustre/cmswork/hh/alp_baseSelector/data_def"
else:
    oDir = args.oDir
data_path = "{}/src/Analysis/alp_analysis/data/".format(os.environ["CMSSW_BASE"])

weights = {'EventWeight'}  # weights to be applied - EventWeight, PUWeight, GenWeight

# ---------------
if not os.path.exists(oDir):
    os.mkdir(oDir)

trg_names = triggerlists[trgList]
if not trg_names:
    print "### WARNING: empty hlt_names ###"
trg_names_v = vector("string")()
for t in trg_names:
    trg_names_v.push_back(t)

# to convert weights
weights_v = vector("string")()
for w in weights:
    weights_v.push_back(w)

# to parse variables to the analyzer
config = {"eventInfo_branch_name": "EventInfo",
          "jets_branch_name": "Jets",
          #"muons_branch_name" : "",
          #"electrons_branch_name" : "",
          #"met_branch_name" : "",
          "genbfromhs_branch_name": "GenBFromHs",
          "genhs_branch_name": "GenHs",
oDir = args.oDir
data_path = "{}/src/Analysis/alp_analysis/data/".format(os.environ["CMSSW_BASE"])

btagAlgo = "pfCombinedInclusiveSecondaryVertexV2BJetTags"
#btagAlgo = "pfCombinedMVAV2BJetTags"
weights = {'PUWeight', 'GenWeight', 'BTagWeight'}  # weights to be applied

# ---------------
if not os.path.exists(oDir):
    os.mkdir(oDir)

trg_namesD = triggerlists[trgListD]
trg_namesN = triggerlists[trgListN]
trg_names = trg_namesD + trg_namesN
print trg_namesD
print trg_namesN
if not trg_names:
    print "### WARNING: empty hlt_names ###"
trg_namesD_v = vector("string")()
for t in trg_namesD:
    trg_namesD_v.push_back(t)
trg_namesN_v = vector("string")()
for t in trg_namesN:
    trg_namesN_v.push_back(t)

# to convert weights
weights_v = vector("string")()
for w in weights:
    weights_v.push_back(w)

# to parse variables to the analyzer
config = {"eventInfo_branch_name": "EventInfo",
          "jets_branch_name": "Jets",
          "muons_branch_name": "Muons",
          "electrons_branch_name": "Electrons",
          "met_branch_name": "MET",
          "genbfromhs_branch_name": "GenBFromHs",
def toVector(vtype, values):
    from ROOT import vector
    v = vector(vtype)()
    for o in values:
        v.push_back(o)
    return v
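# toVector works for any element type PyROOT can instantiate, e.g.:
names = toVector('string', ['muon', 'electron'])
cuts = toVector('double', [0.5, 0.8, 0.9])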
def main():
    """ Main function (c++ style) """
    # The name of the application
    import os
    import sys
    APP_NAME = os.path.splitext(os.path.basename(__file__))[0]
    print APP_NAME

    # Set up a logger object
    import logging
    logger = logging.getLogger(APP_NAME)
    logger.setLevel(logging.INFO)
    hdlr = logging.StreamHandler(sys.stdout)
    frmt = logging.Formatter('%(name)-14s%(levelname)8s %(message)s')
    hdlr.setFormatter(frmt)
    logger.addHandler(hdlr)

    # Setup the environment
    import ROOT
    if ROOT.gROOT.Macro('$ROOTCOREDIR/scripts/load_packages.C'):
        logger.error("Couldn't load the RootCore packages")
        return 1
    if ROOT.xAOD.Init(APP_NAME).isFailure():
        logger.error('Failed to call xAOD::Init(...)')
        return 1
    from ROOT import vector

    # registry
    registry = ROOT.ToolsRegistry('ToolsRegistry')

    # level1 emulation tool
    tool_l1 = ROOT.TrigTauEmul.Level1EmulationTool("Level1EmulationTool")
    chains_to_test = vector('std::string')()
    chains_to_test.push_back("L1_TAU12")
    chains_to_test.push_back("L1_TAU12IL")
    chains_to_test.push_back("L1_TAU12IM")
    chains_to_test.push_back("L1_TAU12IT")
    chains_to_test.push_back("L1_TAU20")
    chains_to_test.push_back("L1_TAU20IL")
    if not tool_l1.setProperty('l1_chains', chains_to_test).isSuccess():
        logger.error('Failed to set the property')
        return 1
    sc = tool_l1.initialize()
    if sc.isFailure():
        logger.error('Failed to initialize the tool')
        return 1

    # TauTriggerEmulation import and setup
    tool = ROOT.TrigTauEmul.HltEmulationTool("TauTriggerEmulationTool")
    chains_to_test = vector('std::string')()
    chains_to_test.push_back("HLT_tau5_perf_ptonly_L1TAU8")
    chains_to_test.push_back("HLT_tau25_perf_ptonly")
    chains_to_test.push_back("HLT_tau25_loose1_ptonly")
    chains_to_test.push_back("HLT_tau25_medium1_ptonly")
    chains_to_test.push_back("HLT_tau25_tight1_ptonly")
    if not tool.setProperty('hlt_chains', chains_to_test).isSuccess():
        logger.error('Failed to set the property')
        return 1
    sc = tool.initialize()
    if sc.isFailure():
        logger.error('Failed to initialize the tool')
        return 1

    # Return gracefully
    return 0
    exit(-1)

from ROOT import HMuTauhTreeFromNano, HTauhTauhTreeFromNano

fileNames = [
    "0E6F4B78-CC12-E811-B37D-FA163EA12C78.root",
    "50BE09DD-CC12-E811-869D-F04DA27542B9.root",
    "844BE355-CD12-E811-8871-FA163ED9B872.root",
    "DEBF5F61-CC12-E811-B47A-0CC47AA9943A.root",
    "5A038C2A-CC12-E811-B729-7845C4FC3B8D.root",
]

lumisToProcess = process.source.lumisToProcess
#import FWCore.ParameterSet.Config as cms
#lumisToProcess = cms.untracked.VLuminosityBlockRange(
#    ("1:2047-1:2047", "1:2048-1:2048", "1:6145-1:6145", "1:4098-1:4098",
#     "1:3-1:7", "1:6152-1:6152", "1:9-1:11", "1:273-1:273",
#     "1:4109-1:4109", "1:4112-1:4112", "1:4115-1:4116"))

from ROOT import vector
vlumis = vector('string')()
for lumi in lumisToProcess:
    vlumis.push_back(lumi)

for name in fileNames:
    aFile = "file:///home/mbluj/work/data/NanoAOD/80X_with944/VBFHToTauTau_M125_13TeV_powheg_pythia8/RunIISummer16NanoAOD_PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/" + name
    print "Adding file: ", aFile
    print "Making the MuTau tree"
    aROOTFile = TFile.Open(aFile)
    aTree = aROOTFile.Get("Events")
    print "TTree entries: ", aTree.GetEntries()
    HMuTauhTreeFromNano(aTree, doSvFit, applyRecoil, vlumis).Loop()
    print "Making the TauTau tree"
    aROOTFile = TFile.Open(aFile)
    aTree = aROOTFile.Get("Events")
    HTauhTauhTreeFromNano(aTree, doSvFit, applyRecoil, vlumis).Loop()
try: gSystem.Load("libSusyFitter.so") from ROOT import * except Exception, msg: print "Couldn't import HistFitter - please setup appropriate root version" sys.exit() ### file lists fi1 = glob.glob('SoftLeptonMoriond2013_SRs1L_SM_GG1step_*/Fit_SRs1L_SM_GG1step_*_combined_BasicMeasurement_model.root') fi2 = glob.glob('Sig_SM_SS1step_*_combined_BasicMeasurement_model.root') #fi3 = glob.glob('Sig_SM_SS1step_*_combined_BasicMeasurement_model.root') f1 = vector('TString')() f2 = vector('TString')() #f3 = vector('TString')() for file in fi1: f1.push_back(TString(file)) for file in fi2: f2.push_back(TString(file)) #for file in fi3: f3.push_back(TString(file)) format1 = 'filename+SoftLeptonMoriond2013_SRs1L_SM_GG1step_%f_%f_%f+combined' format2 = 'filename+Sig_SM_SS1step_%f_%f_%f+combined' #format3 = 'filename+Sig_SM_SS1step_%f_%f_%f' interpretation = 'm1:m2:m3' outfile = 'combined+wsid' ## workspace id will be added to filename outws_prefix = 'combined' ## prefix for workspace name
def __init__(self, globalTag, jetFlavour, doResidualJECs=True, **kwargs):
    """Create a corrector object that reads the payloads from the text dumps
    of a global tag under CMGTools/RootTools/data/jec (see the getJec.py
    there to make the dumps). It will apply the L1, L2, L3 and possibly the
    residual corrections to the jets. If configured to do so, it will also
    compute the type1 MET corrections."""
    era = kwargs.get('era', "")
    path = kwargs.get('path', pathJEC)
    upToLevel = kwargs.get('upToLevel', 3)
    correctType1MET = kwargs.get('correctType1MET', False)
    correctSeparate = kwargs.get('correctSeparate', False)
    type1METParams = kwargs.get('type1METParams',
                                {'jetPtThreshold': 15.,
                                 'skipEMfractionThreshold': 0.9,
                                 'skipMuons': True})
    ###if era:
    ###    globalTag = re.sub(r"(201[678])(_V\d+)", r"\1%s\2" % era, globalTag)

    # BASE CORRECTIONS
    path = os.path.expandvars(path)  # "%s/src/CMGTools/RootTools/data/jec" % os.environ['CMSSW_BASE']
    print("Loading JES corrections from '%s' with globalTag '%s'..." % (path, globalTag))
    filenameL1 = ensureFile("%s/%s_L1FastJet_%s.txt" % (path, globalTag, jetFlavour))
    filenameL2 = ensureFile("%s/%s_L2Relative_%s.txt" % (path, globalTag, jetFlavour))
    filenameL3 = ensureFile("%s/%s_L3Absolute_%s.txt" % (path, globalTag, jetFlavour))
    self.L1JetPar = JetCorrectorParameters(filenameL1, "")
    self.L2JetPar = JetCorrectorParameters(filenameL2, "")
    self.L3JetPar = JetCorrectorParameters(filenameL3, "")
    self.vPar = vector(JetCorrectorParameters)()
    self.vPar.push_back(self.L1JetPar)
    if upToLevel >= 2:
        self.vPar.push_back(self.L2JetPar)
    if upToLevel >= 3:
        self.vPar.push_back(self.L3JetPar)
    # ADD RESIDUALS
    if doResidualJECs:
        filenameL2L3 = ensureFile("%s/%s_L2L3Residual_%s.txt" % (path, globalTag, jetFlavour))
        self.ResJetPar = JetCorrectorParameters(filenameL2L3)
        self.vPar.push_back(self.ResJetPar)
    # STEP 3: Construct a FactorizedJetCorrector object
    self.JetCorrector = FactorizedJetCorrector(self.vPar)
    if os.path.exists("%s/%s_Uncertainty_%s.txt" % (path, globalTag, jetFlavour)):
        self.JetUncertainty = JetCorrectionUncertainty(
            "%s/%s_Uncertainty_%s.txt" % (path, globalTag, jetFlavour))
    elif os.path.exists("%s/Uncertainty_FAKE.txt" % path):
        self.JetUncertainty = JetCorrectionUncertainty("%s/Uncertainty_FAKE.txt" % path)
    else:
        print('Missing JEC uncertainty file "%s/%s_Uncertainty_%s.txt", so jet energy uncertainties will not be available' % (path, globalTag, jetFlavour))
        self.JetUncertainty = None
    self.separateJetCorrectors = {}
    if correctSeparate or correctType1MET:
        self.vParL1 = vector(JetCorrectorParameters)()
        self.vParL1.push_back(self.L1JetPar)
        self.separateJetCorrectors['L1'] = FactorizedJetCorrector(self.vParL1)
        if upToLevel >= 2 and correctSeparate:
            self.vParL2 = vector(JetCorrectorParameters)()
            for i in [self.L1JetPar, self.L2JetPar]:
                self.vParL2.push_back(i)
            self.separateJetCorrectors['L1L2'] = FactorizedJetCorrector(self.vParL2)
        if upToLevel >= 3 and correctSeparate:
            self.vParL3 = vector(JetCorrectorParameters)()
            for i in [self.L1JetPar, self.L2JetPar, self.L3JetPar]:
                self.vParL3.push_back(i)
            self.separateJetCorrectors['L1L2L3'] = FactorizedJetCorrector(self.vParL3)
        if doResidualJECs and correctSeparate:
            self.vParL3Res = vector(JetCorrectorParameters)()
            for i in [self.L1JetPar, self.L2JetPar, self.L3JetPar, self.ResJetPar]:
                self.vParL3Res.push_back(i)
            self.separateJetCorrectors['L1L2L3Res'] = FactorizedJetCorrector(self.vParL3Res)
    self.globalTag = globalTag
    self.jetFlavour = jetFlavour
    self.doResidualJECs = doResidualJECs
    self.path = path
    self.upToLevel = upToLevel
    self.correctType1MET = correctType1MET
    self.correctSeparate = correctSeparate
    self.type1METParams = type1METParams
from di_higgs.hh2bbbb.samples_25ns import mc_samples

max_events = -100
inEllipse = False
freeJetTagged = True
isMC = True
TH1.AddDirectory(False)

hlt_paths = ["HLT_BIT_HLT_QuadJet45_TripleBTagCSV0p67_v",
             "HLT_BIT_HLT_QuadJet45_DoubleBTagCSV0p67_v",
             "HLT_BIT_HLT_DoubleJet90_Double30_TripleBTagCSV0p67_v",
             "HLT_BIT_HLT_DoubleJet90_Double30_DoubleBTagCSV0p67_v",
             "HLT_HH4bAll"]
hlt_paths_v = vector("string")()
for hlt_path in hlt_paths:
    hlt_paths_v.push_back(hlt_path)

hlt_paths_or = hlt_paths[0:1] + hlt_paths[2:3]
hlt_paths_or_v = vector("string")()
for hlt_path in hlt_paths_or:
    hlt_paths_or_v.push_back(hlt_path)

mc_names = mc_samples.keys()
mc_names = [mc_name for mc_name in mc_names if 'HH' in mc_name]

for name in mc_names:
    isHH = False
    if "HH" in name:
        isHH = True
    selector = SkimSelector(ExtEvent(VHBBEvent))(0, hlt_paths_v, isHH, hlt_paths_or_v)
    tchain = TChain("tree")
    tchain.Add(mc_samples[name]["lustre_path"])
gSlight = load_starlight(dy)
gMS = load_ms()
gCCK = load_cck()
#gSartre = load_sartre()

#open the inputs
inp = TFile.Open(basedir + "/" + infile)
tree = inp.Get("jRecTree")
inp_gg = TFile.Open(basedir_gg + "/" + infile_gg)
tree_gg = inp_gg.Get("jRecTree")
inp_coh = TFile.Open(basedir_coh + "/" + infile_coh)
tree_coh_gen = inp_coh.Get("jGenTree")

#evaluate binning
print "bins:", ut.get_nbins(ptbin, ptmin, ptmax)
bins = vector(rt.double)()
#bins.push_back(ptmin)
#while True:
#    if bins[bins.size()-1] < ptmid:
#        increment = ptbin
#    else:
#        increment = ptlon
#    bins.push_back( bins[bins.size()-1] + increment )
#    if bins[bins.size()-1] > ptmax: break
bins = ut.get_bins_vec_2pt(ptbin, ptlon, ptmin, ptmax, ptmid)
print "bins2:", bins.size() - 1

#data and gamma-gamma histograms
#hPt = ut.prepare_TH1D("hPt", ptbin, ptmin, ptmax)
#weights to be applied
weights = {'PUWeight', 'LeptonWeight', 'BTagWeight'}  #'lhe_weight_10',
weights_nobTag = {'PUWeight', 'LeptonWeight'}  #'lhe_weight_10'

# ---------------
if not os.path.exists(oDir):
    os.mkdir(oDir)
print oDir

trg_namesD = triggerlists[trgListD]
trg_namesN = triggerlists[trgListN]
trg_names = trg_namesD + trg_namesN
print trg_namesD
print trg_namesN
if not trg_names:
    print "### WARNING: empty hlt_names ###"
trg_namesD_v = vector("string")()
for t in trg_namesD:
    trg_namesD_v.push_back(t)
trg_namesN_v = vector("string")()
for t in trg_namesN:
    trg_namesN_v.push_back(t)

# to convert weights
weights_v = vector("string")()
for w in weights:
    weights_v.push_back(w)
w_nobTag_v = vector("string")()
for w in weights_nobTag:
    w_nobTag_v.push_back(w)

# to parse variables to the analyzer
def matchAndSort(inputCollection, outputCollection):
    for cand in inputCollection:
        decaymatched = False
        vbfmatched = False
        if self.domcmatching:
            decaymatched = (cand.leg2().leg1().getSelection('cuts_genParton')
                            and cand.leg2().leg2().getSelection('cuts_genParton'))
        if cand.vbfptr().isNonnull():
            if self.cfg_ana.matchvbfgen:
                vbfjets = self.handles['genVBF'].product()
                if len(vbfjets) > 1:
                    phileg1 = cand.vbfptr().leg1().phi()
                    etaleg1 = cand.vbfptr().leg1().eta()
                    phileg2 = cand.vbfptr().leg2().phi()
                    etaleg2 = cand.vbfptr().leg2().eta()
                    phigen1 = vbfjets[0].phi()
                    etagen1 = vbfjets[0].eta()
                    phigen2 = vbfjets[1].phi()
                    etagen2 = vbfjets[1].eta()
                    ## vbfmatched = ((deltaR(phileg1, etaleg1, phigen1, etagen1)<1 or deltaR(phileg1, etaleg1, phigen2, etagen2)<1) or
                    ##               (deltaR(phileg2, etaleg2, phigen1, etagen1)<1 or deltaR(phileg2, etaleg2, phigen2, etagen2)<1))
                    vbfmatched = ((deltaR(phileg1, etaleg1, phigen1, etagen1) < 0.5 or deltaR(phileg1, etaleg1, phigen2, etagen2) < 0.5) or
                                  (deltaR(phileg2, etaleg2, phigen1, etagen1) < 0.5 or deltaR(phileg2, etaleg2, phigen2, etagen2) < 0.5))
        if cand.leg2().leg1().pt() > self.cfg_ana.jetptmin and cand.leg2().leg2().pt() > self.cfg_ana.jetptmin:
            if cand.vbfptr().isNonnull():
                if cand.vbfptr().leg1().pt() > self.cfg_ana.jetptmin and cand.vbfptr().leg2().pt() > self.cfg_ana.jetptmin:
                    varnames = vector("string")()
                    varnames.push_back("ZJJMass")
                    varnames.push_back("J1Pt")
                    varnames.push_back("J2Pt")
                    varnames.push_back("ZJJdeltaEtaDecay")
                    varnames.push_back("HMMJJMass>0?abs(HMMJJDeltaPhiZ):abs(HEEJJDeltaPhiZ)")
                    varnames.push_back("HMMJJMass>0?abs(HMMJJSumAbsEtaJ1J2):abs(HEEJJSumAbsEtaJ1J2)")
                    varnames.push_back("HMMJJMass>0?HMMJJcosthetastar:HEEJJcosthetastar")
                    varnames.push_back("HMMJJMass>0?HMMJJhelphi:HEEJJhelphi")
                    varnames.push_back("HMMJJMass>0?HMMJJhelphiZl1:HEEJJhelphiZl1")
                    varnames.push_back("HMMJJMass>0?HMMJJhelphiZl2:HEEJJhelphiZl2")
                    varnames.push_back("HMMJJMass>0?HMMJJphistarZl1:HEEJJphistarZl1")
                    varnames.push_back("HMMJJMass>0?HMMJJphistarZl2:HEEJJphistarZl2")
                    varnames.push_back("HMMJJMass>0?HMMJJhelcosthetaZl1:HEEJJhelcosthetaZl1")
                    varnames.push_back("HMMJJMass>0?HMMJJhelcosthetaZl2:HEEJJhelcosthetaZl2")
                    vars = vector("double")()
                    vars.push_back(cand.leg2().mass())
                    vars.push_back(cand.leg2().leg1().pt())
                    vars.push_back(cand.leg2().leg2().pt())
                    vars.push_back(abs(cand.leg2().leg1().eta() - cand.leg2().leg2().eta()))
                    vars.push_back(abs(deltaPhi(cand.leg1().eta(), cand.leg2().eta())))
                    vars.push_back(abs(cand.leg2().leg1().eta()) + abs(cand.leg2().leg2().eta()))
                    vars.push_back(cand.costhetastar())
                    vars.push_back(cand.helphi())
                    vars.push_back(cand.helphiZl1())
                    vars.push_back(cand.helphiZl2())
                    vars.push_back(cand.phistarZl1())
                    vars.push_back(cand.phistarZl2())
                    vars.push_back(cand.helcosthetaZl1())
                    vars.push_back(cand.helcosthetaZl2())
                    if self.cfg_ana.computeClassifier:
                        classifier = ReadBDT(varnames)
                        value = classifier.GetMvaValue(vars)
                    else:
                        value = -1.
                    outputCollection.append([cand, decaymatched, vbfmatched, value])
    outputCollection.sort(key=lambda a: a[0].vbfptr().mass(), reverse=True)
    if self.cfg_ana.computeClassifier:
        outputCollection.sort(key=lambda a: a[3], reverse=True)
    #print "initial size", len(outputCollection)
    leadingmass = outputCollection[0][0].vbfptr().mass()
    #print "maxvbfmass is ", leadingmass
    outputCollection = [x for x in outputCollection if x[0].vbfptr().mass() >= leadingmass]