def addHistToTGraphPoisson(self, h, g, w=1.):
        for i in range(0, g.GetN()):
            # Histogram bins are shifted by +1 relative to graph points (bin 0 is the underflow)
            err = h.GetBinError(i + 1)
            y = h.GetBinContent(i + 1)

            binWidth = h.GetBinWidth(i + 1)

            # From https://twiki.cern.ch/twiki/bin/view/CMS/PoissonErrorBars
            alpha = 1. - 0.6827
            n = int(
                round(y * binWidth /
                      w))  # Round is necessary due to, well, rounding errors
            l = Math.gamma_quantile(alpha / 2., n, 1.) if n != 0. else 0.
            u = Math.gamma_quantile_c(alpha, n + 1, 1) if n != 0. else 0.
            g.SetPointEYlow(
                i,
                math.sqrt(((n - l) / binWidth * w)**2 + g.GetErrorYlow(i)**2))
            g.SetPointEYhigh(
                i,
                math.sqrt(((u - n) / binWidth * w)**2 + g.GetErrorYhigh(i)**2))

            gx = Double(0.)
            gy = Double(0.)
            g.GetPoint(i, gx, gy)
            g.SetPoint(i, gx, y + gy)
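# Hypothetical usage sketch (the plotter object and histogram names are
# assumed, not from the original source): build a graph with Poisson errors
# from one histogram, then stack a second histogram on top, combining the
# Poisson errors in quadrature.
#
#   g = plotter.createTGraphPoisson(h1)
#   plotter.addHistToTGraphPoisson(h2, g)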
Example #2
def convertToPoisson(h):
    graph = TGraphAsymmErrors()
    q = (1-0.6827)/2.

    for i in range(1,h.GetNbinsX()+1):
        x=h.GetXaxis().GetBinCenter(i)
        xLow =h.GetXaxis().GetBinLowEdge(i)
        xHigh =h.GetXaxis().GetBinUpEdge(i)
        y=h.GetBinContent(i)
        yLow=0
        yHigh=0
        if y !=0.0:
            yLow = y-Math.chisquared_quantile_c(1-q,2*y)/2.
            yHigh = Math.chisquared_quantile_c(q,2*(y+1))/2.-y
            graph.SetPoint(i-1,x,y)
            graph.SetPointEYlow(i-1,yLow)
            graph.SetPointEYhigh(i-1,yHigh)
            graph.SetPointEXlow(i-1,0.0)
            graph.SetPointEXhigh(i-1,0.0)


    graph.SetMarkerStyle(20)
    graph.SetLineWidth(2)
    graph.SetMarkerSize(1.)
    graph.SetMarkerColor(kBlack)


    return graph
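# Hypothetical usage sketch (object names assumed): draw the histogram frame,
# then overlay the asymmetric Poisson error bars returned by convertToPoisson.
#
#   gData = convertToPoisson(hData)
#   hData.Draw("axis")
#   gData.Draw("P same")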
Example #3
    def __init__( self, process_list, sf = None, df = None ):
        
        self.__processes = process_list
        
        if sf and df:
            
            self.__sf = sf
            self.__df = df
            
            alpha = 1 - 0.6827            

            self.__sfCRGraph = TGraphAsymmErrors(self.__sf)
            self.__dfCRGraph = TGraphAsymmErrors(self.__df)
            
            for g in [self.__sfCRGraph, self.__dfCRGraph]:
                for i in range(g.GetN()):
                    N = g.GetY()[i]
                    LowErr = 0 if N == 0 else Math.gamma_quantile(alpha/2, N, 1.)
                    UpErr = Math.gamma_quantile_c(alpha/2, N+1, 1.) # recommended by StatComm (1.8)
                    # Math.gamma_quantile_c(alpha, N+1, 1.) # gives 1.2, strictly 68% one-sided
                    g.SetPointEYlow(i, N-LowErr)
                    g.SetPointEYhigh(i, UpErr-N)
            
            for p in self.__processes:
                
                if p.name() in ['nonpromptSF', 'nonpromptDF']:
                    
                    nom, low, high = [], [], []
                    
                    for ib in range(p.nominal().GetXaxis().GetNbins()):
                        if p.name() in ['nonpromptDF']: hnp = self.__dfCRGraph
                        else: hnp = self.__sfCRGraph
                        npcr = hnp.GetY()[ib]
                        npcrErrLow = hnp.GetEYlow()[ib]
                        npcrErrHigh = hnp.GetEYhigh()[ib]
                        np = p.nominal().GetBinContent(ib+1)
                        npErr = p.nominal().GetBinError(ib+1)
                        alpha = np/npcr if npcr > 0 else npErr
                        nom.append(npcr*alpha)
                        low.append(npcrErrLow*alpha)
                        high.append(npcrErrHigh*alpha)
                            
                    hnom = p.nominal().Clone()
                    for ib in range(hnom.GetXaxis().GetNbins()):
                        hnom.SetBinContent(ib+1, nom[ib])
                    npGraph = TGraphAsymmErrors(hnom)
                    for ib in range(hnom.GetXaxis().GetNbins()):
                        npGraph.SetPointEYlow(ib, low[ib])
                        npGraph.SetPointEYhigh(ib, high[ib])
                    if p.name() in ['nonpromptDF']:
                        self.__dfGraph = npGraph
                    else:
                        self.__sfGraph = npGraph
                        
        self.__uncertainties = set()
        for p in self.__processes:
            self.__uncertainties = self.__uncertainties.union( set( p.uncertaintySources() ) )
Example #4
def calculatePoissonErrors(n, confidenceInterval=0.6827):
    ## Calculates the Poisson errors for a given number
    #  n                   number of entries
    #  confidenceInterval  probability covered inside the error band
    from ROOT import Math
    n = round(n)
    invIntegral = (1. - confidenceInterval) / 2.
    lo = Math.gamma_quantile(invIntegral, n, 1.) if n != 0 else 0.
    hi = Math.gamma_quantile_c(invIntegral, n + 1, 1.)
    return n - lo, hi - n
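# Hypothetical usage sketch: 68.27% central Poisson interval around an
# observed count of 5 (numbers chosen for illustration only).
#
#   errDown, errUp = calculatePoissonErrors(5)
#   print("5 - %.2f + %.2f" % (errDown, errUp))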
Example #5
File: utilpp.py  Project: chekanov/ppbook
def GetZVal(p, excess):
    '''The function normal_quantile converts a p-value into a significance,
    i.e. the number of standard deviations corresponding to the right tail
    of a Gaussian.
    '''
    if excess:
        zval = Math.normal_quantile(1 - p, 1)
    else:
        zval = Math.normal_quantile(p, 1)
    return zval
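# Quick sanity check (not from the original source): a one-sided p-value of
# about 1.35e-3 corresponds to roughly a 3 sigma excess.
#
#   GetZVal(1.35e-3, excess=True)   # ~ 3.0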
Example #6
def getDataPoissonErrors(hist,
                         kPoisson=False,
                         drawZeroBins=False,
                         drawXbars=False,
                         centerBin=True):
    '''Make Poisson data errors for a histogram with one of two methods:
       - TH1.kPoisson
       - chi-squared quantile
    '''
    # https://github.com/DESY-CMS-SUS/cmgtools-lite/blob/8_0_25/TTHAnalysis/python/plotter/mcPlots.py#L70-L102
    # https://github.com/DESY-CMS-SUS/cmgtools-lite/blob/8_0_25/TTHAnalysis/python/plotter/susy-1lep/RcsDevel/plotDataPredictWithSyst.py#L12-L21

    if kPoisson: hist.SetBinErrorOption(TH1D.kPoisson)

    Nbins = hist.GetNbinsX()
    xaxis = hist.GetXaxis()
    alpha = (1 - 0.6827) / 2.

    graph = TGraphAsymmErrors(Nbins)
    graph.SetName(hist.GetName() + "_graph")
    graph.SetTitle(hist.GetTitle())
    for i in xrange(1, Nbins + 1):
        N = hist.GetBinContent(i)
        if N <= 0 and not drawZeroBins: continue
        dN = hist.GetBinError(i)
        yscale = 1
        if centerBin:
            x = xaxis.GetBinCenter(i)
        else:
            x = xaxis.GetBinLowEdge(i)
        if N > 0 and dN > 0 and abs(dN**2 / N - 1) > 1e-4:  # check if the error is Poissonian
            yscale = (dN**2 / N)
            N = (N / dN)**2
        if kPoisson:
            EYlow = hist.GetBinErrorLow(i)
            EYup = hist.GetBinErrorUp(i)
        else:
            EYlow = (N - Math.chisquared_quantile_c(1 - alpha, 2 * N) /
                     2.) if N > 0 else 0
            EYup = Math.chisquared_quantile_c(alpha, 2 * (N + 1)) / 2. - N
        y = yscale * N
        EXup = xaxis.GetBinUpEdge(i) - x if drawXbars else 0
        EXlow = x - xaxis.GetBinLowEdge(i) if drawXbars else 0
        graph.SetPoint(i - 1, x, y)
        graph.SetPointError(i - 1, EXlow, EXup, EYlow, EYup)
        #print ">>> getDataPoissonErrors - bin %2d: (x,y) = ( %3.1f - %4.2f + %4.2f, %4.2f - %4.2f + %4.2f )"%(i,x,EXlow,EXup,y,EYlow,EYup)
    graph.SetLineWidth(hist.GetLineWidth())
    graph.SetLineColor(hist.GetLineColor())
    graph.SetLineStyle(hist.GetLineStyle())
    graph.SetMarkerSize(hist.GetMarkerSize())
    graph.SetMarkerColor(hist.GetMarkerColor())
    graph.SetMarkerStyle(hist.GetMarkerStyle())
    return graph
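# Hypothetical usage sketch (object names assumed): overlay asymmetric data
# uncertainties using the built-in TH1.kPoisson bin errors.
#
#   gData = getDataPoissonErrors(hData, kPoisson=True, drawXbars=False)
#   gData.Draw("PZ same")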
Example #7
def pass_lepton_RelIso(lid, ltightId):
    """Return True if lepton passes tight ID.

    Args:
        lid (int): Lepton PDG ID.
        ltightId (bool): Tight ID flag.
    """
    # Minimal completion (assumption): both electrons (|id| == 11) and
    # muons (|id| == 13) are required to pass the tight ID flag.
    if abs(lid) in (11, 13):
        return bool(ltightId)
    return False

            # We have 2 good (loose) leptons which MAY form a Z candidate.
            loose_lep_arr = []
            # Do an OSSF check:
            if (lid1 + lid2) != 0:
                if verbose:
                    print(f"Event {evt_num} failed OSSF check.")
                continue
            
            # Do we want tight Z1 leptons?
            if force_z1_leps_tightID:
                if (not ltightId1) or (not ltightId2):
                    continue
            
            lorvec_lep1 = Math.PtEtaPhiMVector(lpt1, leta1, lphi1, lmass1)
            lorvec_lep2 = Math.PtEtaPhiMVector(lpt2, leta2, lphi2, lmass2)
            z_cand = lorvec_lep1 + lorvec_lep2
            
            if (z_cand.M() < 12) or (z_cand.M() > 120):
                print(f"Event {evt_num} failed m(Z1) window.")
                continue

            # Good Z candidate! Save these lepton indices.
            z_cand_lep_ndcs.append((ndx1, ndx2))

            #  Need to check lepton kinematics (dxy, dz, SIP3D)
            # if make_valid_z1_candidate(lep1, lep2):

    # All Z1 candidates found!

    # my_lep_ls = 
    # z1_cand_ls = get_all_z1_candidates()

    # If the event made it this far, the leptons are good!
    evt_info_d["n_evts_ge4_passing_leps"] += 1
Example #8
 def get_LorentzVector(self, include_FSR=True):
     """Return a Lorentz vector version of this lepton."""
     if include_FSR:
         return Math.PtEtaPhiMVector(
                 self.lpt,
                 self.leta,
                 self.lphi,
                 self.lmass
                 )
     return Math.PtEtaPhiMVector(
         self.lpt_NoFSR,
         self.leta_NoFSR,
         self.lphi_NoFSR,
         self.lmass_NoFSR
         )
Example #9
    def __call__(self, population, signalYield, signalSample, backgroundYield,
                 backgroundSample):
        # Loop over the chromosomes in the population
        for genome in population:

            # Get the NN described by the chromosome (feed-forward)
            net = nn.create_ffphenotype(genome)
            error = 0.0
            # Loop over the events
            for event in signalSample:
                # Extract variables and weight
                variables = event[1:]
                weight = event[0] / signalYield
                # Not strictly necessary in feedforward nets
                net.flush()
                # Computing the error
                error = error + weight * (
                    1 - net.sactivate(variables)[0])**self.norm

            # Loop over the events
            for event in backgroundSample:
                # Extract variables and weight
                variables = event[1:]
                weight = event[0] / backgroundYield
                # Not strictly necessary in feedforward nets
                net.flush()
                # Computing the error
                error = error + weight * (
                    net.sactivate(variables)[0])**self.norm

            # Set the fitness value on the chromosome
            genome.fitness = 1 - Math.pow(error / 2, 1. / self.norm)
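# Written out, the loop above evaluates (with p = self.norm, o the network
# output on an event, and w the event weight normalized by the sample yield):
#
#   error   = sum_signal w * (1 - o)**p  +  sum_background w * o**p
#   fitness = 1 - (error / 2)**(1 / p)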
Example #10
  def __call__(self, population, signalYield, signalSample, backgroundYield, backgroundSample):
    # Loop over the chromosomes in the population 
    for genome in population:
  
      # Get the NN described by the chromosome (feed-forward)
      net = nn.create_ffphenotype(genome)   
      error = 0.0
      # Loop over the events
      for event in signalSample:
        # Extract variables and weight
        variables = event[1:]
        weight = event[0]/signalYield
        # Not strictly necessary in feedforward nets
        net.flush()
        # Computing the error
        error = error + weight*(1 - net.sactivate(variables)[0])**self.norm

      # Loop over the events
      for event in backgroundSample:
        # Extract variables and weight
        variables = event[1:]
        weight = event[0]/backgroundYield
        # Not strictly necessary in feedforward nets
        net.flush()
        # Computing the error
        error = error + weight*(net.sactivate(variables)[0])**self.norm
    
      # Set the fitness value on the chromosome
      genome.fitness = 1 - Math.pow(error/2, 1./self.norm)
Example #11
 def cdf_generator(self, histo, binN):
     mean = histo.GetBinContent(binN)
     sdev = TMath.Sqrt(mean)
     maxK = mean + 4 * sdev  # mean + 4 * mean
     ks = linspace(0, maxK, 101)  # xrange(maxK)# [k for k in xrange(maxK)]
     cdf = {pos: [ks[pos], Math.inc_gamma_c(ks[pos]+1, mean)] for pos in xrange(len(ks))}  # dictionary wich contains the x and y info of the cdf in a list of two elements. e.g. cdf[1] -> [0, 0].
     return deepcopy(cdf)
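# The dictionary comprehension above uses the identity that the regularized
# upper incomplete gamma function Q(k + 1, mu) equals the Poisson CDF
# P(X <= k).  A minimal check of that identity, assuming PyROOT is available:
from ROOT import Math

mu, k = 3.0, 2
lhs = Math.inc_gamma_c(k + 1, mu)
rhs = sum(Math.poisson_pdf(j, mu) for j in range(k + 1))
assert abs(lhs - rhs) < 1e-9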
Example #12
def getprobability(S, dof):
    """.. function:: checkchisquared(S,dof) -> p
	Knowing that S follows a chi square distribution (with dof degrees of freem), 
	return the probability that S is fulfill the hypothesis
	"""
    from ROOT import Math

    return Math.chisquared_cdf(S, dof)
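# Note: Math.chisquared_cdf(S, dof) is the lower-tail probability P(chi2 <= S).
# If the upper-tail goodness-of-fit p-value is wanted instead, ROOT provides
# the complement; a minimal sketch (not part of the original source):
from ROOT import Math

def get_chi2_pvalue(S, dof):
    # Upper-tail probability P(chi2 >= S) for S following a chi-square
    # distribution with dof degrees of freedom
    return Math.chisquared_cdf_c(S, dof)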
Example #13
def get_pt(vector):
    # Get pT from a given Lorentz vector
    pT = None
    try:
        pT = vector.Perp()
    except AttributeError:
        pT = Math.sqrt(vector.Perp2())
    return pT
Example #14
    def createTGraphPoisson(self, h, w=1.):
        self.tfile.cd()
        g = TGraphAsymmErrors(h)
        for i in range(0, g.GetN()):
            y = h.GetBinContent(i+1)
            binWidth = h.GetBinWidth(i+1)

            # From https://twiki.cern.ch/twiki/bin/view/CMS/PoissonErrorBars
            alpha = 1. - 0.6827
            n = int(round(y*binWidth/w)) # Round is necessary due to, well, rounding errors
            l = Math.gamma_quantile(alpha/2., n, 1.) if n != 0. else 0.
            u = Math.gamma_quantile_c(alpha, n+1, 1)
            # print y*binWidth/w, n, y, u, l
            g.SetPointEYlow(i, (n-l)/binWidth*w)
            g.SetPointEYhigh(i, (u-n)/binWidth*w)

        return g
Example #15
def getprobability(S,dof):
	""".. function:: checkchisquared(S,dof) -> p
	Knowing that S follows a chi square distribution (with dof degrees of freem), 
	return the probability that S is fulfill the hypothesis
	"""
	from ROOT import Math

	return Math.chisquared_cdf(S,dof)
Example #16
def get_lorentz_vector_new(particle):
    # Get the Lorentz Vector of a given particle (Math::LorentzVector class)
    vector = Math.PxPyPzMVector()
    vector.SetPx(particle.core.p4.px)
    vector.SetPy(particle.core.p4.py)
    vector.SetPz(particle.core.p4.pz)
    vector.SetM(particle.core.p4.mass)
    return vector
Example #17
def LineShapePDF(shapes, mass, histo):
    # import ROOT
    from ROOT import Math

    x = shapes.binxcenters
    y = np.array([])
    if mass in shapes.shapes.keys():
        y = np.array(shapes.shapes[mass])
    else:
        input_masses = shapes.shapes.keys()
        min_mass = min(input_masses)
        max_mass = max(input_masses)
        ml = mass
        yl = np.array([])
        mh = mass
        yh = np.array([])
        if mass < min_mass:
            print "** WARNING: ** Attempting to extrapolate below the lowest input mass. The extrapolated shape(s) might not be reliable."
            m_temp = input_masses
            m_temp.sort()
            ml = m_temp[0]
            mh = m_temp[1]
        elif mass > max_mass:
            print "** WARNING: ** Attempting to extrapolate above the highest input mass. The extrapolated shape(s) might not be reliable."
            m_temp = input_masses
            m_temp.sort(reverse=True)
            ml = m_temp[1]
            mh = m_temp[0]
        else:
            ml = max([ m for m in input_masses if m<mass ])
            mh = min([ m for m in input_masses if m>mass ])

        yl = np.array(shapes.shapes[ml])
        yh = np.array(shapes.shapes[mh])

        y = ((yh - yl)/float(mh-ml))*float(mass - ml) + yl

    # define interpolator
    interpolator = Math.Interpolator(len(x))
    interpolator.SetData(len(x), array('d',x), array('d',y.tolist()))

    for i in range(0, histo.GetNbinsX()+1):
        xcenter = histo.GetBinCenter(i)/float(mass)
        if xcenter > shapes.binxcenters[0] and xcenter < shapes.binxcenters[-1]:

            xlow = histo.GetXaxis().GetBinLowEdge(i)/float(mass)
            if xlow < shapes.binxcenters[0]: xlow = shapes.binxcenters[0]
            xhigh = histo.GetXaxis().GetBinUpEdge(i)/float(mass)
            if xhigh > shapes.binxcenters[-1]: xhigh = shapes.binxcenters[-1]

            integral = interpolator.Integ(xlow, xhigh)
            histo.SetBinContent( i, (integral if integral >= 0. else 0.) )
        else:
            histo.SetBinContent(i, 0.)

#    histo.Scale( 1./histo.Integral() )
    histo.Scale( sum(y)/histo.Integral() )
Example #18
    def createTGraphPoisson(self, h, w=1.):
        self.tfile.cd()
        g = TGraphAsymmErrors(h)
        for i in range(0, g.GetN()):
            y = h.GetBinContent(i + 1)
            binWidth = h.GetBinWidth(i + 1)

            # From https://twiki.cern.ch/twiki/bin/view/CMS/PoissonErrorBars
            alpha = 1. - 0.6827
            n = int(
                round(y * binWidth /
                      w))  # Round is necessary due to, well, rounding errors
            l = Math.gamma_quantile(alpha / 2., n, 1.) if n != 0. else 0.
            u = Math.gamma_quantile_c(alpha, n + 1, 1)
            # print y*binWidth/w, n, y, u, l
            g.SetPointEYlow(i, (n - l) / binWidth * w)
            g.SetPointEYhigh(i, (u - n) / binWidth * w)

        return g
Example #19
    def addHistToTGraphPoisson(self, h, g, w=1.):
        for i in range(0, g.GetN()):
            # Histogram bins are shifted by +1 relative to graph points (bin 0 is the underflow)
            err = h.GetBinError(i+1)
            y = h.GetBinContent(i+1)

            binWidth = h.GetBinWidth(i+1)
            
            # From https://twiki.cern.ch/twiki/bin/view/CMS/PoissonErrorBars
            alpha = 1. - 0.6827
            n = int(round(y*binWidth/w)) # Round is necessary due to, well, rounding errors
            l = Math.gamma_quantile(alpha/2., n, 1.) if n != 0. else 0.
            u = Math.gamma_quantile_c(alpha, n+1, 1) if n != 0. else 0.
            g.SetPointEYlow(i, math.sqrt(((n-l)/binWidth*w)**2 + g.GetErrorYlow(i)**2))
            g.SetPointEYhigh(i, math.sqrt(((u-n)/binWidth*w)**2 + g.GetErrorYhigh(i)**2))

            gx = Double(0.)
            gy = Double(0.)
            g.GetPoint(i, gx, gy)
            g.SetPoint(i, gx, y + gy)
Example #20
    def __call__(self, population, signalYield, signalSample, backgroundYield,
                 backgroundSample):

        # Histogram holders
        signalHistogram = TH1F('signalHistogram', 'signal', self.nbins, 0, 1)
        backgroundHistogram = TH1F('backgroundHistogram', 'background',
                                   self.nbins, 0, 1)

        # Loop over the chromosomes in the population
        for genome in population:

            # Reset histograms
            signalHistogram.Reset()
            backgroundHistogram.Reset()

            # Get the NN described by the chromosome (feed-forward)
            net = nn.create_ffphenotype(genome)

            # Loop over the events creating signal histogram
            for event in signalSample:
                # Extract variables and weight
                variables = event[1:]
                weight = event[0]
                # Not strictly necessary in feedforward nets
                net.flush()
                # Net output
                output = net.sactivate(variables)[0]
                # Filling histograms
                signalHistogram.Fill(output, weight)

            # Loop over the events creating background histogram
            for event in backgroundSample:
                # Extract variables and weight
                variables = event[1:]
                weight = event[0]
                # Not strictly necessary in feedforward nets
                net.flush()
                # Net output
                output = net.sactivate(variables)[0]
                # Filling histograms
                backgroundHistogram.Fill(output, weight)

            fitness = 0

            # Computing fitness
            for bin in xrange(1, signalHistogram.GetNbinsX() + 1):
                signal = signalHistogram.GetBinContent(bin)
                background = backgroundHistogram.GetBinContent(bin)
                total = signal + background
                if total > 0:
                    fitness = fitness + signal**2 / (signal + background)

            # Adding fitness to genome
            genome.fitness = Math.sqrt(fitness)
Example #21
  def __call__(self, population, signalYield, signalSample, backgroundYield, backgroundSample):

    # Histogram holders
    signalHistogram = TH1F('signalHistogram', 'signal', self.nbins, 0, 1)
    backgroundHistogram = TH1F('backgroundHistogram', 'background', self.nbins, 0, 1)

    # Loop over the chromosomes in the population 
    for genome in population:
  
      # Reset histograms
      signalHistogram.Reset()
      backgroundHistogram.Reset()

      # Get the NN described by the chromosome (feed-forward)
      net = nn.create_ffphenotype(genome)   

      # Loop over the events creating signal histogram
      for event in signalSample:
        # Extract variables and weight
        variables = event[1:]
        weight = event[0]
        # Not strictly necessary in feedforward nets
        net.flush()
        # Net output
        output = net.sactivate(variables)[0]
        # Filling histograms
        signalHistogram.Fill(output, weight)
            
      # Loop over the events creating background histogram
      for event in backgroundSample:
        # Extract variables and weight
        variables = event[1:]
        weight = event[0]
        # Not strictly necessary in feedforward nets
        net.flush()
        # Net output
        output = net.sactivate(variables)[0]
        # Filling histograms
        backgroundHistogram.Fill(output, weight)
    
      fitness = 0
  
      # Computing fitness
      for bin in xrange(1,signalHistogram.GetNbinsX()+1):
        signal = signalHistogram.GetBinContent(bin)
        background = backgroundHistogram.GetBinContent(bin)
        total = signal + background
        if total > 0:
          fitness = fitness + signal**2/(signal+background)

      # Adding fitness to genome
      genome.fitness = Math.sqrt(fitness)
Example #22
def mean_poisson_pval(d, b, b_error, conv_width=3, step_size=1):
    """
    Mostly transcribed from
    https://svnweb.cern.ch/trac/atlasoff/browser/Trigger/TrigFTK/SuPlot/trunk/src/bumphunter/StatisticsAnalysis.C
    (including the docstring)

    Convolve a Gaussian (non-negative part) with a Poisson. Background is b+-b_error, and
    we need the mean Poisson probability to observe at least d (if d >=b, else at most d),
    given this PDF for b.
    The way is to cut the PDF of b into segments, whose area is exactly calculable, take the
    Poisson probability at the center of each Gaussian slice, and average the probabilities
    using the area of each slice as weight.

    But here, currently, we are guaranteed to have d > b so some of this simplifies.

    d - data counts
    b - background counts
    b_error - error in bkg counts
    conv_width - range of l in the convolution loop (I think this is how many sigmas to convolve over)
    step_size - step size for convolution

    TODO: I think there might be a better way to do this using ROOT.TMath
    """
    if b_error == 0:
        return TMath.Gamma(d, b) # Guaranteed to have d > b, so this is equivalent to commonFunctions.h:503

    # TODO: Pythonify the following
    mean, total_weight = 0.0, 0.0
    l = -conv_width
    while l <= conv_width:
        bcenter = max(0, b + l*b_error)
        this_slice_weight = Math.normal_cdf(l + 0.5*step_size) - Math.normal_cdf(l - 0.5*step_size)
        this_pval = poisson_pval(d, bcenter)
        mean += this_pval*this_slice_weight
        total_weight += this_slice_weight
        l += step_size
    return mean / total_weight
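# mean_poisson_pval above relies on a poisson_pval(d, b) helper that is not
# shown in this snippet.  A minimal sketch of what it presumably computes
# (the probability to observe at least d counts for a Poisson mean b),
# assuming PyROOT is available:
from ROOT import Math

def poisson_pval(d, b):
    d = int(round(d))
    if d <= 0:
        return 1.0
    # P(X >= d) = P(X > d - 1)
    return Math.poisson_cdf_c(d - 1, b)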
Example #23
def scat(x, par):
    """
    A = par[0]
    sigma = par[1]
    tau = par[2]
    b = par[3]

    """
    t0 = par[0]
    A = par[1]
    sigma = par[2]
    tau = par[3]
    b = par[4]
    dt = (x[0] - t0)

    t1 = tau * dt
    t3 = sigma * sigma
    t5 = tau * tau
    t9 = math.exp(-(0.4e1 * t1 - t3) / t5 / 0.4e1)
    t10 = 1.77245385090552
    t19 = Math.erf((0.2e1 * t1 - t3) / sigma / tau / 0.2e1)
    return (A * t9 * t10 * sigma * (t19 + 0.1e1) / 0.2e1 + b)
Example #24
def CalFtest(fit1, fit2):
    print fit1.fitname, fit2.fitname
    print 1. - Math.fdistribution_cdf(fit1.chindof / fit2.chindof, fit1.ndof,
                                      fit2.ndof)
Example #25
        continue

    if in_ev == 1:
        # Check the status of this particle
        try:
            if line.split()[1] == "1":
                # We have a final state particle on this line
                s.n_particles += 1
                PID_v.push_back(int(line.split()[0]))
                P_X_v.push_back(float(line.split()[6]))
                P_Y_v.push_back(float(line.split()[7]))
                P_Z_v.push_back(float(line.split()[8]))
                E_v.push_back(float(line.split()[9]))
                M_v.push_back(float(line.split()[10]))
                VecLep = Math.LorentzVector('ROOT::Math::PxPyPzE4D<float>')(
                    float(line.split()[6]), float(line.split()[7]),
                    float(line.split()[8]), float(line.split()[9]))
                P4_v.push_back(VecLep)
                pass
            pass
        except:
            pass

    if in_wgt == 1:
        # <wgt id='rwgt_11'> +2.2580107e-03 </wgt>
        # Check the weight
        try:
            wgt = line.rsplit('>')[1].split()[0]
            Wgt_v.push_back(float(wgt))
            pass
        except:
Example #26
                    if tmpJetPTTwo > jetPTCut:
                        #Counter
                        if not ifTwoBool:
                            ifTwoCount += 1
                            ifTwoBool = True
                        #Getting the eta dif between the two jets
                        tmpEtaDif = abs(ev.Jet_eta[i] - ev.Jet_eta[j])
                        #Checking if the eta dif passes the eta dif cut
                        if tmpEtaDif > jetEtaDifCut:
                            #Counter
                            if not ifThreeBool:
                                ifThreeCount += 1
                                ifThreeBool = True
                            #Getting four vectors for the two jets, using pt, eta, phi, and mass
                            tmpVecOne = Math.PtEtaPhiMVector(
                                ev.Jet_pt[i], ev.Jet_eta[i], ev.Jet_phi[i],
                                ev.Jet_mass[i])
                            tmpVecTwo = Math.PtEtaPhiMVector(
                                ev.Jet_pt[j], ev.Jet_eta[j], ev.Jet_phi[j],
                                ev.Jet_mass[j])
                            #Adding four vectors together and getting their invariant mass
                            tmpDiJetVec = tmpVecOne + tmpVecTwo
                            tmpInvMass = tmpDiJetVec.M()
                            #Checking if their InvMass passes the InvMass cut
                            if tmpInvMass > jetInvMassCut:
                                #Counter
                                if not ifFourBool:
                                    ifFourCount += 1
                                    ifFourBool = True

                                #Selecting by summed jet pt
Example #27
    def __call__(self, event):
        #corrected electrons to put into the event
        corre1s = []
        corre2s = []
        corrgs = []
        Zs = []
        Zgs = []

        corrPtName1 = self._lep1CorrPt
        corrPtName2 = self._lep2CorrPt
        corrPtNameG = self._gamCorrPt

        corrEtaName1 = self._lep1CorrEta
        corrEtaName2 = self._lep2CorrEta
        corrEtaNameG = self._gamCorrEta

        corrPhiName1 = self._lep1CorrPhi
        corrPhiName2 = self._lep2CorrPhi

        corrPt_1 = getattr(event, corrPtName1)
        corrPt_2 = getattr(event, corrPtName2)
        corrPt_G = getattr(event, corrPtNameG)

        corrEta_1 = getattr(event, corrEtaName1)
        corrEta_2 = getattr(event, corrEtaName2)
        corrEta_G = getattr(event, corrEtaNameG)

        corrPhi_1 = getattr(event, corrPhiName1)
        corrPhi_2 = getattr(event, corrPhiName2)

        for i in range(event.N_PATFinalState):
            #recalculate the photon vector from the PV
            corrgs.append(TLorentzVector())
            pv = Math.XYZPoint(event.pvX[i], event.pvY[i], event.pvZ[i])
            phoSC = Math.XYZPoint(event.gPositionX[i], event.gPositionY[i],
                                  event.gPositionZ[i])
            phoTemp = Math.XYZVector(phoSC.X() - pv.X(),
                                     phoSC.Y() - pv.Y(),
                                     phoSC.Z() - pv.Z())
            corrE_G = corrPt_G[i] * math.cosh(corrEta_G[i])
            phoP3 = phoTemp.unit() * corrE_G
            phoP4 = Math.XYZTVector(phoP3.x(), phoP3.y(), phoP3.z(), corrE_G)
            corrgs[-1].SetPtEtaPhiM(phoP4.pt(), phoP4.eta(), phoP4.phi(), 0.0)
            #create e1 corrected LorentzVector and error
            corre1s.append(TLorentzVector())
            pt1 = corrPt_1[i]
            eta1 = corrEta_1[i]
            phi1 = corrPhi_1[i]
            corre1s[-1].SetPtEtaPhiM(pt1, eta1, phi1, self._leptonMass)
            #create e2 corrected LorentzVector and error
            corre2s.append(TLorentzVector())
            pt2 = corrPt_2[i]
            eta2 = corrEta_2[i]
            phi2 = corrPhi_2[i]
            corre2s[-1].SetPtEtaPhiM(pt2, eta2, phi2, self._leptonMass)
            #make composite particles
            Zs.append(corre1s[-1] + corre2s[-1])
            Zgs.append(corre1s[-1] + corre2s[-1] + corrgs[-1])

        #add the newly calculated particles back to the event
        #using a common naming
        setattr(event, 'ell1', corre1s)
        setattr(event, 'ell2', corre2s)
        setattr(event, 'gam', corrgs)
        setattr(event, 'Z', Zs)
        setattr(event, 'Zg', Zgs)
Example #28
  def __call__(self, population, signalYield, signalSample, backgroundYield, backgroundSample):

    # Histogram holders
    signalHistogram = TH1F('signalHistogram', 'signal', self.nbins, 0, 1)
    backgroundHistogram = TH1F('backgroundHistogram', 'background', self.nbins, 0, 1)

    # Loop over the chromosomes in the population 
    for genome in population:
  
      # Reset histograms
      signalHistogram.Reset()
      backgroundHistogram.Reset()

      # Get the NN described by the chromosome (feed-forward)
      net = nn.create_ffphenotype(genome)   

      # Loop over the events creating signal histogram
      for event in signalSample:
        # Extract variables and weight
        variables = event[1:]
        weight = event[0]
        # Not strictly necessary in feedforward nets
        net.flush()
        # Net output
        output = net.sactivate(variables)[0]
        # Filling histograms
        signalHistogram.Fill(output, weight)

      # Signal overall normalization scale
      signalScale = signalYield/len(signalSample)
            
      # Loop over the events creating background histogram
      for event in backgroundSample:
        # Extract variables and weight
        variables = event[1:]
        weight = event[0]
        # Not strictly necessary in feedforward nets
        net.flush()
        # Net output
        output = net.sactivate(variables)[0]
        # Filling histograms
        backgroundHistogram.Fill(output, weight)

      # Background overall normalization scale
      backgroundScale = backgroundYield/len(backgroundSample)
  
      # Random generators
      rcount = TRandom3(int(random.uniform(0,65535)))
      rsignal = TRandom3(int(random.uniform(0,65535)))
      rbackground = TRandom3(int(random.uniform(0,65535)))

      zscore = 0.; sqweight = 0.
  
      # Computing weighted z-score
      for bin in xrange(1,signalHistogram.GetNbinsX()+1):

        newz = 0.; oldz = 0.

        # Computing the zvalue per bin
        for point in xrange(1,self.mpoints+1):
          # Sample background
          background = Math.gamma_quantile(rbackground.Uniform(), 
            (backgroundHistogram.GetBinContent(bin)/backgroundScale)+1., backgroundScale
          )
          # Background larger than zero
          if background > 0.:
            # Sampling signal
            signal = Math.gamma_quantile(rsignal.Uniform(),
              (signalHistogram.GetBinContent(bin)/signalScale)+1., signalScale
            )
            # Sampling count
            count = rcount.Poisson(signal+background)
            # Computing pvalue
            pvalue = Math.poisson_cdf_c(count,background) + Math.poisson_pdf(count,background)
            # Computing zvalue
            zvalue = self.minz
            if pvalue < 1.0: zvalue = Math.normal_quantile_c(pvalue,1)
            # zvalue iterative average 
            newz = (zvalue + (point - 1)*oldz)/point
            # Computing relative difference
            error = math.fabs((newz - oldz)/newz)
            # Convergence criterion
            if error < self.error: break
            # Updating oldz
            oldz = newz  
            if point == self.mpoints:
              self.message('Warning: reached the maximum number of integration points (%s).' % point)

        weight = self.weight(signalHistogram.GetBinCenter(bin))
        zscore = zscore + weight * newz
        sqweight = sqweight + weight**2
        
      # The fitness is the z-score transformed back into 1 - pvalue
      # Set the fitness value on the chromosome
      genome.fitness = 1. - Math.normal_cdf_c(zscore/Math.sqrt(sqweight))
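# For reference: the per-bin p-value built above,
#   Math.poisson_cdf_c(count, background) + Math.poisson_pdf(count, background),
# is the one-sided Poisson tail P(X >= count | mean = background), and
# Math.normal_quantile_c(pvalue, 1) converts that tail probability into the
# corresponding Gaussian z-value.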
Example #29
import copy
from CMGTools.RootTools.physicsobjects.Particle import Particle

#COLIN should make a module for lorentz vectors (and conversions)
#instantiating templates
from ROOT import Math
PtEtaPhiE4DLV = Math.PtEtaPhiE4D(float)
PtEtaPhiM4DLV = Math.PtEtaPhiM4D(float)


class PhysicsObject(Particle):
    '''Extends the cmg::PhysicsObject functionalities.'''

    def __init__(self, physObj):
        self.physObj = physObj
        super(PhysicsObject, self).__init__()

    def __copy__(self):
        '''Very dirty trick, the physObj is deepcopied...'''
        # print 'call copy', self
        physObj = copy.deepcopy( self.physObj )
        newone = type(self)(physObj)
        newone.__dict__.update(self.__dict__)
        newone.physObj = physObj
        return newone        

    def scaleEnergy( self, scale ):
        p4 = self.physObj.p4()
        p4 *= scale 
        self.physObj.setP4( p4 )  
##         p4 = self.physObj.polarP4()
Example #30
Nzj   = zmumuj_data.Get('TotEvts').GetVal()*ztautauj.Get('NormEvts').GetVal()/zmumuj.Get('NormEvts').GetVal()
#Nwt   = (Nevts - (Nqcd + Nzj))*(wtOttbar/(1+wtOttbar))
Nwt   = powheg_predictions.Get("Wt_dr_xsec").GetVal() * L * totaleff_wt
Nsig  = Nevts - Nqcd - Nzj - Nwt
Nbkg = Nqcd + Nzj + Nwt

acc   = TFile("/user2/sfarry/workspaces/top/tuples/ttbar_acc.root")
A     = acc.Get('acc').GetVal()
A_err = acc.Get('acc_err').GetVal()

xsec = Nsig * A / (totaleff * L )

from math import floor
#poisson uncertainties
alpha = 1 - 0.68268942
Nwt_err_lo  = Nwt - Math.gamma_quantile(alpha/2.0, floor(Nwt),1)
Nzj_err_lo  = Nzj - Math.gamma_quantile(alpha/2.0, floor(Nzj),1)
Nqcd_err_lo = Nqcd - Math.gamma_quantile(alpha/2.0, floor(Nqcd),1)
Nbkg_err_lo = Nbkg - Math.gamma_quantile(alpha/2.0, floor(Nbkg),1)
Nwt_err_hi  = Math.gamma_quantile_c(alpha/2.0, floor(Nwt)+1,1) - Nwt
Nzj_err_hi  = Math.gamma_quantile_c(alpha/2.0, floor(Nzj)+1,1) - Nzj
Nqcd_err_hi = Math.gamma_quantile_c(alpha/2.0, floor(Nqcd)+1,1) - Nqcd
Nbkg_err_hi = Math.gamma_quantile_c(alpha/2.0, floor(Nbkg)+1,1) - Nbkg

stat_err = xsec / sqrt(Nevts)
syst_err = xsec * sqrt(pow(totaleff_err / totaleff, 2) + pow(A_err / A, 2) + pow(Nbkg_err_hi/(Nevts - Nbkg),2))
lumi_err = xsec * 0.039


print "Number of selected events: %f +/- %f" %(Nevts, sqrt(Nevts))
print "Same sign background     : %f + %f - %f" %(Nqcd, Nqcd_err_hi, Nqcd_err_lo)
Example #31
    def __call__(self, population, signalYield, signalSample, backgroundYield,
                 backgroundSample):

        # Histogram holders
        signalHistogram = TH1F('signalHistogram', 'signal', self.nbins, 0, 1)
        backgroundHistogram = TH1F('backgroundHistogram', 'background',
                                   self.nbins, 0, 1)

        # Loop over the chromosomes in the population
        for genome in population:

            # Reset histograms
            signalHistogram.Reset()
            backgroundHistogram.Reset()

            # Get the NN described by the chromosome (feed-forward)
            net = nn.create_ffphenotype(genome)

            # Loop over the events creating signal histogram
            for event in signalSample:
                # Extract variables and weight
                variables = event[1:]
                weight = event[0]
                # Not strictly necessary in feedforward nets
                net.flush()
                # Net output
                output = net.sactivate(variables)[0]
                # Filling histograms
                signalHistogram.Fill(output, weight)

            # Signal overall normalization scale
            signalScale = signalYield / len(signalSample)

            # Loop over the events creating background histogram
            for event in backgroundSample:
                # Extract variables and weight
                variables = event[1:]
                weight = event[0]
                # Not strictly necessary in feedforward nets
                net.flush()
                # Net output
                output = net.sactivate(variables)[0]
                # Filling histograms
                backgroundHistogram.Fill(output, weight)

            # Background overall normalization scale
            backgroundScale = backgroundYield / len(backgroundSample)

            # Random generators
            rcount = TRandom3(int(random.uniform(0, 65535)))
            rsignal = TRandom3(int(random.uniform(0, 65535)))
            rbackground = TRandom3(int(random.uniform(0, 65535)))

            zscore = 0.
            sqweight = 0.

            # Computing weighted z-score
            for bin in xrange(1, signalHistogram.GetNbinsX() + 1):

                newz = 0.
                oldz = 0.

                # Computing the zvalue per bin
                for point in xrange(1, self.mpoints + 1):
                    # Sample background
                    background = Math.gamma_quantile(
                        rbackground.Uniform(),
                        (backgroundHistogram.GetBinContent(bin) /
                         backgroundScale) + 1., backgroundScale)
                    # Background larger than zero
                    if background > 0.:
                        # Sampling signal
                        signal = Math.gamma_quantile(
                            rsignal.Uniform(),
                            (signalHistogram.GetBinContent(bin) / signalScale)
                            + 1., signalScale)
                        # Sampling count
                        count = rcount.Poisson(signal + background)
                        # Computing pvalue
                        pvalue = Math.poisson_cdf_c(
                            count, background) + Math.poisson_pdf(
                                count, background)
                        # Computing zvalue
                        zvalue = self.minz
                        if pvalue < 1.0:
                            zvalue = Math.normal_quantile_c(pvalue, 1)
                        # zvalue iterative average
                        newz = (zvalue + (point - 1) * oldz) / point
                        # Computing relative difference
                        error = math.fabs((newz - oldz) / newz)
                        # Convergence criterion
                        if error < self.error: break
                        # Updating oldz
                        oldz = newz
                        if point == self.mpoints:
                            self.message(
                                'Warning: reached the maximum number of integration points (%s).'
                                % point)

                weight = self.weight(signalHistogram.GetBinCenter(bin))
                zscore = zscore + weight * newz
                sqweight = sqweight + weight**2

            # The fitness is the z-score transformed back into 1 - pvalue
            # Set the fitness value on the chromosome
            genome.fitness = 1. - Math.normal_cdf_c(
                zscore / Math.sqrt(sqweight))