Code Example #1
File: NLLPlot_combined.py    Project: llechner/TTXPheno
    def calculation(variables):
        #def calculation( var1, var2 ):

        if args.variables[0] == 'cuB' and args.variables[1] == 'cuW':
            var1, var2 = variables  #cuB cuW
            ctZ, ctW = cuBWtoctWZ(var1, var2)
            kwargs = {'ctZ': ctZ, 'ctW': ctW}
        else:
            var1, var2 = variables
            kwargs = {args.variables[0]: var1, args.variables[1]: var2}

        # uncertainties
        c.reset()
        c.addUncertainty('lumi', 'lnN')
        c.addUncertainty('JEC', 'lnN')
        c.addUncertainty('fake', 'lnN')

        signal_rate = {}
        for i_region, region in enumerate(ttZRegions):

            signal_rate[region] = ttZSample.weightInfo.get_weight_yield(
                ttZ_coeffList[region], **kwargs)

            bin_name = "Region_%i" % i_region
            nice_name = region.__str__()
            c.addBin(bin_name,
                     ['_'.join(s.name.split('_')[1:3]) for s in ttZBg],
                     nice_name)
            c.specifyObservation(bin_name, observation[region])

            #            c.specifyFlatUncertainty( 'lumi', 1.05 )
            #            c.specifyFlatUncertainty( 'lumi', 1.026 )
            c.specifyFlatUncertainty('lumi', 1.01)

            c.specifyExpectation(bin_name, 'signal', signal_rate[region])
            c.specifyUncertainty('JEC', bin_name, 'signal',
                                 signal_jec_uncertainty[region])
            c.specifyUncertainty('fake', bin_name, 'signal',
                                 signal_fakerate_uncertainty[region])

            #c.specifyExpectation( bin_name, 'ttX_SM', ttX_SM_rate[region] )
            #c.specifyUncertainty( 'JEC', bin_name, 'ttX_SM', ttX_SM_jec_uncertainty[region])
            #c.specifyUncertainty( 'fake',bin_name, 'ttX_SM', ttX_SM_fakerate_uncertainty[region])

            for background in ttZBg:
                c.specifyExpectation(bin_name,
                                     '_'.join(background.name.split('_')[1:3]),
                                     background_rate[region][background.name])
                c.specifyUncertainty(
                    'JEC', bin_name, '_'.join(background.name.split('_')[1:3]),
                    background_jec_uncertainty[region][background.name])
                c.specifyUncertainty(
                    'fake', bin_name,
                    '_'.join(background.name.split('_')[1:3]),
                    background_fakerate_uncertainty[region][background.name])

        for i_region, region in enumerate(ttgammaRegions):

            signal_rate[region] = ttgamma1lSample.weightInfo.get_weight_yield(
                ttgamma1l_coeffList[region], **kwargs)
            signal_rate[region] += ttgamma2lSample.weightInfo.get_weight_yield(
                ttgamma2l_coeffList[region], **kwargs)

            bin_name = "Region_%i" % (i_region + len(ttZRegions))
            nice_name = region.__str__()

            c.addBin(bin_name,
                     ['_'.join(s.name.split('_')[1:3]) for s in ttgammaBg],
                     nice_name)
            c.specifyObservation(bin_name, observation[region])

            #            c.specifyFlatUncertainty( 'lumi', 1.05 )
            #            c.specifyFlatUncertainty( 'lumi', 1.026 )
            c.specifyFlatUncertainty('lumi', 1.05)

            c.specifyExpectation(bin_name, 'signal', signal_rate[region])
            c.specifyUncertainty('JEC', bin_name, 'signal',
                                 signal_jec_uncertainty[region])
            c.specifyUncertainty('fake', bin_name, 'signal',
                                 signal_fakerate_uncertainty[region])

            #c.specifyExpectation( bin_name, 'ttX_SM', ttX_SM_rate[region] )
            #c.specifyUncertainty( 'JEC', bin_name, 'ttX_SM', ttX_SM_jec_uncertainty[region])
            #c.specifyUncertainty( 'fake',bin_name, 'ttX_SM', ttX_SM_fakerate_uncertainty[region])

            for background in ttgammaBg:
                c.specifyExpectation(bin_name,
                                     '_'.join(background.name.split('_')[1:3]),
                                     background_rate[region][background.name])
                c.specifyUncertainty(
                    'JEC', bin_name, '_'.join(background.name.split('_')[1:3]),
                    background_jec_uncertainty[region][background.name])
                c.specifyUncertainty(
                    'fake', bin_name,
                    '_'.join(background.name.split('_')[1:3]),
                    background_fakerate_uncertainty[region][background.name])

        nameList = ['combined'] + args.variables + args.binning + [
            args.level, args.version, args.order, args.luminosity,
            'small' if args.small else 'full', var1, var2
        ]
        cardname = '%s_nll_card' % '_'.join(map(str, nameList))
        c.writeToFile('./tmp/%s.txt' % cardname)

        profiledLoglikelihoodFit = ProfiledLoglikelihoodFit('./tmp/%s.txt' %
                                                            cardname)
        profiledLoglikelihoodFit.make_workspace(rmin=rmin, rmax=rmax)
        #expected_limit = profiledLoglikelihoodFit.calculate_limit( calculator = "frequentist" )
        nll = profiledLoglikelihoodFit.likelihoodTest()
        profiledLoglikelihoodFit.cleanup(removeFiles=True)
        del profiledLoglikelihoodFit
        ROOT.gDirectory.Clear()

        # failed fits return None or extreme values; sanitize before logging ("%f" cannot format None)
        if nll is None or abs(nll) > 10000: nll = 999
        logger.info("NLL: %f", nll)

        return var1, var2, nll
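
The closure above returns a single (var1, var2, nll) point for one pair of Wilson-coefficient values. Below is a minimal sketch of how such a point function could be scanned over a rectangular grid; the helper name scan_2d, the grid ranges, and the bin count are illustrative assumptions, not the actual TTXPheno driver.

# Sketch only (assumed driver, not part of the original script): evaluate the
# closure on a rectangular grid of Wilson-coefficient points and collect the
# (var1, var2, nll) tuples for later plotting.
import numpy as np

def scan_2d(calculation, var1_range, var2_range, nbins=21):
    points = [(x, y)
              for x in np.linspace(var1_range[0], var1_range[1], nbins)
              for y in np.linspace(var2_range[0], var2_range[1], nbins)]
    # the real script may parallelize this map; a plain loop keeps the sketch simple
    return [calculation(p) for p in points]

# e.g. scan_2d(calculation, (-2., 2.), (-2., 2.)) for a cuB/cuW or ctZ/ctW scan
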
Code Example #2
    def calculation( c_var ):

        sigmaC = getHiggsWeight( c_var )

        nameList = [args.sample] + args.binning + [ args.selection, 'small' if args.small else 'full', c_var ]
        cardname = '%s_nll_card'%'_'.join( map( str, nameList ) )
        cardFilePath = os.path.join( cardfileLocation, cardname + '.txt' )

        c = cardFileWriter.cardFileWriter()

        if not args.fitOnly:
#            print 'run cardfile'

            # uncertainties
            c.reset()
            c.addUncertainty('Luminosity', 'lnN')
            c.addUncertainty('JER',             'lnN')
            c.addUncertainty('btagging',       'lnN')
            c.addUncertainty('mistagging',     'lnN')
            c.addUncertainty('LeptonID',       'lnN')

            signal_rate                  = {}
            for i_region, region in enumerate(regions):

                i_r = i_region % 4

                signal_rate[region]    = signalPP.getYieldFromDraw( selectionString=region.cutString(), weightString="%f"%(nloXSec*(1-c_var)**2/sigmaC) )['val']
                signal_rate[region]   += signalGH.getYieldFromDraw( selectionString=region.cutString(), weightString="%f"%(nloXSec*(1-c_var)*c_var/sigmaC) )['val']
                signal_rate[region]   += signalHG.getYieldFromDraw( selectionString=region.cutString(), weightString="%f"%(nloXSec*(1-c_var)*c_var/sigmaC) )['val']
                signal_rate[region]   += signalHH.getYieldFromDraw( selectionString=region.cutString(), weightString="%f"%(nloXSec*c_var**2/sigmaC) )['val']

                signal_btagging_uncertainty   [region] = 1 + .015/(i_r+1.)
                signal_mistagging_uncertainty [region] = 1 + .01/(i_r+1.)
                signal_leptonId_uncertainty   [region] = 1 + .01/(i_r+1.)
                signal_jes_uncertainty        [region] = 1 + .05/(i_r+1.)

                bin_name = "Region_%i" % i_region
                nice_name = region.__str__()
                c.addBin(bin_name, ['_'.join(s.name.split('_')[1:3]) for s in bg], nice_name)

                c.specifyObservation( bin_name, observation[region] )

                c.specifyExpectation( bin_name, 'signal', signal_rate[region]                                 )

                c.specifyFlatUncertainty( 'Luminosity', 1.026 )
                c.specifyUncertainty( 'JER',         bin_name, 'signal', signal_jes_uncertainty[region]        )
                c.specifyUncertainty( 'btagging',   bin_name, 'signal', signal_btagging_uncertainty[region]   )
                c.specifyUncertainty( 'mistagging', bin_name, 'signal', signal_mistagging_uncertainty[region] )
                c.specifyUncertainty( 'LeptonID',   bin_name, 'signal', signal_leptonId_uncertainty[region] )

                for background in bg:
                    c.specifyExpectation( bin_name, '_'.join( background.name.split('_')[1:3] ), background_rate[region][background.name] )
                    c.specifyUncertainty( 'JER',           bin_name, '_'.join( background.name.split('_')[1:3] ), background_jes_uncertainty[region][background.name])
                    c.specifyUncertainty( 'btagging',     bin_name, '_'.join( background.name.split('_')[1:3] ), background_btagging_uncertainty[region][background.name])
                    c.specifyUncertainty( 'mistagging',   bin_name, '_'.join( background.name.split('_')[1:3] ), background_mistagging_uncertainty[region][background.name])
                    c.specifyUncertainty( 'LeptonID',     bin_name, '_'.join( background.name.split('_')[1:3] ), background_leptonId_uncertainty[region][background.name])
                    
            c.writeToFile( cardFilePath )

        else:
            logger.info( "Running only NLL Fit with given CardFile %s"%cardFilePath)

        if not os.path.isfile( cardFilePath ):
            raise ValueError('CardFiles not found! Run script without --fitOnly!')

        if args.bestFit: r = (0.99, 1.01)
        else: r = (0., 2.)

        profiledLoglikelihoodFit = ProfiledLoglikelihoodFit( cardFilePath )
        profiledLoglikelihoodFit.make_workspace(rmin=r[0], rmax=r[1])
        nll = profiledLoglikelihoodFit.likelihoodTest()
        profiledLoglikelihoodFit.cleanup(removeFiles=args.removeCardFiles)
        del profiledLoglikelihoodFit

        # in very large WC regions the fit can fail (None or an extreme value);
        # such points are not relevant for the interesting regions, so sanitize before logging
        if nll is None or abs(nll) > 10000 or abs(nll) < 1: nll = 999

        logger.info( "NLL: %f", nll )
        ROOT.gDirectory.Clear()

        del c

        return c_var, nll
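
Since this variant scans a single coupling and returns (c_var, nll), a delta-NLL profile can be built directly from the returned pairs. A hedged sketch with an assumed helper name and grid; the 2*deltaNLL < 1 criterion for an approximate 68% CL interval is the standard one-parameter rule, but the actual plotting in the project is done elsewhere.

# Sketch only (assumed driver): scan the single coupling c_var, convert to
# delta-NLL relative to the best point of the scan, and read off an
# approximate 68% CL interval from 2*deltaNLL < 1.
import numpy as np

def scan_1d(calculation, cmin, cmax, npoints=41):
    results = [calculation(c) for c in np.linspace(cmin, cmax, npoints)]
    c_vals = np.array([r[0] for r in results])
    nll    = np.array([r[1] for r in results])
    q      = 2. * (nll - nll.min())          # 2*deltaNLL test statistic
    inside = c_vals[q < 1.]                  # points within the ~68% CL interval
    return c_vals, q, (inside.min(), inside.max()) if len(inside) else None
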
Code Example #3
File: NLLPlot.py    Project: llechner/TTXPheno
    def calculation(variables):
        #def calculation( var1, var2 ):

        if args.variables[0] == 'cuB' and args.variables[1] == 'cuW':
            var1, var2 = variables  #cuB cuW
            ctZ, ctW = cuBWtoctWZ(var1, var2)
            kwargs = {'ctZ': ctZ, 'ctW': ctW}
        else:
            var1, var2 = variables
            kwargs = {args.variables[0]: var1, args.variables[1]: var2}

        nameList = args.sample.split(
            '_')[1:3] + args.variables + args.binning + [
                args.level, args.version, args.order, args.luminosity,
                "14TeV" if args.scale14TeV else "13TeV", args.selection,
                'small' if args.small else 'full', 'statOnly' if args.statOnly
                else 'fullUnc' if not args.noExpUnc else 'noExpUnc', var1, var2
            ]
        cardname = '%s_nll_card' % '_'.join(map(str, nameList))
        cardFilePath = os.path.join(cardfileLocation, cardname + '.txt')

        c = cardFileWriter.cardFileWriter()
        if args.useCombine:
            c.releaseLocation = combineReleaseLocation

        if not args.fitOnly:
            #            print 'run cardfile'

            # uncertainties
            c.reset()
            if not args.statOnly:
                if not args.noExpUnc:
                    c.addUncertainty('lumi', 'lnN')
                    c.addUncertainty('JES', 'lnN')
                    c.addUncertainty('btagging', 'lnN')
                    c.addUncertainty('mistagging', 'lnN')
                    c.addUncertainty('muonId', 'lnN')
                    c.addUncertainty('electronId', 'lnN')
                for unc in args.addUncertainties:
                    c.addUncertainty(unc, 'lnN')

            signal_rate = {}
            for i_region, region in enumerate(regions):

                signal_rate[region] = ttXSample.weightInfo.get_weight_yield(
                    ttX_coeffList[region], **kwargs)

                if not args.statOnly and not args.noExpUnc:
                    # signal uncertainties
                    # btagging
                    signal_rate_reweighted = ttXSample.weightInfo.get_weight_yield(
                        ttX_coeffList_reweighted_btagging[region], **kwargs)
                    signal_btagging_uncertainty[region] = 1 + (
                        (signal_rate_reweighted - signal_rate[region]) /
                        signal_rate[region]) if signal_rate[region] > 0 else 1.

                    # mistagging
                    signal_rate_reweighted = ttXSample.weightInfo.get_weight_yield(
                        ttX_coeffList_reweighted_mistagging[region], **kwargs)
                    signal_mistagging_uncertainty[region] = 1 + (
                        (signal_rate_reweighted - signal_rate[region]) /
                        signal_rate[region]) if signal_rate[region] > 0 else 1.

                    # muonId
                    signal_rate_reweighted = ttXSample.weightInfo.get_weight_yield(
                        ttX_coeffList_reweighted_muonId[region], **kwargs)
                    signal_muonId_uncertainty[region] = 1 + (
                        (signal_rate_reweighted - signal_rate[region]) /
                        signal_rate[region]) if signal_rate[region] > 0 else 1.

                    # electronId
                    signal_rate_reweighted = ttXSample.weightInfo.get_weight_yield(
                        ttX_coeffList_reweighted_electronId[region], **kwargs)
                    signal_electronId_uncertainty[region] = 1 + (
                        (signal_rate_reweighted - signal_rate[region]) /
                        signal_rate[region]) if signal_rate[region] > 0 else 1.

                    # JES
                    signal_rate_reweighted_JES_up = ttXSample.weightInfo.get_weight_yield(
                        ttX_coeffList_reweighted_jes_up[region], **kwargs)
                    signal_rate_reweighted_JES_down = ttXSample.weightInfo.get_weight_yield(
                        ttX_coeffList_reweighted_jes_down[region], **kwargs)
                    signal_jes_uncertainty[region] = 1 + (
                        (signal_rate_reweighted_JES_up -
                         signal_rate_reweighted_JES_down) /
                        (2 * signal_rate[region])
                    ) if signal_rate[region] > 0 else 1.

                bin_name = "Region_%i" % i_region
                nice_name = region.__str__()
                c.addBin(bin_name,
                         ['_'.join(s.name.split('_')[1:3])
                          for s in bg] + ['nonPrompt'] if args.addNonPrompt
                         else ['_'.join(s.name.split('_')[1:3])
                               for s in bg], nice_name)

                c.specifyObservation(bin_name, observation[region])

                c.specifyExpectation(bin_name, 'signal', signal_rate[region])

                if not args.statOnly:
                    if not args.noExpUnc:
                        c.specifyFlatUncertainty('lumi', 1.01)
                        c.specifyUncertainty('JES', bin_name, 'signal',
                                             signal_jes_uncertainty[region])
                        c.specifyUncertainty(
                            'btagging', bin_name, 'signal',
                            signal_btagging_uncertainty[region])
                        c.specifyUncertainty(
                            'mistagging', bin_name, 'signal',
                            signal_mistagging_uncertainty[region])
                        c.specifyUncertainty('muonId', bin_name, 'signal',
                                             signal_muonId_uncertainty[region])
                        c.specifyUncertainty(
                            'electronId', bin_name, 'signal',
                            signal_electronId_uncertainty[region])

                    for unc in args.addUncertainties:
                        c.specifyUncertainty(
                            unc, bin_name, 'signal', 1 + (getUncertaintyValue(
                                args.additionalCardFile, args.addBinNumberShift
                                + i_region, 'signal', unc) - 1) *
                            args.uncertaintyScale)

                if args.addNonPrompt:
                    # for nonpromt only nonpromt uncertainty is important
                    c.specifyExpectation(bin_name, 'nonPrompt',
                                         nonPromptObservation[region])
                    if not args.statOnly:
                        c.specifyUncertainty(
                            'nonprompt', bin_name, 'nonPrompt',
                            1 + (getUncertaintyValue(
                                args.additionalCardFile, args.addBinNumberShift
                                + i_region, 'nonPromptDD', 'nonprompt') - 1) *
                            args.uncertaintyScale)

                #c.specifyExpectation( bin_name, 'ttX_SM', ttX_SM_rate[region] )
                #c.specifyUncertainty( 'JES', bin_name, 'ttX_SM', ttX_SM_jes_uncertainty[region])
                #c.specifyUncertainty( 'btagging',bin_name, 'ttX_SM', ttX_SM_btagging_uncertainty[region])

                for background in bg:
                    c.specifyExpectation(
                        bin_name, '_'.join(background.name.split('_')[1:3]),
                        background_rate[region][background.name])
                    if not args.statOnly:
                        if not args.noExpUnc:
                            c.specifyUncertainty(
                                'JES', bin_name,
                                '_'.join(background.name.split('_')[1:3]),
                                background_jes_uncertainty[region][
                                    background.name])
                            c.specifyUncertainty(
                                'btagging', bin_name,
                                '_'.join(background.name.split('_')[1:3]),
                                background_btagging_uncertainty[region][
                                    background.name])
                            c.specifyUncertainty(
                                'mistagging', bin_name,
                                '_'.join(background.name.split('_')[1:3]),
                                background_mistagging_uncertainty[region][
                                    background.name])
                            c.specifyUncertainty(
                                'muonId', bin_name,
                                '_'.join(background.name.split('_')[1:3]),
                                background_muonId_uncertainty[region][
                                    background.name])
                            c.specifyUncertainty(
                                'electronId', bin_name,
                                '_'.join(background.name.split('_')[1:3]),
                                background_electronId_uncertainty[region][
                                    background.name])
                        for unc in args.addUncertainties:
                            parts = background.name.split('_')
                            if 'tZq' in parts or 'ttgamma' in parts or 'tWZ' in parts:
                                proc = 'TTX'
                            elif 'WZ' in parts:
                                proc = 'WZ'
                            else:
                                raise ValueError('Background not found: %s' %
                                                 background.name)
                            c.specifyUncertainty(
                                unc, bin_name,
                                '_'.join(background.name.split('_')[1:3]),
                                1 + (getUncertaintyValue(
                                    args.additionalCardFile,
                                    args.addBinNumberShift + i_region, proc,
                                    unc) - 1) * args.uncertaintyScale)

            c.writeToFile(cardFilePath)

        else:
            logger.info("Running only NLL Fit with given CardFile %s" %
                        cardFilePath)

        if not os.path.isfile(cardFilePath):
            raise ValueError(
                'CardFiles not found! Run script without --fitOnly!')

        if args.useCombine:
            # use the official cms combine tool
            #                c.calcNuisances( cardFilePath, bestFit=args.bestFit )
            nll = c.calcNLL(cardFilePath, bestFit=args.bestFit)
            #            nll = nll['nll0'] #pre-fit
            nll = nll['nll_abs']  #post-fit

            if args.removeCardFiles:
                for file in os.listdir(cardfileLocation):
                    if file.startswith(cardname):
                        os.remove(os.path.join(cardfileLocation, file))

        else:
            if args.bestFit: r = (0.99, 1.01)
            else: r = (0., 2.)

            profiledLoglikelihoodFit = ProfiledLoglikelihoodFit(cardFilePath)
            profiledLoglikelihoodFit.make_workspace(rmin=r[0], rmax=r[1])
            nll = profiledLoglikelihoodFit.likelihoodTest()
            profiledLoglikelihoodFit.cleanup(removeFiles=args.removeCardFiles)
            del profiledLoglikelihoodFit

        logger.info("NLL: %f", nll)
        ROOT.gDirectory.Clear()

        # in very large WC regions, the fit fails, not relevant for the interesting regions
        if nll is None or abs(nll) > 10000 or abs(nll) < 1: nll = 999

        del c

        return var1, var2, nll
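
Both the combine branch and the ProfiledLoglikelihoodFit branch end in a single (var1, var2, nll) tuple per scan point. Downstream, such points are typically histogrammed as 2*deltaNLL; the short sketch below assumes a uniform scan grid, and the histogram name and binning arguments are placeholders.

# Hedged sketch (assumption, not the project's own plotting code): turn scanned
# (var1, var2, nll) points into a ROOT TH2D of 2*deltaNLL, the quantity usually
# contoured at 2.30 / 5.99 for the 68% / 95% CL in two dimensions.
import ROOT

def fill_delta_nll_hist(results, nbins, xlo, xhi, ylo, yhi, name="2D_deltaNLL"):
    hist = ROOT.TH2D(name, name, nbins, xlo, xhi, nbins, ylo, yhi)
    nll_min = min(r[2] for r in results)
    for var1, var2, nll in results:
        hist.SetBinContent(hist.FindBin(var1, var2), 2. * (nll - nll_min))
    return hist
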
Code Example #4
            tot_background = sum([
                background_rate[region][background.name]
                for background in backgrounds
            ])
            exp_tot_sigmas += abs(signal_rate[region]) / sqrt(tot_background)
            # avoid total neg. yield
            if signal_rate[region] < 0:
                max_r = -tot_background / signal_rate[region]
                if max_r < max_rmax:
                    max_rmax = max_r

        rmax_est = 400. / exp_tot_sigmas if exp_tot_sigmas > 0 else 1.
        if max_rmax < rmax_est:
            rmax_est = 0.9 * max_rmax  # safety margin such that at least +10% total yield survives in the smallest SR

        profiledLoglikelihoodFit = ProfiledLoglikelihoodFit(
            './tmp_limit_card.txt')
        profiledLoglikelihoodFit.make_workspace(rmin=0, rmax=rmax_est)
        #expected_limit = profiledLoglikelihoodFit.calculate_limit( calculator = "frequentist" )
        expected_limit = profiledLoglikelihoodFit.calculate_limit(
            calculator="asymptotic")
        logger.info("Expected Limit: %f", expected_limit[0])
        limit.SetBinContent(limit.FindBin(cpt, cpQM), expected_limit[0])
        profiledLoglikelihoodFit.cleanup()

limitPlot = Plot2D.fromHisto("2D_limit_3",
                             texX="cpt",
                             texY="cpQM",
                             histos=[[limit]])
limitPlot.drawOption = "colz"
ROOT.gStyle.SetPaintTextFormat("2.2f")
plotting.draw2D(limitPlot,
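
The rmax estimate in this snippet combines a naive total significance, the sum of |s|/sqrt(b) over regions, with a cap that keeps the total yield positive when the signal contribution is negative. Below is a standalone restatement of that heuristic as a helper with illustrative argument names; the original code inlines this logic.

# Illustrative restatement of the rmax heuristic used above (not a new API):
# accumulate |s|/sqrt(b) over regions, translate it into rmax ~ 400/sigma_tot,
# and cap rmax so that even the most negative signal leaves >= 10% of the background.
from math import sqrt

def estimate_rmax(signal, background, regions):
    exp_tot_sigmas = 0.
    max_rmax = float('inf')
    for region in regions:
        s, b = signal[region], background[region]
        if b > 0:
            exp_tot_sigmas += abs(s) / sqrt(b)
        if s < 0:                      # avoid a negative total yield
            max_rmax = min(max_rmax, -b / s)
    rmax_est = 400. / exp_tot_sigmas if exp_tot_sigmas > 0 else 1.
    if max_rmax < rmax_est:
        rmax_est = 0.9 * max_rmax      # safety margin, as in the snippet above
    return rmax_est
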
Code Example #5
    def calculation( variables ):

            if args.variables[0] == 'cuB' and args.variables[1] == 'cuW':
                var1, var2 = variables #cuB cuW
                ctZ, ctW = cuBWtoctWZ( var1, var2 )
                kwargs = { 'ctZ':ctZ, 'ctW':ctW }
            else:
                var1, var2 = variables
                kwargs = { args.variables[0]:var1, args.variables[1]:var2 }

            # uncertainties
            c.reset()
            c.addUncertainty('lumi',        'lnN')
            c.addUncertainty('JEC',         'lnN')
            c.addUncertainty('fake',        'lnN')

            signal_rate                  = {}
            for i_region, region in enumerate(ttZRegions):

                signal_rate[region] = ttZSample.weightInfo.get_weight_yield( ttZ_coeffList[region], **kwargs) - ttZ_SM_rate[region]

                bin_name = "Region_%i" % i_region
                nice_name = region.__str__()
                c.addBin(bin_name,['ttX_SM'] + ['_'.join(s.name.split('_')[1:3]) for s in ttZBg], nice_name)
                c.specifyObservation( bin_name, observation[region] )

                c.specifyFlatUncertainty( 'lumi', 1.05 )

                c.specifyExpectation( bin_name, 'signal', signal_rate[region] )
                c.specifyUncertainty( 'JEC', bin_name, 'signal', signal_jec_uncertainty[region])
                c.specifyUncertainty( 'fake',bin_name, 'signal', signal_fakerate_uncertainty[region])

                c.specifyExpectation( bin_name, 'ttX_SM', ttZ_SM_rate[region] )
                c.specifyUncertainty( 'JEC', bin_name, 'ttX_SM', ttX_SM_jec_uncertainty[region])
                c.specifyUncertainty( 'fake',bin_name, 'ttX_SM', ttX_SM_fakerate_uncertainty[region])

                for background in ttZBg:
                    c.specifyExpectation( bin_name, '_'.join( background.name.split('_')[1:3] ), background_rate[region][background.name] )
                    c.specifyUncertainty( 'JEC', bin_name, '_'.join( background.name.split('_')[1:3] ), background_jec_uncertainty[region][background.name])
                    c.specifyUncertainty( 'fake',bin_name, '_'.join( background.name.split('_')[1:3] ), background_fakerate_uncertainty[region][background.name])


            for i_region, region in enumerate(ttgammaRegions):

                signal_rate[region]  = ttgamma1lSample.weightInfo.get_weight_yield( ttgamma1l_coeffList[region], **kwargs) - ttgamma1l_SM_rate[region]
                signal_rate[region] += ttgamma2lSample.weightInfo.get_weight_yield( ttgamma2l_coeffList[region], **kwargs) - ttgamma2l_SM_rate[region]

                bin_name = "Region_%i" % (i_region + len(ttZRegions))
                nice_name = region.__str__()
                c.addBin(bin_name,['ttX_SM'] + ['_'.join(s.name.split('_')[1:3]) for s in ttgammaBg], nice_name)
                c.specifyObservation( bin_name, observation[region] )

    #            c.specifyFlatUncertainty( 'lumi', 1.05 )
    #            c.specifyFlatUncertainty( 'lumi', 1.026 )
                c.specifyFlatUncertainty( 'lumi', 1.05 )

                c.specifyExpectation( bin_name, 'signal', signal_rate[region] )
                c.specifyUncertainty( 'JEC', bin_name, 'signal', signal_jec_uncertainty[region])
                c.specifyUncertainty( 'fake',bin_name, 'signal', signal_fakerate_uncertainty[region])

                c.specifyExpectation( bin_name, 'ttX_SM', ttgamma1l_SM_rate[region] + ttgamma2l_SM_rate[region] )
                c.specifyUncertainty( 'JEC', bin_name, 'ttX_SM', ttX_SM_jec_uncertainty[region])
                c.specifyUncertainty( 'fake',bin_name, 'ttX_SM', ttX_SM_fakerate_uncertainty[region])

                for background in ttgammaBg:
                    c.specifyExpectation( bin_name, '_'.join( background.name.split('_')[1:3] ), background_rate[region][background.name] )
                    c.specifyUncertainty( 'JEC', bin_name, '_'.join( background.name.split('_')[1:3] ), background_jec_uncertainty[region][background.name])
                    c.specifyUncertainty( 'fake',bin_name, '_'.join( background.name.split('_')[1:3] ), background_fakerate_uncertainty[region][background.name])
                    


            nameList = ['combined'] + args.variables + args.binning + [ args.level, args.version, args.order, args.luminosity, 'small' if args.small else 'full', var1, var2 ]
            cardname = '%s_limit_card'%'_'.join( map( str, nameList ) )
            c.writeToFile( './tmp/%s.txt'%cardname )

            # try to adjust rmax with some margin
            exp_tot_sigmas = 0
            max_rmax = float('inf')
            for region in ttZRegions:

                tot_background = sum( [ background_rate[region][background.name] for background in ttZBg ] )
                exp_tot_sigmas += abs(signal_rate[region]) / sqrt( tot_background ) if tot_background > 0 else 1. #float('inf')

                print 'region', region
                print 'exp_sigma', exp_tot_sigmas

                # avoid total neg. yield
                if signal_rate[region] < 0:
                    max_r = -tot_background / signal_rate[region]
                    if max_r < max_rmax:
                        max_rmax = max_r

                print 'max_rmax', max_rmax
                print

            for region in ttgammaRegions:

                tot_background = sum( [ background_rate[region][background.name] for background in ttgammaBg ] )
                exp_tot_sigmas += abs(signal_rate[region]) / sqrt( tot_background ) if tot_background > 0 else 100. #float('inf')

                print 'region', region
                print 'exp_sigma', exp_tot_sigmas

                # avoid total neg. yield
                if signal_rate[region] < 0:
                    max_r = -tot_background / signal_rate[region]
                    if max_r < max_rmax:
                        max_rmax = max_r

                print 'max_rmax', max_rmax
                print


            if exp_tot_sigmas == float('inf'): rmax_est = 0.5 #float('inf')
            elif exp_tot_sigmas == 0: rmax_est = 200 #float('inf')
            else: rmax_est = 400. / exp_tot_sigmas

            print
            print 'rmax_est', rmax_est

            if max_rmax < rmax_est:
                rmax_est = 0.9*max_rmax # safety margin such that at least +10% total yield survives in the smallest SR

            print 'rmax_est', rmax_est
            print

            profiledLoglikelihoodFit = ProfiledLoglikelihoodFit( './tmp/%s.txt'%cardname )
            profiledLoglikelihoodFit.make_workspace(rmin=0, rmax=rmax_est)
            #expected_limit = profiledLoglikelihoodFit.calculate_limit( calculator = "frequentist" )
            expected_limit = profiledLoglikelihoodFit.calculate_limit( calculator = "asymptotic", plotLimit = False )
            logger.info( "Expected Limit: %f", expected_limit[0] )
            profiledLoglikelihoodFit.cleanup( removeFiles = True )
            del profiledLoglikelihoodFit
            ROOT.gDirectory.Clear()

            return var1, var2, [ expected_limit[i] for i in range(-2,3) ]
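
This variant returns five entries of expected_limit per scan point (indices -2 to +2), presumably the -2 sigma to +2 sigma expected-limit quantiles. The sketch below shows how such quantiles could be drawn as a band over a 1D slice; the quantile ordering and the matplotlib-based plot are assumptions for illustration, since the project's own plotting goes through its ROOT-based helpers.

# Sketch only: plot an expected-limit band over a 1D slice of the scan.
# The ordering of the five returned quantiles is an assumption here.
import matplotlib.pyplot as plt

def plot_limit_band(scan, order=("m2", "m1", "med", "p1", "p2")):
    # scan: list of (var1, var2, [five quantiles]) tuples with var2 held fixed
    x = [p[0] for p in scan]
    q = {name: [p[2][i] for p in scan] for i, name in enumerate(order)}
    plt.fill_between(x, q["m2"], q["p2"], color="yellow", label="expected, 2 sigma band")
    plt.fill_between(x, q["m1"], q["p1"], color="limegreen", label="expected, 1 sigma band")
    plt.plot(x, q["med"], "k--", label="expected median")
    plt.legend()
    plt.savefig("limit_band.png")
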
Code Example #6
File: NLLPlot.py    Project: llechner/TTXPheno
            #c.specifyUncertainty( 'JEC', bin_name, 'ttZ_SM', ttZ_SM_jec_uncertainty[region])
            #c.specifyUncertainty( 'fake',bin_name, 'ttZ_SM', ttZ_SM_fakerate_uncertainty[region])

            for background in backgrounds:
                c.specifyExpectation(bin_name, background.name,
                                     background_rate[region][background.name])
                c.specifyUncertainty(
                    'JEC', bin_name, background.name,
                    background_jec_uncertainty[region][background.name])
                c.specifyUncertainty(
                    'fake', bin_name, background.name,
                    background_fakerate_uncertainty[region][background.name])

        c.writeToFile('./tmp_nll_card.txt')

        profiledLoglikelihoodFit = ProfiledLoglikelihoodFit(
            './tmp_nll_card.txt')
        profiledLoglikelihoodFit.make_workspace(rmin=0, rmax=1)
        #expected_limit = profiledLoglikelihoodFit.calculate_limit( calculator = "frequentist" )
        nll = profiledLoglikelihoodFit.likelihoodTest()
        logger.info("NLL: %f", nll)
        nll_plot.SetBinContent(nll_plot.FindBin(cpt, cpQM), nll)
        profiledLoglikelihoodFit.cleanup()

nll_SM = nll_plot.GetBinContent(nll_plot.FindBin(0, 0))
for bin_x in range(nll_plot.GetNbinsX() + 1):
    for bin_y in range(nll_plot.GetNbinsY() + 1):
        nll_plot.SetBinContent(
            bin_x, bin_y,
            nll_plot.GetBinContent(nll_plot.GetBin(bin_x, bin_y)) - nll_SM)

nllPlot = Plot2D.fromHisto("2D_nll_2",