def checkPrediction(self, slhafile, expID, expectedValues, datasetID):

        reducedModel = [
            ptc for ptc in BSMList if abs(ptc.pdg) in [1000011, 1000012]
        ]
        model = Model(reducedModel, SMList)
        model.updateParticles(slhafile)

        self.configureLogger()
        smstoplist = decomposer.decompose(model,
                                          0. * fb,
                                          doCompress=True,
                                          doInvisible=True,
                                          minmassgap=5. * GeV)

        expresults = database.getExpResults(analysisIDs=expID,
                                            datasetIDs=datasetID)
        for expresult in expresults:
            theorypredictions = theoryPredictionsFor(expresult, smstoplist)
            for pred in theorypredictions:
                predval = pred.xsection.value
                expval = expectedValues.pop()
                delta = expval * 0.01
                self.assertAlmostEqual(predval.asNumber(fb),
                                       expval,
                                       delta=delta)

        self.assertEqual(len(expectedValues), 0)
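These snippets all rely on the same handful of SModelS imports, which the fragments omit. A typical preamble, assuming the SModelS 2.x module layout (paths shifted between releases, so treat this as a sketch):

    # Common imports assumed by the examples on this page (SModelS 2.x layout).
    from smodels.theory import decomposer
    from smodels.theory.model import Model
    from smodels.theory.theoryPrediction import theoryPredictionsFor, TheoryPredictionList
    from smodels.share.models.SMparticles import SMList
    from smodels.particlesLoader import BSMList
    from smodels.experiment.databaseObj import Database
    from smodels.tools.physicsUnits import fb, GeV, TeV

    # The `database` object used below is assumed to be a Database instance,
    # e.g. one pointing at the official result database:
    database = Database("official")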
Example #2
    def runPrinterMain(self, slhafile, mprinter, addTopList=False):
        """
        Main program. Displays basic use case.
    
        """
        runtime.modelFile = 'mssm'
        reload(particlesLoader)

        #Set main options for decomposition:
        sigmacut = 0.03 * fb
        mingap = 5. * GeV
        """ Decompose model  """
        model = Model(BSMList, SMList)
        model.updateParticles(slhafile)
        smstoplist = decomposer.decompose(model,
                                          sigmacut,
                                          doCompress=True,
                                          doInvisible=True,
                                          minmassgap=mingap)

        #Add the decomposition result to the printers
        if addTopList:
            mprinter.addObj(smstoplist)
        listOfExpRes = database.getExpResults(analysisIDs=[
            '*:8*TeV', 'CMS-PAS-SUS-15-002', 'CMS-PAS-SUS-16-024'
        ])
        # Compute the theory predictions for each analysis
        allPredictions = []
        for expResult in listOfExpRes:
            predictions = theoryPredictionsFor(expResult, smstoplist)
            if not predictions:
                continue
            allPredictions += predictions._theoryPredictions

        for theoPred in allPredictions:
            if (theoPred.dataType() == 'efficiencyMap'
                    and getattr(theoPred, 'expectedUL', None) is not None):
                theoPred.computeStatistics()

        maxcond = 0.2
        theoryPredictions = TheoryPredictionList(allPredictions, maxcond)
        mprinter.addObj(theoryPredictions)

        #Add coverage information:
        coverageInfo = coverage.Uncovered(smstoplist)
        mprinter.addObj(coverageInfo)

        #Add additional information:
        databaseVersion = database.databaseVersion
        outputStatus = ioObjects.OutputStatus(
            [1, 'Input file ok'], slhafile, {
                'sigmacut': sigmacut.asNumber(fb),
                'minmassgap': mingap.asNumber(GeV),
                'maxcond': maxcond
            }, databaseVersion)
        outputStatus.status = 1
        mprinter.addObj(outputStatus)
        mprinter.flush()
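runPrinterMain expects an already-configured MPrinter. A minimal sketch of how one could be set up, assuming a standard parameters.ini with a printer section (the file names here are illustrative):

    from configparser import ConfigParser
    from smodels.tools.printer import MPrinter

    parser = ConfigParser()
    parser.read("parameters.ini")           # must define the printer options
    mprinter = MPrinter()
    mprinter.setPrinterOptions(parser)      # select the output formats
    mprinter.setOutPutFiles("./results/lightEWinos")
    self.runPrinterMain("./testFiles/slha/lightEWinos.slha", mprinter,
                        addTopList=True)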
Example #3
    def testGraph(self):
        """ draw ascii graph """
        
        filename = "./testFiles/lhe/simplyGluino.lhe"
        model = Model(BSMparticles = BSMList, SMparticles = SMList)
        model.updateParticles(filename)
        
        
        topList = decomposer.decompose(model, sigmacut=0)
        element = topList.getElements()[0]


        d1=self.orig().split("\n")
        d2=asciiGraph.asciidraw(element, border=True ).split("\n")
        self.assertEqual(d1,d2)
Example #4
    def testT1(self):
        """ test with the T1 slha input file """
        slhafile = "./testFiles/slha/simplyGluino.slha"
        model = Model(BSMList, SMList)
        model.updateParticles(inputFile=slhafile)
        topos = decomposer.decompose(model, .1 * fb, False, False, 5. * GeV)
        for topo in topos:
            for element in topo.elementList:
                masses = element.mass
                # print "e=",element,"masses=",masses
                mgluino = masses[0][0]
                mLSP = masses[0][1]
                self.assertEqual(str(element), "[[[q,q]],[[q,q]]]")
                self.assertEqual(int(mgluino / GeV), 675)
                self.assertEqual(int(mLSP / GeV), 200)
Example #5
    def testGoodFile(self):

        filename = "./testFiles/slha/lightEWinos.slha"
        model = Model(BSMList, SMList)
        model.updateParticles(filename)

        topolist = decomposer.decompose(model,
                                        sigmacut=0.1 * fb,
                                        doCompress=True,
                                        doInvisible=True,
                                        minmassgap=5 * GeV)
        analyses = database.getExpResults(txnames=["TChiWZoff"],
                                          analysisIDs='ATLAS-SUSY-2013-12')
        theoryPrediction = theoryPredictionsFor(analyses[0], topolist)[0]
        conditionViolation = theoryPrediction.conditions
        self.assertEqual(
            conditionViolation[
                'Cgtr([[[mu+,mu-]],[[l,nu]]],[[[e+,e-]],[[l,nu]]])'], 0.)
Example #6
    def testIntegration(self):

        slhafile = '../inputFiles/slha/simplyGluino.slha'
        model = Model(BSMList, SMList)
        model.updateParticles(slhafile)

        self.configureLogger()
        smstoplist = decomposer.decompose(model,
                                          .1 * fb,
                                          doCompress=True,
                                          doInvisible=True,
                                          minmassgap=5. * GeV)
        listofanalyses = database.getExpResults(
            analysisIDs=["ATLAS-SUSY-2013-02", "CMS-SUS-13-012"],
            txnames=["T1"])
        if not isinstance(listofanalyses, list):
            listofanalyses = [listofanalyses]
        for analysis in listofanalyses:
            self.checkAnalysis(analysis, smstoplist)
Example #7
    def testInvisibleNegative(self):
        """ test the invisible compression, a negative example """
        slhafile = "./testFiles/slha/higgsinoStop.slha"
        model = Model(BSMList, SMList)
        model.updateParticles(slhafile)
        topos = decomposer.decompose(model, .1 * fb, False, True, 5. * GeV)
        tested = False
        for topo in topos:
            if str(topo) != "[1,1][1,1]":
                continue
            for element in topo.elementList:
                if str(element) != "[[[t+],[t-]],[[q],[W+]]]":
                    continue
                tested = True
                trueMothers = [
                    mother for mother in element.motherElements
                    if mother is not element
                ]
                self.assertEqual(len(trueMothers), 0)
        self.assertTrue(tested)
Example #8
    def testApproxGaussian(self):
        ## turn experimental features on
        from smodels.tools import runtime
        runtime._experimental = True
        expRes = database.getExpResults(analysisIDs=["CMS-PAS-SUS-12-026"])
        self.assertEqual(len(expRes), 1)
        filename = "./testFiles/slha/T1tttt.slha"
        model = Model(BSMList, SMList)
        model.updateParticles(filename)
        smstoplist = decomposer.decompose(model, sigmacut=0)
        prediction = theoryPredictionsFor(expRes[0], smstoplist)[0]
        prediction.computeStatistics()
        import numpy
        # scan the likelihood over a range of signal strengths
        tot = 0.
        for mu in numpy.arange(0., .2, .02):
            tot += prediction.getLikelihood(mu)
        self.assertAlmostEqual(prediction.likelihood, 1.563288e-35, 3)
Example #9
    def testMass(self):
        """ test the mass compression, a positive example """
        tested = False
        slhafile = "./testFiles/slha/higgsinoStop.slha"
        model = Model(BSMList, SMList)
        model.updateParticles(slhafile, promptWidth=1e-12 * GeV)
        topos = decomposer.decompose(model, .1 * fb, True, False, 5. * GeV)
        for topo in topos:
            if str(topo) != "[1][1]":
                continue
            for element in topo.elementList:
                if str(element) != "[[[b]],[[b]]]":
                    continue
                masses = element.motherElements[0].mass
                tested = True
                dm = abs(masses[0][1] - masses[0][2]) / GeV
                # If intermediate BSM states are compared, there are two elements ([[[b],[c,q]],[[b],[q,q]]])
                # which do not get combined, because their branches differ by the charges of the intermediate states
                self.assertEqual(len(element.motherElements), 24)
                self.assertTrue(dm < 5.0)
        self.assertTrue(tested)
Example #10
    def testClusteringEM(self):

        slhafile = 'testFiles/slha/lightEWinos.slha'
        model = Model(BSMparticles=BSMList, SMparticles=SMList)
        model.updateParticles(slhafile)
        sigmacut = 5. * fb
        mingap = 5. * GeV
        toplist = decomposer.decompose(model,
                                       sigmacut,
                                       doCompress=True,
                                       doInvisible=True,
                                       minmassgap=mingap)

        #Test clustering for EM results
        dataset = database.getExpResults(
            analysisIDs='CMS-SUS-13-012',
            datasetIDs='3NJet6_800HT1000_300MHT450')[0].getDataset(
                '3NJet6_800HT1000_300MHT450')

        el1 = toplist[0].elementList[0].copy()
        el2 = toplist[0].elementList[1].copy()
        el3 = toplist[2].elementList[1].copy()
        el1.eff = 1.  #(Used in clustering)
        el2.eff = 1.  #(Used in clustering)
        el3.eff = 1.  #(Used in clustering)
        #All elements have the same UL (for EM results)
        el1._upperLimit = el2._upperLimit = el3._upperLimit = 1. * fb
        #Clustering should not depend on the mass, width or txname:
        el1.txname = el2.txname = el3.txname = None
        el1.mass = [[1000. * GeV, 10 * GeV]] * 2
        el2.mass = [[1500. * GeV, 10 * GeV]] * 2
        el3.mass = [[200. * GeV, 100 * GeV, 90. * GeV]] * 2
        el3.totalwidth = [[1e-10 * GeV, 1e-15 * GeV, 0. * GeV]] * 2
        clusters = clusterElements([el1, el2, el3],
                                   maxDist=0.2,
                                   dataset=dataset)
        self.assertEqual(len(clusters), 1)
        self.assertEqual(sorted(clusters[0].elements), sorted([el1, el2, el3]))
        self.assertEqual(clusters[0].averageElement().mass, None)
        self.assertEqual(clusters[0].averageElement().totalwidth, None)
Example #11
    def testInvisiblePositive(self):
        """ test the invisible compression, a positive example """
        slhafile = "./testFiles/slha/higgsinoStop.slha"
        model = Model(BSMList, SMList)
        model.updateParticles(slhafile)
        topos = decomposer.decompose(model, .1 * fb, False, True, 5. * GeV)
        tested = False
        for topo in topos:
            if str(topo) != "[][]":
                continue
            for element in topo.elementList:
                if str(element) != "[[],[]]":
                    continue
                tested = True
                trueMothers = [
                    mother for mother in element.motherElements
                    if mother is not element
                ]
                if not trueMothers:
                    continue
                self.assertEqual(str(trueMothers[0]), "[[],[[nu,nu]]]")
                self.assertEqual(len(trueMothers), 1)
        self.assertTrue(tested)
Example #12
    def testPredictionInterface(self):
        """ A simple test to see that the interface in datasetObj
        and TheoryPrediction to the statistics tools is working correctly
        """
        expRes = database.getExpResults(analysisIDs=['CMS-SUS-13-012'])[0]

        filename = "./testFiles/slha/simplyGluino.slha"
        model = Model(BSMList, SMList)
        model.updateParticles(filename)
        smstoplist = decomposer.decompose(model, sigmacut=0)
        prediction = theoryPredictionsFor(expRes, smstoplist)[0]
        pred_signal_strength = prediction.xsection.value
        prediction.computeStatistics()
        ill = math.log(prediction.likelihood)
        ichi2 = prediction.chi2
        nsig = (pred_signal_strength * expRes.globalInfo.lumi).asNumber()
        m = Data(4, 2.2, 1.1**2, None, nsignal=nsig, deltas_rel=0.2)
        computer = LikelihoodComputer(m)
        dll = math.log(computer.likelihood(nsig, marginalize=False))
        self.assertAlmostEqual(ill, dll, places=2)
        dchi2 = computer.chi2(nsig, marginalize=False)
        # print ( "dchi2,ichi2",dchi2,ichi2)
        self.assertAlmostEqual(ichi2, dchi2, places=2)
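Besides the common preamble, this example also needs the simplified-likelihood tools; assuming SModelS 2.x these live in smodels.tools.simplifiedLikelihoods (worth double-checking against your installed version):

    import math
    from smodels.tools.simplifiedLikelihoods import Data, LikelihoodComputer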
Example #13
    def testComplexCluster(self):
        """ test the mass clusterer """

        slhafile = 'testFiles/slha/416126634.slha'
        model = Model(BSMparticles=BSMList, SMparticles=SMList)
        model.updateParticles(slhafile)
        sigmacut = 0.03 * fb
        mingap = 5. * GeV
        toplist = decomposer.decompose(model,
                                       sigmacut,
                                       doCompress=True,
                                       doInvisible=True,
                                       minmassgap=mingap)

        #Test clustering for UL results
        expResult = database.getExpResults(analysisIDs='CMS-SUS-16-039',
                                           dataTypes='upperLimit')[0]
        predictions = theoryPredictionsFor(expResult,
                                           toplist,
                                           combinedResults=False,
                                           marginalize=False)
        clusterSizes = sorted([len(p.elements) for p in predictions])
        self.assertEqual(clusterSizes, [1, 16, 24])
Example #14
    def testCombinedResult(self):
        predXSecs, rvalues = {}, {}
        for case in ["T1", "T5", "mixed"]:
            filename = self.createSLHAFile(case=case)
            BSMList = [
                gluino, st1, n1,
                st1.chargeConjugate(),
                n1.chargeConjugate(),
                gluino.chargeConjugate()
            ]
            model = Model(BSMList, SMList)
            model.updateParticles(filename)
            deco = decompose(model)

            expRes = database.getExpResults(
                analysisIDs=["CMS-SUS-16-050-agg"])[0]
            # print ( "Experimental result: %s" % expRes )
            tp = theoryPredictionsFor(expRes,
                                      deco,
                                      useBestDataset=False,
                                      combinedResults=True)
            for t in tp:
                predXSecs[case] = t.xsection.value
                rvalues[case] = t.getRValue(expected=True)
            os.unlink(filename)  # clean up the temporary SLHA file
        ## first test: the theory prediction of the mixed scenario should be 25% of the sum:
        ## the total cross section is a fixed quantity, and half of the mixed scenario
        ## goes into asymmetric branches, which we miss out on.
        self.assertAlmostEqual(
            (predXSecs["T1"] + predXSecs["T5"]).asNumber(fb),
            (4 * predXSecs["mixed"]).asNumber(fb), 2)

        ## second test: the r value of the mixed scenario * 2 must lie between the r values
        ## of the pure scenarios. The factor of two comes from the fact that we lose 50%
        ## to asymmetric branches
        self.assertTrue(rvalues["T5"] < 2 * rvalues["mixed"] < rvalues["T1"])
Example #15
model = Model(BSMparticles=BSMList, SMparticles=SMList)
model.updateParticles(inputFile=args.slhafile)
print("[combiner] loading database", args.database)
db = Database(args.database)
print("[combiner] done loading database")
# anaIds = ["CMS-SUS-16-033"]  # alternative: restrict to a single analysis
anaIds = ["all"]
dts = ["all"]
if args.upper_limits:
    dts = ["upperLimit"]
if args.efficiencyMaps:
    dts = ["efficiencyMap"]
listOfExpRes = db.getExpResults(analysisIDs=anaIds,
                                dataTypes=dts,
                                onlyWithExpected=True)
smses = decomposer.decompose(model, .01 * fb)
#print ( "[combiner] decomposed into %d topos" % len(smses) )
from smodels.theory.theoryPrediction import theoryPredictionsFor
combiner = Combiner()
allps = []
for expRes in listOfExpRes:
    preds = theoryPredictionsFor(expRes, smses)
    if preds is None:
        continue
    for pred in preds:
        allps.append(pred)
combo, globalZ, llhd, muhat = combiner.findHighestSignificance(
    allps, "aggressive", expected=args.expected)
print("[combiner] global Z is %.2f: %s (muhat=%.2f)" %
      (globalZ, combiner.getComboDescription(combo), muhat))
Example #16
        ret += _drawBranch(branch,
                           upwards=(ct == 0),
                           htmlFormat=html,
                           border=border,
                           l=max(l))
    return ret


if __name__ == "__main__":
    argparser = argparse.ArgumentParser(description="simple tool that is "
                                        "meant to draw lessagraphs, as an "
                                        "ascii plot")
    argparser.add_argument('-l',
                           '--lhe',
                           help="LHE file name",
                           type=str,
                           required=True)
    argparser.add_argument('-b',
                           '--border',
                           action='store_true',
                           help="draw a border around the graph")
    args = argparser.parse_args()

    filename = args.lhe

    model = Model(BSMparticles=BSMList, SMparticles=SMList)
    model.updateParticles(inputFile=filename)
    topList = decomposer.decompose(model)
    element = topList.getElements()[0]
    print(asciidraw(element, border=args.border))
Example #17
    def runSModelS(self, inputFile, sigmacut, allpreds, llhdonly):
        """ run smodels proper.
        :param inputFile: the input slha file
        :param sigmacut: the cut on the topology weights, typically 0.02*fb
        :param allpreds: if true, return all predictions of analyses, else
                         only best signal region
        :param llhdonly: if true, return only results with likelihoods
        """

        if not os.path.exists(inputFile):
            self.pprint("error, cannot find inputFile %s" % inputFile)
            return []
        model = Model(BSMList, SMList)
        model.updateParticles(inputFile=inputFile)

        mingap = 10 * GeV

        # self.log ( "Now decomposing" )
        topos = decomposer.decompose(model, sigmacut, minmassgap=mingap)
        self.log("decomposed model into %d topologies." % len(topos))

        if allpreds:
            bestDataSet = False
            combinedRes = False
        else:
            bestDataSet = True
            combinedRes = self.do_combine

        preds = []
        # self.log ( "start getting preds" )
        from smodels.tools import runtime
        runtime._experimental = True
        for expRes in self.listOfExpRes:
            predictions = theoryPredictionsFor(expRes,
                                               topos,
                                               useBestDataset=bestDataSet,
                                               combinedResults=combinedRes)
            if predictions is None:
                predictions = []
            if allpreds:
                combpreds = theoryPredictionsFor(
                    expRes,
                    topos,
                    useBestDataset=False,
                    combinedResults=self.do_combine)
                if combpreds is not None:
                    for c in combpreds:
                        predictions.append(c)
            for prediction in predictions:
                prediction.computeStatistics()
                if (not llhdonly) or (prediction.likelihood is not None):
                    preds.append(prediction)
        sap = "best preds"
        if allpreds:
            sap = "all preds"
        sllhd = ""
        if llhdonly:
            sllhd = ", llhds only"
        self.log ( "returning %d predictions, %s%s" % \
                   (len(preds),sap, sllhd ) )
        return preds
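A hypothetical call site, just to make the parameters concrete (the SLHA path is reused from the other examples; the keyword values follow the docstring):

    preds = self.runSModelS("./testFiles/slha/simplyGluino.slha",
                            sigmacut=0.02 * fb,  # typical cut, per the docstring
                            allpreds=True,       # all signal regions, not just the best
                            llhdonly=True)       # keep only predictions with likelihoods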
Example #18
def testPoint(inputFile, outputDir, parser, databaseVersion, listOfExpRes):
    """
    Test model point defined in input file (running decomposition, check
    results, test coverage)

    :parameter inputFile: path to input file
    :parameter outputDir: path to directory where the output is to be stored
    :parameter parser: ConfigParser storing information from parameters file
    :parameter databaseVersion: Database version (printed to output file)
    :parameter listOfExpRes: list of ExpResult objects to be considered
    :returns: output of printers
    """
    """Get run parameters and options from the parser"""
    sigmacut = parser.getfloat("parameters", "sigmacut") * fb
    minmassgap = parser.getfloat("parameters", "minmassgap") * GeV
    """Setup output printers"""
    masterPrinter = MPrinter()
    masterPrinter.setPrinterOptions(parser)
    masterPrinter.setOutPutFiles(
        os.path.join(outputDir, os.path.basename(inputFile)))
    """ Add list of analyses loaded to printer"""
    masterPrinter.addObj(ExpResultList(listOfExpRes))
    """Check input file for errors"""
    inputStatus = ioObjects.FileStatus()
    if parser.getboolean("options", "checkInput"):
        inputStatus.checkFile(inputFile)
    """Initialize output status and exit if there were errors in the input"""
    outputStatus = ioObjects.OutputStatus(inputStatus.status, inputFile,
                                          dict(parser.items("parameters")),
                                          databaseVersion)
    masterPrinter.addObj(outputStatus)
    if outputStatus.status < 0:
        return masterPrinter.flush()
    """
    Load the input model
    ====================
    """
    try:
        """
        Load the input model and update it with the information from the input file
        """
        from smodels.particlesLoader import BSMList
        model = Model(BSMparticles=BSMList, SMparticles=SMList)
        promptWidth = None
        stableWidth = None
        if parser.has_option("particles", "promptWidth"):
            promptWidth = parser.getfloat("particles", "promptWidth") * GeV
        if parser.has_option("particles", "stableWidth"):
            stableWidth = parser.getfloat("particles", "stableWidth") * GeV
        model.updateParticles(inputFile=inputFile,
                              promptWidth=promptWidth,
                              stableWidth=stableWidth)
    except SModelSError as e:
        print("Exception %s %s" % (e, type(e)))
        """ Update status to fail, print error message and exit """
        outputStatus.updateStatus(-1)
        return masterPrinter.flush()
    """
    Decompose input model
    =====================
    """

    try:
        """ Decompose the input model, store the output elements in smstoplist """
        sigmacut = parser.getfloat("parameters", "sigmacut") * fb
        smstoplist = decomposer.decompose(
            model,
            sigmacut,
            doCompress=parser.getboolean("options", "doCompress"),
            doInvisible=parser.getboolean("options", "doInvisible"),
            minmassgap=minmassgap)
    except SModelSError as e:
        print("Exception %s %s" % (e, type(e)))
        """ Update status to fail, print error message and exit """
        outputStatus.updateStatus(-1)
        return masterPrinter.flush()
    """ Print Decomposition output.
        If no topologies with sigma > sigmacut are found, update status, write
        output file, stop running """
    if not smstoplist:
        outputStatus.updateStatus(-3)
        return masterPrinter.flush()

    masterPrinter.addObj(smstoplist)
    """
    Compute theory predictions
    ====================================================
    """
    """ Get theory prediction for each analysis and print basic output """
    allPredictions = []
    combineResults = False
    try:
        combineResults = parser.getboolean("options", "combineSRs")
    except (NoSectionError, NoOptionError) as e:
        pass
    for expResult in listOfExpRes:
        theorypredictions = theoryPredictionsFor(
            expResult,
            smstoplist,
            useBestDataset=True,
            combinedResults=combineResults,
            marginalize=False)
        if not theorypredictions:
            continue
        allPredictions += theorypredictions._theoryPredictions
    """Compute chi-square and likelihood"""
    if parser.getboolean("options", "computeStatistics"):
        for theoPred in allPredictions:
            theoPred.computeStatistics()
    """ Define theory predictions list that collects all theoryPrediction objects which satisfy max condition."""
    maxcond = parser.getfloat("parameters", "maxcond")
    theoryPredictions = theoryPrediction.TheoryPredictionList(
        allPredictions, maxcond)

    if len(theoryPredictions) != 0:
        outputStatus.updateStatus(1)
        masterPrinter.addObj(theoryPredictions)
    else:
        outputStatus.updateStatus(0)  # no results after enforcing maxcond

    if parser.getboolean("options", "testCoverage"):
        """ Testing coverage of model point, add results to the output file """
        if parser.has_option("options", "coverageSqrts"):
            sqrts = parser.getfloat("options", "coverageSqrts") * TeV
        else:
            sqrts = None
        uncovered = coverage.Uncovered(smstoplist,
                                       sigmacut=sigmacut,
                                       sqrts=sqrts)
        masterPrinter.addObj(uncovered)

    return masterPrinter.flush()
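Calling testPoint standalone requires a parameters file and a database handle. A minimal sketch, assuming the standard SModelS parameters.ini layout and the official database (both file name and database label are assumptions here):

    from configparser import ConfigParser
    from smodels.experiment.databaseObj import Database

    parser = ConfigParser(inline_comment_prefixes=(';',))
    parser.read("parameters.ini")      # standard SModelS parameters file
    db = Database("official")          # or a path to a local database
    output = testPoint("inputFiles/slha/lightEWinos.slha",
                       outputDir="./results/",
                       parser=parser,
                       databaseVersion=db.databaseVersion,
                       listOfExpRes=db.getExpResults())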
Example #19
    def testClusteringUL(self):

        slhafile = 'testFiles/slha/lightEWinos.slha'
        model = Model(BSMparticles=BSMList, SMparticles=SMList)
        model.updateParticles(slhafile)
        sigmacut = 5. * fb
        mingap = 5. * GeV
        toplist = decomposer.decompose(model,
                                       sigmacut,
                                       doCompress=True,
                                       doInvisible=True,
                                       minmassgap=mingap)

        #Test clustering for UL results
        dataset = database.getExpResults(analysisIDs='ATLAS-SUSY-2013-02',
                                         datasetIDs=None)[0].getDataset(None)

        el1 = toplist[1].elementList[0]
        el2 = toplist[1].elementList[1]
        el3 = toplist[1].elementList[2]
        weights = [
            el.weight.getMaxXsec().asNumber(fb) for el in [el1, el2, el3]
        ]

        #Overwrite masses and txname label
        el1.mass = [[1000. * GeV, 100. * GeV]] * 2
        el2.mass = [[1020. * GeV, 100. * GeV]] * 2
        el3.mass = [[500. * GeV, 100. * GeV]] * 2
        el1.txname = el2.txname = el3.txname = 'T1'
        el1.eff = 1.  #(Used in clustering)
        el2.eff = 1.  #(Used in clustering)
        el3.eff = 1.  #(Used in clustering)

        #Check clustering with distinct elements
        clusters = clusterElements([el1, el2, el3],
                                   maxDist=0.2,
                                   dataset=dataset)
        self.assertEqual(len(clusters), 2)
        averageMasses = [
            [[(1000. * GeV * weights[0] + 1020. * GeV * weights[1]) /
              (weights[0] + weights[1]), 100. * GeV]] * 2, el3.mass
        ]
        elClusters = [[el1, el2], [el3]]
        for ic, cluster in enumerate(clusters):
            avgEl = cluster.averageElement()
            self.assertEqual(sorted(cluster.elements), sorted(elClusters[ic]))
            for ibr, br in enumerate(avgEl.mass):
                for im, m in enumerate(br):
                    self.assertAlmostEqual(
                        m.asNumber(GeV),
                        averageMasses[ic][ibr][im].asNumber(GeV))
            self.assertEqual(avgEl.totalwidth, elClusters[ic][0].totalwidth)

        #Check clustering with distinct elements but no maxDist limit
        clusters = clusterElements([el1, el2, el3],
                                   maxDist=10.,
                                   dataset=dataset)
        self.assertEqual(len(clusters), 1)
        cluster = clusters[0]
        avgEl = cluster.averageElement()
        averageMass = [[(1000. * GeV * weights[0] + 1020. * GeV * weights[1] +
                         500. * GeV * weights[2]) / sum(weights), 100. * GeV]
                       ] * 2
        self.assertEqual(sorted(cluster.elements), sorted([el1, el2, el3]))
        for ibr, br in enumerate(avgEl.mass):
            for im, m in enumerate(br):
                self.assertAlmostEqual(m.asNumber(GeV),
                                       averageMass[ibr][im].asNumber(GeV))
        self.assertEqual(avgEl.totalwidth, el1.totalwidth)

        #Check clustering where elements have same upper limits, but not the average element:
        el1._upperLimit = 1. * fb
        el2._upperLimit = 1. * fb
        el3._upperLimit = 1. * fb
        clusters = clusterElements([el1, el2, el3],
                                   maxDist=0.1,
                                   dataset=dataset)
        self.assertEqual(len(clusters), 2)
Example #20
def main():
    """
    Main program. Displays basic use case.
    """
    model = Model(BSMparticles=BSMList, SMparticles=SMList)
    # Path to input file (either a SLHA or LHE file)
    #     lhefile = 'inputFiles/lhe/gluino_squarks.lhe'
    slhafile = 'inputFiles/slha/lightEWinos.slha'
    #     model.updateParticles(inputFile=lhefile)
    model.updateParticles(inputFile=slhafile)

    # Set main options for decomposition
    sigmacut = 0.01 * fb
    mingap = 5. * GeV

    # Decompose model
    toplist = decomposer.decompose(model,
                                   sigmacut,
                                   doCompress=True,
                                   doInvisible=True,
                                   minmassgap=mingap)

    # Access basic information from decomposition, using the topology list and topology objects:
    print("\n Decomposition Results: ")
    print("\t  Total number of topologies: %i " % len(toplist))
    nel = sum([len(top.elementList) for top in toplist])
    print("\t  Total number of elements = %i " % nel)
    #Print information about the m-th topology:
    m = 2
    if len(toplist) > m:
        top = toplist[m]
        print("\t\t %i-th topology  = " % m, top, "with total cross section =",
              top.getTotalWeight())
        #Print information about the n-th element in the m-th topology:
        n = 0
        el = top.elementList[n]
        print("\t\t %i-th element from %i-th topology  = " % (n, m),
              el,
              end="")
        print("\n\t\t\twith final states =", el.getFinalStates(),
              "\n\t\t\twith cross section =", el.weight,
              "\n\t\t\tand masses = ", el.mass)

    # Load the experimental results to be used.
    # In this case, all results are employed.
    listOfExpRes = database.getExpResults()

    # Print basic information about the results loaded.
    # Count the number of loaded UL and EM experimental results:
    nUL, nEM = 0, 0
    for exp in listOfExpRes:
        expType = exp.datasets[0].dataInfo.dataType
        if expType == 'upperLimit':
            nUL += 1
        elif expType == 'efficiencyMap':
            nEM += 1
    print("\n Loaded Database with %i UL results and %i EM results " %
          (nUL, nEM))

    # Compute the theory predictions for each experimental result and print them:
    print("\n Theory Predictions and Constraints:")
    rmax = 0.
    bestResult = None
    for expResult in listOfExpRes:
        predictions = theoryPredictionsFor(expResult,
                                           toplist,
                                           combinedResults=False,
                                           marginalize=False)
        if not predictions:
            continue  # Skip if there are no constraints from this result
        print('\n %s ' % expResult.globalInfo.id)
        for theoryPrediction in predictions:
            dataset = theoryPrediction.dataset
            datasetID = theoryPrediction.dataId()
            mass = theoryPrediction.mass
            txnames = [str(txname) for txname in theoryPrediction.txnames]
            PIDs = theoryPrediction.PIDs
            print("------------------------")
            print("Dataset = ", datasetID)  #Analysis name
            print("TxNames = ", txnames)
            print("Prediction Mass = ",
                  mass)  #Average mass of the elements in the cluster
            print("Prediction PIDs = ",
                  PIDs)  #PDG ids of the BSM particles in the cluster elements
            print("Theory Prediction = ",
                  theoryPrediction.xsection)  #Signal cross section
            print("Condition Violation = ",
                  theoryPrediction.conditions)  #Condition violation values

            # Get the corresponding upper limit:
            print("UL for theory prediction = ", theoryPrediction.upperLimit)

            # Compute the r-value
            r = theoryPrediction.getRValue()
            print("r = ", r)
            #Compute likelihood and chi^2 for EM-type results:
            if dataset.getType() == 'efficiencyMap':
                theoryPrediction.computeStatistics()
                print('Chi2, likelihood=', theoryPrediction.chi2,
                      theoryPrediction.likelihood)
            if r > rmax:
                rmax = r
                bestResult = expResult.globalInfo.id

    # Print the most constraining experimental result
    print("\nThe largest r-value (theory/upper limit ratio) is ", rmax)
    if rmax > 1.:
        print("(The input model is likely excluded by %s)" % bestResult)
    else:
        print(
            "(The input model is not excluded by the simplified model results)"
        )

    #Find out missing topologies for sqrts=8*TeV:
    uncovered = coverage.Uncovered(toplist, sqrts=8. * TeV)
    #First sort coverage groups by label
    groups = sorted(uncovered.groups[:], key=lambda g: g.label)
    #Print uncovered cross-sections:
    for group in groups:
        print("\nTotal cross-section for %s (fb): %10.3E\n" %
              (group.description, group.getTotalXSec()))

    missingTopos = uncovered.getGroup('missing (prompt)')
    #Print some of the missing topologies:
    if missingTopos.generalElements:
        print('Missing topologies (up to 3):')
        for genEl in missingTopos.generalElements[:3]:
            print('Element:', genEl)
            print('\tcross-section (fb):', genEl.missingX)
    else:
        print("No missing topologies found\n")

    missingDisplaced = uncovered.getGroup('missing (displaced)')
    #Print elements with displaced decays:
    if missingDisplaced.generalElements:
        print('\nElements with displaced vertices (up to 2):')
        for genEl in missingDisplaced.generalElements[:2]:
            print('Element:', genEl)
            print('\tcross-section (fb):', genEl.missingX)
    else:
        print("\nNo displaced decays")