Example 1
def _combineElements(elements, dataset, maxDist):
    """
    Combine elements according to the data set type.
    If expResult == upper limit type, first group elements with different TxNames
    and then into mass clusters.
    If expResult == efficiency map type, group all elements into a single cluster.

    :parameter elements: list of elements (Element objects)
    :parameter expResult: Data Set to be considered (DataSet object)
    :returns: list of element clusters (ElementCluster objects)
    """

    clusters = []

    if dataset.getType() == 'efficiencyMap': #cluster all elements
        clusters += clusterTools.clusterElements(elements,maxDist,dataset)
    elif dataset.getType() == 'upperLimit': #Cluster each txname individually
        txnames = list(set([el.txname for el in elements]))
        for txname in txnames:
            txnameEls = [el for el in elements if el.txname == txname]
            clusters += clusterTools.clusterElements(txnameEls, maxDist, dataset)
    else:
        logger.warning("Unkown data type: %s. Data will be ignored."
                       % dataset.getType())

    return clusters
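
A minimal usage sketch (not from the original sources): it assumes `elements` comes from a SModelS decomposition and `dataset` from a database lookup; both names are hypothetical placeholders here.

# Hypothetical usage sketch for _combineElements; `elements` and `dataset`
# are assumed to be produced by SModelS decomposition and a database lookup.
maxDist = 0.2  # maximum mass distance for grouping two elements together

clusters = _combineElements(elements, dataset, maxDist)
for cluster in clusters:
    # the tests below access the combined elements via cluster.elements
    print(len(cluster.elements))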
Example 2
    def testClusterer(self):
        """ test the mass clusterer """
        from smodels.theory import lheReader, lheDecomposer, crossSection
        from smodels.theory import clusterTools
        from smodels.experiment.txnameObj import TxName, TxNameData
        from smodels.experiment.infoObj import Info
        from smodels.tools.physicsUnits import GeV, pb, fb
        import copy
        
        data = [[[[674.99*GeV, 199.999*GeV], [674.99*GeV, 199.999*GeV]], .03*fb],
                [[[725.0001*GeV, 200.*GeV], [725.0001*GeV, 200.*GeV]], .06*fb],
                [[[750.*GeV, 250.*GeV], [750.*GeV, 250.*GeV]], .03*fb]]
        info = Info(os.path.join("./database/8TeV/ATLAS/ATLAS-SUSY-2013-05/data/","dataInfo.txt"))
        globalInfo = Info(os.path.join("./database/8TeV/ATLAS/ATLAS-SUSY-2013-05/","globalInfo.txt"))
        txnameData=TxNameData(data, "efficiencyMap", Id=1)
        txname=TxName("./database/8TeV/ATLAS/ATLAS-SUSY-2013-05/data/T2bb.txt",globalInfo,info)
        txname.txnameData = txnameData

        filename = "./testFiles/lhe/simplyGluino.lhe"
        reader = lheReader.LheReader(filename)
        event = reader.next()
        reader.close()
        event_xsec=event.metainfo["totalxsec"]
        self.assertTrue(abs(event_xsec - 0.262*pb) < .1*pb)
        xsecs = crossSection.getXsecFromLHEFile(filename)
        element = lheDecomposer.elementFromEvent(event, xsecs )
        element.txname=None
        e0=copy.deepcopy(element) ## has a gluino with mass of 675 GeV


        ## make a second element with a slightly different gluino mass
        e1=copy.deepcopy(element)
        e1.branches[0].masses[0]=725*GeV
        e1.branches[1].masses[0]=725*GeV
        e0.txname = txname
        e1.txname = txname

        # let's now cluster the two different gluino masses
        newel = clusterTools.groupAll([e0, e1])
        newmasses = newel.getAvgMass()
        ## for efficiency maps the average mass is None, since it is not meaningful
        self.assertTrue(newmasses is None)

        txname.txnameData.dataTag = 'upperLimits'
        newel = clusterTools.clusterElements([e0, e1], 5.)
        ## this example gives an average cluster mass of 700 GeV
        self.assertTrue(newel[0].getAvgMass()[0][0] == 700.*GeV)
        
        newel = clusterTools.clusterElements([e0, e1], .5)
        # here the distance exceeds maxDist, so the elements are not clustered
        self.assertTrue(len(newel) == 2)
Example 3
def _combineElements(elements, dataset, maxDist):
    """
    Combine elements according to the data set type.    
    If expResult == upper limit type, first group elements with different TxNames
    and then into mass clusters.
    If expResult == efficiency map type, group all elements into a single cluster.
    
    :parameter elements: list of elements (Element objects)
    :parameter expResult: Data Set to be considered (DataSet object)
    :returns: list of element clusters (ElementCluster objects)    
    """
    
    clusters = []   
    
    if dataset.dataInfo.dataType == 'efficiencyMap':        
        cluster = clusterTools.groupAll(elements)  
        clusters.append(cluster)
    elif dataset.dataInfo.dataType == 'upperLimit':
        txnames = list(set([el.txname for el in elements]))
        for txname in txnames:
            txnameEls = [el for el in elements if el.txname == txname]
            clusters += clusterTools.clusterElements(txnameEls, maxDist)
    else:
        logger.warning("Unkown data type: %s. Data will be ignored." 
                       % dataset.dataInfo.dataType)
                
    return clusters
Example 4
def _combineElements(elements, dataset, maxDist):
    """
    Combine elements according to the data set type.
    If expResult == upper limit type, first group elements with different TxNames
    and then into mass clusters.
    If expResult == efficiency map type, group all elements into a single cluster.

    :parameter elements: list of elements (Element objects)
    :parameter expResult: Data Set to be considered (DataSet object)
    :returns: list of element clusters (ElementCluster objects)
    """

    clusters = []

    if dataset.getType() == 'efficiencyMap':
        cluster = clusterTools.groupAll(elements)
        clusters.append(cluster)
    elif dataset.getType() == 'upperLimit':
        txnames = list(set([el.txname for el in elements]))
        for txname in txnames:
            txnameEls = [el for el in elements if el.txname == txname]
            clusters += clusterTools.clusterElements(txnameEls, maxDist)
    else:
        logger.warning("Unkown data type: %s. Data will be ignored."
                       % dataset.getType())

    return clusters
Example 5
def _combineElements(elements, analysis, maxDist):
    """
    Combine elements according to the analysis type.    
    If analysis == upper limit type, group elements into mass clusters. If
    analysis == efficiency map type, group all elements into a single cluster.
    
    :parameter elements: list of elements (Element objects)
    :parameter analysis: analysis to be considered (ULanalysis or EManalysis object)
    :parameter maxDist: maximum mass distance for clustering two elements
    :returns: list of element clusters (ElementCluster objects)
    
    """
    if isinstance(analysis, EManalysis):
        clusters = [clusterTools.groupAll(elements)]
    elif isinstance(analysis, ULanalysis):
        clusters = clusterTools.clusterElements(elements, analysis, maxDist)
    else:
        clusters = []  # unknown analysis type: return no clusters
    return clusters
Example 6
    def testClusteringEM(self):

        slhafile = 'testFiles/slha/lightEWinos.slha'
        model = Model(BSMparticles=BSMList, SMparticles=SMList)
        model.updateParticles(slhafile)
        sigmacut = 5. * fb
        mingap = 5. * GeV
        toplist = decomposer.decompose(model,
                                       sigmacut,
                                       doCompress=True,
                                       doInvisible=True,
                                       minmassgap=mingap)

        #Test clustering for EM results
        dataset = database.getExpResults(
            analysisIDs='CMS-SUS-13-012',
            datasetIDs='3NJet6_800HT1000_300MHT450')[0].getDataset(
                '3NJet6_800HT1000_300MHT450')

        el1 = toplist[0].elementList[0].copy()
        el2 = toplist[0].elementList[1].copy()
        el3 = toplist[2].elementList[1].copy()
        el1.eff = 1.  #(Used in clustering)
        el2.eff = 1.  #(Used in clustering)
        el3.eff = 1.  #(Used in clustering)
        #All elements have the same UL (for EM results)
        el1._upperLimit = el2._upperLimit = el3._upperLimit = 1. * fb
        #Clustering should not depend on the mass, width or txname:
        el1.txname = el2.txname = el3.txname = None
        el1.mass = [[1000. * GeV, 10 * GeV]] * 2
        el2.mass = [[1500. * GeV, 10 * GeV]] * 2
        el3.mass = [[200. * GeV, 100 * GeV, 90. * GeV]] * 2
        el3.totalwidth = [[1e-10 * GeV, 1e-15 * GeV, 0. * GeV]] * 2
        clusters = clusterElements([el1, el2, el3],
                                   maxDist=0.2,
                                   dataset=dataset)
        self.assertEqual(len(clusters), 1)
        self.assertEqual(sorted(clusters[0].elements), sorted([el1, el2, el3]))
        self.assertEqual(clusters[0].averageElement().mass, None)
        self.assertEqual(clusters[0].averageElement().totalwidth, None)
Example 7
    def testClusterer(self):
        """ test the mass clusterer """
        from smodels.theory import lheReader, lheDecomposer, crossSection
        from smodels.theory import clusterTools
        from smodels.experiment.txnameObj import TxName, TxNameData
        from smodels.experiment.infoObj import Info
        from smodels.tools.physicsUnits import GeV, pb, fb
        import copy

        data = [[[[674.99 * GeV, 199.999 * GeV], [674.99 * GeV,
                                                  199.999 * GeV]], .03 * fb],
                [[[725.0001 * GeV, 200. * GeV], [725.0001 * GeV, 200. * GeV]],
                 .06 * fb],
                [[[750. * GeV, 250. * GeV], [750. * GeV, 250. * GeV]],
                 .03 * fb]]
        info = Info(
            os.path.join("./database/8TeV/ATLAS/ATLAS-SUSY-2013-05/data/",
                         "dataInfo.txt"))
        globalInfo = Info(
            os.path.join("./database/8TeV/ATLAS/ATLAS-SUSY-2013-05/",
                         "globalInfo.txt"))
        txnameData = TxNameData(data, "efficiencyMap", Id=1)
        txname = TxName(
            "./database/8TeV/ATLAS/ATLAS-SUSY-2013-05/data/T2bb.txt",
            globalInfo, info)
        txname.txnameData = txnameData

        filename = "./testFiles/lhe/simplyGluino.lhe"
        reader = lheReader.LheReader(filename)
        event = reader.next()
        reader.close()
        event_xsec = event.metainfo["totalxsec"]
        self.assertTrue(abs(event_xsec - 0.262 * pb) < .1 * pb)
        xsecs = crossSection.getXsecFromLHEFile(filename)
        element = lheDecomposer.elementFromEvent(event, xsecs)
        element.txname = None
        e0 = copy.deepcopy(element)  ## has a gluino with mass of 675 GeV

        ## make a second element with a slightly different gluino mass
        e1 = copy.deepcopy(element)
        e1.branches[0].masses[0] = 725 * GeV
        e1.branches[1].masses[0] = 725 * GeV
        e0.txname = txname
        e1.txname = txname

        # let's now cluster the two different gluino masses
        newel = clusterTools.groupAll([e0, e1])
        newmasses = newel.getAvgMass()
        ## for efficiency maps the average mass is None, since it is not meaningful
        self.assertTrue(newmasses is None)

        txname.txnameData.dataTag = 'upperLimits'
        newel = clusterTools.clusterElements([e0, e1], 5.)
        ## this example gives an average cluster mass of 700 GeV
        self.assertTrue(newel[0].getAvgMass()[0][0] == 700. * GeV)

        newel = clusterTools.clusterElements([e0, e1], .5)
        # here the distance exceeds maxDist, so the elements are not clustered
        self.assertTrue(len(newel) == 2)
Example 8
    def testSimpleCluster(self):
        """ test the mass clusterer """

        data = [[[[674.99 * GeV, 199.999 * GeV], [674.99 * GeV,
                                                  199.999 * GeV]], .03 * fb],
                [[[725.0001 * GeV, 200. * GeV], [725.0001 * GeV, 200. * GeV]],
                 .06 * fb],
                [[[750. * GeV, 250. * GeV], [750. * GeV, 250. * GeV]],
                 .03 * fb]]
        info = Info(
            "./database/8TeV/ATLAS/ATLAS-SUSY-2013-05/data/dataInfo.txt")
        globalInfo = Info(
            "./database/8TeV/ATLAS/ATLAS-SUSY-2013-05/globalInfo.txt")
        txnameData = TxNameData(data, "upperLimit", Id=1)
        txname = TxName(
            "./database/8TeV/ATLAS/ATLAS-SUSY-2013-05/data/T2bb.txt",
            globalInfo, info, finalStates)
        txname.txnameData = txnameData
        dataset = DataSet(info=globalInfo, createInfo=False)
        dataset.dataInfo = info
        dataset.txnameList = [txname]

        u = SMparticles.u
        gluino = mssm.gluino.copy()
        gluino.__setattr__("mass", 675. * GeV)
        gluino.__setattr__('totalwidth', float('inf') * GeV)
        n1 = mssm.n1.copy()
        n1.__setattr__("mass", 200. * GeV)
        n1.__setattr__('totalwidth', 0. * GeV)

        w1 = XSectionList()
        w1.xSections.append(XSection())
        w1.xSections[0].info = XSectionInfo()
        w1.xSections[0].info.sqrts = 8. * TeV
        w1.xSections[0].info.label = '8 TeV'
        w1.xSections[0].info.order = 0
        w1.xSections[0].value = 10. * fb

        b1 = Branch()
        b1.evenParticles = [[u, u]]
        b1.oddParticles = [gluino, n1]
        b2 = b1.copy()
        el1 = Element()
        el1.branches = [b1, b2]
        el1.weight = w1
        el1.txname = txname
        el1.eff = 1.  #(Used in clustering)

        ## make a second element with a slightly different gluino mass
        el2 = el1.copy()
        el2.motherElements = [el2]  #Ensure el2 and el1 are not related
        el2.txname = txname
        el2.branches[0].oddParticles = [
            ptc.copy() for ptc in el1.branches[0].oddParticles
        ]
        el2.branches[1].oddParticles = [
            ptc.copy() for ptc in el1.branches[1].oddParticles
        ]
        el2.branches[0].oddParticles[0].__setattr__("mass", 725. * GeV)
        el2.branches[1].oddParticles[0].__setattr__("mass", 725. * GeV)
        el2.eff = 1.  #(Used in clustering)

        #Cluster for upper limits (all elements close in upper limit should be clustered together)
        maxDist = 5.  #Cluster all elements
        newel = clusterTools.clusterElements([el1, el2], maxDist, dataset)[0]
        newmasses = newel.averageElement().mass
        self.assertEqual(newmasses, [[700. * GeV, 200. * GeV]] * 2)

        maxDist = 0.5  #Elements differ and should not be clustered
        newel = clusterTools.clusterElements([el1, el2], maxDist, dataset)
        #in this example the distance is not in maxdist, so we dont cluster
        self.assertTrue(len(newel) == 2)

        info = Info(
            "./database/8TeV/CMS/CMS-SUS-13-012-eff/6NJet8_1000HT1250_200MHT300/dataInfo.txt"
        )
        globalInfo = Info(
            "./database/8TeV/CMS/CMS-SUS-13-012-eff/globalInfo.txt")
        txnameData = TxNameData(data, "efficiencyMap", Id=1)
        txname = TxName(
            "./database/8TeV/CMS/CMS-SUS-13-012-eff/6NJet8_1000HT1250_200MHT300/T2.txt",
            globalInfo, info, finalStates)
        txname.txnameData = txnameData
        dataset = DataSet(info=globalInfo, createInfo=False)
        dataset.dataInfo = info
        dataset.txnameList = [txname]

        #Cluster for efficiency maps (all elements should be clustered together independent of maxDist)
        maxDist = 0.001
        newel = clusterTools.clusterElements([el1, el2], maxDist, dataset)[0]
        newmasses = newel.averageElement().mass
        self.assertEqual(newmasses, [[700. * GeV, 200. * GeV]] * 2)
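
The 700 GeV expectations in this test are just cross-section-weighted averages: el2 is a copy of el1, so both carry the same 10 fb weight, and the gluino masses 675 GeV and 725 GeV average to 700 GeV. A standalone arithmetic sketch:

# Equal-weight average reproducing the 700 GeV asserted above.
weights = [10., 10.]   # fb, identical cross sections of el1 and el2
masses = [675., 725.]  # GeV, gluino masses of el1 and el2
avg = sum(w*m for w, m in zip(weights, masses)) / sum(weights)
assert avg == 700.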
Example 9
    def testClustererLifeTimes(self):
        """ test the clustering with distinct lifetimes"""

        data = [[[[674.99 * GeV, 199.999 * GeV], [674.99 * GeV,
                                                  199.999 * GeV]], .03 * fb],
                [[[725.0001 * GeV, 200. * GeV], [725.0001 * GeV, 200. * GeV]],
                 .06 * fb],
                [[[750. * GeV, 250. * GeV], [750. * GeV, 250. * GeV]],
                 .03 * fb]]
        info = Info(
            "./database/8TeV/ATLAS/ATLAS-SUSY-2013-05/data/dataInfo.txt")
        globalInfo = Info(
            "./database/8TeV/ATLAS/ATLAS-SUSY-2013-05/globalInfo.txt")
        txnameData = TxNameData(data, "upperLimit", Id=1)
        txname = TxName(
            "./database/8TeV/ATLAS/ATLAS-SUSY-2013-05/data/T2bb.txt",
            globalInfo, info, finalStates)
        txname.txnameData = txnameData
        dataset = DataSet(info=globalInfo, createInfo=False)
        dataset.dataInfo = info
        dataset.txnameList = [txname]

        u = SMparticles.u
        gluino = mssm.gluino.copy()
        gluino.__setattr__("mass", 675. * GeV)
        gluino.__setattr__('totalwidth', 1e-15 * GeV)
        n1 = mssm.n1.copy()
        n1.__setattr__("mass", 200. * GeV)
        n1.__setattr__('totalwidth', 0. * GeV)

        w1 = XSectionList()
        w1.xSections.append(XSection())
        w1.xSections[0].info = XSectionInfo()
        w1.xSections[0].info.sqrts = 8. * TeV
        w1.xSections[0].info.label = '8 TeV'
        w1.xSections[0].info.order = 0
        w1.xSections[0].value = 10. * fb

        b1 = Branch()
        b1.evenParticles = [ParticleList([u, u])]
        b1.oddParticles = [gluino, n1]
        b2 = b1.copy()
        el1 = Element()
        el1.branches = [b1, b2]
        el1.weight = w1
        el1.txname = txname
        el1.eff = 1.  #(Used in clustering)

        ## make a second element with a slightly different gluino width
        el2 = el1.copy()
        el2.motherElements = [el2]  #Ensure el2 and el1 are not related
        el2.txname = txname
        el2.branches[0].oddParticles = [
            ptc.copy() for ptc in el1.branches[0].oddParticles
        ]
        el2.branches[1].oddParticles = [
            ptc.copy() for ptc in el1.branches[1].oddParticles
        ]
        el2.eff = 1.  #(Used in clustering)

        el2.branches[0].oddParticles[0].__setattr__("mass", 675. * GeV)
        el2.branches[1].oddParticles[0].__setattr__("mass", 675. * GeV)
        el2.branches[0].oddParticles[0].__setattr__("totalwidth",
                                                    0.9e-15 * GeV)
        el2.branches[1].oddParticles[0].__setattr__("totalwidth",
                                                    0.9e-15 * GeV)

        newel = clusterTools.clusterElements([el1, el2], 5., dataset)
        ## both elements have gluino mass 675 GeV; the widths average to 0.95e-15 GeV
        self.assertEqual(newel[0].averageElement().mass[0][0], 675. * GeV)
        self.assertAlmostEqual(
            newel[0].averageElement().totalwidth[0][0].asNumber(GeV) * 1e15,
            0.95)

        newel = clusterTools.clusterElements([el1, el2], .5, dataset)
        # here the distance is within maxDist, so the elements are clustered
        self.assertTrue(len(newel) == 1)

        newel = clusterTools.clusterElements([el1, el2], .1, dataset)
        # here the distance exceeds maxDist, so the elements are not clustered
        self.assertTrue(len(newel) == 2)
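
The width assertion follows the same equal-weight averaging: both elements again carry the same 10 fb weight, so (1.0e-15 + 0.9e-15)/2 = 0.95e-15 GeV. In sketch form:

# Equal-weight average of the two gluino widths used in the test.
widths = [1e-15, 0.9e-15]  # GeV
avg = sum(widths) / len(widths)
assert abs(avg - 0.95e-15) < 1e-20  # matches the assertAlmostEqual above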
Example 10
    def testClusteringUL(self):

        slhafile = 'testFiles/slha/lightEWinos.slha'
        model = Model(BSMparticles=BSMList, SMparticles=SMList)
        model.updateParticles(slhafile)
        sigmacut = 5. * fb
        mingap = 5. * GeV
        toplist = decomposer.decompose(model,
                                       sigmacut,
                                       doCompress=True,
                                       doInvisible=True,
                                       minmassgap=mingap)

        #Test clustering for UL results
        dataset = database.getExpResults(analysisIDs='ATLAS-SUSY-2013-02',
                                         datasetIDs=None)[0].getDataset(None)

        el1 = toplist[1].elementList[0]
        el2 = toplist[1].elementList[1]
        el3 = toplist[1].elementList[2]
        weights = [
            el.weight.getMaxXsec().asNumber(fb) for el in [el1, el2, el3]
        ]

        #Overwrite masses and txname label
        el1.mass = [[1000. * GeV, 100. * GeV]] * 2
        el2.mass = [[1020. * GeV, 100. * GeV]] * 2
        el3.mass = [[500. * GeV, 100. * GeV]] * 2
        el1.txname = el2.txname = el3.txname = 'T1'
        el1.eff = 1.  #(Used in clustering)
        el2.eff = 1.  #(Used in clustering)
        el3.eff = 1.  #(Used in clustering)

        #Check clustering with distinct elements
        clusters = clusterElements([el1, el2, el3],
                                   maxDist=0.2,
                                   dataset=dataset)
        self.assertEqual(len(clusters), 2)
        averageMasses = [
            [[(1000. * GeV * weights[0] + 1020. * GeV * weights[1]) /
              (weights[0] + weights[1]), 100. * GeV]] * 2, el3.mass
        ]
        elClusters = [[el1, el2], [el3]]
        for ic, cluster in enumerate(clusters):
            avgEl = cluster.averageElement()
            self.assertEqual(sorted(cluster.elements), sorted(elClusters[ic]))
            for ibr, br in enumerate(avgEl.mass):
                for im, m in enumerate(br):
                    self.assertAlmostEqual(
                        m.asNumber(GeV),
                        averageMasses[ic][ibr][im].asNumber(GeV))
            self.assertEqual(avgEl.totalwidth, elClusters[ic][0].totalwidth)

        #Check clustering with distinct elements but no maxDist limit
        clusters = clusterElements([el1, el2, el3],
                                   maxDist=10.,
                                   dataset=dataset)
        self.assertEqual(len(clusters), 1)
        cluster = clusters[0]
        avgEl = cluster.averageElement()
        averageMass = [[(1000. * GeV * weights[0] + 1020. * GeV * weights[1] +
                         500. * GeV * weights[2]) / sum(weights), 100. * GeV]
                       ] * 2
        self.assertEqual(sorted(cluster.elements), sorted([el1, el2, el3]))
        for ibr, br in enumerate(avgEl.mass):
            for im, m in enumerate(br):
                self.assertAlmostEqual(m.asNumber(GeV),
                                       averageMass[ibr][im].asNumber(GeV))
        self.assertEqual(avgEl.totalwidth, el1.totalwidth)

        #Check clustering where the elements share the same upper limit, but the average element does not:
        el1._upperLimit = 1. * fb
        el2._upperLimit = 1. * fb
        el3._upperLimit = 1. * fb
        clusters = clusterElements([el1, el2, el3],
                                   maxDist=0.1,
                                   dataset=dataset)
        self.assertEqual(len(clusters), 2)
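
For reference, the averageMasses expression built in this test is a cross-section-weighted mean. A minimal sketch of that formula, with `weights` standing in for the el.weight.getMaxXsec().asNumber(fb) values collected above:

# Cross-section-weighted average mass, mirroring `averageMasses` above.
def weighted_avg(masses, weights):
    return sum(m*w for m, w in zip(masses, weights)) / sum(weights)

# e.g. for the el1/el2 cluster: weighted_avg([1000., 1020.], weights[:2])
# yields a value between 1000 and 1020 GeV, checked via assertAlmostEqual.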