Example 1
    def _formatElement(self, obj):
        """
        Format data for an Element object.

        :param obj: An Element object to be printed.
        """

        elDic = {}
        elDic["ID"] = obj.elID
        elDic["Particles"] = str(obj.getParticles())
        elDic["Masses (GeV)"] = [[m.asNumber(GeV) for m in br] for br in obj.getMasses()]
        elDic["PIDs"] = obj.getPIDs()
        elDic["Weights (fb)"] = {}
        elDic["final states"] = obj.getFinalStates()
        sqrts = [info.sqrts.asNumber(TeV) for info in obj.weight.getInfo()]
        allsqrts = sorted(list(set(sqrts)))
        for ssqrts in allsqrts:
            sqrts = ssqrts * TeV
            xsecs = [xsec.value.asNumber(fb) for xsec in obj.weight.getXsecsFor(sqrts)]
            if len(xsecs) != 1:
                logger.warning("Element cross sections contain multiple values for %s .\
                Only the first cross section will be printed" %str(sqrts))
            xsecs = xsecs[0]
            sqrtsStr = 'xsec '+str(sqrts.asNumber(TeV))+' TeV'
            elDic["Weights (fb)"][sqrtsStr] = xsecs
        return elDic
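A hedged illustration of the dictionary the method above returns; the keys come from the code, while all values shown are hypothetical:

# {
#     "ID": 1,
#     "Particles": "[[['b']],[['b']]]",
#     "Masses (GeV)": [[500.0, 100.0], [500.0, 100.0]],
#     "PIDs": [[[1000005, 1000022]], [[1000005, 1000022]]],
#     "final states": ["MET", "MET"],
#     "Weights (fb)": {"xsec 8.0 TeV": 0.42, "xsec 13.0 TeV": 1.7}
# }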
Example 2
def equalObjs(obj1,obj2,allowedDiff,ignore=[], where=None ):
    """
    Compare two objects.
    The numerical values are compared up to the precision defined by allowedDiff.
 
    :param obj1: First python object to be compared
    :param obj2: Second python object to be compared
    :param allowedDiff: Allowed % difference between two numerical values
    :param ignore: List of keys to be ignored
    :param where: keep track of where we are, for easier debugging.
    :return: True/False
    """
    if type(obj1) in [float, int] and type(obj2) in [float, int]:
        obj1, obj2 = float(obj1), float(obj2)
 
    if type(obj1) != type(obj2):
        logger.warning("Data types differ (%s,%s)" %(type(obj1),type(obj2)))
        return False
 
    if isinstance(obj1,unum.Unum):
        if obj1 == obj2:
            return True
        diff = 2.*abs(obj1-obj2)/abs(obj1+obj2)
        return diff.asNumber() < allowedDiff
    elif isinstance(obj1,float):
        if obj1 == obj2:
            return True
        diff = 2.*abs(obj1-obj2)/abs(obj1+obj2)
        return diff < allowedDiff
    elif isinstance(obj1,str):
        return obj1 == obj2
    elif isinstance(obj1,dict):
        for key in obj1:
            if key in ignore: continue
            if not key in obj2:
                logger.warning("Key %s missing" %key)
                return False
            if not equalObjs(obj1[key],obj2[key],allowedDiff, ignore=ignore, where=key ):
                logger.warning('Objects differ in %s:\n   %s\n and\n   %s' %(where, str(obj1[key]),str(obj2[key])))
                return False
    elif isinstance(obj1,list):
        if len(obj1) != len(obj2):
            logger.warning('Lists differ in length:\n   %i (this run)\n and\n   %i (default)' %\
                                (len(obj1),len(obj2)))
            return False
        for ival, val in enumerate(obj1):
            if not equalObjs(val, obj2[ival], allowedDiff, ignore=ignore, where=where):
                logger.warning('Lists differ:\n   %s (this run)\n and\n   %s (default)' %\
                                (str(val),str(obj2[ival])))
                return False
    else:
        return obj1 == obj2
 
    return True
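A minimal usage sketch for equalObjs, assuming the function above is defined in the same session; the dictionaries and tolerances are hypothetical:

import logging
logging.basicConfig()
logger = logging.getLogger(__name__)

old = {"ID": 1, "Weights (fb)": {"xsec 13.0 TeV": 1.000}}
new = {"ID": 1, "Weights (fb)": {"xsec 13.0 TeV": 1.002}}
print(equalObjs(old, new, allowedDiff=0.01))   # True: relative difference ~0.2% is below 1%
print(equalObjs(old, new, allowedDiff=1e-4))   # False: ~0.2% exceeds 0.01%, differences are logged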
Example 3
    def getEfficiencyFor(self,expid,dataset,txname,massarray):
        """
        Get an efficiency for the given experimental id, 
        the dataset name, the txname, and the massarray.
        Can only be used for EfficiencyMap-type experimental results.
        Interpolation is done, if necessary.

        :param expid: experimental id (string)
        :param dataset: dataset name (string)
        :param txname: txname (string).
        :param massarray: list of masses with units, e.g.
                          [[ 400.*GeV, 100.*GeV],[400.*GeV, 100.*GeV]]
        :return: efficiency
        """
        #First select the experimental results matching the id and the result type:
        expres = None
        for expResult in self:
            if expResult.getValuesFor('id')[0] != expid:
                continue
            else:
                if 'efficiencyMap' in expResult.getValuesFor('dataType'):
                    expres = expResult
                    break

        if not expres:
            logger.warning( "Could not find efficiencyMap result %s."\
                   " getEfficiencyForr can only be\
            used for efficiencyMap results." % (expid))
            return None

        return expres.getEfficiencyFor(txname=txname, mass=massarray, dataset=dataset)
Example 4
    def getULForSR(self,expid,datasetID):
        """
        Get an upper limit for the given experimental id and dataset (signal region).
        Can only be used for efficiency-map results.
        :param expid: experimental id (string)
        :param datasetID: string defining the dataset id, e.g. ANA5-CUT3.
        :return: upper limit [fb]
        """

        #First select the experimental results matching the id and the result type:
        expres = None
        for expResult in self:
            if expResult.getValuesFor('id')[0] != expid:
                continue
            else:
                if 'efficiencyMap' in expResult.getValuesFor('dataType'):
                    expres = expResult
                    break

        if not expres:
            logger.warning ("Could not find efficiency-map result %s . getULForSR can only be\
            used for efficiency-map results." % (expid))
            return None

        for dataset in expres.datasets:
            if dataset.getID() != datasetID:
                continue
            return dataset.getSRUpperLimit()

        logger.warning ( "Could not find dataset %s ." % (datasetID))
        return None
Example 5
def _combineElements(elements, dataset, maxDist):
    """
    Combine elements according to the data set type.
    If the data set is of upper-limit type, first group the elements by TxName
    and then into mass clusters.
    If it is of efficiency-map type, group all elements into a single cluster.

    :parameter elements: list of elements (Element objects)
    :parameter dataset: data set to be considered (DataSet object)
    :parameter maxDist: maximum mass distance used when clustering the elements
    :returns: list of element clusters (ElementCluster objects)
    """

    clusters = []

    if dataset.getType() == 'efficiencyMap':
        cluster = clusterTools.groupAll(elements)
        clusters.append(cluster)
    elif dataset.getType() == 'upperLimit':
        txnames = list(set([el.txname for el in elements]))
        for txname in txnames:
            txnameEls = [element for element in elements if element.txname == txname]
            txnameClusters = clusterTools.clusterElements(txnameEls, maxDist)
            clusters += txnameClusters
    else:
        logger.warning("Unkown data type: %s. Data will be ignored."
                       % dataset.getType())

    return clusters
Example 6
    def __add__(self,other):
        newList = self.copy()
        if type(other) != type(self):
            logger.warning("Trying to add a XSectionList and a "+str(type(other)))
            return self

        newList.combineWith(other)
        return newList
Example 7
    def __init__(self, base=None, force_load = None, discard_zeroes = True,
                  progressbar = False, subpickle = True):
        """
        :param base: path to the database, or pickle file (string), or http
            address. If None, "official", or "official_fastlim",
            use the official database (including fastlim results, if specified).
        :param force_load: force loading the text database ("txt"),
            or binary database ("pcl"); don't force anything if None
        :param discard_zeroes: discard txnames with only zeroes as entries.
        :param progressbar: show a progressbar when building pickle file
                            (needs the python-progressbar module)
        :param subpickle: produce small pickle files per exp result.
            Should only be used when working on the database.
        """
        self.source=""
        self.force_load = force_load
        self.subpickle = subpickle
        if base in [ None, "official" ]:
            from smodels.installation import officialDatabase
            base = officialDatabase()
        if base in [ "official_fastlim" ]:
            from smodels.installation import officialDatabase
            base = officialDatabase( fastlim=True )
        if base in [ "unittest" ]:
            from smodels.installation import testDatabase
            base = testDatabase()
        base, pclfile = self.checkPathName(base, discard_zeroes )
        self.pcl_meta = Meta( pclfile )
        self.expResultList = []
        self.txt_meta = self.pcl_meta
        if not self.force_load == "pcl":
            self.txt_meta = Meta ( base, discard_zeroes = discard_zeroes )
        self.progressbar = None
        if progressbar:
            try:
                import progressbar as P
                self.progressbar = P.ProgressBar( widgets=
                        [ "Building Database ", P.Percentage(),
                          P.Bar( marker=P.RotatingMarker() ), P.ETA() ] )
            except ImportError:
                logger.warning ( "progressbar requested, but python-progressbar is not installed." )

        if self.force_load=="txt":
            self.loadTextDatabase()
            self.txt_meta.printFastlimBanner()
            return
        if self.force_load=="pcl":
            self.loadBinaryFile()
            self.pcl_meta.printFastlimBanner()
            return
        if self.force_load in [ None, "none", "None" ]:
            self.loadDatabase()
            self.txt_meta.printFastlimBanner()
            return
        logger.error ( "when initialising database: force_load=%s is not " \
                       "recognized. Valid values are: pcl, txt, None." % force_load )
        sys.exit()
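A hedged construction sketch; the string shortcuts mirror the branches handled above, and the local path is hypothetical:

# db = Database()                      # same as Database("official"): load the official database
# db = Database("official_fastlim")    # official database including fastlim results
# db = Database("unittest")            # test database
# db = Database("/path/to/database", force_load="txt", progressbar=True)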
Example 8
def _getDictionariesFromSLHA(slhafile):
    """
    Create mass and BR dictionaries from an SLHA file.
    Ignore decay blocks with R-parity violating or unknown decays

    """

    from smodels.particlesLoader import rEven, rOdd

    res = pyslha.readSLHAFile(slhafile)

   
    # Get mass and branching ratios for all particles
    brDic = {}
    writeIgnoreMessage(res.decays.keys(), rEven, rOdd)

    for pid in res.decays.keys():
        if not pid in rOdd:
            continue
        brs = []
        for decay in res.decays[pid].decays:
            nEven = nOdd = 0.
            for pidd in decay.ids:
                if pidd in rOdd: nOdd += 1
                elif pidd in rEven: nEven += 1
                else:
                    logger.warning("Particle %i not defined in particles.py,decay %i -> [%s] will be ignored" %(pidd,pid,decay.ids))
                    break
            if nOdd + nEven == len(decay.ids) and nOdd == 1:
                brs.append(decay)
            else:
                logger.info("Ignoring decay: %i -> [%s]",pid,decay.ids)

        brsConj = copy.deepcopy(brs)
        for br in brsConj:
            br.ids = [-x for x in br.ids]
        brDic[pid] = brs
        brDic[-pid] = brsConj
    # Get mass list for all particles
    massDic = dict(res.blocks['MASS'].items())
    for pid in list ( massDic.keys() )[:]:
        massDic[pid] = round(abs(massDic[pid]),1)*GeV
        if not -pid in massDic: massDic[-pid] = massDic[pid] 

    #Include proxy for displaced decays
    if 0 in massDic or 0 in brDic:
        logger.error("PDG = 0 is reserved for displaced decays and it can not be used for other particles. Please redefine the input model PDG assignments.")
        raise SModelSError()
    else:
        dispPid = 0
        massDic[dispPid] = 0. * GeV
        dispDec = pyslha.Decay(br=1., ids=[], nda=0)
        brDic[dispPid] = [dispDec]
   
 
    return brDic, massDic
Example 9
 def hasOnlyZeroes ( self ):
     ozs = self.txnameData.onlyZeroValues()
     if self.txnameDataExp:
         e_ozs = self.txnameDataExp.onlyZeroValues()
         if ozs and e_ozs:
             return True
         if (ozs and not e_ozs) or (e_ozs and not ozs):
             logger.warning ( "%s is weird. One of the (expected, observed) results is zeroes-only, the other one isnt." )
             return False
     return ozs
Example 10
def writeIgnoreMessage(keys, rEven, rOdd):
    msg = ""
    for pid in keys:
        if not pid in list(rEven) + list(rOdd):
            logger.warning("Particle %i not defined in particles.py, its decays will be ignored" %(pid))
            continue
        if pid in rEven:
            msg += "%s, " % rEven[pid]
            continue
    if len(msg) > 0:
        logger.debug("Ignoring %s decays" % msg[:-2])
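A minimal usage sketch with toy PID maps, assuming the function above is defined in the same session; rEven/rOdd map PDG codes to particle labels, and all values here are hypothetical:

import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

rEven = {23: "Z", 25: "higgs"}
rOdd = {1000022: "N1"}
writeIgnoreMessage([23, 25, 1000022, 999], rEven, rOdd)
# warns that PID 999 is undefined and debug-logs "Ignoring Z, higgs decays";
# PID 1000022 is R-odd, so it produces no message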
Example 11
    def __init__(self, output, filename):
        self.name = "basic"

        self.outputList = []
        self.filename = filename
        self.output = output
        self.printingOrder = []
        self.toPrint = []

        if filename and os.path.isfile(filename):
            logger.warning("Removing file %s" %filename)
            os.remove(filename)
Example 12
    def loadBinaryFile ( self, lastm_only = False ):
        """
        Load a binary database, returning last modified, file count, database.

        :param lastm_only: if true, the database itself is not read.
        :returns: database object, or None, if lastm_only == True.
        """
        if lastm_only and self.pcl_meta.mtime:
            ## doesn't need to load the database, and mtime is already
            ## loaded
            return None

        if not os.path.exists ( self.pcl_meta.pathname ):
            return None

        try:
            with open ( self.pcl_meta.pathname, "rb" ) as f:
                t0=time.time()
                pclfilename = self.pcl_meta.pathname
                self.pcl_meta = serializer.load ( f )
                self.pcl_meta.pathname = pclfilename
                if self.force_load == "pcl":
                    self.txt_meta = self.pcl_meta
                if not lastm_only:
                    if not self.force_load == "pcl" and self.pcl_meta.needsUpdate ( self.txt_meta ):
                        logger.warning ( "Something changed in the environment."
                                         "Regenerating." )
                        self.createBinaryFile()
                        return self
                    logger.info ( "loading binary db file %s format version %s" %
                            ( self.pcl_meta.pathname, self.pcl_meta.format_version ) )
                    if sys.version[0]=="2":
                        self.expResultList = serializer.load ( f )
                    else:
                        self.expResultList = serializer.load ( f, encoding="latin1" )
                    t1=time.time()-t0
                    logger.info ( "Loaded database from %s in %.1f secs." % \
                            ( self.pcl_meta.pathname, t1 ) )
        except (EOFError,ValueError) as e:
            os.unlink ( self.pcl_meta.pathname )
            if lastm_only:
                self.pcl_meta.format_version = -1
                self.pcl_meta.mtime = 0
                return self
            logger.error ( "%s is not readable (%s)." % \
                            ( self.pcl_meta.pathname, str(e) ) )
            if self.source in [ "http", "ftp", "pcl" ]:
                logger.error ( "source cannot be rebuilt. supply a different path to the database in your ini file." )
                sys.exit()
            self.createBinaryFile()
        # self.txt_meta = self.pcl_meta
        return self
Example 13
def elementFromEvent(event, weight=None):
    """
    Creates an element from an LHE event and the corresponding event weight.
    
    :param event: LHE event
    :param weight: event weight. Must be an XSectionList object (usually with a
                   single entry) or None if not specified.
    :returns: element
    
    """
    if not event.particles:
        logger.error("Empty event")
        return None

    brDic, massDic = _getDictionariesFromEvent(event)

    # Create branch list
    finalBranchList = []
    from smodels.particlesLoader import rOdd, rEven
    for ip, particle in enumerate(event.particles):
        keys = list ( rEven.keys() ) + \
               list ( rOdd.keys() )
        if not particle.pdg in keys:
            logger.warning("Particle %i not defined in particles.py, events containing this particle will be ignored" %(particle.pdg))
            return None
        
        # Particle came from initial state (primary mother)
        if 1 in particle.moms:
            mombranch = branch.Branch()
            mombranch.PIDs = [[particle.pdg]]           
            if weight:
                mombranch.maxWeight = weight.getMaxXsec()
            else:
                mombranch.maxWeight = 0.*fb
            # Get simple BR and Mass dictionaries for the corresponding branch
            branchBR = brDic[ip]
            branchMass = massDic[ip]
            mombranch.masses = [branchMass[mombranch.PIDs[0][0]]]
            # Generate final branches (after all R-odd particles have decayed)
            finalBranchList += branch.decayBranches([mombranch], branchBR,
                                                    branchMass, sigcut=0. * fb )

    if len(finalBranchList) != 2:
        logger.error(str(len(finalBranchList)) + " branches found in event; "
                     "Possible R-parity violation")
        raise SModelSError()
    # Create element from event
    newElement = element.Element(finalBranchList)
    if weight:
        newElement.weight = copy.deepcopy(weight)

    return newElement
Example 14
 def setOutPutFile(self,filename,overwrite=True,silent=False):
     """
     Set the basename for the text printer. The output filename will be
     filename.xml.
     :param filename: Base filename
     :param overwrite: If True and the file already exists, it will be removed.
     :param silent: if True, do not warn when removing old files
     """
     
     self.filename = filename +'.xml'
     if overwrite and os.path.isfile(self.filename):
         if not silent:
             logger.warning("Removing old output file " + self.filename)
         os.remove(self.filename)        
Example 15
def getXsecFromSLHAFile(slhafile, useXSecs=None, xsecUnit = pb):
    """
    Obtain cross sections for pair production of R-odd particles from input SLHA file.
    The default unit for cross section is pb.

    :parameter slhafile: SLHA input file with cross sections
    :parameter useXSecs: if defined, enables the user to select which cross sections to
                     use. Must be an XSecInfoList object
    :parameter xsecUnit: cross section unit in the input file (must be a Unum unit)
    :returns: a XSectionList object

    """
    # Store information about all cross sections in the SLHA file
    xSecsInFile = XSectionList()
    f=pyslha.readSLHAFile ( slhafile )
    from smodels.particlesLoader import rOdd
    for production in f.xsections:
        rEvenParticles = list(set(production[2:]).difference(set(rOdd.keys())))
        if rEvenParticles:
            # ignore production of R-Even Particles
            logger.warning("Particles %s not defined as R-odd, cross section for %s production will be ignored" 
                           %(rEvenParticles,str(production)))                 
            continue
        process = f.xsections.get ( production )
        for pxsec in process.xsecs:
            csOrder = pxsec.qcd_order
            wlabel = str( int ( pxsec.sqrts / 1000) ) + ' TeV'
            if csOrder == 0:
                wlabel += ' (LO)'
            elif csOrder == 1:
                wlabel += ' (NLO)'
            elif csOrder == 2:
                wlabel += ' (NLL)'
            else:
                logger.error ( "Unknown QCD order %d" % csOrder )
                raise SModelSError()
            xsec = XSection()
            xsec.info.sqrts = pxsec.sqrts/1000. * TeV
            xsec.info.order = csOrder
            xsec.info.label = wlabel
            xsec.value = pxsec.value * pb
            xsec.pid = production[2:]
            # Do not add xsecs which do not match the user required ones:
            if (useXSecs and not xsec.info in useXSecs):
                continue
            else: xSecsInFile.add(xsec)

    return xSecsInFile
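A hedged usage sketch; the file name is hypothetical and the file is assumed to contain XSECTION blocks:

# xsecs = getXsecFromSLHAFile("gluino_squarks.slha")
# for xsec in xsecs:
#     print(xsec.info.label, xsec.pid, xsec.value)   # e.g. '13 TeV (NLO)', (1000021, 1000021), value in pb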
Example 16
    def fetchFromServer(self, path, discard_zeroes):
        import requests, time, json
        logger.debug("need to fetch from server: %s" % path)
        self.source = "http"
        if "ftp://" in path:
            self.source = "ftp"
        store = "." + path.replace(":", "_").replace("/", "_").replace(
            ".", "_")
        if not os.path.isfile(store):
            ## completely new! fetch the description and the db!
            return self.fetchFromScratch(path, store, discard_zeroes)
        with open(store, "r") as f:
            jsn = json.load(f)
        filename = "./" + jsn["url"].split("/")[-1]

        class _:  ## pseudo class for pseudo requests
            def __init__(self):
                self.status_code = -1

        r = _()
        try:
            r = requests.get(path)
        except Exception:
            pass
        if r.status_code != 200:
            logger.warning ( "Error %d: could not fetch %s from server." % \
                           ( r.status_code, path ) )
            if not os.path.isfile(filename):
                logger.error(
                    "Can't find a local copy of the pickle file. Exiting.")
                sys.exit()
            logger.warning(
                "I do, however, have a local copy of the file; working with that."
            )
            self.force_load = "pcl"
            # next step: check the timestamps
            return ("./", filename)

        if r.json()["lastchanged"] > jsn["lastchanged"]:
            ## has changed! redownload everything!
            return self.fetchFromScratch(path, store, discard_zeroes)

        if not os.path.isfile(filename):
            return self.fetchFromScratch(path, store, discard_zeroes)
        self.force_load = "pcl"
        # next step: check the timestamps
        return ("./", filename)
Example 17
    def _addDecay(self, br, massDictionary):
        """
        Generate a new branch adding a 1-step cascade decay        
        This is described by the br object, with particle masses given by
        massDictionary.
        
        :parameter br: branching ratio object (see pyslha). Contains information about the decay.
        :parameter massDictionary: dictionary containing the masses for all intermediate states.
        :returns: extended branch (Branch object). False if there was an error.
        """

        from smodels.particlesLoader import rEven

        newBranch = self.copy()
        newparticles = []
        newmass = []

        if len(self.PIDs) != 1:
            logger.error("During decay the branch should \
                            not have multiple PID lists!")
            return False

        for partID in br.ids:
            # Add R-even particles to final state
            if partID in rEven:
                newparticles.append(rEven[partID])
            else:
                # Add masses of non R-even particles to mass vector
                newmass.append(massDictionary[partID])
                newBranch.PIDs[0].append(partID)

        if len(newmass) > 1:
            logger.warning("Multiple R-odd particles in the final state: " +
                           str(br.ids))
            return False

        if newparticles:
            newBranch.particles.append(sorted(newparticles))
        if newmass:
            newBranch.masses.append(newmass[0])
        if self.maxWeight is not None:
            newBranch.maxWeight = self.maxWeight * br.br
        #If there are no daughters, assume branch is stable
        if not br.ids:
            newBranch.stable = True

        return newBranch
Example 18
def get_slha_data(slhaFile):
    """
    Uses pyslha to read the SLHA file. Returns a pyslha.Doc object, if successful.
    """

    if not os.path.isfile(slhaFile):
        logger.warning("SLHA file %s not found. This point will be ignored" %
                       slhaFile)
        return False

    try:
        slhaData = pyslha.readSLHAFile(slhaFile)
    except Exception:
        logger.warning("Error reading SLHA file %s." % slhaFile)
        return False

    return slhaData
Example 19
    def _loadExpResults(self):
        """
        Checks the database folder and generates a list of ExpResult objects for
        each (globalInfo.txt,sms.py) pair.

        :returns: list of ExpResult objects
        """
        #Try to load particles from databaseParticles.py
        self._setParticles(self._getParticles())
        folders = []
        for root, _, files in cleanWalk(self.txt_meta.pathname):
            folders.append((root, files))
        folders.sort()

        roots = []
        for root, files in folders:
            if "/.git/" in root:
                continue
            if root[-11:] == "/validation":
                continue
            if root[-5:] == "/orig":
                continue
            if 'globalInfo.txt' in files:
                roots.append(root)

        if self.progressbar:
            self.progressbar.maxval = len(roots)
            self.progressbar.start()
        resultsList = []
        for ctr, root in enumerate(roots):
            if self.progressbar:
                self.progressbar.update(ctr)
            expres = self.createExpResult(root)
            if expres:
                resultsList.append(expres)

        if not resultsList:
            logger.warning("Zero results loaded.")
        if self.progressbar:
            self.progressbar.finish()

        return resultsList
Example 20
    def _loadExpResults(self):
        """
        Checks the database folder and generates a list of ExpResult objects for
        each (globalInfo.txt,sms.py) pair.

        :returns: list of ExpResult objects

        """
        folders=[]
        for root, _, files in cleanWalk(self.txt_meta.pathname):
            folders.append ( (root, files) )
        folders.sort()

        roots = []
        for root,files in folders:
            if "/.git/" in root:
                continue
            if root[-11:] == "/validation":
                continue
            if root[-5:] == "/orig":
                continue
            if 'globalInfo.txt' in files:
                roots.append ( root )

        if self.progressbar:
            self.progressbar.maxval = len ( roots )
            self.progressbar.start()
        resultsList = []
        for ctr,root in enumerate(roots):
            if self.progressbar:
                self.progressbar.update(ctr)
            expres = self.createExpResult ( root )
            if expres:
                resultsList.append(expres)

        if not resultsList:
            logger.warning("Zero results loaded.")
        if self.progressbar:
            self.progressbar.finish()

        return resultsList
Example 21
    def setPrinterOptions(self, parser):
        """
        Define the printer types and their options.
        
        :param parser: ConfigParser storing information from the parameters file
        """

        #Define the printer types and the printer-specific options:
        printerTypes = parser.get("printer", "outputType").split(",")
        for prt in printerTypes:
            prt = prt.strip()  ## trailing spaces shouldn't matter
            if prt == 'python':
                newPrinter = PyPrinter(output='file')
            elif prt == 'summary':
                newPrinter = SummaryPrinter(output='file')
            elif prt == 'stdout':
                newPrinter = TxTPrinter(output='stdout')
            elif prt == 'log':
                newPrinter = TxTPrinter(output='file')
            elif prt == 'xml':
                newPrinter = XmlPrinter(output='file')
            elif prt == 'slha':
                newPrinter = SLHAPrinter(output='file')
                if parser.getboolean("options",
                                     "doCompress") or parser.getboolean(
                                         "options", "doInvisible"):
                    newPrinter.docompress = 1
            else:
                logger.warning("Unknown printer format: %s" % str(prt))
                continue

            #Copy stdout options to log options:
            if 'log' in printerTypes:
                if parser.has_section(
                        'stdout-printer'
                ) and not parser.has_section('log-printer'):
                    parser.add_section('log-printer')
                    for option, val in parser.items('stdout-printer'):
                        parser.set('log-printer', option, val)

            #Set printer-specific options:
            if parser.has_section(prt + '-printer'):
                newPrinter.setOptions(parser.items(prt + '-printer'))
            self.Printers[prt] = newPrinter
Example 22
    def getValuesFor(self, attribute=None):
        """
        Returns a list for the possible values appearing in the ExpResult
        for the required attribute (sqrts,id,constraint,...).
        If there is a single value, returns the value itself.

        :param attribute: name of a field in the database (string). If not
                          defined it will return a dictionary with all fields
                          and their respective values
        :return: list of values or value

        """
        fieldDict = list(self.__dict__.items())
        valuesDict = {}
        while fieldDict:
            for field, value in fieldDict[:]:  # iterate over a copy; entries are removed below
                if not '<smodels.experiment' in str(value):
                    if not field in valuesDict:
                        valuesDict[field] = [value]
                    else:
                        valuesDict[field].append(value)
                else:
                    if isinstance(value, list):
                        for entry in value:
                            fieldDict += entry.__dict__.items()
                    else:
                        fieldDict += value.__dict__.items()
                fieldDict.remove((field, value))

        # Try to keep only the set of unique values
        for key, val in valuesDict.items():
            try:
                valuesDict[key] = list(set(val))
            except TypeError:
                pass
        if not attribute:
            return valuesDict
        elif not attribute in valuesDict.keys():
            logger.warning("Could not find field %s in %s", attribute,
                           self.path)
            return False
        else:
            return valuesDict[attribute]
Example 23
    def getValuesFor(self,attribute=None,expResult=None):
        """
        Returns a list for the possible values appearing in the database
        for the required attribute (sqrts,id,constraint,...).

        :param attribute: name of a field in the database (string). If not defined
                          it will return a dictionary with all fields and their respective
                          values
        :param expResult: if defined, restricts the list to the corresponding expResult.
                          Must be an ExpResult object.
        :return: list of values
        """


        fieldDict = []
        if expResult and isinstance(expResult,ExpResult):
            fieldDict = list(expResult.__dict__.items())   #Use only the entries for the expResult
        else:
            for expResult in self:
                fieldDict += list(expResult.__dict__.items())     #Use all entries/expResults
        valuesDict = {}
        while fieldDict:
            for field,value in fieldDict[:]:
                if not '<smodels.experiment' in str(value):
                    if not field in valuesDict: valuesDict[field] = [value]
                    else: valuesDict[field].append(value)
                else:
                    if isinstance(value,list):
                        for entry in value: fieldDict += list(entry.__dict__.items())
                    else: fieldDict += list(value.__dict__.items())
                fieldDict.remove((field,value))

        #Try to keep only the set of unique values
        for key,val in valuesDict.items():
            try: valuesDict[key] = list(set(val))
            except TypeError: pass
        if not attribute: return valuesDict
        elif not attribute in valuesDict:
            logger.warning("Could not find field %s in database" % attribute)
            return False
        else:
            return valuesDict[attribute]
Example 24
    def getULFor(self, expid, txname, massarray, expected=False):
        """
        Get an upper limit for the given experimental id, the txname,
        and the massarray.
        Can only be used for UL experimental results.
        Interpolation is done, if necessary.

        :param expid: experimental id (string)
        :param txname: txname (string). ONLY required for upper limit results
        :param massarray: list of masses with units, e.g.
                          [[ 400.*GeV, 100.*GeV],[400.*GeV, 100.*GeV]]
        :param expected: If true, return expected upper limit, otherwise
                         return observed upper limit.
        :return: upper limit [fb]
        """

        #First select the experimental results matching the id and the result type:
        expres = None
        for expResult in self:
            if expResult.globalInfo.id != expid:
                continue
            else:
                if 'upperLimit' in [
                        ds.dataInfo.dataType for ds in expResult.datasets
                ]:
                    expres = expResult
                    break

        if not expres:
            logger.warning("Could not find UL result %s. getULFor can only be\
            used for upper-limit results." % (expid))
            return None

        txnames = expres.getTxNames()
        for tx in txnames:
            if not tx.txName == txname:
                continue
            return tx.getULFor(massarray, expected)

        logger.warning("Could not find TxName %s ." % (txname))
        return None
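A hedged usage sketch; the analysis id, txname, and mass array are hypothetical, and browser stands for an instance of the class this method belongs to:

# ul = browser.getULFor("ATLAS-SUSY-2013-05", "T2bb",
#                       [[400.*GeV, 100.*GeV], [400.*GeV, 100.*GeV]])
# if ul is not None:
#     print("observed upper limit:", ul)   # in fb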
Example 25
    def fetchFromServer ( self, path, discard_zeroes ):
        import requests, time, json
        logger.debug ( "need to fetch from server: %s" % path )
        self.source = "http"
        if "ftp://" in path:
            self.source = "ftp"
        store = "." + path.replace ( ":","_" ).replace( "/", "_" ).replace(".","_" )
        if not os.path.isfile ( store ):
            ## completely new! fetch the description and the db!
            return self.fetchFromScratch ( path, store, discard_zeroes )
        with open(store,"r") as f:
            jsn = json.load(f)
        filename= "./" + jsn["url"].split("/")[-1]
        class _: ## pseudo class for pseudo requests
            def __init__ ( self ): self.status_code = -1
        r=_()
        try:
            r = requests.get( path )
        except Exception:
            pass
        if r.status_code != 200:
            logger.warning ( "Error %d: could not fetch %s from server." % \
                           ( r.status_code, path ) )
            if not os.path.isfile ( filename ):
                logger.error ( "Cant find a local copy of the pickle file. Exit." )
                sys.exit()
            logger.warning ( "I do however have a local copy of the file. I work with that." )
            self.force_load = "pcl"
            # next step: check the timestamps
            return ( "./", filename )

        if r.json()["lastchanged"] > jsn["lastchanged"]:
            ## has changed! redownload everything!
            return self.fetchFromScratch ( path, store, discard_zeroes )

        if not os.path.isfile ( filename ):
            return self.fetchFromScratch ( path, store, discard_zeroes )
        self.force_load = "pcl"
        # next step: check the timestamps
        return ( "./", filename )
Example 26
    def getValuesFor(self, attribute=None):
        """
        Returns a list for the possible values appearing in the ExpResult
        for the required attribute (sqrts,id,constraint,...).
        If there is a single value, returns the value itself.

        :param attribute: name of a field in the database (string). If not
                          defined it will return a dictionary with all fields
                          and their respective values
        :return: list of values or value

        """
        fieldDict = list ( self.__dict__.items() )
        valuesDict = {}
        while fieldDict:
            for field, value in fieldDict[:]:  # iterate over a copy; entries are removed below
                if not '<smodels.experiment' in str(value):
                    if not field in valuesDict:
                        valuesDict[field] = [value]
                    else: valuesDict[field].append(value)
                else:
                    if isinstance(value, list):
                        for entry in value:
                            fieldDict += entry.__dict__.items()
                    else: fieldDict += value.__dict__.items()
                fieldDict.remove((field, value))

        # Try to keep only the set of unique values
        for key, val in valuesDict.items():
            try:
                valuesDict[key] = list(set(val))
            except TypeError:
                pass
        if not attribute:
            return valuesDict
        elif not attribute in valuesDict.keys():
            logger.warning("Could not find field %s in %s", attribute, self.path)
            return False
        else:
            return valuesDict[attribute]
Example 27
 def setPrinterOptions(self,parser):
     """
     Define the printer types and their options.
     
     :param parser: ConfigParser storing information from the parameters file
     """
     
     #Define the printer types and the printer-specific options:
     printerTypes = parser.get("printer", "outputType").split(",")        
     for prt in printerTypes:
         prt = prt.strip() ## trailing spaces shouldn't matter
         if prt == 'python':
             newPrinter = PyPrinter(output = 'file')                
         elif prt == 'summary':        
             newPrinter = SummaryPrinter(output = 'file')
         elif prt == 'stdout':
             newPrinter = TxTPrinter(output = 'stdout')
         elif prt == 'log':
             newPrinter = TxTPrinter(output = 'file')
         elif prt == 'xml':
             newPrinter = XmlPrinter(output = 'file')           
         elif prt == 'slha':
             newPrinter = SLHAPrinter(output = 'file')
             if parser.getboolean("options", "doCompress") or parser.getboolean("options", "doInvisible"):
                 newPrinter.docompress = 1
         else:
             logger.warning("Unknown printer format: %s" %str(prt))
             continue
         
         #Copy stdout options to log options:
         if 'log' in printerTypes:
             if parser.has_section('stdout-printer') and not parser.has_section('log-printer'):
                 parser.add_section('log-printer')
                 for option,val in parser.items('stdout-printer'):
                     parser.set('log-printer',option,val)
         
         #Set printer-specific options:
         if parser.has_section(prt+'-printer'):
             newPrinter.setOptions(parser.items(prt+'-printer'))
         self.Printers[prt] = newPrinter
Example 28
    def _loadExpResults(self):
        """
        Checks the database folder and generates a list of ExpResult objects for
        each (globalInfo.txt,sms.py) pair.
       
        :returns: list of ExpResult objects 
  
        """
        folders = []
        for root, _, files in os.walk(self._base):
            folders.append((root, files))
        folders.sort()

        roots = []
        for root, files in folders:
            if "/.git/" in root:
                continue
            if root[-11:] == "/validation":
                continue
            if root[-5:] == "/orig":
                continue
            if 'globalInfo.txt' in files:
                roots.append(root)

        resultsList = []
        for root in roots:
            expres = ExpResult(root)
            if expres:
                resultsList.append(expres)
                contact = expres.globalInfo.getInfo("contact")
                if contact and "fastlim" in contact.lower():
                    self.hasFastLim = True

        if not resultsList:
            logger.warning("Zero results loaded.")

        return resultsList
Example 29
 def visible(self, pid, decay=None):
     """
     Check if pid is detectable.
     If pid is not known, consider it visible.
     If pid is not an SM particle and decay == True, check whether the particle or its decay products are visible.
     
     """
     if pid in SMvisible: return True
     if pid in SMinvisible: return False
     qn = Qnumbers(pid)
     if qn.pid == 0:
         return True
     if qn.charge3 != 0 or qn.cdim != 1:
         return True
     if decay:
         if not pid in self.slha.decays:
             logger.warning("Missing decay block for pid %s" % (str(pid)))
             return None  # Note: purposely distinguished from False so I can propagate the information to the output file
         for dec in self.slha.decays[pid].decays:  # renamed to avoid shadowing the 'decay' argument
             for pids in dec.ids:
                 if self.visible(abs(pids), decay=True): return True
     return False
Example 30
 def visible(self, pid, decay=None):
     """
     Check if pid is detectable.
     If pid is not known, consider it visible.
     If pid is not an SM particle and decay == True, check whether the particle or its decay products are visible.
     
     """
     if pid in SMvisible: return True
     if pid in SMinvisible: return False
     qn = Qnumbers(pid)
     if qn.pid == 0:
         return True
     if qn.charge3 != 0 or qn.cdim != 1:
         return True
     if decay:
         if not pid in self.slha.decays:
             logger.warning("Missing decay block for pid %s" % (str(pid)))
             return None  # Note: purposely distinguished from False so I can propagate the information to the output file
         for dec in self.slha.decays[pid].decays:  # renamed to avoid shadowing the 'decay' argument
             for pids in dec.ids:
                 if self.visible(abs(pids), decay=True): return True
     return False
Example 31
    def addXSecToFile(self, xsecs, slhafile, comment=None, complain=True):
        """
        Write cross sections to an SLHA file.
        
        :param xsecs: a XSectionList object containing the cross sections
        :param slhafile: target file for writing the cross sections in SLHA format
        :param comment: optional comment to be added to each cross section block
        :param complain: complain if there are already cross sections in file
        
        """

        if not os.path.isfile(slhafile):
            logger.error("SLHA file not found.")
            raise SModelSError()
        if len(xsecs) == 0:
            logger.warning("No cross sections available.")
            return False
        # Check if the file already contains cross section blocks
        xSectionList = crossSection.getXsecFromSLHAFile(slhafile)
        if xSectionList and complain:
            logger.info("SLHA file already contains XSECTION blocks. Adding "
                        "only missing cross sections.")

        # Write cross sections to file, if they do not overlap any cross section in
        # the file
        with open(slhafile, 'a') as outfile:
            for xsec in xsecs:
                writeXsec = True
                for oldxsec in xSectionList:
                    if oldxsec.info == xsec.info and set(oldxsec.pid) == set(
                            xsec.pid):
                        writeXsec = False
                        break
                if writeXsec:
                    outfile.write(
                        self.xsecToBlock(xsec, (2212, 2212), comment) + "\n")

        return True
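A hedged usage sketch; computer and the file name are hypothetical:

# computer.addXSecToFile(xsecs, "model.slha", comment="k-factors applied")
# appends XSECTION blocks for a proton-proton (2212, 2212) initial state,
# skipping any cross section already present in the file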
Example 32
    def getValuesFor(self, attribute=None):
        """
        Returns a list for the possible values appearing in the DataSet
        for the required attribute.


        :param attribute: name of a field in the database (string). If not defined
                          it will return a dictionary with all fields and 
                          their respective values
        :return: list of values
        """

        fieldDict = list(self.__dict__.items())  # list() needed: dict views cannot be sliced
        valuesDict = {}
        while fieldDict:
            for field, value in fieldDict[:]:
                if not '<smodels.experiment' in str(value):
                    if not field in valuesDict: valuesDict[field] = [value]
                    else: valuesDict[field].append(value)
                else:
                    if isinstance(value, list):
                        for entry in value:
                            fieldDict += list(entry.__dict__.items())
                    else:
                        fieldDict += list(value.__dict__.items())
                fieldDict.remove((field, value))

        #Try to keep only the set of unique values
        for key, val in valuesDict.items():
            try:
                valuesDict[key] = list(set(val))
            except TypeError:
                pass
        if not attribute: return valuesDict
        elif not attribute in valuesDict:
            logger.warning("Could not find field %s in database" % attribute)
            return False
        else:
            return valuesDict[attribute]
Example 33
    def getULFor(self,expid,txname,massarray, expected=False ):
        """
        Get an upper limit for the given experimental id, the txname, 
        and the massarray.
        Can only be used for UL experimental results.
        Interpolation is done, if necessary.

        :param expid: experimental id (string)
        :param txname: txname (string). ONLY required for upper limit results
        :param massarray: list of masses with units, e.g.
                          [[ 400.*GeV, 100.*GeV],[400.*GeV, 100.*GeV]]
        :param expected: If true, return expected upper limit, otherwise
                         return observed upper limit.
        :return: upper limit [fb]
        """

        #First select the experimental results matching the id and the result type:
        expres = None
        for expResult in self:
            if expResult.getValuesFor('id')[0] != expid:
                continue
            else:
                if 'upperLimit' in expResult.getValuesFor('dataType'):
                    expres = expResult
                    break

        if not expres:
            logger.warning( "Could not find UL result %s. getULFor can only be\
            used for upper-limit results." % (expid))
            return None

        txnames = expres.getTxNames()
        for tx in txnames:
            if not tx.txName == txname:
                continue
            return tx.getValueFor(massarray,expected)

        logger.warning( "Could not find TxName %s ." % (txname))
        return None
Example 34
    def loadBinaryFile(self, lastm_only=False):
        """
        Load a binary database, returning last modified, file count, database.
        
        :param lastm_only: if true, the database itself is not read.
        :returns: database object, or None, if lastm_only == True.
        """
        if lastm_only and self.pcl_mtime[0]:
            ## doesn't need to load the database, and mtime is already
            ## loaded
            return None

        if self.pcl_db:
            return self.pcl_db

        if not os.path.exists(self.binfile):
            return None

        try:
            with open(self.binfile, "rb") as f:
                t0 = time.time()
                self.pcl_python = serializer.load(f)
                self.pcl_format_version = serializer.load(f)
                self.pcl_mtime = serializer.load(f)
                self._databaseVersion = serializer.load(f)
                if not lastm_only:
                    if self.pcl_python != sys.version:
                        logger.warning(
                            "binary file was written with a different "
                            "python version. Regenerating.")
                        self.createBinaryFile()
                        return self
                    if self.pcl_format_version != self.sw_format_version:
                        logger.warning(
                            "binary file format (%s) and format "
                            "supported by software (%s) disagree." %
                            (self.pcl_format_version, self.sw_format_version))
                        logger.warning("will recreate binary.")
                        self.createBinaryFile()
                        return self

                    logger.info("loading binary db file %s format version %s" %
                                (self.binfile, self.pcl_format_version))
                    self.hasFastLim = serializer.load(f)
                    self.expResultList = serializer.load(f)
                    t1 = time.time() - t0
                    logger.info ( "Loaded database from %s in %.1f secs." % \
                            ( self.binfile, t1 ) )
        except EOFError as e:
            os.unlink(self.binfile)
            if lastm_only:
                self.pcl_format_version = -1
                self.pcl_mtime = 0
                return self
            logger.error("%s is not a binary database file! recreate it!" %
                         self.binfile)
            self.createBinaryFile()
        return self
Example 35
def import_python_output(smodelsFile):
    """
    Imports the smodels output from each .py file.
    """

    try:
        with open(smodelsFile, 'rb') as fsmodels:  ## imports smodels file
            smodelsOut = imp.load_module("smodelsOutput", fsmodels,
                                         smodelsFile,
                                         ('.py', 'rb', imp.PY_SOURCE))
            smodelsOutput = smodelsOut.smodelsOutput
    except Exception:
        logger.debug(
            "Error loading smodels file %s. Does it contain a smodelsOutput dictionary?"
            % smodelsFile)
        return False

    if not isinstance(smodelsOutput, dict):
        logger.warning("smodelsOutput in file %s is not a dictionary." %
                       smodelsFile)
        return False

    return smodelsOutput
Example 36
 def getLifetime(self, pid, ctau=False):
     """
     Compute lifetime from decay-width for a particle with pid.
     
     :parameter pid: PID of particle
     :parameter ctau: set True to multiply lifetime by c
     :returns: lifetime
     
     """
     widths = self.getDecayWidths()
     try:
         if widths[abs(pid)]:
             # tau = hbar / Gamma: 1.51926778e24 is 1/hbar in units of 1/(GeV*s)
             lt = (1.0 / widths[abs(pid)]) / 1.51926778e24
         else:
             # Particle is stable
             return -1
         if self.emptyDecay(pid): return -1  # if decay block is empty particle is also considered stable
         if ctau:
             return lt * 3E8
         else:
             return lt
     except KeyError:
         logger.warning("No decay block for %s, consider it as a stable particle" % str(pid) )
         return -1
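A quick sanity check of the hard-coded conversion constant, assuming hbar = 6.582119569e-25 GeV*s:

hbar = 6.582119569e-25            # GeV*s
print(1.0 / hbar)                 # ~1.5193e+24, matching the 1.51926778e24 used above
# hence lt = (1/width[GeV]) / 1.519e24 = hbar/width is the lifetime in seconds,
# and multiplying by 3e8 m/s gives c*tau in meters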
Example 37
    def checkInstallation(self, compile=True ):
        """
        Checks if installation of tool is correct by looking for executable and
        executing it. If the check fails and compile is True, try to compile it.

        :returns: True, if everything is ok

        """
        if not os.path.exists(self.executablePath):
            if compile:
                logger.warn("%s executable not found. Trying to compile it now. This may take a while." % self.name )
                self.compile()
            else:
                logger.warn("%s exectuable not found." % self.name )
                self.complain()
                return False
        if not os.path.exists(self.executablePath):
            logger.error("Compilation of %s failed Is a according compiler installed?" % self.name )
            self.complain()
        if not os.access(self.executablePath, os.X_OK):
            logger.warning("%s is not executable Trying to chmod" % self.executable)
            self.chmod()
        return True
Example 38
 def getLifetime(self, pid, ctau=False):
     """
     Compute lifetime from decay-width for a particle with pid.
     
     :parameter pid: PID of particle
     :parameter ctau: set True to multiply lifetime by c
     :returns: lifetime
     
     """
     widths = self.getDecayWidths()
     try:
         if widths[abs(pid)]:
             # tau = hbar / Gamma: 1.51926778e24 is 1/hbar in units of 1/(GeV*s)
             lt = (1.0 / widths[abs(pid)]) / 1.51926778e24
         else:
             # Particle is stable
             return -1
         if self.emptyDecay(pid): return -1  # if decay block is empty particle is also considered stable
         if ctau:
             return lt * 3E8
         else:
             return lt
     except KeyError:
         logger.warning("No decay block for %s, consider it as a stable particle" % str(pid) )
         return -1
Example 39
    def checkInstallation(self, compile=True ):
        """
        Checks if installation of tool is correct by looking for executable and
        executing it. If the check fails and compile is True, try to compile it.

        :returns: True, if everything is ok

        """
        if not os.path.exists(self.executablePath):
            if compile:
                logger.warn("%s executable not found. Trying to compile it now. This may take a while." % self.name )
                self.compile()
            else:
                logger.warn("%s executable not found." % self.name )
                self.complain()
                return False
        if not os.path.exists(self.executablePath):
            if self.maycompile: ## should have worked
                logger.error("Compilation of %s failed. Is a according compiler installed?" % self.name )
            self.complain()
        if not os.access(self.executablePath, os.X_OK):
            logger.warning("%s is not executable Trying to chmod" % self.executable)
            self.chmod()
        return True
Example 40
    def addHigherOrders(self, sqrts, slhafile):
        """ add higher order xsecs """
        xsecs = copy.deepcopy(self.loXsecs)
        wlabel = str(int(sqrts / TeV)) + ' TeV'
        if self.maxOrder == LO:
            wlabel += ' (LO)'
        elif self.maxOrder == NLO:
            wlabel += ' (NLO)'
        elif self.maxOrder == NLL:
            wlabel += ' (NLO+NLL)'
        for ixsec, xsec in enumerate(xsecs):
            xsecs[ixsec].info.label = wlabel
            xsecs[ixsec].info.order = self.maxOrder

        if self.maxOrder > 0:
            pIDs = self.loXsecs.getPIDpairs()
            nllfast = toolBox.ToolBox().get("nllfast%d" % sqrts.asNumber(TeV))
            nllfast.maycompile = self.maycompile
            for pID in pIDs:
                k = 0.
                kNLO, kNLL = nllfast.getKfactorsFor(pID, slhafile)
                if self.maxOrder == NLO and kNLO:
                    k = kNLO
                elif self.maxOrder == NLL and kNLL and kNLO:
                    k = kNLO * kNLL
                elif self.maxOrder > 2 and kNLL and kNLO:
                    logger.warning(
                        "Unknown xsec order, using NLL+NLO k-factor, "
                        "if available")
                    k = kNLO * kNLL
                k = float(k)
                for i, xsec in enumerate(xsecs):
                    if set(xsec.pid) == set(pID):
                        # Apply k-factor
                        xsecs[i] = xsec * k

        # Remove zero cross sections
        while len(xsecs) > 0 and xsecs.getMinXsec() == 0. * pb:
            for xsec in xsecs:
                if xsec.value == 0. * pb:
                    xsecs.delete(xsec)
                    break
        if self.maxOrder > 0 and len(xsecs) == 0:
            self.countNoNLOXSecs += 1
            if self.countNoNLOXSecs < 3:
                logger.warning("No NLO or NLL cross sections available.")
            if self.countNoNLOXSecs == 3:
                logger.warning(
                    "No NLO or NLL cross sections available (will quench such warnings in future)."
                )

        #for i in xsecs:
        #    logger.error ( "xsec=%s (%s)" % (i,type(i)) )
        return xsecs
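The k-factor application above reduces to a per-PID-pair rescaling; a minimal sketch with made-up numbers:

loXsec = 0.35           # hypothetical LO cross section in pb
kNLO, kNLL = 1.4, 1.1   # hypothetical k-factors, as returned by getKfactorsFor
nllXsec = loXsec * kNLO * kNLL  # NLO+NLL estimate, here 0.539 pb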
Esempio n. 41
0
    def selectExpResultsWith(self, **restrDict):
        """
        Loads into _selectedExpResults the list of experimental results (pairs
        of InfoFile and DataFile) satisfying the given restrictions.
        The restrictions are specified as a dictionary.

        :param restrDict: selection fields and their allowed values.
                          E.g. lumi=[19.4/fb, 20.3/fb], txName='T1', ...
                          The values can be single entries or a list of values.
                          For fields not listed, all values are allowed.
        """

        #First check all the selected fields exist and build the corresponding
        #restriction dictionary
        rDict = {}
        allfields = self.getAttributes()
        for tag, value in restrDict.items():
            if tag in allfields: rDict[tag] = value
            else:
                logger.warning(
                    "Field/attribute %s not found (will be ignored)." % tag)

        results = self.database.expResultList[:]
        for expRes in results[:]:
            expAttributes = expRes.getAttributes()
            for tag in restrDict:
                #Check if the restriction tag appears in the experimental result
                if not tag in expAttributes:
                    results.remove(expRes)
                    break
                vals = expRes.getValuesFor(tag)
                #If it does, check if any of the values in expResult match any of the values given
                #as restrictions
                if not isinstance(restrDict[tag], list):
                    rvals = [restrDict[tag]]
                else:
                    rvals = restrDict[tag]
                #If there is a type mismatch, also remove
                try:
                    intersec = numpy.intersect1d(vals, rvals)
                except unum.IncompatibleUnitsError:
                    logger.warning("Incompatible units, skipping result.")
                    results.remove(expRes)
                    break
                if len(intersec) == 0:
                    results.remove(expRes)
                    break

        self._selectedExpResults = results[:]

        if not self._selectedExpResults: logger.warning("Zero results loaded.")
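A hedged usage sketch; `browser` stands for an instance of the class this method belongs to, and the field name is taken from the docstring example:

browser.selectExpResultsWith(txName='T1')
print(len(browser._selectedExpResults), "results selected")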
Esempio n. 42
0
    def selectExpResultsWith(self,**restrDict):
        """
        Loads into _selectedExpResults the list of experimental results (pairs
        of InfoFile and DataFile) satisfying the given restrictions.
        The restrictions are specified as a dictionary.

        :param restrDict: selection fields and their allowed values.
                          E.g. lumi=[19.4/fb, 20.3/fb], txName='T1', ...
                          The values can be single entries or a list of values.
                          For fields not listed, all values are allowed.
        """

        #First check all the selected fields exist and build the corresponding
        #restriction dictionary
        rDict = {}
        allfields = self.getAttributes()
        for tag,value in restrDict.items():
            if tag in allfields: rDict[tag] = value
            else: logger.warning("Field/attribute %s not found (will be ignored)." % tag)

        results = self.database.expResultList[:]
        for expRes in results[:]:
            values = self.getValuesFor(attribute=None, expResult=expRes)
            for tag in restrDict:
                #Check if the restriction tag appears in the experimental result
                if not tag in values:
                    results.remove(expRes)
                    break
                vals = values[tag]
                #If it does, check if any of the values in expResult match any of the values given
                #as restrictions
                if not isinstance(restrDict[tag],list): rvals = [restrDict[tag]]
                else: rvals = restrDict[tag]
                #If there is a type mismatch, also remove
                try: intersec = numpy.intersect1d(vals,rvals)
                except unum.IncompatibleUnitsError:
                    logger.warning("Incompatible units, skipping result.")
                    results.remove(expRes)
                    break
                if len(intersec) == 0:
                    results.remove(expRes)
                    break

        self._selectedExpResults = results[:]

        if not self._selectedExpResults: logger.warning("Zero results loaded.")
Esempio n. 43
0
 def checkPyhfVersion ( self ):
     """ check the pyhf version, currently we need 0.6.1+ """
     if pyhfinfo["ver"] < pyhfinfo["required"]:
         logger.warning ( f"pyhf version is {'.'.join(pyhfinfo['ver'])}. SModelS currently requires pyhf>={'.'.join(pyhfinfo['required'])}. You have been warned." )
Esempio n. 44
0
    def getCombinedUpperLimitFor(self, nsig, expected=False, deltas_rel=0.2):
        """
        Get the combined upper limit. If a covariance matrix is given in globalInfo,
        the simplified likelihood is used; if json files are given, a pyhf combination
        is performed.

        :param nsig: list of signal events in each signal region/dataset. The list
                        should obey the ordering in globalInfo.datasetOrder.
        :param expected: return expected, not observed value
        :param deltas_rel: relative uncertainty in signal (float). Default value is 20%.

        :returns: upper limit on sigma*eff
        """

        if hasattr(self.globalInfo, "covariance" ):
            cov = self.globalInfo.covariance
            if type(cov) != list:
                raise SModelSError( "covariance field has wrong type: %s" % type(cov))
            if len(cov) < 1:
                raise SModelSError( "covariance matrix has length %d." % len(cov))

            computer = UpperLimitComputer(ntoys=10000)

            nobs = [x.dataInfo.observedN for x in self._datasets]
            bg = [x.dataInfo.expectedBG for x in self._datasets]

            ret = computer.ulSigma(Data(observed=nobs, backgrounds=bg, covariance=cov,
                                        third_moment=None, nsignal=nsig, deltas_rel=deltas_rel),
                                        marginalize=self._marginalize,
                                        expected=expected)

            if ret is not None:
                #Convert limit on total number of signal events to a limit on sigma*eff
                ret = ret/self.globalInfo.lumi
            logger.debug("SL upper limit : {}".format(ret))
            return ret
        elif hasattr(self.globalInfo, "jsonFiles" ):
            logger.debug("Using pyhf")
            if all([s == 0 for s in nsig]):
                logger.warning("All signals are empty")
                return None
            ulcomputer, combinations = self.getPyhfComputer( nsig )
            if ulcomputer.nWS == 1:
                ret = ulcomputer.ulSigma(expected=expected)
                ret = ret/self.globalInfo.lumi
                logger.debug("pyhf upper limit : {}".format(ret))
                return ret
            else:
                # Looking for the best combination
                logger.debug('self.bestCB : {}'.format(self.bestCB))
                if self.bestCB is None:
                    logger.debug("Performing best expected combination")
                    ulMin = float('+inf')
                    for i_ws in range(ulcomputer.nWS):
                        ul = ulcomputer.ulSigma(expected=True, workspace_index=i_ws)
                        if ul is None:
                            continue
                        if ul < ulMin:
                            ulMin = ul
                            i_best = i_ws
                    self.bestCB = combinations[i_best] # store the best combination for later
                    logger.debug('Best combination : %s' % self.bestCB)
                # Computing upper limit using best combination
                if expected:
                    try:
                        ret = ulMin/self.globalInfo.lumi
                    except NameError:
                        ret = ulcomputer.ulSigma(expected=True, workspace_index=combinations.index(self.bestCB))
                        ret = ret/self.globalInfo.lumi
                else:
                    ret = ulcomputer.ulSigma(expected=False, workspace_index=combinations.index(self.bestCB))
                    ret = ret/self.globalInfo.lumi
                logger.debug("pyhf upper limit : {}".format(ret))
                return ret
        else:
            logger.error ( "no covariance matrix or json file given in globalInfo.txt for %s" % self.globalInfo.id )
            raise SModelSError( "no covariance matrix or json file given in globalInfo.txt for %s" % self.globalInfo.id )
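A hedged usage sketch; `expResult` stands for an instance of the class this method belongs to, and the signal yields are made up (they must follow the ordering of globalInfo.datasetOrder):

nsig = [0.3, 1.2, 0.0]  # hypothetical per-signal-region yields
ul = expResult.getCombinedUpperLimitFor(nsig, expected=False, deltas_rel=0.2)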
Esempio n. 45
0
 def __init__(self, sec):
     self.sec = sec
     if type ( sec ) != int:
         logger.warning ( "timeout set to a non-integral number of seconds."
                          " Will try to cast to integer." )
         self.sec = int ( sec )
Esempio n. 46
0
 def _checkMaxOrder(self, maxOrder):
     smaxorder = {"LO": 0, "NLO": 1, "NLL": 2}
     if maxOrder in smaxorder.keys():
         logger.warning("maxorder given as string, please supply integer.")
         maxOrder = smaxorder[maxOrder]
     return maxOrder
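Both branches in action, assuming `tool` is an instance of the class this helper belongs to:

maxOrder = tool._checkMaxOrder("NLL")  # logs a warning and returns 2
maxOrder = tool._checkMaxOrder(2)      # integers pass through unchanged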
Esempio n. 47
0
    def getKfactorsFor( self, pIDs, slhafile, pdf='cteq' ):
        """
        Read the NLLfast grid and return a pair of k-factors (NLO and NLL) for
        the given PID pair; the slhafile is used to obtain the SUSY spectrum.

        :returns: (kNLO, kNLL), or (None, None) if NLLfast does not contain
                  the process
        
        """
        if not os.path.isfile(slhafile):
            logger.error("SLHA file %s not found", slhafile)
            return False

        energy = str(int(self.sqrts)) + 'TeV'
        # Get process name (in NLLfast notation)
        process = self._getProcessName(pIDs)
        if not process:
            # Return k-factors = None, if NLLfast does not have the process
            return (None, None)

        # Obtain relevant masses
        readfile = pyslha.readSLHAFile(slhafile)
        masses=readfile.blocks['MASS']
        check_pids=squarks+gluinos+third
        for check in check_pids:
            if not check in masses.entries:
                logger.error ( "cannot compute k factor for pdgid %d: " \
                  " no particle mass given. will set mass to inf." % check )
                masses.entries[check]=1.e10

        gluinomass = abs(masses.entries[1000021])
        squarkmass = sum([abs(masses.entries[pid])
                          for pid in squarks]) / 8.
        pid1, pid2 = sorted(pIDs)
        if pid1 in antisquarks and pid2 in squarks:
            squarkmass = (abs(masses.entries[abs(pid1)]) +
                          abs(masses.entries[pid2])) / 2.
        elif pid1 in squarks and pid2 in squarks:
            squarkmass = (abs(masses.entries[pid1]) + abs(masses.entries[pid2])) / 2.
        elif abs(pid1) == pid2 and pid2 in third:
            squarkmass = abs(masses.entries[abs(pid1)])

        self.checkInstallation()
        nll_output = self._compute ( energy, pIDs, pdf, squarkmass, gluinomass )

        # If run was successful, return k-factors:
        if "K_NLO" in nll_output:
            # NLLfast ran ok, try to get the k-factors
            kFacs = self._getKfactorsFrom(nll_output)
            if not kFacs or min(kFacs) <= 0.:
                logger.warning("Error obtaining k-factors")
                return (None, None)
            else:
                return kFacs
        # If run was not successful, check for decoupling error messages:
        elif not "too low/high" in nll_output.lower():
            logger.warning("Error running NLLfast")
            return (None, None)

        # Check for decoupling cases with a decoupling grid (only for sb and gg)
        doDecoupling = False
        if "too low/high gluino" in nll_output.lower():        
            if gluinomass > 500. and process == 'sb': 
                doDecoupling = True
                dcpl_mass = gluinomass
        elif "too low/high squark" in nll_output.lower():
            if squarkmass > 500. and process == 'gg':
                doDecoupling = True
                dcpl_mass = squarkmass

        # If the process does not have decoupled grids, return None:
        if not doDecoupling:
            logger.warning("Masses of (q,g)=(%s,%s) out of NLLfast grid for %s, %s" % ( squarkmass, gluinomass, process, energy ))
            return (None, None)

        # Obtain k-factors from the NLLfast decoupled grid
        kfacs = self._getDecoupledKfactors(process,energy,pdf,min(gluinomass,squarkmass))
        # Decoupling limit is satisfied, do not interpolate
        if not kfacs:
            logger.warning("Error obtaining k-factors from the NLLfast decoupled grid for " + process)
            return (None, None)
        elif dcpl_mass/min(gluinomass,squarkmass) > 10.:    
            return kfacs
        # Interpolate between the non-decoupled and decoupled grids
        else:
            kFacsVector = [[10.*min(gluinomass,squarkmass),kfacs]]  #First point for interpolation (decoupled grid)
            kfacs = None        
            while not kfacs and dcpl_mass > 500.:
                dcpl_mass -= 100.  # Reduce decoupled mass, until NLLfast produces results
                if process == 'sb': nllinput = (process, pdf, squarkmass, dcpl_mass)
                else:  nllinput = (process, pdf, dcpl_mass, gluinomass)
                nll_output = self._runForDecoupled ( energy, nllinput )
                kfacs = self._getKfactorsFrom(nll_output)        
            kFacsVector.append([dcpl_mass, kfacs]) #Second point for interpolation (non-decoupled grid)

        if len(kFacsVector) < 2:
            logger.warning("Not enough points for interpolation in the decoupling "
                           "limit")
            return (None, None)
        else:
            # Interpolate k-factors
            kFacs = self._interpolateKfactors(kFacsVector,
                            max(squarkmass, gluinomass))
        return kFacs
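_interpolateKfactors itself is not shown in these examples; a plausible linear sketch of the step it performs, assuming kFacsVector holds [mass, (kNLO, kNLL)] pairs as built above:

def interpolateKfactors(kFacsVector, mass):
    (m1, k1), (m2, k2) = kFacsVector
    w = (mass - m1) / (m2 - m1)  # linear weight between the two grid points
    return tuple(a + w * (b - a) for a, b in zip(k1, k2))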
Esempio n. 48
0
 def __init__(self, sec):
     self.sec = sec
     if type(sec) != int:
         logger.warning("timeout set to a non-integral number of seconds."
                        " Will try to cast to integer.")
         self.sec = int(sec)
Esempio n. 49
0
    def loadParameters(self):
        """
        Reads the parameters from the plotting parameter file.
        """
        
        logger.info("Reading parameters from %s ..." %(self.parameterFile))        
        
        parFile = self.parameterFile
        import imp
        
        try:
            with open(self.parameterFile, 'rb') as fParameters: ## imports parameter file
                parameters = imp.load_module("parameters",fParameters,self.parameterFile,('.py', 'rb', imp.PY_SOURCE))
        except (IOError, ValueError, ImportError, SyntaxError) as e:
            logger.error("Error loading parameters file %s: %s" % (self.parameterFile, e))
            return False
         
        if not hasattr(parameters, 'slha_hover_information'):
            logger.debug("slha_hover_information dictionary was not found in %s. SLHA data will not be included in info box." %parFile)
            self.slha_hover_information = {}
        else:
            self.slha_hover_information = parameters.slha_hover_information
    
        if not hasattr(parameters, 'ctau_hover_information'):
            logger.debug("ctau_hover_information dictionary was not found in %s. Lifetime data will not be included in info box." %parFile)
            self.ctau_hover_information = {}
        else:
            self.ctau_hover_information = parameters.ctau_hover_information
    
        if not hasattr(parameters, 'BR_hover_information'):
            logger.debug("BR_hover_information dictionary was not found in %s. Branching ratio data will not be included in info box." %parFile)
            self.BR_hover_information = {}
        else:
            self.BR_hover_information = parameters.BR_hover_information
    
        if not hasattr(parameters, 'SModelS_hover_information'):
            logger.debug("SModelS_hover_information dictionary was not found in %s. SModelS data will not be included in info box." %parFile)
            self.SModelS_hover_information = {}
        else:
            self.SModelS_hover_information = list(set(parameters.SModelS_hover_information))
    
        if not hasattr(parameters, 'plot_data'):
            logger.debug("plot_data list was not found in %s. All points will be plotted" %parFile)
            self.plot_data = ['all']
        else:
            self.plot_data = list(set(parameters.plot_data))
    
        if not hasattr(parameters, 'variable_x'):
            raise SModelSError("variable_x was not found in %s. Please define the variable to be plotted in the x-axis." %parFile)
        else:
            self.variable_x = parameters.variable_x
        if not hasattr(parameters, 'variable_y'):
            raise SModelSError("variable_y was not found in %s. Please define the variable to be plotted in the y-axis." %parFile)
        else:
            self.variable_y = parameters.variable_y
        if not hasattr(parameters, 'plot_list'):
            raise SModelSError("plot_list was not found in %s. Please define the list of plots to be plotted." %parFile)
        else:
            self.plot_list = list(set(parameters.plot_list))
            
        if not hasattr(parameters,'BR_get_top'):
            logger.debug("BR_get_top not found in %s. Will include all decay channels")
            self.BR_get_top = 'all'
        else:
            self.BR_get_top = parameters.BR_get_top

        if not hasattr(parameters,'plot_title'):
            logger.warning("plot_title not defined in %s. Using default title" %parFile)
            self.plot_title = 'interactive-plots'
        else:
            self.plot_title = parameters.plot_title
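A minimal parameters-file sketch; the attribute names come from the checks above, while all values are placeholders:

# parameters.py (placeholder values)
variable_x = 'm_mother'
variable_y = 'm_lsp'
plot_list = ['upper limit']
plot_data = ['all']
plot_title = 'my-scan'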
Esempio n. 50
0
def _doCluster(elements, txdata, maxDist):
    """
    Cluster algorithm to cluster elements.

    :parameter elements: list of all elements to be clustered
    :parameter txdata: TxNameData object to be used for computing distances in UL space
    :parameter maxDist: maximum mass distance for clustering two elements

    :returns: a list of ElementCluster objects containing the elements
    belonging to the cluster
    """
    # First build the element:mass, element:position in UL space
    # and element:maxWeight (in fb) dictionaries
    #(Combine elements with identical masses)
    massMap = {}
    posMap = {}
    weightMap = {}
    for iel, el in enumerate(elements):
        if not el.getMasses() in massMap.values():
            massMap[iel] = el.getMasses()
            posMap[iel] = massPosition(massMap[iel], txdata)
            weightMap[iel] = el.weight.getMaxXsec() / fb
        else:
            j = list(massMap.keys())[list(massMap.values()).index(el.getMasses())]
            weightMap[j] += el.weight.getMaxXsec() / fb

    # Start with maximal clusters
    clusterList = []
    for iel in posMap:
        indices = [iel]
        for jel in posMap:
            if distance(posMap[iel], posMap[jel]) <= maxDist:
                indices.append(jel)
        indexCluster = IndexCluster(massMap, posMap, weightMap, set(indices),txdata)
        #Ignore clusters whose average mass falls outside the grid:
        if indexCluster.avgPosition:
            clusterList.append(indexCluster)

    #Split the maximal clusters until all elements inside each cluster are
    #less than maxDist apart from each other and the cluster average position
    #is less than maxDist apart from all elements
    finalClusters = []
    newClusters = True
    while newClusters:
        newClusters = []
        for indexCluster in clusterList:
            # cluster is good
            if indexCluster._getMaxInternalDist() < maxDist:
                if not indexCluster in finalClusters:
                    finalClusters.append(indexCluster)
                continue
            # Distance to cluster center (average)
            distAvg = indexCluster._getDistanceTo(indexCluster.avgPosition)
            #Loop over cluster elements and if element distance or cluster
            #average distance falls outside the cluster, remove element
            for iel in indexCluster:
                dist = indexCluster._getDistanceTo(iel)
                if max(dist, distAvg) > maxDist:
                    newcluster = indexCluster.copy()
                    newcluster.remove(iel)
                    if not newcluster in newClusters:
                        #Ignore clusters whose average mass falls outside the grid:
                        if newcluster.avgPosition:
                            newClusters.append(newcluster)

        clusterList = newClusters
        # Check for oversized list of indexCluster (too time consuming)
        if len(clusterList) > 100:
            logger.warning("ElementCluster failed, using unclustered masses")
            finalClusters = []
            clusterList = []

    # finalClusters = finalClusters + clusterList
    # Add clusters of individual masses (just to be safe)
    for iel in massMap:
        finalClusters.append(IndexCluster(massMap, posMap, weightMap,
                                           set([iel])))

    # Clean up clusters (remove redundant clusters)
    for ic, clusterA in enumerate(finalClusters):
        if clusterA is None:
            continue
        for jc, clusterB in enumerate(finalClusters):
            if clusterB is None:
                continue
            if ic != jc and clusterB.indices.issubset(clusterA.indices):
                finalClusters[jc] = None
    while finalClusters.count(None) > 0:
        finalClusters.remove(None)

    # Transform index clusters to element clusters:
    clusterList = []
    for indexCluster in finalClusters:
        cluster = ElementCluster()
        masses = [massMap[iel] for iel in indexCluster]
        for el in elements:
            if el.getMasses() in masses:
                cluster.elements.append(el)
        clusterList.append(cluster)

    return clusterList
Esempio n. 51
0
    def getUpperLimitFor(self, element=None, expected=False, txnames=None,
                         compute=False, alpha=0.05, deltas_rel=0.2):
        """
        Returns the upper limit for a given element (or mass) and txname. If
        the dataset holds an EM-map result, the upper limit is independent of
        the input txname or mass.
        For UL results if an Element object is given the corresponding upper limit
        will be rescaled according to the lifetimes of the element intermediate particles.
        On the other hand, if a mass is given, no rescaling will be applied.

        :param txnames: TxName object or txname string (only for UL-type results)
        :param element: Element object or mass array with units (only for UL-type results)
        :param alpha: Can be used to change the C.L. value. The default value is 0.05
                      (= 95% C.L.) (only for  efficiency-map results)
        :param deltas_rel: relative uncertainty in signal (float). Default value is 20%.
        :param expected: Compute expected limit, i.e. Nobserved = NexpectedBG
                         (only for efficiency-map results)
        :param compute: If True, the upper limit will be computed
                        from expected and observed number of events.
                        If False, the value listed in the database will be used
                        instead.
        :return: upper limit (Unum object)
        """


        if self.getType() == 'efficiencyMap':
            upperLimit =  self.getSRUpperLimit(expected=expected,alpha=alpha,compute=compute,
                                               deltas_rel=deltas_rel)
            if (upperLimit/fb).normalize()._unit:
                logger.error("Upper limit defined with wrong units for %s and %s"
                              %(self.globalInfo.id,self.getID()))
                return False
            else:
                return upperLimit

        elif self.getType() == 'upperLimit':
            if not txnames or not element:
                logger.error("A TxName and mass array must be defined when \
                             computing ULs for upper-limit results.")
                return False
            elif isinstance(txnames,list):
                if len(txnames) != 1:
                    logger.error("txnames must be a TxName object, a string or a list with a single Txname object")
                    return False
                else:
                    txname = txnames[0]
            else:
                txname = txnames

            if not isinstance(txname, txnameObj.TxName) and \
            not isinstance(txname, str):
                logger.error("txname must be a TxName object or a string")
                return False

            if not isinstance(element, list) and not isinstance(element,Element):
                logger.error("Element must be an element object or a mass array")
                return False

            upperLimit = None
            for tx in self.txnameList:
                if tx == txname or tx.txName == txname:
                    upperLimit = tx.getULFor(element,expected)

            return upperLimit

        else:
            logger.warning("Unkown data type: %s. Data will be ignored.",
                           self.getType())
            return None
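A hedged usage sketch for the UL-type branch; `dataset` stands for an instance of this class and the mass array values are made up:

masses = [[675.*GeV, 200.*GeV], [675.*GeV, 200.*GeV]]  # hypothetical mass array
ul = dataset.getUpperLimitFor(element=masses, txnames='T1')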
Esempio n. 52
0
    def getUpperLimitFor(self, mass=None, expected=False, txnames=None,
                         compute=False, alpha=0.05, deltas_rel=0.2):
        """
        Returns the upper limit for a given mass and txname. If
        the dataset holds an EM-map result, the upper limit is independent of
        the input txname or mass.

        :param txnames: TxName object or txname string (only for UL-type results)
        :param mass: Mass array with units (only for UL-type results)        
        :param alpha: Can be used to change the C.L. value. The default value is 0.05
                      (= 95% C.L.) (only for  efficiency-map results)
        :param deltas_rel: relative uncertainty in signal (float). Default value is 20%.                      
        :param expected: Compute expected limit, i.e. Nobserved = NexpectedBG
                         (only for efficiency-map results)
        :param compute: If True, the upper limit will be computed
                        from expected and observed number of events.
                        If False, the value listed in the database will be used
                        instead.
        :return: upper limit (Unum object)
        """
        
        
        if self.getType() == 'efficiencyMap':            
            upperLimit =  self.getSRUpperLimit(expected=expected,alpha=alpha,compute=compute,
                                               deltas_rel=deltas_rel)
            if (upperLimit/fb).normalize()._unit:
                logger.error("Upper limit defined with wrong units for %s and %s"
                              %(self.globalInfo.id,self.getID()))
                return False
            else:
                return upperLimit
            
            
        elif self.getType() == 'upperLimit':            
            if not txnames or not mass:
                logger.error("A TxName and mass array must be defined when \
                             computing ULs for upper-limit results.")
                return False
            elif isinstance(txnames,list):
                if len(txnames) != 1:
                    logger.error("txnames must be a TxName object, a string or a list with a single Txname object")
                    return False
                else:
                    txname = txnames[0]
            else:
                txname = txnames
                
            if not isinstance(txname, txnameObj.TxName) and \
            not isinstance(txname, str):
                logger.error("txname must be a TxName object or a string")
                return False
            if not isinstance(mass, list):
                logger.error("mass must be a mass array")
                return False

            upperLimit = None
            for tx in self.txnameList:
                if tx == txname or tx.txName == txname:
                    if expected:
                        if not tx.txnameDataExp:
                            upperLimit = None
                        else:
                            upperLimit = tx.txnameDataExp.getValueFor(mass)
                    else:
                        upperLimit = tx.txnameData.getValueFor(mass)

            return upperLimit
        else:
            logger.warning("Unkown data type: %s. Data will be ignored.",
                           self.getType())
            return None        
Esempio n. 53
0
    def loadParameters(self):
        """
        Reads the parameters from the plotting parameter file.
        """

        logger.info("Reading parameters from %s ..." % (self.parameterFile))

        parFile = self.parameterFile
        import imp

        try:
            with open(self.parameterFile,
                      'rb') as fParameters:  ## imports parameter file
                parameters = imp.load_module("parameters", fParameters,
                                             self.parameterFile,
                                             ('.py', 'rb', imp.PY_SOURCE))
        # except Exception as e:
        except (IOError, ValueError, ImportError, SyntaxError) as e:
            logger.error("Error loading parameters file %s: %s" %
                         (self.parameterFile, e))
            raise SModelSError()

        if not hasattr(parameters, 'slha_hover_information'):
            logger.debug(
                "slha_hover_information dictionary was not found in %s. SLHA data will not be included in info box."
                % parFile)
            self.slha_hover_information = {}
        else:
            self.slha_hover_information = parameters.slha_hover_information

        if not hasattr(parameters, 'ctau_hover_information'):
            logger.debug(
                "ctau_hover_information dictionary was not found in %s. Lifetime data will not be included in info box."
                % parFile)
            self.ctau_hover_information = {}
        else:
            self.ctau_hover_information = parameters.ctau_hover_information

        if not hasattr(parameters, 'BR_hover_information'):
            logger.debug(
                "BR_hover_information dictionary was not found in %s. Branching ratio data will not be included in info box."
                % parFile)
            self.BR_hover_information = {}
        else:
            self.BR_hover_information = parameters.BR_hover_information

        if not hasattr(parameters, 'SModelS_hover_information'):
            logger.debug(
                "SModelS_hover_information dictionary was not found in %s. SModelS data will not be included in info box."
                % parFile)
            self.SModelS_hover_information = {}
        else:
            self.SModelS_hover_information = list(
                set(parameters.SModelS_hover_information))

        if not hasattr(parameters, 'plot_data'):
            logger.debug(
                "plot_data list was not found in %s. All points will be plotted"
                % parFile)
            self.plot_data = ['all']
        else:
            self.plot_data = list(set(parameters.plot_data))

        if not hasattr(parameters, 'variable_x'):
            raise SModelSError(
                "variable_x was not found in %s. Please define the variable to be plotted in the x-axis."
                % parFile)
        else:
            self.variable_x = parameters.variable_x
        if not hasattr(parameters, 'variable_y'):
            raise SModelSError(
                "variable_y was not found in %s. Please define the variable to be plotted in the y-axis."
                % parFile)
        else:
            self.variable_y = parameters.variable_y
        if not hasattr(parameters, 'plot_list'):
            raise SModelSError(
                "plot_list was not found in %s. Please define the list of plots to be plotted."
                % parFile)
        else:
            self.plot_list = list(set(parameters.plot_list))

        if not hasattr(parameters, 'BR_get_top'):
            logger.debug(
                "BR_get_top not found in %s. Will include all decay channels")
            self.BR_get_top = 'all'
        else:
            self.BR_get_top = parameters.BR_get_top

        if not hasattr(parameters, 'plot_title'):
            logger.warning(
                "plot_title not defined in %s. Using default title" % parFile)
            self.plot_title = 'interactive-plots'
        else:
            self.plot_title = parameters.plot_title
Esempio n. 54
0
 def _checkSqrts(self, sqrts):
     if isinstance(sqrts, (int, float)):
         logger.warning("sqrt(s) given as a scalar, will add TeV as unit.")
         sqrts = float(sqrts) * TeV
     return sqrts
Esempio n. 55
0
    def setDecays(self,decaysDict,promptWidth,stableWidth,erasePrompt):

        allPDGs = list(set(self.getValuesFor('pdg')))
        evenPDGs,oddPDGs = self.getEvenOddList()

        for particle in self.BSMparticles:
            if isinstance(particle,MultiParticle):
                continue

            if not hasattr(particle,'pdg') or not hasattr(particle,'Z2parity'):
                raise SModelSError("PDG and/or Z2-parity for particle %s has not been defined" %particle.label)

            pdg = particle.pdg
            particle.decays = []
            if pdg in decaysDict:
                particleData = decaysDict[pdg]
                chargeConj = 1
            elif -pdg in decaysDict:
                particleData = decaysDict[-pdg]
                chargeConj = -1
            else:
                logger.error("Decay information for particle %i could not be found" %pdg)
                raise SModelSError()

            particle.totalwidth = abs(particleData.totalwidth)*GeV
            if particle.totalwidth < stableWidth:
                particle.totalwidth = 0.*GeV  #Treat particle as stable
                logger.debug("Particle %s has width below the threshold and will be assumed as stable" %particle.pdg)
                continue

            if particle.totalwidth > promptWidth:
                particle.totalwidth = float('inf')*GeV  #Treat particle as prompt
                logger.debug("Particle %s has width above the threshold and will be assumed as prompt." %particle.pdg)
                if erasePrompt and particle.Z2parity == -1:
                    logger.debug("Erasing quantum numbers of (prompt) particle %s." %particle.pdg)
                    for attr in erasePrompt:
                        delattr(particle,attr)
            else:
                particle.decays.append(None) #Include possibility for particle being long-lived (non-prompt)

            for decay in particleData.decays:
                pids = decay.ids
                missingIDs = set(pids).difference(set(allPDGs))
                if missingIDs:
                    logger.info("Particle(s) %s is not defined within model. Decay %s will be ignored" %(missingIDs,decay))
                    continue
                oddPids = [pid for pid in decay.ids if abs(pid) in oddPDGs]
                evenPids = [pid for pid in decay.ids if abs(pid) in evenPDGs]
                if len(oddPids) != 1 or len(evenPids+oddPids) != len(decay.ids):
                    logger.debug("Decay %i -> %s is not of the form Z2-odd -> Z2-odd + [Z2-even particles] and will be ignored" %(pdg,pids))
                    continue

                #Conjugated decays if needed
                #(if pid*chargeConj is not in model, assume the particle is its own anti-particle)
                decayIDs = [pid*chargeConj if pid*chargeConj in allPDGs else pid for pid in decay.ids]
                newDecay = pyslha.Decay(br=decay.br,nda=decay.nda,parentid=decay.parentid,ids=decayIDs)

                #Convert PDGs to particle objects
                #(use a separate loop variable, so the outer pdg is not shadowed):
                daughters = []
                for dpdg in newDecay.ids:
                    daughter = self.getParticlesWith(pdg=dpdg)
                    if not daughter:
                        raise SModelSError("Particle with PDG = %i was not found in model. Check the model definitions." %dpdg)
                    elif len(daughter) > 1:
                        raise SModelSError("Multiple particles defined with PDG = %i. PDG ids must be unique." %dpdg)
                    else:
                        daughter = daughter[0]
                    daughters.append(daughter)
                oddParticles = [p for p in daughters if p.Z2parity == -1]
                evenParticles = ParticleList([p for p in daughters if p.Z2parity == 1])
                newDecay.oddParticles = oddParticles
                newDecay.evenParticles = evenParticles
                particle.decays.append(newDecay)

        #Check if all unstable particles have decay channels defined:
        for p in self.BSMparticles:
            if p.totalwidth < stableWidth:
                continue
            ndecays = len([dec for dec in p.decays if dec is not None])
            if ndecays == 0:
                if p.Z2parity == -1:
                    logger.warning("No valid decay found for %s. It will be considered stable." %p)
                p.totalwidth = 0.*GeV
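The prompt/stable classification above reduces to two width thresholds; a sketch with assumed values:

width = 1e-30                            # hypothetical width in GeV
stableWidth, promptWidth = 1e-25, 1e-8   # assumed thresholds in GeV
if width < stableWidth:
    label = "stable"        # totalwidth is set to 0
elif width > promptWidth:
    label = "prompt"        # totalwidth is set to inf
else:
    label = "long-lived"    # a None decay is appended as the non-prompt option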
Esempio n. 56
0
def doCluster(elements, dataset, maxDist):
    """
    Cluster algorithm to cluster elements.

    :parameter elements: list of all elements to be clustered
    :parameter dataset: Dataset object to be used when computing distances in upper limit space
    :parameter maxDist: maximum distance for clustering two elements

    :returns: a list of ElementCluster objects containing the elements
              belonging to the cluster
    """

    #Get average elements:
    averageElements = groupElements(elements,dataset)

    #Index average elements:
    elementList = sorted(averageElements, key = lambda el: el._upperLimit)
    for iel,el in enumerate(elementList):
        el._index = iel

    #Pre-compute all necessary distances:
    distanceMatrix = np.zeros((len(elementList),len(elementList)))
    for iel,elA in enumerate(elementList):
        for jel,elB in enumerate(elementList):
            if jel <= iel:
                continue
            distanceMatrix[iel,jel] = relativeDistance(elA, elB, dataset)
    distanceMatrix = distanceMatrix + distanceMatrix.T

    #Start building maximal clusters
    clusterList = []
    for el in elementList:
        cluster = ElementCluster([],dataset,distanceMatrix)
        for elB in elementList:
            if distanceMatrix[el._index,elB._index] <= maxDist:
                cluster.add(elB)
        if not cluster.elements:
            continue
        if cluster.averageElement()._upperLimit is None:
            continue
        if not cluster in clusterList:
            clusterList.append(cluster)

    #Split the maximal clusters until all elements inside each cluster are
    #less than maxDist apart from each other and the cluster average position
    #is less than maxDist apart from all elements
    finalClusters = []
    while clusterList:
        newClusters = []
        for cluster in clusterList:
            #Check if maximal internal distance is below maxDist
            isConsistent = cluster.isConsistent(maxDist)
            if isConsistent and cluster.maxInternalDist < maxDist:
                if not cluster in finalClusters:
                    finalClusters.append(cluster)

            #Cluster violates maxDist:
            else:
                #Loop over cluster elements and if element distance
                #falls outside the cluster, remove element
                for el in cluster:
                    if cluster.getDistanceTo(el) > maxDist or not isConsistent:
                        newcluster = cluster.copy()
                        newcluster.remove(el)
                        if newcluster.averageElement()._upperLimit is None:
                            continue
                        if newcluster in newClusters:
                            continue
                        newClusters.append(newcluster)

        clusterList = newClusters
        # Check for an oversized cluster list (too time consuming)
        if len(clusterList) > 100:
            logger.warning("ElementCluster failed, using unclustered masses")
            finalClusters = []
            clusterList = []

    # finalClusters = finalClusters + clusterList
    # Add clusters of individual masses (just to be safe)
    for el in elementList:
        finalClusters.append(ElementCluster([el],dataset,distanceMatrix))

    # Clean up clusters (remove redundant clusters)
    for ic, clusterA in enumerate(finalClusters):
        if clusterA is None:
            continue
        for jc, clusterB in enumerate(finalClusters):
            if clusterB is None:
                continue
            if ic != jc and set(clusterB.indices()).issubset(set(clusterA.indices())):
                finalClusters[jc] = None
    while finalClusters.count(None) > 0:
        finalClusters.remove(None)

    #Replace average elements by the original elements:
    for cluster in finalClusters:
        originalElements = []
        for avgEl in cluster.elements[:]:
            originalElements += avgEl.elements[:]
        cluster.elements = originalElements[:]

    return finalClusters
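The distance matrix above is filled only in its upper triangle and then symmetrized; a minimal numpy illustration:

import numpy as np
d = np.zeros((3, 3))
d[0, 1], d[0, 2], d[1, 2] = 0.1, 0.4, 0.2  # upper triangle only
d = d + d.T  # symmetrize, exactly as done for distanceMatrix above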
Esempio n. 57
0
def equalObjs(obj1, obj2, allowedDiff, ignore=[], where=None):
    """
    Compare two objects.
    The numerical values are compared up to the precision defined by allowedDiff.
 
    :param obj1: First python object to be compared
    :param obj2: Second python object to be compared
    :param allowedDiff: Allowed % difference between two numerical values
    :param ignore: List of keys to be ignored
    :param where: keep track of where we are, for easier debugging.
    :return: True/False
    """
    if type(obj1) in [float, int] and type(obj2) in [float, int]:
        obj1, obj2 = float(obj1), float(obj2)

    if type(obj1) != type(obj2):
        logger.warning("Data types differ (%s,%s)" % (type(obj1), type(obj2)))
        return False

    if isinstance(obj1, unum.Unum):
        if obj1 == obj2:
            return True
        diff = 2. * abs(obj1 - obj2) / abs(obj1 + obj2)
        return diff.asNumber() < allowedDiff
    elif isinstance(obj1, float):
        if obj1 == obj2:
            return True
        diff = 2. * abs(obj1 - obj2) / abs(obj1 + obj2)
        return diff < allowedDiff
    elif isinstance(obj1, str):
        return obj1 == obj2
    elif isinstance(obj1, dict):
        for key in obj1:
            if key in ignore: continue
            if not key in obj2:
                logger.warning("Key %s missing" % key)
                return False
            if not equalObjs(
                    obj1[key], obj2[key], allowedDiff, ignore=ignore,
                    where=key):
                logger.warning('Objects differ in %s:\n   %s\n and\n   %s' %
                               (where, str(obj1[key]), str(obj2[key])))
                #s1,s2 = str(obj1[key]),str(obj2[key])
                #if False: # len(s1) + len(s2) > 200:
                #    logger.warning ( "The values are too long to print." )
                #else:
                #    logger.warning( 'The values are: >>%s<< (this run) versus >>%s<< (default)'%\
                #                ( s1[:20],s2[:20] ) )
                return False
    elif isinstance(obj1, list):
        if len(obj1) != len(obj2):
            logger.warning('Lists differ in length:\n   %i (this run)\n and\n   %i (default)' %\
                                (len(obj1),len(obj2)))
            return False
        for ival, val in enumerate(obj1):
            if not equalObjs(val, obj2[ival], allowedDiff):
                logger.warning('Lists differ:\n   %s (this run)\n and\n   %s (default)' %\
                                (str(val),str(obj2[ival])))
                return False
    else:
        return obj1 == obj2

    return True
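The float comparison uses a symmetric relative difference; a worked one-liner:

a, b = 100.0, 103.0
diff = 2. * abs(a - b) / abs(a + b)  # about 0.0296
print(diff < 0.05)  # True for allowedDiff = 0.05, i.e. values agree within 5%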
Esempio n. 58
0
def getKfactorsFor(pIDs, sqrts, slhafile, pdf='cteq'):
    """
    Read the NLLfast grid and return a pair of k-factors (NLO and NLL) for the
    given PID pair; the slhafile is used to obtain the SUSY spectrum.

    :returns: (kNLO, kNLL), or (None, None) if NLLfast does not contain
              the process
    
    """
    if not os.path.isfile(slhafile):
        logger.error("SLHA file %s not found", slhafile)
        return False

    # Get process name (in NLLfast notation)
    process = getProcessName(pIDs)
    if not process:
        # Return k-factors = None, if NLLfast does not have the process
        return (None, None)

    # Obtain relevant masses
    readfile = pyslha.readSLHAFile(slhafile)
    masses = readfile.blocks['MASS']
    check_pids = squarks + gluinos + third
    for check in check_pids:
        if not check in masses.entries:
            logger.error ( "cannot compute k factor for pdgid %d: " \
              " no particle mass given. will set mass to inf." % check )
            masses.entries[check] = 1.e10

    gluinomass = abs(masses.entries[1000021])
    squarkmass = sum([abs(masses.entries[pid]) for pid in squarks]) / 8.
    pid1, pid2 = sorted(pIDs)
    if pid1 in antisquarks and pid2 in squarks:
        squarkmass = (abs(masses.entries[abs(pid1)]) +
                      abs(masses.entries[pid2])) / 2.
    elif pid1 in squarks and pid2 in squarks:
        squarkmass = (abs(masses.entries[pid1]) +
                      abs(masses.entries[pid2])) / 2.
    elif abs(pid1) == pid2 and pid2 in third:
        squarkmass = abs(masses.entries[abs(pid1)])

    # Set up NLLfast run, the old way
    sqrtS = float(sqrts / TeV)
    energy = str(int(sqrtS)) + 'TeV'
    toolname = "nllfast%d" % int(sqrtS)
    box = toolBox.ToolBox()
    tool = box.get(toolname)
    if tool is None:
        logger.warning("No NLLfast data for sqrts = " + str(sqrts))
        return (None, None)
    nllpath = tool.installDirectory()
    tool.pathOfExecutable()
    tool.checkInstallation()
    if process == "st":
        nll_run = "./nllfast_" + energy + " %s %s %s" % \
                  (process, pdf, squarkmass)
    else:
        nll_run = "./nllfast_" + energy + " %s %s %s %s" % \
                  (process, pdf, squarkmass, gluinomass)

    # Run NLLfast
    nll_output = runNLLfast(nll_run, nllpath)

    # If run was successful, return k-factors:
    if "K_NLO" in nll_output:
        # NLLfast ran ok, try to get the k-factors
        kFacs = getKfactorsFrom(nll_output)
        if not kFacs or min(kFacs) <= 0.:
            logger.warning("Error obtaining k-factors")
            return (None, None)
        else:
            return kFacs
    # If run was not successful, check for decoupling error messages:
    elif not "too low/high" in nll_output.lower():
        logger.warning("Error running NLLfast")
        return (None, None)

    # Check for decoupling cases with a decoupling grid (only for sb and gg)
    doDecoupling = False
    if "too low/high gluino" in nll_output.lower():
        if gluinomass > 500. and process == 'sb':
            doDecoupling = True
            dcpl_mass = gluinomass
    elif "too low/high squark" in nll_output.lower():
        if squarkmass > 500. and process == 'gg':
            doDecoupling = True
            dcpl_mass = squarkmass

    # If the process does not have decoupled grids, return None:
    if not doDecoupling:
        logger.warning("Masses out of NLLfast grid for " + process)
        return (None, None)

    # Obtain k-factors from the NLLfast decoupled grid
    kfacs = getDecoupledKfactors(nllpath, process, energy, pdf,
                                 min(gluinomass, squarkmass))
    # Decoupling limit is satisfied, do not interpolate
    if not kfacs:
        logger.warning(
            "Error obtaining k-factors from the NLLfast decoupled grid for " +
            process)
        return (None, None)
    elif dcpl_mass / min(gluinomass, squarkmass) > 10.:
        return kfacs
    # Interpolate between the non-decoupled and decoupled grids
    else:
        kFacsVector = [[10. * min(gluinomass, squarkmass), kfacs]
                       ]  #First point for interpolation (decoupled grid)
        kfacs = None
        while not kfacs and dcpl_mass > 500.:
            dcpl_mass -= 100.  # Reduce decoupled mass, until NLLfast produces results
            if process == 'sb':
                nllinput = (process, pdf, squarkmass, dcpl_mass)
            else:
                nllinput = (process, pdf, dcpl_mass, gluinomass)
            nll_run = "./nllfast_" + energy + " %s %s %s %s" % nllinput
            nll_output = runNLLfast(nll_run, nllpath)
            kfacs = getKfactorsFrom(nll_output)
        kFacsVector.append(
            [dcpl_mass,
             kfacs])  #Second point for interpolation (non-decoupled grid)

    if len(kFacsVector) < 2:
        logger.warning("Not enough points for interpolation in the decoupling "
                       "limit")
        return (None, None)
    else:
        # Interpolate k-factors
        kFacs = interpolateKfactors(kFacsVector, max(squarkmass, gluinomass))
    return kFacs
Esempio n. 59
0
    def ulSigma (self, expected=False, workspace_index=None):
        """
        Compute the upper limit on the signal strength modifier with:
            - by default, the combination of the workspaces contained in self.workspaces
            - if workspace_index is specified, self.workspace[workspace_index] (useful for computation of the best upper limit)

        :param expected:  - if set to `True`: uses expected SM backgrounds as signals
                          - else: uses `self.nsignals`
        :param workspace_index: - if different from `None`: index of the workspace to use for upper limit
                          - else: all workspaces are combined
        :return: the upper limit at `self.cl` level (0.95 by default)
        """
        startUL = time.time()
        logger.debug("Calling ulSigma")
        if self.data.errorFlag or self.workspaces is None: # For now, this flag can only be turned on by PyhfData.checkConsistency
            return None
        if self.nWS == 1:
            if self.zeroSignalsFlag[0]:
                logger.warning("There is only one workspace but all signals are zeroes")
                return None
        else:
            if workspace_index is None:
                logger.error("There are several workspaces but no workspace index was provided")
                return None
            elif self.zeroSignalsFlag[workspace_index]:
                logger.debug("Workspace number %d has zero signals" % workspace_index)
                return None
        def updateWorkspace():
            if self.nWS == 1:
                return self.workspaces[0]
            else:
                return self.workspaces[workspace_index]
        workspace = updateWorkspace()
        stat = "qtilde" # by default; may be switched to "q" by the NaN handling below
        def root_func(mu):
            # Same modifier settings as those used when running the 'pyhf cls' command line
            msettings = {'normsys': {'interpcode': 'code4'}, 'histosys': {'interpcode': 'code4p'}}
            model = workspace.model(modifier_settings=msettings)
            start = time.time()
            args = { "return_expected": expected }
            pver = float ( pyhf.__version__[:3] )
            if pver < 0.6:
                args["qtilde"]=True
            else:
                args["test_stat"]=stat
            with np.testing.suppress_warnings() as sup:
                if pyhfinfo["backend"] == "numpy":
                    sup.filter ( RuntimeWarning, r'invalid value encountered in log')
                result = pyhf.infer.hypotest(mu, workspace.data(model), model, **args )
            end = time.time()
            logger.debug("Hypotest elapsed time : %1.4f secs" % (end - start))
            if expected:
                logger.debug("expected = {}, mu = {}, result = {}".format(expected, mu, result))
                try:
                    CLs = float(result[1].tolist())
                except TypeError:
                    CLs = float(result[1][0])
            else:
                logger.debug("expected = {}, mu = {}, result = {}".format(expected, mu, result))
                CLs = float(result)
            # logger.debug("Call of root_func(%f) -> %f" % (mu, 1.0 - CLs))
            return 1.0 - self.cl - CLs
        # Rescale the signals so that the root of root_func lies within [lo_mu, hi_mu]
        factor = 10.
        wereBothLarge = False
        wereBothTiny = False
        nattempts = 0
        nNan = 0
        lo_mu, hi_mu = .2, 5.
        while "mu is not in [lo_mu,hi_mu]":
            nattempts += 1
            if nNan > 5:
                logger.warning("encountered NaN 5 times while trying to determine the bounds for Brent bracketing; now trying the q test statistic instead of qtilde")
                stat = "q"
                nattempts = 0
            if nattempts > 10:
                logger.warning("tried 10 times to determine the bounds for Brent bracketing; aborting now.")
                return None
            # Compute root_func at both bracket ends once per iteration
            rt1 = root_func(lo_mu)
            rt10 = root_func(hi_mu)
            if rt1 < 0. and 0. < rt10: # Here's the real while condition
                break
            if self.alreadyBeenThere:
                factor = 1 + (factor-1)/2
                logger.debug("Diminishing rescaling factor")
            if np.isnan(rt1):
                nNan += 1
                self.rescale(factor)
                workspace = updateWorkspace()
                continue
            if np.isnan(rt10):
                nNan += 1
                self.rescale(1/factor)
                workspace = updateWorkspace()
                continue
            # Analyzing previous values of wereBoth***
            if rt10 < 0 and rt1 < 0 and wereBothLarge:
                factor = 1 + (factor-1)/2
                logger.debug("Diminishing rescaling factor")
            if rt10 > 0 and rt1 > 0 and wereBothTiny:
                factor = 1 + (factor-1)/2
                logger.debug("Diminishing rescaling factor")
            # Preparing next values of wereBoth***
            wereBothTiny = rt10 < 0 and rt1 < 0
            wereBothLarge = rt10 > 0 and rt1 > 0
            # Main rescaling code
            if rt10 < 0.:
                self.rescale(factor)
                workspace = updateWorkspace()
                continue
            if rt1 > 0.:
                self.rescale(1/factor)
                workspace = updateWorkspace()
                continue
        # Finding the root (Brent bracketing part)
        logger.debug("Final scale : %f" % self.scale)
        logger.debug("Starting brent bracketing")
        ul = optimize.brentq(root_func, lo_mu, hi_mu, rtol=1e-3, xtol=1e-3)
        endUL = time.time()
        logger.debug("ulSigma elpased time : %1.4f secs" % (endUL - startUL))
        return ul*self.scale # self.scale has been updated whithin self.rescale() method
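The bracketing loop above guarantees a sign change over [lo_mu, hi_mu], which is exactly what brentq requires; a toy illustration:

from scipy import optimize
f = lambda mu: mu - 1.7  # toy monotone function with its root at 1.7
ul = optimize.brentq(f, 0.2, 5.0, rtol=1e-3, xtol=1e-3)
print(ul)  # ~1.7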