Example #1
 def getPrettyName ( self, anaid ):
     """ get pretty name of ana id """
     if False: ## set to True when working with the old analysis IDs (returns them unchanged)
         return anaid
     if not hasattr ( self, "database" ):
         from smodels.experiment.databaseObj import Database
         ## NB: only the last assignment to dbname takes effect;
         ## the earlier paths are kept for reference
         dbname = "./original.pcl"
         dbname = "/home/walten/git/smodels-database"
         dbname = "/scratch-cbe/users/wolfgan.waltenberger/rundir/db31.pcl"
         self.database = Database ( dbname )
     from smodels.tools.physicsUnits import TeV
     from smodels_utils.helper.prettyDescriptions import prettyTexAnalysisName
     if ":" in anaid:
         anaid = anaid[:anaid.find(":")]
     ers = self.database.getExpResults ( analysisIDs = [ anaid ] )
     for er in ers:
         if hasattr ( er.globalInfo, "prettyName" ):
             pn = er.globalInfo.prettyName
             sqrts = er.globalInfo.sqrts.asNumber(TeV)
             ret = prettyTexAnalysisName ( pn, sqrts, dropEtmiss = True,
                                           collaboration = True, anaid = er.globalInfo.id )
             # for the 2020 paper to be consistent
             ret = ret.replace( "+ top tag", "stop" )
             ret = ret.replace( "+ 4 (1 b-)jets", "multijet" )
             # ret += " -> " + anaid
             return ret
     # print ( "found no pretty name", ers[0].globalInfo )
     return anaid
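
# Usage sketch (hypothetical): `plot` is assumed to be an instance of the
# class this method belongs to (cf. LlhdPlot in Example #18); a ":SR" suffix,
# if present, is stripped internally:
#     print ( plot.getPrettyName ( "CMS-SUS-19-006:SRA" ) )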
Example #2
def checkOneAnalysis():
    import argparse
    #import IPython
    argparser = argparse.ArgumentParser(
        description='print the correlations of one specific analysis')
    argparser.add_argument(
        '-d',
        '--dbpath',
        help='specify path to database [<rundir>/database.pcl]',
        type=str,
        default="<rundir>/database.pcl")
    argparser.add_argument('-a',
                           '--analysis',
                           help='print for <analysis>',
                           type=str,
                           default="CMS-SUS-19-006")
    args = argparser.parse_args()
    from smodels.experiment.databaseObj import Database
    print("[analysisCombiner] checking %s" % args.dbpath)
    db = Database(args.dbpath)
    results = db.getExpResults()
    info = getInfoFromAnaId(args.analysis, results)
    sqrts = info.sqrts
    collaboration = getExperimentName(info)
    prettyName = info.prettyName
    if args.analysis in moreComments:
        prettyName += " (%s)" % moreComments[args.analysis]
    # IPython.embed()
    print("correlations for %s: %s" % (args.analysis, prettyName))
    combs, nocombs = set(), set()
    pnames = {}
    for er in results:
        if er.globalInfo.sqrts != sqrts:
            continue
        if getExperimentName(er.globalInfo) != collaboration:
            continue
        Id = er.globalInfo.id
        Id = Id.replace("-eff", "").replace("-agg", "")
        if Id == "CMS-SUS-19-006-2":
            Id = "CMS-SUS-19-006"
        if Id == args.analysis:
            continue
        pname = er.globalInfo.prettyName
        if Id in moreComments:
            pname += " (%s)" % moreComments[Id]
        pnames[Id] = pname
        cc = canCombine(info, er.globalInfo, "aggressive")
        # cc = canCombine ( pred, er.globalInfo )
        if cc:
            combs.add(Id)
        else:
            nocombs.add(Id)
    print("can combine with: ")
    for Id in combs:
        pname = pnames[Id]
        print(" `- %s: %s" % (Id, pname))
    print("cannot combine with: ")
    for Id in nocombs:
        pname = pnames[Id]
        print(" `- %s: %s" % (Id, pname))
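
# Hypothetical driver so the example can run as a script; getInfoFromAnaId,
# getExperimentName, moreComments and canCombine are assumed to be defined
# elsewhere in the surrounding module:
#     if __name__ == "__main__":
#         checkOneAnalysis()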
Example #3
def collect():
    # imports needed by this snippet
    import sys, pickle
    import numpy as np
    from smodels.experiment.databaseObj import Database
    db = Database ( "./database.pcl" ) # , force_load = "txt" )
    ers = db.getExpResults ( dataTypes = [ "upperLimit" ], onlyWithExpected=True )
    allSs = []
    for er in ers:
        txnlist = er.datasets[0].txnameList
        for txn in txnlist:
            ct=0
            origdata = eval(txn.txnameData.origdata)
            for point in origdata:
                m = point[0]
                rul = point[1]
                ul,eul=None,None
                try:
                    ul = txn.getULFor(m, False )
                    eul = txn.getULFor(m, True )
                except Exception:
                    pass
                if ul is None or eul is None:
                    continue
                sigma = eul / 1.96 ## a 95% CL expected UL corresponds to ~1.96 sigma
                S = float ( ( ul - eul ) / sigma )
                if (S < -1.8 or S > 3.5) and ct<3:
                # if S > 10. and ct<3:
                    print ( )
                    print ( "S=%.2f for ul=%s, eul=%s sigma=%s" % ( S, ul, eul, sigma ) )
                    print ( "  at ", er.globalInfo.id, txn.txName, m, "rul", rul )
                    ct += 1
                allSs.append ( S )
                # print ( "->", er.globalInfo.id, txn, S )
    print ("all", min(allSs), np.mean(allSs), max(allSs) )
    f=open("ulSs.pcl","wb")
    pickle.dump(allSs,f)
    f.close()
    sys.exit()
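
# Minimal sketch for reading the pickled S values back, assuming only the
# ulSs.pcl file written by collect() above:
#     import pickle
#     with open ( "ulSs.pcl", "rb" ) as f:
#         allSs = pickle.load ( f )
#     print ( "read back %d S values" % len ( allSs ) )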
Example #4
 def __init__(self,
              walkerid,
              dbpath="./default.pcl",
              expected=False,
              select="all",
              do_combine=False):
     """
     :param do_combine: if True, then also use combined results,
                        both via simplified likelihoods and pyhf.
     """
     self.walkerid = walkerid
     self.do_combine = do_combine
     self.modifier = None
     self.select = select
     self.expected = expected
     self.rthreshold = 1.3  ## threshold for rmax
     if expected:
         from expResModifier import ExpResModifier
         self.modifier = ExpResModifier()
     force_load = None
     if dbpath.endswith(".pcl"):
         force_load = "pcl"
     ntries = 0
     while not os.path.exists(dbpath):
         ## give it a few tries
         ntries += 1
         time.sleep(ntries * 5)
         if ntries > 5:
             break
     self.database = Database(dbpath, force_load=force_load)
     self.fetchResults()
     self.combiner = Combiner(self.walkerid)
Example #5
 def testWritePickle(self):
     """ tests writing pickle file """
     binfile = "./.database.pcl"
     if os.path.exists ( binfile ):
         os.unlink ( binfile )
     self.logger.info ( "test writing pickle file" )
     writer = Database ( "./tinydb/", force_load = "txt" )
     writer.createBinaryFile ( binfile )
     reader = Database ( binfile, force_load="pcl" )
     os.unlink ( binfile )
     self.assertEqual( writer, reader )
Example #6
def getSRs():
    from smodels.experiment.databaseObj import Database
    db = Database ( "official" )
    ers = db.getExpResults( dataTypes=[ "efficiencyMap" ] )
    stats = []
    for er in ers:
        for ds in er.datasets:
            D = { "obsN": ds.dataInfo.observedN, "expectedBG": ds.dataInfo.expectedBG,
                  "bgError": ds.dataInfo.bgError, "upperLimit": ds.dataInfo.upperLimit,
                  "expectedUpperLimit": ds.dataInfo.expectedUpperLimit }
            stats.append ( D )
    return stats
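
# Sketch of a quick summary over the collected signal regions; the naive
# per-SR pull formula is illustrative and not part of the original code:
#     import numpy as np
#     stats = getSRs()
#     nobs = np.array ( [ D["obsN"] for D in stats ] )
#     nbg  = np.array ( [ D["expectedBG"] for D in stats ] )
#     err  = np.array ( [ D["bgError"] for D in stats ] )
#     z = ( nobs - nbg ) / np.sqrt ( err**2 + nbg ) ## Poisson + bg uncertainty
#     print ( "%d SRs, max |z|=%.2f" % ( len(z), np.abs(z).max() ) )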
Example #7
 def __init__(self, inputfile, rundir, verbose="info"):
     self.inputfile = inputfile
     self.rundir = rundir
     self.nr = 0
     self.verbstring = verbose
     if type(verbose) == int:
         self.verbose = verbose
     else:
         verbose = verbose.lower()
         verbs = {"err": 10, "warn": 20, "info": 30, "debug": 40}
         self.verbose = 50
         for k, v in verbs.items():
             if k in verbose:
                 self.verbose = v
     self.database = Database(self.inputfile)
Example #8
 def testFallBack(self):
     """ test that smodels/experiment/defaultFinalStates.py is used
         if databaseParticle.py is missing """
     dbpath = "./database_simple"
     dbOld = Database(dbpath, discard_zeroes=False, force_load='txt')
     model = dbOld.databaseParticles
     self.assertEqual(model.label,
                      'DB Final States (default)')  #Simple fallback test
Example #9
    def initialize( self ):
        self.setStatus ( "initialized" )
        Cache.n_stored = 20000 ## crank up the caching
        self.db = Database ( self.dbpath )
        self.expResults = self.db.expResultList
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server_address = ( self.servername, self.port )
        self.pprint ( 'starting up on %s port %s' % self.server_address )
        self.pprint ( 'I will be serving database %s at %s' % \
                      (self.db.databaseVersion, self.dbpath ) )
        try:
            self.sock.bind( self.server_address )
        except OSError as e:
            self.pprint ( "exception %s. is host ``%s'' reachable?" % \
                          ( e, self.server_address ) )
            sys.exit(-1)
        # Listen for incoming connections
        self.sock.listen(1)

        atexit.register ( shutdownAll )

        while True:
            # Wait for a connection
            self.setStatus ( "waiting" )
            self.log ( 'waiting for a connection' )
            self.connection, self.client_address = self.sock.accept()
            self.listen()
Example #10
def loadDatabase(parser, db):
    """
    Load database

    :parameter parser: ConfigParser with path to database
    :parameter db: binary database object. If None, then the database is loaded
                   according to databasePath. If True, then the database is
                   loaded and text mode is forced.
    :returns: database object, database version
    """
    try:
        databasePath = parser.get("path", "databasePath")
        if databasePath == "micromegas":
            databasePath = installDirectory() + "/smodels-database/"
        database = db
        if database in [None, True]:
            force_load = None
            if database == True: force_load = "txt"
            database = Database(databasePath, force_load=force_load)
        databaseVersion = database.databaseVersion
    except DatabaseNotFoundException:
        logger.error("Database not found in %s" %
                     os.path.realpath(databasePath))
        sys.exit()
    return database, databaseVersion
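
# Minimal usage sketch, assuming an ini file with a [path] section that sets
# databasePath:
#     from configparser import ConfigParser
#     parser = ConfigParser()
#     parser.read ( "parameters.ini" )
#     database, databaseVersion = loadDatabase ( parser, None )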
Example #11
    def testCompare(self):
        warnings.simplefilter("ignore", ResourceWarning)
        from simplyGluino_default import smodelsOutputDefault
        filename = "./testFiles/slha/simplyGluino.slha"
        port = random.choice(range(31700, 42000))
        # port = 31744
        dbfile = "database/db30.pcl"
        dbfile = "unittest"

        startserver = f"../smodels/tools/smodelsTools.py proxydb -p {port} -i {dbfile} -o ./proxy.pcl -r -v error"
        cmd = startserver.split(" ")
        # print ( "starting server %s" % startserver )
        myenv = os.environ.copy()
        pp = ""
        if "PYTHONPATH" in myenv:
            pp = ":" + myenv["PYTHONPATH"]
        myenv["PYTHONPATH"] = "../" + pp
        subprocess.Popen(cmd, env=myenv)

        time.sleep(3)

        db = Database("./proxy.pcl")
        outputfile = runMain(filename,
                             suppressStdout=True,
                             overridedatabase=db)
        smodelsOutput = importModule(outputfile)

        client = DatabaseClient(port=port, verbose="warn")
        client.send_shutdown()

        ignoreFields = [
            'input file', 'smodels version', 'ncpus', 'Element',
            'database version', 'Total missed xsec', 'Missed xsec long-lived',
            'Missed xsec displaced', 'Missed xsec MET',
            'Total outside grid xsec',
            'Total xsec for missing topologies (fb)',
            'Total xsec for missing topologies with displaced decays (fb)',
            'Total xsec for missing topologies with prompt decays (fb)',
            'Total xsec for topologies outside the grid (fb)'
        ]
        smodelsOutputDefault['ExptRes'] = sorted(
            smodelsOutputDefault['ExptRes'],
            key=lambda res: res['r'],
            reverse=True)
        equals = equalObjs(smodelsOutput,
                           smodelsOutputDefault,
                           allowedDiff=0.08,
                           ignore=ignoreFields,
                           fname=outputfile)
        if not equals:
            e = "simplyGluino.py != simplyGluino_default.py"
            logger.error(e)
            # raise AssertionError( e )

        self.assertTrue(equals)
        self.removeOutputs(outputfile)
Example #12
def getDatabaseVersion(protomodel, dbpath="default.pcl"):
    dbver = "???"
    if hasattr(protomodel, "dbversion"):
        dbver = protomodel.dbversion
        if "???" not in dbver:
            return dbver
    if os.path.exists(dbpath):
        ## try to get db version from db file
        from smodels.experiment.databaseObj import Database
        db = Database(dbpath)
        dbver = db.databaseVersion
    return dbver
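
# Usage sketch; any object can stand in for protomodel, since only its
# optional dbversion attribute is inspected:
#     class _Stub: pass
#     print ( getDatabaseVersion ( _Stub(), dbpath="./default.pcl" ) )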
Example #13
def getSummary():
    from smodels.experiment.databaseObj import Database
    # dbpath = "official"
    dbpath = "<rundir>/database.pcl"
    print("[analysisCombiner] checking %s" % dbpath)
    db = Database(dbpath)
    results = db.getExpResults()
    strategy = "aggressive"
    ana1 = "CMS-SUS-16-042"
    ana2 = "CMS-SUS-16-033"
    canC = canCombine(ana1, ana2, strategy, results)
    print("[analysisCombiner] can combine %s with %s: %s" %
          (ana1, ana2, str(canC)))
    ctr, combinable = 0, 0
    for x, e in enumerate(results):
        for y, f in enumerate(results):
            if y <= x:
                continue
            ctr += 1
            isUn = canCombine(e.globalInfo, f.globalInfo, strategy)
            combinable += isUn
    print("[analysisCombiner] can combine %d/%d pairs of results" %
          (combinable, ctr))
Example #14
    def __init__(self, database, force_txt = False ):
        """
        :param database: Path to the database or Database object
        """

        self._selectedExpResults = []
        load = None
        if force_txt == True:
            load = "txt"
        if isinstance(database,str):
            self.database = Database(database, load )
        elif isinstance(database,Database):
            self.database = database
        else:
            logger.error("The input must be the database location or a Database object.")
            raise SModelSError()
        self.loadAllResults()
Example #15
def loadDatabase(parser, db):
    """
    Load database

    :parameter parser: ConfigParser with path to database
    :parameter db: binary database object. If None, then the database is loaded
                   according to databasePath. If True, then the database is loaded,
                   and text mode is forced.
    :returns: database object, database version

    """
    try:
        dp = parser.get("path", "databasePath")
        logger.error("``[path] databasePath'' in ini file is deprecated; " \
           "use ``[database] path'' instead. (See e.g. smodels/etc/parameters_default.ini)")
        parser.set("database", "path", dp)
    except (NoSectionError, NoOptionError) as e:
        ## path.databasePath not set. This is good.
        pass
    try:
        database = db
        # logger.error("database=db: %s" % database)
        if database in [None, True]:
            databasePath = parser.get("database", "path")
            checkForSemicolon(databasePath, "database", "path")
            discard_zeroes = True
            try:
                discard_zeroes = parser.getboolean("database", "discardZeroes")
            except (NoSectionError, NoOptionError) as e:
                logger.debug(
                    "database:discardZeroes is not given in config file. Defaulting to 'True'."
                )
            force_load = None
            if database == True: force_load = "txt"
            if os.path.isfile(databasePath):
                force_load = "pcl"
            database = Database(databasePath, force_load=force_load, \
                                 discard_zeroes = discard_zeroes)
        databaseVersion = database.databaseVersion
    except DatabaseNotFoundException:
        logger.error("Database not found in ``%s''" %
                     os.path.realpath(databasePath))
        sys.exit()
    return database, databaseVersion
Example #16
    def __init__(self, database, force_txt = False ):
        """
        :ivar _selectedExpResults: list of experimental results loaded in the browser.
                           Can be used to hold a subset of results in the database.
                           By default all results are loaded.

        :param database: Path to the database or Database object
        """

        self._selectedExpResults = []
        load = None
        if force_txt == True:
            load = "txt"
        if isinstance(database,str):
            if database.endswith(".pcl"):
                load = "pcl"
            self.database = Database(database, load )
        elif isinstance(database,Database):
            self.database = database
        else:
            logger.error("The input must be the database location or a Database object.")
            raise SModelSError()
        self.loadAllResults()
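
# Usage sketch, assuming this constructor belongs to the Browser class of
# smodels.tools.databaseBrowser:
#     from smodels.tools.databaseBrowser import Browser
#     browser = Browser ( "unittest" )
#     print ( browser.database.databaseVersion )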
Example #17
#!/usr/bin/env python3
"""
.. module:: databaseLoader
   :synopsis: When running the complete test suite, we need to
              load the database only once

.. moduleauthor:: Wolfgang Waltenberger <*****@*****.**>

"""

import sys
sys.path.insert(0, "../")
from smodels.experiment.databaseObj import Database

# dbpath = "./database"
# dbpath = "../../smodels-database"
dbpath = "unittest"

database = Database(dbpath, discard_zeroes=False)

if __name__ == "__main__":
    print(database)
Example #18
class LlhdPlot:
    """ A simple class to make debugging the plots easier """
    def __init__ ( self, pid1, pid2, verbose, copy, max_anas, 
                   interactive, drawtimestamp, compress, rundir,
                   upload ):
        """
        :param pid1: pid for x axis, possibly a range of pids
        :param pid2: pid for y axis
        :param verbose: verbosity (debug, info, warn, or error)
        :param copy: copy plot to ../../smodels.github.io/protomodels/latest
        :param max_anas: maximum number of analyses on summary plot
        :param interactive: prepare for an interactive session?
        :param drawtimestamp: if true, put a timestamp on plot
        :param compress: prepare for compression
        :param upload: upload directory, default is "latest"
        """
        self.rundir = rundir
        self.upload = upload
        self.setup( pid1, pid2 )
        self.DEBUG, self.INFO = 40, 30
        self.drawtimestamp = drawtimestamp
        self.max_anas = max_anas ## maximum number of analyses
        self.copy = copy
        self.rthreshold = 1.7
        self.interactive = interactive
        self.hiscorefile = "./hiscore.hi"
        if rundir != None:
            self.hiscorefile = f"{rundir}/hiscore.hi"
        self.setVerbosity ( verbose )
        masspoints,mx,my,nevents,topo,timestamp = self.loadPickleFile( compress )
        self.masspoints = masspoints
        self.mx = mx
        self.my = my
        self.nevents = nevents
        self.topo = topo
        self.timestamp = timestamp
        self.massdict = {}
        self.rdict = {}
        if masspoints == None:
            return
        for m in masspoints:
            self.massdict[ (m[0],m[1]) ] = m[2]
            if len(m)>3:
                self.rdict[ (m[0],m[1]) ] = m[3]

    def setVerbosity ( self, verbose ):
        self.verbose = verbose
        if type(verbose)==str:
            verbose = verbose.lower()
            if "deb" in verbose:
                self.verbose = 40
                return
            if "inf" in verbose:
                self.verbose = 30
                return
            if "warn" in verbose:
                self.verbose = 20
                return
            if "err" in verbose:
                self.verbose = 10
                return
            self.pprint ( "I don't understand verbosity ``%s''. Setting to debug." % verbose )
            self.verbose = 40

    def getHash ( self, m1=None, m2=None ):
        """ get hash for point. if None, get hash for self.mx, self.my """
        if m1 == None:
            m1 = self.mx
        if m2 == None:
            m2 = self.my
        return int(1e3*m1) + int(1e0*m2)
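
    # Worked example: the hash packs a mass point as int(1000*m1) + int(m2),
    # e.g. getHash(450., 275.) == 450275; distance_ in getLClosestTo below
    # inverts this via floor(h/1000) and h % 1000.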

    def getResultFor ( self, ana, masspoint ):
        """ return result for ana/topo pair 
        :param ana: the analysis id. optionally a data type can be specified, e.g.
                    as :em. Alternatively, a signal region can be specified.
        :param masspoint: a point from self.masspoints
        :returns: results for this analysis (possibly data type, possibly signal region) 
                  and topology
        """
        #self.pprint ( "asking for %s" % ana )
        ret,sr = None, None
        dType = "any"
        if ":" in ana:
            ana,dType = ana.split(":")
        for k,v in masspoint.items():
            tokens = k.split(":")
            if dType == "ul" and tokens[1] != "None":
                continue
            if dType == "em" and tokens[1] == "None":
                continue
            if ana != tokens[0]:
                continue
            # self.pprint ( "asking for %s, %s %s" % ( tokens[0], tokens[1], dType ) )
            if tokens[1] != None and dType not in [ "any", "ul", "None" ]:
                # if signal regions are given, they need to match
                if tokens[1] != dType:
                    continue
                self.debug ( "found a match for", tokens[0], tokens[1], v )
            if self.topo not in tokens[2]:
                continue
            if ret == None or v > ret:
                ret = v
                sr = tokens[1]
        return ret,sr

    def loadPickleFile ( self, returnAll=False ):
        """ load dictionary from picklefile 
        :param returnAll: return all likelihoods info
        """
        topo, timestamp = "?", "?"
        allhds = None
        with open ( self.picklefile, "rb" ) as f:
            try:
                allhds = pickle.load ( f )
                mx = pickle.load ( f )
                my = pickle.load ( f )
                nevents = pickle.load ( f )
                topo = pickle.load ( f )
                timestamp = pickle.load ( f )
            except EOFError as e:
                print ( "[plotLlhds] EOF error %s, when reading %s" % \
                        ( e, self.picklefile ) )
        if allhds == None:
            print ( "couldn't read llhds in %s" % self.picklefile )
            return None,None,None,None,None,None
        if returnAll:
            return allhds,mx,my,nevents,topo,timestamp
        llhds=[]
        mu = 1.
        def getMu1 ( L ):
            for k,v in L.items():
                if abs(k-mu)<1e-9:
                    return v
            print ( "couldn't find anything" )
            return None
        for llhd in allhds:
            if self.pid1 in [ 1000001, 1000002, 1000003, 1000004 ]:
                if llhd[0]<310.:
                    print ( "light squark mass wall, skipping mx %d < 310 GeV" % llhd[0] )
                    continue
            if len(llhd)==4:
                llhds.append ( (llhd[0],llhd[1],getMu1(llhd[2]),llhd[3]) )
            else:
                llhds.append ( (llhd[0],llhd[1],getMu1(llhd[2]),[0.,0.,0.]) )
        return llhds,mx,my,nevents,topo,timestamp

    def pprint ( self, *args ):
        print ( "[plotLlhds] %s" % " ".join(map(str,args)) )  

    def debug ( self, *args ):
        if self.verbose >= self.DEBUG:
            print ( "[plotLlhds] %s" % " ".join(map(str,args)) )  

    def setup ( self, pid1, pid2 ):
        """ setup rundir, picklefile path and hiscore file path """
        self.hiscorefile = self.rundir + "/hiscore.hi"
        if not os.path.exists ( self.hiscorefile ):
            self.pprint ( "could not find hiscore file %s" % self.hiscorefile )
 
        self.pid1 = pid1
        self.pid2 = pid2
        if type(self.pid1) in [ tuple, list ]:
            pid1 = self.pid1[0]
        self.picklefile = "%s/llhd%d%d.pcl" % ( self.rundir, pid1, self.pid2 )
        if not os.path.exists ( self.picklefile ):
            llhdp = self.picklefile
            self.picklefile = "%s/mp%d%d.pcl" % ( self.rundir, pid1, self.pid2 )
        if not os.path.exists ( self.picklefile ):
            self.pprint ( "could not find pickle files %s and %s" % \
                          ( llhdp, self.picklefile ) )

    def describe ( self ):
        """ describe the situation """
        print ( "%d masspoints obtained from %s, hiscore stored in %s" % \
                ( len ( self.masspoints), self.picklefile, self.hiscorefile ) )
        print ( "Data members: plot.masspoints, plot.massdict, plot.timestamp, plot.mx, plot.my" )
        print ( "              plot.pid1, plot.pid2, plot.topo" )
        print ( "Function members: plot.findClosestPoint()" )


    def getLClosestTo ( self, L, mx=None, my=None ):
        """ get the L closest to your point """
        if mx == None:
            mx=self.mx
        if my == None:
            my=self.my
        def distance_ ( k, mx, my ):
            _x = int(math.floor(k/1000.))
            _y = int(math.floor(k % 1000 ) )
            ret= (mx - _x)**2 + (my - _y)**2
            return ret

        dmmin, vmin = float("inf"), 23.
        for k,v in L.items():
            dm = distance_ ( k, mx, my )
            if dm < dmmin and not np.isnan(v):
                dmmin = dm
                vmin = v
        return vmin

    def getPrettyName ( self, anaid ):
        """ get pretty name of ana id """
        if False: ## set to True when working with the old analysis IDs (returns them unchanged)
            return anaid
        if not hasattr ( self, "database" ):
            from smodels.experiment.databaseObj import Database
            ## NB: only the last assignment to dbname takes effect;
            ## the earlier paths are kept for reference
            dbname = "./original.pcl"
            dbname = "/home/walten/git/smodels-database"
            dbname = "/scratch-cbe/users/wolfgan.waltenberger/rundir/db31.pcl"
            self.database = Database ( dbname )
        from smodels.tools.physicsUnits import TeV
        from smodels_utils.helper.prettyDescriptions import prettyTexAnalysisName
        if ":" in anaid:
            anaid = anaid[:anaid.find(":")]
        ers = self.database.getExpResults ( analysisIDs = [ anaid ] )
        for er in ers:
            if hasattr ( er.globalInfo, "prettyName" ):
                pn = er.globalInfo.prettyName
                sqrts = er.globalInfo.sqrts.asNumber(TeV)
                ret = prettyTexAnalysisName ( pn, sqrts, dropEtmiss = True,
                                              collaboration = True, anaid = er.globalInfo.id )
                # for the 2020 paper to be consistent
                ret = ret.replace( "+ top tag", "stop" )
                ret = ret.replace( "+ 4 (1 b-)jets", "multijet" )
                # ret += " -> " + anaid
                return ret
        # print ( "found no pretty name", ers[0].globalInfo )
        return anaid

    def plot ( self, ulSeparately=True, pid1=None ):
        """ a summary plot, overlaying all contributing analyses 
        :param ulSeparately: if true, then plot UL results on their own
        """
        if pid1 == None and type(self.pid1) in [ list, tuple ]:
            for p in self.pid1:
                self.plot ( ulSeparately, p )
            return
        if type(pid1) in [ tuple, list ]:
            for p in pid1:
                self.plot ( ulSeparately, p )
            return
        if pid1 == None:
            pid1 = self.pid1
        self.pprint ( "plotting summary for %s, %s" % ( pid1, self.topo ) )
        resultsForPIDs = {}
        from plotting.plotHiscore import getPIDsOfTPred, obtain
        protomodel = obtain ( 0, self.hiscorefile )
        for tpred in protomodel.bestCombo:
            resultsForPIDs = getPIDsOfTPred ( tpred, resultsForPIDs, integrateSRs=False )
        stats = self.getAnaStats( integrateSRs=False )
        if stats == None:
            self.pprint ( "found no ana stats?" )
            return
        anas = list(stats.keys())
        if pid1 in resultsForPIDs:
            self.debug ( "results for PIDs %s" % ", ".join ( resultsForPIDs[pid1] ) )
            anas = list ( resultsForPIDs[pid1] )
        anas.sort()
        self.pprint ( "summary plot: %s" % ", ".join ( anas ) )
        # print ( stats.keys() )
        colors = [ "red", "green", "blue", "orange", "cyan", "magenta", "grey", "brown",
                   "pink", "indigo", "olive", "orchid", "darkseagreen", "teal" ]
        xmin,xmax,ymin,ymax=9000,0,9000,0
        for m in self.masspoints:
            if m[0] < xmin:
                xmin = m[0]
            if m[0] > xmax:
                xmax = m[0]
            if m[1] < ymin:
                ymin = m[1]
            if m[1] > ymax:
                ymax = m[1]
        if abs(xmin-310.)<1e-5:
            xmin=330. ## cut off the left margin
        print ( "[plotLlhds] range x [%d,%d] y [%d,%d]" % ( xmin, xmax, ymin, ymax ) )
        handles = []
        existingPoints = []
        combL = {}
        namer = SParticleNames ( susy = False )
        for ctr,ana in enumerate ( anas ): ## loop over the analyses
            if ctr >= self.max_anas:
                self.pprint ( "too many (%d > %d) analyses." % (len(anas),self.max_anas) )
                for ana in anas[ctr:]:
                    self.pprint ( "  - skipping %s" % ana )
                break
            color = colors[ctr]
            x,y=set(),set()
            L, R = {}, {}
            minXY=( 0.,0., float("inf") )
            s=""
            r,sr = self.getResultFor ( ana, self.masspoints[0][2] )
            if r:
                s="(%.2f)" % (-np.log(r))
            print ( "[plotLlhds] result for", ana,"is", s )
            cresults = 0
            for cm,masspoint in enumerate(self.masspoints[1:]):
                #if cm % 10 != 0:
                #    continue
                if cm % 1000 == 0:
                    print ( ".", end="", flush=True )
                m1,m2,llhds,robs=masspoint[0],masspoint[1],masspoint[2],masspoint[3]
                rmax=float("nan")
                if len(robs)>0:
                    rmax=robs[0]
                if m2 > m1:
                    print ( "m2,m1 mass inversion?",m1,m2 )
                x.add ( m1 )
                y.add ( m2 )
                zt = float("nan")
                result,sr = self.getResultFor ( ana, llhds )
                if result:
                    zt = - np.log( result )
                    cresults += 1
                    if zt < minXY[2] and rmax<=self.rthreshold:
                        minXY=(m1,m2,zt)
                h = self.getHash(m1,m2)
                L[h]=zt
                if not h in combL:
                    combL[h]=0.
                if np.isnan(zt):
                    combL[h] = combL[h] + 100.
                else:
                    combL[h] = combL[h] + zt
                R[h]=rmax
            print ()
            # print ( "\n[plotLlhds] min(xy) for %s is at m=(%d/%d): %.2f(%.2g)" % ( ana, minXY[0], minXY[1], minXY[2], np.exp(-minXY[2] ) ) )
            if cresults == 0:
                print ( "[plotLlhds] warning: found no results for %s. skip" % \
                        str(masspoint) )
                continue
                # return
            x.add ( xmax*1.03 )
            x.add ( xmin*.93 )
            y.add ( ymax+50. )
            y.add ( 0. )
            x,y=list(x),list(y)
            x.sort(); y.sort()
            X, Y = np.meshgrid ( x, y )
            Z = float("nan")*X
            RMAX = float("nan")*X
            for irow,row in enumerate(Z):
                for icol,col in enumerate(row):
                    h = 0
                    if len(x)>= icol and len(y) >= irow:
                        h = self.getHash(list(x)[icol],list(y)[irow])
                    if h in L:
                        Z[irow,icol]=L[h]
                    if h in R:
                        RMAX[irow,icol]=R[h]
            if self.interactive:
                self.RMAX = RMAX
                # self.ZCOMB = ZCOMB
                self.Z = Z
                self.L = L
                self.R = R
                self.X = X
                self.Y = Y
            hldZ100 = computeHPD ( Z, None, 1., False, rthreshold=self.rthreshold )
            cont100 = plt.contour ( X, Y, hldZ100, levels=[0.25], colors = [ color ], linestyles = [ "dotted" ], zorder=10 )
            #hldZ95 = computeHPD ( Z, .95, False )
            #cont95 = plt.contour ( X, Y, hldZ95, levels=[0.5], colors = [ color ], linestyles = [ "dashed" ] )
            #plt.clabel ( cont95, fmt="95%.0s" )
            hldZ50 = computeHPD ( Z, RMAX, .68, False, rthreshold=self.rthreshold )
            cont50c = plt.contour ( X, Y, hldZ50, levels=[1.0], colors = [ color ], zorder=10 )
            cont50 = plt.contourf ( X, Y, hldZ50, levels=[1.,10.], colors = [ color, color ], alpha=getAlpha( color ), zorder=10 )
            plt.clabel ( cont50c, fmt="68%.0s" )
            if hasattr ( cont50, "axes" ):
                ax = cont50.axes
            else:
                ax = cont50.ax
            while isCloseToExisting ( minXY, existingPoints ):
                minXY = ( minXY[0]+8., minXY[1]+8., minXY[2] )
            a = ax.scatter( [ minXY[0] ], [ minXY[1] ], marker="*", s=180, color="black", zorder=20 )
            anan = ana.replace(":None",":UL") # + " (%.2f)" % (minXY[2])
            label = self.getPrettyName ( ana )
            a = ax.scatter( [ minXY[0] ], [ minXY[1] ], marker="*", s=110, color=color, 
                            label=label, alpha=1., zorder=20 )
            existingPoints.append ( minXY )
            handles.append ( a )
        ZCOMB = float("nan")*X
        for irow,row in enumerate(Z):
            for icol,col in enumerate(row):
                h = 0
                if len(x)> icol and len(y) > irow:
                    h = self.getHash(list(x)[icol],list(y)[irow])
                if h in combL and not np.isnan(combL[h]):
                    ZCOMB[irow,icol]=combL[h]
                    if combL[h]==0.:
                        ZCOMB[irow,icol]=float("nan")
        self.ZCOMB = ZCOMB
        contRMAX = plt.contour ( X, Y, RMAX, levels=[self.rthreshold], colors = [ "gray" ], zorder=10 )
        contRMAXf = plt.contourf ( X, Y, RMAX, levels=[self.rthreshold,float("inf")], colors = [ "gray" ], hatches = ['////'], alpha=getAlpha( "gray" ), zorder=10 )
        hldZcomb68 = computeHPD ( ZCOMB, RMAX, .68, False, rthreshold=self.rthreshold )
        contZCOMB = plt.contour ( X, Y, hldZcomb68, levels=[.25], colors = [ "black" ], zorder=10 )

        # ax.scatter( [ minXY[0] ], [ minXY[1] ], marker="s", s=110, color="gray", label="excluded", alpha=.3, zorder=20 )
        print()
        self.pprint ( "timestamp:", self.timestamp, self.topo, max(x) )
        dx,dy = max(x)-min(x),max(y)-min(y)
        if self.drawtimestamp:
            plt.text( max(x)-.37*dx,min(y)-.11*dy,self.timestamp, c="gray" )
        ### the altitude of the alpha quantile is l(nuhat) - .5 chi^2_(1-alpha);ndf
        ### so for alpha=0.05, ndf=1 the dl is .5 * 3.841 = 1.9205
        ### for ndf=2 the dl is -ln(alpha) = .5 * 5.99146 = 2.995732
        ### see slide 317
        if hasattr ( cont50, "axes" ):
            ax = cont50.axes
        else:
            ax = cont50.ax
        # Xs,Ys=X,Y
        Xs,Ys = filterSmaller ( X, Y )
        h = self.getHash()
        # print ( "hash is", h )
        #s=" (??)"
        #if h in L:
        #    s=" (%.2f)" % L[h]
        #s=" (%.2f)" % self.getLClosestTo ( L )
        s=""
        ax.scatter( [ self.mx ], [ self.my ], marker="*", s=200, color="white", zorder=20 )
        c = ax.scatter( [ self.mx ], [ self.my ], marker="*", s=160, color="black", 
                      label="proto-model%s" % s, zorder=20 )
        handles.append ( c )
        if sr == None:
            sr = "UL"
        # plt.title ( "HPD regions, %s [%s]" % ( namer.texName(pid1, addSign=False, addDollars=True), self.topo ), fontsize=14 )
        plt.xlabel ( "m(%s) [GeV]" % namer.texName(pid1,addSign=False, addDollars=True), fontsize=14 )
        plt.ylabel ( "m(%s) [GeV]" % namer.texName(self.pid2, addSign=False, addDollars=True), fontsize=14 )
        circ1 = mpatches.Patch( facecolor="gray",alpha=getAlpha("gray"),hatch=r'////',label='excluded by critic', edgecolor="black" )
        handles.append ( circ1 )
        plt.legend( handles=handles, loc="upper left", fontsize=12 )
        figname = "%s/llhd%d.png" % ( self.rundir, pid1 )
        self.pprint ( "saving to %s" % figname )
        plt.savefig ( figname )
        if self.interactive:
            self.axes = ax
            self.plt = plt
        plt.close()
        if self.copy:
            self.copyFile ( figname )
        return

    def copyFile ( self, filename ):
        """ copy filename to smodels.github.io/protomodels/<upload>/ """
        dest = os.path.expanduser ( "~/git/smodels.github.io" )
        cmd = "cp %s %s/protomodels/%s/" % ( filename, dest, self.upload )
        o = subprocess.getoutput ( cmd )
        self.pprint ( "%s: %s" % ( cmd, o ) )


    def getAnaStats ( self, integrateSRs=True, integrateTopos=True,
                      integrateDataType=True ):
        """ given the stored likelihood dictionaries, get
            stats of which analysis occurs how often
        :param integrateTopos: sum over all topologies
        :param integrateSRs: sum over all signal regions
        :param integrateDataType: ignore data type
        """
        anas = {}
        if self.masspoints == None:
            return None
        for masspoint in self.masspoints:
            m1,m2,llhds=masspoint[0],masspoint[1],masspoint[2]
            if len(masspoint)>3:
                robs = masspoint[3]
            for k,v in llhds.items():
                tokens = k.split(":")
                if not integrateTopos and self.topo not in tokens[2]:
                    continue
                dType = ":em"
                if tokens[1] in [ "None", None ]:
                    dType = ":ul"
                name = tokens[0]
                if not integrateDataType:
                    name = name + dType
                if not integrateTopos:
                    name = tokens[0]+tokens[1]
                if not name in anas.keys():
                    anas[name]=0
                anas[name]=anas[name]+1
        return anas

    def listAnalyses( self ):
        """ list how often each analysis appears in the masspoints """
        stats = self.getAnaStats( integrateDataType=False )
        print ( "%6d masspoints with %s" % ( len(self.masspoints), self.topo ) )
        for k,v in stats.items():
            print ( "%6d: %s" % ( v, k ) )

    def compress ( self ):
        """ produce a pcl file with only a fraction of the points. 
            good for testing and development """
        backupfile = self.picklefile.replace(".pcl",".bu.pcl")
        subprocess.getoutput ( "cp %s %s" % ( self.picklefile, backupfile ))
        newfile = self.picklefile.replace(".pcl",".comp.pcl")
        mx,my=set(),set()
        for m in self.masspoints:
            mx.add ( m[0] )
            my.add ( m[1] )
        mx=list(mx)
        my=list(my)

        with open ( newfile, "wb" ) as f:
            mps = []
            for i,m in enumerate(self.masspoints):
                if mx.index (m[0] ) % 2 == 0 and \
                   my.index (m[1] ) % 2 == 0:
                # if i % 5 == 0:
                    mps.append ( m )
            pickle.dump ( mps, f )
            pickle.dump ( self.mx, f )
            pickle.dump ( self.my, f )
            pickle.dump ( self.nevents, f )
            pickle.dump ( self.topo, f )
            pickle.dump ( self.timestamp, f )

    def findClosestPoint ( self, m1=None, m2=None, nll=False ):
        """ find the mass point closest to m1, m2. If not specified, 
            return the hiscore point.
        :param nll: if True, report nlls, else report likelihoods.
        """
        if m1 == None:
            m1 = self.mx
        if m2 == None:
            m2 = self.my
        dm,point = float("inf"),None
        def distance ( m ):
            return (m[0]-m1)**2 + (m[1]-m2)**2

        for m in self.masspoints:
            tmp = distance(m)
            if tmp < dm:
                dm = tmp
                point = m
        if not nll:
            return point
        # asked for NLLs
        D = {}
        for k,v in point[2].items():
            D[k]=-np.log(v)
        return ( point[0], point[1], D )

    def interact ( self ):
        import IPython
        varis = "plot.describe()"
        print ( "%s[plot] interactive session. Try: %s%s" % \
                ( colorama.Fore.GREEN, varis, colorama.Fore.RESET ) )
        IPython.embed( using=False )
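
# Usage sketch with purely illustrative argument values:
#     plot = LlhdPlot ( 1000021, 1000022, verbose="info", copy=False,
#                       max_anas=4, interactive=False, drawtimestamp=True,
#                       compress=False, rundir="./", upload="latest" )
#     plot.listAnalyses()
#     plot.plot()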
Example #19
#Set up the path to SModelS installation folder if running on a different folder
import sys, os
sys.path.append(os.path.join(os.getenv("HOME"), "smodels/"))

# In[2]:

#Import those parts of smodels that are needed for this exercise
from smodels.tools.physicsUnits import GeV
from smodels.experiment.databaseObj import Database

# In[3]:

## Load the database:
databasePath = os.path.join(os.getenv("HOME"), "smodels-database/")
db = Database(databasePath)

# ## Look up upper limit for an Upper Limit-type result:

# In[4]:

#Select desired result:
resultID = ["CMS-PAS-SUS-13-016"]
txname = ["T1tttt"]
expResult = db.getExpResults(analysisIDs=resultID,
                             txnames=txname,
                             dataTypes='upperLimit')[0]
print('selected result:', expResult)

# In[5]:
Example #20
#!/usr/bin/env python3
""" just test that the fake signal gets injected in the right part of the UL maps """

import math
from smodels.experiment.databaseObj import Database
from smodels.tools.physicsUnits import GeV

# ./ptools/fetchFromClip.py -R rundir.frozen1 --database
db = Database("default.pcl")
print("db", db.databaseVersion)
er = db.getExpResults(["CMS-SUS-19-006"])[0]
ds = er.datasets[0]
print(ds.txnameList)
txn = ds.txnameList[6]
print(txn)


def distance(mass):
    # return math.sqrt ( (mass[0] - 735.)**2 + (mass[1]-162.6)**2  )
    return math.sqrt((mass[0] - 1166.)**2 + (mass[1] - 162.6)**2)


masses = []
for mLSP in range(100, 240, 50):
    for msquark in range(850, 1300, 50):
        masses.append([msquark, mLSP])
        # masses.append (  [[msquark*GeV,mLSP*GeV],[msquark*GeV,mLSP*GeV]] )
for mass in masses:
    mvec = [[mass[0] * GeV, mass[1] * GeV], [mass[0] * GeV, mass[1] * GeV]]
    oUL = txn.getULFor(mvec, expected=False)
    eUL = txn.getULFor(mvec, expected=True)
    # print the observed and expected ULs, so the injected signal can be inspected
    print(mass, "oUL", oUL, "eUL", eUL)
Example #21
class ProxyDBCreater:
    def __init__(self, inputfile, rundir, verbose="info"):
        self.inputfile = inputfile
        self.rundir = rundir
        self.nr = 0
        self.verbstring = verbose
        if type(verbose) == int:
            self.verbose = verbose
        else:
            verbose = verbose.lower()
            verbs = {"err": 10, "warn": 20, "info": 30, "debug": 40}
            self.verbose = 50
            for k, v in verbs.items():
                if k in verbose:
                    self.verbose = v
        self.database = Database(self.inputfile)

    def create(self, servername, serverport):
        if servername == None:
            servername = socket.gethostname()
            self.pprint("determined servername as '%s'" % servername)
        if serverport == None:
            serverport = 31770
        self.servername = servername
        self.serverport = serverport
        self.database.client = DatabaseClient(servername,
                                              serverport,
                                              verbose=self.verbstring,
                                              rundir=self.rundir,
                                              clientid=self.nr)
        for e, expRes in enumerate(self.database.expResultList):
            for d, dataset in enumerate(expRes.datasets):
                for t, txn in enumerate(dataset.txnameList):
                    self.database.expResultList[e].datasets[d].txnameList[
                        t].dbClient = copy.copy(self.database.client)
                    del self.database.expResultList[e].datasets[d].txnameList[
                        t].txnameData.tri
                    if txn.txnameDataExp != None:
                        del self.database.expResultList[e].datasets[
                            d].txnameList[t].txnameDataExp.tri

    def pprint(self, *args):
        if self.verbose > 25:
            print ( "[proxyDBCreater-%s] %s" % \
                    ( time.strftime("%H:%M:%S"), " ".join(map(str,args)) ) )

    def store(self, outputfile):
        """ store the outputfile """
        self.outputfile = outputfile
        self.pprint("writing to %s" % outputfile)
        if os.path.exists(outputfile):
            os.unlink(outputfile)
        ## first create it as temporary file, then move
        tempf = outputfile + ".tmp"  # tempfile.mktemp ( suffix=".pcl" )
        self.database.createBinaryFile(tempf)
        #cmd = f"mv {tempf} {outputfile}"
        #subprocess.getoutput ( cmd )
        os.rename(tempf, outputfile)  ## would only work on same device

    def symlink(self):
        """ set a symlink from self.outputfile to default.pcl """
        dirname = os.path.dirname(self.outputfile)
        symfile = f"{dirname}/default.pcl"
        self.pprint ( "setting a symlink from %s to %s" % \
                      ( self.outputfile, symfile ) )
        if os.path.exists(symfile):
            os.unlink(symfile)
        cmd = f"ln -s {self.outputfile} {symfile}"
        subprocess.getoutput(cmd)

    def run(self, really):
        """ now run the server
        :param really: if False, then only write out command
        """
        dirname = os.path.dirname(__file__)
        inputfile = self.inputfile
        #if not "/" in inputfile:
        #    inputfile = os.getcwd() + "/" + inputfile
        servercmd = "%s/databaseServer.py -R %s -p %d -d %s -v %s" % \
                      ( dirname, self.rundir, self.serverport, inputfile, self.verbstring )
        if really:
            self.pprint ( "starting a server on %s: %s" % \
                          ( self.servername, servercmd ) )
            import subprocess
            a = subprocess.getoutput(servercmd)
            self.pprint("output %s" % a)
        else:
            print("not started a server. you can start one yourself:")
            self.pprint(servercmd)
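
# Usage sketch, mirroring what ``smodelsTools.py proxydb'' does in Example #11;
# the argument values are illustrative:
#     creater = ProxyDBCreater ( "./default.pcl", rundir="./", verbose="info" )
#     creater.create ( servername=None, serverport=31770 )
#     creater.store ( "./proxy.pcl" )
#     creater.symlink()
#     creater.run ( really=False ) ## only prints the server command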
Example #22
 if args.prior:
     normalizePrior()
     sys.exit()
 if args.upper_limits and args.efficiencyMaps:
     print("[combiner] -u and -e are mutually exclusive")
     sys.exit()
 from smodels.experiment.databaseObj import Database
 from smodels.theory import decomposer
 from smodels.particlesLoader import BSMList
 from smodels.share.models.SMparticles import SMList
 from smodels.theory.model import Model
 from smodels.tools.physicsUnits import fb
 model = Model(BSMparticles=BSMList, SMparticles=SMList)
 model.updateParticles(inputFile=args.slhafile)
 print("[combiner] loading database", args.database)
 db = Database(args.database)
 print("[combiner] done loading database")
 anaIds = ["CMS-SUS-16-033"]
 anaIds = ["all"]
 dts = ["all"]
 if args.upper_limits:
     dts = ["upperLimit"]
 if args.efficiencyMaps:
     dts = ["efficiencyMap"]
 listOfExpRes = db.getExpResults(analysisIDs=anaIds,
                                 dataTypes=dts,
                                 onlyWithExpected=True)
 smses = decomposer.decompose(model, .01 * fb)
 #print ( "[combiner] decomposed into %d topos" % len(smses) )
 from smodels.theory.theoryPrediction import theoryPredictionsFor
 combiner = Combiner()
Example #23
#!/usr/bin/env python3
"""
.. module:: databaseLoader
   :synopsis: When running the complete test suite, we need to
              load the database only once

.. moduleauthor:: Wolfgang Waltenberger <*****@*****.**>

"""

import sys
sys.path.insert(0, "../")
from smodels.experiment.databaseObj import Database
from smodels.installation import version
ver = "".join(map(str, version(True)[:3]))
#dbname="./database/db%d0.pcl" % int ( sys.version[0] )
dbname = "http://smodels.hephy.at/database/unittest%s" % ver
database = Database(dbname, discard_zeroes=False)

if __name__ == "__main__":
    print(database)
Example #24
 def testLoadLatest(self):
     dblatest=Database ("latest")
     latestver = dblatest.databaseVersion.replace(".","")
     from databaseLoader import database
     thisver = database.databaseVersion.replace("unittest","").replace(".","")
     self.assertTrue ( latestver[:2]==thisver[:2] )
Example #25
import os
from smodels.installation import installDirectory
from smodels.tools.physicsUnits import fb, GeV
from smodels.theory import slhaDecomposer  ## old (SModelS 1.x) decomposition API
from smodels.theory.theoryPrediction import theoryPredictionsFor
from smodels.experiment.databaseObj import Database

# In[3]:

#Define the SLHA input file name
filename = "%s/inputFiles/slha/gluino_squarks.slha" % installDirectory()

# In[4]:

#Load the database, do the decomposition and compute theory predictions:
#(Look at the theory predictions HowTo to learn how to compute theory predictions)
databasepath = os.path.join(os.getenv("HOME"), "smodels-database/")
database = Database(databasepath)
expResults = database.getExpResults()
topList = slhaDecomposer.decompose(filename,
                                   sigcut=0.03 * fb,
                                   doCompress=True,
                                   doInvisible=True,
                                   minmassgap=5 * GeV)
allThPredictions = [theoryPredictionsFor(exp, topList) for exp in expResults]

# In[5]:

#Print the value of each theory prediction for each experimental
#result and the corresponding upper limit (see the obtain experimental upper limits HowTo to learn how
#to compute the upper limits).
#Also print the expected upper limit, if available
for thPreds in allThPredictions:
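    # the excerpt ends here; a plausible loop body, following the comment
    # above (exact attribute names depend on the SModelS version):
    #     for theoryPred in thPreds:
    #         print ( "analysis:", theoryPred.analysisId() )
    #         print ( "prediction:", theoryPred.xsection.value )
    #         print ( "observed UL:", theoryPred.upperLimit )
    #         print ( "expected UL:", theoryPred.getUpperLimit ( expected=True ) )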
Example #26
class Predictor:
    def __init__(self,
                 walkerid,
                 dbpath="./default.pcl",
                 expected=False,
                 select="all",
                 do_combine=False):
        """
        :param do_combine: if True, then also use combined results,
                           both via simplified likelihoods and pyhf.
        """
        self.walkerid = walkerid
        self.do_combine = do_combine
        self.modifier = None
        self.select = select
        self.expected = expected
        self.rthreshold = 1.3  ## threshold for rmax
        if expected:
            from expResModifier import ExpResModifier
            self.modifier = ExpResModifier()
        force_load = None
        if dbpath.endswith(".pcl"):
            force_load = "pcl"
        ntries = 0
        while not os.path.exists(dbpath):
            ## give it a few tries
            ntries += 1
            time.sleep(ntries * 5)
            if ntries > 5:
                break
        self.database = Database(dbpath, force_load=force_load)
        self.fetchResults()
        self.combiner = Combiner(self.walkerid)

    def filterForAnaIdsTopos(self, anaIds, topo):
        """ filter the list of expRes, keep only anaIds """
        keepExpRes = []
        nbefore = len(self.listOfExpRes)
        for er in self.listOfExpRes:
            eid = er.globalInfo.id
            if not eid in anaIds:
                continue
            txnames = [x.txName for x in er.getTxNames()]
            if not topo in txnames:  ## can safely skip
                continue
            newDS = []
            for dataset in er.datasets:
                newTxNames = []
                for txName in dataset.txnameList:
                    if txName.txName != topo:
                        continue
                    newTxNames.append(txName)
                if len(newTxNames) > 0:
                    dataset.txnameList = newTxNames
                    newDS.append(dataset)
            if len(newDS) > 0:
                er.datasets = newDS
                keepExpRes.append(er)
        self.pprint ( "filtered for %s, keeping %d/%d expRes" % \
                      ( topo, len(keepExpRes), nbefore) )
        self.listOfExpRes = keepExpRes

    def filterForTopos(self, topo):
        """ filter the list of expRes, keep only the ones for topo """
        keepExpRes = []
        nbefore = len(self.listOfExpRes)
        for er in self.listOfExpRes:
            txnames = [x.txName for x in er.getTxNames()]
            if not topo in txnames:  ## can safely skip
                continue
            newDS = []
            for dataset in er.datasets:
                newTxNames = []
                for txName in dataset.txnameList:
                    if txName.txName != topo:
                        continue
                    newTxNames.append(txName)
                if len(newTxNames) > 0:
                    dataset.txnameList = newTxNames
                    newDS.append(dataset)
            if len(newDS) > 0:
                er.datasets = newDS
                keepExpRes.append(er)
        self.pprint ( "filtered for %s, keeping %d/%d expRes" % \
                      ( topo, len(keepExpRes), nbefore) )
        self.listOfExpRes = keepExpRes

    def fetchResults(self):
        """ fetch the list of results, perform all selecting
            and modding """

        dataTypes = ["all"]
        if self.select == "em":
            dataTypes = ["efficiencyMap"]
        if self.select == "ul":
            dataTypes = ["upperLimit"]
        txnames = ["all"]
        if self.select.startswith("txnames:"):
            s = self.select.replace("txnames:", "")
            txnames = s.split(",")
            self.pprint("I have been asked to select txnames for %s" % s)

        listOfExpRes = self.database.getExpResults(dataTypes=dataTypes,
                                                   txnames=txnames,
                                                   useNonValidated=True)
        if self.modifier:
            listOfExpRes = self.modifier.modify(listOfExpRes)

        self.listOfExpRes = listOfExpRes
        if False:
            f = open("expresults.txt", "wt")
            for expRes in self.listOfExpRes:
                f.write("%s %s\n" % (expRes.id(), expRes.datasets[0]))
            f.close()

    def pprint(self, *args):
        """ logging """
        print("[predictor] %s" % (" ".join(map(str, args))))
        self.log(*args)

    def log(self, *args):
        """ logging to file """
        with open("walker%d.log" % self.walkerid, "a") as f:
            f.write("[predictor-%s] %s\n" %
                    (time.strftime("%H:%M:%S"), " ".join(map(str, args))))

    def predict(self,
                protomodel,
                sigmacut=0.02 * fb,
                strategy="aggressive",
                keep_predictions=False):
        """ Compute the predictions and statistical variables, for a
            protomodel.

        :param sigmacut: weight cut on the predict xsecs for theoryPredictions
        :param strategy: combination strategy, currently only aggressive is used
        :param keep_predictions: if True, then keep all predictions (in self,
               not in protomodel!!)
        :returns: False, if no combinations could be found, else True
        """

        if hasattr(self, "predictions"):
            del self.predictions  ## make sure we don't accidentally use old preds

        # Create SLHA file (for running SModelS)
        slhafile = protomodel.createSLHAFile()

        # First run SModelS using all results and considering only the best signal region.
        # That's the run for the critic.
        bestpreds = self.runSModelS(slhafile,
                                    sigmacut,
                                    allpreds=False,
                                    llhdonly=False)

        if keep_predictions:
            self.bestpreds = bestpreds
        # Extract the relevant prediction information and store in the protomodel:
        self.updateModelPredictions(protomodel, bestpreds)
        # self.log ( "model is excluded? %s" % str(protomodel.excluded) )

        # Compute the maximum allowed (global) mu value given the r-values
        # stored in protomodel
        protomodel.mumax = self.getMaxAllowedMu(protomodel)

        # now use all prediction with likelihood values to compute the Z of the model
        predictions = self.runSModelS(slhafile,
                                      sigmacut,
                                      allpreds=True,
                                      llhdonly=True)

        if keep_predictions:
            self.predictions = predictions

        # Compute significance and store in the model:
        self.computeSignificance(protomodel, predictions, strategy)
        if protomodel.Z is None:
            self.log(
                "done with prediction. Could not find combinations (Z=%s)" %
                (protomodel.Z))
            protomodel.delCurrentSLHA()
            return False
        else:
            self.log("done with prediction. best Z=%.2f (muhat=%.2f)" %
                     (protomodel.Z, protomodel.muhat))

        protomodel.cleanBestCombo()

        # Recompute predictions with higher accuracy for high-score models:
        if protomodel.Z > 2.7 and protomodel.nevents < 55000:
            protomodel.nevents = 100000
            protomodel.computeXSecs()
            # propagate all arguments and return the high-statistics result;
            # the recursive call deletes the SLHA file itself
            return self.predict(protomodel, sigmacut=sigmacut, strategy=strategy,
                                keep_predictions=keep_predictions)

        protomodel.delCurrentSLHA()
        return True

    def runSModelS(self, inputFile, sigmacut, allpreds, llhdonly):
        """ run smodels proper.
        :param inputFile: the input slha file
        :param sigmacut: the cut on the topology weights, typically 0.02*fb
        :param allpreds: if true, return all predictions of analyses, else
                         only best signal region
        :param llhdonly: if true, return only results with likelihoods
        :returns: list of theory predictions
        """

        if not os.path.exists(inputFile):
            self.pprint("error, cannot find inputFile %s" % inputFile)
            return []
        model = Model(BSMList, SMList)
        model.updateParticles(inputFile=inputFile)

        mingap = 10 * GeV

        # self.log ( "Now decomposing" )
        topos = decomposer.decompose(model, sigmacut, minmassgap=mingap)
        self.log("decomposed model into %d topologies." % len(topos))

        if allpreds:
            # combiner mode: keep all signal regions; combined results are
            # added separately below
            bestDataSet = False
            combinedRes = False
        else:
            # critic mode: only the best signal region, plus combined results
            # if enabled
            bestDataSet = True
            combinedRes = self.do_combine

        preds = []
        # self.log ( "start getting preds" )
        from smodels.tools import runtime
        runtime._experimental = True
        for expRes in self.listOfExpRes:
            predictions = theoryPredictionsFor(expRes,
                                               topos,
                                               useBestDataset=bestDataSet,
                                               combinedResults=combinedRes)
            if predictions is None:
                predictions = []
            if allpreds:
                combpreds = theoryPredictionsFor(
                    expRes,
                    topos,
                    useBestDataset=False,
                    combinedResults=self.do_combine)
                if combpreds is not None:
                    for c in combpreds:
                        predictions.append(c)
            for prediction in predictions:
                prediction.computeStatistics()
                if (not llhdonly) or (prediction.likelihood is not None):
                    preds.append(prediction)
        sap = "best preds"
        if allpreds:
            sap = "all preds"
        sllhd = ""
        if llhdonly:
            sllhd = ", llhds only"
        self.log ( "returning %d predictions, %s%s" % \
                   (len(preds),sap, sllhd ) )
        return preds
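
    # A hedged illustration of the two calling modes used in predict() above
    # (the file name is a placeholder):
    #   critic pass:   self.runSModelS("model.slha", 0.02*fb, allpreds=False, llhdonly=False)
    #   combiner pass: self.runSModelS("model.slha", 0.02*fb, allpreds=True,  llhdonly=True)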

    def printPredictions(self):
        """ if self.predictions exists, pretty print them """
        if hasattr(self, "predictions"):
            print("[predictor] all predictions (for combiner):")
            for p in self.predictions:
                print ( " - %s %s, %s %s" % \
                        ( p.analysisId(), p.dataType(), p.dataset.dataInfo.dataId, p.txnames ) )
        if hasattr(self, "bestpreds"):
            print("[predictor] best SR predictions (for critic):")
            for p in self.bestpreds:
                print ( " - %s %s, %s %s" % \
                        ( p.analysisId(), p.dataType(), p.dataset.dataInfo.dataId, p.txnames ) )

    def updateModelPredictions(self, protomodel, predictions):
        """ Extract information from list of theory predictions and store in the protomodel.
        :param predictions: all theory predictions
        :returns: list of tuples with observed r values, r expected and
                  theory prediction info (sorted with highest r-value first)
        """

        rvalues = []  #If there are no predictions set rmax and r2 to 0
        tpList = []
        for theorypred in predictions:
            r = theorypred.getRValue(expected=False)
            if r is None:
                self.pprint("I received %s as r. What do I do with this?" % r)
                r = 23.  # sentinel: treat a missing r-value as strongly excluded
            rexp = theorypred.getRValue(expected=True)
            # tpList.append( (r, rexp, self.combiner.removeDataFromTheoryPred ( theorypred ) ) )
            tpList.append((r, rexp, theorypred))
            rvalues.append(r)
        while len(rvalues) < 2:
            rvalues.append(0.)
        rvalues.sort(reverse=True)
        srs = "%s" % ", ".join(["%.2f" % x for x in rvalues[:3]])
        self.log("top r values before rescaling are: %s" % srs)
        protomodel.rvalues = rvalues  # note: includes the zero-padding added above
        # protomodel.excluded = protomodel.rvalues[0] > self.rthreshold #The 0.99 deals with the case rmax = threshold
        protomodel.tpList = tpList[:]

    def getMaxAllowedMu(self, protomodel):
        """ Compute the maximum (global) signal strength normalization
            given the predictions.
        """

        mumax = float("inf")
        if protomodel.rvalues[0] > 0.:
            #Set mumax slightly below threshold, so the model is never excluded
            mumax = 0.999 * self.rthreshold / protomodel.rvalues[0]

        return mumax
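
    # A worked numeric sketch (values assumed, not from the source): with an
    # exclusion threshold self.rthreshold = 1.38 and a strongest observed
    # r-value protomodel.rvalues[0] = 2.0, one gets
    #   mumax = 0.999 * 1.38 / 2.0 ≈ 0.69,
    # i.e. the global signal strength may be scaled up to ~69% of its nominal
    # value before the strongest r-value would reach the threshold.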

    def computeSignificance(self, protomodel, predictions, strategy):
        """ compute the K and Z values, and attach them to the protomodel """

        self.log("now find highest significance for %d predictions" %
                 len(predictions))
        ## find highest observed significance
        #(set mumax just slightly below its value, so muhat is always below)
        mumax = protomodel.mumax
        combiner = self.combiner
        bestCombo, Z, llhd, muhat = combiner.findHighestSignificance(
            predictions, strategy, expected=False, mumax=mumax)
        prior = combiner.computePrior(protomodel)
        if hasattr(protomodel, "keep_meta") and protomodel.keep_meta:
            protomodel.bestCombo = bestCombo
        else:
            protomodel.bestCombo = combiner.removeDataFromBestCombo(bestCombo)
        protomodel.Z = Z

        if Z is not None:  # Z is None when no combination was found
            protomodel.K = combiner.computeK(Z, prior)
        else:
            protomodel.K = None
        protomodel.llhd = llhd
        protomodel.muhat = muhat
        protomodel.letters = combiner.getLetterCode(protomodel.bestCombo)
        protomodel.description = combiner.getComboDescription(
            protomodel.bestCombo)
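
A minimal usage sketch for the predictor above (the constructor signatures and
the protomodel object are assumptions, not taken from this snippet):

predictor = Predictor(walkerid=0)     # assumed constructor
protomodel = ProtoModel(walkerid=0)   # hypothetical protomodel object
if predictor.predict(protomodel, keep_predictions=True):
    predictor.printPredictions()
    print("best combo: K=%s, Z=%s" % (protomodel.K, protomodel.Z))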
Example #27
def draw( strategy, databasepath, trianglePlot, miscol,
          diagcol, experiment, S, drawtimestamp, outputfile, nofastlim ):
    """
    :param trianglePlot: if True, then only plot the upper triangle of this
                         symmetrical matrix
    :param miscol: color to use when likelihood is missing
    :param diagcol: color to use for diagonal
    :param experiment: draw only for specific experiment ("CMS", "ATLAS", "all" )
    :param S: draw only for specific sqrts ( "8", "13", "all" )
    :param drawtimestamp: if true, put a timestamp on plot
    :param outputfile: file name of output file (matrix.png)
    :param nofastlim: if True, discard fastlim results
    """
    ROOT.gStyle.SetOptStat(0000)

    ROOT.gROOT.SetBatch()
    cols = [ ROOT.kRed+1, ROOT.kWhite, ROOT.kGreen+1, miscol, diagcol ]
    ROOT.gStyle.SetPalette(len(cols), (ctypes.c_int * len(cols))(*cols) )
    ROOT.gStyle.SetNumberContours(len(cols))

    ROOT.gStyle.SetPadLeftMargin(.25)

    sqrtses = [ 8, 13 ]
    if S not in [ "all" ]:
        sqrtses = [ int(S) ]

    colors.on = True
    setLogLevel ( "debug" )

    # dir = "/home/walten/git/smodels-database/"
    dir = databasepath
    d=Database( dir, discard_zeroes = True )
    print(d)
    analysisIds = [ "all" ]
    exps = [ "CMS", "ATLAS" ]
    if experiment in [ "CMS", "ATLAS" ]:
        analysisIds = [ experiment+"*" ]
        exps = [ experiment ]
    results = d.getExpResults( analysisIDs = analysisIds )
    if nofastlim:
        results = noFastlim ( results )
    results = sortOutDupes ( results )
    if S in [ "8", "13" ]:
        results = sortBySqrts ( results, int(S) )

    #results.sort()
    nres = len ( results )

    ROOT.c1=ROOT.TCanvas("c1","c1",1770,1540)
    ROOT.c1.SetLeftMargin(0.18)
    ROOT.c1.SetBottomMargin(0.21)
    ROOT.c1.SetTopMargin(0.06)
    ROOT.c1.SetRightMargin(0.01)
    if nres > 60:
        ROOT.c1.SetLeftMargin(0.12) ## seemed to work for 96 results
        ROOT.c1.SetBottomMargin(0.15)
        ROOT.c1.SetTopMargin(0.09)
        ROOT.c1.SetRightMargin(0.015)

    h=ROOT.TH2F ( "Correlations", "",
                  nres, 0., nres, nres, 0., nres )
    xaxis = h.GetXaxis()
    yaxis = h.GetYaxis()

    sze = 0.13 / math.sqrt ( nres )
    xaxis.SetLabelSize( 1.3*sze )
    yaxis.SetLabelSize( 1.3*sze )

    bins= { "CMS": { 8: [999,0], 13:[999,0] },
            "ATLAS": { 8: [999,0], 13: [999,0] } }

    n = len(results )
    for x,e in enumerate(results):
        label = e.globalInfo.id
        hasLikelihood = hasLLHD ( e )
        ana = analysisCombiner.getExperimentName ( e.globalInfo )
        #if not hasLikelihood:
        #    print ( "no likelihood: %s" % label )
        sqrts = int(e.globalInfo.sqrts.asNumber(TeV))
        color = ROOT.kCyan+2
        ymax=0
        if ana == "ATLAS":
            color = ROOT.kBlue+1
        if sqrts > 10.:
            color += 2
        if x < bins[ana][sqrts][0]:
            bins[ana][sqrts][0]=x
        if x > bins[ana][sqrts][1]:
            bins[ana][sqrts][1]=x
            ymax=x
        color = ROOT.kGray+2  # overrides the per-experiment colors chosen above
        if len(exps)==1 and len(sqrtses)==1:
            label = label.replace("CMS-","").replace("ATLAS-","").replace("-agg","")
        label = "#color[%d]{%s}" % (color, label )
        xaxis.SetBinLabel(n-x, label )
        yaxis.SetBinLabel(x+1, label )
        for y,f in enumerate(results):
            if trianglePlot and y>x:
                continue
            isUn = analysisCombiner.canCombine ( e.globalInfo, f.globalInfo, strategy )
            # isUn = e.isUncorrelatedWith ( f )
            if isUn:
                h.SetBinContent ( n-x, y+1, 1. )
            else:
                h.SetBinContent ( n-x, y+1, -1. )
            if not hasLikelihood or not hasLLHD ( f ): ## has no llhd? cannot be combined
                h.SetBinContent ( n-x, y+1, 2. )
            if y==x:
                h.SetBinContent ( n-x, y+1, 3. )

    h.Draw("col")
    ROOT.bins, ROOT.xbins, ROOT.lines = {}, {}, []
    if len(exps)==1 and len(sqrtses)==1:
        ROOT.t1 = ROOT.TLatex()
        ROOT.t1.SetNDC()
        ROOT.t1.DrawLatex ( .45, .95, "%s, %d TeV" % ( exps[0], sqrtses[0] ) )
        
    for ana in exps:
        for sqrts in sqrtses:
            name= "%s%d" % ( ana, sqrts )
            ROOT.bins[name] = ROOT.TLatex()
            ROOT.bins[name].SetTextColorAlpha(ROOT.kBlack,.7)
            ROOT.bins[name].SetTextSize(.025)
            ROOT.bins[name].SetTextAngle(90.)
            ROOT.xbins[name] = ROOT.TLatex()
            ROOT.xbins[name].SetTextColorAlpha(ROOT.kBlack,.7)
            ROOT.xbins[name].SetTextSize(.025)
            xcoord = .5 * ( bins[ana][sqrts][0] + bins[ana][sqrts][1] )
            ycoord = n- .5 * ( bins[ana][sqrts][0] + bins[ana][sqrts][1] ) -3
            if len(sqrtses)>1 or len(exps)>1:
                ROOT.bins[name].DrawLatex(-4,xcoord-3,"#splitline{%s}{%d TeV}" % ( ana, sqrts ) )
                ROOT.xbins[name].DrawLatex(ycoord,-5,"#splitline{%s}{%d TeV}" % ( ana, sqrts ) )
            yt = bins[ana][sqrts][1] +1
            extrudes = 3 # how far does the line extrude into tick labels?
            xmax = n
            if trianglePlot:
                xmax = n-yt
            line = ROOT.TLine ( -extrudes, yt, xmax, yt )
            line.SetLineWidth(2)
            line.Draw()
            ymax = n
            if trianglePlot:
                ymax = yt
            xline = ROOT.TLine ( n-yt, ymax, n-yt, -extrudes )
            xline.SetLineWidth(2)
            xline.Draw()
            ROOT.lines.append ( line )
            ROOT.lines.append ( xline )
    line = ROOT.TLine ( -extrudes, 0, xmax, 0 )
    line.SetLineWidth(2)
    line.Draw()
    xline = ROOT.TLine ( n, ymax, n, -extrudes )
    xline.SetLineWidth(2)
    xline.Draw()
    ROOT.lines.append ( line )
    ROOT.lines.append ( xline )
    h.LabelsOption("v","X")
    if trianglePlot:
        for i in range(n+1):
            wline = ROOT.TLine ( n, i, n-i, i )
            wline.SetLineColor ( ROOT.kWhite )
            wline.Draw ()
            ROOT.lines.append ( wline )
            vline = ROOT.TLine ( i, n-i, i, n )
            vline.SetLineColor ( ROOT.kWhite )
            vline.Draw ()
            ROOT.lines.append ( vline )  ## keep a reference to every line, not just the last
        ROOT.title = ROOT.TLatex()
        ROOT.title.SetNDC()
        ROOT.title.SetTextSize(.025 )
        ROOT.title.DrawLatex(.28,.89, "#font[132]{Correlations between analyses, combination strategy: ,,%s''}" % strategy )
    ROOT.boxes = []
    if trianglePlot:
        for i,b in enumerate ( [ "pair is uncorrelated", "pair is correlated", "likelihood is missing" ] ):
            bx = 51
            by = 68 - 3*i
            box = ROOT.TBox(bx,by,bx+1,by+1)
            c = cols[i]
            if i > 0:
                c = cols[i+1]
            box.SetFillColor ( c )
            box.Draw()
            ROOT.boxes.append ( box )
            l = ROOT.TLatex()
            l.SetTextSize(.022)
            #if i == 2:
            #    c = 16
            l.SetTextColor ( c )
            b="#font[132]{%s}" % b ## add font
            l.DrawLatex ( bx+2, by, b )
            ROOT.boxes.append ( l )
    l = ROOT.TLatex()
    l.SetNDC()
    l.SetTextColor(ROOT.kGray+1)
    l.SetTextSize(.015)
    if drawtimestamp:
        l.DrawLatex ( .01, .01, "plot produced %s from database v%s" % \
                      ( time.strftime("%h %d %Y" ), d.databaseVersion ) )
    ROOT.gPad.SetGrid()
    if "@M" in outputfile:
        modifiers = ""
        if len(exps)==1:
            modifiers += exps[0]
        if len(sqrtses)==1:
            modifiers += str(sqrtses[0])
        outputfile = outputfile.replace("@M",modifiers)
    print ( "Plotting to %s" % outputfile )
    ROOT.c1.Print( outputfile )
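
A hedged usage sketch for draw(); the argument values below are illustrative
placeholders only:

draw( strategy="aggressive", databasepath="./database.pcl", trianglePlot=True,
      miscol=ROOT.kGray, diagcol=ROOT.kBlack, experiment="all", S="all",
      drawtimestamp=True, outputfile="matrix_@M.png", nofastlim=False )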
Example #28
# In[1]:

#Set up the path to the SModelS installation folder if running from a different folder
import sys, os
sys.path.append(os.path.join(os.getenv("HOME"), "smodels/"))

# In[2]:

from smodels.experiment.databaseObj import Database
from smodels.tools.physicsUnits import GeV

# In[3]:

## Load the database:
dbPath = os.path.join(os.getenv("HOME"), "smodels-database/")
database = Database(dbPath)

# ## How to select results from one publication (or conference note)

# In[6]:

#Select only the CMS SUS-12-028 conference note
expID = ["CMS-SUS-12-028"]

# In[7]:

#Loads the selected analyses
#(The INFO tells you that superseded analyses are not loaded, see below)
results = database.getExpResults(analysisIDs=expID)

# In[9]:
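
# A hedged follow-up (not part of the original notebook): inspect the selection
for exp in results:
    print(exp.globalInfo.id, exp.getTxNames())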
Example #29
from smodels.tools import runtime
runtime.modelFile = 'smodels.share.models.mssm'
#runtime.modelFile = 'mssmQNumbers.slha'

from smodels.theory import decomposer
from smodels.tools.physicsUnits import fb, GeV, TeV
from smodels.theory.theoryPrediction import theoryPredictionsFor
from smodels.experiment.databaseObj import Database
from smodels.tools import coverage
from smodels.tools.smodelsLogging import setLogLevel
from smodels.particlesLoader import BSMList
from smodels.share.models.SMparticles import SMList
from smodels.theory.model import Model
setLogLevel("info")

# Set the path to the database
database = Database("official")


def main():
    """
    Main program. Displays basic use case.
    """
    model = Model(BSMparticles=BSMList, SMparticles=SMList)
    # Path to input file (either a SLHA or LHE file)
    #     lhefile = 'inputFiles/lhe/gluino_squarks.lhe'
    slhafile = 'inputFiles/slha/lightEWinos.slha'
    #     model.updateParticles(inputFile=lhefile)
    model.updateParticles(inputFile=slhafile)

    # Set main options for decomposition
    sigmacut = 0.01 * fb
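
    # The snippet is truncated here; a minimal continuation consistent with
    # the imports above (a sketch, not the original code) could be:
    mingap = 5. * GeV
    toplist = decomposer.decompose(model, sigmacut, doCompress=True,
                                   doInvisible=True, minmassgap=mingap)
    listOfExpRes = database.getExpResults()
    for expResult in listOfExpRes:
        predictions = theoryPredictionsFor(expResult, toplist)
        if predictions is None:
            continue
        for theoryPrediction in predictions:
            print(expResult.globalInfo.id, theoryPrediction.getRValue())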
Example #30
    # Note: this method assumes that the enclosing module imports os, fb, GeV,
    # Database, particlesLoader, slhaDecomposer, lheDecomposer and
    # theoryPredictionsFor from SModelS 1.2.x.
    def RunSModelS(self,SLHAFilePath,SummaryFilePath):
        # Set the path to the database
        database = Database("/home/oo1m20/softwares/smodels-1.2.2/smodels-database")

        self.SummaryFilePath = os.path.abspath(SummaryFilePath)

        #Define your model (list of rEven and rOdd particles)
        particlesLoader.load( 'smodels.share.models.secumssm' ) #Make sure all the model particles are up-to-date
    
        # Path to input file (either a SLHA or LHE file)
        self.SLHAFilePath = SLHAFilePath
        slhafile = self.SLHAFilePath
        #lhefile = 'inputFiles/lhe/gluino_squarks.lhe'

        # Set main options for decomposition
        sigmacut = 0.01 * fb
        mingap = 5. * GeV

    
        # Decompose model (use slhaDecomposer for SLHA input or lheDecomposer for LHE input)
        slhaInput = True
        if slhaInput:
            toplist = slhaDecomposer.decompose(slhafile, sigmacut, doCompress=True, doInvisible=True, minmassgap=mingap)
        else:
            # note: define lhefile (see the commented-out line above) before using this branch
            toplist = lheDecomposer.decompose(lhefile, doCompress=True, doInvisible=True, minmassgap=mingap)
        # Access basic information from decomposition, using the topology list and topology objects:
        f= open(self.SummaryFilePath,"a+")
        print( "\n Decomposition Results: ", file=f )
        print( "\t  Total number of topologies: %i " %len(toplist), file=f )
        nel = sum([len(top.elementList) for top in toplist])
        print( "\t  Total number of elements = %i " %nel , file=f)
        #Print information about the m-th topology (if it exists):
        m = 2
        if len(toplist) > m:
            top = toplist[m]
            print( "\t\t %i-th topology  = " %m,top,"with total cross section =",top.getTotalWeight(), file=f )
            #Print information about the n-th element in the m-th topology:
            n = 0
            el = top.elementList[n]
            print( "\t\t %i-th element from %i-th topology  = " %(n,m),el, end="", file=f )
            print( "\n\t\t\twith final states =",el.getFinalStates(),"\n\t\t\twith cross section =",el.weight,"\n\t\t\tand masses = ",el.getMasses(), file=f )
            
        # Load the experimental results to be used.
        # In this case, all results are employed.
        listOfExpRes = database.getExpResults()

        # Print basic information about the results loaded.
        # Count the number of loaded UL and EM experimental results:
        nUL, nEM = 0, 0
        for exp in listOfExpRes:
            expType = exp.getValuesFor('dataType')[0]
            if expType == 'upperLimit':
                nUL += 1
            elif  expType == 'efficiencyMap':
                nEM += 1
        print( "\n Loaded Database with %i UL results and %i EM results " %(nUL,nEM), file=f )

        # Compute the theory predictions for each experimental result and print them:
        print("\n Theory Predictions and Constraints:", file=f)
        rmax = 0.
        bestResult = None
        for expResult in listOfExpRes:
            predictions = theoryPredictionsFor(expResult, toplist, combinedResults=False, marginalize=False)
            if not predictions: continue # Skip if there are no constraints from this result
            print('\n %s ' %expResult.globalInfo.id, file=f)
            for theoryPrediction in predictions:
                dataset = theoryPrediction.dataset
                datasetID = dataset.dataInfo.dataId            
                mass = theoryPrediction.mass
                txnames = [str(txname) for txname in theoryPrediction.txnames]
                PIDs =  theoryPrediction.PIDs         
                print( "------------------------", file=f )
                print( "Dataset = ", datasetID, file=f )   #Analysis name
                print( "TxNames = ", txnames, file=f )  
                print( "Prediction Mass = ",mass, file=f )   #Value for average cluster mass (average mass of the elements in cluster)
                print( "Prediction PIDs = ",PIDs, file=f )   #Value for average cluster mass (average mass of the elements in cluster)
                print( "Theory Prediction = ",theoryPrediction.xsection, file=f )  #Signal cross section
                print( "Condition Violation = ",theoryPrediction.conditions, file=f ) #Condition violation values
              
                # Get the corresponding upper limit:
                print( "UL for theory prediction = ",theoryPrediction.upperLimit, file=f )

                # Compute the r-value
                r = theoryPrediction.getRValue()
                print( "r = ",r , file=f)
                #Compute likelihood and chi^2 for EM-type results:
                if dataset.dataInfo.dataType == 'efficiencyMap':
                    theoryPrediction.computeStatistics()
                    print( 'Chi2, likelihood=', theoryPrediction.chi2, theoryPrediction.likelihood, file=f )
                if r > rmax:
                    rmax = r
                    bestResult = expResult.globalInfo.id

        # Print the most constraining experimental result
        print( "\nThe largest r-value (theory/upper limit ratio) is ",rmax, file=f )
        if rmax > 1.:
            print( "(The input model is likely excluded by %s)" %bestResult, file=f )
        else:
            print( "(The input model is not excluded by the simplified model results)", file=f )

        f.close()
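
A hedged usage sketch (the enclosing class is not shown in this snippet, so
the class name and paths below are assumptions):

scanner = SModelSRunner()  # hypothetical enclosing class
scanner.RunSModelS("inputFiles/slha/lightEWinos.slha", "Summary.txt")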