def __init__(self, path=None):
    """
    Initialize the object from an info file.

    :param path: path to the info file; if None, create an empty object.
    :raises SModelSError: if the info file does not exist.
    """
    self.path = path
    if path:
        logger.debug('Creating object based on %s' % self.path)
        # Open the info file and get the information:
        if not os.path.isfile(path):
            logger.error("Info file %s not found" % path)
            raise SModelSError()
        from smodels.tools.stringTools import concatenateLines
        # Use a context manager so the file is closed even on error.
        with open(self.path) as infoFile:
            content = concatenateLines(infoFile.readlines())
        # Get tags in info file:
        tags = [line.split(':', 1)[0].strip() for line in content]
        for i, tag in enumerate(tags):
            if not tag:
                continue
            line = content[i]
            value = line.split(':', 1)[1].strip()
            if tags.count(tag) == 1:
                self.addInfo(tag, value)
            else:
                # Fixed log message: a tag appearing more than once is a
                # duplicated field, not an unknown one.
                logger.info("Ignoring duplicated field %s found in file %s"
                            % (tag, self.path))
def __init__(self, info=None):
    """
    Initializes the branch. If info is defined, tries to generate the
    branch using it.

    :parameter info: string describing the branch in bracket notation
                     (e.g. [[e+],[jet]])
    :raises SModelSError: if the string is malformed or contains a
                          particle unknown to the model.
    """
    self.masses = []
    self.particles = []
    self.PIDs = []
    self.maxWeight = None
    self.vertnumb = None
    self.vertparts = None
    # Idiom fix: isinstance instead of type(info) == type(str()).
    if isinstance(info, str):
        branch = elementsInStr(info)
        # The string must describe exactly one branch.
        if not branch or len(branch) > 1:
            logger.error("Wrong input string " + info)
            raise SModelSError()
        branch = branch[0]
        vertices = elementsInStr(branch[1:-1])
        for vertex in vertices:
            ptcs = vertex[1:-1].split(',')
            # Syntax check: every particle label must be known
            for ptc in ptcs:
                if not ptc in rEven.values() and not ptc in ptcDic:
                    logger.error("Unknown particle. Add " + ptc +
                                 " to smodels/particle.py")
                    raise SModelSError()
            self.particles.append(ptcs)
        self.vertnumb = len(self.particles)
        self.vertparts = [len(v) for v in self.particles]
def loadData(self, value):
    """
    Uses the information in value to generate the data grid used for
    interpolation.

    :param value: the data grid, either as a python object or as a string
                  to be evaluated (with fb/pb/GeV/TeV units in scope).
    """
    if self._V:
        # Data grid has already been processed; nothing to do.
        return
    if type(value) == str:
        # NOTE(review): eval on the stored string; assumed to come from
        # trusted database text files, never from untrusted input.
        value = eval(value, {'fb': fb, 'pb': pb, 'GeV': GeV, 'TeV': TeV})
    self.unit = 1.0
    ## store the unit so that we can take arbitrary units for
    ## the "z" values. default is unitless,
    ## which we use for efficiency maps
    if len(value) < 1 or len(value[0]) < 2:
        # NOTE(review): only logs the format error; execution continues and
        # will likely fail below on malformed input -- confirm intended.
        logger.error ( "input value not in correct format. expecting sth " \
               "like [ [ [[ 300.*GeV,100.*GeV], "\
               "[ 300.*GeV,100.*GeV] ], 10.*fb ], ... ] "\
               "for upper limits or [ [ [[ 300.*GeV,100.*GeV],"\
               " [ 300.*GeV,100.*GeV] ], .1 ], ... ] for "\
               "efficiency maps" )
    if type(value[0][1]) == unum.Unum:
        ## if its a unum, we store 1.0 * unit
        self.unit = value[0][1] / (value[0][1].asNumber())
    self.computeV(value)
def setFinalState(self, finalState=None):
    """
    If finalState = None, define the branch final state according to the
    PID of the last R-odd particle appearing in the cascade decay. Else
    set the final state to the finalState given.

    :parameter finalState: String defining the final state
    :raises SModelSError: if the final state is unknown or ambiguous.
    """
    if finalState:
        if finalState == '*':
            finalState = InclusiveStr()
        if not finalState in list(finalStates.keys()):
            raise SModelSError(
                "Final state %s has not been defined. Add it to particles.py."
                % finalState)
        self.finalState = finalState
    # If PIDs have been defined, use them:
    elif self.PIDs:
        fStates = set()
        for pidList in self.PIDs:
            fStates.add(getFinalStateLabel(pidList[-1]))
        if len(fStates) != 1:
            logger.error("Error obtaining the final state for branch %s" % self)
            # Fixed: raise an instance with a message instead of the bare
            # exception class (consistent with the raises above).
            raise SModelSError("Could not obtain a unique final state "
                               "for branch %s" % self)
        self.finalState = list(fStates)[0]
    # Else do nothing
    else:
        self.finalState = None
def flush(self):
    """
    Get the python dictionaries generated by the object formatting to the
    defined output and convert to XML.
    """
    # Merge the formatted dictionaries of every queued object.
    collected = {}
    for entry in self.toPrint:
        if entry is None:
            continue
        formatted = self._formatObj(entry)  # convert to python dictionaries
        if formatted:
            collected.update(formatted)
    root = None
    if collected:
        # Convert from python dictionaries to xml:
        root = ElementTree.Element('smodelsOutput')
        self.convertToElement(collected, root)
        raw_xml = ElementTree.tostring(root, 'utf-8')
        pretty_xml = minidom.parseString(raw_xml).toprettyxml(indent=" ")
        if self.output == 'stdout':
            sys.stdout.write(pretty_xml)
        elif self.output == 'file':
            if not self.filename:
                logger.error('Filename not defined for printer')
                return False
            with open(self.filename, "a") as outfile:
                outfile.write(pretty_xml)
                outfile.close()
    # Reset the queue of objects to be printed.
    self.toPrint = [None] * len(self.printingOrder)
    return root
def flush(self):
    """
    Write the python dictionaries generated by the object formatting
    to the defined output.
    """
    merged = {}
    for entry in self.toPrint:
        if entry is None:
            continue
        formatted = self._formatObj(entry)
        if not formatted:
            continue  # skip empty output
        merged.update(formatted)
    text = 'smodelsOutput = ' + str(merged)
    if self.output == 'stdout':
        sys.stdout.write(text)
    elif self.output == 'file':
        if not self.filename:
            logger.error('Filename not defined for printer')
            return False
        with open(self.filename, "a") as outfile:
            outfile.write(text)
            outfile.close()
    # Reset the printing queue.
    self.toPrint = [None] * len(self.printingOrder)
    ## it is a special feature of the python printer
    ## that we also return the output dictionary
    return merged
def testGoodFile13(self):
    """
    Run SModelS on the simplyGluino input and compare the output against
    the stored reference file (simplyGluino_default.py).
    """
    filename = "./testFiles/slha/simplyGluino.slha"
    outputfile = runMain(filename,suppressStdout = True )
    if self.definingRun:
        # In a "definition" run the current output is promoted to be the
        # new reference file -- only do this deliberately.
        logger.error ( "This is a definition run! Know what youre doing!" )
        default = "simplyGluino_default.py"
        cmd = "cat %s | sed -e 's/smodelsOutput/smodelsOutputDefault/' > %s" % ( outputfile, default )
        a = subprocess.getoutput ( cmd )
    smodelsOutput = importModule ( outputfile )
    from simplyGluino_default import smodelsOutputDefault
    # Fields that may legitimately differ between runs/environments:
    ignoreFields = ['input file','smodels version', 'ncpus', 'Element', 'database version', 'Total missed xsec',
                    'Missed xsec long-lived', 'Missed xsec displaced', 'Missed xsec MET', 'Total outside grid xsec',
                    'Total xsec for missing topologies (fb)','Total xsec for missing topologies with displaced decays (fb)',
                    'Total xsec for missing topologies with prompt decays (fb)',
                    'Total xsec for topologies outside the grid (fb)']
    # Sort results by r-value so the comparison is order-independent.
    smodelsOutputDefault['ExptRes'] = sorted(smodelsOutputDefault['ExptRes'],
                                             key=lambda res: res['r'], reverse=True)
    equals = equalObjs(smodelsOutput,smodelsOutputDefault,allowedDiff=0.08,
                       ignore=ignoreFields, fname = outputfile )
    if not equals:
        e = "output13.py != simplyGluino_default.py"
        logger.error( e )
        # raise AssertionError( e )
    self.assertTrue(equals)
    ## test went through, so remove the output files
    self.removeOutputs( outputfile )
def totalChi2(self, nsig, marginalize=False, deltas_rel=0.2):
    """
    Computes the total chi2 for a given number of observed events, given a
    predicted signal "nsig", with nsig being a vector with one entry per
    dataset. nsig has to obey the datasetOrder. Deltas is the error on
    the signal efficiency.

    :param nsig: predicted signal (list)
    :param marginalize: if True marginalize the nuisances, else profile
    :param deltas_rel: relative uncertainty in signal (float).
                       Default value is 20%.
    :returns: chi2 (float)
    """
    # A single dataset needs no combination; delegate to it directly.
    if len(self._datasets) == 1:
        if isinstance(nsig, list):
            nsig = nsig[0]
        return self._datasets[0].chi2(nsig, marginalize=marginalize)
    # Combining several datasets requires a covariance matrix.
    if not hasattr(self.globalInfo, "covariance"):
        logger.error("Asked for combined likelihood, but no covariance error given.")
        return None
    observed = [ds.dataInfo.observedN for ds in self._datasets]
    backgrounds = [ds.dataInfo.expectedBG for ds in self._datasets]
    computer = LikelihoodComputer(
        Data(observed, backgrounds, self.globalInfo.covariance,
             deltas_rel=deltas_rel))
    return computer.chi2(nsig, marginalize=marginalize)
def testRuntimeImport(self):
    """
    Check that a non-default model (idm) can be selected at runtime and
    that the resulting output matches the stored reference file.
    """
    filename = "./testFiles/slha/idm_example.slha"
    # Switch the particle content to the idm model and reload it.
    runtime.modelFile = 'idm'
    reload(particlesLoader)
    outputfile = runMain(filename,inifile='testParameters_noModel.ini',suppressStdout=True)
    if self.definingRun:
        # In a "definition" run the current output becomes the new
        # reference file -- only do this deliberately.
        logger.error ( "This is a definition run! Know what youre doing!" )
        default = "idm_example_defaultB.py"
        cmd = "cat %s | sed -e 's/smodelsOutput/smodelsOutputDefault/' > %s" % ( outputfile, default )
        a = subprocess.getoutput ( cmd )
    smodelsOutput = importModule ( outputfile )
    from idm_example_defaultB import smodelsOutputDefault
    # Fields that may legitimately differ between runs/environments:
    ignoreFields = ['input file','smodels version', 'ncpus', 'Element', 'database version',
                    'Total missed xsec', 'Missed xsec long-lived', 'Missed xsec displaced',
                    'Missed xsec MET', 'Total outside grid xsec',
                    'Total xsec for missing topologies (fb)',
                    'Total xsec for missing topologies with displaced decays (fb)',
                    'Total xsec for missing topologies with prompt decays (fb)',
                    'Total xsec for topologies outside the grid (fb)']
    # Sort results by r-value so the comparison is order-independent.
    smodelsOutputDefault['ExptRes'] = sorted(smodelsOutputDefault['ExptRes'],
                                             key=lambda res: res['r'], reverse=True)
    equals = equalObjs(smodelsOutput,smodelsOutputDefault,allowedDiff=0.1,
                       ignore=ignoreFields, fname = outputfile )
    self.assertTrue(equals)
    self.removeOutputs(outputfile)
def testCombinedResult(self):
    """
    Run SModelS with aggregated (combined) signal regions and compare the
    output against the stored reference file.
    """
    filename = "./testFiles/slha/gluino_squarks.slha"
    outputfile = runMain(filename, inifile="testParameters_agg.ini",
                         suppressStdout=True)
    with open(outputfile, 'rb') as fp:
        ## imports file with dots in name
        output_module = imp.load_module("output", fp, outputfile,
                                        ('.py', 'rb', imp.PY_SOURCE))
    smodelsOutput = output_module.smodelsOutput
    from gluino_squarks_default_agg import smodelsOutputDefault
    # Fields that may legitimately differ between runs/environments:
    ignoreFields = [ 'input file', 'smodels version', 'ncpus', 'database version' ]
    # Sort results by r-value so the comparison is order-independent.
    smodelsOutputDefault['ExptRes'] = sorted(
        smodelsOutputDefault['ExptRes'], key=lambda res: res['r'], reverse=True)
    equals = equalObjs(smodelsOutput, smodelsOutputDefault, allowedDiff=0.02,
                       ignore=ignoreFields)
    if equals != True:
        logger.error("%s differs from %s!" %
                     ("gluino_squarks_default_agg.py", outputfile))
    self.assertTrue(equals)
    # Clean up the generated output file and its compiled cache.
    for i in [outputfile, outputfile.replace(".py", ".pyc")]:
        if os.path.exists(i):
            os.remove(i)
def getFinalStateLabel(pid):
    """
    Given the particle PID, returns the label corresponding to its final
    state (e.g. 1000022 -> MET, 1000023 -> HSCP,...)

    :parameter pid: PDG code for particle (must appear in particles.py)
    :return: Final state string (e.g. MET, HSCP,...)
    :raises SModelSError: if the pid has no quantum numbers or no final
                          state defined.
    """
    from smodels.particlesLoader import qNumbers
    if pid == 0:
        # If pid is zero, return displaced
        return "Displaced"
    if not abs(pid) in qNumbers:
        logger.error("qNumbers are not defined for %i. Please, add it to particles.py." % pid)
        raise SModelSError()  # fixed: raise an instance, not the bare class
    elif not pid in qNumbers:
        # Use the anti-particle info. Work on a copy: the original code
        # flipped the charge *in place*, corrupting the global qNumbers
        # dictionary for all subsequent lookups.
        pidQnumber = qNumbers[abs(pid)][:]
        pidQnumber[1] = -pidQnumber[1]  # flip the charge sign
    else:
        pidQnumber = qNumbers[pid]
    for key, qnumberList in finalStates.items():
        if pidQnumber in qnumberList:
            return key
    logger.error("Final state for %i not found. Please, add it to particles.py." % pid)
    raise SModelSError()
def getWSInfo(self):
    """
    Getting informations from the json files

    :ivar channelsInfo: list of dictionaries (one dictionary for each json
        file) containing useful information about the json files
        - :key signalRegions: list of dictonaries with 'json path' and
          'size' (number of bins) of the 'signal regions' channels in the
          json files
        - :key otherRegions: list of strings indicating the path to the
          control and validation region channels
    """
    # Identifying the path to the SR and VR channels in the main workspace files
    self.channelsInfo = []  # workspace specifications
    if not isinstance(self.inputJsons, list):
        logger.error("The `inputJsons` parameter must be of type list")
        self.errorFlag = True
        return
    for ws in self.inputJsons:
        regionInfo = {'signalRegions': [], 'otherRegions': []}
        if not 'channels' in ws.keys():
            logger.error("Json file number {} is corrupted (channels are missing)".format(self.inputJsons.index(ws)))
            self.channelsInfo = None
            return
        for chIndex, channel in enumerate(ws['channels']):
            if channel['name'][:2] == 'SR':
                # Channel name starts with 'SR': signal region. Record the
                # path of the new sample to add (signal prediction) and the
                # number of bins.
                regionInfo['signalRegions'].append(
                    {'path': '/channels/' + str(chIndex) + '/samples/0',
                     'size': len(channel['samples'][0]['data'])})
            else:
                regionInfo['otherRegions'].append('/channels/' + str(chIndex))
        # Sort the paths correctly so the channels are removed in the
        # right order later on.
        regionInfo['otherRegions'].sort(key=lambda path: path.split('/')[-1],
                                        reverse=True)
        self.channelsInfo.append(regionInfo)
def wsMaker(self):
    """
    Apply each region patch (self.patches) to his associated json
    (self.inputJsons) to obtain the complete workspaces

    :returns: the list of patched workspaces, or None on failure
    """
    if self.patches == None:
        return None
    if self.nWS == 1:
        # Single workspace: patch and wrap it directly.
        try:
            return [pyhf.Workspace(jsonpatch.apply_patch(self.inputJsons[0], self.patches[0]))]
        except (pyhf.exceptions.InvalidSpecification, KeyError) as e:
            logger.error("The json file is corrupted:\n{}".format(e))
            return None
    else:
        workspaces = []
        for js, patch in zip(self.inputJsons, self.patches):
            wsDict = jsonpatch.apply_patch(js, patch)
            try:
                ws = pyhf.Workspace(wsDict)
            except (pyhf.exceptions.InvalidSpecification, KeyError) as e:
                # Fixed: the original passed `json` (the module) to index(),
                # instead of the offending workspace dict `js`.
                logger.error("Json file number {} is corrupted:\n{}".format(self.inputJsons.index(js), e))
                return None
            workspaces.append(ws)
        return workspaces
def __init__(self, base=None, force_load=None, verbosity=None):
    """
    :param base: path to the database folder
    :param force_load: force loading the text database ("txt"),
                       or binary database ("pcl"), dont force anything
                       if None
    :param verbosity: logging verbosity of the database
    """
    self.force_load = force_load
    self.pclfilename = "database.pcl"
    self.hasFastLim = False  # True if any ExpResult is from fastlim
    self._validateBase(base)
    self._verbosity = verbosity
    self._databaseVersion = None
    self.expResultList = []
    # NOTE(review): two-element placeholder tuples -- presumably
    # (modification time, file name); confirm against the loaders.
    self.txt_mtime = None, None
    self.pcl_mtime = None, None
    self.pcl_db = None
    self.sw_format_version = "115"  ## what format does the software support?
    self.pcl_format_version = None  ## what format is in the binary file?
    self.binfile = os.path.join(self._base, self.pclfilename)
    setLogLevel(self._verbosity)
    # Dispatch on force_load; every valid branch returns early.
    if self.force_load == "txt":
        self.loadTextDatabase()
        self.printFastlimBanner()
        return
    if self.force_load == "pcl":
        self.loadBinaryFile()
        self.printFastlimBanner()
        return
    if self.force_load in [None, "none", "None"]:
        self.loadDatabase()
        self.printFastlimBanner()
        return
    # Unrecognized force_load value: report and abort.
    logger.error ( "when initialising database: force_load=%s is not " \
                   "recognized. Valid values are: pcl, txt, None." % force_load )
    sys.exit()
def __init__(self, path=None):
    """
    Initialize the object from the given info file.

    :param path: path to the info file; if None, create an empty object
    :raises SModelSError: if the info file does not exist
    """
    self.path = path
    if path:
        logger.debug('Creating object based on %s' % self.path)
        # Open the info file and get the information:
        if not os.path.isfile(path):
            logger.error("Info file %s not found" % path)
            raise SModelSError()
        from smodels.tools.stringTools import concatenateLines
        # Context manager guarantees the file handle is released.
        with open(self.path) as infoFile:
            content = concatenateLines(infoFile.readlines())
        # Get tags in info file:
        tags = [line.split(':', 1)[0].strip() for line in content]
        for i, tag in enumerate(tags):
            if not tag:
                continue
            value = content[i].split(':', 1)[1].strip()
            if tags.count(tag) == 1:
                self.addInfo(tag, value)
            else:
                # Fixed log message: the field is duplicated, not unknown.
                logger.info("Ignoring duplicated field %s found in file %s"
                            % (tag, self.path))
def __init__(self, path=None, info=None, createInfo=True,
             discard_zeroes=True, databaseParticles = None):
    """
    :param path: path to the data set folder
    :param info: globalInfo object of the experimental result
    :param createInfo: if True, parse dataInfo.txt and the txname files
    :param discard_zeroes: discard txnames with zero-only results
    :param databaseParticles: model describing the database particle content
    """
    self.path = path
    self.globalInfo = info
    self.txnameList = []
    if path and createInfo:
        logger.debug('Creating object based on data folder : %s' %self.path)
        #Get data folder info:
        if not os.path.isfile(os.path.join(path,"dataInfo.txt")):
            logger.error("dataInfo.txt file not found in " + path)
            raise TypeError
        self.dataInfo = infoObj.Info(os.path.join(path,"dataInfo.txt"))
        #Get list of TxName objects:
        for txtfile in glob.iglob(os.path.join(path,"*.txt")):
            try:
                txname = txnameObj.TxName(txtfile,self.globalInfo,
                                          self.dataInfo, databaseParticles)
                if discard_zeroes and txname.hasOnlyZeroes():
                    logger.debug ( "%s, %s has only zeroes. discard it." % \
                                   ( self.path, txname.txName ) )
                    continue
                self.txnameList.append(txname)
            except TypeError:
                # Files that are not valid txname files (e.g. dataInfo.txt
                # itself) raise TypeError and are skipped.
                continue
        self.txnameList.sort()
        self.checkForRedundancy(databaseParticles)
def __init__(self, path=None, info=None, createInfo=True):
    """
    Holds the data and global info of a single data set folder.

    :param path: path to the data set folder
    :param info: globalInfo object of the experimental result
    :param createInfo: if True, parse dataInfo.txt and the txname files
    """
    self.path = path
    self.globalInfo = info
    self.txnameList = []
    if path and createInfo:
        logger.debug('Creating object based on data folder : %s' % self.path)
        # Get data folder info:
        infoPath = os.path.join(path, "dataInfo.txt")
        if not os.path.isfile(infoPath):
            logger.error("dataInfo.txt file not found in " + path)
            raise TypeError
        self.dataInfo = infoObj.Info(infoPath)
        # Get list of TxName objects (non-txname .txt files raise
        # TypeError and are skipped):
        for txtfile in glob.iglob(os.path.join(path, "*.txt")):
            try:
                self.txnameList.append(
                    txnameObj.TxName(txtfile, self.globalInfo, self.dataInfo))
            except TypeError:
                continue
        self.txnameList.sort()
def checkForRedundancy(self, databaseParticles):
    """
    In case of efficiency maps, check if any txnames have overlapping
    constraints. This would result in double counting, so we dont allow it.
    """
    if self.getType() == "upperLimit":
        return False
    logger.debug ( "checking for redundancy" )
    # Collect every element appearing in any constraint of this dataset.
    datasetElements = []
    for tx in self.txnameList:
        # Fall back to defaults when the txname does not specify states.
        finalState = tx.finalState if hasattr(tx, 'finalState') else ['MET', 'MET']
        intermediateState = tx.intermediateState if hasattr(tx, 'intermediateState') else None
        for el in elementsInStr(str(tx.constraint)):
            datasetElements.append(Element(el, finalState, intermediateState,
                                           model=databaseParticles))
    # Equal elements in two different constraints mean an overlap.
    for x, y in itertools.combinations(datasetElements, 2):
        if x == y and _complainAboutOverlappingConstraints:
            errmsg = "Constraints (%s) and (%s) appearing in dataset %s:%s overlap "\
                     "(may result in double counting)." % \
                     (x, y, self.getID(), self.globalInfo.id)
            logger.error(errmsg)
            raise SModelSError(errmsg)
def createExpResult ( self, root ):
    """ create, from pickle file or text files

    :param root: path to the experimental result folder
    :returns: ExpResult object -- loaded from the per-result pickle file
              when it is up to date, otherwise built from the text files.
    """
    # Meta data of the text version, used to decide pickle staleness.
    txtmeta = Meta ( root, discard_zeroes = self.txt_meta.discard_zeroes,
                     hasFastLim=None, databaseVersion = self.databaseVersion )
    pclfile = "%s/.%s" % ( root, txtmeta.getPickleFileName() )
    logger.debug ( "Creating %s, pcl=%s" % (root,pclfile ) )
    expres = None
    try:
        # logger.info ( "%s exists? %d" % ( pclfile,os.path.exists ( pclfile ) ) )
        if not self.force_load=="txt" and os.path.exists ( pclfile ):
            # logger.info ( "%s exists" % ( pclfile ) )
            with open(pclfile,"rb" ) as f:
                logger.debug ( "Loading: %s" % pclfile )
                ## read meta from pickle
                pclmeta = serializer.load ( f )
                if not pclmeta.needsUpdate ( txtmeta ):
                    # Pickle is up to date: the result itself follows the
                    # meta record in the same file.
                    logger.debug ( "we can use expres from pickle file %s" % pclfile )
                    expres = serializer.load ( f )
                else:
                    logger.debug ( "we cannot use expres from pickle file %s" % pclfile )
                    logger.debug ( "txt meta %s" % txtmeta )
                    logger.debug ( "pcl meta %s" % pclmeta )
                    logger.debug ( "pcl meta needs update %s" % pclmeta.needsUpdate ( txtmeta ) )
    except IOError as e:
        logger.error ( "exception %s" % e )
    if not expres:
        ## create from text file
        expres = ExpResult(root, discard_zeroes = self.txt_meta.discard_zeroes )
        if self.subpickle and expres:
            # Cache the freshly built result for the next run.
            expres.writePickle( self.databaseVersion )
    if expres:
        # Remember whether the database contains fastlim results.
        contact = expres.globalInfo.getInfo("contact")
        if contact and "fastlim" in contact.lower():
            self.txt_meta.hasFastLim = True
    return expres
def setMasses(self, mass, sameOrder=True, opposOrder=False):
    """
    Set the element masses to the input mass array.

    :parameter mass: list of masses ([[masses for branch1],[masses for branch2]])
    :parameter sameOrder: if True, set the masses to the same branch ordering
                          If True and opposOrder=True, set the masses to the
                          smaller of the two orderings.
    :parameter opposOrder: if True, set the masses to the opposite branch
                           ordering. If True and sameOrder=True, set the
                           masses to the smaller of the two orderings.
    :raises SModelSError: if no ordering was requested or the number of
                          mass branches does not match the element.
    """
    if sameOrder and opposOrder:
        # Both orderings allowed: pick the smaller of the two.
        newmass = sorted(mass)
    elif sameOrder:
        newmass = mass
    elif opposOrder:
        newmass = [mass[1], mass[0]]
    else:
        logger.error("Called with no possible ordering")
        raise SModelSError()
    if len(newmass) != len(self.branches):
        logger.error("Called with wrong number of mass branches")
        raise SModelSError()
    # Copy each branch's mass list. (Fixed: the loop variable used to be
    # named `mass`, shadowing the function parameter.)
    for ibr, branchMasses in enumerate(newmass):
        self.branches[ibr].masses = branchMasses[:]
def testGoodFile(self):
    """
    Run SModelS on the gluino_squarks input and compare the output
    against the stored reference file (gluino_squarks_default.py).
    """
    filename = "./testFiles/slha/gluino_squarks.slha"
    outputfile = runMain(filename)
    if self.definingRun:
        # In a "definition" run the current output becomes the new
        # reference file -- only do this deliberately.
        logger.error ( "This is a definition run! Know what youre doing!" )
        default = "gluino_squarks_default.py"
        cmd = "cat %s | sed -e 's/smodelsOutput/smodelsOutputDefault/' > %s" % ( outputfile, default )
        a = subprocess.getoutput ( cmd )
    smodelsOutput = importModule ( outputfile )
    from gluino_squarks_default import smodelsOutputDefault
    # Fields that may legitimately differ between runs/environments:
    ignoreFields = ['input file','smodels version', 'ncpus', 'Element', 'database version', 'Total missed xsec',
                    'Missed xsec long-lived', 'Missed xsec displaced', 'Missed xsec MET', 'Total outside grid xsec',
                    'Total xsec for missing topologies (fb)','Total xsec for missing topologies with displaced decays (fb)',
                    'Total xsec for missing topologies with prompt decays (fb)',
                    'Total xsec for topologies outside the grid (fb)']
    # Sort results by r-value so the comparison is order-independent.
    smodelsOutputDefault['ExptRes'] = sorted(smodelsOutputDefault['ExptRes'],
                                             key=lambda res: res['r'], reverse=True)
    equals = equalObjs(smodelsOutput,smodelsOutputDefault,allowedDiff=0.02,
                       ignore=ignoreFields, fname = outputfile )
    # Remove stray output files left behind by the run.
    for i in [ './output.py', './output.pyc' ]:
        if os.path.exists( i ):
            os.remove( i )
    if not equals:
        # Print a short relative path for readability.
        p = outputfile.find ( "unitTestOutput" )
        fname = outputfile
        if p > 0:
            fname = fname[p:]
        print ( "[testRunSModelS] %s != %s" % \
                ( fname, "gluino_squarks_default.py" ) )
    self.assertTrue(equals)
    self.removeOutputs( outputfile )
def _getBestResult(dataSetResults):
    """
    Returns the best result according to the expected upper limit

    :param dataSetResults: list of TheoryPredictionList objects
    :return: best result (TheoryPrediction object), or None if no
             candidate could be selected
    """
    # In the case of UL analyses or efficiency-maps with a single signal
    # region return the single result:
    if len(dataSetResults) == 1:
        return dataSetResults[0]
    # For efficiency-map analyses with multiple signal regions,
    # select the best one according to the expected upper limit:
    bestExpectedR = 0.
    bestXsec = 0.*fb
    bestPred = None  # fixed: avoid NameError when no candidate qualifies
    for predList in dataSetResults:
        if len(predList) != 1:
            logger.error("Multiple clusters should only exist for upper limit results!")
            raise SModelSError()
        dataset = predList[0].dataset
        if dataset.getType() != 'efficiencyMap':
            txt = "Multiple data sets should only exist for efficiency map results, but we have them for %s?" % (predList[0].analysisId())
            logger.error( txt )
            raise SModelSError( txt )
        pred = predList[0]
        xsec = pred.xsection
        # Expected r-value = predicted xsec / expected SR upper limit.
        expectedR = (xsec.value/dataset.getSRUpperLimit(0.05,True,False) ).asNumber()
        # Ties on expectedR are broken by the larger cross section.
        if expectedR > bestExpectedR or (expectedR == bestExpectedR and xsec.value > bestXsec):
            bestExpectedR = expectedR
            bestPred = pred
            bestXsec = xsec.value
    return bestPred
def flush(self):
    """
    Write the python dictionaries generated by the object formatting
    to the defined output.
    """
    result = {}
    # Gather the formatted dictionaries of all queued objects.
    for item in self.toPrint:
        if item is None:
            continue
        formatted = self._formatObj(item)
        if not formatted:
            continue  # skip empty output
        result.update(formatted)
    text = 'smodelsOutput = ' + str(result)
    if self.output == 'stdout':
        sys.stdout.write(text)
    elif self.output == 'file':
        if not self.filename:
            logger.error('Filename not defined for printer')
            return False
        with open(self.filename, "a") as outfile:
            outfile.write(text)
            outfile.close()
    # Reset the printing queue.
    self.toPrint = [None] * len(self.printingOrder)
    ## it is a special feature of the python printer
    ## that we also return the output dictionary
    return result
def _evalConstraint(cluster):
    """
    Evaluate the constraint inside an element cluster.
    If the cluster refers to a specific TxName, sum all the elements'
    weights according to the analysis constraint.
    For efficiency map results, sum all the elements' weights.

    :parameter cluster: cluster of elements (ElementCluster object)
    :returns: cluster cross section
    """
    dataType = cluster.getDataType()
    if dataType == 'efficiencyMap':
        # Efficiency maps: simply add up all element weights.
        return cluster.getTotalXSec()
    if dataType == 'upperLimit':
        if len(cluster.txnames) != 1:
            logger.error("An upper limit cluster should never contain more than one TxName")
            raise SModelSError()
        txname = cluster.txnames[0]
        # No (usable) constraint string: return it unchanged.
        if not txname.constraint or txname.constraint == "not yet assigned":
            return txname.constraint
        return _evalExpression(txname.constraint, cluster)
    logger.error("Unknown data type %s" % (str(dataType)))
    raise SModelSError()
def decayDaughter(self, brDictionary, massDictionary):
    """
    Generate a list of all new branches generated by the 1-step cascade
    decay of the current branch daughter.

    :parameter brDictionary: dictionary with the decay information
                             for all intermediate states (values are
                             br objects, see pyslha)
    :parameter massDictionary: dictionary containing the masses for all
                               intermediate states.
    :returns: list of extended branches (Branch objects). Empty list if
              daughter is stable or if daughterID was not defined.
    """
    if len(self.PIDs) != 1:
        # NOTE(review): this exit returns False while every other exit
        # returns a list -- callers must handle both; confirm intended.
        logger.error("Can not decay branch with multiple PID lists")
        return False
    if not self.PIDs[0][-1]:
        # Do nothing if there is no R-odd daughter (relevant for RPV decays
        # of the LSP)
        return []
    #If decay table is not defined, assume daughter is stable:
    if not self.PIDs[0][-1] in brDictionary:
        return []
    # List of possible decays (brs) for R-odd daughter in branch
    brs = brDictionary[self.PIDs[0][-1]]
    if len(brs) == 0:
        # Daughter is stable, there are no new branches
        return []
    newBranches = []
    for br in brs:
        if not br.br:
            continue  #Skip zero BRs
        # Generate a new branch for each possible decay
        newBranches.append(self._addDecay(br, massDictionary))
    return newBranches
def _evalConditions(cluster):
    """
    Evaluate the conditions (if any) inside an element cluster.

    :parameter cluster: cluster of elements (ElementCluster object)
    :returns: list of condition values (floats) if analysis type == upper
              limit. None, otherwise.
    """
    values = {}
    for txname in cluster.txnames:
        condition = txname.condition
        if not condition or condition == "not yet assigned":
            continue
        # Normalize to a list of condition strings.
        if isinstance(condition, str):
            condition = [condition]
        elif not isinstance(condition, list):
            logger.error("Conditions should be a list or a string")
            raise SModelSError()
        # Evaluate each condition expression on the cluster.
        for cond in condition:
            result = _evalExpression(cond, cluster)
            if isinstance(result, crossSection.XSection):
                result = result.value
            values[cond] = result
    if not values:
        return None
    return values
def compute(self, sqrts, slhafile, lhefile=None, unlink=True, loFromSlha=None,
            pythiacard=None):
    """
    Run pythia and compute SUSY cross sections for the input SLHA file.

    :param sqrts: sqrt{s} to run Pythia, given as a unum (e.g. 7.*TeV)
    :param slhafile: SLHA file
    :param lhefile: LHE file. If None, do not write pythia output to file. If
                    file does not exist, write pythia output to this file name. If
                    file exists, read LO xsecs from this file (does not run pythia).
    :param unlink: Clean up temp directory after running pythia
    :param loFromSlha: If True, uses the LO xsecs from the SLHA file to compute the
                       higher order xsecs
    :param pythiacard: Optional path to pythia.card. If None, uses /etc/pythia.card
    :returns: XSectionList object
    """
    sqrts = self._checkSqrts(sqrts)
    self._checkSLHA(slhafile)
    if lhefile:
        if os.path.isfile(lhefile):
            # Reading xsecs back from an existing LHE file is advertised
            # but not supported: abort.
            logger.warning("Using LO cross sections from " + lhefile)
            logger.error( "Cross section retrieval from lhefile currently not implemented" )
            sys.exit()
        else:
            logger.info("Writing pythia LHE output to " + lhefile)
    if loFromSlha:
        # Reuse LO cross sections already stored in the SLHA file,
        # keeping only those at the requested sqrts.
        logger.info("Using LO cross sections from " + slhafile)
        xsecsInfile = crossSection.getXsecFromSLHAFile(slhafile)
        loXsecs = crossSection.XSectionList()
        for xsec in xsecsInfile:
            if xsec.info.order == 0 and xsec.info.sqrts == sqrts:
                loXsecs.add(xsec)
    else:
        # Otherwise produce LO cross sections by actually running pythia.
        logger.info("get LO cross sections from pythia%d" % self.pythiaVersion)
        tool = toolBox.ToolBox().get("pythia%d" % self.pythiaVersion)
        tool.nevents = self.nevents
        tool.sqrts = sqrts / TeV
        tool.pythiacard = pythiacard
        loXsecs = tool.run(slhafile, lhefile, unlink=unlink)
    self.loXsecs = loXsecs
    self.loXsecs.sort()
    # Add the higher-order (NLO/NLL) corrections on top of the LO values.
    self.xsecs = self.addHigherOrders(sqrts, slhafile)
    self.xsecs.sort()
    #for xsec in self.loXsecs:
    #    logger.debug ( "now writing out xsecs: %s" % xsec.value )
    logger.debug("how many NLL xsecs? %d" % len(self.xsecs))
    return self.xsecs
def checkInstallation(self, compile=True):
    """
    Checks if installation of tool is correct by looking for executable
    and executing it.

    :param compile: if True and the executable is missing, try to
                    compile it
    :returns: True, if everything is ok
    """
    if not os.path.exists(self.executablePath):
        if compile:
            # logger.warn is a deprecated alias; use warning.
            logger.warning("%s executable not found. Trying to compile it now. This may take a while." % self.name)
            self.compile()
        else:
            # Fixed typo in message ("exectuable").
            logger.warning("%s executable not found." % self.name)
            self.complain()
            return False
    if not os.path.exists(self.executablePath):
        # Fixed message grammar ("failed Is a according compiler...").
        logger.error("Compilation of %s failed. Is a suitable compiler installed?" % self.name)
        self.complain()
    if not os.access(self.executablePath, os.X_OK):
        # Fixed missing punctuation in the warning.
        logger.warning("%s is not executable. Trying to chmod." % self.executable)
        self.chmod()
    return True
def getValueFor(self, massarray):
    """
    Interpolates the value and returns the UL or efficiency for the
    respective massarray

    :param massarray: mass array values (with units), i.e.
                      [[100*GeV,10*GeV],[100*GeV,10*GeV]]
    :returns: interpolated value, or None if the mass vector is
              incompatible with the data grid
    """
    porig = self.flattenMassArray(massarray)  ## flatten
    self.massarray = massarray
    if len(porig) != self.full_dimensionality:
        logger.error ( "dimensional error. I have been asked to compare a "\
                "%d-dimensional mass vector with %d-dimensional data!" % \
                ( len(porig), self.full_dimensionality ) )
        return None
    # Shift by delta_x, then rotate the point into the frame of the
    # (dimensionally reduced) data grid.
    p = ((np.matrix(porig)[0] - self.delta_x)).tolist()[0]
    P = np.dot(p, self._V)  ## rotate
    # Number of non-vanishing components after rotation.
    dp = self.countNonZeros(P)
    # Interpolate using only the non-trivial grid dimensions.
    self.projected_value = self.interpolate([P[:self.dimensionality]])
    # self.projected_value = griddata( self.Mp, self.xsec, [ P[:self.dimensionality] ], method="linear")[0]
    # self.projected_value = float(self.projected_value)
    if dp > self.dimensionality:
        ## we have data in different dimensions
        if self._accept_errors_upto == None:
            return None
        logger.debug( "attempting to interpolate outside of convex hull "\
                "(d=%d,dp=%d,masses=%s)" % ( self.dimensionality, dp, str(massarray) ) )
        return self._interpolateOutsideConvexHull(massarray)
    return self._returnProjectedValue()
def flush(self):
    """
    Get the python dictionaries generated by the object formatting to the
    defined output and convert to XML.
    """
    merged = {}
    # Convert every queued object to a python dictionary and merge.
    for queued in self.toPrint:
        if queued is None:
            continue
        asDict = self._formatObj(queued)
        if not asDict:
            continue  # skip empty output
        merged.update(asDict)
    root = None
    # Convert from python dictionaries to xml:
    if merged:
        root = ElementTree.Element('smodelsOutput')
        self.convertToElement(merged, root)
        raw = ElementTree.tostring(root, 'utf-8')
        pretty = minidom.parseString(raw).toprettyxml(indent=" ")
        if self.output == 'stdout':
            sys.stdout.write(pretty)
        elif self.output == 'file':
            if not self.filename:
                logger.error('Filename not defined for printer')
                return False
            with open(self.filename, "a") as outfile:
                outfile.write(pretty)
                outfile.close()
    # Reset the queue of objects to print.
    self.toPrint = [None] * len(self.printingOrder)
    return root
def loadDatabase(parser, db):
    """
    Load database

    :parameter parser: ConfigParser with path to database
    :parameter db: binary database object. If None, then database is
                   loaded, according to databasePath. If True, then
                   database is loaded, and text mode is forced.
    :returns: database object, database version
    """
    try:
        databasePath = parser.get("path", "databasePath")
        if databasePath == "micromegas":
            # Special keyword: point at the database shipped alongside.
            databasePath = installDirectory() + "/smodels-database/"
        database = db
        if database in [None, True]:
            # db == True forces re-parsing of the text database.
            force_load = "txt" if database == True else None
            database = Database(databasePath, force_load=force_load)
        databaseVersion = database.databaseVersion
    except DatabaseNotFoundException:
        logger.error("Database not found in %s" % os.path.realpath(databasePath))
        sys.exit()
    return database, databaseVersion
def flush(self):
    """
    Format the objects added to the output, print them to the screen or
    file and remove them from the printer.
    """
    accumulated = ""
    for entry in self.toPrint:
        if entry is None:
            continue
        formatted = self._formatObj(entry)
        if not formatted:
            continue  # skip empty output
        accumulated += formatted
        if self.output == 'stdout':
            sys.stdout.write(formatted)
        elif self.output == 'file':
            if not self.filename:
                logger.error('Filename not defined for printer')
                return False
            with self.openOutFile(self.filename, "a") as outfile:
                outfile.write(formatted)
                outfile.close()
    # Reset printing objects.
    self.toPrint = [None] * len(self.printingOrder)
    return accumulated
def testGoodFile13(self):
    """
    Run SModelS on the simplyGluino input and compare the output against
    the stored reference file (simplyGluino_default.py).
    """
    filename = "./testFiles/slha/simplyGluino.slha"
    outputfile = runMain(filename, suppressStdout=True)
    with open(outputfile, 'rb') as fp:
        ## imports file with dots in name
        output_module = imp.load_module("output", fp, outputfile,
                                        ('.py', 'rb', imp.PY_SOURCE))
    smodelsOutput = output_module.smodelsOutput
    from simplyGluino_default import smodelsOutputDefault
    # Fields that may legitimately differ between runs/environments:
    ignoreFields = [ 'input file', 'smodels version', 'ncpus', 'Element', 'database version' ]
    # Sort results by r-value so the comparison is order-independent.
    smodelsOutputDefault['ExptRes'] = sorted(
        smodelsOutputDefault['ExptRes'], key=lambda res: res['r'], reverse=True)
    equals = equalObjs(smodelsOutput, smodelsOutputDefault, allowedDiff=0.02,
                       ignore=ignoreFields)
    if not equals:
        e = "output13.py and simplyGluino_default.py differ!"
        logger.error(e)
        # raise AssertionError ( e )
    self.assertTrue(equals)
    ## test went through, so remove the output files
    self.removeOutputs(outputfile)
def __init__(self, path=None):
    """
    :param path: Path to the experimental result folder
    """
    if path and os.path.isdir(path):
        self.path = path
        if not os.path.isfile(os.path.join(path, "globalInfo.txt")):
            logger.error("globalInfo.txt file not found in " + path)
            raise TypeError
        self.globalInfo = infoObj.Info(os.path.join(
            path, "globalInfo.txt"))
        self.datasets = []
        # Walk the folder tree and sort, so datasets are built in a
        # deterministic order.
        folders = []
        for root, _, files in os.walk(path):
            folders.append((root, files))
        folders.sort()
        for root, files in folders:
            if 'dataInfo.txt' in files:
                # data folder found
                # Build data set; folders that fail to parse raise
                # TypeError and are skipped.
                try:
                    dataset = datasetObj.DataSet(root, self.globalInfo)
                    self.datasets.append(dataset)
                except TypeError:
                    continue