def __init__(self, name1, name2, delta=1e-6):
    """Load/keep the two Bayesian networks to compare.

    name1, name2 : str or pyAgrum.BayesNet
        filename (loaded with gum.loadBN) or an already-built BN.
    delta : float
        numerical tolerance stored in self.DELTA_ERROR.

    Raises ValueError when the two BNs do not share the same variable names.
    """
    self.DELTA_ERROR = delta

    if isinstance(name1, str):
        self._bn1 = gum.loadBN(name1)
        # bug fix: the closing '"' used to be concatenated *inside*
        # os.path.basename(...); it must be appended after it.
        self._bn1.setProperty(
            'name', '"' + os.path.basename(self._bn1.property('name')) + '"')
    else:
        self._bn1 = name1

    if isinstance(name2, str):
        self._bn2 = gum.loadBN(name2)
        self._bn2.setProperty(
            'name', '"' + os.path.basename(self._bn2.property('name')) + '"')
    else:
        self._bn2 = name2

    s1 = set(self._bn1.names())
    s2 = set(self._bn2.names())
    if s1 != s2:
        raise ValueError(
            "The 2 BNs are not comparable! There are names not present in the 2 BNs : "
            + str(s1.symmetric_difference(s2)))
def testParameterLearning(self):
    """Learn parameters on a fixed structure and check variables are preserved;
    a CSV with an unknown modality must raise UnknownLabelInDatabase."""
    bn = gum.loadBN(self.agrumSrcDir('src/testunits/ressources/asia_bool.bif'))
    learner = gum.BNLearner(self.agrumSrcDir('src/testunits/ressources/asia3.csv'), bn)
    learner.useScoreLog2Likelihood()
    learner.useAprioriSmoothing(1.0)
    bn2 = learner.learnParameters(bn)
    for i in range(bn.size()):
        # fix: assertEquals is a deprecated alias of assertEqual
        self.assertEqual(
            str(bn2.variable(i)),
            str(bn.variable(bn.idFromName(bn2.variable(i).name()))))

    bn = gum.loadBN(self.agrumSrcDir('src/testunits/ressources/asia_bool.bif'))
    # there is a beurk modality in asia3-faulty.csv
    with self.assertRaises(gum.UnknownLabelInDatabase):
        learner = gum.BNLearner(
            self.agrumSrcDir('src/testunits/ressources/asia3-faulty.csv'), bn)
def checkROCargs():
    """Parse sys.argv as (bn file, csv file, variable, label) for the ROC tool.

    Calls module_help() (with a context message) whenever an argument is
    missing or invalid; returns the tuple (bn, csv_name, variable, label).
    """
    pyAgrum_header("2011-13")

    bn_name = sys.argv[1] if len(sys.argv) > 1 else ""
    csv_name = sys.argv[2] if len(sys.argv) > 2 else ""
    variable = sys.argv[3] if len(sys.argv) > 3 else ""
    label = sys.argv[4] if len(sys.argv) > 4 else ""

    # fix: use the == operator instead of calling __eq__ directly
    if bn_name == "":
        module_help()
    bn = gum.loadBN(bn_name)

    if csv_name == "":
        module_help()

    if variable == "":
        module_help(message=" Variables : " + str(bn.names()))
    else:
        if variable not in bn.names():
            # fix: missing space in the message ("'not found" -> "' not found")
            module_help(message=" Variable '" + variable +
                        "' not found.\n Variables : " + str(bn.names()))
        if label == "":
            module_help(message=" Labels : " + str(bn.variableFromName(variable)))
        else:
            try:
                bn.variableFromName(variable)[label]
            except gum.OutOfBounds:
                module_help(message=" Label '" + label +
                            "' not found.\n Labels : " +
                            str(bn.variableFromName(variable)))

    return (bn, csv_name, variable, label)
def main():
    """Compare exact inference on alarm.bif before/after mutilation by evidence."""
    bn1 = gum.loadBN("alarm.bif")
    print(" - BN read")
    evs = {"SAO2": 2, "CATECHOL": 1}
    bn2 = mutilate(gum.BayesNet(bn1), evs)
    print(" - Mutilation done")

    ie1 = gum.LazyPropagation(bn1)
    ie1.setEvidence(evs)
    ie1.makeInference()
    ie2 = gum.LazyPropagation(bn2)
    ie2.setEvidence(evs)
    ie2.makeInference()
    print(" - Inference done")

    nb_errors = 0
    for name in bn1.names():
        post1 = ie1.posterior(bn1.idFromName(name))
        post2 = ie2.posterior(bn2.idFromName(name))
        if isAlmostEqualPot(post1, post2):
            continue
        nb_errors += 1
        print("Error on {} : {} != {}".format(name, post1[:], post2[:]))

    if nb_errors > 0:
        print("Errors : {}".format(nb_errors))
    else:
        print("No error : inference results are identical.")
def main():
    """Run Gibbs sampling on alarm.bif and compare with exact posteriors."""
    bn = gum.loadBN("data/alarm.bif")
    print("BN read")
    evidence = {"HR": 1, "PAP": 2}

    sampler = Gibbs(bn, evidence, verbose=True)
    sampler.run(5e-2, 20)
    print("done")

    ie = gum.LazyPropagation(bn)
    ie.setEvidence(evidence)
    ie.makeInference()

    for node in bn.ids():
        v, c = sampler.results(node)
        name = bn.variable(node).name()
        if v is None:
            # evidence node: no approximated posterior
            print("{}: {}".format(name, evidence[name]))
        else:
            print("{} : {:3.5f}\n exact : {}\n approx : {} ({:7.5f})".format(
                name,
                utils.KL(ie.posterior(node), v),
                utils.compactPot(ie.posterior(node)),
                utils.compactPot(v), c))
def run_bn_sup(path, test_corr, structure):
    """"
    This method first loads the GT BN, then it propagates evidence from
    test_corr through it, after which a new data set is created based on the
    new posteriors
    :param path: the path from which the .bif file is loaded
    :param test_corr: test-data that is being updated, in one-hot encoding form
    :param structure: structure of the data (how many categories each attribute has)
    """
    offsets = [0] + structure
    bn = gum.loadBN(path)

    # placeholder for the generated data
    new_data = np.zeros(test_corr.shape)

    for row in range(test_corr.shape[0]):
        observation = test_corr[row, :]  # fix one observation
        # convert the observation into the evidence dict expected by pyAgrum:
        # each node gets its one-hot slice
        evs = {}
        for k, node in enumerate(bn.nodes()):
            evs[node] = observation[sum(offsets[:k + 1]):sum(offsets[:k + 2])]

        ie = gum.LazyPropagation(bn)
        ie.setEvidence(evs)
        # extract the posteriors and flatten them into the output row
        posteriors = [ie.posterior(node).toarray() for node in bn.nodes()]
        new_data[row, :] = list(itertools.chain.from_iterable(posteriors))
        ie.eraseAllEvidence()

    return new_data
def transform():
    """Convert the o3prm system to BIF, smooth its CPTs and save the result."""
    prm = gum.loadBN("data/test_level_0.o3prm", system="aSys")
    print("transform : prm loaded")
    gum.saveBN(prm, "data/test_level_0.bif")
    print("transform : bn written")

    bn = gum.loadBN("data/test_level_0.bif")
    print("transform : bn loaded")
    for node in bn.ids():
        # avoid hard zeros, then renormalize as a CPT
        bn.cpt(node).translate(1e-2).normalizeAsCPT()
    print("transform : bn normalized")

    gum.saveBN(bn, "data/test_level_0_1.bif")
    print("transform : bn written")
def main():
    """Load alarm.bif once and run the three test scenarios on it."""
    bn1 = gum.loadBN("data/alarm.bif")
    print(" - BN read")
    for scenario in (test1, test2, test3):
        print()
        scenario(bn1)
def testCedric():
    """Run loopy belief propagation on loopyOut.bif with one evidence."""
    print("loading ...")
    bn = gum.loadBN("data/loopyOut.bif")
    print("done")

    evidence = {'E7.ValueEE': 6}
    lbp = LoopyBeliefPropagation(bn, evidence, verbose=True)
    print("running ...")
    lbp.run(4e-1, 2)
    print("done")
def config(self, configFileName):
    """Load the configuration file and the Bayesian network it references."""
    cfg_path = os.path.abspath(configFileName)
    request = loadConfig(cfg_path)
    request['config'] = cfg_path
    request['templateHeader'] = request['header']
    request['path'] = os.path.dirname(cfg_path) + "/"
    self._request = request
    self._bn = gum.loadBN(request['path'] + request['bayesnet'])
def pretty_bn(aBN):
    """Pretty-print every CPT of a BN (filename or BayesNet) in topological order."""
    bn = gum.loadBN(aBN) if isinstance(aBN, str) else aBN
    for node in bn.topologicalOrder():
        pretty_cpt(bn.cpt(node))
    print("")
def testLocalSearchWithTabuAccurate(self):
    """The tabu-list local search should learn a structure close to asia2.bif."""
    learner = gum.BNLearner(self.agrumSrcDir('asia.csv'))
    learner.useLocalSearchWithTabuList()
    learned = learner.learnBN()

    reference = gum.loadBN(self.agrumSrcDir('asia2.bif'), verbose=False)
    distances = gum.ExactBNdistance(learned, reference).compute()
    self.assertAlmostEqual(distances['klPQ'], 0.5, delta=0.5)
def main():
    """Run weighted sampling on alarm.bif and compare with exact inference."""
    bn = gum.loadBN("data/alarm.bif")
    print("BN read")

    evidence = {"HR": 1, "PAP": 2}
    sampler = Weighted(bn, evidence, verbose=True)
    sampler.run(1e-2, 200)
    print("done")

    testUtils.compareApprox(sampler, bn, evidence)
def testLocalSearchWithTabuAccurate(self):
    """The tabu-list local search should learn a structure close to asia2.bif."""
    learner = gum.BNLearner(self.agrumSrcDir('src/testunits/ressources/asia.csv'))
    learner.useLocalSearchWithTabuList()
    learned = learner.learnBN()

    reference = gum.loadBN(self.agrumSrcDir('src/testunits/ressources/asia2.bif'))
    kl = gum.BruteForceKL(learned, reference)
    distances = kl.compute()
    self.assertDelta(distances['klPQ'], 0, 1)
def config(self, configFileName):
    """Read the configuration file and load the referenced Bayesian network."""
    absolute = os.path.abspath(configFileName)
    req = loadConfig(absolute)
    req['config'] = absolute
    req['templateHeader'] = req['header']
    req['path'] = os.path.dirname(absolute) + "/"
    self._request = req
    self._bn = gum.loadBN(req['path'] + req['bayesnet'])
def main():
    """Run loopy belief propagation on loopyOut.bif and compare with exact inference."""
    bn = gum.loadBN("data/loopyOut.bif")
    print("BN read")

    evidence = {"HR": 1, "PAP": 2}
    lbp = LoopyBeliefPropagation(bn, evidence, verbose=True)
    lbp.run(1e-5, 50)
    print("done")

    testUtils.compareApprox(lbp, bn, evidence)
def testParameterLearning(self):
    """Parameter learning keeps each variable's label set; a bad modality raises."""
    bn = gum.loadBN(self.agrumSrcDir('asia_bool.bif'), verbose=False)
    learner = gum.BNLearner(self.agrumSrcDir('asia3.csv'), bn)
    learner.setInitialDAG(bn.dag())
    learner.useScoreLog2Likelihood()
    learner.useSmoothingPrior(1.0)
    learned = learner.learnParameters()

    for idx in range(bn.size()):
        original = bn.variable(bn.idFromName(learned.variable(idx).name()))
        self.assertEqual(set(learned.variable(idx).labels()),
                         set(original.labels()))

    bn = gum.loadBN(self.agrumSrcDir('asia_bool.bif'), verbose=False)
    # there is a beurk modality in asia3-faulty.csv
    with self.assertRaises(gum.UnknownLabelInDatabase):
        learner = gum.BNLearner(self.agrumSrcDir('asia3-faulty.csv'), bn)
def main():
    """Run importance sampling on alarm.bif and compare with exact inference."""
    bn = gum.loadBN("data/alarm.bif")
    print("BN read")

    evidence = {"HR": 1, "PAP": 2}
    sampler = Importance(bn, evidence, epsilon=0.2, verbose=True)
    sampler.run(1e-2, 100)
    print("done")

    testUtils.compareApprox(sampler, bn, evidence)
    print(sampler.posterior(1))
def doLoadBN(s):
    """Load the BN file `s`, displaying a progress bar through a listener."""
    # you could simply do that
    # bn=gum.loadBN("test")
    # but listeners are fun !!

    # fix: use integer division — true division gives a float, which the
    # '{0:,d}' format specifier rejects under Python 3
    size_ko = os.path.getsize(s) // 1024
    title = os.path.basename(s) + " (" + \
        '{0:,d}'.format(size_ko).replace(',', ' ') + " Ko)"
    progressbar = ProgressBar(title, 0, 100, mode='dynamic', char='-')

    def local_update(pourcent):
        progressbar.update_amount(pourcent)
        progressbar.display()
        if pourcent == 100:
            # fix: bare `print` is a no-op expression in Python 3
            print()

    return gum.loadBN(s, local_update)
def compareBN(name1, name2):
    """Compare two BNs (filenames or BayesNet objects) on variables, parents
    and CPTs; return "OK" or the first difference message."""
    if isinstance(name1, str):
        b1 = gum.loadBN(name1)
        # bug fix: the closing '"' was concatenated inside os.path.basename()
        b1.setProperty('name', '"' + os.path.basename(b1.property('name')) + '"')
    else:
        b1 = name1

    if isinstance(name2, str):
        b2 = gum.loadBN(name2)
        b2.setProperty('name', '"' + os.path.basename(b2.property('name')) + '"')
    else:
        b2 = name2

    ret = compareBNVariables(b1, b2)
    if ret != "OK":
        return ret
    ret = compareBNParents(b1, b2)
    if ret != "OK":
        return ret
    return compareBNCPT(b1, b2)
def main():
    """Run the parallel Monte-Carlo sampler and print scaled KL distances."""
    bn = gum.loadBN("alarm.bif")
    ie = gum.LazyPropagation(bn)
    ie.makeInference()

    sampler = ParallelMonteCarlo(bn)
    sampler.run(1e-2, 300, verbose=True)
    print("done")

    for node in bn.ids():
        # KL scaled to 3 decimals of a permil, as in the original display
        kl_scaled = int(100000 * utils.KL(ie.posterior(node),
                                          sampler.posterior(node))) / 1000
        print("{}: {}".format(bn.variable(node).name(), kl_scaled))
def doLoadBN(s):
    """Load the BN file `s`, displaying a progress bar through a listener."""
    # you could simply do that
    # bn=gum.loadBN("test")
    # but listeners are fun !!

    # fix: '//' keeps the size an int; '/' yields a float that breaks the
    # '{0:,d}' format under Python 3
    title = os.path.basename(s) + " (" + '{0:,d}'.format(
        os.path.getsize(s) // 1024).replace(',', ' ') + " Ko)"
    progressbar = ProgressBar(title, 0, 100, mode='dynamic', char='-')

    def local_update(pourcent):
        progressbar.update_amount(pourcent)
        progressbar.display()
        if pourcent == 100:
            # fix: bare `print` is a no-op expression in Python 3
            print()

    return gum.loadBN(s, local_update)
def main():
    """Run the Monte-Carlo sampler and print its full diagnostics per node."""
    bn = gum.loadBN("alarm.bif")
    ie = gum.LazyPropagation(bn)
    ie.makeInference()

    sampler = MonteCarlo(bn)
    sampler.run(1e-2, 300, verbose=True)
    print("done")

    for node in bn.ids():
        ev, ec, hv, hc = sampler.everything(node)
        print("{}: {} ({}) =!= {} ({})".format(
            bn.variable(node).name(), ev, ec, hv, hc))
def calculateBF(self):
    """Launch the brute-force troubleshooting computation in a subprocess.

    Reads the GUI options (couples, obsolete observations, calc/exec modes),
    optionally switches to a simplified problem, then starts the solver in a
    separate Process (a dedicated entry point on Windows) and drives the
    progress bar.
    """
    # lock the button while the computation runs
    self.config.calcButton.setEnabled(False)
    self.obsRepCouples = self.config.checkObsRepCouples.isChecked()
    self.obsObsolete = self.config.checkObsObsObsolete.isChecked()
    if self.config.radioCalcAll.isChecked():
        self.modeCalc = "all"
    else:
        self.modeCalc = "dp"
    if self.config.radioExecStepByStep.isChecked():
        self.modeExec = "step-by-step"
    else:
        self.modeExec = "show-tree"
    # ask (in French) whether to use a simplified version of the problem,
    # since the exhaustive search can take ~50 minutes
    answer = QMessageBox.question(
        self, "Attention !",
        "Les calculs avec la recherche exhaustive peuvent être trop"
        " lourds (à peu près 50 minutes même pour la meilleure "
        "configuration). Voulez-vous utiliser une version simplifiée "
        "du problème ?",
        QMessageBox.Yes | QMessageBox.No)
    if answer == QMessageBox.Yes:
        # pick the simplified node set matching the chosen options
        if self.modeCalc == "dp" and self.obsRepCouples:
            self.nodesAssociations = nodesAssociationsSimple0
        elif self.modeCalc == "dp" and not self.obsRepCouples:
            self.nodesAssociations = nodesAssociationsSimple1
        elif self.modeCalc == "all" and self.obsRepCouples:
            self.nodesAssociations = nodesAssociationsSimple2
        elif self.modeCalc == "all" and not self.obsRepCouples:
            self.nodesAssociations = nodesAssociationsSimple3
    self.tsp = dtt.TroubleShootingProblem(
        gum.loadBN(self.bnCarFilename), [self.costsRep, self.costsObs],
        self.nodesAssociations)
    pbarMax = self.findPbarMax()
    self.config.progressBar.setRange(0, pbarMax)
    # random port for the progress-reporting socket of the worker process
    self.randomSocketPort = int(np.random.randint(1024, 10000, 1))
    if os.name == "nt":
        # Windows: spawn needs a picklable top-level target
        self.bfProcess = Process(
            target=launch_brute_force_multi_processing_windows,
            args=(
                self.bnCarFilename, [self.costsRep, self.costsObs],
                self.nodesAssociations, self.randomSocketPort, self.modeCalc,
                self.obsRepCouples, self.obsObsolete, self.exchangeFileName
            )
        )
    else:
        self.bfProcess = Process(target=self.launchBruteForceMultiProcessing)
    self.config.calcButton.setText("Le calcul de la stratégie optimale est en cours...")
    self.bfProcess.start()
    self.managePbar()
def testEvidenceImpactWithNodeId(self):
    """evidenceImpact (node-id API) rejects a target inside the conditioning
    set and drops conditionally independent variables."""
    # verbose=False : don't want to see the warnings
    bn = gum.loadBN(self.agrumSrcDir('asia.bif'), [], verbose=False)
    ie = gum.LazyPropagation(bn)
    self.assertEqual(len(ie.BN().arcs()), 8)

    with self.assertRaises(gum.InvalidArgument):
        res = ie.evidenceImpact(0, [0, 1, 2])

    res = ie.evidenceImpact(0, [1, 2])
    self.assertEqual(res.nbrDim(), 2)  # 2 indep 0 given 1

    for modality in (0, 1):
        self.assertEqual(res.extract({"tuberculosis?": modality}),
                         gum.getPosterior(bn, target=0, evs={1: modality}))
def testSetVarOrder(self):
    """setVarOrder accepts a permutation of the BN names and rejects
    duplicated, missing or unknown names."""
    bn = gum.loadBN(self.agrumSrcDir('survey.bif'))
    dbgen = gum.BNDatabaseGenerator(bn)

    dbgen.setVarOrderFromCSV(self.agrumSrcDir('survey1.csv'))
    self.assertEqual(dbgen.varOrderNames(), ('E', 'A', 'O', 'T', 'R', 'S'))

    dbgen.setVarOrder(["A", "E", "O", "R", "S", "T"])
    self.assertEqual(dbgen.varOrderNames(), ('A', 'E', 'O', 'R', 'S', 'T'))

    bad_orders = [
        (gum.FatalError, ["A", "E", "O", "R", "A", "S", "T"]),  # duplicated name
        (gum.FatalError, ["A", "O", "R", "S", "T"]),            # missing name
        (gum.NotFound, ["A", "O", "R", "S", "T", "X"]),         # unknown name
    ]
    for expected_exc, order in bad_orders:
        with self.assertRaises(expected_exc):
            dbgen.setVarOrder(order)
def testEvidenceImpactWithName(self):
    """evidenceImpact (name API) rejects a target inside the conditioning set
    or unknown names, and drops conditionally independent variables."""
    # verbose=False : don't want to see the warnings
    bn = gum.loadBN(self.agrumSrcDir('asia.bif'), [], verbose=False)
    ie = gum.LazyPropagation(bn)

    with self.assertRaises(gum.InvalidArgument):
        res = ie.evidenceImpact(
            "visit_to_Asia?",
            ["visit_to_Asia?", "tuberculosis?", "tuberculos_or_cancer?"])
    with self.assertRaises(gum.NotFound):
        res = ie.evidenceImpact(
            "visit_to_Asia?", ["toto", "tuberculosis?", "tuberculos_or_cancer?"])

    res = ie.evidenceImpact("visit_to_Asia?",
                            ["tuberculosis?", "tuberculos_or_cancer?"])
    self.assertEqual(res.nbrDim(), 2)  # 2 indep 0 given 1

    for modality in (0, 1):
        self.assertEqual(
            res.extract({"tuberculosis?": modality}),
            gum.getPosterior(bn, target="visit_to_Asia?",
                             evs={"tuberculosis?": modality}))
def launch_brute_force_multi_processing_windows(
        bayesian_network_filename, costs, nodes_types, port, mode_calc,
        obs_rep_couples, obs_obsolete, exchange_file_name):
    """Worker entry point (Windows): solve the troubleshooting problem by
    brute force, write the best tree + ECR to the exchange file, and signal
    start ("0") and completion ("1") over a local socket."""
    tsp = dtt.TroubleShootingProblem(
        gum.loadBN(bayesian_network_filename), costs, nodes_types)
    sock = socket.socket()
    try:
        sock.connect(("localhost", port))
        sock.send("0".encode())
        best_tree, best_ecr = tsp.brute_force_solver(
            mode=mode_calc,
            obs_rep_couples=obs_rep_couples,
            obs_obsolete=obs_obsolete,
            sock=sock
        )
        best_tree.to_file(exchange_file_name)
        # fix: the file handle was never closed — use a context manager
        with open(exchange_file_name, "a") as fout:
            fout.write(best_tree.fout_newline + str(best_ecr) +
                       best_tree.fout_newline)
        sock.send("1".encode())
    finally:
        # fix: make sure the socket is released even if the solver fails
        sock.close()
def proceed(self, name_in, name_out, n, visible):
    """
    From the file name_in (BN file), generate n samples and save them in name_out
    return the log2-likelihood of the n samples database
    """
    bn = gum.loadBN(name_in) if isinstance(name_in, str) else name_in
    seq = bn.topologicalOrder()

    # fix: the output file was opened without ever being closed; also pass
    # newline='' as required by the csv module to avoid blank lines on Windows
    with open(name_out, "w", newline="") as csvfile:
        writer = csv.writer(csvfile)

        if visible:
            sys.stdout.flush()

        titles = [bn.variable(item).name() for item in seq]
        writer.writerow(titles)

        if visible:
            prog = ProgressBar(name_out + " : ", 0, n, 77, mode="static", char="#")
            prog.display()

        LL = 0
        for i in range(n):
            (cas, ll) = self.newSample(bn, seq)
            LL += ll
            row = [cas[bn.variable(item).name()] for item in seq]
            writer.writerow(row)
            if visible:
                prog.increment_amount()
                prog.display()

    if visible:
        print()
        print("Log2-Likelihood : {0}".format(LL))
        print()

    return LL
def testReadAfterWrite(self):
    """A BN saved as o3prm and re-loaded keeps its dimensions and variables."""
    bn = gum.BayesNet()
    bn.add(gum.RangeVariable("1", "", 0, 1))
    bn.add(gum.DiscretizedVariable("2", "").addTick(0.0).addTick(0.5).addTick(1.0))
    bn.add(gum.LabelizedVariable("3", "", 2))
    bn.add(gum.LabelizedVariable("4", "", 2))
    bn.add(gum.LabelizedVariable("5", "", 3))

    for tail, head in [("1", "3"), ("1", "4"), ("3", "5"),
                       ("4", "5"), ("2", "4"), ("2", "5")]:
        bn.addArc(tail, head)

    bn.cpt("1").fillWith([0.2, 0.8])
    bn.cpt("2").fillWith([0.3, 0.7])
    bn.cpt("3").fillWith([0.1, 0.9, 0.9, 0.1])
    bn.cpt("4").fillWith([0.4, 0.6, 0.5, 0.5, 0.5, 0.5, 1.0, 0.0])
    bn.cpt("5").fillWith([
        0.3, 0.6, 0.1, 0.5, 0.5, 0.0, 0.5, 0.5, 0.0, 1.0, 0.0, 0.0,
        0.4, 0.6, 0.0, 0.5, 0.5, 0.0, 0.5, 0.5, 0.0, 0.0, 0.0, 1.0
    ])

    gum.saveBN(bn, self.agrumSrcDir("o3prm/BNO3PRMIO_file.o3prm"))
    bn2 = gum.loadBN(self.agrumSrcDir("o3prm/BNO3PRMIO_file.o3prm"),
                     system="bayesnet")

    self.assertEqual(bn.dim(), bn2.dim())
    self.assertEqual(bn.log10DomainSize(), bn2.log10DomainSize())
    for name in bn.names():
        self.assertEqual(bn.variable(name).name(), bn2.variable(name).name())
        self.assertEqual(bn.variable(name).varType(), bn2.variable(name).varType())
        self.assertEqual(bn.variable(name).domainSize(),
                         bn2.variable(name).domainSize())
def testHillClimbingAccurate(self):
    """Greedy hill climbing on asia.csv should stay close to asia2.bif."""
    learner = gum.BNLearner(self.agrumSrcDir('asia.csv'))
    witness = {
        'smoking?', 'lung_cancer?', 'bronchitis?', 'visit_to_Asia?',
        'tuberculosis?', 'tuberculos_or_cancer?', 'dyspnoea?', 'positive_XraY?'
    }
    # the learner must expose exactly the witness variables
    self.assertEqual(set(learner.names()), witness)

    learner.useGreedyHillClimbing()
    learned = learner.learnBN()

    reference = gum.loadBN(self.agrumSrcDir('asia2.bif'), verbose=False)
    distances = gum.ExactBNdistance(learned, reference).compute()
    self.assertAlmostEqual(distances['klPQ'], 0.5, delta=0.5)
def documentor(BNfilename):
    """Print a YAML configuration template for the given BN file."""
    bn = gum.loadBN(BNfilename)

    # from "/x/y/sample.1.bif" to "Sample1"
    radical = remove_chars_re(
        os.path.basename(BNfilename).rsplit(".", 1)[0], '+.-#*').capitalize()

    maxPerLine = 6
    column = 0
    namesList = "#(@TODO) "
    for node in bn.ids():
        namesList += bn.variable(node).name()
        if column == maxPerLine:
            column = 0
            namesList += "\n#(@TODO) "
        else:
            column += 1
            namesList += "\t"

    namesLanguage = "#(@TODO) " + "\t".join(languages)
    signature('# Yaml config file automatically generated by ')
    print(Conf.template_yaml.format(BNfilename, namesList, namesLanguage, radical))
def dotize(aBN, name, style='pdf'):
    """
    From a bn 'bn' and a name 'bn', dotize creates 'bn.dot' and 'bn.style',
    representation of the bn in dot format and in style.
    style in [pdf,png,fig,jpg,svg]
    """
    if style not in ['pdf', 'png', 'fig', 'jpg', 'svg']:
        # fix: use the call form of raise (the "raise E, msg" statement is
        # Python-2-only syntax); the call form works on both 2 and 3
        raise Exception("<%s> in not a correct style ([pdf,png,fig,jpg,svg])" % style)

    if isinstance(aBN, str):
        bn = gum.loadBN(aBN)
    else:
        bn = aBN

    dotfile = name + '.dot'
    pngfile = name + '.' + style
    # fix: context manager guarantees the dot file is flushed and closed
    # before `dot` reads it
    with open(dotfile, 'w') as f:
        f.write(bn.toDot())

    cmd = ['dot', '-T' + style, dotfile, ('-o' + pngfile)]
    return subprocess.call(cmd)
def testHillClimbingAccurate(self):
    """Greedy hill climbing on asia.csv should stay close to asia2.bif."""
    learner = gum.BNLearner(self.agrumSrcDir('src/testunits/ressources/asia.csv'))
    witness = ['smoking?', 'lung_cancer?', 'bronchitis?', 'visit_to_Asia?',
               'tuberculosis?', 'tuberculos_or_cancer?', 'dyspnoea?',
               'positive_XraY?']
    # names must match the witness list in both directions
    for name in witness:
        self.assertTrue(name in learner.names())
    for name in learner.names():
        self.assertTrue(name in witness)

    learner.useGreedyHillClimbing()
    learned = learner.learnBN()

    reference = gum.loadBN(self.agrumSrcDir('src/testunits/ressources/asia2.bif'))
    kl = gum.BruteForceKL(learned, reference)
    distances = kl.compute()
    self.assertDelta(distances['klPQ'], 0, 1)
def documentor(BNfilename):
    """Emit a YAML configuration template for the given BN file."""
    bn = gum.loadBN(BNfilename)

    # "/x/y/sample.1.bif" -> "Sample1"
    stem = os.path.basename(BNfilename).rsplit(".", 1)[0]
    radical = remove_chars_re(stem, '+.-#*').capitalize()

    maxPerLine = 6
    chunks = ["#(@TODO) "]
    counter = 0
    for node in bn.ids():
        chunks.append(bn.variable(node).name())
        if counter == maxPerLine:
            counter = 0
            chunks.append("\n#(@TODO) ")
        else:
            counter += 1
            chunks.append("\t")
    namesList = "".join(chunks)

    namesLanguage = "#(@TODO) " + "\t".join(languages)
    signature('# Yaml config file automatically generated by ')
    print(Conf.template_yaml.format(BNfilename, namesList, namesLanguage, radical))
def computeScores(bn_name, csv_name, visible=False):
    """
    Compute scores (likelihood, aic, bic, mdl, etc.) from a bn w.r.t to a csv

    Parameters
    ----------
    bn_name : pyAgrum.BayesNet | str
      a gum.BayesianNetwork or a filename for a BN
    csv_name : str
      a filename for the CSV database
    visible: bool
      do we show the progress

    Returns
    -------
    Tuple[float,Dict[str,float]]
      percentDatabaseUsed,scores
    """
    if isinstance(bn_name, str):
        bn = gum.loadBN(bn_name)
    else:
        bn = bn_name

    # number of data rows (header excluded)
    nbr_lines = lines_count(csv_name) - 1

    # sniff the CSV dialect from the first KiB
    with open(csv_name, "r") as csvfile:
        dialect = csv.Sniffer().sniff(csvfile.read(1024))

    nbr_insignificant = 0
    num_ligne = 1
    likelihood = 0.0
    with open(csv_name, 'r') as csvfile:
        batchReader = csv.reader(csvfile, dialect)

        titre = next(batchReader)
        fields = {}
        for i, nom in enumerate(titre):
            fields[nom] = i

        # maps each instantiation dimension to its CSV column
        positions = checkCompatibility(bn, fields, csv_name)
        inst = bn.completeInstantiation()

        if visible:
            # tqdm is optional
            # pylint: disable=import-outside-toplevel
            from tqdm import tqdm
            pbar = tqdm(total=nbr_lines, desc=csv_name,
                        bar_format='{desc}: {percentage:3.0f}%|{bar}|')

        for data in batchReader:
            num_ligne += 1

            for i in range(inst.nbrDim()):
                try:
                    inst.chgVal(i, _getIdLabel(inst, i, data[positions[i]]))
                except gum.OutOfBounds:
                    print(
                        f"Out of bounds for ({i},{positions[i]}) : unknown id or label '{data[positions[i]]}' for the variable {inst.variable(i)}"
                    )

            p = bn.jointProbability(inst)
            if p == 0.0:
                # zero-probability row: reported and excluded from the likelihood
                print(str(num_ligne) + ":" + str(inst))
                nbr_insignificant += 1
            else:
                likelihood += math.log(p, 2)
            if visible:
                pbar.update()

        if visible:
            pbar.close()

    nbr_arcs = 1.0 * bn.sizeArcs()
    dim = 1.0 * bn.dim()

    aic = likelihood - dim
    # aicc is undefined when the correction denominator is not positive
    aicc = 2 * aic - 2 * dim * (dim + 1) / (nbr_lines - dim + 1) if (
        nbr_lines - dim + 1 > 0) else "undefined"
    bic = likelihood - dim * math.log(nbr_lines, 2)
    mdl = likelihood - nbr_arcs * math.log(nbr_lines,
                                           2) - 32 * dim  # 32=nbr bits for a params

    return ((nbr_lines - nbr_insignificant) * 100.0 / nbr_lines, {
        'likelihood': likelihood, 'aic': aic, 'aicc': aicc, 'bic': bic, 'mdl': mdl
    })
# -- 5. Exploiting conditional independencies --


def find_indep(P, epsilon):
    """Detect the variables of P that are independent (w.r.t. epsilon) and
    return [n, projection of P without them, indices of the kept variables]."""
    n = nb_vars(P)
    removable = np.array([k for k in np.arange(n) if is_indep(P, k, epsilon)])
    kept = np.delete(np.arange(n), removable)
    return np.array([n, project(P, removable), kept])


# -- 6. Compact expression of a joint probability --


def find_all_indep(P, epsilon):
    """Print size statistics of the compact conditional representation of P."""
    n = nb_vars(P)
    m = 0
    for _ in np.arange(n):
        C = project(P, np.delete(np.arange(n), find_indep(P, epsilon)))
        m += C.size
    print("nombre de variables de la distribution P(X0,…,Xi) : %s" % n)
    print("nombre de variables de la probabilité conditionnelle compacte calculée : %s" % nb_vars(C))
    print("consommation mémoire totale de P(X0,…,Xn) : %s" % P.size)
    print("consommation mémoire totale des probabilités conditionnelles compactes : %s" % m)


# -- 7. Practical applications --

# load the bif or dsl file
filename = "nom du fichier"
bn = gum.loadBN(filename)

# display the sizes of the compact and non-compact joint probabilities
print(bn)
print("transform : bn written")

bn = gum.loadBN("data/test_level_0.bif")
print("transform : bn loaded")
# smooth every CPT to avoid hard zeros, then renormalize
for i in bn.ids():
    bn.cpt(i).translate(1e-2).normalizeAsCPT()
print("transform : bn normalized")
gum.saveBN(bn, "data/test_level_0_1.bif")
print("transform : bn written")

if __name__ == '__main__':
    # transform()
    bn = gum.loadBN("data/test_level_0_1.bif")
    print("BN loaded")
    # evidence scenario for the weighted sampler
    scenario = {
        'E5.ValueEE': 6,
        'E4.ValueEE': 6,
        'Ac3.Duration': 3,
        'A1.Productivity': 6
    }  # ,'E3.ValueEE':6}#'E1.Agg': 0, 'E3.Agg': 0, 'E4.Agg': 1, 'E5.Agg': 2}#,'E6.Agg':0}
    # scenario={'E5.ValueEE':6}
    print('ok 1')
    m = Weighted(bn, scenario, verbose=True)
    # m = MonteCarlo(bn, scenario,verbose=True)
    print('ok')
    m.run(1e-2, 50)
""" description of a BN """ import pyAgrum as gum def showBN(bn): print('---------------------------------') print(bn.property("name")) print('---------------------------------') print(bn) print('---------------------------------') l=[len(bn.variable(i)) for i in bn.ids()] print('variables domainSize : min={0} max={1}'.format(min(l),max(l))) print('parents : max={0}'.format(max([len(bn.parents(i)) for i in bn.ids()]))) print('---------------------------------') for i in bn.ids(): print('{0} : {1}'.format(i,str(bn.variable(i)))) print('---------------------------------') for (i,j) in bn.arcs(): print('{0}->{1}'.format(bn.variable(i).name(),bn.variable(j).name())) print('---------------------------------') #load the file alarm.dsl bn=gum.loadBN("../resources/alarm.dsl") showBN(bn)
def bayesNet(evs):
    """Build the 'Quality Prediction' BN, run inference with evidence `evs`
    and return (base64-encoded PNG of the inference graph, CSV result rows).

    evs : dict — evidence passed to gum.LazyPropagation.setEvidence.
    """
    # Creating BayesNet with 4 variables
    bayesNets = gum.BayesNet('Quality Prediction')

    # Adding nodes the long way
    commitNumber = bayesNets.add(
        gum.LabelizedVariable('Commit Number', 'cloudy ?', 0)
        .addLabel("Low").addLabel("Medium").addLabel("High"))
    numberOfDevloper = bayesNets.add(
        gum.LabelizedVariable("Number Of Developer", "Devs", 0)
        .addLabel("Low").addLabel("Medium").addLabel("High"))
    buildFailures = bayesNets.add(
        gum.LabelizedVariable('Build Failures', 'cloudy ?', 0)
        .addLabel("Low").addLabel("Medium").addLabel("High"))
    numberOfFixedBug = bayesNets.add(
        gum.LabelizedVariable('Number Of Fixed Bug', 'cloudy ?', 2))
    nbFunctionalEvolution = bayesNets.add(
        gum.LabelizedVariable("Number Of Functional Evolution", "Evol", 0)
        .addLabel("Low").addLabel("Medium").addLabel("High"))
    categoryOfIncident = bayesNets.add(
        gum.LabelizedVariable("Incident Category", "Devs", 0)
        .addLabel("Tertiaire").addLabel("Secondaire").addLabel("Prmaire"))

    # creation of the links between nodes
    for link in [(commitNumber, buildFailures),
                 (numberOfDevloper, buildFailures),
                 (nbFunctionalEvolution, numberOfFixedBug),
                 (buildFailures, numberOfFixedBug),
                 (buildFailures, categoryOfIncident),
                 (numberOfFixedBug, categoryOfIncident)]:
        bayesNets.addArc(*link)
    print(bayesNets)

    # priors
    bayesNets.cpt(commitNumber)[:] = [0.5, 0.3, 0.2]
    bayesNets.cpt(numberOfDevloper)[:] = [0.5, 0.4, 0.5]
    bayesNets.cpt(nbFunctionalEvolution)[:] = [0.5, 0.4, 0.5]
    print(bayesNets.cpt(numberOfFixedBug).var_names)

    # conditional tables
    bayesNets.cpt(numberOfFixedBug)[{
        'Build Failures': 0, 'Number Of Functional Evolution': "Medium"
    }] = [0.5, 0.4]
    bayesNets.cpt(numberOfFixedBug)[{
        'Build Failures': 0, 'Number Of Functional Evolution': "High"
    }] = [0.54, 0.46]
    bayesNets.cpt(numberOfFixedBug)[{
        'Build Failures': 0, 'Number Of Functional Evolution': "Low"
    }] = [0.53, 0.47]

    bayesNets.cpt(buildFailures)[{
        'Commit Number': "Low", 'Number Of Developer': "Low"
    }] = 0.2
    bayesNets.cpt(buildFailures)[{
        'Commit Number': "Low", 'Number Of Developer': "High"
    }] = 0.4
    bayesNets.cpt(buildFailures)[{
        'Commit Number': "High", 'Number Of Developer': "Low"
    }] = 0.2
    bayesNets.cpt(buildFailures)[{
        'Commit Number': "High", 'Number Of Developer': "High"
    }] = 0.9
    # NOTE: the same cell is assigned twice; the second assignment (0.9) wins
    bayesNets.cpt(buildFailures)[{
        'Commit Number': "Low", 'Number Of Developer': "Medium"
    }] = 1
    bayesNets.cpt(buildFailures)[{
        'Commit Number': "Low", 'Number Of Developer': "Medium"
    }] = 0.9
    bayesNets.cpt(buildFailures)[{
        'Commit Number': "High", 'Number Of Developer': "Medium"
    }] = 0.1
    bayesNets.cpt(buildFailures)[{
        'Commit Number': "Medium", 'Number Of Developer': "Medium"
    }] = 0.6

    bayesNets.cpt(categoryOfIncident)[{
        'Build Failures': 0, 'Number Of Fixed Bug': 0
    }] = [0.2, 0.3, 0.5]
    bayesNets.cpt(categoryOfIncident)[{
        'Build Failures': 0, 'Number Of Fixed Bug': 1
    }] = [0.5, 0.3, 0.2]

    # round-trip through the bifxml file (filename typo kept for compatibility
    # with existing artifacts)
    gum.saveBN(bayesNets, "QualtiyPrediction.bifxml")
    bn = gum.loadBN("QualtiyPrediction.bifxml")

    # bug fix: the label was misspelled ('Incident Cateogry') and did not
    # match the variable name used in the network
    output_parameters_labels = ['Incident Category']

    # bug fix: the inference was built on `bayesNet` (this function object!)
    # instead of the loaded network
    ie = gum.LazyPropagation(bn)
    ie.setEvidence(evs)
    ie.makeInference()

    resultCSV = []
    resultCSV.append('Parameter, Low, Medium, High')
    for output_parameter_label in output_parameters_labels:
        results = ie.posterior(output_parameter_label).tolist()
        # bug fix: the third column was rounded without the 3-digit precision
        # used by the other columns
        resultCSV.append(output_parameter_label + ', ' +
                         str(round(results[0], 3)) + ', ' +
                         str(round(results[1], 3)) + ', ' +
                         str(round(results[2], 3)))

    # gnb.showInference(bn,evs={})
    resultBytes = BNinference2dot(bn, evs=evs).create(format='png')
    resultBytesStr = base64.b64encode(resultBytes)
    return resultBytesStr, resultCSV
# encoded instruction sequence for the code generator
# (CPO/MUC/ASE/MUL/MAR/NOR opcodes — semantics defined by the generator; TODO confirm)
sequenceOfInstructions = [
    ['CPO', 'TRXTBH', [2, 3], 1.0], ['CPO', 'FCPPAV', [2, 6, 7], 1.0],
    ['CPO', 'QUIBFX', [0, 1], 1.0], ['CPO', 'HWIALF', [1, 2, 4], 1.0],
    ['CPO', 'JDMAEU', [2, 5, 6], 1.0], ['CPO', 'MVTHNR', [2, 4, 5], 1.0],
    ['MUC', 'QUIBFX', '0', [0, 1]], ['MUC', 'QUIBFX', '1', [0, 1]],
    ['MUC', 'HWIALF', '2', [1, 2, 4]], ['MUC', 'TRXTBH', '3', [2, 3]],
    ['MUC', 'MVTHNR', '4', [2, 4, 5]], ['MUC', 'JDMAEU', '5', [2, 5, 6]],
    ['MUC', 'JDMAEU', '6', [2, 5, 6]], ['MUC', 'FCPPAV', '7', [2, 6, 7]],
    ['ASE', 'smoking?', 'EV_5', '0', "evs.get([5, 'smoking?'][1])"],
    ['ASE', 'positive_XraY?', 'EV_3', '0', "evs.get([3, 'positive_XraY?'][1])"],
    ['ASE', 'visit_to_Asia?', 'EV_0', '0', "evs.get([0, 'visit_to_Asia?'][1])"],
    ['ASE', 'dyspnoea?', 'EV_7', '0', "evs.get([7, 'dyspnoea?'][1])"],
    ['MUL', 'QUIBFX', 'EV_0', [0, 1], ['0']], ['MUL', 'TRXTBH', 'EV_3', [2, 3], ['3']],
    ['MUL', 'JDMAEU', 'EV_5', [2, 5, 6], ['5']], ['MUL', 'FCPPAV', 'EV_7', [2, 6, 7], ['7']],
    ['CPO', 'XAKXFL', [2, 5, 6], 1.0], ['MUL', 'XAKXFL', 'JDMAEU', [2, 5, 6], [2, 5, 6]],
    ['CPO', 'IFVUXZ', [2, 4, 5], 1.0], ['MUL', 'IFVUXZ', 'MVTHNR', [2, 4, 5], [2, 4, 5]],
    ['CPO', 'JZGJUB', [2, 4, 5], 1.0], ['MUL', 'JZGJUB', 'MVTHNR', [2, 4, 5], [2, 4, 5]],
    ['CPO', 'IPEKWS', [1, 2, 4], 1.0], ['MUL', 'IPEKWS', 'HWIALF', [1, 2, 4], [1, 2, 4]],
    ['CPO', 'ZLCECG', [2, 6], 0.0], ['MAR', 'ZLCECG', 'FCPPAV', [2, 6], [2, 6, 7]],
    ['MUL', 'JDMAEU', 'ZLCECG', [2, 5, 6], [2, 6]],
    ['CPO', 'KNMSKT', [1], 0.0], ['MAR', 'KNMSKT', 'QUIBFX', [1], [0, 1]],
    ['MUL', 'HWIALF', 'KNMSKT', [1, 2, 4], [1]],
    ['CPO', 'KIKCCO', [2, 4], 0.0], ['MAR', 'KIKCCO', 'HWIALF', [2, 4], [1, 2, 4]],
    ['MUL', 'MVTHNR', 'KIKCCO', [2, 4, 5], [2, 4]],
    ['CPO', 'KVWKZF', [2, 5], 0.0], ['MAR', 'KVWKZF', 'MVTHNR', [2, 5], [2, 4, 5]],
    ['MUL', 'JDMAEU', 'KVWKZF', [2, 5, 6], [2, 5]],
    ['CPO', 'ZXLINC', [2], 0.0], ['MAR', 'ZXLINC', 'TRXTBH', [2], [2, 3]],
    ['MUL', 'JDMAEU', 'ZXLINC', [2, 5, 6], [2]],
    ['MUL', 'XAKXFL', 'ZLCECG', [2, 5, 6], [2, 6]],
    ['MUL', 'XAKXFL', 'ZXLINC', [2, 5, 6], [2]],
    ['CPO', 'NBUOFU', [2, 5], 0.0], ['MAR', 'NBUOFU', 'XAKXFL', [2, 5], [2, 5, 6]],
    ['MUL', 'IFVUXZ', 'NBUOFU', [2, 4, 5], [2, 5]],
    ['CPO', 'ZMUCOI', [2, 4], 0.0], ['MAR', 'ZMUCOI', 'IFVUXZ', [2, 4], [2, 4, 5]],
    ['MUL', 'JZGJUB', 'KIKCCO', [2, 4, 5], [2, 4]],
    ['MUL', 'JZGJUB', 'NBUOFU', [2, 4, 5], [2, 5]],
    ['MUL', 'IPEKWS', 'ZMUCOI', [1, 2, 4], [2, 4]],
    ['MUL', 'IPEKWS', 'KNMSKT', [1, 2, 4], [1]],
    ['CPO', 'P_6', ['6'], 0.0], ['MAR', 'P_6', 'JDMAEU', [6], [2, 5, 6]],
    ['NOR', 'P_6', 'bronchitis?'],
    ['CPO', 'P_4', ['4'], 0.0], ['MAR', 'P_4', 'JZGJUB', [4], [2, 4, 5]],
    ['NOR', 'P_4', 'lung_cancer?'],
    ['CPO', 'P_1', ['1'], 0.0], ['MAR', 'P_1', 'IPEKWS', [1], [1, 2, 4]],
    ['NOR', 'P_1', 'tuberculosis?']]

#### PHP GENERATION ####
# from phpGenerator import PhpGenerator
# generator = PhpGenerator()
# filename="samplecode.php"

#### CSHARP GENERATION ####
from csharpGenerator import CSharpGenerator

generator = CSharpGenerator()
filename = "samplecode.cs"

import pyAgrum as gum

# drive the generator with the asia BN and the instruction sequence above
generator.setBN(gum.loadBN("asia.bif"))
generator.setCommentMode(True)
generator.genere(['lung_cancer?', 'bronchitis?', 'tuberculosis?'],
                 {'dyspnoea?', 'smoking?', 'positive_XraY?', 'visit_to_Asia?'},
                 {'visit_to_Asia?': [0, 1], 'dyspnoea?': [1, 0.5], 'smoking?': [1, 1]},
                 sequenceOfInstructions, filename, "getProbaForAsia",
                 "# generation of CSharp");
#without fee or royalty is hereby granted, provided #that the above copyright notice appear in all copies #and that both that copyright notice and this permission #notice appear in supporting documentation or portions #thereof, including modifications, that you make. #THE AUTHOR P.H. WUILLEMIN DISCLAIMS ALL WARRANTIES #WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED #WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT #SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT #OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER #RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER #IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS #ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE #OR PERFORMANCE OF THIS SOFTWARE! import pyAgrum as gum from gumLib.pyAgrum_header import pyAgrum_header pyAgrum_header(2011) bn=gum.loadBN("../resources/bn.bif") seq=bn.topologicalOrder() print("order : "+str(seq)) print print("enumeration :") for nod in bn.topologicalOrder(): print " %d (%s)"%(nod,bn.variable(nod).name())
def load_bn(self):
    """Read this model's BIFXML network from disk and set up inference helpers.

    Side effects: rebinds self.bn, self.generator, self.ie and self.structure.
    """
    network = gum.loadBN(self.path + self.name + '.bifxml')
    # Build the database generator and the exact-inference engine from the
    # freshly loaded network before publishing it on the instance.
    self.generator = gum.BNDatabaseGenerator(network)
    self.ie = gum.LazyPropagation(network)
    self.bn = network
    # Snapshot of the graph as [node ids, arcs].
    self.structure = [self.bn.nodes(), self.bn.arcs()]
#THE AUTHOR P.H. WUILLEMIN DISCLAIMS ALL WARRANTIES
#WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT
#SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT
#OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
#RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
#IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
#ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
#OR PERFORMANCE OF THIS SOFTWARE!

# Demo script: read and write CPT slices of a Bayesian network via
# dict-indexed instantiations.
import pyAgrum as gum
from gumLib.pyAgrum_header import pyAgrum_header

pyAgrum_header(2011)

bn = gum.loadBN("bn.bif")

# Python-2 `print` statements replaced by the parenthesized single-argument
# form, which produces identical output under Python 2 and Python 3.
print(bn.variable(0).name())  # a

print(bn.cpt(0)[{'e': 0, 'f': 1}])  # [ 0.0250064 0.974994 ]

bn.cpt(0)[{'e': 0, 'f': 1}] = [1, 0]
print(bn.cpt(0)[{'e': 0, 'f': 1}])  # [ 1. 0. ]

bn.cpt(0)[{'a': 0, 'e': 0, 'f': 1}] = 12
print(bn.cpt(0)[{'e': 0, 'f': 1}])  # [ 12. 0. ]
''' Experiment configuration ''' sim_param.save_trajs = False obj_name = 'cube' # learn_algo = 'hillclimbing' learn_algo = 'hard-coded' # bn_folder = '/home/maestre/git/a2l_exp_baxter_core/src/generated_files/dataset_size/2017-03-25_20:05:50_16_3' # iter_nb = 3 # bn_full_path = bn_folder + '/BN_random_' + str(iter_nb) + '_' + learn_algo + '.bif' bn_full_path = '/home/maestre/Desktop/BN_video1_predefined_hc.bif' ''' Load the BN ''' bn_loaded = agrum.loadBN(bn_full_path) ''' Create discretizations ''' current_orien_discr = discr.compute_orientation_discr() current_inclin_discr = discr.compute_inclination_discr() current_dist_discr = discr.compute_distance_discr() # ''' Load eef interface to close gripper''' # rospy.init_node('left_gripper_node', anonymous=True) # left_gripper_interface = baxter_interface.Gripper('left') # rospy.init_node('right_gripper_node', anonymous=True) # right_gripper_interface = baxter_interface.Gripper('right') # rospy.sleep(1) if not sim_param.real_robot:
def computeScores(bn_name, csv_name, visible=False, transforme_label=None):
    """Score a Bayesian network against a CSV database by log-likelihood.

    Parameters
    ----------
    bn_name : str or BN object
        Filename of a BN to load with gum.loadBN, or an already-loaded BN.
    csv_name : str
        CSV database filename; the first row must hold the variable names.
    visible : bool
        If True, show a progress bar and per-row diagnostics.
    transforme_label :
        Optional label transformation forwarded to getNumLabel().

    Returns
    -------
    tuple
        (percentage of rows with non-zero joint probability,
         {'likelihood', 'aic', 'aicc', 'bic', 'mdl'})
        'aicc' is the string "undefined" when nbr_lines - dim + 1 <= 0.
    """
    if isinstance(bn_name, str):
        bn = gum.loadBN(bn_name)
    else:
        bn = bn_name

    nbr_lines = lines_count(csv_name) - 1  # minus the header row

    nbr_insignificant = 0
    num_ligne = 0
    likelihood = 0.0

    # One file handle for both dialect sniffing and reading: the original code
    # opened the file twice and closed neither handle (resource leak).
    with open(csv_name, "rb") as csvfile:
        dialect = csv.Sniffer().sniff(csvfile.read(1024))
        csvfile.seek(0)
        batchReader = csv.reader(csvfile, dialect)

        # Header row; built-in next() works on Python 2.6+ and Python 3,
        # unlike the Python-2-only batchReader.next().
        titre = next(batchReader)
        fields = {nom: i for i, nom in enumerate(titre)}

        positions = checkCompatibility(bn, fields, csv_name)
        if positions is None:
            sys.exit(1)

        inst = gum.Instantiation()
        bn.completeInstantiation(inst)

        if visible:
            prog = ProgressBar(csv_name + ' : ', 0, nbr_lines, 77, mode='static', char='#')
            prog.display()

        for data in batchReader:
            num_ligne += 1

            for i in range(inst.nbrDim()):
                try:
                    inst.chgVal(i, getNumLabel(inst, i, data[positions[i]], transforme_label))
                except gum.OutOfBounds:
                    print("out of bounds", i, positions[i], data[positions[i]], inst.variable(i))

            p = bn.jointProbability(inst)
            if p == 0.0:
                # Zero-probability row: report it and exclude it from the likelihood.
                print(str(num_ligne) + ":" + str(inst))
                nbr_insignificant += 1
            else:
                likelihood += math.log(p, 2)
            if visible:
                prog.increment_amount()
                prog.display()

    if visible:
        # Blank line after the progress bar; was a bare Python-2 `print`
        # statement, print('') emits the same newline on Py2 and Py3.
        print('')

    nbr_arcs = 1.0 * bn.sizeArcs()
    dim = 1.0 * bn.dim()

    aic = likelihood - dim
    aicc = 2 * aic - 2 * dim * (dim + 1) / (nbr_lines - dim + 1) if (nbr_lines - dim + 1 > 0) else "undefined"
    bic = likelihood - dim * math.log(nbr_lines, 2)
    mdl = likelihood - nbr_arcs * math.log(nbr_lines, 2) - 32 * dim  # 32 = nbr of bits for a param

    return ((nbr_lines - nbr_insignificant) * 100.0 / nbr_lines,
            {'likelihood': likelihood, 'aic': aic, 'aicc': aicc, 'bic': bic, 'mdl': mdl})
#THE AUTHOR P.H. WUILLEMIN (@LIP6) DISCLAIMS ALL WARRANTIES
#WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT
#SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT
#OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
#RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
#IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
#ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
#OR PERFORMANCE OF THIS SOFTWARE!

# Demo script: read and write CPT slices of a Bayesian network via
# dict-indexed instantiations.
import pyAgrum as gum
from gumLib.pyAgrum_header import pyAgrum_header

pyAgrum_header(2011)

bn = gum.loadBN("bn.bif")

# Python-2 `print` statements replaced by the parenthesized single-argument
# form, which produces identical output under Python 2 and Python 3.
print(bn.variable(0).name())  # a

print(bn.cpt(0)[{'e': 0, 'f': 1}])  # [ 0.0250064 0.974994 ]

bn.cpt(0)[{'e': 0, 'f': 1}] = [1, 0]
print(bn.cpt(0)[{'e': 0, 'f': 1}])  # [ 1. 0. ]

bn.cpt(0)[{'a': 0, 'e': 0, 'f': 1}] = 12
print(bn.cpt(0)[{'e': 0, 'f': 1}])  # [ 12. 0. ]