def testInferenceWithLocalsCPT(self):
    bn = self.fill()
    bn2 = self.fill2(bn)

    frag = gum.BayesNetFragment(bn)
    for i in bn.nodes():
        frag.installNode(i)
    self.assertTrue(frag.checkConsistency())
    self.assertEqual(frag.size(), 6)
    self.assertEqual(frag.sizeArcs(), 7)

    newV5 = gum.Potential().add(frag.variable("v5")).add(frag.variable("v2")).add(frag.variable("v3"))
    newV5.fillWith(bn2.cpt("v5"))
    frag.installCPT("v5", newV5)
    self.assertTrue(frag.checkConsistency())
    self.assertEqual(frag.size(), 6)
    self.assertEqual(frag.sizeArcs(), 6)

    ie2 = gum.LazyPropagation(bn2)
    ie2.makeInference()
    ie = gum.LazyPropagation(frag)
    ie.makeInference()
    for n in frag.names():
        for x1, x2 in zip(ie2.posterior(n).tolist(), ie.posterior(n).tolist()):
            self.assertAlmostEqual(x1, x2, delta=1e-5, msg="For variable '{}'".format(n))
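# Note on the arc counts above: installCPT replaces v5's conditional table
# inside the fragment and, with it, v5's parent set. newV5 declares only v2
# and v3 as parents, so the fragment loses one incoming arc on v5 (7 -> 6
# arcs) while the original bn is left untouched.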
def test1(bn1):
    print("=====")
    print("TEST1")
    print("=====")
    evs = {"SAO2": 2, "CATECHOL": 1}
    print("EVIDENCES {}".format(evs))

    bn2, evs2 = conditionalModel(bn1, evs)
    print(" - Mutilation done")

    ie1 = gum.LazyPropagation(bn1)
    ie1.setEvidence(evs)
    ie1.makeInference()
    ie2 = gum.LazyPropagation(bn2)
    ie2.setEvidence(evs2)
    ie2.makeInference()
    print(" - Inference done")

    nb_errors = 0
    for n in bn1.names():
        if not isAlmostEqualPot(ie1.posterior(bn1.idFromName(n)),
                                ie2.posterior(bn2.idFromName(n))):
            nb_errors += 1
            print("Error on {} : {} != {}".format(
                n,
                ie1.posterior(bn1.idFromName(n))[:],
                ie2.posterior(bn2.idFromName(n))[:]))

    if nb_errors > 0:
        print("Errors : {}".format(nb_errors))
    else:
        print("No error : inference results are identical.")
def main():
    bn1 = gum.loadBN("alarm.bif")
    print(" - BN read")

    evs = {"SAO2": 2, "CATECHOL": 1}
    bn2 = mutilate(gum.BayesNet(bn1), evs)
    print(" - Mutilation done")

    ie1 = gum.LazyPropagation(bn1)
    ie1.setEvidence(evs)
    ie1.makeInference()
    ie2 = gum.LazyPropagation(bn2)
    ie2.setEvidence(evs)
    ie2.makeInference()
    print(" - Inference done")

    nb_errors = 0
    for n in bn1.names():
        if not isAlmostEqualPot(ie1.posterior(bn1.idFromName(n)),
                                ie2.posterior(bn2.idFromName(n))):
            nb_errors += 1
            print("Error on {} : {} != {}".format(
                n,
                ie1.posterior(bn1.idFromName(n))[:],
                ie2.posterior(bn2.idFromName(n))[:]))

    if nb_errors > 0:
        print("Errors : {}".format(nb_errors))
    else:
        print("No error : inference results are identical.")
def testOpenBayesSiteExamples(self):
    protoie = gum.LazyPropagation(self.bn)
    protoie.makeInference()
    proto = protoie.posterior(self.w)

    ie = gum.LoopyWeightedSampling(self.bn)
    ie.setVerbosity(True)
    ie.setEpsilon(0.02)
    ie.setMinEpsilonRate(0.001)
    msg = self.iterTest(proto, ie, self.w, {})
    if msg is not None:
        self.fail(msg)

    # the reference posterior must be conditioned on the same evidence
    # as the sampling inference below
    protoie = gum.LazyPropagation(self.bn)
    protoie.setEvidence({'s': 0, 'c': 0})
    protoie.makeInference()
    proto = protoie.posterior(self.w)

    ie = gum.LoopyGibbsSampling(self.bn)
    ie.setVerbosity(False)
    ie.setEpsilon(0.02)
    ie.setMinEpsilonRate(0.001)
    msg = self.iterTest(proto, ie, self.w, {'s': 0, 'c': 0})
    if msg is not None:
        self.fail(msg)
def test2(bn1):
    print("=====")
    print("TEST2")
    print("=====")
    evs = {"HYPOVOLEMIA": 0, "CATECHOL": 1, "INTUBATION": 0}

    bn2, evs2 = conditionalModel(bn1, evs)

    # HYPOVOLEMIA has no parent: it should have been removed from the model and the evidence
    if "HYPOVOLEMIA" in evs2:
        print("- HYPOVOLEMIA should not be in evs2")
    try:
        bn2.idFromName("HYPOVOLEMIA")
        print("- HYPOVOLEMIA should not be in bn2")
    except gum.NotFound:
        pass

    # INTUBATION has no parent: it should have been removed from the model and the evidence
    if "INTUBATION" in evs2:
        print("- INTUBATION should not be in evs2")
    try:
        bn2.idFromName("INTUBATION")
        print("- INTUBATION should not be in bn2")
    except gum.NotFound:
        pass

    ie1 = gum.LazyPropagation(bn1)
    ie1.setEvidence(evs)
    ie1.makeInference()
    ie2 = gum.LazyPropagation(bn2)
    ie2.setEvidence(evs2)
    ie2.makeInference()
    print(" - Inference done")

    nb_errors = 0
    for n in bn2.names():
        if not isAlmostEqualPot(ie1.posterior(bn1.idFromName(n)),
                                ie2.posterior(bn2.idFromName(n))):
            nb_errors += 1
            print("Error on {} : {} != {}".format(
                n,
                ie1.posterior(bn1.idFromName(n))[:],
                ie2.posterior(bn2.idFromName(n))[:]))

    if nb_errors > 0:
        print("Errors : {}".format(nb_errors))
    else:
        print("No error : inference results are identical.")
def main(): bn = gum.loadBN("data/alarm.bif") print("BN read") evs = {"HR": 1, "PAP": 2} m = Gibbs(bn, evs, verbose=True) m.run(5e-2, 20) print("done") ie = gum.LazyPropagation(bn) ie.setEvidence(evs) ie.makeInference() for i in bn.ids(): v, c = m.results(i) if v is not None: print("{} : {:3.5f}\n exact : {}\n approx : {} ({:7.5f})". format( bn.variable(i).name(), utils.KL(ie.posterior(i), v), utils.compactPot(ie.posterior(i)), utils.compactPot(v), c)) else: print("{}: {}".format( bn.variable(i).name(), evs[bn.variable(i).name()]))
def compareApprox(m, bn, evs):
    """
    Compare the results in m with a LazyPropagation on bn with evidence evs

    :param m: the approximate algorithm
    :param bn: the Bayesian network
    :param evs: the dict of evidence
    :return: void
    """
    ie = gum.LazyPropagation(bn)
    ie.setEvidence(evs)
    ie.makeInference()

    res = []
    for i in bn.ids():
        v, c = m.results(i)
        if v is not None:
            res.append((bn.variable(i).name(),
                        KL(ie.posterior(i), v),
                        compactPot(ie.posterior(i)),
                        compactPot(v),
                        c))
        else:
            print("{}: {}".format(bn.variable(i).name(), evs[bn.variable(i).name()]))

    for r in sorted(res, key=lambda item: item[1], reverse=True):
        print("{} : {:3.5f}\n exact : {}\n approx : {} ({:7.5f})".format(*r))
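# compareApprox relies on two helpers that are not defined here. A minimal
# sketch of what they are assumed to do (hypothetical implementations; the
# real KL and compactPot may differ):
import math

def KL(p, q):
    """Kullback-Leibler divergence D(p||q) between two posteriors over the same variable."""
    return sum(px * math.log(px / qx)
               for px, qx in zip(p.tolist(), q.tolist())
               if px > 0)

def compactPot(p):
    """One-line string rendering of a Potential's values."""
    return "[" + "|".join("{:7.4f}".format(v) for v in p.tolist()) + "]"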
def run_bn_unsup(train_corr, test_corr, structure):
    """
    This method first learns a BN based on train_corr, then propagates evidence
    from test_corr through it, after which a new data set is created based on
    the new posteriors.

    :param train_corr: training data, not in one-hot encoded form!
    :param test_corr: test data that is being updated, in one-hot encoded form
    :param structure: structure of the data (how many categories each attribute has)
    """
    structure_0 = [0] + structure

    # Learn the BN based on train_corr
    learner = gum.BNLearner(train_corr)
    learner.useScoreBDeu()
    bn = learner.learnBN()

    # Create a placeholder for the new data
    new_data = np.zeros(test_corr.shape)
    for i in range(test_corr.shape[0]):
        dp = test_corr[i, :]  # fix an observation

        # Convert the observation to the dictionary of evidence needed for propagation
        evs = {}
        k = 0
        for n in bn.nodes():
            evs[n] = dp[sum(structure_0[:k + 1]):sum(structure_0[:k + 2])]
            k += 1

        ie = gum.LazyPropagation(bn)
        ie.setEvidence(evs)  # set the evidence

        # Extract the posteriors and store them in new_data
        pst = [ie.posterior(n).toarray() for n in bn.nodes()]
        new_data[i, :] = list(itertools.chain.from_iterable(pst))
        ie.eraseAllEvidence()
    return new_data
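# Each row of test_corr is one-hot encoded, so the slices passed as evidence
# above are likelihood vectors rather than single labels. pyAgrum accepts such
# soft evidence directly; a minimal, self-contained illustration on a toy
# network (hypothetical demo function, not part of the pipeline above):
def _soft_evidence_demo():
    import pyAgrum as gum

    bn = gum.fastBN("A->B")
    ie = gum.LazyPropagation(bn)
    ie.setEvidence({"A": [0.2, 0.8]})  # likelihood vector over A's two labels
    ie.makeInference()
    print(ie.posterior("B"))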
def compil(bn, targets, evs):
    """This function uses all the predefined functions above to fill the
    compiler array with the instructions needed to get the targets of a bn
    according to the evidence."""
    ie = gum.LazyPropagation(bn)
    jt = ie.junctionTree()
    # hardToSoftEvidences(bn, evs)

    absorp = []      # the list for the absorption
    diffu = []       # the list for the diffusion
    cliquesTar = {}  # the dictionary which contains the {target: clique} couples

    r = mainClique(bn, jt, targets)
    n = r
    targetmp1 = list(targets)  # for the traversal (this list is modified)
    deleteTarMainCliq(bn, jt, targetmp1, r)
    targetmp2 = list(targetmp1)  # for the inference (this list is not modified)
    parcours(bn, jt, targetmp1, n, r, absorp, diffu, cliquesTar)

    # Creation and initialization of the potentials
    creationPotentialsAbsorp(bn, jt, absorp)
    initPotentialsAbsorp(bn, jt, absorp)
    evsPotentials(bn, jt, evs, absorp)
    creaIniPotentialsDiffu(bn, jt, diffu, cliquesTar, targets, absorp)

    # Absorption and diffusion
    inference(bn, jt, absorp, diffu, targets, targetmp2, cliquesTar)

    # Computing the targets
    output(bn, jt, targets, absorp, diffu, cliquesTar)
    return compilator.getTab()
def testWithDifferentVariables(self):
    protoie = gum.LazyPropagation(self.bn)
    protoie.addEvidence('r', 1)
    protoie.addEvidence('w', 0)
    protoie.makeInference()
    proto = protoie.posterior(self.s)

    ie = gum.LoopyWeightedSampling(self.bn)
    ie.setVerbosity(False)
    ie.setEpsilon(0.1)
    ie.setMinEpsilonRate(0.01)
    msg = self.iterTest(proto, ie, self.s, {'r': [0, 1], 'w': (1, 0)})
    if msg is not None:
        self.fail(msg)

    ie2 = gum.LoopyGibbsSampling(self.bn)
    ie2.setVerbosity(False)
    ie2.setEpsilon(0.1)
    ie2.setMinEpsilonRate(0.01)
    ie2.setEvidence({'r': 1, 'w': 0})
    ie2.makeInference()
    msg = self.iterTest(proto, ie2, self.s, {'r': 1, 'w': 0})
    if msg is not None:
        self.fail(msg)

    ie3 = gum.LoopyMonteCarloSampling(self.bn)
    ie3.setVerbosity(False)
    ie3.setEpsilon(0.1)
    ie3.setMinEpsilonRate(0.01)
    msg = self.iterTest(proto, ie3, self.s, {'r': [0, 1], 'w': (1, 0)})
    if msg is not None:
        self.fail(msg)
def getInformationGraph(bn, evs=None, size=None, cmap=_INFOcmap, withMinMax=False):
    """
    Create a dot representation of the information graph for this BN

    Parameters
    ----------
    bn: gum.BayesNet
      the BN
    evs: Dict[str,str|int|List[float]]
      map of evidence
    size: str|int
      size of the graph
    cmap: matplotlib.colors.Colormap
      color map
    withMinMax: bool
      should the min and max information values be returned as well?

    Returns
    -------
    dot.Dot | Tuple[dot.Dot,float,float]
      the graph as a dot representation and, if asked, min_information_value and max_information_value
    """
    if size is None:
        size = gum.config["notebook", "default_graph_size"]
    if evs is None:
        evs = {}

    ie = gum.LazyPropagation(bn)
    ie.setEvidence(evs)
    ie.makeInference()

    idEvs = {bn.idFromName(name) for name in evs}
    nodevals = {bn.variable(n).name(): ie.H(n)
                for n in bn.nodes() if n not in idEvs}
    arcvals = {(x, y): ie.I(x, y) for x, y in bn.arcs()}
    gr = BN2dot(bn,
                size,
                nodeColor=_normalizeVals(nodevals, hilightExtrema=False),
                arcWidth=arcvals,
                cmapNode=cmap,
                cmapArc=cmap,
                showMsg=nodevals)

    if withMinMax:
        mi = min(nodevals.values())
        ma = max(nodevals.values())
        return gr, mi, ma
    return gr
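# A possible call, as a sketch (hypothetical demo function; assumes this
# module's helpers _INFOcmap, _normalizeVals and BN2dot are available, as in
# the function above):
def _information_graph_demo():
    import pyAgrum as gum

    bn = gum.fastBN("A->B->C;A->D->C")
    g, vmin, vmax = getInformationGraph(bn, evs={"B": 0}, withMinMax=True)
    print(vmin, vmax)  # range of the node entropies, e.g. for a color-bar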
def testJointMutualInformation(self):
    bn = gum.fastBN("A->B->C->D;A->E->D;F->B;C->H;")
    ie = gum.LazyPropagation(bn)
    ie.makeInference()

    with self.assertRaises(gum.InvalidArgument):
        ie.jointMutualInformation([0])

    self.assertAlmostEqual(ie.I(0, 1), ie.jointMutualInformation([0, 1]))
    self.assertAlmostEqual(ie.I("A", "B"), ie.jointMutualInformation(["A", "B"]))

    ie = gum.LazyPropagation(bn)
    ie.addJointTarget({1, 4, 3})
    ie.addAllTargets()
    ie.makeInference()
    byHandJMI = 0
    byHandJMI -= ie.jointPosterior({1, 3, 4}).entropy()
    byHandJMI += ie.jointPosterior({1, 4}).entropy() \
                 + ie.jointPosterior({1, 3}).entropy() \
                 + ie.jointPosterior({4, 3}).entropy()
    byHandJMI -= ie.posterior(1).entropy() + ie.posterior(4).entropy() + ie.posterior(3).entropy()

    ie2 = gum.LazyPropagation(bn)
    JMI = ie2.jointMutualInformation({1, 3, 4})
    self.assertAlmostEqual(JMI, byHandJMI)

    ie = gum.LazyPropagation(bn)
    ie.addJointTarget({0, 1, 2, 3})
    ie.addAllTargets()
    ie.makeInference()
    byHandJMI = 0
    byHandJMI -= ie.jointPosterior({0, 1, 2, 3}).entropy()
    byHandJMI += ie.jointPosterior({0, 1, 2}).entropy() + ie.jointPosterior({0, 1, 3}).entropy() \
                 + ie.jointPosterior({0, 2, 3}).entropy() + ie.jointPosterior({1, 2, 3}).entropy()
    byHandJMI -= ie.jointPosterior({0, 1}).entropy() + ie.jointPosterior({0, 2}).entropy() \
                 + ie.jointPosterior({0, 3}).entropy() + ie.jointPosterior({1, 2}).entropy() \
                 + ie.jointPosterior({1, 3}).entropy() + ie.jointPosterior({2, 3}).entropy()
    byHandJMI += ie.posterior(0).entropy() + ie.posterior(1).entropy() \
                 + ie.posterior(2).entropy() + ie.posterior(3).entropy()

    ie2 = gum.LazyPropagation(bn)
    JMI = ie2.jointMutualInformation({0, 1, 2, 3})
    self.assertAlmostEqual(JMI, byHandJMI)
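# What the by-hand computation above encodes: for a set V of variables,
# jointMutualInformation(V) is the alternating inclusion-exclusion over the
# entropies of all nonempty subsets S of V,
#   JMI(V) = -sum_{S} (-1)^(|V|-|S|) H(S),
# which for two variables reduces to the usual mutual information
# I(X;Y) = H(X) + H(Y) - H(X,Y), as the first two assertions check.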
def plotFollowUnrolled(lovars, dbn, T, evs, vars_title=None):
    """
    Plot the dynamic evolution of a list of vars with a dBN.

    :param lovars: list of variables to follow
    :param dbn: the unrolled dbn
    :param T: the time range
    :param evs: observations
    :param vars_title: None for default titles, or a dictionary with the
        variable name as key and the respective title as value.
    """
    ie = gum.LazyPropagation(dbn)
    ie.setEvidence(evs)
    ie.makeInference()

    x = np.arange(T)
    for var in lovars:
        v0 = dbn.variableFromName(var + "0")
        lpots = []
        for i in range(v0.domainSize()):
            serie = []
            for t in range(T):
                serie.append(ie.posterior(dbn.idFromName(var + str(t)))[i])
            lpots.append(serie)

        _, ax = plt.subplots()
        plt.xlim(left=0, right=T - 1)
        plt.ylim(top=1, bottom=0)
        ax.xaxis.grid()

        # Setting a customized title
        if vars_title is None:
            plt.title(f"Following variable {var}", fontsize=20)
        elif len(vars_title) != 0:
            plt.title(vars_title[var], fontsize=20)
        else:
            raise TypeError("Incorrect format of the plot titles dictionary")

        plt.xlabel('time')
        stack = ax.stackplot(x, lpots)

        proxy_rects = [Rectangle((0, 0), 1, 1, fc=pc.get_facecolor()[0]) for pc in stack]
        labels = [v0.label(i) for i in range(v0.domainSize())]
        plt.legend(proxy_rects, labels,
                   loc='center left',
                   bbox_to_anchor=(1, 0.5),
                   ncol=1,
                   fancybox=True,
                   shadow=True)
        plt.show()
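# A sketch of a call on an unrolled 2-TBN (hypothetical demo function;
# assumes pyAgrum's "x0"/"xt" 2-TBN naming convention and the unroll2TBN
# helper from pyAgrum.lib.dynamicBN):
def _follow_demo():
    import pyAgrum as gum
    import pyAgrum.lib.dynamicBN as gdyn

    twodbn = gum.fastBN("a0->at")  # "a0"/"at" encodes the transition a_t -> a_{t+1}
    T = 10
    dbn = gdyn.unroll2TBN(twodbn, T)
    plotFollowUnrolled(["a"], dbn, T, evs={"a3": 1})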
def testEvidenceJointImpact(self):
    bn = gum.fastBN("A->B->C->D;A->E->D;F->B;C->H;")
    ie = gum.LazyPropagation(bn)
    res = ie.evidenceJointImpact(["D", "E"], ["A", "B", "C", "F"])

    joint = bn.cpt("A") * bn.cpt("B") * bn.cpt("C") * bn.cpt("D") \
            * bn.cpt("E") * bn.cpt("F") * bn.cpt("H")
    pADCE = joint.margSumIn(["A", "C", "D", "E"])
    pAC = pADCE.margSumOut(["D", "E"])
    self.assertEqual(res, pADCE / pAC)
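# Why the hand-built check works: evidenceJointImpact(["D","E"], ["A","B","C","F"])
# returns P(D,E | A,B,C,F) restricted to the conditioning variables that
# actually matter. Here E depends only on A, and D only on C and E, so B and F
# drop out and the result is P(D,E | A,C) = P(A,C,D,E) / P(A,C), i.e. exactly
# pADCE / pAC.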
def testMutilateBN2(self):
    bn = gum.fastBN("P2->N<-P1;A->E2<-N->E1")
    bn2, ev2 = gum.mutilateBN(bn,
                              intervention={},
                              observation={'A': ["1"], "N": [1, 1]})

    ie = gum.LazyPropagation(bn)
    ie.setEvidence(ev2)
    ie.makeInference()
    ie2 = gum.LazyPropagation(bn2)
    ie2.setEvidence(ev2)
    ie2.makeInference()

    for n in bn2.names():
        self.assertEqual(ie.posterior(n).tolist(), ie2.posterior(n).tolist())
def predict_learner_knowledge_states_from_learner_traces(self, learner_traces):
    knowledge_components = self.associated_learner_pool.get_knowledge_components()
    bn = unroll_2tbn(self.bn, len(learner_traces) + 1)

    # Set up the soft evidence in the BN
    evidences = {}
    for i, trace in enumerate(learner_traces):
        evaluated_kc = trace.get_kc()
        success = trace.get_success()
        exercise = trace.get_exercise()
        guess = self.associated_learner_pool.get_guess(exercise)
        slip = self.associated_learner_pool.get_slip(exercise)
        learn = self.associated_learner_pool.get_learn(evaluated_kc)
        forget = self.associated_learner_pool.get_forget(evaluated_kc)

        bn.add(gum.LabelizedVariable(f"exercise({exercise.id}){i}",
                                     f"exercise({exercise.id}){i}", 2))
        bn.addArc(f"({evaluated_kc.id}){i}", f"exercise({exercise.id}){i}")
        bn.cpt(f"exercise({exercise.id}){i}")[{f"({evaluated_kc.id}){i}": 0}] = [1 - guess, guess]
        bn.cpt(f"exercise({exercise.id}){i}")[{f"({evaluated_kc.id}){i}": 1}] = [slip, 1 - slip]
        evidences[f"exercise({exercise.id}){i}"] = int(success)

        bn.cpt(f"(Z[({evaluated_kc.id})0->({evaluated_kc.id})t]){i + 1}")[
            {f"({evaluated_kc.id}){i}": 0}] = [1 - learn, learn]
        bn.cpt(f"(Z[({evaluated_kc.id})0->({evaluated_kc.id})t]){i + 1}")[
            {f"({evaluated_kc.id}){i}": 1}] = [forget, 1 - forget]

        # Alternative (soft evidence on the KC node itself):
        # evidences[f"({evaluated_kc.id}){i}"] = [guess, 1 - guess] if success else [1 - slip, slip]

    # Set up the inference
    ie = gum.LazyPropagation(bn)
    ie.setEvidence(evidences)
    ie.makeInference()

    knowledge_states = {}
    for kc in knowledge_components:
        knowledge_states[f"{kc.id}"] = ie.posterior(
            bn.idFromName(f"({kc.id}){len(evidences)}"))[1]
    return knowledge_states
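# This is the standard knowledge-tracing parameterization: guess is the
# probability of answering correctly without mastering the KC, slip the
# probability of failing while mastering it, and learn/forget fill the
# transition CPT between consecutive time slices. The returned value is the
# posterior probability of mastery of each KC after the last observed trace.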
def testRelevantReasonning(self):
    # an inference on the complete bn with a hard evidence and an inference on
    # the relevant fragment with a local CPT should give the same results
    bn = self.fill()

    inf_complete = gum.LazyPropagation(bn)
    inf_complete.setEvidence({"v3": 1})
    inf_complete.makeInference()
    p = inf_complete.posterior("v6")

    frag = gum.BayesNetFragment(bn)
    frag.installAscendants("v6")
    marg = gum.Potential().add(frag.variable("v3"))
    marg.fillWith([0, 1])
    frag.installMarginal("v3", marg)
    self.assertEqual(frag.size(), 3)
    self.assertEqual(frag.sizeArcs(), 1)

    inf_frag = gum.LazyPropagation(frag)
    inf_frag.makeInference()

    for x1, x2 in zip(inf_complete.posterior("v6").tolist(),
                      inf_frag.posterior("v6").tolist()):
        self.assertAlmostEqual(x1, x2, delta=1e-5)
def main(): bn = gum.loadBN("alarm.bif") ie = gum.LazyPropagation(bn) ie.makeInference() m = ParallelMonteCarlo(bn) m.run(1e-2, 300, verbose=True) print("done") for i in bn.ids(): print("{}: {}".format( bn.variable(i).name(), int(100000 * utils.KL(ie.posterior(i), m.posterior(i))) / 1000))
def main(): bn = gum.loadBN("alarm.bif") ie = gum.LazyPropagation(bn) ie.makeInference() m = MonteCarlo(bn) m.run(1e-2, 300, verbose=True) print("done") for i in bn.ids(): ev, ec, hv, hc = m.everything(i) print("{}: {} ({}) =!= {} ({})".format( bn.variable(i).name(), ev, ec, hv, hc))
def testEvidenceImpactWithNodeId(self):
    bn = gum.loadBN(self.agrumSrcDir('asia.bif'), [], verbose=False)
    # verbose=False : we don't want to see the warnings

    ie = gum.LazyPropagation(bn)
    self.assertEqual(len(ie.BN().arcs()), 8)

    with self.assertRaises(gum.InvalidArgument):
        res = ie.evidenceImpact(0, [0, 1, 2])

    res = ie.evidenceImpact(0, [1, 2])
    self.assertEqual(res.nbrDim(), 2)  # 2 is independent of 0 given 1
    self.assertEqual(res.extract({"tuberculosis?": 0}),
                     gum.getPosterior(bn, target=0, evs={1: 0}))
    self.assertEqual(res.extract({"tuberculosis?": 1}),
                     gum.getPosterior(bn, target=0, evs={1: 1}))
def testOr(self):
    bn = gum.BayesNet()
    c1, c2 = [bn.add(gum.LabelizedVariable(item, item, 2)) for item in ['C1', 'C2']]
    a = bn.addOR(gum.LabelizedVariable('a', 'a', 2))
    bn.addArc(c1, a)
    bn.addArc(c2, a)

    for i in range(2):
        bn.cpt(c1)[:] = [i, 1 - i]
        for j in range(2):
            bn.cpt(c2)[:] = [j, 1 - j]
            ie = gum.LazyPropagation(bn)
            ie.makeInference()
            if i * j == 0:
                self.assertEqual(ie.posterior(a)[:][0], 0.0)
                self.assertEqual(ie.posterior(a)[:][1], 1.0)
            else:
                self.assertEqual(ie.posterior(a)[:][0], 1.0)
                self.assertEqual(ie.posterior(a)[:][1], 0.0)
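# The four (i, j) configurations make each parent deterministic: [i, 1 - i]
# puts all the mass on label 1 when i == 0. addOR builds a deterministic OR
# aggregator, so the posterior of a is concentrated on label 1 exactly when
# at least one parent is 1, i.e. when i * j == 0.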
def testEvidenceImpactWithName(self):
    bn = gum.loadBN(self.agrumSrcDir('asia.bif'), [], verbose=False)
    # verbose=False : we don't want to see the warnings

    ie = gum.LazyPropagation(bn)
    with self.assertRaises(gum.InvalidArgument):
        res = ie.evidenceImpact("visit_to_Asia?",
                                ["visit_to_Asia?", "tuberculosis?", "tuberculos_or_cancer?"])
    with self.assertRaises(gum.NotFound):
        res = ie.evidenceImpact("visit_to_Asia?",
                                ["toto", "tuberculosis?", "tuberculos_or_cancer?"])

    res = ie.evidenceImpact("visit_to_Asia?", ["tuberculosis?", "tuberculos_or_cancer?"])
    self.assertEqual(res.nbrDim(), 2)  # "tuberculos_or_cancer?" is independent of the target given "tuberculosis?"
    self.assertEqual(res.extract({"tuberculosis?": 0}),
                     gum.getPosterior(bn, target="visit_to_Asia?", evs={"tuberculosis?": 0}))
    self.assertEqual(res.extract({"tuberculosis?": 1}),
                     gum.getPosterior(bn, target="visit_to_Asia?", evs={"tuberculosis?": 1}))
def build_save_BN(self, dGraphNodes, dGraphEdges, randomCPTs):
    dGraphEdges = [(str(A), str(B)) for (A, B) in dGraphEdges]

    bn = gum.BayesNet(self.name)
    for var in dGraphNodes:
        bn.add(gum.LabelizedVariable(str(var), str(var), 2))
    for edge in dGraphEdges:
        bn.addArc(edge[0], edge[1])
    if randomCPTs:
        # Generate all the CPTs randomly
        bn.generateCPTs()

    # Plot and save the BN
    gumGraph.dotize(bn, self.path + self.name, 'pdf')
    gum.saveBN(bn, self.path + self.name + '.bifxml')

    self.generator = gum.BNDatabaseGenerator(bn)
    self.ie = gum.LazyPropagation(bn)
    self.bn = bn
    self.structure = [dGraphNodes, dGraphEdges]
def WikipediaExample(self):
    protoie = gum.LazyPropagation(self.bn2)
    protoie.makeInference()
    proto = protoie.posterior('w2')

    ie = gum.LoopyWeightedSampling(self.bn2)
    ie.setVerbosity(False)
    ie.setEpsilon(0.01)
    ie.setMinEpsilonRate(0.001)
    msg = self.iterTest(proto, ie, 'w2', {})
    if msg is not None:
        self.fail(msg)

    ie2 = gum.LoopyMonteCarloSampling(self.bn2)
    ie2.setVerbosity(False)
    ie2.setEpsilon(0.01)
    ie2.setMinEpsilonRate(0.001)
    msg = self.iterTest(proto, ie2, 'w2', {})
    if msg is not None:
        self.fail(msg)
def __init__(self, bn, truth_values):
    self.truth_values = truth_values
    self.scenario_nodes = []
    try:
        bn.variable('constraint')
        try:
            bn.variable('aux')
        except gum.NotFound:
            print("No aux node found, restructuring network into Fenton 2016 style")
            self.restructure_bn_fenton(bn)
    except gum.NotFound:
        # no constraint-aux construction implemented yet: doing that now
        self.implement_aux_constraint_structure(bn)

    self.bn = bn
    self.casemodel = case_model.CaseModel([({}, 1)])

    ie = gum.LazyPropagation(bn)
    self.ie = ie
    self.global_evidence_dict = {'constraint': truth_values[0]}
    ie.setEvidence(self.global_evidence_dict)
    self.create_first_cases()
def testDictOfSequences(self):
    protoie = gum.LazyPropagation(self.bn)
    protoie.addEvidence('s', 1)
    protoie.addEvidence('w', 0)
    protoie.makeInference()
    proto = protoie.posterior(self.r)

    ie = gum.LoopyImportanceSampling(self.bn)
    ie.setVerbosity(False)
    ie.setEpsilon(0.05)
    ie.setMinEpsilonRate(0.001)
    msg = self.iterTest(proto, ie, self.r, {'s': [0, 1], 'w': (1, 0)})
    if msg is not None:
        self.fail(msg)

    ie = gum.LoopyImportanceSampling(self.bn)
    ie.setVerbosity(False)
    ie.setEpsilon(0.05)
    ie.setMinEpsilonRate(0.001)
    msg = self.iterTest(proto, ie, self.r, {'s': 1, 'w': 0})
    if msg is not None:
        self.fail(msg)
def testDictOfLabels(self):
    protoie = gum.LazyPropagation(self.bn)
    protoie.addEvidence('s', 0)
    protoie.addEvidence('w', 1)
    protoie.makeInference()
    proto = protoie.posterior(self.r)

    ie = gum.LoopyGibbsSampling(self.bn)
    ie.setVerbosity(False)
    ie.setEpsilon(0.05)
    ie.setMinEpsilonRate(0.001)
    msg = self.iterTest(proto, ie, self.r, {'s': 0, 'w': 1})
    if msg is not None:
        self.fail(msg)

    ie = gum.LoopyGibbsSampling(self.bn)
    ie.setVerbosity(False)
    ie.setEpsilon(0.05)
    ie.setMinEpsilonRate(0.001)
    msg = self.iterTest(proto, ie, self.r, {'s': 'no', 'w': 'yes'})
    if msg is not None:
        self.fail(msg)
def eval(self, contextual_bn: "pyAgrum.BayesNet") -> "pyAgrum.Potential":
    if self._verbose:
        print(f"EVAL ${self.fastToLatex(defaultdict(int))}$ in context", flush=True)

    ie = pyAgrum.LazyPropagation(contextual_bn)
    if len(self.varNames) > 1:
        svars = set(self.varNames)
        ie.addJointTarget(svars)
        ie.makeInference()
        res = ie.jointPosterior(svars)
    else:
        name = next(iter(self.varNames))  # the first and only name in varNames
        ie.makeInference()
        res = ie.posterior(name)

    if self._verbose:
        print(f"END OF EVAL ${self.fastToLatex(defaultdict(int))}$ : {res}", flush=True)
    return res
def eval(self, contextual_bn: "pyAgrum.BayesNet") -> "pyAgrum.Potential":
    if self._verbose:
        print(f"EVAL ${self.fastToLatex(defaultdict(int))}$ in context", flush=True)

    ie = pyAgrum.LazyPropagation(contextual_bn)

    p = None
    # simple case: we just need a CPT from the BN
    if len(self.vars) == 1:
        x = next(iter(self.vars))  # the first and only variable
        ix = contextual_bn.idFromName(x)
        if {contextual_bn.variable(i).name() for i in contextual_bn.parents(ix)} == self.knw:
            p = contextual_bn.cpt(ix)

    if p is None:
        if len(self.knw) == 0:
            ie.addJointTarget(self.vars)
            ie.makeInference()
            p = ie.jointPosterior(self.vars)
        else:
            ie.addJointTarget(self.vars | self.knw)
            ie.makeInference()
            p = ie.jointPosterior(self.vars | self.knw) / ie.jointPosterior(self.knw)

    # res = p.extract({k: v for k, v in context.todict().items() if k in self.vars + self.knw})

    if self._verbose:
        print(f"END OF EVAL ${self.fastToLatex(defaultdict(int))}$ : {p}", flush=True)
    return p
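# In the general case, the conditional P(vars | knw) is computed as the ratio
# of two joint posteriors, P(vars, knw) / P(knw). Registering the single joint
# target vars | knw is enough for both calls, since jointPosterior can answer
# any subset of a declared joint target. The shortcut above avoids inference
# entirely when the requested conditional is literally a CPT of the BN.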
def setUp(self):
    self.bn = gum.BayesNet()

    self.c, self.r = [self.bn.add(gum.LabelizedVariable(name, name, 2))
                      for name in 'c r'.split()]
    self.s, self.w = [self.bn.add(gum.LabelizedVariable(name, name, 0).addLabel('no').addLabel('yes'))
                      for name in 's w'.split()]

    for link in [(self.c, self.s), (self.c, self.r),
                 (self.s, self.w), (self.r, self.w)]:
        self.bn.addArc(*link)

    self.bn.cpt(self.c)[:] = [0.5, 0.5]
    self.bn.cpt(self.s)[:] = [[0.5, 0.5], [0.9, 0.1]]
    self.bn.cpt(self.r)[:] = [[0.8, 0.2], [0.2, 0.8]]
    self.bn.cpt(self.w)[0, 0, :] = [1, 0]
    self.bn.cpt(self.w)[0, 1, :] = [0.1, 0.9]
    self.bn.cpt(self.w)[1, 0, :] = [0.1, 0.9]
    self.bn.cpt(self.w)[1, 1, :] = [0.01, 0.99]

    self.ie = gum.LazyPropagation(self.bn)
    self.jt = self.ie.junctionTree()
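# Note the mixed construction: c and r are created with two default labels
# (0/1), while s and w start empty and receive the labels 'no' and 'yes'.
# This is why the tests above can pass evidence either by index
# ({'s': 0}) or by label ({'s': 'no'}).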