def testGetPosterior(self):
  """Check gum.getPosterior against the exact joint, with and without evidence."""
  # Without evidence, the posterior is a plain marginal of the joint.
  # Targets may be given by name or by node id (2 -> "C").
  for target, var_name in (("A", "A"), (2, "C"), ('D', "D")):
    self.assertEqual(gum.getPosterior(self.bn, {}, target),
                     self.joint.margSumIn([var_name]))

  # Set up the reference inference engine with the same targets/evidence.
  self.ie.eraseAllTargets()
  self.ie.addTarget("A")
  self.ie.addTarget("F")
  self.ie.addEvidence("B", 2)
  self.ie.addEvidence("D", [0.2, 0.6, 0.6])
  self.ie.makeInference()

  # Build the evidence-weighted joint by hand: hard evidence B=2 and
  # soft (likelihood) evidence on D.
  ev_B = gum.Potential().add(self.bn.variable("B")).fillWith([0, 0, 1])
  ev_D = gum.Potential().add(self.bn.variable("D")).fillWith([0.2, 0.6, 0.6])
  weighted_joint = self.joint * ev_B * ev_D

  # Evidence keys may mix node ids and names (1 -> "B", 3 -> "D").
  self.assertEqual(
      gum.getPosterior(self.bn, {1: 2, "D": [0.2, 0.6, 0.6]}, "A"),
      weighted_joint.margSumIn(["A"]).normalize())
  self.assertEqual(
      gum.getPosterior(self.bn, {"B": 2, 3: [0.2, 0.6, 0.6]}, "F"),
      weighted_joint.margSumIn(["F"]).normalize())
def getOneSampleFromMN(mn):
  """
  Get one sample from a markov network

  Examples
  --------
  >>> sample=mnl.getOneSampleFromMN(mn)

  Parameters
  ----------
  mn : pyAgrum.MarkovNet
    the markov network

  Returns
  -------
  dict
    the sample, as a dictionary, keys are the variables names and values are the sampled values
  """
  # Sample variables one at a time, conditioning each draw on the values
  # already sampled (forward/ancestral-style sampling).
  # A plain for-loop over a list copy replaces the original
  # `while variablesToSample != []: ... pop(0)` pattern, which was O(n^2)
  # and fragile if mn.names() is not a list.
  sample = {}
  for name in list(mn.names()):
    sample[name] = gum.getPosterior(mn, target=name, evs=sample).draw()
  return sample
def testEvidenceImpactWithNodeId(self):
  """evidenceImpact with node ids: rejects a target inside the evidence set,
  and matches gum.getPosterior on the remaining conditioning variables."""
  bn = gum.loadBN(self.agrumSrcDir('asia.bif'), [],
                  verbose=False)  # verbose=False : don't want to see the warnings
  ie = gum.LazyPropagation(bn)
  self.assertEqual(len(ie.BN().arcs()), 8)

  # The target node (0) must not appear among the evidence nodes.
  with self.assertRaises(gum.InvalidArgument):
    impact = ie.evidenceImpact(0, [0, 1, 2])

  impact = ie.evidenceImpact(0, [1, 2])
  # node 2 is independent of node 0 given node 1, so only 2 dims remain
  self.assertEqual(impact.nbrDim(), 2)
  for state in (0, 1):
    self.assertEqual(impact.extract({"tuberculosis?": state}),
                     gum.getPosterior(bn, target=0, evs={1: state}))
def showPosterior(bn, evs, target):
  """
  shortcut for showProba(gum.getPosterior(bn,evs,target))

  :param bn: the BayesNet
  :param evs: map of evidence
  :param target: name of target variable
  """
  posterior = gum.getPosterior(bn, evs=evs, target=target)
  showProba(posterior)
def testEvidenceImpactWithName(self):
  """evidenceImpact with variable names: rejects a target inside the evidence
  set and unknown names, and matches gum.getPosterior otherwise."""
  bn = gum.loadBN(self.agrumSrcDir('asia.bif'), [],
                  verbose=False)  # verbose=False : don't want to see the warnings
  ie = gum.LazyPropagation(bn)

  # The target variable must not be part of the evidence set.
  with self.assertRaises(gum.InvalidArgument):
    impact = ie.evidenceImpact(
        "visit_to_Asia?",
        ["visit_to_Asia?", "tuberculosis?", "tuberculos_or_cancer?"])
  # Unknown variable names are rejected.
  with self.assertRaises(gum.NotFound):
    impact = ie.evidenceImpact(
        "visit_to_Asia?", ["toto", "tuberculosis?", "tuberculos_or_cancer?"])

  impact = ie.evidenceImpact("visit_to_Asia?",
                             ["tuberculosis?", "tuberculos_or_cancer?"])
  # tuberculos_or_cancer? is independent of the target given tuberculosis?,
  # so only 2 dimensions remain
  self.assertEqual(impact.nbrDim(), 2)
  for state in (0, 1):
    self.assertEqual(
        impact.extract({"tuberculosis?": state}),
        gum.getPosterior(bn, target="visit_to_Asia?",
                         evs={"tuberculosis?": state}))
def getPosterior(bn, evs, target):
  """
  shortcut for proba2histo(gum.getPosterior(bn,evs,target))

  :param bn: the BayesNet
  :type bn: gum.BayesNet
  :param evs: map of evidence
  :type evs: dict(str->int)
  :param target: name of target variable
  :type target: str
  :return: the matplotlib graph
  """
  posterior = gum.getPosterior(bn, evs=evs, target=target)
  histogram = proba2histo(posterior)
  # close the interactive figure: only the wrapped object is returned
  plt.close()
  return _getMatplotFig(histogram)
def fastSampleFromMNRecursive(mn, samples, samplesNumber, variablesToSampleOrder,
                              variablesEvidences, onlineComputedPotential,
                              sampledVariables, prog):
  """
  Recursively draw `samplesNumber` samples from the Markov network `mn`,
  appending each completed sample (a dict name -> value) to `samples`.

  Variables are instantiated in `variablesToSampleOrder`; posteriors are
  memoized in `onlineComputedPotential[variable]`, keyed by the tuple of
  already-sampled values that are relevant to `variable` (as listed in
  `variablesEvidences[variable]`), so identical conditioning contexts are
  computed only once.

  Parameters
  ----------
  mn : pyAgrum.MarkovNet
    the markov network to sample from
  samples : list
    output list of sample dicts (mutated in place)
  samplesNumber : int
    number of samples to produce along this recursion branch
  variablesToSampleOrder : list
    variable names in sampling order
  variablesEvidences : dict
    maps a variable name to the set of variables its posterior depends on
  onlineComputedPotential : dict
    per-variable memoization cache of posteriors (mutated in place)
  sampledVariables : dict
    values fixed so far on this branch
  prog : progress-bar-like object or None
    optional progress reporter with increment_amount()/display()
  """
  nextSampleIndex = len(sampledVariables)
  if nextSampleIndex == len(variablesToSampleOrder):
    # Every variable is instantiated: emit this completed sample
    # `samplesNumber` times.
    # NOTE(review): the same dict object is shared by all emitted copies —
    # callers must not mutate entries of `samples` in place.
    samples.extend(sampledVariables for _ in range(samplesNumber))
    if prog is not None:  # fixed: compare to None with `is not`, not `!=`
      prog.increment_amount(samplesNumber)
      prog.display()
  else:
    variable = variablesToSampleOrder[nextSampleIndex]
    variableSample = defaultdict(lambda: 0)
    # Cache key: only the already-sampled values that actually condition
    # `variable`, in sampling order.
    sampledVariablesTuple = tuple(
        sampledVariables[key]
        for key in variablesToSampleOrder[:nextSampleIndex]
        if key in variablesEvidences[variable])
    if sampledVariablesTuple in onlineComputedPotential[variable]:
      posterior = onlineComputedPotential[variable][sampledVariablesTuple]
    else:
      posterior = gum.getPosterior(mn, target=variable, evs=sampledVariables)
      onlineComputedPotential[variable][sampledVariablesTuple] = posterior
    # Draw all values for this branch at once, then recurse per distinct value
    # with the corresponding multiplicity.
    for _ in range(samplesNumber):
      value = posterior.draw()
      variableSample[value] += 1
    for value in variableSample.keys():
      fastSampleFromMNRecursive(
          mn, samples, variableSample[value], variablesToSampleOrder,
          variablesEvidences, onlineComputedPotential,
          {**sampledVariables, variable: value}, prog)
# Exercice 7 alpha = 0.05 bn_struct = learn_BN_structure(data, dico, alpha) #display_BN(names, bn_struct, "asia", style) display_BN_graphviz(names, bn_struct, "asia", style) # Exercice 7bis # création du réseau bayésien à la aGrUM bn = learn_parameters(bn_struct, "2015_tme5_asia.csv") # affichage de sa taille print(bn) # récupération de la ''conditional probability table'' (CPT) et affichage de cette table gnb.showPotential(bn.cpt(bn.idFromName('bronchitis?'))) # calcul de la marginale proba = gum.getPosterior(bn, {}, 'bronchitis?') # affichage de la marginale gnb.showPotential(proba) #calcul d'une distribution marginale a posteriori : P(bronchitis? | smoking? = true, turberculosis? = false ) gnb.showPotential( gum.getPosterior(bn, { 'smoking?': 'true', 'tuberculosis?': 'false' }, 'bronchitis?'))