Exemple #1
0
    def infer(self, evidence, new_evidence):
        """Build an inference engine for the model reduced by the evidence.

        Parameters
        ----------
        evidence : dict
            Existing evidence {variable: value}; updated in place with
            ``new_evidence`` before the model is reduced.
        new_evidence : dict
            Newly observed evidence to merge in.

        Returns
        -------
        The reduced model the inference engine was built on.

        Raises
        ------
        ValueError
            If ``self.inference_type`` is not a supported InferenceType.
        """
        evidence.update(new_evidence)

        new_model, additional_evidence = self.reduce_model(evidence)

        if self.inference_type == InferenceType.BeliefPropagation:
            inference = BeliefPropagation(new_model)
        elif self.inference_type == InferenceType.GibbsSampling:
            inference = GibbsSampling(new_model)
        elif self.inference_type == InferenceType.BayesianModelSampler:
            inference = BayesianModelSampling(new_model)
        else:
            # Previously an unrecognized type fell through and crashed later
            # with an UnboundLocalError; fail fast with a clear message.
            raise ValueError(
                "Unsupported inference type: {}".format(self.inference_type))

        # Keep only "real" variables as evidence; names containing "F(" are
        # internal factor-indicator variables and must not be re-used.
        self.evidence = {
            var: val
            for (var, val) in evidence.items() if "F(" not in var
        }
        self.evidence.update(additional_evidence)
        self.inference = inference
        self.scope = get_scope(new_model)

        return new_model
Exemple #2
0
    def __init__(self, theta, seed=None):
        """Build a binary pairwise Markov model from parameter rows.

        ``theta`` is iterated row by row; a row with ``j == k`` defines a
        unary potential on node ``j``, any other row with a non-zero value
        defines a pairwise coupling between ``j`` and ``k``.  The model is
        validated and a calibrated BeliefPropagation engine is attached.
        """
        if seed is not None:
            np.random.seed(seed)

        # Underlying graphical model.
        self.G = MarkovModel()

        for _, row in theta.iterrows():
            if row["j"] == row["k"]:
                # Unary potential: exp(-theta), exp(+theta) over the 2 states.
                node = str(int(row["j"]))
                theta_jj = row["value"]
                self.G.add_node(node)
                self.G.add_factors(
                    DiscreteFactor([node], [2],
                                   np.exp([-theta_jj, theta_jj])))
            elif row["value"] != 0:
                # Pairwise potential favouring agreement (Ising-style).
                u = str(int(row["j"]))
                v = str(int(row["k"]))
                theta_jk = row["value"]
                self.G.add_edge(u, v)
                self.G.add_factors(
                    DiscreteFactor(
                        [u, v], [2, 2],
                        np.exp([theta_jk, -theta_jk, -theta_jk, theta_jk])))

        self.G.check_model()
        self.infer = BeliefPropagation(self.G)
        self.infer.calibrate()
Exemple #3
0
 def test_query_multiple_variable(self):
     """Posterior marginals of J and Q with no evidence."""
     bp = BeliefPropagation(self.bayesian_model)
     result = bp.query(['Q', 'J'])
     expected = {'J': [0.416, 0.584], 'Q': [0.4912, 0.5088]}
     for variable in ('J', 'Q'):
         np_test.assert_array_almost_equal(result[variable].values,
                                           np.array(expected[variable]))
def main():
    """
    Main function to launch the belief propagation algorithm.

    Builds the model, calibrates a BeliefPropagation engine and prints the
    results of three example queries.
    """
    M = model()
    M.check_model()

    # Belief propagation over the calibrated junction tree.
    bp = BeliefPropagation(M)
    bp.calibrate()
    print("maximal cliques are:")
    print(bp.get_cliques())

    # First query: marginal P(b).
    print("computing probability of B=")
    query1 = bp.query(variables=['b'], show_progress=True)
    print(query1)

    # Second query: P(b) obtained by marginalizing c out of P(b, c).
    print("computing probability of B|C")
    query2 = bp.query(variables=['b', 'c'])
    query2.marginalize(['c'])
    print(query2)

    # Third query: normalized joint over all five variables.
    print("computing joint")
    query3 = bp.query(['a', 'b', 'c', 'd', 'e'])
    query3.normalize()
    print(query3)
def _factors_to_frames(factor_dict):
    """Convert {var: DiscreteFactor} into {var: DataFrame([states, prob])}."""
    frames = {}
    for factor in factor_dict.values():
        node = factor.variables[0]
        frames[node] = pd.DataFrame({
            node: [0, 1],
            "prob": factor.values.tolist()
        })
    return frames


def belief_propagation(evidence: dict, bn: BayesianNetwork):
    """
    Compute prior and posterior marginals for all unobserved nodes.

    evidence: {node: evidence_value, ...}
    bn : our own bayesian network class

    Returns
    -------
    (prior, post) : pair of {node: DataFrame} dicts; each DataFrame has a
    state column [0, 1] and a "prob" column.  The prior is queried with no
    evidence, the posterior with the given evidence.
    """
    network = convert2pgm(bn)
    net_infer = BeliefPropagation(network)
    to_infer = list(set(network.nodes()) - set(evidence.keys()))

    # The two queries differ only in their evidence; the factor-to-frame
    # conversion is shared (previously duplicated inline).
    prior_dist = net_infer.query(variables=to_infer, evidence={}, joint=False)
    post_dist = net_infer.query(variables=to_infer, evidence=evidence,
                                joint=False)

    return _factors_to_frames(prior_dist), _factors_to_frames(post_dist)
Exemple #6
0
def test_find_MAP():
    """MAP calibration on a 3-node MarkovModel (edges x1-x2, x1-x3).

    Modernized from Python 2 print statements to Python 3 (the rest of the
    codebase uses print()); the stray 52-second sleep (debug leftover) was
    removed.
    """
    print('-' * 80)
    G = MarkovModel()
    G.add_nodes_from(['x1', 'x2', 'x3'])
    G.add_edges_from([('x1', 'x2'), ('x1', 'x3')])
    # Two identical pairwise factors with potentials 1, 1/2, 1/3, 1/4.
    phi = [
        DiscreteFactor(['x2', 'x1'],
                       cardinality=[2, 2],
                       values=np.array([[1.0 / 1, 1.0 / 2], [1.0 / 3,
                                                             1.0 / 4]])),
        DiscreteFactor(['x3', 'x1'],
                       cardinality=[2, 2],
                       values=np.array([[1.0 / 1, 1.0 / 2], [1.0 / 3,
                                                             1.0 / 4]]))
    ]
    G.add_factors(*phi)
    print("nodes:", G.nodes())

    bp = BeliefPropagation(G)
    bp.max_calibrate()
    clique_beliefs = bp.get_clique_beliefs()
    print(clique_beliefs)
    print(clique_beliefs[('x1', 'x2')])
    print(clique_beliefs[('x1', 'x3')])
    # NOTE(review): _query is a private pgmpy API; 'maximize' returns the
    # max-product (MAP) factor over the requested variables.
    phi_query = bp._query(['x1', 'x2', 'x3'], operation='maximize')
    print(phi_query)
Exemple #7
0
 def test_map_query_with_evidence(self):
     """MAP assignment of A, R, L given observed J, Q and G."""
     bp = BeliefPropagation(self.bayesian_model)
     observed = {"J": 0, "Q": 1, "G": 0}
     map_query = bp.map_query(["A", "R", "L"], observed)
     self.assertDictEqual(map_query, {"A": 1, "R": 0, "L": 0})
Exemple #8
0
 def test_map_query_with_evidence(self):
     """MAP over A, R and L under fixed J, Q, G."""
     bp = BeliefPropagation(self.bayesian_model)
     result = bp.map_query(['A', 'R', 'L'], {'J': 0, 'Q': 1, 'G': 0})
     self.assertDictEqual(result, {'A': 1, 'R': 0, 'L': 0})
Exemple #9
0
 def test_query_single_variable_with_evidence(self):
     """Marginal of J given A=0 and R=1."""
     bp = BeliefPropagation(self.bayesian_model)
     evidence = {'A': 0, 'R': 1}
     result = bp.query(variables=['J'], evidence=evidence)
     np_test.assert_array_almost_equal(result['J'].values,
                                       np.array([0.60, 0.40]))
Exemple #10
0
 def test_query_single_variable(self):
     """Querying J alone returns its exact marginal factor."""
     bp = BeliefPropagation(self.bayesian_model)
     expected = DiscreteFactor(variables=["J"],
                               cardinality=[2],
                               values=[0.416, 0.584])
     self.assertEqual(bp.query(["J"]), expected)
Exemple #11
0
def get_partition_function_BP(G):
    '''
    Calculate the partition function of G using belief propagation.

    After sum-product calibration every clique belief sums to the
    partition function, so summing any single belief is sufficient.
    '''
    bp = BeliefPropagation(G)
    bp.calibrate()
    clique_beliefs = bp.get_clique_beliefs()
    # dict.values() is not subscriptable in Python 3 (the original
    # `clique_beliefs.values()[0]` raised TypeError); take the first belief.
    first_belief = next(iter(clique_beliefs.values()))
    partition_function = np.sum(first_belief.values)
    return partition_function
Exemple #12
0
 def test_query_multiple_variable(self):
     """Joint factor over J and Q matches the expected table."""
     bp = BeliefPropagation(self.bayesian_model)
     expected = DiscreteFactor(
         variables=["J", "Q"],
         cardinality=[2, 2],
         values=np.array([[0.3744, 0.0416], [0.1168, 0.4672]]),
     )
     self.assertEqual(bp.query(["Q", "J"]), expected)
Exemple #13
0
def find_MAP_val(G):
    '''
    Return the (unnormalized) potential of the MAP assignment of G.

    Inputs:
    - G: MarkovModel
    '''
    bp = BeliefPropagation(G)
    bp.max_calibrate()
    clique_beliefs = bp.get_clique_beliefs()
    # dict.values() is not subscriptable in Python 3 (the original
    # `clique_beliefs.values()[0]` raised TypeError); take the first belief.
    first_belief = next(iter(clique_beliefs.values()))
    map_val = np.max(first_belief.values)
    return map_val
Exemple #14
0
 def test_map_query(self):
     """Unconditional MAP assignment over all six variables."""
     bp = BeliefPropagation(self.bayesian_model)
     expected = dict(A=1, R=1, J=1, Q=1, G=0, L=0)
     self.assertDictEqual(bp.map_query(), expected)
Exemple #15
0
 def test_map_query(self):
     """The most probable joint assignment with no evidence."""
     bp = BeliefPropagation(self.bayesian_model)
     map_query = bp.map_query()
     expected = {"A": 1, "R": 1, "J": 1, "Q": 1, "G": 0, "L": 0}
     self.assertDictEqual(map_query, expected)
Exemple #16
0
    def step(self, adjustment, episode):
        """Apply a probability adjustment, query the model and collect a reward.

        Parameters
        ----------
        adjustment : float
            Amount added to / subtracted from the 'Consciente' state bounds.
        episode : dict
            Raw episode observations; values are mapped through `replacer`.

        Returns
        -------
        (next_state, reward) : reward is read interactively from stdin.
        """
        print('######## Ajustes ########')
        print(adjustment)
        print('######## Episódio atual ########')
        print(episode)

        # dict.iteritems() is Python 2 only; items() works on both.
        replaced_episode = {k: replacer[k][v] for k, v in episode.items()}

        upper_bound = self.state[0] + adjustment
        lower_bound = self.state[1] - adjustment

        if not (upper_bound > 1 or upper_bound < 0):
            state_aware = [upper_bound, lower_bound]

            # Re-fit every CPD except 'Consciente', which is set directly
            # from the adjusted bounds.
            cpds = self._tabular_cpds_to_dict(self.model)
            adjustments = self.fit_probabilities(cpds, adjustment)
            for node in self.model.get_cpds():
                if node.variable != 'Consciente':
                    node.values = self._get_cpd_values(
                        adjustments[node.variable])
                    node.normalize()
                else:
                    node.values = np.array(state_aware)

            for node in self.model.get_cpds():
                print(node)
        else:
            state_aware = [self.state]

        print('######## Consciente ########')
        # Build the inference engine only after the CPDs were updated; the
        # original also built an identical, unused engine at the top.
        bp = BeliefPropagation(self.model)
        print(
            bp.query(['Consciente'], evidence=replaced_episode)['Consciente'])

        reward = float(input('Recompensa entre -1 e 1: '))
        next_state = []
        next_state.append(np.round(state_aware, 2))
        next_state.extend(list(replaced_episode.values()))

        return next_state, reward
Exemple #17
0
def find_MAP_state(G):
    '''
    Return the MAP assignment factor of G via max-product belief propagation.

    Inputs:
    - G: MarkovModel
    '''
    bp = BeliefPropagation(G)
    bp.max_calibrate()
    # Calibration is required before querying; the beliefs themselves are
    # not used directly below.
    clique_beliefs = bp.get_clique_beliefs()
    # NOTE(review): _query is a private pgmpy API.
    phi_query = bp._query(G.nodes(), operation='maximize')
    # Python 2 `print phi_query` converted to the Python 3 function call.
    print(phi_query)
    return phi_query
Exemple #18
0
 def test_query_multiple_variable_with_evidence(self):
     """Marginals of J and Q given A=0, R=0, G=0, L=1."""
     bp = BeliefPropagation(self.bayesian_model)
     observed = {'A': 0, 'R': 0, 'G': 0, 'L': 1}
     result = bp.query(variables=['J', 'Q'], evidence=observed)
     np_test.assert_array_almost_equal(result['J'].values,
                                       np.array([0.818182, 0.181818]))
     np_test.assert_array_almost_equal(result['Q'].values,
                                       np.array([0.772727, 0.227273]))
Exemple #19
0
 def test_query_single_variable_with_evidence(self):
     """Unnormalized factor over J given A=0, R=1."""
     bp = BeliefPropagation(self.bayesian_model)
     result = bp.query(variables=["J"], evidence={"A": 0, "R": 1})
     expected = DiscreteFactor(variables=["J"],
                               cardinality=[2],
                               values=np.array([0.072, 0.048]))
     self.assertEqual(result, expected)
    def map_query(self, targets, evidences, algorithm):
        """Run a MAP query with the chosen inference algorithm.

        Parameters
        ----------
        targets : iterable
            Variables whose MAP assignment is requested.
        evidences : dict
            Observed evidence {variable: value}.
        algorithm : str
            One of "Variable Elimination", "Belief Propagation" or "MPLP".

        Raises
        ------
        ValueError
            If *algorithm* is not a supported name.  (Previously an
            independent `if` chain left `model_infer` unbound and crashed
            with an UnboundLocalError.)
        """
        if algorithm == "Variable Elimination":
            from pgmpy.inference import VariableElimination
            model_infer = VariableElimination(self.model_pgmpy)
        elif algorithm == "Belief Propagation":
            from pgmpy.inference import BeliefPropagation
            model_infer = BeliefPropagation(self.model_pgmpy)
        elif algorithm == "MPLP":
            from pgmpy.inference import Mplp
            # MPLP operates on the equivalent Markov model.
            model_infer = Mplp(self.model_pgmpy.to_markov_model())
        else:
            raise ValueError(
                "Unknown inference algorithm: {!r}".format(algorithm))

        return model_infer.map_query(variables=list(targets),
                                     evidence=evidences)
Exemple #21
0
    def _pure_spe_finder(self) -> List[defaultdict]:
        """Find all pure-strategy subgame-perfect NE (acyclic relevance graph).

        - first initialises the maid with uniform random conditional
          probability distributions at every decision.
        - then fills up a queue with trees containing each solution
        - the queue will contain only one entry (tree) if there's only one
          pure strategy subgame perfect NE
        """
        # Impute a random fully mixed policy to all decision nodes.
        for dec in self.all_decision_nodes:
            self.impute_random_decision(dec)

        bp = BeliefPropagation(self)
        # (removed a leftover debug print of type(bp))
        queue = self._instantiate_initial_tree()

        # Repeatedly reduce the tree until the stopping condition holds.
        while not self._stopping_condition(queue):
            queue = self._reduce_tree_once(queue, bp)
        return queue
Exemple #22
0
 def test_query_multiple_variable_with_evidence(self):
     """Unnormalized joint factor of J, Q under full evidence."""
     bp = BeliefPropagation(self.bayesian_model)
     result = bp.query(variables=["J", "Q"],
                       evidence={"A": 0, "R": 0, "G": 0, "L": 1})
     expected = DiscreteFactor(
         variables=["J", "Q"],
         cardinality=[2, 2],
         values=np.array([[0.003888, 0.000432], [0.000192, 0.000768]]),
     )
     self.assertEqual(result, expected)
Exemple #23
0
    def test_max_calibrate_clique_belief(self):
        """Clique beliefs after max-calibration match hand-computed ones."""
        bp = BeliefPropagation(self.junction_tree)
        bp.max_calibrate()
        clique_belief = bp.get_clique_beliefs()

        phi1 = DiscreteFactor(["A", "B"], [2, 3], range(6))
        phi2 = DiscreteFactor(["B", "C"], [3, 2], range(6))
        phi3 = DiscreteFactor(["C", "D"], [2, 2], range(4))

        # Shared "messages" obtained by maximizing out the far variables.
        phi3_max_D = phi3.maximize(["D"], inplace=False)
        phi1_max_A = phi1.maximize(["A"], inplace=False)

        b_A_B = phi1 * (phi3_max_D * phi2).maximize(["C"], inplace=False)
        b_B_C = phi2 * (phi1_max_A * phi3_max_D)
        b_C_D = phi3 * (phi1_max_A * phi2).maximize(["B"], inplace=False)

        for clique, expected in ((("A", "B"), b_A_B),
                                 (("B", "C"), b_B_C),
                                 (("C", "D"), b_C_D)):
            np_test.assert_array_almost_equal(clique_belief[clique].values,
                                              expected.values)
Exemple #24
0
    def test_max_calibrate_sepset_belief(self):
        """Sepset beliefs after max-calibration match hand-computed ones."""
        bp = BeliefPropagation(self.junction_tree)
        bp.max_calibrate()
        sepset_belief = bp.get_sepset_beliefs()

        phi1 = DiscreteFactor(["A", "B"], [2, 3], range(6))
        phi2 = DiscreteFactor(["B", "C"], [3, 2], range(6))
        phi3 = DiscreteFactor(["C", "D"], [2, 2], range(4))

        # Shared "messages" obtained by maximizing out the far variables.
        phi3_max_D = phi3.maximize(["D"], inplace=False)
        phi1_max_A = phi1.maximize(["A"], inplace=False)

        b_B = (phi1 * (phi3_max_D * phi2).maximize(
            ["C"], inplace=False)).maximize(["A"], inplace=False)
        b_C = (phi2 * (phi1_max_A * phi3_max_D)).maximize(["B"],
                                                          inplace=False)

        np_test.assert_array_almost_equal(
            sepset_belief[frozenset((("A", "B"), ("B", "C")))].values,
            b_B.values)
        np_test.assert_array_almost_equal(
            sepset_belief[frozenset((("B", "C"), ("C", "D")))].values,
            b_C.values)
Exemple #25
0
    def test_max_calibrate_clique_belief(self):
        """Max-calibrated clique beliefs equal the analytic max-products."""
        bp = BeliefPropagation(self.junction_tree)
        bp.max_calibrate()
        clique_belief = bp.get_clique_beliefs()

        phi1 = Factor(['A', 'B'], [2, 3], range(6))
        phi2 = Factor(['B', 'C'], [3, 2], range(6))
        phi3 = Factor(['C', 'D'], [2, 2], range(4))

        # Messages towards the middle clique, reused below.
        msg_from_CD = phi3.maximize(['D'], inplace=False)
        msg_from_AB = phi1.maximize(['A'], inplace=False)

        b_A_B = phi1 * (msg_from_CD * phi2).maximize(['C'], inplace=False)
        b_B_C = phi2 * (msg_from_AB * msg_from_CD)
        b_C_D = phi3 * (msg_from_AB * phi2).maximize(['B'], inplace=False)

        for clique, expected in [(('A', 'B'), b_A_B), (('B', 'C'), b_B_C),
                                 (('C', 'D'), b_C_D)]:
            np_test.assert_array_almost_equal(clique_belief[clique].values,
                                              expected.values)
Exemple #26
0
    def test_max_calibrate_sepset_belief(self):
        """Max-calibrated sepset beliefs equal the analytic max-marginals."""
        bp = BeliefPropagation(self.junction_tree)
        bp.max_calibrate()
        sepset_belief = bp.get_sepset_beliefs()

        phi1 = Factor(['A', 'B'], [2, 3], range(6))
        phi2 = Factor(['B', 'C'], [3, 2], range(6))
        phi3 = Factor(['C', 'D'], [2, 2], range(4))

        # Messages towards the middle clique, reused below.
        msg_from_CD = phi3.maximize(['D'], inplace=False)
        msg_from_AB = phi1.maximize(['A'], inplace=False)

        b_B = (phi1 * (msg_from_CD * phi2).maximize(
            ['C'], inplace=False)).maximize(['A'], inplace=False)
        b_C = (phi2 * (msg_from_AB * msg_from_CD)).maximize(['B'],
                                                            inplace=False)

        np_test.assert_array_almost_equal(
            sepset_belief[frozenset((('A', 'B'), ('B', 'C')))].values,
            b_B.values)
        np_test.assert_array_almost_equal(
            sepset_belief[frozenset((('B', 'C'), ('C', 'D')))].values,
            b_C.values)
Exemple #27
0
def gen_env_model():
    """Specify the BBN for craft failure and return a BP engine over it."""
    # Root (prior) nodes: wind speed/angle and wave height/direction.
    cpd_tws = TabularCPD('TWS', 2, values=[[0.8, 0.2]])
    cpd_twa = TabularCPD('TWA', 2, values=[[0.8, 0.2]])
    cpd_wh = TabularCPD('WH', 2, values=[[0.8, 0.2]])
    cpd_wd = TabularCPD('WD', 2, values=[[0.8, 0.2]])

    # Wind node: currently the "min" failure-rate table.
    # (alternative, commented in the original:
    #  [[1, 0.1, 0.1, 0.0], [0.0, 0.9, 0.9, 1.0]])
    cpd_wind = TabularCPD(
        'Wind', 2,
        values=[[1, 0.999, 0.999, 0.998], [0.0, 0.001, 0.001, 0.002]],
        evidence=['TWA', 'TWS'],
        evidence_card=[2, 2])

    # Waves node: normal values.
    # (alternative "min failure" table, commented in the original:
    #  [[1, 0.999, 0.999, 0.998], [0.0, 0.001, 0.001, 0.002]])
    cpd_waves = TabularCPD(
        'Waves', 2,
        values=[[1, 0.1, 0.1, 0.0], [0.0, 0.9, 0.9, 1.0]],
        evidence=['WH', 'WD'],
        evidence_card=[2, 2])

    cpd_fail = TabularCPD(
        'Craft failure', 2,
        values=[[1.0, 0.1, 0.1, 0.0], [0.0, 0.9, 0.9, 1.0]],
        evidence=['Waves', 'Wind'],
        evidence_card=[2, 2])

    model = BayesianModel([('TWS', 'Wind'), ('TWA', 'Wind'), ('WH', 'Waves'),
                           ('WD', 'Waves'), ('Waves', 'Craft failure'),
                           ('Wind', 'Craft failure')])
    model.add_cpds(cpd_tws, cpd_twa, cpd_wind, cpd_wh, cpd_wd, cpd_waves,
                   cpd_fail)
    return BeliefPropagation(model)
Exemple #28
0
    def backward_inference(self, variables, evidence=None):
        """
        Backward inference method using belief propagation.

        Runs a forward pass first (via ``forward_inference``) to obtain
        clique potentials, then sweeps backwards from the latest time slice
        to slice 0, updating beliefs as it goes.

        Parameters:
        ----------
        variables: list
            list of (name, time_slice) tuples for which you want to compute
            the probability
        evidence: dict
            a dict key, value pair as {var: state_of_var_observed}
            None if no evidence

        Returns a dict mapping each queried (name, time_slice) tuple to its
        DiscreteFactor.

        Examples:
        --------
        >>> from pgmpy.factors.discrete import TabularCPD
        >>> from pgmpy.models import DynamicBayesianNetwork as DBN
        >>> from pgmpy.inference import DBNInference
        >>> dbnet = DBN()
        >>> dbnet.add_edges_from([(('Z', 0), ('X', 0)), (('X', 0), ('Y', 0)),
        ...                       (('Z', 0), ('Z', 1))])
        >>> z_start_cpd = TabularCPD(('Z', 0), 2, [[0.5, 0.5]])
        >>> x_i_cpd = TabularCPD(('X', 0), 2, [[0.6, 0.9],
        ...                                    [0.4, 0.1]],
        ...                      evidence=[('Z', 0)],
        ...                      evidence_card=[2])
        >>> y_i_cpd = TabularCPD(('Y', 0), 2, [[0.2, 0.3],
        ...                                    [0.8, 0.7]],
        ...                      evidence=[('X', 0)],
        ...                      evidence_card=[2])
        >>> z_trans_cpd = TabularCPD(('Z', 1), 2, [[0.4, 0.7],
        ...                                        [0.6, 0.3]],
        ...                      evidence=[('Z', 0)],
        ...                      evidence_card=[2])
        >>> dbnet.add_cpds(z_start_cpd, z_trans_cpd, x_i_cpd, y_i_cpd)
        >>> dbnet.initialize_initial_state()
        >>> dbn_inf = DBNInference(dbnet)
        >>> dbn_inf.backward_inference([('X', 0)], {('Y', 0):0, ('Y', 1):1, ('Y', 2):1})[('X', 0)].values
        array([ 0.66594382,  0.33405618])
        """
        # Group query variables by the time slice they belong to
        # (variables are (name, time_slice) tuples).
        variable_dict = defaultdict(list)
        for var in variables:
            variable_dict[var[1]].append(var)
        # The sweep must reach the latest slice mentioned by either the
        # query variables or the evidence.
        time_range = max(variable_dict)
        interface_nodes_dict = {}
        if evidence:
            evid_time_range = max(
                [time_slice for var, time_slice in evidence.keys()])
            time_range = max(time_range, evid_time_range)
        # BP engine over the slice-0 junction tree, used at the very end.
        end_bp = BeliefPropagation(self.start_junction_tree)
        # Forward pass supplies the per-slice clique potentials.
        potential_dict = self.forward_inference(variables, evidence,
                                                'potential')
        update_factor = self._shift_factor(potential_dict[time_range], 1)
        factor_values = {}

        # Backward sweep: from the last time slice down to slice 1.
        for time_slice in range(time_range, 0, -1):
            evidence_time = self._get_evidence(evidence, time_slice, 1)
            evidence_prev_time = self._get_evidence(evidence, time_slice - 1,
                                                    0)
            if evidence_prev_time:
                # Keep only previous-slice evidence on interface nodes; it
                # carries over into the current 1.5-slice tree.
                interface_nodes_dict = {
                    k: v
                    for k, v in evidence_prev_time.items()
                    if k in self.interface_nodes_0
                }
            if evidence_time:
                evidence_time.update(interface_nodes_dict)
            # Fresh BP engine over the 1.5-slice junction tree for this step.
            mid_bp = BeliefPropagation(self.one_and_half_junction_tree)
            self._update_belief(mid_bp, self.in_clique,
                                potential_dict[time_slice - 1])
            forward_factor = self._shift_factor(potential_dict[time_slice], 1)
            self._update_belief(mid_bp, self.out_clique, forward_factor,
                                update_factor)

            if variable_dict[time_slice]:
                # Query this slice's variables (shifted into the 1.5-slice
                # tree's namespace), then shift results back to the
                # caller's (name, time_slice) keys.
                variable_time = self._shift_nodes(variable_dict[time_slice], 1)
                new_values = mid_bp.query(variable_time,
                                          evidence=evidence_time)
                changed_values = {}
                for key in new_values.keys():
                    new_key = (key[0], time_slice)
                    new_factor = DiscreteFactor([new_key],
                                                new_values[key].cardinality,
                                                new_values[key].values)
                    changed_values[new_key] = new_factor
                factor_values.update(changed_values)

            # Backward message for the next (earlier) iteration: the belief
            # marginalized onto the slice-0 interface nodes.
            clique_phi = self._get_factor(mid_bp, evidence_time)
            in_clique_phi = self._marginalize_factor(self.interface_nodes_0,
                                                     clique_phi)
            update_factor = self._shift_factor(in_clique_phi, 1)

        # Finally handle time slice 0 on the start junction tree.
        out_clique_phi = self._shift_factor(update_factor, 0)
        self._update_belief(end_bp, self.start_interface_clique,
                            potential_dict[0], out_clique_phi)
        evidence_0 = self._get_evidence(evidence, 0, 0)
        if variable_dict[0]:
            factor_values.update(end_bp.query(variable_dict[0], evidence_0))
        return factor_values
Exemple #29
0
            ND_Sample_NS = ND_Sample_NS[ND_Sample_NS[i_switch[jj]] == state[ii]
                                        [jj]]
        if len(ND_Sample_NS) != 0:
            P2.append(sum(ND_Sample_NS[i]) / len(ND_Sample_NS))
            P1.append(1 - sum(ND_Sample_NS[i]) / len(ND_Sample_NS))
        else:
            P2.append(0)
            P1.append(1)
    CPD = TabularCPD(i,
                     2, [P1, P2],
                     evidence=i_switch,
                     evidence_card=[2] * len(i_switch))
    PPN_N.add_cpds(CPD)

error = 0
bp_N = BeliefPropagation(PPN_N)

# Determine the observed and hidden variables, and compute the error of the BP inference results
for i in range(0, 500):
    ITEM = random.sample(ND_Sample_N.index.tolist(), 1)
    #     vbs = ['SL32','SC22','SF1','SC33','SF2']
    vbs = ['SC33', 'SF2', 'SL34', 'SF3', 'SF4', 'SF5', 'SC44']
    #     VBS = {'SL32':int(ND_Sample_N['SL32'][ITEM]),'SC22':int(ND_Sample_N['SC22'][ITEM]),'SF1':int(ND_Sample_N['SF1'][ITEM]),
    #            'SC33':int(ND_Sample_N['SC33'][ITEM]),'SF2':int(ND_Sample_N['SF2'][ITEM])}
    #     EDS = {'CP2B21':int(ND_Sample_N['CP2B21'][ITEM]),'CB22B21':int(ND_Sample_N['CB22B21'][ITEM]),
    #            'CB32B22':int(ND_Sample_N['CB32B22'][ITEM]),'CB32B31':int(ND_Sample_N['CB32B31'][ITEM]),
    #            'CP3B31':int(ND_Sample_N['CP3B31'][ITEM])}
    VBS = {
        'SC33': int(ND_Sample_N['SC33'][ITEM]),
        'SF2': int(ND_Sample_N['SF2'][ITEM]),
        'SL34': int(ND_Sample_N['SL34'][ITEM]),
Exemple #30
0
from pgmpy.factors.discrete import TabularCPD
from pgmpy.models import BayesianModel
from pgmpy.inference import BeliefPropagation

# Example network: A and R influence J; J drives Q and (with G) L.
bayesian_model = BayesianModel([('A', 'J'), ('R', 'J'), ('J', 'Q'), ('J', 'L'),
                                ('G', 'L')])

# Conditional probability tables for every node.
cpd_a = TabularCPD('A', 2, [[0.2], [0.8]])
cpd_r = TabularCPD('R', 2, [[0.4], [0.6]])
cpd_g = TabularCPD('G', 2, [[0.6], [0.4]])
cpd_j = TabularCPD('J', 2, [[0.9, 0.6, 0.7, 0.1], [0.1, 0.4, 0.3, 0.9]],
                   ['R', 'A'], [2, 2])
cpd_q = TabularCPD('Q', 2, [[0.9, 0.2], [0.1, 0.8]], ['J'], [2])
cpd_l = TabularCPD('L', 2, [[0.9, 0.45, 0.8, 0.1], [0.1, 0.55, 0.2, 0.9]],
                   ['G', 'J'], [2, 2])

bayesian_model.add_cpds(cpd_a, cpd_r, cpd_j, cpd_q, cpd_l, cpd_g)

# MAP assignment of J and Q under the given evidence.
belief_propagation = BeliefPropagation(bayesian_model)
observed = {'A': 0, 'R': 0, 'G': 0, 'L': 1}
print(belief_propagation.map_query(variables=['J', 'Q'], evidence=observed))