def main():
    """
    Main function to launch the belief propagation algorithm.
    """

    M = model()
    M.check_model()

    #Belief propagation
    bp = BeliefPropagation(M)
    bp.calibrate()
    print("maximal cliques are:")
    print(bp.get_cliques())

    # First query
    print("computing the probability of B")
    query1 = bp.query(variables=['b'], show_progress=True)
    print(query1)

    # Second query
    print("computing the probability of B by marginalizing C out of the joint P(B, C)")
    query2 = bp.query(variables=['b', 'c'])
    query2.marginalize(['c'])
    print(query2)

    # Third query
    print("computing the joint distribution over A, B, C, D, E")
    query3 = bp.query(['a', 'b', 'c', 'd', 'e'])
    query3.normalize()
    print(query3)


def belief_propagation(evidence: dict, bn: BayesianNetwork):
    """
    evidence: {node: evidence_value, ...}
    bn: an instance of our own BayesianNetwork class
    """
    network = convert2pgm(bn)
    net_infer = BeliefPropagation(network)
    to_infer = list(set(network.nodes()) - set(evidence.keys()))

    prior = {}
    prior_dist = net_infer.query(variables=to_infer, evidence={}, joint=False)
    for factor in prior_dist.values():
        node = factor.variables[0]
        values = factor.values.tolist()
        # assumes binary nodes whose states are labelled 0 and 1
        prior[node] = pd.DataFrame({
            node: [0, 1],
            "prob": values
        })

    post = {}
    post_dist = net_infer.query(variables=to_infer,
                                evidence=evidence,
                                joint=False)
    for factor in post_dist.values():
        node = factor.variables[0]
        values = factor.values.tolist()
        post[node] = pd.DataFrame({
            node: [0, 1],
            "prob": values
        })

    return prior, post
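
# A minimal usage sketch (assumptions: `bn` is an instance of the project's own
# BayesianNetwork class that convert2pgm() can translate into a pgmpy model, and
# 'rain' and 'sprinkler' are hypothetical binary node names):
#
#   prior, post = belief_propagation(evidence={'rain': 1}, bn=bn)
#   print(prior['sprinkler'])   # P(sprinkler) with no evidence, as a two-row DataFrame
#   print(post['sprinkler'])    # P(sprinkler | rain = 1)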
Example 3
 def test_query_multiple_variable(self):
     belief_propagation = BeliefPropagation(self.bayesian_model)
     query_result = belief_propagation.query(['Q', 'J'])
     np_test.assert_array_almost_equal(query_result['J'].values,
                                       np.array([0.416, 0.584]))
     np_test.assert_array_almost_equal(query_result['Q'].values,
                                       np.array([0.4912, 0.5088]))
Example 5
 def test_query_multiple_variable_with_evidence(self):
     belief_propagation = BeliefPropagation(self.bayesian_model)
     query_result = belief_propagation.query(variables=['J', 'Q'],
                                             evidence={'A': 0, 'R': 0,
                                                       'G': 0, 'L': 1})
     np_test.assert_array_almost_equal(query_result['J'].values,
                                       np.array([0.818182, 0.181818]))
     np_test.assert_array_almost_equal(query_result['Q'].values,
                                       np.array([0.772727, 0.227273]))
Example 6
 def test_query_single_variable(self):
     belief_propagation = BeliefPropagation(self.bayesian_model)
     query_result = belief_propagation.query(["J"])
     self.assertEqual(
         query_result,
         DiscreteFactor(variables=["J"],
                        cardinality=[2],
                        values=[0.416, 0.584]),
     )
Example 7
 def test_query_single_variable_with_evidence(self):
     belief_propagation = BeliefPropagation(self.bayesian_model)
     query_result = belief_propagation.query(variables=['J'],
                                             evidence={
                                                 'A': 0,
                                                 'R': 1
                                             })
     np_test.assert_array_almost_equal(query_result['J'].values,
                                       np.array([0.60, 0.40]))
Example 8
 def test_query_multiple_variable(self):
     belief_propagation = BeliefPropagation(self.bayesian_model)
     query_result = belief_propagation.query(["Q", "J"])
     self.assertEqual(
         query_result,
         DiscreteFactor(
             variables=["J", "Q"],
             cardinality=[2, 2],
             values=np.array([[0.3744, 0.0416], [0.1168, 0.4672]]),
         ),
     )
Example 9
 def test_query_single_variable_with_evidence(self):
     belief_propagation = BeliefPropagation(self.bayesian_model)
     query_result = belief_propagation.query(variables=["J"],
                                             evidence={
                                                 "A": 0,
                                                 "R": 1
                                             })
     self.assertEqual(
         query_result,
         DiscreteFactor(variables=["J"],
                        cardinality=[2],
                        values=np.array([0.072, 0.048])),
     )
Example 10
 def test_query_multiple_variable_with_evidence(self):
     belief_propagation = BeliefPropagation(self.bayesian_model)
     query_result = belief_propagation.query(variables=['J', 'Q'],
                                             evidence={
                                                 'A': 0,
                                                 'R': 0,
                                                 'G': 0,
                                                 'L': 1
                                             })
     np_test.assert_array_almost_equal(query_result['J'].values,
                                       np.array([0.818182, 0.181818]))
     np_test.assert_array_almost_equal(query_result['Q'].values,
                                       np.array([0.772727, 0.227273]))
Example 11
 def test_query_multiple_variable_with_evidence(self):
     belief_propagation = BeliefPropagation(self.bayesian_model)
     query_result = belief_propagation.query(variables=["J", "Q"],
                                             evidence={
                                                 "A": 0,
                                                 "R": 0,
                                                 "G": 0,
                                                 "L": 1
                                             })
     self.assertEqual(
         query_result,
         DiscreteFactor(
             variables=["J", "Q"],
             cardinality=[2, 2],
             values=np.array([[0.003888, 0.000432], [0.000192, 0.000768]]),
         ),
     )
Example 12
    def step(self, adjustment, episode):
        print('######## Adjustments ########')
        print(adjustment)
        print('######## Current episode ########')
        print(episode)

        bp = BeliefPropagation(self.model)
        replaced_episode = {k: replacer[k][v] for k, v in episode.items()}

        upper_bound = self.state[0] + adjustment
        lower_bound = self.state[1] - adjustment

        if 0 <= upper_bound <= 1:
            state_aware = [upper_bound, lower_bound]

            cpds = self._tabular_cpds_to_dict(self.model)
            adjustments = self.fit_probabilities(cpds, adjustment)
            for node in self.model.get_cpds():
                if node.variable != 'Consciente':
                    node.values = self._get_cpd_values(
                        adjustments[node.variable])
                    node.normalize()
                else:
                    node.values = np.array(state_aware)

            for node in self.model.get_cpds():
                print(node)
        else:
            state_aware = [self.state]

        print('######## Consciente ########')
        bp = BeliefPropagation(self.model)
        print(
            bp.query(['Consciente'], evidence=replaced_episode)['Consciente'])

        reward = float(input('Reward between -1 and 1: '))
        next_state = []
        next_state.append(np.round(state_aware, 2))
        next_state.extend(list(replaced_episode.values()))

        return next_state, reward
Example 13
    def _get_ev(self, dec_instantiation: Tuple[int], row: int,
                bp: BeliefPropagation) -> float:
        """Return the expected value of a certain decision node instantiation
        for the agent making the decision"""
        macid = self.copy_without_cpds()
        dec_list = macid.get_valid_acyclic_dec_node_ordering()
        dec = dec_list[row]

        agent = self.whose_node[dec]  # gets the agent making that decision
        # gets the utility nodes for that agent
        utils = self.utility_nodes_agent[agent]
        factor = bp.query(variables=utils,
                          evidence=dict(zip(dec_list, dec_instantiation)))

        ev = 0
        # expected value: sum P(state) * state_index over the joint states of the
        # utility nodes, i.e. each utility node's state value serves as its utility
        for idx, prob in np.ndenumerate(factor.values):
            # account for each agent having multiple utility nodes
            for i in range(len(utils)):
                if prob != 0:
                    ev += prob * idx[i]
        return ev
Example 14
                    values=[[0.01], [0.99]])
cpd_do = TabularCPD(variable='dog_out',
                    variable_card=2,
                    values=[[0.99, 0.9, 0.97, 0.3], [0.01, 0.1, 0.03, 0.7]],
                    evidence=['family_out', 'bowel_problem'],
                    evidence_card=[2, 2])
cpd_lo = TabularCPD(variable='light_on',
                    variable_card=2,
                    values=[[0.6, 0.05], [0.4, 0.95]],
                    evidence=['family_out'],
                    evidence_card=[2])
cpd_hb = TabularCPD(variable='hear_bark',
                    variable_card=2,
                    values=[[0.7, 0.01], [0.3, 0.99]],
                    evidence=['dog_out'],
                    evidence_card=[2])

#integrity checking
model.add_cpds(cpd_fo, cpd_bp, cpd_do, cpd_lo, cpd_hb)
model.check_model()

junction_tree = model.to_junction_tree()
print(junction_tree.nodes())

infer_bp = BeliefPropagation(junction_tree)
print(
    infer_bp.query(['family_out'], evidence={
        'light_on': 0,
        'hear_bark': 1
    })['family_out'])
Example 15
    def compute_pk(self, type_list, fid):
        assert len(type_list) == 5, "ComputePk Error: number of type_list should be 5"

        constraint_name = ['m', 'r', 's', 'd', 'v']
        '''
        m, r, s, d, v = type_list
        p_m, p_r, p_s, p_d, p_v = self.p_observation
        p_ktox, p_xtok = self.p_implication
        p_ktom, p_ktor, p_ktos, p_ktod, p_ktov = p_ktox
        p_mtok, p_rtok, p_stok, p_dtok, p_vtok = p_xtok
        '''
        fg = FactorGraph()
        fg.add_node('k')

        for i in range(len(type_list)):
            if type_list[i] == 0:
                fg = self.add_constraints_k2x_x2k(fg, self.p_observation[fid][i], self.p_implication[fid][0][i], self.p_implication[fid][1][i], constraint_name[i])
            elif type_list[i] == 1:
                fg = self.add_constraints_k2x(fg, self.p_observation[fid][i], self.p_implication[fid][0][i], constraint_name[i])
            elif type_list[i] == 2:
                fg = self.add_constraints_x2k(fg, self.p_observation[fid][i], self.p_implication[fid][1][i], constraint_name[i])
        '''
        if m == 0:
            fg = add_constraints_kv_vk(fg, p_m, p_ktom, p_mtok, 'm')
        elif m == 1:
            fg = add_constraints_kv(fg, p_m, p_mtok, 'm')
        elif m == 2:
            fg = add_constraints_vk(fg, p_m, p_mtok, 'm')

        if r == 0:
            fg = add_constraints_kv_vk(fg, p_r, p_ktor, p_rtok, 'r')
        elif r == 1:
            fg = add_constraints_kv(fg, p_r, p_ktor, 'r')
        elif r == 2:
            fg = add_constraints_vk(fg, p_r, p_rtok, 'r')

        if s == 0:
            fg = add_constraints_kv_vk(fg, p_s, p_ktos, p_stok, 's')
        elif s == 1:
            fg = add_constraints_kv(fg, p_s, p_ktos, 's')
        elif s == 2:
            fg = add_constraints_vk(fg, p_s, p_stok, 's')

        if d == 0:
            fg = add_constraints_kv_vk(fg, p_d, p_ktod, p_dtok, 'd')
        elif d == 1:
            fg = add_constraints_kv(fg, p_d, p_ktod, 'd')
        elif d == 2:
            fg = add_constraints_vk(fg, p_d, p_dtok, 'd')

        if v == 0:
            fg = add_constraints_kv_vk(fg, p_v, p_ktov, p_vtok, 'v')
        elif v == 1:
            fg = add_constraints_kv(fg, p_v, p_ktov, 'v')
        elif v == 2:
            fg = add_constraints_vk(fg, p_v, p_vtok, 'v')
        '''

        bp = BeliefPropagation(fg)

        #result = bp.query(variables=['k'])['k']
        #result = bp.query(variables=['k'], joint=False)['k']
        result = bp.query(variables=['k'])
        result.normalize()
        #print(result)

        return result.values[1]
Example 16
PGM.add_edges_from([('w1', 'w2'), ('w2', 'w3')])
tr_matrix = np.array([1, 2, 3, 10, 1, 3, 3, 5, 2]).reshape(3, 3).T.reshape(-1)
phi = [DiscreteFactor(edge, [3, 3], tr_matrix) for edge in PGM.edges()]
print(phi[0])
print(phi[1])
PGM.add_factors(*phi)

# Calculate the partition function
Z = PGM.get_partition_function()
print('The partition function is:', Z)

# Calibrate the clique tree
belief_propagation = BeliefPropagation(PGM)
belief_propagation.calibrate()

# Output calibration result, which you should get
query = belief_propagation.query(variables=['w2'])
print('After calibration you should get the following mu(S):\n', query * Z)

# Get marginal distribution over third word
query = belief_propagation.query(variables=['w3'])
print('Marginal distribution over the third word is:\n', query)

#Get conditional distribution over third word
query = belief_propagation.query(variables=['w3'],
                                 evidence={'w1': 0})  # 0 stands for "noun"
print(
    'Conditional distribution over the third word, given that the first word is a noun, is:\n',
    query)
Example 17
    def backward_inference(self, variables, evidence=None):
        """
        Backward inference method using belief propagation.

        Parameters:
        ----------
        variables: list
            list of variables for which you want to compute the probability
        evidence: dict
            a dict key, value pair as {var: state_of_var_observed}
            None if no evidence

        Examples:
        --------
        >>> from pgmpy.factors import TabularCPD
        >>> from pgmpy.models import DynamicBayesianNetwork as DBN
        >>> from pgmpy.inference import DBNInference
        >>> dbnet = DBN()
        >>> dbnet.add_edges_from([(('Z', 0), ('X', 0)), (('X', 0), ('Y', 0)),
        ...                       (('Z', 0), ('Z', 1))])
        >>> z_start_cpd = TabularCPD(('Z', 0), 2, [[0.5, 0.5]])
        >>> x_i_cpd = TabularCPD(('X', 0), 2, [[0.6, 0.9],
        ...                                    [0.4, 0.1]],
        ...                      evidence=[('Z', 0)],
        ...                      evidence_card=2)
        >>> y_i_cpd = TabularCPD(('Y', 0), 2, [[0.2, 0.3],
        ...                                    [0.8, 0.7]],
        ...                      evidence=[('X', 0)],
        ...                      evidence_card=2)
        >>> z_trans_cpd = TabularCPD(('Z', 1), 2, [[0.4, 0.7],
        ...                                        [0.6, 0.3]],
        ...                      evidence=[('Z', 0)],
        ...                      evidence_card=2)
        >>> dbnet.add_cpds(z_start_cpd, z_trans_cpd, x_i_cpd, y_i_cpd)
        >>> dbnet.initialize_initial_state()
        >>> dbn_inf = DBNInference(dbnet)
        >>> dbn_inf.backward_inference([('X', 0)], {('Y', 0):0, ('Y', 1):1, ('Y', 2):1})[('X', 0)].values
        array([ 0.66594382,  0.33405618])
        """
        variable_dict = defaultdict(list)
        for var in variables:
            variable_dict[var[1]].append(var)
        time_range = max(variable_dict)
        interface_nodes_dict = {}
        if evidence:
            evid_time_range = max([time_slice for var, time_slice in evidence.keys()])
            time_range = max(time_range, evid_time_range)
        end_bp = BeliefPropagation(self.start_junction_tree)
        potential_dict = self.forward_inference(variables, evidence, 'potential')
        update_factor = self._shift_factor(potential_dict[time_range], 1)
        factor_values = {}

        for time_slice in range(time_range, 0, -1):
            evidence_time = self._get_evidence(evidence, time_slice, 1)
            evidence_prev_time = self._get_evidence(evidence, time_slice - 1, 0)
            if evidence_prev_time:
                interface_nodes_dict = {k: v for k, v in evidence_prev_time.items() if k in self.interface_nodes_0}
            if evidence_time:
                evidence_time.update(interface_nodes_dict)
            mid_bp = BeliefPropagation(self.one_and_half_junction_tree)
            self._update_belief(mid_bp, self.in_clique, potential_dict[time_slice - 1])
            forward_factor = self._shift_factor(potential_dict[time_slice], 1)
            self._update_belief(mid_bp, self.out_clique, forward_factor, update_factor)

            if variable_dict[time_slice]:
                variable_time = self._shift_nodes(variable_dict[time_slice], 1)
                new_values = mid_bp.query(variable_time, evidence=evidence_time)
                changed_values = {}
                for key in new_values.keys():
                    new_key = (key[0], time_slice)
                    new_factor = Factor([new_key], new_values[key].cardinality, new_values[key].values)
                    changed_values[new_key] = new_factor
                factor_values.update(changed_values)

            clique_phi = self._get_factor(mid_bp, evidence_time)
            in_clique_phi = self._marginalize_factor(self.interface_nodes_0, clique_phi)
            update_factor = self._shift_factor(in_clique_phi, 1)

        out_clique_phi = self._shift_factor(update_factor, 0)
        self._update_belief(end_bp, self.start_interface_clique, potential_dict[0], out_clique_phi)
        evidence_0 = self._get_evidence(evidence, 0, 0)
        if variable_dict[0]:
            factor_values.update(end_bp.query(variable_dict[0], evidence_0))
        return factor_values
Example 18
#------------------Calculate inference using Mplp algorithm--------------------

mplp = Mplp(mark)
mplp.find_triangles()
mplp.map_query()

#infer1 = BayesianModelSampling(mark)
#evidence1 = [State('y',1)]
#sample1 = infer1.forward_sample(5)
#sample1

#---------Calculate inference using Belief Propagation and Variable Elimination and answer corresponding queries-------------

belief_prop = BeliefPropagation(mark)
bp1 = belief_prop.query(variables=['y'], evidence={'marital': 2, 'default': 0})
print(bp1['y'])

infer = VariableElimination(mark)
phi_query = infer.query(variables=['y'], evidence={'marital': 0, 'default': 0})
print(phi_query['y'])

bp2 = belief_prop.query(variables=['y'],
                        evidence={
                            'education': 2,
                            'housing': 1
                        })
print(bp2['y'])

infer = VariableElimination(mark)
phi_query = infer.query(variables=['y'],
Example 19
                      columns=['A', 'B', 'C', 'D', 'E'])

print(values)
model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
model.fit(values)
ve = VariableElimination(model)
bp = BeliefPropagation(model)

start = time.time()
ve_result = ve.query(['B'], evidence={'A': 0})
end = time.time()
print(ve_result['B'])
print("time: {}".format(end - start))

start = time.time()
bp_result = bp.query(['B'], evidence={'A': 0})
'''
if 'B' in bp_result:
    if 'B_0' in bp_result['B']:
        print("yes")
    else:
        print("no")
'''

print(bp_result['B'].variables)
print(bp_result['B'].values)
'''for key, value in bp_result.items():
    print value.keys()[0]
   '''
'''
end = time.time()
Example 20
 def test_query_single_variable_with_evidence(self):
     belief_propagation = BeliefPropagation(self.bayesian_model)
     query_result = belief_propagation.query(variables=['J'],
                                             evidence={'A': 0, 'R': 1})
     np_test.assert_array_almost_equal(query_result['J'].values,
                                       np.array([0.60, 0.40]))
Example 21
train = data[1:600]

x = Mental_health_model.fit(train, estimator=MaximumLikelihoodEstimator)

#for cpd in Mental_health_model.get_cpds():
#   print(cpd)

# In[2]:

# In[3]:

belief_prop = BeliefPropagation(Mental_health_model)

# In[5]:

bp1 = belief_prop.query(variables=['leave', 'wellness_program'],
                        evidence={'tech_company': 0})
print(bp1['leave'])
print(bp1['wellness_program'])

# In[6]:

bp2 = belief_prop.query(variables=['treatment'],
                        evidence={
                            'Age': 1,
                            'Gender': 0,
                            'family_history': 1
                        })
print(bp2['treatment'])

# In[7]:
Example 22

    def markov_inference(dict_of_esp_jointprob):
        """Calculate the markov model """
        factor_mmb_cmma = Factor(variables=['money_market_bonus', 'collateral_mma'],
                             cardinality=[2, 2],
                             values=[dict_of_esp_jointprob['mmb0_cmma0'], dict_of_esp_jointprob['mmb0_cmma1'],
                                    dict_of_esp_jointprob['mmb1_cmma0'], dict_of_esp_jointprob['mmb1_cmma1']])
        factor_mmb_cm = Factor(variables=['money_market_bonus', 'cash_management'],
                            cardinality=[2, 2],
                            values=[dict_of_esp_jointprob['mmb0_cm0'], dict_of_esp_jointprob['mmb0_cm1'],
                                    dict_of_esp_jointprob['mmb1_cm0'], dict_of_esp_jointprob['mmb1_cm1']])
        factor_mmb_fx = Factor(variables=['money_market_bonus', 'fx_products'],
                             cardinality=[2, 2],
                             values=[dict_of_esp_jointprob['mmb0_fx0'], dict_of_esp_jointprob['mmb0_fx1'],
                                    dict_of_esp_jointprob['mmb1_fx0'], dict_of_esp_jointprob['mmb1_fx1']])
        factor_mmb_loc = Factor(variables=['money_market_bonus', 'letters_of_credit'],
                             cardinality=[2, 2],
                             values=[dict_of_esp_jointprob['mmb0_loc0'], dict_of_esp_jointprob['mmb0_loc1'],
                                    dict_of_esp_jointprob['mmb1_loc0'], dict_of_esp_jointprob['mmb1_loc1']])
        factor_mmb_es = Factor(variables=['money_market_bonus', 'enterprise_sweep'],
                             cardinality=[2, 2],
                             values=[dict_of_esp_jointprob['mmb0_es0'], dict_of_esp_jointprob['mmb0_es1'],
                                    dict_of_esp_jointprob['mmb1_es0'], dict_of_esp_jointprob['mmb1_es1']])
        factor_mmb_checking = Factor(variables=['money_market_bonus', 'checking_usd'],
                             cardinality=[2, 2],
                             values=[dict_of_esp_jointprob['mmb0_checking0'], dict_of_esp_jointprob['mmb0_checking1'],
                                    dict_of_esp_jointprob['mmb1_checking0'], dict_of_esp_jointprob['mmb1_checking1']])
        # collateral mma

        factor_cmma_cm = Factor(variables=['collateral_mma','cash_management'],
                            cardinality=[2, 2],
                        values=[dict_of_esp_jointprob['cmma0_cm0'], dict_of_esp_jointprob['cmma0_cm1'],
                                dict_of_esp_jointprob['cmma1_cm0'], dict_of_esp_jointprob['cmma1_cm1']])

        factor_cmma_fx = Factor(variables=['collateral_mma', 'fx_products'],
                            cardinality=[2, 2],
                            values=[dict_of_esp_jointprob['cmma0_fx0'], dict_of_esp_jointprob['cmma0_fx1'],
                                    dict_of_esp_jointprob['cmma1_fx0'], dict_of_esp_jointprob['cmma1_fx1']])
        factor_cmma_loc = Factor(variables=['collateral_mma', 'letters_of_credit'],
                             cardinality=[2, 2],
                             values=[dict_of_esp_jointprob['cmma0_loc0'], dict_of_esp_jointprob['cmma0_loc1'],
                                    dict_of_esp_jointprob['cmma1_loc0'], dict_of_esp_jointprob['cmma1_loc1']])
        factor_cmma_es= Factor(variables=['collateral_mma', 'enterprise_sweep'],
                             cardinality=[2, 2],
                             values=[dict_of_esp_jointprob['cmma0_es0'], dict_of_esp_jointprob['cmma0_es1'],
                                    dict_of_esp_jointprob['cmma1_es0'], dict_of_esp_jointprob['cmma1_es1']])
        factor_cmma_checking = Factor(variables=['collateral_mma', 'checking_usd'],
                             cardinality=[2, 2],
                             values=[dict_of_esp_jointprob['cmma0_checking0'], dict_of_esp_jointprob['cmma0_checking1'],
                                    dict_of_esp_jointprob['cmma1_checking0'],dict_of_esp_jointprob['cmma1_checking1']])
        # cash management
        factor_cm_fx = Factor(variables=['cash_management', 'fx_products'],
                            cardinality=[2, 2],
                            values=[dict_of_esp_jointprob['cm0_fx0'], dict_of_esp_jointprob['cm0_fx1'],
                                    dict_of_esp_jointprob['cm1_fx0'], dict_of_esp_jointprob['cm1_fx1']])
        factor_cm_loc = Factor(variables=['cash_management', 'letters_of_credit'],
                             cardinality=[2, 2],
                             values=[dict_of_esp_jointprob['cm0_loc0'], dict_of_esp_jointprob['cm0_loc1'],
                                    dict_of_esp_jointprob['cm1_loc0'], dict_of_esp_jointprob['cm1_loc1']])
        factor_cm_es= Factor(variables=['cash_management', 'enterprise_sweep'],
                             cardinality=[2, 2],
                             values=[dict_of_esp_jointprob['cm0_es0'], dict_of_esp_jointprob['cm0_es1'],
                                    dict_of_esp_jointprob['cm1_es0'], dict_of_esp_jointprob['cm1_es1']])
        factor_cm_checking = Factor(variables=['cash_management', 'checking_usd'],
                             cardinality=[2, 2],
                             values=[dict_of_esp_jointprob['cm0_checking0'], dict_of_esp_jointprob['cm0_checking1'],
                                    dict_of_esp_jointprob['cm1_checking0'], dict_of_esp_jointprob['cm1_checking1']])

        # FX products
        factor_fx_loc = Factor(variables=['fx_products', 'letters_of_credit'],
                             cardinality=[2, 2],
                             values=[dict_of_esp_jointprob['fx0_loc0'], dict_of_esp_jointprob['fx0_loc1'],
                                    dict_of_esp_jointprob['fx1_loc0'], dict_of_esp_jointprob['fx1_loc1']])
        factor_fx_es= Factor(variables=['fx_products', 'enterprise_sweep'],
                             cardinality=[2, 2],
                             values=[dict_of_esp_jointprob['fx0_es0'], dict_of_esp_jointprob['fx0_es1'],
                                    dict_of_esp_jointprob['fx1_es0'], dict_of_esp_jointprob['fx1_es1']])
        factor_fx_checking = Factor(variables=['fx_products', 'checking_usd'],
                             cardinality=[2, 2],
                             values=[dict_of_esp_jointprob['fx0_checking0'], dict_of_esp_jointprob['fx0_checking1'],
                                    dict_of_esp_jointprob['fx1_checking0'], dict_of_esp_jointprob['fx1_checking1']])

        # letters of credit

        factor_loc_es= Factor(variables=['letters_of_credit', 'enterprise_sweep'],
                             cardinality=[2, 2],
                             values=[dict_of_esp_jointprob['loc0_es0'], dict_of_esp_jointprob['loc0_es1'],
                                    dict_of_esp_jointprob['loc1_es0'], dict_of_esp_jointprob['loc1_es1']])
        factor_loc_checking = Factor(variables=['letters_of_credit', 'checking_usd'],
                         cardinality=[2, 2],
                         values=[dict_of_esp_jointprob['loc0_checking0'], dict_of_esp_jointprob['loc0_checking1'],
                                dict_of_esp_jointprob['loc1_checking0'], dict_of_esp_jointprob['loc1_checking1']])
        #enterprise sweep

        factor_es_checking = Factor(variables=['enterprise_sweep', 'checking_usd'],
                             cardinality=[2, 2],
                             values=[dict_of_esp_jointprob['es0_checking0'], dict_of_esp_jointprob['es0_checking1'],
                                    dict_of_esp_jointprob['es1_checking0'], dict_of_esp_jointprob['es1_checking1']])

        # built the markov model
        model.add_factors(factor_mmb_cmma , factor_mmb_cm, factor_mmb_fx, factor_mmb_loc,factor_mmb_es, factor_mmb_checking,
                          factor_cmma_cm , factor_cmma_fx, factor_cmma_loc, factor_cmma_es,factor_cmma_checking,
             factor_cm_fx,   factor_cm_loc,    factor_cm_es,  factor_cm_checking , factor_fx_loc,
                  factor_fx_es ,  factor_fx_checking,   factor_loc_es, factor_loc_checking , factor_es_checking )


        belief_propagation = BeliefPropagation(model)


        all_products = ['money_market_bonus','collateral_mma', 'cash_management','enterprise_sweep',
                                    'fx_products','letters_of_credit','checking_usd']


        # perform inference for all product except the one in the for loop
        for prod in all_products:
            if evidence_ is None:
                new_evidence = evidence_
            else:
                # drop the current product from the evidence so it can be inferred
                new_evidence = {key: value for key, value in evidence_.items()
                                if key != prod}
            # perform belief inference on only one product at a time
            belief_inference_products = str(prod)


            # belief propagation on one product at a time, given evidence from all other products
            belief = belief_propagation.query(variables=[belief_inference_products], evidence=new_evidence)

            try:
                #mmb = belief_mmb['money_market_bonus'].values[1]
                mmb = belief['money_market_bonus'].values[1]
                if mmb <0 :
                    mmb = .0000001
                elif mmb >1:
                    mmb =1
                prob_mmb.append(mmb)# one is having the product
            except: # can't perform inference on this product
                pass
            try:
                cmma = belief['collateral_mma'].values[1]
                if cmma <0:
                    cmma = .0000001
                elif cmma >1:
                    cmma =1
                prob_cmma.append(cmma)
            except:## don't have this product
                pass
            try:
                cm = belief['cash_management'].values[1]
                if cm <0:
                    cm = .0000001
                elif cm >1:
                    cm =1
                prob_cm.append(cm)
            except:
                pass
            try:
                checking = belief['checking_usd'].values[1]
                if checking <0:
                    checking = .0000001
                elif checking >1:
                    checking =1
                prob_checking.append(checking)
            except:
                pass
            try:

                fx = belief['fx_products'].values[1]
                if fx <0:
                    fx = .0000001
                elif fx >1:
                    fx =1
                prob_fx.append(fx)
            except:
                pass
            try:
                loc = belief['letters_of_credit'].values[1]
                if loc <0:
                    loc = .0000001
                elif loc > 1:
                    loc = 1
                prob_loc.append(loc)
            except:

                pass
            try:
                es = belief['enterprise_sweep'].values[1]
                if es<0:
                    es = .0000001
                elif es >1:
                    es = 1
                prob_es.append(es)
            except:
                pass
Example 23

    def plot_causal_influence(self, file_path):
        """
        Computes the odds of the target variable being value 1 over value 0 (i.e. the odds ratio)
        by iterating through all other network variables/nodes, changing their values,
        and observing how the probability of the target variable changes. Belief propagation
        is used for inference. A forest plot is produced from this and saved to disk.

        Arguments:
            file_path: str, the absolute path to save the file to (e.g. "~/Desktop/forest_plot.png")

        Returns:
            None
        """

        # Estimate CPTs
        self._estimate_CPT()

        if self.verbose:
            print(f"Calculating influence of all nodes on target node")

        if not self.bn_model.check_model():
            print("""
                There is a problem with your network structure. You have disconnected nodes
                or separated sub-networks. Please examine your network plot and re-learn your
                network structure with tweaked settings.
                """)
            return None

        if self.target_variable not in self.bn_model.nodes:
            print("""
                Your target variable has no parent nodes! Can't perform inference! Please examine
                your network plot and re-learn your network structure with tweaked settings.
                """)
            return None

        # Prep for belief propagation
        belief_propagation = BeliefPropagation(self.bn_model)
        belief_propagation.calibrate()

        # Iterate over all intervention nodes and values, calculating odds ratios w.r.t target variable
        overall_dict = {}

        variables_to_test = list(
            set(self.bn_model.nodes) - {self.target_variable})

        for node in variables_to_test:
            results = []
            for value in self.filtered_df[node].unique():
                prob = belief_propagation.query(
                    variables=[self.target_variable],
                    evidence={
                        node: value
                    },
                    show_progress=False,
                ).values
                results.append([node, value, prob[0], prob[1]])

            results_df = pd.DataFrame(
                results,
                columns=["node", "value", "probability_0", "probability_1"])
            results_df["odds_1"] = (results_df["probability_1"] /
                                    results_df["probability_0"])
            results_df = results_df.sort_values(
                "value", ascending=True, inplace=False).reset_index(drop=True)

            overall_dict[node] = results_df

        final_df_list = []

        for node, temp_df in overall_dict.items():
            first_value = temp_df["odds_1"].iloc[0]
            temp_df["odds_ratio"] = (temp_df["odds_1"] / first_value).round(3)
            final_df_list.append(temp_df)

        final_df = pd.concat(final_df_list)[["node", "value", "odds_ratio"]]
        self.odds_ratios = final_df

        if self.verbose:
            print(f"Saving forest plot to the following PNG file: {file_path}")

        # Clean up the dataframe of odds ratios so plot can have nice labels
        final_df2 = (pd.concat([
            final_df,
            final_df.groupby("node")["value"].apply(
                lambda x: x.shift(-1).iloc[-1]).reset_index(),
        ]).sort_values(by=["node", "value"],
                       ascending=True).reset_index(drop=True))
        final_df2["node"][final_df2["value"].isnull()] = np.nan
        final_df2["value"] = final_df2["value"].astype("Int32").astype(str)
        final_df2["value"].replace({np.nan: ""}, inplace=True)
        final_df3 = final_df2.reset_index(drop=True).reset_index()
        final_df3.rename(columns={"index": "vertical_index"}, inplace=True)
        final_df3["y_label"] = final_df3["node"] + " = " + final_df3["value"]
        final_df3["y_label"][final_df3["odds_ratio"] == 1.0] = (
            final_df3["y_label"] + " (ref)")
        final_df3["y_label"].fillna("", inplace=True)

        # Produce large plot
        plt.clf()
        plt.title(
            "Strength of Associations Between Interventions and Target Variable"
        )
        plt.scatter(
            x=final_df3["odds_ratio"],
            y=final_df3["vertical_index"],
            s=70,
            color="b",
            alpha=0.5,
        )
        plt.xlabel("Odds Ratio")
        plt.axvline(x=1.0, color="red", linewidth="1.5", linestyle="--")
        plt.yticks(final_df3["vertical_index"], final_df3["y_label"])

        for _, row in final_df3.iterrows():
            if not np.isnan(row["odds_ratio"]):
                plt.plot(
                    [0, row["odds_ratio"]],
                    [row["vertical_index"], row["vertical_index"]],
                    color="black",
                    linewidth="0.4",
                )

        plt.xlim([0, final_df3["odds_ratio"].max() + 1])

        figure = plt.gcf()
        figure.set_size_inches(12, 7)

        plt.savefig(expanduser(file_path),
                    bbox_inches="tight",
                    format="PNG",
                    dpi=300)
Example 24
PGM.add_nodes_from(['w1', 'w2', 'w3'])
PGM.add_edges_from([('w1', 'w2'), ('w2', 'w3')])
tr_matrix = np.array([1, 2, 3, 10, 1, 3, 3, 5, 2]).reshape(3, 3).T.reshape(-1)
phi = [DiscreteFactor(edge, [3, 3], tr_matrix) for edge in PGM.edges()]
PGM.add_factors(*phi)

# Calculate the partition function
Z = PGM.get_partition_function()
print('The partition function is:', Z)

# Calibrate the clique tree
belief_propagation = BeliefPropagation(PGM)
belief_propagation.calibrate()

# Output calibration result, which you should get
query = belief_propagation.query(variables=["w2"], joint=False)
print('After calibration you should get the following mu(S):', query["w2"] * Z)

# Get marginal distribution over third word
query = belief_propagation.query(variables=['w3'],
                                 joint=False)  #, evidence = {'w2':0})
print('Marginal distribution over the third word is:\n', query["w3"])

#Get conditional distribution over third word
query = belief_propagation.query(variables=['w3'],
                                 evidence={'w1': 0},
                                 joint=False)  # 0 stands for "noun"
print(
    'Conditional distribution over the third word, given that the first word is a noun, is:\n',
    query["w3"])
Example 25
 def time_query_alarm(self):
     infer = BeliefPropagation(self.alarm)
     infer.query(variables=['VENTLUNG'])
Example 26
########################################################################

model = BayesianModel([('class', 'doors'), ('class', 'safety'),
                       ('class', 'maint'), ('class', 'buying'),
                       ('class', 'persons'), ('class', 'lug_boot')])

model.fit(df_train, estimator=MaximumLikelihoodEstimator)

# print(model.get_cpds('class').values)
# print(model.active_trail_nodes('doors', observed='class'))

# can also use Belief propagation #
inference = BeliefPropagation(model)

print("Class variable prior:\n{}\n".format(
    inference.query(variables=['class'])['class']))
print("Class variable posterior after certain observation:\n{}\n".format(
    inference.query(variables=['class'], evidence={'doors': 0})['class']))

#####################
# Predict test data #
#####################

predict_data = df_test.copy()
predict_data.drop(['class'], axis=1, inplace=True)
# y_pred = model.predict(predict_data)

pred_values = []
for index, data_point in predict_data.iterrows():
    prob_dist = inference.query(variables=['class'],
                                evidence=data_point.to_dict())['class'].values
Example 27
    def backward_inference(self, variables, evidence=None):
        """
        Backward inference method using belief propagation.

        Parameters:
        ----------
        variables: list
            list of variables for which you want to compute the probability
        evidence: dict
            a dict key, value pair as {var: state_of_var_observed}
            None if no evidence

        Examples:
        --------
        >>> from pgmpy.factors.discrete import TabularCPD
        >>> from pgmpy.models import DynamicBayesianNetwork as DBN
        >>> from pgmpy.inference import DBNInference
        >>> dbnet = DBN()
        >>> dbnet.add_edges_from([(('Z', 0), ('X', 0)), (('X', 0), ('Y', 0)),
        ...                       (('Z', 0), ('Z', 1))])
        >>> z_start_cpd = TabularCPD(('Z', 0), 2, [[0.5, 0.5]])
        >>> x_i_cpd = TabularCPD(('X', 0), 2, [[0.6, 0.9],
        ...                                    [0.4, 0.1]],
        ...                      evidence=[('Z', 0)],
        ...                      evidence_card=[2])
        >>> y_i_cpd = TabularCPD(('Y', 0), 2, [[0.2, 0.3],
        ...                                    [0.8, 0.7]],
        ...                      evidence=[('X', 0)],
        ...                      evidence_card=[2])
        >>> z_trans_cpd = TabularCPD(('Z', 1), 2, [[0.4, 0.7],
        ...                                        [0.6, 0.3]],
        ...                      evidence=[('Z', 0)],
        ...                      evidence_card=[2])
        >>> dbnet.add_cpds(z_start_cpd, z_trans_cpd, x_i_cpd, y_i_cpd)
        >>> dbnet.initialize_initial_state()
        >>> dbn_inf = DBNInference(dbnet)
        >>> dbn_inf.backward_inference([('X', 0)], {('Y', 0):0, ('Y', 1):1, ('Y', 2):1})[('X', 0)].values
        array([ 0.66594382,  0.33405618])
        """
        variable_dict = defaultdict(list)
        for var in variables:
            variable_dict[var[1]].append(var)
        time_range = max(variable_dict)
        interface_nodes_dict = {}
        if evidence:
            evid_time_range = max(
                [time_slice for var, time_slice in evidence.keys()])
            time_range = max(time_range, evid_time_range)
        end_bp = BeliefPropagation(self.start_junction_tree)
        potential_dict = self.forward_inference(variables, evidence,
                                                'potential')
        update_factor = self._shift_factor(potential_dict[time_range], 1)
        factor_values = {}

        for time_slice in range(time_range, 0, -1):
            evidence_time = self._get_evidence(evidence, time_slice, 1)
            evidence_prev_time = self._get_evidence(evidence, time_slice - 1,
                                                    0)
            if evidence_prev_time:
                interface_nodes_dict = {
                    k: v
                    for k, v in evidence_prev_time.items()
                    if k in self.interface_nodes_0
                }
            if evidence_time:
                evidence_time.update(interface_nodes_dict)
            mid_bp = BeliefPropagation(self.one_and_half_junction_tree)
            self._update_belief(mid_bp, self.in_clique,
                                potential_dict[time_slice - 1])
            forward_factor = self._shift_factor(potential_dict[time_slice], 1)
            self._update_belief(mid_bp, self.out_clique, forward_factor,
                                update_factor)

            if variable_dict[time_slice]:
                variable_time = self._shift_nodes(variable_dict[time_slice], 1)
                new_values = mid_bp.query(variable_time,
                                          evidence=evidence_time)
                changed_values = {}
                for key in new_values.keys():
                    new_key = (key[0], time_slice)
                    new_factor = DiscreteFactor([new_key],
                                                new_values[key].cardinality,
                                                new_values[key].values)
                    changed_values[new_key] = new_factor
                factor_values.update(changed_values)

            clique_phi = self._get_factor(mid_bp, evidence_time)
            in_clique_phi = self._marginalize_factor(self.interface_nodes_0,
                                                     clique_phi)
            update_factor = self._shift_factor(in_clique_phi, 1)

        out_clique_phi = self._shift_factor(update_factor, 0)
        self._update_belief(end_bp, self.start_interface_clique,
                            potential_dict[0], out_clique_phi)
        evidence_0 = self._get_evidence(evidence, 0, 0)
        if variable_dict[0]:
            factor_values.update(end_bp.query(variable_dict[0], evidence_0))
        return factor_values
Example 28
model.add_cpds(..........)

# Apply propagation
belief_propagation = BeliefPropagation(model)

# To calibrate the clique tree, use calibrate() method
belief_propagation.calibrate()

# To get cluster (or clique) beliefs use the corresponding getters
belief_propagation.get_clique_beliefs()

# To get the sepset beliefs use the corresponding getters
belief_propagation.get_sepset_beliefs()

>> # Query variables not in the same cluster
belief_propagation.query(variables=['no_of_people'], evidence={'location':1, 'quality':1})

>> # Can apply MAP_Query - next
belief_propagation.map_query(variables=['no_of_people'], evidence={'location':1, 'quality':1})
" {'no_of_people': 0} "







-4- " MAP - Maximize A Posterior Probability "
" Given the current states to find out the maximized predicted var state - prediction "
" Different from Query which only care find out the distribution of target var over all states "