def test_map_query_with_evidence(self):
    """Evidence {J=0, Q=1, G=0} should yield the MAP assignment A=1, R=0, L=0."""
    bp = BeliefPropagation(self.bayesian_model)
    evidence = {'J': 0, 'Q': 1, 'G': 0}
    result = bp.map_query(['A', 'R', 'L'], evidence)
    self.assertDictEqual(result, {'A': 1, 'R': 0, 'L': 0})
def test_map_query_with_evidence(self):
    """MAP over A, R, L under observed J=0, Q=1, G=0."""
    inference = BeliefPropagation(self.bayesian_model)
    self.assertDictEqual(
        inference.map_query(["A", "R", "L"], {"J": 0, "Q": 1, "G": 0}),
        {"A": 1, "R": 0, "L": 0},
    )
def test_map_query(self):
    """With no evidence, MAP over all variables gives the global mode."""
    bp = BeliefPropagation(self.bayesian_model)
    expected = {'A': 1, 'R': 1, 'J': 1, 'Q': 1, 'G': 0, 'L': 0}
    self.assertDictEqual(bp.map_query(), expected)
def test_map_query(self):
    """Evidence-free MAP query returns the most probable joint assignment."""
    inference = BeliefPropagation(self.bayesian_model)
    result = inference.map_query()
    self.assertDictEqual(
        result, {"A": 1, "R": 1, "J": 1, "Q": 1, "G": 0, "L": 0}
    )
# Draw one random record from the sampled data set and compare the MAP
# prediction for each queried node against the sampled ground-truth state.
ITEM = random.sample(ND_Sample_N.index.tolist(), 1)

# Nodes whose states we ask the model to predict.
vbs = ['SC33', 'SF2', 'SL34', 'SF3', 'SF4', 'SF5', 'SC44']
# Ground-truth states for those nodes, taken from the sampled record.
VBS = {name: int(ND_Sample_N[name][ITEM]) for name in vbs}

# Observed evidence nodes from the same sampled record.
evidence_nodes = ['CP4B41', 'CB42B41', 'CB43B42', 'CB44B43',
                  'CB44B32', 'CB32B31', 'CP3B32']
EDS = {name: int(ND_Sample_N[name][ITEM]) for name in evidence_nodes}

query = bp_N.map_query(variables=vbs, evidence=EDS)

# Tally prediction mismatches into the running error counter.
for key in query:
    if query[key] != VBS[key]:
        error += 1
print(error)
from pgmpy.factors.discrete import TabularCPD
from pgmpy.models import BayesianModel
from pgmpy.inference import BeliefPropagation

# Toy network: A and R jointly cause J; J drives Q; J and G drive L.
bayesian_model = BayesianModel(
    [('A', 'J'), ('R', 'J'), ('J', 'Q'), ('J', 'L'), ('G', 'L')]
)

# Priors for the root nodes.
cpd_a = TabularCPD('A', 2, [[0.2], [0.8]])
cpd_r = TabularCPD('R', 2, [[0.4], [0.6]])
cpd_g = TabularCPD('G', 2, [[0.6], [0.4]])

# Conditional tables; columns enumerate the parents' state combinations.
cpd_j = TabularCPD(
    'J', 2,
    [[0.9, 0.6, 0.7, 0.1],
     [0.1, 0.4, 0.3, 0.9]],
    evidence=['R', 'A'], evidence_card=[2, 2],
)
cpd_q = TabularCPD(
    'Q', 2,
    [[0.9, 0.2],
     [0.1, 0.8]],
    evidence=['J'], evidence_card=[2],
)
cpd_l = TabularCPD(
    'L', 2,
    [[0.9, 0.45, 0.8, 0.1],
     [0.1, 0.55, 0.2, 0.9]],
    evidence=['G', 'J'], evidence_card=[2, 2],
)

bayesian_model.add_cpds(cpd_a, cpd_r, cpd_j, cpd_q, cpd_l, cpd_g)

# MAP assignment for J and Q given full evidence on the remaining nodes.
belief_propagation = BeliefPropagation(bayesian_model)
print(
    belief_propagation.map_query(
        variables=['J', 'Q'],
        evidence={'A': 0, 'R': 0, 'G': 0, 'L': 1},
    )
)
test_value[i] = 0 elif (min_value[i] == 1): if (test_value[i] < 1): test_value[i] = 0 else: test_value[i] = test_value[i] - 1 #print(model.check_model()) #bp.calibrate() bp = BeliefPropagation(new_model) result = bp.map_query(variables=['AN'], evidence={ 'CsM': test_value[0], 'CsH': test_value[1], 'CsI': test_value[2], 'InI': test_value[3], 'InD': test_value[4], 'InN': test_value[5], 'OutI': test_value[6], 'OutD': test_value[7], 'OutN': test_value[8], 'PitC': test_value[9], 'PitUp': test_value[10], 'PitD': test_value[11], 'PitT': test_value[12], 'PitUn': test_value[13], 'IDro': test_value[14], 'DDro': test_value[15], 'NDro': test_value[16], 'PitN': test_value[17] }) print(result['AN'])
('PVS', 'VTUB'), ('PVS', 'ACO2'), ('SAO2', 'VMCH'), ('SAO2', 'VLNG'), ('SAO2', 'VALV'), ('SAO2', 'ACO2'), ('SHNT', 'INT'), ('INT', 'VALV'), ('PRSS', 'VTUB'), ('DISC', 'VTUB'), ('MVS', 'VMCH'), ('VMCH', 'VTUB'), ('VMCH', 'VALV'), ('VTUB', 'VLNG'), ('VTUB', 'VALV'), ('VLNG', 'VALV'), ('VLNG', 'ACO2'), ('VALV', 'ACO2'), ('CCHL', 'HR'), ('HR', 'CO'), ('CO', 'BP'), ('HRBP', 'INT')]) # pe = ParameterEstimator(model, df) # print("\n", pe.state_counts('SAO2')) mle = MaximumLikelihoodEstimator(model, df) model.fit(df, MaximumLikelihoodEstimator) model.fit(df, estimator=BayesianEstimator) # infer = VariableElimination(model) # print (" **************** Inference using variable elimination ...***********"); # print ("VALV : 'VTUB':0,'PRSS':1"); # print (infer.query(['VALV'],evidence={'VTUB':0,'PRSS':1}) ['VALV']) print(" ****************Inference using belief propagation ...***********") print("VALV : 'VTUB':0,'PRSS':1") belief_propagation = BeliefPropagation(model) belief_propagation.map_query(variables=['VALV'], evidence={ 'VTUB': 0, 'PRSS': 0 })
# Demonstration of BeliefPropagation: calibration, beliefs, and queries.
# (Fixed: the original snippet interleaved `>>` prompt markers and bare prose
# strings with the statements, so it was not valid Python; the prose is now
# in comments and the executable statements are unchanged.)
belief_propagation = BeliefPropagation(model)

# To calibrate the clique tree, use the calibrate() method.
belief_propagation.calibrate()

# To get cluster (clique) beliefs, use the corresponding getter.
belief_propagation.get_clique_beliefs()

# To get the sepset beliefs, use the corresponding getter.
belief_propagation.get_sepset_beliefs()

# Query variables that are not in the same cluster.
belief_propagation.query(variables=['no_of_people'],
                         evidence={'location': 1, 'quality': 1})

# MAP query over the same variables/evidence; example result:
# {'no_of_people': 0}
belief_propagation.map_query(variables=['no_of_people'],
                             evidence={'location': 1, 'quality': 1})

# NOTE: MAP (Maximum A Posteriori) returns the single most probable state of
# the target variables given the evidence, whereas a plain query returns the
# full posterior distribution over all states. With variable elimination,
# MAP is computed via factor maximization rather than summation.
('Q11', 'attendance'), ('Q12', 'attendance'), ('class', 'difficulty'), ('class', 'Q7'), ('class', 'Q9'), ('difficulty', 'Q9'), ('class', 'Q11'), ('Q18', 'Q16'), ('Q13', 'Q25'), ('Q23', 'Q25'), ('class', 'Q12'), ('Q17', 'Q12') ]) amlmodel = bayesmodel.fit(df, estimator=MaximumLikelihoodEstimator) for cpd in bayesmodel.get_cpds(): print("CPD of {variable}:".format(variable=cpd.variable)) print(cpd) belpro = BeliefPropagation(bayesmodel) print( belpro.map_query(variables=['attendance'], evidence={ 'difficulty': 2, 'Q9': 3 })) # print(belpro.map_query(variables=['Q25', 'Q18','Q16'],evidence={'instr':1})) print( belpro.map_query(variables=['attendance', 'Q9', 'difficulty'], evidence={'class': 7})) #Commented some queries because taking a lot of time to run # print(belpro.map_query(variables=['Q28','Q11'],evidence={'instr':2, 'class':10})) # print(belpro.map_query(variables=['Q18', 'Q26','Q13'],evidence={'instr':2})) # print(belpro.map_query(variables=['Q23', 'Q21','Q17'],evidence={'instr':2})) inference = BayesianModelSampling(bayesmodel) df = inference.forward_sample(5)
def test_map_query_with_evidence(self):
    """Check the MAP result for A, R, L when J, Q, G are observed."""
    propagator = BeliefPropagation(self.bayesian_model)
    observed = {'J': 0, 'Q': 1, 'G': 0}
    answer = propagator.map_query(['A', 'R', 'L'], observed)
    expected = {'A': 1, 'R': 0, 'L': 0}
    self.assertDictEqual(answer, expected)
def test_map_query(self):
    """Check the unconditional MAP assignment over every network variable."""
    propagator = BeliefPropagation(self.bayesian_model)
    answer = propagator.map_query()
    expected = {'A': 1, 'R': 1, 'J': 1, 'Q': 1, 'G': 0, 'L': 0}
    self.assertDictEqual(answer, expected)