def setUp(self):
    """Create an empty DBN plus the CPD fixtures shared by the tests."""
    self.network = DynamicBayesianNetwork()
    # G depends on (D, I); the identical table is used in both time slices.
    grade_table = [
        [0.3, 0.05, 0.8, 0.5],
        [0.4, 0.25, 0.1, 0.3],
        [0.3, 0.7, 0.1, 0.2],
    ]
    self.grade_cpd = TabularCPD(
        ("G", 0), 3,
        values=grade_table,
        evidence=[("D", 0), ("I", 0)],
        evidence_card=[2, 2],
    )
    # Inter-slice transition for D.
    self.d_i_cpd = TabularCPD(
        ("D", 1), 2,
        values=[[0.6, 0.3], [0.4, 0.7]],
        evidence=[("D", 0)],
        evidence_card=[2],
    )
    # Slice-0 priors.
    self.diff_cpd = TabularCPD(("D", 0), 2, values=[[0.6, 0.4]])
    self.intel_cpd = TabularCPD(("I", 0), 2, values=[[0.7, 0.3]])
    # Inter-slice transition for I.
    self.i_i_cpd = TabularCPD(
        ("I", 1), 2,
        values=[[0.5, 0.4], [0.5, 0.6]],
        evidence=[("I", 0)],
        evidence_card=[2],
    )
    self.grade_1_cpd = TabularCPD(
        ("G", 1), 3,
        values=grade_table,
        evidence=[("D", 1), ("I", 1)],
        evidence_card=[2, 2],
    )
def setUp(self): self.G = DynamicBayesianNetwork() self.G.add_edges_from([ (("D", 0), ("G", 0)), (("I", 0), ("G", 0)), (("D", 0), ("D", 1)), (("I", 0), ("I", 1)), ]) """
def query_time_frame_3():
    # NOTE(review): Python 2 code (print statement, dict.iteritems). Despite the
    # bare signature, the body reads `self`, `pr2`, `nodes2`, `add_cpds_to_model`
    # and `Distribution` -- presumably this is a nested function whose enclosing
    # scope supplies them; confirm before moving or reusing it.
    # Dynamic Bayesian Network only supports 2-time slice, 2 time frame. Hence, create new DBN with
    # datas of time 2 and time 3 to query nodes in time 3.
    # Re-label time-2 columns as slice 0 and time-3 columns as slice 1.
    data23 = self.data.rename(
        columns={
            'DPQ2': ('DPQ', 0), 'C2': ('C', 0), 'TQ2': ('TQ', 0),
            'OU2': ('OU', 0), 'DI2': ('DI', 0), 'DFT2': ('DFT', 0),
            'RD2': ('RD', 0), 'DFO2': ('DFO', 0), 'DPQ3': ('DPQ', 1),
            'C3': ('C', 1), 'TQ3': ('TQ', 1), 'OU3': ('OU', 1),
            'DI3': ('DI', 1), 'DFT3': ('DFT', 1), 'RD3': ('RD', 1),
            'DFO3': ('DFO', 1)
        })
    # Drop the un-renamed (time-1) columns; positional `1` is axis=columns.
    data23 = data23.drop(
        ['DPQ', 'C', 'TQ', 'OU', 'DI', 'DFT', 'RD', 'DFO'], 1)
    self.model23 = DynamicBayesianNetwork()
    # Intra-slice structure plus inter-slice persistence edges.
    self.model23.add_edges_from([(('DPQ', 0), ('DI', 0)),
                                 (('C', 0), ('DI', 0)),
                                 (('TQ', 0), ('DFT', 0)),
                                 (('DI', 0), ('DFT', 0)),
                                 (('DI', 0), ('RD', 0)),
                                 (('DFT', 0), ('RD', 0)),
                                 (('RD', 0), ('DFO', 0)),
                                 (('OU', 0), ('DFO', 0)),
                                 (('DPQ', 0), ('DPQ', 1)),
                                 (('C', 0), ('C', 1)),
                                 (('TQ', 0), ('TQ', 1)),
                                 (('OU', 0), ('OU', 1)),
                                 (('RD', 0), (('DI', 1)))])
    add_cpds_to_model(self.model23, data23)
    # save state names to draw graph
    # Slice-1 nodes of this auxiliary model correspond to absolute time 2.
    for key, names in self.model23.state_names.iteritems():
        if key[1] == 1:
            self.state_names[(key[0], 2)] = names
    pr3 = pr2
    nodes3 = nodes2
    print 'query 3', pr3, nodes3
    # pr = {('DPQ', 1): 1,...} | nodes = [('DPQ', 1),...]
    infer3 = DBNInferenceRewritten(self.model23)
    query = infer3.query(nodes3, evidence=pr3)
    # Store the marginals under absolute time index 2.
    for key, value in query.iteritems():
        Distribution[(key[0], 2)] = value.values
def setUp(self):
    """Prepare an empty DBN and the CPDs the test methods attach to it."""
    self.network = DynamicBayesianNetwork()
    # Same grade table for both slices of G.
    grade_table = [
        [0.3, 0.05, 0.9, 0.5],
        [0.4, 0.25, 0.08, 0.3],
        [0.3, 0.7, 0.2, 0.2],
    ]
    self.grade_cpd = TabularCPD(
        ('G', 0), 3, grade_table,
        evidence=[('D', 0), ('I', 0)], evidence_card=[2, 2])
    self.d_i_cpd = TabularCPD(
        ('D', 1), 2, [[0.6, 0.3], [0.4, 0.7]],
        evidence=[('D', 0)], evidence_card=2)
    self.diff_cpd = TabularCPD(('D', 0), 2, [[0.6, 0.4]])
    self.intel_cpd = TabularCPD(('I', 0), 2, [[0.7, 0.3]])
    self.i_i_cpd = TabularCPD(
        ('I', 1), 2, [[0.5, 0.4], [0.5, 0.6]],
        evidence=[('I', 0)], evidence_card=2)
    self.grade_1_cpd = TabularCPD(
        ('G', 1), 3, grade_table,
        evidence=[('D', 1), ('I', 1)], evidence_card=[2, 2])
def active_trail(
    dynamic_model: DynamicBayesianNetwork,
    node: typing.Tuple[str, int],
    evidence: typing.Sequence[typing.Tuple[str, int]] = (),
):
    """Print the nodes reachable from ``node`` along an active trail.

    Queries ``dynamic_model.active_trail_nodes`` with ``evidence`` as the
    observed set, removes ``node`` itself from the result, and prints either
    the sorted reachable nodes or a "no active trails" message.

    Fix: the default was a mutable ``[]`` (shared across calls); an empty
    tuple is behaviourally identical here and immune to accidental mutation.
    """
    # node = DynamicNode(*node)
    # evidence = [DynamicNode(*e) for e in evidence]
    reachable = dynamic_model.active_trail_nodes(node, observed=evidence).get(node)
    reachable.remove(node)  # the query always includes the start node itself
    reachable = sorted(reachable)
    # Build the conditioning-set description once instead of duplicating the
    # four print branches; the emitted strings are unchanged.
    given = f"given {evidence}" if evidence else "given no evidence"
    if reachable:
        print(f"Active trail between {node} and {reachable} {given}")
    else:
        print(f"No active trails from {node} {given}")
def __init__(self, parameters=None):
    """Build the pgmpy DBN described by ``parameters`` and prepare sampling.

    ``parameters`` supplies 'nodes', 'edges' and 'conditional_probabilities'
    (node_id -> rows of the node's probability table -- TODO confirm shape
    against the callers).
    """
    super().__init__(parameters)

    # set up the network based on the parameters
    self.model = DBN()
    self.model.add_nodes_from(self.parameters['nodes'])
    self.model.add_edges_from(self.parameters['edges'])
    print(f'EDGES: {sorted(self.model.edges())}')
    # Fix: removed leftover `ipdb.set_trace()` debug breakpoint, which halted
    # execution unconditionally.

    # TODO -- add 'evidence' -- get from network?
    # Fix: iterate (node_id, values) pairs with .items() -- iterating the dict
    # directly yields only keys and fails to unpack.
    cpds = [
        TabularCPD(variable=node_id,
                   variable_card=len(values),
                   values=values,
                   evidence=[])
        for node_id, values in self.parameters['conditional_probabilities'].items()
    ]
    # Fix: add_cpds takes CPDs as separate arguments; passing the collection
    # as a single argument is rejected by pgmpy's type check.
    self.model.add_cpds(*cpds)

    # make an inference instance for sampling the model
    self.inference = BayesianModelSampling(self.model)

    # get a sample (smoke-check that the assembled model is usable)
    sample = self.inference.forward_sample(size=2)
def buildDBN():
    """Construct the 4-state localisation DBN and return a DBNInference over it.

    Variables (names are required by the grader): "L" = location (4 states),
    "O" = sensor observation (4 states).

    Cleanup: removed the commented-out alternative transition table and the
    commented-out debug prints/queries (dead code); behaviour is unchanged.
    """
    # Construct a DBN object
    dbn = DBN()
    #!!!!!!!!!!!!!!! VERY IMPORTANT !!!!!!!!!!!!!!!
    # MAKE SURE to NAME THE RANDOM VARIABLE "LOCATION AS "L" AND "SENSOR" OBSERVATION AS "O"
    # Intra-slice edge L -> O and inter-slice edge L_t -> L_{t+1}.
    dbn.add_edges_from([(('L', 0), ('O', 0)), (('L', 0), ('L', 1))])

    # Prior: starts in location index 2 with probability 1.
    l_cpd = TabularCPD(('L', 0), 4, [[0], [0], [1], [0]])

    # Sensor model: reports the true location with p=0.7, each other with p=0.1.
    o_cpd = TabularCPD(('O', 0), 4, [
        [0.7, 0.1, 0.1, 0.1],
        [0.1, 0.7, 0.1, 0.1],
        [0.1, 0.1, 0.7, 0.1],
        [0.1, 0.1, 0.1, 0.7],
    ], evidence=[('L', 0)], evidence_card=[4])

    # Transition model: from each location, two successors with p=0.5 each.
    l_i_cpd = TabularCPD(('L', 1), 4, [
        [.5, 0, .5, 0],
        [.5, .5, 0, 0],
        [0, 0, .5, .5],
        [0, .5, 0, .5],
    ], evidence=[('L', 0)], evidence_card=[4])

    dbn.add_cpds(l_cpd, o_cpd, l_i_cpd)

    # Do NOT forget to initialize before doing any inference! Otherwise, errors will be introduced.
    dbn.initialize_initial_state()

    # Create an inference object
    dbn_inf = DBNInference(dbn)
    return dbn_inf
def setUp(self):
    """Build the empty network and the CPD fixtures used across the tests."""
    self.network = DynamicBayesianNetwork()
    # G's table is identical in slice 0 and slice 1.
    grade_table = [
        [0.3, 0.05, 0.8, 0.5],
        [0.4, 0.25, 0.1, 0.3],
        [0.3, 0.7, 0.1, 0.2],
    ]
    self.grade_cpd = TabularCPD(
        ('G', 0), 3, grade_table,
        evidence=[('D', 0), ('I', 0)], evidence_card=[2, 2])
    self.d_i_cpd = TabularCPD(
        ('D', 1), 2, [[0.6, 0.3], [0.4, 0.7]],
        evidence=[('D', 0)], evidence_card=2)
    self.diff_cpd = TabularCPD(('D', 0), 2, [[0.6, 0.4]])
    self.intel_cpd = TabularCPD(('I', 0), 2, [[0.7, 0.3]])
    self.i_i_cpd = TabularCPD(
        ('I', 1), 2, [[0.5, 0.4], [0.5, 0.6]],
        evidence=[('I', 0)], evidence_card=2)
    self.grade_1_cpd = TabularCPD(
        ('G', 1), 3, grade_table,
        evidence=[('D', 1), ('I', 1)], evidence_card=[2, 2])
def main():
    # NOTE(review): Python 2 code (print statements). `HillClimbSearch(data,
    # genes, ...)`, `BicScore(..., bk1=..., weight=...)` and `estimate_dynamic`
    # do not match stock pgmpy signatures -- presumably a project fork; confirm.
    data, string = readData()
    # First column is assumed to be an index/label column; the rest are genes.
    genes = np.array(data.columns[1:])
    labels = np.array(data.columns)
    bayesianModel = BayesianModel()
    transitionModel = DBN()
    bayesianModel.add_nodes_from(genes)
    transitionModel.add_nodes_from(genes)
    # bData: static (B_0) samples, tData: transition samples.
    bData, tData = getData(data, labels)
    print "\nDynamic Bayesian Network inference",
    print "\nB_0 network relations: "
    # Structure-learn the initial network with a BIC score plus prior knowledge.
    hcb = HillClimbSearch(bData, genes,
                          scoring_method=BicScore(bData, labels, bk1=string,
                                                  weight=4))
    best_model_b = hcb.estimate(start=bayesianModel, tabu_length=15,
                                max_indegree=2)
    print(best_model_b.edges())
    printOutputB(best_model_b)
    print "\nLocal Probability Model: "
    # Parameter-learn the CPDs of the learned B_0 structure.
    best_model_b.fit(bData, BayesianEstimator)
    for cpd in best_model_b.get_cpds():
        print(cpd)
    print "\nB_transition network relations: "
    # Repeat for the transition (inter-slice) network.
    hct = HillClimbSearch(tData, genes,
                          scoring_method=BicScore(tData, labels, bk1=string,
                                                  weight=4))
    best_model_t = hct.estimate_dynamic(start=transitionModel, tabu_length=15,
                                        max_indegree=2)
    print(best_model_t.edges())
    printOutputT(best_model_t)
    print "\nLocal Probability Model: "
    best_model_t.fit(tData, BayesianEstimator)
    for cpd in best_model_t.get_cpds():
        print(cpd)
def buildDBN():
    """Build the two-slice localisation DBN and wrap it in a DBNInference."""
    network = DBN()
    # L -> O within each slice; L carries over between slices.
    network.add_edges_from([(('L', 0), ('O', 0)),
                            (('L', 0), ('L', 1)),
                            (('L', 1), ('O', 1))])

    # Observation model: reads the true location (A..D) with probability 0.7.
    sensor_cpd = TabularCPD(('O', 0), 4,
                            [[0.7, 0.1, 0.1, 0.1],   # A
                             [0.1, 0.7, 0.1, 0.1],   # B
                             [0.1, 0.1, 0.7, 0.1],   # C
                             [0.1, 0.1, 0.1, 0.7]],  # D
                            evidence=[('L', 0)], evidence_card=[4])

    # Deterministic prior: start in location C.
    prior_cpd = TabularCPD(('L', 0), 4, [[0],   # A
                                         [0],   # B
                                         [1],   # C
                                         [0]])  # D

    # Transition model: each location moves to one of two neighbours, p=0.5.
    transition_cpd = TabularCPD(('L', 1), 4,
                                [[0.5, 0.0, 0.5, 0.0],   # A
                                 [0.5, 0.5, 0.0, 0.0],   # B
                                 [0.0, 0.0, 0.5, 0.5],   # C
                                 [0.0, 0.5, 0.0, 0.5]],  # D
                                evidence=[('L', 0)], evidence_card=[4])

    # Attach the conditional probability tables to the model.
    network.add_cpds(prior_cpd, transition_cpd, sensor_cpd)

    # Initialization is mandatory before time-series inference.
    network.initialize_initial_state()

    # Hand back an inference engine over the initialized network.
    return DBNInference(network)
class DynamicBayesianNetwork(Process):
    """Process wrapper around a pgmpy DBN built from declarative parameters.

    ``defaults`` documents the expected parameter shape: node list, edge list,
    and a mapping node_id -> probability-table rows.
    """

    defaults = {
        'nodes': [],
        'edges': [],
        'conditional_probabilities': {
            'node_id': []
        }
    }

    def __init__(self, parameters=None):
        """Assemble the DBN, attach CPDs, and prepare a forward sampler."""
        super().__init__(parameters)

        # set up the network based on the parameters
        self.model = DBN()
        self.model.add_nodes_from(self.parameters['nodes'])
        self.model.add_edges_from(self.parameters['edges'])
        print(f'EDGES: {sorted(self.model.edges())}')
        # Fix: removed leftover `ipdb.set_trace()` debug breakpoint, which
        # halted execution unconditionally.

        # TODO -- add 'evidence' -- get from network?
        # Fix: 'conditional_probabilities' is a dict (see `defaults`), so it
        # must be iterated with .items(); the bare dict iteration yielded only
        # keys and the 2-tuple unpack raised.
        cpds = [
            TabularCPD(variable=node_id,
                       variable_card=len(values),
                       values=values,
                       evidence=[])
            for node_id, values in
            self.parameters['conditional_probabilities'].items()
        ]
        # Fix: add_cpds expects individual CPD objects, not one generator.
        self.model.add_cpds(*cpds)

        # make an inference instance for sampling the model
        self.inference = BayesianModelSampling(self.model)

        # get a sample (smoke-check that the assembled model is usable)
        sample = self.inference.forward_sample(size=2)

    def ports_schema(self):
        # No ports are exposed yet.
        return {}

    def next_update(self, timestep, states):
        # The process currently produces no state updates.
        return {}
class DbnCnnInterface(object):
    """Masks CNN Q-values with action feasibility inferred from a pickled DBN."""

    def __init__(self, model_file='../DBN/network.nx'):
        # Load a pickled networkx graph that also carries pgmpy CPDs on `.cpds`.
        nx_model = nx.read_gpickle(model_file)
        self.dbn = DynamicBayesianNetwork(nx_model.edges())
        self.dbn.add_cpds(*nx_model.cpds)
        self.dbn.initialize_initial_state()
        self.dbn_infer = DBNInference(self.dbn)

    def filter_q_values(self, q_values, evidence=0, method='binary'):
        # Per-action multiplier, one entry per Q-value; starts at all zeros.
        inferred = np.ndarray(shape=(len(q_values), ), dtype=float)
        inferred.fill(0)
        # Query every slice-1 node, conditioning every slice-0 node on state 0.
        variables = self.dbn.get_slice_nodes(1)
        ev = {node: 0 for node in self.dbn.get_slice_nodes(0)}
        # `evidence` is an integer id; 0 means "no extra evidence".
        if evidence != 0:
            self.set_evidence(ev, evidence)
        q = self.dbn_infer.query(variables=variables, evidence=ev)
        for variable in q.values():
            # variable.variables[0] is the (name, slice) tuple of the queried node.
            action = self.get_action_id(variable.variables[0])
            if method == 'binary':
                # Hard gate: allow the action iff P(state 1) is non-zero.
                inferred[action] = 1 if variable.values[1] > 0 else 0
            else:
                # Soft gate: scale by P(state 1).
                inferred[action] = variable.values[1]
        return q_values * inferred

    def get_action_id(self, action):
        # Map node name -> Q-value index; anything unrecognised falls to 3.
        if action[0] == 'Prompt':
            return 0
        elif action[0] == 'Reward':
            return 1
        elif action[0] == 'Abort':
            return 2
        return 3

    def set_evidence(self, evidence, id):
        # NOTE(review): only id == 1 is handled here -- possibly truncated;
        # confirm whether other ids were meant to set other nodes.
        if id == 1:
            evidence[("Prompt", 0)] = 1
class TestDynamicBayesianNetworkCreation(unittest.TestCase):
    """Unit tests for node/edge construction rules of DynamicBayesianNetwork."""

    def setUp(self):
        self.network = DynamicBayesianNetwork()

    def test_add_single_node(self):
        self.network.add_node('a')
        self.assertListEqual(self.network.nodes(), ['a'])

    def test_add_multiple_nodes(self):
        self.network.add_nodes_from(['a', 'b', 'c'])
        self.assertListEqual(sorted(self.network.nodes()), ['a', 'b', 'c'])

    def test_add_single_edge_with_timeslice(self):
        # Adding an edge in slice 0 implicitly mirrors it into slice 1.
        self.network.add_edge(('a', 0), ('b', 0))
        self.assertListEqual(sorted(self.network.edges()),
                             [(('a', 0), ('b', 0)), (('a', 1), ('b', 1))])
        self.assertListEqual(sorted(self.network.nodes()), ['a', 'b'])

    def test_add_edge_with_different_number_timeslice(self):
        # Slice indices are normalised back to the 0/1 two-slice template.
        self.network.add_edge(('a', 2), ('b', 2))
        self.assertListEqual(sorted(self.network.edges()),
                             [(('a', 0), ('b', 0)), (('a', 1), ('b', 1))])

    def test_add_edge_going_backward(self):
        self.assertRaises(ValueError, self.network.add_edge, ('a', 1),
                          ('b', 0))

    def test_add_edge_with_farther_timeslice(self):
        self.assertRaises(ValueError, self.network.add_edge, ('a', 2),
                          ('b', 4))

    def test_add_edge_with_self_loop(self):
        self.assertRaises(ValueError, self.network.add_edge, ('a', 0),
                          ('a', 0))

    def test_add_edge_with_varying_length(self):
        # Node tuples must be exactly (name, timeslice).
        self.assertRaises(ValueError, self.network.add_edge, ('a', 1, 1),
                          ('b', 2))
        self.assertRaises(ValueError, self.network.add_edge, ('b', 2),
                          ('a', 2, 3))

    def test_add_edge_with_closed_path(self):
        # Cycles inside a slice are rejected.
        self.assertRaises(ValueError, self.network.add_edges_from,
                          [(('a', 0), ('b', 0)), (('b', 0), ('c', 0)),
                           (('c', 0), ('a', 0))])

    def test_add_single_edge_without_timeslice(self):
        self.assertRaises(ValueError, self.network.add_edge, 'a', 'b')

    def test_add_single_edge_with_incorrect_timeslice(self):
        self.assertRaises(ValueError, self.network.add_edge, ('a', 'b'),
                          ('b', 'c'))

    def test_add_multiple_edges(self):
        self.network.add_edges_from([(('a', 0), ('b', 0)),
                                     (('a', 0), ('a', 1)),
                                     (('b', 0), ('b', 1))])
        self.assertListEqual(sorted(self.network.edges()),
                             [(('a', 0), ('a', 1)), (('a', 0), ('b', 0)),
                              (('a', 1), ('b', 1)), (('b', 0), ('b', 1))])

    def tearDown(self):
        del self.network
class TestDynamicBayesianNetworkMethods2(unittest.TestCase):
    """Tests for DynamicBayesianNetwork.check_model on valid and invalid CPDs."""

    def setUp(self):
        self.G = DynamicBayesianNetwork()
        self.G.add_edges_from([
            (("D", 0), ("G", 0)),
            (("I", 0), ("G", 0)),
            (("D", 0), ("D", 1)),
            (("I", 0), ("I", 1)),
        ])
        """
        G.edges()
        [(('I', 0), ('G', 0)), (('I', 0), ('I', 1)),
         (('D', 1), ('G', 1)), (('D', 0), ('G', 0)),
         (('D', 0), ('D', 1)), (('I', 1), ('G', 1))]
        """

    def test_check_model(self):
        # Well-formed CPDs for both slices: check_model must pass.
        grade_cpd = TabularCPD(
            ("G", 0), 3,
            values=[[0.3, 0.05, 0.7, 0.5],
                    [0.4, 0.25, 0.1, 0.3],
                    [0.3, 0.7, 0.2, 0.2]],
            evidence=[("D", 0), ("I", 0)],
            evidence_card=[2, 2],
        )
        d_i_cpd = TabularCPD(
            ("D", 1), 2,
            values=[[0.6, 0.3], [0.4, 0.7]],
            evidence=[("D", 0)],
            evidence_card=[2],
        )
        diff_cpd = TabularCPD(("D", 0), 2, values=[[0.6, 0.4]])
        intel_cpd = TabularCPD(("I", 0), 2, values=[[0.7, 0.3]])
        i_i_cpd = TabularCPD(
            ("I", 1), 2,
            values=[[0.5, 0.4], [0.5, 0.6]],
            evidence=[("I", 0)],
            evidence_card=[2],
        )
        grade_1_cpd = TabularCPD(
            ("G", 1), 3,
            values=[[0.3, 0.05, 0.8, 0.5],
                    [0.4, 0.25, 0.1, 0.3],
                    [0.3, 0.7, 0.1, 0.2]],
            evidence=[("D", 1), ("I", 1)],
            evidence_card=[2, 2],
        )
        self.G.add_cpds(grade_cpd, d_i_cpd, i_i_cpd)
        self.assertTrue(self.G.check_model())
        self.G.remove_cpds(grade_cpd, d_i_cpd, i_i_cpd)
        self.G.add_cpds(grade_1_cpd, diff_cpd, intel_cpd)
        self.assertTrue(self.G.check_model())

    def test_check_model1(self):
        # CPDs whose evidence disagrees with the graph structure must fail.
        diff_cpd = TabularCPD(
            ("D", 0), 3,
            values=[[0.3, 0.05, 0.7, 0.5],
                    [0.4, 0.25, 0.1, 0.3],
                    [0.3, 0.7, 0.2, 0.2]],
            evidence=[("G", 0), ("I", 0)],
            evidence_card=[2, 2],
        )
        self.G.add_cpds(diff_cpd)
        self.assertRaises(ValueError, self.G.check_model)
        self.G.remove_cpds(diff_cpd)
        grade_cpd = TabularCPD(
            ("G", 0), 2,
            values=[[0.6, 0.3], [0.4, 0.7]],
            evidence=[("D", 0)],
            evidence_card=[2],
        )
        self.G.add_cpds(grade_cpd)
        self.assertRaises(ValueError, self.G.check_model)
        self.G.remove_cpds(grade_cpd)
        diff_cpd = TabularCPD(
            ("D", 0), 2,
            values=[[0.6, 0.3], [0.4, 0.7]],
            evidence=[("D", 1)],
            evidence_card=[2],
        )
        self.G.add_cpds(diff_cpd)
        self.assertRaises(ValueError, self.G.check_model)
        self.G.remove_cpds(diff_cpd)
        grade_cpd = TabularCPD(
            ("G", 0), 3,
            values=[[0.3, 0.05, 0.8, 0.5],
                    [0.4, 0.25, 0.1, 0.3],
                    [0.3, 0.7, 0.1, 0.2]],
            evidence=[("D", 1), ("I", 1)],
            evidence_card=[2, 2],
        )
        self.G.add_cpds(grade_cpd)
        self.assertRaises(ValueError, self.G.check_model)
        self.G.remove_cpds(grade_cpd)
        grade_cpd = TabularCPD(
            ("G", 1), 3,
            values=[[0.3, 0.05, 0.8, 0.5],
                    [0.4, 0.25, 0.1, 0.3],
                    [0.3, 0.7, 0.1, 0.2]],
            evidence=[("D", 0), ("I", 0)],
            evidence_card=[2, 2],
        )
        self.G.add_cpds(grade_cpd)
        self.assertRaises(ValueError, self.G.check_model)
        self.G.remove_cpds(grade_cpd)
        grade_cpd = TabularCPD(
            ("G", 0), 2,
            values=[[0.6, 0.3], [0.4, 0.7]],
            evidence=[("D", 1)],
            evidence_card=[2],
        )
        self.G.add_cpds(grade_cpd)
        self.assertRaises(ValueError, self.G.check_model)
        self.G.remove_cpds(grade_cpd)

    def test_check_model2(self):
        # CPDs whose columns do not sum to one must fail validation.
        grade_cpd = TabularCPD(
            ("G", 0), 3,
            values=[[0.9, 0.05, 0.7, 0.5],
                    [0.4, 0.25, 0.1, 0.3],
                    [0.3, 0.7, 0.2, 0.2]],
            evidence=[("D", 0), ("I", 0)],
            evidence_card=[2, 2],
        )
        self.G.add_cpds(grade_cpd)
        self.assertRaises(ValueError, self.G.check_model)
        self.G.remove_cpds(grade_cpd)
        d_i_cpd = TabularCPD(
            ("D", 1), 2,
            values=[[0.1, 0.3], [0.4, 0.7]],
            evidence=[("D", 0)],
            evidence_card=[2],
        )
        self.G.add_cpds(d_i_cpd)
        self.assertRaises(ValueError, self.G.check_model)
        self.G.remove_cpds(d_i_cpd)
        diff_cpd = TabularCPD(("D", 0), 2, values=[[0.7, 0.4]])
        self.G.add_cpds(diff_cpd)
        self.assertRaises(ValueError, self.G.check_model)
        self.G.remove_cpds(diff_cpd)
        intel_cpd = TabularCPD(("I", 0), 2, values=[[1.7, 0.3]])
        self.G.add_cpds(intel_cpd)
        self.assertRaises(ValueError, self.G.check_model)
        self.G.remove_cpds(intel_cpd)
        i_i_cpd = TabularCPD(
            ("I", 1), 2,
            values=[[0.9, 0.4], [0.5, 0.6]],
            evidence=[("I", 0)],
            evidence_card=[2],
        )
        self.G.add_cpds(i_i_cpd)
        self.assertRaises(ValueError, self.G.check_model)
        self.G.remove_cpds(i_i_cpd)
        grade_1_cpd = TabularCPD(
            ("G", 1), 3,
            values=[[0.3, 0.05, 0.8, 0.5],
                    [0.4, 0.5, 0.1, 0.3],
                    [0.3, 0.7, 0.1, 0.2]],
            evidence=[("D", 1), ("I", 1)],
            evidence_card=[2, 2],
        )
        self.G.add_cpds(grade_1_cpd)
        self.assertRaises(ValueError, self.G.check_model)
        self.G.remove_cpds(grade_1_cpd)

    def tearDown(self):
        del self.G
class TestDynamicBayesianNetworkMethods(unittest.TestCase):
    """Tests for edge/node/CPD accessors of DynamicBayesianNetwork."""

    def setUp(self):
        self.network = DynamicBayesianNetwork()
        self.grade_cpd = TabularCPD(
            ('G', 0), 3,
            [[0.3, 0.05, 0.9, 0.5],
             [0.4, 0.25, 0.08, 0.3],
             [0.3, 0.7, 0.2, 0.2]],
            [('D', 0), ('I', 0)], [2, 2])
        self.d_i_cpd = TabularCPD(('D', 1), 2, [[0.6, 0.3], [0.4, 0.7]],
                                  [('D', 0)], 2)
        self.diff_cpd = TabularCPD(('D', 0), 2, [[0.6, 0.4]])
        self.intel_cpd = TabularCPD(('I', 0), 2, [[0.7, 0.3]])
        self.i_i_cpd = TabularCPD(('I', 1), 2, [[0.5, 0.4], [0.5, 0.6]],
                                  [('I', 0)], 2)
        self.grade_1_cpd = TabularCPD(
            ('G', 1), 3,
            [[0.3, 0.05, 0.9, 0.5],
             [0.4, 0.25, 0.08, 0.3],
             [0.3, 0.7, 0.2, 0.2]],
            [('D', 1), ('I', 1)], [2, 2])

    def test_get_intra_and_inter_edges(self):
        self.network.add_edges_from([(('a', 0), ('b', 0)),
                                     (('a', 0), ('a', 1)),
                                     (('b', 0), ('b', 1))])
        self.assertListEqual(sorted(self.network.get_intra_edges()),
                             [(('a', 0), ('b', 0))])
        self.assertListEqual(sorted(self.network.get_intra_edges(1)),
                             [(('a', 1), ('b', 1))])
        # Time slice must be a non-negative integer.
        self.assertRaises(ValueError, self.network.get_intra_edges, -1)
        self.assertRaises(ValueError, self.network.get_intra_edges, '-')
        self.assertListEqual(sorted(self.network.get_inter_edges()),
                             [(('a', 0), ('a', 1)), (('b', 0), ('b', 1))])

    def test_get_interface_nodes(self):
        self.network.add_edges_from([
            (('D', 0), ('G', 0)), (('I', 0), ('G', 0)),
            (('D', 0), ('D', 1)), (('I', 0), ('I', 1))
        ])
        self.assertListEqual(sorted(self.network.get_interface_nodes()),
                             [('D', 0), ('I', 0)])
        self.assertRaises(ValueError, self.network.get_interface_nodes, -1)
        self.assertRaises(ValueError, self.network.get_interface_nodes, '-')

    def test_get_slice_nodes(self):
        self.network.add_edges_from([
            (('D', 0), ('G', 0)), (('I', 0), ('G', 0)),
            (('D', 0), ('D', 1)), (('I', 0), ('I', 1))
        ])
        self.assertListEqual(sorted(self.network.get_slice_nodes()),
                             [('D', 0), ('G', 0), ('I', 0)])
        self.assertListEqual(sorted(self.network.get_slice_nodes(1)),
                             [('D', 1), ('G', 1), ('I', 1)])
        self.assertRaises(ValueError, self.network.get_slice_nodes, -1)
        self.assertRaises(ValueError, self.network.get_slice_nodes, '-')

    def test_add_single_cpds(self):
        self.network.add_edges_from([(('D', 0), ('G', 0)),
                                     (('I', 0), ('G', 0))])
        self.network.add_cpds(self.grade_cpd)
        self.assertListEqual(self.network.get_cpds(), [self.grade_cpd])

    def test_get_cpds(self):
        self.network.add_edges_from([
            (('D', 0), ('G', 0)), (('I', 0), ('G', 0)),
            (('D', 0), ('D', 1)), (('I', 0), ('I', 1))
        ])
        self.network.add_cpds(self.grade_cpd, self.d_i_cpd, self.diff_cpd,
                              self.intel_cpd, self.i_i_cpd)
        self.network.initialize_initial_state()
        # Default query returns the slice-0 CPDs only.
        self.assertEqual(set(self.network.get_cpds()),
                         set([self.diff_cpd, self.intel_cpd, self.grade_cpd]))
        self.assertEqual(
            self.network.get_cpds(time_slice=1)[0].variable, ('G', 1))

    def test_add_multiple_cpds(self):
        self.network.add_edges_from([
            (('D', 0), ('G', 0)), (('I', 0), ('G', 0)),
            (('D', 0), ('D', 1)), (('I', 0), ('I', 1))
        ])
        self.network.add_cpds(self.grade_cpd, self.d_i_cpd, self.diff_cpd,
                              self.intel_cpd, self.i_i_cpd)
        self.assertEqual(self.network.get_cpds(('G', 0)).variable, ('G', 0))
        self.assertEqual(self.network.get_cpds(('D', 1)).variable, ('D', 1))
        self.assertEqual(self.network.get_cpds(('D', 0)).variable, ('D', 0))
        self.assertEqual(self.network.get_cpds(('I', 0)).variable, ('I', 0))
        self.assertEqual(self.network.get_cpds(('I', 1)).variable, ('I', 1))

    def test_initialize_initial_state(self):
        self.network.add_nodes_from(['D', 'G', 'I', 'S', 'L'])
        self.network.add_edges_from([
            (('D', 0), ('G', 0)), (('I', 0), ('G', 0)),
            (('D', 0), ('D', 1)), (('I', 0), ('I', 1))
        ])
        self.network.add_cpds(self.grade_cpd, self.d_i_cpd, self.diff_cpd,
                              self.intel_cpd, self.i_i_cpd)
        self.network.initialize_initial_state()
        # The G CPD gets replicated into slice 1, giving six CPDs total.
        self.assertEqual(len(self.network.cpds), 6)
        self.assertEqual(self.network.get_cpds(('G', 1)).variable, ('G', 1))

    def test_moralize(self):
        self.network.add_edges_from(([(('D', 0), ('G', 0)),
                                      (('I', 0), ('G', 0))]))
        moral_graph = self.network.moralize()
        self.assertListEqual(hf.recursive_sorted(moral_graph.edges()),
                             [[('D', 0), ('G', 0)], [('D', 0), ('I', 0)],
                              [('D', 1), ('G', 1)], [('D', 1), ('I', 1)],
                              [('G', 0), ('I', 0)], [('G', 1), ('I', 1)]])

    def tearDown(self):
        del self.network
from pgmpy.models import DynamicBayesianNetwork
from pgmpy.factors.discrete import TabularCPD
from pgmpy.estimators import HillClimbSearchDBN, BicScore
import networkx as nx
import random as rand
import pandas as pd

# CREATES SIMULATED DBN MODEL
dbn = DynamicBayesianNetwork()
# Node Name Values
# I Subject Interest engaged, neutral, off
# A Subject Action response, no response
# R Robot Action prompt, fail, reward
# O Observation q values
dbn.add_nodes_from(['I', 'A', 'R', 'O'])
# Check diagram for details
# I -----------> I2
# | ------------^
# v /        |
# A ---> R -------
# |
# v
# O
# Intra-slice structure (I drives A and R; A drives O and R) plus the
# inter-slice edges from I, A and R into next-slice interest I1.
dbn.add_edges_from([(('I', 0), ('A', 0)), (('I', 0), ('R', 0)),
                    (('I', 0), ('I', 1)), (('A', 0), ('O', 0)),
                    (('A', 0), ('R', 0)), (('A', 0), ('I', 1)),
                    (('R', 0), ('I', 1))])
def setUp(self):
    """Create a fresh, empty DBN for each test."""
    self.network = DynamicBayesianNetwork()
def __init__(self):
    """Build the auxiliary DBN, the Pitagoras BayesianModel and its CPDs,
    validate the model and prepare a VariableElimination engine.

    Cleanup: removed a dead ``self.model = DynamicBayesianNetwork(...)``
    assignment that was immediately overwritten by the BayesianModel below,
    and a duplicated (('CA', 0), ('C', 0)) entry in the DBN edge list
    (add_edges_from de-duplicates, so behaviour is unchanged).
    """
    # Abbreviated two-slice DBN: CA/CT drive C and H, which drive P.
    self.dbn = DBN()
    self.dbn.add_edges_from(
        [
            (('CA', 0), ('C', 0)),
            (('CT', 0), ('H', 0)),
            (('CA', 0), ('H', 0)),
            (('C', 0), ('P', 0)),
            (('H', 0), ('P', 0)),
            (('CA', 0), ('CA', 1))
        ]
    )
    # Static model actually used for inference below.
    self.model = BayesianModel(
        [
            ('Classificacao_Angulos', 'Catetos'),
            ('Classificacao_Triangulos', 'Hipotenusa'),
            ('Classificacao_Angulos', 'Hipotenusa'),
            ('Catetos', 'Pitagoras'),
            ('Hipotenusa', 'Pitagoras'),
        ]
    )
    # Blank lines printed between CPD tables for readability.
    x = 4
    cpd_classificao_angulos = TabularCPD(
        variable='Classificacao_Angulos', variable_card=2,
        values=[[0.4], [0.6]]
    )
    print(cpd_classificao_angulos)
    print('\n' * x)
    cpd_classificao_triangulos = TabularCPD(
        variable='Classificacao_Triangulos', variable_card=2,
        values=[[0.3], [0.7]]
    )
    print(cpd_classificao_triangulos)
    print('\n' * x)
    cpd_catetos = TabularCPD(
        variable='Catetos', variable_card=2,
        values=[[0.9, 0.2], [0.1, 0.8]],
        evidence=['Classificacao_Angulos'], evidence_card=[2]
    )
    print(cpd_catetos)
    print('\n' * x)
    cpd_hipotenusa = TabularCPD(
        variable='Hipotenusa', variable_card=2,
        values=[[0.9, 0.2, 0.3, 0.1], [0.1, 0.8, 0.7, 0.9]],
        evidence=['Classificacao_Angulos', 'Classificacao_Triangulos'],
        evidence_card=[2, 2]
    )
    print(cpd_hipotenusa)
    print('\n' * x)
    cpd_pitagoras = TabularCPD(
        variable='Pitagoras', variable_card=2,
        values=[[0.9, 0.3, 0.3, 0.1], [0.1, 0.7, 0.7, 0.9]],
        evidence=['Catetos', 'Hipotenusa'], evidence_card=[2, 2]
    )
    print(cpd_pitagoras)
    print('\n' * x)
    self.model.add_cpds(
        cpd_classificao_angulos, cpd_classificao_triangulos,
        cpd_catetos, cpd_hipotenusa, cpd_pitagoras
    )
    # Raises if structure and CPDs disagree.
    self.model.check_model()
    self.inference = VariableElimination(self.model)
def setUp(self):
    """Build two small hidden-state DBNs and their DBNInference engines."""
    # Model 1: Z emits both X and Y; Z persists across slices.
    first = DynamicBayesianNetwork()
    first.add_edges_from([(('Z', 0), ('X', 0)),
                          (('Z', 0), ('Y', 0)),
                          (('Z', 0), ('Z', 1))])
    z_prior = TabularCPD(('Z', 0), 2, [[0.8, 0.2]])
    x_emit = TabularCPD(('X', 0), 2, [[0.9, 0.6], [0.1, 0.4]],
                        evidence=[('Z', 0)], evidence_card=[2])
    y_emit = TabularCPD(('Y', 0), 2, [[0.7, 0.2], [0.3, 0.8]],
                        evidence=[('Z', 0)], evidence_card=[2])
    z_trans = TabularCPD(('Z', 1), 2, [[0.9, 0.1], [0.1, 0.9]],
                         evidence=[('Z', 0)], evidence_card=[2])
    first.add_cpds(z_prior, z_trans, x_emit, y_emit)
    first.initialize_initial_state()
    self.dbn_inference_1 = DBNInference(first)

    # Model 2: a chain Z -> X -> Y; Z persists across slices.
    second = DynamicBayesianNetwork()
    second.add_edges_from([(('Z', 0), ('X', 0)),
                           (('X', 0), ('Y', 0)),
                           (('Z', 0), ('Z', 1))])
    z_prior_2 = TabularCPD(('Z', 0), 2, [[0.5, 0.5]])
    x_emit_2 = TabularCPD(('X', 0), 2, [[0.6, 0.9], [0.4, 0.1]],
                          evidence=[('Z', 0)], evidence_card=[2])
    y_emit_2 = TabularCPD(('Y', 0), 2, [[0.2, 0.3], [0.8, 0.7]],
                          evidence=[('X', 0)], evidence_card=[2])
    z_trans_2 = TabularCPD(('Z', 1), 2, [[0.4, 0.7], [0.6, 0.3]],
                           evidence=[('Z', 0)], evidence_card=[2])
    second.add_cpds(x_emit_2, y_emit_2, z_trans_2, z_prior_2)
    second.initialize_initial_state()
    self.dbn_inference_2 = DBNInference(second)
def setUp(self): self.G = DynamicBayesianNetwork() self.G.add_edges_from( [(('D', 0), ('G', 0)), (('I', 0), ('G', 0)), (('D', 0), ('D', 1)), (('I', 0), ('I', 1))]) """
class TestDynamicBayesianNetworkMethods(unittest.TestCase):
    """Tests for DBN accessors (edges, slice nodes, CPDs) and copy semantics."""

    def setUp(self):
        self.network = DynamicBayesianNetwork()
        self.grade_cpd = TabularCPD(
            ("G", 0), 3,
            values=[[0.3, 0.05, 0.8, 0.5],
                    [0.4, 0.25, 0.1, 0.3],
                    [0.3, 0.7, 0.1, 0.2]],
            evidence=[("D", 0), ("I", 0)],
            evidence_card=[2, 2],
        )
        self.d_i_cpd = TabularCPD(
            ("D", 1), 2,
            values=[[0.6, 0.3], [0.4, 0.7]],
            evidence=[("D", 0)],
            evidence_card=[2],
        )
        self.diff_cpd = TabularCPD(("D", 0), 2, values=[[0.6, 0.4]])
        self.intel_cpd = TabularCPD(("I", 0), 2, values=[[0.7, 0.3]])
        self.i_i_cpd = TabularCPD(
            ("I", 1), 2,
            values=[[0.5, 0.4], [0.5, 0.6]],
            evidence=[("I", 0)],
            evidence_card=[2],
        )
        self.grade_1_cpd = TabularCPD(
            ("G", 1), 3,
            values=[[0.3, 0.05, 0.8, 0.5],
                    [0.4, 0.25, 0.1, 0.3],
                    [0.3, 0.7, 0.1, 0.2]],
            evidence=[("D", 1), ("I", 1)],
            evidence_card=[2, 2],
        )

    def test_get_intra_and_inter_edges(self):
        self.network.add_edges_from([(("a", 0), ("b", 0)),
                                     (("a", 0), ("a", 1)),
                                     (("b", 0), ("b", 1))])
        self.assertListEqual(sorted(self.network.get_intra_edges()),
                             [(("a", 0), ("b", 0))])
        self.assertListEqual(sorted(self.network.get_intra_edges(1)),
                             [(("a", 1), ("b", 1))])
        # Time slice must be a non-negative integer.
        self.assertRaises(ValueError, self.network.get_intra_edges, -1)
        self.assertRaises(ValueError, self.network.get_intra_edges, "-")
        self.assertListEqual(
            sorted(self.network.get_inter_edges()),
            [(("a", 0), ("a", 1)), (("b", 0), ("b", 1))],
        )

    def test_get_interface_nodes(self):
        self.network.add_edges_from([
            (("D", 0), ("G", 0)),
            (("I", 0), ("G", 0)),
            (("D", 0), ("D", 1)),
            (("I", 0), ("I", 1)),
        ])
        self.assertListEqual(sorted(self.network.get_interface_nodes()),
                             [("D", 0), ("I", 0)])
        self.assertRaises(ValueError, self.network.get_interface_nodes, -1)
        self.assertRaises(ValueError, self.network.get_interface_nodes, "-")

    def test_get_slice_nodes(self):
        self.network.add_edges_from([
            (("D", 0), ("G", 0)),
            (("I", 0), ("G", 0)),
            (("D", 0), ("D", 1)),
            (("I", 0), ("I", 1)),
        ])
        self.assertListEqual(sorted(self.network.get_slice_nodes()),
                             [("D", 0), ("G", 0), ("I", 0)])
        self.assertListEqual(sorted(self.network.get_slice_nodes(1)),
                             [("D", 1), ("G", 1), ("I", 1)])
        self.assertRaises(ValueError, self.network.get_slice_nodes, -1)
        self.assertRaises(ValueError, self.network.get_slice_nodes, "-")

    def test_add_single_cpds(self):
        self.network.add_edges_from([(("D", 0), ("G", 0)),
                                     (("I", 0), ("G", 0))])
        self.network.add_cpds(self.grade_cpd)
        self.assertListEqual(self.network.get_cpds(), [self.grade_cpd])

    def test_get_cpds(self):
        self.network.add_edges_from([
            (("D", 0), ("G", 0)),
            (("I", 0), ("G", 0)),
            (("D", 0), ("D", 1)),
            (("I", 0), ("I", 1)),
        ])
        self.network.add_cpds(self.grade_cpd, self.d_i_cpd, self.diff_cpd,
                              self.intel_cpd, self.i_i_cpd)
        self.network.initialize_initial_state()
        # Default query returns slice-0 CPDs only.
        self.assertEqual(
            set(self.network.get_cpds()),
            set([self.diff_cpd, self.intel_cpd, self.grade_cpd]),
        )
        self.assertEqual(
            {cpd.variable for cpd in self.network.get_cpds(time_slice=1)},
            {("D", 1), ("I", 1), ("G", 1)},
        )

    def test_add_multiple_cpds(self):
        self.network.add_edges_from([
            (("D", 0), ("G", 0)),
            (("I", 0), ("G", 0)),
            (("D", 0), ("D", 1)),
            (("I", 0), ("I", 1)),
        ])
        self.network.add_cpds(self.grade_cpd, self.d_i_cpd, self.diff_cpd,
                              self.intel_cpd, self.i_i_cpd)
        self.assertEqual(self.network.get_cpds(("G", 0)).variable, ("G", 0))
        self.assertEqual(self.network.get_cpds(("D", 1)).variable, ("D", 1))
        self.assertEqual(self.network.get_cpds(("D", 0)).variable, ("D", 0))
        self.assertEqual(self.network.get_cpds(("I", 0)).variable, ("I", 0))
        self.assertEqual(self.network.get_cpds(("I", 1)).variable, ("I", 1))

    def test_initialize_initial_state(self):
        self.network.add_nodes_from(["D", "G", "I", "S", "L"])
        self.network.add_edges_from([
            (("D", 0), ("G", 0)),
            (("I", 0), ("G", 0)),
            (("D", 0), ("D", 1)),
            (("I", 0), ("I", 1)),
        ])
        self.network.add_cpds(self.grade_cpd, self.d_i_cpd, self.diff_cpd,
                              self.intel_cpd, self.i_i_cpd)
        self.network.initialize_initial_state()
        # The G CPD is replicated into slice 1, giving six CPDs total.
        self.assertEqual(len(self.network.cpds), 6)
        self.assertEqual(self.network.get_cpds(("G", 1)).variable, ("G", 1))

    def test_moralize(self):
        self.network.add_edges_from(([(("D", 0), ("G", 0)),
                                      (("I", 0), ("G", 0))]))
        moral_graph = self.network.moralize()
        self.assertListEqual(
            hf.recursive_sorted(moral_graph.edges()),
            [
                [("D", 0), ("G", 0)],
                [("D", 0), ("I", 0)],
                [("D", 1), ("G", 1)],
                [("D", 1), ("I", 1)],
                [("G", 0), ("I", 0)],
                [("G", 1), ("I", 1)],
            ],
        )

    def test_copy(self):
        self.network.add_edges_from([
            (("D", 0), ("G", 0)),
            (("I", 0), ("G", 0)),
            (("D", 0), ("D", 1)),
            (("I", 0), ("I", 1)),
        ])
        cpd = TabularCPD(
            ("G", 0), 3,
            values=[[0.3, 0.05, 0.8, 0.5],
                    [0.4, 0.25, 0.1, 0.3],
                    [0.3, 0.7, 0.1, 0.2]],
            evidence=[("D", 0), ("I", 0)],
            evidence_card=[2, 2],
        )
        self.network.add_cpds(cpd)
        copy = self.network.copy()
        # A copy starts out equal to the original in every accessor.
        self.assertIsInstance(copy, DynamicBayesianNetwork)
        self.assertListEqual(sorted(self.network._nodes()),
                             sorted(copy._nodes()))
        self.assertListEqual(sorted(self.network.edges()),
                             sorted(copy.edges()))
        self.assertListEqual(self.network.get_cpds(), copy.get_cpds())
        self.assertListEqual(sorted(self.network.get_intra_edges()),
                             sorted(copy.get_intra_edges()))
        self.assertListEqual(sorted(self.network.get_inter_edges()),
                             sorted(copy.get_inter_edges()))
        self.assertListEqual(sorted(self.network.get_slice_nodes()),
                             sorted(copy.get_slice_nodes()))
        # Mutating the copy's CPD must not touch the original (deep copy).
        copy.cpds[0].values = np.array([[0.4, 0.05, 0.3, 0.5],
                                        [0.3, 0.25, 0.5, 0.3],
                                        [0.3, 0.7, 0.2, 0.2]])
        self.assertNotEqual(self.network.get_cpds(), copy.get_cpds())
        # Divergent mutations on each side must stay independent.
        self.network.add_cpds(self.i_i_cpd, self.d_i_cpd)
        copy.add_cpds(self.diff_cpd, self.intel_cpd)
        self.network.add_node("A")
        copy.add_node("Z")
        self.network.add_edge(("A", 0), ("D", 0))
        copy.add_edge(("Z", 0), ("D", 0))
        self.assertNotEqual(sorted(self.network._nodes()),
                            sorted(copy._nodes()))
        self.assertNotEqual(sorted(self.network.edges()),
                            sorted(copy.edges()))
        self.assertNotEqual(self.network.get_cpds(), copy.get_cpds())
        self.assertNotEqual(sorted(self.network.get_intra_edges()),
                            sorted(copy.get_intra_edges()))
        self.assertListEqual(sorted(self.network.get_inter_edges()),
                            sorted(copy.get_inter_edges()))
        self.assertNotEqual(sorted(self.network.get_slice_nodes()),
                            sorted(copy.get_slice_nodes()))
        self.network.add_edge(("A", 0), ("D", 1))
        copy.add_edge(("Z", 0), ("D", 1))
        self.assertNotEqual(sorted(self.network.get_inter_edges()),
                            sorted(copy.get_inter_edges()))

    def tearDown(self):
        del self.network
from pgmpy.models import DynamicBayesianNetwork as DBN
from pgmpy.factors.discrete import TabularCPD
from pgmpy.factors.continuous import LinearGaussianCPD

# Student 2-TBN: D/I drive G inside a slice, D and I persist across slices.
dbn = DBN()
dbn.add_edges_from([(('D', 0), ('G', 0)), (('I', 0), ('G', 0)),
                    (('D', 0), ('D', 1)), (('I', 0), ('I', 1))])

# FIX: the original values ([..0.9..],[..0.8, 0.03..],[..0.02..]) had columns
# summing to 1.72 and 0.73 — not a valid CPD.  Replaced with the normalized
# grade CPD used by every other snippet in this file (columns sum to 1).
grade_cpd = TabularCPD(
    ('G', 0), 3,
    [[0.3, 0.05, 0.8, 0.5],
     [0.4, 0.25, 0.1, 0.3],
     [0.3, 0.7, 0.1, 0.2]],
    evidence=[('I', 0), ('D', 0)], evidence_card=[2, 2])
print('grade_cpd', grade_cpd)

d_i_cpd = TabularCPD(('D', 1), 2, [[0.6, 0.3], [0.4, 0.7]],
                     evidence=[('D', 0)], evidence_card=[2])
diff_cpd = TabularCPD(('D', 0), 2, [[0.6, 0.4]])
intel_cpd = TabularCPD(('I', 0), 2, [[0.7, 0.3]])
i_i_cpd = TabularCPD(('I', 1), 2, [[0.5, 0.4], [0.5, 0.6]],
                     evidence=[('I', 0)], evidence_card=[2])

# Trust / Performance / Extraneous-interaction part of the network.
dbn.add_edges_from([(('T', 0), ('T', 1)), (('P', 0), ('T', 1)),
                    (('P', 1), ('T', 1)), (('T', 0), ('I', 1)),
                    (('T', 1), ('I', 1)), (('E', 1), ('I', 1))])
trust_cpd = LinearGaussianCPD('T', [0.2, -2, 3, 7], 9.6, ['T', 'P', 'X3'])
# interaction_cpd =
# extranous_interaction_cpd =
# NOTE(review): this snippet is stored whitespace-collapsed on a single line
# and is TRUNCATED — the final `ct_cpd = TabularCPD(...` call never closes,
# so the missing tail must be recovered from the original source before this
# can run.  It builds a 2-TBN where S(t)/E(t)/C(t) are parents of V(t) and
# V(t) persists across slices; left byte-identical because completing the
# cut-off statement would be guesswork.
##Project Representation: from pgmpy.models import DynamicBayesianNetwork as DBN from pgmpy.factors.discrete import TabularCPD from pgmpy.inference import DBNInference import networkx as nx import matplotlib.pyplot as plt dbn = DBN() # Vt ,vt-1 , st ,ct,et #dbn.add_edges_from([(('D', 0),('G', 0)),(('I', 0),('G', 0)),(('D', 0),('D', 1)),(('I', 0),('I', 1))]) #dbn.add_edges_from([(('V(t-1)', 0),('V(t-1)', 1)),(('V(t-1)', 0),('S(t-1)', 0)),(('V(t-1)', 0),('E(t-1)', 0)),(('V(t-1)', 0),('C(t-1)', 0)),(('V(t)', 1),('S(t)', 1)),(('V(t)', 1),('E(t)', 1)),(('V(t)', 1),('C(t)', 1))]) #dbn.add_edges_from([(('V(t)', 0),('V(t)', 1)),(('V(t)', 0),('S(t)', 0)),(('V(t)', 0),('E(t)', 0)),(('V(t)', 0),('C(t)', 0))]) dbn.add_edges_from([(('V(t)', 0), ('V(t)', 1)), (('S(t)', 0), ('V(t)', 0)), (('E(t)', 0), ('V(t)', 0)), (('C(t)', 0), ('V(t)', 0))]) Vt_1_cpd = TabularCPD(('V(t)', 0), 2, [[0.506257, 0.493743]]) st_cpd = TabularCPD(('S(t)', 1), 2, [[0.989, 0.658], [0.0103, 0.3412]], evidence=[('V(t)', 0)], evidence_card=[2]) et_cpd = TabularCPD(('E(t)', 1), 2, [[0.9828, 0.4068], [0.017108, 0.59317]], evidence=[('V(t)', 0)], evidence_card=[2]) ct_cpd = TabularCPD(('C(t)', 1), 2, [[0.9388, 0.82414], [0.0611017, 0.17585]],
def process(self):
    """Fit two 2-TBN models from the loaded CSV, infer the distributions of
    all non-evidence nodes for three time frames and plot them.

    Python-2 constructs of the original (`print` statements, `.iteritems()`,
    positional ``drop(..., 1)``) were converted to their Python-3 / keyword
    equivalents; logic is unchanged.
    """

    def add_cpds_to_model(model, data):
        # Fit one CPD per node of both slices with MLE and attach them.
        mle = MaximumLikelihoodEstimator(model, data)
        cpds = []
        nodes = model.get_slice_nodes(0) + model.get_slice_nodes(1)
        for node in nodes:
            cpds.append(mle.estimate_cpd(node))
        model.add_cpds(*cpds)
        model.state_names = mle.state_names

    def calculate_distribution_nodes_input():
        # Evidence nodes get a degenerate (one-hot) distribution for all
        # three frames and are removed from the query lists.
        for key in list(pr.keys()):
            Distribution[key] = [
                1 - abs(np.sign(pr[key] - i)) for i in range(5)
            ]
            Distribution[(key[0], 1)] = Distribution[key]
            Distribution[(key[0], 2)] = Distribution[key]
            nodes.remove(key)
            nodes2.remove((key[0], 1))

    def query_time_frame_1():
        print('query 1', pr, nodes)
        query = infer.query(nodes, evidence=pr)
        for key, value in query.items():
            Distribution[key] = value.values

    def query_time_frame_2():
        global pr2
        # Frame-1 evidence re-expressed on slice 1.
        for key, value in pr.items():
            pr2[(key[0], 1)] = pr[key]
        print('query 2', pr2, nodes2)
        query = infer.query(nodes2, evidence=pr2)
        for key, value in query.items():
            Distribution[key] = value.values

    def query_time_frame_3():
        # Dynamic Bayesian Network only supports 2 time slices, so build a
        # second DBN over the time-2/time-3 columns to query frame 3.
        data23 = self.data.rename(columns={
            'DPQ2': ('DPQ', 0), 'C2': ('C', 0), 'TQ2': ('TQ', 0),
            'OU2': ('OU', 0), 'DI2': ('DI', 0), 'DFT2': ('DFT', 0),
            'RD2': ('RD', 0), 'DFO2': ('DFO', 0),
            'DPQ3': ('DPQ', 1), 'C3': ('C', 1), 'TQ3': ('TQ', 1),
            'OU3': ('OU', 1), 'DI3': ('DI', 1), 'DFT3': ('DFT', 1),
            'RD3': ('RD', 1), 'DFO3': ('DFO', 1)})
        data23 = data23.drop(
            ['DPQ', 'C', 'TQ', 'OU', 'DI', 'DFT', 'RD', 'DFO'], axis=1)
        self.model23 = DynamicBayesianNetwork()
        self.model23.add_edges_from([
            (('DPQ', 0), ('DI', 0)), (('C', 0), ('DI', 0)),
            (('TQ', 0), ('DFT', 0)), (('DI', 0), ('DFT', 0)),
            (('DI', 0), ('RD', 0)), (('DFT', 0), ('RD', 0)),
            (('RD', 0), ('DFO', 0)), (('OU', 0), ('DFO', 0)),
            (('DPQ', 0), ('DPQ', 1)), (('C', 0), ('C', 1)),
            (('TQ', 0), ('TQ', 1)), (('OU', 0), ('OU', 1)),
            (('RD', 0), ('DI', 1))])
        add_cpds_to_model(self.model23, data23)
        # Save slice-1 state names as frame-2 names for plotting.
        for key, names in self.model23.state_names.items():
            if key[1] == 1:
                self.state_names[(key[0], 2)] = names
        pr3 = pr2
        nodes3 = nodes2
        print('query 3', pr3, nodes3)
        # pr = {('DPQ', 1): 1,...} | nodes = [('DPQ', 1),...]
        infer3 = DBNInferenceRewritten(self.model23)
        query = infer3.query(nodes3, evidence=pr3)
        for key, value in query.items():
            Distribution[(key[0], 2)] = value.values

    # sketch number axis with max values = max values DI + 1
    def stretch_distributions(max_value_di):
        # Pad every numeric node's axis (the four categorical nodes are
        # excluded) up to max_value_di + 1, zero-filling the distribution.
        remove_nodes = []
        for time in range(3):
            remove_nodes.append(('DPQ', time))
            remove_nodes.append(('C', time))
            remove_nodes.append(('OU', time))
            remove_nodes.append(('TQ', time))
        ns = nodes + nodes2 + [(node[0], 2) for node in nodes]
        for key in ns:
            if key not in remove_nodes:
                if self.state_names[key][-1] == max_value_di:
                    self.state_names[key].append(max_value_di + 1)
                    Distribution[key] = np.append(Distribution[key], [0])
                elif self.state_names[key][-1] < max_value_di:
                    self.state_names[key].extend(
                        [self.state_names[key][-1] + 1, max_value_di + 1])
                    Distribution[key] = np.append(Distribution[key], [0, 0])

    def standarlize_distribution():
        # use when data size is too small and length(DPQ or C or TQ or OU) < 5
        # => error when draw graph
        ns = ['DPQ', 'TQ', 'C', 'OU']
        for node in ns:
            exist_in_pr = False
            for key in pr.keys():
                if key[0] == node:
                    exist_in_pr = True
                    break
            if not exist_in_pr:
                for index in range(5):
                    if index not in self.model.state_names[(node, 0)]:
                        Distribution[(node, 0)].insert(index, 0)
                        Distribution[(node, 1)].insert(index, 0)
                        Distribution[(node, 2)].insert(index, 0)

    # Re-read the CSV only when the chosen file changed.
    if self.history_file != self.file_path:
        self.data = pd.read_csv(self.file_path)  # "fisrm.csv"
        self.data_size = len(self.data)
        self.history_file = self.file_path
    self.state_names = {}
    self.model = DynamicBayesianNetwork()
    self.model.add_edges_from([
        (('DPQ', 0), ('DI', 0)), (('C', 0), ('DI', 0)),
        (('TQ', 0), ('DFT', 0)), (('DI', 0), ('DFT', 0)),
        (('DI', 0), ('RD', 0)), (('DFT', 0), ('RD', 0)),
        (('RD', 0), ('DFO', 0)), (('OU', 0), ('DFO', 0)),
        (('DPQ', 0), ('DPQ', 1)), (('C', 0), ('C', 1)),
        (('TQ', 0), ('TQ', 1)), (('OU', 0), ('OU', 1)),
        (('RD', 0), ('DI', 1))])
    global pr
    global pr2
    global pr3
    pr = self.processBox()
    pr2 = {}
    pr3 = {}
    nodes = self.model.get_slice_nodes(0)
    nodes2 = self.model.get_slice_nodes(1)
    Distribution = {}
    # Rename and drop data columns to use MaximumLikelyHood
    data12 = self.data.rename(columns={
        'DPQ': ('DPQ', 0), 'C': ('C', 0), 'TQ': ('TQ', 0),
        'DI': ('DI', 0), 'DFT': ('DFT', 0), 'RD': ('RD', 0),
        'DFO': ('DFO', 0), 'OU': ('OU', 0),
        'DPQ2': ('DPQ', 1), 'C2': ('C', 1), 'TQ2': ('TQ', 1),
        'OU2': ('OU', 1), 'DI2': ('DI', 1), 'DFT2': ('DFT', 1),
        'RD2': ('RD', 1), 'DFO2': ('DFO', 1)})
    data12 = data12.drop(
        ['DPQ3', 'C3', 'TQ3', 'OU3', 'DI3', 'DFT3', 'RD3', 'DFO3'], axis=1)
    add_cpds_to_model(self.model, data12)
    self.state_names = self.model.state_names
    infer = DBNInferenceRewritten(self.model)
    calculate_distribution_nodes_input()
    query_time_frame_1()
    query_time_frame_2()
    query_time_frame_3()
    max_value_di = self.state_names[('DI', 0)][-1]  # array has been sorted
    stretch_distributions(max_value_di)
    # standarlize_distribution()
    self.draw_subplots(Distribution, 0, 1, max_value_di)
    self.draw_subplots(Distribution, 1, 2, max_value_di)
    self.draw_subplots(Distribution, 0, 2, max_value_di)
    plt.show()
class TestDynamicBayesianNetworkCreation(unittest.TestCase):
    """Construction-time behaviour of DynamicBayesianNetwork."""

    def setUp(self):
        self.network = DynamicBayesianNetwork()

    def test_add_single_node(self):
        self.network.add_node("a")
        self.assertListEqual(self.network._nodes(), ["a"])

    def test_add_multiple_nodes(self):
        self.network.add_nodes_from(["a", "b", "c"])
        self.assertListEqual(sorted(self.network._nodes()), ["a", "b", "c"])

    def test_add_single_edge_with_timeslice(self):
        # An intra-slice edge is mirrored into slice 1 automatically.
        self.network.add_edge(("a", 0), ("b", 0))
        self.assertListEqual(
            sorted(self.network.edges()),
            [(("a", 0), ("b", 0)), (("a", 1), ("b", 1))],
        )
        self.assertListEqual(sorted(self.network._nodes()), ["a", "b"])

    def test_add_edge_with_different_number_timeslice(self):
        # Slice indices are normalised back to 0/1 whatever the input.
        self.network.add_edge(("a", 2), ("b", 2))
        self.assertListEqual(
            sorted(self.network.edges()),
            [(("a", 0), ("b", 0)), (("a", 1), ("b", 1))],
        )

    def test_add_edge_going_backward(self):
        self.assertRaises(
            NotImplementedError, self.network.add_edge, ("a", 1), ("b", 0)
        )

    def test_add_edge_with_farther_timeslice(self):
        self.assertRaises(ValueError, self.network.add_edge, ("a", 2), ("b", 4))

    def test_add_edge_with_self_loop(self):
        self.assertRaises(ValueError, self.network.add_edge, ("a", 0), ("a", 0))

    def test_add_edge_with_varying_length(self):
        self.assertRaises(ValueError, self.network.add_edge, ("a", 1, 1), ("b", 2))
        self.assertRaises(ValueError, self.network.add_edge, ("b", 2), ("a", 2, 3))

    def test_add_edge_with_closed_path(self):
        self.assertRaises(
            ValueError,
            self.network.add_edges_from,
            [(("a", 0), ("b", 0)), (("b", 0), ("c", 0)), (("c", 0), ("a", 0))],
        )

    def test_add_single_edge_without_timeslice(self):
        self.assertRaises(ValueError, self.network.add_edge, "a", "b")

    def test_add_single_edge_with_incorrect_timeslice(self):
        self.assertRaises(ValueError, self.network.add_edge, ("a", "b"), ("b", "c"))

    def test_add_multiple_edges(self):
        self.network.add_edges_from(
            [(("a", 0), ("b", 0)), (("a", 0), ("a", 1)), (("b", 0), ("b", 1))]
        )
        self.assertListEqual(
            sorted(self.network.edges()),
            [
                (("a", 0), ("a", 1)),
                (("a", 0), ("b", 0)),
                (("a", 1), ("b", 1)),
                (("b", 0), ("b", 1)),
            ],
        )

    def tearDown(self):
        del self.network
import numpy as np
import pandas as pd
import sys
from pgmpy.models import DynamicBayesianNetwork
from pgmpy.estimators import BayesianEstimator
from pgmpy.inference import VariableElimination
import matplotlib.pyplot as plt

# Defect-propagation structure over three frames: every frame gets the same
# intra-slice edges; consecutive frames are linked through RD/TQ/OU.
model = DynamicBayesianNetwork()
list_edges = [(('DPQ', 0), ('DI', 0)), (('C', 0), ('DI', 0))]
for i in range(3):
    # Intra-frame dependencies of frame i.
    list_edges += [
        (('DI', i), ('DFT', i)),
        (('TQ', i), ('DFT', i)),
        (('DFT', i), ('RD', i)),
        (('RD', i), ('DFO', i)),
        (('OU', i), ('DFO', i)),
    ]
    if i == 2:
        break
    # Inter-frame links from frame i to frame i + 1.
    list_edges += [
        (('RD', i), ('DI', i + 1)),
        (('TQ', i), ('TQ', i + 1)),
        (('OU', i), ('OU', i + 1)),
    ]
model.add_edges_from(list_edges)
print(model.edges())
print(model.nodes())
class TestDynamicBayesianNetworkCreation(unittest.TestCase):
    """Construction behaviour (variant using the public nodes() accessor)."""

    def setUp(self):
        self.network = DynamicBayesianNetwork()

    def test_add_single_node(self):
        self.network.add_node('a')
        self.assertListEqual(self.network.nodes(), ['a'])

    def test_add_multiple_nodes(self):
        self.network.add_nodes_from(['a', 'b', 'c'])
        self.assertListEqual(sorted(self.network.nodes()), ['a', 'b', 'c'])

    def test_add_single_edge_with_timeslice(self):
        # Intra-slice edges are mirrored into slice 1.
        self.network.add_edge(('a', 0), ('b', 0))
        self.assertListEqual(
            sorted(self.network.edges()),
            [(('a', 0), ('b', 0)), (('a', 1), ('b', 1))])
        self.assertListEqual(sorted(self.network.nodes()), ['a', 'b'])

    def test_add_edge_with_different_number_timeslice(self):
        # Any slice index collapses back onto slices 0/1.
        self.network.add_edge(('a', 2), ('b', 2))
        self.assertListEqual(
            sorted(self.network.edges()),
            [(('a', 0), ('b', 0)), (('a', 1), ('b', 1))])

    def test_add_edge_going_backward(self):
        self.assertRaises(
            NotImplementedError, self.network.add_edge, ('a', 1), ('b', 0))

    def test_add_edge_with_farther_timeslice(self):
        self.assertRaises(ValueError, self.network.add_edge, ('a', 2), ('b', 4))

    def test_add_edge_with_self_loop(self):
        self.assertRaises(ValueError, self.network.add_edge, ('a', 0), ('a', 0))

    def test_add_edge_with_varying_length(self):
        self.assertRaises(ValueError, self.network.add_edge, ('a', 1, 1), ('b', 2))
        self.assertRaises(ValueError, self.network.add_edge, ('b', 2), ('a', 2, 3))

    def test_add_edge_with_closed_path(self):
        self.assertRaises(
            ValueError,
            self.network.add_edges_from,
            [(('a', 0), ('b', 0)), (('b', 0), ('c', 0)), (('c', 0), ('a', 0))])

    def test_add_single_edge_without_timeslice(self):
        self.assertRaises(ValueError, self.network.add_edge, 'a', 'b')

    def test_add_single_edge_with_incorrect_timeslice(self):
        self.assertRaises(ValueError, self.network.add_edge, ('a', 'b'), ('b', 'c'))

    def test_add_multiple_edges(self):
        self.network.add_edges_from(
            [(('a', 0), ('b', 0)), (('a', 0), ('a', 1)), (('b', 0), ('b', 1))])
        self.assertListEqual(
            sorted(self.network.edges()),
            [(('a', 0), ('a', 1)),
             (('a', 0), ('b', 0)),
             (('a', 1), ('b', 1)),
             (('b', 0), ('b', 1))])

    def tearDown(self):
        del self.network
class Application(Frame):
    """Tk front end: pick a CSV, set the observed input levels, run the
    three-frame DBN inference and plot the resulting distributions.

    Changes vs. the original: Python-2 `print`/`.iteritems()` converted to
    Python 3; the accidental trailing commas that assigned 1-tuples to every
    widget "text" option were removed; the 16 copy-pasted subplot stanzas
    and 4 copy-pasted combo-box readers/builders were folded into loops.
    All public method names, signatures and attribute names are unchanged.
    """

    # Ordinal levels shared by every categorical input box / bar chart.
    LEVELS = ['very low', 'low', 'medium', 'high', 'very high']

    def choosefile(self):
        """Ask for a data file and show the chosen path in the label."""
        self.file_path = tkFileDialog.askopenfilename()
        self.datafile_label["text"] = self.file_path

    def draw_subplots(self, Distribution, time1, time2, max_value_di):
        """Draw a 4x4 grid: rows 1-2 show frame `time1`, rows 3-4 `time2`.

        `max_value_di` is kept for interface compatibility; the axes were
        already stretched by the caller (it was unused in the original too).
        """
        # (node key, plot title) pairs; categorical nodes become bar charts,
        # numeric defect counts become line plots over their state names.
        bar_nodes = [
            ('DPQ', "design process quality"),
            ('C', "complexity"),
            ('OU', "operational usage"),
            ('TQ', "Test quality"),
        ]
        line_nodes = [
            ('DI', "defects inserted"),
            ('DFT', "defects found in testing"),
            ('RD', "residual defects"),
            ('DFO', "defects found in operation"),
        ]
        plt.figure()
        index = 1
        for time in (time1, time2):
            for node, title in bar_nodes:
                plt.subplot(4, 4, index)
                plt.bar([1, 2, 3, 4, 5], Distribution[(node, time)])
                plt.xticks([1, 2, 3, 4, 5], self.LEVELS)
                plt.title(title + " " + str(time + 1))
                plt.ylabel('probability')
                index += 1
            for node, title in line_nodes:
                plt.subplot(4, 4, index)
                plt.plot(self.state_names[(node, time)],
                         Distribution[(node, time)])
                plt.title(title + " " + str(time + 1))
                plt.ylabel('probability')
                index += 1
        plt.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95,
                            hspace=0.4, wspace=0.4)

    def processBox(self):
        """Read the four combo boxes into an evidence dict.

        Returns {(node, 0): level_index} for every box not left at
        'unknown'.  The original's delete-if-present branch for 'unknown'
        was dead code (pr starts empty and each key is set at most once),
        so leaving the node out has the identical effect.
        """
        if not hasattr(self, 'file_path'):
            print("chua chon file")  # Vietnamese: "no file chosen yet"
            exit()
        pr = {}
        boxes = [
            ('DPQ', self.dpq_box),
            ('C', self.c_box),
            ('TQ', self.tq_box),
            ('OU', self.ou_box),
        ]
        for name, box in boxes:
            value = box.get()
            if value in self.LEVELS:
                pr[(name, 0)] = self.LEVELS.index(value)
        return pr

    def process(self):
        """Fit the 2-TBN models, infer three time frames and plot them."""

        def add_cpds_to_model(model, data):
            # Fit one CPD per node of both slices with MLE and attach them.
            mle = MaximumLikelihoodEstimator(model, data)
            cpds = []
            nodes = model.get_slice_nodes(0) + model.get_slice_nodes(1)
            for node in nodes:
                cpds.append(mle.estimate_cpd(node))
            model.add_cpds(*cpds)
            model.state_names = mle.state_names

        def calculate_distribution_nodes_input():
            # Evidence nodes get a one-hot distribution for all three frames
            # and are removed from the query lists.
            for key in list(pr.keys()):
                Distribution[key] = [
                    1 - abs(np.sign(pr[key] - i)) for i in range(5)
                ]
                Distribution[(key[0], 1)] = Distribution[key]
                Distribution[(key[0], 2)] = Distribution[key]
                nodes.remove(key)
                nodes2.remove((key[0], 1))

        def query_time_frame_1():
            print('query 1', pr, nodes)
            query = infer.query(nodes, evidence=pr)
            for key, value in query.items():
                Distribution[key] = value.values

        def query_time_frame_2():
            global pr2
            for key, value in pr.items():
                pr2[(key[0], 1)] = pr[key]
            print('query 2', pr2, nodes2)
            query = infer.query(nodes2, evidence=pr2)
            for key, value in query.items():
                Distribution[key] = value.values

        def query_time_frame_3():
            # Dynamic Bayesian Network only supports 2 time slices, so build
            # a second DBN over the time-2/time-3 columns to query frame 3.
            data23 = self.data.rename(columns={
                'DPQ2': ('DPQ', 0), 'C2': ('C', 0), 'TQ2': ('TQ', 0),
                'OU2': ('OU', 0), 'DI2': ('DI', 0), 'DFT2': ('DFT', 0),
                'RD2': ('RD', 0), 'DFO2': ('DFO', 0),
                'DPQ3': ('DPQ', 1), 'C3': ('C', 1), 'TQ3': ('TQ', 1),
                'OU3': ('OU', 1), 'DI3': ('DI', 1), 'DFT3': ('DFT', 1),
                'RD3': ('RD', 1), 'DFO3': ('DFO', 1)})
            data23 = data23.drop(
                ['DPQ', 'C', 'TQ', 'OU', 'DI', 'DFT', 'RD', 'DFO'], axis=1)
            self.model23 = DynamicBayesianNetwork()
            self.model23.add_edges_from([
                (('DPQ', 0), ('DI', 0)), (('C', 0), ('DI', 0)),
                (('TQ', 0), ('DFT', 0)), (('DI', 0), ('DFT', 0)),
                (('DI', 0), ('RD', 0)), (('DFT', 0), ('RD', 0)),
                (('RD', 0), ('DFO', 0)), (('OU', 0), ('DFO', 0)),
                (('DPQ', 0), ('DPQ', 1)), (('C', 0), ('C', 1)),
                (('TQ', 0), ('TQ', 1)), (('OU', 0), ('OU', 1)),
                (('RD', 0), ('DI', 1))])
            add_cpds_to_model(self.model23, data23)
            # Save slice-1 state names as frame-2 names for plotting.
            for key, names in self.model23.state_names.items():
                if key[1] == 1:
                    self.state_names[(key[0], 2)] = names
            pr3 = pr2
            nodes3 = nodes2
            print('query 3', pr3, nodes3)
            infer3 = DBNInferenceRewritten(self.model23)
            query = infer3.query(nodes3, evidence=pr3)
            for key, value in query.items():
                Distribution[(key[0], 2)] = value.values

        # sketch number axis with max values = max values DI + 1
        def stretch_distributions(max_value_di):
            remove_nodes = []
            for time in range(3):
                remove_nodes.append(('DPQ', time))
                remove_nodes.append(('C', time))
                remove_nodes.append(('OU', time))
                remove_nodes.append(('TQ', time))
            ns = nodes + nodes2 + [(node[0], 2) for node in nodes]
            for key in ns:
                if key not in remove_nodes:
                    if self.state_names[key][-1] == max_value_di:
                        self.state_names[key].append(max_value_di + 1)
                        Distribution[key] = np.append(Distribution[key], [0])
                    elif self.state_names[key][-1] < max_value_di:
                        self.state_names[key].extend(
                            [self.state_names[key][-1] + 1, max_value_di + 1])
                        Distribution[key] = np.append(
                            Distribution[key], [0, 0])

        def standarlize_distribution():
            # use when data size is too small and length(DPQ or C or TQ or
            # OU) < 5 => error when draw graph
            ns = ['DPQ', 'TQ', 'C', 'OU']
            for node in ns:
                exist_in_pr = False
                for key in pr.keys():
                    if key[0] == node:
                        exist_in_pr = True
                        break
                if not exist_in_pr:
                    for index in range(5):
                        if index not in self.model.state_names[(node, 0)]:
                            Distribution[(node, 0)].insert(index, 0)
                            Distribution[(node, 1)].insert(index, 0)
                            Distribution[(node, 2)].insert(index, 0)

        # Re-read the CSV only when the chosen file changed.
        if self.history_file != self.file_path:
            self.data = pd.read_csv(self.file_path)  # "fisrm.csv"
            self.data_size = len(self.data)
            self.history_file = self.file_path
        self.state_names = {}
        self.model = DynamicBayesianNetwork()
        self.model.add_edges_from([
            (('DPQ', 0), ('DI', 0)), (('C', 0), ('DI', 0)),
            (('TQ', 0), ('DFT', 0)), (('DI', 0), ('DFT', 0)),
            (('DI', 0), ('RD', 0)), (('DFT', 0), ('RD', 0)),
            (('RD', 0), ('DFO', 0)), (('OU', 0), ('DFO', 0)),
            (('DPQ', 0), ('DPQ', 1)), (('C', 0), ('C', 1)),
            (('TQ', 0), ('TQ', 1)), (('OU', 0), ('OU', 1)),
            (('RD', 0), ('DI', 1))])
        global pr
        global pr2
        global pr3
        pr = self.processBox()
        pr2 = {}
        pr3 = {}
        nodes = self.model.get_slice_nodes(0)
        nodes2 = self.model.get_slice_nodes(1)
        Distribution = {}
        # Rename and drop data columns to use MaximumLikelyHood
        data12 = self.data.rename(columns={
            'DPQ': ('DPQ', 0), 'C': ('C', 0), 'TQ': ('TQ', 0),
            'DI': ('DI', 0), 'DFT': ('DFT', 0), 'RD': ('RD', 0),
            'DFO': ('DFO', 0), 'OU': ('OU', 0),
            'DPQ2': ('DPQ', 1), 'C2': ('C', 1), 'TQ2': ('TQ', 1),
            'OU2': ('OU', 1), 'DI2': ('DI', 1), 'DFT2': ('DFT', 1),
            'RD2': ('RD', 1), 'DFO2': ('DFO', 1)})
        data12 = data12.drop(
            ['DPQ3', 'C3', 'TQ3', 'OU3', 'DI3', 'DFT3', 'RD3', 'DFO3'],
            axis=1)
        add_cpds_to_model(self.model, data12)
        self.state_names = self.model.state_names
        infer = DBNInferenceRewritten(self.model)
        calculate_distribution_nodes_input()
        query_time_frame_1()
        query_time_frame_2()
        query_time_frame_3()
        max_value_di = self.state_names[('DI', 0)][-1]  # array has been sorted
        stretch_distributions(max_value_di)
        # standarlize_distribution()
        self.draw_subplots(Distribution, 0, 1, max_value_di)
        self.draw_subplots(Distribution, 1, 2, max_value_di)
        self.draw_subplots(Distribution, 0, 2, max_value_di)
        plt.show()

    def _make_level_box(self, column, pad_x, pad_y):
        """Create one 'unknown' + five-level combo box at row 2 / `column`.

        Returns (StringVar, Combobox) so the caller can keep both under the
        attribute names the rest of the class expects.
        """
        var = StringVar()
        box = ttk.Combobox(self, textvariable=var)
        box['values'] = ('unknown', 'very low', 'low', 'medium', 'high',
                         'very high')
        box.current(0)
        box.grid(row=2, column=column, padx=pad_x, pady=pad_y, sticky=W)
        return var, box

    def createWidgets(self):
        """Lay out labels, combo boxes and buttons on the grid.

        NOTE: the original assigned `"..." ,` (a 1-tuple) to every "text"
        option; the trailing commas were dropped so plain strings are shown.
        """
        pad_x = 5
        pad_y = 5
        self.firstlabel = Label(self)
        self.firstlabel["text"] = "Choose_file:__"
        # self.firstlabel.grid(row=0, column=1, padx=pad_x, pady=pad_y, sticky=W)
        self.datafile_label = Label(self)
        self.datafile_label["text"] = "no_file"
        self.datafile_label.grid(row=0, column=1, padx=pad_x, pady=pad_y,
                                 columnspan=3, sticky=W)
        self.choosefilebutton = Button(self)
        self.choosefilebutton["text"] = "Choose_data_file"
        self.choosefilebutton["command"] = self.choosefile
        self.choosefilebutton.grid(row=1, column=1, padx=pad_x, pady=pad_y,
                                   sticky=W)

        self.dpq_label = Label(self)
        self.dpq_label["text"] = "design_process_quality"
        self.dpq_label.grid(row=2, column=1, padx=pad_x, pady=pad_y, sticky=W)
        self.dpq_box_value, self.dpq_box = self._make_level_box(2, pad_x, pad_y)

        self.c_label = Label(self)
        self.c_label["text"] = "Complexity"
        self.c_label.grid(row=2, column=3, padx=pad_x, pady=pad_y, sticky=W)
        self.c_box_value, self.c_box = self._make_level_box(4, pad_x, pad_y)

        self.tq_label = Label(self)
        self.tq_label["text"] = "Test_quality"
        self.tq_label.grid(row=2, column=5, padx=pad_x, pady=pad_y, sticky=W)
        self.tq_box_value, self.tq_box = self._make_level_box(6, pad_x, pad_y)

        self.ou_label = Label(self)
        self.ou_label["text"] = "operational_usage"
        self.ou_label.grid(row=2, column=7, padx=pad_x, pady=pad_y, sticky=W)
        self.ou_box_value, self.ou_box = self._make_level_box(8, pad_x, pad_y)

        self.processbotton = Button(self)
        self.processbotton["text"] = "Process"
        self.processbotton["command"] = self.process
        self.processbotton.grid(row=3, column=1, padx=pad_x, pady=pad_y,
                                sticky=W)
        self.QUIT = Button(self)
        self.QUIT["text"] = "QUIT"
        self.QUIT["fg"] = "red"
        self.QUIT["command"] = self.quit
        self.QUIT.grid(row=3, column=2, padx=pad_x, pady=pad_y, sticky=W)

    def __init__(self, master=None):
        Frame.__init__(self, master)
        self.pack()
        self.createWidgets()
        # Remembered so process() only re-reads the CSV when the file changes.
        self.history_file = ''
from pgmpy.models import DynamicBayesianNetwork as DBN
from pgmpy.factors.discrete import TabularCPD
from pgmpy.inference import DBNInference
import sys
import os

# Command-line arguments: which time slice to query and which action id.
time_querying = sys.argv[1]
action_id = sys.argv[2]

# Construct a DBN object
dbn = DBN()

# Create the edges for this 2-TBN (two time slice bayesian network)
# For edges in the same time slice, you only need to provide their
# connections in the first slice
dbn.add_edges_from([(('goal', 0), ('action', 0)),
                    (('state', 0), ('action', 0)),
                    (('goal', 0), ('goal', 1))])

# Create the CPD (Conditional Probability Distribution) tables
# First, create the CPD tables for edges in the same time slice
goal_cpd = TabularCPD(('goal', 0), 4, [[0.25, 0.25, 0.25, 0.25]])
# Uniform prior over the 100 states; `[0.01] * 100` replaces the original
# literal list of one hundred identical values (identical contents).
state_cpd = TabularCPD(('state', 0), 100, [[0.01] * 100])
# NOTE(review): fragment — `values`, `curr_sample`, `final_samples` and the
# PROMPT/REWARD/ABORT constants are defined on earlier, unseen lines
# (presumably inside a per-row loop).  Only the Python-2 `print` statements
# were converted; everything else is unchanged.
if values[IX_CORRECT_ACTION] == PROMPT:
    curr_sample[('Prompt', 1)] = 1
elif values[IX_CORRECT_ACTION] == REWARD:
    curr_sample[('Reward', 1)] = 1
elif values[IX_CORRECT_ACTION] == ABORT:
    curr_sample[('Abort', 1)] = 1
final_samples.append(curr_sample)

# LEARNS STRUCTURE FROM DATA
print('Learning model')
data = pd.DataFrame(final_samples)
hc = HillClimbSearchDBN(data, scoring_method=BicScore(data))
# GIVE STRUCTURE LEARNING ALGORITHM A HINT OF THE STRUCTURE
nodes = hc.state_names.keys()
start = DynamicBayesianNetwork()
nodes = set(X[0] for X in nodes)
start.add_nodes_from_ts(nodes, [0, 1])
# start.add_edge(('P', 0), ('R', 0))
# start.add_edge(('P', 0), ('R', 1))
# start.add_edge(('P', 0), ('A', 0))
# start.add_edge(('P', 0), ('A', 1))
# start.add_edge(('P', 0), ('P', 1))
model = hc.estimate(start=start, tabu_length=10, max_indegree=2)

# LEARNS PARAMETERS FROM DATA
print('Learning parameters')
model.fit(data)
# model.fit(data, estimator=BayesianEstimator)
# FINALIZES MODEL
class TestDynamicBayesianNetworkMethods2(unittest.TestCase):
    """check_model() validation on a small two-slice student network."""

    def setUp(self):
        self.G = DynamicBayesianNetwork()
        self.G.add_edges_from(
            [(('D', 0), ('G', 0)), (('I', 0), ('G', 0)),
             (('D', 0), ('D', 1)), (('I', 0), ('I', 1))])

    """
    G.edges()
    [(('I', 0), ('G', 0)), (('I', 0), ('I', 1)), (('D', 1), ('G', 1)),
     (('D', 0), ('G', 0)), (('D', 0), ('D', 1)), (('I', 1), ('G', 1))]
    """

    def test_check_model(self):
        # Two alternative, individually consistent CPD sets both validate.
        grade_cpd = TabularCPD(
            ('G', 0), 3,
            [[0.3, 0.05, 0.7, 0.5],
             [0.4, 0.25, 0.1, 0.3],
             [0.3, 0.7, 0.2, 0.2]],
            [('D', 0), ('I', 0)], [2, 2])
        d_i_cpd = TabularCPD(('D', 1), 2, [[0.6, 0.3], [0.4, 0.7]],
                             [('D', 0)], 2)
        diff_cpd = TabularCPD(('D', 0), 2, [[0.6, 0.4]])
        intel_cpd = TabularCPD(('I', 0), 2, [[0.7, 0.3]])
        i_i_cpd = TabularCPD(('I', 1), 2, [[0.5, 0.4], [0.5, 0.6]],
                             [('I', 0)], 2)
        grade_1_cpd = TabularCPD(
            ('G', 1), 3,
            [[0.3, 0.05, 0.8, 0.5],
             [0.4, 0.25, 0.1, 0.3],
             [0.3, 0.7, 0.1, 0.2]],
            [('D', 1), ('I', 1)], [2, 2])
        self.G.add_cpds(grade_cpd, d_i_cpd, i_i_cpd)
        self.assertTrue(self.G.check_model())
        self.G.remove_cpds(grade_cpd, d_i_cpd, i_i_cpd)
        self.G.add_cpds(grade_1_cpd, diff_cpd, intel_cpd)
        self.assertTrue(self.G.check_model())

    def test_check_model1(self):
        # Every CPD below contradicts the graph structure, so check_model()
        # must raise ValueError each time.
        diff_cpd = TabularCPD(
            ('D', 0), 3,
            [[0.3, 0.05, 0.7, 0.5],
             [0.4, 0.25, 0.1, 0.3],
             [0.3, 0.7, 0.2, 0.2]],
            [('G', 0), ('I', 0)], [2, 2])
        self.G.add_cpds(diff_cpd)
        self.assertRaises(ValueError, self.G.check_model)
        self.G.remove_cpds(diff_cpd)

        grade_cpd = TabularCPD(('G', 0), 2, [[0.6, 0.3], [0.4, 0.7]],
                               [('D', 0)], 2)
        self.G.add_cpds(grade_cpd)
        self.assertRaises(ValueError, self.G.check_model)
        self.G.remove_cpds(grade_cpd)

        diff_cpd = TabularCPD(('D', 0), 2, [[0.6, 0.3], [0.4, 0.7]],
                              [('D', 1)], 2)
        self.G.add_cpds(diff_cpd)
        self.assertRaises(ValueError, self.G.check_model)
        self.G.remove_cpds(diff_cpd)

        grade_cpd = TabularCPD(
            ('G', 0), 3,
            [[0.3, 0.05, 0.8, 0.5],
             [0.4, 0.25, 0.1, 0.3],
             [0.3, 0.7, 0.1, 0.2]],
            [('D', 1), ('I', 1)], [2, 2])
        self.G.add_cpds(grade_cpd)
        self.assertRaises(ValueError, self.G.check_model)
        self.G.remove_cpds(grade_cpd)

        grade_cpd = TabularCPD(
            ('G', 1), 3,
            [[0.3, 0.05, 0.8, 0.5],
             [0.4, 0.25, 0.1, 0.3],
             [0.3, 0.7, 0.1, 0.2]],
            [('D', 0), ('I', 0)], [2, 2])
        self.G.add_cpds(grade_cpd)
        self.assertRaises(ValueError, self.G.check_model)
        self.G.remove_cpds(grade_cpd)

        grade_cpd = TabularCPD(('G', 0), 2, [[0.6, 0.3], [0.4, 0.7]],
                               [('D', 1)], 2)
        self.G.add_cpds(grade_cpd)
        self.assertRaises(ValueError, self.G.check_model)
        self.G.remove_cpds(grade_cpd)

    def test_check_model2(self):
        # Columns that do not sum to one must be rejected.
        grade_cpd = TabularCPD(
            ('G', 0), 3,
            [[0.9, 0.05, 0.7, 0.5],
             [0.4, 0.25, 0.1, 0.3],
             [0.3, 0.7, 0.2, 0.2]],
            [('D', 0), ('I', 0)], [2, 2])
        self.G.add_cpds(grade_cpd)
        self.assertRaises(ValueError, self.G.check_model)
        self.G.remove_cpds(grade_cpd)

        d_i_cpd = TabularCPD(('D', 1), 2, [[0.1, 0.3], [0.4, 0.7]],
                             [('D', 0)], 2)
        self.G.add_cpds(d_i_cpd)
        self.assertRaises(ValueError, self.G.check_model)
        self.G.remove_cpds(d_i_cpd)

        diff_cpd = TabularCPD(('D', 0), 2, [[0.7, 0.4]])
        self.G.add_cpds(diff_cpd)
        self.assertRaises(ValueError, self.G.check_model)
        self.G.remove_cpds(diff_cpd)

        intel_cpd = TabularCPD(('I', 0), 2, [[1.7, 0.3]])
        self.G.add_cpds(intel_cpd)
        self.assertRaises(ValueError, self.G.check_model)
        self.G.remove_cpds(intel_cpd)

        i_i_cpd = TabularCPD(('I', 1), 2, [[0.9, 0.4], [0.5, 0.6]],
                             [('I', 0)], 2)
        self.G.add_cpds(i_i_cpd)
        self.assertRaises(ValueError, self.G.check_model)
        self.G.remove_cpds(i_i_cpd)

        grade_1_cpd = TabularCPD(
            ('G', 1), 3,
            [[0.3, 0.05, 0.8, 0.5],
             [0.4, 0.5, 0.1, 0.3],
             [0.3, 0.7, 0.1, 0.2]],
            [('D', 1), ('I', 1)], [2, 2])
        self.G.add_cpds(grade_1_cpd)
        self.assertRaises(ValueError, self.G.check_model)
        self.G.remove_cpds(grade_1_cpd)

    def tearDown(self):
        del self.G
class TestDynamicBayesianNetworkMethods(unittest.TestCase):
    """Structural and CPD-accessor tests for DynamicBayesianNetwork."""

    def setUp(self):
        # Reusable fixture CPDs: binary D and I, 3-state grade G, for both
        # time slices of the 2-TBN.
        self.network = DynamicBayesianNetwork()
        self.grade_cpd = TabularCPD(('G', 0), 3,
                                    [[0.3, 0.05, 0.8, 0.5],
                                     [0.4, 0.25, 0.1, 0.3],
                                     [0.3, 0.7, 0.1, 0.2]],
                                    [('D', 0), ('I', 0)], [2, 2])
        self.d_i_cpd = TabularCPD(('D', 1), 2, [[0.6, 0.3], [0.4, 0.7]],
                                  [('D', 0)], 2)
        self.diff_cpd = TabularCPD(('D', 0), 2, [[0.6, 0.4]])
        self.intel_cpd = TabularCPD(('I', 0), 2, [[0.7, 0.3]])
        self.i_i_cpd = TabularCPD(('I', 1), 2, [[0.5, 0.4], [0.5, 0.6]],
                                  [('I', 0)], 2)
        self.grade_1_cpd = TabularCPD(('G', 1), 3,
                                      [[0.3, 0.05, 0.8, 0.5],
                                       [0.4, 0.25, 0.1, 0.3],
                                       [0.3, 0.7, 0.1, 0.2]],
                                      [('D', 1), ('I', 1)], [2, 2])

    def test_get_intra_and_inter_edges(self):
        # Intra edges stay within one slice; inter edges cross slice 0 -> 1.
        # Negative / non-integer slice arguments must raise ValueError.
        self.network.add_edges_from([(('a', 0), ('b', 0)),
                                     (('a', 0), ('a', 1)),
                                     (('b', 0), ('b', 1))])
        self.assertListEqual(sorted(self.network.get_intra_edges()),
                             [(('a', 0), ('b', 0))])
        self.assertListEqual(sorted(self.network.get_intra_edges(1)),
                             [(('a', 1), ('b', 1))])
        self.assertRaises(ValueError, self.network.get_intra_edges, -1)
        self.assertRaises(ValueError, self.network.get_intra_edges, '-')
        self.assertListEqual(sorted(self.network.get_inter_edges()),
                             [(('a', 0), ('a', 1)), (('b', 0), ('b', 1))])

    def test_get_interface_nodes(self):
        # Interface nodes are the slice-0 ends of inter-slice edges.
        self.network.add_edges_from(
            [(('D', 0), ('G', 0)), (('I', 0), ('G', 0)),
             (('D', 0), ('D', 1)), (('I', 0), ('I', 1))])
        self.assertListEqual(sorted(self.network.get_interface_nodes()),
                             [('D', 0), ('I', 0)])
        self.assertRaises(ValueError, self.network.get_interface_nodes, -1)
        self.assertRaises(ValueError, self.network.get_interface_nodes, '-')

    def test_get_slice_nodes(self):
        # All variables appear in each requested slice.
        self.network.add_edges_from(
            [(('D', 0), ('G', 0)), (('I', 0), ('G', 0)),
             (('D', 0), ('D', 1)), (('I', 0), ('I', 1))])
        self.assertListEqual(sorted(self.network.get_slice_nodes()),
                             [('D', 0), ('G', 0), ('I', 0)])
        self.assertListEqual(sorted(self.network.get_slice_nodes(1)),
                             [('D', 1), ('G', 1), ('I', 1)])
        self.assertRaises(ValueError, self.network.get_slice_nodes, -1)
        self.assertRaises(ValueError, self.network.get_slice_nodes, '-')

    def test_add_single_cpds(self):
        # A single CPD can be attached and read back unchanged.
        self.network.add_edges_from([(('D', 0), ('G', 0)),
                                     (('I', 0), ('G', 0))])
        self.network.add_cpds(self.grade_cpd)
        self.assertListEqual(self.network.get_cpds(), [self.grade_cpd])

    def test_get_cpds(self):
        # After initialize_initial_state(), slice-0 CPDs are returned by
        # default and slice-1 CPDs via time_slice=1.
        self.network.add_edges_from(
            [(('D', 0), ('G', 0)), (('I', 0), ('G', 0)),
             (('D', 0), ('D', 1)), (('I', 0), ('I', 1))])
        self.network.add_cpds(self.grade_cpd, self.d_i_cpd, self.diff_cpd,
                              self.intel_cpd, self.i_i_cpd)
        self.network.initialize_initial_state()
        self.assertEqual(set(self.network.get_cpds()),
                         set([self.diff_cpd, self.intel_cpd, self.grade_cpd]))
        self.assertEqual(self.network.get_cpds(time_slice=1)[0].variable,
                         ('G', 1))

    def test_add_multiple_cpds(self):
        # Each CPD is retrievable by its (name, slice) node key.
        self.network.add_edges_from(
            [(('D', 0), ('G', 0)), (('I', 0), ('G', 0)),
             (('D', 0), ('D', 1)), (('I', 0), ('I', 1))])
        self.network.add_cpds(self.grade_cpd, self.d_i_cpd, self.diff_cpd,
                              self.intel_cpd, self.i_i_cpd)
        self.assertEqual(self.network.get_cpds(('G', 0)).variable, ('G', 0))
        self.assertEqual(self.network.get_cpds(('D', 1)).variable, ('D', 1))
        self.assertEqual(self.network.get_cpds(('D', 0)).variable, ('D', 0))
        self.assertEqual(self.network.get_cpds(('I', 0)).variable, ('I', 0))
        self.assertEqual(self.network.get_cpds(('I', 1)).variable, ('I', 1))

    def test_initialize_initial_state(self):
        # Starting from 5 CPDs, initialization completes the two-slice set
        # to 6, adding ('G', 1).
        self.network.add_nodes_from(['D', 'G', 'I', 'S', 'L'])
        self.network.add_edges_from(
            [(('D', 0), ('G', 0)), (('I', 0), ('G', 0)),
             (('D', 0), ('D', 1)), (('I', 0), ('I', 1))])
        self.network.add_cpds(self.grade_cpd, self.d_i_cpd, self.diff_cpd,
                              self.intel_cpd, self.i_i_cpd)
        self.network.initialize_initial_state()
        self.assertEqual(len(self.network.cpds), 6)
        self.assertEqual(self.network.get_cpds(('G', 1)).variable, ('G', 1))

    def test_moralize(self):
        # Moralization marries the common parents D and I of G in both
        # slices and drops edge directions.
        self.network.add_edges_from(([(('D', 0), ('G', 0)),
                                      (('I', 0), ('G', 0))]))
        moral_graph = self.network.moralize()
        self.assertListEqual(hf.recursive_sorted(moral_graph.edges()),
                             [[('D', 0), ('G', 0)], [('D', 0), ('I', 0)],
                              [('D', 1), ('G', 1)], [('D', 1), ('I', 1)],
                              [('G', 0), ('I', 0)], [('G', 1), ('I', 1)]])

    def tearDown(self):
        # Drop the shared network between tests.
        del self.network
def __init__(self, model_file='../DBN/network.nx'): nx_model = nx.read_gpickle(model_file) self.dbn = DynamicBayesianNetwork(nx_model.edges()) self.dbn.add_cpds(*nx_model.cpds) self.dbn.initialize_initial_state() self.dbn_infer = DBNInference(self.dbn)
variables = data.columns.values if len(variables) > 0: nvars = list() for var in variables: nvar = var.replace('(', '').replace(')', '').replace('\'', '').replace(' ', '').split(',') nvars.append((nvar[0], int(nvar[1]))) data.columns = nvars # LEARNS MODEL FROM REAL DATA hc = HillClimbSearchDBN(data, scoring_method=BicScore(data)) print 'Learning model' # GIVE STRUCTURE LEARNING ALGORITHM A HINT OF THE MODEL nodes = hc.state_names.keys() start = DynamicBayesianNetwork() nodes = set(X[0] for X in nodes) start.add_nodes_from_ts(nodes, [0, 1]) start.add_edge(('A', 1), ('O', 1)) model = hc.estimate(start=start, tabu_length=5, max_indegree=2) # model = hc.estimate(tabu_length=5, max_indegree=2) print 'Learning parameters' model.fit(data) # model.fit(data, estimator=BayesianEstimator) model.initialize_initial_state() print "Model learned successfully: ", model.check_model() print model.edges() for cpd in model.get_cpds(): print cpd