def setUp(self):
    """Build the DiscreteBayesianNetwork shared by the tests in this case."""
    # Graph structure and CPD parameters both live in the same JSON dict file.
    structure = GraphSkeleton()
    structure.load("unittestdict.txt")
    structure.toporder()  # libpgm expects a topologically ordered skeleton
    params = NodeData()
    params.load("unittestdict.txt")
    self.instance = DiscreteBayesianNetwork(structure, params)
def set_bayesnet(self):
    """Load self.file and cache a DiscreteBayesianNetwork on self.bn."""
    skeleton = GraphSkeleton()
    node_data = NodeData()
    # Both the parameters and the structure come from the same file.
    node_data.load(self.file)
    skeleton.load(self.file)
    skeleton.toporder()
    self.bn = DiscreteBayesianNetwork(skeleton, node_data)
def setUp(self):
    """Assemble the linear-Gaussian network under test."""
    # NOTE: parameters come from the LG dict while the skeleton comes from
    # the discrete dict -- the two files share the same graph structure.
    params = NodeData()
    params.load("unittestlgdict.txt")
    structure = GraphSkeleton()
    structure.load("unittestdict.txt")
    structure.toporder()
    self.lgb = LGBayesianNetwork(structure, params)
def setUp(self):
    """Build the network and its table-CPD factorization for the tests."""
    structure = GraphSkeleton()
    structure.load("unittestdict.txt")
    structure.toporder()
    params = NodeData()
    params.load("unittestdict.txt")
    self.bn = DiscreteBayesianNetwork(structure, params)
    # Factorized form used by the exact-inference tests.
    self.fn = TableCPDFactorization(self.bn)
def getTableCPD(jsonpath=""):
    """Build a TableCPDFactorization from the libpgm JSON file *jsonpath*.

    The path was hard-coded to "" in the original; it is now a parameter
    (defaulting to the old value so existing callers are unaffected).

    :param jsonpath: path to a libpgm JSON dict holding both the graph
        skeleton and the node CPD data.
    :returns: a TableCPDFactorization wrapping the loaded network.
    """
    nd = NodeData()
    skel = GraphSkeleton()
    nd.load(jsonpath)
    skel.load(jsonpath)
    # Bugfix: the skeleton must be topologically ordered before it is used
    # to build a DiscreteBayesianNetwork (libpgm relies on skel.V order).
    skel.toporder()
    bn = DiscreteBayesianNetwork(skel, nd)
    tablecpd = TableCPDFactorization(bn)
    return tablecpd
def setUp(self):
    """Create a network and two table-CPD factors for the factor tests."""
    structure = GraphSkeleton()
    structure.load("unittestdict.txt")
    structure.toporder()
    params = NodeData()
    params.load("unittestdict.txt")
    self.instance = DiscreteBayesianNetwork(structure, params)
    # Factors over two different nodes of the same network.
    self.factor = TableCPDFactor("Grade", self.instance)
    self.factor2 = TableCPDFactor("Letter", self.instance)
class TestNodeData(unittest.TestCase):
    """Tests for NodeData's hybrid-entry-to-instance conversion."""

    def setUp(self):
        self.nd = NodeData()

    def test_entriestoinstances(self):
        """After conversion, a discrete node must draw one of its outcomes."""
        self.nd.load("unittesthdict.txt")
        self.nd.entriestoinstances()
        outcome = self.nd.nodes["Intelligence"].choose([])
        self.assertIn(outcome, ('low', 'high'))
def getTableCPD():
    """Load the example graph and return its TableCPDFactorization.

    :returns: a TableCPDFactorization over the network described by
        ./graph/graph_example.txt (skeleton and CPDs in one file).
    """
    nd = NodeData()
    skel = GraphSkeleton()
    jsonpath = "./graph/graph_example.txt"
    nd.load(jsonpath)
    skel.load(jsonpath)
    # Bugfix: topologically order the skeleton before constructing the
    # network -- libpgm's samplers/queries assume ordered vertices.
    skel.toporder()
    # load Bayesian network
    bn = DiscreteBayesianNetwork(skel, nd)
    tablecpd = TableCPDFactorization(bn)
    return tablecpd
def load(self, file_name):
    """Initialise this network from the single libpgm JSON *file_name*."""
    #### Load BN
    node_data = NodeData()
    skeleton = GraphSkeleton()
    node_data.load(file_name)   # CPD parameters (any input file)
    skeleton.load(file_name)    # graph structure
    # topologically order graphskeleton
    skeleton.toporder()
    super(DiscreteBayesianNetworkExt, self).__init__(skeleton, node_data)
def getTableCPD():
    """Load the job-interview network and return its factorized form.

    :returns: a TableCPDFactorization over the network in job_interview.txt.
    """
    nd = NodeData()
    skel = GraphSkeleton()
    jsonpath = "job_interview.txt"
    nd.load(jsonpath)
    skel.load(jsonpath)
    # Bugfix: the skeleton must be topologically ordered before building
    # the network -- libpgm queries rely on skel.V being ordered.
    skel.toporder()
    #load bayesian network
    bn = DiscreteBayesianNetwork(skel, nd)
    tablecpd = TableCPDFactorization(bn)
    return tablecpd
def test_query(self):
    """Querying Grade given Letter=weak yields one node with outcomes A/B/C."""
    teacher_nd = NodeData()
    teacher_nd.load(self.teacher_data_path)
    req = DiscreteQueryRequest()
    req.nodes = U.discrete_nodes_to_ros(teacher_nd.Vdata)
    req.evidence = [DiscreteNodeState("Letter", "weak")]
    req.query = ["Grade"]
    res = self.query(req)
    # Exactly one node comes back, and it is the queried one.
    self.assertEqual(len(res.nodes), 1)
    grade = res.nodes[0]
    self.assertEqual(grade.name, "Grade")
    self.assertListEqual(['A', 'B', 'C'], grade.outcomes)
def test_query(self):
    """A Grade query under Letter=weak evidence returns the A/B/C outcomes."""
    teacher_nd = NodeData()
    teacher_nd.load(self.teacher_data_path)

    # Assemble the request: all nodes, one evidence state, one query node.
    req = DiscreteQueryRequest()
    req.nodes = U.discrete_nodes_to_ros(teacher_nd.Vdata)
    req.evidence = [DiscreteNodeState("Letter", "weak")]
    req.query = ["Grade"]

    res = self.query(req)
    self.assertEqual(len(res.nodes), 1)
    result_node = res.nodes[0]
    self.assertEqual(result_node.name, "Grade")
    self.assertListEqual(['A', 'B', 'C'], result_node.outcomes)
def load(self, file_name):
    """Initialise this extended network from the libpgm JSON *file_name*."""
    #### Load BN
    node_data = NodeData()
    skeleton = GraphSkeleton()
    node_data.load(file_name)  # any input file
    skeleton.load(file_name)
    # topologically order graphskeleton
    skeleton.toporder()
    super(DiscreteBayesianNetworkExt, self).__init__(skeleton, node_data)
    ##TODO load evidence
def loadbn(param_file):
    """
    Load the BN model named *param_file* from its .txt file under the
    experiment's parameters directory and return it.
    """
    file_path = os.path.join(experiment_dir, 'parameters', param_file + '.txt')
    node_data = NodeData()
    skeleton = GraphSkeleton()
    node_data.load(file_path)
    skeleton.load(file_path)
    skeleton.toporder()
    return DiscreteBayesianNetwork(skeleton, node_data)
class TestDynDiscBayesianNetwork(unittest.TestCase):
    """Tests for the dynamic discrete Bayesian network."""

    def setUp(self):
        self.nd = NodeData()
        self.nd.load("unittestdyndict.txt")
        self.skel = GraphSkeleton()
        self.skel.load("unittestdyndict.txt")
        self.skel.toporder()
        self.d = DynDiscBayesianNetwork(self.skel, self.nd)

    def test_randomsample(self):
        """'Difficulty' is static, so it must be constant across time slices."""
        sample = self.d.randomsample(10)
        first = sample[0]['Difficulty']
        for t in range(1, 10):
            self.assertEqual(first, sample[t]['Difficulty'])
class TestHyBayesianNetwork(unittest.TestCase):
    """Tests for the hybrid (mixed node type) Bayesian network."""

    def setUp(self):
        # Hybrid node data needs explicit conversion into node instances.
        self.nd = NodeData()
        self.nd.load("unittesthdict.txt")
        self.nd.entriestoinstances()
        self.skel = GraphSkeleton()
        self.skel.load("unittestdict.txt")
        self.skel.toporder()
        self.hybn = HyBayesianNetwork(self.skel, self.nd)

    def test_randomsample(self):
        """One draw exercises each node type: float, str, custom suffix."""
        draw = self.hybn.randomsample(1)[0]
        self.assertIsInstance(draw['Grade'], float)
        self.assertIsInstance(draw['Intelligence'], str)
        self.assertEqual(draw["SAT"][-12:], 'blueberries!')
def setUp(self):
    """Collect aggregated forward samples and Gibbs samples for comparison."""
    structure = GraphSkeleton()
    structure.load("unittestdict.txt")
    structure.toporder()
    params = NodeData()
    params.load("unittestdict.txt")
    self.bn = DiscreteBayesianNetwork(structure, params)

    agg = SampleAggregator()
    # Forward (unconditional) sampling run.
    agg.aggregate(self.bn.randomsample(50))
    self.rseq = agg.seq
    self.ravg = agg.avg

    # Gibbs sampling run conditioned on Letter == 'weak'.
    self.fn = TableCPDFactorization(self.bn)
    evidence = dict(Letter='weak')
    agg.aggregate(self.fn.gibbssample(evidence, 51))
    self.gseq = agg.seq
    self.gavg = agg.avg
def createData(): nd = NodeData() skel = GraphSkeleton() fpath = "job_interview.txt" nd.load(fpath) skel.load(fpath) skel.toporder() bn = DiscreteBayesianNetwork(skel, nd) learner = PGMLearner() data = bn.randomsample(1000) X, Y = 'Grades', 'Offer' c,p,w=learner.discrete_condind(data, X, Y, ['Interview']) print "independence between X and Y: ", c, " p-value ", p, " witness node: ", w result = learner.discrete_constraint_estimatestruct(data) print result.E
def net2(): nd = NodeData() skel = GraphSkeleton() nd.load("net.txt") # an input file skel.load("net.txt") # topologically order graphskeleton skel.toporder() # load bayesian network lgbn = LGBayesianNetwork(skel, nd) in_data = read_data.getdata2() learner = PGMLearner() bn = learner.lg_mle_estimateparams(skel, in_data) p = cal_prob(in_data[300:500], bn) print p return 0
def net2(): nd = NodeData() skel = GraphSkeleton() nd.load("net.txt") # an input file skel.load("net.txt") # topologically order graphskeleton skel.toporder() # load bayesian network lgbn = LGBayesianNetwork(skel, nd) in_data=read_data.getdata2() learner = PGMLearner() bn=learner.lg_mle_estimateparams(skel,in_data) p=cal_prob(in_data[300:500],bn) print p return 0
def test_structure_estimation(self):
    """The service should recover a 5-node graph with at least one edge."""
    req = DiscreteStructureEstimationRequest()

    skel = GraphSkeleton()
    skel.load(self.data_path)
    skel.toporder()

    # Teacher network with known parameters provides the training samples.
    teacher_nd = NodeData()
    teacher_nd.load(self.teacher_data_path)
    bn = DiscreteBayesianNetwork(skel, teacher_nd)
    # 8000 samples gives the learner enough data to find the structure.
    for sample in bn.randomsample(8000):
        gs = DiscreteGraphState()
        for node_name, node_value in sample.items():
            gs.node_states.append(DiscreteNodeState(node=node_name, state=node_value))
        req.states.append(gs)

    res = self.struct_estimate(req)
    self.assertIsNotNone(res.graph)
    self.assertEqual(len(res.graph.nodes), 5)
    self.assertGreater(len(res.graph.edges), 0)
def setUp(self):
    """Create a learner plus sample sequences from known discrete and
    linear-Gaussian networks for the learning tests."""
    # instantiate learner
    self.l = PGMLearner()

    # generate graph skeleton
    skel = GraphSkeleton()
    skel.load("unittestdict.txt")
    skel.toporder()

    # Bugfix: NodeData.load is an *instance* method (it returns None), and
    # the network constructors take (skeleton, nodedata).  The original
    # called NodeData.load(...) as if it were a factory and passed only the
    # node data to the constructors.

    # generate sample sequence to try to learn from - discrete
    nd = NodeData()
    nd.load("unittestdict.txt")
    self.samplediscbn = DiscreteBayesianNetwork(skel, nd)
    self.samplediscseq = self.samplediscbn.randomsample(5000)

    # generate sample sequence to try to learn from - linear Gaussian
    # (the LG parameter file shares the same graph structure)
    nda = NodeData()
    nda.load("unittestlgdict.txt")
    self.samplelgbn = LGBayesianNetwork(skel, nda)
    self.samplelgseq = self.samplelgbn.randomsample(10000)

    self.skel = skel
def main(): in_data = read_data.getdata() f_data = format_data(in_data) nd = NodeData() nd.load("net4.txt") # an input file skel = GraphSkeleton() skel.load("net4.txt") skel.toporder() bn = DiscreteBayesianNetwork(skel, nd) #training dataset:70% bn2 = em(f_data[1:6000], bn, skel) pr_training = precision(f_data[1:6000], bn2) print "Prediction accuracy for training data:", pr_training[1] #testing dataset:30% pr = precision(f_data[6700:6800], bn2) print "Prediction accuracy for test data:", pr[1]
def test_param_estimation(self):
    """Parameter estimation over trial data must return all 5 nodes."""
    req = DiscreteParameterEstimationRequest()

    # load graph structure
    skel = GraphSkeleton()
    skel.load(self.data_path)
    req.graph.nodes = skel.V
    req.graph.edges = [GraphEdge(src, dst) for src, dst in skel.E]
    skel.toporder()

    # generate trial data from a teacher network with known parameters
    teacher_nd = NodeData()
    teacher_nd.load(self.teacher_data_path)
    bn = DiscreteBayesianNetwork(skel, teacher_nd)
    for sample in bn.randomsample(200):
        gs = DiscreteGraphState()
        for node_name, node_value in sample.items():
            gs.node_states.append(DiscreteNodeState(node=node_name, state=node_value))
        req.states.append(gs)

    self.assertEqual(len(self.param_estimate(req).nodes), 5)
def main(): in_data=read_data.getdata() f_data=format_data(in_data) nd = NodeData() nd.load("net4.txt") # an input file skel = GraphSkeleton() skel.load("net4.txt") skel.toporder() bn=DiscreteBayesianNetwork(skel,nd) #training dataset:70% bn2=em(f_data[1:6000],bn,skel) pr_training = precision(f_data[1:6000],bn2) print "Prediction accuracy for training data:" , pr_training[1] #testing dataset:30% pr=precision(f_data[6700:6800],bn2) print "Prediction accuracy for test data:", pr[1]
def test_param_estimation(self):
    """Estimating parameters from 200 teacher samples returns 5 nodes."""
    req = DiscreteParameterEstimationRequest()

    # load graph structure into the request
    skel = GraphSkeleton()
    skel.load(self.data_path)
    req.graph.nodes = skel.V
    req.graph.edges = [GraphEdge(k, v) for k, v in skel.E]
    skel.toporder()

    # generate trial data by sampling a teacher network
    teacher_nd = NodeData()
    teacher_nd.load(self.teacher_data_path)
    bn = DiscreteBayesianNetwork(skel, teacher_nd)
    data = bn.randomsample(200)
    for assignment in data:
        gs = DiscreteGraphState()
        for k_s, v_s in assignment.items():
            gs.node_states.append(DiscreteNodeState(node=k_s, state=v_s))
        req.states.append(gs)

    self.assertEqual(len(self.param_estimate(req).nodes), 5)
# ], (...)
#
# This means that the survival probability given Class=1 and Sex = 0 is
# 0.968; the prob of not survival given the same conditions is 0.032.
# I now create a bayesian network in order to run queries on it, given
# some evidence. In this case, we're not learning any parameters,
# we've calculated them previously and we use them to define the net.

# In[ ]:

# Structure and parameters live in two separate JSON files.
nd = NodeData()
skel = GraphSkeleton()
jsonpath_skel = "titanic_skel.json"
jsonpath_node = "titanic_nodes.json"
nd.load(jsonpath_node)
skel.load(jsonpath_skel)
# Bugfix: topologically order the skeleton before building the network.
skel.toporder()
# load bayesian network
bn = DiscreteBayesianNetwork(skel, nd)
print (skel.getchildren("Class"), skel.getchildren("Sex"), skel.getchildren("Fare"), skel.getchildren("Surv"))
# Bugfix: the next line was pasted notebook *output*, not code; commented
# out so it is no longer evaluated as a bare expression.
# ([u'Surv'], [u'Surv'], [u'Class'], [])

# In[ ]:

# We can now start querying our network. We provide a query (first dictionary
# in the arguments) and an evidence (second dictionary in the args))
tablecpd = TableCPDFactorization(bn)
print ("P(Surv=0) = {}".format(tablecpd.specificquery(dict(Surv='0'), dict())))
def setUp(self):
    """Construct the dynamic discrete BN used by the tests."""
    # Bugfix: NodeData.load is an instance method that returns None; the
    # original called it on the class (NodeData.load("...")) and assigned
    # the result to self.nd, leaving no usable node data.
    self.nd = NodeData()
    self.nd.load("unittestdyndict.txt")
    self.skel = GraphSkeleton()
    self.skel.load("unittestdyndict.txt")
    self.skel.toporder()
    self.d = DynDiscBayesianNetwork(self.skel, self.nd)
from libpgm.graphskeleton import GraphSkeleton
from libpgm.discretebayesiannetwork import DiscreteBayesianNetwork
from libpgm.lgbayesiannetwork import LGBayesianNetwork
from libpgm.hybayesiannetwork import HyBayesianNetwork
from libpgm.dyndiscbayesiannetwork import DynDiscBayesianNetwork
from libpgm.tablecpdfactorization import TableCPDFactorization
from libpgm.sampleaggregator import SampleAggregator
from libpgm.pgmlearner import PGMLearner

# (1) ---------------------------------------------------------------------
# Generate a sequence of samples from a discrete-CPD Bayesian network

# load nodedata and graphskeleton
# NOTE(review): NodeData is used below but not imported in this view --
# presumably `from libpgm.nodedata import NodeData` appears elsewhere in the
# file; verify.
nd = NodeData()
skel = GraphSkeleton()
nd.load("../tests/unittestdict.txt")
skel.load("../tests/unittestdict.txt")
# topologically order graphskeleton
skel.toporder()
# load bayesian network
bn = DiscreteBayesianNetwork(skel, nd)
# sample 10 joint assignments from the network
result = bn.randomsample(10)
# output - toggle comment to see
#print json.dumps(result, indent=2)

# (2) ----------------------------------------------------------------------
# Service client for linear-Gaussian parameter estimation.
param_estimate = rospy.ServiceProxy(
    "pgm_learner/linear_gaussian/parameter_estimation",
    LinearGaussianParameterEstimation
)

req = LinearGaussianParameterEstimationRequest()

# Structure file and (separate) linear-Gaussian parameter file.
dpath = os.path.join(PKG_PATH, "test", "graph-test.txt")
tpath = os.path.join(PKG_PATH, "test", "graph-lg-test.txt")

# load graph structure
skel = GraphSkeleton()
skel.load(dpath)
req.graph.nodes = skel.V
req.graph.edges = [GraphEdge(k, v) for k, v in skel.E]
skel.toporder()

# generate trial data: sample 200 joint assignments from a teacher network
# built with known parameters, then pack them into the request.
teacher_nd = NodeData()
teacher_nd.load(tpath)
bn = LGBayesianNetwork(skel, teacher_nd)
data = bn.randomsample(200)
for v in data:
    gs = LinearGaussianGraphState()
    for k_s, v_s in v.items():
        gs.node_states.append(LinearGaussianNodeState(node=k_s, state=v_s))
    req.states.append(gs)

# Print the node parameters estimated by the service.
PP.pprint(param_estimate(req).nodes)
temp = [] #temp.append(min(list)+float(max(list) - min(list))*1/3) #temp.append(min(list)+float(max(list) - min(list))*2/3) temp.append(float(max(list))/3) temp.append(float(max(list))/3*2) return temp EachLikeThreshold = Threshold(EachLike) EachLikedThreshold = Threshold(EachLiked) print EachLikeThreshold print EachLikedThreshold BulliedPro = [] nd = NodeData() skel = GraphSkeleton() nd.load('unittestdict.txt') skel.load('unittestdict.txt') bn = DiscreteBayesianNetwork(skel, nd) fn = TableCPDFactorization(bn) for i in range(len(EachLike)): evidence = {} if EachLike[i] <= EachLikeThreshold[0]: evidence['LikeN'] = 'Small' elif EachLikeThreshold[0] < EachLike[i] and EachLike[i] <= EachLikeThreshold[1]: evidence['LikeN'] = 'Mid' else: evidence['LikeN'] = 'Big' if EachLiked[i] <= EachLikedThreshold[0]: evidence['LikedN'] = 'Small' elif EachLikedThreshold[0] < EachLiked[i] and EachLiked[i] <= EachLikedThreshold[1]:
import json from libpgm.graphskeleton import GraphSkeleton from libpgm.nodedata import NodeData from libpgm.discretebayesiannetwork import DiscreteBayesianNetwork from libpgm.tablecpdfactorization import TableCPDFactorization # load nodedata and graphskeleton nd = NodeData() skel = GraphSkeleton() nd.load("grades.txt") skel.load("grades.txt") # toporder graph skeleton skel.toporder() # load evidence evidence = dict(Letter='weak') # load bayesian network bn = DiscreteBayesianNetwork(skel, nd) # load factorization fn = TableCPDFactorization(bn) # sample result = fn.gibbssample(evidence, 1000) # output print json.dumps(result, indent=2)
import json from libpgm.nodedata import NodeData from libpgm.graphskeleton import GraphSkeleton from libpgm.discretebayesiannetwork import DiscreteBayesianNetwork from libpgm.pgmlearner import PGMLearner nd = NodeData() nd.load("nodedata.json") skel = GraphSkeleton() skel.load("nodedata.json") skel.toporder() bn = DiscreteBayesianNetwork(skel,nd) with open("manipulatedata.json") as fp: data = json.load(fp) learner = PGMLearner() # result = learner.discrete_constraint_estimatestruct(data) result = learner.discrete_estimatebn(data) print json.dumps(result.E, indent=2) print json.dumps(result.Vdata, indent=2)
import json from libpgm.nodedata import NodeData from libpgm.graphskeleton import GraphSkeleton from libpgm.discretebayesiannetwork import DiscreteBayesianNetwork from libpgm.pgmlearner import PGMLearner # generate some data to use nd = NodeData() nd.load("grades.txt") # an input file skel = GraphSkeleton() skel.load("grades.txt") skel.toporder() bn = DiscreteBayesianNetwork(skel, nd) data = bn.randomsample(80000) # instantiate my learner learner = PGMLearner() # estimate structure result = learner.discrete_constraint_estimatestruct(data) # output print json.dumps(result.E, indent=2)
rospy.init_node("pgm_learner_sample_discrete")

# Service client for discrete parameter estimation.
param_estimate = rospy.ServiceProxy(
    "pgm_learner/discrete/parameter_estimation",
    DiscreteParameterEstimation)

req = DiscreteParameterEstimationRequest()

# The same file supplies both structure and teacher parameters here.
dpath = os.path.join(PKG_PATH, "test", "graph-test.txt")
tpath = dpath

# load graph structure
skel = GraphSkeleton()
skel.load(dpath)
req.graph.nodes = skel.V
req.graph.edges = [GraphEdge(k, v) for k, v in skel.E]
skel.toporder()

# generate trial data: 200 samples from a teacher BN with known CPDs
teacher_nd = NodeData()
teacher_nd.load(dpath)
bn = DiscreteBayesianNetwork(skel, teacher_nd)
data = bn.randomsample(200)
for v in data:
    gs = DiscreteGraphState()
    for k_s, v_s in v.items():
        gs.node_states.append(DiscreteNodeState(node=k_s, state=v_s))
    req.states.append(gs)

# Print the node parameters estimated by the service.
PP.pprint(param_estimate(req).nodes)
# Parse the whitespace-separated integer rows of the data file.
# NOTE(review): `data_r` and `truth_r` are opened outside this view.
data_l = []
for line in data_r.readlines():
    data_l.append(map(int, line.split()))

# First column of each truth row is the ground-truth label.
truth_l = []
for row in truth_r:
    truth_l.append(row[0])

w = csv.writer(open("bayesian_outcome.txt", "wb"))

count = 0
for i in range(104):
    # One pre-trained network per test case, stored as bayes_net/<i>.txt.
    nd = NodeData()
    skel = GraphSkeleton()
    nd.load('bayes_net/'+str(i)+".txt")  # any input file
    skel.load('bayes_net/'+str(i)+".txt")
    # topologically order graphskeleton
    skel.toporder()
    # load bayesian network
    bn = DiscreteBayesianNetwork(skel, nd)
    # Evidence dict mapping odd node names "1","3","5",... to observed
    # values from this row.
    dic1 = {}
    k = 1
    for c in data_l[i]:
        dic1[str(k)] = str(c)
        k += 2
    print dic1
# # instantiate my learner # learner = PGMLearner() # # # estimate parameters # result = learner.discrete_mle_estimateparams(skel, data) # # # output - toggle comment to see # print json.dumps(result.Vdata, indent=2) # (5) -------------------------------------------------------------------------- # Compute the probability distribution over a specific node or nodes # load nodedata and graphskeleton nd = NodeData() skel = GraphSkeleton() nd.load("../tests/unittestdict.txt") skel.load("../tests/unittestdict.txt") # toporder graph skeleton print skel.toporder() # load evidence evidence = {"Intelligence": "high"} query = {"Grade": "A"} # load bayesian network bn = DiscreteBayesianNetwork(skel, nd) # load factorization fn = TableCPDFactorization(bn)
import json from libpgm.nodedata import NodeData from libpgm.graphskeleton import GraphSkeleton from libpgm.discretebayesiannetwork import DiscreteBayesianNetwork from libpgm.tablecpdfactorization import TableCPDFactorization # load nodedata and graphskeleton nd = NodeData() skel = GraphSkeleton() nd.load("tests/net1.json") # any input file skel.load("tests/net1.json") # topologically order graphskeleton skel.toporder() # load bayesian network bn = DiscreteBayesianNetwork(skel, nd) fn = TableCPDFactorization(bn) # sample result = fn.specificquery(dict(C='T'), dict(B='F')) # output print json.dumps(result, indent=2)
import json from libpgm.nodedata import NodeData from libpgm.graphskeleton import GraphSkeleton from libpgm.discretebayesiannetwork import DiscreteBayesianNetwork from libpgm.pgmlearner import PGMLearner # generate some data to use nd = NodeData() nd.load("bayes_structure.txt") # an input file skel = GraphSkeleton() skel.load("bayes_structure.txt") skel.toporder() bn = DiscreteBayesianNetwork(skel, nd) data = bn.randomsample(200) # instantiate my learner learner = PGMLearner() # estimate parameters from data and skeleton result = learner.discrete_mle_estimateparams(skel, data) # output print json.dumps(result.Vdata, indent=2)
#Draw sample from beta distribution sampled_theta += [dist.rvs()] # Return the index of the sample with the largest value return sampled_theta.index(max(sampled_theta)) def regret(self, bestprob): # regret as ratio between reward and expectation of reward had we always selected best reward = sum(self.successes) / float(sum(self.trials)) optimal = bestprob return 1 - reward / bestprob # load nodedata and graphskeleton nd = NodeData() skel = GraphSkeleton() nd.load("bayesnet.json") # any input file skel.load("bayesnet.json") # topologically order graphskeleton skel.toporder() # load bayesian network bn = DiscreteBayesianNetwork(skel, nd) simulations = 10000 # the number of simulations of the whole process experiments = 32 # the number of experiments we run in each simulation # specify what the interventions are for the 'try all combinations of interventions' bandit interventions = [{ "X1": '0', "X2": '0',
if __name__ == '__main__':
    rospy.init_node("pgm_learner_sample_discrete")

    # Service client for discrete parameter estimation.
    param_estimate = rospy.ServiceProxy("pgm_learner/discrete/parameter_estimation", DiscreteParameterEstimation)

    req = DiscreteParameterEstimationRequest()

    # One file supplies both the structure and the teacher parameters.
    dpath = os.path.join(PKG_PATH, "test", "graph-test.txt")
    tpath = dpath

    # load graph structure
    skel = GraphSkeleton()
    skel.load(dpath)
    req.graph.nodes = skel.V
    req.graph.edges = [GraphEdge(k, v) for k,v in skel.E]
    skel.toporder()

    # generate trial data: 200 samples from a teacher BN with known CPDs
    teacher_nd = NodeData()
    teacher_nd.load(dpath)
    bn = DiscreteBayesianNetwork(skel, teacher_nd)
    data = bn.randomsample(200)
    for v in data:
        gs = DiscreteGraphState()
        for k_s, v_s in v.items():
            gs.node_states.append(DiscreteNodeState(node=k_s, state=v_s))
        req.states.append(gs)

    # Print the node parameters estimated by the service.
    PP.pprint(param_estimate(req).nodes)
from libpgm.nodedata import NodeData
from libpgm.graphskeleton import GraphSkeleton
from libpgm.discretebayesiannetwork import DiscreteBayesianNetwork
from inference.exact_inference import ExactInferenceEngine
from inference.approximate_inference import ApproximateInferenceEngine

# Build the network once and share it across all inference engines.
node_data = NodeData()
network_skeleton = GraphSkeleton()
node_data.load('test_bayesian_networks/network.txt')
network_skeleton.load('test_bayesian_networks/network.txt')
# NOTE(review): toporder() is not called on the skeleton here -- confirm the
# file already lists the vertices in topological order.
network = DiscreteBayesianNetwork(network_skeleton, node_data)
exact_inference_engine = ExactInferenceEngine(network)
approximate_inference_engine = ApproximateInferenceEngine(network)

# Query P(Burglary | MaryCalls=true, JohnCalls=true) with every engine and
# print the resulting distributions side by side.
query_variable = 'Burglary'
evidence_variables = {'MaryCalls': 'true', 'JohnCalls': 'true'}
resulting_distribution = exact_inference_engine.perform_inference(query_variable, evidence_variables)
print 'P(B|m,j) - enumeration: ', resulting_distribution
resulting_distribution = exact_inference_engine.perform_ve_inference(query_variable, evidence_variables)
# NOTE(review): the label below is missing the leading 'P' -- typo in the
# output string (left unchanged here).
print '(B|m,j) - variable elimination: ', resulting_distribution
# Approximate engines take the sample count as a third argument.
resulting_distribution = approximate_inference_engine.perform_rs_inference(query_variable, evidence_variables, 100000)
print 'P(B|m,j) - approximate - rejection sampling: ', resulting_distribution
resulting_distribution = approximate_inference_engine.perform_lw_inference(query_variable, evidence_variables, 100000)
print 'P(B|m,j) - approximate - likelihood weighting: ', resulting_distribution
resulting_distribution = approximate_inference_engine.perform_gibbs_inference(query_variable, evidence_variables, 100000)
print 'P(B|m,j) - approximate - Gibbs: ', resulting_distribution
print

# Second query begins here (its uses continue outside this view).
query_variable = 'JohnCalls'
import json from libpgm.nodedata import NodeData from libpgm.graphskeleton import GraphSkeleton from libpgm.lgbayesiannetwork import LGBayesianNetwork from libpgm.pgmlearner import PGMLearner # generate some data to use nd = NodeData() nd.load("gaussGrades.txt") # an input file skel = GraphSkeleton() skel.load("gaussGrades.txt") skel.toporder() lgbn = LGBayesianNetwork(skel, nd) data = lgbn.randomsample(8000) print data # instantiate my learner learner = PGMLearner() # estimate structure result = learner.lg_constraint_estimatestruct(data) # output print json.dumps(result.E, indent=2)
# Require exactly one command-line argument: the data file.
if len(sys.argv) != 2:  # the program name and the datafile
    # stop the program and print an error message
    sys.exit("usage: ch16.py datafile.txt ")
filename = sys.argv[1]
try:
    f = open(filename, 'r')  # opens the input file
except IOError:
    print("Cannot open file %s" % filename)
    sys.exit("BYE!")

#load data
nd = NodeData()
skel = GraphSkeleton()
nd.load(filename)
skel.load(filename)
#load B Network
skel.toporder()
bn = DiscreteBayesianNetwork(skel, nd)

#set up function
jp = []
temp = []
cal = []

#initiralization: start every node's state at '0' and record node order
sc = {}
for i in range(len(bn.V)):
    sc[bn.V[i]] = '0'
    temp.append(bn.V[i])
# NOTE(review): loop membership of the next append is ambiguous in the
# source as received; it reads as a one-time header column after the loop.
temp.append('p')
dictionary = set().union(wkdayValsList, hourValsList, locatValsList, activValsList) # checking if input from user was approppriate if set(userinput).issubset(dictionary): # initializing probabilities lists wkdayProbList = [] hourProbList = [] locatProbList = [] activProbList = [] #INITIALIZING BN 1 # load nodedata and graphskeleton nd1 = NodeData() skel1 = GraphSkeleton() nd1.load(path_bn1) skel1.load(path_bn1) skel1.toporder() # toporder graph skeleton #INITIALIZING BN 2 # load nodedata and graphskeleton nd2 = NodeData() skel2 = GraphSkeleton() nd2.load(path_bn2) skel2.load(path_bn2) skel2.toporder() # toporder graph skeleton # FINDING NEXT ACTIVITY ATTRIBUTES THROUGH INFERENCE ON BN 1 # wkday variable query evidence1 = dict(wkdayT0=userinput[0]) for i, item in enumerate(wkdayValsList):