Example No. 1
    def setUp(self):
        # A test Bayesian model
        diff_cpd = TabularCPD('diff', 2, [[0.6], [0.4]])
        intel_cpd = TabularCPD('intel', 2, [[0.7], [0.3]])
        grade_cpd = TabularCPD('grade',
                               3,
                               [[0.3, 0.05, 0.9, 0.5], [0.4, 0.25, 0.08, 0.3],
                                [0.3, 0.7, 0.02, 0.2]],
                               evidence=['diff', 'intel'],
                               evidence_card=[2, 2])
        self.bayesian_model = BayesianModel()
        self.bayesian_model.add_nodes_from(['diff', 'intel', 'grade'])
        self.bayesian_model.add_edges_from([('diff', 'grade'),
                                            ('intel', 'grade')])
        self.bayesian_model.add_cpds(diff_cpd, intel_cpd, grade_cpd)

        # A test Markov model
        self.markov_model = MarkovModel([('A', 'B'), ('C', 'B'), ('B', 'D')])
        factor_ab = DiscreteFactor(['A', 'B'], [2, 3], [1, 2, 3, 4, 5, 6])
        factor_cb = DiscreteFactor(['C', 'B'], [4, 3],
                                   [3, 1, 4, 5, 7, 8, 1, 3, 10, 4, 5, 6])
        factor_bd = DiscreteFactor(['B', 'D'], [3, 2], [5, 7, 2, 1, 9, 3])
        self.markov_model.add_factors(factor_ab, factor_cb, factor_bd)

        self.gibbs = GibbsSampling(self.bayesian_model)
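
The setUp fixture above only builds the two test models; for reference, a minimal sketch of exercising it from inside a test method (the sample call follows pgmpy's GibbsSampling API, everything else is illustrative):

# Illustrative only: draw a few Gibbs samples from the Bayesian model built
# in setUp(); State is the same namedtuple the tests in these examples use.
start_state = [State('diff', 0), State('intel', 0), State('grade', 0)]
samples = self.gibbs.sample(start_state=start_state, size=5)
print(samples)  # pandas DataFrame with columns 'diff', 'intel' and 'grade'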
Example No. 3
 def test_get_kernel_from_markov_model(self):
     gibbs = GibbsSampling()
     gibbs._get_kernel_from_markov_model(self.markov_model)
     self.assertListEqual(list(gibbs.variables),
                          list(self.markov_model.nodes()))
     self.assertDictEqual(gibbs.cardinalities, {
         'A': 2,
         'B': 3,
         'C': 4,
         'D': 2
     })
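
Besides the variables and cardinalities asserted above, _get_kernel_from_markov_model also fills in the chain's per-variable transition models; a hedged sketch of inspecting them (the transition_models attribute comes from pgmpy's MarkovChain base class):

# Illustrative inspection of the Gibbs kernel built from the Markov model.
gibbs = GibbsSampling()
gibbs._get_kernel_from_markov_model(self.markov_model)
for var, kernel in gibbs.transition_models.items():
    # One conditional transition table per assignment of the other variables.
    print(var, len(kernel))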
Example No. 4
 def test_get_kernel_from_bayesian_model(self):
     gibbs = GibbsSampling()
     gibbs._get_kernel_from_bayesian_model(self.bayesian_model)
     self.assertListEqual(list(gibbs.variables),
                          list(self.bayesian_model.nodes()))
     self.assertDictEqual(gibbs.cardinalities, {
         'diff': 2,
         'intel': 2,
         'grade': 3
     })
def gibb_sam(n):
    # disease_model is assumed to be defined earlier in this script.
    gib_chain = GibbsSampling(disease_model)
    # gib_chain.sample(size=30)

    # generate_sample yields one list of State(var, state) tuples per draw.
    gen = gib_chain.generate_sample(size=n)
    samples = [sample for sample in gen]
    r = 0
    for c, sample in enumerate(samples):
        for var, st in sample:
            if (var, st) == ('Flu', 0):
                r = r + 1
                # Plot the cumulative fraction of samples with Flu == 0.
                plt.plot(c, (r / n), 'bo')
    plt.show()
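
For a plain marginal estimate the plotting loop is not needed; a minimal sketch under the same assumptions (disease_model is defined elsewhere and contains a binary 'Flu' node):

# Estimate P(Flu = 0) directly from a Gibbs run; sample() returns a pandas
# DataFrame with one column per variable.
chain = GibbsSampling(disease_model)
df = chain.sample(size=1000)
print((df['Flu'] == 0).mean())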
Example No. 8
    def infer(self, evidence, new_evidence):

        evidence.update(new_evidence)

        new_model, additional_evidence = self.reduce_model(evidence)

        try:
            if self.inference_type == InferenceType.BeliefPropagation:
                inference = BeliefPropagation(new_model)
            elif self.inference_type == InferenceType.GibbsSampling:
                inference = GibbsSampling(new_model)
            elif self.inference_type == InferenceType.BayesianModelSampler:
                inference = BayesianModelSampling(new_model)
        except Exception as e:
            # for factor in new_model.factors:
            #     print(factor)
            raise e

        self.evidence = {
            var: val
            for (var, val) in evidence.items() if "F(" not in var
        }
        self.evidence.update(additional_evidence)
        self.inference = inference
        self.scope = get_scope(new_model)

        return new_model
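
InferenceType, reduce_model and get_scope are helpers of the surrounding project rather than pgmpy, so the following driver is only a hedged sketch of how the object prepared by infer might be used afterwards (the caller name and the variable names 'X' and 'Y' are hypothetical):

# Hypothetical caller; 'engine' is whatever object owns infer().
engine.infer(evidence={}, new_evidence={'X': 0})
if engine.inference_type == InferenceType.BeliefPropagation:
    print(engine.inference.map_query(variables=['Y'],
                                     evidence=engine.evidence))
elif engine.inference_type == InferenceType.GibbsSampling:
    print(engine.inference.sample(size=100))  # DataFrame of Gibbs samples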
Example No. 11
 @patch('pgmpy.sampling.GibbsSampling._get_kernel_from_markov_model',
        autospec=True)
 def test_init_markov_model(self, get_kernel):
     model = MagicMock(spec_set=MarkovModel)
     gibbs = GibbsSampling(model)
     get_kernel.assert_called_once_with(gibbs, model)
Example No. 12
 @patch('pgmpy.sampling.GibbsSampling._get_kernel_from_bayesian_model',
        autospec=True)
 @patch('pgmpy.models.MarkovChain.__init__', autospec=True)
 def test_init_bayesian_model(self, init, get_kernel):
     model = MagicMock(spec_set=BayesianModel)
     gibbs = GibbsSampling(model)
     init.assert_called_once_with(gibbs)
     get_kernel.assert_called_once_with(gibbs, model)
Example No. 14
class TestGibbsSampling(unittest.TestCase):
    def setUp(self):
        # A test Bayesian model
        diff_cpd = TabularCPD('diff', 2, [[0.6], [0.4]])
        intel_cpd = TabularCPD('intel', 2, [[0.7], [0.3]])
        grade_cpd = TabularCPD('grade',
                               3,
                               [[0.3, 0.05, 0.9, 0.5], [0.4, 0.25, 0.08, 0.3],
                                [0.3, 0.7, 0.02, 0.2]],
                               evidence=['diff', 'intel'],
                               evidence_card=[2, 2])
        self.bayesian_model = BayesianModel()
        self.bayesian_model.add_nodes_from(['diff', 'intel', 'grade'])
        self.bayesian_model.add_edges_from([('diff', 'grade'),
                                            ('intel', 'grade')])
        self.bayesian_model.add_cpds(diff_cpd, intel_cpd, grade_cpd)

        # A test Markov model
        self.markov_model = MarkovModel([('A', 'B'), ('C', 'B'), ('B', 'D')])
        factor_ab = DiscreteFactor(['A', 'B'], [2, 3], [1, 2, 3, 4, 5, 6])
        factor_cb = DiscreteFactor(['C', 'B'], [4, 3],
                                   [3, 1, 4, 5, 7, 8, 1, 3, 10, 4, 5, 6])
        factor_bd = DiscreteFactor(['B', 'D'], [3, 2], [5, 7, 2, 1, 9, 3])
        self.markov_model.add_factors(factor_ab, factor_cb, factor_bd)

        self.gibbs = GibbsSampling(self.bayesian_model)

    def tearDown(self):
        del self.bayesian_model
        del self.markov_model

    @patch('pgmpy.sampling.GibbsSampling._get_kernel_from_bayesian_model',
           autospec=True)
    @patch('pgmpy.models.MarkovChain.__init__', autospec=True)
    def test_init_bayesian_model(self, init, get_kernel):
        model = MagicMock(spec_set=BayesianModel)
        gibbs = GibbsSampling(model)
        init.assert_called_once_with(gibbs)
        get_kernel.assert_called_once_with(gibbs, model)

    @patch('pgmpy.sampling.GibbsSampling._get_kernel_from_markov_model',
           autospec=True)
    def test_init_markov_model(self, get_kernel):
        model = MagicMock(spec_set=MarkovModel)
        gibbs = GibbsSampling(model)
        get_kernel.assert_called_once_with(gibbs, model)

    def test_get_kernel_from_bayesian_model(self):
        gibbs = GibbsSampling()
        gibbs._get_kernel_from_bayesian_model(self.bayesian_model)
        self.assertListEqual(list(gibbs.variables),
                             list(self.bayesian_model.nodes()))
        self.assertDictEqual(gibbs.cardinalities, {
            'diff': 2,
            'intel': 2,
            'grade': 3
        })

    def test_get_kernel_from_markov_model(self):
        gibbs = GibbsSampling()
        gibbs._get_kernel_from_markov_model(self.markov_model)
        self.assertListEqual(list(gibbs.variables),
                             list(self.markov_model.nodes()))
        self.assertDictEqual(gibbs.cardinalities, {
            'A': 2,
            'B': 3,
            'C': 4,
            'D': 2
        })

    def test_sample(self):
        start_state = [State('diff', 0), State('intel', 0), State('grade', 0)]
        sample = self.gibbs.sample(start_state, 2)
        self.assertEqual(len(sample), 2)
        self.assertEqual(len(sample.columns), 3)
        self.assertIn('diff', sample.columns)
        self.assertIn('intel', sample.columns)
        self.assertIn('grade', sample.columns)
        self.assertTrue(set(sample['diff']).issubset({0, 1}))
        self.assertTrue(set(sample['intel']).issubset({0, 1}))
        self.assertTrue(set(sample['grade']).issubset({0, 1, 2}))

    @patch("pgmpy.sampling.GibbsSampling.random_state", autospec=True)
    def test_sample_less_arg(self, random_state):
        self.gibbs.state = None
        random_state.return_value = [
            State('diff', 0),
            State('intel', 0),
            State('grade', 0)
        ]
        sample = self.gibbs.sample(size=2)
        random_state.assert_called_once_with(self.gibbs)
        self.assertEqual(len(sample), 2)

    def test_generate_sample(self):
        start_state = [State('diff', 0), State('intel', 0), State('grade', 0)]
        gen = self.gibbs.generate_sample(start_state, 2)
        samples = [sample for sample in gen]
        self.assertEqual(len(samples), 2)
        self.assertEqual(
            {samples[0][0].var, samples[0][1].var, samples[0][2].var},
            {'diff', 'intel', 'grade'})
        self.assertEqual(
            {samples[1][0].var, samples[1][1].var, samples[1][2].var},
            {'diff', 'intel', 'grade'})

    @patch("pgmpy.sampling.GibbsSampling.random_state", autospec=True)
    def test_generate_sample_less_arg(self, random_state):
        self.gibbs.state = None
        gen = self.gibbs.generate_sample(size=2)
        samples = [sample for sample in gen]
        random_state.assert_called_once_with(self.gibbs)
        self.assertEqual(len(samples), 2)
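
The class above is a plain unittest suite; running it only needs the usual entry point at the bottom of the module (module layout and imports are assumed to match the fragments shown in these examples):

if __name__ == "__main__":
    unittest.main()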
Example No. 15
def task3():
	global task4_best_bm, task2_best_bm, task2_best_mm, task4_best_mm
	st1 = time.time()
	task2_best_bm_samples = (BayesianModelSampling(task2_best_bm)).forward_sample(size=1000)
	et1 = time.time()
	diff1 = et1 - st1
	
	task2_best_bm_samplesC = task2_best_bm_samples.copy()
	task2_best_bm_samplesC.drop('x1', axis=1, inplace=True)
	task2_bm_predicted = task2_best_bm.predict(task2_best_bm_samplesC)
	
	task2_best_mm = task2_best_bm.to_markov_model()
	st2 = time.time()
	task2_best_mm_samples = (GibbsSampling(task2_best_mm)).sample(size=1000)
	et2 = time.time()
	diff2 = et2 - st2
	task2_best_mm_samples_values = (task2_best_mm_samples.values)
	task2_mm_predicted=[]
	task2_mmprop = BeliefPropagation(task2_best_mm)
	for i in range(1000):
		nik_temp = np.array(task2_best_mm_samples_values[i,:])
		try:
			task2_mm_predicted.append((task2_mmprop.map_query(variables=['x1'],
														evidence={
														'x2':int(nik_temp[2]),
														'x3':int(nik_temp[1]),
														'x4':int(nik_temp[5]),
														'x5':int(nik_temp[0]),
														'x6':int(nik_temp[4])
														})))
		except:
			task2_mm_predicted.append({'x1':-1})
	cnt1=0
	cnt2=0
	data1 = task2_best_mm_samples[['x1']].values
	data2 = task2_best_bm_samples[['x1']].values
	for i in range(1000):
		if(task2_mm_predicted[i]['x1']==int(data1[i])):
			cnt1=cnt1+1
		#if(task2_bm_predicted[i]['x1']==int(data2[i])):
			#cnt2=cnt2+1
	task2_mm_acc = cnt1/10.0
	task2_bm_acc = cnt2/10.0
	print("	Bayesian Model for 'th' data : "+str(task2_best_bm.edges()))
	print("	Bayesian Model for 'th' data takes time : "+str(diff1))
	#print("	Bayesian Model for 'th' data has accuracy : "+str(task2_mm_acc))
	print("	Markov Model for 'th' data : "+str(task2_best_mm.edges()))
	print("	Markov Model for 'th' data takes time : "+str(diff2))
	print("	Markov Model for 'th' data has accuracy : "+str(task2_mm_acc))

	st3 = time.time()
	task4_best_bm_samples = (BayesianModelSampling(task4_best_bm)).forward_sample(size=1000)
	et3 = time.time()
	diff3 = et3 - st3
	

	'''
	task4_best_bm_samplesC = task4_best_bm_samples.copy()
	task4_best_bm_samplesC.drop('f1', axis=1, inplace=True)
	#print(task4_best_bm_samplesC)
	task4_bm_predicted = task4_best_bm.predict(task4_best_bm_samplesC)
	#print(task4_bm_predicted)
	'''
	task4_best_mm = task4_best_bm.to_markov_model()
	st4 = time.time()
	task4_best_mm_samples = (GibbsSampling(task4_best_mm)).sample(size=1000)
	et4 = time.time()
	diff4 = et4 - st4
	'''print(task4_best_mm_samples)
	task4_best_mm_samples_values = (task4_best_mm_samples.values)
	task4_mm_predicted=[]
	task4_mmprop = BeliefPropagation(task4_best_mm)
	for i in range(1000):
		nik_temp = np.array(task4_best_mm_samples_values[i,:])
		print((nik_temp))
		try:
			task4_mm_predicted.append((task4_mmprop.map_query(variables=['f1'],
														evidence={
														'f2':int(nik_temp[2]),
														'f3':int(nik_temp[1]),
														'f4':int(nik_temp[5]),
														'f5':int(nik_temp[0]),
														'f6':int(nik_temp[4]),
														'f7':int(nik_temp[2]),
														'f8':int(nik_temp[1]),
														'f9':int(nik_temp[5])
														})))
		except:
			task4_mm_predicted.append({'f1':-1})
	cnt1=0
	cnt2=0
	data1 = task4_best_mm_samples[['f1']].as_matrix()
	data2 = task4_best_bm_samples[['f1']].as_matrix()
	for i in range(1000):
		if(task2_mm_predicted[i]['x1']==int(data1[i])):
			cnt1=cnt1+1
		if(task2_bm_predicted[i]['x1']==int(data2[i])):
			cnt2=cnt2+1
	task2_mm_acc = cnt1/10.0
	task2_bm_acc = cnt2/10.0'''
	print("	Bayesian Model for 'and' data : "+str(task4_best_bm.edges()))
	print("	Bayesian Model for 'and' data takes time : "+str(diff3))
	#print("	Bayesian Model for 'th' data has accuracy : "+str(task2_mm_acc))
	print("	Markov Model for 'and' data : "+str(task4_best_mm.edges()))
	print("	Markov Model for 'and' data takes time : "+str(diff4))
Example No. 16
 def time_gibbs_sampling(self):
     gibbs_sampler = GibbsSampling(model=self.model)
     gibbs_sampler.sample(size=int(1e4))
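
The time_ prefix suggests an asv-style (airspeed velocity) benchmark method, so self.model must be prepared by a setup hook; a hedged sketch of one possible container class, reusing the small 'diff'/'intel'/'grade' network from the tests above purely for illustration:

from pgmpy.factors.discrete import TabularCPD
from pgmpy.models import BayesianModel
from pgmpy.sampling import GibbsSampling


class GibbsSamplingSuite:
    """Illustrative benchmark container; setup() builds self.model."""

    def setup(self):
        self.model = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
        diff_cpd = TabularCPD('diff', 2, [[0.6], [0.4]])
        intel_cpd = TabularCPD('intel', 2, [[0.7], [0.3]])
        grade_cpd = TabularCPD('grade', 3,
                               [[0.3, 0.05, 0.9, 0.5],
                                [0.4, 0.25, 0.08, 0.3],
                                [0.3, 0.7, 0.02, 0.2]],
                               evidence=['diff', 'intel'],
                               evidence_card=[2, 2])
        self.model.add_cpds(diff_cpd, intel_cpd, grade_cpd)

    def time_gibbs_sampling(self):
        sampler = GibbsSampling(model=self.model)
        sampler.sample(size=int(1e4))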
Example No. 17
bp5 = belief_prop.query(variables=['y'], evidence={'poutcome': 2, 'loan': 1})
print(bp5['y'])

bp6 = belief_prop.query(variables=['y'],
                        evidence={
                            'marital': 1,
                            'loan': 1,
                            'contact': 1,
                            'month': 5
                        })
print(bp6['y'])

#-----------------Sampling using GibbsSampling--------------------------

gibbs_chain = GibbsSampling(mark)
gen = gibbs_chain.generate_sample(size=5)
[sample for sample in gen]

gibbs_chain.sample(size=4)

for fact in mark.get_factors():
    print(fact)

data1 = data[[
    'marital', 'education', 'default', 'housing', 'loan', 'contact', 'month',
    'poutcome', 'y'
]].copy()

df = data1[0:5]
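
As a rough cross-check on the belief-propagation queries above, the same marginal can be estimated empirically from a longer Gibbs run (a sketch; gibbs_chain and the 'y' column are the objects already defined in this script, and the sample size is illustrative):

# Empirical distribution of 'y' from 2000 Gibbs samples.
gibbs_df = gibbs_chain.sample(size=2000)
print(gibbs_df['y'].value_counts(normalize=True))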