def test_copy(self): cpd = LinearGaussianCPD("y", [0.67, 1, 4.56, 8], 2, ["x1", "x2", "x3"]) copy = cpd.copy() self.assertEqual(cpd.variable, copy.variable) self.assertEqual(cpd.beta_0, copy.beta_0) self.assertEqual(cpd.variance, copy.variance) np_test.assert_array_equal(cpd.beta_vector, copy.beta_vector) self.assertEqual(cpd.evidence, copy.evidence) cpd.variable = "z" self.assertEqual(copy.variable, "y") cpd.variance = 0 self.assertEqual(copy.variance, 2) cpd.beta_0 = 1 self.assertEqual(copy.beta_0, 0.67) cpd.evidence = ["p", "q", "r"] self.assertEqual(copy.evidence, ["x1", "x2", "x3"]) cpd.beta_vector = [2, 2, 2] np_test.assert_array_equal(copy.beta_vector, [1, 4.56, 8]) copy = cpd.copy() copy.variable = "k" self.assertEqual(cpd.variable, "z") copy.variance = 0.3 self.assertEqual(cpd.variance, 0) copy.beta_0 = 1.5 self.assertEqual(cpd.beta_0, 1) copy.evidence = ["a", "b", "c"] self.assertEqual(cpd.evidence, ["p", "q", "r"]) copy.beta_vector = [2.2, 2.2, 2.2] np_test.assert_array_equal(cpd.beta_vector, [2, 2, 2])
def test_check_model(self):
    self.model.add_cpds(self.cpd1, self.cpd2, self.cpd3)
    self.assertEqual(self.model.check_model(), True)

    # cpd4 lists x2 as evidence, but the only edge into x4 is from x1,
    # so check_model should reject the model.
    self.model.add_edge("x1", "x4")
    cpd4 = LinearGaussianCPD("x4", [4, -1], 3, ["x2"])
    self.model.add_cpds(cpd4)
    self.assertRaises(ValueError, self.model.check_model)
def test_class_init(self):
    cpd1 = LinearGaussianCPD('x', [0.23], 0.56)
    self.assertEqual(cpd1.variable, 'x')
    self.assertEqual(cpd1.beta_0, 0.23)
    self.assertEqual(cpd1.variance, 0.56)

    cpd2 = LinearGaussianCPD('y', [0.67, 1, 4.56, 8], 2, ['x1', 'x2', 'x3'])
    self.assertEqual(cpd2.variable, 'y')
    self.assertEqual(cpd2.beta_0, 0.67)
    self.assertEqual(cpd2.variance, 2)
    self.assertEqual(cpd2.evidence, ['x1', 'x2', 'x3'])
    np_test.assert_array_equal(cpd2.beta_vector, [1, 4.56, 8])

    # The beta list must contain the intercept plus exactly one coefficient
    # per evidence variable; any mismatch raises a ValueError.
    self.assertRaises(ValueError, LinearGaussianCPD, 'x', [1, 1, 2], 2,
                      ['a', 'b', 'c'])
    self.assertRaises(ValueError, LinearGaussianCPD, 'x', [1, 1, 2, 3], 2,
                      ['a', 'b'])
def test_class_init(self):
    mu = np.array([7, 13])
    sigma = np.array([[4, 3], [3, 6]])
    cpd1 = LinearGaussianCPD(
        "Y", evidence_mean=mu, evidence_variance=sigma, evidence=["X1", "X2"]
    )
    self.assertEqual(cpd1.variable, "Y")
    self.assertEqual(cpd1.evidence, ["X1", "X2"])
def test_maximum_likelihood_estimator(self):
    # X and Y are jointly Gaussian; the data were generated from the
    # distribution with beta = [2, 0.7, 0.3]
    sigma_c = 4
    x_df = pd.read_csv("pgmpy/tests/test_factors/test_continuous/gbn_values_1.csv")

    mu = np.array([7, 13])
    sigma = np.array([[4, 3], [3, 6]])
    cpd1 = LinearGaussianCPD(
        "Y", evidence_mean=mu, evidence_variance=sigma, evidence=["X1", "X2"]
    )
    mean, variance = cpd1.fit(x_df, states=["(Y|X)", "X1", "X2"], estimator="MLE")

    np_test.assert_allclose(mean, [2.361152, 0.693147, 0.276383], rtol=1e-03)
    np_test.assert_allclose(variance, sigma_c, rtol=1e-1)
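# A hedged sketch, not part of the test suite: one way jointly Gaussian data
# such as gbn_values_1.csv could be generated, using the parameters quoted in
# the comment above (beta = [2, 0.7, 0.3], sigma_c = 4) and the evidence
# distribution N(mu, sigma). The function name and column labels are
# illustrative; the columns simply mirror the `states` argument passed to
# cpd1.fit. np and pd are the numpy/pandas modules already used in this file.
def generate_gbn_values(n=10000, seed=0):
    rng = np.random.RandomState(seed)
    mu = np.array([7, 13])
    sigma = np.array([[4, 3], [3, 6]])
    x = rng.multivariate_normal(mu, sigma, size=n)
    noise = rng.normal(loc=0, scale=np.sqrt(4), size=n)
    y = 2 + 0.7 * x[:, 0] + 0.3 * x[:, 1] + noise
    return pd.DataFrame({"(Y|X)": y, "X1": x[:, 0], "X2": x[:, 1]})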
def test_pdf(self): cpd1 = LinearGaussianCPD("x", [0.23], 0.56) cpd2 = LinearGaussianCPD("y", [0.67, 1, 4.56, 8], 2, ["x1", "x2", "x3"]) np_test.assert_almost_equal(cpd1.assignment(1), 0.3139868) np_test.assert_almost_equal(cpd2.assignment(1, 1.2, 2.3, 3.4), 1.076e-162)
def test_str(self):
    cpd1 = LinearGaussianCPD('x', 0.23, 0.56)
    cpd2 = LinearGaussianCPD('y', 0.67, 2, ['x1', 'x2', 'x3'], [1, 4.56, 8])
    self.assertEqual(cpd1.__str__(), "P(x) = N(0.23; 0.56)")
    self.assertEqual(cpd2.__str__(),
                     "P(y | x1, x2, x3) = N(1.0*x1 + 4.56*x2 + 8.0*x3 + 0.67; 2)")
def test_str(self): cpd1 = LinearGaussianCPD("x", [0.23], 0.56) cpd2 = LinearGaussianCPD("y", [0.67, 1, 4.56, 8], 2, ["x1", "x2", "x3"]) self.assertEqual(cpd1.__str__(), "P(x) = N(0.23; 0.56)") self.assertEqual( cpd2.__str__(), "P(y | x1, x2, x3) = N(1.0*x1 + " "4.56*x2 + 8.0*x3 + 0.67; 2)", )
def setUp(self): self.model = LinearGaussianBayesianNetwork([("x1", "x2"), ("x2", "x3")]) self.cpd1 = LinearGaussianCPD("x1", [1], 4) self.cpd2 = LinearGaussianCPD("x2", [-5, 0.5], 4, ["x1"]) self.cpd3 = LinearGaussianCPD("x3", [4, -1], 3, ["x2"])
                    (('D', 0), ('D', 1)), (('I', 0), ('I', 1))])
grade_cpd = TabularCPD(('G', 0), 3,
                       [[0.3, 0.05, 0.9, 0.5],
                        [0.4, 0.25, 0.8, 0.03],
                        [0.3, 0.7, 0.02, 0.2]],
                       evidence=[('I', 0), ('D', 0)], evidence_card=[2, 2])
print('grade_cpd', grade_cpd)
d_i_cpd = TabularCPD(('D', 1), 2, [[0.6, 0.3], [0.4, 0.7]],
                     evidence=[('D', 0)], evidence_card=[2])
diff_cpd = TabularCPD(('D', 0), 2, [[0.6, 0.4]])
intel_cpd = TabularCPD(('I', 0), 2, [[0.7, 0.3]])
i_i_cpd = TabularCPD(('I', 1), 2, [[0.5, 0.4], [0.5, 0.6]],
                     evidence=[('I', 0)], evidence_card=[2])
dbn.add_edges_from([(('T', 0), ('T', 1)), (('P', 0), ('T', 1)),
                    (('P', 1), ('T', 1)), (('T', 0), ('I', 1)),
                    (('T', 1), ('I', 1)), (('E', 1), ('I', 1))])
trust_cpd = LinearGaussianCPD('T', [0.2, -2, 3, 7], 9.6, ['T', 'P', 'X3'])
# interaction_cpd =
# extranous_interaction_cpd =
dbn.add_cpds(grade_cpd, d_i_cpd, diff_cpd, intel_cpd, i_i_cpd)
dbn.get_cpds()
t = dbn.get_interface_nodes(time_slice=0)
leaves = dbn.get_roots()
print(t, leaves)
# {\sigma^2}_Y = \sigma^2 + \beta^T \Sigma \beta
# $$
#
# The joint distribution over $\{X, Y\}$ is a normal distribution where:
#
# $$Cov[X_i; Y] = \sum_{j=1}^{k} \beta_j \Sigma_{i,j}$$
#
# These results assume that $X_1, \dots, X_k$ are jointly Gaussian with distribution $N(\mu; \Sigma)$.
#
# For its representation pgmpy has a class named LinearGaussianCPD in the module pgmpy.factors.continuous. To instantiate an object of this class, one needs to provide the variable name, the value of the beta_0 term, the variance, a list of the parent variable names, and a list of the coefficient values of the linear equation (beta_vector); the parent list and the beta_vector are optional and default to None.

# In[25]:

# For P(Y | X1, X2, X3) = N(-2*x1 + 3*x2 + 7*x3 + 0.2; 9.6)
from pgmpy.factors.continuous import LinearGaussianCPD

cpd = LinearGaussianCPD('Y', [0.2, -2, 3, 7], 9.6, ['X1', 'X2', 'X3'])
print(cpd)

# A Gaussian Bayesian network is defined as a network all of whose variables are continuous and all of whose CPDs are linear Gaussians. These networks are of particular interest because they are an alternative representation of the joint Gaussian distribution.
#
# These networks are implemented as the LinearGaussianBayesianNetwork class in the module pgmpy.models.continuous. This class is a subclass of the BayesianModel class in pgmpy.models and inherits most of its methods. It has a special method, to_joint_gaussian, that returns an equivalent JointGaussianDistribution object for the model.

# In[26]:

from pgmpy.models import LinearGaussianBayesianNetwork

model = LinearGaussianBayesianNetwork([('x1', 'x2'), ('x2', 'x3')])
cpd1 = LinearGaussianCPD('x1', [1], 4)
cpd2 = LinearGaussianCPD('x2', [-5, 0.5], 4, ['x1'])
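# A minimal sketch, not part of the original cell: it borrows the cpd3 used in
# the tests below and shows the to_joint_gaussian conversion mentioned in the
# text above.
cpd3 = LinearGaussianCPD('x3', [4, -1], 3, ['x2'])
model.add_cpds(cpd1, cpd2, cpd3)
jgd = model.to_joint_gaussian()
print(jgd)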
def setUp(self):
    self.model = LinearGaussianBayesianNetwork([('x1', 'x2'), ('x2', 'x3')])
    self.cpd1 = LinearGaussianCPD('x1', 1, 4)
    self.cpd2 = LinearGaussianCPD('x2', -5, 4, ['x1'], [0.5])
    self.cpd3 = LinearGaussianCPD('x3', 4, 3, ['x2'], [-1])