Example #1
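These tests reference DiscreteFactor, Model, and LearnMrfParameters without showing their imports. A minimal header that makes the snippets self-contained, assuming a pyugm-style module layout (the exact paths are an assumption; point them wherever these classes live in your project):

import numpy as np

# Assumed module paths: these class names match the pyugm MRF library,
# but adjust the imports to your own code base if they differ.
from pyugm.factor import DiscreteFactor
from pyugm.model import Model
from pyugm.learn import LearnMrfParameters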
    def test_evaluate_derivative(self):
        D = 12  # total parameter count: three factors with four entries each
        delta = 10.0**-10
        for variable_index in range(1, D):
            a = DiscreteFactor(['1', '2'], parameters=np.array([['a', 'b'], ['c', 'd']]))
            b = DiscreteFactor(['2', '3'], parameters=np.array([['e', 'f'], ['g', 'h']]))
            c = DiscreteFactor(['3', '4'], parameters=np.array([['i', 'j'], ['k', 'l']]))

            model = Model([a, b, c])
            prior_sigma2 = 2.3
            learner = LearnMrfParameters(model, prior=1.0 / prior_sigma2)

            # Perturbation vector: delta in a single coordinate.
            delta_vector = np.zeros(D)
            delta_vector[variable_index] = delta

            parameters = np.zeros(D)
            parameters_plus_delta = np.zeros(D)
            # Raw potential values; their logs fill the packed parameter vector.
            parameter_out_of_order = [1, 2, 3, 4, 3, 4, 5, 7, 8, 9, 10, 11]
            parameter_names = list('abcdefghijkl')
            for param, param_name in zip(parameter_out_of_order, parameter_names):
                parameters[learner._parameters_to_index[param_name]] = np.log(param)
                parameters_plus_delta[learner._parameters_to_index[param_name]] = np.log(param)
            parameters_plus_delta += delta_vector

            evidence = {'1': 0, '3': 1}

            # Objective and analytic gradient at the base point...
            learner._parameters = parameters
            actual_log_likelihood1, actual_derivative1 = learner.log_likelihood_and_gradient(evidence)

            # ...and at the perturbed point.
            learner._parameters = parameters_plus_delta
            actual_log_likelihood2, actual_derivative2 = learner.log_likelihood_and_gradient(evidence)

            # One-sided finite-difference estimate of the derivative.
            expected_deriv = (actual_log_likelihood2 - actual_log_likelihood1) / delta

            # Gaussian prior: normalising constant plus quadratic penalty.
            prior_factor = D * (-0.5 * np.log(2.0 * np.pi * prior_sigma2))
            print('pn', prior_factor, D * -0.5 * np.log(prior_sigma2))
            prior_factor += sum(-0.5 / prior_sigma2 * param ** 2.0 for param in parameters)
            print('dim', D)
            print(actual_log_likelihood1, np.log(0.18) + prior_factor, prior_factor)
            #self.assertAlmostEqual(actual_log_likelihood1, np.log(0.18) + prior_factor)
            #self.assertAlmostEqual(actual_log_likelihood2, np.log(0.18) + prior_factor, delta=10.0**-3)

            print('derivs')
            print(actual_derivative1)
            print(actual_derivative2)
            print(expected_deriv)
            self.assertAlmostEqual(expected_deriv, actual_derivative1[variable_index], delta=10.0**-4)
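The pattern in this test, perturb one parameter, re-evaluate the objective, and compare the resulting slope against the analytic gradient, works for any differentiable objective. A standalone sketch using central differences (check_gradient and its arguments are illustrative names, not part of the library):

import numpy as np

def check_gradient(f, grad, x, delta=1e-6, tol=1e-4):
    """Compare an analytic gradient against central finite differences.

    f    -- callable returning a scalar objective
    grad -- callable returning f's gradient at x
    """
    analytic = grad(x)
    numeric = np.zeros_like(x)
    for i in range(x.size):
        e = np.zeros_like(x)
        e[i] = delta
        # Central difference: O(delta**2) error, more accurate than the
        # one-sided difference used in the test above.
        numeric[i] = (f(x + e) - f(x - e)) / (2.0 * delta)
    return np.allclose(analytic, numeric, atol=tol)

# Usage: quadratic objective with a known gradient.
f = lambda x: 0.5 * np.dot(x, x)
grad = lambda x: x
assert check_gradient(f, grad, np.array([1.0, -2.0, 3.0]))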
Example #2
    def test_get_log_likelihood_by_parameters(self):
        a = DiscreteFactor(['1', '2'], parameters=np.array([['a', 'b'], ['c', 'd']]))
        b = DiscreteFactor(['2', '3'], parameters=np.array([['e', 'f'], ['g', 'h']]))

        model = Model([a, b])
        prior_sigma2 = 2.3
        learner = LearnMrfParameters(model, prior=1.0 / prior_sigma2)
        D = 8
        parameters = np.zeros(D)
        # Raw potential values; their logs fill the packed parameter vector.
        parameter_out_of_order = [1, 2, 3, 4, 3, 4, 5, 7]
        parameter_names = list('abcdefgh')
        for param, param_name in zip(parameter_out_of_order, parameter_names):
            parameters[learner._parameters_to_index[param_name]] = np.log(param)
        evidence = {'1': 0, '3': 1}

        learner._parameters = parameters

        actual_log_likelihood, _ = learner.log_likelihood_and_gradient(evidence)

        # Gaussian prior: normalising constant plus quadratic penalty.
        prior_factor = D * (-0.5 * np.log(2.0 * np.pi * prior_sigma2))
        print('pn', prior_factor, D * -0.5 * np.log(prior_sigma2))
        prior_factor += sum(-0.5 / prior_sigma2 * param ** 2.0 for param in parameters)
        print('dim', D)
        print(actual_log_likelihood, np.log(0.18) + prior_factor, prior_factor)
        self.assertAlmostEqual(actual_log_likelihood, np.log(0.18) + prior_factor)
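Both tests fold a zero-mean Gaussian prior over the log-parameters into the objective: for D parameters w with variance sigma^2, log N(w; 0, sigma^2 I) = -(D/2) * log(2*pi*sigma^2) - (1/(2*sigma^2)) * sum_i w_i^2, which is exactly what prior_factor accumulates line by line above. A compact NumPy version of the same quantity (the function name is illustrative):

import numpy as np

def log_gaussian_prior(parameters, sigma2):
    """Log density of a zero-mean isotropic Gaussian prior,
    matching the prior_factor computed in the tests above."""
    D = parameters.size
    normaliser = -0.5 * D * np.log(2.0 * np.pi * sigma2)
    quadratic = -0.5 / sigma2 * np.sum(parameters ** 2)
    return normaliser + quadratic

# For the 8-parameter example above, log_gaussian_prior(parameters, 2.3)
# reproduces prior_factor.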