Example #1
    def test_gradient_posterior_mean(self):
        """Check analytic gradients against forward finite differences.

        First validates ``evaluate_grad_quadrature_cross_cov``, then
        ``gradient_posterior_mean``, each at a single 1-d point.
        """
        gp = self.gp_complete

        point = np.array([[80.5]])

        # Analytic gradient of the quadrature cross covariance at `point`.
        grad = gp.evaluate_grad_quadrature_cross_cov(
            point, gp.gp.data['points'], gp.gp.kernel.hypers_values_as_array)

        dh = 0.00001
        finite_diff = FiniteDifferences.forward_difference(
            lambda point: gp.evaluate_quadrature_cross_cov(
                point, gp.gp.data['points'], gp.gp.kernel.
                hypers_values_as_array), point, np.array([dh]))

        # Vectorized comparison of the whole row; this subsumes the former
        # per-index loop (which used Python-2-only `xrange`) that asserted
        # exactly the same entries one at a time.
        npt.assert_almost_equal(finite_diff[0], grad[0, :], decimal=1)

        # Analytic gradient of the posterior mean itself.
        gradient = gp.gradient_posterior_mean(point)

        dh = 0.0001
        finite_diff = FiniteDifferences.forward_difference(
            lambda points: gp.compute_posterior_parameters(
                points, only_mean=True)['mean'], point, np.array([dh]))

        npt.assert_almost_equal(finite_diff[0], gradient[0], decimal=5)
Example #2
    def test_evaluate_grad_quadrature_cross_cov_resp_candidate(self):
        """Check the gradient w.r.t. the candidate point per training point.

        Compares each column of the analytic gradient against a forward
        finite difference of ``evaluate_quadrature_cross_cov``.
        """
        candidate_point = np.array([[51.5, 0]])
        points = np.array([[51.3], [30.5], [95.1]])
        parameters = self.gp_complete.gp.kernel.hypers_values_as_array
        sol = self.gp_complete.evaluate_grad_quadrature_cross_cov_resp_candidate(
            candidate_point, points, parameters)

        gp = self.gp_complete
        dh = 0.000001

        # One check per training point; the middle point originally used a
        # looser tolerance (decimal=1), which is preserved here.
        for index, decimal in [(0, 2), (1, 1), (2, 2)]:
            finite_diff = FiniteDifferences.forward_difference(
                lambda point: gp.evaluate_quadrature_cross_cov(
                    points[index:index + 1, :],
                    point.reshape((1, len(point))), parameters),
                candidate_point[0, :], np.array([dh]))
            npt.assert_almost_equal(
                sol[0, index], finite_diff[0][0], decimal=decimal)
            # The second row of the gradient must match exactly.
            assert sol[1, index] == finite_diff[1]
    def test_gradient_respect_parameters_finite_differences(self):
        """Check TasksKernel parameter gradients against forward differences.

        Covers three configurations: two tasks with full correlation
        parameters, two tasks with ``same_correlation``, and a single task.
        """
        dh = 0.00000001
        finite_diff = FiniteDifferences.forward_difference(
            lambda params: TasksKernel.evaluate_cov_defined_by_params(
                params, self.inputs_, 2),
            np.array([2.0, 3.0, 4.0]), np.array([dh]))

        gradient = TasksKernel.evaluate_grad_defined_by_params_respect_params(
            np.array([2.0, 3.0, 4.0]), self.inputs_, 2)

        for i in range(3):
            npt.assert_almost_equal(finite_diff[i], gradient[i], decimal=4)

        # Two tasks sharing one correlation: only two parameters remain.
        # (`same_correlation=True` replaces the unidiomatic `**{...}` form;
        # the call is identical.)
        gradient = TasksKernel.evaluate_grad_defined_by_params_respect_params(
            np.array([2.0, 3.0]), self.inputs_, 2, same_correlation=True)

        finite_diff = FiniteDifferences.forward_difference(
            lambda params: TasksKernel.evaluate_cov_defined_by_params(
                params, self.inputs_, 2, same_correlation=True),
            np.array([2.0, 3.0]), np.array([dh]))

        for i in range(2):
            npt.assert_almost_equal(finite_diff[i], gradient[i], decimal=4)

        # Single task with one parameter. NOTE(review): this section uses
        # ``self.inputs`` rather than ``self.inputs_`` — presumably a
        # separate single-task fixture; confirm it is not a typo.
        gradient = TasksKernel.evaluate_grad_defined_by_params_respect_params(
            np.array([2.0]), self.inputs, 1, same_correlation=True)

        finite_diff = FiniteDifferences.forward_difference(
            lambda params: TasksKernel.evaluate_cov_defined_by_params(
                params, self.inputs, 1, same_correlation=True),
            np.array([2.0]), np.array([dh]))

        npt.assert_almost_equal(finite_diff[0], gradient[0], decimal=4)
    def test_gradient_uniform_finite_resp_candidate(self):
        """Check ``gradient_uniform_finite_resp_candidate`` per point.

        Each column of the returned gradient is compared against a forward
        finite difference of the quadrature cross covariance.
        """
        gp = self.gp
        f = self.gp.gp.evaluate_grad_cross_cov_respect_point
        candidate_point = np.array([[40.0, 0]])
        index_points = self.gp.x_domain
        domain_random = self.gp.arguments_expectation['domain_random']
        points = np.array([[39.0], [41.0]])
        parameters_kernel = self.gp.gp.kernel.hypers_values_as_array
        value = gradient_uniform_finite_resp_candidate(
            f, candidate_point, index_points, domain_random, self.gp.w_domain,
            points, parameters_kernel)

        dh = 0.00001
        # The two original copy-pasted stanzas differed only in the point
        # index; fold them into one loop.
        for index in range(2):
            finite_diff = FiniteDifferences.forward_difference(
                lambda point: gp.evaluate_quadrature_cross_cov(
                    points[index:index + 1, :],
                    point.reshape((1, len(point))), parameters_kernel),
                candidate_point[0, :], np.array([dh]))
            npt.assert_almost_equal(value[0, index], finite_diff[0])
            assert value[1, index] == finite_diff[1]
    def test_grad_log_likelihood(self):
        """Compare analytic log-likelihood gradients with forward differences.

        Exercises several models (``complex_gp_2``, ``gp_3``,
        ``gp_gaussian``) at different parameter settings and tolerances.
        """

        def check(model, var_noise, mean, kernel_params, dh, decimal):
            # Analytic gradient at (var_noise, mean, kernel_params) versus a
            # forward finite difference of the log likelihood, entry by entry.
            analytic = model.grad_log_likelihood(
                var_noise, mean, np.array(kernel_params))
            numeric = FiniteDifferences.forward_difference(
                lambda params: model.log_likelihood(
                    params[0], params[1], params[2:]),
                np.array([var_noise, mean] + kernel_params), np.array([dh]))
            for index in range(2 + len(kernel_params)):
                npt.assert_almost_equal(
                    numeric[index], analytic[index], decimal=decimal)

        # decimal=7 is numpy's default, matching the original bare call.
        check(self.complex_gp_2, 1.0, 1.0, [1.0, 0.0, 0.0, 0.0],
              dh=0.0000001, decimal=7)
        check(self.complex_gp_2, 1.82, 123.1, [5.0, 1.0, -5.5, 10.0],
              dh=0.00000001, decimal=3)
        check(self.gp_3, 1.82, 123.1, [5.0, 7.3],
              dh=0.0000001, decimal=5)
        check(self.gp_gaussian, 1.0, 0.0, [14.0, 0.9],
              dh=0.0000001, decimal=5)
    def test_evaluate_gradient_bq(self):
        """Gradient of the acquisition function vs a forward difference."""
        eval_point = np.array([[91.5]])
        analytic_grad = self.ei_2.evaluate_gradient(eval_point)

        step = np.array([0.0001])
        numeric_grad = FiniteDifferences.forward_difference(
            lambda x: self.ei_2.evaluate(x.reshape((1, len(x)))),
            np.array([91.5]), step)

        npt.assert_almost_equal(numeric_grad[0], analytic_grad[0], decimal=2)
    def test_gradient_posterior_parameters(self):
        """Gradients of the posterior mean and covariance vs finite differences."""
        eval_point = np.array([[49.5]])
        analytic = self.gp_gaussian.gradient_posterior_parameters(eval_point)

        step = np.array([0.0000001])

        # Mean gradient.
        numeric_mean = FiniteDifferences.forward_difference(
            lambda x: self.gp_gaussian.compute_posterior_parameters(
                x.reshape((1, len(x))), only_mean=True)['mean'],
            np.array([49.5]), step)
        npt.assert_almost_equal(analytic['mean'], numeric_mean[0])

        # Covariance gradient.
        numeric_cov = FiniteDifferences.forward_difference(
            lambda x: self.gp_gaussian.compute_posterior_parameters(
                x.reshape((1, len(x))))['cov'],
            np.array([49.5]), step)
        npt.assert_almost_equal(analytic['cov'], numeric_cov[0])
    def test_grad_respect_point_finite_differences(self):
        """Kernel gradient w.r.t. the evaluation point vs forward differences."""
        step = np.array([0.000000000001])
        inputs_1 = np.array([[2.0, 4.0], [3.0, 5.0]])
        point = np.array([[42.0, 35.0]])

        numeric = FiniteDifferences.forward_difference(
            lambda x: self.matern52_.cross_cov(x.reshape([1, 2]), inputs_1),
            np.array([42.0, 35.0]), step)

        analytic = self.matern52_.grad_respect_point(point, inputs_1)
        # Compare one coordinate of the gradient at a time.
        for dim in range(2):
            npt.assert_almost_equal(
                numeric[dim], analytic[:, dim:dim + 1].transpose())
    def test_gradient_respect_parameters_finite_differences(self):
        """Check ScaledKernel parameter gradients against forward differences."""
        inputs_1 = np.array([[2.0, 4.0], [3.0, 5.0]])
        dh = 0.00000001
        # Pass the kernel-name list directly; the original
        # `*([MATERN52_NAME], )` unpacking is an identical but obfuscated
        # way to pass one positional argument.
        finite_diff = FiniteDifferences.forward_difference(
            lambda params: ScaledKernel.evaluate_cov_defined_by_params(
                params, inputs_1, 2, [MATERN52_NAME]),
            np.array([2.0, 3.0, 4.0]), np.array([dh]))

        gradient = ScaledKernel.evaluate_grad_defined_by_params_respect_params(
            np.array([2.0, 3.0, 4.0]), inputs_1, 2, [MATERN52_NAME])

        for i in range(3):
            npt.assert_almost_equal(finite_diff[i], gradient[i])
Example #10
    def test_compute_hessian_parameters_for_sample(self):
        """Hessian of the sample parameters vs second-order central differences."""
        point = np.array([[95.0]])
        candidate_point = np.array([[99.15, 0]])
        val = self.gp_complete_2.compute_hessian_parameters_for_sample(
            point, candidate_point)

        def numeric_hessian(key, step):
            # Second-order central difference of one entry ('a' or 'b') of
            # the sampling parameters, as a function of `point`.
            return FiniteDifferences.second_order_central(
                lambda x: self.gp_complete_2.compute_parameters_for_sample(
                    x.reshape((1, len(point))), candidate_point,
                    clear_cache=False)[key],
                point[0, :], np.array([step]))

        npt.assert_almost_equal(
            numeric_hessian('a', 0.01)[(0, 0)], val['a'][0, :], decimal=5)
        npt.assert_almost_equal(
            numeric_hessian('b', 0.1)[(0, 0)], val['b'], decimal=5)
    def test_forward_difference(self):
        """forward_difference must equal (f(x + h) - f(x)) / h entrywise."""
        result = FiniteDifferences.forward_difference(self.f, self.x, self.h)

        fixed_inputs = np.array([[2.0, 0.0], [0.0, 2.0]])
        base_eval = Matern52.evaluate_cov_defined_by_params(
            self.x, fixed_inputs, 2)

        step = self.h[0]
        # `result` maps each perturbed coordinate to a 2x2 difference matrix.
        for index, approx in result.items():
            shifted = deepcopy(self.x)
            shifted[index] += step
            shifted_eval = Matern52.evaluate_cov_defined_by_params(
                shifted, fixed_inputs, 2)
            for row in range(2):
                for col in range(2):
                    expected = (shifted_eval[row, col] -
                                base_eval[row, col]) / step
                    assert approx[row, col] == expected
Example #12
    def test_hessian_posterior_mean(self):
        """Hessian of the posterior mean vs a second-order central difference."""
        gp = self.gp_complete
        eval_point = np.array([[80.5]])

        analytic_hessian = gp.hessian_posterior_mean(eval_point)

        step = np.array([0.1])
        numeric = FiniteDifferences.second_order_central(
            lambda x: gp.compute_posterior_parameters(
                x.reshape((1, len(x))), only_mean=True)['mean'],
            eval_point[0, :], step)

        npt.assert_almost_equal(numeric[(0, 0)], analytic_hessian[0, 0])
Example #13
    def test_gradient_vector_b(self):
        """Check ``gradient_vector_b`` against forward finite differences.

        Builds a two-task GP on data sampled from a Matern-5/2 kernel with a
        task-dependent offset, fits it, and compares the analytic gradient of
        the vector ``b`` with a numeric one. Also checks the cached path.
        """
        np.random.seed(5)
        n_points = 10
        points = np.linspace(0, 100, n_points)
        points = points.reshape([n_points, 1])
        tasks = np.random.randint(2, size=(n_points, 1))

        add = [10, -10]
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)

        # Shift each sampled value by a task-dependent offset.
        # (`range` replaces Python-2-only `xrange`; same iteration.)
        for i in range(n_points):
            function[0, i] += add[tasks[i, 0]]
        points = np.concatenate((points, tasks), axis=1)

        function = function[0, :]

        training_data = {
            'evaluations': list(function),
            'points': points,
            "var_noise": [],
        }

        gaussian_p = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data, [2, 1, 2],
            bounds_domain=[[0, 100]])
        gaussian_p = gaussian_p.fit_gp_regression(random_seed=1314938)

        gp = BayesianQuadrature(gaussian_p, [0], UNIFORM_FINITE, {TASKS: 2})
        candidate_point = np.array([[84.0, 1]])
        points = np.array([[99.5], [12.1], [70.2]])
        value = gp.gradient_vector_b(candidate_point, points, cache=False)

        # The original built `np.array([dh_])` via an intermediate list;
        # construct the step array directly.
        dh = np.array([0.0000001])
        finite_diff = FiniteDifferences.forward_difference(
            lambda point: gp.compute_posterior_parameters_kg(
                points, point.reshape((1, len(point))), cache=False)['b'],
            candidate_point[0, :], dh)
        npt.assert_almost_equal(finite_diff[0], value[:, 0], decimal=5)
        assert np.all(finite_diff[1] == value[:, 1])

        # The cached computation must agree with the uncached one.
        value_2 = gp.gradient_vector_b(candidate_point, points, cache=True)
        assert np.all(value_2 == value)
    def test_evaluate_hessian_respect_point(self):
        """Hessian of the Matern-5/2 kernel w.r.t. the evaluation point.

        Compares each (i, j) entry, for both input rows, against a
        second-order central finite difference.
        """
        point = np.array([[4.5, 7.5]])
        inputs = np.array([[5.0, 6.0], [8.0, 9.0]])
        params = np.array([1.0, 5.0])
        result = Matern52.evaluate_hessian_respect_point(
            params, point, inputs, 2)

        dh = 0.00001
        finite_diff = FiniteDifferences.second_order_central(
            lambda x: Matern52.evaluate_cross_cov_defined_by_params(
                params, x.reshape((1, len(x))), inputs, 2), point[0, :],
            np.array([dh]))

        # `range` replaces Python-2-only `xrange`; the leftover debug
        # `print i, j` statement is removed.
        for i in range(2):
            for j in range(2):
                npt.assert_almost_equal(
                    finite_diff[i, j],
                    np.array([[result[0, i, j], result[1, i, j]]]),
                    decimal=5)
    def test_hessian_distance_length_scale_respect_point(self):
        """Second derivative of the length-scaled distance w.r.t. the point.

        Compares each (i, j) entry of the analytic second derivative against
        a second-order central finite difference of the distance.
        """
        params = np.array([1.0, 5.0])
        point = np.array([[4.5, 7.5]])
        inputs = np.array([[5.0, 6.0], [8.0, 9.0]])
        result = Distances.gradient_distance_length_scale_respect_point(
            params, point, inputs, second=True)
        result = result['second']

        dh = 0.00001
        finite_diff = FiniteDifferences.second_order_central(
            lambda x: np.sqrt(
                Distances.dist_square_length_scale(
                    params, x.reshape((1, len(x))), inputs)), point[0, :],
            np.array([dh]))

        # `range` replaces Python-2-only `xrange`; the leftover debug
        # `print i, j` statement is removed.
        for i in range(2):
            for j in range(2):
                npt.assert_almost_equal(
                    finite_diff[i, j],
                    np.array([[result[0, i, j], result[1, i, j]]]),
                    decimal=5)
    def test_second_order_central(self):
        """second_order_central must match the textbook central formulas.

        Diagonal entries: (f(x+h) + f(x-h) - 2 f(x)) / h^2.
        Mixed entries: (f(++) + f(--) - f(+-) - f(-+)) / (4 h^2).
        The assertions use strict equality, so the arithmetic below keeps
        exactly the original evaluation order.
        """
        self.x = np.array([1.5, 1.5, 1.5])
        h = np.array([0.1])
        result = FiniteDifferences.second_order_central(self.f, self.x, h)

        fixed_inputs = np.array([[2.0, 0.0], [0.0, 2.0]])
        base_eval = Matern52.evaluate_cov_defined_by_params(
            self.x, fixed_inputs, 2)

        def eval_shifted(deltas):
            # Evaluate the covariance at self.x with the given
            # (coordinate index, offset) perturbations applied.
            new_x = deepcopy(self.x)
            for index, delta in deltas:
                new_x[index] += delta
            return Matern52.evaluate_cov_defined_by_params(
                new_x, fixed_inputs, 2)

        step = h[0]

        # Diagonal (pure second derivative) entries.
        for i in range(3):
            expected = (eval_shifted([(i, step)]) + eval_shifted([(i, -step)])
                        - 2.0 * base_eval) / (step ** 2)
            assert np.all(result[(i, i)] == expected)

        # Off-diagonal (mixed derivative) entries. The accumulation order
        # (((++ plus --) minus +-) minus -+) mirrors the original code so
        # floating-point results are bit-identical.
        for i in range(3):
            for j in range(3):
                if j == i:
                    continue
                accum = eval_shifted([(i, step), (j, step)])
                accum += eval_shifted([(i, -step), (j, -step)])
                accum -= eval_shifted([(i, step), (j, -step)])
                accum -= eval_shifted([(i, -step), (j, step)])
                accum /= (4.0 * h * h)
                assert np.all(accum == result[(i, j)])