def test_define_prior_parameters(self):
        data = {
            'points': np.array([[1]]),
            'evaluations': np.array([1]),
            'var_noise': None,
        }

        dimension = 1

        result = Matern52.define_prior_parameters(data, dimension)

        assert result == {
            LENGTH_SCALE_NAME: [0.0],
        }

        data2 = {
            'points': np.array([[1], [2]]),
            'evaluations': np.array([1, 2]),
            'var_noise': None,
        }

        dimension2 = 1

        result2 = Matern52.define_prior_parameters(data2, dimension2)

        assert result2 == {
            LENGTH_SCALE_NAME: [1.5432098765432098],
        }
    def test_define_default_kernel(self):
        kern1 = Matern52.define_default_kernel(1)

        assert kern1.name == MATERN52_NAME
        assert kern1.dimension == 1
        assert kern1.dimension_parameters == 1
        assert kern1.length_scale.value == np.array([1])
        assert kern1.length_scale.prior.max == [LARGEST_NUMBER]
        assert kern1.length_scale.prior.min == [SMALLEST_POSITIVE_NUMBER]

        kern2 = Matern52.define_default_kernel(1, default_values=np.array([5]))

        assert kern2.name == MATERN52_NAME
        assert kern2.dimension == 1
        assert kern2.dimension_parameters == 1
        assert kern2.length_scale.value == np.array([5])
        assert kern2.length_scale.prior.max == [LARGEST_NUMBER]
        assert kern2.length_scale.prior.min == [SMALLEST_POSITIVE_NUMBER]

        kern3 = Matern52.define_default_kernel(1, bounds=[[5, 6]])
        assert kern3.name == MATERN52_NAME
        assert kern3.dimension == 1
        assert kern3.dimension_parameters == 1
        assert kern3.length_scale.value == np.array([1])
        assert kern3.length_scale.prior.max == [20.0]
        assert kern3.length_scale.prior.min == [SMALLEST_POSITIVE_NUMBER]
    def test_evaluate_grad_respect_point(self):
        result = Matern52.evaluate_grad_respect_point(np.array([5.0]),
                                                      np.array([[1]]),
                                                      np.array([[4], [5]]), 1)

        kernel = Matern52.define_kernel_from_array(1, np.array([5.0]))
        assert np.all(result == kernel.grad_respect_point(
            np.array([[1]]), np.array([[4], [5]])))
    def test_evaluate_grad_defined_by_params_respect_params(self):
        result = Matern52.evaluate_grad_defined_by_params_respect_params(
            np.array([1, 3]), np.array([[4, 5]]), 2)
        kernel = Matern52.define_kernel_from_array(2, np.array([1, 3]))

        grad_kernel = kernel.gradient_respect_parameters(np.array([[4, 5]]))
        assert result == {
            0: grad_kernel['length_scale'][0],
            1: grad_kernel['length_scale'][1]
        }
    def test_evaluate_grad_cross_cov_respect_point(self):
        value = self.gp.evaluate_grad_cross_cov_respect_point(np.array([[40.0]]),
                                                              np.array([[39.0], [38.0]]),
                                                              np.array([1.0, 1.0]))

        value_2 = ScaledKernel.evaluate_grad_respect_point(np.array([1.0, 1.0]),
                                                           np.array([[40.0]]),
                                                           np.array([[39.0], [38.0]]), 1,
                                                           *([MATERN52_NAME],))

        assert np.all(value == value_2)


        type_kernel = [MATERN52_NAME]
        training_data = {
            "evaluations":
                [42.2851784656, 72.3121248508, 1.0113231069, 30.9309246906, 15.5288331909],
            "points": [
                [42.2851784656], [72.3121248508], [1.0113231069], [30.9309246906], [15.5288331909]],
            "var_noise": []}
        dimensions = [1]

        gp = GPFittingGaussian(type_kernel, training_data, dimensions)
        value = gp.evaluate_grad_cross_cov_respect_point(np.array([[40.0]]),
                                                         np.array([[39.0], [38.0]]),
                                                         np.array([1.0]))

        value_2 = Matern52.evaluate_grad_respect_point(np.array([1.0]),
                                                       np.array([[40.0]]),
                                                       np.array([[39.0], [38.0]]), 1)

        assert np.all(value == value_2)
    def test_cross_validation_mle_parameters_2(self):
        type_kernel = [MATERN52_NAME]

        np.random.seed(5)
        n_points = 10
        normal_noise = np.random.normal(0, 0.01, n_points)
        points = np.linspace(0, 100, n_points)
        points = points.reshape([n_points, 1])

        kernel = Matern52.define_kernel_from_array(1, np.array([100.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        function = function[0, :]
        evaluations = function + normal_noise

        training_data = {
            "evaluations": evaluations,
            "points": points,
            "var_noise": None}

        dimensions = [1]
        problem_name = 'a'

        result = \
            ValidationGPModel.cross_validation_mle_parameters(type_kernel, training_data,
                                                              dimensions, problem_name,
                                                              start=np.array([-1]))
        assert result['success_proportion'] == -1
    def test_cross_validation_mle_parameters(self):
        type_kernel = [MATERN52_NAME]

        np.random.seed(5)
        n_points = 10
        normal_noise = np.random.normal(0, 0.01, n_points)
        points = np.linspace(0, 100, n_points)
        points = points.reshape([n_points, 1])

        kernel = Matern52.define_kernel_from_array(1, np.array([100.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        function = function[0, :]
        evaluations = function + normal_noise

        training_data = {
            "evaluations": evaluations,
            "points": points,
            "var_noise": None}

        dimensions = [1]
        problem_name = 'a'

        result = \
            ValidationGPModel.cross_validation_mle_parameters(type_kernel, training_data,
                                                              dimensions, problem_name,
                                                              start=np.array([0.01**2, 0.0, 100.0]))

        compare = 'results/diagnostic_kernel/a/validation_kernel_histogram_a_' + MATERN52_NAME + \
                  '_same_correlation_False_10_None.png'
        assert result['filename_histogram'] == compare
        assert np.all(result['y_eval'] == evaluations)
        assert result['n_data'] == n_points
        assert result['filename_plot'] == 'results/diagnostic_kernel/a/' \
                                          'validation_kernel_mean_vs_observations_a_' + \
                                          MATERN52_NAME + '_same_correlation_False_10_None' + '.png'
        assert result['success_proportion'] >= 0.9

        noise = np.random.normal(0, 0.000001, n_points)
        evaluations_noisy = evaluations + noise

        training_data_2 = {
            "evaluations": evaluations_noisy,
            "points": points,
            "var_noise": np.array(n_points * [0.000001**2])}

        result_2 = \
            ValidationGPModel.cross_validation_mle_parameters(type_kernel, training_data_2,
                                                              dimensions, problem_name,
                                                              start=np.array([0.01**2, 0.0, 100.0]))

        compare = 'results/diagnostic_kernel/a/validation_kernel_histogram_a_' + MATERN52_NAME + \
                  '_same_correlation_False_10_None.png'
        assert result_2['filename_histogram'] == compare
        assert np.all(result_2['y_eval'] == evaluations_noisy)
        assert result_2['n_data'] == n_points

        compare = 'results/diagnostic_kernel/a/validation_kernel_mean_vs_observations_a_' + \
                  MATERN52_NAME + '_same_correlation_False_10_None.png'
        assert result_2['filename_plot'] == compare
        assert result_2['success_proportion'] >= 0.9
    def test_sample_new_observations(self):
        np.random.seed(5)
        n_points = 10
        normal_noise = np.random.normal(0, 0.5, n_points)
        points = np.linspace(0, 500, n_points)
        points = points.reshape([n_points, 1])
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        function = function[0, :]
        evaluations = function + normal_noise

        training_data_gp = {
            "evaluations": list(evaluations[1:]),
            "points": points[1:, :],
            "var_noise": []}
        gp = GPFittingGaussian([MATERN52_NAME], training_data_gp, [1], kernel_values=[100.0, 1.0],
                               mean_value=[0.0], var_noise_value=[0.5**2])

        n_samples = 100
        samples = gp.sample_new_observations(np.array([[30.0]]), n_samples, random_seed=1)

        new_point = np.array([[30.0]])
        z = gp.compute_posterior_parameters(new_point)
        mean = z['mean']
        cov = z['cov']

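        # With enough samples, the empirical mean and variance of the draws should be close
        # to the analytic posterior mean and covariance at the new point.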
        npt.assert_almost_equal(mean, np.mean(samples), decimal=1)
        npt.assert_almost_equal(cov, np.var(samples), decimal=1)
    def test_compute_posterior_parameters(self):
        np.random.seed(5)
        n_points = 10
        normal_noise = np.random.normal(0, 0.5, n_points)
        points = np.linspace(0, 500, n_points)
        points = points.reshape([n_points, 1])
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        function = function[0, :]
        evaluations = function + normal_noise

        training_data_gp = {
            "evaluations": list(evaluations[1:]),
            "points": points[1:, :],
            "var_noise": []}
        gp = GPFittingGaussian([MATERN52_NAME], training_data_gp, [1], kernel_values=[100.0, 1.0],
                               mean_value=[0.0], var_noise_value=[0.5**2])

        new_point = np.array([points[0], points[1]])
        z = gp.compute_posterior_parameters(new_point)
        mean = z['mean']
        cov = z['cov']

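        # The noise-free function values should lie inside the two-standard-deviation band
        # around the posterior mean.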
        assert mean[1] - 2.0 * np.sqrt(cov[1, 1]) <= function[1]
        assert function[1] <= mean[1] + 2.0 * np.sqrt(cov[1, 1])
        assert mean[0] - 2.0 * np.sqrt(cov[0, 0]) <= function[0]
        assert function[0] <= mean[0] + 2.0 * np.sqrt(cov[0, 0])

        # Values obtained from GPy
        npt.assert_almost_equal(mean, np.array([0.30891226, 0.60256237]))
        npt.assert_almost_equal(cov, np.array([[0.48844879, 0.16799927], [0.16799927, 0.16536313]]))
    def setUp(self):
        np.random.seed(2)
        n_points = 100
        normal_noise = np.random.normal(0, 1.0, n_points)
        points = np.linspace(0, 10, n_points)
        points = points.reshape([n_points, 1])
        kernel = Matern52.define_kernel_from_array(1, np.array([2.0]))
        self.sigma2 = ParameterEntity(SIGMA2_NAME, np.array([1.0]), None)
        kernel = ScaledKernel(1, kernel, self.sigma2)

        function = SampleFunctions.sample_from_gp(points, kernel)
        function = function[0, :]
        evaluations = function + normal_noise + 10.0
        self.training_data_gp = {
            "evaluations": list(evaluations),
            "points": points,
            "var_noise": []
        }
        bounds = None
        self.gp_gaussian = GPFittingGaussian([MATERN52_NAME],
                                             self.training_data_gp, [1],
                                             bounds,
                                             max_steps_out=1000)

        self.gp_gaussian_2 = GPFittingGaussian([SCALED_KERNEL, MATERN52_NAME],
                                               self.training_data_gp, [1],
                                               bounds,
                                               max_steps_out=1000)
    def test_forward_difference(self):

        result = FiniteDifferences.forward_difference(self.f, self.x, self.h)

        base_eval = \
            Matern52.evaluate_cov_defined_by_params(self.x, np.array([[2.0, 0.0], [0.0, 2.0]]), 2)

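        # Each entry of the forward-difference result should equal the one-sided quotient
        # (f(x + h * e_i) - f(x)) / h, recomputed here by perturbing coordinate i directly.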
        for i in result:
            new_x = deepcopy(self.x)
            new_x[i] += self.h[0]
            new_eval = \
                Matern52.evaluate_cov_defined_by_params(
                    new_x, np.array([[2.0, 0.0], [0.0, 2.0]]), 2)
            for j in range(2):
                for h in range(2):
                    assert result[i][j, h] == (new_eval[j, h] - base_eval[j, h]) / self.h[0]
    def setUp(self):
        np.random.seed(5)
        n_points = 100
        points = np.linspace(0, 100, n_points)
        points = points.reshape([n_points, 1])
        tasks = np.random.randint(2, size=(n_points, 1))

        add = [-10, 10]
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        self.function = function

        for i in xrange(n_points):
            function[0, i] += add[tasks[i, 0]]
        points = np.concatenate((points, tasks), axis=1)
        self.points = points
        self.evaluations = function[0, :]

        function = function[0, :]

        training_data = {
            'evaluations': list(function),
            'points': points,
            "var_noise": [],
        }
        gaussian_p = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data, [2, 1, 2], bounds_domain=[[0, 100], [0, 1]], type_bounds=[0, 1])
        gaussian_p = gaussian_p.fit_gp_regression(random_seed=1314938)

        quadrature = BayesianQuadrature(gaussian_p, [0], UNIFORM_FINITE,
                                        parameters_distribution={TASKS: 2},
                                        model_only_x=True)
        self.mt = MultiTasks(quadrature, quadrature.parameters_distribution.get(TASKS))
    def test_train(self):
        np.random.seed(5)
        n_points = 10
        normal_noise = np.random.normal(0, 0.5, n_points)
        points = np.linspace(0, 500, n_points)
        points = points.reshape([n_points, 1])
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        function = function[0, :]

        evaluations = function + normal_noise

        training_data_gp = {
            "evaluations": list(evaluations),
            "points": points,
            "var_noise": []}
        new_gp = GPFittingGaussian.train([MATERN52_NAME], [1], True, training_data_gp, None,
                                         random_seed=1314938)

        gp_gaussian = GPFittingGaussian([MATERN52_NAME], training_data_gp, [1])
        gp_2 = gp_gaussian.fit_gp_regression(random_seed=1314938)

        npt.assert_almost_equal(new_gp.var_noise.value[0], gp_2.var_noise.value[0], decimal=6)
        npt.assert_almost_equal(new_gp.mean.value[0], gp_2.mean.value[0], decimal=6)
        npt.assert_almost_equal(new_gp.kernel_values, gp_2.kernel_values)

        gp_gaussian = GPFittingGaussian([MATERN52_NAME], training_data_gp, [1])
        new_gp_2 = GPFittingGaussian.train([MATERN52_NAME], [1], False, training_data_gp, None)

        npt.assert_almost_equal(new_gp_2.var_noise.value[0], gp_gaussian.var_noise.value[0])
        npt.assert_almost_equal(new_gp_2.mean.value[0], gp_gaussian.mean.value[0], decimal=6)
        npt.assert_almost_equal(new_gp_2.kernel_values, gp_gaussian.kernel_values)
    def parameters_from_list_to_dict(params, **kwargs):
        """
        Converts a list of parameters to a dictionary using the order of the kernels.

        :param params: [float]
        :param kwargs: {
            'dimensions': [float],
            'kernels': [str],
            SAME_CORRELATION: (boolean),
        }
        :return: {
           PARAM_NAME: [float] or float
        }
        """

        parameters = {}

        for dim, kernel in zip(kwargs['dimensions'], kwargs['kernels']):
            if kernel == MATERN52_NAME:
                n_params = get_number_parameters_kernel([kernel], [dim])
                param_dict = Matern52.parameters_from_list_to_dict(
                    params[0:n_params])
                params = params[n_params:]
                parameters.update(param_dict)
            elif kernel == TASKS_KERNEL_NAME:
                n_params = get_number_parameters_kernel([kernel], [dim],
                                                        **kwargs)
                param_dict = TasksKernel.parameters_from_list_to_dict(
                    params[0:n_params])
                params = params[n_params:]
                parameters.update(param_dict)

        return parameters
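    # A minimal usage sketch (hedged): assuming this static method belongs to ProductKernels,
    # as its use of the sub-kernel converters suggests, a flat parameter list is split across
    # the sub-kernels in order, with the size of each slice given by
    # get_number_parameters_kernel. The concrete values below are hypothetical and mirror the
    # one-dimensional product kernel used elsewhere in these tests:
    #
    #   ProductKernels.parameters_from_list_to_dict(
    #       [1.0, 0.0], dimensions=[1, 1], kernels=[MATERN52_NAME, TASKS_KERNEL_NAME])
    #   # The first entry becomes the Matern52 length scale; the remaining entry goes to the
    #   # tasks kernel's parameter dictionary.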
    def setUp(self):
        self.dimension = 2
        self.length_scale = ParameterEntity('scale', np.array([1.0, 2.0]),
                                            None)
        self.sigma2 = ParameterEntity('sigma2', np.array([3]), None)
        self.matern52 = Matern52(self.dimension, self.length_scale)
        self.matern52 = ScaledKernel(self.dimension, self.matern52,
                                     self.sigma2)
    def test_compare_kernels(self):
        kernel_t = TasksKernel(self.dimension, np.array([0.0]))
        assert ScaledKernel.compare_kernels(self.matern52, kernel_t) is False

        kernel_s = Matern52(3, self.length_scale)
        assert ScaledKernel.compare_kernels(self.matern52, kernel_s) is False

        kernel_s = Matern52(2, self.length_scale)
        assert ScaledKernel.compare_kernels(self.matern52, kernel_s) is False

        sigma2 = ParameterEntity('sigma2', np.array([1]), None)
        kernel = ScaledKernel(self.dimension, kernel_s, sigma2)
        assert ScaledKernel.compare_kernels(self.matern52, kernel) is False

        kernel_s = Matern52(
            2, ParameterEntity('scale', np.array([1.0, 3.0]), None))
        kernel = ScaledKernel(self.dimension, kernel_s, self.sigma2)
        assert ScaledKernel.compare_kernels(self.matern52, kernel) is False
    def test_compare_kernels(self):
        kernel = Matern52.define_kernel_from_array(1, np.ones(1))

        kernel_ = copy.deepcopy(kernel)
        kernel_.name = 'a'
        assert Matern52.compare_kernels(kernel, kernel_) is False

        kernel_ = copy.deepcopy(kernel)
        kernel_.dimension = 2
        assert Matern52.compare_kernels(kernel, kernel_) is False

        kernel_ = copy.deepcopy(kernel)
        kernel_.dimension_parameters = 5
        assert Matern52.compare_kernels(kernel, kernel_) is False

        kernel_ = copy.deepcopy(kernel)
        kernel_.length_scale.value = np.array([-1])
        assert Matern52.compare_kernels(kernel, kernel_) is False
    def test_evaluate_hessian_respect_point(self):
        point = np.array([[4.5, 7.5]])
        inputs = np.array([[5.0, 6.0], [8.0, 9.0]])
        params = np.array([1.0, 5.0])
        result = Matern52.evaluate_hessian_respect_point(
            params, point, inputs, 2)

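        # Check the analytic Hessian against a second-order central-difference approximation
        # of the cross-covariance with respect to the evaluation point.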
        dh = 0.00001
        finite_diff = FiniteDifferences.second_order_central(
            lambda x: Matern52.evaluate_cross_cov_defined_by_params(
                params, x.reshape((1, len(x))), inputs, 2), point[0, :],
            np.array([dh]))

        for i in xrange(2):
            for j in xrange(2):
                print i, j
                npt.assert_almost_equal(
                    finite_diff[i, j],
                    np.array([[result[0, i, j], result[1, i, j]]]),
                    decimal=5)
    def setUp(self):
        self.dimension = 2
        self.length_scale = ParameterEntity('scale', np.array([1.0, 2.0]),
                                            None)
        self.sigma2 = ParameterEntity('sigma2', np.array([3]), None)
        self.matern52 = Matern52(self.dimension, self.length_scale)
        self.matern52 = ScaledKernel(self.dimension, self.matern52,
                                     self.sigma2)

        self.inputs = np.array([[1, 0], [0, 1]])

        self.prior = UniformPrior(2, [1, 1], [100, 100])
        self.prior_2 = UniformPrior(1, [1], [100])
        self.matern52_ = Matern52(
            2,
            ParameterEntity(LENGTH_SCALE_NAME, np.array([2.0, 3.0]),
                            self.prior))
        self.matern52_ = ScaledKernel(
            self.dimension, self.matern52_,
            ParameterEntity('sigma2', np.array([4.0]), self.prior_2))
    def test_get_kernel_default(self):
        kernel_name = [MATERN52_NAME]
        dimension = [2]
        kernel = get_kernel_default(kernel_name, dimension)

        kernel_ = Matern52.define_kernel_from_array(2, np.ones(3))

        assert Matern52.compare_kernels(kernel, kernel_)

        kernel_name = [TASKS_KERNEL_NAME]
        dimension = [1]
        kernel = get_kernel_default(kernel_name, dimension)

        kernel_ = TasksKernel.define_kernel_from_array(1, np.array([0]))

        assert TasksKernel.compare_kernels(kernel, kernel_)

        kernel_name = [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME]
        dimension = [2, 1, 1]
        kernel = get_kernel_default(kernel_name, dimension)

        kernel_ = ProductKernels.define_kernel_from_array([1, 1], [np.array([1]), np.array([0])],
                                                          [MATERN52_NAME, TASKS_KERNEL_NAME])
        assert ProductKernels.compare_kernels(kernel, kernel_)

        kernel = get_kernel_default(kernel_name, dimension, default_values=np.array([1, 0]))
        assert ProductKernels.compare_kernels(kernel, kernel_)
        assert kernel.parameters[MATERN52_NAME][LENGTH_SCALE_NAME].prior.max == [LARGEST_NUMBER]

        compare = kernel.parameters[MATERN52_NAME][LENGTH_SCALE_NAME].prior.min
        assert compare == SMALLEST_POSITIVE_NUMBER

        kernel = get_kernel_default(kernel_name, dimension, default_values=np.array([1, 0]),
                                    bounds=[[-1, 2]])

        assert ProductKernels.compare_kernels(kernel, kernel_)
        assert kernel.parameters[MATERN52_NAME][LENGTH_SCALE_NAME].prior.max == 60.0

        compare = kernel.parameters[MATERN52_NAME][LENGTH_SCALE_NAME].prior.min
        assert compare == SMALLEST_POSITIVE_NUMBER
    def test_optimize_posterior_mean_samples(self):
        np.random.seed(5)
        n_points = 100
        points = np.linspace(0, 100, n_points)
        points = points.reshape([n_points, 1])
        tasks = np.random.randint(2, size=(n_points, 1))

        add = [10, -10]
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        max_value = function[0, np.argmax(function)]
        max_point = points[np.argmax(function), 0]

        for i in xrange(n_points):
            function[0, i] += add[tasks[i, 0]]
        points = np.concatenate((points, tasks), axis=1)

        function = function[0, :]

        training_data = {
            'evaluations': list(function),
            'points': points,
            "var_noise": [],
        }

        gaussian_p = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data, [2, 1, 2],
            bounds_domain=[[0, 100]],
            max_steps_out=1000)
        gaussian_p = gaussian_p.fit_gp_regression(random_seed=1314938)
        gp = BayesianQuadrature(gaussian_p, [0], UNIFORM_FINITE, {TASKS: 2})

        random_seed = 10

        n_samples_parameters = 15
        gp.gp.thinning = 10
        gp.gp.n_burning = 500

        sol_2 = gp.optimize_posterior_mean(
            random_seed=random_seed,
            n_best_restarts=2,
            n_samples_parameters=n_samples_parameters,
            start_new_chain=True)

        assert max_point == sol_2['solution']
        npt.assert_almost_equal(max_value, sol_2['optimal_value'], decimal=3)
    def test_gradient_vector_b(self):
        np.random.seed(5)
        n_points = 10
        points = np.linspace(0, 100, n_points)
        points = points.reshape([n_points, 1])
        tasks = np.random.randint(2, size=(n_points, 1))

        add = [10, -10]
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)

        for i in xrange(n_points):
            function[0, i] += add[tasks[i, 0]]
        points = np.concatenate((points, tasks), axis=1)

        function = function[0, :]

        training_data = {
            'evaluations': list(function),
            'points': points,
            "var_noise": [],
        }

        gaussian_p = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data, [2, 1, 2],
            bounds_domain=[[0, 100]])
        gaussian_p = gaussian_p.fit_gp_regression(random_seed=1314938)

        gp = BayesianQuadrature(gaussian_p, [0], UNIFORM_FINITE, {TASKS: 2})
        #  gp = self.gp_complete
        candidate_point = np.array([[84.0, 1]])
        points = np.array([[99.5], [12.1], [70.2]])
        value = gp.gradient_vector_b(candidate_point, points, cache=False)

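        # Validate the analytic gradient of the vector b with respect to the candidate point
        # against a forward-difference approximation of compute_posterior_parameters_kg.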
        dh_ = 0.0000001
        dh = [dh_]
        finite_diff = FiniteDifferences.forward_difference(
            lambda point: gp.compute_posterior_parameters_kg(
                points, point.reshape((1, len(point))), cache=False)['b'],
            candidate_point[0, :], np.array(dh))
        npt.assert_almost_equal(finite_diff[0], value[:, 0], decimal=5)
        assert np.all(finite_diff[1] == value[:, 1])

        value_2 = gp.gradient_vector_b(candidate_point, points, cache=True)
        assert np.all(value_2 == value)
    def test_cross_cov(self):
        r2 = np.array([[0.0, 1.25], [1.25, 0.0]])
        r = np.sqrt(r2)

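        # Matern 5/2 covariance written out by hand:
        # sigma2 * (1 + sqrt(5) * r + (5/3) * r^2) * exp(-sqrt(5) * r),
        # with r the length-scale-weighted distance and sigma2 = 3 (self.sigma2 from setUp).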
        left_term = ((1.0 + np.sqrt(5) * r + (5.0 / 3.0) * r2) *
                     np.exp(-np.sqrt(5) * r) * np.array([3]))[0, 1]
        comparisons = left_term == self.matern52.cross_cov(
            self.inputs, self.inputs)[0, 1]
        assert np.all(comparisons)

        point_1 = np.array([[2.0, 4.0]])
        point_2 = np.array([[3.0, 5.0]])

        matern52 = Matern52(
            2, ParameterEntity('scale', np.array([2.0, 3.0]), None))
        matern52 = ScaledKernel(
            2, matern52, ParameterEntity('sigma2', np.array([4.0]), None))

        assert np.all(
            matern52.cross_cov(point_1, point_2) == np.array(
                [[3.0737065834936015]]))

        inputs_1 = np.array([[2.0, 4.0], [3.0, 5.0]])
        inputs_2 = np.array([[1.5, 9.0], [-3.0, 8.0]])

        assert np.all(
            matern52.cross_cov(inputs_1, inputs_2) == np.array([[
                0.87752659905500319, 0.14684671522649542
            ], [1.0880320585678382, 0.084041575076539962]]))

        inputs_1 = np.array([[2.0, 4.0]])
        inputs_2 = np.array([[1.5, 9.0], [-3.0, 8.0]])

        assert np.all(
            matern52.cross_cov(inputs_1, inputs_2) == np.array(
                [[0.87752659905500319, 0.14684671522649542]]))

        inputs_1 = np.array([[2.0, 4.0], [3.0, 5.0]])
        inputs_2 = np.array([[1.5, 9.0]])

        npt.assert_almost_equal(
            matern52.cross_cov(inputs_1, inputs_2),
            np.array([[0.87752659905500319], [1.0880320585678382]]))
    def test_mle_parameters(self):
        # Results compared with the ones given by GPy

        np.random.seed(1)
        add = -45.946926660233636

        llh = self.gp_gaussian.log_likelihood(1.0, 0.0, np.array([100.0, 1.0]))
        npt.assert_almost_equal(llh + add, -59.8285565516, decimal=6)

        opt = self.gp_gaussian.mle_parameters(start=np.array([1.0, 0.0, 14.0, 0.9]))

        assert opt['optimal_value'] + add >= -67.1494227694

        compare = self.gp_gaussian.log_likelihood(9, 10.0, np.array([100.2, 1.1]))
        assert self.gp_gaussian_central.log_likelihood(9, 0.0, np.array([100.2, 1.1])) == compare

        np.random.seed(5)
        n_points = 10
        normal_noise = np.random.normal(0, 0.5, n_points)
        points = np.linspace(0, 500, n_points)
        points = points.reshape([n_points, 1])
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        function = function[0, :]

        evaluations = function + normal_noise

        training_data_gp = {
            "evaluations": list(evaluations),
            "points": points,
            "var_noise": []}

        gp_gaussian = GPFittingGaussian([MATERN52_NAME], training_data_gp, [1])

        opt_3 = gp_gaussian.mle_parameters(random_seed=1314938)
        np.random.seed(1314938)
        start = gp_gaussian.sample_parameters_posterior(1)[0, :]
        opt_4 = gp_gaussian.mle_parameters(start)

        npt.assert_almost_equal(opt_3['optimal_value'], opt_4['optimal_value'])
        npt.assert_almost_equal(opt_3['solution'], opt_4['solution'], decimal=4)
    def setUp(self):
        np.random.seed(2)
        n_points = 50
        points = np.linspace(0, 500, n_points)
        points = points.reshape([n_points, 1])
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        self.cov = kernel.cov(points)

        self.cov_2 = np.array(
            [[3.76518160e-02, 8.35508788e-03, 2.26375310e-03, 4.81839112e-02, 1.19018900e-02],
             [8.35508788e-03, 3.76518160e-02, 4.65867508e-05, 2.23451418e-03, 3.20904593e-04],
             [2.26375310e-03, 4.65867508e-05, 3.76518160e-02, 8.45452922e-03, 3.79688244e-02],
             [4.81839112e-02, 2.23451418e-03, 8.45452922e-03, 3.76518160e-02, 3.53194132e-02],
             [1.19018900e-02, 3.20904593e-04, 3.79688244e-02, 3.53194132e-02, 3.76518160e-02]])

        self.cov_ = np.array(
            [[1.04681851e+00, 9.95986475e-02, 2.71028578e-02, 5.70675240e-01, 1.41705272e-01],
             [9.95986475e-02, 1.04681851e+00, 5.64490356e-04, 2.67539188e-02, 3.86574119e-03],
             [2.71028578e-02, 5.64490356e-04, 1.04681851e+00, 1.00779972e-01, 4.50126283e-01],
             [5.70675240e-01, 2.67539188e-02, 1.00779972e-01, 1.04681851e+00, 4.18836616e-01],
             [1.41705272e-01, 3.86574119e-03, 4.50126283e-01, 4.18836616e-01, 1.04681851e+00]])
    def test_sample_parameters(self):
        parameters = self.matern52_.hypers_as_list
        samples = []
        np.random.seed(1)
        for parameter in parameters:
            samples.append(parameter.sample_from_prior(2))
        assert np.all(
            self.matern52_.sample_parameters(2, random_seed=1) == np.array([[
                samples[0][0, 0], samples[0][0, 1], samples[1][0]
            ], [samples[0][1, 0], samples[0][1, 1], samples[1][1]]]))

        np.random.seed(1)
        matern52 = Matern52(
            2,
            ParameterEntity(LENGTH_SCALE_NAME, np.array([2.0, 3.0]),
                            self.prior))
        samples = []
        parameters1 = matern52.hypers_as_list
        for parameter in parameters1:
            samples.append(parameter.sample_from_prior(2))
        assert np.all(
            matern52.sample_parameters(2, random_seed=1) == samples[0])
    def test_sample_from_gp(self):
        x = np.linspace(0, 10, 50)
        x = x.reshape([50, 1])
        kernel = Matern52.define_kernel_from_array(1, np.array([3.0, 5.0]))
        function = SampleFunctions.sample_from_gp(x, kernel, n_samples=100000)

        mean = np.mean(function, axis=0)
        cov = np.cov(function.transpose())
        cov_ = kernel.cov(x)

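        # Across many sampled functions, the empirical mean should be close to the zero prior
        # mean and the empirical covariance close to the kernel covariance matrix.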
        npt.assert_almost_equal(mean, np.zeros(len(mean)), decimal=1)
        npt.assert_almost_equal(cov, cov_, decimal=1)

        function_2 = SampleFunctions.sample_from_gp(x,
                                                    kernel,
                                                    n_samples=100000,
                                                    random_seed=10)
        mean = np.mean(function_2, axis=0)
        cov = np.cov(function_2.transpose())
        cov_ = kernel.cov(x)

        npt.assert_almost_equal(mean, np.zeros(len(mean)), decimal=1)
        npt.assert_almost_equal(cov, cov_, decimal=1)
    def compare_kernels(kernel1, kernel2):
        """
        Compare the values of kernel1 and kernel2. Returns True if they are equal, and
        False otherwise.

        :param kernel1: ProductKernels instance object
        :param kernel2: ProductKernels instance object
        :return: boolean
        """

        if kernel1.name != kernel2.name:
            return False

        if kernel1.dimension != kernel2.dimension:
            return False

        if kernel1.dimension_parameters != kernel2.dimension_parameters:
            return False

        if kernel1.names != kernel2.names:
            return False

        for i in xrange(len(kernel1.names)):
            name1 = kernel1.names[i]

            kernel_1 = kernel1.kernels[name1]
            kernel_2 = kernel2.kernels[name1]

            if name1 == MATERN52_NAME:
                if Matern52.compare_kernels(kernel_1, kernel_2) is False:
                    return False

            if name1 == TASKS_KERNEL_NAME:
                if TasksKernel.compare_kernels(kernel_1, kernel_2) is False:
                    return False

        return True
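    # A minimal usage sketch (hedged): two product kernels built the same way, as in
    # test_get_kernel_default above, should compare equal.
    #
    #   k1 = ProductKernels.define_kernel_from_array([1, 1], [np.array([1]), np.array([0])],
    #                                                [MATERN52_NAME, TASKS_KERNEL_NAME])
    #   k2 = ProductKernels.define_kernel_from_array([1, 1], [np.array([1]), np.array([0])],
    #                                                [MATERN52_NAME, TASKS_KERNEL_NAME])
    #   assert ProductKernels.compare_kernels(k1, k2) is True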
    def test_define_kernel_from_array(self):
        kernel = Matern52.define_kernel_from_array(2, np.array([1, 3]))
        assert np.all(kernel.length_scale.value == np.array([1, 3]))
    def test_evaluate_cov_defined_by_params(self):
        result = Matern52.evaluate_cov_defined_by_params(
            np.array([1, 3, 5]), np.array([[4, 5]]), 2)

        kernel = Matern52.define_kernel_from_array(2, np.array([1, 3, 5]))
        assert result == kernel.cov(np.array([[4, 5]]))