def test_evaluate_grad_cross_cov_respect_point(self):
        """The GP's gradient of the cross-covariance w.r.t. a point must equal
        the corresponding kernel class's static ``evaluate_grad_respect_point``.
        """
        eval_point = np.array([[40.0]])
        fixed_points = np.array([[39.0], [38.0]])

        # Scaled kernel held by self.gp.
        grad_gp = self.gp.evaluate_grad_cross_cov_respect_point(
            eval_point, fixed_points, np.array([1.0, 1.0]))
        grad_kernel = ScaledKernel.evaluate_grad_respect_point(
            np.array([1.0, 1.0]), eval_point, fixed_points, 1, [MATERN52_NAME])
        assert np.all(grad_gp == grad_kernel)

        # Plain Matern52 GP built from scratch over 1-D training data.
        training_data = {
            "evaluations":
                [42.2851784656, 72.3121248508, 1.0113231069, 30.9309246906, 15.5288331909],
            "points": [
                [42.2851784656], [72.3121248508], [1.0113231069], [30.9309246906], [15.5288331909]],
            "var_noise": []}
        gp = GPFittingGaussian([MATERN52_NAME], training_data, [1])

        grad_gp = gp.evaluate_grad_cross_cov_respect_point(
            eval_point, fixed_points, np.array([1.0]))
        grad_kernel = Matern52.evaluate_grad_respect_point(
            np.array([1.0]), eval_point, fixed_points, 1)
        assert np.all(grad_gp == grad_kernel)
# Example #2
    def setUp(self):
        """Sample a function from a Matern52 GP over [0, 100], add task-dependent
        offsets, fit a product (Matern52 x tasks) GP on it, and wrap it in
        BayesianQuadrature / MultiTasks objects used by the tests.
        """
        np.random.seed(5)
        n_points = 100
        points = np.linspace(0, 100, n_points)
        points = points.reshape([n_points, 1])
        # Random binary task label for each point.
        tasks = np.random.randint(2, size=(n_points, 1))

        # Offset added to the sampled values, chosen by task label.
        add = [-10, 10]
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        self.function = function

        for i in xrange(n_points):
            function[0, i] += add[tasks[i, 0]]
        # Append the task index as a second input dimension.
        points = np.concatenate((points, tasks), axis=1)
        self.points = points
        self.evaluations = function[0, :]

        function = function[0, :]

        training_data = {
            'evaluations': list(function),
            'points': points,
            "var_noise": [],
        }
        gaussian_p = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data, [2, 1, 2], bounds_domain=[[0, 100], [0, 1]], type_bounds=[0, 1])
        gaussian_p = gaussian_p.fit_gp_regression(random_seed=1314938)

        quadrature = BayesianQuadrature(gaussian_p, [0], UNIFORM_FINITE,
                                        parameters_distribution={TASKS: 2},
                                        model_only_x=True)
        self.mt = MultiTasks(quadrature, quadrature.parameters_distribution.get(TASKS))
    def setUp(self):
        """Sample a noisy 1-D function from a scaled Matern52 GP and build two
        GPFittingGaussian models over the same data with different kernel
        specifications ([MATERN52_NAME] vs [SCALED_KERNEL, MATERN52_NAME]).
        """
        np.random.seed(2)
        n_points = 100
        normal_noise = np.random.normal(0, 1.0, n_points)
        points = np.linspace(0, 10, n_points)
        points = points.reshape([n_points, 1])
        kernel = Matern52.define_kernel_from_array(1, np.array([2.0]))
        self.sigma2 = ParameterEntity(SIGMA2_NAME, np.array([1.0]), None)
        kernel = ScaledKernel(1, kernel, self.sigma2)

        function = SampleFunctions.sample_from_gp(points, kernel)
        function = function[0, :]
        # Observations = sampled values + unit Gaussian noise + a constant offset.
        evaluations = function + normal_noise + 10.0
        self.training_data_gp = {
            "evaluations": list(evaluations),
            "points": points,
            "var_noise": []
        }
        bounds = None
        self.gp_gaussian = GPFittingGaussian([MATERN52_NAME],
                                             self.training_data_gp, [1],
                                             bounds,
                                             max_steps_out=1000)

        self.gp_gaussian_2 = GPFittingGaussian([SCALED_KERNEL, MATERN52_NAME],
                                               self.training_data_gp, [1],
                                               bounds,
                                               max_steps_out=1000)
# Example #4
    def test_optimize_2(self):
        """Run one BGO optimization, then check that the point sampled in the
        first iteration is incorporated correctly by rebuilding the GP with
        that point appended by hand and re-optimizing SBO on it.
        """
        bgo_2 = BGO.from_spec(self.spec_2)
        # Result itself is not asserted here; this exercises the full pipeline.
        sol = bgo_2.optimize(random_seed=1, n_restarts=1)

        # Rebuild the model with the (point, evaluation) pair BGO should have
        # added during its first iteration.
        training_data = deepcopy(self.training_data)

        training_data['points'] = np.concatenate(
            (training_data['points'], np.array([[99.9863644153, 0]])), axis=0)
        training_data['evaluations'] = np.concatenate(
            (training_data['evaluations'], np.array([9.0335599603])))
        gaussian_p_med = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data, [2, 1, 2],
            bounds_domain=[[0, 100], [0, 1]],
            type_bounds=[0, 1])
        gaussian_p_med.update_value_parameters(self.params)
        gp_med = BayesianQuadrature(gaussian_p_med, [0], UNIFORM_FINITE,
                                    {TASKS: 2})
        sbo = SBO(gp_med, np.array(self.domain.discretization_domain_x))

        point = sbo.optimize(start=np.array([[10, 0]]))

        # Benchmark values for this seed.
        npt.assert_almost_equal(point['optimal_value'],
                                542.4598435381,
                                decimal=4)
        npt.assert_almost_equal(point['solution'], np.array([61.58743036, 0]))
    def test_compute_posterior_parameters(self):
        """Posterior at held-out points: true values must lie inside a 2-sigma
        interval, and the moments must match benchmark values from GPy.
        """
        np.random.seed(5)
        n_points = 10
        normal_noise = np.random.normal(0, 0.5, n_points)
        points = np.linspace(0, 500, n_points)
        points = points.reshape([n_points, 1])
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        function = function[0, :]
        evaluations = function + normal_noise

        # Train on all but the first point; predict at the first two points.
        training_data_gp = {
            "evaluations": list(evaluations[1:]),
            "points": points[1:, :],
            "var_noise": []}
        gp = GPFittingGaussian([MATERN52_NAME], training_data_gp, [1], kernel_values=[100.0, 1.0],
                               mean_value=[0.0], var_noise_value=[0.5**2])

        new_point = np.array([points[0], points[1]])
        z = gp.compute_posterior_parameters(new_point)
        mean = z['mean']
        cov = z['cov']

        # True function values fall within 2 standard deviations of the mean.
        assert mean[1] - 2.0 * np.sqrt(cov[1, 1]) <= function[1]
        assert function[1] <= mean[1] + 2.0 * np.sqrt(cov[1, 1])
        assert mean[0] - 2.0 * np.sqrt(cov[0, 0]) <= function[0]
        assert function[0] <= mean[0] + 2.0 * np.sqrt(cov[0, 0])

        # Values obtained from GPy
        npt.assert_almost_equal(mean, np.array([0.30891226, 0.60256237]))
        npt.assert_almost_equal(cov, np.array([[0.48844879, 0.16799927], [0.16799927, 0.16536313]]))
    def test_sample_new_observations(self):
        """Empirical moments of sampled observations at a new point must match
        the analytic posterior mean and covariance (to 1 decimal).
        """
        np.random.seed(5)
        n = 10
        noise = np.random.normal(0, 0.5, n)
        xs = np.linspace(0, 500, n).reshape([n, 1])
        matern = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        f = SampleFunctions.sample_from_gp(xs, matern)[0, :]
        obs = f + noise

        # GP trained on all points except the first one.
        gp = GPFittingGaussian(
            [MATERN52_NAME],
            {"evaluations": list(obs[1:]), "points": xs[1:, :], "var_noise": []},
            [1], kernel_values=[100.0, 1.0], mean_value=[0.0], var_noise_value=[0.5 ** 2])

        candidate = np.array([[30.0]])
        draws = gp.sample_new_observations(candidate, 100, random_seed=1)
        posterior = gp.compute_posterior_parameters(candidate)

        npt.assert_almost_equal(posterior['mean'], np.mean(draws), decimal=1)
        npt.assert_almost_equal(posterior['cov'], np.var(draws), decimal=1)
    def test_convert_from_numpy_to_list(self):
        """Converting training data to numpy and back must reproduce the input."""
        for original in (self.training_data_noisy, self.training_data):
            as_numpy = GPFittingGaussian.convert_from_list_to_numpy(original)
            round_tripped = GPFittingGaussian.convert_from_numpy_to_list(as_numpy)
            assert round_tripped == original
    def test_convert_from_list_to_numpy(self):
        """convert_from_list_to_numpy must yield arrays, and None for an empty
        var_noise list.
        """
        noisy = GPFittingGaussian.convert_from_list_to_numpy(self.training_data_noisy)
        assert np.all(noisy['points'] == np.array([[42.2851784656]]))
        assert noisy['evaluations'] == np.array([41.0101845096])
        assert noisy['var_noise'] == np.array([0.0181073779])

        clean = GPFittingGaussian.convert_from_list_to_numpy(self.training_data)
        expected_points = np.array(
            [[42.2851784656], [72.3121248508], [1.0113231069], [30.9309246906], [15.5288331909]])
        expected_evaluations = np.array(
            [42.2851784656, 72.3121248508, 1.0113231069, 30.9309246906, 15.5288331909])
        assert np.all(clean['points'] == expected_points)
        assert np.all(clean['evaluations'] == expected_evaluations)
        # Empty noise list maps to None rather than an empty array.
        assert clean['var_noise'] is None
    def test_set_samplers(self):
        """A GP built with n_burning=1 must start its slice sampler at the same
        point obtained by sampling the parameters once with the same seed.
        """
        type_kernel = [TASKS_KERNEL_NAME]
        training_data = {
            "evaluations": [42.2851784656, 72.3121248508],
            "points": [[0], [1]],
            "var_noise": []}
        dimensions = [2]

        gp_tk = GPFittingGaussian(type_kernel, training_data, dimensions)

        # Tasks kernel: no length-scale indexes and a single slice sampler.
        assert gp_tk.length_scale_indexes is None
        assert len(gp_tk.slice_samplers) == 1

        value = gp_tk.sample_parameters(1, random_seed=1)[-1]
        gp_tk_ = GPFittingGaussian(type_kernel, training_data, dimensions, n_burning=1,
                                   random_seed=1)
        assert np.all(gp_tk_.start_point_sampler == value)

        # Same check for the product (Matern52 x tasks) kernel.
        type_kernel = [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME]
        training_data = {
            "evaluations": [42.2851784656, 72.3121248508],
            "points": [[0, 0], [1, 0]],
            "var_noise": []}
        dimensions = [2, 1, 1]
        gp = GPFittingGaussian(type_kernel, training_data, dimensions, n_burning=1, random_seed=1)

        gp2 = GPFittingGaussian(type_kernel, training_data, dimensions)
        value2 = gp2.sample_parameters(1, random_seed=1)[-1]
        assert np.all(gp.start_point_sampler == value2)
# Example #10
    def test_optimize_posterior_mean_samples(self):
        """Optimizing the posterior mean, averaged over MCMC hyperparameter
        samples, should recover the maximizer of the sampled function.
        """
        np.random.seed(5)
        n_points = 100
        points = np.linspace(0, 100, n_points)
        points = points.reshape([n_points, 1])
        tasks = np.random.randint(2, size=(n_points, 1))

        add = [10, -10]
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        # Record the optimum before the task offsets are applied.
        max_value = function[0, np.argmax(function)]
        max_point = points[np.argmax(function), 0]

        for i in xrange(n_points):
            function[0, i] += add[tasks[i, 0]]
        points = np.concatenate((points, tasks), axis=1)

        function = function[0, :]

        training_data = {
            'evaluations': list(function),
            'points': points,
            "var_noise": [],
        }

        gaussian_p = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data, [2, 1, 2],
            bounds_domain=[[0, 100]],
            max_steps_out=1000)
        gaussian_p = gaussian_p.fit_gp_regression(random_seed=1314938)
        gp = BayesianQuadrature(gaussian_p, [0], UNIFORM_FINITE, {TASKS: 2})

        random_seed = 10

        # Average over 15 hyperparameter samples (thinning 10, 500 burn-in).
        n_samples_parameters = 15
        gp.gp.thinning = 10
        gp.gp.n_burning = 500

        sol_2 = gp.optimize_posterior_mean(
            random_seed=random_seed,
            n_best_restarts=2,
            n_samples_parameters=n_samples_parameters,
            start_new_chain=True)

        # NOTE(review): exact float equality presumes the optimizer lands on a
        # training grid point — confirm this is intended.
        assert max_point == sol_2['solution']
        npt.assert_almost_equal(max_value, sol_2['optimal_value'], decimal=3)
# Example #11
    def test_gradient_vector_b(self):
        """gradient_vector_b must match a forward finite-difference estimate of
        the gradient of 'b' from compute_posterior_parameters_kg, and the
        cached path must agree with the uncached one.
        """
        np.random.seed(5)
        n_points = 10
        points = np.linspace(0, 100, n_points)
        points = points.reshape([n_points, 1])
        tasks = np.random.randint(2, size=(n_points, 1))

        add = [10, -10]
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)

        for i in xrange(n_points):
            function[0, i] += add[tasks[i, 0]]
        points = np.concatenate((points, tasks), axis=1)

        function = function[0, :]

        training_data = {
            'evaluations': list(function),
            'points': points,
            "var_noise": [],
        }

        gaussian_p = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data, [2, 1, 2],
            bounds_domain=[[0, 100]])
        gaussian_p = gaussian_p.fit_gp_regression(random_seed=1314938)

        gp = BayesianQuadrature(gaussian_p, [0], UNIFORM_FINITE, {TASKS: 2})
        candidate_point = np.array([[84.0, 1]])
        points = np.array([[99.5], [12.1], [70.2]])
        value = gp.gradient_vector_b(candidate_point, points, cache=False)

        # Forward finite differences w.r.t. the candidate point.
        dh_ = 0.0000001
        dh = [dh_]
        finite_diff = FiniteDifferences.forward_difference(
            lambda point: gp.compute_posterior_parameters_kg(
                points, point.reshape((1, len(point))), cache=False)['b'],
            candidate_point[0, :], np.array(dh))
        npt.assert_almost_equal(finite_diff[0], value[:, 0], decimal=5)
        assert np.all(finite_diff[1] == value[:, 1])

        # The cached computation must return the same values.
        value_2 = gp.gradient_vector_b(candidate_point, points, cache=True)
        assert np.all(value_2 == value)
    def test_serialize(self):
        """serialize() must capture the (augmented) training data, the default
        parameter values, and the sampler state; bounds_domain defaults to []
        when no bounds are provided.
        """
        self.gp.add_points_evaluations(self.new_point, self.evaluation)
        # Renamed from ``dict`` to avoid shadowing the builtin.
        serialized = self.gp.serialize()

        # Default length scale: mean pairwise distance of training points / 0.324.
        n = len(self.training_data['points'])
        ls = np.mean([abs(self.training_data['points'][j][0] - self.training_data['points'][h][0])
                      for j in xrange(n) for h in xrange(n)]) / 0.324

        # Training data after the extra (point, evaluation) pair was appended.
        data = {
            "evaluations": [42.2851784656, 72.3121248508, 1.0113231069, 30.9309246906,
                            15.5288331909, 80.0],
            "points": [
                [42.2851784656], [72.3121248508], [1.0113231069], [30.9309246906], [15.5288331909],
                [80.0]],
            "var_noise": []
        }

        st_sampler = [592.54740339691523, 32.413676860959995, 83.633554944444455,
                      592.54740339691523]

        # NOTE: the original literal listed 'bounds_domain' twice; only the last
        # entry ([[0, 100]]) ever took effect, so the dead "[]" entry is removed.
        assert serialized == {
            'type_kernel': [SCALED_KERNEL, MATERN52_NAME],
            'training_data': self.training_data,
            'dimensions': [1],
            'kernel_values': [ls, np.var(self.training_data['evaluations'])],
            'mean_value': [np.mean(self.training_data['evaluations'])],
            'var_noise_value': [np.var(self.training_data['evaluations'])],
            'thinning': 0,
            'data': data,
            'n_burning': 0,
            'max_steps_out': 1,
            'bounds_domain': [[0, 100]],
            'type_bounds': [0],
            'name_model': 'gp_fitting_gaussian',
            'problem_name': '',
            'training_name': '',
            'same_correlation': False,
            'start_point_sampler': st_sampler,
            'samples_parameters': serialized['samples_parameters'],
        }

        # Without explicit bounds, serialize() reports an empty bounds_domain.
        gp = GPFittingGaussian([MATERN52_NAME], self.training_data, dimensions=[1])
        assert gp.serialize()['bounds_domain'] == []
    def test_train(self):
        """GPFittingGaussian.train with the fit flag set must match
        fit_gp_regression; with it unset, parameters stay at their defaults.
        """
        np.random.seed(5)
        n_points = 10
        normal_noise = np.random.normal(0, 0.5, n_points)
        points = np.linspace(0, 500, n_points)
        points = points.reshape([n_points, 1])
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        function = function[0, :]

        evaluations = function + normal_noise

        training_data_gp = {
            "evaluations": list(evaluations),
            "points": points,
            "var_noise": []}
        # Third positional argument presumably toggles MLE fitting — see the
        # unfitted case below; confirm against GPFittingGaussian.train.
        new_gp = GPFittingGaussian.train([MATERN52_NAME], [1], True, training_data_gp, None,
                                         random_seed=1314938)

        gp_gaussian = GPFittingGaussian([MATERN52_NAME], training_data_gp, [1])
        gp_2 = gp_gaussian.fit_gp_regression(random_seed=1314938)

        npt.assert_almost_equal(new_gp.var_noise.value[0], gp_2.var_noise.value[0], decimal=6)
        npt.assert_almost_equal(new_gp.mean.value[0], gp_2.mean.value[0], decimal=6)
        npt.assert_almost_equal(new_gp.kernel_values, gp_2.kernel_values)

        # No fitting: trained model must equal a freshly constructed one.
        gp_gaussian = GPFittingGaussian([MATERN52_NAME], training_data_gp, [1])
        new_gp_2 = GPFittingGaussian.train([MATERN52_NAME], [1], False, training_data_gp, None)

        npt.assert_almost_equal(new_gp_2.var_noise.value[0], gp_gaussian.var_noise.value[0])
        npt.assert_almost_equal(new_gp_2.mean.value[0], gp_gaussian.mean.value[0], decimal=6)
        npt.assert_almost_equal(new_gp_2.kernel_values, gp_gaussian.kernel_values)
    def test_deserialize(self):
        """deserialize() must restore kernel type, training data and dimensions."""
        gp = GPFittingGaussian.deserialize({
            'type_kernel': [MATERN52_NAME],
            'training_data': self.training_data,
            'dimensions': [1],
        })

        assert gp.type_kernel == [MATERN52_NAME]
        assert gp.training_data == self.training_data
        assert gp.dimensions == [1]
    def test_mle_parameters(self):
        """Log-likelihood values and MLE optimization, benchmarked against GPy."""
        # Results compared with the ones given by GPy

        np.random.seed(1)
        # Constant offset between this implementation's llh and GPy's value.
        add = -45.946926660233636

        llh = self.gp_gaussian.log_likelihood(1.0, 0.0, np.array([100.0, 1.0]))
        npt.assert_almost_equal(llh + add, -59.8285565516, decimal=6)

        opt = self.gp_gaussian.mle_parameters(start=np.array([1.0, 0.0, 14.0, 0.9]))

        assert opt['optimal_value'] + add >= -67.1494227694

        # gp_gaussian_central is defined elsewhere; presumably its data is the
        # same shifted so that mean 0.0 here matches mean 10.0 above — confirm.
        compare = self.gp_gaussian.log_likelihood(9, 10.0, np.array([100.2, 1.1]))
        assert self.gp_gaussian_central.log_likelihood(9, 0.0, np.array([100.2, 1.1])) == compare

        np.random.seed(5)
        n_points = 10
        normal_noise = np.random.normal(0, 0.5, n_points)
        points = np.linspace(0, 500, n_points)
        points = points.reshape([n_points, 1])
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        function = function[0, :]

        evaluations = function + normal_noise

        training_data_gp = {
            "evaluations": list(evaluations),
            "points": points,
            "var_noise": []}

        gp_gaussian = GPFittingGaussian([MATERN52_NAME], training_data_gp, [1])

        # mle_parameters(random_seed=...) must equal seeding numpy manually and
        # sampling the starting point from the posterior ourselves.
        opt_3 = gp_gaussian.mle_parameters(random_seed=1314938)
        np.random.seed(1314938)
        start = gp_gaussian.sample_parameters_posterior(1)[0, :]
        opt_4 = gp_gaussian.mle_parameters(start)

        npt.assert_almost_equal(opt_3['optimal_value'], opt_4['optimal_value'])
        npt.assert_almost_equal(opt_3['solution'], opt_4['solution'], decimal=4)
    def setUp(self):
        """Two-task product-kernel GP wrapped in a BayesianQuadrature object."""
        training_data = {
            "evaluations": [1.0, 1.1],
            "points": [[42.2851784656, 0], [42.3851784656, 1]],
            "var_noise": [],
        }
        self.complex_gp_2 = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data, [2, 1, 2])
        self.gp = BayesianQuadrature(
            self.complex_gp_2, [0], UNIFORM_FINITE, {TASKS: 2})
# Example #17
    def setUp(self):
        """Build the two run specs (self.spec / self.spec_2), the domain, a
        fitted two-task GP and its parameters, and the training data used by
        the BGO tests.

        The spec literals were renamed from ``dict`` — which shadowed the
        builtin — and reformatted; keys and values are unchanged.
        """
        self.bounds_domain_x = BoundsEntity({
            'lower_bound': 0,
            'upper_bound': 100,
        })

        spec_domain = {
            'dim_x': 1,
            'choose_noise': True,
            'bounds_domain_x': [self.bounds_domain_x],
            'number_points_each_dimension': [100],
            'problem_name': 'a',
        }

        self.domain = DomainService.from_dict(spec_domain)

        # First run spec: MLE fitting, no explicit training data.
        spec_dict = {
            'problem_name': 'test_problem_with_tasks',
            'dim_x': 1,
            'choose_noise': True,
            'bounds_domain_x': [BoundsEntity({'lower_bound': 0, 'upper_bound': 100})],
            'number_points_each_dimension': [100],
            'method_optimization': 'sbo',
            'training_name': 'test_bgo',
            'bounds_domain': [[0, 100], [0, 1]],
            'n_training': 4,
            'type_kernel': [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            'noise': False,
            'random_seed': 5,
            'parallel': False,
            'type_bounds': [0, 1],
            'dimensions': [2, 1, 2],
            'name_model': 'gp_fitting_gaussian',
            'mle': True,
            'thinning': 0,
            'n_burning': 0,
            'max_steps_out': 1,
            'training_data': None,
            'x_domain': [0],
            'distribution': UNIFORM_FINITE,
            'parameters_distribution': None,
            'minimize': False,
            'n_iterations': 5,
        }

        self.spec = RunSpecEntity(spec_dict)

        self.acquisition_function = None
        self.gp_model = None

        # Define another BGO object backed by a function sampled from a
        # two-task GP with task-dependent offsets.
        np.random.seed(5)
        n_points = 100
        points = np.linspace(0, 100, n_points)
        points = points.reshape([n_points, 1])
        tasks = np.random.randint(2, size=(n_points, 1))

        add = [10, -10]
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)

        for i in xrange(n_points):
            function[0, i] += add[tasks[i, 0]]
        points = np.concatenate((points, tasks), axis=1)
        self.points = points

        function = function[0, :]

        points_ls = [list(points[i, :]) for i in xrange(n_points)]

        # First 5 observations only, used as the spec's training data.
        training_data_med = {
            'evaluations': list(function[0:5]),
            'points': points_ls[0:5],
            "var_noise": [],
        }

        self.training_data = training_data_med

        training_data = {
            'evaluations': list(function),
            'points': points,
            "var_noise": [],
        }

        gaussian_p = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data, [2, 1, 2],
            bounds_domain=[[0, 100], [0, 1]],
            type_bounds=[0, 1])
        gaussian_p = gaussian_p.fit_gp_regression(random_seed=1314938)

        params = gaussian_p.get_value_parameters_model
        self.params = params

        # Second run spec: fixed parameters taken from the fitted GP, no MLE.
        spec_dict_2 = {
            'problem_name': 'test_simulated_gp',
            'dim_x': 1,
            'choose_noise': True,
            'bounds_domain_x': [BoundsEntity({'lower_bound': 0, 'upper_bound': 100})],
            'number_points_each_dimension': [100],
            'method_optimization': 'sbo',
            'training_name': 'test_sbo',
            'bounds_domain': [[0, 100], [0, 1]],
            'n_training': 5,
            'type_kernel': [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            'noise': False,
            'random_seed': 5,
            'parallel': False,
            'type_bounds': [0, 1],
            'dimensions': [2, 1, 2],
            'name_model': 'gp_fitting_gaussian',
            'mle': False,
            'thinning': 0,
            'n_burning': 0,
            'max_steps_out': 1,
            'training_data': training_data_med,
            'x_domain': [0],
            'distribution': UNIFORM_FINITE,
            'parameters_distribution': None,
            'minimize': False,
            'n_iterations': 1,
            'var_noise_value': [params[0]],
            'mean_value': [params[1]],
            'kernel_values': list(params[2:]),
            'cache': False,
            'debug': False,
        }

        self.spec_2 = RunSpecEntity(spec_dict_2)
# Example #18
    def setUp(self):
        """Build small product-kernel GPs wrapped in BayesianQuadrature objects,
        plus two fitted two-task GPs: one on all 100 sampled points
        (gp_complete) and one on a 5-point subset (gp_complete_2).
        """
        self.training_data_complex = {
            "evaluations": [1.0, 1.1],
            "points": [[42.2851784656, 0], [42.3851784656, 0]],
            "var_noise": []
        }

        self.complex_gp = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            self.training_data_complex, [2, 1, 1])

        self.gp = BayesianQuadrature(self.complex_gp, [0], UNIFORM_FINITE,
                                     {TASKS: 1})

        training_data_complex = {
            "evaluations": [1.0, 1.1],
            "points": [[42.2851784656, 0], [42.3851784656, 1]],
            "var_noise": []
        }

        self.complex_gp_2 = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data_complex, [3, 1, 2])

        self.gp_2 = BayesianQuadrature(self.complex_gp_2, [0], UNIFORM_FINITE,
                                       {TASKS: 2})

        np.random.seed(5)
        n_points = 100
        points = np.linspace(0, 100, n_points)
        points = points.reshape([n_points, 1])
        tasks = np.random.randint(2, size=(n_points, 1))

        # Task offsets applied after the optimum is recorded.
        add = [10, -10]
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        self.original_function = function

        self.max_value = function[0, np.argmax(function)]
        self.max_point = points[np.argmax(function), 0]
        for i in xrange(n_points):
            function[0, i] += add[tasks[i, 0]]
        points = np.concatenate((points, tasks), axis=1)

        function = function[0, :]

        training_data = {
            'evaluations': list(function),
            'points': points,
            "var_noise": [],
        }

        # 5-point subset backing the smaller model.
        training_data_2 = {
            'evaluations': list(function[[0, 30, 50, 90, 99]]),
            'points': points[[0, 30, 50, 90, 99], :],
            "var_noise": [],
        }

        gaussian_p = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data, [2, 1, 2],
            bounds_domain=[[0, 100]])
        gaussian_p = gaussian_p.fit_gp_regression(random_seed=1314938)

        gaussian_p_2 = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data_2, [2, 1, 2],
            bounds_domain=[[0, 100]])
        # BUG FIX: the original called ``gaussian_p.fit_gp_regression`` here,
        # discarding gaussian_p_2 and making gp_complete_2 a duplicate of
        # gp_complete instead of the subset model.
        gaussian_p_2 = gaussian_p_2.fit_gp_regression(random_seed=1314938)

        self.gp_complete_2 = BayesianQuadrature(gaussian_p_2, [0],
                                                UNIFORM_FINITE, {TASKS: 2})
        self.gp_complete = BayesianQuadrature(gaussian_p, [0], UNIFORM_FINITE,
                                              {TASKS: 2})
class TestSliceSampling(unittest.TestCase):
    """Tests for the slice samplers attached to GPFittingGaussian models."""

    def setUp(self):
        """Sample a noisy 1-D function from a scaled Matern52 GP and build two
        GPFittingGaussian models over the same data with different kernel
        specifications ([MATERN52_NAME] vs [SCALED_KERNEL, MATERN52_NAME]).
        """
        np.random.seed(2)
        n_points = 100
        normal_noise = np.random.normal(0, 1.0, n_points)
        points = np.linspace(0, 10, n_points)
        points = points.reshape([n_points, 1])
        kernel = Matern52.define_kernel_from_array(1, np.array([2.0]))
        self.sigma2 = ParameterEntity(SIGMA2_NAME, np.array([1.0]), None)
        kernel = ScaledKernel(1, kernel, self.sigma2)

        function = SampleFunctions.sample_from_gp(points, kernel)
        function = function[0, :]
        # Observations = sampled values + unit Gaussian noise + a constant offset.
        evaluations = function + normal_noise + 10.0
        self.training_data_gp = {
            "evaluations": list(evaluations),
            "points": points,
            "var_noise": []
        }
        bounds = None
        self.gp_gaussian = GPFittingGaussian([MATERN52_NAME],
                                             self.training_data_gp, [1],
                                             bounds,
                                             max_steps_out=1000)

        self.gp_gaussian_2 = GPFittingGaussian([SCALED_KERNEL, MATERN52_NAME],
                                               self.training_data_gp, [1],
                                               bounds,
                                               max_steps_out=1000)

    def test_slice_sample(self):
        """One slice-sampling step from a fixed point matches benchmark values."""
        # Benchmark numbers from Ryan's code.

        np.random.seed(1)
        point = np.array([0.1, 0.7, 0.8, 0.2])

        new_point = self.gp_gaussian_2.sample_parameters(1, point, 1)[0]
        benchmark_point = \
            np.array([0.17721380376549206, 0.67091995290377726, 2.23209165, 0.17489317792506012])

        npt.assert_almost_equal(new_point, benchmark_point)

        np.random.seed(1)
        point = np.array([0.1, 0.7, 0.8, 0.2])

        # NOTE(review): doubling_step is disabled on gp_gaussian's sampler, yet
        # the draw below uses gp_gaussian_2 — confirm the intended sampler.
        sampler = self.gp_gaussian.slice_samplers[0]
        sampler.doubling_step = False

        new_point = self.gp_gaussian_2.sample_parameters(1, point, 1)[0]
        npt.assert_almost_equal(new_point, benchmark_point)

    def test_acceptable(self):
        """acceptable() depends on the doubling_step setting for this input."""
        sampler = self.gp_gaussian.slice_samplers[0]

        accept = sampler.acceptable(0.75, 1000000, 0, 1.5, np.array([1.0, 0]),
                                    np.array([0.1, 0.7]), np.array([0.8]),
                                    *(self.gp_gaussian, ))
        assert accept is False

        sampler.doubling_step = False

        accept_2 = sampler.acceptable(0.75, 1000000, 0, 1.5, np.array([1.0,
                                                                       0]),
                                      np.array([0.1, 0.7]), np.array([0.8]),
                                      *(self.gp_gaussian, ))

        assert accept_2 is True

    def test_find_x_interval(self):
        """find_x_interval returns the benchmark bracket with stepping-out."""
        sampler = self.gp_gaussian.slice_samplers[0]
        sampler.doubling_step = False
        interval = sampler.find_x_interval(-2000, 0, 1.5, np.array([1.0, 0]),
                                           np.array([0.1, 0.7]),
                                           np.array([0.8]),
                                           *(self.gp_gaussian, ))
        assert interval == (1001.5, -1.0)

    def test_find_sample(self):
        """find_sample raises when no acceptable sample exists, and when the
        directional log-probability is NaN.
        """
        sampler = self.gp_gaussian.slice_samplers[0]

        with self.assertRaises(Exception):
            sampler.find_sample(0, 1.5, -1000, np.array([1.0, 0]),
                                np.array([-1.0, 0.7]), np.array([0.8]),
                                *(self.gp_gaussian, ))

        # Force a NaN from the log-probability to hit the error path.
        expect(sampler).directional_log_prob.and_return(np.nan)
        with self.assertRaises(Exception):
            sampler.find_sample(0, 1.5, -1000, np.array([1.0, 0, 0]),
                                np.array([-1.0, 0.7, 0.2]), np.array([0.8]),
                                *(self.gp_gaussian, ))
class TestGPFittingGaussian(unittest.TestCase):

    def setUp(self):
        """Build the collection of GP models shared by the tests in this class."""
        type_kernel = [SCALED_KERNEL, MATERN52_NAME]
        # Noise-free 1-d training set used by self.gp.
        self.training_data = {
            "evaluations":
                [42.2851784656, 72.3121248508, 1.0113231069, 30.9309246906, 15.5288331909],
            "points": [
                [42.2851784656], [72.3121248508], [1.0113231069], [30.9309246906], [15.5288331909]],
            "var_noise": []}
        dimensions = [1]

        self.gp = GPFittingGaussian(type_kernel, self.training_data, dimensions,
                                    bounds_domain=[[0, 100]])

        # Same observations but with an explicit per-point observation noise.
        self.training_data_3 = {
            "evaluations": [42.2851784656, 72.3121248508, 1.0113231069, 30.9309246906,
                            15.5288331909],
            "points": [
                [42.2851784656], [72.3121248508], [1.0113231069], [30.9309246906], [15.5288331909]],
            "var_noise": [0.5, 0.8, 0.7, 0.9, 1.0]}

        self.gp_3 = GPFittingGaussian(type_kernel, self.training_data_3, dimensions,
                                      bounds_domain=[[0, 100]])
        # Single-observation model: covariance matrices are 1x1 and easy to check.
        self.training_data_simple = {
            "evaluations": [5],
            "points": [[5]],
            "var_noise": []}
        dimensions = [1]

        self.simple_gp = GPFittingGaussian(type_kernel, self.training_data_simple, dimensions,
                                           bounds_domain=[[0, 100]])

        # Product kernel (Matern52 x tasks) with one task-annotated observation.
        self.training_data_complex = {
            "evaluations": [1.0],
            "points": [[42.2851784656, 0]],
            "var_noise": [0.5]}

        self.complex_gp = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            self.training_data_complex, [2, 1, 1], bounds_domain=[[0, 100], [0]])

        # Product kernel with two tasks and three observations.
        self.training_data_complex_2 = {
            "evaluations": [1.0, 2.0, 3.0],
            "points": [[42.2851784656, 0], [10.532, 0], [9.123123, 1]],
            "var_noise": [0.5, 0.2, 0.1]}

        self.complex_gp_2 = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            self.training_data_complex_2, [3, 1, 2], bounds_domain=[[0, 100], [0, 1]])

        # Extra observation used by the add_points_evaluations tests.
        self.new_point = np.array([[80.0]])
        self.evaluation = np.array([80.0])

        self.training_data_noisy = {
            "evaluations": [41.0101845096],
            "points": [[42.2851784656]],
            "var_noise": [0.0181073779]}

        self.gp_noisy = GPFittingGaussian(type_kernel, self.training_data_noisy, dimensions,
                                          bounds_domain=[[0, 100]])

        # Larger synthetic data set: a GP sample plus Gaussian noise.
        # The seed fixes the data so all dependent tests are deterministic.
        np.random.seed(2)
        n_points = 50
        normal_noise = np.random.normal(0, 0.5, n_points)
        points = np.linspace(0, 500, n_points)
        points = points.reshape([n_points, 1])
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        function = function[0, :]

        evaluations = function + normal_noise

        self.training_data_gp = {
            "evaluations": list(evaluations),
            "points": points,
            "var_noise": []}

        self.gp_gaussian = GPFittingGaussian([SCALED_KERNEL, MATERN52_NAME], self.training_data_gp,
                                             [1])

        self.gp_gaussian_2 = GPFittingGaussian([MATERN52_NAME], self.training_data_gp, [1],
                                               bounds_domain=[[0, 100]])

        # Same synthetic data shifted by -10 (used in test_mle_parameters to
        # check that a constant shift only affects the mean parameter).
        self.training_data_gp_2 = {
            "evaluations": list(evaluations - 10.0),
            "points": points,
            "var_noise": []}
        self.gp_gaussian_central = GPFittingGaussian([SCALED_KERNEL, MATERN52_NAME],
                                                     self.training_data_gp_2, [1],
                                                     bounds_domain=[[0, 100]])

    def test_add_points_evaluations(self):
        """Appending observations extends data but leaves training_data untouched."""
        self.gp.add_points_evaluations(self.new_point, self.evaluation)

        expected_evals = np.concatenate((self.training_data['evaluations'], [80.0]))
        expected_points = np.concatenate((self.training_data['points'], [[80.0]]))
        assert np.all(self.gp.data['evaluations'] == expected_evals)
        assert np.all(self.gp.data['points'] == expected_points)
        # No observation noise was supplied, so the noise entry stays unset.
        assert self.gp.data['var_noise'] is None
        assert self.gp.training_data == self.training_data

        # With an explicit observation noise the noise vector is extended too.
        self.gp_noisy.add_points_evaluations(self.new_point, self.evaluation, np.array([0.00001]))

        expected_evals = np.concatenate((self.training_data_noisy['evaluations'], [80.0]))
        expected_points = np.concatenate((self.training_data_noisy['points'], [[80.0]]))
        expected_noise = np.concatenate((self.training_data_noisy['var_noise'], [0.00001]))
        assert np.all(self.gp_noisy.data['evaluations'] == expected_evals)
        assert np.all(self.gp_noisy.data['points'] == expected_points)
        assert np.all(self.gp_noisy.data['var_noise'] == expected_noise)
        assert self.gp_noisy.training_data == self.training_data_noisy

    def test_convert_from_list_to_numpy(self):
        """Lists in a training-data dict are converted to numpy arrays."""
        noisy = GPFittingGaussian.convert_from_list_to_numpy(self.training_data_noisy)
        assert np.all(noisy['points'] == np.array([[42.2851784656]]))
        assert noisy['evaluations'] == np.array([41.0101845096])
        assert noisy['var_noise'] == np.array([0.0181073779])

        converted = GPFittingGaussian.convert_from_list_to_numpy(self.training_data)
        expected_points = np.array([[42.2851784656], [72.3121248508], [1.0113231069],
                                    [30.9309246906], [15.5288331909]])
        expected_evals = np.array([42.2851784656, 72.3121248508, 1.0113231069,
                                   30.9309246906, 15.5288331909])
        assert np.all(converted['points'] == expected_points)
        assert np.all(converted['evaluations'] == expected_evals)
        # An empty var_noise list is mapped to None rather than an empty array.
        assert converted['var_noise'] is None

    def test_convert_from_numpy_to_list(self):
        """convert_from_numpy_to_list inverts convert_from_list_to_numpy."""
        for original in (self.training_data_noisy, self.training_data):
            as_numpy = GPFittingGaussian.convert_from_list_to_numpy(original)
            round_trip = GPFittingGaussian.convert_from_numpy_to_list(as_numpy)
            assert round_trip == original

    def test_serialize(self):
        """serialize() returns the complete model state as a plain dictionary."""
        self.gp.add_points_evaluations(self.new_point, self.evaluation)
        # Renamed from 'dict' to avoid shadowing the builtin.
        serialized = self.gp.serialize()

        # Default length-scale value: mean pairwise distance between the
        # training points divided by 0.324.
        n = len(self.training_data['points'])
        ls = np.mean([abs(self.training_data['points'][j][0] - self.training_data['points'][h][0])
                      for j in xrange(n) for h in xrange(n)]) / 0.324

        data = {
            "evaluations": [42.2851784656, 72.3121248508, 1.0113231069, 30.9309246906,
                            15.5288331909, 80.0],
            "points": [
                [42.2851784656], [72.3121248508], [1.0113231069], [30.9309246906], [15.5288331909],
                [80.0]],
            "var_noise": []
        }

        st_sampler = [592.54740339691523, 32.413676860959995, 83.633554944444455,
                      592.54740339691523]

        # Bug fix: the expected dictionary literal used to contain the key
        # 'bounds_domain' twice ("bounds_domain": [] was silently overridden
        # by [[0, 100]]); the dead duplicate key has been removed.
        assert serialized == {
            'type_kernel': [SCALED_KERNEL, MATERN52_NAME],
            'training_data': self.training_data,
            'dimensions': [1],
            'kernel_values': [ls, np.var(self.training_data['evaluations'])],
            'mean_value': [np.mean(self.training_data['evaluations'])],
            'var_noise_value': [np.var(self.training_data['evaluations'])],
            'thinning': 0,
            'data': data,
            'n_burning': 0,
            'max_steps_out': 1,
            'bounds_domain': [[0, 100]],
            'type_bounds': [0],
            'name_model': 'gp_fitting_gaussian',
            'problem_name': '',
            'training_name': '',
            'same_correlation': False,
            'start_point_sampler': st_sampler,
            'samples_parameters': serialized['samples_parameters'],
        }

        # A model built without bounds serializes an empty bounds list.
        gp = GPFittingGaussian([MATERN52_NAME], self.training_data, dimensions=[1])
        serialized = gp.serialize()
        assert serialized['bounds_domain'] == []

    def test_deserialize(self):
        """deserialize() rebuilds a model from its serialized parameters."""
        spec = {
            'type_kernel': [MATERN52_NAME],
            'training_data': self.training_data,
            'dimensions': [1],
        }
        model = GPFittingGaussian.deserialize(spec)

        assert model.type_kernel == [MATERN52_NAME]
        assert model.training_data == self.training_data
        assert model.dimensions == [1]

    def test_get_parameters_model(self):
        """Parameters are (var_noise, mean, length scale, signal variance) in order."""
        values = [parameter.value for parameter in self.gp.get_parameters_model]

        evaluations = self.training_data['evaluations']
        variance = np.var(evaluations)
        n = len(evaluations)
        # Default length scale: mean pairwise distance between points / 0.324.
        pairwise = [abs(self.training_data['points'][j][0] - self.training_data['points'][h][0])
                    for j in xrange(n) for h in xrange(n)]
        ls = np.mean(pairwise) / 0.324

        assert values[0] == variance
        assert values[1] == np.mean(evaluations)
        assert np.all(values[2] == ls)
        assert values[3] == variance

    def test_get_value_parameters_model(self):
        """get_value_parameters_model stacks the parameter values into one array."""
        evaluations = self.training_data['evaluations']
        variance = np.var(evaluations)
        mean = np.mean(evaluations)

        n = len(evaluations)
        # Default length scale: mean pairwise distance between points / 0.324.
        pairwise = [abs(self.training_data['points'][j][0] - self.training_data['points'][h][0])
                    for j in xrange(n) for h in xrange(n)]
        ls = np.mean(pairwise) / 0.324

        parameters = self.gp.get_value_parameters_model
        assert np.all(parameters == np.array([variance, mean, ls, variance]))

    def test_cached_data(self):
        """Updating one cache stores the value and clears the dependent cache."""
        key = (3, 5, 1)
        self.gp._updated_cached_data(key, -1, SOL_CHOL_Y_UNBIASED)
        assert self.gp.cache_sol_chol_y_unbiased[key] == -1
        assert self.gp.cache_sol_chol_y_unbiased.keys() == [key]
        assert self.gp._get_cached_data(key, SOL_CHOL_Y_UNBIASED) == -1

        # Storing a new Cholesky factor invalidates the dependent solve cache.
        self.gp._updated_cached_data((3, 5), 0, CHOL_COV)
        assert self.gp.cache_chol_cov[(3, 5)] == 0
        assert self.gp.cache_chol_cov.keys() == [(3, 5)]
        assert self.gp.cache_sol_chol_y_unbiased == {}
        assert self.gp._get_cached_data((3, 5), CHOL_COV) == 0

        # A cache miss returns False instead of raising.
        assert self.gp._get_cached_data((3, 0), CHOL_COV) is False

    def test_chol_cov_including_noise(self):
        """Covariance plus noise and its Cholesky factor for 1x1 systems."""
        # Calling twice exercises the cached path with identical results.
        for _ in xrange(2):
            chol, cov = self.simple_gp._chol_cov_including_noise(1.0, np.array([1.0, 1.0]))
            assert cov == np.array([[2.0]])
            assert chol == np.array([[np.sqrt(2.0)]])

        chol, cov = self.complex_gp._chol_cov_including_noise(1.0, np.array([1.0, 0.0]))
        assert cov == np.array([[2.5]])
        assert chol == np.array([[np.sqrt(2.5)]])

    def test_log_likelihood(self):
        """Log-likelihood of the single-observation product-kernel model."""
        expected = -0.45814536593707761
        assert self.complex_gp.log_likelihood(1.0, 1.0, np.array([1.0, 0.0])) == expected

    def test_grad_log_likelihood(self):
        """Analytic gradient of the log-likelihood matches finite differences."""
        grad = self.complex_gp_2.grad_log_likelihood(1.0, 1.0, np.array([1.0, 0.0, 0.0, 0.0]))

        # Forward finite differences over (var_noise, mean, kernel params).
        dh = 0.0000001
        finite_diff = FiniteDifferences.forward_difference(
            lambda params: self.complex_gp_2.log_likelihood(
                params[0], params[1], params[2:]
            ),
            np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0]), np.array([dh]))

        for i in range(6):
            npt.assert_almost_equal(finite_diff[i], grad[i])

        # Repeat at a less benign parameter setting with a looser tolerance.
        grad_2 = self.complex_gp_2.grad_log_likelihood(1.82, 123.1,
                                                       np.array([5.0, 1.0, -5.5, 10.0]))

        dh = 0.00000001
        finite_diff_2 = FiniteDifferences.forward_difference(
            lambda params: self.complex_gp_2.log_likelihood(
                params[0], params[1], params[2:]
            ),
            np.array([1.82, 123.1, 5.0, 1.0, -5.5, 10.0]), np.array([dh]))

        for i in range(6):
            npt.assert_almost_equal(finite_diff_2[i], grad_2[i], decimal=3)

        # Model with fixed per-point observation noise.
        grad_3 = self.gp_3.grad_log_likelihood(1.82, 123.1, np.array([5.0, 7.3]))
        dh = 0.0000001
        finite_diff_3 = FiniteDifferences.forward_difference(
            lambda params: self.gp_3.log_likelihood(
                params[0], params[1], params[2:]
            ),
            np.array([1.82, 123.1, 5.0, 7.3]), np.array([dh]))
        for i in range(4):
            npt.assert_almost_equal(finite_diff_3[i], grad_3[i], decimal=5)

        # Scaled Matern52 model on the synthetic data set from setUp.
        grad_4 = self.gp_gaussian.grad_log_likelihood(1.0, 0.0, np.array([14.0, 0.9]))
        dh = 0.0000001
        finite_diff_4 = FiniteDifferences.forward_difference(
            lambda params: self.gp_gaussian.log_likelihood(
                params[0], params[1], params[2:]
            ),
            np.array([1.0, 0.0, 14.0, 0.9]), np.array([dh]))
        for i in range(4):
            npt.assert_almost_equal(finite_diff_4[i], grad_4[i], decimal=5)

    def test_grad_log_likelihood_dict(self):
        """The dict gradient matches the flat-array gradient entry by entry."""
        grad_dict = self.complex_gp_2.grad_log_likelihood_dict(
            1.82, 123.1, np.array([5.0, 1.0, -5.5, 10.0]))
        grad_flat = self.complex_gp_2.grad_log_likelihood(
            1.82, 123.1, np.array([5.0, 1.0, -5.5, 10.0]))

        assert grad_flat[0] == grad_dict['var_noise']
        assert grad_flat[1] == grad_dict['mean']
        assert np.all(grad_flat[2:] == grad_dict['kernel_params'])

    def test_mle_parameters(self):
        """MLE optimization of the hyperparameters."""
        # Results compared with the ones given by GPy

        np.random.seed(1)
        # Constant offset so the log-likelihood matches GPy's reported value.
        add = -45.946926660233636

        llh = self.gp_gaussian.log_likelihood(1.0, 0.0, np.array([100.0, 1.0]))
        npt.assert_almost_equal(llh + add, -59.8285565516, decimal=6)

        opt = self.gp_gaussian.mle_parameters(start=np.array([1.0, 0.0, 14.0, 0.9]))

        # The optimizer must do at least as well as this reference value.
        assert opt['optimal_value'] + add >= -67.1494227694

        # Shifting all evaluations by a constant only shifts the optimal mean:
        # the shifted model at mean 0 equals the original at mean 10.
        compare = self.gp_gaussian.log_likelihood(9, 10.0, np.array([100.2, 1.1]))
        assert self.gp_gaussian_central.log_likelihood(9, 0.0, np.array([100.2, 1.1])) == compare

        # Fresh small synthetic data set (seeded for determinism).
        np.random.seed(5)
        n_points = 10
        normal_noise = np.random.normal(0, 0.5, n_points)
        points = np.linspace(0, 500, n_points)
        points = points.reshape([n_points, 1])
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        function = function[0, :]

        evaluations = function + normal_noise

        training_data_gp = {
            "evaluations": list(evaluations),
            "points": points,
            "var_noise": []}

        gp_gaussian = GPFittingGaussian([MATERN52_NAME], training_data_gp, [1])

        # Seeding the optimizer is equivalent to drawing the starting point
        # from the posterior with the same seed.
        opt_3 = gp_gaussian.mle_parameters(random_seed=1314938)
        np.random.seed(1314938)
        start = gp_gaussian.sample_parameters_posterior(1)[0, :]
        opt_4 = gp_gaussian.mle_parameters(start)

        npt.assert_almost_equal(opt_3['optimal_value'], opt_4['optimal_value'])
        npt.assert_almost_equal(opt_3['solution'], opt_4['solution'], decimal=4)

    def test_objective_llh(self):
        """objective_llh reports -inf when the log-likelihood evaluation fails."""
        original_llh = deepcopy(self.gp_gaussian.log_likelihood)

        # Replace the log-likelihood with one that always raises
        # (float division by zero), so objective_llh must return -inf.
        self.gp_gaussian.log_likelihood = (
            lambda a, b, c: float(original_llh(a, b, c)) / 0.0)
        assert self.gp_gaussian.objective_llh(np.array([1.0, 3.0, 14.0, 0.9])) == -np.inf

    def test_sample_parameters_prior(self):
        """A prior sample reproduces the per-parameter prior draws in order."""
        sample = self.gp_gaussian.sample_parameters_prior(1, 1)[0]

        assert len(sample) == 4

        # Replay the same RNG stream to rebuild each draw by hand; the order
        # of the draws below must match the sampler's internal order.
        np.random.seed(1)

        # var_noise: |N(0,1) * half-Cauchy scale * empirical variance|.
        lambda_ = np.abs(np.random.standard_cauchy(size=(1, 1)))
        a = np.abs(np.random.randn(1, 1) * lambda_ * np.var(self.training_data_gp['evaluations']))

        assert sample[0] == a[0][0]

        # mean: Gaussian centered at the empirical mean of the evaluations.
        a = np.random.randn(1, 1) + np.mean(self.training_data_gp['evaluations'])
        assert sample[1] == a[0][0]

        # length scale: uniform between SMALLEST_POSITIVE_NUMBER and the
        # mean pairwise distance between points divided by 0.324.
        n = self.training_data_gp['points'].shape[0]
        mean_ls = np.mean([abs(self.training_data_gp['points'][j, 0] -
                               self.training_data_gp['points'][h, 0]) for j in xrange(n) for h in
                           xrange(n)]) / 0.324
        a = SMALLEST_POSITIVE_NUMBER + np.random.rand(1, 1) * (mean_ls - SMALLEST_POSITIVE_NUMBER)
        assert sample[2] == a

        # signal variance: squared log-normal draw.
        mean_var = np.var(self.training_data_gp['evaluations'])
        a = np.random.lognormal(mean=np.sqrt(mean_var), sigma=1.0, size=1) ** 2
        assert sample[3] == a[0]

    def test_log_prob_parameters(self):
        """Log posterior = log-likelihood plus the log prior term (-10.13680717 here)."""
        actual = self.gp_gaussian.log_prob_parameters(np.array([1.0, 3.0, 14.0, 0.9]))
        expected = self.gp_gaussian.log_likelihood(1.0, 3.0, np.array([14.0, 0.9])) - 10.13680717
        npt.assert_almost_equal(actual, expected)

    def test_set_samplers(self):
        """Sampler setup and burn-in starting point for kernels without length scales."""
        type_kernel = [TASKS_KERNEL_NAME]
        training_data = {
            "evaluations": [42.2851784656, 72.3121248508],
            "points": [[0], [1]],
            "var_noise": []}
        dimensions = [2]

        gp_tk = GPFittingGaussian(type_kernel, training_data, dimensions)

        # The tasks kernel has no length scales, so a single slice sampler is used.
        assert gp_tk.length_scale_indexes is None
        assert len(gp_tk.slice_samplers) == 1

        # Burning one step with the same seed must land on the same start point
        # as explicitly sampling one step.
        value = gp_tk.sample_parameters(1, random_seed=1)[-1]
        gp_tk_ = GPFittingGaussian(type_kernel, training_data, dimensions, n_burning=1,
                                   random_seed=1)
        assert np.all(gp_tk_.start_point_sampler == value)

        # Same check for the product kernel (Matern52 x tasks).
        type_kernel = [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME]
        training_data = {
            "evaluations": [42.2851784656, 72.3121248508],
            "points": [[0, 0], [1, 0]],
            "var_noise": []}
        dimensions = [2, 1, 1]
        gp = GPFittingGaussian(type_kernel, training_data, dimensions, n_burning=1, random_seed=1)

        gp2 = GPFittingGaussian(type_kernel, training_data, dimensions)
        value2 = gp2.sample_parameters(1, random_seed=1)[-1]
        assert np.all(gp.start_point_sampler == value2)

    def test_sample_parameters_posterior(self):
        """Posterior sampling is reproducible given the seed and start point."""
        start = self.gp.samples_parameters[-1]
        sample = self.gp.sample_parameters_posterior(1, 1)

        # Re-seeding and starting from the recorded point yields the same sample.
        np.random.seed(1)
        sample2 = self.gp.sample_parameters_posterior(1, start_point=start)
        assert np.all(sample == sample2)
        assert sample.shape == (1, 4)

        # sample_parameters_posterior agrees with sample_parameters row by row.
        start = self.gp.samples_parameters[-1]
        sample3 = self.gp.sample_parameters_posterior(2, 1)
        sample4 = self.gp.sample_parameters(2, random_seed=1, start_point=start)

        assert np.all(sample4[0] == sample3[0, :])
        assert np.all(sample4[1] == sample3[1, :])

    def test_fit_gp_regression(self):
        """fit_gp_regression sets the model parameters to the MLE solution."""
        # Synthetic data: GP sample plus Gaussian noise (fixed seed).
        np.random.seed(5)
        n_points = 10
        normal_noise = np.random.normal(0, 0.5, n_points)
        points = np.linspace(0, 500, n_points)
        points = points.reshape([n_points, 1])
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        function = function[0, :]

        evaluations = function + normal_noise

        training_data_gp = {
            "evaluations": list(evaluations),
            "points": points,
            "var_noise": []}

        gp_gaussian = GPFittingGaussian([MATERN52_NAME], training_data_gp, [1])
        gp_gaussian_2 = GPFittingGaussian([MATERN52_NAME], training_data_gp, [1])

        new_gp = gp_gaussian.fit_gp_regression(random_seed=1314938)

        # An independently computed MLE solution must match the fitted model.
        results = gp_gaussian_2.mle_parameters(random_seed=1314938)
        results = results['solution']

        npt.assert_almost_equal(new_gp.var_noise.value[0], results[0], decimal=6)
        npt.assert_almost_equal(new_gp.mean.value[0], results[1], decimal=6)
        npt.assert_almost_equal(new_gp.kernel_values, results[2:], decimal=1)

    def test_train(self):
        """train() with the fit flag matches fit_gp_regression; without it, defaults."""
        # Synthetic data: GP sample plus Gaussian noise (fixed seed).
        np.random.seed(5)
        n_points = 10
        normal_noise = np.random.normal(0, 0.5, n_points)
        points = np.linspace(0, 500, n_points)
        points = points.reshape([n_points, 1])
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        function = function[0, :]

        evaluations = function + normal_noise

        training_data_gp = {
            "evaluations": list(evaluations),
            "points": points,
            "var_noise": []}
        # Third positional argument True: the trained model must equal the
        # result of fit_gp_regression with the same seed.
        new_gp = GPFittingGaussian.train([MATERN52_NAME], [1], True, training_data_gp, None,
                                         random_seed=1314938)

        gp_gaussian = GPFittingGaussian([MATERN52_NAME], training_data_gp, [1])
        gp_2 = gp_gaussian.fit_gp_regression(random_seed=1314938)

        npt.assert_almost_equal(new_gp.var_noise.value[0], gp_2.var_noise.value[0], decimal=6)
        npt.assert_almost_equal(new_gp.mean.value[0], gp_2.mean.value[0], decimal=6)
        npt.assert_almost_equal(new_gp.kernel_values, gp_2.kernel_values)

        # With the flag False the trained model keeps the default parameters
        # of a freshly constructed model.
        gp_gaussian = GPFittingGaussian([MATERN52_NAME], training_data_gp, [1])
        new_gp_2 = GPFittingGaussian.train([MATERN52_NAME], [1], False, training_data_gp, None)

        npt.assert_almost_equal(new_gp_2.var_noise.value[0], gp_gaussian.var_noise.value[0])
        npt.assert_almost_equal(new_gp_2.mean.value[0], gp_gaussian.mean.value[0], decimal=6)
        npt.assert_almost_equal(new_gp_2.kernel_values, gp_gaussian.kernel_values)

    def test_compute_posterior_parameters(self):
        """Posterior mean/covariance at test points; values cross-checked with GPy."""
        np.random.seed(5)
        n_points = 10
        normal_noise = np.random.normal(0, 0.5, n_points)
        points = np.linspace(0, 500, n_points)
        points = points.reshape([n_points, 1])
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        function = function[0, :]
        evaluations = function + normal_noise

        # Train on all but the first point so points[0] is held out.
        training_data_gp = {
            "evaluations": list(evaluations[1:]),
            "points": points[1:, :],
            "var_noise": []}
        gp = GPFittingGaussian([MATERN52_NAME], training_data_gp, [1], kernel_values=[100.0, 1.0],
                               mean_value=[0.0], var_noise_value=[0.5**2])

        new_point = np.array([points[0], points[1]])
        z = gp.compute_posterior_parameters(new_point)
        mean = z['mean']
        cov = z['cov']

        # The true function values must lie inside the 2-sigma credible band.
        assert mean[1] - 2.0 * np.sqrt(cov[1, 1]) <= function[1]
        assert function[1] <= mean[1] + 2.0 * np.sqrt(cov[1, 1])
        assert mean[0] - 2.0 * np.sqrt(cov[0, 0]) <= function[0]
        assert function[0] <= mean[0] + 2.0 * np.sqrt(cov[0, 0])

        # Values obtained from GPy
        npt.assert_almost_equal(mean, np.array([0.30891226, 0.60256237]))
        npt.assert_almost_equal(cov, np.array([[0.48844879, 0.16799927], [0.16799927, 0.16536313]]))

    def test_sample_new_observations(self):
        """Empirical moments of sampled observations match the posterior moments."""
        np.random.seed(5)
        n_points = 10
        normal_noise = np.random.normal(0, 0.5, n_points)
        points = np.linspace(0, 500, n_points)
        points = points.reshape([n_points, 1])
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        function = function[0, :]
        evaluations = function + normal_noise

        # Hold out the first point when training.
        training_data_gp = {
            "evaluations": list(evaluations[1:]),
            "points": points[1:, :],
            "var_noise": []}
        gp = GPFittingGaussian([MATERN52_NAME], training_data_gp, [1], kernel_values=[100.0, 1.0],
                               mean_value=[0.0], var_noise_value=[0.5**2])

        n_samples = 100
        samples = gp.sample_new_observations(np.array([[30.0]]), n_samples, random_seed=1)

        new_point = np.array([[30.0]])
        z = gp.compute_posterior_parameters(new_point)
        mean = z['mean']
        cov = z['cov']

        # 100 samples give roughly one decimal of agreement.
        npt.assert_almost_equal(mean, np.mean(samples), decimal=1)
        npt.assert_almost_equal(cov, np.var(samples), decimal=1)

    def test_cross_validation_mle_parameters(self):
        """Cross-validation diagnostics: output file names and success rate."""
        type_kernel = [MATERN52_NAME]

        # Low-noise synthetic data sampled from a Matern52 GP (fixed seed).
        np.random.seed(5)
        n_points = 10
        normal_noise = np.random.normal(0, 0.01, n_points)
        points = np.linspace(0, 100, n_points)
        points = points.reshape([n_points, 1])

        kernel = Matern52.define_kernel_from_array(1, np.array([100.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        function = function[0, :]
        evaluations = function + normal_noise

        training_data = {
            "evaluations": evaluations,
            "points": points,
            "var_noise": None}

        dimensions = [1]
        problem_name = 'a'

        result = \
            ValidationGPModel.cross_validation_mle_parameters(type_kernel, training_data,
                                                              dimensions, problem_name,
                                                              start=np.array([0.01**2, 0.0, 100.0]))

        # Expected output file paths built from problem/kernel names.
        compare = 'results/diagnostic_kernel/a/validation_kernel_histogram_a_' + MATERN52_NAME + \
                  '_same_correlation_False_10_None.png'
        assert result['filename_histogram'] == compare
        assert np.all(result['y_eval'] == evaluations)
        assert result['n_data'] == n_points
        assert result['filename_plot'] == 'results/diagnostic_kernel/a/' \
                                          'validation_kernel_mean_vs_observations_a_' + \
                                          MATERN52_NAME + '_same_correlation_False_10_None' + '.png'
        # Most folds must succeed on this easy, low-noise problem.
        assert result['success_proportion'] >= 0.9

        # Repeat with explicit (tiny) observation noise.
        noise = np.random.normal(0, 0.000001, n_points)
        evaluations_noisy = evaluations + noise

        training_data_2 = {
            "evaluations": evaluations_noisy,
            "points": points,
            "var_noise": np.array(n_points * [0.000001**2])}

        result_2 = \
            ValidationGPModel.cross_validation_mle_parameters(type_kernel, training_data_2,
                                                              dimensions, problem_name,
                                                              start=np.array([0.01**2, 0.0, 100.0]))

        compare = 'results/diagnostic_kernel/a/validation_kernel_histogram_a_' + MATERN52_NAME + \
                  '_same_correlation_False_10_None.png'
        assert result_2['filename_histogram'] == compare
        assert np.all(result_2['y_eval'] == evaluations_noisy)
        assert result_2['n_data'] == n_points

        compare = 'results/diagnostic_kernel/a/validation_kernel_mean_vs_observations_a_' + \
                  MATERN52_NAME + '_same_correlation_False_10_None.png'
        assert result_2['filename_plot'] == compare
        assert result_2['success_proportion'] >= 0.9

    def test_cross_validation_mle_parameters_2(self):
        """Cross-validation with an invalid start flags failure with -1."""
        type_kernel = [MATERN52_NAME]

        np.random.seed(5)
        n_points = 10
        normal_noise = np.random.normal(0, 0.01, n_points)
        points = np.linspace(0, 100, n_points)
        points = points.reshape([n_points, 1])

        kernel = Matern52.define_kernel_from_array(1, np.array([100.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        function = function[0, :]
        evaluations = function + normal_noise

        training_data = {
            "evaluations": evaluations,
            "points": points,
            "var_noise": None}

        dimensions = [1]
        problem_name = 'a'

        # start=[-1] is a malformed starting point; success_proportion == -1
        # signals that the cross-validation could not be carried out
        # (presumably every fold fails to optimize — see ValidationGPModel).
        result = \
            ValidationGPModel.cross_validation_mle_parameters(type_kernel, training_data,
                                                              dimensions, problem_name,
                                                              start=np.array([-1]))
        assert result['success_proportion'] == -1

    def test_check_value_within_ci(self):
        """Values inside the confidence interval pass; values just outside fail."""
        # Center of the interval is accepted.
        assert ValidationGPModel.check_value_within_ci(0, 1.0, 1.0)
        # Slightly beyond either end of the interval is rejected.
        assert not ValidationGPModel.check_value_within_ci(3.1, 1.0, 1.0)
        assert not ValidationGPModel.check_value_within_ci(-1.1, 1.0, 1.0)
        # Adding a tiny observation noise does not change the verdict.
        assert ValidationGPModel.check_value_within_ci(0, 1.0, 1.0, var_noise=0.00001)

    def test_evaluate_cross_cov(self):
        """Cross-covariance of the product kernel at two task-annotated points."""
        first = np.array([[2.0, 0.0]])
        second = np.array([[1.0, 0.0]])
        params = np.array([1.0, 0.0])
        value = self.complex_gp.evaluate_cross_cov(first, second, params)
        assert value == np.array([[0.52399410883182029]])

    def test_evaluate_grad_cross_cov_respect_point(self):
        """The GP gradient w.r.t. a point matches the underlying kernel's gradient."""
        grad_gp = self.gp.evaluate_grad_cross_cov_respect_point(
            np.array([[40.0]]), np.array([[39.0], [38.0]]), np.array([1.0, 1.0]))
        grad_kernel = ScaledKernel.evaluate_grad_respect_point(
            np.array([1.0, 1.0]), np.array([[40.0]]), np.array([[39.0], [38.0]]), 1,
            *([MATERN52_NAME],))
        assert np.all(grad_gp == grad_kernel)

        # Same check for a plain (unscaled) Matern52 model.
        training_data = {
            "evaluations":
                [42.2851784656, 72.3121248508, 1.0113231069, 30.9309246906, 15.5288331909],
            "points": [
                [42.2851784656], [72.3121248508], [1.0113231069], [30.9309246906], [15.5288331909]],
            "var_noise": []}
        model = GPFittingGaussian([MATERN52_NAME], training_data, [1])

        grad_gp = model.evaluate_grad_cross_cov_respect_point(
            np.array([[40.0]]), np.array([[39.0], [38.0]]), np.array([1.0]))
        grad_kernel = Matern52.evaluate_grad_respect_point(
            np.array([1.0]), np.array([[40.0]]), np.array([[39.0], [38.0]]), 1)
        assert np.all(grad_gp == grad_kernel)

    def test_evaluate_hessian_respect_point(self):
        """The GP Hessian w.r.t. a point matches the underlying kernel's Hessian."""
        training_data = {
            "evaluations":
                [42.2851784656, 72.3121248508, 1.0113231069, 30.9309246906, 15.5288331909],
            "points": [
                [42.2851784656], [72.3121248508], [1.0113231069], [30.9309246906], [15.5288331909]],
            "var_noise": []}
        model = GPFittingGaussian([MATERN52_NAME], training_data, [1])

        hessian_gp = model.evaluate_hessian_cross_cov_respect_point(
            np.array([[40.0]]), np.array([[39.0], [38.0]]), np.array([1.0]))
        hessian_kernel = Matern52.evaluate_hessian_respect_point(
            np.array([1.0]), np.array([[40.0]]), np.array([[39.0], [38.0]]), 1)
        assert np.all(hessian_gp == hessian_kernel)


    def test_get_historical_best_solution(self):
        """Best observed value, optionally smoothed through the posterior mean."""
        assert self.gp.get_historical_best_solution() == 72.3121248508

        # With noisy evaluations the best is read off the posterior mean at
        # the best observed point instead of the raw observation.
        best_noisy = self.gp_3.get_historical_best_solution(noisy_evaluations=True)
        expected = self.gp_3.compute_posterior_parameters(
            np.array([[72.3121248508]]), only_mean=True)['mean']
        assert best_noisy == expected

    def test_gradient_posterior_parameters(self):
        """Gradients of the posterior mean and covariance match finite differences."""
        point = np.array([[49.5]])
        grad = self.gp_gaussian.gradient_posterior_parameters(point)

        # Forward difference of the posterior mean w.r.t. the input point.
        dh = 0.0000001
        finite_diff = FiniteDifferences.forward_difference(
            lambda x: self.gp_gaussian.compute_posterior_parameters(
                x.reshape((1, len(x))), only_mean=True)['mean'],
            np.array([49.5]), np.array([dh]))

        npt.assert_almost_equal(grad['mean'], finite_diff[0])

        # Forward difference of the posterior covariance w.r.t. the input point.
        dh = 0.0000001
        finite_diff = FiniteDifferences.forward_difference(
            lambda x: self.gp_gaussian.compute_posterior_parameters(
                x.reshape((1, len(x))))['cov'],
            np.array([49.5]), np.array([dh]))

        npt.assert_almost_equal(grad['cov'], finite_diff[0])
    def setUp(self):
        """Build the GP fixtures shared by the tests in this class.

        Fixtures (all on 1-d points unless noted):
          * ``gp`` / ``gp_3``: scaled Matern-5/2 GPs on five points, without /
            with per-point observation noise.
          * ``simple_gp``: a single-observation GP.
          * ``complex_gp`` / ``complex_gp_2``: product (Matern-5/2 x tasks)
            kernels on 2-d points whose last coordinate is a task index.
          * ``gp_noisy``: one noisy observation.
          * ``gp_gaussian`` / ``gp_gaussian_2`` / ``gp_gaussian_central``: GPs
            fit to a noisy sample drawn from a known Matern-5/2 kernel
            (``gp_gaussian_central`` uses the same sample shifted by -10).
        """
        type_kernel = [SCALED_KERNEL, MATERN52_NAME]
        self.training_data = {
            "evaluations":
                [42.2851784656, 72.3121248508, 1.0113231069, 30.9309246906, 15.5288331909],
            "points": [
                [42.2851784656], [72.3121248508], [1.0113231069], [30.9309246906], [15.5288331909]],
            "var_noise": []}
        dimensions = [1]

        self.gp = GPFittingGaussian(type_kernel, self.training_data, dimensions,
                                    bounds_domain=[[0, 100]])

        # Same points/evaluations as above but with explicit observation noise.
        self.training_data_3 = {
            "evaluations": [42.2851784656, 72.3121248508, 1.0113231069, 30.9309246906,
                            15.5288331909],
            "points": [
                [42.2851784656], [72.3121248508], [1.0113231069], [30.9309246906], [15.5288331909]],
            "var_noise": [0.5, 0.8, 0.7, 0.9, 1.0]}

        self.gp_3 = GPFittingGaussian(type_kernel, self.training_data_3, dimensions,
                                      bounds_domain=[[0, 100]])
        # Minimal fixture: one noiseless observation.
        self.training_data_simple = {
            "evaluations": [5],
            "points": [[5]],
            "var_noise": []}
        dimensions = [1]

        self.simple_gp = GPFittingGaussian(type_kernel, self.training_data_simple, dimensions,
                                           bounds_domain=[[0, 100]])

        # 2-d point; the second coordinate is the task index for the tasks kernel.
        self.training_data_complex = {
            "evaluations": [1.0],
            "points": [[42.2851784656, 0]],
            "var_noise": [0.5]}

        self.complex_gp = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            self.training_data_complex, [2, 1, 1], bounds_domain=[[0, 100], [0]])

        # Same product kernel, now with two tasks (last coordinate 0 or 1).
        self.training_data_complex_2 = {
            "evaluations": [1.0, 2.0, 3.0],
            "points": [[42.2851784656, 0], [10.532, 0], [9.123123, 1]],
            "var_noise": [0.5, 0.2, 0.1]}

        self.complex_gp_2 = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            self.training_data_complex_2, [3, 1, 2], bounds_domain=[[0, 100], [0, 1]])

        # A fresh point/evaluation pair used by tests that add data to a GP.
        self.new_point = np.array([[80.0]])
        self.evaluation = np.array([80.0])

        self.training_data_noisy = {
            "evaluations": [41.0101845096],
            "points": [[42.2851784656]],
            "var_noise": [0.0181073779]}

        self.gp_noisy = GPFittingGaussian(type_kernel, self.training_data_noisy, dimensions,
                                          bounds_domain=[[0, 100]])

        # Fixed seed so the sampled function (and noise) is reproducible; the
        # seed must be set before both np.random.normal and sample_from_gp.
        np.random.seed(2)
        n_points = 50
        normal_noise = np.random.normal(0, 0.5, n_points)
        points = np.linspace(0, 500, n_points)
        points = points.reshape([n_points, 1])
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        function = function[0, :]

        evaluations = function + normal_noise

        self.training_data_gp = {
            "evaluations": list(evaluations),
            "points": points,
            "var_noise": []}

        self.gp_gaussian = GPFittingGaussian([SCALED_KERNEL, MATERN52_NAME], self.training_data_gp,
                                             [1])

        # Same data, unscaled kernel and explicit domain bounds.
        self.gp_gaussian_2 = GPFittingGaussian([MATERN52_NAME], self.training_data_gp, [1],
                                               bounds_domain=[[0, 100]])

        # Same sampled function shifted down by 10 (roughly centered data).
        self.training_data_gp_2 = {
            "evaluations": list(evaluations - 10.0),
            "points": points,
            "var_noise": []}
        self.gp_gaussian_central = GPFittingGaussian([SCALED_KERNEL, MATERN52_NAME],
                                                     self.training_data_gp_2, [1],
                                                     bounds_domain=[[0, 100]])