Example #1
    def setUp(self):
        np.random.seed(5)
        n_points = 100
        points = np.linspace(0, 100, n_points)
        points = points.reshape([n_points, 1])
        tasks = np.random.randint(2, size=(n_points, 1))

        add = [-10, 10]
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        self.function = function

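        # shift each evaluation by its task: task 0 gets add[0] = -10, task 1 gets add[1] = 10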
        for i in xrange(n_points):
            function[0, i] += add[tasks[i, 0]]
        points = np.concatenate((points, tasks), axis=1)
        self.points = points
        self.evaluations = function[0, :]

        function = function[0, :]

        training_data = {
            'evaluations': list(function),
            'points': points,
            "var_noise": [],
        }
        gaussian_p = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data, [2, 1, 2], bounds_domain=[[0, 100], [0, 1]], type_bounds=[0, 1])
        gaussian_p = gaussian_p.fit_gp_regression(random_seed=1314938)

        quadrature = BayesianQuadrature(gaussian_p, [0], UNIFORM_FINITE,
                                        parameters_distribution={TASKS: 2},
                                        model_only_x=True)
        self.mt = MultiTasks(quadrature, quadrature.parameters_distribution.get(TASKS))
Example #2
    def test_optimize_2(self):
        bgo_2 = BGO.from_spec(self.spec_2)
        sol = bgo_2.optimize(random_seed=1, n_restarts=1)
        # test that after the first iteration the new points are added correctly
        training_data = deepcopy(self.training_data)

        training_data['points'] = np.concatenate(
            (training_data['points'], np.array([[99.9863644153, 0]])), axis=0)
        training_data['evaluations'] = np.concatenate(
            (training_data['evaluations'], np.array([9.0335599603])))
        gaussian_p_med = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data, [2, 1, 2],
            bounds_domain=[[0, 100], [0, 1]],
            type_bounds=[0, 1])
        gaussian_p_med.update_value_parameters(self.params)
        gp_med = BayesianQuadrature(gaussian_p_med, [0], UNIFORM_FINITE,
                                    {TASKS: 2})
        sbo = SBO(gp_med, np.array(self.domain.discretization_domain_x))

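        # optimize the SBO acquisition function from a fixed starting point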
        point = sbo.optimize(start=np.array([[10, 0]]))

        npt.assert_almost_equal(point['optimal_value'],
                                542.4598435381,
                                decimal=4)
        npt.assert_almost_equal(point['solution'], np.array([61.58743036, 0]))
Example #3
    def test_optimize_posterior_mean_samples(self):
        np.random.seed(5)
        n_points = 100
        points = np.linspace(0, 100, n_points)
        points = points.reshape([n_points, 1])
        tasks = np.random.randint(2, size=(n_points, 1))

        add = [10, -10]
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        max_value = function[0, np.argmax(function)]
        max_point = points[np.argmax(function), 0]

        for i in xrange(n_points):
            function[0, i] += add[tasks[i, 0]]
        points = np.concatenate((points, tasks), axis=1)

        function = function[0, :]

        training_data = {
            'evaluations': list(function),
            'points': points,
            "var_noise": [],
        }

        gaussian_p = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data, [2, 1, 2],
            bounds_domain=[[0, 100]],
            max_steps_out=1000)
        gaussian_p = gaussian_p.fit_gp_regression(random_seed=1314938)
        gp = BayesianQuadrature(gaussian_p, [0], UNIFORM_FINITE, {TASKS: 2})

        random_seed = 10

        n_samples_parameters = 15
        gp.gp.thinning = 10
        gp.gp.n_burning = 500

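        # optimize the posterior mean, averaging over the sampled hyperparameters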
        sol_2 = gp.optimize_posterior_mean(
            random_seed=random_seed,
            n_best_restarts=2,
            n_samples_parameters=n_samples_parameters,
            start_new_chain=True)

        assert max_point == sol_2['solution']
        npt.assert_almost_equal(max_value, sol_2['optimal_value'], decimal=3)
Example #4
    def test_get_cached_data(self):
        gp = BayesianQuadrature(self.complex_gp, [0], UNIFORM_FINITE, {})
        gp.cache_quadratures['a'] = 1
        gp.cache_posterior_mean['a'] = 2
        gp.cache_quadrature_with_candidate['b'] = 3

        assert gp._get_cached_data('a', QUADRATURES) == 1
        assert gp._get_cached_data('a', POSTERIOR_MEAN) == 2
        assert gp._get_cached_data('b', B_NEW) == 3
Example #5
    def test_gradient_vector_b(self):
        np.random.seed(5)
        n_points = 10
        points = np.linspace(0, 100, n_points)
        points = points.reshape([n_points, 1])
        tasks = np.random.randint(2, size=(n_points, 1))

        add = [10, -10]
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)

        for i in xrange(n_points):
            function[0, i] += add[tasks[i, 0]]
        points = np.concatenate((points, tasks), axis=1)

        function = function[0, :]

        training_data = {
            'evaluations': list(function),
            'points': points,
            "var_noise": [],
        }

        gaussian_p = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data, [2, 1, 2],
            bounds_domain=[[0, 100]])
        gaussian_p = gaussian_p.fit_gp_regression(random_seed=1314938)

        gp = BayesianQuadrature(gaussian_p, [0], UNIFORM_FINITE, {TASKS: 2})
        #  gp = self.gp_complete
        candidate_point = np.array([[84.0, 1]])
        points = np.array([[99.5], [12.1], [70.2]])
        value = gp.gradient_vector_b(candidate_point, points, cache=False)

        dh_ = 0.0000001
        dh = [dh_]
        finite_diff = FiniteDifferences.forward_difference(
            lambda point: gp.compute_posterior_parameters_kg(
                points, point.reshape((1, len(point))), cache=False)['b'],
            candidate_point[0, :], np.array(dh))
        npt.assert_almost_equal(finite_diff[0], value[:, 0], decimal=5)
        assert np.all(finite_diff[1] == value[:, 1])

        value_2 = gp.gradient_vector_b(candidate_point, points, cache=True)
        assert np.all(value_2 == value)
Example #6
    def setUp(self):
        training_data_complex = {
            "evaluations": [1.0, 1.1],
            "points": [[42.2851784656, 0], [42.3851784656, 1]],
            "var_noise": []
        }

        self.complex_gp_2 = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data_complex, [2, 1, 2])

        self.gp = BayesianQuadrature(self.complex_gp_2, [0], UNIFORM_FINITE,
                                     {TASKS: 2})
Example #7
    def setUp(self):
        self.training_data_complex = {
            "evaluations": [1.0, 1.1],
            "points": [[42.2851784656, 0], [42.3851784656, 0]],
            "var_noise": []
        }

        self.complex_gp = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            self.training_data_complex, [2, 1, 1])

        self.gp = BayesianQuadrature(self.complex_gp, [0], UNIFORM_FINITE,
                                     {TASKS: 1})

        training_data_complex = {
            "evaluations": [1.0, 1.1],
            "points": [[42.2851784656, 0], [42.3851784656, 1]],
            "var_noise": []
        }

        self.complex_gp_2 = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data_complex, [3, 1, 2])

        self.gp_2 = BayesianQuadrature(self.complex_gp_2, [0], UNIFORM_FINITE,
                                       {TASKS: 2})

        np.random.seed(5)
        n_points = 100
        points = np.linspace(0, 100, n_points)
        points = points.reshape([n_points, 1])
        tasks = np.random.randint(2, size=(n_points, 1))

        add = [10, -10]
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        self.original_function = function

        self.max_value = function[0, np.argmax(function)]
        self.max_point = points[np.argmax(function), 0]
        for i in xrange(n_points):
            function[0, i] += add[tasks[i, 0]]
        points = np.concatenate((points, tasks), axis=1)

        function = function[0, :]

        training_data = {
            'evaluations': list(function),
            'points': points,
            "var_noise": [],
        }

        training_data_2 = {
            'evaluations': list(function[[0, 30, 50, 90, 99]]),
            'points': points[[0, 30, 50, 90, 99], :],
            "var_noise": [],
        }

        gaussian_p = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data, [2, 1, 2],
            bounds_domain=[[0, 100]])
        gaussian_p = gaussian_p.fit_gp_regression(random_seed=1314938)

        gaussian_p_2 = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data_2, [2, 1, 2],
            bounds_domain=[[0, 100]])
        gaussian_p_2 = gaussian_p_2.fit_gp_regression(random_seed=1314938)

        self.gp_complete_2 = BayesianQuadrature(gaussian_p_2, [0],
                                                UNIFORM_FINITE, {TASKS: 2})
        self.gp_complete = BayesianQuadrature(gaussian_p, [0], UNIFORM_FINITE,
                                              {TASKS: 2})
Example #8
class TestBayesianQuadrature(unittest.TestCase):
    def setUp(self):
        self.training_data_complex = {
            "evaluations": [1.0, 1.1],
            "points": [[42.2851784656, 0], [42.3851784656, 0]],
            "var_noise": []
        }

        self.complex_gp = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            self.training_data_complex, [2, 1, 1])

        self.gp = BayesianQuadrature(self.complex_gp, [0], UNIFORM_FINITE,
                                     {TASKS: 1})

        training_data_complex = {
            "evaluations": [1.0, 1.1],
            "points": [[42.2851784656, 0], [42.3851784656, 1]],
            "var_noise": []
        }

        self.complex_gp_2 = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data_complex, [3, 1, 2])

        self.gp_2 = BayesianQuadrature(self.complex_gp_2, [0], UNIFORM_FINITE,
                                       {TASKS: 2})

        np.random.seed(5)
        n_points = 100
        points = np.linspace(0, 100, n_points)
        points = points.reshape([n_points, 1])
        tasks = np.random.randint(2, size=(n_points, 1))

        add = [10, -10]
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        self.original_function = function

        self.max_value = function[0, np.argmax(function)]
        self.max_point = points[np.argmax(function), 0]
        for i in xrange(n_points):
            function[0, i] += add[tasks[i, 0]]
        points = np.concatenate((points, tasks), axis=1)

        function = function[0, :]

        training_data = {
            'evaluations': list(function),
            'points': points,
            "var_noise": [],
        }

        training_data_2 = {
            'evaluations': list(function[[0, 30, 50, 90, 99]]),
            'points': points[[0, 30, 50, 90, 99], :],
            "var_noise": [],
        }

        gaussian_p = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data, [2, 1, 2],
            bounds_domain=[[0, 100]])
        gaussian_p = gaussian_p.fit_gp_regression(random_seed=1314938)

        gaussian_p_2 = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data_2, [2, 1, 2],
            bounds_domain=[[0, 100]])
        gaussian_p_2 = gaussian_p_2.fit_gp_regression(random_seed=1314938)

        self.gp_complete_2 = BayesianQuadrature(gaussian_p_2, [0],
                                                UNIFORM_FINITE, {TASKS: 2})
        self.gp_complete = BayesianQuadrature(gaussian_p, [0], UNIFORM_FINITE,
                                              {TASKS: 2})

    def test_constructor(self):
        gp = BayesianQuadrature(self.complex_gp, [0], UNIFORM_FINITE, {})
        assert gp.parameters_distribution == {TASKS: 1}

    def test_get_cached_data(self):
        gp = BayesianQuadrature(self.complex_gp, [0], UNIFORM_FINITE, {})
        gp.cache_quadratures['a'] = 1
        gp.cache_posterior_mean['a'] = 2
        gp.cache_quadrature_with_candidate['b'] = 3

        assert gp._get_cached_data('a', QUADRATURES) == 1
        assert gp._get_cached_data('a', POSTERIOR_MEAN) == 2
        assert gp._get_cached_data('b', B_NEW) == 3

    def test_evaluate_quadrature_cross_cov(self):
        point = np.array([[1.0]])
        points_2 = np.array([[42.2851784656, 0], [42.3851784656, 0]])

        parameters_kernel = self.gp.gp.kernel.hypers_values_as_array
        value = self.gp.evaluate_quadrature_cross_cov(point, points_2,
                                                      parameters_kernel)

        value_1 = self.gp.gp.evaluate_cross_cov(np.array([[1.0, 0.0]]),
                                                np.array([[42.2851784656, 0]]),
                                                parameters_kernel)
        value_2 = self.gp.gp.evaluate_cross_cov(np.array([[1.0, 0.0]]),
                                                np.array([[42.3851784656, 0]]),
                                                parameters_kernel)
        assert value[0] == value_1[0, 0]
        assert value[1] == value_2[0, 0]

        point = np.array([[1.0]])
        points_2 = np.array([[42.2851784656, 0], [42.3851784656, 1]])

        parameters_kernel = self.gp_2.gp.kernel.hypers_values_as_array
        value = self.gp_2.evaluate_quadrature_cross_cov(
            point, points_2, parameters_kernel)

        value_1 = self.gp_2.gp.evaluate_cross_cov(
            np.array([[1.0, 0.0]]), np.array([[42.2851784656, 0]]),
            parameters_kernel)
        value_2 = self.gp_2.gp.evaluate_cross_cov(
            np.array([[1.0, 1.0]]), np.array([[42.2851784656, 0]]),
            parameters_kernel)

        assert value[0] == np.mean([value_1, value_2])

        value_1 = self.gp_2.gp.evaluate_cross_cov(
            np.array([[1.0, 0.0]]), np.array([[42.3851784656, 1]]),
            parameters_kernel)
        value_2 = self.gp_2.gp.evaluate_cross_cov(
            np.array([[1.0, 1.0]]), np.array([[42.3851784656, 1]]),
            parameters_kernel)

        assert value[1] == np.mean([value_1, value_2])

    def test_compute_posterior_parameters_kg(self):
        points = np.array([[42.0], [42.1], [41.0]])
        candidate_point = np.array([[41.0, 0]])
        value = self.gp_2.compute_posterior_parameters_kg(
            points, candidate_point)

        n_samples = 150
        point = np.array([[41.0]])
        samples = self.gp_2.gp.sample_new_observations(candidate_point,
                                                       n_samples, 1)
        a_n = []
        points_x = deepcopy(self.gp_2.gp.data['points'])
        points_x = np.concatenate((points_x, candidate_point))

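        # Monte Carlo check: the mean/variance of the updated posterior means
        # should match the kg parameters a and b**2 at the candidate point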
        for i in xrange(n_samples):
            evaluations = deepcopy(self.gp_2.gp.data['evaluations'])
            evaluations = np.concatenate((evaluations, [samples[i]]))
            val = self.gp_2.compute_posterior_parameters(
                point,
                historical_evaluations=evaluations,
                historical_points=points_x,
                cache=False)
            a_n.append(val['mean'])

        npt.assert_almost_equal(np.mean(a_n), value['a'][2], decimal=1)
        npt.assert_almost_equal(np.var(a_n), (value['b'][2])**2, decimal=1)

    def test_gradient_posterior_mean(self):
        gp = self.gp_complete

        point = np.array([[80.5]])

        # Test evaluate_grad_quadrature_cross_cov
        grad = gp.evaluate_grad_quadrature_cross_cov(
            point, gp.gp.data['points'], gp.gp.kernel.hypers_values_as_array)

        dh = 0.00001
        finite_diff = FiniteDifferences.forward_difference(
            lambda point: gp.evaluate_quadrature_cross_cov(
                point, gp.gp.data['points'], gp.gp.kernel.
                hypers_values_as_array), point, np.array([dh]))

        for i in xrange(grad.shape[1]):
            npt.assert_almost_equal(finite_diff[0][i], grad[0, i], decimal=1)

        npt.assert_almost_equal(finite_diff[0], grad[0, :], decimal=1)

        # Test gradient_posterior_mean
        gradient = gp.gradient_posterior_mean(point)

        dh = 0.0001
        finite_diff = FiniteDifferences.forward_difference(
            lambda points: gp.compute_posterior_parameters(
                points, only_mean=True)['mean'], point, np.array([dh]))

        npt.assert_almost_equal(finite_diff[0], gradient[0], decimal=5)

    def test_optimize_posterior_mean(self):
        gp = self.gp_complete

        random_seed = 10
        sol = gp.optimize_posterior_mean(random_seed=random_seed)

        n_points = 1000
        points = np.linspace(0, 100, n_points)
        points = points.reshape([n_points, 1])
        evaluations = gp.compute_posterior_parameters(points,
                                                      only_mean=True)['mean']

        point = points[np.argmax(evaluations), 0]
        index = np.argmax(evaluations)

        npt.assert_almost_equal(sol['optimal_value'][0], evaluations[index])
        npt.assert_almost_equal(sol['solution'], point, decimal=1)

        bounds_x = [
            gp.gp.bounds[i] for i in xrange(len(gp.gp.bounds))
            if i in gp.x_domain
        ]
        random_seed = 10
        np.random.seed(10)
        start = DomainService.get_points_domain(1,
                                                bounds_x,
                                                type_bounds=len(gp.x_domain) *
                                                [0])
        start = np.array(start[0])

        var_noise = gp.gp.var_noise.value[0]
        parameters_kernel = gp.gp.kernel.hypers_values_as_array
        mean = gp.gp.mean.value[0]

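        # cached optimal solutions are keyed by the current hyperparameter values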
        index_cache = (var_noise, mean, tuple(parameters_kernel))
        if index_cache not in gp.optimal_solutions:
            gp.optimal_solutions[index_cache] = []
        gp.optimal_solutions[index_cache].append({'solution': start})

        sol_2 = gp.optimize_posterior_mean(random_seed=random_seed)
        npt.assert_almost_equal(sol_2['optimal_value'], sol['optimal_value'])
        npt.assert_almost_equal(sol['solution'], sol_2['solution'], decimal=3)

    def test_optimize_posterior_mean_hessian(self):
        gp = self.gp_complete
        random_seed = 1
        sol_3 = gp.optimize_posterior_mean(random_seed=random_seed,
                                           method_opt=DOGLEG)

        gp.clean_cache()
        sol_2 = gp.optimize_posterior_mean(random_seed=random_seed)
        assert sol_3['solution'] == sol_2['solution']
        npt.assert_almost_equal(sol_3['optimal_value'],
                                sol_2['optimal_value'],
                                decimal=2)

    def test_evaluate_grad_quadrature_cross_cov_resp_candidate(self):
        candidate_point = np.array([[51.5, 0]])
        points = np.array([[51.3], [30.5], [95.1]])
        parameters = self.gp_complete.gp.kernel.hypers_values_as_array
        sol = self.gp_complete.evaluate_grad_quadrature_cross_cov_resp_candidate(
            candidate_point, points, parameters)

        gp = self.gp_complete

        dh = 0.000001
        finite_diff = FiniteDifferences.forward_difference(
            lambda point: gp.evaluate_quadrature_cross_cov(
                points[0:1, :], point.reshape((1, len(point))), parameters),
            candidate_point[0, :], np.array([dh]))
        npt.assert_almost_equal(sol[0, 0], finite_diff[0][0], decimal=2)
        assert sol[1, 0] == finite_diff[1]

        dh = 0.000001
        finite_diff = FiniteDifferences.forward_difference(
            lambda point: gp.evaluate_quadrature_cross_cov(
                points[1:2, :], point.reshape((1, len(point))), parameters),
            candidate_point[0, :], np.array([dh]))
        npt.assert_almost_equal(sol[0, 1], finite_diff[0][0], decimal=1)
        assert sol[1, 1] == finite_diff[1]

        dh = 0.000001
        finite_diff = FiniteDifferences.forward_difference(
            lambda point: gp.evaluate_quadrature_cross_cov(
                points[2:3, :], point.reshape((1, len(point))), parameters),
            candidate_point[0, :], np.array([dh]))
        npt.assert_almost_equal(sol[0, 2], finite_diff[0][0], decimal=2)
        assert sol[1, 2] == finite_diff[1]

    def test_gradient_vector_b(self):
        np.random.seed(5)
        n_points = 10
        points = np.linspace(0, 100, n_points)
        points = points.reshape([n_points, 1])
        tasks = np.random.randint(2, size=(n_points, 1))

        add = [10, -10]
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)

        for i in xrange(n_points):
            function[0, i] += add[tasks[i, 0]]
        points = np.concatenate((points, tasks), axis=1)

        function = function[0, :]

        training_data = {
            'evaluations': list(function),
            'points': points,
            "var_noise": [],
        }

        gaussian_p = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data, [2, 1, 2],
            bounds_domain=[[0, 100]])
        gaussian_p = gaussian_p.fit_gp_regression(random_seed=1314938)

        gp = BayesianQuadrature(gaussian_p, [0], UNIFORM_FINITE, {TASKS: 2})
        #  gp = self.gp_complete
        candidate_point = np.array([[84.0, 1]])
        points = np.array([[99.5], [12.1], [70.2]])
        value = gp.gradient_vector_b(candidate_point, points, cache=False)

        dh_ = 0.0000001
        dh = [dh_]
        finite_diff = FiniteDifferences.forward_difference(
            lambda point: gp.compute_posterior_parameters_kg(
                points, point.reshape((1, len(point))), cache=False)['b'],
            candidate_point[0, :], np.array(dh))
        npt.assert_almost_equal(finite_diff[0], value[:, 0], decimal=5)
        assert np.all(finite_diff[1] == value[:, 1])

        value_2 = gp.gradient_vector_b(candidate_point, points, cache=True)
        assert np.all(value_2 == value)

    def test_sample_new_observations(self):
        gp = self.gp_complete
        point = np.array([[5.1]])
        samples = gp.sample_new_observations(point, 2, random_seed=1)
        assert len(samples) == 2

    @patch('os.path.exists')
    @patch('os.mkdir')
    def test_write_debug_data(self, mock_mkdir, mock_exists):
        mock_exists.return_value = False
        with patch('__builtin__.open', mock_open()):
            self.gp.write_debug_data("a", "b", "c", "d", "e")
            JSONFile.write([], "a")
        mock_mkdir.assert_called_with('data/debugging/a')

    def test_evaluate_posterior_mean_params(self):
        point = np.array([[97.5]])

        np.random.seed(1)
        val_2 = self.gp_complete.objective_posterior_mean(
            point[0, :], 1.0, 5.0, np.array([50.0, 8.6, -3.0, -0.1]))

        val = self.gp_complete.objective_posterior_mean(point[0, :])

        self.gp_complete.gp.var_noise.value[0] = 1.0
        self.gp_complete.gp.mean.value[0] = 5.0
        self.gp_complete.gp.kernel.update_value_parameters(
            np.array([50.0, 8.6, -3.0, -0.1]))

        np.random.seed(1)
        val_1 = self.gp_complete.objective_posterior_mean(point[0, :])

        npt.assert_almost_equal(val_1, val_2)

    def test_evaluate_grad_posterior_mean_params(self):
        point = np.array([[97.5]])

        np.random.seed(1)
        val_2 = self.gp_complete.grad_posterior_mean(
            point[0, :], 1.0, 5.0, np.array([50.0, 8.6, -3.0, -0.1]))

        val = self.gp_complete.grad_posterior_mean(point[0, :])

        self.gp_complete.gp.var_noise.value[0] = 1.0
        self.gp_complete.gp.mean.value[0] = 5.0
        self.gp_complete.gp.kernel.update_value_parameters(
            np.array([50.0, 8.6, -3.0, -0.1]))

        np.random.seed(1)
        val_1 = self.gp_complete.grad_posterior_mean(point[0, :])

        npt.assert_almost_equal(val_1, val_2)

    def test_optimize_posterior_mean_samples(self):
        np.random.seed(5)
        n_points = 100
        points = np.linspace(0, 100, n_points)
        points = points.reshape([n_points, 1])
        tasks = np.random.randint(2, size=(n_points, 1))

        add = [10, -10]
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)
        max_value = function[0, np.argmax(function)]
        max_point = points[np.argmax(function), 0]

        for i in xrange(n_points):
            function[0, i] += add[tasks[i, 0]]
        points = np.concatenate((points, tasks), axis=1)

        function = function[0, :]

        training_data = {
            'evaluations': list(function),
            'points': points,
            "var_noise": [],
        }

        gaussian_p = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data, [2, 1, 2],
            bounds_domain=[[0, 100]],
            max_steps_out=1000)
        gaussian_p = gaussian_p.fit_gp_regression(random_seed=1314938)
        gp = BayesianQuadrature(gaussian_p, [0], UNIFORM_FINITE, {TASKS: 2})

        random_seed = 10

        n_samples_parameters = 15
        gp.gp.thinning = 10
        gp.gp.n_burning = 500

        sol_2 = gp.optimize_posterior_mean(
            random_seed=random_seed,
            n_best_restarts=2,
            n_samples_parameters=n_samples_parameters,
            start_new_chain=True)

        assert max_point == sol_2['solution']
        npt.assert_almost_equal(max_value, sol_2['optimal_value'], decimal=3)

    def test_compute_hessian_parameters_for_sample(self):
        point = np.array([[95.0]])
        candidate_point = np.array([[99.15, 0]])
        val = self.gp_complete_2.compute_hessian_parameters_for_sample(
            point, candidate_point)

        dh = 0.01
        finite_diff = FiniteDifferences.second_order_central(
            lambda x: self.gp_complete_2.compute_parameters_for_sample(
                x.reshape(
                    (1, len(point))), candidate_point, clear_cache=False)['a'],
            point[0, :], np.array([dh]))

        npt.assert_almost_equal(finite_diff[(0, 0)], val['a'][0, :], decimal=5)

        dh = 0.1
        finite_diff = FiniteDifferences.second_order_central(
            lambda x: self.gp_complete_2.compute_parameters_for_sample(
                x.reshape(
                    (1, len(point))), candidate_point, clear_cache=False)['b'],
            point[0, :], np.array([dh]))

        npt.assert_almost_equal(finite_diff[(0, 0)], val['b'], decimal=5)

    def test_hessian_posterior_mean(self):

        gp = self.gp_complete

        point = np.array([[80.5]])

        # Test evaluate_grad_quadrature_cross_cov
        hessian = gp.hessian_posterior_mean(point)

        dh = 0.1
        finite_diff = FiniteDifferences.second_order_central(
            lambda points: gp.compute_posterior_parameters(
                points.reshape((1, len(points))), only_mean=True)['mean'],
            point[0, :], np.array([dh]))

        npt.assert_almost_equal(finite_diff[(0, 0)], hessian[0, 0])
Example #9
    def test_constructor(self):
        gp = BayesianQuadrature(self.complex_gp, [0], UNIFORM_FINITE, {})
        assert gp.parameters_distribution == {TASKS: 1}
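# placeholder defaults for the GPFittingService.get_gp and BayesianQuadrature calls below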
n_samples = 0
kernel_values = None
mean_value = None
var_noise_value = None
cache = True
parameters_distribution = None

gp = GPFittingService.get_gp(name_model, problem_name, type_kernel, dimensions,
                             bounds_domain, type_bounds, n_training, noise,
                             training_data, points, training_name, mle,
                             thinning, n_burning, max_steps_out, n_samples,
                             random_seed, kernel_values, mean_value,
                             var_noise_value, cache, same_correlation)
quadrature = BayesianQuadrature(
    gp,
    x_domain,
    distribution,
    parameters_distribution=parameters_distribution)
gp.data = gp.convert_from_list_to_numpy(gp.training_data)

bq = quadrature
bounds_x = [
    bq.gp.bounds[i] for i in xrange(len(bq.gp.bounds)) if i in bq.x_domain
]

np.random.seed(1)
points = DomainService.get_points_domain(100000,
                                         bounds_x,
                                         type_bounds=len(bounds_x) * [0])

sbo = SBO(quadrature, np.array(points))
class TestEI(unittest.TestCase):
    def setUp(self):

        np.random.seed(5)
        n_points = 100
        points = np.linspace(0, 100, n_points)
        points = points.reshape([n_points, 1])
        tasks = np.random.randint(2, size=(n_points, 1))

        add = [10, -10]
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)

        for i in xrange(n_points):
            function[0, i] += add[tasks[i, 0]]
        points = np.concatenate((points, tasks), axis=1)
        self.points = points
        self.evaluations = function[0, :]

        function = function[0, :]

        training_data = {
            'evaluations': list(function),
            'points': points,
            "var_noise": [],
        }

        gaussian_p = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data, [2, 1, 2],
            bounds_domain=[[0, 100], [0, 1]],
            type_bounds=[0, 1])
        gaussian_p = gaussian_p.fit_gp_regression(random_seed=1314938)

        self.gp = gaussian_p

        self.bq = BayesianQuadrature(gaussian_p, [0],
                                     UNIFORM_FINITE, {TASKS: 2},
                                     model_only_x=True)

        self.ei = EI(self.gp)

        self.ei_2 = EI(self.bq)

    def test_evaluate(self):
        point = np.array([[97.5, 0]])
        val = self.ei.evaluate(point)

        maximum = np.max(self.evaluations)

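        # Monte Carlo estimate of EI: E[max(sample - maximum, 0)]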
        n_samples = 10000
        samples = self.gp.sample_new_observations(point,
                                                  n_samples,
                                                  random_seed=1)

        evals = np.clip(samples - maximum, 0, None)

        npt.assert_almost_equal(val, np.mean(evals), decimal=2)

    def test_evaluate_bq(self):
        point = np.array([[97.5]])
        val = self.ei_2.evaluate(point)

        maximum = 0.831339057477
        n_samples = 10000
        samples = self.bq.sample_new_observations(point,
                                                  n_samples,
                                                  random_seed=1)

        evals = np.clip(samples - maximum, 0, None)
        npt.assert_almost_equal(val, np.mean(evals), decimal=2)

    def test_evaluate_gradient(self):
        point = np.array([[91.5, 0]])
        grad = self.ei.evaluate_gradient(point)

        dh = 0.0001
        finite_diff = FiniteDifferences.forward_difference(
            lambda point: self.ei.evaluate(point.reshape((1, len(point)))),
            np.array([91.5, 0]), np.array([dh]))

        npt.assert_almost_equal(finite_diff[0], grad[0], decimal=2)
        npt.assert_almost_equal(finite_diff[1], grad[1], decimal=3)

    def test_evaluate_gradient_bq(self):
        point = np.array([[91.5]])
        grad = self.ei_2.evaluate_gradient(point)

        dh = 0.0001
        finite_diff = FiniteDifferences.forward_difference(
            lambda point: self.ei_2.evaluate(point.reshape((1, len(point)))),
            np.array([91.5]), np.array([dh]))

        npt.assert_almost_equal(finite_diff[0], grad[0], decimal=2)

    def test_optimize(self):
        np.random.seed(2)
        opt = self.ei.optimize(random_seed=1, n_restarts=120)

        evaluations = self.ei.generate_evaluations('1', '2', '3', 1, 1, 1,
                                                   [100], 2)
        npt.assert_almost_equal(opt['optimal_value'], np.max(evaluations))

    def test_optimize_bq(self):
        np.random.seed(2)
        opt = self.ei_2.optimize(random_seed=1, n_restarts=50)

        evaluations = self.ei_2.generate_evaluations('1', '2', '3', 1, 1, 1,
                                                     [100], 0)

        npt.assert_almost_equal(opt['optimal_value'], np.max(evaluations))

    def test_evaluate_parameters(self):
        point = np.array([[97.5, 0]])

        np.random.seed(1)
        val_2 = self.ei.evaluate(point, 1.0, 5.0,
                                 np.array([50.0, 8.6, -3.0, -0.1]))

        val = self.ei.evaluate(point)

        self.gp.var_noise.value[0] = 1.0
        self.gp.mean.value[0] = 5.0
        self.gp.kernel.update_value_parameters(
            np.array([50.0, 8.6, -3.0, -0.1]))

        np.random.seed(1)
        val_1 = self.ei.evaluate(point)

        npt.assert_almost_equal(val_1, val_2)

    def test_evaluate_bq_parameters(self):
        point = np.array([[97.5]])

        np.random.seed(1)
        val_2 = self.ei_2.evaluate(point, 1.0, 5.0,
                                   np.array([50.0, 8.6, -3.0, -0.1]))
        val = self.ei_2.evaluate(point)

        self.bq.gp.var_noise.value[0] = 1.0
        self.bq.gp.mean.value[0] = 5.0
        self.bq.gp.kernel.update_value_parameters(
            np.array([50.0, 8.6, -3.0, -0.1]))

        np.random.seed(1)
        val_1 = self.ei_2.evaluate(point)

        npt.assert_almost_equal(val_1, val_2)

    def test_gradient_parameters(self):
        point = np.array([[91.5, 0]])

        np.random.seed(1)
        grad_2 = self.ei.evaluate_gradient(
            point, *(1.0, 5.0, np.array([50.0, 8.6, -3.0, -0.1])))
        grad = self.ei.evaluate_gradient(point)

        self.gp.var_noise.value[0] = 1.0
        self.gp.mean.value[0] = 5.0
        self.gp.kernel.update_value_parameters(
            np.array([50.0, 8.6, -3.0, -0.1]))

        np.random.seed(1)
        grad_1 = self.ei.evaluate_gradient(point)

        npt.assert_almost_equal(grad_1, grad_2)

    def test_gradient_bq_parameters(self):
        point = np.array([[91.5]])
        np.random.seed(1)
        grad_2 = self.ei_2.evaluate_gradient(
            point, *(1.0, 5.0, np.array([50.0, 8.6, -3.0, -0.1])))

        grad = self.ei_2.evaluate_gradient(point)
        self.bq.gp.var_noise.value[0] = 1.0
        self.bq.gp.mean.value[0] = 5.0
        self.bq.gp.kernel.update_value_parameters(
            np.array([50.0, 8.6, -3.0, -0.1]))

        np.random.seed(1)
        grad_1 = self.ei_2.evaluate_gradient(point)

        npt.assert_almost_equal(grad_2, grad_1)

    def test_combine_ei_gradient(self):
        point = np.array([[91.5, 0]])

        np.random.seed(1)

        val = self.ei.evaluate(point,
                               *(1.0, 5.0, np.array([50.0, 8.6, -3.0, -0.1])))

        grad = self.ei.evaluate_gradient(
            point, *(1.0, 5.0, np.array([50.0, 8.6, -3.0, -0.1])))

        self.ei.clean_cache()
        np.random.seed(1)
        grad_1 = self.ei.evaluate_gradient(
            point, *(1.0, 5.0, np.array([50.0, 8.6, -3.0, -0.1])))

        npt.assert_almost_equal(grad, grad_1)

    def test_evaluate_ei_sample_parameters(self):
        point = np.array([91.5, 0])
        self.ei.gp.thinning = 5
        self.ei.gp.n_burning = 100

        n_samples_parameters = 15
        np.random.seed(1)
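        # sample hyperparameters, then evaluate EI and its gradient averaged over the samples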
        self.gp.start_new_chain()
        self.gp.sample_parameters(n_samples_parameters)
        value = wrapper_objective_acquisition_function(point, self.ei,
                                                       n_samples_parameters)

        gradient = wrapper_gradient_acquisition_function(
            point, self.ei, n_samples_parameters)
        np.random.seed(1)
        sol = self.ei.optimize(None,
                               1,
                               True,
                               10,
                               n_samples_parameters=n_samples_parameters,
                               maxepoch=5)
        npt.assert_almost_equal(value, 0.297100121625)
        npt.assert_almost_equal(gradient, np.array([0.00058253, 0]))
        npt.assert_almost_equal(sol['optimal_value'], 0.3205552)

    def test_optimize_ei(self):
        np.random.seed(2)
        opt = self.ei.optimize(random_seed=1, n_restarts=120)

        np.random.seed(2)
        opt_2 = self.ei.optimize(random_seed=1,
                                 n_restarts=120,
                                 n_best_restarts=10)

        npt.assert_almost_equal(opt['optimal_value'], opt_2['optimal_value'])

    def test_optimize_ei_2(self):
        self.ei.gp.thinning = 5
        self.ei.gp.n_burning = 100
        n_samples_parameters = 15

        np.random.seed(2)
        opt = self.ei.optimize(random_seed=1,
                               n_restarts=4,
                               start_new_chain=True,
                               n_samples_parameters=n_samples_parameters,
                               maxepoch=5)

        np.random.seed(2)
        opt_2 = self.ei.optimize(random_seed=1,
                                 n_restarts=10,
                                 n_best_restarts=10,
                                 start_new_chain=True,
                                 n_samples_parameters=n_samples_parameters,
                                 maxepoch=5)
        npt.assert_almost_equal(opt['optimal_value'], 0.31500651)
        npt.assert_almost_equal(opt_2['optimal_value'], 0.20618003)
def bgo(objective_function,
        bounds_domain_x,
        integrand_function=None,
        simplex_domain=None,
        noise=False,
        n_samples_noise=0,
        bounds_domain_w=None,
        type_bounds=None,
        distribution=None,
        parameters_distribution=None,
        name_method='bqo',
        n_iterations=50,
        type_kernel=None,
        dimensions_kernel=None,
        n_restarts=10,
        n_best_restarts=0,
        problem_name=None,
        n_training=None,
        random_seed=1,
        mle=False,
        n_samples_parameters=5,
        maxepoch=50,
        thinning=50,
        n_burning=500,
        max_steps_out=1000,
        parallel=True,
        same_correlation=True,
        monte_carlo_sbo=True,
        n_samples_mc=5,
        n_restarts_mc=5,
        n_best_restarts_mc=0,
        factr_mc=1e12,
        maxiter_mc=10,
        method_opt_mc=LBFGS_NAME,
        n_restarts_mean=100,
        n_best_restarts_mean=10,
        n_samples_parameters_mean=5,
        maxepoch_mean=50,
        parallel_training=False,
        default_n_samples_parameters=None,
        default_n_samples=None):
    """
    Maximizes the objective function.

    :param objective_function: function G to be maximized:
        If the function is noise-free, G(([float])point) returns [float].
        If the function is noisy, G(([float])point, (int) n_samples) returns
            [(float) value, (float) variance].
    :param bounds_domain_x: [(float, float)]
    :param integrand_function: function F:
        If the function is noise-free, F(([float])point) returns [float].
        If the function is noisy, F(([float])point, (int) n_samples) returns
            [(float) value, (float) variance].
    :param simplex_domain: (float) {sum[i, from 1 to domain]=simplex_domain}
    :param noise: (boolean) True if the evaluations of the objective function are noisy
    :param n_samples_noise: (int) If noise is True, n_samples evaluations of the function are
            taken to estimate its value.
    :param bounds_domain_w: [([float, float] or [float])]; in the first case, the bounds are the
            lower and upper bounds of the respective entry; in the second case, it's a list of
            finite points representing the domain of that entry (e.g. when W is finite).
    :param type_bounds: [0 or 1], 0 if the bounds are the lower and upper bounds of the
            respective entry, 1 if the bounds are all the finite options for that entry.
    :param distribution: (str) probability distribution for the Bayesian quadrature (i.e. the
        distribution of W)
    :param parameters_distribution: {str: float}
    :param name_method: str, Options: 'SBO', 'EI'
    :param n_iterations: int
    :param type_kernel: [str] Must be in possible_kernels. If it's a product of kernels, it
            should be a list such as [PRODUCT_KERNELS_SEPARABLE, NAME_1_KERNEL, NAME_2_KERNEL].
            If we want to use a scaled NAME_1_KERNEL, the parameter must be
            [SCALED_KERNEL, NAME_1_KERNEL].
    :param dimensions_kernel: [int]. It contains only n_tasks for the task kernels; for
            PRODUCT_KERNELS_SEPARABLE it contains the dimension of every kernel in the product,
            with the total dimension of the product kernel in the first entry.
    :param n_restarts: (int) Number of starting points to optimize the acquisition function
    :param n_best_restarts: (int) Number of best starting points chosen from the n_restarts
            points.
    :param problem_name: str
    :param n_training: (int) number of training points
    :param random_seed: int
    :param mle: (boolean) If True, fits the GP by MLE; otherwise, a fully Bayesian approach is
        used.
    :param n_samples_parameters: (int) Number of samples of the parameters to estimate the
        stochastic gradient when optimizing the acquisition function.
    :param maxepoch: (int) Maximum number of iterations of the SGD when optimizing the acquisition
        function.
    :param thinning: int
    :param n_burning: (int) Number of burnings samples for slice sampling.
    :param max_steps_out: (int) Maximum number of steps out for the stepping-out or
        doubling procedure in slice sampling.
    :param parallel: (boolean)
    :param same_correlation: (boolean) If true, it uses the same correlations for the task kernel.
    :param monte_carlo_sbo: (boolean) If True, the code estimates the objective function and
        gradient with the discretization-free approach.
    :param n_samples_mc: (int) Number of samples for the MC method.
    :param n_restarts_mc: (int) Number of restarts to optimize the posterior mean given a sample of
        the normal random variable.
    :param n_best_restarts_mc:  (int) Number of best restarting points used to optimize the
        posterior mean given a sample of the normal random variable.
    :param factr_mc: (float) Parameter of LBFGS to optimize a sample of BQO when using the
        discretization-free approach.
    :param maxiter_mc: (int) Max number of iterations to optimize a sample of BQO when using the
        discretization-free approach.
    :param method_opt_mc: (str) Optimization method used when using the discretization-free approach
        of BQO.
    :param n_restarts_mean: (int) Number of starting points to optimize the posterior mean.
    :param n_best_restarts_mean: int
    :param n_samples_parameters_mean: (int) Number of sample of hyperparameters to estimate the
        stochastic gradient inside of the SGD when optimizing the posterior mean.
    :param maxepoch_mean: (int) Maxepoch for the optimization of the posterior mean.
    :param parallel_training: (boolean) Train in parallel if it's True.
    :param default_n_samples_parameters: (int) Number of samples of the hyperparameters used to
        estimate the VOI.
    :param default_n_samples: (int) Number of samples of Z for the discretization-free
        estimation of the VOI.
    :return: {'optimal_solution': np.array(n),
            'optimal_value': float}
    """

    np.random.seed(random_seed)
    # default_parameters

    dim_x = len(bounds_domain_x)
    x_domain = range(dim_x)

    if name_method == 'bqo':
        name_method = SBO_METHOD

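    # infer the dimension of w from bounds_domain_w / type_bounds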
    dim_w = 0
    if name_method == SBO_METHOD:
        if type_bounds is None:
            dim_w = len(bounds_domain_w)
        elif type_bounds is not None and type_bounds[-1] == 1:
            dim_w = 1
        elif type_bounds is not None:
            dim_w = len(type_bounds[dim_x:])

    total_dim = dim_w + dim_x

    if type_bounds is None:
        type_bounds = total_dim * [0]

    if bounds_domain_w is None:
        bounds_domain_w = []
    bounds_domain = [
        list(bound) for bound in bounds_domain_x + bounds_domain_w
    ]

    training_name = None

    if problem_name is None:
        problem_name = 'user_problem'

    if type_kernel is None:
        if name_method == SBO_METHOD:
            if type_bounds[-1] == 1:
                type_kernel = [
                    PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME
                ]
                dimensions_kernel = [total_dim, dim_x, len(bounds_domain[-1])]
            else:
                type_kernel = [SCALED_KERNEL, MATERN52_NAME]
                dimensions_kernel = [total_dim]
        elif name_method == EI_METHOD:
            type_kernel = [SCALED_KERNEL, MATERN52_NAME]
            dimensions_kernel = [total_dim]

    if dimensions_kernel is None:
        raise Exception("Not enough inputs to run the BGO algorithm")

    if n_training is None:
        if type_bounds[-1] == 1:
            n_training = len(bounds_domain[-1])
        else:
            n_training = 5

    if distribution is None:
        if type_bounds[-1] == 1:
            distribution = UNIFORM_FINITE
        else:
            distribution = GAMMA

    method_optimization = name_method

    name_model = 'gp_fitting_gaussian'

    if name_method == SBO_METHOD:
        training_function = integrand_function
    elif name_method == EI_METHOD:
        training_function = objective_function

    bounds_domain_x = BoundsEntity.to_bounds_entity(bounds_domain_x)

    spec = {
        'name_model': name_model,
        'problem_name': problem_name,
        'type_kernel': type_kernel,
        'dimensions': dimensions_kernel,
        'bounds_domain': bounds_domain,
        'type_bounds': type_bounds,
        'n_training': n_training,
        'noise': noise,
        'training_data': None,
        'points': None,
        'training_name': training_name,
        'mle': mle,
        'thinning': thinning,
        'n_burning': n_burning,
        'max_steps_out': max_steps_out,
        'n_samples': n_samples_noise,
        'random_seed': random_seed,
        'kernel_values': None,
        'mean_value': None,
        'var_noise_value': None,
        'cache': True,
        'same_correlation': same_correlation,
        'use_only_training_points': True,
        'optimization_method': method_optimization,
        'n_samples_parameters': n_samples_parameters,
        'parallel_training': parallel_training,
        'simplex_domain': simplex_domain,
        'objective_function': training_function,
        'dim_x': dim_x,
        'choose_noise': True,
        'bounds_domain_x': bounds_domain_x,
    }

    gp_model = GPFittingService.from_dict(spec)

    quadrature = None
    acquisition_function = None

    domain = DomainService.from_dict(spec)

    if method_optimization not in _possible_optimization_methods:
        raise Exception("Incorrect BGO method")

    if method_optimization == SBO_METHOD:
        quadrature = BayesianQuadrature(
            gp_model,
            x_domain,
            distribution,
            parameters_distribution=parameters_distribution)

        acquisition_function = SBO(quadrature,
                                   np.array(domain.discretization_domain_x))
    elif method_optimization == EI_METHOD:
        acquisition_function = EI(gp_model, noisy_evaluations=noise)

    bgo_obj = BGO(acquisition_function,
                  gp_model,
                  n_iterations,
                  problem_name,
                  training_name,
                  random_seed,
                  n_training,
                  name_model,
                  method_optimization,
                  minimize=False,
                  n_samples=n_samples_noise,
                  noise=noise,
                  quadrature=quadrature,
                  parallel=parallel,
                  number_points_each_dimension_debug=None,
                  n_samples_parameters=n_samples_parameters,
                  use_only_training_points=True,
                  objective_function=objective_function,
                  training_function=training_function)

    opt_params_mc = {}

    if factr_mc is not None:
        opt_params_mc['factr'] = factr_mc
    if maxiter_mc is not None:
        opt_params_mc['maxiter'] = maxiter_mc

    result = bgo_obj.optimize(
        debug=False,
        n_samples_mc=n_samples_mc,
        n_restarts_mc=n_restarts_mc,
        n_best_restarts_mc=n_best_restarts_mc,
        monte_carlo_sbo=monte_carlo_sbo,
        n_restarts=n_restarts,
        n_best_restarts=n_best_restarts,
        n_samples_parameters=n_samples_parameters,
        n_restarts_mean=n_restarts_mean,
        n_best_restarts_mean=n_best_restarts_mean,
        random_seed=bgo_obj.random_seed,
        method_opt_mc=method_opt_mc,
        n_samples_parameters_mean=n_samples_parameters_mean,
        maxepoch_mean=maxepoch_mean,
        maxepoch=maxepoch,
        threshold_sbo=0.001,
        optimize_only_posterior_mean=False,
        start_optimize_posterior_mean=0,
        optimize_mean_each_iteration=False,
        default_n_samples_parameters=default_n_samples_parameters,
        default_n_samples=default_n_samples,
        **opt_params_mc)

    return result
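
A minimal, hypothetical usage sketch of bgo (not from the source): toy_objective and
toy_integrand below are illustrative assumptions, with a one-dimensional x in [0, 5] and a
finite task variable w in {0, 1}, following the signatures described in the docstring above.

def toy_integrand(point):
    # point = [x, w]; noise-free case, so it returns [float] (hypothetical example)
    x, w = point[0], point[1]
    return [-(x - 2.5) ** 2 + (1.0 if w == 1 else -1.0)]

def toy_objective(point):
    # point = [x]; the objective averages the integrand over both tasks
    x = point[0]
    return [-(x - 2.5) ** 2]

result = bgo(toy_objective, [(0, 5)],
             integrand_function=toy_integrand,
             bounds_domain_w=[[0, 1]],
             type_bounds=[0, 1],
             n_iterations=5,
             random_seed=1)
print(result)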
    @classmethod
    def from_spec(cls, spec):
        """
        Construct BGO instance from spec
        :param spec: RunSpecEntity

        :return: BGO
        """

        random_seed = spec.get('random_seed')
        method_optimization = spec.get('method_optimization')

        logger.info("Training GP model")
        logger.info("Random seed is: %d" % random_seed)
        logger.info("Algorithm used is:")
        logger.info(method_optimization)

        gp_model = GPFittingService.from_dict(spec)
        noise = spec.get('noise')
        quadrature = None
        acquisition_function = None

        domain = DomainService.from_dict(spec)

        if method_optimization not in cls._possible_optimization_methods:
            raise Exception("Incorrect BGO method")

        if method_optimization == SBO_METHOD:
            x_domain = spec.get('x_domain')
            distribution = spec.get('distribution')
            parameters_distribution = spec.get('parameters_distribution')
            quadrature = BayesianQuadrature(
                gp_model,
                x_domain,
                distribution,
                parameters_distribution=parameters_distribution)

            acquisition_function = SBO(
                quadrature, np.array(domain.discretization_domain_x))
        elif method_optimization == MULTI_TASK_METHOD:
            x_domain = spec.get('x_domain')
            distribution = spec.get('distribution')
            parameters_distribution = spec.get('parameters_distribution')
            quadrature = BayesianQuadrature(
                gp_model,
                x_domain,
                distribution,
                parameters_distribution=parameters_distribution,
                model_only_x=True)
            acquisition_function = MultiTasks(
                quadrature, quadrature.parameters_distribution.get(TASKS))
        elif method_optimization == EI_METHOD:
            acquisition_function = EI(gp_model, noisy_evaluations=noise)
        elif method_optimization == SDE_METHOD:
            x_domain = len(spec.get('x_domain'))
            parameters_distribution = spec.get('parameters_distribution')
            domain_random = np.array(parameters_distribution['domain_random'])
            weights = np.array(parameters_distribution['weights'])
            acquisition_function = SDE(gp_model, domain_random, x_domain,
                                       weights)

        problem_name = spec.get('problem_name')
        training_name = spec.get('training_name')
        n_samples = spec.get('n_samples')
        minimize = spec.get('minimize')
        n_iterations = spec.get('n_iterations')
        name_model = spec.get('name_model')
        parallel = spec.get('parallel')
        n_training = spec.get('n_training')
        number_points_each_dimension_debug = spec.get(
            'number_points_each_dimension_debug')
        n_samples_parameters = spec.get('n_samples_parameters', 0)
        use_only_training_points = spec.get('use_only_training_points', True)

        n_iterations = n_iterations - (
            len(gp_model.training_data['evaluations']) - n_training)

        bgo = cls(acquisition_function,
                  gp_model,
                  n_iterations,
                  problem_name,
                  training_name,
                  random_seed,
                  n_training,
                  name_model,
                  method_optimization,
                  minimize=minimize,
                  n_samples=n_samples,
                  noise=noise,
                  quadrature=quadrature,
                  parallel=parallel,
                  number_points_each_dimension_debug=
                  number_points_each_dimension_debug,
                  n_samples_parameters=n_samples_parameters,
                  use_only_training_points=use_only_training_points)

        if n_training < len(bgo.gp_model.training_data['evaluations']):
            extra_iterations = len(
                bgo.gp_model.training_data['evaluations']) - n_training
            data = JSONFile.read(bgo.objective.file_path)
            bgo.objective.evaluated_points = data['evaluated_points'][
                0:extra_iterations]
            bgo.objective.objective_values = data['objective_values'][
                0:extra_iterations]
            bgo.objective.model_objective_values = \
                data['model_objective_values'][0:extra_iterations]
            bgo.objective.standard_deviation_evaluations = data[
                'standard_deviation_evaluations']

        return bgo