    def test_domain_from_dict(self):
        expect(DomainService).load_discretization.never()
        DomainService.from_dict(self.spec)

        expect(DomainService).load_discretization.once().and_return([])
        self.spec['number_points_each_dimension'] = [5]
        self.spec['problem_name'] = 'test'
        DomainService.from_dict(self.spec)

    def test_load_discretization_file_not_exists(self):
        allow(JSONFile).read
        expect(JSONFile).write.twice()
        expect(DomainEntity).discretize_domain.twice().and_return([])
        expect(BoundsEntity).get_bounds_as_lists.twice().and_return([2])

        assert DomainService.load_discretization('test_problem', 1, 0) == []

        with patch('os.path.exists', new=MagicMock(return_value=False)):
            os.mkdir = MockMkdir()
            assert DomainService.load_discretization('test_problem', 1,
                                                     0) == []

    def test_get_points_domain(self):
        sample = DomainService.get_points_domain(2, [[1, 5], [2, 3, 4]],
                                                 [0, 1], 1)

        assert sample == [[2.668088018810296, 2], [3.8812979737686324, 3]]

        sample_2 = DomainService.get_points_domain(2, [[1, 5], [2, 3]],
                                                   random_seed=1)
        np.random.seed(1)
        a = list(np.random.uniform(1, 5, 2))
        b = list(np.random.uniform(2, 3, 2))

        assert sample_2 == [[a[0], b[0]], [a[1], b[1]]]
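
# Aside: the assertions above pin down the sampling semantics: entries with
# type_bounds 0 are drawn uniformly from [lower, upper], and entries with
# type_bounds 1 are drawn from the listed finite options, one column at a
# time. A minimal numpy-only sketch of that behavior (illustrative, not the
# actual DomainService implementation):
import numpy as np


def sample_domain_sketch(n, bounds, type_bounds=None, random_seed=None):
    if random_seed is not None:
        np.random.seed(random_seed)
    if type_bounds is None:
        type_bounds = len(bounds) * [0]
    columns = []
    for bound, kind in zip(bounds, type_bounds):
        if kind == 0:
            # continuous entry: uniform over [lower, upper]
            columns.append(np.random.uniform(bound[0], bound[-1], n))
        else:
            # finite entry: uniform choice over the listed options
            columns.append(np.random.choice(bound, n))
    return [list(point) for point in zip(*columns)]


# e.g. sample_domain_sketch(2, [[1, 5], [2, 3, 4]], [0, 1], random_seed=1)
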
    def iteration_algorithm(self, n_restarts=10, n_samples=10):
        """
        Checked
        :param n_restarts:
        :param n_samples:
        :return:
        """
        if self.parameters is None:
            self.estimate_parameters_kernel()
            parameters = self.parameters
        else:
            parameters = self.parameters

        samples = self.sample_variable(parameters, n_samples)

        bounds = [
            tuple(bound)
            for bound in [self.gp.bounds[i] for i in range(self.x_domain)]
        ]
        start = DomainService.get_points_domain(
            n_restarts,
            self.gp.bounds[0:self.x_domain],
            type_bounds=self.gp.type_bounds[0:self.x_domain])

        start_points = {i: start[i] for i in range(n_restarts)}
        optimization = Optimization(NELDER,
                                    wrapper_ei_objective,
                                    bounds,
                                    None,
                                    hessian=None,
                                    tol=None,
                                    minimize=False)
        args = (False, None, True, 0, optimization, self, samples, parameters)
        sol = Parallel.run_function_different_arguments_parallel(
            wrapper_optimize, start_points, *args)
        solutions = []
        results_opt = []
        for i in range(n_restarts):
            if sol.get(i) is None:
                logger.info(
                    "Error computing the optimum of a_{n+1} at one sample at point %d"
                    % i)
                continue
            solutions.append(sol.get(i)['optimal_value'])
            results_opt.append(sol.get(i))
        ind_max = np.argmax(solutions)
        control = results_opt[ind_max]['solution']
        # do in parallel

        environment = self.get_environment(control, parameters)

        return np.concatenate((control, environment))
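
# Aside: the restart-then-argmax pattern above (launch the optimizer from
# several random starting points, skip failed runs, keep the best result) is
# how these acquisition optimizations guard against local optima. A compact
# scipy sketch of the same idea (illustrative; the method above uses the
# project's Optimization and Parallel wrappers instead):
import numpy as np
from scipy.optimize import minimize


def maximize_multistart(f, bounds, n_restarts=10):
    lows = np.array([bound[0] for bound in bounds])
    highs = np.array([bound[-1] for bound in bounds])
    starts = np.random.uniform(lows, highs, size=(n_restarts, len(bounds)))
    best = None
    for start in starts:
        # maximize f by minimizing -f, matching minimize=False above
        result = minimize(lambda x: -f(x), start, method='Nelder-Mead')
        if not result.success:
            continue  # analogous to the sol.get(i) is None check above
        if best is None or -result.fun > -best.fun:
            best = result
    if best is None:
        raise RuntimeError("all restarts failed")
    return best.x, -best.fun
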
    def get_environment(self, control, parameters_kernel, n_restarts=10):
        """
        correct
        See p.1142, eq. 15
        :param control:
        :return:

        """
        bounds = [
            tuple(bound)
            for bound in [self.gp.bounds[i] for i in self.w_domain]
        ]
        bounds = [[bound[0], bound[-1]] for bound in bounds]
        start = DomainService.get_points_domain(n_restarts, bounds)

        start_points = {i: start[i] for i in range(n_restarts)}
        optimization = Optimization(NELDER,
                                    wrapper_evaluate_squared_error,
                                    bounds,
                                    None,
                                    hessian=None,
                                    tol=None,
                                    minimize=False)

        run_parallel = True
        args = (False, None, run_parallel, 0, optimization, self, control,
                parameters_kernel)
        sol = Parallel.run_function_different_arguments_parallel(
            wrapper_optimize, start_points, *args)
        solutions = []
        results_opt = []
        for i in range(n_restarts):
            if sol.get(i) is None:
                logger.info(
                    "Error computing the optimum of a_{n+1} at one sample at point %d"
                    % i)
                continue
            solutions.append(sol.get(i)['optimal_value'])
            results_opt.append(sol.get(i))
        ind_max = np.argmax(solutions)
        environment = results_opt[ind_max]['solution']

        return environment

    @classmethod
    def get_points_domain(cls,
                          n_training,
                          bounds_domain,
                          random_seed,
                          training_name,
                          problem_name,
                          type_bounds=None,
                          simplex_domain=None):
        """
        Get random points in the domain.

        :param n_training: (int) Number of points
        :param bounds_domain: [([float, float] or [float])], the first case is when the bounds are
            lower or upper bound of the respective entry; in the second case, it's list of finite
            points representing the domain of that entry.
        :param random_seed: (int)
        :param training_name: (str), prefix used to save the training data.
        :param problem_name: str
        :param type_bounds: [0 or 1], 0 if the bounds are lower or upper bound of the respective
            entry, 1 if the bounds are all the finite options for that entry.
        :param simplex_domain: (float) If not None, the sampled points are also required to
            satisfy the simplex constraint that their entries sum to simplex_domain.
        :return: [[float]]
        """

        file_name = cls._filename_domain(
            problem_name=problem_name,
            training_name=training_name,
            n_points=n_training,
            random_seed=random_seed,
        )

        training_dir = path.join(PROBLEM_DIR, problem_name, 'data')
        training_path = path.join(training_dir, file_name)

        points = JSONFile.read(training_path)
        if points is not None:
            return points

        points = DomainService.get_points_domain(n_training,
                                                 bounds_domain,
                                                 type_bounds=type_bounds,
                                                 random_seed=random_seed,
                                                 simplex_domain=simplex_domain)
        JSONFile.write(points, training_path)

        return points
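
# Aside: get_points_domain above is a read-through cache keyed by a file name
# derived from (problem_name, training_name, n_points, random_seed): return
# the stored sample if the file exists, otherwise sample, persist, and return.
# The same pattern with plain json (a sketch; the repo's JSONFile wraps this):
import json
import os


def read_through_cache(path, compute):
    if os.path.exists(path):
        with open(path) as handle:
            return json.load(handle)
    value = compute()
    with open(path, 'w') as handle:
        json.dump(value, handle)
    return value
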

# Example 7

    def test_optimize_posterior_mean(self):
        gp = self.gp_complete

        random_seed = 10
        sol = gp.optimize_posterior_mean(random_seed=random_seed)

        n_points = 1000
        points = np.linspace(0, 100, n_points)
        points = points.reshape([n_points, 1])
        evaluations = gp.compute_posterior_parameters(points,
                                                      only_mean=True)['mean']

        point = points[np.argmax(evaluations), 0]
        index = np.argmax(evaluations)

        npt.assert_almost_equal(sol['optimal_value'][0], evaluations[index])
        npt.assert_almost_equal(sol['solution'], point, decimal=1)

        bounds_x = [
            gp.gp.bounds[i] for i in xrange(len(gp.gp.bounds))
            if i in gp.x_domain
        ]
        random_seed = 10
        np.random.seed(10)
        start = DomainService.get_points_domain(1,
                                                bounds_x,
                                                type_bounds=len(gp.x_domain) *
                                                [0])
        start = np.array(start[0])

        var_noise = gp.gp.var_noise.value[0]
        parameters_kernel = gp.gp.kernel.hypers_values_as_array
        mean = gp.gp.mean.value[0]

        index_cache = (var_noise, mean, tuple(parameters_kernel))
        if index_cache not in gp.optimal_solutions:
            gp.optimal_solutions[index_cache] = []
        gp.optimal_solutions[index_cache].append({'solution': start})

        sol_2 = gp.optimize_posterior_mean(random_seed=random_seed)
        npt.assert_almost_equal(sol_2['optimal_value'], sol['optimal_value'])
        npt.assert_almost_equal(sol['solution'], sol_2['solution'], decimal=3)

# Example 8

    def setUp(self):

        self.bounds_domain_x = BoundsEntity({
            'lower_bound': 0,
            'upper_bound': 100,
        })

        spec_domain = {
            'dim_x': 1,
            'choose_noise': True,
            'bounds_domain_x': [self.bounds_domain_x],
            'number_points_each_dimension': [100],
            'problem_name': 'a',
        }

        self.domain = DomainService.from_dict(spec_domain)

        spec_dict = {
            'problem_name': 'test_problem_with_tasks',
            'dim_x': 1,
            'choose_noise': True,
            'bounds_domain_x':
                [BoundsEntity({'lower_bound': 0, 'upper_bound': 100})],
            'number_points_each_dimension': [100],
            'method_optimization': 'sbo',
            'training_name': 'test_bgo',
            'bounds_domain': [[0, 100], [0, 1]],
            'n_training': 4,
            'type_kernel':
                [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            'noise': False,
            'random_seed': 5,
            'parallel': False,
            'type_bounds': [0, 1],
            'dimensions': [2, 1, 2],
            'name_model': 'gp_fitting_gaussian',
            'mle': True,
            'thinning': 0,
            'n_burning': 0,
            'max_steps_out': 1,
            'training_data': None,
            'x_domain': [0],
            'distribution': UNIFORM_FINITE,
            'parameters_distribution': None,
            'minimize': False,
            'n_iterations': 5,
        }

        self.spec = RunSpecEntity(spec_dict)

        self.acquisition_function = None
        self.gp_model = None

        # Define another BGO object
        np.random.seed(5)
        n_points = 100
        points = np.linspace(0, 100, n_points)
        points = points.reshape([n_points, 1])
        tasks = np.random.randint(2, size=(n_points, 1))

        add = [10, -10]
        kernel = Matern52.define_kernel_from_array(1, np.array([100.0, 1.0]))
        function = SampleFunctions.sample_from_gp(points, kernel)

        for i in xrange(n_points):
            function[0, i] += add[tasks[i, 0]]
        points = np.concatenate((points, tasks), axis=1)
        self.points = points

        function = function[0, :]

        points_ls = [list(points[i, :]) for i in xrange(n_points)]

        training_data_med = {
            'evaluations': list(function[0:5]),
            'points': points_ls[0:5],
            "var_noise": [],
        }

        self.training_data = training_data_med

        training_data = {
            'evaluations': list(function),
            'points': points,
            "var_noise": [],
        }

        gaussian_p = GPFittingGaussian(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            training_data, [2, 1, 2],
            bounds_domain=[[0, 100], [0, 1]],
            type_bounds=[0, 1])
        gaussian_p = gaussian_p.fit_gp_regression(random_seed=1314938)

        params = gaussian_p.get_value_parameters_model
        self.params = params

        spec_dict = {
            'problem_name': 'test_simulated_gp',
            'dim_x': 1,
            'choose_noise': True,
            'bounds_domain_x':
                [BoundsEntity({'lower_bound': 0, 'upper_bound': 100})],
            'number_points_each_dimension': [100],
            'method_optimization': 'sbo',
            'training_name': 'test_sbo',
            'bounds_domain': [[0, 100], [0, 1]],
            'n_training': 5,
            'type_kernel':
                [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            'noise': False,
            'random_seed': 5,
            'parallel': False,
            'type_bounds': [0, 1],
            'dimensions': [2, 1, 2],
            'name_model': 'gp_fitting_gaussian',
            'mle': False,
            'thinning': 0,
            'n_burning': 0,
            'max_steps_out': 1,
            'training_data': training_data_med,
            'x_domain': [0],
            'distribution': UNIFORM_FINITE,
            'parameters_distribution': None,
            'minimize': False,
            'n_iterations': 1,
            'var_noise_value': [params[0]],
            'mean_value': [params[1]],
            'kernel_values': list(params[2:]),
            'cache': False,
            'debug': False,
        }

        self.spec_2 = RunSpecEntity(spec_dict)

# Example 9

    def optimize(self,
                 start=None,
                 random_seed=None,
                 parallel=True,
                 n_restarts=10,
                 n_best_restarts=0,
                 n_samples_parameters=0,
                 start_new_chain=False,
                 maxepoch=11,
                 **kwargs):
        """
        Optimizes EI

        :param start: np.array(n)
        :param random_seed: int
        :param parallel: boolean
        :param n_restarts: int
        :param n_best_restarts: (int) Chooses the best n_best_restarts based on EI
        :param n_samples_parameters: int
        :param start_new_chain: (boolean) If True, we start a new chain with n_samples_parameters
            samples of the parameters of the GP model.
        :return:
        """

        if random_seed is not None:
            np.random.seed(random_seed)

        if start_new_chain:
            if self.gp.name_model == BAYESIAN_QUADRATURE:
                self.gp.gp.start_new_chain()
                self.gp.gp.sample_parameters(DEFAULT_N_PARAMETERS)
            else:
                self.gp.start_new_chain()
                self.gp.sample_parameters(DEFAULT_N_PARAMETERS)

        bounds = self.gp.bounds

        if start is None:
            if self.gp.separate_tasks and self.gp.name_model == BAYESIAN_QUADRATURE:
                tasks = self.gp.tasks
                n_tasks = len(tasks)
                # round n_restarts up to a multiple of n_tasks (float division
                # so np.ceil is meaningful; floor division for the loop bound)
                n_restarts = int(np.ceil(float(n_restarts) / n_tasks) * n_tasks)

                ind = [[i] for i in range(n_restarts)]
                np.random.shuffle(ind)
                task_chosen = np.zeros((n_restarts, 1))
                n_task_per_group = n_restarts // n_tasks

                for i in range(n_tasks):
                    for j in range(n_task_per_group):
                        tk = ind[j + i * n_task_per_group]
                        task_chosen[tk, 0] = i

                start_points = DomainService.get_points_domain(
                    n_restarts,
                    bounds,
                    type_bounds=self.gp.type_bounds,
                    simplex_domain=self.simplex_domain)

                start_points = np.concatenate((start_points, task_chosen),
                                              axis=1)
            else:
                start_points = DomainService.get_points_domain(
                    n_restarts,
                    bounds,
                    type_bounds=self.gp.type_bounds,
                    simplex_domain=self.simplex_domain)

            start = np.array(start_points)

        if n_best_restarts > 0 and n_best_restarts < n_restarts:
            point_dict = {}
            for j in xrange(start.shape[0]):
                point_dict[j] = start[j, :]
            args = (False, None, True, 0, self, DEFAULT_N_PARAMETERS)
            ei_values = Parallel.run_function_different_arguments_parallel(
                wrapper_objective_acquisition_function, point_dict, *args)
            values = [ei_values[j] for j in xrange(start.shape[0])]
            values_index = sorted(range(len(values)), key=lambda k: values[k])
            values_index = values_index[-n_best_restarts:]
            start = []
            for j in values_index:
                start.append(point_dict[j])
            start = np.array(start)

        n_restarts = start.shape[0]
        bounds = [tuple(bound) for bound in self.bounds_opt]

        objective_function = wrapper_objective_acquisition_function
        grad_function = wrapper_gradient_acquisition_function

        if n_samples_parameters == 0:
            #TODO: CHECK THIS
            optimization = Optimization(LBFGS_NAME,
                                        objective_function,
                                        bounds,
                                        grad_function,
                                        minimize=False)

            args = (False, None, parallel, 0, optimization, self,
                    n_samples_parameters)

            opt_method = wrapper_optimize

            point_dict = {}
            for j in xrange(n_restarts):
                point_dict[j] = start[j, :]
        else:

            # TODO: change wrapper_objective_voi and wrapper_grad_voi_sgd so
            # that max_{a_{n+1}} is not solved in parallel for the several
            # starting points

            args_ = (self, DEFAULT_N_PARAMETERS)

            optimization = Optimization(
                SGD_NAME,
                objective_function,
                bounds,
                wrapper_evaluate_gradient_ei_sample_params,
                minimize=False,
                full_gradient=grad_function,
                args=args_,
                debug=True,
                simplex_domain=self.simplex_domain,
                **{'maxepoch': maxepoch})

            args = (False, None, parallel, 0, optimization,
                    n_samples_parameters, self)

            #TODO: THINK ABOUT N_THREADS. Do we want to run it in parallel?

            opt_method = wrapper_sgd

            random_seeds = np.random.randint(0, 4294967295, n_restarts)
            point_dict = {}
            for j in xrange(n_restarts):
                point_dict[j] = [start[j, :], random_seeds[j]]

        optimal_solutions = Parallel.run_function_different_arguments_parallel(
            opt_method, point_dict, *args)

        maximum_values = []
        for j in xrange(n_restarts):
            maximum_values.append(optimal_solutions.get(j)['optimal_value'])

        ind_max = np.argmax(maximum_values)

        logger.info("Results of the optimization of the EI: ")
        logger.info(optimal_solutions.get(ind_max))

        self.optimization_results.append(optimal_solutions.get(ind_max))

        return optimal_solutions.get(ind_max)
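
# Aside: the n_best_restarts screening in optimize() is a cheap pre-selection:
# score every candidate start with the acquisition function, then run the
# expensive optimizer only from the top-k scorers. A numpy sketch of just the
# selection step (illustrative; optimize() computes the scores in parallel):
import numpy as np


def select_best_starts(starts, score, k):
    # starts: np.array(n, d); score: callable mapping a point to a float
    values = np.array([score(point) for point in starts])
    top_k = np.argsort(values)[-k:]  # indices of the k largest scores
    return starts[top_k]
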
    def optimize_mean(self, n_restarts=10, candidate_solutions=None, candidate_values=None):
        """
        Checked
        :param n_restarts:
        :return:
        """
        if self.parameters is None:
            self.estimate_parameters_kernel()
            parameters = self.parameters
        else:
            parameters = self.parameters
        bounds = [tuple(bound) for bound in [self.gp.bounds[i] for i in range(self.x_domain)]]
        start = DomainService.get_points_domain(
            n_restarts, self.gp.bounds[0:self.x_domain], type_bounds=self.gp.type_bounds[0:self.x_domain])
        start_points = {i: start[i] for i in range(n_restarts)}
        optimization = Optimization(
            NELDER,
            wrapper_mean_objective,
            bounds,
            None,
            hessian=None, tol=None,
            minimize=False)
        args = (False, None, True, 0, optimization, self, parameters)
        sol = Parallel.run_function_different_arguments_parallel(
            wrapper_optimize, start_points, *args)
        solutions = []
        results_opt = []
        for i in range(n_restarts):
            if sol.get(i) is None:
                logger.info("Error computing the optimum of a_{n+1} at one sample at point %d"
                            % i)
                continue
            solutions.append(sol.get(i)['optimal_value'])
            results_opt.append(sol.get(i))
        ind_max = np.argmax(solutions)

        sol = results_opt[ind_max]
        sol['optimal_value'] = [sol['optimal_value']]

        if candidate_solutions is not None and len(candidate_solutions) > 0:
            n = len(candidate_values)
            point_dict = {}
            args = (False, None, True, 0, self, parameters)
            for j in range(n):
                point_dict[j] = np.array(candidate_solutions[j])
            values = Parallel.run_function_different_arguments_parallel(
                wrapper_mean_objective, point_dict, *args)
            values_candidates = [values[j] for j in range(n)]
            ind_max_2 = np.argmax(values_candidates)

            if np.max(values_candidates) > sol['optimal_value'][0]:
                solution = point_dict[ind_max_2]
                value = np.max(values_candidates)
                sol = {}
                sol['optimal_value'] = [value]
                sol['solution'] = solution

        return sol

    def test_load_discretization_file_exists(self):
        allow(JSONFile).read.and_return([])
        expect(DomainEntity).discretize_domain.never()
        expect(BoundsEntity).get_bounds_as_lists.once().and_return([2])

        assert DomainService.load_discretization('test_problem', 1, 0) == []
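
# Aside: a rough equivalent of the doubles expectations above written with
# mock's patch (patch and MagicMock are already used in
# test_load_discretization_file_not_exists); a sketch of the intent, not the
# doubles API:
with patch.object(JSONFile, 'read', new=MagicMock(return_value=[])):
    with patch.object(DomainEntity, 'discretize_domain') as discretize_mock:
        assert DomainService.load_discretization('test_problem', 1, 0) == []
        assert not discretize_mock.called
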
cache = True
parameters_distribution = None

gp = GPFittingService.get_gp(name_model, problem_name, type_kernel, dimensions,
                             bounds_domain, type_bounds, n_training, noise,
                             training_data, points, training_name, mle,
                             thinning, n_burning, max_steps_out, n_samples,
                             random_seed, kernel_values, mean_value,
                             var_noise_value, cache, same_correlation)
quadrature = BayesianQuadrature(
    gp,
    x_domain,
    distribution,
    parameters_distribution=parameters_distribution)
gp.data = gp.convert_from_list_to_numpy(gp.training_data)

bq = quadrature
bounds_x = [
    bq.gp.bounds[i] for i in xrange(len(bq.gp.bounds)) if i in bq.x_domain
]

np.random.seed(1)
points = DomainService.get_points_domain(100000,
                                         bounds_x,
                                         type_bounds=len(bounds_x) * [0])

sbo = SBO(quadrature, np.array(points))
point = np.array([[0.78777778, 0.32222222, 21., 178.77777778, 1.]])
z = sbo.evaluate(point)

print(z)
def bgo(objective_function,
        bounds_domain_x,
        integrand_function=None,
        simplex_domain=None,
        noise=False,
        n_samples_noise=0,
        bounds_domain_w=None,
        type_bounds=None,
        distribution=None,
        parameters_distribution=None,
        name_method='bqo',
        n_iterations=50,
        type_kernel=None,
        dimensions_kernel=None,
        n_restarts=10,
        n_best_restarts=0,
        problem_name=None,
        n_training=None,
        random_seed=1,
        mle=False,
        n_samples_parameters=5,
        maxepoch=50,
        thinning=50,
        n_burning=500,
        max_steps_out=1000,
        parallel=True,
        same_correlation=True,
        monte_carlo_sbo=True,
        n_samples_mc=5,
        n_restarts_mc=5,
        n_best_restarts_mc=0,
        factr_mc=1e12,
        maxiter_mc=10,
        method_opt_mc=LBFGS_NAME,
        n_restarts_mean=100,
        n_best_restarts_mean=10,
        n_samples_parameters_mean=5,
        maxepoch_mean=50,
        parallel_training=False,
        default_n_samples_parameters=None,
        default_n_samples=None):
    """
    Maximizes the objective function.

    :param objective_function: function G to be maximized:
        If the function is noise-free, G(([float])point) and returns [float].
        If the function is noisy, G(([float])point, (int) n_samples) and
            returns [(float) value, (float) variance]
    :param bounds_domain_x: [(float, float)]
    :param integrand_function: function F:
        If the function is noise-free, F(([float])point) and returns [float].
        If the function is noisy, F(([float])point, (int) n_samples) and
            returns [(float) value, (float) variance]
    :param simplex_domain: (float) If not None, the domain is additionally constrained so
        that the entries of each point sum to simplex_domain.
    :param noise: (boolean) True if the evaluations of the objective function are noisy
    :param n_samples_noise: (int)  If noise is true, we take n_samples of the function to estimate
            its value.
    :param bounds_domain_w: [([float, float] or [float])], the first case is when the bounds
            are lower or upper bound of the respective entry; in the second case, it's list of
            finite points representing the domain of that entry (e.g. when W is finite).
    :param type_bounds: [0 or 1], 0 if the bounds are lower or upper bound of the respective
            entry, 1 if the bounds are all the finite options for that entry.
    :param distribution: str, probability distributions for the Bayesian quadrature (i.e. the
        distribution of W)
    :param parameters_distribution: {str: float}
    :param name_method: str, Options: 'SBO', 'EI'
    :param n_iterations: int
    :param type_kernel: [str] Must be in possible_kernels. If it's a product of kernels it
            should be a list as: [PRODUCT_KERNELS_SEPARABLE, NAME_1_KERNEL, NAME_2_KERNEL].
            If we want to use a scaled NAME_1_KERNEL, the parameter must be
            [SCALED_KERNEL, NAME_1_KERNEL].
    :param dimensions_kernel: [int]. It has only the n_tasks for the task_kernels, and for the
            PRODUCT_KERNELS_SEPARABLE contains the dimensions of every kernel in the product, and
            the total dimension of the product_kernels_separable too in the first entry.
    :param n_restarts: (int) Number of starting points to optimize the acquisition function
    :param n_best_restarts:  (int) Number of best starting points chosen from the n_restart
            points.
    :param problem_name: str
    :param n_training: (int) number of training points
    :param random_seed: int
    :param mle: (boolean) If true, fits the GP by MLE. Otherwise, we use a fully Bayesian approach.
    :param n_samples_parameters: (int) Number of samples of the parameter to estimate the stochastic
        gradient when optimizing the acquisition function.
    :param maxepoch: (int) Maximum number of iterations of the SGD when optimizing the acquisition
        function.
    :param thinning: int
    :param n_burning: (int) Number of burnings samples for slice sampling.
    :param max_steps_out: (int)  Maximum number of steps out for the stepping out  or
        doubling procedure in slice sampling.
    :param parallel: (boolean)
    :param same_correlation: (boolean) If true, it uses the same correlations for the task kernel.
    :param monte_carlo_sbo: (boolean) If True, the code estimates the objective function and
        gradient with the discretization-free approach.
    :param n_samples_mc: (int) Number of samples for the MC method.
    :param n_restarts_mc: (int) Number of restarts to optimize the posterior mean given a sample of
        the normal random variable.
    :param n_best_restarts_mc:  (int) Number of best restarting points used to optimize the
        posterior mean given a sample of the normal random variable.
    :param factr_mc: (float) Parameter of LBFGS to optimize a sample of BQO when using the
        discretization-free approach.
    :param maxiter_mc: (int) Max number of iterations to optimize a sample of BQO when using the
        discretization-free approach.
    :param method_opt_mc: (str) Optimization method used when using the discretization-free approach
        of BQO.
    :param n_restarts_mean: (int) Number of starting points to optimize the posterior mean.
    :param n_best_restarts_mean: int
    :param n_samples_parameters_mean: (int) Number of sample of hyperparameters to estimate the
        stochastic gradient inside of the SGD when optimizing the posterior mean.
    :param maxepoch_mean: (int) Maxepoch for the optimization of the posterior mean.
    :param parallel_training: (boolean) Train in parallel if it's True.
    :param default_n_samples_parameters: (int) Number of samples of Z for the discretization-free
        estimation of the VOI.
    :param default_n_samples: (int) Number of samples of the hyperparameters to estimate the VOI.
    :return: {'optimal_solution': np.array(n),
            'optimal_value': float}
    """

    np.random.seed(random_seed)
    # default_parameters

    dim_x = len(bounds_domain_x)
    x_domain = range(dim_x)

    if name_method == 'bqo':
        name_method = SBO_METHOD

    dim_w = 0
    if name_method == SBO_METHOD:
        if type_bounds is None:
            dim_w = len(bounds_domain_w)
        elif type_bounds[-1] == 1:
            dim_w = 1
        else:
            dim_w = len(type_bounds[dim_x:])

    total_dim = dim_w + dim_x

    if type_bounds is None:
        type_bounds = total_dim * [0]

    if bounds_domain_w is None:
        bounds_domain_w = []
    bounds_domain = [
        list(bound) for bound in bounds_domain_x + bounds_domain_w
    ]

    training_name = None

    if problem_name is None:
        problem_name = 'user_problem'

    if type_kernel is None:
        if name_method == SBO_METHOD:
            if type_bounds[-1] == 1:
                type_kernel = [
                    PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME
                ]
                dimensions_kernel = [total_dim, dim_x, len(bounds_domain[-1])]
            else:
                type_kernel = [SCALED_KERNEL, MATERN52_NAME]
                dimensions_kernel = [total_dim]
        elif name_method == EI_METHOD:
            type_kernel = [SCALED_KERNEL, MATERN52_NAME]
            dimensions_kernel = [total_dim]

    if dimensions_kernel is None:
        raise Exception("Not enough inputs to run the BGO algorithm")

    if n_training is None:
        if type_bounds[-1] == 1:
            n_training = len(bounds_domain[-1])
        else:
            n_training = 5

    if distribution is None:
        if type_bounds[-1] == 1:
            distribution = UNIFORM_FINITE
        else:
            distribution = GAMMA

    method_optimization = name_method

    name_model = 'gp_fitting_gaussian'

    if name_method == SBO_METHOD:
        training_function = integrand_function
    elif name_method == EI_METHOD:
        training_function = objective_function

    bounds_domain_x = BoundsEntity.to_bounds_entity(bounds_domain_x)

    spec = {
        'name_model': name_model,
        'problem_name': problem_name,
        'type_kernel': type_kernel,
        'dimensions': dimensions_kernel,
        'bounds_domain': bounds_domain,
        'type_bounds': type_bounds,
        'n_training': n_training,
        'noise': noise,
        'training_data': None,
        'points': None,
        'training_name': training_name,
        'mle': mle,
        'thinning': thinning,
        'n_burning': n_burning,
        'max_steps_out': max_steps_out,
        'n_samples': n_samples_noise,
        'random_seed': random_seed,
        'kernel_values': None,
        'mean_value': None,
        'var_noise_value': None,
        'cache': True,
        'same_correlation': same_correlation,
        'use_only_training_points': True,
        'optimization_method': method_optimization,
        'n_samples_parameters': n_samples_parameters,
        'parallel_training': parallel_training,
        'simplex_domain': simplex_domain,
        'objective_function': training_function,
        'dim_x': dim_x,
        'choose_noise': True,
        'bounds_domain_x': bounds_domain_x,
    }

    gp_model = GPFittingService.from_dict(spec)

    quadrature = None
    acquisition_function = None

    domain = DomainService.from_dict(spec)

    if method_optimization not in _possible_optimization_methods:
        raise Exception("Incorrect BGO method")

    if method_optimization == SBO_METHOD:
        quadrature = BayesianQuadrature(
            gp_model,
            x_domain,
            distribution,
            parameters_distribution=parameters_distribution)

        acquisition_function = SBO(quadrature,
                                   np.array(domain.discretization_domain_x))
    elif method_optimization == EI_METHOD:
        acquisition_function = EI(gp_model, noisy_evaluations=noise)

    bgo_obj = BGO(acquisition_function,
                  gp_model,
                  n_iterations,
                  problem_name,
                  training_name,
                  random_seed,
                  n_training,
                  name_model,
                  method_optimization,
                  minimize=False,
                  n_samples=n_samples_noise,
                  noise=noise,
                  quadrature=quadrature,
                  parallel=parallel,
                  number_points_each_dimension_debug=None,
                  n_samples_parameters=n_samples_parameters,
                  use_only_training_points=True,
                  objective_function=objective_function,
                  training_function=training_function)

    opt_params_mc = {}

    if factr_mc is not None:
        opt_params_mc['factr'] = factr_mc
    if maxiter_mc is not None:
        opt_params_mc['maxiter'] = maxiter_mc

    result = bgo_obj.optimize(
        debug=False,
        n_samples_mc=n_samples_mc,
        n_restarts_mc=n_restarts_mc,
        n_best_restarts_mc=n_best_restarts_mc,
        monte_carlo_sbo=monte_carlo_sbo,
        n_restarts=n_restarts,
        n_best_restarts=n_best_restarts,
        n_samples_parameters=n_samples_parameters,
        n_restarts_mean=n_restarts_mean,
        n_best_restarts_mean=n_best_restarts_mean,
        random_seed=bgo_obj.random_seed,
        method_opt_mc=method_opt_mc,
        n_samples_parameters_mean=n_samples_parameters_mean,
        maxepoch_mean=maxepoch_mean,
        maxepoch=maxepoch,
        threshold_sbo=0.001,
        optimize_only_posterior_mean=False,
        start_optimize_posterior_mean=0,
        optimize_mean_each_iteration=False,
        default_n_samples_parameters=default_n_samples_parameters,
        default_n_samples=default_n_samples,
        **opt_params_mc)

    return result
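
# A minimal usage sketch of bgo with plain EI (the toy objective and all
# argument values below are made up for illustration; the EI path needs no
# integrand_function or bounds_domain_w):
def toy_objective(point):
    # noise-free objective G: returns [float], maximized at x = 3
    return [-(point[0] - 3.0) ** 2]


result = bgo(toy_objective,
             bounds_domain_x=[(0.0, 10.0)],
             name_method=EI_METHOD,
             n_iterations=10,
             random_seed=1)
print(result['optimal_solution'], result['optimal_value'])
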
var_noise_value = None
cache = True
parameters_distribution = None

gp = GPFittingService.get_gp(name_model, problem_name, type_kernel, dimensions,
                             bounds_domain, type_bounds, n_training, noise,
                             training_data, points, training_name, mle,
                             thinning, n_burning, max_steps_out, n_samples,
                             random_seed, kernel_values, mean_value,
                             var_noise_value, cache, same_correlation)
quadrature = BayesianQuadrature(
    gp,
    x_domain,
    distribution,
    parameters_distribution=parameters_distribution)
gp.data = gp.convert_from_list_to_numpy(gp.training_data)

entry = {
    'dim_x': dim_x,
    'choose_noise': True,
    'bounds_domain_x': BoundsEntity.to_bounds_entity(bounds_domain_x),
    'dim_w': 1,
    'number_points_each_dimension': number_points_each_dimension,
    'problem_name': problem_name,
}
domain = DomainService.from_dict(entry)
sbo = SBO(quadrature, np.array(domain.discretization_domain_x))
point = np.array([[0.78777778, 0.32222222, 21., 178.77777778, 1.]])
z = sbo.evaluate_mc(point, 100, n_restarts=5)

print(z)

    @classmethod
    def from_spec(cls, spec):
        """
        Construct BGO instance from spec
        :param spec: RunSpecEntity

        :return: BGO
        # TO DO: It now only returns domain
        """

        random_seed = spec.get('random_seed')
        method_optimization = spec.get('method_optimization')

        logger.info("Training GP model")
        logger.info("Random seed is: %d" % random_seed)
        logger.info("Algorithm used is:")
        logger.info(method_optimization)

        gp_model = GPFittingService.from_dict(spec)
        noise = spec.get('noise')
        quadrature = None
        acquisition_function = None

        domain = DomainService.from_dict(spec)

        if method_optimization not in cls._possible_optimization_methods:
            raise Exception("Incorrect BGO method")

        if method_optimization == SBO_METHOD:
            x_domain = spec.get('x_domain')
            distribution = spec.get('distribution')
            parameters_distribution = spec.get('parameters_distribution')
            quadrature = BayesianQuadrature(
                gp_model,
                x_domain,
                distribution,
                parameters_distribution=parameters_distribution)

            acquisition_function = SBO(
                quadrature, np.array(domain.discretization_domain_x))
        elif method_optimization == MULTI_TASK_METHOD:
            x_domain = spec.get('x_domain')
            distribution = spec.get('distribution')
            parameters_distribution = spec.get('parameters_distribution')
            quadrature = BayesianQuadrature(
                gp_model,
                x_domain,
                distribution,
                parameters_distribution=parameters_distribution,
                model_only_x=True)
            acquisition_function = MultiTasks(
                quadrature, quadrature.parameters_distribution.get(TASKS))
        elif method_optimization == EI_METHOD:
            acquisition_function = EI(gp_model, noisy_evaluations=noise)
        elif method_optimization == SDE_METHOD:
            x_domain = len(spec.get('x_domain'))
            parameters_distribution = spec.get('parameters_distribution')
            domain_random = np.array(parameters_distribution['domain_random'])
            weights = np.array(parameters_distribution['weights'])
            acquisition_function = SDE(gp_model, domain_random, x_domain,
                                       weights)

        problem_name = spec.get('problem_name')
        training_name = spec.get('training_name')
        n_samples = spec.get('n_samples')
        minimize = spec.get('minimize')
        n_iterations = spec.get('n_iterations')
        name_model = spec.get('name_model')
        parallel = spec.get('parallel')
        n_training = spec.get('n_training')
        number_points_each_dimension_debug = spec.get(
            'number_points_each_dimension_debug')
        n_samples_parameters = spec.get('n_samples_parameters', 0)
        use_only_training_points = spec.get('use_only_training_points', True)

        n_iterations = n_iterations - (
            len(gp_model.training_data['evaluations']) - n_training)

        bgo = cls(acquisition_function,
                  gp_model,
                  n_iterations,
                  problem_name,
                  training_name,
                  random_seed,
                  n_training,
                  name_model,
                  method_optimization,
                  minimize=minimize,
                  n_samples=n_samples,
                  noise=noise,
                  quadrature=quadrature,
                  parallel=parallel,
                  number_points_each_dimension_debug=number_points_each_dimension_debug,
                  n_samples_parameters=n_samples_parameters,
                  use_only_training_points=use_only_training_points)

        if n_training < len(bgo.gp_model.training_data['evaluations']):
            extra_iterations = len(
                bgo.gp_model.training_data['evaluations']) - n_training
            data = JSONFile.read(bgo.objective.file_path)
            bgo.objective.evaluated_points = data['evaluated_points'][
                0:extra_iterations]
            bgo.objective.objective_values = data['objective_values'][
                0:extra_iterations]
            bgo.objective.model_objective_values = \
                data['model_objective_values'][0:extra_iterations]
            bgo.objective.standard_deviation_evaluations = data[
                'standard_deviation_evaluations']

        return bgo