def test_from_dict(self):
        name_model = 'gp_fitting_gaussian'
        dimensions = [1]
        bounds = [[-10, 10]]
        n_training = 30
        points_ = list(np.linspace(-10, 10, n_training))
        points = [[point] for point in points_]

        gp = GPFittingService.get_gp(name_model,
                                     self.problem_name,
                                     [SCALED_KERNEL, MATERN52_NAME],
                                     dimensions,
                                     bounds,
                                     type_bounds=[0],
                                     n_training=n_training,
                                     noise=False,
                                     points=points,
                                     mle=True,
                                     random_seed=1)

        model = gp.serialize()

        spec = {
            'name_model': name_model,
            'problem_name': self.problem_name,
            'type_kernel': [SCALED_KERNEL, MATERN52_NAME],
            'dimensions': dimensions,
            'bounds_domain': bounds,
            'type_bounds': [0],
            'n_training': n_training,
            'noise': False,
            'points': points,
            'mle': True,
            'random_seed': 1
        }

        gp_2 = GPFittingService.from_dict(spec)

        model_2 = gp_2.serialize()

        npt.assert_almost_equal(model['kernel_values'],
                                model_2['kernel_values'])
        npt.assert_almost_equal(model['mean_value'], model_2['mean_value'])
        npt.assert_almost_equal(model['training_data']['evaluations'],
                                model_2['training_data']['evaluations'])
        npt.assert_almost_equal(model['training_data']['points'],
                                model_2['training_data']['points'])
        npt.assert_almost_equal(model['training_data']['var_noise'],
                                model_2['training_data']['var_noise'])

        del model['kernel_values']
        del model['training_data']
        del model['mean_value']
        del model['data']
        del model_2['kernel_values']
        del model_2['training_data']
        del model_2['mean_value']
        del model_2['data']
        assert model == model_2
    def test_get_filename(self):
        name = GPFittingService._get_filename(GPFittingGaussian,
                                              self.problem_name,
                                              self.type_kernel,
                                              self.trainining_name)
        file = 'gp_GPFittingGaussian_test_problem_Product_of_kernels_with_separable_domain_Tasks_' \
               'Kernel_Matern52_training.json'
        assert name == file
    def define_gp_model(self):
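        # Builds one independent 1-D GP per output dimension over [1, max_iterations],
        # each fit to that dimension's slice of the training evaluations.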
        def toy_objective_function(x):
            return [np.sum(x)]

        self.gp_model = {}
        for i in range(self.dimensions):
            tmp_training_data = {}
            tmp_training_data['var_noise'] = []
            tmp_training_data['points'] = list(self.training_data['points'])
            tmp_training_data['evaluations'] = list(
                [t[i] for t in self.training_data['evaluations']])

            spec = {
                'name_model': 'gp_fitting_gaussian',
                'problem_name': self.problem_name,
                'type_kernel': [ORNSTEIN_KERNEL],
                'dimensions': [1],
                'bounds_domain': [[1, self.max_iterations]],
                'type_bounds': [0],
                'n_training': 10,
                'noise': False,
                'training_data': tmp_training_data,
                'points': None,
                'training_name': None,
                'mle': False,
                'thinning': self.n_thinning,
                'n_burning': self.n_burning,
                'max_steps_out': 1000,
                'n_samples': 0,
                'random_seed': 1,
                'kernel_values': None,
                'mean_value': None,
                'var_noise_value': None,
                'cache': False,
                'same_correlation': True,
                'use_only_training_points': True,
                'optimization_method': 'SBO',
                'n_samples_parameters': 10,
                'parallel_training': False,
                'simplex_domain': None,
                'objective_function': toy_objective_function,
                'define_samplers': False
            }

            model = GPFittingService.from_dict(spec)
            model.dimension_parameters -= 2
            model.best_result = self.best_result
            model.current_iteration = self.current_iteration
            model.raw_results = dict(
                self.raw_results
            )  #maybe change this to only keep the dimension?
            model.data['points'] = list(self.points_differences)
            model.mean_params = []

            self.gp_model[i] = model
    def define_gp_model(self):

        def toy_objective_function(x):
            return [np.sum(x)]

        spec = {
            'name_model': 'gp_fitting_gaussian',
            'problem_name': self.problem_name,
            'type_kernel': ['swersky_kernel'],
            'dimensions': [1],
            'bounds_domain': [[1, self.max_iterations]],
            'type_bounds': [0],
            'n_training': 10,
            'noise': False,
            'training_data': self.training_data,
            'points': None,
            'training_name': None,
            'mle': False,
            'thinning': self.n_thinning,
            'n_burning': self.n_burning,
            'max_steps_out': 1000,
            'n_samples': 0,
            'random_seed': 1,
            'kernel_values': None,
            'mean_value': None,
            'var_noise_value': None,
            'cache': False,
            'same_correlation': True,
            'use_only_training_points': True,
            'optimization_method': 'SBO',
            'n_samples_parameters': 10,
            'parallel_training': False,
            'simplex_domain': None,
            'objective_function': toy_objective_function,
            'define_samplers': False
        }

        model = GPFittingService.from_dict(spec)
        model.dimension_parameters -= 2
        model.best_result = self.best_result
        model.current_iteration = self.current_iteration
        model.raw_results = dict(self.raw_results)
        model.data['points'] = [[float(i)] for i in range(0, len(self.raw_results['values']))]
        model.mean_params = []

        self.gp_model = model
    def test_get_gp_cached(self):
        name_model = 'gp_fitting_gaussian'
        dimensions = [1]
        bounds = [[-10, 10]]
        n_training = 30
        points_ = list(np.linspace(-10, 10, n_training))
        points = [[point] for point in points_]

        gp = GPFittingService.get_gp(name_model,
                                     self.problem_name,
                                     [SCALED_KERNEL, MATERN52_NAME],
                                     dimensions,
                                     bounds,
                                     type_bounds=[0],
                                     n_training=n_training,
                                     noise=False,
                                     points=points,
                                     mle=True,
                                     random_seed=1)

        model = gp.serialize()

        assert model == {
            'type_kernel': [SCALED_KERNEL, MATERN52_NAME],
            'training_data': model['training_data'],
            'data': model['data'],
            'dimensions': [1],
            'kernel_values': model['kernel_values'],
            'mean_value': model['mean_value'],
            'var_noise_value': [1e-10],
            'thinning': 0,
            'bounds_domain': bounds,
            'n_burning': 0,
            'max_steps_out': 1,
            'type_bounds': [0],
            'name_model': 'gp_fitting_gaussian',
            'problem_name': 'test_problem',
            'training_name': 'default_training_data_30_points_rs_1',
            'same_correlation': False,
            'start_point_sampler': model['start_point_sampler'],
            'samples_parameters': model['samples_parameters'],
        }
    def test_gp_no_dir(self, mock_mkdir, mock_exists):
        mock_exists.return_value = False
        name_model = 'gp_fitting_gaussian'
        dimensions = [1]
        bounds = [[-10, 10]]
        n_training = 30
        points_ = list(np.linspace(-10, 10, n_training))
        points = [[point] for point in points_]

        gp = GPFittingService.get_gp(name_model,
                                     self.problem_name,
                                     [SCALED_KERNEL, MATERN52_NAME],
                                     dimensions,
                                     bounds,
                                     type_bounds=[0],
                                     n_training=n_training,
                                     noise=False,
                                     points=points,
                                     mle=True,
                                     random_seed=1)

        mock_mkdir.assert_called_with('problems/test_problem/data')
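
# Note: the standalone fragment below is excerpted from a larger script; it assumes that
# name_model, problem_name, type_kernel, dimensions, bounds_domain, type_bounds, n_training,
# training_name, mle, thinning, n_burning, max_steps_out, random_seed, x_domain and
# distribution are defined earlier in that script.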
same_correlation = True
debug = False
number_points_each_dimension_debug = [10, 10, 10, 10]
noise = False
training_data = None
points = None
n_samples = 0
kernel_values = None
mean_value = None
var_noise_value = None
cache = True
parameters_distribution = None

gp = GPFittingService.get_gp(name_model, problem_name, type_kernel, dimensions,
                             bounds_domain, type_bounds, n_training, noise,
                             training_data, points, training_name, mle,
                             thinning, n_burning, max_steps_out, n_samples,
                             random_seed, kernel_values, mean_value,
                             var_noise_value, cache, same_correlation)
quadrature = BayesianQuadrature(
    gp,
    x_domain,
    distribution,
    parameters_distribution=parameters_distribution)
gp.data = gp.convert_from_list_to_numpy(gp.training_data)

bq = quadrature
bounds_x = [
    bq.gp.bounds[i] for i in xrange(len(bq.gp.bounds)) if i in bq.x_domain
]

np.random.seed(1)
def bgo(objective_function,
        bounds_domain_x,
        integrand_function=None,
        simplex_domain=None,
        noise=False,
        n_samples_noise=0,
        bounds_domain_w=None,
        type_bounds=None,
        distribution=None,
        parameters_distribution=None,
        name_method='bqo',
        n_iterations=50,
        type_kernel=None,
        dimensions_kernel=None,
        n_restarts=10,
        n_best_restarts=0,
        problem_name=None,
        n_training=None,
        random_seed=1,
        mle=False,
        n_samples_parameters=5,
        maxepoch=50,
        thinning=50,
        n_burning=500,
        max_steps_out=1000,
        parallel=True,
        same_correlation=True,
        monte_carlo_sbo=True,
        n_samples_mc=5,
        n_restarts_mc=5,
        n_best_restarts_mc=0,
        factr_mc=1e12,
        maxiter_mc=10,
        method_opt_mc=LBFGS_NAME,
        n_restarts_mean=100,
        n_best_restarts_mean=10,
        n_samples_parameters_mean=5,
        maxepoch_mean=50,
        parallel_training=False,
        default_n_samples_parameters=None,
        default_n_samples=None):
    """
    Maximizes the objective function.

    :param objective_function: function G to be maximized:
        If the function is noise-free, G takes a point ([float]) and returns [float].
        If the function is noisy, G takes a point ([float]) and (int) n_samples, and
            returns [(float) value, (float) variance].
    :param bounds_domain_x: [(float, float)]
    :param integrand_function: function F:
        If the function is noise-free, F takes a point ([float]) and returns [float].
        If the function is noisy, F takes a point ([float]) and (int) n_samples, and
            returns [(float) value, (float) variance].
    :param simplex_domain: (float) {sum[i, from 1 to domain]=simplex_domain}
    :param noise: (boolean) True if the evaluations of the objective function are noisy
    :param n_samples_noise: (int) If noise is True, n_samples_noise evaluations of the function
            are used to estimate its value.
    :param bounds_domain_w: [([float, float] or [float])], the first case gives the lower and
            upper bounds of the respective entry; the second case is a list of the finite points
            forming the domain of that entry (e.g. when W is finite).
    :param type_bounds: [0 or 1], 0 if the bounds are lower or upper bound of the respective
            entry, 1 if the bounds are all the finite options for that entry.
    :param distribution: str, probability distribution for the Bayesian quadrature (i.e. the
        distribution of W)
    :param parameters_distribution: {str: float}
    :param name_method: str, Options: 'SBO', 'EI'
    :param n_iterations: int
    :param type_kernel: [str] Must be in possible_kernels. If it's a product of kernels it
            should be a list as: [PRODUCT_KERNELS_SEPARABLE, NAME_1_KERNEL, NAME_2_KERNEL].
            If we want to use a scaled NAME_1_KERNEL, the parameter must be
            [SCALED_KERNEL, NAME_1_KERNEL].
    :param dimensions_kernel: [int]. For task kernels it contains only n_tasks; for
            PRODUCT_KERNELS_SEPARABLE the first entry is the total dimension of the product
            kernel, followed by the dimensions of every kernel in the product.
    :param n_restarts: (int) Number of starting points to optimize the acquisition function
    :param n_best_restarts: (int) Number of best starting points chosen from the n_restarts
            points.
    :param problem_name: str
    :param n_training: (int) number of training points
    :param random_seed: int
    :param mle: (boolean) If true, fits the GP by MLE. Otherwise, we use a fully Bayesian approach.
    :param n_samples_parameters: (int) Number of samples of the parameters used to estimate the
        stochastic gradient when optimizing the acquisition function.
    :param maxepoch: (int) Maximum number of iterations of the SGD when optimizing the acquisition
        function.
    :param thinning: int
    :param n_burning: (int) Number of burn-in samples for slice sampling.
    :param max_steps_out: (int) Maximum number of steps out for the stepping-out or
        doubling procedure in slice sampling.
    :param parallel: (boolean)
    :param same_correlation: (boolean) If true, it uses the same correlations for the task kernel.
    :param monte_carlo_sbo: (boolean) If True, the code estimates the objective function and
        gradient with the discretization-free approach.
    :param n_samples_mc: (int) Number of samples for the MC method.
    :param n_restarts_mc: (int) Number of restarts to optimize the posterior mean given a sample of
        the normal random variable.
    :param n_best_restarts_mc:  (int) Number of best restarting points used to optimize the
        posterior mean given a sample of the normal random variable.
    :param factr_mc: (float) Parameter of LBFGS to optimize a sample of BQO when using the
        discretization-free approach.
    :param maxiter_mc: (int) Max number of iterations to optimize a sample of BQO when using the
        discretization-free approach.
    :param method_opt_mc: (str) Optimization method used when using the discretization-free approach
        of BQO.
    :param n_restarts_mean: (int) Number of starting points to optimize the posterior mean.
    :param n_best_restarts_mean: int
    :param n_samples_parameters_mean: (int) Number of samples of hyperparameters used to estimate
        the stochastic gradient inside the SGD when optimizing the posterior mean.
    :param maxepoch_mean: (int) Maxepoch for the optimization of the posterior mean.
    :param parallel_training: (boolean) Train in parallel if it's True.
    :param default_n_samples_parameters: (int) Number of samples of Z for the discretization-free
        estimation of the VOI.
    :param default_n_samples: (int) Number of samples of the hyperparameters to estimate the VOI.
    :return: {'optimal_solution': np.array(n),
            'optimal_value': float}
    """

    np.random.seed(random_seed)
    # default_parameters

    dim_x = len(bounds_domain_x)
    x_domain = range(dim_x)

    if name_method == 'bqo':
        name_method = SBO_METHOD

    dim_w = 0
    if name_method == SBO_METHOD:
        if type_bounds is None:
            dim_w = len(bounds_domain_w)
        elif type_bounds is not None and type_bounds[-1] == 1:
            dim_w = 1
        elif type_bounds is not None:
            dim_w = len(type_bounds[dim_x:])

    total_dim = dim_w + dim_x

    if type_bounds is None:
        type_bounds = total_dim * [0]

    if bounds_domain_w is None:
        bounds_domain_w = []
    bounds_domain = [
        list(bound) for bound in bounds_domain_x + bounds_domain_w
    ]

    training_name = None

    if problem_name is None:
        problem_name = 'user_problem'

    if type_kernel is None:
        if name_method == SBO_METHOD:
            if type_bounds[-1] == 1:
                type_kernel = [
                    PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME
                ]
                dimensions_kernel = [total_dim, dim_x, len(bounds_domain[-1])]
            else:
                type_kernel = [SCALED_KERNEL, MATERN52_NAME]
                dimensions_kernel = [total_dim]
        elif name_method == EI_METHOD:
            type_kernel = [SCALED_KERNEL, MATERN52_NAME]
            dimensions_kernel = [total_dim]

    if dimensions_kernel is None:
        raise Exception("Not enough inputs to run the BGO algorithm")

    if n_training is None:
        if type_bounds[-1] == 1:
            n_training = len(bounds_domain[-1])
        else:
            n_training = 5

    if distribution is None:
        if type_bounds[-1] == 1:
            distribution = UNIFORM_FINITE
        else:
            distribution = GAMMA

    method_optimization = name_method

    name_model = 'gp_fitting_gaussian'

    if name_method == SBO_METHOD:
        training_function = integrand_function
    elif name_method == EI_METHOD:
        training_function = objective_function

    bounds_domain_x = BoundsEntity.to_bounds_entity(bounds_domain_x)

    spec = {
        'name_model': name_model,
        'problem_name': problem_name,
        'type_kernel': type_kernel,
        'dimensions': dimensions_kernel,
        'bounds_domain': bounds_domain,
        'type_bounds': type_bounds,
        'n_training': n_training,
        'noise': noise,
        'training_data': None,
        'points': None,
        'training_name': training_name,
        'mle': mle,
        'thinning': thinning,
        'n_burning': n_burning,
        'max_steps_out': max_steps_out,
        'n_samples': n_samples_noise,
        'random_seed': random_seed,
        'kernel_values': None,
        'mean_value': None,
        'var_noise_value': None,
        'cache': True,
        'same_correlation': same_correlation,
        'use_only_training_points': True,
        'optimization_method': method_optimization,
        'n_samples_parameters': n_samples_parameters,
        'parallel_training': parallel_training,
        'simplex_domain': simplex_domain,
        'objective_function': training_function,
        'dim_x': dim_x,
        'choose_noise': True,
        'bounds_domain_x': bounds_domain_x,
    }

    gp_model = GPFittingService.from_dict(spec)

    quadrature = None
    acquisition_function = None

    domain = DomainService.from_dict(spec)

    if method_optimization not in _possible_optimization_methods:
        raise Exception("Incorrect BGO method")

    if method_optimization == SBO_METHOD:
        quadrature = BayesianQuadrature(
            gp_model,
            x_domain,
            distribution,
            parameters_distribution=parameters_distribution)

        acquisition_function = SBO(quadrature,
                                   np.array(domain.discretization_domain_x))
    elif method_optimization == EI_METHOD:
        acquisition_function = EI(gp_model, noisy_evaluations=noise)

    bgo_obj = BGO(acquisition_function,
                  gp_model,
                  n_iterations,
                  problem_name,
                  training_name,
                  random_seed,
                  n_training,
                  name_model,
                  method_optimization,
                  minimize=False,
                  n_samples=n_samples_noise,
                  noise=noise,
                  quadrature=quadrature,
                  parallel=parallel,
                  number_points_each_dimension_debug=None,
                  n_samples_parameters=n_samples_parameters,
                  use_only_training_points=True,
                  objective_function=objective_function,
                  training_function=training_function)

    opt_params_mc = {}

    if factr_mc is not None:
        opt_params_mc['factr'] = factr_mc
    if maxiter_mc is not None:
        opt_params_mc['maxiter'] = maxiter_mc

    result = bgo_obj.optimize(
        debug=False,
        n_samples_mc=n_samples_mc,
        n_restarts_mc=n_restarts_mc,
        n_best_restarts_mc=n_best_restarts_mc,
        monte_carlo_sbo=monte_carlo_sbo,
        n_restarts=n_restarts,
        n_best_restarts=n_best_restarts,
        n_samples_parameters=n_samples_parameters,
        n_restarts_mean=n_restarts_mean,
        n_best_restarts_mean=n_best_restarts_mean,
        random_seed=bgo_obj.random_seed,
        method_opt_mc=method_opt_mc,
        n_samples_parameters_mean=n_samples_parameters_mean,
        maxepoch_mean=maxepoch_mean,
        maxepoch=maxepoch,
        threshold_sbo=0.001,
        optimize_only_posterior_mean=False,
        start_optimize_posterior_mean=0,
        optimize_mean_each_iteration=False,
        default_n_samples_parameters=default_n_samples_parameters,
        default_n_samples=default_n_samples,
        **opt_params_mc)

    return result
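
# --- Hedged usage sketch (not part of the original source) ---
# A minimal call to bgo() on a toy noise-free problem, assuming numpy and the constants
# imported elsewhere in this file (e.g. EI_METHOD) are in scope. The objective takes a
# [float] point and returns [float], as the docstring above describes; the parameter
# choices here are illustrative only.
def toy_objective(x):
    return [-np.sum(np.array(x) ** 2)]

result = bgo(toy_objective,
             bounds_domain_x=[[-1.0, 1.0], [-1.0, 1.0]],
             name_method=EI_METHOD,  # plain expected improvement, no integrand needed
             n_iterations=5,
             n_training=5,
             random_seed=1)
print(result)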
    def test_get_gp(self):
        name_model = 'gp_fitting_gaussian'
        dimensions = [1]
        bounds = [[-10, 10]]
        n_training = 30
        points_ = list(np.linspace(-10, 10, n_training))
        points = [[point] for point in points_]

        expect(JSONFile).read.and_return(None)
        gp = GPFittingService.get_gp(name_model,
                                     self.problem_name,
                                     [SCALED_KERNEL, MATERN52_NAME],
                                     dimensions,
                                     bounds,
                                     type_bounds=[0],
                                     n_training=n_training,
                                     noise=False,
                                     points=points,
                                     mle=True,
                                     random_seed=1)
        model = gp.serialize()

        data = {
            'points': points,
            'var_noise': [],
            'evaluations': points_,
        }

        assert model == {
            'type_kernel': [SCALED_KERNEL, MATERN52_NAME],
            'training_data': data,
            'data': data,
            'dimensions': [1],
            'kernel_values': model['kernel_values'],
            'mean_value': model['mean_value'],
            'var_noise_value': [1e-10],
            'thinning': 0,
            'bounds_domain': bounds,
            'n_burning': 0,
            'max_steps_out': 1,
            'type_bounds': [0],
            'name_model': 'gp_fitting_gaussian',
            'problem_name': 'test_problem',
            'training_name': 'default_training_data_30_points_rs_1',
            'same_correlation': False,
            'start_point_sampler': model['start_point_sampler'],
            'samples_parameters': model['samples_parameters'],
        }

        estimation = gp.compute_posterior_parameters(
            np.array([[1.4], [2.4], [0], [-9.9], [8.5], [points_[3]]]))

        points_2 = np.array([[1.4], [2.4], [0], [-9.9], [8.5],
                             [points_[3]]]).reshape(6)
        npt.assert_almost_equal(estimation['mean'], points_2, decimal=4)
        npt.assert_almost_equal(estimation['cov'], np.zeros((6, 6)))

        gp = GPFittingService.get_gp(
            name_model,
            "test_problem_with_tasks",
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            [2, 1, 2], [[-5, 5], [0, 1]],
            type_bounds=[0, 1],
            n_training=n_training,
            noise=False,
            mle=True,
            random_seed=1,
            same_correlation=True)

        model = gp.serialize()

        assert model == {
            'type_kernel': [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME,
                            TASKS_KERNEL_NAME],
            'training_data': model['training_data'],
            'data': model['training_data'],
            'dimensions': [2, 1, 2],
            'kernel_values': model['kernel_values'],
            'mean_value': model['mean_value'],
            'var_noise_value': model['var_noise_value'],
            'thinning': 0,
            'bounds_domain': [[-5, 5], [0, 1]],
            'n_burning': 0,
            'max_steps_out': 1,
            'type_bounds': [0, 1],
            'name_model': 'gp_fitting_gaussian',
            'problem_name': 'test_problem_with_tasks',
            'training_name': 'default_training_data_30_points_rs_1',
            'same_correlation': True,
            'start_point_sampler': model['start_point_sampler'],
            'samples_parameters': model['samples_parameters'],
        }
    def from_spec(cls, spec):
        """
        Construct BGO instance from spec
        :param spec: RunSpecEntity

        :return: BGO
        # TO DO: It now only returns domain
        """

        random_seed = spec.get('random_seed')
        method_optimization = spec.get('method_optimization')

        logger.info("Training GP model")
        logger.info("Random seed is: %d" % random_seed)
        logger.info("Algorithm used is:")
        logger.info(method_optimization)

        gp_model = GPFittingService.from_dict(spec)
        noise = spec.get('noise')
        quadrature = None
        acquisition_function = None

        domain = DomainService.from_dict(spec)

        if method_optimization not in cls._possible_optimization_methods:
            raise Exception("Incorrect BGO method")

        if method_optimization == SBO_METHOD:
            x_domain = spec.get('x_domain')
            distribution = spec.get('distribution')
            parameters_distribution = spec.get('parameters_distribution')
            quadrature = BayesianQuadrature(
                gp_model,
                x_domain,
                distribution,
                parameters_distribution=parameters_distribution)

            acquisition_function = SBO(
                quadrature, np.array(domain.discretization_domain_x))
        elif method_optimization == MULTI_TASK_METHOD:
            x_domain = spec.get('x_domain')
            distribution = spec.get('distribution')
            parameters_distribution = spec.get('parameters_distribution')
            quadrature = BayesianQuadrature(
                gp_model,
                x_domain,
                distribution,
                parameters_distribution=parameters_distribution,
                model_only_x=True)
            acquisition_function = MultiTasks(
                quadrature, quadrature.parameters_distribution.get(TASKS))
        elif method_optimization == EI_METHOD:
            acquisition_function = EI(gp_model, noisy_evaluations=noise)
        elif method_optimization == SDE_METHOD:
            x_domain = len(spec.get('x_domain'))
            parameters_distribution = spec.get('parameters_distribution')
            domain_random = np.array(parameters_distribution['domain_random'])
            weights = np.array(parameters_distribution['weights'])
            acquisition_function = SDE(gp_model, domain_random, x_domain,
                                       weights)

        problem_name = spec.get('problem_name')
        training_name = spec.get('training_name')
        n_samples = spec.get('n_samples')
        minimize = spec.get('minimize')
        n_iterations = spec.get('n_iterations')
        name_model = spec.get('name_model')
        parallel = spec.get('parallel')
        n_training = spec.get('n_training')
        number_points_each_dimension_debug = spec.get(
            'number_points_each_dimension_debug')
        n_samples_parameters = spec.get('n_samples_parameters', 0)
        use_only_training_points = spec.get('use_only_training_points', True)

        n_iterations = n_iterations - (
            len(gp_model.training_data['evaluations']) - n_training)

        bgo = cls(acquisition_function,
                  gp_model,
                  n_iterations,
                  problem_name,
                  training_name,
                  random_seed,
                  n_training,
                  name_model,
                  method_optimization,
                  minimize=minimize,
                  n_samples=n_samples,
                  noise=noise,
                  quadrature=quadrature,
                  parallel=parallel,
                  number_points_each_dimension_debug=number_points_each_dimension_debug,
                  n_samples_parameters=n_samples_parameters,
                  use_only_training_points=use_only_training_points)

        if n_training < len(bgo.gp_model.training_data['evaluations']):
            extra_iterations = len(
                bgo.gp_model.training_data['evaluations']) - n_training
            data = JSONFile.read(bgo.objective.file_path)
            bgo.objective.evaluated_points = data['evaluated_points'][
                0:extra_iterations]
            bgo.objective.objective_values = data['objective_values'][
                0:extra_iterations]
            bgo.objective.model_objective_values = \
                data['model_objective_values'][0:extra_iterations]
            bgo.objective.standard_deviation_evaluations = data[
                'standard_deviation_evaluations']

        return bgo
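
    # Hedged sketch (not from the original source): from_spec reads its argument via spec.get(...),
    # so for illustration a plain dict can stand in for a RunSpecEntity. The keys read directly in
    # this method are 'random_seed', 'method_optimization', 'noise', 'x_domain', 'distribution',
    # 'parameters_distribution', 'problem_name', 'training_name', 'n_samples', 'minimize',
    # 'n_iterations', 'name_model', 'parallel', 'n_training', 'number_points_each_dimension_debug',
    # 'n_samples_parameters' and 'use_only_training_points'; GPFittingService.from_dict and
    # DomainService.from_dict read further GP/domain keys (see the spec built in bgo() above).
    # For example (enclosing class name assumed to be BGO):
    #     bgo_instance = BGO.from_spec(spec_dict)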
    def optimize(self,
                 random_seed=None,
                 start=None,
                 debug=False,
                 monte_carlo_sbo=False,
                 n_samples_mc=1,
                 n_restarts_mc=1,
                 n_best_restarts_mc=0,
                 n_restarts=10,
                 n_best_restarts=0,
                 n_samples_parameters=0,
                 n_restarts_mean=1000,
                 n_best_restarts_mean=100,
                 method_opt_mc=None,
                 maxepoch=10,
                 n_samples_parameters_mean=0,
                 maxepoch_mean=20,
                 threshold_sbo=None,
                 optimize_only_posterior_mean=False,
                 start_optimize_posterior_mean=0,
                 **opt_params_mc):
        """
        Optimize objective over the domain.
        :param random_seed: int
        :param start: (np.array(n)) starting point for the optimization of VOI
        :param debug: (boolean) If true, saves evaluations of the VOI and posterior mean at each
            iteration.
        :param monte_carlo_sbo: (boolean) If True, estimates the objective function and gradient by
            MC.
        :param n_samples_mc: (int) Number of samples for the MC method.
        :param n_restarts_mc: (int) Number of restarts to optimize a_{n+1} given a sample.
        :param n_best_restarts_mc: (int) Number of best restarting points chosen to optimize
            a_{n+1} given a sample.
        :param n_restarts: (int) Number of restarts of the VOI
        :param n_best_restarts: (int) Number of best restarting points chosen to optimize the VOI
        :param n_samples_parameters: (int)
        :param n_restarts_mean: int
        :param n_best_restarts_mean: int
        :param method_opt_mc: (str)
        :param maxepoch: (int) For SGD
        :param n_samples_parameters_mean: (int)
        :param maxepoch_mean: (int)
        :param threshold_sbo: (float) If VOI < threshold_sbo, then we choose randomly a point
            instead.
        :param opt_params_mc:
            -'factr': int
            -'maxiter': int

        :return: {'optimal_solution': np.array(n), 'optimal_value': float}
        """

        if optimize_only_posterior_mean:
            # only for noiseless problems
            chosen_points = self.gp_model.data.copy()
            n_training = self.n_training
            # use the built-in min: np.min(a, b) would treat b as an axis argument
            start_optimize_posterior_mean = min(
                len(chosen_points['evaluations']) - n_training,
                start_optimize_posterior_mean)
            total_points = \
                len(chosen_points['evaluations']) - n_training - start_optimize_posterior_mean
            self.gp_model.clean_cache()
            self.gp_model.data['evaluations'] = \
                self.gp_model.data['evaluations'][0: n_training + start_optimize_posterior_mean]
            self.gp_model.data['points'] =\
                self.gp_model.data['points'][0: n_training + start_optimize_posterior_mean, :]

            self.objective.evaluated_points = \
                self.objective.evaluated_points[0:start_optimize_posterior_mean]
            self.objective.objective_values = \
                self.objective.objective_values[0:start_optimize_posterior_mean]
            self.objective.model_objective_values = \
                self.objective.model_objective_values[0:start_optimize_posterior_mean]

        start_ei = True
        if self.quadrature is not None and self.quadrature.task_continue:
            start_ei = False

        if n_samples_parameters > 0 and n_samples_parameters_mean == 0:
            n_samples_parameters_mean = n_samples_parameters

        if method_opt_mc is None:
            method_opt_mc = LBFGS_NAME

        if random_seed is not None:
            np.random.seed(random_seed)

        threshold_af = None
        if self.method_optimization == SBO_METHOD:
            threshold_af = threshold_sbo

        if self.method_optimization == SBO_METHOD or self.method_optimization == MULTI_TASK_METHOD:
            model = self.quadrature
        else:
            model = self.gp_model

        noise = None

        if n_samples_parameters_mean > 0:
            method_opt_mu = SGD_NAME
        else:
            method_opt_mu = DOGLEG

        if self.method_optimization == SDE_METHOD:
            optimize_mean = self.acquisition_function.optimize_mean(
                n_restarts=n_restarts_mean,
                candidate_solutions=self.objective.evaluated_points,
                candidate_values=self.objective.objective_values)
        else:
            optimize_mean = model.optimize_posterior_mean(
                minimize=self.minimize,
                n_restarts=n_restarts_mean,
                n_best_restarts=n_best_restarts_mean,
                n_samples_parameters=n_samples_parameters_mean,
                start_new_chain=True,
                method_opt=method_opt_mu,
                maxepoch=maxepoch_mean,
                candidate_solutions=self.objective.evaluated_points,
                candidate_values=self.objective.objective_values)

        optimal_value = \
            self.objective.add_point(optimize_mean['solution'], optimize_mean['optimal_value'][0])

        model.write_debug_data(self.problem_name, self.name_model,
                               self.training_name, self.n_training,
                               self.random_seed, self.method_optimization,
                               n_samples_parameters)

        if debug:
            model.generate_evaluations(
                self.problem_name,
                self.name_model,
                self.training_name,
                self.n_training,
                self.random_seed,
                0,
                n_points_by_dimension=self.number_points_each_dimension_debug)

        for iteration in xrange(self.n_iterations):
            evaluation = None
            if not optimize_only_posterior_mean or iteration >= total_points:
                new_point_sol = self.acquisition_function.optimize(
                    parallel=self.parallel,
                    start=start,
                    monte_carlo=monte_carlo_sbo,
                    n_samples=n_samples_mc,
                    n_restarts_mc=n_restarts_mc,
                    n_best_restarts_mc=n_best_restarts_mc,
                    n_restarts=n_restarts,
                    n_best_restarts=n_best_restarts,
                    n_samples_parameters=n_samples_parameters,
                    start_new_chain=False,
                    method_opt_mc=method_opt_mc,
                    maxepoch=maxepoch,
                    start_ei=start_ei,
                    **opt_params_mc)
            else:
                point = \
                    chosen_points['points'][n_training + start_optimize_posterior_mean + iteration, :]
                new_point_sol = {'optimal_value': 0.0, 'solution': point}
                evaluation = \
                    chosen_points['evaluations'][n_training + start_optimize_posterior_mean + iteration]
                evaluation = [evaluation]

            value_sbo = new_point_sol['optimal_value']
            new_point = new_point_sol['solution']

            self.acquisition_function.write_debug_data(
                self.problem_name,
                self.name_model,
                self.training_name,
                self.n_training,
                self.random_seed,
                n_samples_parameters=n_samples_parameters,
                monte_carlo=monte_carlo_sbo)

            if debug:
                self.acquisition_function.generate_evaluations(
                    self.problem_name,
                    self.name_model,
                    self.training_name,
                    self.n_training,
                    self.random_seed,
                    iteration,
                    n_points_by_dimension=self.number_points_each_dimension_debug,
                    monte_carlo=monte_carlo_sbo,
                    n_samples=n_samples_mc,
                    n_restarts_mc=n_restarts_mc)

            self.acquisition_function.clean_cache()

            if evaluation is None:
                evaluation = TrainingDataService.evaluate_function(
                    self.objective.module, new_point, self.n_samples)

            if self.objective.noise:
                noise = np.array([evaluation[1]])

            self.gp_model.add_points_evaluations(new_point.reshape(
                (1, len(new_point))),
                                                 np.array([evaluation[0]]),
                                                 var_noise_eval=noise)

            GPFittingService.write_gp_model(
                self.gp_model,
                method=self.method_optimization,
                n_samples_parameters=n_samples_parameters)

            if self.method_optimization == SDE_METHOD:
                optimize_mean = self.acquisition_function.optimize_mean(
                    n_restarts=n_restarts_mean,
                    candidate_solutions=self.objective.evaluated_points,
                    candidate_values=self.objective.objective_values)
            else:
                optimize_mean = model.optimize_posterior_mean(
                    minimize=self.minimize,
                    n_restarts=n_restarts_mean,
                    n_best_restarts=n_best_restarts_mean,
                    n_samples_parameters=n_samples_parameters_mean,
                    start_new_chain=True,
                    method_opt=method_opt_mu,
                    maxepoch=maxepoch_mean,
                    candidate_solutions=self.objective.evaluated_points,
                    candidate_values=self.objective.objective_values)

            optimal_value = \
                self.objective.add_point(optimize_mean['solution'],
                                         optimize_mean['optimal_value'][0])

            model.write_debug_data(self.problem_name, self.name_model,
                                   self.training_name, self.n_training,
                                   self.random_seed, self.method_optimization,
                                   n_samples_parameters)

            if debug:
                model.generate_evaluations(
                    self.problem_name,
                    self.name_model,
                    self.training_name,
                    self.n_training,
                    self.random_seed,
                    iteration + 1,
                    n_points_by_dimension=self.number_points_each_dimension_debug)

        return {
            'optimal_solution': optimize_mean['solution'],
            'optimal_value': optimal_value,
        }