Example 1
    def test_get_training_data_noise(self):
        problem_name = 'test_problem_noise'
        training_name = 'test'
        bounds_domain = [[1, 100]]
        expect(JSONFile).read.and_return(None)
        training_data = \
            TrainingDataService.get_training_data(problem_name, training_name, bounds_domain,
                                                  n_training=1, noise=True, n_samples=5)
        np.random.seed(DEFAULT_RANDOM_SEED)
        points = [list(np.random.uniform(1, 100, 1))]
        noise = np.random.normal(0, 1, 5)
        samples = points[0] + noise
        evaluations = [np.mean(samples)]
        # the service reports the variance of the averaged draws scaled by
        # n_samples ** 2, hence the division by 25.0 for n_samples=5
        var = [np.var(samples) / 25.0]

        npt.assert_almost_equal(training_data['points'], points)
        npt.assert_almost_equal(training_data['var_noise'], var)
        npt.assert_almost_equal(training_data['evaluations'], evaluations)

        training_data_ = \
            TrainingDataService.get_training_data(problem_name, training_name, bounds_domain,
                                                  n_training=1, noise=True, n_samples=5,
                                                  parallel=False)

        assert np.all(training_data['points'] == training_data_['points'])
        assert np.all(
            training_data['var_noise'] == training_data_['var_noise'])
        assert np.all(
            training_data['evaluations'] == training_data_['evaluations'])
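For reference, the aggregation this test reproduces by hand: with noise=True the service draws n_samples unit-variance noisy evaluations, reports their mean, and scales the sample variance by n_samples ** 2 (hence the / 25.0 above). A minimal standalone sketch of that bookkeeping, with a hypothetical helper name:

import numpy as np

def aggregate_noisy_draws(value, n_samples, rng=np.random):
    # Hypothetical helper mirroring the arithmetic checked in the test:
    # average n_samples unit-variance noisy draws of `value` and report
    # the variance estimate np.var(draws) / n_samples ** 2.
    draws = value + rng.normal(0, 1, n_samples)
    return np.mean(draws), np.var(draws) / n_samples ** 2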
Example 2
    def test_get_training_data(self):
        problem_name = 'test_problem'
        training_name = 'test'
        bounds_domain = [[1, 100]]
        expect(JSONFile).read.and_return(None)
        training_data = \
            TrainingDataService.get_training_data(problem_name, training_name, bounds_domain)

        np.random.seed(DEFAULT_RANDOM_SEED)
        points = \
            [[42.2851784656], [72.3121248508], [1.0113231069], [30.9309246906], [15.5288331909]]
        evaluations = [i[0] for i in points]

        assert training_data['var_noise'] == []
        npt.assert_almost_equal(training_data['evaluations'], evaluations)
        npt.assert_almost_equal(training_data['points'], points)

        training_data_ = \
            TrainingDataService.get_training_data(problem_name, training_name, bounds_domain,
                                                  parallel=False)

        assert training_data['var_noise'] == training_data_['var_noise']
        assert np.all(
            training_data['evaluations'] == training_data_['evaluations'])
        assert np.all(training_data['points'] == training_data_['points'])

        with patch('os.path.exists', new=MagicMock(return_value=False)):
            os.mkdir = MockMkdir()
            training_data_ = \
                TrainingDataService.get_training_data(problem_name, training_name, bounds_domain,
                                                      parallel=False)
            assert training_data['var_noise'] == training_data_['var_noise']
            assert np.all(
                training_data['evaluations'] == training_data_['evaluations'])
            assert np.all(training_data['points'] == training_data_['points'])
Example 3
def plot_objective_function(problem_name,
                            filename_plot,
                            bounds,
                            n_points_by_dimension=None,
                            n_samples=0):
    """
    Plot the objective function used for the acquisition function.

    :param problem_name: (str)
    :param filename_plot: (str)
    :param bounds: [[float, float]] (only for x domain)
    :param n_points_by_dimension: (int)
    :param n_samples: (int)
    """

    n_points = n_points_by_dimension
    if n_points is None:
        # np.linspace expects an integer number of points
        n_points = int((bounds[0][1] - bounds[0][0]) * 10)

    points = np.linspace(bounds[0][0], bounds[0][1], n_points)

    name_module = TrainingDataService.get_name_module(problem_name)
    # Python 2 style import (level=-1); under Python 3 use importlib.import_module
    module = __import__(name_module, globals(), locals(), -1)

    values = []
    for point in points:
        evaluation = Objective.evaluate_objective(module, [point], n_samples)
        values.append(evaluation)
    plt.figure()
    plt.plot(points, values, label='objective')
    plt.legend()
    plt.savefig(filename_plot)
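A hedged usage sketch: assuming 'test_problem' resolves to an importable problem module via TrainingDataService.get_name_module, this draws the objective on [1, 100] over a 200-point grid.

# Sketch only: the filename and the 200-point grid are illustrative.
plot_objective_function('test_problem', 'objective.png', [[1, 100]],
                        n_points_by_dimension=200, n_samples=0)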
Example 4
    def test_training_data_from_dict(self):
        problem_name = 'test_problem'
        training_name = 'test'
        bounds_domain = [[1, 100]]
        expect(JSONFile).read.and_return(None)
        np.random.seed(DEFAULT_RANDOM_SEED)
        points = \
            [[42.2851784656], [72.3121248508], [1.0113231069], [30.9309246906], [15.5288331909]]

        spec = {
            'problem_name': problem_name,
            'training_name': training_name,
            'bounds_domain': bounds_domain,
            'n_training': 5,
            'points': points,
            'noise': False,
            'n_samples': 0,
            'random_seed': 1,
            'parallel': True,
            'type_bounds': [0],
        }

        training_data = TrainingDataService.from_dict(spec)

        assert len(training_data) == 3
        assert training_data['var_noise'] == []
        assert np.all(training_data['evaluations'] == [i[0] for i in points])
        assert np.all(training_data['points'] == points)
Example 5
    def test_cached_get_training_data(self):
        problem_name = 'test_problem'
        training_name = 'test'
        bounds_domain = [[1, 100]]

        expect(JSONFile).read.and_return(0)
        training_data = \
            TrainingDataService.get_training_data(problem_name, training_name, bounds_domain)
        assert training_data == 0
Example 6
    def test_get_training_data_cached_points(self):
        problem_name = 'test_problem'
        training_name = 'test'
        points = TrainingDataService.get_points_domain(5, [[1, 100]],
                                                       DEFAULT_RANDOM_SEED,
                                                       training_name,
                                                       problem_name)
        compare_point = \
            [[42.2851784656], [72.3121248508], [1.0113231069], [30.9309246906], [15.5288331909]]
        assert points == compare_point
Example 7
def plot_objective_function_af(problem_name,
                               filename_plot,
                               bounds,
                               n_points_by_dimension=None,
                               n_samples=0,
                               n_tasks=0):
    """
    Plot the objective function used for the acquisition function.

    :param problem_name: (str)
    :param filename_plot: (str)
    :param bounds: [[float, float]] (only for x domain)
    :param n_points_by_dimension: (int)
    :param n_samples: (int)
    :param n_tasks: (int)
    """

    n_points = n_points_by_dimension
    if n_points is None:
        # np.linspace expects an integer number of points
        n_points = int((bounds[0][1] - bounds[0][0]) * 10)

    points = np.linspace(bounds[0][0], bounds[0][1], n_points)

    name_module = TrainingDataService.get_name_module(problem_name)
    module = __import__(name_module, globals(), locals(), -1)

    values = {}
    # strip the 4-character extension (e.g. '.png'); a per-task suffix and
    # '.png' are appended below
    filename_plot = filename_plot[0:-4]

    if n_tasks > 0:
        for i in xrange(n_tasks):
            vals = []
            for point in points:
                point_ = np.concatenate((np.array([point]), np.array([i])))
                evaluation = TrainingDataService.evaluate_function(
                    module, point_, n_samples)
                vals.append(evaluation)
            values[i] = vals
            plt.figure()
            plt.plot(points, values[i], label='task_' + str(i))
            plt.legend()
            plt.savefig(filename_plot + '_task_' + str(i) + '.png')
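A similar sketch for the per-task variant. Note the filename should carry a four-character extension such as '.png', since the function strips the last four characters before appending the task suffix.

# Sketch only: produces 'objective_af_task_0.png' and 'objective_af_task_1.png'
# for a hypothetical two-task problem.
plot_objective_function_af('test_problem', 'objective_af.png', [[1, 100]],
                           n_points_by_dimension=200, n_samples=0, n_tasks=2)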
Example 8
    def __init__(self,
                 problem_name,
                 training_name,
                 random_seed,
                 n_training,
                 n_samples=None,
                 noise=False,
                 method=SBO_METHOD,
                 n_samples_parameters=0):
        """

        :param problem_name: (str)
        :param training_name: (str)
        :param random_seed: int
        :param n_training: int
        :param n_samples: (int) Take n_samples evaluations when we have noisy evaluations
        :param noise: boolean, true if the evaluations are noisy
        :param method: (str) bgo method
        :param n_samples_parameters: int
        """
        self.evaluated_points = []
        self.objective_values = []
        self.model_objective_values = []
        self.standard_deviation_evaluations = []

        self.noise = noise
        self.random_seed = random_seed
        self.n_samples = n_samples
        self.n_training = n_training
        self.problem_name = problem_name
        self.training_name = training_name
        name_module = TrainingDataService.get_name_module(problem_name)
        self.module = __import__(name_module, globals(), locals(), -1)
        self.method = method
        self.n_samples_parameters = n_samples_parameters

        directory = path.join(PROBLEM_DIR, self.problem_name, PARTIAL_RESULTS)

        if not os.path.exists(directory):
            os.mkdir(directory)

        file_name = self._filename(
            problem_name=self.problem_name,
            training_name=self.training_name,
            n_points=self.n_training,
            random_seed=self.random_seed,
            method=self.method,
            n_samples_parameters=self.n_samples_parameters,
        )

        self.file_path = path.join(directory, file_name)
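A construction sketch, assuming the enclosing class is the Objective wrapper referenced elsewhere in this section (the class statement itself is not shown here):

# Sketch only: track a noisy objective, averaging 5 samples per evaluation.
# `Objective` is an assumed name for the enclosing class.
objective = Objective('test_problem', 'test', random_seed=1, n_training=5,
                      n_samples=5, noise=True, method=SBO_METHOD)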
Example 9
    def test_get_training_data_given_points(self):
        points = \
            [[42.2851784656], [72.3121248508], [1.0113231069], [30.9309246906], [15.5288331909]]
        problem_name = 'test_problem'
        training_name = 'test_given_points'
        bounds_domain = [[1, 100]]
        expect(JSONFile).read.and_return(None)
        training_data = \
            TrainingDataService.get_training_data(problem_name, training_name, bounds_domain,
                                                  points=points)

        assert training_data['var_noise'] == []
        assert np.all(training_data['evaluations'] == [i[0] for i in points])
        assert np.all(training_data['points'] == points)
Example 10
    def get_gp(cls,
               name_model,
               problem_name,
               type_kernel,
               dimensions,
               bounds_domain,
               type_bounds=None,
               n_training=0,
               noise=False,
               training_data=None,
               points=None,
               training_name=None,
               mle=True,
               thinning=0,
               n_burning=0,
               max_steps_out=1,
               n_samples=None,
               random_seed=DEFAULT_RANDOM_SEED,
               kernel_values=None,
               mean_value=None,
               var_noise_value=None,
               cache=True,
               same_correlation=False,
               use_only_training_points=True,
               optimization_method=None,
               n_samples_parameters=0,
               parallel_training=True,
               simplex_domain=None,
               objective_function=None,
               define_samplers=True):
        """
        Fetch a GP model from file if it exists, otherwise train a new model and save it locally.

        :param name_model: str
        :param problem_name: str
        :param type_kernel: [(str)] Must be in possible_kernels. If it's a product of kernels it
            should be a list as: [PRODUCT_KERNELS_SEPARABLE, NAME_1_KERNEL, NAME_2_KERNEL]
        :param dimensions: [int]. It has only the n_tasks for the task_kernels, and for the
            PRODUCT_KERNELS_SEPARABLE contains the dimensions of every kernel in the product
        :param bounds_domain: [([float, float] or [float])], in the first case the entries are the
            lower and upper bounds of the respective dimension; in the second case, it's a list of
            the finite points representing the domain of that entry.
        :param type_bounds: [0 or 1], 0 if the bounds are the lower and upper bounds of the
            respective entry, 1 if the bounds are all the finite options for that entry.
        :param n_training: int
        :param noise: (boolean) If true, we get noisy evaluations.
        :param training_data: {'points': [[float]], 'evaluations': [float],
            'var_noise': [float] or None}
        :param points: [[float]]. If training_data is None, we can evaluate the objective
            function in these points.
        :param training_name: (str), prefix used to save the training data.
        :param mle: (boolean) If true, fits the GP by MLE.
        :param thinning: (int)
        :param n_burning: (int) Number of burn-in samples for the MCMC.
        :param max_steps_out: (int) Maximum number of steps out for the stepping-out or
            doubling procedure in slice sampling.
        :param n_samples: (int) If the objective is noisy, we take n_samples of the function to
            estimate its value.
        :param random_seed: (int)
        :param kernel_values: [float], contains the default values of the parameters of the kernel
        :param mean_value: [float], It contains the value of the mean parameter.
        :param var_noise_value: [float], It contains the variance of the noise of the model
        :param cache: (boolean) Try to get model from cache
        :param same_correlation: (boolean) If true, it uses the same correlations for the task
            kernel.
        :param use_only_training_points: (boolean) If the model is read and this parameter is
            true, only the training points in the data are used. Otherwise, new points computed
            previously are included as well.
        :param optimization_method: (str)
        :param n_samples_parameters: (int)
        :param parallel_training: (boolean)
        :param define_samplers: (boolean) If False, samplers for the hyperparameters are not
            defined.

        :return: (GPFittingGaussian) - An instance of GPFittingGaussian
        """
        model_type = cls._model_map[name_model]

        if training_name is None:
            training_name = 'default_training_data_%d_points_rs_%d' % (
                n_training, random_seed)

        if use_only_training_points:
            f_name = cls._get_filename(model_type, problem_name, type_kernel,
                                       training_name)
            f_name_cache = cls._get_filename_modified(model_type, problem_name,
                                                      type_kernel,
                                                      training_name,
                                                      optimization_method,
                                                      n_samples_parameters)
        else:
            f_name = cls._get_filename_modified(model_type, problem_name,
                                                type_kernel, training_name,
                                                optimization_method,
                                                n_samples_parameters)
            # in this branch the modified filename is also the cache filename;
            # leaving f_name_cache unset would raise a NameError below
            f_name_cache = f_name

        if not os.path.exists('data'):
            os.mkdir('data')

        if not os.path.exists(GP_DIR):
            os.mkdir(GP_DIR)

        gp_dir = path.join(GP_DIR, problem_name)

        if not os.path.exists(gp_dir):
            os.mkdir(gp_dir)

        gp_path = path.join(gp_dir, f_name)

        gp_path_cache = path.join(gp_dir, f_name_cache)

        if cache:
            data = JSONFile.read(gp_path)
        else:
            data = None

        if data is not None:
            return model_type.deserialize(
                data, use_only_training_points=use_only_training_points)

        if training_data is None or training_data == {}:
            training_data = TrainingDataService.get_training_data(
                problem_name,
                training_name,
                bounds_domain,
                n_training=n_training,
                points=points,
                noise=noise,
                n_samples=n_samples,
                random_seed=random_seed,
                type_bounds=type_bounds,
                cache=cache,
                parallel=parallel_training,
                gp_path_cache=gp_path_cache,
                simplex_domain=simplex_domain,
                objective_function=objective_function)

        logger.info("Training %s" % model_type.__name__)

        gp_model = model_type.train(type_kernel,
                                    dimensions,
                                    mle,
                                    training_data,
                                    bounds_domain,
                                    thinning=thinning,
                                    n_burning=n_burning,
                                    max_steps_out=max_steps_out,
                                    random_seed=random_seed,
                                    type_bounds=type_bounds,
                                    training_name=training_name,
                                    problem_name=problem_name,
                                    kernel_values=kernel_values,
                                    mean_value=mean_value,
                                    var_noise_value=var_noise_value,
                                    same_correlation=same_correlation,
                                    simplex_domain=simplex_domain,
                                    define_samplers=define_samplers)

        JSONFile.write(gp_model.serialize(), gp_path)

        return gp_model
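A usage sketch under assumed names: the model key ('gp_fitting_gaussian') and the kernel constant (MATERN52_NAME) are placeholders for whatever your install registers in _model_map and possible_kernels, and GPFittingService is assumed to be the class that owns get_gp.

# Sketch only: fetch (or train and cache) a 1-D GP on 5 training points.
gp = GPFittingService.get_gp(
    name_model='gp_fitting_gaussian',
    problem_name='test_problem',
    type_kernel=[MATERN52_NAME],
    dimensions=[1],
    bounds_domain=[[1, 100]],
    n_training=5,
    random_seed=1)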
Example 11
    def validate_gp_model(cls, type_kernel, n_training, problem_name, bounds_domain, type_bounds,
                          dimensions, thinning=0, n_burning=0, max_steps_out=1,
                          random_seed=None, training_name=None, points=None, noise=False,
                          n_samples=0, cache=True, **kernel_parameters):
        """

        :param type_kernel: [(str)] Must be in possible_kernels. If it's a product of kernels it
            should be a list as: [PRODUCT_KERNELS_SEPARABLE, NAME_1_KERNEL, NAME_2_KERNEL]
        :param n_training: int
        :param problem_name: str
        :param bounds_domain: [([float, float] or [float])], in the first case the entries are the
            lower and upper bounds of the respective dimension; in the second case, it's a list of
            the finite points representing the domain of that entry.
        :param type_bounds: [0 or 1], 0 if the bounds are the lower and upper bounds of the
            respective entry, 1 if the bounds are all the finite options for that entry.
        :param dimensions: [int]. It has only the n_tasks for the task_kernels, and for the
            PRODUCT_KERNELS_SEPARABLE contains the dimensions of every kernel in the product
        :param thinning: (int)
        :param n_burning: (int) Number of burn-in samples for the MCMC.
        :param max_steps_out: (int) Maximum number of steps out for the stepping-out or
            doubling procedure in slice sampling.
        :param random_seed: (int)
        :param training_name: str
        :param points: [[float]]. If training_data is None, we can evaluate the objective
            function in these points.
        :param noise: boolean
        :param n_samples: (int) If the objective is noisy, we take n_samples of the function to
            estimate its value.
        :param cache: (boolean) Try to get training_data from the cache if True.
        :param kernel_parameters: additional kernel parameters,
            - SAME_CORRELATION: (boolean) True or False. Parameter used only for task kernel.

        :return: (float) proportion of successful cross-validation runs
        """

        if random_seed is None:
            random_seed = DEFAULT_RANDOM_SEED

        if training_name is None:
            training_name = 'default_training_data_%d_points_rs_%d' % (n_training, random_seed)

        training_data = TrainingDataService.get_training_data(problem_name, training_name,
                                                              bounds_domain,
                                                              n_training=n_training,
                                                              points=points,
                                                              noise=noise,
                                                              n_samples=n_samples,
                                                              random_seed=random_seed,
                                                              type_bounds=type_bounds,
                                                              cache=cache)

        training_data['evaluations'] = np.array(training_data['evaluations'])
        training_data['points'] = np.array(training_data['points'])

        if len(training_data['var_noise']) > 0:
            training_data['var_noise'] = np.array(training_data['var_noise'])
        else:
            training_data['var_noise'] = None

        results = ValidationGPModel.cross_validation_mle_parameters(
            type_kernel, training_data, dimensions, problem_name, bounds_domain, thinning,
            n_burning, max_steps_out, start=None, random_seed=random_seed,
            training_name=training_name, **kernel_parameters
        )

        logger.info('Percentage of success is: %f' % results['success_proportion'])

        return results['success_proportion']
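A call sketch, assuming this classmethod lives on the same service class as get_gp above and that MATERN52_NAME is a registered kernel constant:

# Sketch only: cross-validate MLE-fitted parameters on 5 training points.
proportion = GPFittingService.validate_gp_model(
    type_kernel=[MATERN52_NAME], n_training=5, problem_name='test_problem',
    bounds_domain=[[1, 100]], type_bounds=[0], dimensions=[1])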
Example 12
    def optimize(self,
                 random_seed=None,
                 start=None,
                 debug=False,
                 monte_carlo_sbo=False,
                 n_samples_mc=1,
                 n_restarts_mc=1,
                 n_best_restarts_mc=0,
                 n_restarts=10,
                 n_best_restarts=0,
                 n_samples_parameters=0,
                 n_restarts_mean=1000,
                 n_best_restarts_mean=100,
                 method_opt_mc=None,
                 maxepoch=10,
                 n_samples_parameters_mean=0,
                 maxepoch_mean=20,
                 threshold_sbo=None,
                 optimize_only_posterior_mean=False,
                 start_optimize_posterior_mean=0,
                 **opt_params_mc):
        """
        Optimize objective over the domain.
        :param random_seed: int
        :param start: (np.array(n)) starting point for the optimization of VOI
        :param debug: (boolean) If true, saves evaluations of the VOI and posterior mean at each
            iteration.
        :param monte_carlo_sbo: (boolean) If True, estimates the objective function and gradient by
            MC.
        :param n_samples_mc: (int) Number of samples for the MC method.
        :param n_restarts_mc: (int) Number of restarts to optimize a_{n+1} given a sample.
        :param n_best_restarts_mc: (int) Number of best restarting points chosen to optimize
            a_{n+1} given a sample.
        :param n_restarts: (int) Number of restarts of the VOI
        :param n_best_restarts: (int) Number of best restarting points chosen to optimize the VOI
        :param n_samples_parameters: (int)
        :param n_restarts_mean: int
        :param n_best_restarts_mean: int
        :param method_opt_mc: (str)
        :param maxepoch: (int) For SGD
        :param n_samples_parameters_mean: (int)
        :param maxepoch_mean: (int)
        :param threshold_sbo: (float) If VOI < threshold_sbo, then we choose randomly a point
            instead.
        :param optimize_only_posterior_mean: (boolean) If True, replay previously chosen points
            instead of optimizing the acquisition function, and only re-optimize the posterior
            mean (only for noiseless problems).
        :param start_optimize_posterior_mean: (int) Index at which the replay starts.
        :param opt_params_mc:
            -'factr': int
            -'maxiter': int

        :return: {'optimal_solution': np.array(n), 'optimal_value': float}
        """

        if optimize_only_posterior_mean:
            # only for noiseless problems
            chosen_points = self.gp_model.data.copy()
            n_training = self.n_training
            # built-in min: np.min(a, b) would treat b as the axis argument
            start_optimize_posterior_mean = min(
                len(chosen_points['evaluations']) - n_training,
                start_optimize_posterior_mean)
            total_points = \
                len(chosen_points['evaluations']) - n_training - start_optimize_posterior_mean
            self.gp_model.clean_cache()
            self.gp_model.data['evaluations'] = \
                self.gp_model.data['evaluations'][0: n_training + start_optimize_posterior_mean]
            self.gp_model.data['points'] =\
                self.gp_model.data['points'][0: n_training + start_optimize_posterior_mean, :]

            self.objective.evaluated_points = \
                self.objective.evaluated_points[0:start_optimize_posterior_mean]
            self.objective.objective_values = \
                self.objective.objective_values[0:start_optimize_posterior_mean]
            self.objective.model_objective_values = \
                self.objective.model_objective_values[0:start_optimize_posterior_mean]

        start_ei = True
        if self.quadrature is not None and self.quadrature.task_continue:
            start_ei = False

        if n_samples_parameters > 0 and n_samples_parameters_mean == 0:
            n_samples_parameters_mean = n_samples_parameters

        if method_opt_mc is None:
            method_opt_mc = LBFGS_NAME

        if random_seed is not None:
            np.random.seed(random_seed)

        threshold_af = None
        if self.method_optimization == SBO_METHOD:
            threshold_af = threshold_sbo

        if self.method_optimization == SBO_METHOD or self.method_optimization == MULTI_TASK_METHOD:
            model = self.quadrature
        else:
            model = self.gp_model

        noise = None

        if n_samples_parameters_mean > 0:
            method_opt_mu = SGD_NAME
        else:
            method_opt_mu = DOGLEG

        if self.method_optimization == SDE_METHOD:
            optimize_mean = self.acquisition_function.optimize_mean(
                n_restarts=n_restarts_mean,
                candidate_solutions=self.objective.evaluated_points,
                candidate_values=self.objective.objective_values)
        else:
            optimize_mean = model.optimize_posterior_mean(
                minimize=self.minimize,
                n_restarts=n_restarts_mean,
                n_best_restarts=n_best_restarts_mean,
                n_samples_parameters=n_samples_parameters_mean,
                start_new_chain=True,
                method_opt=method_opt_mu,
                maxepoch=maxepoch_mean,
                candidate_solutions=self.objective.evaluated_points,
                candidate_values=self.objective.objective_values)

        optimal_value = \
            self.objective.add_point(optimize_mean['solution'], optimize_mean['optimal_value'][0])

        model.write_debug_data(self.problem_name, self.name_model,
                               self.training_name, self.n_training,
                               self.random_seed, self.method_optimization,
                               n_samples_parameters)

        if debug:
            model.generate_evaluations(
                self.problem_name,
                self.name_model,
                self.training_name,
                self.n_training,
                self.random_seed,
                0,
                n_points_by_dimension=self.number_points_each_dimension_debug)

        for iteration in xrange(self.n_iterations):
            evaluation = None
            # total_points is defined only when optimize_only_posterior_mean is
            # True; short-circuiting guards the reference below
            if not optimize_only_posterior_mean or iteration >= total_points:
                new_point_sol = self.acquisition_function.optimize(
                    parallel=self.parallel,
                    start=start,
                    monte_carlo=monte_carlo_sbo,
                    n_samples=n_samples_mc,
                    n_restarts_mc=n_restarts_mc,
                    n_best_restarts_mc=n_best_restarts_mc,
                    n_restarts=n_restarts,
                    n_best_restarts=n_best_restarts,
                    n_samples_parameters=n_samples_parameters,
                    start_new_chain=False,
                    method_opt_mc=method_opt_mc,
                    maxepoch=maxepoch,
                    start_ei=start_ei,
                    **opt_params_mc)
            else:
                point = \
                    chosen_points['points'][n_training + start_optimize_posterior_mean + iteration, :]
                new_point_sol = {'optimal_value': 0.0, 'solution': point}
                evaluation = \
                    chosen_points['evaluations'][n_training + start_optimize_posterior_mean + iteration]
                evaluation = [evaluation]

            value_sbo = new_point_sol['optimal_value']
            new_point = new_point_sol['solution']

            self.acquisition_function.write_debug_data(
                self.problem_name,
                self.name_model,
                self.training_name,
                self.n_training,
                self.random_seed,
                n_samples_parameters=n_samples_parameters,
                monte_carlo=monte_carlo_sbo)

            if debug:
                self.acquisition_function.generate_evaluations(
                    self.problem_name,
                    self.name_model,
                    self.training_name,
                    self.n_training,
                    self.random_seed,
                    iteration,
                    n_points_by_dimension=self.number_points_each_dimension_debug,
                    monte_carlo=monte_carlo_sbo,
                    n_samples=n_samples_mc,
                    n_restarts_mc=n_restarts_mc)

            self.acquisition_function.clean_cache()

            if evaluation is None:
                evaluation = TrainingDataService.evaluate_function(
                    self.objective.module, new_point, self.n_samples)

            if self.objective.noise:
                noise = np.array([evaluation[1]])

            self.gp_model.add_points_evaluations(
                new_point.reshape((1, len(new_point))),
                np.array([evaluation[0]]),
                var_noise_eval=noise)

            GPFittingService.write_gp_model(
                self.gp_model,
                method=self.method_optimization,
                n_samples_parameters=n_samples_parameters)

            if self.method_optimization == SDE_METHOD:
                optimize_mean = self.acquisition_function.optimize_mean(
                    n_restarts=n_restarts_mean,
                    candidate_solutions=self.objective.evaluated_points,
                    candidate_values=self.objective.objective_values)
            else:
                optimize_mean = model.optimize_posterior_mean(
                    minimize=self.minimize,
                    n_restarts=n_restarts_mean,
                    n_best_restarts=n_best_restarts_mean,
                    n_samples_parameters=n_samples_parameters_mean,
                    start_new_chain=True,
                    method_opt=method_opt_mu,
                    maxepoch=maxepoch_mean,
                    candidate_solutions=self.objective.evaluated_points,
                    candidate_values=self.objective.objective_values)

            optimal_value = \
                self.objective.add_point(optimize_mean['solution'],
                                         optimize_mean['optimal_value'][0])

            model.write_debug_data(self.problem_name, self.name_model,
                                   self.training_name, self.n_training,
                                   self.random_seed, self.method_optimization,
                                   n_samples_parameters)

            if debug:
                model.generate_evaluations(
                    self.problem_name,
                    self.name_model,
                    self.training_name,
                    self.n_training,
                    self.random_seed,
                    iteration + 1,
                    n_points_by_dimension=self.number_points_each_dimension_debug)

        return {
            'optimal_solution': optimize_mean['solution'],
            'optimal_value': optimal_value,
        }
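Finally, a driver sketch; `bgo` stands for an already-constructed instance of the (assumed) class that owns optimize, configured elsewhere with the GP model, acquisition function, and objective:

# Sketch only: run the optimization loop with exact (non-MC) SBO steps.
result = bgo.optimize(random_seed=1, n_restarts=10, n_best_restarts=5)
print(result['optimal_solution'], result['optimal_value'])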