Example #1
    def scalarization(self, dataset):
        """
        Scalarize the dataset using the tchebycheff function
        :return:
        """
        logging.info('Computing Tchebycheff scalarization')
        fx = dataset.get_output()
        fx_min = np.array(np.min(fx, axis=1)).squeeze()
        fx_max = np.array(np.max(fx, axis=1)).squeeze()
        rescaled_fx = rutils.bounds(min=fx_min,
                                    max=fx_max).transform_01().transform(
                                        fx).T  # Rescale fx axis to 0-1

        lambdas = np.random.rand(self.task.get_n_objectives())
        lambdas = lambdas / np.sum(
            lambdas)  # Rescale so that np.sum(lambdas) == 1
        new_fx = self.tchebycheff_function(fx=rescaled_fx, lambdas=lambdas)
        out = rdata.dataset(data_input=dataset.get_input(), data_output=new_fx)
        return out
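
The `tchebycheff_function` helper called above is not shown in this example. A minimal sketch of the Tchebycheff scalarization it presumably implements, assuming `fx` holds one row per objective (rescaled to [0, 1]) and the ideal point is the origin after rescaling:

    import numpy as np

    def tchebycheff_function(fx, lambdas):
        # fx: (n_objectives, n_points) array of objectives rescaled to [0, 1]
        # lambdas: (n_objectives,) weight vector summing to 1
        # Tchebycheff scalarization: g(x) = max_i lambdas_i * |f_i(x) - z_i|,
        # with the ideal point z = 0 after the 0-1 rescaling (an assumption)
        return np.max(lambdas[:, None] * np.abs(np.asarray(fx)), axis=0)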
Example #2
    def _select_parameters(self):
        """
        Select the next set of parameters to evaluate on the objective function
        :return: parameters: np.matrix
        """
        if (self._iter == 0) and (self.past_evals is None):
            # If there are no past evaluations, randomly initialize
            logging.info('Initializing with %d random evaluations' %
                         self.n_initial_evals)
            self._logs.data.model = [None]
            return self.task.get_bounds().sample_uniform(
                (self.n_initial_evals, self.task.get_n_parameters()))
        else:
            # TODO: use past_evals
            # Create model
            logging.info('Fitting response surface')
            dataset = rdata.dataset(data_input=self._logs.get_parameters(),
                                    data_output=self._logs.get_objectives())
            p = DotMap()
            p.verbosity = 0
            self._model = self.model(parameters=p)
            self._model.train(train_set=dataset)

            # Update acquisition function
            self.acq_func.update(model=self._model, logs=self._logs)

            # Optimize acquisition function
            logging.info('Optimizing the acquisition function')
            task = OptTask(f=self.acq_func.evaluate,
                           n_parameters=self.task.get_n_parameters(),
                           n_objectives=1,
                           order=0,
                           bounds=self.task.get_bounds(),
                           name='Acquisition Function',
                           task={'minimize'},
                           labels_param=None,
                           labels_obj=None,
                           vectorized=True)
            stopCriteria = StopCriteria(maxEvals=self.optimizer.maxEvals)
            p = DotMap()
            p.verbosity = 1
            acq_opt = self.optimizer.optimizer(parameters=p,
                                               task=task,
                                               stopCriteria=stopCriteria)
            x = np.matrix(acq_opt.optimize())  # Optimize
            fx = self._model.predict(dataset=x.T)

            # Log stuff
            if self._logs.data.m is None:
                self._logs.data.m = np.matrix(fx[0])
                self._logs.data.v = np.matrix(fx[1])
            else:
                self._logs.data.m = np.concatenate((self._logs.data.m, fx[0]),
                                                   axis=0)
                self._logs.data.v = np.concatenate((self._logs.data.v, fx[1]),
                                                   axis=0)
            if self.store_model:
                if self._logs.data.model is None:
                    self._logs.data.model = [self._model]
                else:
                    self._logs.data.model.append(self._model)

            # Optimize mean function (for logging purposes)
            if self.log_best_mean:
                logging.info('Optimizing the mean function')
                task = OptTask(f=self._model.predict_mean,
                               n_parameters=self.task.get_n_parameters(),
                               n_objectives=1,
                               order=0,
                               bounds=self.task.get_bounds(),
                               name='Mean Function',
                               task={'minimize'},
                               labels_param=None,
                               labels_obj=None,
                               vectorized=True)
                stopCriteria = StopCriteria(maxEvals=self.optimizer.maxEvals)
                p = DotMap()
                p.verbosity = 1
                mean_opt = self.optimizer.optimizer(parameters=p,
                                                    task=task,
                                                    stopCriteria=stopCriteria)
                best_x = np.matrix(mean_opt.optimize())  # Optimize the mean
                best_fx = self._model.predict(dataset=best_x.T)
                if self._iter == 1:
                    self._logs.data.best_m = np.matrix(best_fx[0])
                    self._logs.data.best_v = np.matrix(best_fx[1])
                else:
                    self._logs.data.best_m = np.concatenate(
                        (self._logs.data.best_m, best_fx[0]), axis=0)
                    self._logs.data.best_v = np.concatenate(
                        (self._logs.data.best_v, best_fx[1]), axis=0)

            return x
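
For context, `_select_parameters` is the inner step of a standard Bayesian optimization loop. A minimal sketch of the driver around it, assuming a `satisfied()` check on the stop criteria and an `evaluate()` on the task (both names are illustrative, not confirmed API; `add_evals` appears in Example #5):

    def optimize(self):
        # Illustrative outer loop: pick parameters, evaluate the true
        # objective, log the results, and repeat until stopping
        while not self.stop_criteria.satisfied():
            x = self._select_parameters()      # acquisition step above
            fx = self.task.evaluate(x)         # query the real objective
            self._logs.add_evals(x=x, fx=fx)   # grow the training data
            self._iter += 1
        return self._logs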
Example #3
    def _select_parameters(self):
        """
        Select the next set of parameters to evaluate on the objective function
        :return: parameters: np.matrix
        """

        # If we don't have any data to start with, randomly pick points
        k = self.batch_size

        if (self._iter == 0) and (self.past_evals is None):
            logging.info('Initializing with %d random evaluations' %
                         self.n_initial_evals)
            self._logs.data.model = [None]
            return self.task.get_bounds() \
                .sample_uniform((self.n_initial_evals,
                                 self.task.get_n_parameters()))
        else:
            # TODO: use past_evals
            logging.info('Fitting response surface')

            dataset = rdata.dataset(data_input=self._logs.get_parameters(),
                                    data_output=self._logs.get_objectives())

            Xs, FXs, GPs = self._simulate_experiments(dataset)
            # ndarray.flatten() takes no shape argument; use reshape to
            # merge the leading (runs, batch) axes into one samples axis
            Xs = Xs.reshape((-1,) + Xs.shape[2:])
            Ws = np.array([])
            for i in range(k):
                # np.append returns a new array, so reassign the result
                Ws = np.append(Ws, self._weigh_data_points(Xs[i], GPs[i]))
            # now Xs and Ws are both flattened w.r.t. the samples axis
            Xs = self._match_experiments(Xs, Ws, k)

            # TODO: integrate the different acquisition functions to form one GP
            # # Log the mean and variance
            # if self._logs.data.m is None:
            #     self._logs.data.m = np.matrix(fx[0])
            #     self._logs.data.v = np.matrix(fx[1])
            # else:
            #     self._logs.data.m = np.concatenate((self._logs.data.m, fx[0]),
            #                                        axis=0)
            #     self._logs.data.v = np.concatenate((self._logs.data.v, fx[1]),
            #                                        axis=0)
            #
            # # Store the model
            # if self.store_model:
            #     if self._logs.data.model is None:
            #         self._logs.data.model = [self._model]
            #     else:
            #         self._logs.data.model.append(self._model)
            #
            # # Optimize mean function (for logging purposes)
            # if self.log_best_mean:
            #     logging.info('Optimizing the mean function')
            #     task = OptTask(f=self._model.predict_mean,
            #                    n_parameters=self.task.get_n_parameters(),
            #                    n_objectives=1,
            #                    order=0,
            #                    bounds=self.task.get_bounds(),
            #                    name='Mean Function',
            #                    task={'minimize'},
            #                    labels_param=None, labels_obj=None,
            #                    vectorized=True)
            #     stop_criteria = StopCriteria(maxEvals=self.optimizer.maxEvals)
            #     p = DotMap()
            #     p.verbosity = 1
            #     mean_opt = self.optimizer.optimizer(parameters=p,
            #                                         task=task,
            #                                         stopCriteria=stop_criteria)
            #
            #     best_x = np.matrix(mean_opt.optimize())
            #     best_fx = self._model.predict(dataset=best_x.T)
            #
            #     if self._iter == 1:
            #         self._logs.data.best_m = np.matrix(best_fx[0])
            #         self._logs.data.best_v = np.matrix(best_fx[1])
            #     else:
            #         self._logs.data.best_m = np.concatenate(
            #             (self._logs.data.best_m, best_fx[0]), axis=0)
            #         self._logs.data.best_v = np.concatenate(
            #             (self._logs.data.best_v, best_fx[1]), axis=0)

            return Xs
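
The samples-axis reshape fixed above is easy to get wrong: `ndarray.flatten()` accepts only an order flag, so merging leading axes requires `reshape`. A self-contained illustration, independent of the library:

    import numpy as np

    # n simulated runs of k candidate points in d dimensions
    n, k, d = 5, 3, 2
    Xs = np.arange(n * k * d).reshape(n, k, d)

    # Merge the (runs, batch) axes into a single axis of n*k points
    Xs_flat = Xs.reshape((-1,) + Xs.shape[2:])
    print(Xs_flat.shape)  # (15, 2)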
Example #4
    def _simulate_experiments(self,
                              dataset_initial: rdata.dataset,
                              n: int = 5) -> tuple:
        """
        sample n times from S^k_pi, the set of k experiments resulting from
           running a sequential policy, pi, k iterations

        :param dataset_initial:
        :param n: number of samples of S^k_pi. A hyperparameter
        :return:
        """

        # Build each element independently: [obj] * n would alias a single
        # object n times, so every run would share one dataset/acq. function
        datasets = [dataset_initial.copy() for _ in range(n)]
        acq_funcs = [EI(model=None, logs=None) for _ in range(n)]
        parameters = []
        objectives = []
        models = []
        k = self.batch_size

        for sample in range(n):
            parameters.append([])
            objectives.append([])
            for iteration in range(k):
                p = DotMap()
                p.verbosity = 0

                # Instantiate the model with given parameters
                self._model = self.model(parameters=p)
                # Train the model with provided dataset
                self._model.train(train_set=datasets[sample])
                # Update acquisition function with the posterior
                acq_funcs[sample].update(model=self._model, logs=self._logs)

                # Optimize acquisition function
                logging.info('Optimizing the acquisition function')
                task = OptTask(f=acq_funcs[sample].evaluate,
                               n_parameters=self.task.get_n_parameters(),
                               n_objectives=1,
                               order=0,
                               bounds=self.task.get_bounds(),
                               name='Acquisition Function',
                               task={'minimize'},
                               labels_param=None,
                               labels_obj=None,
                               vectorized=True)
                stop_criteria = StopCriteria(maxEvals=self.optimizer.maxEvals)

                p = DotMap()
                p.verbosity = 1

                # Calculate the optimizer
                optimizer = self.optimizer.optimizer(
                    parameters=p, task=task, stopCriteria=stop_criteria)
                x = np.matrix(optimizer.optimize())
                fx = self._model.predict(dataset=x.T)
                dataset_new = rdata.dataset(data_input=x, data_output=fx)
                datasets[sample] = datasets[sample].merge(dataset_new)
                parameters[sample].append(x)
                objectives[sample].append(fx)
            models.append(self._model)

        return parameters, objectives, models
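
One fix above deserves a note: the original `[dataset_initial.copy()] * n` builds a list of n references to one copy, so every simulated run would share (and mutate) the same dataset. A quick standalone demonstration of the pitfall:

    # List multiplication repeats the same object n times
    shared = [[]] * 3
    shared[0].append(1)
    print(shared)    # [[1], [1], [1]] -- all three slots alias one list

    # A comprehension creates an independent object per slot
    separate = [[] for _ in range(3)]
    separate[0].append(1)
    print(separate)  # [[1], [], []]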
Example #5
    obj_dict = dict(zip(obj_labels, obj_values))

    # save these parameters to a json for the iteration
    save_params(iteration, param_dict, obj_dict, date=date)

    # load all of the parameters for the given date
    df = get_info_date("2019-03-26")

    # create a dataset object from our parameters and objective values
    print("Parameters:")
    print(np.matrix(df[param_labels].values.T))
    print("Objectives:")
    print(np.matrix(df['InvHuber'].values.T))
    data_in = np.matrix(df[param_labels].values.T)
    data_out = np.matrix(df['InvHuber'].values.T)
    dataset = rdata.dataset(data_input=data_in, data_output=data_out)

    PID = PID_Objective(mode='Time')
    task = OptTask(f=PID, n_parameters=4, n_objectives=1,
                   bounds=bounds(min=[0, 0, 0, 0], max=[100, 100, 10, 10]),
                   vectorized=False,
                   labels_param=['KP_pitch', 'KP_roll', 'KD_pitch', 'KD_roll'])
    Stop = StopCriteria(maxEvals=50)

    # Create our own log object
    logs = Logs(store_x=True, store_fx=True, store_gx=False)
    logs.add_evals(