Example no. 1
0
    def initialize_modelM(self):
        """Build the projection network and the joint BLR manifold model.

        Returns:
            tuple: ``(k_list, gp_nnjoint, nn)`` — the kernel list (the last
            kernel is the Manifold GP kernel), the joint NN+GP model, and the
            network itself.
        """
        # Network mapping raw inputs (width data_x.shape[1]) through a single
        # hidden layer of 20 units down to the projection dimension.
        feature_net = Ort_NN(
            dims=[self.data_x.shape[1], 20, self.proj_dim],
            N=0,
            proj_dim=0,
            name=None)

        # Joint model over normalized data; 'BLR' selects Bayesian linear
        # regression as the latent model. Last kernel is the Manifold GP kernel.
        k_list, gp_nnjoint = initialize_m_models(
            x=np.copy(self.Xnorm),
            y=np.copy(self.Ynorm),
            input_dim=self.proj_dim,
            model='BLR',
            kernel='Matern52',
            ARD=True,
            nn=feature_net)

        # Near-noise-free observation model.
        gp_nnjoint.likelihood.variance = 1e-06

        return k_list, gp_nnjoint, feature_net
Example no. 2
0
    def initialize_modelM(self):
        """Set up the encoder GP over the normalized data.

        Returns:
            tuple: ``(kernm, gpm)`` — the Matern-5/2 ARD kernel and the
            encoder GP model produced by ``initialize_m_models``.
        """
        projection_net = Ort_NN(
            dims=[self.Xnorm.shape[1], 20, self.proj_dim],
            N=0,
            proj_dim=0,
            name=None)

        kernm, gpm = initialize_m_models(
            x=np.copy(self.Xnorm),
            y=np.copy(self.Ynorm),
            input_dim=self.proj_dim,
            model='encoder',
            kernel='Matern52',
            ARD=True,
            nn=projection_net,
            decomp=None)

        # Initial observation-noise level for the encoder GP.
        gpm.likelihood.variance = 0.01

        return kernm, gpm
    def run(self, maxiter=20):
        """Run the Bayesian-optimization loop for ``maxiter`` iterations.

        Each iteration rebuilds the encoder model on a fresh graph, fits its
        hyper-parameters, optimizes the acquisition in the projected space,
        decodes the optimum back to the original input space, evaluates it,
        and appends the new point to the data set.

        Args:
            maxiter: number of BO iterations to perform.

        Returns:
            tuple: ``(data_x, data_y, hyps)`` — all evaluated inputs, their
            objective values, and the per-iteration hyper-parameter trace.
        """
        opt_config = import_attr('tfbo/configurations', attribute='acquisition_opt')
        # The acquisition is optimized inside the unit hypercube of the
        # projected space (proj_dim coords per each of num_init restarts).
        opt_config['bounds'] = [(0., 1.)] * self.proj_dim * self.num_init
        for j in range(maxiter):
            print('iteration: ', j)
            # Fresh TF graph and freshly initialized model every iteration.
            self.reset_graph()
            km, gpm = self.initialize_modelM()
            # Train NN and GP hyper-parameters. On optimizer failure fall back
            # to the stored hyper-parameters instead of aborting the run.
            try:
                gpflow.train.ScipyOptimizer().minimize(gpm)
            except Exception:
                # Fix: was a bare `except:`, which would also swallow
                # SystemExit/KeyboardInterrupt; narrow to Exception.
                self.reset_hyps(gpm)
            Xnn = gpm.nn.np_forward(self.Xnorm)
            self.Xnn = Xnn
            self.hyps.append(self.get_hyps(gpm))

            # GP in the projected space, used only to drive the acquisition;
            # it inherits the trained likelihood noise from gpm.
            gp_acq = Stable_GPR(X=np.copy(Xnn), Y=np.copy(self.Ynorm), kern=km)
            gp_acq.likelihood.variance = gpm.likelihood.variance.read_value()

            # Optimize the acquisition function within the [0, 1] bounds.
            kwargs = {'ymin': self.Ynorm.min()}
            acquisition = initialize_acquisition(loss=self.loss, gpmodel=gp_acq, **kwargs)
            x_proj_tp1, acq_tp1 = self.minimize_acquisition(acquisition, opt_config)

            # Decoder models mapping projected points back to the input space.
            k_list, gp_list = initialize_m_models(x=np.copy(Xnn), y=np.copy(self.Xnorm),
                                                  input_dim=self.proj_dim,
                                                  model='decoder',
                                                  kernel='Matern52',
                                                  ARD=True,
                                                  nn=None,
                                                  decomp=self.decomposition)
            # Transform the optimal point into the original input space and
            # clip to the [0, 1] bound for feasibility.
            x_tp1 = self.generate_x(x_proj_tp1, gp_list)

            y_tp1 = self.evaluate(x_tp1)
            self.update_data(x_tp1, y_tp1)
        return self.data_x, self.data_y, self.hyps
Example no. 4
0
    def initialize_modelM(self):
        """Create the encoder GP together with its projection network.

        Returns:
            tuple: ``(kernm, gpm, nn)`` — kernel, encoder GP, and the network,
            returned so the caller can reuse the same network across BO
            iterations instead of re-initializing it.
        """
        net = Ort_NN(
            dims=[self.Xnorm.shape[1], 20, self.proj_dim],
            N=0,
            proj_dim=0,
            name=None)

        kernm, gpm = initialize_m_models(
            x=np.copy(self.Xnorm),
            y=np.copy(self.Ynorm),
            input_dim=self.proj_dim,
            model='encoder',
            kernel='Matern52',
            ARD=True,
            nn=net,
            decomp=None)

        # Initial observation-noise variance for the encoder GP.
        gpm.likelihood.variance = 0.01

        return kernm, gpm, net
Example no. 5
0
    def initialize_modelM(self):
        """Build a deeper (5-10-5 hidden) network and the 'joint_Full' model.

        Returns:
            tuple: ``(k_list, gp_nnjoint, nn)``; the last kernel in ``k_list``
            is the Manifold GP kernel.
        """
        encoder = NN(
            dims=[self.Xnorm.shape[1], 5, 10, 5, self.proj_dim],
            N=0,
            proj_dim=0,
            name=None)

        # Last kernel in the returned list is the Manifold GP kernel.
        k_list, gp_nnjoint = initialize_m_models(
            x=np.copy(self.Xnorm),
            y=np.copy(self.Ynorm),
            input_dim=self.proj_dim,
            model='joint_Full',
            kernel='Matern52',
            ARD=True,
            nn=encoder)

        # Moderate observation noise for the joint model.
        gp_nnjoint.likelihood.variance = 0.1

        return k_list, gp_nnjoint, encoder
Example no. 6
0
    def run(self, maxiter=20):
        """Run the Bayesian-optimization loop for ``maxiter`` iterations.

        Per iteration: reset the TF graph, rebuild encoder GP + network, embed
        the data, optimize the acquisition in the projected space, then build
        and train a joint decoder/encoder model (NN_MoGPR) before decoding the
        candidate back to the input space, evaluating it, and updating the
        data set.

        Args:
            maxiter: number of BO iterations to perform.

        Returns:
            tuple: ``(data_x, data_y, hyps)`` — evaluated inputs, objective
            values, and the per-iteration hyper-parameter trace.
        """
        opt_config = import_attr(
            'tfbo/configurations',
            attribute='acquisition_opt')  # check import configuration
        # Acquisition is optimized inside the unit hypercube of the projected
        # space (proj_dim coordinates per each of num_init restarts).
        opt_config['bounds'] = [(0., 1.)] * self.proj_dim * self.num_init
        for j in range(maxiter):
            print('iteration: ', j)
            # initialize model on a fresh graph each iteration
            self.reset_graph()
            km, gpm, nn = self.initialize_modelM()
            # train nn and gp hyper-parameters
            # try:
            #     gpflow.train.ScipyOptimizer().minimize(gpm)
            # except:
            #     self.reset_hyps(gpm)
            # NOTE(review): the encoder training above is disabled; the
            # likelihood noise is pinned instead and training happens later
            # on gp_nnjoint only.
            gpm.likelihood.variance = 0.001
            # Embed the normalized inputs with the (untrained) network.
            Xnn = gpm.nn.np_forward(self.Xnorm)
            self.Xnn = Xnn
            self.hyps.append(self.get_hyps(gpm))

            # GP in the projected space used only for the acquisition; it
            # copies the pinned likelihood noise from gpm.
            gp_acq = Stable_GPR(X=np.copy(Xnn), Y=np.copy(self.Ynorm), kern=km)
            gp_acq.likelihood.variance = gpm.likelihood.variance.read_value()
            # gp_acq.as_pandas_table()

            # optimize the acquisition function within 0,1 bounds
            kwargs = {'ymin': self.Ynorm.min()}
            acquisition = initialize_acquisition(loss=self.loss,
                                                 gpmodel=gp_acq,
                                                 **kwargs)
            x_proj_tp1, acq_tp1 = self.minimize_acquisition(
                acquisition, opt_config)

            # Decoder models mapping projected points back to the input space.
            k_list, gp_list = initialize_m_models(x=np.copy(Xnn),
                                                  y=np.copy(self.Xnorm),
                                                  input_dim=self.proj_dim,
                                                  model='decoder',
                                                  kernel='Matern52',
                                                  ARD=True,
                                                  nn=None,
                                                  decomp=self.decomposition)

            # Stack decoder kernels plus the encoder kernel into one joint
            # multi-output model sharing the same network.
            k_list.append(km)
            kern_joint = Kstack(k_list)
            # Mo_dim=3: presumably the number of output groups — TODO confirm
            # against NN_MoGPR's definition.
            gp_nnjoint = NN_MoGPR(X=np.copy(self.Xnorm),
                                  Y=np.copy(self.Ynorm),
                                  kern=kern_joint,
                                  nn=nn,
                                  Mo_dim=int(3))
            gp_nnjoint.likelihood.variance = 0.001

            # NOTE(review): fmean_*/fvar_* are computed but never used —
            # likely left over from debugging the two models' agreement.
            fmean_acq, fvar_acq = gp_acq.predict_f(Xnn)
            fmean_joint, fvar_joint = gp_nnjoint.predict_f(Xnn)

            # jitter, jitter_mat = gp_nnjoint.compute_log_likelihood()

            # Train the joint model (this also updates the shared network).
            gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)

            # Re-embed with the now-trained network.
            Xnn = gp_nnjoint.nn.np_forward(self.Xnorm)
            self.Xnn = Xnn

            # liks, lik = gp_nnjoint.compute_log_likelihood()
            # err_i = [np.abs(gp_list[i].compute_log_likelihood() - liks[i, 0]) for i in range(len(gp_list))]
            # errs = np.array(err_i)
            # transform the optimal point into the original input space and clip to 0,1 bound for feasibility
            x_tp1 = self.generate_x(x_proj_tp1, gp_list, gp_nnjoint)

            y_tp1 = self.evaluate(x_tp1)
            self.update_data(x_tp1, y_tp1)
            # self.reset_graph()
            # tf.reset_default_graph()
            # print(len(tf.all_variables()))
            # print(len(tf.get_default_graph().get_operations()))
        return self.data_x, self.data_y, self.hyps