Code example #1
    def optimize_i(self, gp_i, i, opt_config):
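        # Fast-fit the GP for the i-th block of input dimensions, record its
        # hyper-parameters, then minimize the acquisition over that block.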
        # gp_i = self.reset_hyps(gp_i)
        # gp_i = self.fit_gp(gp_i)
        gp_i = self.fit_gp_fast(gp_i)
        # try:
        #     # gp_i.kern.lengthscales = np.array([1.,2.])
        #     # gp_i.kern.variance = 3.
        #     # gp_i.likelihood.variance = 4.
        #     # o = gpflow.train.ScipyOptimizer()
        #     # scipyopt = o.make_optimize_tensor(gp_i)
        #     # scipyopt.minimize(maxiter=100)
        #     # gp_i.kern.variance = 5.
        #     # gp_i.likelihood.variance = 0.0001
        #     # gp_i = self.fit_gp(gp_i)
        #     gpflow.train.ScipyOptimizer().minimize(gp_i)
        # except:
        #     gp_i = self.reset_hyps(gp_i)
        self.hyps_temp.append(self.get_hyps(gp_i))

        kwargs = {'ymin': self.Ynorm.min()}
        acquisition = initialize_acquisition(loss=self.loss, gpmodel=gp_i, **kwargs)
        acquisition_normalized = lambda x: self.acquisition_norm(acquisition, x,
                                                                 np.copy(self.X_mean[:, self.decomposition[i]]),
                                                                 np.copy(self.X_std[:, self.decomposition[i]]))
        x_opt, acq_opt = self.minimize_acquisition(acquisition_normalized, opt_config)
        return x_opt, acq_opt
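The self.acquisition_norm wrapper used above is not shown in these snippets. A commented-out standalone version inside code example #13 suggests it normalizes the optimizer's candidate points with the stored mean/std before evaluating the acquisition; the sketch below is reconstructed from that comment (the three-part return value is carried over from it):

    def acquisition_norm(self, acquisition, x, X_proj_mean, X_proj_std):
        # Normalize candidate inputs with the projection statistics, then
        # evaluate the acquisition on the normalized points.
        N = x.shape[0]
        X_proj_mean_rep = np.tile(X_proj_mean, reps=[N, 1])
        X_proj_std_rep = np.tile(X_proj_std, reps=[N, 1])
        xnorm = (x - X_proj_mean_rep) / X_proj_std_rep
        acq_norm, acq_sum, acq_grad = acquisition(xnorm)
        return acq_norm, acq_sum, acq_grad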
Code example #2
    def run(self, maxiters=20):
        # opt_config = import_attr('tfbo/configurations', attribute='acquisition_opt')
        opt_config = import_attr('tfbo/configurations', attribute='KLacquisition_opt')
        for j in range(maxiters):
            print('iteration: ', j)

            self.reset_graph()

            # initialize model
            k_list, gp_nnjoint, nn = self.initialize_modelM()
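            # gp_nnjoint couples a neural-network embedding (nn) with a GP;
            # the stacked kernels in k_list are indexed via kern.kernels.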
            gp_nnjoint.kern.kernels[0].variance = 1.

            try:
                gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
            except Exception:
                try:
                    gp_nnjoint.jitter = 1e-03
                    gp_nnjoint.noise_variance = 1e-03
                    gp_nnjoint.kern.kernels[1].lengthscales = np.ones(shape=[self.proj_dim])
                    gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
                except Exception:
                    print('Failure in optimization of hyper-parameters, reset to standard ones')

            Xnn = gp_nnjoint.nn.np_forward(self.Xnorm)
            # Xnn = gp_nnjoint.nn.np_forward(self.X_inf)
            self.Xnn = Xnn
            self.hyps.append(self.get_hyps(gp_nnjoint))

            # mgp = gpflow.models.GPR(X=np.copy(Xnn), Y=np.copy(self.Ynorm), kern=k_list[1])
            # mgp.likelihood.variance = gp_nnjoint.likelihood.variance.value
            # lik_mgp = mgp.compute_log_likelihood()
            # fmean_test, fvar_test = mgp.predict_f(Xnn)
            # lik_gpnnjoint = gp_nnjoint.compute_log_likelihood()
            # fmean_f, fvar_f = gp_nnjoint.predict_f(Xnn)
            # err_lik = np.abs(lik_mgp - lik_gpnnjoint)
            # err_posterior_mean = np.max(np.abs(fmean_test - fmean_f))
            # err_mean = np.max(np.abs(fmean_f - self.Ynorm))
            # err_posterior_var = np.max(np.abs(fvar_test - fvar_f))
            # Xnew = np.tile(np.linspace(start=0., stop=1., num=500)[:, None], [1, Xnn.shape[1]])
            # uZ, uZ_test, prior_meanXnn, uZnew = gp_nnjoint.test_log_likelihood(Xnew)

            # optimize the acquisition function within 0,1 bounds
            kwargs = {'ymin': self.Ynorm.min()}
            acquisition = initialize_acquisition(loss=self.loss, gpmodel=gp_nnjoint, **kwargs)

            if j % 10 == 0:
                self.learnLipschitz(gp_nnjoint)     # learn new Lipschitz constant every 10 iters

            opt_config = self.NLconstraint(opt_config)
            x_proj_tp1, acq_tp1 = self.minimize_acquisition(acquisition, opt_config)

            x_tp1 = self.generate_x(x_proj_tp1, gp_nnjoint)

            y_tp1 = self.evaluate(x_tp1)
            self.update_data(x_tp1, y_tp1)
        lik = []  # note: returned empty, log-likelihoods are not tracked here

        return self.data_x, self.data_y, self.hyps, lik
Code example #3
File: add_bo_optimizer.py  Project: rm4216/BayesOpt
            def opt_i(self, gp_i, gpsum):
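                # Tie the i-th GP's noise variance to the estimate from the
                # summed model gpsum.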
                gp_i.likelihood.variance = gpsum.likelihood.variance

                kwargs = {'ymin': self.Ynorm.min()}
                acquisition = initialize_acquisition(loss=self.loss, gpmodel=gp_i, **kwargs)    # alpha(x)
                acquisition_normalized = lambda x: \
                    self.acquisition_norm(acquisition, x, np.copy(self.X_mean), np.copy(self.X_std))     # alpha(xnorm)
                # x_opt, acq_opt = self.minimize_acquisition(acquisition_normalized, opt_config)
                try:
                    x_opt, acq_opt = self.minimize_acquisition(acquisition_normalized, opt_config)
                except Exception:
                    gp_i = self.reset_hyps(gp_i)
                    x_opt, acq_opt = self.minimize_acquisition(acquisition_normalized, opt_config)
                return np.copy(x_opt[:, self.decomposition[gp_i.i[0]]]), acq_opt   # x^(i)_opt, alpha(x^(i)_opt)
Code example #4
    def run(self, maxiters=20):
        opt_config = import_attr('tfbo/configurations',
                                 attribute='acquisition_opt')
        for j in range(maxiters):
            print('iteration: ', j)

            self.reset_graph()

            # initialize model
            k_list, gp_nnjoint, nn = self.initialize_modelM()
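            # Same joint NN+GP model as in example #2; this variant bounds
            # the search in the latent space (update_latent_bounds) instead
            # of imposing a Lipschitz constraint.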
            gp_nnjoint.kern.kernels[1].variance = 10.

            try:
                gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
            except Exception:
                try:
                    gp_nnjoint.kern.kernels[1].lengthscales = np.ones(
                        shape=[self.proj_dim])
                    gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
                except Exception:
                    print(
                        'Failure in optimization of hyper-parameters, reset to standard ones'
                    )

            Xnn = gp_nnjoint.nn.np_forward(self.Xnorm)
            self.Xnn = Xnn
            self.hyps.append(self.get_hyps(gp_nnjoint))

            # optimize the acquisition function within 0,1 bounds
            kwargs = {'ymin': self.Ynorm.min()}
            acquisition = initialize_acquisition(loss=self.loss,
                                                 gpmodel=gp_nnjoint,
                                                 **kwargs)

            opt_config = self.update_latent_bounds(opt_config)
            x_proj_tp1, acq_tp1 = self.minimize_acquisition(
                acquisition, opt_config)

            x_tp1 = self.generate_x(x_proj_tp1, gp_nnjoint)

            y_tp1 = self.evaluate(x_tp1)
            self.update_data(x_tp1, y_tp1)
        lik = []

        return self.data_x, self.data_y, self.hyps, lik
Code example #5
    def optimize_ith_model(self, gp_i, i, opt_config):
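        # Refit the i-th GP, log its hyper-parameters, then minimize the
        # acquisition restricted to the dimensions in decomposition[i].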
        # gp_i = self.reset_hyps(gp_i)
        gp_i = self.fit_gp(gp_i)
        # try:
        #     gpflow.train.ScipyOptimizer().minimize(gp_i)
        # except:
        #     gp_i = self.reset_hyps(gp_i)
        self.hyps.append(self.get_hyps(gp_i))

        kwargs = {'ymin': self.Ynorm.min()}
        acquisition = initialize_acquisition(self.loss, gp_i, **kwargs)
        acquisition_norm = lambda x: \
            self.acquisition_norm(acquisition=acquisition, x=x,
                                  X_proj_mean=np.copy(self.X_mean[:, self.decomposition[i]]),
                                  X_proj_std=np.copy(self.X_std[:, self.decomposition[i]]))
        x_opt, acq_opt = self.minimize_acquisition(acquisition_norm,
                                                   opt_config)
        return x_opt, acq_opt
Code example #6
    def run(self, maxiter=20):
        opt_config = import_attr('tfbo/configurations', attribute='acquisition_opt')  # check import configuration
        opt_config['bounds'] = [(0., 1.)] * self.proj_dim * self.num_init
        for j in range(maxiter):
            print('iteration: ', j)
            # initialize model
            self.reset_graph()
            km, gpm = self.initialize_modelM()
            # train nn and gp hyper-parameters
            try:
                gpflow.train.ScipyOptimizer().minimize(gpm)
            except Exception:
                gpm = self.reset_hyps(gpm)
            # self.reset_hyps(gpm)
            Xnn = gpm.nn.np_forward(self.Xnorm)
            self.Xnn = Xnn
            self.hyps.append(self.get_hyps(gpm))

            gp_acq = Stable_GPR(X=np.copy(Xnn), Y=np.copy(self.Ynorm), kern=km)
            gp_acq.likelihood.variance = gpm.likelihood.variance.read_value()
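            # gp_acq is a separate GP on the embedded inputs that serves as
            # the acquisition model, inheriting the trained noise level.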
            # gp_acq.as_pandas_table()

            # optimize the acquisition function within 0,1 bounds
            kwargs = {'ymin': self.Ynorm.min()}
            acquisition = initialize_acquisition(loss=self.loss, gpmodel=gp_acq, **kwargs)
            x_proj_tp1, acq_tp1 = self.minimize_acquisition(acquisition, opt_config)

            k_list, gp_list = initialize_m_models(x=np.copy(Xnn), y=np.copy(self.Xnorm),
                                                  input_dim=self.proj_dim,
                                                  model='decoder',
                                                  kernel='Matern52',
                                                  ARD=True,
                                                  nn=None,
                                                  decomp=self.decomposition)
            # transform the optimal point into the original input space and clip to 0,1 bound for feasibility
            x_tp1 = self.generate_x(x_proj_tp1, gp_list)

            y_tp1 = self.evaluate(x_tp1)
            self.update_data(x_tp1, y_tp1)
            # self.reset_graph()
            # tf.reset_default_graph()
            # print(len(tf.all_variables()))
            # print(len(tf.get_default_graph().get_operations()))
        return self.data_x, self.data_y, self.hyps
Code example #7
    def run(self, maxiter=20):
        opt_config = import_attr(
            'tfbo/configurations',
            attribute='acquisition_opt')  # check import configuration
        opt_config['bounds'] = [(0., 1.)] * self.proj_dim * self.num_init
        for j in range(maxiter):
            print('iteration: ', j)
            # initialize model
            self.reset_graph()
            km, gpm, nn = self.initialize_modelM()
            # train nn and gp hyper-parameters
            # try:
            #     gpflow.train.ScipyOptimizer().minimize(gpm)
            # except:
            #     self.reset_hyps(gpm)
            gpm.likelihood.variance = 0.001
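            # Hyper-parameter training is disabled in this variant; the noise
            # variance is fixed to a small value instead.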
            Xnn = gpm.nn.np_forward(self.Xnorm)
            self.Xnn = Xnn
            self.hyps.append(self.get_hyps(gpm))

            gp_acq = Stable_GPR(X=np.copy(Xnn), Y=np.copy(self.Ynorm), kern=km)
            gp_acq.likelihood.variance = gpm.likelihood.variance.read_value()
            # gp_acq.as_pandas_table()

            # optimize the acquisition function within 0,1 bounds
            kwargs = {'ymin': self.Ynorm.min()}
            acquisition = initialize_acquisition(loss=self.loss,
                                                 gpmodel=gp_acq,
                                                 **kwargs)
            x_proj_tp1, acq_tp1 = self.minimize_acquisition(
                acquisition, opt_config)

            k_list, gp_list = initialize_m_models(x=np.copy(Xnn),
                                                  y=np.copy(self.Xnorm),
                                                  input_dim=self.proj_dim,
                                                  model='decoder',
                                                  kernel='Matern52',
                                                  ARD=True,
                                                  nn=None,
                                                  decomp=self.decomposition)

            k_list.append(km)
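            # Stack the decoder kernels together with the objective kernel km
            # and build the joint multi-output NN+GP model on the full data.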
            kern_joint = Kstack(k_list)
            gp_nnjoint = NN_MoGPR(X=np.copy(self.Xnorm),
                                  Y=np.copy(self.Ynorm),
                                  kern=kern_joint,
                                  nn=nn,
                                  Mo_dim=int(3))
            gp_nnjoint.likelihood.variance = 0.001

            fmean_acq, fvar_acq = gp_acq.predict_f(Xnn)
            fmean_joint, fvar_joint = gp_nnjoint.predict_f(Xnn)

            # jitter, jitter_mat = gp_nnjoint.compute_log_likelihood()

            gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)

            Xnn = gp_nnjoint.nn.np_forward(self.Xnorm)
            self.Xnn = Xnn

            # liks, lik = gp_nnjoint.compute_log_likelihood()
            # err_i = [np.abs(gp_list[i].compute_log_likelihood() - liks[i, 0]) for i in range(len(gp_list))]
            # errs = np.array(err_i)
            # transform the optimal point into the original input space and clip to 0,1 bound for feasibility
            x_tp1 = self.generate_x(x_proj_tp1, gp_list, gp_nnjoint)

            y_tp1 = self.evaluate(x_tp1)
            self.update_data(x_tp1, y_tp1)
            # self.reset_graph()
            # tf.reset_default_graph()
            # print(len(tf.all_variables()))
            # print(len(tf.get_default_graph().get_operations()))
        return self.data_x, self.data_y, self.hyps
Code example #8
    def run(self, maxiters=20):
        opt_config = import_attr(
            'tfbo/configurations',
            attribute='KLacquisition_opt')  # check import configuration
        for j in range(maxiters):
            print('iteration: ', j)

            self.reset_graph()

            # initialize model
            k_list, gp_nnjoint, nn = self.initialize_modelM()
            gp_nnjoint.kern.kernels[1].variance = 10.
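            # This variant runs on probit-transformed inputs (Xprobit) and
            # constrains the acquisition with a learned Lipschitz bound below.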

            # # test likelihood
            # lik_nn = gp_nnjoint.compute_log_likelihood()
            # Xnn0 = gp_nnjoint.nn.np_forward(self.Xprobit)
            # kc = k_list[1] * k_list[0]
            # Xnn_test = np.concatenate(
            #     [np.concatenate([Xnn0, np.ones(shape=[np.shape(Xnn0)[0], 1]) * i], axis=1)
            #      for i in range(gp_nnjoint.Mo_dim)], axis=0)
            # Y_test = np.concatenate([self.Xprobit[:, i][:, None] for i in range(gp_nnjoint.Mo_dim)], axis=0)
            # gp_test = gpflow.models.GPR(X=Xnn_test, Y=Y_test, kern=kc)
            # gp_test.likelihood.variance = 1e-06
            # lik_test = gp_test.compute_log_likelihood()
            # gpm = gpflow.models.GPR(X=Xnn0, Y=self.Ynorm, kern=k_list[2])
            # gpm.likelihood.variance = 1e-06
            # likm = gpm.compute_log_likelihood()
            # lik_err = np.abs(lik_nn-lik_test-likm)

            # gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
            try:
                gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
            except Exception:
                try:
                    gp_nnjoint.likelihood.variance = 1e-03
                    gp_nnjoint.kern.kernels[1].lengthscales = np.ones(
                        shape=[self.proj_dim])
                    gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
                except Exception:
                    print(
                        'Failure in optimization of hyper-parameters, reset to standard ones'
                    )

            Xnn = gp_nnjoint.nn.np_forward(self.Xprobit)
            self.Xnn = Xnn
            self.hyps.append(self.get_hyps(gp_nnjoint))

            # # test Manifold GP predictions
            # fmean, fvar = gp_nnjoint.predict_f(Xnn)

            # optimize the acquisition function within 0,1 bounds
            kwargs = {'ymin': self.Ynorm.min()}
            acquisition = initialize_acquisition(loss=self.loss,
                                                 gpmodel=gp_nnjoint,
                                                 **kwargs)
            # opt_config = self.update_latent_bounds(opt_config)
            if j % 10 == 0:
                self.learnLipschitz(
                    gp_nnjoint)  # learn new Lipschitz constant every 10 iters
            opt_config = self.NLconstraint(opt_config)
            x_proj_tp1, acq_tp1 = self.minimize_acquisition(
                acquisition, opt_config)

            x_tp1 = self.generate_x(x_proj_tp1, gp_nnjoint)

            y_tp1 = self.evaluate(x_tp1)
            self.update_data(x_tp1, y_tp1)
        lik = []

        return self.data_x, self.data_y, self.hyps, lik
Code example #9
    def run(self, maxiters=20):
        opt_config = import_attr('tfbo/configurations', attribute='acquisition_opt')
        for j in range(maxiters):
            print('iteration: ', j)

            self.reset_graph()

            # initialize model
            k_list, gp_nnjoint, nn = self.initialize_modelM()
            gp_nnjoint.kern.kernels[0].variance = 1.
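            # (The commented block below cross-checks the joint model's
            # likelihood, posterior and input reconstruction against
            # equivalent standalone GPR models.)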

            # # W_0_bt = np.copy(gp_nnjoint.nn.W_0.read_value())
            # # W_1_bt = np.copy(gp_nnjoint.nn.W_1.read_value())
            # # b_0_bt = np.copy(gp_nnjoint.nn.b_0.read_value())
            # # b_1_bt = np.copy(gp_nnjoint.nn.b_1.read_value())
            # # X_mean = np.mean(self.X_inf, axis=0, keepdims=True)
            # # X_std = np.std(self.X_inf, axis=0, keepdims=True)
            # # Xnorm = (self.X_inf - X_mean) / X_std
            # St = (1 / self.Xnorm.shape[0]) * np.matmul(self.Xnorm.transpose(), self.Xnorm)
            # l_St, q_St = np.linalg.eigh(St)
            # assert np.max(np.abs(np.matmul(np.matmul(q_St, np.diag(l_St)), q_St.transpose()) - St)) < 1e-09
            # assert np.max(np.abs(np.eye(q_St.shape[0]) - np.matmul(q_St, q_St.transpose()))) < 1e-09
            # assert np.max(np.abs(np.eye(q_St.shape[0]) - np.matmul(q_St.transpose(), q_St))) < 1e-09
            # U_d = np.copy(q_St[:, -self.proj_dim:])
            # max_evals = np.copy(l_St[-self.proj_dim:][None])
            # # assert np.max(np.abs(np.matmul(St, U_d) / max_evals - U_d)) < 1e-09
            # Y_d = np.matmul(U_d.transpose(), self.Xnorm.transpose())
            # gp_nnjoint.nn.W_0 = np.copy(U_d.transpose())

            try:
                gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
            except Exception:
                try:
                    gp_nnjoint.jitter = 1e-03
                    gp_nnjoint.noise_variance = 1e-03
                    gp_nnjoint.kern.kernels[1].lengthscales = np.ones(shape=[self.proj_dim])
                    gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
                except Exception:
                    print('Failure in optimization of hyper-parameters, reset to standard ones')

            Xnn = gp_nnjoint.nn.np_forward(self.Xnorm)
            # Xnn = gp_nnjoint.nn.np_forward(self.X_inf)
            self.Xnn = Xnn
            self.hyps.append(self.get_hyps(gp_nnjoint))

            # W_0_at = np.copy(gp_nnjoint.nn.W_0.read_value())
            # W_1_at = np.copy(gp_nnjoint.nn.W_1.read_value())
            # b_0_at = np.copy(gp_nnjoint.nn.b_0.read_value())
            # b_1_at = np.copy(gp_nnjoint.nn.b_1.read_value())
            #
            # mgp = gpflow.models.GPR(X=np.copy(Xnn), Y=np.copy(self.Ynorm), kern=k_list[1])
            # mgp.likelihood.variance = np.copy(gp_nnjoint.likelihood.variance.value) + np.copy(gp_nnjoint.jitter.read_value())
            # lik_mgp = mgp.compute_log_likelihood()
            # fmean_test, fvar_test = mgp.predict_f(Xnn)
            # lik_gpnnjoint = gp_nnjoint.compute_log_likelihood()
            # lik_gpnnjoint0 = gp_nnjoint.compute_log_likelihood()
            # err_repeat_lik = np.abs(lik_gpnnjoint - lik_gpnnjoint0)
            # fmean_f, fvar_f = gp_nnjoint.predict_f(Xnn)
            # # err_lik = np.abs(lik_mgp - lik_gpnnjoint)
            # err_posterior_mean = np.max(np.abs(fmean_test - fmean_f))
            # err_mean = np.max(np.abs(fmean_f - self.Ynorm))
            # err_posterior_var = np.max(np.abs(fvar_test - fvar_f))
            # Xnew = np.tile(np.linspace(start=0., stop=1., num=500)[:, None], [1, Xnn.shape[1]])
            # logpdf_test, logpdfm_test, uZ, uZ_test_reshape, uZnew, Kprior_inv, Kprior, check_same_as_X_vec, X_vec, SMarg_inv, SMarg, Vprior_inv, Vprior, LK_invT, logpdfm_test_eigh, e_Km, v_Km, Km_test, d_eigh, Km_component, Lm_component = gp_nnjoint.test_log_likelihood(Xnew)
            # logpdf_test0, logpdfm_test0, uZ0, uZ_test_reshape0, uZnew0, Kprior_inv0, Kprior0, check_same_as_X_vec0, X_vec0, SMarg_inv0, SMarg0, Vprior_inv0, Vprior0, LK_invT0, logpdfm_test_eigh0, e_Km0, v_Km0, Km_test0, d_eigh0, Km_component0, Lm_component0 = gp_nnjoint.test_log_likelihood(Xnew)
            #
            # # check same values are returned over multiple calls
            # assert logpdf_test == logpdf_test0 and logpdfm_test == logpdfm_test0 and np.all(uZ == uZ0) and np.all(
            #     uZ_test_reshape == uZ_test_reshape0) and np.all(Kprior_inv == Kprior_inv0) and np.all(
            #     Kprior == Kprior0) and np.all(check_same_as_X_vec == check_same_as_X_vec0) and np.all(
            #     X_vec == X_vec0) and np.all(SMarg_inv == SMarg_inv0) and np.all(SMarg == SMarg0) and np.all(
            #     Vprior_inv == Vprior_inv0) and np.all(Vprior == Vprior0) and np.all(LK_invT == LK_invT0)
            # assert np.all(logpdfm_test_eigh == logpdfm_test_eigh0) and np.all(e_Km == e_Km0) and np.all(
            #     v_Km == v_Km0) and np.all(Km_test == Km_test0) and np.all(d_eigh == d_eigh0)
            # assert np.all(Km_component == Km_component0) and np.all(Lm_component == Lm_component0)
            #
            # kernBLR = LinearGeneralized(input_dim=self.Mo_dim, L_p=LK_invT)
            # gpBLR = gpflow.models.GPR(X=np.copy(uZ.transpose()), Y=np.copy(self.X_inf), kern=kernBLR)
            # # gpBLR = gpflow.models.GPR(X=np.copy(uZ.transpose()), Y=np.copy(self.Xnorm), kern=kernBLR)
            # gpBLR.likelihood.variance = np.copy(gp_nnjoint.noise_variance.read_value())
            # lik_BLR = gpBLR.compute_log_likelihood()
            # err_logpdfBLR = np.abs(lik_BLR - logpdf_test)
            #
            # # perform checkings listed in "_test_likelihood" method
            # threshold = 1e-04
            # assert np.all(uZ_test_reshape == uZ) and np.max(
            #     np.abs(np.matmul(Kprior_inv, Kprior) - np.eye(Kprior.shape[0]))) < threshold and np.all(
            #     check_same_as_X_vec == X_vec) and np.max(
            #     np.abs(np.matmul(SMarg_inv, SMarg) - np.eye(SMarg.shape[0]))) < threshold and np.max(
            #     np.abs(np.matmul(Vprior_inv, Vprior) - np.eye(Vprior.shape[0]))) < threshold and np.abs(
            #     logpdfm_test - lik_mgp) < threshold and np.abs(lik_BLR - logpdf_test) < threshold and np.abs(
            #     lik_gpnnjoint - lik_BLR - lik_mgp) < threshold
            #
            # # uZsT_cov = gp_nnjoint.test_predict_x(self.Xnn[0, :][None])
            # reconstructed_x = []
            # for i in range(self.Xnn.shape[0]):
            #     Xmean_i, Spost, V = gp_nnjoint.predict_x(self.Xnn[i, :][None])
            #     reconstructed_x.append(Xmean_i)
            # X_inf_reconstructed = np.concatenate(reconstructed_x, axis=0)
            # err_reconstruction = np.max(np.abs(X_inf_reconstructed - self.X_inf))
            # # err_reconstruction = np.max(np.abs(X_inf_reconstructed - self.Xnorm))
            #
            # LSxx_invT, MNT, post_meanT, post_S_test, Vp_test, uZs_test, uZ_test = gp_nnjoint.test_predict_x(self.Xnn[0, :][None])
            # kernBLR_post = LinearGeneralized(input_dim=self.Mo_dim, L_p=LSxx_invT)
            # mean_function_post = gpflow.mean_functions.Linear(A=MNT, b=np.zeros(1))
            # gpBLR_post = gpflow.models.GPR(X=np.copy(uZ.transpose()), Y=np.copy(self.X_inf), kern=kernBLR_post, mean_function=mean_function_post)
            # # gpBLR_post = gpflow.models.GPR(X=np.copy(uZ.transpose()), Y=np.copy(self.Xnorm), kern=kernBLR_post, mean_function=mean_function_post)
            # gpBLR_post.likelihood.variance = np.copy(gp_nnjoint.noise_variance.read_value())
            # X_inf_rec_test, X_inf_var_test = gpBLR_post.predict_f(np.copy(uZ.transpose()))
            # assert np.max(np.abs(X_inf_reconstructed - X_inf_rec_test)) < 1e-01


            # optimize the acquisition function within 0,1 bounds
            kwargs = {'ymin': self.Ynorm.min()}
            acquisition = initialize_acquisition(loss=self.loss, gpmodel=gp_nnjoint, **kwargs)

            opt_config = self.update_latent_bounds(opt_config)
            x_proj_tp1, acq_tp1 = self.minimize_acquisition(acquisition, opt_config)

            x_tp1 = self.generate_x(x_proj_tp1, gp_nnjoint)

            y_tp1 = self.evaluate(x_tp1)
            self.update_data(x_tp1, y_tp1)
        lik = []

        return self.data_x, self.data_y, self.hyps, lik
Code example #10
    def run(self, maxiters=20):
        opt_config = import_attr('tfbo/configurations',
                                 attribute='acquisition_opt')
        for j in range(maxiters):
            print('iteration: ', j)

            self.reset_graph()

            # initialize model
            k_list, gp_nnjoint, nn = self.initialize_modelM()
            gp_nnjoint.kern.kernels[1].variance = 10.
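            # As in example #8, inputs are probit-transformed; here the
            # latent search bounds are updated each iteration instead of a
            # Lipschitz constraint.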

            try:
                gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
            except Exception:
                try:
                    gp_nnjoint.likelihood.variance = 1e-03
                    gp_nnjoint.kern.kernels[1].lengthscales = np.ones(
                        shape=[self.proj_dim])
                    gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
                except Exception:
                    print(
                        'Failure in optimization of hyper-parameters, reset to standard ones'
                    )

            Xnn = gp_nnjoint.nn.np_forward(self.Xprobit)
            self.Xnn = Xnn
            self.hyps.append(self.get_hyps(gp_nnjoint))

            # # test Manifold GP predictions
            # fmean, fvar = gp_nnjoint.predict_f(Xnn)

            # # A bit of testing
            # # fmeanX, mean, y_vec, Xnorm_out, Xnorm_test = gp_nnjoint.predict_x(Xnn)
            # fmeanX = gp_nnjoint.predict_x(Xnn)
            # sampleX = gp_nnjoint.sample_x(Xnn)
            # # vec_Xnorm = np.copy(y_vec)
            # # vec_indices = np.concatenate([np.ones(shape=[Xnn.shape[0], 1]) * i for i in range(self.Mo_dim)], axis=0)
            # # Ymogp = vec_Xnorm
            # Ymogp = np.reshape(np.transpose(self.Xprobit), newshape=[self.Mo_dim * Xnn.shape[0], 1])
            # Xmogp = np.concatenate(
            #     [np.concatenate([np.copy(Xnn), np.ones(shape=[Xnn.shape[0], 1]) * i], axis=1) for i in
            #      range(self.Mo_dim)], axis=0)
            # kernmogp = k_list[0] * k_list[1]
            # mogp = gpflow.models.GPR(X=Xmogp, Y=Ymogp, kern=kernmogp)
            # mogp.likelihood.variance = gp_nnjoint.likelihood.variance.value
            # fmean_mogp, fvar_mogp = mogp.predict_f(Xmogp)
            # fmean_mogp0 = np.reshape(np.copy(fmean_mogp[:, 0]), newshape=[self.Mo_dim, Xnn.shape[0]]).transpose()
            # err_fmean = np.max(np.abs(fmean_mogp0 - self.Xprobit))
            # lik_mogp = mogp.compute_log_likelihood()
            # lik_gpnnjoint = gp_nnjoint.compute_log_likelihood()
            # K_test, B_test, y_vec_test, l_k_test, q_k_test, l_b_test, q_b_test, QbQkX_vec_test, kron_diag_test, Inv_vec_test, alpha_test = gp_nnjoint.test_log_likelihood()
            # Knn = gp_nnjoint.kern.kernels[1].compute_K_symm(Xnn)
            # err_K = np.max(np.abs(Knn - K_test))
            # Bnn = np.matmul(gp_nnjoint.kern.kernels[0].W.value,
            #                 np.transpose(gp_nnjoint.kern.kernels[0].W.value)) + np.diag(
            #     gp_nnjoint.kern.kernels[0].kappa.value)
            # err_B = np.max(np.abs(Bnn - B_test))
            # X_vec = np.reshape(np.transpose(self.Xprobit), newshape=[self.Mo_dim * self.Xprobit.shape[0], 1])
            # l_k, q_k = np.linalg.eigh(Knn)
            # l_b, q_b = np.linalg.eigh(Bnn)
            #
            # def mat_vec_mul(B, K, X_vec):
            #     Gb = np.shape(B)[0]
            #     Gk = np.shape(K)[1]
            #     X_Gk = np.reshape(X_vec, newshape=[Gb, Gk])
            #     Z = np.matmul(X_Gk, np.transpose(K))
            #     Z_vec = np.reshape(np.transpose(Z), newshape=[Gb * Gk, 1])
            #     Z_Gb = np.reshape(Z_vec, newshape=[Gk, Gb])
            #     M = np.matmul(Z_Gb, np.transpose(B))
            #     x_out = np.reshape(np.transpose(M), newshape=[-1, 1])
            #     return x_out
            #
            # QbQkX_vec = mat_vec_mul(np.transpose(q_b), np.transpose(q_k), X_vec)
            # kron_diag = np.concatenate([l_k[:, None] * l_b[i] for i in range(self.Mo_dim)], axis=0)
            # Inv_vec = QbQkX_vec / (kron_diag + gp_nnjoint.likelihood.variance.value)
            # alpha_gpnnjoint = mat_vec_mul(q_b, q_k, Inv_vec)
            # err_alpha = np.max(np.abs(alpha_gpnnjoint - alpha_test))
            # logpdf_gpnnjoint = -0.5 * np.matmul(np.transpose(X_vec), alpha_gpnnjoint) - 0.5 * X_vec.shape[0] * np.log(
            #     2 * np.pi) - 0.5 * np.sum(np.log(kron_diag + gp_nnjoint.likelihood.variance.value))
            # err_ll = np.abs(logpdf_gpnnjoint - lik_mogp)
            # mgp = gpflow.models.GPR(X=np.copy(Xnn), Y=np.copy(self.Ynorm), kern=k_list[2])
            # mgp.likelihood.variance = gp_nnjoint.likelihood.variance.value
            # lik_mgp = mgp.compute_log_likelihood()
            # err_lik_all = np.abs(lik_gpnnjoint - (lik_mogp + lik_mgp))
            # fmean_gpnn, fvar_gpnn = gp_nnjoint.predict_f(Xnn)
            # fmean_mgp, fvar_mgp = mgp.predict_f(Xnn)
            # err_predict_f = np.maximum(np.max(np.abs(fmean_gpnn - self.Ynorm)), np.max(np.abs(fmean_gpnn - fmean_mgp)))
            # err_predict_var = np.max(np.abs(fvar_gpnn - fvar_mgp))
            # err_fmeanX = np.max(np.abs(fmeanX - self.Xprobit))
            # err_sampleX = np.maximum(np.max(np.abs(sampleX - self.Xprobit)), np.max(np.abs(sampleX - fmeanX)))

            # optimize the acquisition function within 0,1 bounds
            kwargs = {'ymin': self.Ynorm.min()}
            acquisition = initialize_acquisition(loss=self.loss,
                                                 gpmodel=gp_nnjoint,
                                                 **kwargs)

            opt_config = self.update_latent_bounds(opt_config)
            x_proj_tp1, acq_tp1 = self.minimize_acquisition(
                acquisition, opt_config)

            x_tp1 = self.generate_x(x_proj_tp1, gp_nnjoint)

            y_tp1 = self.evaluate(x_tp1)
            self.update_data(x_tp1, y_tp1)
        lik = []

        return self.data_x, self.data_y, self.hyps, lik
Code example #11
    def run(self, maxiters=20):
        opt_config = import_attr(
            'tfbo/configurations',
            attribute='KLacquisition_opt')  # check import configuration
        for j in range(maxiters):
            print('iteration: ', j)

            self.reset_graph()

            # initialize model
            k_list, gp_nnjoint, nn = self.initialize_modelM()
            gp_nnjoint.kern.kernels[1].variance = 10.
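            # Unlike the other variants, this one hard-codes the NN weights
            # and biases to fixed values below before optimizing.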

            # # test likelihood
            # lik_nn = gp_nnjoint.compute_log_likelihood()
            # Xnn0 = gp_nnjoint.nn.np_forward(self.Xnorm)
            # kc = k_list[1] * k_list[0]
            # Xnn_test = np.concatenate(
            #     [np.concatenate([Xnn0, np.ones(shape=[np.shape(Xnn0)[0], 1]) * i], axis=1)
            #      for i in range(gp_nnjoint.Mo_dim)], axis=0)
            # Y_test = np.concatenate([self.Xnorm[:, i][:, None] for i in range(gp_nnjoint.Mo_dim)], axis=0)
            # gp_test = gpflow.models.GPR(X=Xnn_test, Y=Y_test, kern=kc)
            # gp_test.likelihood.variance = 1e-06
            # lik_test = gp_test.compute_log_likelihood()
            # gpm = gpflow.models.GPR(X=Xnn0, Y=self.Ynorm, kern=k_list[2])
            # gpm.likelihood.variance = 1e-06
            # likm = gpm.compute_log_likelihood()
            # lik_err = np.abs(lik_nn-lik_test-likm)

            gp_nnjoint.nn.W_0 = np.array([[-0.72257252, 0.26017714],
                                          [-1.23547449, -0.87398094],
                                          [-0.05795134, 0.22184529],
                                          [-4.33704576, -1.03866942],
                                          [4.16884434, 0.1687948]])
            gp_nnjoint.nn.W_1 = np.array([
                [-0.17611191, 0.84349685, 1.44230698, 0.18555664, -0.19708862],
                [-0.13689745, 1.86417045, 2.33110755, 1.20521291, 0.71162644],
                [0.47687133, 0.31373425, -1.1891341, 2.18089067, -3.93909819],
                [
                    -0.2272015, 1.93327611, -1.57774183, -1.26255085,
                    -0.15080552
                ],
                [
                    -0.4890983, -1.81724449, -1.65700209, -0.75827901,
                    1.64434325
                ],
                [0.10663821, -0.12244555, 2.26286785, -0.88992352, 2.63438025],
                [-1.14518348, -2.48144707, -0.35203317, 0.23830179, 0.0816695],
                [-0.5185169, 2.43075116, 0.09996988, 1.56821543, 2.57299817],
                [
                    1.27373299, -2.17523897, 2.56801105, -1.29495389,
                    -1.38732749
                ],
                [2.16933267, -0.82218552, 1.94225155, 3.44593108, 1.76706837]
            ])
            gp_nnjoint.nn.W_2 = np.array(
                [[
                    -1.06815199, 0.67328749, 1.33295767, -0.82976342,
                    1.08580199, 0.07772985, -0.45765023, -0.05497667,
                    -2.4756558, 0.08808674
                ],
                 [
                     0.85855821, -0.10785176, 1.40417131, -1.4510554,
                     -2.43215512, 0.58832488, -0.31426693, 0.88093524,
                     -0.18911669, -1.21866324
                 ],
                 [
                     0.8989253, -0.04077404, 4.74024619, -0.25097489,
                     -0.68791512, -2.8158515, -1.05096808, -1.15249423,
                     2.40093649, 2.84014738
                 ],
                 [
                     1.71409331, 0.21485905, 0.47611273, 3.44473025,
                     -0.1917658, 3.08725273, -0.97657774, 0.22685569,
                     0.33642754, 0.69626424
                 ],
                 [
                     0.60789342, -2.02719287, 0.43644935, 2.13129863,
                     -0.4946168, 0.3486837, -0.02468686, -2.11012978,
                     0.80318346, -2.0538133
                 ]])
            gp_nnjoint.nn.W_3 = np.array(
                [[-1.17012522, 1.4669893, -2.33431889, 4.54361068, 0.219858]])
            gp_nnjoint.nn.b_0 = np.array([[-1.95648467], [-0.40078642],
                                          [0.03963978], [-3.13848025],
                                          [0.89017789]])
            gp_nnjoint.nn.b_1 = np.array([[0.84520059], [0.5069299],
                                          [-1.45844994], [0.32032038],
                                          [0.94691029], [0.87558343],
                                          [-0.41215514], [0.13526481],
                                          [-1.00605875], [-0.02132958]])
            gp_nnjoint.nn.b_2 = np.array([[-0.11726942], [0.14056033],
                                          [1.38538488], [1.71165805],
                                          [-0.41426653]])
            gp_nnjoint.nn.b_3 = np.array([[1.19480249]])
            # gp_nnjoint.nn.trainable = False

            # gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
            try:
                gp_nnjoint.kern.kernels[1].lengthscales = np.ones(
                    shape=[self.proj_dim])
                gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
            except Exception:
                try:
                    gp_nnjoint.kern.kernels[1].lengthscales = np.ones(
                        shape=[self.proj_dim])
                    gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
                except Exception:
                    print(
                        'Failure in optimization of hyper-parameters, reset to standard ones'
                    )

            Xnn = gp_nnjoint.nn.np_forward(self.Xnorm)
            self.Xnn = Xnn
            self.hyps.append(self.get_hyps(gp_nnjoint))

            # # test Manifold GP predictions
            # fmean, fvar = gp_nnjoint.predict_f(Xnn)

            # optimize the acquisition function within 0,1 bounds
            kwargs = {'ymin': self.Ynorm.min()}
            acquisition = initialize_acquisition(loss=self.loss,
                                                 gpmodel=gp_nnjoint,
                                                 **kwargs)
            # opt_config = self.update_latent_bounds(opt_config)
            if j % 10 == 0:
                self.learnLipschitz(
                    gp_nnjoint)  # learn new Lipschitz constant every 10 iters
            opt_config = self.NLconstraint(opt_config)
            x_proj_tp1, acq_tp1 = self.minimize_acquisition(
                acquisition, opt_config)

            x_tp1 = self.generate_x(x_proj_tp1, gp_nnjoint)

            y_tp1 = self.evaluate(x_tp1)
            self.update_data(x_tp1, y_tp1)
        lik = []

        return self.data_x, self.data_y, self.hyps, lik
Code example #12
File: vae_bo_optimizer.py  Project: rm4216/BayesOpt
    def run(self, maxiters):
        # load VAE models for a specific objective function
        path = '/home/rm4216/Desktop/ImperialCollege/Python/Github_manifold_bo/BayesOpt/Baselines/chemvae/' + \
               self.dict_args['obj'] + '/'
        encoder, decoder, prop_pred = self.load_vae_models(path)
        self.encoder = encoder
        self.decoder = decoder

        # encode inputs in low-dimensional space
        Z_vae, _ = encoder.predict(self.Xprobit[:, :, None])
        self.Z_vae = Z_vae.astype(np.float64)
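        # Z_vae holds the VAE latent codes: the GP is fit in this latent
        # space and optimal candidates are decoded back to the input space.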

        # initialize GP with embedded inputs "Z_vae" and normalized outputs "Ynorm"
        kernel, gp = initialize_models(x=np.copy(self.Z_vae),
                                       y=np.copy(self.Ynorm),
                                       input_dim=self.proj_dim,
                                       model='GPR',
                                       kernel='Matern52',
                                       ARD=True)
        opt_config = import_attr(
            'tfbo/configurations',
            attribute='acquisition_opt')  # check import configuration

        for i in range(maxiters):
            print(i)

            try:
                gpflow.train.ScipyOptimizer().minimize(gp)
            except Exception:
                # if hyper-parameter optimization throws an error, reset the values to reference
                gp = self.reset_hyps(gp)
            self.hyps.append(self.get_hyps(gp))

            kwargs = {'ymin': self.Ynorm.min()}
            acquisition = initialize_acquisition(
                self.loss, gpmodel=gp,
                **kwargs)  # updated model at each iteration
            opt_config = self.update_latent_bounds(opt_config)
            try:
                z_tp1, acq_tp1 = self.minimize_acquisition(
                    acquisition,
                    opt_config)  # check configuration, starting point
            except Exception:
                gp = self.reset_hyps(gp)
                z_tp1, acq_tp1 = self.minimize_acquisition(
                    acquisition, opt_config)

            # self.reset_graph()
            # encoder, decoder, prop_pred = self.load_vae_models(path)
            # self.encoder = encoder
            # self.decoder = decoder

            x_tp1 = self.decode_zopt(z_tp1)
            y_tp1 = self.evaluate(x_tp1)

            self.update_data(x_tp1, y_tp1)

            Z_vae, _ = encoder.predict(self.Xprobit[:, :, None])
            self.Z_vae = Z_vae.astype(np.float64)

            self.reset_graph()
            kernel, gp = initialize_models(x=np.copy(self.Z_vae),
                                           y=np.copy(self.Ynorm),
                                           input_dim=self.proj_dim,
                                           model='GPR',
                                           kernel='Matern52',
                                           ARD=True)

        return self.data_x, self.data_y, self.hyps, self.log_lik_opt
Code example #13
File: rembo_optimizer.py  Project: rm4216/BayesOpt
    def run(self, maxiters):
        kernel, gp = initialize_models(x=np.copy(self.X_proj_norm),
                                       y=np.copy(self.Ynorm),
                                       input_dim=self.proj_dim,
                                       model='GPR',
                                       kernel='Matern52',
                                       ARD=True)
        opt_config = import_attr(
            'tfbo/configurations',
            attribute='acquisition_opt')  # check import configuration
        opt_config['bounds'] = [(self.proj_bounds[0][i], self.proj_bounds[1][i]) for i in range(self.proj_dim)] * \
                               self.num_init
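        # The per-dimension projection bounds are tiled num_init times,
        # presumably one block per restart of the acquisition optimizer.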
        # opt_hyp = import_attr('tfbo/configurations', attribute='hyp_opt')
        # # var_list = gp.
        # opt_hyp['var_to_bounds'] = [(np.log(np.exp(1e-04) - 1.), np.log(np.exp(1e04) - 1.))] + \
        #                            [(np.log(np.exp(1e-06) - 1.), np.log(np.exp(1e06) - 1.))] * self.proj_dim + \
        #                            [(np.log(np.exp(1e-08) - 1.), np.log(np.exp(1e08) - 1.))]   # order of hyps in list

        for i in range(maxiters):
            print(i)
            # gp = self.fit_gp(gp)
            try:
                gpflow.train.ScipyOptimizer().minimize(gp)
            except Exception:
                # if hyper-parameter optimization throws an error, reset the values to reference
                gp = self.reset_hyps(gp)
                # gpflow.train.ScipyOptimizer().minimize(gp)
            self.hyps.append(self.get_hyps(gp))

            kwargs = {'ymin': self.Ynorm.min()}
            acquisition = initialize_acquisition(
                self.loss, gpmodel=gp,
                **kwargs)  # updated model at each iteration
            # def acquisition_norm(acquisition, x, X_proj_mean, X_proj_std):
            #     # wrapper to normalize the input
            #     N = x.shape[0]
            #     X_proj_mean_rep = np.tile(X_proj_mean, reps=[N, 1])
            #     X_proj_std_rep = np.tile(X_proj_std, reps=[N, 1])
            #     xnorm = (x - X_proj_mean_rep) / X_proj_std_rep
            #     acq_norm, acq_sum, acq_grad = acquisition(xnorm)
            #     return acq_norm, acq_sum, acq_grad
            acquisition_normalized = lambda x: self.acquisition_norm(
                acquisition, x, self.X_proj_mean, self.X_proj_std)
            # acquisition_normalized = lambda x: acquisition_norm(acquisition, x, self.X_proj_mean, self.X_proj_std)  # check broadcasting

            try:
                x_tp1, acq_tp1 = self.minimize_acquisition(
                    acquisition_normalized,
                    opt_config)  # check configuration, starting point
            except Exception:
                np.save('Xcrash_rembo', gp.X.read_value())
                np.save('Ycrash_rembo', gp.Y.read_value())
                np.save('hyps_crash', self.get_hyps(gp))
                gp = self.reset_hyps(gp)
                x_tp1, acq_tp1 = self.minimize_acquisition(
                    acquisition_normalized, opt_config)
            y_tp1 = self.evaluate(x_tp1)

            self.update_data(x_tp1, y_tp1)
            self.reset_graph()
            kernel, gp = initialize_models(x=np.copy(self.X_proj_norm),
                                           y=np.copy(self.Ynorm),
                                           input_dim=self.proj_dim,
                                           model='GPR',
                                           kernel='Matern52',
                                           ARD=True)
            # gp = self.update_model(gp)
        return self.data_x, self.data_y, self.hyps, self.log_lik_opt
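The reset_hyps helper referenced throughout is the fallback whenever ScipyOptimizer raises (typically a Cholesky failure on an ill-conditioned kernel matrix). Its body is not shown in these snippets; the sketch below only mirrors the fallback assignments visible in the examples above (unit ARD lengthscales, unit kernel variance, small noise), so the repository's actual reference values may differ:

    def reset_hyps(self, gp):
        # Hypothetical reference values, assumed rather than taken from the
        # repository: unit lengthscales and variance, small noise level.
        gp.kern.lengthscales = np.ones(shape=[self.proj_dim])
        gp.kern.variance = 1.
        gp.likelihood.variance = 1e-03
        return gp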