Example 1
 def run(self, maxiter=20):
     opt_config = import_attr('tfbo/configurations', attribute='acquisition_opt')  # check import configuration
     # opt_config = import_attr('tfbo/configurations', attribute='bfgs_opt')  # check import configuration
     for j in range(maxiter):
         print('iteration: ', j)
         # initialize model
         self.reset_graph()
         _, dgp_model = self.initialize_modelM()
         # train deep gp hyper-parameters
         dgp_model = self.train_model(dgp_model)
         self.hyps.append(self.get_hyps(dgp_model))
         # initialize acquisition function
         kwargs = {'ymin': self.Ynorm.min()}
         acquisition = initialize_acquisition_dgp(loss=self.loss, gpmodel=dgp_model, num_samples=int(1), **kwargs)
         # only the latent grid is modified; the BFGS configuration stays the same
         opt_config = self.update_latent_bounds(dgp_model, opt_config)
         x_proj_tp1, acq_tp1 = self.minimize_acquisition(acquisition, opt_config)
         # transform the optimal point into the original input space and clip to the [0, 1] bounds for feasibility
         x_tp1 = self.generate_x(x_proj_tp1, dgp_model)
         y_tp1 = self.evaluate(x_tp1)
         self.update_data(x_tp1, y_tp1)
         # self.reset_graph()
         # tf.reset_default_graph()
         # print(len(tf.all_variables()))
         # print(len(tf.get_default_graph().get_operations()))
     return self.data_x, self.data_y, self.hyps
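
Note: import_attr, used throughout these examples, loads a named attribute from a module path. A minimal sketch of such a helper, assuming '/'-separated paths relative to the project root (the actual implementation in tfbo/utils/import_modules may differ):

import importlib

def import_attr(path, attribute):
    # Turn a '/'-separated path such as 'tfbo/configurations' into a dotted
    # module name, import it, and return the requested attribute.
    module = importlib.import_module(path.replace('/', '.'))
    return getattr(module, attribute)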
Example 2
 def run(self, maxiter=20):
     opt_config = import_attr('tfbo/configurations', attribute='acquisition_opt')  # check import configuration
     opt_config['bounds'] = [(0., 1.)] * self.proj_dim * self.num_init
     for j in range(maxiter):
         print('iteration: ', j)
         # initialize model
         self.reset_graph()
         kc, nn_mogp = self.initialize_modelM()
         # train nn and gp hyper-parameters
         try:
             gpflow.train.ScipyOptimizer().minimize(nn_mogp)
     except Exception:
             self.reset_hyps(nn_mogp)
         Xnn = nn_mogp.nn.np_forward(self.Xnorm)
         self.hyps.append(self.get_hyps(nn_mogp))
         # optimize the acquisition function within 0,1 bounds
         kwargs = {'ymin': self.Ynorm.min()}
         acquisition = initialize_acquisition_mo(loss=self.loss, gpmodel=nn_mogp, input_dim=self.input_dim, **kwargs)
         # acquisition_normalized = lambda x: self.acquisition_norm(acquisition, x, self.X_proj_mean, self.X_proj_std)
         x_proj_tp1, acq_tp1 = self.minimize_acquisition(acquisition, opt_config)
         # transform the optimal point into the original input space and clip to the [0, 1] bounds for feasibility
         x_tp1 = self.generate_x(x_proj_tp1, nn_mogp)
         y_tp1 = self.evaluate(x_tp1)
         self.update_data(x_tp1, y_tp1)
         # self.reset_graph()
         # tf.reset_default_graph()
         # print(len(tf.all_variables()))
         # print(len(tf.get_default_graph().get_operations()))
     return self.data_x, self.data_y, self.hyps
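
Note: the try/except around ScipyOptimizer().minimize guards hyper-parameter training against numerical failures such as Cholesky errors, falling back to reference hyper-parameters via reset_hyps. A generic sketch of that guard pattern (reset_fn stands in for the optimizer's reset_hyps; this is not the tfbo implementation):

def safe_minimize(optimizer, model, reset_fn):
    # Try to optimize the hyper-parameters; on numerical failure,
    # restore reference values instead of aborting the BO loop.
    try:
        optimizer.minimize(model)
    except Exception:
        reset_fn(model)
    return model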
Example 3
    def run(self, maxiters):
        list_i = list(range(len(self.decomposition)))
        # decompose normalized dataset
        list_xnorm = list(map(self.select_component, list_i))
        list_km = list(map(self.initialize_qgp, list_xnorm))
        list_kernel = [km_i[0] for km_i in list_km]
        list_models = [km_i[1] for km_i in list_km]

        # load optimizer options
        opt_config = import_attr('tfbo/configurations', attribute='acquisition_opt')
        opt_config['bounds'] = [(0., 1.)] * self.proj_dim * self.num_init

        optimize_acq_i = lambda gp_i, i: self.optimize_i(gp_i, i, opt_config)

        for j in range(maxiters):
            print(j)

            list_xa = list(map(optimize_acq_i, list_models, list_i))

            list_x = [xa_i[0] for xa_i in list_xa]
            self.hyps.append(self.collect_hyps())
            x_out = self.compose_x(list_x)
            y_out = self.evaluate(list_x)
            self.update_data(x_out, y_out)
            list_xnorm = list(map(self.select_component, list_i))
            self.reset_graph()
            list_km = list(map(self.initialize_qgp, list_xnorm))
            list_models = [km_i[1] for km_i in list_km]
            # list_models = self.update_models(list_models, list_xnorm)
        return self.data_x, self.data_y, self.hyps, self.log_lik_opt
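
Note: here self.decomposition is a list of index subsets, one additive component each; every component has its own kernel/model pair and its own acquisition optimization, and compose_x reassembles the per-component optima into one full input. A small self-contained illustration of that composition step (shapes hypothetical):

import numpy as np

decomposition = [[0, 1], [2, 3, 4]]           # index subsets of a 5-D input
list_x = [np.array([[0.2, 0.7]]),             # optimum of component 0
          np.array([[0.1, 0.5, 0.9]])]        # optimum of component 1

def compose_x(list_x, decomposition, input_dim):
    # Place each per-component optimum back at its original indices.
    x_full = np.zeros(shape=[1, input_dim])
    for x_i, idx_i in zip(list_x, decomposition):
        x_full[:, idx_i] = x_i
    return x_full

print(compose_x(list_x, decomposition, input_dim=5))  # [[0.2 0.7 0.1 0.5 0.9]]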
Example 4
    def run(self, maxiters=20):
        # opt_config = import_attr('tfbo/configurations', attribute='acquisition_opt')
        opt_config = import_attr('tfbo/configurations', attribute='KLacquisition_opt')
        for j in range(maxiters):
            print('iteration: ', j)

            self.reset_graph()

            # initialize model
            k_list, gp_nnjoint, nn = self.initialize_modelM()
            gp_nnjoint.kern.kernels[0].variance = 1.

            try:
                gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
            except Exception:
                try:
                    gp_nnjoint.jitter = 1e-03
                    gp_nnjoint.noise_variance = 1e-03
                    gp_nnjoint.kern.kernels[1].lengthscales = np.ones(shape=[self.proj_dim])
                    gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
                except Exception:
                    print('Failure in optimization of hyper-parameters, reset to standard ones')

            Xnn = gp_nnjoint.nn.np_forward(self.Xnorm)
            # Xnn = gp_nnjoint.nn.np_forward(self.X_inf)
            self.Xnn = Xnn
            self.hyps.append(self.get_hyps(gp_nnjoint))

            # mgp = gpflow.models.GPR(X=np.copy(Xnn), Y=np.copy(self.Ynorm), kern=k_list[1])
            # mgp.likelihood.variance = gp_nnjoint.likelihood.variance.value
            # lik_mgp = mgp.compute_log_likelihood()
            # fmean_test, fvar_test = mgp.predict_f(Xnn)
            # lik_gpnnjoint = gp_nnjoint.compute_log_likelihood()
            # fmean_f, fvar_f = gp_nnjoint.predict_f(Xnn)
            # err_lik = np.abs(lik_mgp - lik_gpnnjoint)
            # err_posterior_mean = np.max(np.abs(fmean_test - fmean_f))
            # err_mean = np.max(np.abs(fmean_f - self.Ynorm))
            # err_posterior_var = np.max(np.abs(fvar_test - fvar_f))
            # Xnew = np.tile(np.linspace(start=0., stop=1., num=500)[:, None], [1, Xnn.shape[1]])
            # uZ, uZ_test, prior_meanXnn, uZnew = gp_nnjoint.test_log_likelihood(Xnew)

            # optimize the acquisition function within 0,1 bounds
            kwargs = {'ymin': self.Ynorm.min()}
            acquisition = initialize_acquisition(loss=self.loss, gpmodel=gp_nnjoint, **kwargs)

            if (j % 10 == 0):
                self.learnLipschitz(gp_nnjoint)     # learn new Lipschitz constant every 10 iters

            opt_config = self.NLconstraint(opt_config)
            x_proj_tp1, acq_tp1 = self.minimize_acquisition(acquisition, opt_config)

            x_tp1 = self.generate_x(x_proj_tp1, gp_nnjoint)

            y_tp1 = self.evaluate(x_tp1)
            self.update_data(x_tp1, y_tp1)
        lik = []

        return self.data_x, self.data_y, self.hyps, lik
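
Note: the acquisition is parameterized by the incumbent ymin = self.Ynorm.min(). For reference, the standard Expected Improvement for minimization under a Gaussian posterior, written with NumPy/SciPy (the textbook formula, not necessarily the exact tfbo loss):

import numpy as np
from scipy.stats import norm

def expected_improvement(mean, var, ymin):
    # EI for minimization: E[max(ymin - f, 0)] with f ~ N(mean, var).
    std = np.sqrt(np.maximum(var, 1e-12))
    z = (ymin - mean) / std
    return (ymin - mean) * norm.cdf(z) + std * norm.pdf(z)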
Example 5
def load_initializations(dict_args, names):
    dict_start = OrderedDict([(key_i, dict_args[key_i])
                              for key_i in names[:2]])
    name_attr = import_attr('tfbo/utils/name_file',
                            attribute='name_file_start')
    path = 'datasets/data/' + dict_args['obj'] + '/'
    attr_partload = import_attr('tfbo/utils/load_save',
                                attribute='load_partname')

    def load_pair(seed_i):
        dict_start['seed'] = seed_i
        x_file, y_file = name_attr(dict_start)
        y_start = attr_partload(path=path, partname=y_file)
        x_start = attr_partload(path=path, partname=x_file)
        return (x_start, y_start)

    xy_list = list(map(load_pair, dict_args['seed']))
    return xy_list
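
Note: a hypothetical call, assuming names begins with the two keys identifying an experiment and dict_args['seed'] holds a list of seeds (conventions inferred from the surrounding scripts; the data files must exist on disk):

dict_args = {'obj': 'Hartmann6D', 'opt': 'NN_bo', 'seed': [0, 1, 2]}
names = ['obj', 'opt']
xy_list = load_initializations(dict_args, names)   # one (x_start, y_start) pair per seed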
Example 6
def load_SRinitializations(dict_args, names):
    dict_start = OrderedDict([(key_i, dict_args[key_i])
                              for key_i in names[:2]])
    dict_start['n_samples'] = int(50)
    name_attr = import_attr('tfbo/utils/name_file',
                            attribute='name_file_start')
    path = 'datasets/data/' + dict_args['obj'] + '/' + 'part' + str(
        dict_args['part']) + '/'
    attr_load = import_attr('tfbo/utils/load_save', attribute='loadfile')
    full_path = '/home/rm4216/Desktop/ImperialCollege/Python/Github_manifold_bo/BayesOpt/' + path

    def load_pair(seed_i):
        dict_start['seed'] = seed_i
        x_file, y_file = name_attr(dict_start)
        y_start = attr_load(filename=full_path + y_file + '_' + 'part' +
                            str(dict_args['part']) + '.npy')
        x_start = attr_load(filename=full_path + x_file + '_' + 'part' +
                            str(dict_args['part']) + '.npy')
        return (x_start, y_start)

    xy_list = list(map(load_pair, dict_args['seed']))
    return xy_list
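
Note: the absolute prefix above is machine-specific. A more portable way to build the same per-part directory with os.path (a sketch, not the repository's convention):

import os

def part_dir(base, obj, part):
    # e.g. <base>/datasets/data/<obj>/part<part>/
    return os.path.join(base, 'datasets', 'data', obj, 'part' + str(part)) + os.sep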
Example 7
    def run(self, maxiters=20):
        opt_config = import_attr(
            'tfbo/configurations',
            attribute='KLacquisition_opt')  # check import configuration
        for j in range(maxiters):
            print('iteration: ', j)

            self.reset_graph()

            # initialize model
            k_list, gp_nnjoint, nn = self.initialize_modelM()
            try:
                gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
            except Exception:
                try:
                    gp_nnjoint.likelihood.variance = 1e-03
                    gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
                except Exception:
                    print(
                        'Failure in optimization of hyper-parameters, reset to standard ones'
                    )

            Xnn = gp_nnjoint.nn.np_forward(self.Xprobit)
            self.Xnn = Xnn
            self.hyps.append(self.get_hyps(gp_nnjoint))

            # optimize the acquisition function within 0,1 bounds
            kwargs = {'ymin': self.Ynorm.min()}
            acquisition = block_diag_initialize_acquisition(loss=self.loss,
                                                            gpmodel=gp_nnjoint,
                                                            **kwargs)
            # opt_config = self.update_latent_bounds(opt_config)
            if (j % 10 == 0):
                self.learnLipschitz(
                    gp_nnjoint)  # learn new Lipschitz constant every 10 iters
            opt_config = self.NLconstraint(opt_config)
            x_proj_tp1, acq_tp1 = self.minimize_acquisition(
                acquisition, opt_config)

            x_tp1 = self.generate_x(x_proj_tp1, gp_nnjoint)

            y_tp1 = self.evaluate(x_tp1)
            self.update_data(x_tp1, y_tp1)
        lik = []

        return self.data_x, self.data_y, self.hyps, lik
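
Note: learnLipschitz re-estimates a Lipschitz constant of the learned embedding every 10 iterations, and NLconstraint folds it into the acquisition optimizer's configuration as a nonlinear constraint. One common empirical estimator of such a constant maximizes finite-difference slopes over random pairs (a generic sketch, not necessarily tfbo's method):

import numpy as np

def estimate_lipschitz(f, dim, n_pairs=1000, seed=0):
    # Empirical Lipschitz estimate of a map f: [0, 1]^dim -> R^k,
    # taken as the maximum slope over random input pairs.
    rng = np.random.RandomState(seed)
    a = rng.uniform(size=[n_pairs, dim])
    b = rng.uniform(size=[n_pairs, dim])
    num = np.linalg.norm(f(a) - f(b), axis=1)
    den = np.linalg.norm(a - b, axis=1)
    return np.max(num / np.maximum(den, 1e-12))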
Example 8
    def run(self, maxiters=20):
        opt_config = import_attr('tfbo/configurations',
                                 attribute='acquisition_opt')
        for j in range(maxiters):
            print('iteration: ', j)

            self.reset_graph()

            # initialize model
            k_list, gp_nnjoint, nn = self.initialize_modelM()
            gp_nnjoint.kern.kernels[1].variance = 10.

            try:
                gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
            except Exception:
                try:
                    gp_nnjoint.kern.kernels[1].lengthscales = np.ones(
                        shape=[self.proj_dim])
                    gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
                except Exception:
                    print(
                        'Failure in optimization of hyper-parameters, reset to standard ones'
                    )

            Xnn = gp_nnjoint.nn.np_forward(self.Xnorm)
            self.Xnn = Xnn
            self.hyps.append(self.get_hyps(gp_nnjoint))

            # optimize the acquisition function within 0,1 bounds
            kwargs = {'ymin': self.Ynorm.min()}
            acquisition = initialize_acquisition(loss=self.loss,
                                                 gpmodel=gp_nnjoint,
                                                 **kwargs)

            opt_config = self.update_latent_bounds(opt_config)
            x_proj_tp1, acq_tp1 = self.minimize_acquisition(
                acquisition, opt_config)

            x_tp1 = self.generate_x(x_proj_tp1, gp_nnjoint)

            y_tp1 = self.evaluate(x_tp1)
            self.update_data(x_tp1, y_tp1)
        lik = []

        return self.data_x, self.data_y, self.hyps, lik
Example 9
    def run(self, maxiter=20):
        opt_config = import_attr('tfbo/configurations', attribute='acquisition_opt')  # check import configuration
        opt_config['bounds'] = [(0., 1.)] * self.proj_dim * self.num_init
        for j in range(maxiter):
            print('iteration: ', j)
            # initialize model
            self.reset_graph()
            km, gpm = self.initialize_modelM()
            # train nn and gp hyper-parameters
            try:
                gpflow.train.ScipyOptimizer().minimize(gpm)
            except Exception:
                self.reset_hyps(gpm)
            # self.reset_hyps(gpm)
            Xnn = gpm.nn.np_forward(self.Xnorm)
            self.Xnn = Xnn
            self.hyps.append(self.get_hyps(gpm))

            gp_acq = Stable_GPR(X=np.copy(Xnn), Y=np.copy(self.Ynorm), kern=km)
            gp_acq.likelihood.variance = gpm.likelihood.variance.read_value()
            # gp_acq.as_pandas_table()

            # optimize the acquisition function within 0,1 bounds
            kwargs = {'ymin': self.Ynorm.min()}
            acquisition = initialize_acquisition(loss=self.loss, gpmodel=gp_acq, **kwargs)
            x_proj_tp1, acq_tp1 = self.minimize_acquisition(acquisition, opt_config)

            k_list, gp_list = initialize_m_models(x=np.copy(Xnn), y=np.copy(self.Xnorm),
                                                  input_dim=self.proj_dim,
                                                  model='decoder',
                                                  kernel='Matern52',
                                                  ARD=True,
                                                  nn=None,
                                                  decomp=self.decomposition)
            # transform the optimal point into the original input space and clip to the [0, 1] bounds for feasibility
            x_tp1 = self.generate_x(x_proj_tp1, gp_list)

            y_tp1 = self.evaluate(x_tp1)
            self.update_data(x_tp1, y_tp1)
            # self.reset_graph()
            # tf.reset_default_graph()
            # print(len(tf.all_variables()))
            # print(len(tf.get_default_graph().get_operations()))
        return self.data_x, self.data_y, self.hyps
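
Note: in this variant generate_x decodes the latent optimum through the per-component decoder GPs before clipping for feasibility. A schematic of that decoding step (predict_fns is a hypothetical list of per-component posterior predictors, not the tfbo API):

import numpy as np

def decode_and_clip(x_proj, predict_fns):
    # Predict each block of original-space features with its decoder GP,
    # concatenate, and clip to the [0, 1] box for feasibility.
    parts = [fn(x_proj)[0] for fn in predict_fns]   # each fn returns (mean, var)
    return np.clip(np.concatenate(parts, axis=1), 0., 1.)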
Example 10
    def run(self, maxiters):
        # initialize model
        k_list, gpmodels = initialize_models(np.copy(self.Xnorm), np.copy(self.Ynorm), input_dim=self.proj_dim,
                                             model='AddGPR', kernel='Matern52', ARD=True, decomp=self.decomposition)
        opt_config = import_attr('tfbo/configurations', attribute='acquisition_opt')
        opt_config['bounds'] = [(0., 1.)] * self.input_dim * self.num_init

        for i in range(maxiters):
            print(i)
            gp0 = gpmodels[0]
            try:
                gpflow.train.ScipyOptimizer().minimize(
                    gp0)  # note: this trains all GPR models simultaneously (kernel variables only, not the likelihood)
            except Exception:
                gp0 = self.reset_hyps(gp0)
            self.hyps.append(self.get_hyps(gp0))

            def opt_i(self, gp_i, gpsum):
                gp_i.likelihood.variance = gpsum.likelihood.variance

                kwargs = {'ymin': self.Ynorm.min()}
                acquisition = initialize_acquisition(loss=self.loss, gpmodel=gp_i, **kwargs)    # alpha(x)
                acquisition_normalized = lambda x: \
                    self.acquisition_norm(acquisition, x, np.copy(self.X_mean), np.copy(self.X_std))     # alpha(xnorm)
                # x_opt, acq_opt = self.minimize_acquisition(acquisition_normalized, opt_config)
                try:
                    x_opt, acq_opt = self.minimize_acquisition(acquisition_normalized, opt_config)
                except Exception:
                    gp_i = self.reset_hyps(gp_i)
                    x_opt, acq_opt = self.minimize_acquisition(acquisition_normalized, opt_config)
                return np.copy(x_opt[:, self.decomposition[gp_i.i[0]]]), acq_opt   # x^(i)_opt, alpha(x^(i)_opt)
            optimize_i = lambda gp_ith: opt_i(self, gp_ith, gpsum=gp0)
            xa_list = list(map(optimize_i, gpmodels))

            x_list = [xi_ai[0] for xi_ai in xa_list]
            x_tp1 = self.compose_x(x_list)
            y_tp1 = self.evaluate(x_list)
            self.update_data(x_tp1, y_tp1)
            # self.update_model(gpmodels)
            self.reset_graph()
            k_list, gpmodels = initialize_models(np.copy(self.Xnorm), np.copy(self.Ynorm), input_dim=self.proj_dim,
                                                 model='AddGPR', kernel='Matern52', ARD=True, decomp=self.decomposition)
        return self.data_x, self.data_y, self.hyps, self.log_lik_opt
Example 11
    def run(self, maxiters):
        # Normalization -> decomposition -> initialization/update of each GP model
        list_Xnorm_cons, list_Ynorm_cons = self.remove_inconsistencies(
            _x=np.copy(self.Xnorm), _y=np.copy(self.Ynorm))
        list_km_out = list(
            map(self.initialize_single_model, list_Xnorm_cons,
                list_Ynorm_cons))
        # list_kernels = [km_i[0] for km_i in list_km_out]
        list_gpmodels = [km_i[1] for km_i in list_km_out]
        opt_config = import_attr('tfbo/configurations',
                                 attribute='acquisition_opt')
        opt_config['bounds'] = [(0., 1.)] * self.proj_dim * self.num_init

        list_components = list(range(len(self.decomposition)))
        optimize_i = lambda gp_i, i: self.optimize_ith_model(
            gp_i, i, opt_config)

        for j in range(maxiters):
            print(j)

            list_xa = list(map(optimize_i, list_gpmodels,
                               list_components))  # check double input

            list_x = [xa_i[0] for xa_i in list_xa]
            x_out = self.compose_x(list_x)
            y_out = self.evaluate(list_x)
            self.update_data(xnew=x_out,
                             ynew=y_out)  # augment dataset and normalize
            list_Xnorm_cons, list_Ynorm_cons = self.remove_inconsistencies(
                _x=np.copy(self.Xnorm),
                _y=np.copy(self.Ynorm))  # decompose and prune
            self.reset_graph()
            list_km_out = list(
                map(self.initialize_single_model, list_Xnorm_cons,
                    list_Ynorm_cons))
            list_gpmodels = [km_i[1] for km_i in list_km_out]
            # list_gpmodels = self.update_models(list_gp=list_gpmodels, list_x=list_Xnorm_cons, list_y=list_Ynorm_cons)
        return self.data_x, self.data_y, self.hyps, self.log_lik_opt
Example 12
    def run(self, maxiters=20):
        opt_config = import_attr(
            'tfbo/configurations',
            attribute='KLacquisition_opt')  # check import configuration
        for j in range(maxiters):
            print('iteration: ', j)

            self.reset_graph()

            # initialize model
            k_list, gp_nnjoint, nn = self.initialize_modelM()
            gp_nnjoint.kern.kernels[1].variance = 10.

            # # test likelihood
            # lik_nn = gp_nnjoint.compute_log_likelihood()
            # Xnn0 = gp_nnjoint.nn.np_forward(self.Xprobit)
            # kc = k_list[1] * k_list[0]
            # Xnn_test = np.concatenate(
            #     [np.concatenate([Xnn0, np.ones(shape=[np.shape(Xnn0)[0], 1]) * i], axis=1)
            #      for i in range(gp_nnjoint.Mo_dim)], axis=0)
            # Y_test = np.concatenate([self.Xprobit[:, i][:, None] for i in range(gp_nnjoint.Mo_dim)], axis=0)
            # gp_test = gpflow.models.GPR(X=Xnn_test, Y=Y_test, kern=kc)
            # gp_test.likelihood.variance = 1e-06
            # lik_test = gp_test.compute_log_likelihood()
            # gpm = gpflow.models.GPR(X=Xnn0, Y=self.Ynorm, kern=k_list[2])
            # gpm.likelihood.variance = 1e-06
            # likm = gpm.compute_log_likelihood()
            # lik_err = np.abs(lik_nn-lik_test-likm)

            # gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
            try:
                gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
            except Exception:
                try:
                    gp_nnjoint.likelihood.variance = 1e-03
                    gp_nnjoint.kern.kernels[1].lengthscales = np.ones(
                        shape=[self.proj_dim])
                    gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
                except Exception:
                    print(
                        'Failure in optimization of hyper-parameters, reset to standard ones'
                    )

            Xnn = gp_nnjoint.nn.np_forward(self.Xprobit)
            self.Xnn = Xnn
            self.hyps.append(self.get_hyps(gp_nnjoint))

            # # test Manifold GP predictions
            # fmean, fvar = gp_nnjoint.predict_f(Xnn)

            # optimize the acquisition function within 0,1 bounds
            kwargs = {'ymin': self.Ynorm.min()}
            acquisition = initialize_acquisition(loss=self.loss,
                                                 gpmodel=gp_nnjoint,
                                                 **kwargs)
            # opt_config = self.update_latent_bounds(opt_config)
            if (j % 10 == 0):
                self.learnLipschitz(
                    gp_nnjoint)  # learn new Lipschitz constant every 10 iters
            opt_config = self.NLconstraint(opt_config)
            x_proj_tp1, acq_tp1 = self.minimize_acquisition(
                acquisition, opt_config)

            x_tp1 = self.generate_x(x_proj_tp1, gp_nnjoint)

            y_tp1 = self.evaluate(x_tp1)
            self.update_data(x_tp1, y_tp1)
        lik = []

        return self.data_x, self.data_y, self.hyps, lik
Example 13
    def run(self, maxiters=20):
        opt_config = import_attr('tfbo/configurations', attribute='acquisition_opt')
        for j in range(maxiters):
            print('iteration: ', j)

            self.reset_graph()

            # initialize model
            k_list, gp_nnjoint, nn = self.initialize_modelM()
            gp_nnjoint.kern.kernels[0].variance = 1.

            # # W_0_bt = np.copy(gp_nnjoint.nn.W_0.read_value())
            # # W_1_bt = np.copy(gp_nnjoint.nn.W_1.read_value())
            # # b_0_bt = np.copy(gp_nnjoint.nn.b_0.read_value())
            # # b_1_bt = np.copy(gp_nnjoint.nn.b_1.read_value())
            # # X_mean = np.mean(self.X_inf, axis=0, keepdims=True)
            # # X_std = np.std(self.X_inf, axis=0, keepdims=True)
            # # Xnorm = (self.X_inf - X_mean) / X_std
            # St = (1 / self.Xnorm.shape[0]) * np.matmul(self.Xnorm.transpose(), self.Xnorm)
            # l_St, q_St = np.linalg.eigh(St)
            # assert np.max(np.abs(np.matmul(np.matmul(q_St, np.diag(l_St)), q_St.transpose()) - St)) < 1e-09
            # assert np.max(np.abs(np.eye(q_St.shape[0]) - np.matmul(q_St, q_St.transpose()))) < 1e-09
            # assert np.max(np.abs(np.eye(q_St.shape[0]) - np.matmul(q_St.transpose(), q_St))) < 1e-09
            # U_d = np.copy(q_St[:, -self.proj_dim:])
            # max_evals = np.copy(l_St[-self.proj_dim:][None])
            # # assert np.max(np.abs(np.matmul(St, U_d) / max_evals - U_d)) < 1e-09
            # Y_d = np.matmul(U_d.transpose(), self.Xnorm.transpose())
            # gp_nnjoint.nn.W_0 = np.copy(U_d.transpose())

            try:
                gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
            except Exception:
                try:
                    gp_nnjoint.jitter = 1e-03
                    gp_nnjoint.noise_variance = 1e-03
                    gp_nnjoint.kern.kernels[1].lengthscales = np.ones(shape=[self.proj_dim])
                    gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
                except Exception:
                    print('Failure in optimization of hyper-parameters, reset to standard ones')

            Xnn = gp_nnjoint.nn.np_forward(self.Xnorm)
            # Xnn = gp_nnjoint.nn.np_forward(self.X_inf)
            self.Xnn = Xnn
            self.hyps.append(self.get_hyps(gp_nnjoint))

            # W_0_at = np.copy(gp_nnjoint.nn.W_0.read_value())
            # W_1_at = np.copy(gp_nnjoint.nn.W_1.read_value())
            # b_0_at = np.copy(gp_nnjoint.nn.b_0.read_value())
            # b_1_at = np.copy(gp_nnjoint.nn.b_1.read_value())
            #
            # mgp = gpflow.models.GPR(X=np.copy(Xnn), Y=np.copy(self.Ynorm), kern=k_list[1])
            # mgp.likelihood.variance = np.copy(gp_nnjoint.likelihood.variance.value) + np.copy(gp_nnjoint.jitter.read_value())
            # lik_mgp = mgp.compute_log_likelihood()
            # fmean_test, fvar_test = mgp.predict_f(Xnn)
            # lik_gpnnjoint = gp_nnjoint.compute_log_likelihood()
            # lik_gpnnjoint0 = gp_nnjoint.compute_log_likelihood()
            # err_repeat_lik = np.abs(lik_gpnnjoint - lik_gpnnjoint0)
            # fmean_f, fvar_f = gp_nnjoint.predict_f(Xnn)
            # # err_lik = np.abs(lik_mgp - lik_gpnnjoint)
            # err_posterior_mean = np.max(np.abs(fmean_test - fmean_f))
            # err_mean = np.max(np.abs(fmean_f - self.Ynorm))
            # err_posterior_var = np.max(np.abs(fvar_test - fvar_f))
            # Xnew = np.tile(np.linspace(start=0., stop=1., num=500)[:, None], [1, Xnn.shape[1]])
            # logpdf_test, logpdfm_test, uZ, uZ_test_reshape, uZnew, Kprior_inv, Kprior, check_same_as_X_vec, X_vec, SMarg_inv, SMarg, Vprior_inv, Vprior, LK_invT, logpdfm_test_eigh, e_Km, v_Km, Km_test, d_eigh, Km_component, Lm_component = gp_nnjoint.test_log_likelihood(Xnew)
            # logpdf_test0, logpdfm_test0, uZ0, uZ_test_reshape0, uZnew0, Kprior_inv0, Kprior0, check_same_as_X_vec0, X_vec0, SMarg_inv0, SMarg0, Vprior_inv0, Vprior0, LK_invT0, logpdfm_test_eigh0, e_Km0, v_Km0, Km_test0, d_eigh0, Km_component0, Lm_component0 = gp_nnjoint.test_log_likelihood(Xnew)
            #
            # # check same values are returned over multiple calls
            # assert logpdf_test == logpdf_test0 and logpdfm_test == logpdfm_test0 and np.all(uZ == uZ0) and np.all(
            #     uZ_test_reshape == uZ_test_reshape0) and np.all(Kprior_inv == Kprior_inv0) and np.all(
            #     Kprior == Kprior0) and np.all(check_same_as_X_vec == check_same_as_X_vec0) and np.all(
            #     X_vec == X_vec0) and np.all(SMarg_inv == SMarg_inv0) and np.all(SMarg == SMarg0) and np.all(
            #     Vprior_inv == Vprior_inv0) and np.all(Vprior == Vprior0) and np.all(LK_invT == LK_invT0)
            # assert np.all(logpdfm_test_eigh == logpdfm_test_eigh0) and np.all(e_Km == e_Km0) and np.all(
            #     v_Km == v_Km0) and np.all(Km_test == Km_test0) and np.all(d_eigh == d_eigh0)
            # assert np.all(Km_component == Km_component0) and np.all(Lm_component == Lm_component0)
            #
            # kernBLR = LinearGeneralized(input_dim=self.Mo_dim, L_p=LK_invT)
            # gpBLR = gpflow.models.GPR(X=np.copy(uZ.transpose()), Y=np.copy(self.X_inf), kern=kernBLR)
            # # gpBLR = gpflow.models.GPR(X=np.copy(uZ.transpose()), Y=np.copy(self.Xnorm), kern=kernBLR)
            # gpBLR.likelihood.variance = np.copy(gp_nnjoint.noise_variance.read_value())
            # lik_BLR = gpBLR.compute_log_likelihood()
            # err_logpdfBLR = np.abs(lik_BLR - logpdf_test)
            #
            # # perform checkings listed in "_test_likelihood" method
            # threshold = 1e-04
            # assert np.all(uZ_test_reshape == uZ) and np.max(
            #     np.abs(np.matmul(Kprior_inv, Kprior) - np.eye(Kprior.shape[0]))) < threshold and np.all(
            #     check_same_as_X_vec == X_vec) and np.max(
            #     np.abs(np.matmul(SMarg_inv, SMarg) - np.eye(SMarg.shape[0]))) < threshold and np.max(
            #     np.abs(np.matmul(Vprior_inv, Vprior) - np.eye(Vprior.shape[0]))) < threshold and np.abs(
            #     logpdfm_test - lik_mgp) < threshold and np.abs(lik_BLR - logpdf_test) < threshold and np.abs(
            #     lik_gpnnjoint - lik_BLR - lik_mgp) < threshold
            #
            # # uZsT_cov = gp_nnjoint.test_predict_x(self.Xnn[0, :][None])
            # reconstructed_x = []
            # for i in range(self.Xnn.shape[0]):
            #     Xmean_i, Spost, V = gp_nnjoint.predict_x(self.Xnn[i, :][None])
            #     reconstructed_x.append(Xmean_i)
            # X_inf_reconstructed = np.concatenate(reconstructed_x, axis=0)
            # err_reconstruction = np.max(np.abs(X_inf_reconstructed - self.X_inf))
            # # err_reconstruction = np.max(np.abs(X_inf_reconstructed - self.Xnorm))
            #
            # LSxx_invT, MNT, post_meanT, post_S_test, Vp_test, uZs_test, uZ_test = gp_nnjoint.test_predict_x(self.Xnn[0, :][None])
            # kernBLR_post = LinearGeneralized(input_dim=self.Mo_dim, L_p=LSxx_invT)
            # mean_function_post = gpflow.mean_functions.Linear(A=MNT, b=np.zeros(1))
            # gpBLR_post = gpflow.models.GPR(X=np.copy(uZ.transpose()), Y=np.copy(self.X_inf), kern=kernBLR_post, mean_function=mean_function_post)
            # # gpBLR_post = gpflow.models.GPR(X=np.copy(uZ.transpose()), Y=np.copy(self.Xnorm), kern=kernBLR_post, mean_function=mean_function_post)
            # gpBLR_post.likelihood.variance = np.copy(gp_nnjoint.noise_variance.read_value())
            # X_inf_rec_test, X_inf_var_test = gpBLR_post.predict_f(np.copy(uZ.transpose()))
            # assert np.max(np.abs(X_inf_reconstructed - X_inf_rec_test)) < 1e-01


            # optimize the acquisition function within 0,1 bounds
            kwargs = {'ymin': self.Ynorm.min()}
            acquisition = initialize_acquisition(loss=self.loss, gpmodel=gp_nnjoint, **kwargs)

            opt_config = self.update_latent_bounds(opt_config)
            x_proj_tp1, acq_tp1 = self.minimize_acquisition(acquisition, opt_config)

            x_tp1 = self.generate_x(x_proj_tp1, gp_nnjoint)

            y_tp1 = self.evaluate(x_tp1)
            self.update_data(x_tp1, y_tp1)
        lik = []

        return self.data_x, self.data_y, self.hyps, lik
Example 14
def bloc_diag_initialize_models(x, y, input_dim, model, kernel, ARD, nn=None, decomp=None, **kwargs):
    '''
    Initializer for the Manifold GP autoencoder: it builds the manifold GP encoder and the multi-output GPs for the decoder.
    '''
    kernel_attr = import_attr('gpflow/kernels', attribute=kernel)
    if model == 'joint':
        # Joint Manifold GP and Manifold MOGP model. The Manifold MOGP assumes independence between subsets of dimensions (components).
        # In the 'joint' mode, a different base kernel is defined for each component.
        output_dim = len(decomp[0])
        def _kern_new():
            return kernel_attr(input_dim=input_dim, ARD=ARD, active_dims=list(range(input_dim)), lengthscales=np.ones(shape=[input_dim])*0.2) * \
                   gpflow.kernels.Coregion(input_dim=1, output_dim=output_dim, rank=output_dim, active_dims=[input_dim])
        kern_out = [_kern_new() for _ in range(len(decomp))]

        for kern_i in kern_out:
            np.random.seed(23)
            kern_i.kernels[1].W = np.random.randn(output_dim, output_dim)

        kern_last = kernel_attr(input_dim=input_dim, ARD=ARD, lengthscales=np.ones(shape=[input_dim]) * 0.2)
        kern_out.append(kern_last)
        kern_joint = Kparallel(kern_out)
        gp_out = FastNN_MoGPR(X=x, Y=y, kern=kern_joint, nn=nn, Mo_dim=output_dim)  # Mo_dim = int(3)
    elif model == 'diagonal_joint':
        # Joint Manifold GP and Manifold MOGP model. The Manifold MOGP assumes independence between subsets of dimensions (components).
        # In the 'diagonal_joint' mode, the same base kernel ('single_kernel') is shared across all components.
        output_dim = len(decomp[0])
        single_kernel = kernel_attr(input_dim=input_dim, ARD=ARD, active_dims=list(range(input_dim)),
                    lengthscales=np.ones(shape=[input_dim]) * 0.2)
        def _kern_new():
            return single_kernel * gpflow.kernels.Coregion(input_dim=1, output_dim=output_dim, rank=output_dim,
                                                active_dims=[input_dim])
        kern_out = [_kern_new() for _ in range(len(decomp))]


        for kern_i in kern_out:
            np.random.seed(23)
            kern_i.kernels[1].W = np.random.randn(output_dim, output_dim)

        kern_last = kernel_attr(input_dim=input_dim, ARD=ARD, lengthscales=np.ones(shape=[input_dim]) * 0.2)
        kern_out.append(kern_last)
        kern_joint = Kparallel(kern_out)
        gp_out = FastNN_MoGPR(X=x, Y=y, kern=kern_joint, nn=nn, Mo_dim=output_dim)
    elif model == 'joint_Full':
        # Assuming kern in input is "Multiple_k" kernel with:
        # kern.K(, i=0) = gpflow.kernels.Coregion           Coregionalization kernel MOGP
        # kern.K(, i=1) = gpflow.kernels.Matern52/RBF/etc.  Standard kernel MOGP
        # kern.K(, i=2) = gpflow.kernels.Matern52/RBF/etc.  Standard kernel Manifold GP
        output_dim = x.shape[1]
        kern_out = []
        # [0] Coregionalization kernel
        kern_out.append(gpflow.kernels.Coregion(input_dim=1, output_dim=output_dim, rank=output_dim, active_dims=[
            input_dim]))  # input_dim = proj_dim, output_dim = x.shape[1]
        np.random.seed(23)
        kern_out[0].W = np.random.randn(output_dim, output_dim)
        # [1] Standard kernel MOGP
        kern_out.append(kernel_attr(input_dim=input_dim, ARD=ARD, active_dims=list(range(input_dim)),
                                    lengthscales=np.ones(shape=[input_dim]) * 0.2))
        # [2] Standard kernel Manifold GP
        kern_out.append(kernel_attr(input_dim=input_dim, ARD=ARD, lengthscales=np.ones(shape=[input_dim]) * 0.2))

        kern_joint = Multiple_k(kern_out)
        gp_out = NN_FullMoGP(X=x, Y=y, kern=kern_joint, nn=nn, Mo_dim=output_dim)
    else:
        raise ValueError('Model specified not implemented')

    return kern_out, gp_out
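
Note: the Coregion kernel used above parameterizes the output covariance as B = W W^T + diag(kappa), so seeding W with np.random.randn(output_dim, output_dim) under a fixed seed yields a full-rank initial B. A tiny NumPy illustration of that structure:

import numpy as np

np.random.seed(23)
output_dim = 3
W = np.random.randn(output_dim, output_dim)
kappa = np.ones(output_dim)
B = W @ W.T + np.diag(kappa)    # output covariance implied by Coregion
print(np.linalg.eigvalsh(B))    # positive definite by construction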
Example 15
    def run(self, maxiters):
        # load VAE models for a specific objective function
        path = '/home/rm4216/Desktop/ImperialCollege/Python/Github_manifold_bo/BayesOpt/Baselines/chemvae/' + \
               self.dict_args['obj'] + '/'
        encoder, decoder, prop_pred = self.load_vae_models(path)
        self.encoder = encoder
        self.decoder = decoder

        # encode inputs in low-dimensional space
        Z_vae, _ = encoder.predict(self.Xprobit[:, :, None])
        self.Z_vae = Z_vae.astype(np.float64)

        # initialize GP with embedded inputs "Z_vae" and normalized outputs "Ynorm"
        kernel, gp = initialize_models(x=np.copy(self.Z_vae),
                                       y=np.copy(self.Ynorm),
                                       input_dim=self.proj_dim,
                                       model='GPR',
                                       kernel='Matern52',
                                       ARD=True)
        opt_config = import_attr(
            'tfbo/configurations',
            attribute='acquisition_opt')  # check import configuration

        for i in range(maxiters):
            print(i)

            try:
                gpflow.train.ScipyOptimizer().minimize(gp)
            except Exception:
                # if hyper-parameter optimization throws an error, reset the values to reference
                gp = self.reset_hyps(gp)
            self.hyps.append(self.get_hyps(gp))

            kwargs = {'ymin': self.Ynorm.min()}
            acquisition = initialize_acquisition(
                self.loss, gpmodel=gp,
                **kwargs)  # updated model at each iteration
            opt_config = self.update_latent_bounds(opt_config)
            try:
                z_tp1, acq_tp1 = self.minimize_acquisition(
                    acquisition,
                    opt_config)  # check configuration, starting point
            except Exception:
                gp = self.reset_hyps(gp)
                z_tp1, acq_tp1 = self.minimize_acquisition(
                    acquisition, opt_config)

            # self.reset_graph()
            # encoder, decoder, prop_pred = self.load_vae_models(path)
            # self.encoder = encoder
            # self.decoder = decoder

            x_tp1 = self.decode_zopt(z_tp1)
            y_tp1 = self.evaluate(x_tp1)

            self.update_data(x_tp1, y_tp1)

            Z_vae, _ = encoder.predict(self.Xprobit[:, :, None])
            self.Z_vae = Z_vae.astype(np.float64)

            self.reset_graph()
            kernel, gp = initialize_models(x=np.copy(self.Z_vae),
                                           y=np.copy(self.Ynorm),
                                           input_dim=self.proj_dim,
                                           model='GPR',
                                           kernel='Matern52',
                                           ARD=True)

        return self.data_x, self.data_y, self.hyps, self.log_lik_opt
Example 16
import numpy as np
import gpflow
import sys, os
sys.path.insert(0, os.path.join(sys.path[0], '..'))
from tfbo.utils.import_modules import import_attr
from tfbo.models.gplvm_models import FullMoGP


np.random.seed(123)

input_dim = int(60)
N = int(500)
output_dim = input_dim

X = np.random.uniform(low=0., high=1., size=[N, input_dim])
task_attr = import_attr('datasets/tasks/all_tasks', attribute='ProductSinesLinear10D')
objective = task_attr()
Y = objective.f(X, fulldim=False, noisy=True)

k0 = gpflow.kernels.Matern52(input_dim=input_dim, ARD=True, active_dims=list(range(input_dim)), lengthscales=np.ones(shape=[input_dim])*0.2)
k1 = gpflow.kernels.Coregion(input_dim=1, output_dim=output_dim, rank=output_dim, active_dims=[input_dim])
kernel = k0 * k1
np.random.seed(23)
kernel.kernels[1].W = np.random.randn(output_dim, output_dim)

gpMo = FullMoGP(X=X, Y=Y, kern=kernel, Mo_dim=output_dim)
gpMo.as_pandas_table()
logpdf_qlq = gpMo.compute_log_likelihood()


Example 17
def initialize_models(x,
                      y,
                      input_dim,
                      model,
                      kernel,
                      ARD,
                      decomp=None,
                      quantile=None,
                      **kwargs):
    '''
    Initialize the gpflow regression models used in the optimization.
    :param x, y: dataset
    :param input_dim: input dimensionality of the kernel
    :param kernel: name of the kernel class (e.g. 'Matern52')
    :param ARD: whether to use one lengthscale per input dimension
    :return: kernel(s) and model(s)
    '''
    if model == 'GPR':
        # with gpflow.defer_build():
        kernel_attr = import_attr('gpflow/kernels', attribute=kernel)
        kernel_out = kernel_attr(input_dim=input_dim, ARD=ARD)
        from tfbo.models.gpr_models import GPR_stable
        gpmodel = GPR_stable(X=x, Y=y, kern=kernel_out)
        # gpmodel.kern.lengthscales.prior = gpflow.priors.Gamma(shape=1., scale=1.)
        # gpmodel.compile()
        # # gpmodel = gpflow.models.GPR(X=x, Y=y, kern=kernel_out)
    if model == 'AddGPR':
        # with gpflow.defer_build():
        # use decomp: list of rank-1 arrays containing active dims
        kernel_attr = import_attr('gpflow/kernels', attribute=kernel)

        def kern_i(kern_attr, input_dim, ARD, decomp_i):
            return kern_attr(input_dim=input_dim,
                             ARD=ARD,
                             active_dims=list(decomp_i))

        kernel_i = lambda d_i: kern_i(
            kern_attr=kernel_attr, input_dim=input_dim, ARD=ARD, decomp_i=d_i)
        kernels = list(map(kernel_i, decomp))

        k_collection = Collection(kernels)

        def gpmodel_i(x, y, k_collection, indices_kernels):
            return AddGPR(X=x, Y=y, kern=k_collection,
                          i=[indices_kernels
                             ])  # indices list for sum of kernels

        gp_i = lambda i_list: gpmodel_i(
            x=x, y=y, k_collection=k_collection, indices_kernels=i_list)
        indices = list(range(len(decomp)))  #  + [list(range(len(decomp)))]
        gpmodel = list(map(gp_i, indices))
        kernel_out = kernels
        # for j in range(len(gpmodel)):
        #     gpmodel[0].kern.kernels[j].lengthscales.prior = gpflow.priors.Gamma(shape=1., scale=1.)
        # #     gpmodel[0].kern.kernels[j].variance.prior = gpflow.priors.Gamma(shape=3., scale=2.)
        # for gpmodel_j in gpmodel:
        #     gpmodel_j.compile()
    if model == 'QGPR':
        with gpflow.defer_build():
            kernel_attr = import_attr('gpflow/kernels', attribute=kernel)
            kernel_out = kernel_attr(input_dim=input_dim, ARD=ARD)
            from tfbo.models.gpr_models import QGPR_stable
            gpmodel = QGPR_stable(X=x, Y=y, kern=kernel_out, quantile=quantile)
            gpmodel.kern.lengthscales.prior = gpflow.priors.Gamma(shape=1.,
                                                                  scale=1.)
            # gpmodel.kern.variance.prior = gpflow.priors.Gamma(shape=7.5, scale=1.)
            gpmodel.compile()
    return kernel_out, gpmodel
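
Note: for the 'GPR' branch, GPR_stable acts as a drop-in for gpflow.models.GPR with extra numerical safeguards (see the commented fallback above). A minimal equivalent with plain gpflow, assuming the gpflow 1.x API used throughout these examples:

import numpy as np
import gpflow

np.random.seed(0)
X = np.random.uniform(size=[50, 4])
Y = np.random.normal(size=[50, 1])
kern = gpflow.kernels.Matern52(input_dim=4, ARD=True)
gp = gpflow.models.GPR(X=X, Y=Y, kern=kern)
gpflow.train.ScipyOptimizer().minimize(gp)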
Example 18
def initialize_m_models(x,
                        y,
                        input_dim,
                        model,
                        kernel,
                        ARD,
                        nn=None,
                        decomp=None,
                        **kwargs):
    '''
    Initializer for the Manifold GP autoencoder: it builds the manifold GP encoder and the multi-output GPs for the decoder.
    '''
    kernel_attr = import_attr('gpflow/kernels', attribute=kernel)
    if model == 'encoder':
        # Return the kernel and the manifold GP used for learning the nonlinear embedding z = NN(X). Not lists!
        # input_dim: proj_dim, the kernel operates in a low-dimensional space
        # nn is required! decomp is None!
        assert nn is not None and decomp is None
        kern_out = kernel_attr(input_dim=input_dim,
                               ARD=ARD,
                               lengthscales=np.ones(shape=[input_dim]) * 0.2)
        gp_out = MGPR(X=x, Y=y, kern=kern_out, nn=nn)
    elif model == 'decoder':
        # Return a list of kernels and multi-output GPs for learning the mapping to the original space.
        # input_dim: proj_dim, the kernel operates in a low-dimensional space
        # output_dim: number of output features for each MoGP
        # decomp: list of lists, each low-level list corresponds to indices of features mapped by the MoGP
        # decomp is required! nn is None!
        assert decomp is not None and nn is None
        output_dim = len(decomp[0])

        def _kern():
            return kernel_attr(input_dim=input_dim, ARD=ARD, active_dims=list(range(input_dim)), lengthscales=np.ones(shape=[input_dim])*0.2) * \
                   gpflow.kernels.Coregion(input_dim=1, output_dim=output_dim, rank=output_dim, active_dims=[input_dim])

        kern_out = [_kern() for _ in range(len(decomp))]

        X_aug = np.vstack([
            np.hstack([x.copy(), np.ones(shape=[x.shape[0], 1]) * i])
            for i in range(output_dim)
        ])

        def _MoGP(decomp_i, kern_i):
            # kern_i.as_pandas_table()
            np.random.seed(23)
            kern_i.kernels[1].W = np.random.randn(output_dim, output_dim)
            # kern_i.as_pandas_table()
            Y = y[:, decomp_i].copy()
            Y_aug = np.vstack(
                [Y[:, i].copy()[:, None] for i in range(len(decomp_i))])
            MoGP_i = Stable_GPR(X=X_aug, Y=Y_aug, kern=kern_i)
            MoGP_i.likelihood.variance = 1e-06  # 0.001
            return MoGP_i

        gp_out = list(map(_MoGP, decomp, kern_out))
    elif model == 'joint':
        output_dim = len(decomp[0])

        def _kern_new():
            return kernel_attr(input_dim=input_dim, ARD=ARD, active_dims=list(range(input_dim)), lengthscales=np.ones(shape=[input_dim])*0.2) * \
                   gpflow.kernels.Coregion(input_dim=1, output_dim=output_dim, rank=output_dim, active_dims=[input_dim])

        kern_out = [_kern_new() for _ in range(len(decomp))]

        for kern_i in kern_out:
            np.random.seed(23)
            kern_i.kernels[1].W = np.random.randn(output_dim, output_dim)

        kern_last = kernel_attr(input_dim=input_dim,
                                ARD=ARD,
                                lengthscales=np.ones(shape=[input_dim]) * 0.2)
        kern_out.append(kern_last)
        kern_joint = Kstack(kern_out)
        gp_out = NN_MoGPR(X=x, Y=y, kern=kern_joint, nn=nn, Mo_dim=output_dim)
    elif model == 'joint_Full':
        # Assuming kern in input is "Multiple_k" kernel with:
        # kern.K(, i=0) = gpflow.kernels.Coregion           Coregionalization kernel MOGP
        # kern.K(, i=1) = gpflow.kernels.Matern52/RBF/etc.  Standard kernel MOGP
        # kern.K(, i=2) = gpflow.kernels.Matern52/RBF/etc.  Standard kernel Manifold GP
        output_dim = x.shape[1]
        kern_out = []
        # [0] Coregionalization kernel
        kern_out.append(
            gpflow.kernels.Coregion(
                input_dim=1,
                output_dim=output_dim,
                rank=output_dim,
                active_dims=[
                    input_dim
                ]))  # input_dim = proj_dim, output_dim = x.shape[1]
        np.random.seed(23)
        kern_out[0].W = np.random.randn(output_dim, output_dim)
        # [1] Standard kernel MOGP
        kern_out.append(
            kernel_attr(input_dim=input_dim,
                        ARD=ARD,
                        active_dims=list(range(input_dim)),
                        lengthscales=np.ones(shape=[input_dim]) * 0.2))
        # [2] Standard kernel Manifold GP
        kern_out.append(
            kernel_attr(input_dim=input_dim,
                        ARD=ARD,
                        lengthscales=np.ones(shape=[input_dim]) * 0.2))

        kern_joint = Multiple_k(kern_out)
        gp_out = NN_FullMoGP(X=x,
                             Y=y,
                             kern=kern_joint,
                             nn=nn,
                             Mo_dim=output_dim)
    elif model == 'BLR':
        output_dim = x.shape[1]
        kern_out = []
        # [0] Standard kernel MOGP
        kern_out.append(
            kernel_attr(input_dim=input_dim,
                        ARD=ARD,
                        active_dims=list(range(input_dim)),
                        lengthscales=np.ones(shape=[input_dim]) * 0.5))
        # [1] Standard kernel Manifold GP
        kern_out.append(
            kernel_attr(input_dim=input_dim,
                        ARD=ARD,
                        lengthscales=np.ones(shape=[input_dim]) * 0.5))

        kern_joint = Multiple_k(kern_out)
        alpha_param = alpha()
        # p = output_dim
        p = int(250)
        np.random.seed(123)
        sample_train = np.random.normal(loc=0., scale=1., size=[x.shape[0], p])
        sample_test = np.random.normal(loc=0., scale=1., size=[1, p])
        gp_out = NN_BLRMoGP(X=x,
                            Y=y,
                            kern=kern_joint,
                            nn=nn,
                            Mo_dim=output_dim,
                            alpha=alpha_param,
                            sample_train=sample_train,
                            sample_test=sample_test,
                            p=p)
    else:
        raise ValueError('Model specified not implemented')

    return kern_out, gp_out
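
Note: the 'decoder' branch builds multi-output GPs with the usual coregionalization encoding: a task-index column is appended to the inputs and the output features are stacked into a single column, mirroring X_aug and Y_aug above. A small NumPy illustration of that augmentation:

import numpy as np

x = np.random.uniform(size=[4, 2])    # latent inputs
y = np.random.normal(size=[4, 3])     # one 3-feature output component
output_dim = 3

X_aug = np.vstack([np.hstack([x, np.full([x.shape[0], 1], float(i))])
                   for i in range(output_dim)])                   # [12, 3]
Y_aug = np.vstack([y[:, i][:, None] for i in range(output_dim)])  # [12, 1]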
Example 19
parser = argparse.ArgumentParser(
    description=
    'Input: seed_number, objective_name, optimizer_name, loss_name, proj_dim,'
    ' input_dim, maxiter, proportion_of_SR')
for name_i, default_i, type_i, help_i in zip(names, defaults, types, helps):
    parser.add_argument('--' + name_i,
                        default=default_i,
                        type=type_i,
                        help=help_i)
args = parser.parse_args()
dict_args = vars(args)
print(dict_args)

# check inputs
verify_attr = import_attr('tfbo/utils/check_inputs',
                          attribute='verify_dict_inputs')
verify_attr(dict_args)
string_to_int_attr = import_attr('tfbo/utils/check_inputs',
                                 attribute='transform_inputs')
dict_args['seed'] = string_to_int_attr(dict_args['seed'])

# load start
load_start_attr = import_attr('tfbo/utils/load_save',
                              attribute='load_SRinitializations')
xy_list = load_start_attr(dict_args, names)

# load optimizer
path_opt = 'tfbo/optimizers/' + dict_args['opt'] + '_optimizer'
optim_attr = import_attr(path_opt, attribute=dict_args['opt'] + '_optimizer')

# load objective
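
Note: the parser above is driven by parallel names/defaults/types/helps lists defined earlier in the script. A self-contained sketch of the same pattern (list contents hypothetical):

import argparse

names = ['seed', 'obj']
defaults = ['0', 'Hartmann6D']
types = [str, str]
helps = ['comma-separated seed list', 'objective name']

parser = argparse.ArgumentParser(description='list-driven argument definition')
for name_i, default_i, type_i, help_i in zip(names, defaults, types, helps):
    parser.add_argument('--' + name_i, default=default_i, type=type_i, help=help_i)
print(vars(parser.parse_args([])))   # parse an empty argv for demonstration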
Example 20
    def run(self, maxiters=20):
        opt_config = import_attr(
            'tfbo/configurations',
            attribute='KLacquisition_opt')  # check import configuration
        for j in range(maxiters):
            print('iteration: ', j)

            self.reset_graph()

            # initialize model
            k_list, gp_nnjoint, nn = self.initialize_modelM()
            gp_nnjoint.kern.kernels[1].variance = 10.

            # # test likelihood
            # lik_nn = gp_nnjoint.compute_log_likelihood()
            # Xnn0 = gp_nnjoint.nn.np_forward(self.Xnorm)
            # kc = k_list[1] * k_list[0]
            # Xnn_test = np.concatenate(
            #     [np.concatenate([Xnn0, np.ones(shape=[np.shape(Xnn0)[0], 1]) * i], axis=1)
            #      for i in range(gp_nnjoint.Mo_dim)], axis=0)
            # Y_test = np.concatenate([self.Xnorm[:, i][:, None] for i in range(gp_nnjoint.Mo_dim)], axis=0)
            # gp_test = gpflow.models.GPR(X=Xnn_test, Y=Y_test, kern=kc)
            # gp_test.likelihood.variance = 1e-06
            # lik_test = gp_test.compute_log_likelihood()
            # gpm = gpflow.models.GPR(X=Xnn0, Y=self.Ynorm, kern=k_list[2])
            # gpm.likelihood.variance = 1e-06
            # likm = gpm.compute_log_likelihood()
            # lik_err = np.abs(lik_nn-lik_test-likm)

            gp_nnjoint.nn.W_0 = np.array([[-0.72257252, 0.26017714],
                                          [-1.23547449, -0.87398094],
                                          [-0.05795134, 0.22184529],
                                          [-4.33704576, -1.03866942],
                                          [4.16884434, 0.1687948]])
            gp_nnjoint.nn.W_1 = np.array([
                [-0.17611191, 0.84349685, 1.44230698, 0.18555664, -0.19708862],
                [-0.13689745, 1.86417045, 2.33110755, 1.20521291, 0.71162644],
                [0.47687133, 0.31373425, -1.1891341, 2.18089067, -3.93909819],
                [
                    -0.2272015, 1.93327611, -1.57774183, -1.26255085,
                    -0.15080552
                ],
                [
                    -0.4890983, -1.81724449, -1.65700209, -0.75827901,
                    1.64434325
                ],
                [0.10663821, -0.12244555, 2.26286785, -0.88992352, 2.63438025],
                [-1.14518348, -2.48144707, -0.35203317, 0.23830179, 0.0816695],
                [-0.5185169, 2.43075116, 0.09996988, 1.56821543, 2.57299817],
                [
                    1.27373299, -2.17523897, 2.56801105, -1.29495389,
                    -1.38732749
                ],
                [2.16933267, -0.82218552, 1.94225155, 3.44593108, 1.76706837]
            ])
            gp_nnjoint.nn.W_2 = np.array(
                [[
                    -1.06815199, 0.67328749, 1.33295767, -0.82976342,
                    1.08580199, 0.07772985, -0.45765023, -0.05497667,
                    -2.4756558, 0.08808674
                ],
                 [
                     0.85855821, -0.10785176, 1.40417131, -1.4510554,
                     -2.43215512, 0.58832488, -0.31426693, 0.88093524,
                     -0.18911669, -1.21866324
                 ],
                 [
                     0.8989253, -0.04077404, 4.74024619, -0.25097489,
                     -0.68791512, -2.8158515, -1.05096808, -1.15249423,
                     2.40093649, 2.84014738
                 ],
                 [
                     1.71409331, 0.21485905, 0.47611273, 3.44473025,
                     -0.1917658, 3.08725273, -0.97657774, 0.22685569,
                     0.33642754, 0.69626424
                 ],
                 [
                     0.60789342, -2.02719287, 0.43644935, 2.13129863,
                     -0.4946168, 0.3486837, -0.02468686, -2.11012978,
                     0.80318346, -2.0538133
                 ]])
            gp_nnjoint.nn.W_3 = np.array(
                [[-1.17012522, 1.4669893, -2.33431889, 4.54361068, 0.219858]])
            gp_nnjoint.nn.b_0 = np.array([[-1.95648467], [-0.40078642],
                                          [0.03963978], [-3.13848025],
                                          [0.89017789]])
            gp_nnjoint.nn.b_1 = np.array([[0.84520059], [0.5069299],
                                          [-1.45844994], [0.32032038],
                                          [0.94691029], [0.87558343],
                                          [-0.41215514], [0.13526481],
                                          [-1.00605875], [-0.02132958]])
            gp_nnjoint.nn.b_2 = np.array([[-0.11726942], [0.14056033],
                                          [1.38538488], [1.71165805],
                                          [-0.41426653]])
            gp_nnjoint.nn.b_3 = np.array([[1.19480249]])
            # gp_nnjoint.nn.trainable = False

            # gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
            try:
                gp_nnjoint.kern.kernels[1].lengthscales = np.ones(
                    shape=[self.proj_dim])
                gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
            except Exception:
                try:
                    gp_nnjoint.kern.kernels[1].lengthscales = np.ones(
                        shape=[self.proj_dim])
                    gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
                except Exception:
                    print(
                        'Failure in optimization of hyper-parameters, reset to standard ones'
                    )

            Xnn = gp_nnjoint.nn.np_forward(self.Xnorm)
            self.Xnn = Xnn
            self.hyps.append(self.get_hyps(gp_nnjoint))

            # # test Manifold GP predictions
            # fmean, fvar = gp_nnjoint.predict_f(Xnn)

            # optimize the acquisition function within 0,1 bounds
            kwargs = {'ymin': self.Ynorm.min()}
            acquisition = initialize_acquisition(loss=self.loss,
                                                 gpmodel=gp_nnjoint,
                                                 **kwargs)
            # opt_config = self.update_latent_bounds(opt_config)
            if (j % 10 == 0):
                self.learnLipschitz(
                    gp_nnjoint)  # learn new Lipschitz constant every 10 iters
            opt_config = self.NLconstraint(opt_config)
            x_proj_tp1, acq_tp1 = self.minimize_acquisition(
                acquisition, opt_config)

            x_tp1 = self.generate_x(x_proj_tp1, gp_nnjoint)

            y_tp1 = self.evaluate(x_tp1)
            self.update_data(x_tp1, y_tp1)
        lik = []

        return self.data_x, self.data_y, self.hyps, lik
Example 21
        ('seed', int(0)),
        ('obj', 'RosenbrockLinear10D'),
        ('opt', 'NN_bo'),
        ('loss', 'Neg_pi'),
        ('proj_dim', int(10)),
        ('input_dim', int(60))
    ])
path_dict = '/home/rm4216/Desktop/ImperialCollege/Python/Github_manifold_bo/BayesOpt/tests/results/'
# filename = 'test_MgpOpt'
# dict_input = load_dictionary(path_dict + filename + '.p')
filename = name_synthetic_dict_no_quantile(dict_args)
# dict_input = load_dictionary(path_dict + filename + 'seed_' + str(dict_args['seed']) + '.p')
dict_input = load_dictionary(path_dict + filename + '.p')


task_attr = import_attr('datasets/tasks/all_tasks', attribute=dict_args['obj'])
objective = task_attr()
# f_out = objective.f(np.array([0.50313, 0.18502]), noisy=False, fulldim=True)
# f_out2 = objective.f(objective.minimizer1, noisy=False, fulldim=False)

xiters = dict_input['NNKL_bo']['Xepisodes'].shape[1]

if dict_args['obj'] == 'Branin2D':
    f, ax = generate_ax(title='', xlabel='iterations', ylabel='best f', xlim=[-1, xiters + 1.],
                        ylim=[objective.fmin - 0.5, 25.])
elif dict_args['obj'] == 'Hartmann6D':
    f, ax = generate_ax(title='', xlabel='iterations', ylabel='best f', xlim=[-1, xiters + 1.],
                        ylim=[objective.fmin - 0.5, 0.5])
elif dict_args['obj'] == 'HartmannLinear6D':
    f, ax = generate_ax(title='', xlabel='iterations', ylabel='best f', xlim=[-1, xiters + 1.],
                        ylim=[objective.fmin - 0.5, 0.5])
Example no. 22
0
    def run(self, maxiter=20):
        opt_config = import_attr(
            'tfbo/configurations',
            attribute='acquisition_opt')  # check import configuration
        opt_config['bounds'] = [(0., 1.)] * self.proj_dim * self.num_init
        for j in range(maxiter):
            print('iteration: ', j)
            # initialize model
            self.reset_graph()
            km, gpm, nn = self.initialize_modelM()
            # hyper-parameter training of gpm is disabled here; the
            # likelihood noise is fixed to a small value instead
            # try:
            #     gpflow.train.ScipyOptimizer().minimize(gpm)
            # except:
            #     self.reset_hyps(gpm)
            gpm.likelihood.variance = 0.001
            Xnn = gpm.nn.np_forward(self.Xnorm)
            self.Xnn = Xnn
            self.hyps.append(self.get_hyps(gpm))

            gp_acq = Stable_GPR(X=np.copy(Xnn), Y=np.copy(self.Ynorm), kern=km)
            gp_acq.likelihood.variance = gpm.likelihood.variance.read_value()
            # gp_acq.as_pandas_table()

            # optimize the acquisition function within 0,1 bounds
            kwargs = {'ymin': self.Ynorm.min()}
            acquisition = initialize_acquisition(loss=self.loss,
                                                 gpmodel=gp_acq,
                                                 **kwargs)
            x_proj_tp1, acq_tp1 = self.minimize_acquisition(
                acquisition, opt_config)

            k_list, gp_list = initialize_m_models(x=np.copy(Xnn),
                                                  y=np.copy(self.Xnorm),
                                                  input_dim=self.proj_dim,
                                                  model='decoder',
                                                  kernel='Matern52',
                                                  ARD=True,
                                                  nn=None,
                                                  decomp=self.decomposition)

            k_list.append(km)
            kern_joint = Kstack(k_list)
            gp_nnjoint = NN_MoGPR(X=np.copy(self.Xnorm),
                                  Y=np.copy(self.Ynorm),
                                  kern=kern_joint,
                                  nn=nn,
                                  Mo_dim=int(3))
            gp_nnjoint.likelihood.variance = 0.001

            fmean_acq, fvar_acq = gp_acq.predict_f(Xnn)
            fmean_joint, fvar_joint = gp_nnjoint.predict_f(Xnn)

            # jitter, jitter_mat = gp_nnjoint.compute_log_likelihood()

            gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)

            Xnn = gp_nnjoint.nn.np_forward(self.Xnorm)
            self.Xnn = Xnn

            # liks, lik = gp_nnjoint.compute_log_likelihood()
            # err_i = [np.abs(gp_list[i].compute_log_likelihood() - liks[i, 0]) for i in range(len(gp_list))]
            # errs = np.array(err_i)
            # transform the optimal point into the original input space and clip to 0,1 bound for feasibility
            x_tp1 = self.generate_x(x_proj_tp1, gp_list, gp_nnjoint)

            y_tp1 = self.evaluate(x_tp1)
            self.update_data(x_tp1, y_tp1)
            # self.reset_graph()
            # tf.reset_default_graph()
            # print(len(tf.all_variables()))
            # print(len(tf.get_default_graph().get_operations()))
        return self.data_x, self.data_y, self.hyps
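
The generate_x step above maps the acquisition optimum from the latent space back to the original inputs. A minimal sketch of that decode-and-clip step, assuming one decoder GP per block of input coordinates (decode_and_clip and decoder_gps are hypothetical names, not the repository's):

import numpy as np

def decode_and_clip(x_proj, decoder_gps):
    # predict each block of original coordinates from the latent point
    cols = [gp.predict_f(np.atleast_2d(x_proj))[0] for gp in decoder_gps]
    x = np.concatenate(cols, axis=1)
    # clip to the unit box so the suggested point stays feasible
    return np.clip(x, 0., 1.)
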
Example no. 23
0
    def run(self, maxiters):
        kernel, gp = initialize_models(x=np.copy(self.X_proj_norm),
                                       y=np.copy(self.Ynorm),
                                       input_dim=self.proj_dim,
                                       model='GPR',
                                       kernel='Matern52',
                                       ARD=True)
        opt_config = import_attr(
            'tfbo/configurations',
            attribute='acquisition_opt')  # check import configuration
        opt_config['bounds'] = [(self.proj_bounds[0][i], self.proj_bounds[1][i]) for i in range(self.proj_dim)] * \
                               self.num_init
        # opt_hyp = import_attr('tfbo/configurations', attribute='hyp_opt')
        # # var_list = gp.
        # opt_hyp['var_to_bounds'] = [(np.log(np.exp(1e-04) - 1.), np.log(np.exp(1e04) - 1.))] + \
        #                            [(np.log(np.exp(1e-06) - 1.), np.log(np.exp(1e06) - 1.))] * self.proj_dim + \
        #                            [(np.log(np.exp(1e-08) - 1.), np.log(np.exp(1e08) - 1.))]   # order of hyps in list

        for i in range(maxiters):
            print(i)
            # gp = self.fit_gp(gp)
            try:
                gpflow.train.ScipyOptimizer().minimize(gp)
            except Exception:
                # if hyper-parameter optimization fails, reset the values to the reference ones
                gp = self.reset_hyps(gp)
                # gpflow.train.ScipyOptimizer().minimize(gp)
            self.hyps.append(self.get_hyps(gp))

            kwargs = {'ymin': self.Ynorm.min()}
            acquisition = initialize_acquisition(
                self.loss, gpmodel=gp,
                **kwargs)  # updated model at each iteration
            # def acquisition_norm(acquisition, x, X_proj_mean, X_proj_std):
            #     # wrapper to normalize the input
            #     N = x.shape[0]
            #     X_proj_mean_rep = np.tile(X_proj_mean, reps=[N, 1])
            #     X_proj_std_rep = np.tile(X_proj_std, reps=[N, 1])
            #     xnorm = (x - X_proj_mean_rep) / X_proj_std_rep
            #     acq_norm, acq_sum, acq_grad = acquisition(xnorm)
            #     return acq_norm, acq_sum, acq_grad
            acquisition_normalized = lambda x: self.acquisition_norm(
                acquisition, x, self.X_proj_mean, self.X_proj_std)
            # acquisition_normalized = lambda x: acquisition_norm(acquisition, x, self.X_proj_mean, self.X_proj_std)  # check broadcasting

            try:
                x_tp1, acq_tp1 = self.minimize_acquisition(
                    acquisition_normalized,
                    opt_config)  # check configuration, starting point
            except Exception:
                # dump the current state for debugging, reset the
                # hyper-parameters and retry the acquisition step once
                np.save('Xcrash_rembo', gp.X.read_value())
                np.save('Ycrash_rembo', gp.Y.read_value())
                np.save('hyps_crash', self.get_hyps(gp))
                gp = self.reset_hyps(gp)
                x_tp1, acq_tp1 = self.minimize_acquisition(
                    acquisition_normalized, opt_config)
            y_tp1 = self.evaluate(x_tp1)

            self.update_data(x_tp1, y_tp1)
            self.reset_graph()
            kernel, gp = initialize_models(x=np.copy(self.X_proj_norm),
                                           y=np.copy(self.Ynorm),
                                           input_dim=self.proj_dim,
                                           model='GPR',
                                           kernel='Matern52',
                                           ARD=True)
            # gp = self.update_model(gp)
        return self.data_x, self.data_y, self.hyps, self.log_lik_opt
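
The lambda above binds self.acquisition_norm; the commented-out sketch earlier in the loop shows the same wrapper. As a standalone function it would read as follows (note that, as in the original sketch, the gradient is returned with respect to the normalized input):

import numpy as np

def acquisition_norm(acquisition, x, X_proj_mean, X_proj_std):
    # standardize the candidate points with the stored projection statistics
    N = x.shape[0]
    xnorm = (x - np.tile(X_proj_mean, reps=[N, 1])) / np.tile(X_proj_std, reps=[N, 1])
    acq_norm, acq_sum, acq_grad = acquisition(xnorm)
    return acq_norm, acq_sum, acq_grad
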
Example no. 24
0
#         ('proj_dim', int(1)),
#         ('input_dim', int(2))
#     ])

# dictionary optimization inputs
names = ['seed', 'obj', 'opt', 'loss', 'proj_dim', 'input_dim', 'maxiter']
defaults = ['0', 'Hartmann6D', 'manifold_bo', 'Neg_ei', int(2), int(6), int(50)]    # seed list, objective name, optimizer name, loss name, proj_dim, input_dim, maxiter
types = [str, str, str, str, int, int, int]  # one type per argument name
parser = argparse.ArgumentParser(description='Input: list numbers, name_objective, name_optimizer, name loss, proj_dim, input_dim, maxiters')
for name_i, default_i, type_i in zip(names, defaults, types):
    parser.add_argument('--' + name_i, default=default_i, type=type_i)
args = parser.parse_args()
dict_args = vars(args)
print(dict_args)
# check inputs
verify_attr = import_attr('tfbo/utils/check_inputs', attribute='verify_dict_inputs')
verify_attr(dict_args)
string_to_int_attr = import_attr('tfbo/utils/check_inputs', attribute='transform_inputs')
dict_args['seed'] = string_to_int_attr(dict_args['seed'])



np.random.seed(dict_args['seed'])

# Generate starting data
num_starts = int(10)
shape_starts = [num_starts, dict_args['input_dim']]
Xstart = np.random.uniform(low=0., high=1., size=np.prod(shape_starts)).reshape(shape_starts)

obj_attr = import_attr('datasets/tasks/all_tasks', attribute=dict_args['obj'])
objective = obj_attr()
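
As a usage note, a script built around the parser above would be launched along these lines (the script filename is a placeholder):

python run_bo.py --seed 0 --obj Hartmann6D --opt manifold_bo --loss Neg_ei --proj_dim 2 --input_dim 6 --maxiter 50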
Example no. 25
0
names = ['seed', 'obj', 'opt', 'loss', 'proj_dim', 'input_dim', 'maxiter']
defaults = [
    '0', 'ProductSinesLinear10D', 'NN_bo', 'Neg_pi',
    int(10),
    int(60),
    int(50)
]  # seed list, objective name, optimizer name, loss name, proj_dim, input_dim, maxiter
types = [str, str, str, str, int, int, int]  # one type per argument name
parser = argparse.ArgumentParser(
    description=
    'Input: list numbers, name_objective, name_optimizer, name loss, proj_dim, input_dim, maxiters'
)
for name_i, default_i, type_i in zip(names, defaults, types):
    parser.add_argument('--' + name_i, default=default_i, type=type_i)
args = parser.parse_args()
dict_args = vars(args)
print(dict_args)
string_to_int_attr = import_attr('tfbo/utils/check_inputs',
                                 attribute='transform_inputs')
dict_args['seed'] = string_to_int_attr(dict_args['seed'])

import_name_attr = import_attr('tfbo/utils/name_file',
                               attribute='name_synthetic')
import_name_save_attr = import_attr(
    'tfbo/utils/name_file', attribute='name_synthetic_dict_no_quantile')
import_dict_attr = import_attr('tfbo/utils/load_save',
                               attribute='load_dictionary')
save_dict_attr = import_attr('tfbo/utils/load_save',
                             attribute='save_dictionary')

path = '/home/rm4216/Desktop/ImperialCollege/Python/Github_manifold_bo/BayesOpt/tests/results/'

acqs = ['Neg_ei', 'lcb', 'Neg_pi']
# opts = ['NN_bo', 'add_bo']
Example no. 26
0
names = ['seed', 'obj', 'opt', 'loss', 'proj_dim', 'input_dim', 'maxiter']
defaults = [
    '0', 'RosenbrockLinear10D', 'NNKL_bo', 'Neg_pi',
    int(10),
    int(60),
    int(50)
]  # seed list, objective name, optimizer name, loss name, proj_dim, input_dim, maxiter
types = [str, str, str, str, int, int, int]  # one type per argument name
parser = argparse.ArgumentParser(
    description=
    'Input: list numbers, name_objective, name_optimizer, name loss, proj_dim, input_dim, maxiters'
)
for name_i, default_i, type_i in zip(names, defaults, types):
    parser.add_argument('--' + name_i, default=default_i, type=type_i)
args = parser.parse_args()
dict_args = vars(args)
print(dict_args)
string_to_int_attr = import_attr('tfbo/utils/check_inputs',
                                 attribute='transform_inputs')
dict_args['seed'] = string_to_int_attr(dict_args['seed'])

import_dict_attr = import_attr('tfbo/utils/load_save',
                               attribute='load_dictionary')
import_name_attr = import_attr('tfbo/utils/name_file',
                               attribute='name_synthetic')
attach_dicts_attr = import_attr('tfbo/utils/store_outputs',
                                attribute='attach_subdictionaries')

path = '/home/rm4216/Desktop/ImperialCollege/Python/Github_manifold_bo/BayesOpt/tests/results/'
name_dict_a = import_name_attr(dict_args)
dict_a = import_dict_attr(path + name_dict_a + 'seed_0' + '.p')
for i in range(19):
Example no. 27
0
    def run(self, maxiters=20):
        opt_config = import_attr('tfbo/configurations',
                                 attribute='acquisition_opt')
        for j in range(maxiters):
            print('iteration: ', j)

            self.reset_graph()

            # initialize model
            k_list, gp_nnjoint, nn = self.initialize_modelM()
            gp_nnjoint.kern.kernels[1].variance = 10.

            # one-shot retry: on failure, reset the noise and lengthscales
            # and re-run the hyper-parameter optimization
            try:
                gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
            except Exception:
                try:
                    gp_nnjoint.likelihood.variance = 1e-03
                    gp_nnjoint.kern.kernels[1].lengthscales = np.ones(
                        shape=[self.proj_dim])
                    gpflow.train.ScipyOptimizer().minimize(gp_nnjoint)
                except Exception:
                    print(
                        'Failure in optimization of hyper-parameters, reset to standard ones'
                    )

            Xnn = gp_nnjoint.nn.np_forward(self.Xprobit)
            self.Xnn = Xnn
            self.hyps.append(self.get_hyps(gp_nnjoint))

            # # test Manifold GP predictions
            # fmean, fvar = gp_nnjoint.predict_f(Xnn)

            # # A bit of testing
            # # fmeanX, mean, y_vec, Xnorm_out, Xnorm_test = gp_nnjoint.predict_x(Xnn)
            # fmeanX = gp_nnjoint.predict_x(Xnn)
            # sampleX = gp_nnjoint.sample_x(Xnn)
            # # vec_Xnorm = np.copy(y_vec)
            # # vec_indices = np.concatenate([np.ones(shape=[Xnn.shape[0], 1]) * i for i in range(self.Mo_dim)], axis=0)
            # # Ymogp = vec_Xnorm
            # Ymogp = np.reshape(np.transpose(self.Xprobit), newshape=[self.Mo_dim * Xnn.shape[0], 1])
            # Xmogp = np.concatenate(
            #     [np.concatenate([np.copy(Xnn), np.ones(shape=[Xnn.shape[0], 1]) * i], axis=1) for i in
            #      range(self.Mo_dim)], axis=0)
            # kernmogp = k_list[0] * k_list[1]
            # mogp = gpflow.models.GPR(X=Xmogp, Y=Ymogp, kern=kernmogp)
            # mogp.likelihood.variance = gp_nnjoint.likelihood.variance.value
            # fmean_mogp, fvar_mogp = mogp.predict_f(Xmogp)
            # fmean_mogp0 = np.reshape(np.copy(fmean_mogp[:, 0]), newshape=[self.Mo_dim, Xnn.shape[0]]).transpose()
            # err_fmean = np.max(np.abs(fmean_mogp0 - self.Xprobit))
            # lik_mogp = mogp.compute_log_likelihood()
            # lik_gpnnjoint = gp_nnjoint.compute_log_likelihood()
            # K_test, B_test, y_vec_test, l_k_test, q_k_test, l_b_test, q_b_test, QbQkX_vec_test, kron_diag_test, Inv_vec_test, alpha_test = gp_nnjoint.test_log_likelihood()
            # Knn = gp_nnjoint.kern.kernels[1].compute_K_symm(Xnn)
            # err_K = np.max(np.abs(Knn - K_test))
            # Bnn = np.matmul(gp_nnjoint.kern.kernels[0].W.value,
            #                 np.transpose(gp_nnjoint.kern.kernels[0].W.value)) + np.diag(
            #     gp_nnjoint.kern.kernels[0].kappa.value)
            # err_B = np.max(np.abs(Bnn - B_test))
            # X_vec = np.reshape(np.transpose(self.Xprobit), newshape=[self.Mo_dim * self.Xprobit.shape[0], 1])
            # l_k, q_k = np.linalg.eigh(Knn)
            # l_b, q_b = np.linalg.eigh(Bnn)
            #
            # def mat_vec_mul(B, K, X_vec):
            #     Gb = np.shape(B)[0]
            #     Gk = np.shape(K)[1]
            #     X_Gk = np.reshape(X_vec, newshape=[Gb, Gk])
            #     Z = np.matmul(X_Gk, np.transpose(K))
            #     Z_vec = np.reshape(np.transpose(Z), newshape=[Gb * Gk, 1])
            #     Z_Gb = np.reshape(Z_vec, newshape=[Gk, Gb])
            #     M = np.matmul(Z_Gb, np.transpose(B))
            #     x_out = np.reshape(np.transpose(M), newshape=[-1, 1])
            #     return x_out
            #
            # QbQkX_vec = mat_vec_mul(np.transpose(q_b), np.transpose(q_k), X_vec)
            # kron_diag = np.concatenate([l_k[:, None] * l_b[i] for i in range(self.Mo_dim)], axis=0)
            # Inv_vec = QbQkX_vec / (kron_diag + gp_nnjoint.likelihood.variance.value)
            # alpha_gpnnjoint = mat_vec_mul(q_b, q_k, Inv_vec)
            # err_alpha = np.max(np.abs(alpha_gpnnjoint - alpha_test))
            # logpdf_gpnnjoint = -0.5 * np.matmul(np.transpose(X_vec), alpha_gpnnjoint) - 0.5 * X_vec.shape[0] * np.log(
            #     2 * np.pi) - 0.5 * np.sum(np.log(kron_diag + gp_nnjoint.likelihood.variance.value))
            # err_ll = np.abs(logpdf_gpnnjoint - lik_mogp)
            # mgp = gpflow.models.GPR(X=np.copy(Xnn), Y=np.copy(self.Ynorm), kern=k_list[2])
            # mgp.likelihood.variance = gp_nnjoint.likelihood.variance.value
            # lik_mgp = mgp.compute_log_likelihood()
            # err_lik_all = np.abs(lik_gpnnjoint - (lik_mogp + lik_mgp))
            # fmean_gpnn, fvar_gpnn = gp_nnjoint.predict_f(Xnn)
            # fmean_mgp, fvar_mgp = mgp.predict_f(Xnn)
            # err_predict_f = np.maximum(np.max(np.abs(fmean_gpnn - self.Ynorm)), np.max(np.abs(fmean_gpnn - fmean_mgp)))
            # err_predict_var = np.max(np.abs(fvar_gpnn - fvar_mgp))
            # err_fmeanX = np.max(np.abs(fmeanX - self.Xprobit))
            # err_sampleX = np.maximum(np.max(np.abs(sampleX - self.Xprobit)), np.max(np.abs(sampleX - fmeanX)))

            # optimize the acquisition function within 0,1 bounds
            kwargs = {'ymin': self.Ynorm.min()}
            acquisition = initialize_acquisition(loss=self.loss,
                                                 gpmodel=gp_nnjoint,
                                                 **kwargs)

            opt_config = self.update_latent_bounds(opt_config)
            x_proj_tp1, acq_tp1 = self.minimize_acquisition(
                acquisition, opt_config)

            x_tp1 = self.generate_x(x_proj_tp1, gp_nnjoint)

            y_tp1 = self.evaluate(x_tp1)
            self.update_data(x_tp1, y_tp1)
        lik = []  # placeholder: log-likelihoods are not tracked in this variant

        return self.data_x, self.data_y, self.hyps, lik
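
update_latent_bounds is not shown in these snippets; a hedged sketch of one plausible implementation, which restricts the acquisition search box to the region the current latent embedding occupies (all names and the exact logic are assumptions, patterned on the bounds construction in Example no. 23):

import numpy as np

def update_latent_bounds(opt_config, Xnn, num_init):
    # per-dimension extent of the latent points, replicated once per restart
    lo, hi = Xnn.min(axis=0), Xnn.max(axis=0)
    opt_config['bounds'] = [(lo[i], hi[i]) for i in range(Xnn.shape[1])] * num_init
    return opt_config
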
Example no. 28
0
def init_train(Xtrain=None, Ytrain=None):
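    # note: the Xtrain/Ytrain arguments are ignored; synthetic training data
    # is generated below and overwrites them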

    names = ['seed', 'obj', 'opt', 'loss', 'proj_dim', 'input_dim']
    defaults = [
        '0', 'RosenbrockLinear10D', 'vae_bo', 'Neg_pi',
        int(10),
        int(60)
    ]
    types = [str, str, str, str, int, int]
    helps = [
        'seed_number', 'objective_name', 'optimizer_name', 'loss_name',
        'proj_dim', 'input_dim'
    ]

    parser = argparse.ArgumentParser(
        description=
        'Input: seed_number, objective_name, optimizer_name, loss_name, proj_dim,'
        ' input_dim, maxiter')
    for name_i, default_i, type_i, help_i in zip(names, defaults, types,
                                                 helps):
        parser.add_argument('--' + name_i,
                            default=default_i,
                            type=type_i,
                            help=help_i)
    args_p = parser.parse_args()
    args = vars(args_p)

    # args = OrderedDict(
    #     [
    #         ('seed', int(0)),
    #         ('obj', 'MichalewiczLinear10D'),
    #         ('opt', 'vae_bo'),
    #         ('loss', 'Neg_pi'),
    #         ('proj_dim', int(10)),
    #         ('input_dim', int(60))
    #     ])

    path = '/home/rm4216/Desktop/ImperialCollege/Python/Github_manifold_bo/BayesOpt/Baselines/chemvae/settings/'
    filename = name_model_vae(args)

    # parser = argparse.ArgumentParser()
    # parser.add_argument('-e', '--exp_file',
    #                     help='experiment file', default='exp.json')
    # # parser.add_argument('-d', '--directory',
    # #                     help='exp directory', default=None)
    # parser.add_argument('-d', '--directory',
    #                     help='exp directory', default='/home/rm4216/Desktop/ImperialCollege/Python/Github_manifold_bo/chemical_vae-master/models/zinc_properties/')
    # args = vars(parser.parse_args())
    # if args['directory'] is not None:
    #     args['exp_file'] = os.path.join(args['directory'], args['exp_file'])

    params = hyperparameters.load_params(path + filename + '.json')
    print("All params:", params)

    np.random.seed(123)
    Xtrain = np.random.uniform(low=0.,
                               high=1.,
                               size=[int(500), args['input_dim']])
    from tfbo.utils.import_modules import import_attr
    task_attr = import_attr('datasets/tasks/all_tasks', attribute=args['obj'])
    objective = task_attr()
    Ytrain = [objective.f(Xtrain, fulldim=False, noisy=True)]

    # X_mean = np.mean(Xtrain, axis=0, keepdims=True)
    # X_std = np.std(Xtrain, axis=0, keepdims=True)

    Y_mean = np.mean(Ytrain[0].copy(), axis=0, keepdims=True)
    Y_std = np.std(Ytrain[0].copy(), axis=0, keepdims=True)

    # Xnorm = (Xtrain - X_mean) / X_std
    from scipy.stats import norm
    Xprobit = norm.ppf(Xtrain)
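    # norm.ppf is the standard-normal quantile (probit) function: it maps the
    # uniform samples in (0, 1) to unbounded Gaussian-distributed values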
    Ynorm = (Ytrain[0].copy() - Y_mean) / Y_std  # check all shapes

    AE_PP_model, encoder, decoder, property_predictor, kl_loss_var = main_property_run(
        params, Xprobit, [Ynorm])
    output = AE_PP_model.predict(Xprobit[:, :, None],
                                 batch_size=params['batch_size'])

    return
Example no. 29
0
#     save_attr(path_to_data + x_file + '.npy', array=_x01)     # str.replace does not modify filename in place
#     save_attr(path_to_data + y_file + '.npy', array=_y)





# define all seeds for each proportion: proportion = part, i.e. (half roll) part10, part11, ..., part19 (all roll)
import sys, os
sys.path.insert(0, os.path.join(sys.path[0], '..'))
from tfbo.utils.import_modules import import_attr
import argparse
from collections import OrderedDict

path_to_name_file = 'tfbo/utils/name_file'
name_attr = import_attr(path_to_name_file, attribute='name_file_start')

path_to_load_save = 'tfbo/utils/load_save'
save_attr = import_attr(path_to_load_save, attribute='savefile')

names = ['seed', 'obj', 'n_samples']    # lists preserve insertion order, which fixes the argument order
defaults = ['0_1_2_3_4_5_6_7_8_9_10_11_12_13_14_15_16_17_18_19', 'SwissRoll1D', int(50)]
types = [str, str, int]
parser = argparse.ArgumentParser()
for name_i, default_i, type_i in zip(names, defaults, types):
    parser.add_argument('--' + name_i, default=default_i, type=type_i)

args = parser.parse_args()
dict_args = vars(args)

path_to_check_inputs = 'tfbo/utils/check_inputs'