示例#1
0
def design_of_experiments(X_train,
                          Y_train,
                          X_domain,
                          optimum_state,
                          obj_func,
                          aq,
                          model='blnn',
                          num_iter=10,
                          cap=2,
                          trial=1):
    """Run a sequential design-of-experiments (Bayesian optimization) loop.

    Parameters
    ----------
    X_train, Y_train : initial design points and their objective values.
    X_domain : candidate states over which suggestions are searched.
    optimum_state : known optimum location, used only to track the distance
        of each suggestion from the optimum.
    obj_func : objective identifier (currently unused; evaluation goes
        through ``gaussian_process.gaussian_process`` -- TODO wire this in).
    aq : acquisition strategy, 'seq_select_ei' or 'multi_select_ei'.
    model : surrogate model; only 'gp' is implemented here.
    num_iter : number of optimization iterations to perform.
    cap : number of points selected per iteration (parallel acquisition).
    trial : trial index used in the output figure path.

    Returns
    -------
    (Opt, X_train, Y_train) : final optimizer and the augmented data sets.
    """
    print('Aquisition input is :')
    print(aq)
    tt = []        # wall-clock time spent per acquisition step
    avg_dist = []  # distance of each suggestion from the known optimum
    for i in range(num_iter):
        print('Iter:' + str(i))
        Opt = Optimize(X_train, Y_train, X_domain)

        if model == 'gp':
            Opt.gp_mle_train(normalize_input=False, normalize_output=False)
            Opt.gp_mle_test()
        else:
            sys.exit('Model not found')

        start_time = time.time()
        if aq == 'seq_select_ei':
            print('Aq : Sequential MLE based EI')
            # NOTE(review): the plain-EI suggestion is immediately
            # overwritten; the integrated-EI suggestion is the one used.
            _, _, new = Opt.EI()
            _, _, new = Opt.integratedEI()
        # BUG FIX: this was a separate `if`, so the 'seq_select_ei' branch
        # always fell through to the `else` and exited with
        # 'Aquisition not found'.
        elif aq == 'multi_select_ei':
            print('Aq : Parallel MLE based EI')
            integrated = False
            new = Opt.parallelEI(integrated, cap)
        else:
            sys.exit('Aquisition not found')

        time_tk = time.time() - start_time
        tt.append(time_tk)

        # Evaluate the true objective at the newly suggested state(s).
        y_new = gaussian_process.gaussian_process(new)

        mean = Opt.mean
        sd = Opt.sd
        avg_dist_suggest_optimum = np.linalg.norm(new - optimum_state)
        avg_dist.append(avg_dist_suggest_optimum)
        plt.figure()
        visualize.visualize_utility1D(X_train, Y_train, new, y_new,
                                      Opt.domain_features, mean, sd)

        plt.savefig('../figs/models/model' + str(trial) + '/' + 'iter' +
                    str(i),
                    dpi=600)

        # Fold the new evaluation(s) into the training data for the next pass.
        X_train = np.vstack([X_train, new])
        Y_train = np.append(Y_train, y_new[:, 0])

    np.savetxt('../results/time/ShuTime.txt', np.array(tt))

    return Opt, X_train, Y_train
示例#2
0
 def __init__(self):
     """Seed the model with a two-point training grid on [-1, 1].

     Builds column-vector training inputs, evaluates the gaussian_process
     test objective at them, and lays out a 40-point test grid spanning
     the training range.
     """
     grid = np.linspace(-1, 1, 2)[:, None]
     self.X_train = grid
     self.Y_train = gaussian_process.gaussian_process(grid)[:, 0]
     lo = np.min(grid)
     hi = np.max(grid)
     self.X_test = np.linspace(lo, hi, 40)[:, None]
示例#3
0
            if allgps.shape[0] > gp_samples:
                print('Got the required GP samples')
                break

        return allgps

    def gp_mle_samples(self, x, num):
        """Draw posterior function samples from the fitted model.

        Parameters
        ----------
        x : input locations at which to sample.
        num : number of posterior draws.

        Returns
        -------
        2-D array of samples with the trailing singleton output axis
        removed (assumes ``predict_f_samples`` returns a rank-3 array
        shaped (num, n_points, 1) -- TODO confirm against the model API).
        """
        draws = self.m.predict_f_samples(x, num)
        return draws[:, :, 0]


if __name__ == '__main__':

    # Tiny 1-D toy problem: two training inputs on [-1, 1] with targets
    # taken from the gaussian_process test objective.
    X_train = np.linspace(-1, 1, 2)[:, None]
    Y_train = gaussian_process.gaussian_process(X_train)[:, 0]
    # Dense 200-point grid spanning the training range (built but unused
    # below while the visualization calls are commented out).
    X_test = np.linspace(np.min(X_train), np.max(X_train), 200)[:, None]

    # Train GP with HMC sampling
    # NOTE(review): the two trailing False flags are positional -- presumably
    # input/output normalization switches; confirm against GP.__init__.
    GPO2 = GP(X_train, Y_train, X_train, False, False)

    # Assess the model fit
    GPO2.train_mcmc()
    #m, var = GPO2.test_mcmc()

#    # Visualize the model fit
#    visualize.diff_ut(X_train, Y_train, X_test, meanmat, varmat, trial_num = 1,
#                      savefig = True, num_gps = 10)
#

#    plt.scatter(X_train, Y_train, color = 'g')
示例#4
0
    # Assume that we have X_train and Y_train as our initial evals
    # Assume that initial suggested points are X_pending (attribute of Opt)
    # Now, we want to select next two points (such that every time you select
    # a new point, one state in X_pending is evaluated ---- this is just for illustration)
    # This need not be the case, I am just showing it as an example

    num_evals = 15
    for i in range(num_evals):
        print('Iter num:')
        print(i)
        # Sample fantasy outcomes for the pending (not-yet-evaluated) points,
        # then ask the optimizer for the next suggestion conditioned on them.
        Opt.gen_fantasies()
        _, _, X_suggest = Opt.suggest()
        # The oldest pending state gets "evaluated" this round; the fresh
        # suggestion joins the back of the pending queue.
        xpendtotrain = Opt.X_pending[0]
        X_pending_new = np.vstack([Opt.X_pending[1:, :], X_suggest])
        print('X_pending_new')
        print(X_pending_new)

        plt.figure()
        # Model conditioned on the fantasized pending outcomes.
        GPO = Opt.ModelconditionTrain
        mean, sd = GPO.gp_mle_test()
        visualize.visualize_parallel_evals(
            Opt.X_train, Opt.Y_train, Opt.X_pending,
            gaussian_process.gaussian_process(Opt.X_pending)[:, 0], X_suggest,
            gaussian_process.gaussian_process(X_suggest)[:, 0], Opt.X_test,
            mean, sd)

        # Commit: rotate the pending queue, then move the evaluated state and
        # its true objective value into the training data. Order matters --
        # the plot above must see the pre-rotation X_pending.
        Opt.X_pending = X_pending_new
        Opt.X_train = np.vstack([Opt.X_train, xpendtotrain])
        ypendtotrain = gaussian_process.gaussian_process(xpendtotrain)[:, 0]
        Opt.Y_train = np.append(Opt.Y_train, ypendtotrain)
示例#5
0
# distance from optimum vs. total number of iterations
# distance from optimum vs. total number of function evals


# Example details
# X_train_initial : 2 states (s1, s2)
# X_pending : 2 states (s3, s4)
# New points select : 2 states (s5, s6)

num_evals = 10         # total optimization iterations to run
cap = 2                # pending/suggested points held in parallel
states_evaluated = 2   # states evaluated per round (see example above)
obj_func = 'gp_test'   # objective identifier: the GP test function
X_train = np.linspace(-1, 1, 2)[:,None]
Y_train = gaussian_process.gaussian_process(X_train)[:,0]

# Dense grid + objective values, used to locate the true optimum below.
X_test = np.linspace(np.min(X_train), np.max(X_train), 100)[:,None]
Y_test = gaussian_process.gaussian_process(X_test)[:,0]

# Grid location of the maximum -- used for distance-from-optimum tracking.
optimum_state = X_test[np.argmax(Y_test)]

#------------------------------------------------------------------------------
# 1.
# GP-MLE + Snoek Fantasy EI
trial_num = 6
num_fantasies = 8
Opt = optimize.TestParallelSnoekGPMLE()
Opt.init_pending(cap = 2)

tt = []  # per-iteration acquisition timings (filled by the loop that follows)