import numpy as np
import pytest

import GPy
import emukit.multi_fidelity
from emukit.model_wrappers.gpy_model_wrappers import GPyMultiOutputWrapper
from emukit.multi_fidelity.convert_lists_to_array import (
    convert_x_list_to_array, convert_xy_lists_to_arrays)
from emukit.multi_fidelity.models import GPyLinearMultiFidelityModel


def test_convert_xy_lists_to_arrays():
    x_list = [np.array([[1, 0], [2, 1]]), np.array([[3, 2], [4, 5]])]
    y_list = [np.array([[0.0], [1.0]]), np.array([[2.0], [5.0]])]
    x_array, y_array = convert_xy_lists_to_arrays(x_list, y_list)

    expected_y = np.array([[0.0], [1.0], [2.0], [5.0]])
    expected_x = np.array([[1, 0, 0], [2, 1, 0], [3, 2, 1], [4, 5, 1]])
    assert np.array_equal(y_array, expected_y)
    assert np.array_equal(x_array, expected_x)
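
# The expected arrays above show the convention used throughout this listing:
# per-fidelity blocks are stacked vertically and a fidelity-index column
# (0 = lowest fidelity) is appended to X. Prediction inputs are augmented the
# same way; a minimal sketch with the companion helper convert_x_list_to_array:
x_query = np.array([[1.5, 0.5], [2.5, 3.0]])
X_query = convert_x_list_to_array([x_query, x_query])
# X_query[:, -1] now holds the fidelity index: [0, 0, 1, 1]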
    def train(self, x_l, y_l, x_h, y_h):
        # Construct a linear multi-fidelity model
        X_train, Y_train = convert_xy_lists_to_arrays([x_l, x_h], [y_l, y_h])
        kernels = [GPy.kern.RBF(x_l.shape[1]), GPy.kern.RBF(x_h.shape[1])]
        kernel = emukit.multi_fidelity.kernels.LinearMultiFidelityKernel(
            kernels)
        gpy_model = GPyLinearMultiFidelityModel(X_train,
                                                Y_train,
                                                kernel,
                                                n_fidelities=2)
        if self.noise is not None:
            gpy_model.mixed_noise.Gaussian_noise.fix(self.noise)
            gpy_model.mixed_noise.Gaussian_noise_1.fix(self.noise)

        # Wrap the model using the given 'GPyMultiOutputWrapper'
        self.model = GPyMultiOutputWrapper(
            gpy_model, 2, n_optimization_restarts=self.n_optimization_restarts)
        # Fit the model
        self.model.optimize()
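
    def predict(self, x, fidelity=1):
        # A hypothetical companion method (not in the original snippet): it
        # appends the fidelity-index column that convert_xy_lists_to_arrays
        # uses, then queries the wrapped emukit model for mean and variance.
        X = np.hstack([x, np.full((x.shape[0], 1), fidelity)])
        return self.model.predict(X)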
Example #3
# To arrays
lf_x_train = lf_train[['time', 'lon', 'lat', 'z',
                       'slope']].values.reshape(-1, 5)
lf_y_train = lf_train['tp'].values.reshape(-1, 1)
lf_y_train_log = lf_train.tp_tr.values.reshape(-1, 1)

hf_x_train = hf_train[['time', 'lon', 'lat', 'z',
                       'slope']].values.reshape(-1, 5)
hf_y_train = hf_train['tp'].values.reshape(-1, 1)
hf_y_train_log = hf_train.tp_tr.values.reshape(-1, 1)

x_val = val_df[['time', 'lon', 'lat', 'z', 'slope']].values.reshape(-1, 5)
y_val = val_df['tp'].values.reshape(-1, 1)

X_train, Y_train = convert_xy_lists_to_arrays([lf_x_train, hf_x_train],
                                              [lf_y_train, hf_y_train])
X_train, Y_train_log = convert_xy_lists_to_arrays(
    [lf_x_train, hf_x_train], [lf_y_train_log, hf_y_train_log])

# Model


def log_linear_mfdgp(X_train, Y_train):
    kernels = [GPy.kern.RBF(5), GPy.kern.RBF(5)]
    lin_mf_kernel = emukit.multi_fidelity.kernels.LinearMultiFidelityKernel(
        kernels)
    gpy_lin_mf_model = GPyLinearMultiFidelityModel(X_train,
                                                   Y_train,
                                                   lin_mf_kernel,
                                                   n_fidelities=2)
    # Fix the low-fidelity Gaussian noise to zero and return the GPy model
    gpy_lin_mf_model.mixed_noise.Gaussian_noise.fix(0)
    return gpy_lin_mf_model
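
# A minimal usage sketch (an assumption, mirroring train() earlier in this
# listing, not part of the original snippet): wrap and optimize the model.
gpy_model_log = log_linear_mfdgp(X_train, Y_train_log)
mf_model_log = GPyMultiOutputWrapper(gpy_model_log, 2,
                                     n_optimization_restarts=5)
mf_model_log.optimize()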
Example #4
x_plot = np.linspace(0, 1, 200)[:, None]
y_plot_l = low_fidelity(x_plot)
y_plot_h = high_fidelity(x_plot)

n_low_fidelity_points = 50
n_high_fidelity_points = 13  # every 4th of the 50 low-fidelity points

x_train_l = np.linspace(0, 1, n_low_fidelity_points)[:, None]
y_train_l = low_fidelity(x_train_l)

x_train_h = x_train_l[::4, :]
y_train_h = high_fidelity(x_train_h)

### Convert lists of arrays to ND-arrays augmented with fidelity indicators

X_train, Y_train = convert_xy_lists_to_arrays([x_train_l, x_train_h],
                                              [y_train_l, y_train_h])

# print(X_train)

plt.figure(figsize=(12, 8))
plt.plot(x_plot, y_plot_l, 'b')
plt.plot(x_plot, y_plot_h, 'r')
plt.scatter(x_train_l, y_train_l, color='b', s=40)
plt.scatter(x_train_h, y_train_h, color='r', s=40)
plt.xlabel('x')
plt.ylabel('f (x)')
plt.xlim([0, 1])
plt.legend(['Low fidelity', 'High fidelity'])
plt.title('High and low fidelity functions')

plt.figure(figsize=(12, 8))
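
# A natural next step for Example #4 (a hedged sketch, following the standard
# emukit linear multi-fidelity workflow used throughout this listing):
kernels = [GPy.kern.RBF(1), GPy.kern.RBF(1)]
lin_mf_kernel = emukit.multi_fidelity.kernels.LinearMultiFidelityKernel(kernels)
gpy_lin_mf_model = GPyLinearMultiFidelityModel(X_train, Y_train, lin_mf_kernel,
                                               n_fidelities=2)
lin_mf_model = GPyMultiOutputWrapper(gpy_lin_mf_model, 2,
                                     n_optimization_restarts=5)
lin_mf_model.optimize()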
Example #5
def denvsrecmain(n_fid, x_dim, n):
    #################
    ### Functions ###
    #################

    f_exact = test_funs.ackley

    def f_name():
        return f_exact(0)[1]

    def f_m(x):
        return f_exact(x)[0]

    a = 4 * (np.random.rand(n_fid - 1) - .5)
    b = 4 * (np.random.rand(n_fid - 1) - .5)
    c = 4 * (np.random.rand(n_fid - 1) - .5)
    d = np.random.randint(-4, 5, size=(n_fid - 1, x_dim + 1))

    def f(x, fid):
        if fid == n_fid - 1:
            return f_m(x)
        else:
            x1_ptb = np.array([x_i[0] - d[fid][0] for x_i in x])[:, None]
            return a[fid] * f(x, fid + 1) + b[fid] * x1_ptb + c[fid]

    ###############
    ### x_dim-D ###
    ###############

    ### Plotting data ###

    x_min = [-5] * x_dim
    x_max = [5] * x_dim
    x_plot_grid = [
        np.linspace(x_min[j], x_max[j], 50)[:, None] for j in range(x_dim)
    ]
    x_plot_mesh = np.meshgrid(*x_plot_grid)
    x_plot_list = np.hstack([layer.reshape(-1, 1) for layer in x_plot_mesh])

    X_plot_mf = convert_x_list_to_array([x_plot_list] * n_fid)
    X_plot_mf_list = X_plot_mf.reshape((n_fid, len(x_plot_list), x_dim + 1))

    ### Training data ###

    # Subsample the plotting grid; training data of choice could be inserted here instead.
    x_train = [x_plot_list[::len(x_plot_list) // n_i] for n_i in n]
    y_train = [f(x_train[j], j) for j in range(n_fid)]

    X_train_mf, Y_train_mf = convert_xy_lists_to_arrays(x_train, y_train)

    ############################
    ### DENSE GP CALCULATION ###
    ############################

    n_opt_restarts = 3

    kernels_mf = []
    for k in range(n_fid):
        kernels_mf.append(GPy.kern.RBF(input_dim=x_dim))

    lin_mf_kernel = emukit.multi_fidelity.kernels.LinearMultiFidelityKernel(
        kernels_mf)
    # print(lin_mf_kernel)
    # print(lin_mf_kernel.kernels[0])

    start_den = time.time()

    # print(X_train_mf)
    gpy_m_den_mf = GPyLinearMultiFidelityModel(X_train_mf,
                                               Y_train_mf,
                                               lin_mf_kernel,
                                               n_fidelities=n_fid)
    # print(gpy_m_den_mf)

    ### Fixing kernel parameters ###

    for k in range(n_fid):
        gpy_m_den_mf.mixed_noise.likelihoods_list[k].fix(0)
    # print(gpy_m_den_mf)

    m_den_mf = GPyMultiOutputWrapper(gpy_m_den_mf,
                                     n_fid,
                                     n_optimization_restarts=n_opt_restarts,
                                     verbose_optimization=False)

    end_den_1 = time.time()
    # print('Dense MFGPR construction', end_den_1 - start_den)

    ### Dense HPO ###
    m_den_mf_pre_HPO = m_den_mf  # NOTE: an alias, not a copy of the pre-HPO model
    m_den_mf.optimize()

    print(gpy_m_den_mf)
    # print(gpy_m_den_mf.kern)

    # print(lin_mf_kernel)
    # print(lin_mf_kernel.kernels[0])

    # print(X_train_mf)
    # test = lin_mf_kernel.K(X=X_train_mf)
    # print(test)
    # print(lin_mf_kernel)

    end_den_2 = time.time()
    # print('Dense MFGPR construction + HPO', end_den_2 - start_den)
    # print(gpy_m_den_mf)

    ### Prediction ###
    # for j in range(n_fid):
    #     a = time.time()
    #     test = m_den_mf.predict(X_plot_mf_list[j])
    #     b = time.time()
    #     print(b - a)
    mu_den_mf = [m_den_mf.predict(X_plot_mf_list[j])[0] for j in range(n_fid)]
    # print(X_plot_mf_list)
    # print(mu_den_mf)
    # sigma_den_mf = [m_den_mf.predict(X_plot_mf_list[j])[1] for j in range(n_fid)]
    #
    # end_den_3 = time.time()
    # print('Dense MFGPR construction + HPO + prediction', end_den_3 - start_den)

    ################################
    ### RECURSIVE GP CALCULATION ###
    ################################

    start_rec = time.time()

    # m_rec_mf = GPy.models.multiGPRegression(x_train, y_train, kernel=[GPy.kern.RBF(x_dim) for i in
    #                                                                   range(n_fid)])  # Improve kernel selection...?

    end_rec_1 = time.time()
    # print('Recursive MFGPR construction', end_rec_1 - start_rec)

    # for k in range(n_fid): m_rec_mf.models[k]['Gaussian_noise.variance'].fix(0)

    ### Recursive HPO ###
    # m_rec_mf_pre_HPO = m_rec_mf
    # m_rec_mf.optimize_restarts(restarts=n_opt_restarts, verbose=False)
    # for j in range(n_fid): print(m_rec_mf.models[j])

    end_rec_2 = time.time()
    # print('Recursive MFGPR construction + HPO', end_rec_2 - start_rec)

    # for k in range(m): print(m_rec_mf.models[k])

    ### Prediction ###
    # mu_rec_mf, sigma_rec_mf = m_rec_mf.predict(x_plot_list)
    # print(mu_rec_mf)
    #
    # end_rec_3 = time.time()
    # print('Recursive MFGPR construction + HPO + prediction', end_rec_3 - start_rec)

    # times = [np.array([end_den_1, end_den_2, end_den_3]) - start_den, np.array([end_rec_1, end_rec_2, end_rec_3]) - start_rec]
    # NOTE: the recursive branch above is commented out, so its timing is ~0.
    times = [end_den_2 - start_den, end_rec_2 - start_rec]
    return times, n_fid, x_dim
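

# A hypothetical driver (an assumption, not part of the original script):
# compare dense vs. recursive timings on the 2-D Ackley benchmark with a
# nested design of 64, 16 and 4 training points per fidelity.
if __name__ == '__main__':
    times, n_fid, x_dim = denvsrecmain(n_fid=3, x_dim=2, n=[64, 16, 4])
    print('dense: %.2fs, recursive: %.2fs' % (times[0], times[1]))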


def test_convert_xy_lists_to_arrays_fails_with_different_number_of_points_at_fidelity():
    x_list = [np.array([[1, 0], [2, 1], [3, 4]]), np.array([[3, 2], [4, 5]])]
    y_list = [np.array([0.0, 1.0]), np.array([2.0, 5.0])]
    with pytest.raises(ValueError):
        convert_xy_lists_to_arrays(x_list, y_list)
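
# A companion test (a sketch, assuming the converter also validates that both
# lists describe the same number of fidelities before comparing point counts):
def test_convert_xy_lists_to_arrays_fails_with_different_number_of_fidelities():
    x_list = [np.array([[1, 0], [2, 1]]), np.array([[3, 2], [4, 5]])]
    y_list = [np.array([[0.0], [1.0]])]
    with pytest.raises(ValueError):
        convert_xy_lists_to_arrays(x_list, y_list)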
## Training data

x_train = [np.linspace(x_min, x_max, n_i)[:, None] for n_i in n]
y_train = [f(x_train[j], j) for j in range(m)]

# x_train_0 = np.linspace(x_min, x_max, n_0)[:, None] # Uniform initial DoE... would need to replace with proper opt-DoE
# y_train_0 = f_0(x_train_0)
#
# x_train_1 = np.random.permutation(x_train_0)[:n_1] # This is the nested DoE experiment.
# y_train_1 = f_1(x_train_1)
#
# x_train_2 = np.random.permutation(x_train_1)[:n_2] # This is the nested DoE experiment.
# y_train_2 = f_2(x_train_2)

X_train_mf, Y_train_mf = convert_xy_lists_to_arrays(x_train, y_train)

# X_train, Y_train = convert_xy_lists_to_arrays([x_train_0, x_train_1, x_train_2], [y_train_0, y_train_1, y_train_2])

## DENSE GP CALCULATION WITH EMUKIT

kernels_mf = [GPy.kern.RBF(1) for _ in range(m)]

kernels = [GPy.kern.RBF(1), GPy.kern.RBF(1), GPy.kern.RBF(1)]
# print(kernels_mf)
lin_mf_kernel = emukit.multi_fidelity.kernels.LinearMultiFidelityKernel(kernels_mf)
# lin_mf_kernel = emukit.multi_fidelity.kernels.LinearMultiFidelityKernel(kernels)

start = time.time()
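
# The snippet ends at the timer; a plausible continuation (an assumption,
# mirroring the dense construction in denvsrecmain above):
gpy_m_mf = GPyLinearMultiFidelityModel(X_train_mf, Y_train_mf, lin_mf_kernel,
                                       n_fidelities=m)
m_mf = GPyMultiOutputWrapper(gpy_m_mf, m, n_optimization_restarts=3)
m_mf.optimize()
print('Dense MFGPR construction + HPO:', time.time() - start)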
Example #8
    Y_trainH0 = E_T_fine_wcs0
    y_train_H_wcs = Y_trainH0
    Y_wcs = y_train_H_wcs.min()
    Y_trainH0 = E_T_fine_wcs[index2[:, 0], :]
    y_train_H = Y_trainH0
else:
    x_train_H = X_H
    Y_trainH0 = E_T_fine_wcs
    y_train_H = Y_trainH0
    Y_wcs = y_train_H.min()
max_same_iter = 5
max_iter = 10
Y_wcs_record = np.zeros((max_iter + 1, 1))
Y_pre_record = np.zeros((max_iter + 1, 1))
Y_wcs_record[0, 0] = Y_wcs
X_train, Y_train = convert_xy_lists_to_arrays([x_train_L, x_train_H],
                                              [y_train_L, y_train_H])
n_fidelity = 2  # number of fidelity levels in the multi-fidelity model (two here)
"""============================================全局优化算法预测======================================="""
wlcb = 1
count_iter = 0
same_iter = 0
gol.set_map("wlcb", wlcb)
gol.set_map("feature", feature)
gol.set_map("fre_E", fre_E)
gol.set_map("d_x", d_x)
bounds = np.array([[5.67, 6.07], [2.75, 3.55], [3.08, 3.48], [5.98, 6.38],
                   [33.51, 34.31], [21.63, 22.03], [-0.021, -0.005]])  # parameter bounds
gol.set_map("bounds", bounds)
from function_opti import opti_function
nind = 50
maxgen = 50