# Imports assumed by the examples below (standard GPyOpt module layout).
import datetime
import math
import re
import unittest
from unittest.mock import Mock

import matplotlib.pyplot as plt
import numpy as np
from GPyOpt.acquisitions import AcquisitionLCB
from GPyOpt.core.task.space import Design_space
from GPyOpt.models import GPModel


class TestLCBAcquisition(unittest.TestCase):
    def setUp(self):
        self.mock_model = Mock()
        self.mock_optimizer = Mock()
        domain = [{
            'name': 'var_1',
            'type': 'continuous',
            'domain': (-5, 5),
            'dimensionality': 2
        }]
        self.space = Design_space(domain, None)

        self.lcb_acquisition = AcquisitionLCB(self.mock_model, self.space,
                                              self.mock_optimizer)

    def test_acquisition_function(self):
        self.mock_model.predict.return_value = (1, 3)

        weighted_acquisition = self.lcb_acquisition.acquisition_function(
            np.array([2, 2]))
        expected_acquisition = np.array([[-5.0], [-5.0]])

        self.assertTrue(
            np.array_equal(expected_acquisition, weighted_acquisition))

    def test_acquisition_function_withGradients(self):
        self.mock_model.predict_withGradients.return_value = (1, 1, 0.1, 0.1)

        weighted_acquisition, weighted_gradient = self.lcb_acquisition.acquisition_function_withGradients(
            np.array([2, 2]))

        self.assertTrue(
            np.array_equal(np.array([[-1.0], [-1.0]]), weighted_acquisition))
        self.assertTrue(
            np.array_equal(np.array([[-0.1, -0.1], [-0.1, -0.1]]),
                           weighted_gradient))

    def test_optimize_with_analytical_gradient_prediction(self):
        expected_optimum_position = [[0, 0]]
        self.mock_optimizer.optimize.return_value = expected_optimum_position
        self.mock_model.analytical_gradient_prediction = True

        self.lcb_acquisition = AcquisitionLCB(self.mock_model, self.space,
                                              self.mock_optimizer)
        optimum_position = self.lcb_acquisition.optimize()

        self.assertEqual(expected_optimum_position, optimum_position)

    def test_optimize_without_analytical_gradient_prediction(self):
        expected_optimum_position = [[0, 0]]
        self.mock_optimizer.optimize.return_value = expected_optimum_position
        self.mock_model.analytical_gradient_prediction = False
        self.lcb_acquisition = AcquisitionLCB(self.mock_model, self.space,
                                              self.mock_optimizer)

        optimum_position = self.lcb_acquisition.optimize()

        self.assertEqual(expected_optimum_position, optimum_position)
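For reference, the expected values in these tests follow from AcquisitionLCB's default exploration weight of 2 and from acquisition_function returning the negated LCB surface; a quick check of the first assertion, assuming those GPyOpt defaults:

# Mocked prediction: mean = 1, std = 3; default exploration_weight k = 2.
mu, sigma, k = 1, 3, 2
lcb = -mu + k * sigma  # internal LCB value: -1 + 2*3 = 5
print(-lcb)            # acquisition_function negates it: -5, matching expected_acquisition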
Example #2
def plot1D(filename='GP_results/test1.txt',
           magnet_list=['h13', 'v13', 'h31', 'v31']):
    '''Plot the GP posterior mean and LCB acquisition after every iteration.
    Requires some manual, hardcoded interaction (column indices, bounds).'''

    reader = np.asmatrix(np.loadtxt(filename))
    x_observed = np.asarray(reader[:, 0])  # Hardcoded!!!!!!!!!
    f_observed = np.asarray(reader[:, -1])  # Hardcoded!!!!!!!!!

    n_rows = math.ceil(len(f_observed) / 5)
    # squeeze=False keeps the axes arrays 2-D even when there is only one row.
    f_mean, sub_mean = plt.subplots(n_rows, 5, sharex=True, sharey=True, squeeze=False)
    f_mean.tight_layout()  # to adjust spacing between subplots
    f_acq, sub_acq = plt.subplots(n_rows, 5, sharex=True, sharey=True, squeeze=False)
    f_acq.tight_layout()  # to adjust spacing between subplots

    num_points = 1000
    X_grid = np.linspace(-10, 10, num_points)[:, None]
    #X_grid = np.linspace(-15, 15, num_points)[:,None]
    for i in range(n_rows):
        j = 0
        while len(f_observed) > 5 * i + j and j < 5:
            X = x_observed[0:(5 * i + j + 1)]
            Y = f_observed[0:(5 * i + j + 1)]
            mean, Cov, variance, m = GP_analysis(X, Y, X_grid)
            sub_mean[i, j].plot(X_grid, mean)
            sub_mean[i, j].fill_between(X_grid[:, 0],
                                        (mean.T + variance.T).T[:, 0],
                                        (mean.T - variance.T).T[:, 0],
                                        facecolor="gray",
                                        alpha=0.15)
            sub_mean[i, j].scatter(X, Y)

            model = GPModel(optimize_restarts=1, verbose=True)
            model.model = m
            space = Design_space([{
                'name': 'var1',
                'type': 'continuous',
                'domain': (-10, 10)
            }])
            acq = AcquisitionLCB(model, space,
                                 exploration_weight=1)  # Hardcoded!!!!!!!!!
            alpha_full = acq.acquisition_function(X_grid)
            sub_acq[i, j].plot(X_grid, alpha_full)
            j = j + 1

    timestamp = (datetime.datetime.now()).strftime("%m-%d_%H-%M-%S")
    f_mean.subplots_adjust(wspace=0.3, top=None, bottom=None)
    f_mean.savefig(f'GP_results/dis_mean_M1-{timestamp}.pdf')
    f_acq.subplots_adjust(wspace=0.3, top=None, bottom=None)
    f_acq.savefig(f'GP_results/dis_acq_M1-{timestamp}.pdf')
    #plt.show()
    plt.close()
    return (None)
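Both plot1D above and plot2D further below call a GP_analysis helper that is not included in these excerpts. The following is only a minimal sketch of what it is assumed to do, with the return order (posterior mean, full covariance, pointwise variance, fitted model) inferred from how it is called; it builds on GPy, which GPyOpt's GPModel wraps internally.

import GPy  # assumed dependency of the original project


def GP_analysis(X, Y, X_grid):
    '''Sketch of the assumed helper: fit a GPy regression model to the
    observations and predict on the evaluation grid.'''
    kernel = GPy.kern.RBF(input_dim=X.shape[1])
    m = GPy.models.GPRegression(X, Y, kernel)
    m.optimize()                                  # maximize the marginal likelihood
    mean, Cov = m.predict(X_grid, full_cov=True)  # posterior mean and full covariance
    variance = np.diag(Cov).reshape(-1, 1)        # pointwise variance, used for the shaded band
    return mean, Cov, variance, m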
    # Fragment from the optimization step: log the fitted GP hyperparameters,
    # then use the LCB acquisition to choose the next corrector settings.
    print(m)

    f = open(f"GP_results/opt_params_{timestamp}.txt", "a+")
    ansi_escape = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
    text = ansi_escape.sub('', str(m))
    f.write(text + '\n')
    f.close()

    # Find next point
    model = GPModel(optimize_restarts=1, verbose=True)
    model.model = m
    #acq = AcquisitionEI(model, space, jitter = 1)
    acq = AcquisitionLCB(
        model, space,
        exploration_weight=0.5)  # Hardcoded HYPER_PARAMETER!!!!!!!!
    # acquisition_function returns the negated LCB surface, so the grid point
    # with the smallest value is the most promising candidate to sample next.
    alpha_full = acq.acquisition_function(X_grid)
    magnet_values = X_grid[np.argmin(alpha_full), :]

    print("Min LCB: ", np.argmin(alpha_full), min(alpha_full),
          X_grid[np.argmin(alpha_full), :])
    print("Max LCB: ", np.argmax(alpha_full), max(alpha_full),
          X_grid[np.argmax(alpha_full), :])
    '''
	if (len(magnet_list)==1):
		gp.plot1D(f'GP_results/correctorValues_Distance_{timestamp}.txt')
	elif (len(magnet_list)==2):
		gp.plot2D(f'GP_results/correctorValues_Distance_{timestamp}.txt', timestamp = timestamp)
	'''
    # Save the new corrector values to file.
    with open(f"GP_results/newCorrectorValues_{timestamp}.txt", "a+") as f:
        f.write(' '.join(map('{:.4f}'.format, magnet_values)) + '\n')
Example #5
def plot2D(filename='GP_results/test2.txt',
           magnet_list=['h13', 'v13', 'h31', 'v31']):
    '''Plot the GP posterior mean, uncertainty, and LCB acquisition after every
    iteration. Requires some manual, hardcoded interaction (column indices, bounds).'''

    reader = np.asmatrix(np.loadtxt(filename))
    xy_observed = np.asarray(reader[:, 0:2])  # Hardcoded!!!!!!!!!
    f_observed = np.asarray(reader[:, -1])  # Hardcoded!!!!!!!!!

    n_rows = math.ceil(len(f_observed) / 5) + 1
    f_mean, sub_mean = plt.subplots(n_rows, 5, sharex=True, sharey=True)
    f_mean.tight_layout()  # to adjust spacing between subplots
    f_sigma, sub_sigma = plt.subplots(n_rows, 5, sharex=True, sharey=True)
    f_sigma.tight_layout()  # to adjust spacing between subplots
    f_acq, sub_acq = plt.subplots(n_rows, 5, sharex=True, sharey=True)
    f_acq.tight_layout()  # to adjust spacing between subplots

    num_points = 100
    XY_grid = np.mgrid[-10:10:0.3,
                       -10:10:0.3].reshape(2, -1).T  # Hardcoded!!!!!!!!!
    for i in range(n_rows - 1):
        j = 0
        while len(f_observed) > 5 * i + j and j < 5:
            XY = xy_observed[0:(5 * i + j + 1)]
            Z = f_observed[0:(5 * i + j + 1)]
            mean, Cov, variance, m = GP_analysis(XY, Z, XY_grid)
            xx = np.asarray(XY_grid[:, 0])
            yy = np.asarray(XY_grid[:, 1])
            xo = np.asarray(XY[:, 0]).reshape(-1)
            yo = np.asarray(XY[:, 1]).reshape(-1)
            sub_mean[i, j].scatter(xx,
                                   yy,
                                   c=mean.T[0],
                                   vmin=min(mean.T[0]),
                                   vmax=max(mean.T[0]),
                                   edgecolors='none',
                                   cmap='GnBu')
            sub_mean[i, j].scatter(xo, yo, c='k', marker='s')

            sub_sigma[i, j].scatter(xx,
                                    yy,
                                    c=variance,
                                    vmin=min(variance),
                                    vmax=max(variance),
                                    edgecolors='none')
            sub_sigma[i, j].scatter(xo, yo, c='white')

            model = GPModel(optimize_restarts=1, verbose=True)
            model.model = m
            space = Design_space([{
                'name': 'var1',
                'type': 'continuous',
                'domain': (-10, 10)
            }, {
                'name': 'var2',
                'type': 'continuous',
                'domain': (-10, 10)
            }])
            acq = AcquisitionLCB(model, space,
                                 exploration_weight=1)  # Hardcoded!!!!!!!!!
            alpha_full = acq.acquisition_function(XY_grid)
            sub_acq[i, j].scatter(xx,
                                  yy,
                                  c=alpha_full.T[0],
                                  vmin=min(alpha_full.T[0]),
                                  vmax=max(alpha_full.T[0]),
                                  edgecolors='none',
                                  cmap='GnBu')
            sub_acq[i, j].scatter(xo, yo, c='k', marker='s')
            minXY = XY_grid[np.argmin(alpha_full)]
            sub_acq[i, j].scatter(minXY[0], minXY[1], marker='P')

            j = j + 1

    timestamp = (datetime.datetime.now()).strftime("%m-%d_%H-%M-%S")
    f_mean.subplots_adjust(wspace=0.3, top=None, bottom=None)
    f_mean.savefig(f'GP_results/dis_mean_M1_M2-{timestamp}.pdf')
    f_sigma.subplots_adjust(wspace=0.3, top=None, bottom=None)
    f_sigma.savefig(f'GP_results/dis_sigma_M1_M2-{timestamp}.pdf')
    f_acq.subplots_adjust(wspace=0.3, top=None, bottom=None)
    f_acq.savefig(f'GP_results/dis_acq_M1_M2-{timestamp}.pdf')
    #plt.show()
    plt.close()
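A hypothetical driver for the two plotting helpers, assuming the whitespace-separated log layout used above (corrector settings in the leading columns, the measured objective in the last column):

if __name__ == '__main__':
    # File names follow the defaults in the function signatures above.
    plot1D('GP_results/test1.txt', magnet_list=['h13'])
    plot2D('GP_results/test2.txt', magnet_list=['h13', 'v13'])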