class TestLCBAcquisition(unittest.TestCase):
    """Tests for the LCB (lower confidence bound) acquisition function.

    The GP model and the acquisition optimizer are replaced by mocks, so only
    the acquisition arithmetic and the optimize() delegation are exercised.
    """

    def setUp(self):
        self.mock_model = Mock()
        self.mock_optimizer = Mock()
        # One continuous variable, replicated over 2 dimensions, bounded in [-5, 5].
        domain = [{
            'name': 'var_1',
            'type': 'continuous',
            'domain': (-5, 5),
            'dimensionality': 2
        }]
        self.space = Design_space(domain, None)

        self.lcb_acquisition = AcquisitionLCB(self.mock_model, self.space,
                                              self.mock_optimizer)

    def test_acquisition_function(self):
        # predict() -> (mean, std) = (1, 3); with the default exploration
        # weight the expected (sign-flipped) LCB per point is -5.0 —
        # NOTE(review): sign convention per GPyOpt, confirm against its docs.
        self.mock_model.predict.return_value = (1, 3)

        weighted_acquisition = self.lcb_acquisition.acquisition_function(
            np.array([2, 2]))
        expected_acquisition = np.array([[-5.0], [-5.0]])

        self.assertTrue(
            np.array_equal(expected_acquisition, weighted_acquisition))

    def test_acquisition_function_withGradients(self):
        # predict_withGradients() -> (mean, std, dmean/dx, dstd/dx).
        self.mock_model.predict_withGradients.return_value = (1, 1, 0.1, 0.1)

        weighted_acquisition, weighted_gradient = self.lcb_acquisition.acquisition_function_withGradients(
            np.array([2, 2]))

        self.assertTrue(
            np.array_equal(np.array([[-1.0], [-1.0]]), weighted_acquisition))
        self.assertTrue(
            np.array_equal(np.array([[-0.1, -0.1], [-0.1, -0.1]]),
                           weighted_gradient))

    def test_optimize_with_analytical_gradient_prediction(self):
        expected_optimum_position = [[0, 0]]
        self.mock_optimizer.optimize.return_value = expected_optimum_position
        self.mock_model.analytical_gradient_prediction = True

        # Re-create the acquisition so it sees the gradient-capable model.
        self.lcb_acquisition = AcquisitionLCB(self.mock_model, self.space,
                                              self.mock_optimizer)
        optimum_position = self.lcb_acquisition.optimize()

        self.assertEqual(expected_optimum_position, optimum_position)

    def test_optimize_without_analytical_gradient_prediction(self):
        expected_optimum_position = [[0, 0]]
        self.mock_optimizer.optimize.return_value = expected_optimum_position
        self.mock_model.analytical_gradient_prediction = False
        self.lcb_acquisition = AcquisitionLCB(self.mock_model, self.space,
                                              self.mock_optimizer)

        optimum_position = self.lcb_acquisition.optimize()

        self.assertEqual(expected_optimum_position, optimum_position)
# Beispiel #3
# 0
def plot1D(filename='GP_results/test1.txt',
           magnet_list=['h13', 'v13', 'h31', 'v31']):
    ''' Plots at every iteration GP. Requires some manual hardcoded interaction '''
    # NOTE(review): mutable default argument (magnet_list); it is not mutated
    # here, but confirm before reusing the signature.

    # Column 0 = observed x, last column = objective value — hardcoded layout.
    reader = np.asmatrix(np.loadtxt(filename))
    x_observed = np.asarray(reader[:, 0])  # Hardcoded!!!!!!!!!
    f_observed = np.asarray(reader[:, -1])  # Hardcoded!!!!!!!!!

    # One subplot per observation, 5 per row: one grid for the posterior
    # mean, one for the acquisition function.
    n_rows = math.ceil(len(f_observed) / 5)
    f_mean, sub_mean = plt.subplots(n_rows, 5, sharex=True, sharey=True)
    f_mean.tight_layout()  # to adjust spacing between subplots
    f_acq, sub_acq = plt.subplots(n_rows, 5, sharex=True, sharey=True)
    f_acq.tight_layout()  # to adjust spacing between subplots

    num_points = 1000
    X_grid = np.linspace(-10, 10, num_points)[:, None]
    #X_grid = np.linspace(-15, 15, num_points)[:,None]
    for i in range(n_rows):
        j = 0
        while len(f_observed) > 5 * i + j and j < 5:
            # Replay iteration 5*i+j: refit the GP on the first observations only.
            X = x_observed[0:(5 * i + j + 1)]
            Y = f_observed[0:(5 * i + j + 1)]
            mean, Cov, variance, m = GP_analysis(X, Y, X_grid)
            sub_mean[i, j].plot(X_grid, mean)
            # Shade a mean +/- variance band — NOTE(review): this uses the
            # variance, not the standard deviation; confirm intended.
            sub_mean[i, j].fill_between(X_grid[:, 0],
                                        (mean.T + variance.T).T[:, 0],
                                        (mean.T - variance.T).T[:, 0],
                                        facecolor="gray",
                                        alpha=0.15)
            sub_mean[i, j].scatter(X, Y)

            # Wrap the fitted GPy model for GPyOpt and evaluate LCB on the grid.
            model = GPModel(optimize_restarts=1, verbose=True)
            model.model = m
            space = Design_space([{
                'name': 'var1',
                'type': 'continuous',
                'domain': (-10, 10)
            }])
            acq = AcquisitionLCB(model, space,
                                 exploration_weight=1)  # Hardcoded!!!!!!!!!
            alpha_full = acq.acquisition_function(X_grid)
            sub_acq[i, j].plot(X_grid, alpha_full)
            j = j + 1

    # Save both timestamped figure grids into GP_results/.
    timestamp = (datetime.datetime.now()).strftime("%m-%d_%H-%M-%S")
    f_mean.subplots_adjust(wspace=0.3, top=None, bottom=None)
    f_mean.savefig(f'GP_results/dis_mean_M1-{timestamp}.pdf')
    f_acq.subplots_adjust(wspace=0.3, top=None, bottom=None)
    f_acq.savefig(f'GP_results/dis_acq_M1-{timestamp}.pdf')
    #plt.show()
    plt.close()
    return (None)
    # NOTE(review): this setUp appears detached from its TestCase class in
    # this chunk; its enclosing class header is not visible here.
    def setUp(self):
        """Create mocked model/optimizer and a 2-D continuous space in [-5, 5]."""
        self.mock_model = Mock()
        self.mock_optimizer = Mock()
        domain = [{
            'name': 'var_1',
            'type': 'continuous',
            'domain': (-5, 5),
            'dimensionality': 2
        }]
        self.space = Design_space(domain, None)

        self.lcb_acquisition = AcquisitionLCB(self.mock_model, self.space,
                                              self.mock_optimizer)
# Beispiel #5
# 0
 def test_ChecKGrads_LCB(self):
     """Numerically validate the analytical LCB gradients via GradientChecker.

     checkgrad() compares acquisition_function against
     d_acquisition_function at self.X_test within self.tolerance.
     """
     acquisition_lcb = acquisition_for_test(
         AcquisitionLCB(self.model, self.feasible_region))
     grad_lcb = GradientChecker(acquisition_lcb.acquisition_function,
                                acquisition_lcb.d_acquisition_function,
                                self.X_test)
     self.assertTrue(grad_lcb.checkgrad(tolerance=self.tolerance))
    def setUp(self):
        """Fresh mocks per test plus a 2-D continuous box [-5, 5] search space."""
        self.mock_model = Mock()
        self.mock_optimizer = Mock()
        variable = {
            'name': 'var_1',
            'type': 'continuous',
            'domain': (-5, 5),
            'dimensionality': 2,
        }
        self.space = Design_space([variable], None)

        self.lcb_acquisition = AcquisitionLCB(
            self.mock_model, self.space, self.mock_optimizer)
    def test_optimize_without_analytical_gradient_prediction(self):
        """optimize() must delegate to the optimizer even without analytical gradients."""
        expected = [[0, 0]]
        self.mock_optimizer.optimize.return_value = expected
        self.mock_model.analytical_gradient_prediction = False
        # Rebuild so the acquisition observes the gradient-free model flag.
        self.lcb_acquisition = AcquisitionLCB(
            self.mock_model, self.space, self.mock_optimizer)

        result = self.lcb_acquisition.optimize()

        self.assertEqual(expected, result)
class TestLCBAcquisition(unittest.TestCase):
    """Tests for AcquisitionLCB with a mocked GP model and optimizer."""

    def setUp(self):
        self.mock_model = Mock()
        self.mock_optimizer = Mock()
        variable = {
            'name': 'var_1',
            'type': 'continuous',
            'domain': (-5, 5),
            'dimensionality': 2,
        }
        self.space = Design_space([variable], None)

        self.lcb_acquisition = AcquisitionLCB(
            self.mock_model, self.space, self.mock_optimizer)

    def test_acquisition_function(self):
        # predict() -> (mean, std) = (1, 3).
        self.mock_model.predict.return_value = (1, 3)

        actual = self.lcb_acquisition.acquisition_function(np.array([2, 2]))

        expected = np.array([[-5.0], [-5.0]])
        self.assertTrue(np.array_equal(expected, actual))

    def test_acquisition_function_withGradients(self):
        # predict_withGradients() -> (mean, std, dmean/dx, dstd/dx).
        self.mock_model.predict_withGradients.return_value = (1, 1, 0.1, 0.1)

        acq, grad = self.lcb_acquisition.acquisition_function_withGradients(
            np.array([2, 2]))

        self.assertTrue(np.array_equal(np.array([[-1.0], [-1.0]]), acq))
        self.assertTrue(
            np.array_equal(np.array([[-0.1, -0.1], [-0.1, -0.1]]), grad))

    def test_optimize_with_analytical_gradient_prediction(self):
        expected = [[0, 0]]
        self.mock_optimizer.optimize.return_value = expected
        self.mock_model.analytical_gradient_prediction = True
        # Rebuild so the acquisition sees the gradient-capable model.
        self.lcb_acquisition = AcquisitionLCB(
            self.mock_model, self.space, self.mock_optimizer)

        self.assertEqual(expected, self.lcb_acquisition.optimize())

    def test_optimize_without_analytical_gradient_prediction(self):
        expected = [[0, 0]]
        self.mock_optimizer.optimize.return_value = expected
        self.mock_model.analytical_gradient_prediction = False
        self.lcb_acquisition = AcquisitionLCB(
            self.mock_model, self.space, self.mock_optimizer)

        self.assertEqual(expected, self.lcb_acquisition.optimize())
# Beispiel #9
# 0
def build_acquisition(X_init, space, aquisition_function, model):
    """Construct a GPyOpt acquisition object from a configuration dict.

    `aquisition_function` carries 'type' ('ei', 'pi' or 'lcb') plus the
    type-specific hyper-parameters ('epsilon', or 'upsilon'/'delta' for LCB).
    An unrecognized type returns the configuration dict unchanged.
    """
    aquisition_optimizer = GPyOpt.optimization.AcquisitionOptimizer(space, eps=0)
    acq_type = aquisition_function['type']
    if acq_type == 'ei':
        return AcquisitionEI(model=model, space=space,
                             optimizer=aquisition_optimizer,
                             jitter=aquisition_function['epsilon'])
    if acq_type == 'pi':
        return AcquisitionMPI(model=model, space=space,
                              optimizer=aquisition_optimizer,
                              jitter=aquisition_function['epsilon'])
    if acq_type == 'lcb':
        # GP-UCB style exploration constant from the (n, d, upsilon, delta)
        # confidence-bound formula.
        n_points = X_init.shape[0]
        n_dims = X_init.shape[1]
        lcb_const = np.sqrt(aquisition_function['upsilon'] * (
            2 * np.log((n_points ** (n_dims / 2. + 2)) * (np.pi ** 2)
                       / (3. * aquisition_function['delta']))))
        return AcquisitionLCB(model=model, space=space,
                              optimizer=aquisition_optimizer,
                              exploration_weight=lcb_const)
    return aquisition_function
# Beispiel #10
# 0
    def acquisition_creator(self, acquisition_type, model, space,
                            acquisition_optimizer, cost_withGradients,
                            **kwargs):
        """
        Acquisition chooser from the available options. Extra parameters can be passed via **kwargs.

        :param acquisition_type: one of 'EI', 'EI_MCMC', 'MPI', 'MPI_MCMC',
            'LCB', 'LCB_MCMC', 'Adaptive_LCB', 'Adaptive_LCB_MCMC',
            'Continuous_Time_Varying', 'Continuous_Time_Varying_MCMC',
            or None for the default ('EI').
        :raises Exception: if acquisition_type is not a recognized name.
        """
        # Jitter/weight defaults come from self.kwargs captured at
        # construction time, not from this call's **kwargs.
        acquisition_jitter = self.kwargs.get('acquisition_jitter', 0.01)
        acquisition_weight = self.kwargs.get('acquisition_weight', 2)

        # --- Choose the acquisition
        if acquisition_type is None or acquisition_type == 'EI':
            return AcquisitionEI(model, space, acquisition_optimizer,
                                 cost_withGradients, acquisition_jitter)

        elif acquisition_type == 'EI_MCMC':
            return AcquisitionEI_MCMC(model, space, acquisition_optimizer,
                                      cost_withGradients, acquisition_jitter)

        elif acquisition_type == 'MPI':
            return AcquisitionMPI(model, space, acquisition_optimizer,
                                  cost_withGradients, acquisition_jitter)

        elif acquisition_type == 'MPI_MCMC':
            return AcquisitionMPI_MCMC(model, space, acquisition_optimizer,
                                       cost_withGradients, acquisition_jitter)

        # LCB variants receive no cost function; the exploration weight
        # replaces the jitter argument.
        elif acquisition_type == 'LCB':
            return AcquisitionLCB(model, space, acquisition_optimizer, None,
                                  acquisition_weight)

        elif acquisition_type == 'LCB_MCMC':
            return AcquisitionLCB_MCMC(model, space, acquisition_optimizer,
                                       None, acquisition_weight)
        elif acquisition_type == 'Adaptive_LCB':
            return original_acq.AcquisitionLCBwithAdaptiveExplorationWeight(
                model, space, acquisition_optimizer, None, **kwargs)
        elif acquisition_type == 'Adaptive_LCB_MCMC':
            return original_acq.AcquisitionLCBwithAdaptiveExplorationWeight_MCMC(
                model, space, acquisition_optimizer, None, **kwargs)
        elif acquisition_type == 'Continuous_Time_Varying':
            return original_acq.AcquisitionContinuousTimeVarying(
                model, space, acquisition_optimizer, None, **kwargs)
        elif acquisition_type == 'Continuous_Time_Varying_MCMC':
            return original_acq.AcquisitionContinuousTimeVarying_MCMC(
                model, space, acquisition_optimizer, None, **kwargs)
        else:
            raise Exception('Invalid acquisition selected.')
    # NOTE(review): fragment of a larger optimization-loop function whose
    # `def` is outside this chunk; `m`, `timestamp`, `space`, `X_grid` are
    # presumably defined earlier in that function — confirm against the
    # full file.
    m.optimize('bfgs', max_iters=100)  # Hyper-parameters are optimized here
    print(m)

    # Append optimized hyper-parameters to a log, stripping ANSI colour codes.
    f = open(f"GP_results/opt_params_{timestamp}.txt", "a+")
    ansi_escape = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
    text = ansi_escape.sub('', str(m))
    f.write(text + '\n')
    f.close()

    # Find next point
    model = GPModel(optimize_restarts=1, verbose=True)
    model.model = m
    #acq = AcquisitionEI(model, space, jitter = 1)
    acq = AcquisitionLCB(
        model, space,
        exploration_weight=0.5)  # Hardcoded HYPER_PARAMETER!!!!!!!!
    # Next sample is the grid point minimizing the LCB acquisition.
    alpha_full = acq.acquisition_function(X_grid)
    magnet_values = X_grid[np.argmin(alpha_full), :]

    print("Min LCB: ", np.argmin(alpha_full), min(alpha_full),
          X_grid[np.argmin(alpha_full), :])
    print("Max LCB: ", np.argmax(alpha_full), max(alpha_full),
          X_grid[np.argmax(alpha_full), :])
    '''
	if (len(magnet_list)==1):
		gp.plot1D(f'GP_results/correctorValues_Distance_{timestamp}.txt')
	elif (len(magnet_list)==2):
		gp.plot2D(f'GP_results/correctorValues_Distance_{timestamp}.txt', timestamp = timestamp)
	'''
    #save new corrector values to file

	# NOTE(review): tab-indented fragment of a larger function whose `def`
	# is outside this chunk; near-duplicate of the space-indented block
	# above but with exploration_weight=1 and a plot2D branch.
	m.optimize('bfgs', max_iters=100)  # Hyper-parameters are optimized here
	print(m)

	# Append optimized hyper-parameters to a log, stripping ANSI colour codes.
	f = open(f"GP_results/opt_params_{timestamp}.txt", "a+")
	ansi_escape = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
	text = ansi_escape.sub('', str(m))
	f.write(text + '\n')
	f.close()

	# Find next point
	model = GPModel(optimize_restarts=1, verbose=True)
	model.model = m
    #acq = AcquisitionEI(model, space, jitter = 1)
	acq = AcquisitionLCB(model, space, exploration_weight = 1)
	alpha_full = acq.acquisition_function(X_grid)
	# Next sample is the grid point minimizing the LCB acquisition.
	magnet_values = X_grid[np.argmin(alpha_full),:]

	print("Min LCB: ", np.argmin(alpha_full), min(alpha_full), X_grid[np.argmin(alpha_full),:])
	print("Max LCB: ", np.argmax(alpha_full), max(alpha_full), X_grid[np.argmax(alpha_full),:])

	# Plot results in 1-D or 2-D depending on the number of magnets.
	if (len(magnet_list)==1):
		gp.plot1D(f'GP_results/correctorValues_Distance_{timestamp}.txt')
	elif (len(magnet_list)==2):
		gp.plot2D(f'GP_results/correctorValues_Distance_{timestamp}.txt')

	#save new corrector values to file
	f = open(f"GP_results/newCorrectorValues_{timestamp}.txt", "a+")
	f.write('%s' % ' '.join(map('{:.4f}'.format, list(magnet_values))) + '\n')
	f.close()
# Beispiel #13
# 0
def plot2D(filename='GP_results/test2.txt',
           magnet_list=['h13', 'v13', 'h31', 'v31']):
    ''' Plots at every iteration GP. Requires some manual hardcoded interaction '''
    # NOTE(review): mutable default argument (magnet_list); not mutated here,
    # but confirm before reusing the signature.

    # Columns 0-1 = observed (x, y), last column = objective — hardcoded layout.
    reader = np.asmatrix(np.loadtxt(filename))
    xy_observed = np.asarray(reader[:, 0:2])  # Hardcoded!!!!!!!!!
    f_observed = np.asarray(reader[:, -1])  # Hardcoded!!!!!!!!!

    # 5 subplots per row; three figure grids: posterior mean, posterior
    # variance and acquisition function.
    n_rows = math.ceil(len(f_observed) / 5) + 1
    f_mean, sub_mean = plt.subplots(n_rows, 5, sharex=True, sharey=True)
    f_mean.tight_layout()  # to adjust spacing between subplots
    f_sigma, sub_sigma = plt.subplots(n_rows, 5, sharex=True, sharey=True)
    f_sigma.tight_layout()  # to adjust spacing between subplots
    f_acq, sub_acq = plt.subplots(n_rows, 5, sharex=True, sharey=True)
    f_acq.tight_layout()  # to adjust spacing between subplots

    # NOTE(review): num_points is unused; the grid density comes from the
    # 0.3 step in np.mgrid below.
    num_points = 100
    XY_grid = np.mgrid[-10:10:0.3,
                       -10:10:0.3].reshape(2, -1).T  # Hardcoded!!!!!!!!!
    for i in range(n_rows - 1):
        j = 0
        while len(f_observed) > 5 * i + j and j < 5:
            # Replay iteration 5*i+j: refit the GP on the first observations only.
            XY = xy_observed[0:(5 * i + j + 1)]
            Z = f_observed[0:(5 * i + j + 1)]
            mean, Cov, variance, m = GP_analysis(XY, Z, XY_grid)
            xx = np.asarray(XY_grid[:, 0])
            yy = np.asarray(XY_grid[:, 1])
            xo = np.asarray(XY[:, 0]).reshape(-1)
            yo = np.asarray(XY[:, 1]).reshape(-1)
            # Posterior mean as a coloured scatter; observations overlaid as
            # black squares.
            sub_mean[i, j].scatter(xx,
                                   yy,
                                   c=mean.T[0],
                                   vmin=min(mean.T[0]),
                                   vmax=max(mean.T[0]),
                                   edgecolors='none',
                                   cmap='GnBu')
            sub_mean[i, j].scatter(xo, yo, c='k', marker='s')

            # Posterior variance map; observations overlaid in white.
            sub_sigma[i, j].scatter(xx,
                                    yy,
                                    c=variance,
                                    vmin=min(variance),
                                    vmax=max(variance),
                                    edgecolors='none')
            sub_sigma[i, j].scatter(xo, yo, c='white')

            # Wrap the fitted GPy model for GPyOpt and evaluate LCB on the grid.
            model = GPModel(optimize_restarts=1, verbose=True)
            model.model = m
            space = Design_space([{
                'name': 'var1',
                'type': 'continuous',
                'domain': (-10, 10)
            }, {
                'name': 'var2',
                'type': 'continuous',
                'domain': (-10, 10)
            }])
            acq = AcquisitionLCB(model, space,
                                 exploration_weight=1)  # Hardcoded!!!!!!!!!
            alpha_full = acq.acquisition_function(XY_grid)
            sub_acq[i, j].scatter(xx,
                                  yy,
                                  c=alpha_full.T[0],
                                  vmin=min(alpha_full.T[0]),
                                  vmax=max(alpha_full.T[0]),
                                  edgecolors='none',
                                  cmap='GnBu')
            sub_acq[i, j].scatter(xo, yo, c='k', marker='s')
            # Mark the acquisition minimum (next proposed sample) with a plus.
            minXY = XY_grid[np.argmin(alpha_full)]
            sub_acq[i, j].scatter(minXY[0], minXY[1], marker='P')

            j = j + 1

    # Save the three timestamped figure grids into GP_results/.
    timestamp = (datetime.datetime.now()).strftime("%m-%d_%H-%M-%S")
    f_mean.subplots_adjust(wspace=0.3, top=None, bottom=None)
    f_mean.savefig(f'GP_results/dis_mean_M1_M2-{timestamp}.pdf')
    f_sigma.subplots_adjust(wspace=0.3, top=None, bottom=None)
    f_sigma.savefig(f'GP_results/dis_sigma_M1_M2-{timestamp}.pdf')
    f_acq.subplots_adjust(wspace=0.3, top=None, bottom=None)
    f_acq.savefig(f'GP_results/dis_acq_M1_M2-{timestamp}.pdf')
    #plt.show()
    plt.close()
 def __init__(self, model, space, optimizer=None, cost_withGradients=None):
     """AcquisitionLCB subclass pinned to exploration_weight=0.

     With zero weight the confidence-bound term presumably drops out,
     leaving a pure-exploitation acquisition — confirm against GPyOpt's
     AcquisitionLCB implementation.
     """
     AcquisitionLCB.__init__(self, model, space, optimizer, cost_withGradients,exploration_weight=0)

	# NOTE(review): fragment of a larger function whose `def` is outside this
	# chunk. Indentation normalized to tabs throughout: the original mixed
	# space-indented lines into a tab-indented block, which raises TabError.
	m.optimize('bfgs', max_iters=100)  # Hyper-parameters are optimized here (lengthscale and noise)

	# Save optimizer output with hyperparameters for later analysis
	f = open(f"GP_results/opt_params_{timestamp}.txt", "a+")
	ansi_escape = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')  # strip ANSI colour codes
	text = ansi_escape.sub('', str(m))
	f.write(text + '\n')
	f.close()

	# Find next point
	model = GPModel(optimize_restarts=1, verbose=True)
	model.model = m
	# Acquisition function uses posterior to select next observation point
	acq = AcquisitionLCB(model, space, exploration_weight = 0.5) # Hardcoded trade-off parameter
	alpha_full = acq.acquisition_function(X_grid)
	# Next point is minimum of acquisition function
	magnet_values = X_grid[np.argmin(alpha_full),:]

	print("Min LCB: ", np.argmin(alpha_full), min(alpha_full), X_grid[np.argmin(alpha_full),:])
	print("Max LCB: ", np.argmax(alpha_full), max(alpha_full), X_grid[np.argmax(alpha_full),:])

	# Plot results if 1D
	if (len(magnet_list)==1):
		gp.plot1D(f'GP_results/correctorValues_Distance_{timestamp}.txt')

	# Save new corrector values to file
	f = open(f"GP_results/newCorrectorValues_{timestamp}.txt", "a+")
	f.write('%s' % ' '.join(map('{:.4f}'.format, list(magnet_values))) + '\n')
	f.close()