Example #1
def bayesian_opt():

    # 1. ranges of the synth parameters
    syn1 = syn2 = syn3 = syn4 = syn5 = np.arange(158)
    syn6 = np.arange(6000)
    syn7 = np.arange(1000)
    syn8 = np.arange(700)

    # 2. combine the synth parameter ranges into an 8D parameter space
    # parameter_space = ParameterSpace(
    #     [ContinuousParameter('x1', 0., 157.)])

    # parameter_space = ParameterSpace(
    #     [DiscreteParameter('x8', syn8)])

    parameter_space = ParameterSpace(
        [ContinuousParameter('x1', 0., 157.), ContinuousParameter('x2', 0., 157.), ContinuousParameter('x3', 0., 157.),
         ContinuousParameter('x4', 0., 157.), ContinuousParameter('x5', 0., 157.), ContinuousParameter('x6', 0., 5999.),
         ContinuousParameter('x7', 0., 999.), ContinuousParameter('x8', 0., 699.)])

    # parameter_space = ParameterSpace(
    #     [DiscreteParameter('x1', syn1), DiscreteParameter('x2', syn2), DiscreteParameter('x3', syn3),
    #      DiscreteParameter('x4', syn4), DiscreteParameter('x5', syn5), DiscreteParameter('x6', syn6),
    #      DiscreteParameter('x7', syn1), DiscreteParameter('x8', syn8)])

    # 3. collect random points
    design = RandomDesign(parameter_space)

    X = design.get_samples(num_data_points)  # X is a numpy array
    print("X=", X)

    # [is the below needed?]
    # UserFunction.evaluate(training_function, X)
    # I put UserFunctionWrapper in line 94

    # 4. define training_function as Y
    Y = training_function(X)

    # [is this needed?]
    # loop_state = create_loop_state(X, Y)

    # 5. train and wrap the model in Emukit
    model_gpy = GPRegression(X, Y, normalizer=True)

    model_emukit = GPyModelWrapper(model_gpy)
    expected_improvement = ExpectedImprovement(model=model_emukit)
    bayesopt_loop = BayesianOptimizationLoop(model=model_emukit,
                                             space=parameter_space,
                                             acquisition=expected_improvement,
                                             batch_size=5)

    max_iterations = 15
    bayesopt_loop.run_loop(training_function, max_iterations)
    model_gpy.plot()
    plt.show()
    results = bayesopt_loop.get_results()
    # bayesopt_loop.loop_state.X
    print("X: ", bayesopt_loop.loop_state.X)
    print("Y: ", bayesopt_loop.loop_state.Y)
    print("cost: ", bayesopt_loop.loop_state.cost)
Example #2
def test_multi_fidelity_function_shapes(fcn):
    n_points = 10
    fcn, space = fcn()
    random = RandomDesign(space)
    samples = random.get_samples(n_points)

    # There are only 2 or 3 fidelity functions in the set of functions we are testing
    n_fidelities = len(space.parameters[-1].domain)
    if n_fidelities == 2:
        samples[:5, -1] = 0
        samples[5:, -1] = 1
    elif n_fidelities == 3:
        samples[:5, -1] = 0
        samples[5:8, -1] = 1
        samples[8:, -1] = 2
    else:
        raise ValueError(
            'Please add a case for functions with {:.0f} fidelity levels'.
            format(n_fidelities))

    # Check shapes when calling through function wrapper
    results = fcn.evaluate(samples)
    assert len(results) == n_points
    for result in results:
        assert result.Y.shape == (1, )

    # Also check shape when calling each fidelity function individually
    for f in fcn.f:
        assert f(samples[:, :-1]).shape == (n_points, 1)
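
The test above expects each fcn() factory to return a multi-fidelity function wrapper (whose .f attribute is a list with one callable per fidelity) together with a ParameterSpace whose last parameter encodes the fidelity level. A minimal two-fidelity factory satisfying those expectations might look like the sketch below; the toy objective is made up, while MultiSourceFunctionWrapper and InformationSourceParameter are the Emukit classes that provide the .f list and the fidelity domain:

import numpy as np
from emukit.core import ParameterSpace, ContinuousParameter, InformationSourceParameter
from emukit.core.loop.user_function import MultiSourceFunctionWrapper

def two_fidelity_example():
    # Low- and high-fidelity versions of a toy objective; each returns shape (n, 1)
    f_low = lambda x: np.sum(x ** 2, axis=1, keepdims=True) + 0.5
    f_high = lambda x: np.sum(x ** 2, axis=1, keepdims=True)
    space = ParameterSpace([ContinuousParameter('x', 0., 1.),
                            InformationSourceParameter(2)])  # fidelity index is the last parameter
    return MultiSourceFunctionWrapper([f_low, f_high]), space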
Example #3
def run_optimisation(current_range, freq_range, power_range):
    parameter_space = ParameterSpace([
        ContinuousParameter('current', current_range[0], current_range[1]),
        ContinuousParameter('freq', freq_range[0], freq_range[1]),
        ContinuousParameter('power', power_range[0], power_range[1])
        ])

    def function(X):
        current = X[:, 0]
        freq = X[:, 1]
        power = X[:, 2]
        out = np.zeros((len(current), 1))
        for g in range(len(current)):
            # Set JPA current, frequency & power here (instrument calls elided)
            out[g, 0] = -get_SNR(
                plot=False)[-1]  # Negative as we want to maximise SNR
        return out

    num_data_points = 10

    design = RandomDesign(parameter_space)
    X = design.get_samples(num_data_points)
    Y = function(X)

    model_gpy = GPRegression(X, Y)
    model_gpy.optimize()
    model_emukit = GPyModelWrapper(model_gpy)

    exp_imprv = ExpectedImprovement(model=model_emukit)
    optimizer = GradientAcquisitionOptimizer(space=parameter_space)
    point_calc = SequentialPointCalculator(exp_imprv, optimizer)

    bayesopt_loop = BayesianOptimizationLoop(model=model_emukit,
                                             space=parameter_space,
                                             acquisition=exp_imprv,
                                             batch_size=1)

    stopping_condition = FixedIterationsStoppingCondition(i_max=100)

    bayesopt_loop.run_loop(function, stopping_condition)  # 'function' is the objective defined above

    results = bayesopt_loop.get_results()
    coord_results = results.minimum_location
    min_value = results.minimum_value
    step_results = results.best_found_value_per_iteration
    print(coord_results)
    print(min_value)

    return coord_results, abs(min_value)
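
get_SNR is the instrument routine the objective above assumes and is not shown in the listing. A hypothetical stand-in, plus a call with made-up ranges, just to illustrate how run_optimisation would be driven:

import numpy as np

def get_SNR(plot=False):
    # Hypothetical stand-in: the real routine measures the amplifier and returns
    # a sequence whose last element is the SNR being maximised.
    return [np.random.uniform(1.0, 10.0)]

coords, best_snr = run_optimisation(current_range=(0.0, 1.0),
                                    freq_range=(4e9, 8e9),
                                    power_range=(-20.0, 0.0))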
Example #4
    def get_samples(self,
                    n_samples,
                    log_p_function,
                    burn_in_steps=50,
                    n_steps=100):
        """
        Generates samples.

        Parameters:
            n_samples - number of samples to generate
            log_p_function - a function that returns log density for a specific sample
            burn_in_steps - number of burn-in steps for sampling
            n_steps - number of MCMC steps to run after burn-in

        Returns a tuple of two arrays: (samples, log_p_function values for samples)
        """
        X_init = RandomDesign(self.space).get_samples(n_samples)
        sampler = emcee.EnsembleSampler(n_samples, X_init.shape[1],
                                        log_p_function)

        # Burn-In
        samples, samples_log, _ = sampler.run_mcmc(X_init, burn_in_steps)

        # MCMC Sampling
        samples, samples_log, _ = sampler.run_mcmc(samples, n_steps)

        # make sure we have an array of shape (n samples, space input dim)
        if len(samples.shape) == 1:
            samples = samples.reshape(-1, 1)
        samples_log = samples_log.reshape(-1, 1)

        return samples, samples_log
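
This get_samples implementation follows the pattern of Emukit's affine-invariant ensemble sampler, which keeps a ParameterSpace on self.space and delegates to emcee. A small usage sketch, assuming the sampler can be imported from emukit.samplers (adjust the path for your Emukit version), drawing from a standard-normal log-density:

import numpy as np
from emukit.core import ParameterSpace, ContinuousParameter
from emukit.samplers import AffineInvariantEnsembleSampler  # assumed import path

def log_standard_normal(x):
    # Log-density of a standard normal, up to an additive constant
    return -0.5 * float(np.sum(np.square(x)))

space = ParameterSpace([ContinuousParameter('x', -3., 3.)])
sampler = AffineInvariantEnsembleSampler(space)
samples, sample_logs = sampler.get_samples(20, log_standard_normal, burn_in_steps=50)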
Example #5
    def __init__(self,
                 user_function: UserFunctionWithCostWrapper,
                 space: ParameterSpace,
                 s_min,
                 s_max,
                 n_init: int = 20) -> None:
        """
        Fast Bayesian optimization on large datasets

        :param space: contains the definition of the variables of the input space.

        """
        self.s_min = s_min
        self.s_max = s_max
        self.incumbents = []
        self.user_function = user_function

        init_design = RandomDesign(space)

        subsets = [s_max / s_sub for s_sub in [256, 128, 64, 32]]

        n_init = np.max([n_init, len(subsets)])
        n_init_point = n_init // len(subsets)
        cands = init_design.get_samples(n_init_point)
        # TODO: check that subsets are larger than smin
        # s = np.array(subsets)
        s = np.zeros([n_init])
        X_init = np.zeros([n_init, cands.shape[1] + 1])

        idx = 0
        for i in range(n_init_point):
            # s[i] = transform(subsets[i % len(subsets)], s_min, s_max)
            # s[i] = subsets[i % len(subsets)]
            for subset in subsets:
                X_init[idx] = np.append(cands[i], [subset])
                idx += 1

        # X_init = np.append(X_init, s[:, None], axis=1)

        C_init = []
        Y_init = []

        curr_inc = None
        curr_inc_val = np.inf
        for x in X_init:
            res = self.user_function.evaluate(x[None, :])
            C_init.append(res[0].C)
            Y_init.append(res[0].Y)

            if res[0].Y < curr_inc_val:
                curr_inc_val = res[0].Y
                curr_inc = x[:-1]
            self.incumbents.append(curr_inc)

        space.parameters.append(ContinuousParameter("s", s_min, s_max))

        Y_init = np.array(Y_init)
        C_init = np.array(C_init)

        model_objective = FabolasModel(X_init,
                                       Y_init,
                                       s_min,
                                       s_max,
                                       basis_func=quad)
        model_cost = FabolasModel(X_init,
                                  C_init,
                                  s_min,
                                  s_max,
                                  basis_func=linear)
        es = InformationGainPerCost(model=model_objective,
                                    cost_model=model_cost,
                                    space=space)
        acquisition_optimizer = DifferentialEvolution(space)

        candidate_point_calculator = Sequential(es, acquisition_optimizer)

        super(Fabolas, self).__init__(
            X_init=X_init,
            Y_init=Y_init,
            C_init=C_init,
            space=space,
            acquisition=es,
            candidate_point_calculator=candidate_point_calculator,
            model_objective=model_objective,
            model_cost=model_cost)
Example #6
    saver = tf.train.Saver()
    saver.restore(ppo.sess, model_path)
    PPO_totalR = []
    EI_totalR = []
    PI_totalR = []
    for ep in range(30):

        fun = env.reset(upper_bound=1, lower_bound=0)
        ppo.ppoMax = 0
        ppoMin = float('inf')
        ppo.ep_r = 0
        ppo.funCurMax = env.maxVal
        ppo.curFun = env.getCurFun()

        design = RandomDesign(parameter_space)  # Collect random points
        X = design.get_samples(num_data_points)
        Y = fun(X)
        model_gpy = GPRegression(X, Y)  # Train and wrap the model in Emukit
        model_emukit = GPyModelWrapper(model_gpy)
        ppo.model = model_emukit
        bo = BayesianOptimizationLoop(model=model_emukit,
                                      space=parameter_space,
                                      acquisition=ppo,
                                      batch_size=1)
        mu = np.array([np.mean(bo.loop_state.X)])[np.newaxis]
        var = np.array([np.var(bo.loop_state.X)])[np.newaxis]
        s = np.concatenate((mu, var), axis=1)
        boPPOep_r = []

        model_gpyEI = GPRegression(X, Y)  # Train and wrap the model in Emukit
Example #7
def get_random_hyperparameter_configuration():
    random_design = RandomDesign(space)
    return random_design.get_samples(1)
Example #8
    l_cap = X[:,2]
    l_ind = X[:,3]
    w = X[:,4]
    gap_ind = X[:,5]
    out = np.zeros((len(l_ind),1))
    for g in range(len(l_ind)):
        # Check that resonator geometry is sensible:
        if l_ind[g] > l_cap[g]/2:
            out[g,0] = 10e20  # Large cost for bad geometry
        else:
            out[g,0] = -simulation_wrapper(host, COMSOL_model, paramfile, w[g], t, l_ind[g], pen, omega, gap_cap[g], w_cap[g], l_cap[g], w_mesa, h_mesa, gap_ind[g])[0]
    return out

# Set up random seeding of parameter space
num_data_points = no_random_seeds
design = RandomDesign(parameter_space)
X = design.get_samples(num_data_points)
Y = q(X)

# Set up emukit model
model_gpy = GPRegression(X,Y)
model_gpy.optimize()
model_emukit = GPyModelWrapper(model_gpy)

# Set up Bayesian optimisation routine
exp_imprv = ExpectedImprovement(model=model_emukit)
optimizer = GradientAcquisitionOptimizer(space=parameter_space)
point_calc = SequentialPointCalculator(exp_imprv, optimizer)

# Bayesian optimisation routine
bayesopt_loop = BayesianOptimizationLoop(model=model_emukit,
Example #9
    b = SinTwo()

obj = Wrapper(b)

f_opt = b.get_meta_information()["f_opt"]

cs = b.get_configuration_space()

list_params = []

for h in cs.get_hyperparameters():
    list_params.append(ContinuousParameter(h.name, h.lower, h.upper))

space = ParameterSpace(list_params)

init_design = RandomDesign(space)
X_init = init_design.get_samples(2)
Y_init = np.array([b.objective_function(xi)["function_value"] for xi in X_init])[:, None]


if args.model_type == "bnn":
    model = Bohamiann(X_init=X_init, Y_init=Y_init, verbose=True)

elif args.model_type == "rf":
    model = RandomForest(X_init=X_init, Y_init=Y_init)
    with_gradients = False

elif args.model_type == "dngo":
    model = DNGO(X_init=X_init, Y_init=Y_init)
    with_gradients = False
Example #10

cs = b.get_configuration_space()

list_params = []

for h in cs.get_hyperparameters():
    list_params.append(ContinuousParameter(h.name, h.lower, h.upper))

space = ParameterSpace(list_params)

if args.method == "gp_ei":

    obj = UserFunctionWithCostWrapper(evaluate)

    init_design = RandomDesign(space)
    X_init = init_design.get_samples(args.n_init)
    Y_init, C_init = evaluate(X_init)

    model = BOGP(X_init=X_init, Y_init=Y_init)

    acquisition = LogExpectedImprovement(model)

    # acquisition_optimizer = DirectOptimizer(space)
    acquisition_optimizer = DifferentialEvolution(space)
    candidate_point_calculator = Sequential(acquisition, acquisition_optimizer)

    bo = BayesianOptimizationLoop(model=model, space=space, X_init=X_init, Y_init=Y_init, acquisition=acquisition,
                                  candidate_point_calculator=candidate_point_calculator)
    initial_results = []
    for i in range(X_init.shape[0]):