def bayesian_optimization(num_iterations=50, num_runs=10, output_filename=None):
    """
    Run a Bayesian optimization procedure with the given number of iterations
    to find a good hyperparameter configuration on the network surrogate.
    Run the whole search a given number of times and write the average
    optimization trajectory (incumbent errors per epoch) as well as the
    cumulative runtime of all configuration evaluations per epoch into
    the file specified by the given filename.
    """
    lower_bounds = np.array([b[0] for b in hyperparam_ranges])
    upper_bounds = np.array([b[1] for b in hyperparam_ranges])
    incumbent_errors = np.zeros(num_iterations)
    cumulative_runtimes = np.zeros(num_iterations)
    for run in range(num_runs):
        results = fmin.bayesian_optimization(
                objective_function, lower_bounds, upper_bounds,
                num_iterations=num_iterations)
        cumulative_runtimes += \
                np.cumsum([runtime(config) for config in results["X"]])
        incumbent_errors += np.array(results["incumbent_values"])
        print("Finished run %d of %d." % (run, num_runs))
    incumbent_errors /= num_runs
    cumulative_runtimes /= num_runs
    # Write the average incumbent error and cumulative runtime per iteration
    if output_filename is not None:
        with open(output_filename, "w") as output_file:
            for i in range(len(incumbent_errors)):
                output_file.write("%d\t%f\t%f\n" %
                        (i, incumbent_errors[i], cumulative_runtimes[i]))
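
For context (an addition, not part of the original snippet; every definition below is a hypothetical stand-in), the function above assumes several module-level names that would look roughly like this:

import numpy as np
from robo import fmin  # provides fmin.bayesian_optimization

# Hypothetical search space: one (lower, upper) pair per hyperparameter.
hyperparam_ranges = [(-6.0, 0.0), (32.0, 512.0), (4.0, 10.0)]

def objective_function(config):
    # Stand-in for the network surrogate's validation error.
    return float(np.sum(np.square(config)))

def runtime(config):
    # Stand-in for the surrogate's predicted training time of a configuration.
    return 1.0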
Example 2
 def optimization(self):
     if (self.method == 'bayesian_optimization'):
         results = bayesian_optimization(self.objective_function,
                                         lower=self.lower_bounds,
                                         upper=self.upper_bounds,
                                         num_iterations=self.num_iterations,
                                         acquisition_func='ei')
         print(results['x_opt'], results['f_opt'])
         self.results = {
             'x_opt': results['x_opt'],
             'f_opt': results['f_opt'],
             'mse': results['incumbent_values']
         }
     elif (self.method == 'random_search'):
         results = random_search(self.objective_function,
                                 lower=self.lower_bounds,
                                 upper=self.upper_bounds,
                                 num_iterations=self.num_iterations)
         print(results['x_opt'], results['f_opt'])
         self.results = {
             'x_opt': results['x_opt'],
             'f_opt': results['f_opt'],
             'mse': results['incumbent_values']
         }
     else:
         pass
Example 3
 def bayesian_optimization(self):
     """ Implementation of the Bayesian Optimization. """
     # Set the parameters for the bayesian optimization
     function = ho.objective_function
     lower = np.array([
         self._learning_rate[0], self._batch_size[0],
         self._number_filters[0], self._number_filters[0],
         self._number_filters[0]
     ])
     upper = np.array([
         self._learning_rate[1], self._batch_size[1],
         self._number_filters[1], self._number_filters[1],
         self._number_filters[1]
     ])
     for i in range(RUNS):
         results = bayesian_optimization(function,
                                         lower,
                                         upper,
                                         num_iterations=ITERATIONS)
         self.bo_performance.append(results['incumbent_values'])
         self.bo_runtime.append(results['runtime'])
     # Mean over all the runs
     self.bo_performance = np.mean(self.bo_performance, axis=0)
     # Cumulative sum of the mean for all the runs
     self.bo_runtime = np.cumsum(np.mean(self.bo_runtime, axis=0), axis=0)
Example 4
 def test_bayesian_optimization_gp_mcmc(self):
     res = bayesian_optimization(objective_function=objective,
                                 lower=self.lower,
                                 upper=self.upper,
                                 n_init=2,
                                 num_iterations=3)
     assert len(res["x_opt"]) == 1
     assert np.array(res["x_opt"]) >= 0
     assert np.array(res["x_opt"]) <= 1
Example 5
 def test_bayesian_optimization(self):
     res = bayesian_optimization(objective_function=objective,
                                 lower=self.lower,
                                 upper=self.upper,
                                 n_init=2,
                                 num_iterations=3)
     assert len(res["x_opt"]) == 1
     assert np.array(res["x_opt"]) >= 0
     assert np.array(res["x_opt"]) <= 1
Example 6
 def test_rf(self):
     res = bayesian_optimization(objective_function=objective,
                                 lower=self.lower,
                                 upper=self.upper,
                                 n_init=2,
                                 num_iterations=3,
                                 model_type="rf")
     assert len(res["x_opt"]) == 1
     assert np.array(res["x_opt"]) >= 0
     assert np.array(res["x_opt"]) <= 1
Example 7
def robodrive(objective, hyperparameters, results_path, n_iterations=50):
    """
    Distributed Bayesian optimization with Robo.

    Parameters:
    ----------
    * `objective` [function]:
        User defined function which calls a learner
        and returns a metric of interest.

    * `hyperparameters` [list, shape=(n_hyperparameters,)]:
        Hyperparameter search space definitions.

    * `results_path` [string]:
        Path to save optimization results.

    * `n_iterations` [int, default=50]:
        Number of optimization iterations.
    """
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()

    # Setup savefile
    if rank < 10:
        # Ensure results are sorted by rank
        filename = 'hyperspace' + str(0) + str(rank)
    else:
        filename = 'hyperspace' + str(rank)

    if not os.path.exists(results_path):
        os.makedirs(results_path, exist_ok=True)

    savefile = os.path.join(results_path, filename)

    if rank == 0:
        robospace = create_robospace(hyperparameters)
        hyperspace = convert_robospace(robospace)
    else:
        hyperspace = None

    space = comm.scatter(hyperspace, root=0)

    lower = space[0]
    upper = space[1]

    results = bayesian_optimization(objective,
                                    lower,
                                    upper,
                                    num_iterations=n_iterations)

    with open(savefile, 'wb') as handle:
        pickle.dump(results, handle, protocol=pickle.HIGHEST_PROTOCOL)
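
As a usage note (an addition; the launch command, dummy objective, and hyperparameter format are assumptions based on this snippet), each rank receives one subspace from comm.scatter, so the driver runs with one MPI process per subspace:

# Hypothetical driver, launched e.g. with:  mpiexec -n 4 python driver.py
import numpy as np

def dummy_objective(x):
    # Stand-in learner returning a scalar metric of interest.
    return float(np.sum((x - 0.5) ** 2))

# Hypothetical hyperparameter definitions accepted by create_robospace.
hyperparameters = [(0.0, 1.0), (0.0, 1.0)]

robodrive(dummy_objective, hyperparameters, results_path='./results', n_iterations=10)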
Example 8
    def get_weights(self, feature, X, Y, val_X, val_Y):
        def objective_function(weights):
            selected_idx = self._select(feature, weights, Y)
            select_X, select_Y = X[selected_idx], Y[selected_idx]
            self.model.train(select_X, select_Y)
            error = self.model.loss(val_X, val_Y)
            return error

        return bayesian_optimization(
            objective_function=objective_function,
            lower=self.lower,
            upper=self.upper,
            num_iterations=self.num_iterations)['x_opt']
Example 9
def optimizate_bayesian():
    lower = np.array([-6, 32, 4, 4, 4])
    upper = np.array([0, 512, 10, 10, 10])
    list_y_performance = np.zeros((10, 50))
    list_y_runtime = np.zeros((10, 50))
    for i in range(10):
        result = bayesian_optimization(objective_function,
                                       lower,
                                       upper,
                                       num_iterations=50)
        list_y_performance[i] = result.get('incumbent_values')
        list_y_runtime[i] = result.get('runtime')
    mean_performance = np.mean(list_y_performance, axis=0)
    mean_runtime = np.cumsum(np.mean(list_y_runtime, axis=0))
    return mean_performance, mean_runtime
Example 10
def run_bayesian_optimization(args, kernel, objective_function, embedding_lst, eval_lst, BEST, WORST):
    origin_rdl = embedding_lst[:]
    origin_el = eval_lst[:]

    lower = np.array([np.amin(embedding_lst)]*len(embedding_lst[0]))
    upper = np.array([np.amax(embedding_lst)]*len(embedding_lst[0]))

    results = []
    best_ind = []
    fix_budget = []
    close_best = []

    for i in range(args.num_run):
        embedding_lst = origin_rdl[:]
        eval_lst = origin_el[:]
        logging.info("# %d run of bayesian optimization.", i)

        # get the initialization
        if i < len(embedding_lst)-2:
            start = i
            end = i+3
        else:
            start = i%(len(embedding_lst)-2)
            end = i%(len(embedding_lst)-2)+3
        x_init = np.array(embedding_lst[start:end])
        y_init = np.array(eval_lst[start:end])

        result = bayesian_optimization(objective_function, lower,upper, acquisition_func=args.acquisition_func, 
                                       model_type=args.model_type, num_iterations=len(origin_rdl), 
                                       X_init=x_init, Y_init=y_init, kernel=kernel, 
                                       sampling_method=args.sampling_method, replacement=args.replacement, 
                                       pool=np.array(embedding_lst), best=BEST)
        results.append(result)
        invs = result["incumbent_values"]

        if BEST in invs:
            best_ind.append(invs.index(BEST)+1)
        else:
            best_ind.append(WORST)

        fix_budget.append(abs(min(invs[:args.budget])-BEST))

        # Use a separate index so the outer run counter i is not shadowed
        for j in range(len(invs)):
            if invs[j]-BEST <= args.dif:
                close_best.append(j+1)
                break

    return results, best_ind, fix_budget, close_best
Example 11
def bayesian_opt():
    bayesian_incumbent_list = []
    bayesian_incumbent_mean_list = []
    bayesian_runtime = []
    for i in range(0, 10):
        output_dict = bayesian_optimization(objective_function,
                                            lower,
                                            upper,
                                            num_iterations=50)
        bayesian_incumbent_mean_list.append(
            np.mean(output_dict['incumbent_values']))
        bayesian_incumbent_list.append(output_dict['incumbent_values'])
        bayesian_runtime.append(output_dict['runtime'])

    return (np.mean(bayesian_incumbent_list, axis=0),
            np.cumsum(np.mean(bayesian_runtime, axis=0), axis=0),
            bayesian_incumbent_mean_list)
Example 12
    def get_res(self):
        lower = self.space.bound[0]
        upper = self.space.bound[1]

        res = bayesian_optimization(
            self.fn,
            lower,
            upper,
            maximizer=self.params.get('maximizer', 'scipy'),
            acquisition_func=self.params.get('acquisition_func', 'log_ei'),
            model_type=self.params.get('model_type', 'bohamiann'),
            num_iterations=self.params['niter'],
            output_path=str(self.output_path),
            rng=self.rng,
        )

        return res
Example 13
def bayesian_opt():

    lower = np.array([-6, 32, 4, 4, 4])
    upper = np.array([0, 512, 10, 10, 10])

    f = objective_function
    incumbents = np.zeros((10, 50))
    incumbents_runtime = np.zeros((10, 50))

    for i in range(10):
        result = bayesian_optimization(f, lower, upper, num_iterations=50)
        # Record each run inside the loop, not just the last one
        incumbents_runtime[i] = result["runtime"]
        incumbents[i] = result["incumbent_values"]

    inc = np.mean(incumbents, axis=0)
    runtime = np.mean(incumbents_runtime, axis=0)

    return inc, runtime
Example 14
 def optimization(self):
     for optimization_method in self.optimization_methods:
         t_start = time.time()
         if (optimization_method == 'bayesian_optimization'):
             results = bayesian_optimization(
                 self.objective_function,
                 lower=self.lower_bounds,
                 upper=self.upper_bounds,
                 num_iterations=self.num_iterations,
                 acquisition_func='ei')
             t_end = time.time()
             t = round(t_end - t_start)
             x_opt = {}
             for i in range(len(self.tune_params)):
                 x_opt[self.tune_params[i]] = results['x_opt'][i]
             print(x_opt, results['f_opt'])
             self.results['bayesian_optimization'] = {
                 'method': 'bayesian_optimization',
                 'x_opt': x_opt,
                 'f_opt': results['f_opt'],
                 'mse': results['incumbent_values'],
                 'time_consume': t
             }
         elif (optimization_method == 'random_search'):
             results = random_search(self.objective_function,
                                     lower=self.lower_bounds,
                                     upper=self.upper_bounds,
                                     num_iterations=self.num_iterations)
             t_end = time.time()
             t = round(t_end - t_start)
             x_opt = {}
             print(results['x_opt'])
             for i in range(len(self.tune_params)):
                 x_opt[self.tune_params[i]] = results['x_opt'][i]
             print(x_opt, results['f_opt'])
             self.results['random_search'] = {
                 'method': 'random_search',
                 'x_opt': x_opt,
                 'f_opt': results['f_opt'],
                 'mse': results['incumbent_values'],
                 'time_consume': t
             }
         else:
             pass
Example 15
    def gp_plain(self, seed, num_iterations=50):
        """Bayesian optimization with a plain Gaussian process model and no contextual information.

           Returns an np.array of shape (num_iterations,): the mean incumbent trajectory over self.T repetitions.
        """
        incumbent_value = np.ones((self.T, num_iterations))
        for i in range(self.T):
            rng1 = np.random.RandomState(seed)
            if self.add_coef: self.f = Quadratic_function(self.coef[i])
            result = bayesian_optimization(self.f.call,
                                           self.lower_x,
                                           self.upper_x,
                                           num_iterations=num_iterations,
                                           model_type="gp",
                                           acquisition_func="ei",
                                           n_init=1,
                                           rng=rng1)
            # Branin needs n_init=1
            incumbent_value[i] = result["incumbent_values"]
        return np.mean(incumbent_value, axis=0)
Example 16
    def train_weights(self,
                      feature,
                      X,
                      Y,
                      val_X,
                      val_Y,
                      select_num=1600,
                      isbalance=True):
        def objective_function(weights):
            selected_idx = self.select_data(feature, weights, select_num, Y,
                                            isbalance)
            select_X, select_Y = X[selected_idx], Y[selected_idx]
            self.model.train(select_X, select_Y)
            error = self.model.loss(val_X, val_Y)
            return error

        return bayesian_optimization(
            objective_function=objective_function,
            lower=self.lower,
            upper=self.upper,
            num_iterations=self.num_iterations)['x_opt']
Example 17
def bayes_opt():
    """ 
    Bayesian Optimization with RoBO.
    Returns dictionary with results:
        "x_opt" : the best found data point
        "f_opt" : the corresponding function value
        "incumbents": the incumbent (best found value) after each iteration
        "incumbent_value": the function values of the incumbents
        "runtime": the runtime in seconds after each iteration
        "overhead": the optimization overhead 
                    (i.e. time data we do not spend to evaluate the function) 
                    of each iteration
        "X": all data points that have been evaluated
        "y": the corresponding function evaluations
    """
    print('\n============= START Bayesian OPTIMIZATION =============\n')
    print("""Optimization parameters:
                    - lower = {}
                    - upper = {}
                    - num_iter = {}
                    - maximizer = {}
                    - acq_func = {}
                    - model_type = {} 
                    - n_init = {} """.format(lower, upper, args.num_iterations,
                                             args.maximizer,
                                             args.acquisition_func,
                                             args.model_type, args.n_init))

    results = bayesian_optimization(objective_function,
                                    lower,
                                    upper,
                                    num_iterations=args.num_iterations,
                                    maximizer=args.maximizer,
                                    acquisition_func=args.acquisition_func,
                                    model_type=args.model_type,
                                    n_init=args.n_init)
    print(results["x_opt"])
    print(results["f_opt"])
    print('\n============= END OPTIMIZATION =============\n')
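
A possible follow-up (an addition; the filename is arbitrary and the call assumes the return statement above) that persists the per-iteration keys listed in the docstring:

import json

results = bayes_opt()
with open('bo_results.json', 'w') as fh:
    json.dump({'incumbent_values': [float(v) for v in results['incumbent_values']],
               'runtime': [float(t) for t in results['runtime']],
               'x_opt': [float(v) for v in results['x_opt']],
               'f_opt': float(results['f_opt'])}, fh)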
Example 18
    output_path = "./experiments/fabolas/results/cnn_%s/ei_%d" % (dataset,
                                                                  run_id)
elif dataset == "res_net":
    f = ResidualNeuralNetworkOnCIFAR10(rng=rng)
    num_iterations = 10
    output_path = "./experiments/fabolas/results/res_%s/ei_%d" % (dataset,
                                                                  run_id)

os.makedirs(output_path, exist_ok=True)

info = f.get_meta_information()
bounds = np.array(info['bounds'])
results = bayesian_optimization(f,
                                bounds[:, 0],
                                bounds[:, 1],
                                num_iterations=num_iterations,
                                n_init=2,
                                rng=rng,
                                output_path=output_path)

results["run_id"] = run_id
results['X'] = results['X'].tolist()
results['y'] = results['y'].tolist()

test_error = []
current_inc = None
current_inc_val = None

key = "incumbents"

for inc in results["incumbents"]:
Example 19
if __name__ == '__main__':
    r_all_errors = []
    r_all_time = []
    b_all_errors = []
    b_all_time = []
    for i in range(10):
        print(i)
        sys.stdout.flush()
        plot_error, plot_time = random_search(objective_function,
                                              runtime,
                                              iterations=50)
        r_all_errors.append(plot_error)
        r_all_time.append(plot_time)
        b_results = bayesian_optimization(objective_function,
                                          lower,
                                          upper,
                                          num_iterations=50)
        b_all_errors.append(b_results['incumbent_values'])
        cur_time = 0.0
        b_plot_time = []
        for x in b_results['X']:
            cur_time += runtime(x)
            b_plot_time.append(cur_time)
        b_all_time.append(b_plot_time)
    for e in np.mean(r_all_errors, axis=0):
        print('{:.6f}'.format(e))
    print('=' * 80)
    for t in np.mean(r_all_time, axis=0):
        print('{:.2f}'.format(t))
    print('=' * 80)
    for e in np.mean(b_all_errors, axis=0):
Example 20
import numpy as np
import matplotlib.pyplot as plt

from robo.fmin import bayesian_optimization

import logging
logging.basicConfig(level=logging.INFO)


def fn(x):
    y = np.sin(3 * x[0]) * 4 * (x[0] - 1) * (x[0] + 2)
    return y


lower = np.array([-2])
upper = np.array([3])


bo = bayesian_optimization(fn, lower, upper, num_iterations=10)
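
# NOTE (added): in the released robo.fmin API the call above returns a plain dict,
# so the iteration and attribute access below read as a sketch of the
# "improved interface" referenced at the bottom of this example.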

for it in bo:
    print(it)


bo.incumbents
bo.incumbents_values

plt.plot(range(len(bo.incumbents_values)), bo.incumbents_values)

fn([150])


######################
# Improved interface #
Example 21
    f = SurrogateFCNet(path="/ihome/kleinaa/devel/git/HPOlib/surrogates/")
elif benchmark == "paramnet":
    dataset = sys.argv[4]
    f = SurrogateParamNet(dataset, "/ihome/kleinaa/devel/git/HPOlib/surrogates/")

    benchmark += "_" + dataset

info = f.get_meta_information()
bounds = np.array(info['bounds'])

if method == "entropy_search":
    results = entropy_search(f, bounds[:, 0], bounds[:, 1],
                             num_iterations=n_iters, n_init=n_init)
elif method == "gp_mcmc":
    results = bayesian_optimization(f, bounds[:, 0], bounds[:, 1],
                                    num_iterations=n_iters,
                                    n_init=n_init, model_type="gp_mcmc")
elif method == "gp":
    results = bayesian_optimization(f, bounds[:, 0], bounds[:, 1],
                                    num_iterations=n_iters,
                                    n_init=n_init, model_type="gp")
elif method == "rf":
    results = bayesian_optimization(f, bounds[:, 0], bounds[:, 1],
                                    num_iterations=n_iters,
                                    n_init=n_init, model_type="rf")
elif method == "random_search":
    results = random_search(f, bounds[:, 0], bounds[:, 1],
                            num_iterations=n_iters)
elif method == "bohamiann":
    results = bohamiann(f, bounds[:, 0], bounds[:, 1],
                        num_iterations=n_iters,
Example 22
import numpy as np

from hpolib.benchmarks.synthetic_functions import Branin

from robo.fmin import bayesian_optimization

f = Branin()
info = f.get_meta_information()
bounds = np.array(info['bounds'])

# Start Bayesian optimization to optimize the objective function
results = bayesian_optimization(f, bounds[:, 0], bounds[:, 1], num_iterations=50)
print(results["x_opt"])
print(results["f_opt"])
Example 23
    for i in range(len(x)):
        params[hps[i]] = int(x[i])

    # n_train = len(X_train)
    n_budget = s
    idx_train = np.random.randint(low=0, high=n_budget, size=n_budget)
    x_train_fab = X_train.iloc[idx_train]
    y_train_fab = y_train.iloc[idx_train]

    start_time = time.time()

    val_loss = train_evaluate_rf(x_train_fab, y_train_fab, X_val, y_val,
                                 params)

    cost = time.time() - start_time

    print('loss: % f, Cost: % f ' % (val_loss, cost))

    return val_loss, cost


# results = bayesian_optimization(objective_func_bo, lower=lower, upper=upper, num_iterations=5)
# results = fabolas(objective_func_fabolas, lower=lower, upper=upper, s_min=100, s_max=50000, num_iterations=120)
results = bayesian_optimization(objective_func_bo,
                                lower,
                                upper,
                                model_type='bohamiann',
                                num_iterations=20)

bla = 0
Example 24
    def optimize(self) -> TuningResult:
        """
        Method performs a hyperparameter optimization run according to the selected HPO-method.
        :return: result: TuningResult
            TuningResult-object that contains the results of this optimization run.
        :return:
        """

        # Convert the skopt hyperparameter space into a continuous space for RoBO
        hp_space_lower = np.zeros(shape=(len(self.hp_space), ))
        hp_space_upper = np.zeros(shape=(len(self.hp_space), ))

        for i in range(len(self.hp_space)):
            if type(self.hp_space[i]) == skopt.space.space.Integer:
                hp_space_lower[i, ] = self.hp_space[i].low
                hp_space_upper[i, ] = self.hp_space[i].high

            elif type(self.hp_space[i]) == skopt.space.space.Categorical:
                n_choices = len(list(self.hp_space[i].categories))
                hp_space_lower[i, ] = 0
                hp_space_upper[i, ] = n_choices - 1

            elif type(self.hp_space[i]) == skopt.space.space.Real:
                hp_space_lower[i, ] = self.hp_space[i].low
                hp_space_upper[i, ] = self.hp_space[i].high

            else:
                raise Exception(
                    'The skopt HP-space could not be converted correctly!')

        # Set the random seed of the random number generator
        rand_num_generator = np.random.RandomState(seed=self.random_seed)

        # Optimize on the predefined n_func_evals and measure the wall clock times
        start_time = time.time()
        self.times = []  # Initialize a list for saving the wall clock times

        # Use a warmstart configuration (only possible for BOHAMIANN, not FABOLAS)
        if self.do_warmstart == 'Yes':

            # Initialize numpy arrays for saving the warmstart configuration and the warmstart loss
            warmstart_config = np.zeros(shape=(1, len(self.hp_space)))
            warmstart_loss = np.zeros(shape=(1, 1))

            # Retrieve the default hyperparameters and the default loss for the ML-algorithm
            default_params = self.get_warmstart_configuration()

            try:

                # Dictionary for saving the warmstart HP-configuration (only contains the HPs, which are part of the
                # 'tuned' HP-space
                warmstart_dict = {}

                # Iterate over all HPs of this ML-algorithm's tuned HP-space and append the default values to
                # the numpy array
                for i in range(len(self.hp_space)):

                    this_param = self.hp_space[i].name

                    # Categorical HPs need to be encoded as integer values for RoBO
                    if type(self.hp_space[i]) == skopt.space.space.Categorical:

                        choices = self.hp_space[i].categories
                        this_warmstart_value_cat = default_params[this_param]
                        dict_value = this_warmstart_value_cat

                        # Find the index of the default / warmstart HP in the list of possible choices
                        for j in range(len(choices)):
                            if this_warmstart_value_cat == choices[j]:
                                this_warmstart_value = j

                    # For all non-categorical HPs
                    else:
                        this_warmstart_value = default_params[this_param]
                        dict_value = this_warmstart_value

                        # For some HPs (e.g. max_depth of RF) the default value is None, although their typical dtype is
                        # different (e.g. int)
                        if this_warmstart_value is None:
                            # Try to impute these values by the mean value
                            this_warmstart_value = int(
                                0.5 *
                                (self.hp_space[i].low + self.hp_space[i].high))
                            dict_value = this_warmstart_value

                    # Pass the warmstart value to the according numpy array
                    warmstart_config[0, i] = this_warmstart_value
                    warmstart_dict[this_param] = dict_value

                # Pass the default loss to the according numpy array
                warmstart_loss[0, 0] = self.get_warmstart_loss(
                    warmstart_dict=warmstart_dict)

                # Pass the warmstart configuration as a kwargs dict
                kwargs = {'X_init': warmstart_config, 'Y_init': warmstart_loss}

                # Set flag to indicate that a warmstart took place
                did_warmstart = True

            except:
                print('Warmstarting RoBO failed!')
                kwargs = {}

                # Set flag to indicate that NO warmstart took place
                did_warmstart = False

        # No warmstart requested
        else:
            kwargs = {}

            # Set flag to indicate that NO warmstart took place
            did_warmstart = False

        # Select the specified HPO-tuning method
        try:
            if self.hpo_method == 'Fabolas':

                # Budget correct? // Set further parameters?
                s_max = len(
                    self.x_train
                )  # Maximum number of data points for the training data set
                s_min = int(
                    0.05 * s_max
                )  # Minimum number of data points for the training data set
                n_init = int(self.n_func_evals /
                             3)  # Requirement of the fabolas implementation

                result_dict = fabolas(
                    objective_function=self.objective_fabolas,
                    s_min=s_min,
                    s_max=s_max,
                    lower=hp_space_lower,
                    upper=hp_space_upper,
                    num_iterations=self.n_func_evals,
                    rng=rand_num_generator,
                    n_init=n_init)
                run_successful = True

            elif self.hpo_method == 'Bohamiann':

                if did_warmstart:
                    # A single initial design point (warm start hyperparameter configuration)
                    kwargs['n_init'] = 1

                # Budget correct? // Set further parameters?
                result_dict = bayesian_optimization(
                    objective_function=self.objective_bohamiann,
                    lower=hp_space_lower,
                    upper=hp_space_upper,
                    model_type='bohamiann',
                    num_iterations=self.n_func_evals,
                    rng=rand_num_generator,
                    **kwargs)
                run_successful = True

            else:
                raise Exception('Unknown HPO-method!')

        # Algorithm crashed
        except:
            # Add a warning here
            run_successful = False

        # If the optimization run was successful, determine the optimization results
        if run_successful:

            for i in range(len(self.times)):
                # Subtract the start time to receive the wall clock time of each function evaluation
                self.times[i] = self.times[i] - start_time
            wall_clock_time = max(self.times)

            # Insert timestamp of 0.0 for the warm start hyperparameter configuration
            if did_warmstart:
                self.times.insert(0, 0.0)

            # Timestamps
            timestamps = self.times

            # Losses (not incumbent losses)
            losses = result_dict['y']

            evaluation_ids = list(range(1, len(losses) + 1))
            best_loss = min(losses)

            configurations = ()
            for config in result_dict['X']:
                # Cut off the unused Fabolas budget value at the end
                config = config[:len(self.hp_space)]
                config_dict = {}

                for i in range(len(config)):
                    if type(self.hp_space[i]) == skopt.space.space.Integer:
                        config_dict[self.hp_space[i].name] = int(
                            round(config[i]))

                    elif type(
                            self.hp_space[i]) == skopt.space.space.Categorical:
                        config_dict[self.hp_space[i].name] = list(
                            self.hp_space[i].categories)[int(round(config[i]))]

                    elif type(self.hp_space[i]) == skopt.space.space.Real:
                        config_dict[self.hp_space[i].name] = config[i]

                    else:
                        raise Exception(
                            'The continuous HP-space could not be converted correctly!'
                        )

                configurations = configurations + (config_dict, )

            # Find the best hyperparameter configuration (incumbent)
            best_configuration = {}
            x_opt = result_dict['x_opt']

            for i in range(len(x_opt)):
                if type(self.hp_space[i]) == skopt.space.space.Integer:
                    best_configuration[self.hp_space[i].name] = int(
                        round(x_opt[i]))

                elif type(self.hp_space[i]) == skopt.space.space.Categorical:
                    best_configuration[self.hp_space[i].name] = list(
                        self.hp_space[i].categories)[int(round(x_opt[i]))]

                elif type(self.hp_space[i]) == skopt.space.space.Real:
                    best_configuration[self.hp_space[i].name] = x_opt[i]

                else:
                    raise Exception(
                        'The continuous HP-space could not be converted correctly!'
                    )

        # Run not successful (algorithm crashed)
        else:
            evaluation_ids, timestamps, losses, configurations, best_loss, best_configuration, wall_clock_time = \
                self.impute_results_for_crash()

        # Pass the results to a TuningResult-Object
        result = TuningResult(evaluation_ids=evaluation_ids,
                              timestamps=timestamps,
                              losses=losses,
                              configurations=configurations,
                              best_loss=best_loss,
                              best_configuration=best_configuration,
                              wall_clock_time=wall_clock_time,
                              successful=run_successful,
                              did_warmstart=did_warmstart)

        return result
Example 25
def get_robo_results(count):
    lower = numpy.array([-5, 0])
    upper = numpy.array([10, 15])
    results = bayesian_optimization(branin_for_original_robo, lower, upper, num_iterations=count)
    return results
    benchmark += "_" + dataset

info = f.get_meta_information()
bounds = np.array(info['bounds'])

if method == "entropy_search":
    results = entropy_search(f,
                             bounds[:, 0],
                             bounds[:, 1],
                             num_iterations=n_iters,
                             n_init=n_init)
elif method == "gp_mcmc":
    results = bayesian_optimization(f,
                                    bounds[:, 0],
                                    bounds[:, 1],
                                    num_iterations=n_iters,
                                    n_init=n_init,
                                    model_type="gp_mcmc")
elif method == "gp":
    results = bayesian_optimization(f,
                                    bounds[:, 0],
                                    bounds[:, 1],
                                    num_iterations=n_iters,
                                    n_init=n_init,
                                    model_type="gp")
elif method == "rf":
    results = bayesian_optimization(f,
                                    bounds[:, 0],
                                    bounds[:, 1],
                                    num_iterations=n_iters,
                                    n_init=n_init,
Example 27
import numpy as np

from robo.fmin import bayesian_optimization


import logging
logging.basicConfig(level=logging.INFO)


# The objective function that we want to optimize.
# It gets a numpy array with shape (1,D) where D is the number of input dimensions
def objective_function(x):
    y = np.sin(3 * x[0]) * 4 * (x[0] - 1) * (x[0] + 2)
    return y

# Defining the bounds and dimensions of the input space
lower = np.array([0])
upper = np.array([6])

# Start Bayesian optimization to optimize the objective function
results = bayesian_optimization(objective_function, lower, upper, num_iterations=50)
print(results["x_opt"])
print(results["f_opt"])

Example 28
info = f.get_meta_information()
bounds = np.array(info['bounds'])

if method == "entropy_search":
    results = entropy_search(f,
                             bounds[:, 0],
                             bounds[:, 1],
                             num_iterations=n_iters,
                             n_init=n_init)
elif method == "gp_mcmc":
    results = bayesian_optimization(f,
                                    bounds[:, 0],
                                    bounds[:, 1],
                                    num_iterations=n_iters,
                                    acquisition_func=acquisition,
                                    maximizer=maximizer,
                                    n_init=n_init,
                                    model_type="gp_mcmc",
                                    rng=rng)
elif method == "gp":
    results = bayesian_optimization(f,
                                    bounds[:, 0],
                                    bounds[:, 1],
                                    num_iterations=n_iters,
                                    acquisition_func=acquisition,
                                    maximizer=maximizer,
                                    n_init=n_init,
                                    model_type="gp")
elif method == "rf":
    results = bayesian_optimization(f,
Example 29
    f = hpobench.SinTwo()
elif benchmark == "bohachevsky":
    f = hpobench.Bohachevsky()
elif benchmark == "levy":
    f = hpobench.Levy()

info = f.get_meta_information()
bounds = np.array(info['bounds'])

if method == "entropy_search":
    results = entropy_search(f, bounds[:, 0], bounds[:, 1],
                             num_iterations=n_iters, n_init=n_init)
elif method == "gp_mcmc":
    results = bayesian_optimization(f, bounds[:, 0], bounds[:, 1],
                                    num_iterations=n_iters,
                                    acquisition_func=acquisition,
                                    maximizer=maximizer,
                                    n_init=n_init, model_type="gp_mcmc", rng=rng)
elif method == "gp":
    results = bayesian_optimization(f, bounds[:, 0], bounds[:, 1],
                                    num_iterations=n_iters,
                                    acquisition_func=acquisition,
                                    maximizer=maximizer,
                                    n_init=n_init, model_type="gp")
elif method == "rf":
    results = bayesian_optimization(f, bounds[:, 0], bounds[:, 1],
                                    num_iterations=n_iters,
                                    n_init=n_init, model_type="rf")
elif method == "random_search":
    results = random_search(f, bounds[:, 0], bounds[:, 1],
                            num_iterations=n_iters)
Example 30
import numpy as np
import matplotlib.pyplot as plt
from robo.fmin import bayesian_optimization

import logging
logging.basicConfig(level=logging.INFO)


def fn(x):
    y = np.sin(3 * x[0]) * 4 * (x[0] - 1) * (x[0] + 2)
    return y


lower = np.array([-2])
upper = np.array([3])

bo = bayesian_optimization(fn, lower, upper, num_iterations=10)

for it in bo:
    print(it)

bo.incumbents
bo.incumbents_values

plt.plot(range(len(bo.incumbents_values)), bo.incumbents_values)

fn([150])

######################
# Improved interface #
######################
Example 31
        :param x_sample: domain sampled from bayesian optimization
        :return: the corresponding evaluation result
        '''
        for i in range(x.shape[0]):
            if (sample == x[i]).all():
                return y[i]

    result = np.zeros((len(y)-3, len(y)))
    for i in range(len(y)-3):
        print("step {0}/{1}".format(i+1, len(y)-3))
        label_ids = np.array([i, i+1, i+2])
        pool = np.array([x[i] for i in range(x.shape[0]) if i not in label_ids])

        bo_res = bayesian_optimization(objective_function, lower, upper, acquisition_func=args.acquisition_func,
                                       model_type=args.model_type, num_iterations=len(y),
                                       X_init=x[label_ids], Y_init=y[label_ids], kernel=kernel,
                                       sampling_method=args.sampling_method, replacement=False,
                                       pool=pool, best=100000)
        x_samples = np.array(bo_res['X'])
        print(x_samples.shape[0])
        sampling_order = []
        for j in range(x_samples.shape[0]):
            for k in range(x.shape[0]):
                if (x_samples[j] == x[k]).all():
                    sampling_order.append(k)
        print(sampling_order)

        result[i] = np.array(sampling_order)

    model_name = "bo_" + args.acquisition_func + "_" + args.kernel + "_" + args.model_type
    output_file = args.output + "/" + args.architecture + "/" + args.dataset + "/" + \
Example 32
                    y_test,
                    parser_output_path=parser_output_path,
                    perl_script_path=perl_script_path)
                run_dict[baseline].append((val_accuracy, test_accuracy))

            # define the lower and upper bounds of the input space [-1, 1]
            lower = np.array(len(feature_names) * [-1])
            upper = np.array(len(feature_names) * [1])
            print('Lower limits shape:', lower.shape)
            print('Upper limits shape:', upper.shape)

            print('Running Bayesian Optimization...')
            print("Bayesian Opt number of iteration : {}".format(
                args.num_iterations))
            res = bayesian_optimization(objective_function,
                                        lower=lower,
                                        upper=upper,
                                        num_iterations=args.num_iterations)

            best_feature_weights = res['x_opt']
            print('Best feature weights', best_feature_weights)

            print("Selecting data based on best feature weights :")
            train_subset, labels_subset = task_utils.get_data_subsets(
                feature_values, best_feature_weights, X_train, y_train,
                args.task, num_train_examples)
            val_accuracy, test_accuracy = train_and_evaluate(train_subset,
                                                             labels_subset,
                                                             X_val,
                                                             y_val,
                                                             X_test,
                                                             y_test,
Example 33
"""
This example shows how RoBO can be combined with HPOlib.
Before you run it, make sure HPOlib is installed.
For further information, see https://github.com/automl/HPOlib2.git
import numpy as np
from hpolib.benchmarks.synthetic_functions import Branin

from robo.fmin import bayesian_optimization

f = Branin()
info = f.get_meta_information()
bounds = np.array(info['bounds'])

# Start Bayesian optimization to optimize the objective function
results = bayesian_optimization(f,
                                bounds[:, 0],
                                bounds[:, 1],
                                num_iterations=50)
print(results["x_opt"])
print(results["f_opt"])
Example 34
import sys

import numpy as np

import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

from robo.fmin import bayesian_optimization


# Global Configuration
try:
  CFG = sys.argv[1]
  CFG = CFG.replace(".py", "")
  SEED = int(sys.argv[2])
except:
  logger.info("USAGE: python run.py CONFIGURATION_FILE_PATH SEED_VALUE")
  sys.exit(-1)


PREFIX = CFG+("_%i" % SEED)
CFG = CFG+".py"
OUTFILE = "%s_bo_posterior" % PREFIX
logger.info("OPENING CODE FROM CFG=%s (SEED=%i) => OUTPUT:%s" % (CFG, SEED, OUTFILE))
exec(open(CFG).read())


# BlackBox BO over posterior
rng = np.random.RandomState(SEED)
res = bayesian_optimization(objective_function, lower, upper, num_iterations=num_iterations, X_init=X_init, Y_init=Y_init, n_init=n_init, rng=rng)


Example 35
    output_path = "./experiments/fabolas/results/cnn_%s/ei_%d" % (dataset, run_id)
elif dataset == "svhn":
    f = ConvolutionalNeuralNetworkOnSVHN(rng=rng)
    num_iterations = 15
    output_path = "./experiments/fabolas/results/cnn_%s/ei_%d" % (dataset, run_id)
elif dataset == "res_net":
    f = ResidualNeuralNetworkOnCIFAR10(rng=rng)
    num_iterations = 10
    output_path = "./experiments/fabolas/results/%s/ei_%d" % (dataset, run_id)

os.makedirs(output_path, exist_ok=True)

info = f.get_meta_information()
bounds = np.array(info['bounds'])
results = bayesian_optimization(f, bounds[:, 0], bounds[:, 1],
                                num_iterations=num_iterations, n_init=2,
                                rng=rng, output_path=output_path)

results["run_id"] = run_id
results['X'] = results['X'].tolist()
results['y'] = results['y'].tolist()

test_error = []
current_inc = None
current_inc_val = None

key = "incumbents"

for inc in results["incumbents"]:
    print(inc)
    if current_inc == inc: