Code example #1
    def __init__(self, config_space, burnin=3000, n_iters=10000):

        super(Bohamiann, self).__init__(sacred_space_to_configspace(config_space))
        # np.random.seed() returns None, so this RandomState is seeded
        # non-deterministically from OS entropy
        self.rng = np.random.RandomState(np.random.seed())
        self.n_dims = len(self.config_space.get_hyperparameters())

        # All inputs are mapped to be in [0, 1]^D
        self.lower = np.zeros([self.n_dims])
        self.upper = np.ones([self.n_dims])
        self.incumbents = []
        self.X = None
        self.y = None

        self.model = BayesianNeuralNetwork(sampling_method="sghmc",
                                           l_rate=np.sqrt(1e-4),
                                           mdecay=0.05,
                                           burn_in=burnin,
                                           n_iters=n_iters,
                                           precondition=True,
                                           normalize_input=True,
                                           normalize_output=True)

        self.acquisition_func = LogEI(self.model)

        self.maximizer = Direct(self.acquisition_func,
                                self.lower,
                                self.upper,
                                verbose=False)
Code example #2
File: test_log_ei.py Project: numairmansur/RoBO
    def test_compute(self):
        log_ei = LogEI(self.model)

        X_test = np.random.rand(5, 2)
        a = log_ei.compute(X_test, derivative=False)
        assert a.shape[0] == X_test.shape[0]
        assert len(a.shape) == 1
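The test assumes a self.model fixture created in the suite's setUp, which the listing omits. A minimal sketch of such a fixture, assuming RoBO's GaussianProcess with the george Matern-5/2 kernel used elsewhere in this listing (the import paths follow RoBO's layout and are assumptions, not part of the original test):

import george
import numpy as np
from robo.models.gaussian_process import GaussianProcess
from robo.priors.default_priors import DefaultPrior
from robo.acquisition_functions.log_ei import LogEI

rng = np.random.RandomState(42)
X = rng.rand(10, 2)                      # toy training inputs in [0, 1]^2
y = np.sin(3 * X[:, 0]) + X[:, 1] ** 2   # toy targets, illustrative only

kernel = 2 * george.kernels.Matern52Kernel(np.ones([2]), ndim=2)
model = GaussianProcess(kernel,
                        prior=DefaultPrior(len(kernel) + 1),
                        rng=rng,
                        lower=np.zeros([2]),
                        upper=np.ones([2]))
model.train(X, y)

log_ei = LogEI(model)
a = log_ei.compute(rng.rand(5, 2), derivative=False)   # shape (5,): one LogEI value per row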
Code example #3
class Bohamiann(Optimizer):
    def __init__(self, config_space, burnin=3000, n_iters=10000):

        super(Bohamiann, self).__init__(sacred_space_to_configspace(config_space))
        # np.random.seed() returns None, so this RandomState is seeded
        # non-deterministically from OS entropy
        self.rng = np.random.RandomState(np.random.seed())
        self.n_dims = len(self.config_space.get_hyperparameters())

        # All inputs are mapped to be in [0, 1]^D
        self.lower = np.zeros([self.n_dims])
        self.upper = np.ones([self.n_dims])
        self.incumbents = []
        self.X = None
        self.y = None

        self.model = BayesianNeuralNetwork(sampling_method="sghmc",
                                           l_rate=np.sqrt(1e-4),
                                           mdecay=0.05,
                                           burn_in=burnin,
                                           n_iters=n_iters,
                                           precondition=True,
                                           normalize_input=True,
                                           normalize_output=True)

        self.acquisition_func = LogEI(self.model)

        self.maximizer = Direct(self.acquisition_func,
                                self.lower,
                                self.upper,
                                verbose=False)

    def suggest_configuration(self):

        if self.X is None and self.y is None:
            # No data points yet to train a model, just return a random configuration instead
            new_x = init_random_uniform(self.lower,
                                        self.upper,
                                        n_points=1,
                                        rng=self.rng)[0, :]

        else:
            # Train the model on all finished runs
            self.model.train(self.X, self.y)
            self.acquisition_func.update(self.model)

            # Maximize the acquisition function
            new_x = self.maximizer.maximize()

        # Maps from [0, 1]^D space back to original space
        next_config = Configuration(self.config_space, vector=new_x)

        # Transform to sacred configuration
        result = configspace_config_to_sacred(next_config)

        return result
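Nothing in the class shows how self.X and self.y get filled; presumably the surrounding experiment loop records finished runs on the optimizer. A hypothetical driver under that assumption (run_experiment and config_to_vector stand in for user code; the real Optimizer base class may do this bookkeeping itself):

import numpy as np

opt = Bohamiann(config_space)            # config_space: a sacred search space, defined elsewhere
for _ in range(20):
    suggestion = opt.suggest_configuration()
    loss = run_experiment(suggestion)    # run_experiment: user code, hypothetical
    x = config_to_vector(suggestion)     # hypothetical helper mapping back to [0, 1]^D
    opt.X = x[None, :] if opt.X is None else np.vstack((opt.X, x))
    opt.y = np.array([loss]) if opt.y is None else np.append(opt.y, loss)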
Code example #4
def build_acquisition_func(acquisition_func, model):
    """
    Build acquisition function

    Parameters
    ----------
    acquisition_func: str
        Name of the acquisition function. Can be one of ``['ei', 'log_ei', 'pi', 'lcb']``.
    model: ``robo.models.base_model.BaseModel``
        Model used for the Bayesian optimization.

    """
    if acquisition_func == "ei":
        acquisition_func = EI(model)
    elif acquisition_func == "log_ei":
        acquisition_func = LogEI(model)
    elif acquisition_func == "pi":
        acquisition_func = PI(model)
    elif acquisition_func == "lcb":
        acquisition_func = LCB(model)
    else:
        raise ValueError("'{}' is not a valid acquisition function".format(
            acquisition_func))

    return acquisition_func
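A usage sketch, assuming a trained RoBO model (for instance the GP fixture sketched under example #2) is in scope:

import numpy as np

acq = build_acquisition_func("log_ei", model)   # model: any trained robo BaseModel
scores = acq.compute(np.random.rand(5, 2), derivative=False)   # one value per candidate row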
Code example #5
    def test_log_ei(self):
        log_ei = LogEI(self.model)
        acq = MarginalizationGPMCMC(log_ei)

        X_test = np.random.rand(5, 2)
        a = acq.compute(X_test, derivative=False)
        assert a.shape[0] == X_test.shape[0]
        assert len(a.shape) == 1
Code example #6
def benchmark_function(
        function,
        seed,
        n_eval=20,
        n_initial_points=5,
        model_class=None,
        model_kwargs=None,
):
    lower = np.array([-10])
    upper = np.array([10])
    rng1 = np.random.RandomState(seed)
    rng2 = np.random.RandomState(seed)

    cov_amp = 2
    n_dims = lower.shape[0]

    initial_ls = np.ones([n_dims])
    exp_kernel = george.kernels.Matern52Kernel(initial_ls,
                                               ndim=n_dims)
    kernel = cov_amp * exp_kernel

    prior = DefaultPrior(len(kernel) + 1)

    if model_class is None:
        model = GaussianProcess(
            kernel,
            prior=prior,
            rng=rng1,
            normalize_output=True,
            normalize_input=True,
            lower=lower,
            upper=upper,
            noise=1e-3,
        )
    else:
        model = model_class(rng=rng1, **model_kwargs)

    acq = LogEI(model)
    max_func = SciPyOptimizer(acq, lower, upper, n_restarts=50, rng=rng2)

    bo = BayesianOptimization(
        objective_func=function,
        lower=lower,
        upper=upper,
        acquisition_func=acq,
        model=model,
        initial_points=n_initial_points,
        initial_design=init_latin_hypercube_sampling,
        rng=rng2,
        maximize_func=max_func
    )

    bo.run(n_eval)
    rval = np.minimum.accumulate(bo.y)

    return rval
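A possible invocation on a made-up 1-D objective (the objective body is illustrative and not part of the source):

import numpy as np

def toy_objective(x):
    # x: np.ndarray of shape (1,), within the [-10, 10] bounds hard-coded above
    return float(np.sin(3 * x[0]) + 0.1 * x[0] ** 2)

curve = benchmark_function(toy_objective, seed=0, n_eval=20)
# curve[i] is the best function value observed after i + 1 evaluations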
Code example #7
    def suggest_configuration(self):
        if self.X is None and self.y is None:
            new_x = init_random_uniform(self.lower, self.upper,
                                        n_points=1, rng=self.rng)[0, :]

        elif self.X.shape[0] == 1:
            # We need at least 2 data points to train a GP
            new_x = init_random_uniform(self.lower, self.upper,
                                        n_points=1, rng=self.rng)[0, :]

        else:
            cov_amp = 1
            n_dims = self.lower.shape[0]

            initial_ls = np.ones([n_dims])
            exp_kernel = george.kernels.Matern52Kernel(initial_ls,
                                                       ndim=n_dims)
            kernel = cov_amp * exp_kernel

            prior = DefaultPrior(len(kernel) + 1)

            model = GaussianProcessMCMC(kernel, prior=prior,
                                        n_hypers=self.n_hypers,
                                        chain_length=self.chain_length,
                                        burnin_steps=self.burnin,
                                        normalize_input=False,
                                        normalize_output=True,
                                        rng=self.rng,
                                        lower=self.lower,
                                        upper=self.upper)

            a = LogEI(model)

            acquisition_func = MarginalizationGPMCMC(a)

            max_func = Direct(acquisition_func, self.lower, self.upper, verbose=False)

            model.train(self.X, self.y)

            acquisition_func.update(model)

            new_x = max_func.maximize()

        next_config = Configuration(self.config_space, vector=new_x)

        # Transform to sacred configuration
        result = configspace_config_to_sacred(next_config)

        return result
Code example #8
File: warmstart_mtbo.py Project: sonyeric/RoBO
def warmstart_mtbo(objective_function,
                   lower,
                   upper,
                   observed_X,
                   observed_y,
                   n_tasks=2,
                   num_iterations=30,
                   model_type="gp_mcmc",
                   target_task_id=1,
                   burnin=100,
                   chain_length=200,
                   n_hypers=20,
                   output_path=None,
                   rng=None):
    """
    Interface to MTBO [1], which uses a cheaper auxiliary task to warm-start the optimization on a new but
    similar task. Note that only the warm start is performed here; if you want to speed up Bayesian
    optimization by evaluating the auxiliary task during the optimization itself, check out mtbo() or fabolas().

    [1] Multi-Task Bayesian Optimization
        K. Swersky and J. Snoek and R. Adams
        Proceedings of the 27th International Conference on Advances in Neural Information Processing Systems (NIPS'13)

    Parameters
    ----------
    objective_function: function
        Objective function that will be optimized
    lower: np.array(D,)
        Lower bound of the input space
    upper: np.array(D,)
        Upper bound of the input space
    observed_X: np.array(N, D + 1)
        Observed points from the auxiliary task. The last column must hold the task id of the auxiliary
        task (default=0); the main task is assumed to have task id 1.
    observed_y: np.array(N,)
        Corresponding target values
    n_tasks: int
        Number of tasks
    target_task_id: int
        The id of the target task
    num_iterations: int
        Number of iterations
    chain_length : int
        The length of the MCMC chain for each walker.
    burnin : int
        The number of burnin steps before the actual MCMC sampling starts.
    output_path: string
        Specifies the path where the intermediate output after each iteration will be saved.
        If None no output will be saved to disk.
    rng: numpy.random.RandomState
        Random number generator

    Returns
    -------
        dict with all results
    """

    assert lower.shape[0] == upper.shape[0], \
        "Dimension mismatch between upper and lower bound"

    time_start = time.time()
    if rng is None:
        rng = np.random.RandomState(np.random.randint(0, 10000))

    n_dims = lower.shape[0]

    # Bookkeeping
    time_func_eval = []
    time_overhead = []
    incumbents = []
    incumbent_values = []
    runtime = []

    X = deepcopy(observed_X)
    y = deepcopy(observed_y)

    if model_type == "gp_mcmc":
        # Define model for the objective function
        cov_amp = 1  # Covariance amplitude
        kernel = cov_amp

        # ARD Kernel for the configuration space
        for d in range(n_dims):
            kernel *= george.kernels.Matern52Kernel(np.ones([1]) * 0.01,
                                                    ndim=n_dims + 1,
                                                    axes=d)

        task_kernel = george.kernels.TaskKernel(n_dims + 1, n_dims, n_tasks)
        kernel *= task_kernel

        # Take 3 times more samples than we have hyperparameters
        if n_hypers < 2 * len(kernel):
            n_hypers = 3 * len(kernel)
            if n_hypers % 2 == 1:
                n_hypers += 1

        prior = MTBOPrior(len(kernel) + 1,
                          n_ls=n_dims,
                          n_kt=len(task_kernel),
                          rng=rng)

        model_objective = MTBOGPMCMC(kernel,
                                     prior=prior,
                                     burnin_steps=burnin,
                                     chain_length=chain_length,
                                     n_hypers=n_hypers,
                                     lower=lower,
                                     upper=upper,
                                     rng=rng)
    elif model_type == "bohamiann":
        model_objective = WrapperBohamiannMultiTask(n_tasks=n_tasks)

    acquisition_func = LogEI(model_objective)

    # Optimize the acquisition function only on the main task. Note that
    # init_points, referenced inside, is only defined below; the closure
    # resolves it at call time.
    def wrapper(x):
        x_ = np.append(x, np.ones([x.shape[0], 1]) * target_task_id, axis=1)

        if y.shape[0] == init_points:
            eta = 0
        else:
            eta = np.min(y[init_points:])
        a = acquisition_func(x_, eta=eta)
        return a

    maximizer = DifferentialEvolution(wrapper, lower, upper)

    X = np.array(X)
    y = np.array(y)

    init_points = y.shape[0]

    for it in range(num_iterations):
        logger.info("Start iteration %d ... ", it)

        start_time = time.time()

        # Train models
        model_objective.train(X, y, do_optimize=True)

        # Maximize acquisition function
        acquisition_func.update(model_objective)

        new_x = maximizer.maximize()
        new_x = np.append(new_x, np.array([target_task_id]))

        time_overhead.append(time.time() - start_time)
        logger.info("Optimization overhead was %f seconds", time_overhead[-1])

        # Evaluate the chosen configuration
        logger.info("Evaluate candidate %s", str(new_x))
        start_time = time.time()
        new_y = objective_function(new_x[:-1], int(new_x[-1]))
        time_func_eval.append(time.time() - start_time)

        logger.info("Configuration achieved a performance of %f", new_y)
        logger.info("Evaluation of this configuration took %f seconds",
                    time_func_eval[-1])

        # Add new observation to the data
        X = np.concatenate((X, new_x[None, :]), axis=0)
        y = np.concatenate((y, np.array([new_y])), axis=0)

        # Estimate incumbent as the best observed value so far
        best_idx = np.argmin(y[init_points:]) + init_points
        incumbent = X[best_idx][:-1]
        incumbent_value = y[best_idx]

        incumbents.append(incumbent)
        incumbent_values.append(incumbent_value)
        logger.info("Current incumbent %s with estimated performance %f",
                    str(incumbent), incumbent_value)

        runtime.append(time.time() - time_start)

        if output_path is not None:
            data = dict()
            data["optimization_overhead"] = time_overhead[it]
            data["runtime"] = runtime[it]
            data["incumbent"] = incumbents[it].tolist()
            data["time_func_eval"] = time_func_eval[it]
            data["iteration"] = it

            json.dump(
                data,
                open(os.path.join(output_path, "mtbo_iter_%d.json" % it), "w"))

    logger.info("Final incumbent %s with estimated performance %f",
                str(incumbent), incumbent_value)

    results = dict()
    results["x_opt"] = incumbent.tolist()
    results["incumbents"] = [inc.tolist() for inc in incumbents]
    results["runtime"] = runtime
    results["overhead"] = time_overhead
    results["time_func_eval"] = time_func_eval
    results["incumbent_values"] = incumbent_values
    results["X"] = X
    results["y"] = y

    return results
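The layout of observed_X is easy to get wrong; a sketch of how a caller might assemble the inputs, with the auxiliary task labelled 0 and the target task labelled 1 as the docstring assumes (cheap_objective and expensive_objective stand in for user code):

import numpy as np

rng = np.random.RandomState(0)
X_aux = rng.rand(50, 2) * 20 - 10                     # 50 points from the cheap task, D = 2
observed_X = np.hstack([X_aux, np.zeros([50, 1])])    # last column: auxiliary task id 0
observed_y = np.array([cheap_objective(x) for x in X_aux])   # cheap_objective: user code

def objective(x, task_id):
    # receives the D-dimensional point and the integer task id (here always 1)
    return expensive_objective(x)                     # expensive_objective: user code

results = warmstart_mtbo(objective,
                         lower=np.array([-10., -10.]),
                         upper=np.array([10., 10.]),
                         observed_X=observed_X,
                         observed_y=observed_y,
                         target_task_id=1)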
Code example #9
def bohamiann(objective_function,
              lower,
              upper,
              num_iterations=30,
              maximizer="random",
              acquisition_func="log_ei",
              n_init=3,
              output_path=None,
              rng=None):
    """
    Bohamiann uses Bayesian neural networks to model the objective function [1] inside Bayesian optimization.
    Bayesian neural networks usually scale better with the number of function evaluations and the number of dimensions
    than Gaussian processes.

    [1] Bayesian optimization with robust Bayesian neural networks
        J. T. Springenberg and A. Klein and S. Falkner and F. Hutter
        Advances in Neural Information Processing Systems 29

    Parameters
    ----------
    objective_function: function
        The objective function that is minimized. This function gets a numpy array (D,) as input and returns
        the function value (scalar)
    lower: np.ndarray (D,)
        The lower bound of the search space
    upper: np.ndarray (D,)
        The upper bound of the search space
    num_iterations: int
        The number of iterations (initial design + BO)
    acquisition_func: {"ei", "log_ei", "lcb", "pi"}
        The acquisition function
    maximizer: {"direct", "cmaes", "random", "scipy"}
        The optimizer for the acquisition function. NOTE: "cmaes" only works in D > 1 dimensions
    n_init: int
        Number of points for the initial design. Make sure that it is <= num_iterations.
    output_path: string
        Specifies the path where the intermediate output after each iteration will be saved.
        If None no output will be saved to disk.
    rng: numpy.random.RandomState
        Random number generator

    Returns
    -------
        dict with all results
    """
    assert upper.shape[0] == lower.shape[0]
    assert n_init <= num_iterations, "Number of initial design points has to be <= the number of iterations"

    if rng is None:
        rng = np.random.RandomState(np.random.randint(0, 10000))

    model = BayesianNeuralNetwork(sampling_method="sghmc",
                                  l_rate=np.sqrt(1e-4),
                                  mdecay=0.05,
                                  burn_in=3000,
                                  n_iters=50000,
                                  precondition=True,
                                  normalize_input=True,
                                  normalize_output=True)

    if acquisition_func == "ei":
        a = EI(model)
    elif acquisition_func == "log_ei":
        a = LogEI(model)
    elif acquisition_func == "pi":
        a = PI(model)
    elif acquisition_func == "lcb":
        a = LCB(model)
    else:
        raise ValueError("'{}' is not a valid acquisition function".format(
            acquisition_func))

    if maximizer == "cmaes":
        max_func = CMAES(a, lower, upper, verbose=True, rng=rng)
    elif maximizer == "direct":
        max_func = Direct(a, lower, upper, verbose=True)
    elif maximizer == "random":
        max_func = RandomSampling(a, lower, upper, rng=rng)
    elif maximizer == "scipy":
        max_func = SciPyOptimizer(a, lower, upper, rng=rng)
    else:
        raise ValueError("'{}' is not a valid function to maximize the "
                         "acquisition function".format(maximizer))

    bo = BayesianOptimization(objective_function,
                              lower,
                              upper,
                              a,
                              model,
                              max_func,
                              initial_points=n_init,
                              output_path=output_path,
                              rng=rng)

    x_best, f_min = bo.run(num_iterations)

    results = dict()
    results["x_opt"] = x_best
    results["f_opt"] = f_min
    results["incumbents"] = [inc for inc in bo.incumbents]
    results["incumbent_values"] = [val for val in bo.incumbents_values]
    results["runtime"] = bo.runtime
    results["overhead"] = bo.time_overhead
    results["X"] = [x.tolist() for x in bo.X]
    results["y"] = [y for y in bo.y]
    return results
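A usage sketch on a toy 1-D function (illustrative only). Note the hard-coded 3000 burn-in and 50000 SGHMC iterations above: the network is retrained as observations arrive, so even small runs take a while.

import numpy as np

def f(x):
    return float((x[0] - 2) ** 2 + np.sin(5 * x[0]))   # toy objective, illustrative only

results = bohamiann(f,
                    lower=np.array([-5.]),
                    upper=np.array([5.]),
                    num_iterations=20,
                    maximizer="random",
                    acquisition_func="log_ei",
                    n_init=3)
print(results["x_opt"], results["f_opt"])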
Code example #10
def bayesian_optimization(objective_function,
                          lower,
                          upper,
                          num_iterations=30,
                          maximizer="random",
                          acquisition_func="log_ei",
                          model_type="gp_mcmc",
                          n_init=3,
                          rng=None,
                          output_path=None):
    """
    General interface for Bayesian optimization for global black box
    optimization problems.

    Parameters
    ----------
    objective_function: function
        The objective function that is minimized. This function gets a numpy
        array (D,) as input and returns the function value (scalar)
    lower: np.ndarray (D,)
        The lower bound of the search space
    upper: np.ndarray (D,)
        The upper bound of the search space
    num_iterations: int
        The number of iterations (initial design + BO)
    maximizer: {"direct", "cmaes", "random", "scipy"}
        The optimizer for the acquisition function. NOTE: "cmaes" only works in D > 1 dimensions
    acquisition_func: {"ei", "log_ei", "lcb", "pi"}
        The acquisition function
    model_type: {"gp", "gp_mcmc", "rf"}
        The model for the objective function.
    n_init: int
        Number of points for the initial design. Make sure that it
        is <= num_iterations.
    output_path: string
        Specifies the path where the intermediate output after each iteration will be saved.
        If None no output will be saved to disk.
    rng: numpy.random.RandomState
        Random number generator

    Returns
    -------
        dict with all results
    """
    assert upper.shape[0] == lower.shape[0], "Dimension mismatch"
    assert np.all(lower < upper), "Lower bound >= upper bound"
    assert n_init <= num_iterations, "Number of initial design points has to be <= the number of iterations"

    if rng is None:
        rng = np.random.RandomState(np.random.randint(0, 10000))

    cov_amp = 2
    n_dims = lower.shape[0]

    initial_ls = np.ones([n_dims])
    exp_kernel = george.kernels.Matern52Kernel(initial_ls, ndim=n_dims)
    kernel = cov_amp * exp_kernel

    prior = DefaultPrior(len(kernel) + 1)

    n_hypers = 3 * len(kernel)
    if n_hypers % 2 == 1:
        n_hypers += 1

    if model_type == "gp":
        model = GaussianProcess(kernel,
                                prior=prior,
                                rng=rng,
                                normalize_output=False,
                                normalize_input=True,
                                lower=lower,
                                upper=upper)
    elif model_type == "gp_mcmc":
        model = GaussianProcessMCMC(kernel,
                                    prior=prior,
                                    n_hypers=n_hypers,
                                    chain_length=200,
                                    burnin_steps=100,
                                    normalize_input=True,
                                    normalize_output=True,
                                    rng=rng,
                                    lower=lower,
                                    upper=upper)

    elif model_type == "rf":
        model = RandomForest(rng=rng)

    else:
        raise ValueError("'{}' is not a valid model".format(model_type))

    if acquisition_func == "ei":
        a = EI(model)
    elif acquisition_func == "log_ei":
        a = LogEI(model)
    elif acquisition_func == "pi":
        a = PI(model)
    elif acquisition_func == "lcb":
        a = LCB(model)
    else:
        raise ValueError("'{}' is not a valid acquisition function".format(
            acquisition_func))

    if model_type == "gp_mcmc":
        acquisition_func = MarginalizationGPMCMC(a)
    else:
        acquisition_func = a

    if maximizer == "cmaes":
        max_func = CMAES(acquisition_func,
                         lower,
                         upper,
                         verbose=False,
                         rng=rng)
    elif maximizer == "direct":
        max_func = Direct(acquisition_func, lower, upper, verbose=True)
    elif maximizer == "random":
        max_func = RandomSampling(acquisition_func, lower, upper, rng=rng)
    elif maximizer == "scipy":
        max_func = SciPyOptimizer(acquisition_func, lower, upper, rng=rng)

    else:
        raise ValueError("'{}' is not a valid function to maximize the "
                         "acquisition function".format(maximizer))

    bo = BayesianOptimization(objective_function,
                              lower,
                              upper,
                              acquisition_func,
                              model,
                              max_func,
                              initial_points=n_init,
                              rng=rng,
                              output_path=output_path)

    x_best, f_min = bo.run(num_iterations)

    results = dict()
    results["x_opt"] = x_best
    results["f_opt"] = f_min
    results["incumbents"] = [inc for inc in bo.incumbents]
    results["incumbent_values"] = [val for val in bo.incumbents_values]
    results["runtime"] = bo.runtime
    results["overhead"] = bo.time_overhead
    results["X"] = [x.tolist() for x in bo.X]
    results["y"] = [y for y in bo.y]
    return results
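A usage sketch; with the default model_type="gp_mcmc" the LogEI values are marginalized over the hyperparameter samples via MarginalizationGPMCMC, exactly as wired above (the quadratic objective is illustrative only):

import numpy as np

def objective(x):
    return float(np.sum(x ** 2))   # toy quadratic, illustrative only

results = bayesian_optimization(objective,
                                lower=np.array([-5., -5.]),
                                upper=np.array([5., 5.]),
                                num_iterations=30,
                                acquisition_func="log_ei",
                                maximizer="random")
print(results["x_opt"], results["f_opt"])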
Code example #11
File: bayesian_optimization.py Project: Este1le/RoBO
def bayesian_optimization(objective_function,
                          lower,
                          upper,
                          num_iterations=30,
                          X_init=None,
                          Y_init=None,
                          maximizer="random",
                          acquisition_func="log_ei",
                          model_type="gp_mcmc",
                          n_init=3,
                          rng=None,
                          output_path=None,
                          kernel=None,
                          sampling_method="origin",
                          distance="cosine",
                          replacement=True,
                          pool=None,
                          best=None):
    """
    General interface for Bayesian optimization for global black box
    optimization problems.

    Parameters
    ----------
    objective_function: function
        The objective function that is minimized. This function gets a numpy
        array (D,) as input and returns the function value (scalar)
    lower: np.ndarray (D,)
        The lower bound of the search space
    upper: np.ndarray (D,)
        The upper bound of the search space
    num_iterations: int
        The number of iterations (initial design + BO)
    X_init: np.ndarray(N, D)
        Initial points to warm-start BO
    Y_init: np.ndarray(N, 1)
        Function values of the initial points
    maximizer: {"random", "scipy", "differential_evolution"}
        The optimizer for the acquisition function.
    acquisition_func: {"ei", "log_ei", "lcb", "pi"}
        The acquisition function
    model_type: {"gp", "gp_mcmc", "rf", "bohamiann", "dngo"}
        The model for the objective function.
    n_init: int
        Number of points for the initial design. Make sure that it
        is <= num_iterations.
    output_path: string
        Specifies the path where the intermediate output after each iteration will be saved.
        If None no output will be saved to disk.
    rng: numpy.random.RandomState
        Random number generator
    kernel: george.kernels.Kernel
        The kernel object for the Gaussian process. The commented-out block below shows the string
        options ("constant", "polynomial", "linear", "dotproduct", "exp", "expsquared", "matern32",
        "matern52", "rationalquadratic", "cosine", "expsine2", "heuristic") this argument once accepted.
    sampling_method: {"origin", "approx", "exact"}
        Specifies how the next sample for updating the model is chosen.
        approx: choose the sample in the candidate pool that is closest (measured by the distance
        argument) to the maximizer of the acquisition function.
        exact: evaluate all samples in the candidate pool on the acquisition function
        and choose the one with the maximum value.
    distance: {"cosine", "euclidean"}
        The distance measure used for approximate sampling.
    replacement: boolean
        Whether to sample from the pool with replacement.
    pool: np.ndarray(N, D)
        Candidate pool containing the possible inputs x
    best: float
        Stop the run once the best point is sampled.

    Returns
    -------
        dict with all results
    """
    assert upper.shape[0] == lower.shape[0], "Dimension mismatch"
    assert np.all(lower < upper), "Lower bound >= upper bound"
    assert n_init <= num_iterations, "Number of initial design points has to be <= the number of iterations"

    if rng is None:
        rng = np.random.RandomState(np.random.randint(0, 10000))

    cov_amp = 2
    #n_dims = lower.shape[0]

    #initial_ls = np.ones([n_dims])

    # if kernel == "constant":
    #     exp_kernel = george.kernels.ConstantKernel(1, ndim=n_dims)
    # elif kernel == "polynomial":
    #     exp_kernel = george.kernels.PolynomialKernel(log_sigma2=1, order=3, ndim=n_dims)
    # elif kernel == "linear":
    #     exp_kernel = george.kernels.LinearKernel(log_gamma2=1, order=3, ndim=n_dims)
    # elif kernel == "dotproduct":
    #     exp_kernel = george.kernels.DotProductKernel(ndim=n_dims)
    # elif kernel == "exp":
    #     exp_kernel = george.kernels.ExpKernel(initial_ls, ndim=n_dims)
    # elif kernel == "expsquared":
    #     exp_kernel = george.kernels.ExpSquaredKernel(initial_ls, ndim=n_dims)
    # elif kernel == "matern32":
    #     exp_kernel = george.kernels.Matern32Kernel(initial_ls, ndim=n_dims)
    # elif kernel == "matern52":
    #     exp_kernel = george.kernels.Matern52Kernel(initial_ls, ndim=n_dims)
    # elif kernel == "rationalquadratic":
    #     exp_kernel = george.kernels.RationalQuadraticKernel(log_alpha=1, metric=initial_ls, ndim=n_dims)
    # elif kernel == "cosine":
    #     exp_kernel = george.kernels.CosineKernel(4, ndim=n_dims)
    # elif kernel == "expsine2":
    #     exp_kernel = george.kernels.ExpSine2Kernel(1, 2, ndim=n_dims)
    # elif kernel == "heuristic":
    #     exp_kernel = george.kernels.PythonKernel(heuristic_kernel_function, ndim=n_dims)
    # else:
    #     raise ValueError("'{}' is not a valid kernel".format(kernel))

    kernel = cov_amp * kernel

    prior = DefaultPrior(len(kernel) + 1)

    n_hypers = 3 * len(kernel)
    if n_hypers % 2 == 1:
        n_hypers += 1

    if model_type == "gp":
        model = GaussianProcess(kernel,
                                prior=prior,
                                rng=rng,
                                normalize_output=False,
                                normalize_input=True,
                                lower=lower,
                                upper=upper)
    elif model_type == "gp_mcmc":
        model = GaussianProcessMCMC(kernel,
                                    prior=prior,
                                    n_hypers=n_hypers,
                                    chain_length=200,
                                    burnin_steps=100,
                                    normalize_input=True,
                                    normalize_output=False,
                                    rng=rng,
                                    lower=lower,
                                    upper=upper)

    elif model_type == "rf":
        model = RandomForest(rng=rng)

    elif model_type == "bohamiann":
        model = WrapperBohamiann()

    elif model_type == "dngo":
        model = DNGO()

    else:
        raise ValueError("'{}' is not a valid model".format(model_type))

    if acquisition_func == "ei":
        a = EI(model)
    elif acquisition_func == "log_ei":
        a = LogEI(model)
    elif acquisition_func == "pi":
        a = PI(model)
    elif acquisition_func == "lcb":
        a = LCB(model)
    else:
        raise ValueError("'{}' is not a valid acquisition function".format(
            acquisition_func))

    if model_type == "gp_mcmc":
        acquisition_func = MarginalizationGPMCMC(a)
    else:
        acquisition_func = a

    if maximizer == "random":
        max_func = RandomSampling(acquisition_func, lower, upper, rng=rng)
    elif maximizer == "scipy":
        max_func = SciPyOptimizer(acquisition_func, lower, upper, rng=rng)
    elif maximizer == "differential_evolution":
        max_func = DifferentialEvolution(acquisition_func,
                                         lower,
                                         upper,
                                         rng=rng)
    else:
        raise ValueError("'{}' is not a valid function to maximize the "
                         "acquisition function".format(maximizer))

    if sampling_method == "exact":
        max_func = ExactSampling(acquisition_func,
                                 lower,
                                 upper,
                                 pool,
                                 replacement,
                                 rng=rng)
        init_design = init_exact_random
    elif sampling_method == "approx":
        max_func = ApproxSampling(acquisition_func,
                                  lower,
                                  upper,
                                  pool,
                                  replacement,
                                  distance,
                                  rng=rng)
        init_design = init_exact_random
    else:
        init_design = init_latin_hypercube_sampling

    bo = BayesianOptimization(objective_function,
                              lower,
                              upper,
                              acquisition_func,
                              model,
                              max_func,
                              pool,
                              best,
                              sampling_method,
                              distance,
                              replacement,
                              initial_points=n_init,
                              rng=rng,
                              initial_design=init_design,
                              output_path=output_path)

    x_best, f_min = bo.run(num_iterations, X=X_init, y=Y_init)

    results = dict()
    results["x_opt"] = x_best
    results["f_opt"] = f_min
    results["incumbents"] = [inc for inc in bo.incumbents]
    results["incumbent_values"] = [val for val in bo.incumbents_values]
    results["runtime"] = bo.runtime
    results["overhead"] = bo.time_overhead
    results["X"] = [x.tolist() for x in bo.X]
    results["y"] = [y for y in bo.y]
    return results
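Unlike the previous variant, this fork expects a george kernel object for the kernel argument (the string options survive only in the commented-out block and the docstring). A sketch, reusing the toy objective from the previous sketch:

import george
import numpy as np

n_dims = 2
kernel = george.kernels.Matern52Kernel(np.ones([n_dims]), ndim=n_dims)

results = bayesian_optimization(objective,                 # toy objective from above
                                lower=np.full(n_dims, -5.),
                                upper=np.full(n_dims, 5.),
                                kernel=kernel,
                                sampling_method="origin",   # default path: latin hypercube init
                                maximizer="random")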
Code example #12
def benchmark_function_model_selection(
        target,
        seed,
        n_eval=20,
        n_initial_points=5,
        model_class=None
):
    lower = np.array([-10])
    upper = np.array([10])
    rng1 = np.random.RandomState(seed)
    rng2 = np.random.RandomState(seed)

    # Build models for all algorithms
    models = []
    acqs = []
    max_funcs = []
    targets = []

    # objective_functions, len_meta_data and target_index are module-level
    # names assumed to be defined elsewhere in the benchmark script
    for obj_function_name, obj_function in objective_functions.items():

        meta_data = {}
        base = {}
        for model_index in range(0, len_meta_data + 1):
            if obj_function != model_index:
                base[model_index] = obj_function(model_index)

        for i, (key, obj_function_) in enumerate(base.items()):
            rs = np.random.RandomState(i)
            X = rs.rand(20, 1) * 20 - 10
            y = obj_function_(X)
            meta_data[i] = (X, y)
        model_kwargs = {
            'lower': np.array([-10]),
            'upper': np.array([10]),
            'meta_data': meta_data,
        }

        target = obj_function(target_index)
        targets.append(target)

        model = model_class(rng=rng1, **model_kwargs)
        models.append(model)
        acq = LogEI(model)
        acqs.append(acq)
        max_func = SciPyOptimizer(acq, lower, upper, n_restarts=50, rng=rng2)
        max_funcs.append(max_func)

    print("Benchmark for model selection...")
    bo = BayesianOptimizationSurrogateModelEnsemble(
        objective_funcs=targets,
        lower=lower,
        upper=upper,
        acquisition_funcs=acqs,
        models=models,
        initial_points=n_initial_points,
        initial_design=init_latin_hypercube_sampling,
        rng=rng2,
        maximize_funcs=max_funcs
    )

    bo.run(n_eval)
    rval = np.minimum.accumulate(bo.ys[bo.target_index])

    return rval
Code example #13
File: bohamiann.py Project: snpc94/RoBO
def bohamiann(objective_function,
              lower,
              upper,
              num_iterations=30,
              acquisition_func="log_ei",
              n_init=3,
              rng=None):
    """
    General interface for Bayesian optimization for global black box optimization problems.

    Parameters
    ----------
    objective_function: function
        The objective function that is minimized. This function gets a numpy array (D,) as input and returns
        the function value (scalar)
    lower: np.ndarray (D,)
        The lower bound of the search space
    upper: np.ndarray (D,)
        The upper bound of the search space
    num_iterations: int
        The number of iterations (initial design + BO)
    acquisition_func: {"ei", "log_ei", "lcb", "pi"}
        The acquisition function
    n_init: int
        Number of points for the initial design. Make sure that it is <= num_iterations.
    rng: numpy.random.RandomState
        Random number generator

    Returns
    -------
        dict with all results
    """
    assert upper.shape[0] == lower.shape[0]
    assert n_init <= num_iterations, "Number of initial design points has to be <= the number of iterations"

    if rng is None:
        rng = np.random.RandomState(np.random.randint(0, 10000))

    model = BayesianNeuralNetwork(sampling_method="sghmc",
                                  l_rate=np.sqrt(1e-4),
                                  mdecay=0.05,
                                  burn_in=3000,
                                  n_iters=50000,
                                  precondition=True,
                                  normalize_input=True,
                                  normalize_output=True)

    if acquisition_func == "ei":
        a = EI(model)
    elif acquisition_func == "log_ei":
        a = LogEI(model)
    elif acquisition_func == "pi":
        a = PI(model)
    elif acquisition_func == "lcb":
        a = LCB(model)
    else:
        raise ValueError("'{}' is not a valid acquisition function".format(
            acquisition_func))

    max_func = Direct(a, lower, upper, verbose=False)

    bo = BayesianOptimization(objective_function,
                              lower,
                              upper,
                              a,
                              model,
                              max_func,
                              initial_points=n_init,
                              rng=rng)

    x_best, f_min = bo.run(num_iterations)

    results = dict()
    results["x_opt"] = x_best
    results["f_opt"] = f_min
    results["incumbents"] = [inc for inc in bo.incumbents]
    results["incumbent_values"] = [val for val in bo.incumbents_values]
    results["runtime"] = bo.runtime
    results["overhead"] = bo.time_overhead
    return results
Code example #14
File: bayesian_optimization.py Project: snpc94/RoBO
def bayesian_optimization(objective_function,
                          lower,
                          upper,
                          num_iterations=30,
                          maximizer="direct",
                          acquisition_func="log_ei",
                          model="gp_mcmc",
                          n_init=3,
                          rng=None):
    """
    General interface for Bayesian optimization for global black box optimization problems.

    Parameters
    ----------
    objective_function: function
        The objective function that is minimized. This function gets a numpy array (D,) as input and returns
        the function value (scalar)
    lower: np.ndarray (D,)
        The lower bound of the search space
    upper: np.ndarray (D,)
        The upper bound of the search space
    num_iterations: int
        The number of iterations (initial design + BO)
    maximizer: {"direct", "cmaes"}
        Defines how the acquisition function is maximized. NOTE: "cmaes" only works in D > 1 dimensions
    acquisition_func: {"ei", "log_ei", "lcb", "pi"}
        The acquisition function
    model: {"gp", "gp_mcmc"}
        The model for the objective function.
    n_init: int
        Number of points for the initial design. Make sure that it is <= num_iterations.
    rng: numpy.random.RandomState
        Random number generator

    Returns
    -------
        dict with all results
    """
    assert upper.shape[0] == lower.shape[0]
    assert n_init <= num_iterations, "Number of initial design points has to be <= the number of iterations"

    if rng is None:
        rng = np.random.RandomState(np.random.randint(0, 10000))

    cov_amp = 2
    n_dims = lower.shape[0]

    initial_ls = np.ones([n_dims])
    exp_kernel = george.kernels.Matern52Kernel(initial_ls, ndim=n_dims)
    kernel = cov_amp * exp_kernel

    prior = DefaultPrior(len(kernel) + 1)

    n_hypers = 3 * len(kernel)
    if n_hypers % 2 == 1:
        n_hypers += 1

    if model == "gp":
        gp = GaussianProcess(kernel,
                             prior=prior,
                             rng=rng,
                             normalize_output=True,
                             normalize_input=True,
                             lower=lower,
                             upper=upper)
    elif model == "gp_mcmc":
        gp = GaussianProcessMCMC(kernel,
                                 prior=prior,
                                 n_hypers=n_hypers,
                                 chain_length=200,
                                 burnin_steps=100,
                                 normalize_input=True,
                                 normalize_output=True,
                                 rng=rng,
                                 lower=lower,
                                 upper=upper)
    else:
        raise ValueError("'{}' is not a valid model".format(model))

    if acquisition_func == "ei":
        a = EI(gp)
    elif acquisition_func == "log_ei":
        a = LogEI(gp)
    elif acquisition_func == "pi":
        a = PI(gp)
    elif acquisition_func == "lcb":
        a = LCB(gp)
    else:
        raise ValueError("'{}' is not a valid acquisition function".format(
            acquisition_func))

    if model == "gp":
        acquisition_func = a
    elif model == "gp_mcmc":
        acquisition_func = MarginalizationGPMCMC(a)

    if maximizer == "cmaes":
        max_func = CMAES(acquisition_func,
                         lower,
                         upper,
                         verbose=False,
                         rng=rng)
    elif maximizer == "direct":
        max_func = Direct(acquisition_func, lower, upper, verbose=False)
    else:
        raise ValueError("'{}' is not a valid function to maximize the "
                         "acquisition function".format(maximizer))

    bo = BayesianOptimization(objective_function,
                              lower,
                              upper,
                              acquisition_func,
                              gp,
                              max_func,
                              initial_points=n_init,
                              rng=rng)

    x_best, f_min = bo.run(num_iterations)

    results = dict()
    results["x_opt"] = x_best
    results["f_opt"] = f_min
    results["incumbents"] = [inc for inc in bo.incumbents]
    results["incumbent_values"] = [val for val in bo.incumbents_values]
    results["runtime"] = bo.runtime
    results["overhead"] = bo.time_overhead
    return results
Code example #15
File: robo.py Project: Delaunay/mlbaselines
def build_optimizer(model,
                    maximizer="random",
                    acquisition_func="log_ei",
                    maximizer_seed=1):
    """
    Build a Bayesian optimization object around an already-constructed model.

    Parameters
    ----------
    model: robo model instance (e.g. GaussianProcess or GaussianProcessMCMC)
        The surrogate model of the objective; its lower and upper attributes
        define the search space.
    maximizer: {"random", "scipy", "differential_evolution"}
        The optimizer for the acquisition function.
    acquisition_func: {"ei", "log_ei", "lcb", "pi"}
        The acquisition function
    maximizer_seed: int
        Seed for random number generator of the acquisition function maximizer

    Returns
    -------
        Optimizer
    """

    if acquisition_func == "ei":
        a = EI(model)
    elif acquisition_func == "log_ei":
        a = LogEI(model)
    elif acquisition_func == "pi":
        a = PI(model)
    elif acquisition_func == "lcb":
        a = LCB(model)
    else:
        raise ValueError("'{}' is not a valid acquisition function".format(
            acquisition_func))

    if isinstance(model, GaussianProcessMCMC):
        acquisition_func = MarginalizationGPMCMC(a)
    else:
        acquisition_func = a

    maximizer_rng = numpy.random.RandomState(maximizer_seed)
    if maximizer == "random":
        max_func = RandomSampling(acquisition_func,
                                  model.lower,
                                  model.upper,
                                  rng=maximizer_rng)
    elif maximizer == "scipy":
        max_func = SciPyOptimizer(acquisition_func,
                                  model.lower,
                                  model.upper,
                                  rng=maximizer_rng)
    elif maximizer == "differential_evolution":
        max_func = DifferentialEvolution(acquisition_func,
                                         model.lower,
                                         model.upper,
                                         rng=maximizer_rng)
    else:
        raise ValueError("'{}' is not a valid function to maximize the "
                         "acquisition function".format(maximizer))

    # NOTE: The internal RNG of BO won't be used.
    # NOTE: The number of initial points won't be used within BO, but rather outside.
    bo = BayesianOptimization(lambda: None,
                              model.lower,
                              model.upper,
                              acquisition_func,
                              model,
                              max_func,
                              initial_points=None,
                              rng=None,
                              initial_design=init_latin_hypercube_sampling,
                              output_path=None)

    return bo
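Because the returned object carries a dummy objective and no initial design, the caller presumably drives the loop by hand. A sketch under the assumption that BayesianOptimization exposes a choose_next(X, y) method (model, objective and the initial design are user-supplied):

import numpy

bo = build_optimizer(model, maximizer="random", acquisition_func="log_ei")

X, y = initial_X, initial_y            # initial design gathered outside BO, as the notes say
for _ in range(30):
    x_new = bo.choose_next(X, y)       # choose_next: assumed RoBO API
    y_new = objective(x_new)           # objective: user code, hypothetical
    X = numpy.vstack((X, x_new))
    y = numpy.append(y, y_new)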