def test_non_integer_batch_size(mock_model):
    space = ParameterSpace([ContinuousParameter('x', 0, 3)])

    # A non-integer batch size should be rejected
    with pytest.raises(ValueError):
        ExperimentalDesignLoop(space, mock_model, batch_size=3.5)
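For contrast, a minimal sketch of the passing case; the assumption is that
mock_model can be any object satisfying emukit's IModel interface (here
built with unittest.mock, as elsewhere in these tests):

from unittest import mock

import numpy as np
from emukit.core import ContinuousParameter, ParameterSpace
from emukit.core.interfaces import IModel
from emukit.experimental_design import ExperimentalDesignLoop

space = ParameterSpace([ContinuousParameter('x', 0, 3)])
mock_model = mock.create_autospec(IModel)
mock_model.X = np.array([[1.0]])  # the loop reads X and Y for its initial state
mock_model.Y = np.array([[0.0]])

# An integer batch_size is accepted where 3.5 above is rejected.
loop = ExperimentalDesignLoop(space, mock_model, batch_size=3)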
Example #2
def space_2d():
    p1 = ContinuousParameter('c', 1.0, 5.0)
    p2 = ContinuousParameter('d', 1.0, 6.0)

    return ParameterSpace([p1, p2])
Example #3
def meta_svm(fname_objective: str,
             fname_cost: str,
             noise: bool = True) -> Tuple[UserFunctionWrapper, ParameterSpace]:
    """
    Interface to the Meta-SVM benchmark which imitates the hyperparameter optimization of a
    support vector machine on OpenML like classification datasets.
    Offline-generated function samples can be downloaded here:

    http://www.ml4aad.org/wp-content/uploads/2019/05/profet_data.tar.gz

    NOTE: make sure that the indices for the objective function and the cost function match,
    e.g. for sample_objective_i.pkl and sample_cost_i.pkl the index i should be the same.
    Each of these files contains the parameters of a neural network that represents a single
    function sample, along with some additional information such as the mean and variance of
    the input normalization.

    For further information about Profet and the generated meta-surrogate benchmarks see:

    Meta-Surrogate Benchmarking for Hyperparameter Optimization
    A. Klein and Z. Dai and F. Hutter and N. Lawrence and J. Gonzalez
    arXiv:1905.12982 [cs.LG] (2019)

    :param fname_objective: filename for the objective function
    :param fname_cost: filename for the cost function
    :param noise: whether to add noise to the function values
    :return: Tuple of user function object and parameter space
    """
    # Both parameters are scaled to [0, 1]; the original space was [-10, 10].
    parameter_space = ParameterSpace([
        ContinuousParameter("log_C", 0, 1),
        ContinuousParameter("log_gamma", 0, 1),
    ])
    with open(fname_objective, "rb") as fh:
        data = pickle.load(fh)

    x_mean_objective = data["x_mean"]
    x_std_objective = data["x_std"]
    task_feature_objective = data["task_feature"]
    objective = get_default_architecture(x_mean_objective.shape[0],
                                         classification=True).float()
    objective.load_state_dict(data["state_dict"])

    with open(fname_cost, "rb") as fh:
        data = pickle.load(fh)

    x_mean_cost = data["x_mean"]
    x_std_cost = data["x_std"]
    y_mean_cost = data["y_mean"]
    y_std_cost = data["y_std"]
    task_feature_cost = data["task_feature"]
    cost = get_default_architecture(x_mean_cost.shape[0]).float()
    cost.load_state_dict(data["state_dict"])

    f = partial(
        objective_function,
        model_objective=objective,
        model_cost=cost,
        task_feature_objective=task_feature_objective,
        task_feature_cost=task_feature_cost,
        x_mean_objective=x_mean_objective,
        x_std_objective=x_std_objective,
        x_mean_cost=x_mean_cost,
        x_std_cost=x_std_cost,
        y_mean_objective=None,
        y_std_objective=None,
        y_mean_cost=y_mean_cost,
        y_std_cost=y_std_cost,
        log_objective=False,
        with_noise=noise,
    )

    return f, parameter_space
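A hedged usage sketch; the .pkl filenames are placeholders following the
sample_objective_i.pkl / sample_cost_i.pkl naming described in the docstring:

fcn, space = meta_svm(fname_objective="sample_objective_0.pkl",
                      fname_cost="sample_cost_0.pkl",
                      noise=True)
X = space.sample_uniform(point_count=5)  # five configurations in the unit square
results = fcn(X)                         # evaluate the sampled SVM surrogate at X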
Example #4
def space():
    return ParameterSpace(
        [ContinuousParameter('x1', 0, 15),
         ContinuousParameter('x2', 0, 15)])
Example #5
def multi_source_entropy_search_acquisition(gpy_model):
    space = ParameterSpace([ContinuousParameter('x1', 0, 1), InformationSourceParameter(2)])
    return MultiInformationSourceEntropySearch(gpy_model, space, num_representer_points=10)
Example #6
if args.benchmark == "sinone":
    b = SinOne()
elif args.benchmark == "sintwo":
    b = SinTwo()

obj = Wrapper(b)

f_opt = b.get_meta_information()["f_opt"]

cs = b.get_configuration_space()

list_params = []

for h in cs.get_hyperparameters():
    list_params.append(ContinuousParameter(h.name, h.lower, h.upper))

space = ParameterSpace(list_params)

init_design = RandomDesign(space)
X_init = init_design.get_samples(2)
Y_init = np.array([b.objective_function(xi)["function_value"] for xi in X_init])[:, None]


if args.model_type == "bnn":
    model = Bohamiann(X_init=X_init, Y_init=Y_init, verbose=True)

elif args.model_type == "rf":
    model = RandomForest(X_init=X_init, Y_init=Y_init)
    with_gradients = False

elif args.model_type == "dngo":
    model = DNGO(X_init=X_init, Y_init=Y_init)
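A hedged sketch of how such a script typically continues: wire the chosen
model into emukit's Bayesian optimization loop. The loop construction is
illustrative and assumes obj returns values in the (n, 1) array format
emukit expects:

from emukit.bayesian_optimization.loops import BayesianOptimizationLoop

bo_loop = BayesianOptimizationLoop(space=space, model=model)
bo_loop.run_loop(obj, 10)  # run 10 iterations against the wrapped benchmark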
Example #7
def space():
    return ParameterSpace([ContinuousParameter("x", -1, 1), ContinuousParameter("y", -1, 1)])
Example #8
    def __init__(self,
                 space: ParameterSpace,
                 X_init: np.ndarray,
                 Y_init: np.ndarray,
                 cost_init: np.ndarray,
                 s_min: float,
                 s_max: float,
                 update_interval: int = 1,
                 num_eval_points: int = 2000,
                 marginalize_hypers: bool = True):
        """
        Implements FAst Bayesian Optimization for LArge DataSets as described in:

        Fast Bayesian hyperparameter optimization on large datasets
        A. Klein and S. Falkner and S. Bartels and P. Hennig and F. Hutter
        Electronic Journal of Statistics (2017)

        :param space: input space where the optimization is carried out.
        :param X_init: initial data points
        :param Y_init: initial function values
        :param cost_init: initial costs
        :param s_min: smallest possible dataset size
        :param s_max: highest possible dataset size
        :param update_interval:  number of iterations between optimization of model hyper-parameters. Defaults to 1.
        :param num_eval_points: number of points to evaluate the acquisition function
        :param marginalize_hypers: if true, marginalize over the GP hyperparameters
        """

        params = space.parameters
        # Optimize the dataset size s on a log scale
        params.append(ContinuousParameter("s", np.log(s_min), np.log(s_max)))
        extended_space = ParameterSpace(params)

        model_objective = FabolasModel(X_init=X_init,
                                       Y_init=Y_init,
                                       s_min=s_min,
                                       s_max=s_max)
        model_cost = FabolasModel(X_init=X_init,
                                  Y_init=cost_init[:, None],
                                  s_min=s_min,
                                  s_max=s_max)

        if marginalize_hypers:
            acquisition_generator = lambda model: ContinuousFidelityEntropySearch(
                model_objective,
                space=extended_space,
                target_fidelity_index=len(extended_space.parameters) - 1)
            entropy_search = IntegratedHyperParameterAcquisition(
                model_objective, acquisition_generator)
        else:
            entropy_search = ContinuousFidelityEntropySearch(
                model_objective,
                space=extended_space,
                target_fidelity_index=len(extended_space.parameters) - 1)

        acquisition = acquisition_per_expected_cost(entropy_search, model_cost)

        model_updater_objective = FixedIntervalUpdater(model_objective,
                                                       update_interval)
        model_updater_cost = FixedIntervalUpdater(model_cost, update_interval,
                                                  lambda state: state.cost)

        acquisition_optimizer = RandomSearchAcquisitionOptimizer(
            extended_space, num_eval_points=num_eval_points)
        candidate_point_calculator = SequentialPointCalculator(
            acquisition, acquisition_optimizer)

        loop_state = create_loop_state(model_objective.X, model_objective.Y,
                                       model_cost.Y)

        super(CostSensitiveBayesianOptimizationLoop,
              self).__init__(candidate_point_calculator,
                             [model_updater_objective, model_updater_cost],
                             loop_state)
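A hedged instantiation sketch. The class name FabolasLoop is an assumption
(only the __init__ is shown above), and the initial data is synthetic; the
last column of X_init carries the dataset size s:

import numpy as np
from emukit.core import ContinuousParameter, ParameterSpace

space = ParameterSpace([ContinuousParameter("x", 0.0, 1.0)])
X_init = np.array([[0.2, 100.0], [0.8, 10000.0]])  # [x, s]
Y_init = np.array([[0.5], [0.3]])                  # e.g. validation errors
cost_init = np.array([1.0, 60.0])                  # e.g. training time in seconds
loop = FabolasLoop(space=space, X_init=X_init, Y_init=Y_init,
                   cost_init=cost_init, s_min=100.0, s_max=10000.0)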
Example #9
    return user_sample


# 1. user sample into a data vector
user_sample_vector = process_user_sample("audio_samples/synth_test.wav")
# print("user sample", user_sample_vector)

# 2. ranges of the synth parameters
syn1 = syn2 = syn3 = syn4 = syn5 = np.arange(158)
syn6 = np.arange(6000)
syn7 = np.arange(1000)
syn8 = np.arange(700)

n_samples = 5

parameter_space = ParameterSpace([ContinuousParameter('x1', 0., 157.)])

# parameter_space = ParameterSpace(
#     [ContinuousParameter('x1', 0., 157.), ContinuousParameter('x2', 0., 157.), ContinuousParameter('x3', 0., 157.),
#      ContinuousParameter('x4', 0., 157.), ContinuousParameter('x5', 0., 157.), ContinuousParameter('x6', 0., 5999.),
#      ContinuousParameter('x7', 0., 999.), ContinuousParameter('x8', 0., 699.)])

latin_design = LatinDesign(parameter_space=parameter_space)
X0 = latin_design.get_samples(n_samples)
Y0 = training_function(X0)
#D0 = ((Y0 - target)**2).sum(axis=1)
#plotter = BayesOptPlotter(h_noiseless, target, xmin, xmax, X0=X0, Y0=Y0)

model = GPRegression(X0, Y0)
model_wrapped = GPyModelWrapper(model)
target = user_sample_vector
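A hedged sketch of how this script might continue: the loss mirrors the
commented-out D0 line above, and the loop construction is illustrative
(for consistency the GP would need to be fit on these scalar loss values
rather than the raw feature vectors in Y0):

from emukit.bayesian_optimization.loops import BayesianOptimizationLoop

def loss(X):
    # Squared distance between synthesized features and the user sample,
    # returned as an (n, 1) array as emukit expects.
    Y = training_function(X)
    return ((Y - target) ** 2).sum(axis=1, keepdims=True)

bo_loop = BayesianOptimizationLoop(space=parameter_space, model=model_wrapped)
bo_loop.run_loop(loss, 20)  # 20 iterations is an arbitrary budget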
Example #10
def test_duplicate_parameter_names_fail():
    p1 = ContinuousParameter('c', 1.0, 5.0)
    p2 = ContinuousParameter('c', 1.0, 6.0)

    with pytest.raises(ValueError):
        ParameterSpace([p1, p2])
Example #11
def get_parameter_space():
    return ParameterSpace(list(__C.PARAMETERS.values()))
Example #12
File: ppo.py  Project: Bing-Jing/emukit
            evarr[i] = self.choose_action(s)
        ## the state size cannot change dynamically
        # s = np.concatenate((mean,variance),axis=1).reshape(-1,2)
        # evarr = self.choose_action(s)
        return evarr[np.newaxis, :]

    @property
    def has_gradients(self) -> bool:
        """Returns that this acquisition has gradients"""
        return False


if __name__ == "__main__":

    env = funcEnv()
    parameter_space = ParameterSpace([ContinuousParameter('x1', 0, 1)])
    num_data_points = 5
    fun = env.reset(upper_bound=1, lower_bound=0)

    ppo = PPO(model=None)
    ##### training
    ppo.buffer_ep = []
    for ep in range(EP_MAX):

        fun = env.reset(upper_bound=1, lower_bound=0)
        # ppo.ppoMax = 0
        ppo.ppoMin = env.maxVal
        ppo.ep_r = 0
        boPPOep_r = []
        # ppo.funCurMax = env.maxVal
        ppo.funCurMin = env.minVal
Example #13
def multi_source_optimizer():
    mock_acquisition_optimizer = mock.create_autospec(GradientAcquisitionOptimizer)
    mock_acquisition_optimizer.optimize.return_value = (np.array([[0.]]), None)
    space = ParameterSpace([ContinuousParameter('x', 0, 1), InformationSourceParameter(2)])
    return MultiSourceAcquisitionOptimizer(mock_acquisition_optimizer, space)
Example #14
def space_3d_mixed():
    p1 = ContinuousParameter('c', 1.0, 5.0)
    p2 = DiscreteParameter('d', [1, 2, 3])
    p3 = CategoricalParameter('cat', OneHotEncoding(['Maine Coon', 'Siamese']))
    return ParameterSpace([p1, p2, p3])
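Mixed spaces can be sampled directly into a flat array; a minimal sketch
(the four columns are c, d, and the two one-hot indicators for cat):

space = space_3d_mixed()
X = space.sample_uniform(point_count=4)
assert X.shape == (4, 4)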
Example #15
def continuous_space(n_dims):
    params = [ContinuousParameter('x' + str(i), 0, 1) for i in range(n_dims)]
    return ParameterSpace(params)
Example #16
def test_two_information_source_parameters_fail():
    with pytest.raises(ValueError):
        ParameterSpace([InformationSourceParameter(2), InformationSourceParameter(2)])
Example #17
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from tti import simulation
from emukit.test_functions.sensitivity import Ishigami
from emukit.core import ContinuousParameter, ParameterSpace

np.random.seed(10)  # for reproducibility

# Set the domain over which to perform the sensitivity analysis
probability_domain = (0, 1)

space = ParameterSpace([
    ContinuousParameter('p_under18', *probability_domain),
    ContinuousParameter('compliance', *probability_domain)
])

config_details = {
    0: {
        "name": "p_under18",
        "config": "case_config"
    },
    1: {
        "name": "compliance",
        "config": "policy_config"
    }
}

# Run the simulation a number of times to get some datapoints for the emulator
from emukit.core.initial_designs import RandomDesign
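A hedged sketch of the next step; the sample budget is arbitrary, and the
mapping of design rows onto the simulation's configs is only described in a
comment, since the signature of simulation is not shown above:

design = RandomDesign(space)
X = design.get_samples(50)
# Each row of X would then be written into the case/policy configs named in
# config_details before calling simulation(...) to collect emulator targets.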