Example 1
# Imports assumed from the labwatch project (module paths may differ by version);
# `space_with_condition` is a search-space fixture defined elsewhere in the test module.
from labwatch.converters.convert_to_configspace import (
    sacred_space_to_configspace, sacred_config_to_configspace,
    configspace_config_to_sacred)
from labwatch.searchspace import build_searchspace


def test_convert_config():
    space = build_searchspace(space_with_condition)
    cspace = sacred_space_to_configspace(space)

    config = space.sample()
    cs_config = sacred_config_to_configspace(cspace, config)
    assert config == cs_config.get_dictionary()
    config_convert_back = configspace_config_to_sacred(cs_config)
    assert config == config_convert_back
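
For context, the ConfigSpace side of this round trip can be reproduced with ConfigSpace's public API alone. The sketch below is illustrative: the small conditioned space stands in for the test's space_with_condition fixture, whose actual contents are not shown here.

import numpy as np
from ConfigSpace import ConfigurationSpace
from ConfigSpace.conditions import EqualsCondition
from ConfigSpace.hyperparameters import (CategoricalHyperparameter,
                                         UniformFloatHyperparameter)

# A small space with one conditional hyperparameter, analogous to what
# sacred_space_to_configspace would produce from a conditioned search space.
cs = ConfigurationSpace(seed=1)
optimizer = CategoricalHyperparameter("optimizer", ["sgd", "adam"])
momentum = UniformFloatHyperparameter("momentum", 0.0, 0.99)
cs.add_hyperparameters([optimizer, momentum])
# momentum is only active when optimizer == "sgd"
cs.add_condition(EqualsCondition(momentum, optimizer, "sgd"))

config = cs.sample_configuration()
print(config.get_dictionary())  # the plain-dict view asserted in the test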
Example 2
    def suggest_configuration(self):
        if self.X is None and self.y is None:
            # No observations yet: fall back to a random sample from the space.
            next_config = self.config_space.sample_configuration()

        else:
            # Ask the underlying solver for the next candidate, passing the
            # best observed value so far as the incumbent.
            candidates = list(self.solver.solver.choose_next(
                self.X, self.y[:, None], incumbent_value=np.min(self.y)))
            next_config = candidates[0]

        # Transform to a sacred configuration
        result = configspace_config_to_sacred(next_config)

        return result
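
The suggest_configuration methods in these examples all share the same contract: return a sacred-style dict describing the next configuration to evaluate. The following is a self-contained, hypothetical stand-in that mirrors only the empty-history branch above:

import numpy as np

class RandomOptimizer(object):
    """Hypothetical stand-in exposing the same interface as the snippets here."""

    def __init__(self, rng=None):
        self.X = None
        self.y = None
        self.rng = rng if rng is not None else np.random.RandomState(0)

    def suggest_configuration(self):
        # Mirrors the `self.X is None and self.y is None` branch: with no
        # observations recorded yet, sample uniformly at random.
        return {"learning_rate": float(self.rng.uniform(1e-4, 1e-1))}

opt = RandomOptimizer()
print(opt.suggest_configuration())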
Example 3
    def suggest_configuration(self):
        if self.X is None and self.Y is None:
            # No observations yet: fall back to a random point in the input space.
            new_x = init_random_uniform(self.X_lower,
                                        self.X_upper,
                                        N=1,
                                        rng=self.rng)

        elif self.X.shape[0] == 1:
            # We need at least 2 data points to train a GP
            new_x = init_random_uniform(self.X_lower,
                                        self.X_upper,
                                        N=1,
                                        rng=self.rng)

        else:
            # Fit a DNGO model (neural-network basis functions with a
            # Bayesian linear regression output layer) to the observations.
            prior = DNGOPrior()
            model = DNGO(batch_size=100,
                         num_epochs=20000,
                         learning_rate=0.1,
                         momentum=0.9,
                         l2=1e-16,
                         adapt_epoch=5000,
                         n_hypers=20,
                         prior=prior,
                         do_optimize=True,
                         do_mcmc=True)

            # acquisition_func = EI(model, task.X_lower, task.X_upper)
            # Log-EI over the output space of the network's last hidden layer
            # (the DNGO basis functions), whose tanh activations lie in [-1, 1].
            lo = np.ones([model.n_units_3]) * -1
            up = np.ones([model.n_units_3])
            ei = LogEI(model, lo, up)

            acquisition_func = IntegratedAcquisition(model, ei, self.X_lower,
                                                     self.X_upper)

            maximizer = Direct(acquisition_func, self.X_lower, self.X_upper)

            model.train(self.X, self.Y)

            acquisition_func.update(model)

            new_x = maximizer.maximize()

        # Map from [0, 1]^D space back to original space
        next_config = Configuration(self.config_space, vector=new_x[0, :])

        # Transform to sacred configuration
        result = configspace_config_to_sacred(next_config)

        return result
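
The Configuration(..., vector=...) call above converts a point from ConfigSpace's internal normalized representation into a full configuration. A minimal runnable sketch, with an illustrative one-dimensional space in place of self.config_space:

import numpy as np
from ConfigSpace import Configuration, ConfigurationSpace
from ConfigSpace.hyperparameters import UniformFloatHyperparameter

cs = ConfigurationSpace(seed=1)
cs.add_hyperparameter(UniformFloatHyperparameter("lr", 1e-4, 1e-1, log=True))

# A point in the unit hypercube, as an acquisition maximizer would return it.
vec = np.array([0.5])
config = Configuration(cs, vector=vec)
print(config["lr"])  # de-normalized back onto the original (log) scale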
Example 4
    def suggest_configuration(self):
        if self.X is None and self.y is None:
            new_x = init_random_uniform(self.lower, self.upper,
                                        n_points=1, rng=self.rng)[0, :]

        elif self.X.shape[0] == 1:
            # We need at least 2 data points to train a GP
            new_x = init_random_uniform(self.lower, self.upper,
                                        n_points=1, rng=self.rng)[0, :]

        else:
            # Matern-5/2 kernel with unit covariance amplitude and one
            # length scale per input dimension
            cov_amp = 1
            n_dims = self.lower.shape[0]

            initial_ls = np.ones([n_dims])
            exp_kernel = george.kernels.Matern52Kernel(initial_ls,
                                                       ndim=n_dims)
            kernel = cov_amp * exp_kernel

            # One prior per kernel hyperparameter, plus one for the noise
            prior = DefaultPrior(len(kernel) + 1)

            model = GaussianProcessMCMC(kernel, prior=prior,
                                        n_hypers=self.n_hypers,
                                        chain_length=self.chain_length,
                                        burnin_steps=self.burnin,
                                        normalize_input=False,
                                        normalize_output=True,
                                        rng=self.rng,
                                        lower=self.lower,
                                        upper=self.upper)

            # Marginalize log-EI over the MCMC samples of the GP hyperparameters
            log_ei = LogEI(model)

            acquisition_func = MarginalizationGPMCMC(log_ei)

            max_func = Direct(acquisition_func, self.lower, self.upper, verbose=False)

            model.train(self.X, self.y)

            acquisition_func.update(model)

            new_x = max_func.maximize()

        next_config = Configuration(self.config_space, vector=new_x)

        # Transform to sacred configuration
        result = configspace_config_to_sacred(next_config)

        return result
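
init_random_uniform comes from RoBO's initial design module. As a rough sketch of its behavior (not the project's exact code), it draws each coordinate uniformly between the given bounds and returns one row per point, which is why callers index the result with [0, :]; the N=1 keyword in the DNGO example above appears to be an older spelling of n_points:

import numpy as np

def init_random_uniform(lower, upper, n_points, rng=None):
    # Sketch: one row per point, each coordinate drawn uniformly
    # from [lower[d], upper[d]].
    if rng is None:
        rng = np.random.RandomState()
    n_dims = lower.shape[0]
    return np.array([rng.uniform(lower, upper, n_dims)
                     for _ in range(n_points)])

points = init_random_uniform(np.zeros(3), np.ones(3), n_points=1)
print(points.shape)  # (1, 3)
print(points[0, :])  # the single suggested point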
Example 5
    def suggest_configuration(self):

        if self.X is None and self.y is None:
            # No data points yet to train a model, just return a random configuration instead
            new_x = init_random_uniform(self.lower,
                                        self.upper,
                                        n_points=1,
                                        rng=self.rng)[0, :]

        else:
            # Train the model on all finished runs
            self.model.train(self.X, self.y)
            self.acquisition_func.update(self.model)

            # Maximize the acquisition function
            new_x = self.maximizer.maximize()

        # Map from [0, 1]^D space back to original space
        next_config = Configuration(self.config_space, vector=new_x)

        # Transform to sacred configuration
        result = configspace_config_to_sacred(next_config)

        return result
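
Downstream, the dict returned by configspace_config_to_sacred is typically fed back into a sacred experiment as config updates. A minimal sketch using sacred's public API, with an illustrative objective and a hard-coded suggestion standing in for a real optimizer:

from sacred import Experiment

ex = Experiment("demo")

@ex.config
def cfg():
    learning_rate = 0.01  # default; overridden by the optimizer's suggestion

@ex.main
def run(learning_rate):
    # Illustrative objective: smaller is better.
    return (learning_rate - 0.05) ** 2

# Any of the suggest_configuration variants above returns a plain dict,
# so it plugs straight into config_updates.
suggestion = {"learning_rate": 0.03}
run_obj = ex.run(config_updates=suggestion)
print(run_obj.result)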