Exemple #1
0
def get_eips_object_callback(
    scenario_dict,
    seed,
    ta,
    ta_kwargs,
    backend,
    metalearning_configurations,
):
    """Construct a SMAC4AC facade configured for EIPS optimization.

    EIPS (expected improvement per second) requires a multi-objective
    random forest that predicts both cost and runtime, together with the
    matching runhistory-to-EPM converter and acquisition function.
    """
    # Point pSMAC at the backend's existing SMAC output directories.
    scenario_dict['input_psmac_dirs'] = backend.get_smac_output_glob()
    scenario = Scenario(scenario_dict)

    # Column types / bounds for the surrogate model.
    types, bounds = get_types(scenario.cs, scenario.feature_array)

    model_kwargs = {
        'target_names': ['cost', 'runtime'],
        'types': types,
        'bounds': bounds,
        'instance_features': scenario.feature_array,
        'rf_kwargs': {'seed': 1},
    }

    smac = SMAC4AC(
        scenario=scenario,
        rng=seed,
        tae_runner=ta,
        tae_runner_kwargs=ta_kwargs,
        runhistory2epm=RunHistory2EPM4EIPS,
        runhistory2epm_kwargs={},
        model=UncorrelatedMultiObjectiveRandomForestWithInstances,
        model_kwargs=model_kwargs,
        acquisition_function=EIPS,
        run_id=seed,
    )
    return smac
Exemple #2
0
    def testRandomImputation(self):
        """Sanity-check RFR-based imputation on random censored data.

        For a range of problem sizes, generate random features and a
        sinusoidal target, censor the first 10% of samples (halving their
        observed values), and verify that every imputed value lies strictly
        above its censored observation and is finite.
        """
        rs = numpy.random.RandomState(1)

        for i in range(0, 150, 15):
            # First random imputation sanity check
            num_samples = max(1, i * 10)
            num_feat = max(1, i)
            num_censored = int(num_samples * 0.1)
            X = rs.rand(num_samples, num_feat)
            y = numpy.sin(X[:, 0:1])

            cutoff = max(y) * 0.9
            y[y > cutoff] = cutoff

            # We have some cen data
            cen_X = X[:num_censored, :]
            cen_y = y[:num_censored]
            uncen_X = X[num_censored:, :]
            uncen_y = y[num_censored:]

            cen_y /= 2

            cs = ConfigurationSpace()
            # BUGFIX: the original loop reused ``i`` here, shadowing the
            # outer loop counter; use a dedicated variable instead.
            for j in range(num_feat):
                cs.add_hyperparameter(
                    UniformFloatHyperparameter(name="a_%d" % j,
                                               lower=0,
                                               upper=1,
                                               default_value=0.5))

            types, bounds = get_types(cs, None)
            self.model = RandomForestWithInstances(
                configspace=cs,
                types=types,
                bounds=bounds,
                instance_features=None,
                seed=1234567980,
            )
            imputor = rfr_imputator.RFRImputator(rng=rs,
                                                 cutoff=cutoff,
                                                 threshold=cutoff * 10,
                                                 change_threshold=0.01,
                                                 max_iter=5,
                                                 model=self.model)

            imp_y = imputor.impute(censored_X=cen_X,
                                   censored_y=cen_y,
                                   uncensored_X=uncen_X,
                                   uncensored_y=uncen_y)

            # The imputor may give up and return None; skip that size.
            if imp_y is None:
                continue

            # Imputed values must be strictly above the censored
            # observations and finite.
            for idx in range(cen_y.shape[0]):
                self.assertGreater(imp_y[idx], cen_y[idx])
            self.assertTrue(numpy.isfinite(imp_y).all())
Exemple #3
0
 def get_model(self, config_space):
     """Build a GaussianProcess surrogate for ``config_space``."""
     types, bounds = get_types(config_space, instance_features=None)
     model = GaussianProcess(
         configspace=config_space,
         types=types,
         bounds=bounds,
         seed=self._get_random_state().randint(MAXINT),
         kernel=self.get_kernel(config_space),
         normalize_y=True,
     )
     return model
    def setUp(self):
        """Build a small config space, a populated runhistory, a scenario
        and a random-forest model for the tests in this class.

        500 random runs are generated; runs whose runtime equals the
        cutoff (40) are counted as timeouts, runs below the cutoff but
        flagged TIMEOUT are counted as censored.
        """
        logging.basicConfig(level=logging.DEBUG)
        self.cs = ConfigurationSpace()
        self.cs.add_hyperparameter(
            CategoricalHyperparameter(name="cat_a_b",
                                      choices=["a", "b"],
                                      default_value="a"))
        self.cs.add_hyperparameter(
            UniformFloatHyperparameter(name="float_0_1",
                                       lower=0,
                                       upper=1,
                                       default_value=0.5))
        # NOTE(review): the name says 0..100 but the bounds are -10..10;
        # looks like a stale name — confirm against tests that use it.
        self.cs.add_hyperparameter(
            UniformIntegerHyperparameter(name='integer_0_100',
                                         lower=-10,
                                         upper=10,
                                         default_value=0))

        self.rh = runhistory.RunHistory(aggregate_func=average_cost)
        rs = numpy.random.RandomState(1)
        to_count = 0  # runs at exactly the cutoff (true timeouts)
        cn_count = 0  # runs below the cutoff but marked TIMEOUT (censored)
        for i in range(500):
            config, seed, runtime, status, instance_id = \
                generate_config(cs=self.cs, rs=rs)
            if runtime == 40:
                to_count += 1
            if runtime < 40 and status == StatusType.TIMEOUT:
                cn_count += 1
            self.rh.add(config=config,
                        cost=runtime,
                        time=runtime,
                        status=status,
                        instance_id=instance_id,
                        seed=seed,
                        additional_info=None)
        print("%d TIMEOUTs, %d censored" % (to_count, cn_count))

        # Minimal scenario stub: runtime objective with PAR10 aggregation.
        self.scen = Scen()
        self.scen.run_obj = "runtime"
        self.scen.overall_obj = "par10"
        self.scen.cutoff = 40

        types, bounds = get_types(self.cs, None)
        self.model = RandomForestWithInstances(
            configspace=self.cs,
            types=types,
            bounds=bounds,
            instance_features=None,
            seed=1234567980,
        )
    def test_with_ordinal(self):
        """RandomForestWithInstances handles a space with an ordinal
        hyperparameter: check the derived bounds, then train on replicated
        data and verify predictions are close to the targets."""
        cs = smac.configspace.ConfigurationSpace()
        _ = cs.add_hyperparameter(
            CategoricalHyperparameter('a', [0, 1], default_value=0))
        _ = cs.add_hyperparameter(
            OrdinalHyperparameter('b', [0, 1], default_value=1))
        _ = cs.add_hyperparameter(
            UniformFloatHyperparameter('c',
                                       lower=0.,
                                       upper=1.,
                                       default_value=1))
        _ = cs.add_hyperparameter(
            UniformIntegerHyperparameter('d',
                                         lower=0,
                                         upper=10,
                                         default_value=1))
        cs.seed(1)

        feat_array = np.array([0, 0, 0]).reshape(1, -1)
        types, bounds = get_types(cs, feat_array)
        model = RandomForestWithInstances(
            configspace=cs,
            types=types,
            bounds=bounds,
            instance_features=feat_array,
            seed=1,
            ratio_features=1.0,
            pca_components=9,
        )
        self.assertEqual(bounds[0][0], 2)
        # BUGFIX: ``bounds[0][1] is np.nan`` relied on object identity with
        # the np.nan singleton; test the value itself instead.
        self.assertTrue(np.isnan(bounds[0][1]))
        self.assertEqual(bounds[1][0], 0)
        self.assertEqual(bounds[1][1], 1)
        self.assertEqual(bounds[2][0], 0.)
        self.assertEqual(bounds[2][1], 1.)
        self.assertEqual(bounds[3][0], 0.)
        self.assertEqual(bounds[3][1], 1.)
        X = np.array(
            [[0., 0., 0., 0., 0., 0., 0.], [0., 0., 1., 0., 0., 0., 0.],
             [0., 1., 0., 9., 0., 0., 0.], [0., 1., 1., 4., 0., 0., 0.]],
            dtype=np.float64)
        y = np.array([0, 1, 2, 3], dtype=np.float64)

        X_train = np.vstack((X, X, X, X, X, X, X, X, X, X))
        y_train = np.vstack((y, y, y, y, y, y, y, y, y, y))

        model.train(X_train, y_train.reshape((-1, 1)))
        mean, _ = model.predict(X)
        for idx, m in enumerate(mean):
            # BUGFIX: the third positional argument of assertAlmostEqual is
            # ``places`` and must be an int; passing 0.05 raises TypeError
            # whenever the values differ. A tolerance is ``delta=``.
            self.assertAlmostEqual(y[idx], m, delta=0.05)
Exemple #6
0
 def get_model(self, cs, instance_features=None):
     """Build a RandomForestWithInstances for ``cs``.

     ``instance_features`` may be a mapping; when given (and non-empty),
     its values are stacked into an array before being handed to the
     model.
     """
     if instance_features:
         instance_features = numpy.array(list(instance_features.values()))
     types, bounds = get_types(cs, instance_features)
     return RandomForestWithInstances(
         configspace=cs,
         types=types,
         bounds=bounds,
         instance_features=instance_features,
         seed=1234567980,
         pca_components=7,
     )
    def test_apply_pca(self):
        """PCA on instance features is applied only with enough data points.

        ``AbstractEPM._train``/``_predict`` are mocked out, so this
        exercises only the PCA bookkeeping inside ``train()`` and
        ``predict_marginalized_over_instances()``: with 5 data points and
        pca_components=7 no PCA is applied, with 8 it is, and dropping
        back to 5 must revert the internal types array and disable PCA.
        """
        cs = self._get_cs(5)
        instance_features = np.array([np.random.rand(10) for _ in range(5)])
        types, bounds = get_types(cs, instance_features)

        def get_X_y(num_samples, num_instance_features):
            # Sample configurations; optionally append random
            # instance-feature columns to X.
            X = smac.configspace.convert_configurations_to_array(
                cs.sample_configuration(num_samples))
            if num_instance_features:
                X_inst = np.random.rand(num_samples, num_instance_features)
                X = np.hstack((X, X_inst))
            y = np.random.rand(num_samples)
            return X, y

        with unittest.mock.patch.object(AbstractEPM, '_train'):
            with unittest.mock.patch.object(AbstractEPM,
                                            '_predict') as predict_mock:

                # Echo the input back as (mean, variance).
                predict_mock.side_effect = lambda x, _: (x, x)

                epm = AbstractEPM(
                    configspace=cs,
                    types=types,
                    bounds=bounds,
                    seed=1,
                    pca_components=7,
                    instance_features=instance_features,
                )

                # fewer data points (5) than pca components (7) -> no PCA
                X, y = get_X_y(5, 10)
                epm.train(X, y)
                self.assertFalse(epm._apply_pca)
                X_test, _ = get_X_y(5, None)
                epm.predict_marginalized_over_instances(X_test)

                # more data points than pca components
                X, y = get_X_y(8, 10)
                epm.train(X, y)
                self.assertTrue(epm._apply_pca)
                X_test, _ = get_X_y(5, None)
                epm.predict_marginalized_over_instances(X_test)

                # and less again - this ensures that the types array inside the epm is reverted
                # and the pca is disabled again
                X, y = get_X_y(5, 10)
                epm.train(X, y)
                self.assertFalse(epm._apply_pca)
                X_test, _ = get_X_y(5, None)
                epm.predict_marginalized_over_instances(X_test)
Exemple #8
0
 def test_get_types(self):
     """Check get_types' type codes and bounds for a mixed config space."""
     cs = ConfigurationSpace()
     for hp in (
         CategoricalHyperparameter('a', ['a', 'b']),
         UniformFloatHyperparameter('b', 1, 5),
         UniformIntegerHyperparameter('c', 3, 7),
         Constant('d', -5),
         OrdinalHyperparameter('e', ['cold', 'hot']),
         CategoricalHyperparameter('f', ['x', 'y']),
     ):
         cs.add_hyperparameter(hp)

     types, bounds = get_types(cs, None)

     # categoricals are encoded by their number of choices, numerics by 0
     np.testing.assert_array_equal(types, [2, 0, 0, 0, 0, 2])
     self.assertEqual(bounds[0][0], 2)
     self.assertFalse(np.isfinite(bounds[0][1]))
     np.testing.assert_array_equal(bounds[1], [0, 1])
     np.testing.assert_array_equal(bounds[2], [0, 1])
     self.assertEqual(bounds[3][0], 0)
     self.assertFalse(np.isfinite(bounds[3][1]))
     np.testing.assert_array_equal(bounds[4], [0, 1])
     self.assertEqual(bounds[5][0], 2)
     self.assertFalse(np.isfinite(bounds[5][1]))
Exemple #9
0
    def setUp(self):
        """Create a runhistory, five fixed configurations, types/bounds and
        a scenario for the tests in this class."""
        unittest.TestCase.setUp(self)

        self.rh = runhistory.RunHistory()
        self.cs = get_config_space()
        self.config1 = Configuration(self.cs,
                                     values={'a': 0, 'b': 100})
        self.config2 = Configuration(self.cs,
                                     values={'a': 100, 'b': 0})
        self.config3 = Configuration(self.cs,
                                     values={'a': 100, 'b': 100})
        self.config4 = Configuration(self.cs,
                                     values={'a': 23, 'b': 23})
        self.config5 = Configuration(self.cs,
                                     values={'a': 5, 'b': 10})
        self.types, self.bounds = get_types(self.cs, None)
        # BUGFIX: the original built a throwaway Scenario (without
        # 'output_dir') that was immediately overwritten — a dead store;
        # only the final scenario is created now.
        self.scen = Scenario({'run_obj': 'runtime', 'cutoff_time': 20, 'cs': self.cs,
                              'output_dir': ''})
Exemple #10
0
 def test_init_EIPS_as_arguments(self):
     """EPILS must adopt explicitly supplied EIPS components as-is."""
     for objective in ['runtime', 'quality']:
         self.scenario.run_obj = objective
         types, bounds = get_types(self.scenario.cs, None)
         model = UncorrelatedMultiObjectiveRandomForestWithInstances(
             ['cost', 'runtime'],
             self.scenario.cs,
             types,
             bounds,
             seed=1,
             rf_kwargs={'seed': 1},
         )
         acq_func = EIPS(model)
         rh2epm = RunHistory2EPM4EIPS(self.scenario, 2)
         solver = EPILS(self.scenario,
                        model=model,
                        acquisition_function=acq_func,
                        runhistory2epm=rh2epm).solver
         # the solver must hold exactly the objects we passed in
         self.assertIs(model, solver.model)
         self.assertIs(acq_func, solver.acquisition_func)
         self.assertIs(rh2epm, solver.rh2EPM)
Exemple #11
0
    def __init__(self,
                 scenario: Scenario,
                 rng: np.random.RandomState = None,
                 method=Method.HYPERBOOST,
                 pca_components=None,
                 **kwargs):
        """Initialize the facade with the chosen EPM backend.

        Parameters
        ----------
        scenario : Scenario
            SMAC scenario; provides the config space and feature array.
        rng : np.random.RandomState, optional
            Source of randomness. A fresh RandomState is created when None.
        method : Method
            Which empirical performance model to use
            (Method.SCIKIT_OPTIMIZE or Method.HYPERBOOST).
        pca_components : int, optional
            Forwarded to the HyperBoost EPM as ``pca_components_``.
        **kwargs
            Forwarded to the parent facade.
        """
        # BUGFIX: the original dereferenced ``rng.randint`` directly, so
        # using the documented default (None) crashed with AttributeError.
        if rng is None:
            rng = np.random.RandomState()

        # Types and bounds required to initialize EPM
        types, bounds = get_types(scenario.cs, scenario.feature_array)

        if method == Method.SCIKIT_OPTIMIZE:

            # Initialize Scikit-optimize's empirical performance model
            model = SkoptEPM(types=types,
                             bounds=bounds,
                             instance_features=scenario.feature_array,
                             seed=rng.randint(MAXINT),
                             pca_components=scenario.PCA_DIM)

            # Pass parameters to SMAC4HPO
            super().__init__(scenario=scenario, rng=rng, model=model, **kwargs)

        elif method == Method.HYPERBOOST:

            # Initialize HyperBoost's empirical performance model
            model = HyperEPM(types=types,
                             bounds=bounds,
                             instance_features=scenario.feature_array,
                             seed=rng.randint(MAXINT),
                             pca_components_=pca_components,
                             configspace=scenario.cs)

            # Pass parameters to SMAC4HPO
            super().__init__(scenario=scenario,
                             rng=rng,
                             model=model,
                             acquisition_function=ScorePlusDistance,
                             **kwargs)
        else:
            # BUGFIX: previously an unknown method fell through silently,
            # leaving the base class uninitialized.
            raise ValueError("Unknown method: %r" % (method,))
Exemple #12
0
 def test_get_types_with_inactive(self):
     """get_types when every hyperparameter except 'a' may be inactive."""
     cs = ConfigurationSpace()
     a = cs.add_hyperparameter(CategoricalHyperparameter('a', ['a', 'b']))
     b = cs.add_hyperparameter(UniformFloatHyperparameter('b', 1, 5))
     c = cs.add_hyperparameter(UniformIntegerHyperparameter('c', 3, 7))
     d = cs.add_hyperparameter(Constant('d', -5))
     e = cs.add_hyperparameter(OrdinalHyperparameter('e', ['cold', 'hot']))
     f = cs.add_hyperparameter(CategoricalHyperparameter('f', ['x', 'y']))
     # each of b..f is active only when a == 'a'
     for child in (b, c, d, e, f):
         cs.add_condition(EqualsCondition(child, a, 'a'))

     types, bounds = get_types(cs, None)

     # conditional columns get an extra "inactive" category / wider bounds
     np.testing.assert_array_equal(types, [2, 0, 0, 2, 0, 3])
     self.assertEqual(bounds[0][0], 2)
     self.assertFalse(np.isfinite(bounds[0][1]))
     np.testing.assert_array_equal(bounds[1], [-1, 1])
     np.testing.assert_array_equal(bounds[2], [-1, 1])
     self.assertEqual(bounds[3][0], 2)
     self.assertFalse(np.isfinite(bounds[3][1]))
     np.testing.assert_array_equal(bounds[4], [0, 2])
     self.assertEqual(bounds[5][0], 3)
     self.assertFalse(np.isfinite(bounds[5][1]))
Exemple #13
0
    def run(self):
        """
        Implementation of the forward selection loop.
        Uses SMACs EPM (RF) wrt the feature space to minimize the OOB error.

        Each round, every remaining feature column is tried in turn (on top
        of the columns selected so far) and the one yielding the lowest
        out-of-bag error is kept; the loop stops early when adding nothing
        is best or when the error no longer improves.

        Returns
        -------
        feature_importance: OrderedDict
            dict_keys (first key -> most important) -> OOB error
        """
        parameters = [p.name for p in self.scenario.cs.get_hyperparameters()]
        self.logger.debug("Parameters: %s", parameters)

        rh2epm = RunHistory2EPM4Cost(scenario=self.scenario,
                                     num_params=len(parameters),
                                     success_states=[
                                         StatusType.SUCCESS, StatusType.CAPPED,
                                         StatusType.CRASHED
                                     ],
                                     impute_censored_data=False,
                                     impute_state=None)

        X, y = rh2epm.transform(self.rh)

        # reduce sample size to speedup computation
        if X.shape[0] > self.MAX_SAMPLES:
            idx = np.random.choice(X.shape[0],
                                   size=self.MAX_SAMPLES,
                                   replace=False)
            X = X[idx, :]
            y = y[idx]

        self.logger.debug(
            "Shape of X: %s, of y: %s, #parameters: %s, #feats: %s", X.shape,
            y.shape, len(parameters), len(self.scenario.feature_names))
        names = copy.deepcopy(self.scenario.feature_names)
        self.logger.debug("Features: %s", names)

        # all parameter columns are always used; feature columns are added
        # one at a time by the forward selection below
        used = list(range(0, len(parameters)))
        # feature name <-> column index (feature columns follow parameters)
        feat_ids = {f: i for i, f in enumerate(names, len(used))}
        ids_feat = {i: f for f, i in feat_ids.items()}
        self.logger.debug("Used: %s", used)
        evaluated_feature_importance = OrderedDict()

        types, bounds = get_types(self.scenario.cs,
                                  self.scenario.feature_array)

        last_error = np.inf

        for _round in range(self.to_evaluate):  # Main Loop
            errors = []
            for f in names:
                i = feat_ids[f]
                self.logger.debug('Evaluating %s', f)
                used.append(i)
                self.logger.debug(
                    'Used features: %s',
                    str([ids_feat[j] for j in used[len(parameters):]]))

                start = time.time()
                self._refit_model(types[sorted(used)], bounds, X[:,
                                                                 sorted(used)],
                                  y)  # refit the model every round
                errors.append(self.model.rf.out_of_bag_error())
                used.pop()
                self.logger.debug('Refitted RF (sec %.2f; error: %.4f)' %
                                  (time.time() - start, errors[-1]))
            else:
                # NOTE: for/else — there is no ``break`` in the loop above,
                # so this branch always runs after it; it evaluates the
                # "add no further feature" baseline as the last errors entry
                self.logger.debug('Evaluating None')
                start = time.time()
                self._refit_model(types[sorted(used)], bounds, X[:,
                                                                 sorted(used)],
                                  y)  # refit the model every round
                errors.append(self.model.rf.out_of_bag_error())
                self.logger.debug('Refitted RF (sec %.2f; error: %.4f)' %
                                  (time.time() - start, errors[-1]))
                if _round == 0:
                    evaluated_feature_importance['None'] = errors[-1]
            # last entry of ``errors`` corresponds to adding nothing
            best_idx = np.argmin(errors)
            lowest_error = errors[best_idx]

            if best_idx == len(errors) - 1:
                self.logger.info('Best thing to do is add nothing')
                best_feature = 'None'
                # evaluated_feature_importance[best_feature] = lowest_error
                break
            elif lowest_error >= last_error:
                # no improvement over the previous round -> stop
                break
            else:
                last_error = lowest_error
                best_feature = names.pop(best_idx)
                used.append(feat_ids[best_feature])

            self.logger.debug('%s: %.4f' % (best_feature, lowest_error))
            evaluated_feature_importance[best_feature] = lowest_error

        self.logger.debug(evaluated_feature_importance)
        self.evaluated_feature_importance = evaluated_feature_importance
        return evaluated_feature_importance
Exemple #14
0
    def __init__(self, api_config, config_space, parallel_setting="LS"):
        """Set up a portfolio of surrogate model / acquisition combinations.

        Builds several surrogates (GPs and random forests, with and without
        log-transformed cost) paired with acquisition functions (EI, PI,
        LCB, LogEI), their local-and-random-search optimizers and the
        matching runhistory converters. ``self.combinations`` holds the
        resulting (model, acq_func, acq_optimizer, rh2epm) tuples.

        Parameters
        ----------
        api_config : dict
            Benchmark API description, forwarded to the base optimizer.
        config_space : ConfigurationSpace
            Search space to optimize over.
        parallel_setting : str
            One of CL_min, CL_max, CL_mean, KB, LS; controls how parallel
            suggestions are produced.
        """
        super(SMAC4EPMOpimizer, self).__init__(api_config)
        self.cs = config_space
        self.num_hps = len(self.cs.get_hyperparameters())

        if parallel_setting not in ["CL_min", "CL_max", "CL_mean", "KB", "LS"]:
            raise ValueError(
                "parallel_setting can only be one of the following: "
                "CL_min, CL_max, CL_mean, KB, LS")
        self.parallel_setting = parallel_setting

        rng = np.random.RandomState(seed=0)
        scenario = Scenario({
            "run_obj": "quality",  # we optimize quality (alt. to runtime)
            "runcount-limit": 128,
            "cs": self.cs,  # configuration space
            "deterministic": True,
            "limit_resources": False,
        })

        self.stats = Stats(scenario)
        # traj = TrajLogger(output_dir=None, stats=self.stats)

        self.runhistory = RunHistory()

        r2e_def_kwargs = {
            "scenario": scenario,
            "num_params": self.num_hps,
            "success_states": [
                StatusType.SUCCESS,
            ],
            "impute_censored_data": False,
            "scale_perc": 5,
        }

        # prob=0.0: never interleave purely random configurations
        self.random_chooser = ChooserProb(rng=rng, prob=0.0)

        types, bounds = get_types(self.cs, instance_features=None)
        model_kwargs = {
            "configspace": self.cs,
            "types": types,
            "bounds": bounds,
            "seed": rng.randint(MAXINT),
        }

        models = []

        cov_amp = ConstantKernel(
            2.0,
            constant_value_bounds=(np.exp(-10), np.exp(2)),
            prior=LognormalPrior(mean=0.0, sigma=1.0, rng=rng),
        )

        # BUGFIX: ``np.int`` was deprecated in NumPy 1.20 and later removed;
        # the builtin ``int`` is the documented replacement.
        cont_dims = np.array(np.where(np.array(types) == 0)[0], dtype=int)
        cat_dims = np.where(np.array(types) != 0)[0]

        if len(cont_dims) > 0:
            exp_kernel = Matern(
                np.ones([len(cont_dims)]),
                [(np.exp(-6.754111155189306), np.exp(0.0858637988771976))
                 for _ in range(len(cont_dims))],
                nu=2.5,
                operate_on=cont_dims,
            )

        if len(cat_dims) > 0:
            ham_kernel = HammingKernel(
                np.ones([len(cat_dims)]),
                [(np.exp(-6.754111155189306), np.exp(0.0858637988771976))
                 for _ in range(len(cat_dims))],
                operate_on=cat_dims,
            )
        assert len(cont_dims) + len(cat_dims) == len(
            scenario.cs.get_hyperparameters())

        noise_kernel = WhiteKernel(
            noise_level=1e-8,
            noise_level_bounds=(np.exp(-25), np.exp(2)),
            prior=HorseshoePrior(scale=0.1, rng=rng),
        )

        if len(cont_dims) > 0 and len(cat_dims) > 0:
            # both continuous and categorical dimensions
            kernel = cov_amp * (exp_kernel * ham_kernel) + noise_kernel
        elif len(cont_dims) > 0 and len(cat_dims) == 0:
            # only continuous
            kernel = cov_amp * exp_kernel + noise_kernel
        elif len(cont_dims) == 0 and len(cat_dims) > 0:
            # only categorical
            kernel = cov_amp * ham_kernel + noise_kernel
        else:
            raise ValueError()
        gp_kwargs = {"kernel": kernel}

        # NOTE(review): ``model_kwargs`` never contains these keys, so the
        # defaults below are always used — confirm whether they were meant
        # to come from somewhere else.
        rf_kwargs = {}
        rf_kwargs["num_trees"] = model_kwargs.get("num_trees", 10)
        rf_kwargs["do_bootstrapping"] = model_kwargs.get(
            "do_bootstrapping", True)
        rf_kwargs["ratio_features"] = model_kwargs.get("ratio_features", 1.0)
        rf_kwargs["min_samples_split"] = model_kwargs.get(
            "min_samples_split", 2)
        rf_kwargs["min_samples_leaf"] = model_kwargs.get("min_samples_leaf", 1)
        rf_kwargs["log_y"] = model_kwargs.get("log_y", True)

        rf_log = RandomForestWithInstances(**model_kwargs, **rf_kwargs)

        rf_kwargs = copy.deepcopy(rf_kwargs)
        rf_kwargs["log_y"] = False
        rf_no_log = RandomForestWithInstances(**model_kwargs, **rf_kwargs)

        rh2epm_cost = RunHistory2EPM4Cost(**r2e_def_kwargs)
        rh2epm_log_cost = RunHistory2EPM4LogScaledCost(**r2e_def_kwargs)
        rh2epm_copula = RunHistory2EPM4GaussianCopulaCorrect(**r2e_def_kwargs)

        self.combinations = []

        # 2 models * 4 acquisition functions
        acq_funcs = [EI, PI, LogEI, LCB]
        acq_func_instances = []
        # acq_func_maximizer_instances = []

        # fewer local-search iterations for higher-dimensional spaces
        n_sls_iterations = {
            1: 10,
            2: 10,
            3: 10,
            4: 10,
            5: 10,
            6: 10,
            7: 8,
            8: 6,
        }.get(len(self.cs.get_hyperparameters()), 5)

        acq_func_maximizer_kwargs = {
            "config_space": self.cs,
            "rng": rng,
            "max_steps": 5,
            "n_steps_plateau_walk": 5,
            "n_sls_iterations": n_sls_iterations,
        }
        self.idx_ei = 0

        self.num_models = len(models)
        self.num_acq_funcs = len(acq_funcs)

        # GP without output transformation, combined with EI, PI and LCB
        no_transform_gp = GaussianProcess(**copy.deepcopy(model_kwargs),
                                          **copy.deepcopy(gp_kwargs))
        ei = EI(model=no_transform_gp)
        acq_func_maximizer_kwargs["acquisition_function"] = ei
        ei_opt = LocalAndSortedRandomSearch(**acq_func_maximizer_kwargs)
        self.combinations.append((no_transform_gp, ei, ei_opt, rh2epm_cost))

        pi = PI(model=no_transform_gp)
        acq_func_maximizer_kwargs["acquisition_function"] = pi
        pi_opt = LocalAndSortedRandomSearch(**acq_func_maximizer_kwargs)
        self.combinations.append((no_transform_gp, pi, pi_opt, rh2epm_cost))

        lcb = LCB(model=no_transform_gp)
        acq_func_maximizer_kwargs["acquisition_function"] = lcb
        lcb_opt = LocalAndSortedRandomSearch(**acq_func_maximizer_kwargs)
        self.combinations.append((no_transform_gp, lcb, lcb_opt, rh2epm_cost))

        # GP with Gaussian-copula-transformed targets + EI
        gp = GaussianProcess(**copy.deepcopy(model_kwargs),
                             **copy.deepcopy(gp_kwargs))
        ei = EI(model=gp)
        acq_func_maximizer_kwargs["acquisition_function"] = ei
        ei_opt = LocalAndSortedRandomSearch(**acq_func_maximizer_kwargs)
        self.combinations.append((gp, ei, ei_opt, rh2epm_copula))

        # GP with log-scaled cost + LogEI
        gp = GaussianProcess(**copy.deepcopy(model_kwargs),
                             **copy.deepcopy(gp_kwargs))
        ei = LogEI(model=gp)
        acq_func_maximizer_kwargs["acquisition_function"] = ei
        ei_opt = LocalAndSortedRandomSearch(**acq_func_maximizer_kwargs)
        self.combinations.append((gp, ei, ei_opt, rh2epm_log_cost))

        # random forests: plain cost + EI, log cost + LogEI, copula + EI
        ei = EI(model=rf_no_log)
        acq_func_maximizer_kwargs["acquisition_function"] = ei
        ei_opt = LocalAndSortedRandomSearch(**acq_func_maximizer_kwargs)
        self.combinations.append((rf_no_log, ei, ei_opt, rh2epm_cost))

        ei = LogEI(model=rf_log)
        acq_func_maximizer_kwargs["acquisition_function"] = ei
        ei_opt = LocalAndSortedRandomSearch(**acq_func_maximizer_kwargs)
        self.combinations.append((rf_log, ei, ei_opt, rh2epm_log_cost))

        ei = EI(model=rf_no_log)
        acq_func_maximizer_kwargs["acquisition_function"] = ei
        ei_opt = LocalAndSortedRandomSearch(**acq_func_maximizer_kwargs)
        self.combinations.append((rf_no_log, ei, ei_opt, rh2epm_copula))

        self.num_acq_instances = len(acq_func_instances)
        self.best_observation = np.inf

        self.next_evaluations = []
Exemple #15
0
    def __init__(self, model_type: str = "gp_mcmc", **kwargs: typing.Any):
        """Configure a SMAC facade whose surrogate is a Gaussian process.

        Builds a kernel from a Matern part over continuous dimensions and a
        Hamming part over categorical ones (multiplied when both exist),
        scaled by a constant-amplitude kernel and extended with a
        white-noise term. ``model_type`` selects a plain GP ("gp") or one
        with MCMC-integrated kernel hyperparameters ("gp_mcmc", default).
        All other settings are taken from / written back into ``kwargs``
        before delegating to the parent facade. Raises ValueError for an
        unsupported model type or too many dimensions, and
        NotImplementedError when the scenario has instance features.
        """
        scenario = kwargs["scenario"]

        # Sobol sequences (the default initial design) are limited to
        # 21201 dimensions.
        if len(scenario.cs.get_hyperparameters()) <= 21201:
            kwargs["initial_design"] = kwargs.get("initial_design", SobolDesign)
        else:
            raise ValueError(
                'The default initial design "Sobol sequence" can only handle up to 21201 dimensions. '
                'Please use a different initial design, such as "the Latin Hypercube design".',
            )
        kwargs["runhistory2epm"] = kwargs.get("runhistory2epm", RunHistory2EPM4Cost)

        init_kwargs = kwargs.get("initial_design_kwargs", dict()) or dict()
        init_kwargs["n_configs_x_params"] = init_kwargs.get("n_configs_x_params", 8)
        init_kwargs["max_config_fracs"] = init_kwargs.get("max_config_fracs", 0.25)
        kwargs["initial_design_kwargs"] = init_kwargs

        # Only construct a model if the caller did not supply one.
        if kwargs.get("model") is None:

            model_kwargs = kwargs.get("model_kwargs", dict()) or dict()

            _, rng = get_rng(
                rng=kwargs.get("rng", None),
                run_id=kwargs.get("run_id", None),
                logger=None,
            )

            types, bounds = get_types(kwargs["scenario"].cs, instance_features=None)

            cov_amp = ConstantKernel(
                2.0,
                constant_value_bounds=(np.exp(-10), np.exp(2)),
                prior=LognormalPrior(mean=0.0, sigma=1.0, rng=rng),
            )

            # dimensions with type code 0 are treated as continuous,
            # everything else as categorical
            cont_dims = np.where(np.array(types) == 0)[0]
            cat_dims = np.where(np.array(types) != 0)[0]

            if len(cont_dims) > 0:
                exp_kernel = Matern(
                    np.ones([len(cont_dims)]),
                    [
                        (np.exp(-6.754111155189306), np.exp(0.0858637988771976))
                        for _ in range(len(cont_dims))
                    ],
                    nu=2.5,
                    operate_on=cont_dims,
                )

            if len(cat_dims) > 0:
                ham_kernel = HammingKernel(
                    np.ones([len(cat_dims)]),
                    [
                        (np.exp(-6.754111155189306), np.exp(0.0858637988771976))
                        for _ in range(len(cat_dims))
                    ],
                    operate_on=cat_dims,
                )

            # every dimension must be covered by exactly one of the kernels
            assert (len(cont_dims) + len(cat_dims)) == len(
                scenario.cs.get_hyperparameters()
            )

            noise_kernel = WhiteKernel(
                noise_level=1e-8,
                noise_level_bounds=(np.exp(-25), np.exp(2)),
                prior=HorseshoePrior(scale=0.1, rng=rng),
            )

            if len(cont_dims) > 0 and len(cat_dims) > 0:
                # both
                kernel = cov_amp * (exp_kernel * ham_kernel) + noise_kernel
            elif len(cont_dims) > 0 and len(cat_dims) == 0:
                # only cont
                kernel = cov_amp * exp_kernel + noise_kernel
            elif len(cont_dims) == 0 and len(cat_dims) > 0:
                # only categorical
                kernel = cov_amp * ham_kernel + noise_kernel
            else:
                raise ValueError()

            if model_type == "gp":
                model_class = GaussianProcess  # type: typing.Type[BaseModel]
                kwargs["model"] = model_class
                model_kwargs["kernel"] = kernel
                model_kwargs["normalize_y"] = True
                model_kwargs["seed"] = rng.randint(0, 2**20)
            elif model_type == "gp_mcmc":
                model_class = GaussianProcessMCMC
                kwargs["model"] = model_class
                kwargs["integrate_acquisition_function"] = True

                model_kwargs["kernel"] = kernel

                # round the walker count up to an even number
                n_mcmc_walkers = 3 * len(kernel.theta)
                if n_mcmc_walkers % 2 == 1:
                    n_mcmc_walkers += 1
                model_kwargs["n_mcmc_walkers"] = n_mcmc_walkers
                model_kwargs["chain_length"] = 250
                model_kwargs["burnin_steps"] = 250
                model_kwargs["normalize_y"] = True
                model_kwargs["seed"] = rng.randint(0, 2**20)
            else:
                raise ValueError("Unknown model type %s" % model_type)
            kwargs["model_kwargs"] = model_kwargs

        if kwargs.get("random_configuration_chooser") is None:
            random_config_chooser_kwargs = (
                kwargs.get(
                    "random_configuration_chooser_kwargs",
                    dict(),
                )
                or dict()
            )
            random_config_chooser_kwargs["prob"] = random_config_chooser_kwargs.get(
                "prob", 0.08447232371720552
            )
            kwargs["random_configuration_chooser_kwargs"] = random_config_chooser_kwargs

        if kwargs.get("acquisition_function_optimizer") is None:
            acquisition_function_optimizer_kwargs = (
                kwargs.get(
                    "acquisition_function_optimizer_kwargs",
                    dict(),
                )
                or dict()
            )
            acquisition_function_optimizer_kwargs["n_sls_iterations"] = 10
            kwargs[
                "acquisition_function_optimizer_kwargs"
            ] = acquisition_function_optimizer_kwargs

        # only 1 configuration per SMBO iteration
        intensifier_kwargs = kwargs.get("intensifier_kwargs", dict()) or dict()
        intensifier_kwargs["min_chall"] = 1
        kwargs["intensifier_kwargs"] = intensifier_kwargs
        scenario.intensification_percentage = 1e-10

        super().__init__(**kwargs)

        if self.solver.scenario.n_features > 0:
            raise NotImplementedError("BOGP cannot handle instances")

        self.logger.info(self.__class__)

        self.solver.scenario.acq_opt_challengers = 1000  # type: ignore[attr-defined] # noqa F821
        # activate predict incumbent
        self.solver.epm_chooser.predict_x_best = True
Exemple #16
0
def convert_data_for_epm(scenario: Scenario,
                         runhistory: RunHistory,
                         impute_inactive_parameters=False,
                         rng=None,
                         logger=None):
    """
    Converts data from a runhistory into EPM format.

    Parameters
    ----------
    scenario: Scenario
        smac.scenario.scenario.Scenario Object
    runhistory: RunHistory
        smac.runhistory.runhistory.RunHistory Object with all necessary data
    impute_inactive_parameters: bool
        whether to impute all inactive parameters in all configurations - this is needed for random forests, as they do not accept nan-values
    rng: np.random.RandomState, optional
        random number generator used for the imputation model;
        defaults to ``np.random.RandomState(42)``
    logger: logging.Logger, optional
        if given, the derived types and bounds are logged at debug level

    Returns
    -------
    X: np.array
        X matrix with configuration x features for all observed samples
    Y: np.array
        Y matrix with all observations
    types: np.array
        types of X cols -- necessary to train our RF implementation
    """
    if rng is None:
        rng = np.random.RandomState(42)

    if impute_inactive_parameters:
        # Random forests cannot handle nan-values for inactive parameters.
        runhistory = force_finite_runhistory(runhistory)

    types, bounds = get_types(scenario.cs, scenario.feature_array)
    if logger is not None:
        logger.debug("Types: " + str(types) + ", Bounds: " + str(bounds))

    num_params = len(scenario.cs.get_hyperparameters())

    if scenario.run_obj == "runtime":
        # if we log the performance data, the RFRImputator will already get
        # log transform data from the runhistory, so cutoff and threshold
        # are log10-transformed as well
        cutoff = np.log10(scenario.cutoff)
        threshold = np.log10(scenario.cutoff * scenario.par_factor)

        # The imputation model is only needed for censored runtime data,
        # so it is constructed on this branch only.
        model = RandomForestWithInstances(scenario.cs, types, bounds,
                                          rng.randint(MAXINT))
        imputor = RFRImputator(rng=rng,
                               cutoff=cutoff,
                               threshold=threshold,
                               model=model,
                               change_threshold=0.01,
                               max_iter=10)
        # TODO: Adapt runhistory2EPM object based on scenario
        rh2EPM = RunHistory2EPM4LogCost(scenario=scenario,
                                        num_params=num_params,
                                        success_states=[
                                            StatusType.SUCCESS,
                                        ],
                                        impute_censored_data=True,
                                        impute_state=[
                                            StatusType.TIMEOUT,
                                        ],
                                        imputor=imputor)
    else:
        rh2EPM = RunHistory2EPM4Cost(scenario=scenario,
                                     num_params=num_params,
                                     success_states=[
                                         StatusType.SUCCESS,
                                     ],
                                     impute_censored_data=False,
                                     impute_state=None)

    X, Y = rh2EPM.transform(runhistory)
    return X, Y, types
Example #17
0
    def validate_epm(
        self,
        config_mode: Union[str, typing.List[Configuration]] = 'def',
        instance_mode: Union[str, typing.List[str]] = 'test',
        repetitions: int = 1,
        runhistory: typing.Optional[RunHistory] = None,
        output_fn: typing.Optional[str] = None,
        reuse_epm: bool = True,
    ) -> RunHistory:
        """
        Use EPM to predict costs/runtimes for unknown config/inst-pairs.

        side effect: if output is specified, saves runhistory to specified
        output directory.

        Parameters
        ----------
        output_fn: str
            path to runhistory to be saved. if the suffix is not '.json', will
            be interpreted as directory and filename will be
            'validated_runhistory_EPM.json'
        config_mode: str or list<Configuration>
            string or directly a list of Configuration, string from [def, inc, def+inc, wallclock_time, cpu_time, all].
            time evaluates at cpu- or wallclock-timesteps of:
            [max_time/2^0, max_time/2^1, max_time/2^3, ..., default] with max_time being the highest recorded time
        instance_mode: str or list<str>
            what instances to use for validation, either from
            [train, test, train+test] or directly a list of instances
        repetitions: int
            number of repetitions in nondeterministic algorithms
        runhistory: RunHistory
            optional, RunHistory-object to reuse runs
        reuse_epm: bool
            if true (and if `self.epm`), reuse epm to validate runs

        Returns
        -------
        runhistory: RunHistory
            runhistory with predicted runs
        """
        # A runhistory with real runs is required whenever a new EPM has to
        # be trained, i.e. whenever no trained model can be reused.
        if not isinstance(runhistory, RunHistory) and (self.epm is None
                                                       or not reuse_epm):
            raise ValueError(
                "No runhistory specified for validating with EPM!")
        elif not reuse_epm or self.epm is None:
            # Create RandomForest
            types, bounds = get_types(
                self.scen.cs, self.scen.feature_array
            )  # type: ignore[attr-defined] # noqa F821
            epm = RandomForestWithInstances(
                configspace=self.scen.
                cs,  # type: ignore[attr-defined] # noqa F821
                types=types,
                bounds=bounds,
                instance_features=self.scen.feature_array,
                seed=self.rng.randint(MAXINT),
                ratio_features=1.0,
            )
            # Use imputor if objective is runtime
            imputor = None
            impute_state = None
            impute_censored_data = False
            if self.scen.run_obj == 'runtime':
                # CAPPED runs are treated as censored observations and get
                # imputed up to the PAR-penalized threshold.
                threshold = self.scen.cutoff * self.scen.par_factor  # type: ignore[attr-defined] # noqa F821
                imputor = RFRImputator(
                    rng=self.rng,
                    cutoff=self.scen.
                    cutoff,  # type: ignore[attr-defined] # noqa F821
                    threshold=threshold,
                    model=epm)
                impute_censored_data = True
                impute_state = [StatusType.CAPPED]
                success_states = [
                    StatusType.SUCCESS,
                ]
            else:
                # For non-runtime objectives, crashed/memout runs still carry
                # a cost and are accepted as training data.
                success_states = [
                    StatusType.SUCCESS, StatusType.CRASHED, StatusType.MEMOUT
                ]

            # Transform training data (from given rh)
            rh2epm = RunHistory2EPM4Cost(
                num_params=len(self.scen.cs.get_hyperparameters()
                               ),  # type: ignore[attr-defined] # noqa F821
                scenario=self.scen,
                rng=self.rng,
                impute_censored_data=impute_censored_data,
                imputor=imputor,
                impute_state=impute_state,
                success_states=success_states)
            assert runhistory is not None  # please mypy
            X, y = rh2epm.transform(runhistory)
            self.logger.debug("Training model with data of shape X: %s, y:%s",
                              str(X.shape), str(y.shape))
            # Train random forest
            epm.train(X, y)
        else:
            epm = typing.cast(RandomForestWithInstances, self.epm)

        # Predict desired runs
        runs, rh_epm = self._get_runs(config_mode, instance_mode, repetitions,
                                      runhistory)

        # One input row per run: the configuration vector, optionally
        # followed by the instance's feature vector.
        feature_array_size = len(self.scen.cs.get_hyperparameters()
                                 )  # type: ignore[attr-defined] # noqa F821
        if self.scen.feature_array is not None:
            feature_array_size += self.scen.feature_array.shape[1]

        X_pred = np.empty((len(runs), feature_array_size))
        for idx, run in enumerate(runs):
            if self.scen.feature_array is not None and run.inst is not None:
                X_pred[idx] = np.hstack([
                    convert_configurations_to_array([run.config])[0],
                    self.scen.feature_dict[run.inst]
                ])
            else:
                X_pred[idx] = convert_configurations_to_array([run.config])[0]
        self.logger.debug("Predicting desired %d runs, data has shape %s",
                          len(runs), str(X_pred.shape))

        y_pred = epm.predict(X_pred)
        # Keep the trained model so later calls with reuse_epm=True can
        # skip retraining.
        self.epm = epm

        # Add runs to runhistory
        # NOTE(review): only y_pred[0] is used here (presumably the predicted
        # means - verify against the EPM's predict() contract); the prediction
        # is stored as both cost and time of the artificial run.
        for run, pred in zip(runs, y_pred[0]):
            rh_epm.add(
                config=run.config,
                cost=float(pred),
                time=float(pred),
                status=StatusType.SUCCESS,
                instance_id=run.inst,
                seed=-1,
                additional_info={"additional_info": "ESTIMATED USING EPM!"})

        if output_fn:
            self._save_results(rh_epm,
                               output_fn,
                               backup_fn="validated_runhistory_EPM.json")
        return rh_epm
Example #18
0
    def __init__(
        self,
        scenario: Scenario,
        tae_runner: Optional[Union[Type[BaseRunner], Callable]] = None,
        tae_runner_kwargs: Optional[Dict] = None,
        runhistory: Optional[Union[Type[RunHistory], RunHistory]] = None,
        runhistory_kwargs: Optional[Dict] = None,
        intensifier: Optional[Type[AbstractRacer]] = None,
        intensifier_kwargs: Optional[Dict] = None,
        acquisition_function: Optional[
            Type[AbstractAcquisitionFunction]] = None,
        acquisition_function_kwargs: Optional[Dict] = None,
        integrate_acquisition_function: bool = False,
        acquisition_function_optimizer: Optional[
            Type[AcquisitionFunctionMaximizer]] = None,
        acquisition_function_optimizer_kwargs: Optional[Dict] = None,
        model: Optional[Type[AbstractEPM]] = None,
        model_kwargs: Optional[Dict] = None,
        runhistory2epm: Optional[Type[AbstractRunHistory2EPM]] = None,
        runhistory2epm_kwargs: Optional[Dict] = None,
        initial_design: Optional[Type[InitialDesign]] = None,
        initial_design_kwargs: Optional[Dict] = None,
        initial_configurations: Optional[List[Configuration]] = None,
        stats: Optional[Stats] = None,
        restore_incumbent: Optional[Configuration] = None,
        rng: Optional[Union[np.random.RandomState, int]] = None,
        smbo_class: Optional[Type[SMBO]] = None,
        run_id: Optional[int] = None,
        random_configuration_chooser: Optional[
            Type[RandomConfigurationChooser]] = None,
        random_configuration_chooser_kwargs: Optional[Dict] = None,
        dask_client: Optional[dask.distributed.Client] = None,
        n_jobs: Optional[int] = 1,
    ):
        """
        Constructor

        Parameters
        ----------
        scenario : ~smac.scenario.scenario.Scenario
            Scenario object
        tae_runner : ~smac.tae.base.BaseRunner or callable
            Callable or implementation of
            :class:`~smac.tae.base.BaseRunner`. In case a
            callable is passed it will be wrapped by
            :class:`~smac.tae.execute_func.ExecuteTAFuncDict`.
            If not set, it will be initialized with the
            :class:`~smac.tae.execute_ta_run_old.ExecuteTARunOld`.
        tae_runner_kwargs: Optional[Dict]
            arguments passed to constructor of '~tae_runner'
        runhistory : RunHistory
            runhistory to store all algorithm runs
        runhistory_kwargs : Optional[Dict]
            arguments passed to constructor of runhistory.
            We strongly advise against changing the aggregation function,
            since it will break some code assumptions
        intensifier : Intensifier
            intensification object to issue a racing to decide the current
            incumbent
        intensifier_kwargs: Optional[Dict]
            arguments passed to the constructor of '~intensifier'
        acquisition_function : ~smac.optimizer.acquisition.AbstractAcquisitionFunction
            Class or object that implements the :class:`~smac.optimizer.acquisition.AbstractAcquisitionFunction`.
            Will use :class:`~smac.optimizer.acquisition.EI` or :class:`~smac.optimizer.acquisition.LogEI` if not set.
            `~acquisition_function_kwargs` is passed to the class constructor.
        acquisition_function_kwargs : Optional[Dict]
            dictionary to pass specific arguments to ~acquisition_function
        integrate_acquisition_function : bool, default=False
            Whether to integrate the acquisition function. Works only with models which can sample their
            hyperparameters (i.e. GaussianProcessMCMC).
        acquisition_function_optimizer : ~smac.optimizer.ei_optimization.AcquisitionFunctionMaximizer
            Object that implements the :class:`~smac.optimizer.ei_optimization.AcquisitionFunctionMaximizer`.
            Will use :class:`smac.optimizer.ei_optimization.LocalAndSortedRandomSearch` if not set.
        acquisition_function_optimizer_kwargs: Optional[Dict]
            Arguments passed to constructor of '~acquisition_function_optimizer'
        model : AbstractEPM
            Model that implements train() and predict(). Will use a
            :class:`~smac.epm.rf_with_instances.RandomForestWithInstances` if not set.
        model_kwargs : Optional[Dict]
            Arguments passed to constructor of '~model'
        runhistory2epm : ~smac.runhistory.runhistory2epm.RunHistory2EMP
            Object that implements the AbstractRunHistory2EPM. If None,
            will use :class:`~smac.runhistory.runhistory2epm.RunHistory2EPM4Cost`
            if objective is cost or
            :class:`~smac.runhistory.runhistory2epm.RunHistory2EPM4LogCost`
            if objective is runtime.
        runhistory2epm_kwargs: Optional[Dict]
            Arguments passed to the constructor of '~runhistory2epm'
        initial_design : InitialDesign
            initial sampling design
        initial_design_kwargs: Optional[Dict]
            arguments passed to constructor of `~initial_design'
        initial_configurations : List[Configuration]
            list of initial configurations for initial design --
            cannot be used together with initial_design
        stats : Stats
            optional stats object
        rng : np.random.RandomState
            Random number generator
        restore_incumbent : Configuration
            incumbent used if restoring to previous state
        smbo_class : ~smac.optimizer.smbo.SMBO
            Class implementing the SMBO interface which will be used to
            instantiate the optimizer class.
        run_id : int (optional)
            Run ID will be used as subfolder for output_dir. If no ``run_id`` is given, a random ``run_id`` will be
            chosen.
        random_configuration_chooser : ~smac.optimizer.random_configuration_chooser.RandomConfigurationChooser
            How often to choose a random configuration during the intensification procedure.
        random_configuration_chooser_kwargs : Optional[Dict]
            arguments of constructor for '~random_configuration_chooser'
        dask_client : dask.distributed.Client
            User-created dask client, can be used to start a dask cluster and then attach SMAC to it.
        n_jobs : int, optional
            Number of jobs. If > 1 or -1, this creates a dask client if ``dask_client`` is ``None``. Will
            be ignored if ``dask_client`` is not ``None``.
            If ``None``, this value will be set to 1, if ``-1``, this will be set to the number of cpu cores.
        """
        self.logger = logging.getLogger(self.__module__ + "." +
                                        self.__class__.__name__)

        self.scenario = scenario
        self.output_dir = ""
        if not restore_incumbent:
            # restore_incumbent is used by the CLI interface which provides a method for restoring a SMAC run given an
            # output directory. This is the default path.
            # initial random number generator
            run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger)
            self.output_dir = create_output_directory(scenario, run_id)
        elif scenario.output_dir is not None:  # type: ignore[attr-defined] # noqa F821
            run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger)
            # output-directory is created in CLI when restoring from a
            # folder. calling the function again in the facade results in two
            # folders being created: run_X and run_X.OLD. if we are
            # restoring, the output-folder exists already and we omit creating it,
            # but set the self-output_dir to the dir.
            # necessary because we want to write traj to new output-dir in CLI.
            self.output_dir = cast(str, scenario.output_dir_for_this_run
                                   )  # type: ignore[attr-defined] # noqa F821
        rng = cast(np.random.RandomState, rng)

        if (scenario.deterministic is
                True  # type: ignore[attr-defined] # noqa F821
                and getattr(scenario, 'tuner_timeout', None) is None
                and scenario.run_obj ==
                'quality'  # type: ignore[attr-defined] # noqa F821
            ):
            self.logger.info(
                'Optimizing a deterministic scenario for quality without a tuner timeout - will make '
                'SMAC deterministic and only evaluate one configuration per iteration!'
            )
            scenario.intensification_percentage = 1e-10  # type: ignore[attr-defined] # noqa F821
            scenario.min_chall = 1  # type: ignore[attr-defined] # noqa F821

        scenario.write()

        # initialize stats object
        if stats:
            self.stats = stats
        else:
            self.stats = Stats(scenario)

        if self.scenario.run_obj == "runtime" and not self.scenario.transform_y == "LOG":  # type: ignore[attr-defined] # noqa F821
            self.logger.warning(
                "Runtime as objective automatically activates log(y) transformation"
            )
            self.scenario.transform_y = "LOG"  # type: ignore[attr-defined] # noqa F821

        # initialize empty runhistory
        runhistory_def_kwargs = {}
        if runhistory_kwargs is not None:
            runhistory_def_kwargs.update(runhistory_kwargs)
        if runhistory is None:
            runhistory = RunHistory(**runhistory_def_kwargs)
        elif inspect.isclass(runhistory):
            runhistory = runhistory(
                **runhistory_def_kwargs)  # type: ignore[operator] # noqa F821
        elif isinstance(runhistory, RunHistory):
            pass
        else:
            raise ValueError(
                'runhistory has to be a class or an object of RunHistory')

        rand_conf_chooser_kwargs = {'rng': rng}
        if random_configuration_chooser_kwargs is not None:
            rand_conf_chooser_kwargs.update(
                random_configuration_chooser_kwargs)
        if random_configuration_chooser is None:
            if 'prob' not in rand_conf_chooser_kwargs:
                rand_conf_chooser_kwargs[
                    'prob'] = scenario.rand_prob  # type: ignore[attr-defined] # noqa F821
            random_configuration_chooser_instance = (
                ChooserProb(**rand_conf_chooser_kwargs
                            )  # type: ignore[arg-type] # noqa F821
            )  # type: RandomConfigurationChooser
        elif inspect.isclass(random_configuration_chooser):
            random_configuration_chooser_instance = random_configuration_chooser(
                **
                rand_conf_chooser_kwargs)  # type: ignore[arg-type] # noqa F821
        elif not isinstance(random_configuration_chooser,
                            RandomConfigurationChooser):
            raise ValueError(
                "random_configuration_chooser has to be"
                " a class or object of RandomConfigurationChooser")

        # reset random number generator in config space to draw different
        # random configurations with each seed given to SMAC
        scenario.cs.seed(
            rng.randint(MAXINT))  # type: ignore[attr-defined] # noqa F821

        # initial Trajectory Logger
        traj_logger = TrajLogger(output_dir=self.output_dir, stats=self.stats)

        # initial EPM
        types, bounds = get_types(
            scenario.cs,
            scenario.feature_array)  # type: ignore[attr-defined] # noqa F821
        model_def_kwargs = {
            'types': types,
            'bounds': bounds,
            'instance_features': scenario.feature_array,
            'seed': rng.randint(MAXINT),
            'pca_components': scenario.PCA_DIM,
        }
        if model_kwargs is not None:
            model_def_kwargs.update(model_kwargs)
        if model is None:
            for key, value in {
                    'log_y': scenario.transform_y
                    in ["LOG",
                        "LOGS"],  # type: ignore[attr-defined] # noqa F821
                    'num_trees': scenario.
                    rf_num_trees,  # type: ignore[attr-defined] # noqa F821
                    'do_bootstrapping': scenario.
                    rf_do_bootstrapping,  # type: ignore[attr-defined] # noqa F821
                    'ratio_features': scenario.
                    rf_ratio_features,  # type: ignore[attr-defined] # noqa F821
                    'min_samples_split': scenario.
                    rf_min_samples_split,  # type: ignore[attr-defined] # noqa F821
                    'min_samples_leaf': scenario.
                    rf_min_samples_leaf,  # type: ignore[attr-defined] # noqa F821
                    'max_depth': scenario.
                    rf_max_depth,  # type: ignore[attr-defined] # noqa F821
            }.items():
                if key not in model_def_kwargs:
                    model_def_kwargs[key] = value
            model_def_kwargs[
                'configspace'] = self.scenario.cs  # type: ignore[attr-defined] # noqa F821
            model_instance = (
                RandomForestWithInstances(
                    **model_def_kwargs)  # type: ignore[arg-type] # noqa F821
            )  # type: AbstractEPM
        elif inspect.isclass(model):
            model_def_kwargs[
                'configspace'] = self.scenario.cs  # type: ignore[attr-defined] # noqa F821
            model_instance = model(
                **model_def_kwargs)  # type: ignore[arg-type] # noqa F821
        else:
            raise TypeError("Model not recognized: %s" % (type(model)))

        # initial acquisition function
        acq_def_kwargs = {'model': model_instance}
        if acquisition_function_kwargs is not None:
            acq_def_kwargs.update(acquisition_function_kwargs)
        if acquisition_function is None:
            if scenario.transform_y in [
                    "LOG", "LOGS"
            ]:  # type: ignore[attr-defined] # noqa F821
                acquisition_function_instance = (
                    LogEI(**
                          acq_def_kwargs)  # type: ignore[arg-type] # noqa F821
                )  # type: AbstractAcquisitionFunction
            else:
                acquisition_function_instance = EI(
                    **acq_def_kwargs)  # type: ignore[arg-type] # noqa F821
        elif inspect.isclass(acquisition_function):
            acquisition_function_instance = acquisition_function(
                **acq_def_kwargs)
        else:
            raise TypeError(
                "Argument acquisition_function must be None or an object implementing the "
                "AbstractAcquisitionFunction, not %s." %
                type(acquisition_function))
        if integrate_acquisition_function:
            acquisition_function_instance = IntegratedAcquisitionFunction(
                acquisition_function=acquisition_function_instance,
                **acq_def_kwargs)

        # initialize optimizer on acquisition function
        acq_func_opt_kwargs = {
            'acquisition_function': acquisition_function_instance,
            'config_space':
            scenario.cs,  # type: ignore[attr-defined] # noqa F821
            'rng': rng,
        }
        if acquisition_function_optimizer_kwargs is not None:
            acq_func_opt_kwargs.update(acquisition_function_optimizer_kwargs)
        if acquisition_function_optimizer is None:
            for key, value in {
                    'max_steps': scenario.
                    sls_max_steps,  # type: ignore[attr-defined] # noqa F821
                    'n_steps_plateau_walk': scenario.
                    sls_n_steps_plateau_walk,  # type: ignore[attr-defined] # noqa F821
            }.items():
                if key not in acq_func_opt_kwargs:
                    acq_func_opt_kwargs[key] = value
            acquisition_function_optimizer_instance = (
                LocalAndSortedRandomSearch(
                    **
                    acq_func_opt_kwargs)  # type: ignore[arg-type] # noqa F821
            )  # type: AcquisitionFunctionMaximizer
        elif inspect.isclass(acquisition_function_optimizer):
            acquisition_function_optimizer_instance = acquisition_function_optimizer(
                **acq_func_opt_kwargs)  # type: ignore[arg-type] # noqa F821
        else:
            raise TypeError(
                "Argument acquisition_function_optimizer must be None or an object implementing the "
                "AcquisitionFunctionMaximizer, but is '%s'" %
                type(acquisition_function_optimizer))

        # initialize tae_runner
        # First case, if tae_runner is None, the target algorithm is a call
        # string in the scenario file
        tae_def_kwargs = {
            'stats': self.stats,
            'run_obj': scenario.run_obj,
            'par_factor':
            scenario.par_factor,  # type: ignore[attr-defined] # noqa F821
            'cost_for_crash':
            scenario.cost_for_crash,  # type: ignore[attr-defined] # noqa F821
            'abort_on_first_run_crash': scenario.
            abort_on_first_run_crash  # type: ignore[attr-defined] # noqa F821
        }
        if tae_runner_kwargs is not None:
            tae_def_kwargs.update(tae_runner_kwargs)

        if 'ta' not in tae_def_kwargs:
            tae_def_kwargs[
                'ta'] = scenario.ta  # type: ignore[attr-defined] # noqa F821
        if tae_runner is None:
            tae_def_kwargs[
                'ta'] = scenario.ta  # type: ignore[attr-defined] # noqa F821
            tae_runner_instance = (
                ExecuteTARunOld(
                    **tae_def_kwargs)  # type: ignore[arg-type] # noqa F821
            )  # type: BaseRunner
        elif inspect.isclass(tae_runner):
            tae_runner_instance = cast(BaseRunner, tae_runner(
                **tae_def_kwargs))  # type: ignore[arg-type] # noqa F821
        elif callable(tae_runner):
            tae_def_kwargs['ta'] = tae_runner
            tae_def_kwargs[
                'use_pynisher'] = scenario.limit_resources  # type: ignore[attr-defined] # noqa F821
            tae_runner_instance = ExecuteTAFuncDict(
                **tae_def_kwargs)  # type: ignore[arg-type] # noqa F821
        else:
            raise TypeError(
                "Argument 'tae_runner' is %s, but must be "
                "either None, a callable or an object implementing "
                "BaseRunner. Passing 'None' will result in the "
                "creation of target algorithm runner based on the "
                "call string in the scenario file." % type(tae_runner))

        # In case of a parallel run, wrap the single worker in a parallel
        # runner
        if n_jobs is None or n_jobs == 1:
            _n_jobs = 1
        elif n_jobs == -1:
            _n_jobs = joblib.cpu_count()
        elif n_jobs > 0:
            _n_jobs = n_jobs
        else:
            raise ValueError(
                'Number of tasks must be positive, None or -1, but is %s' %
                str(n_jobs))
        if _n_jobs > 1 or dask_client is not None:
            tae_runner_instance = DaskParallelRunner(
                tae_runner_instance,
                n_workers=_n_jobs,
                output_directory=self.output_dir,
                dask_client=dask_client,
            )

        # Check that overall objective and tae objective are the same
        # TODO: remove these two ignores once the scenario object knows all its attributes!
        if tae_runner_instance.run_obj != scenario.run_obj:  # type: ignore[union-attr] # noqa F821
            raise ValueError(
                "Objective for the target algorithm runner and "
                "the scenario must be the same, but are '%s' and "
                "'%s'" %
                (tae_runner_instance.run_obj,
                 scenario.run_obj))  # type: ignore[union-attr] # noqa F821

        # initialize intensification
        intensifier_def_kwargs = {
            'stats': self.stats,
            'traj_logger': traj_logger,
            'rng': rng,
            'instances':
            scenario.train_insts,  # type: ignore[attr-defined] # noqa F821
            'cutoff':
            scenario.cutoff,  # type: ignore[attr-defined] # noqa F821
            'deterministic':
            scenario.deterministic,  # type: ignore[attr-defined] # noqa F821
            'run_obj_time': scenario.run_obj ==
            "runtime",  # type: ignore[attr-defined] # noqa F821
            'instance_specifics': scenario.
            instance_specific,  # type: ignore[attr-defined] # noqa F821
            'adaptive_capping_slackfactor': scenario.
            intens_adaptive_capping_slackfactor,  # type: ignore[attr-defined] # noqa F821
            'min_chall':
            scenario.intens_min_chall  # type: ignore[attr-defined] # noqa F821
        }

        if isinstance(intensifier, Intensifier) \
                or (intensifier is not None and inspect.isclass(intensifier) and issubclass(intensifier, Intensifier)):
            intensifier_def_kwargs[
                'always_race_against'] = scenario.cs.get_default_configuration(
                )  # type: ignore[attr-defined] # noqa F821
            intensifier_def_kwargs[
                'use_ta_time_bound'] = scenario.use_ta_time  # type: ignore[attr-defined] # noqa F821
            intensifier_def_kwargs[
                'minR'] = scenario.minR  # type: ignore[attr-defined] # noqa F821
            intensifier_def_kwargs[
                'maxR'] = scenario.maxR  # type: ignore[attr-defined] # noqa F821
        if intensifier_kwargs is not None:
            intensifier_def_kwargs.update(intensifier_kwargs)

        if intensifier is None:
            intensifier_instance = (
                Intensifier(**intensifier_def_kwargs
                            )  # type: ignore[arg-type] # noqa F821
            )  # type: AbstractRacer
        elif inspect.isclass(intensifier):
            intensifier_instance = intensifier(
                **intensifier_def_kwargs)  # type: ignore[arg-type] # noqa F821
        else:
            raise TypeError(
                "Argument intensifier must be None or an object implementing the AbstractRacer, but is '%s'"
                % type(intensifier))

        # initial design
        if initial_design is not None and initial_configurations is not None:
            raise ValueError(
                "Either use initial_design or initial_configurations; but not both"
            )

        init_design_def_kwargs = {
            'cs': scenario.cs,  # type: ignore[attr-defined] # noqa F821
            'traj_logger': traj_logger,
            'rng': rng,
            'ta_run_limit':
            scenario.ta_run_limit,  # type: ignore[attr-defined] # noqa F821
            'configs': initial_configurations,
            'n_configs_x_params': 0,
            'max_config_fracs': 0.0
        }
        if initial_design_kwargs is not None:
            init_design_def_kwargs.update(initial_design_kwargs)
        if initial_configurations is not None:
            initial_design_instance = InitialDesign(**init_design_def_kwargs)
        elif initial_design is None:
            if scenario.initial_incumbent == "DEFAULT":  # type: ignore[attr-defined] # noqa F821
                init_design_def_kwargs['max_config_fracs'] = 0.0
                initial_design_instance = DefaultConfiguration(
                    **init_design_def_kwargs)
            elif scenario.initial_incumbent == "RANDOM":  # type: ignore[attr-defined] # noqa F821
                init_design_def_kwargs['max_config_fracs'] = 0.0
                initial_design_instance = RandomConfigurations(
                    **init_design_def_kwargs)
            elif scenario.initial_incumbent == "LHD":  # type: ignore[attr-defined] # noqa F821
                initial_design_instance = LHDesign(**init_design_def_kwargs)
            elif scenario.initial_incumbent == "FACTORIAL":  # type: ignore[attr-defined] # noqa F821
                initial_design_instance = FactorialInitialDesign(
                    **init_design_def_kwargs)
            elif scenario.initial_incumbent == "SOBOL":  # type: ignore[attr-defined] # noqa F821
                initial_design_instance = SobolDesign(**init_design_def_kwargs)
            else:
                raise ValueError("Don't know what kind of initial_incumbent "
                                 "'%s' is" % scenario.initial_incumbent
                                 )  # type: ignore[attr-defined] # noqa F821
        elif inspect.isclass(initial_design):
            initial_design_instance = initial_design(**init_design_def_kwargs)
        else:
            raise TypeError(
                "Argument initial_design must be None or an object implementing the InitialDesign, but is '%s'"
                % type(initial_design))

        # if we log the performance data,
        # the RFRImputator will already get
        # log transform data from the runhistory
        if scenario.transform_y in [
                "LOG", "LOGS"
        ]:  # type: ignore[attr-defined] # noqa F821
            cutoff = np.log(np.nanmin([
                np.inf, np.float_(scenario.cutoff)
            ]))  # type: ignore[attr-defined] # noqa F821
            threshold = cutoff + np.log(
                scenario.par_factor)  # type: ignore[attr-defined] # noqa F821
        else:
            cutoff = np.nanmin([np.inf, np.float_(scenario.cutoff)
                                ])  # type: ignore[attr-defined] # noqa F821
            threshold = cutoff * scenario.par_factor  # type: ignore[attr-defined] # noqa F821
        num_params = len(scenario.cs.get_hyperparameters()
                         )  # type: ignore[attr-defined] # noqa F821
        imputor = RFRImputator(rng=rng,
                               cutoff=cutoff,
                               threshold=threshold,
                               model=model_instance,
                               change_threshold=0.01,
                               max_iter=2)

        r2e_def_kwargs = {
            'scenario': scenario,
            'num_params': num_params,
            'success_states': [
                StatusType.SUCCESS,
            ],
            'impute_censored_data': True,
            'impute_state': [
                StatusType.CAPPED,
            ],
            'imputor': imputor,
            'scale_perc': 5
        }
        if scenario.run_obj == 'quality':
            r2e_def_kwargs.update({
                'success_states':
                [StatusType.SUCCESS, StatusType.CRASHED, StatusType.MEMOUT],
                'impute_censored_data':
                False,
                'impute_state':
                None,
            })

        if isinstance(
                intensifier_instance,
            (SuccessiveHalving, Hyperband)) and scenario.run_obj == "quality":
            r2e_def_kwargs.update({
                'success_states': [
                    StatusType.SUCCESS,
                    StatusType.CRASHED,
                    StatusType.MEMOUT,
                    StatusType.DONOTADVANCE,
                ],
                'consider_for_higher_budgets_state': [
                    StatusType.DONOTADVANCE,
                    StatusType.TIMEOUT,
                    StatusType.CRASHED,
                    StatusType.MEMOUT,
                ],
            })

        if runhistory2epm_kwargs is not None:
            r2e_def_kwargs.update(runhistory2epm_kwargs)
        if runhistory2epm is None:
            if scenario.run_obj == 'runtime':
                rh2epm = (
                    RunHistory2EPM4LogCost(
                        **r2e_def_kwargs)  # type: ignore[arg-type] # noqa F821
                )  # type: AbstractRunHistory2EPM
            elif scenario.run_obj == 'quality':
                if scenario.transform_y == "NONE":  # type: ignore[attr-defined] # noqa F821
                    rh2epm = RunHistory2EPM4Cost(
                        **r2e_def_kwargs)  # type: ignore[arg-type] # noqa F821
                elif scenario.transform_y == "LOG":  # type: ignore[attr-defined] # noqa F821
                    rh2epm = RunHistory2EPM4LogCost(
                        **r2e_def_kwargs)  # type: ignore[arg-type] # noqa F821
                elif scenario.transform_y == "LOGS":  # type: ignore[attr-defined] # noqa F821
                    rh2epm = RunHistory2EPM4LogScaledCost(
                        **r2e_def_kwargs)  # type: ignore[arg-type] # noqa F821
                elif scenario.transform_y == "INVS":  # type: ignore[attr-defined] # noqa F821
                    rh2epm = RunHistory2EPM4InvScaledCost(
                        **r2e_def_kwargs)  # type: ignore[arg-type] # noqa F821
            else:
                raise ValueError('Unknown run objective: %s. Should be either '
                                 'quality or runtime.' % self.scenario.run_obj)
        elif inspect.isclass(runhistory2epm):
            rh2epm = runhistory2epm(
                **r2e_def_kwargs)  # type: ignore[arg-type] # noqa F821
        else:
            raise TypeError(
                "Argument runhistory2epm must be None or an object implementing the RunHistory2EPM, but is '%s'"
                % type(runhistory2epm))

        smbo_args = {
            'scenario': scenario,
            'stats': self.stats,
            'initial_design': initial_design_instance,
            'runhistory': runhistory,
            'runhistory2epm': rh2epm,
            'intensifier': intensifier_instance,
            'num_run': run_id,
            'model': model_instance,
            'acq_optimizer': acquisition_function_optimizer_instance,
            'acquisition_func': acquisition_function_instance,
            'rng': rng,
            'restore_incumbent': restore_incumbent,
            'random_configuration_chooser':
            random_configuration_chooser_instance,
            'tae_runner': tae_runner_instance,
        }  # type: Dict[str, Any]

        if smbo_class is None:
            self.solver = SMBO(**
                               smbo_args)  # type: ignore[arg-type] # noqa F821
        else:
            self.solver = smbo_class(
                **smbo_args)  # type: ignore[arg-type] # noqa F821
Exemple #19
0
    def get_kernel(self, config_space):
        """
        Build the Gaussian process kernel for ``config_space``.

        Code is very similar to those from :class:`smac.SMAC4BO`.
        Length scale were chosen from SMAC where they were obtained by
        hyperparameter optimization made in https://arxiv.org/abs/1908.06674.

        :params config_space: SMAC configuration space with parameters.
        :type config_space: :class:`smac.configspace.ConfigurationSpace`
        :returns: combined kernel ``amplitude * parameter_kernel(s) + noise``.
        :raises ValueError: if ``config_space`` contains no hyperparameters
            (previously this fell through and raised an obscure
            ``UnboundLocalError`` on the final ``return``).
        """
        # First of all get type of kernel
        kernel_cls, nu = self._get_kernel_class_and_nu()
        # get types for config_space (bounds are not needed for the kernel)
        types, _ = get_types(config_space, instance_features=None)
        # get random state for priors
        rng = self._get_random_state()

        # create kernel to hold variance
        cov_amp = smac.epm.gp_kernels.ConstantKernel(
            2.0,
            constant_value_bounds=(np.exp(-10), np.exp(2)),
            prior=LognormalPrior(mean=0.0, sigma=1.0, rng=rng),
        )

        # Understand information about parameters: a type of 0 marks a
        # continuous dimension, any non-zero value a categorical one.
        cont_dims = np.where(np.array(types) == 0)[0]
        cat_dims = np.where(np.array(types) != 0)[0]

        # bounds for length scale from https://arxiv.org/abs/1908.06674
        lslims = (np.exp(-6.754111155189306), np.exp(0.0858637988771976))

        # create kernel for continuous parameters
        if len(cont_dims) > 0:
            exp_kwargs = {
                "length_scale": np.ones([len(cont_dims)]),
                "length_scale_bounds": [lslims for _ in range(len(cont_dims))],
                "operate_on": cont_dims,
                "prior": None,
                "has_conditions": False,
            }
            if nu is not None:
                # only Matern-style kernels take a smoothness parameter
                exp_kwargs["nu"] = nu
            exp_kernel = kernel_cls(**exp_kwargs)

        # kernel for categorical parameters
        if len(cat_dims) > 0:
            ham_kernel = smac.epm.gp_kernels.HammingKernel(
                length_scale=np.ones([len(cat_dims)]),
                length_scale_bounds=[lslims for _ in range(len(cat_dims))],
                operate_on=cat_dims,
            )

        # create noise kernel
        noise_kernel = smac.epm.gp_kernels.WhiteKernel(
            noise_level=1e-8,
            noise_level_bounds=(np.exp(-25), np.exp(2)),
            prior=HorseshoePrior(scale=0.1, rng=rng),
        )

        # create final kernel as combination
        if len(cont_dims) > 0 and len(cat_dims) > 0:
            # both continuous and categorical parameters
            kernel = cov_amp * (exp_kernel * ham_kernel) + noise_kernel
        elif len(cont_dims) > 0 and len(cat_dims) == 0:
            # only continuous parameters
            kernel = cov_amp * exp_kernel + noise_kernel
        elif len(cont_dims) == 0 and len(cat_dims) > 0:
            # only categorical parameters
            kernel = cov_amp * ham_kernel + noise_kernel
        else:
            # previously `kernel` was left unbound here, which made the
            # `return` below crash with an UnboundLocalError
            raise ValueError(
                "Cannot construct a kernel for an empty configuration space")

        return kernel
 def _get_types(self, scenario, features):
     """Return (types, bounds) for *scenario*/*features* as numpy arrays.

     Types are cast to unsigned integers and bounds are kept as an
     object array, which is the representation the SMAC EPMs expect.
     """
     raw_types, raw_bounds = get_types(scenario, features)
     return (np.array(raw_types, dtype='uint'),
             np.array(raw_bounds, dtype='object'))
    def __init__(self,
                 scenario: Scenario,
                 tae_runner: Optional[Union[Type[ExecuteTARun], Callable]] = None,
                 tae_runner_kwargs: Optional[dict] = None,
                 runhistory: Optional[Union[Type[RunHistory], RunHistory]] = None,
                 runhistory_kwargs: Optional[dict] = None,
                 intensifier: Optional[Type[Intensifier]] = None,
                 intensifier_kwargs: Optional[dict] = None,
                 acquisition_function: Optional[Type[AbstractAcquisitionFunction]] = None,
                 acquisition_function_kwargs: Optional[dict] = None,
                 integrate_acquisition_function: bool = False,
                 acquisition_function_optimizer: Optional[Type[AcquisitionFunctionMaximizer]] = None,
                 acquisition_function_optimizer_kwargs: Optional[dict] = None,
                 model: Optional[Type[AbstractEPM]] = None,
                 model_kwargs: Optional[dict] = None,
                 runhistory2epm: Optional[Type[AbstractRunHistory2EPM]] = None,
                 runhistory2epm_kwargs: Optional[dict] = None,
                 initial_design: Optional[Type[InitialDesign]] = None,
                 initial_design_kwargs: Optional[dict] = None,
                 initial_configurations: Optional[List[Configuration]] = None,
                 stats: Optional[Stats] = None,
                 restore_incumbent: Optional[Configuration] = None,
                 rng: Optional[Union[np.random.RandomState, int]] = None,
                 smbo_class: Optional[SMBO] = None,
                 run_id: Optional[int] = None,
                 random_configuration_chooser: Optional[Type[RandomConfigurationChooser]] = None,
                 random_configuration_chooser_kwargs: Optional[dict] = None
                 ):
        """
        Constructor

        Parameters
        ----------
        scenario : ~smac.scenario.scenario.Scenario
            Scenario object
        tae_runner : ~smac.tae.execute_ta_run.ExecuteTARun or callable
            Callable or implementation of
            :class:`~smac.tae.execute_ta_run.ExecuteTARun`. In case a
            callable is passed it will be wrapped by
            :class:`~smac.tae.execute_func.ExecuteTAFuncDict`.
            If not set, it will be initialized with the
            :class:`~smac.tae.execute_ta_run_old.ExecuteTARunOld`.
        tae_runner_kwargs: Optional[dict]
            arguments passed to constructor of '~tae_runner'
        runhistory : RunHistory
            runhistory to store all algorithm runs
        runhistory_kwargs : Optional[dict]
            arguments passed to constructor of runhistory.
            We strongly advise against changing the aggregation function,
            since it will break some code assumptions
        intensifier : Intensifier
            intensification object to issue a racing to decide the current
            incumbent
        intensifier_kwargs: Optional[dict]
            arguments passed to the constructor of '~intensifier'
        acquisition_function : ~smac.optimizer.acquisition.AbstractAcquisitionFunction
            Class or object that implements the :class:`~smac.optimizer.acquisition.AbstractAcquisitionFunction`.
            Will use :class:`~smac.optimizer.acquisition.EI` or :class:`~smac.optimizer.acquisition.LogEI` if not set.
            `~acquisition_function_kwargs` is passed to the class constructor.
        acquisition_function_kwargs : Optional[dict]
            dictionary to pass specific arguments to ~acquisition_function
        integrate_acquisition_function : bool, default=False
            Whether to integrate the acquisition function. Works only with models which can sample their
            hyperparameters (i.e. GaussianProcessMCMC).
        acquisition_function_optimizer : ~smac.optimizer.ei_optimization.AcquisitionFunctionMaximizer
            Object that implements the :class:`~smac.optimizer.ei_optimization.AcquisitionFunctionMaximizer`.
            Will use :class:`smac.optimizer.ei_optimization.InterleavedLocalAndRandomSearch` if not set.
        acquisition_function_optimizer_kwargs: Optional[dict]
            Arguments passed to constructor of '~acquisition_function_optimizer'
        model : AbstractEPM
            Model that implements train() and predict(). Will use a
            :class:`~smac.epm.rf_with_instances.RandomForestWithInstances` if not set.
        model_kwargs : Optional[dict]
            Arguments passed to constructor of '~model'
        runhistory2epm : ~smac.runhistory.runhistory2epm.RunHistory2EMP
            Object that implements the AbstractRunHistory2EPM. If None,
            will use :class:`~smac.runhistory.runhistory2epm.RunHistory2EPM4Cost`
            if objective is cost or
            :class:`~smac.runhistory.runhistory2epm.RunHistory2EPM4LogCost`
            if objective is runtime.
        runhistory2epm_kwargs: Optional[dict]
            Arguments passed to the constructor of '~runhistory2epm'
        initial_design : InitialDesign
            initial sampling design
        initial_design_kwargs: Optional[dict]
            arguments passed to constructor of `~initial_design'
        initial_configurations : List[Configuration]
            list of initial configurations for initial design --
            cannot be used together with initial_design
        stats : Stats
            optional stats object
        rng : np.random.RandomState
            Random number generator
        restore_incumbent : Configuration
            incumbent used if restoring to previous state
        smbo_class : ~smac.optimizer.smbo.SMBO
            Class implementing the SMBO interface which will be used to
            instantiate the optimizer class.
        run_id : int (optional)
            Run ID will be used as subfolder for output_dir. If no ``run_id`` is given, a random ``run_id`` will be
            chosen.
        random_configuration_chooser : ~smac.optimizer.random_configuration_chooser.RandomConfigurationChooser
            How often to choose a random configuration during the intensification procedure.
        random_configuration_chooser_kwargs : Optional[dict]
            arguments of constructor for '~random_configuration_chooser'

        """
        self.logger = logging.getLogger(
            self.__module__ + "." + self.__class__.__name__)

        # cost aggregation over instances/seeds, shared by the runhistory
        # and the SMBO loop below
        aggregate_func = average_cost

        self.scenario = scenario
        self.output_dir = ""
        if not restore_incumbent:
            # restore_incumbent is used by the CLI interface which provides a method for restoring a SMAC run given an
            # output directory. This is the default path.
            # initial random number generator
            # run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger)
            # run_id=datetime.now().strftime("%Y%m%d%H%M%S%f")
            # NOTE(review): with get_rng commented out, `rng` keeps whatever
            # the caller passed (possibly None or an int). The call to
            # rng.randint(MAXINT) further below assumes a RandomState --
            # confirm all callers pass one on this path.
            run_id=uuid1()
            self.output_dir = create_output_directory(scenario, run_id)   # FIXME: run_id is a UUID here, not the int parameter
        elif scenario.output_dir is not None:
            run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger)
            # output-directory is created in CLI when restoring from a
            # folder. calling the function again in the facade results in two
            # folders being created: run_X and run_X.OLD. if we are
            # restoring, the output-folder exists already and we omit creating it,
            # but set the self-output_dir to the dir.
            # necessary because we want to write traj to new output-dir in CLI.
            self.output_dir = scenario.output_dir_for_this_run

        if (
            scenario.deterministic is True
            and getattr(scenario, 'tuner_timeout', None) is None
            and scenario.run_obj == 'quality'
        ):
            self.logger.info('Optimizing a deterministic scenario for quality without a tuner timeout - will make '
                             'SMAC deterministic and only evaluate one configuration per iteration!')
            # effectively disables intensification and challenger racing
            scenario.intensification_percentage = 1e-10
            scenario.min_chall = 1

        # persist the (possibly mutated) scenario to the output directory
        scenario.write()

        # initialize stats object
        if stats:
            self.stats = stats
        else:
            self.stats = Stats(scenario,file_system=scenario.file_system)

        if self.scenario.run_obj == "runtime" and not self.scenario.transform_y == "LOG":
            self.logger.warning("Runtime as objective automatically activates log(y) transformation")
            self.scenario.transform_y = "LOG"

        # initialize empty runhistory
        runhistory_def_kwargs = {'aggregate_func': aggregate_func}
        if runhistory_kwargs is not None:
            runhistory_def_kwargs.update(runhistory_kwargs)
        if runhistory is None:
            runhistory = RunHistory(**runhistory_def_kwargs,file_system=scenario.file_system)
        elif inspect.isclass(runhistory):
            runhistory = runhistory(**runhistory_def_kwargs)
        else:
            # an already-instantiated runhistory: only patch in the
            # aggregation function if the caller left it unset
            if runhistory.aggregate_func is None:
                runhistory.aggregate_func = aggregate_func

        rand_conf_chooser_kwargs = {
           'rng': rng
        }
        if random_configuration_chooser_kwargs is not None:
            rand_conf_chooser_kwargs.update(random_configuration_chooser_kwargs)
        if random_configuration_chooser is None:
            if 'prob' not in rand_conf_chooser_kwargs:
                rand_conf_chooser_kwargs['prob'] = scenario.rand_prob
            random_configuration_chooser = ChooserProb(**rand_conf_chooser_kwargs)
        elif inspect.isclass(random_configuration_chooser):
            random_configuration_chooser = random_configuration_chooser(**rand_conf_chooser_kwargs)
        elif not isinstance(random_configuration_chooser, RandomConfigurationChooser):
            raise ValueError("random_configuration_chooser has to be"
                             " a class or object of RandomConfigurationChooser")

        # reset random number generator in config space to draw different
        # random configurations with each seed given to SMAC
        # NOTE(review): rng must be a np.random.RandomState by this point
        # (see the restore_incumbent branch above) -- verify.
        scenario.cs.seed(rng.randint(MAXINT))

        # initial Trajectory Logger
        traj_logger = TrajLogger(output_dir=self.output_dir, stats=self.stats,file_system=scenario.file_system)

        # initial EPM
        types, bounds = get_types(scenario.cs, scenario.feature_array)
        model_def_kwargs = {
            'types': types,
            'bounds': bounds,
            'instance_features': scenario.feature_array,
            'seed': rng.randint(MAXINT),
            'pca_components': scenario.PCA_DIM,
        }
        if model_kwargs is not None:
            model_def_kwargs.update(model_kwargs)
        if model is None:
            # default model: random forest configured from scenario options;
            # user-supplied model_kwargs take precedence over these defaults
            for key, value in {
                'log_y': scenario.transform_y in ["LOG", "LOGS"],
                'num_trees': scenario.rf_num_trees,
                'do_bootstrapping': scenario.rf_do_bootstrapping,
                'ratio_features': scenario.rf_ratio_features,
                'min_samples_split': scenario.rf_min_samples_split,
                'min_samples_leaf': scenario.rf_min_samples_leaf,
                'max_depth': scenario.rf_max_depth,
            }.items():
                if key not in model_def_kwargs:
                    model_def_kwargs[key] = value
            model_def_kwargs['configspace'] = self.scenario.cs
            model = RandomForestWithInstances(**model_def_kwargs)
        elif inspect.isclass(model):
            model_def_kwargs['configspace'] = self.scenario.cs
            model = model(**model_def_kwargs)
        else:
            raise TypeError(
                "Model not recognized: %s" %(type(model)))

        # initial acquisition function
        acq_def_kwargs = {'model': model}
        if acquisition_function_kwargs is not None:
            acq_def_kwargs.update(acquisition_function_kwargs)
        if acquisition_function is None:
            # LogEI pairs with log-transformed cost models, EI otherwise
            if scenario.transform_y in ["LOG", "LOGS"]:
                acquisition_function = LogEI(**acq_def_kwargs)
            else:
                acquisition_function = EI(**acq_def_kwargs)
        elif inspect.isclass(acquisition_function):
            acquisition_function = acquisition_function(**acq_def_kwargs)
        else:
            raise TypeError(
                "Argument acquisition_function must be None or an object implementing the "
                "AbstractAcquisitionFunction, not %s."
                % type(acquisition_function)
            )
        if integrate_acquisition_function:
            # wrap to marginalize over model hyperparameter samples
            # (requires a model that can sample them, e.g. GaussianProcessMCMC)
            acquisition_function = IntegratedAcquisitionFunction(
                acquisition_function=acquisition_function,
                **acq_def_kwargs
            )

        # initialize optimizer on acquisition function
        acq_func_opt_kwargs = {
            'acquisition_function': acquisition_function,
            'config_space': scenario.cs,
            'rng': rng,
            }
        if acquisition_function_optimizer_kwargs is not None:
            acq_func_opt_kwargs.update(acquisition_function_optimizer_kwargs)
        if acquisition_function_optimizer is None:
            for key, value in {
                'max_steps': scenario.sls_max_steps,
                'n_steps_plateau_walk': scenario.sls_n_steps_plateau_walk,
            }.items():
                if key not in acq_func_opt_kwargs:
                    acq_func_opt_kwargs[key] = value
            acquisition_function_optimizer = InterleavedLocalAndRandomSearch(**acq_func_opt_kwargs)
        elif inspect.isclass(acquisition_function_optimizer):
            acquisition_function_optimizer = acquisition_function_optimizer(**acq_func_opt_kwargs)
        else:
            raise TypeError(
                "Argument acquisition_function_optimizer must be None or an object implementing the "
                "AcquisitionFunctionMaximizer, but is '%s'" %
                type(acquisition_function_optimizer)
            )

        # initialize tae_runner
        # First case, if tae_runner is None, the target algorithm is a call
        # string in the scenario file
        tae_def_kwargs = {
            'stats': self.stats,
            'run_obj': scenario.run_obj,
            'runhistory': runhistory,
            'par_factor': scenario.par_factor,
            'cost_for_crash': scenario.cost_for_crash,
            'abort_on_first_run_crash': scenario.abort_on_first_run_crash
            }
        if tae_runner_kwargs is not None:
            tae_def_kwargs.update(tae_runner_kwargs)
        if 'ta' not in tae_def_kwargs:
            tae_def_kwargs['ta'] = scenario.ta
        if tae_runner is None:
            # NOTE(review): this overwrites any 'ta' supplied via
            # tae_runner_kwargs with scenario.ta -- confirm intended.
            tae_def_kwargs['ta'] = scenario.ta
            tae_runner = ExecuteTARunOld(**tae_def_kwargs)
        elif inspect.isclass(tae_runner):
            tae_runner = tae_runner(**tae_def_kwargs)
        elif callable(tae_runner):
            tae_def_kwargs['ta'] = tae_runner
            tae_runner = ExecuteTAFuncDict(**tae_def_kwargs)
        else:
            raise TypeError("Argument 'tae_runner' is %s, but must be "
                            "either None, a callable or an object implementing "
                            "ExecuteTaRun. Passing 'None' will result in the "
                            "creation of target algorithm runner based on the "
                            "call string in the scenario file."
                            % type(tae_runner))

        # Check that overall objective and tae objective are the same
        if tae_runner.run_obj != scenario.run_obj:
            raise ValueError("Objective for the target algorithm runner and "
                             "the scenario must be the same, but are '%s' and "
                             "'%s'" % (tae_runner.run_obj, scenario.run_obj))

        # initialize intensification
        intensifier_def_kwargs = {
            'tae_runner': tae_runner,
            'stats': self.stats,
            'traj_logger': traj_logger,
            'rng': rng,
            'instances': scenario.train_insts,
            'cutoff': scenario.cutoff,
            'deterministic': scenario.deterministic,
            'run_obj_time': scenario.run_obj == "runtime",
            'always_race_against': scenario.cs.get_default_configuration()
                                   if scenario.always_race_default else None,
            'use_ta_time_bound': scenario.use_ta_time,
            'instance_specifics': scenario.instance_specific,
            'minR': scenario.minR,
            'maxR': scenario.maxR,
            'adaptive_capping_slackfactor': scenario.intens_adaptive_capping_slackfactor,
            'min_chall': scenario.intens_min_chall
            }
        if intensifier_kwargs is not None:
            intensifier_def_kwargs.update(intensifier_kwargs)
        if intensifier is None:
            intensifier = Intensifier(**intensifier_def_kwargs)
        elif inspect.isclass(intensifier):
            intensifier = intensifier(**intensifier_def_kwargs)
        else:
            raise TypeError(
                "Argument intensifier must be None or an object implementing the Intensifier, but is '%s'" %
                type(intensifier)
            )

        # initial design
        if initial_design is not None and initial_configurations is not None:
            raise ValueError(
                "Either use initial_design or initial_configurations; but not both")

        init_design_def_kwargs = {
            'tae_runner': tae_runner,
            'scenario': scenario,
            'stats': self.stats,
            'traj_logger': traj_logger,
            'runhistory': runhistory,
            'rng': rng,
            'configs': initial_configurations,
            'intensifier': intensifier,
            'aggregate_func': aggregate_func,
            'n_configs_x_params': 0,
            'max_config_fracs': 0.0
            }
        if initial_design_kwargs is not None:
            init_design_def_kwargs.update(initial_design_kwargs)
        if initial_configurations is not None:
            # explicit configurations take precedence over any design class
            initial_design = InitialDesign(**init_design_def_kwargs)
        elif initial_design is None:
            # choose the design from the scenario's initial_incumbent string
            if scenario.initial_incumbent == "DEFAULT":
                init_design_def_kwargs['max_config_fracs'] = 0.0
                initial_design = DefaultConfiguration(**init_design_def_kwargs)
            elif scenario.initial_incumbent == "RANDOM":
                init_design_def_kwargs['max_config_fracs'] = 0.0
                initial_design = RandomConfigurations(**init_design_def_kwargs)
            elif scenario.initial_incumbent == "LHD":
                initial_design = LHDesign(**init_design_def_kwargs)
            elif scenario.initial_incumbent == "FACTORIAL":
                initial_design = FactorialInitialDesign(**init_design_def_kwargs)
            elif scenario.initial_incumbent == "SOBOL":
                initial_design = SobolDesign(**init_design_def_kwargs)
            else:
                raise ValueError("Don't know what kind of initial_incumbent "
                                 "'%s' is" % scenario.initial_incumbent)
        elif inspect.isclass(initial_design):
            initial_design = initial_design(**init_design_def_kwargs)
        else:
            raise TypeError(
                "Argument initial_design must be None or an object implementing the InitialDesign, but is '%s'" %
                type(initial_design)
            )

        # if we log the performance data,
        # the RFRImputator will already get
        # log transform data from the runhistory
        if scenario.transform_y in ["LOG", "LOGS"]:
            cutoff = np.log(np.nanmin([np.inf, np.float_(scenario.cutoff)]))
            threshold = cutoff + np.log(scenario.par_factor)
        else:
            cutoff = np.nanmin([np.inf, np.float_(scenario.cutoff)])
            threshold = cutoff * scenario.par_factor
        num_params = len(scenario.cs.get_hyperparameters())
        # imputes right-censored (capped) runtime observations using the EPM
        imputor = RFRImputator(rng=rng,
                               cutoff=cutoff,
                               threshold=threshold,
                               model=model,
                               change_threshold=0.01,
                               max_iter=2)

        r2e_def_kwargs = {
            'scenario': scenario,
            'num_params': num_params,
            'success_states': [StatusType.SUCCESS, ],
            'impute_censored_data': True,
            'impute_state': [StatusType.CAPPED, ],
            'imputor': imputor,
            'scale_perc': 5
            }
        if scenario.run_obj == 'quality':
            # quality runs have no censoring; crashed runs still carry a cost
            r2e_def_kwargs.update({
                'success_states': [StatusType.SUCCESS, StatusType.CRASHED],
                'impute_censored_data': False,
                'impute_state': None,
            })
        if runhistory2epm_kwargs is not None:
            r2e_def_kwargs.update(runhistory2epm_kwargs)
        if runhistory2epm is None:
            if scenario.run_obj == 'runtime':
                runhistory2epm = RunHistory2EPM4LogCost(**r2e_def_kwargs)
            elif scenario.run_obj == 'quality':
                # NOTE(review): an unknown transform_y value leaves
                # runhistory2epm as None here -- confirm upstream validation.
                if scenario.transform_y == "NONE":
                    runhistory2epm = RunHistory2EPM4Cost(**r2e_def_kwargs)
                elif scenario.transform_y == "LOG":
                    runhistory2epm = RunHistory2EPM4LogCost(**r2e_def_kwargs)
                elif scenario.transform_y == "LOGS":
                    runhistory2epm = RunHistory2EPM4LogScaledCost(**r2e_def_kwargs)
                elif scenario.transform_y == "INVS":
                    runhistory2epm = RunHistory2EPM4InvScaledCost(**r2e_def_kwargs)
            else:
                raise ValueError('Unknown run objective: %s. Should be either '
                                 'quality or runtime.' % self.scenario.run_obj)
        elif inspect.isclass(runhistory2epm):
            runhistory2epm = runhistory2epm(**r2e_def_kwargs)
        else:
            raise TypeError(
                "Argument runhistory2epm must be None or an object implementing the RunHistory2EPM, but is '%s'" %
                type(runhistory2epm)
            )

        smbo_args = {
            'scenario': scenario,
            'stats': self.stats,
            'initial_design': initial_design,
            'runhistory': runhistory,
            'runhistory2epm': runhistory2epm,
            'intensifier': intensifier,
            'aggregate_func': aggregate_func,
            # NOTE(review): run_id is a UUID on the default path (see above)
            'num_run': run_id,
            'model': model,
            'acq_optimizer': acquisition_function_optimizer,
            'acquisition_func': acquisition_function,
            'rng': rng,
            'restore_incumbent': restore_incumbent,
            'random_configuration_chooser': random_configuration_chooser
        }

        if smbo_class is None:
            self.solver = SMBO(**smbo_args)
        else:
            self.solver = smbo_class(**smbo_args)
Exemple #22
0
    def __init__(self, model_type='gp_mcmc', **kwargs):
        """
        Constructor
        see ~smac.facade.smac_facade for documentation

        Sets up a Gaussian-process based Bayesian-optimization facade:
        installs a Sobol initial design, builds a GP (or GP-MCMC) model with
        a Matern/Hamming product kernel, and injects BO-tuned defaults for
        the random-configuration chooser, acquisition-function optimizer and
        intensifier. Every default is only applied when the caller did not
        supply the corresponding kwarg.

        Parameters
        ----------
        model_type : str
            Either 'gp' (maximum-likelihood GP) or 'gp_mcmc'
            (GP with MCMC-marginalized hyperparameters).
        **kwargs
            Forwarded to the parent facade constructor.

        Raises
        ------
        ValueError
            If the configuration space contains no hyperparameters, or if
            ``model_type`` is unknown.
        NotImplementedError
            If the scenario uses instance features (not supported here).
        """
        scenario = kwargs['scenario']

        kwargs['initial_design'] = kwargs.get('initial_design', SobolDesign)
        kwargs['runhistory2epm'] = kwargs.get('runhistory2epm',
                                              RunHistory2EPM4Cost)

        # BO-style initial design: 8 configurations per hyperparameter,
        # capped at 25% of the overall configuration budget.
        init_kwargs = kwargs.get('initial_design_kwargs', dict())
        init_kwargs['n_configs_x_params'] = init_kwargs.get(
            'n_configs_x_params', 8)
        init_kwargs['max_config_fracs'] = init_kwargs.get(
            'max_config_fracs', 0.25)
        kwargs['initial_design_kwargs'] = init_kwargs

        if kwargs.get('model') is None:
            from smac.epm.gp_kernels import ConstantKernel, Matern, WhiteKernel, HammingKernel

            model_kwargs = kwargs.get('model_kwargs', dict())

            _, rng = get_rng(rng=kwargs.get("rng", None),
                             run_id=kwargs.get("run_id", None),
                             logger=None)

            types, bounds = get_types(kwargs['scenario'].cs,
                                      instance_features=None)

            # Overall signal-amplitude kernel with a log-normal prior.
            cov_amp = ConstantKernel(
                2.0,
                constant_value_bounds=(np.exp(-10), np.exp(2)),
                prior=LognormalPrior(mean=0.0, sigma=1.0, rng=rng),
            )

            # get_types encodes continuous hyperparameters as 0 and
            # categorical ones by their number of choices.
            cont_dims = np.nonzero(types == 0)[0]
            cat_dims = np.nonzero(types != 0)[0]

            if len(cont_dims) > 0:
                # Matern 5/2 kernel on the continuous dimensions only.
                exp_kernel = Matern(
                    np.ones([len(cont_dims)]),
                    [(np.exp(-6.754111155189306), np.exp(0.0858637988771976))
                     for _ in range(len(cont_dims))],
                    nu=2.5,
                    operate_on=cont_dims,
                )

            if len(cat_dims) > 0:
                # Hamming kernel on the categorical dimensions only.
                ham_kernel = HammingKernel(
                    np.ones([len(cat_dims)]),
                    [(np.exp(-6.754111155189306), np.exp(0.0858637988771976))
                     for _ in range(len(cat_dims))],
                    operate_on=cat_dims,
                )

            noise_kernel = WhiteKernel(
                noise_level=1e-8,
                noise_level_bounds=(np.exp(-25), np.exp(2)),
                prior=HorseshoePrior(scale=0.1, rng=rng),
            )

            if len(cont_dims) > 0 and len(cat_dims) > 0:
                # both continuous and categorical dimensions
                kernel = cov_amp * (exp_kernel * ham_kernel) + noise_kernel
            elif len(cont_dims) > 0 and len(cat_dims) == 0:
                # only continuous dimensions
                kernel = cov_amp * exp_kernel + noise_kernel
            elif len(cont_dims) == 0 and len(cat_dims) > 0:
                # only categorical dimensions
                kernel = cov_amp * ham_kernel + noise_kernel
            else:
                # Both index sets empty: the configuration space holds no
                # hyperparameters at all.
                raise ValueError(
                    'The configuration space must contain at least one '
                    'hyperparameter to construct a kernel.')

            if model_type == "gp":
                model_class = GaussianProcess
                kwargs['model'] = model_class
                model_kwargs['kernel'] = kernel
                model_kwargs['normalize_y'] = True
                model_kwargs['seed'] = rng.randint(0, 2**20)
            elif model_type == "gp_mcmc":
                model_class = GaussianProcessMCMC
                kwargs['model'] = model_class
                kwargs['integrate_acquisition_function'] = True

                model_kwargs['kernel'] = kernel

                # Ensemble samplers require an even number of walkers.
                n_mcmc_walkers = 3 * len(kernel.theta)
                if n_mcmc_walkers % 2 == 1:
                    n_mcmc_walkers += 1
                model_kwargs['n_mcmc_walkers'] = n_mcmc_walkers
                model_kwargs['chain_length'] = 250
                model_kwargs['burnin_steps'] = 250
                model_kwargs['normalize_y'] = True
                model_kwargs['seed'] = rng.randint(0, 2**20)
            else:
                raise ValueError('Unknown model type %s' % model_type)
            kwargs['model_kwargs'] = model_kwargs

        if kwargs.get('random_configuration_chooser') is None:
            random_config_chooser_kwargs = kwargs.get(
                'random_configuration_chooser_kwargs', dict())
            random_config_chooser_kwargs[
                'prob'] = random_config_chooser_kwargs.get(
                    'prob', 0.08447232371720552)
            kwargs[
                'random_configuration_chooser_kwargs'] = random_config_chooser_kwargs

        if kwargs.get('acquisition_function_optimizer') is None:
            acquisition_function_optimizer_kwargs = kwargs.get(
                'acquisition_function_optimizer_kwargs', dict())
            acquisition_function_optimizer_kwargs['n_sls_iterations'] = 10
            kwargs[
                'acquisition_function_optimizer_kwargs'] = acquisition_function_optimizer_kwargs

        # only 1 configuration per SMBO iteration
        intensifier_kwargs = kwargs.get('intensifier_kwargs', dict())
        intensifier_kwargs['min_chall'] = 1
        kwargs['intensifier_kwargs'] = intensifier_kwargs
        scenario.intensification_percentage = 1e-10

        super().__init__(**kwargs)

        if self.solver.scenario.n_features > 0:
            raise NotImplementedError("BOGP cannot handle instances")

        self.logger.info(self.__class__)

        self.solver.scenario.acq_opt_challengers = 1000
        # activate predict incumbent
        self.solver.predict_incumbent = True
Exemple #23
0
    def _get_mean_var_time(self, validator, traj, use_epm, rh):
        """Estimate cost mean/variance along a trajectory.

        Parameters
        ----------
        validator: Validator
            validator (smac-based); its trajectory is overwritten with `traj`
        traj: List[Configuraton]
            trajectory to set in validator
        use_epm: bool
            validated or not (no need to use epm if validated)
        rh: RunHistory
            runhistory providing observed costs / EPM training data

        Returns
        -------
        mean, var

        times: List[float]
            times to plot (x-values)
        configs

        """
        # TODO kinda important: docstrings, what is this function doing?
        if validator:
            validator.traj = traj  # set trajectory
        time, configs = [], []

        if use_epm and not self.block_epm:
            # Collect timestamps and incumbents from the trajectory.
            time = [step["wallclock_time"] for step in traj]
            configs = [step["incumbent"] for step in traj]

            self.logger.debug(
                "Using %d samples (%d distinct) from trajectory.", len(time),
                len(set(configs)))

            # Initialize EPM
            if validator.epm:  # not log as validator epm is trained on cost, not log cost
                epm = validator.epm
            else:
                self.logger.debug(
                    "No EPM passed! Training new one from runhistory.")
                # Train random forest and transform training data (from given rh)
                # Not using validator because we want to plot uncertainties
                rh2epm = RunHistory2EPM4Cost(num_params=len(
                    self.scenario.cs.get_hyperparameters()),
                                             scenario=self.scenario)
                X, y = rh2epm.transform(rh)
                self.logger.debug(
                    "Training model with data of shape X: %s, y: %s",
                    str(X.shape), str(y.shape))

                feats = self.scenario.feature_array
                types, bounds = get_types(self.scenario.cs, feats)
                epm = RandomForestWithInstances(
                    self.scenario.cs,
                    types=types,
                    bounds=bounds,
                    seed=self.rng.randint(MAXINT),
                    instance_features=feats,
                    ratio_features=1.0)
                epm.train(X, y)
            config_array = convert_configurations_to_array(configs)
            mean, var = epm.predict_marginalized_over_instances(config_array)
            var = np.zeros(mean.shape)
            # We don't want to show the uncertainty of the model but uncertainty over multiple optimizer runs
            # This variance is computed in an outer loop.
        else:
            mean, var = [], []
            for step in traj:
                time.append(step["wallclock_time"])
                configs.append(step["incumbent"])
                costs = _cost(configs[-1], rh,
                              rh.get_runs_for_config(configs[-1]))
                if costs:
                    mean.append(np.mean(costs))
                    var.append(0)  # No variance over instances
                else:
                    # No observed cost for this incumbent: drop its timestamp
                    # (note: the config itself is intentionally kept).
                    time.pop()
            mean = np.array(mean).reshape(-1, 1)
            var = np.array(var).reshape(-1, 1)
        return mean, var, time, configs
Exemple #24
0
    def __init__(
            self,
            scenario: Scenario,
            # TODO: once we drop python3.4 add type hint
            # typing.Union[ExecuteTARun, callable]
            tae_runner=None,
            runhistory: RunHistory = None,
            intensifier: Intensifier = None,
            acquisition_function: AbstractAcquisitionFunction = None,
            model: AbstractEPM = None,
            runhistory2epm: AbstractRunHistory2EPM = None,
            initial_design: InitialDesign = None,
            initial_configurations: typing.List[Configuration] = None,
            stats: Stats = None,
            rng: np.random.RandomState = None,
            run_id: int = 1):
        """Construct the EPILS facade.

        Wires together all optimizer components (stats, runhistory, EPM,
        acquisition function + local search, target-algorithm runner,
        intensifier, initial design and the runhistory-to-EPM converter),
        filling in a sensible default for every argument left as ``None``,
        and finally creates the ``EPILS_Solver`` stored in ``self.solver``.

        Parameters
        ----------
        scenario : Scenario
            Run-specific settings (config space, objective, budgets, ...).
        tae_runner : ExecuteTARun or callable, optional
            ``None`` builds a runner from the scenario's call string; a
            callable is wrapped in ``ExecuteTAFuncDict``; an ``ExecuteTARun``
            instance is used as-is.
        runhistory : RunHistory, optional
            Existing runhistory; a fresh one is created if ``None``.
        intensifier : Intensifier, optional
            Racing component; built from scenario settings if ``None``.
        acquisition_function : AbstractAcquisitionFunction, optional
            Defaults to ``LogEI`` for runtime scenarios, ``EI`` otherwise.
        model : AbstractEPM, optional
            Defaults to ``RandomForestWithInstances``.
        runhistory2epm : AbstractRunHistory2EPM, optional
            Converter to EPM training data; chosen by ``scenario.run_obj``.
        initial_design : InitialDesign, optional
            Mutually exclusive with ``initial_configurations``.
        initial_configurations : list of Configuration, optional
            Explicit configurations to evaluate first.
        stats : Stats, optional
            Statistics object; created from the scenario if ``None``.
        rng : np.random.RandomState, optional
            Random number generator.
        run_id : int
            Identifier used for the output directory.

        Raises
        ------
        TypeError
            If ``tae_runner`` is neither ``None``, a callable, nor an
            ``ExecuteTARun`` instance.
        ValueError
            If the runner's objective differs from the scenario's, if both
            ``initial_design`` and ``initial_configurations`` are given, if
            ``scenario.initial_incumbent`` is unknown, or if
            ``scenario.run_obj`` is neither 'quality' nor 'runtime'.
        """
        self.logger = logging.getLogger(self.__module__ + "." +
                                        self.__class__.__name__)

        aggregate_func = average_cost
        self.runhistory = None
        self.trajectory = None

        # initialize stats object
        if stats:
            self.stats = stats
        else:
            self.stats = Stats(scenario, file_system=scenario.file_system)

        self.output_dir = create_output_directory(scenario, run_id)
        scenario.write()

        # initialize empty runhistory
        if runhistory is None:
            runhistory = RunHistory(aggregate_func=aggregate_func,
                                    file_system=scenario.file_system)
        # inject aggr_func if necessary
        if runhistory.aggregate_func is None:
            runhistory.aggregate_func = aggregate_func

        # initial random number generator
        num_run, rng = self._get_rng(rng=rng)

        # reset random number generator in config space to draw different
        # random configurations with each seed given to SMAC
        scenario.cs.seed(rng.randint(MAXINT))

        # initial Trajectory Logger
        traj_logger = TrajLogger(output_dir=self.output_dir,
                                 stats=self.stats,
                                 file_system=scenario.file_system)

        # initial EPM
        types, bounds = get_types(scenario.cs, scenario.feature_array)
        if model is None:
            model = RandomForestWithInstances(
                configspace=scenario.cs,
                types=types,
                bounds=bounds,
                instance_features=scenario.feature_array,
                seed=rng.randint(MAXINT),
                pca_components=scenario.PCA_DIM,
                num_trees=scenario.rf_num_trees,
                do_bootstrapping=scenario.rf_do_bootstrapping,
                ratio_features=scenario.rf_ratio_features,
                min_samples_split=scenario.rf_min_samples_split,
                min_samples_leaf=scenario.rf_min_samples_leaf,
                max_depth=scenario.rf_max_depth,
            )
        # initial acquisition function
        if acquisition_function is None:
            if scenario.run_obj == "runtime":
                # runtime is modeled in log space, hence LogEI
                acquisition_function = LogEI(model=model)
            else:
                acquisition_function = EI(model=model)
        # inject model if necessary
        if acquisition_function.model is None:
            acquisition_function.model = model

        # initialize optimizer on acquisition function
        local_search = LocalSearch(
            acquisition_function,
            scenario.cs,
            max_steps=scenario.sls_max_steps,
            n_steps_plateau_walk=scenario.sls_n_steps_plateau_walk)

        # initialize tae_runner
        # First case, if tae_runner is None, the target algorithm is a call
        # string in the scenario file
        if tae_runner is None:
            tae_runner = ExecuteTARunOld(
                ta=scenario.ta,
                stats=self.stats,
                run_obj=scenario.run_obj,
                runhistory=runhistory,
                par_factor=scenario.par_factor,
                cost_for_crash=scenario.cost_for_crash)
        # Second case, the tae_runner is a function to be optimized
        elif callable(tae_runner):
            tae_runner = ExecuteTAFuncDict(
                ta=tae_runner,
                stats=self.stats,
                run_obj=scenario.run_obj,
                memory_limit=scenario.memory_limit,
                runhistory=runhistory,
                par_factor=scenario.par_factor,
                cost_for_crash=scenario.cost_for_crash)
        # Third case, if it is an ExecuteTaRun we can simply use the
        # instance. Otherwise, the next check raises an exception
        elif not isinstance(tae_runner, ExecuteTARun):
            raise TypeError("Argument 'tae_runner' is %s, but must be "
                            "either a callable or an instance of "
                            "ExecuteTaRun. Passing 'None' will result in the "
                            "creation of target algorithm runner based on the "
                            "call string in the scenario file." %
                            type(tae_runner))

        # Check that overall objective and tae objective are the same
        if tae_runner.run_obj != scenario.run_obj:
            raise ValueError("Objective for the target algorithm runner and "
                             "the scenario must be the same, but are '%s' and "
                             "'%s'" % (tae_runner.run_obj, scenario.run_obj))

        # inject stats if necessary
        if tae_runner.stats is None:
            tae_runner.stats = self.stats
        # inject runhistory if necessary
        if tae_runner.runhistory is None:
            tae_runner.runhistory = runhistory
        # inject cost_for_crash
        if tae_runner.crash_cost != scenario.cost_for_crash:
            tae_runner.crash_cost = scenario.cost_for_crash

        # initialize intensification
        if intensifier is None:
            intensifier = Intensifier(
                tae_runner=tae_runner,
                stats=self.stats,
                traj_logger=traj_logger,
                rng=rng,
                instances=scenario.train_insts,
                cutoff=scenario.cutoff,
                deterministic=scenario.deterministic,
                run_obj_time=scenario.run_obj == "runtime",
                always_race_against=scenario.cs.get_default_configuration()
                if scenario.always_race_default else None,
                instance_specifics=scenario.instance_specific,
                minR=scenario.minR,
                maxR=scenario.maxR,
                adaptive_capping_slackfactor=scenario.
                intens_adaptive_capping_slackfactor,
                min_chall=scenario.intens_min_chall)
        # inject deps if necessary
        if intensifier.tae_runner is None:
            intensifier.tae_runner = tae_runner
        if intensifier.stats is None:
            intensifier.stats = self.stats
        if intensifier.traj_logger is None:
            intensifier.traj_logger = traj_logger

        # initial design
        if initial_design is not None and initial_configurations is not None:
            raise ValueError(
                "Either use initial_design or initial_configurations; but not both"
            )

        if initial_configurations is not None:
            initial_design = InitialDesign(tae_runner=tae_runner,
                                           scenario=scenario,
                                           stats=self.stats,
                                           traj_logger=traj_logger,
                                           runhistory=runhistory,
                                           rng=rng,
                                           configs=initial_configurations,
                                           intensifier=intensifier,
                                           aggregate_func=aggregate_func)
        elif initial_design is None:
            if scenario.initial_incumbent == "DEFAULT":
                initial_design = DefaultConfiguration(
                    tae_runner=tae_runner,
                    scenario=scenario,
                    stats=self.stats,
                    traj_logger=traj_logger,
                    runhistory=runhistory,
                    rng=rng,
                    intensifier=intensifier,
                    aggregate_func=aggregate_func,
                    max_config_fracs=0.0)
            elif scenario.initial_incumbent == "RANDOM":
                initial_design = RandomConfigurations(
                    tae_runner=tae_runner,
                    scenario=scenario,
                    stats=self.stats,
                    traj_logger=traj_logger,
                    runhistory=runhistory,
                    rng=rng,
                    intensifier=intensifier,
                    aggregate_func=aggregate_func,
                    max_config_fracs=0.0)
            else:
                raise ValueError("Don't know what kind of initial_incumbent "
                                 "'%s' is" % scenario.initial_incumbent)
        # inject deps if necessary
        if initial_design.tae_runner is None:
            initial_design.tae_runner = tae_runner
        if initial_design.scenario is None:
            initial_design.scenario = scenario
        if initial_design.stats is None:
            initial_design.stats = self.stats
        if initial_design.traj_logger is None:
            initial_design.traj_logger = traj_logger

        # initial conversion of runhistory into EPM data
        if runhistory2epm is None:

            num_params = len(scenario.cs.get_hyperparameters())
            if scenario.run_obj == "runtime":

                # if we log the performance data,
                # the RFRImputator will already get
                # log transform data from the runhistory
                cutoff = np.log(scenario.cutoff)
                threshold = np.log(scenario.cutoff * scenario.par_factor)

                imputor = RFRImputator(rng=rng,
                                       cutoff=cutoff,
                                       threshold=threshold,
                                       model=model,
                                       change_threshold=0.01,
                                       max_iter=2)

                runhistory2epm = RunHistory2EPM4LogCost(
                    scenario=scenario,
                    num_params=num_params,
                    success_states=[
                        StatusType.SUCCESS,
                    ],
                    impute_censored_data=True,
                    impute_state=[
                        StatusType.CAPPED,
                    ],
                    imputor=imputor)

            elif scenario.run_obj == 'quality':
                runhistory2epm = RunHistory2EPM4Cost(
                    scenario=scenario,
                    num_params=num_params,
                    success_states=[
                        StatusType.SUCCESS,
                    ],
                    impute_censored_data=False,
                    impute_state=None)

            else:
                # BUGFIX: use the local `scenario` here; `self.scenario` is
                # never assigned in this constructor, so referencing it
                # raised AttributeError instead of the intended ValueError.
                raise ValueError('Unknown run objective: %s. Should be either '
                                 'quality or runtime.' % scenario.run_obj)

        # inject scenario if necessary:
        if runhistory2epm.scenario is None:
            runhistory2epm.scenario = scenario

        self.solver = EPILS_Solver(scenario=scenario,
                                   stats=self.stats,
                                   initial_design=initial_design,
                                   runhistory=runhistory,
                                   runhistory2epm=runhistory2epm,
                                   intensifier=intensifier,
                                   aggregate_func=aggregate_func,
                                   num_run=num_run,
                                   model=model,
                                   acq_optimizer=local_search,
                                   acquisition_func=acquisition_function,
                                   rng=rng)
Exemple #25
0
    def __init__(
        self,
        scenario: Scenario,
        tae_runner: Optional[Union[Type[BaseRunner], Callable]] = None,
        tae_runner_kwargs: Optional[Dict] = None,
        runhistory: Optional[Union[Type[RunHistory], RunHistory]] = None,
        runhistory_kwargs: Optional[Dict] = None,
        intensifier: Optional[Type[AbstractRacer]] = None,
        intensifier_kwargs: Optional[Dict] = None,
        acquisition_function: Optional[
            Type[AbstractAcquisitionFunction]] = None,
        acquisition_function_kwargs: Optional[Dict] = None,
        integrate_acquisition_function: bool = False,
        acquisition_function_optimizer: Optional[
            Type[AcquisitionFunctionMaximizer]] = None,
        acquisition_function_optimizer_kwargs: Optional[Dict] = None,
        model: Optional[Type[AbstractEPM]] = None,
        model_kwargs: Optional[Dict] = None,
        runhistory2epm: Optional[Type[AbstractRunHistory2EPM]] = None,
        runhistory2epm_kwargs: Optional[Dict] = None,
        multi_objective_algorithm: Optional[
            Type[AbstractMultiObjectiveAlgorithm]] = None,
        multi_objective_kwargs: Optional[Dict] = None,
        initial_design: Optional[Type[InitialDesign]] = None,
        initial_design_kwargs: Optional[Dict] = None,
        initial_configurations: Optional[List[Configuration]] = None,
        stats: Optional[Stats] = None,
        restore_incumbent: Optional[Configuration] = None,
        rng: Optional[Union[np.random.RandomState, int]] = None,
        smbo_class: Optional[Type[SMBO]] = None,
        run_id: Optional[int] = None,
        random_configuration_chooser: Optional[
            Type[RandomConfigurationChooser]] = None,
        random_configuration_chooser_kwargs: Optional[Dict] = None,
        dask_client: Optional[dask.distributed.Client] = None,
        n_jobs: Optional[int] = 1,
    ):
        self.logger = logging.getLogger(self.__module__ + "." +
                                        self.__class__.__name__)

        self.scenario = scenario
        self.output_dir = ""
        if not restore_incumbent:
            # restore_incumbent is used by the CLI interface which provides a method for restoring a SMAC run given an
            # output directory. This is the default path.
            # initial random number generator
            run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger)
            self.output_dir = create_output_directory(scenario, run_id)
        elif scenario.output_dir is not None:  # type: ignore[attr-defined] # noqa F821
            run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger)
            # output-directory is created in CLI when restoring from a
            # folder. calling the function again in the facade results in two
            # folders being created: run_X and run_X.OLD. if we are
            # restoring, the output-folder exists already and we omit creating it,
            # but set the self-output_dir to the dir.
            # necessary because we want to write traj to new output-dir in CLI.
            self.output_dir = cast(str, scenario.output_dir_for_this_run
                                   )  # type: ignore[attr-defined] # noqa F821
        rng = cast(np.random.RandomState, rng)

        if (scenario.deterministic is
                True  # type: ignore[attr-defined] # noqa F821
                and getattr(scenario, "tuner_timeout", None) is None
                and scenario.run_obj ==
                "quality"  # type: ignore[attr-defined] # noqa F821
            ):
            self.logger.info(
                "Optimizing a deterministic scenario for quality without a tuner timeout - will make "
                "SMAC deterministic and only evaluate one configuration per iteration!"
            )
            scenario.intensification_percentage = 1e-10  # type: ignore[attr-defined] # noqa F821
            scenario.min_chall = 1  # type: ignore[attr-defined] # noqa F821

        scenario.write()

        # initialize stats object
        if stats:
            self.stats = stats
        else:
            self.stats = Stats(scenario)

        if self.scenario.run_obj == "runtime" and not self.scenario.transform_y == "LOG":  # type: ignore[attr-defined] # noqa F821
            self.logger.warning(
                "Runtime as objective automatically activates log(y) transformation"
            )
            self.scenario.transform_y = "LOG"  # type: ignore[attr-defined] # noqa F821

        # initialize empty runhistory
        num_obj = len(scenario.multi_objectives
                      )  # type: ignore[attr-defined] # noqa F821
        runhistory_def_kwargs = {}
        if runhistory_kwargs is not None:
            runhistory_def_kwargs.update(runhistory_kwargs)
        if runhistory is None:
            runhistory = RunHistory(**runhistory_def_kwargs)
        elif inspect.isclass(runhistory):
            runhistory = runhistory(
                **runhistory_def_kwargs)  # type: ignore[operator] # noqa F821
        elif isinstance(runhistory, RunHistory):
            pass
        else:
            raise ValueError(
                "runhistory has to be a class or an object of RunHistory")

        rand_conf_chooser_kwargs = {"rng": rng}
        if random_configuration_chooser_kwargs is not None:
            rand_conf_chooser_kwargs.update(
                random_configuration_chooser_kwargs)
        if random_configuration_chooser is None:
            if "prob" not in rand_conf_chooser_kwargs:
                rand_conf_chooser_kwargs[
                    "prob"] = scenario.rand_prob  # type: ignore[attr-defined] # noqa F821
            random_configuration_chooser_instance = ChooserProb(
                **
                rand_conf_chooser_kwargs  # type: ignore[arg-type] # noqa F821  # type: RandomConfigurationChooser
            )
        elif inspect.isclass(random_configuration_chooser):
            random_configuration_chooser_instance = random_configuration_chooser(  # type: ignore # noqa F821
                **
                rand_conf_chooser_kwargs  # type: ignore[arg-type] # noqa F821
            )
        elif not isinstance(random_configuration_chooser,
                            RandomConfigurationChooser):
            raise ValueError(
                "random_configuration_chooser has to be"
                " a class or object of RandomConfigurationChooser")

        # reset random number generator in config space to draw different
        # random configurations with each seed given to SMAC
        scenario.cs.seed(
            rng.randint(MAXINT))  # type: ignore[attr-defined] # noqa F821

        # initial Trajectory Logger
        traj_logger = TrajLogger(output_dir=self.output_dir, stats=self.stats)

        # initial EPM
        types, bounds = get_types(
            scenario.cs,
            scenario.feature_array)  # type: ignore[attr-defined] # noqa F821
        model_def_kwargs = {
            "types": types,
            "bounds": bounds,
            "instance_features": scenario.feature_array,
            "seed": rng.randint(MAXINT),
            "pca_components": scenario.PCA_DIM,
        }
        if model_kwargs is not None:
            model_def_kwargs.update(model_kwargs)
        if model is None:
            for key, value in {
                    "log_y": scenario.transform_y
                    in ["LOG",
                        "LOGS"],  # type: ignore[attr-defined] # noqa F821
                    "num_trees": scenario.
                    rf_num_trees,  # type: ignore[attr-defined] # noqa F821
                    "do_bootstrapping": scenario.
                    rf_do_bootstrapping,  # type: ignore[attr-defined] # noqa F821
                    "ratio_features": scenario.
                    rf_ratio_features,  # type: ignore[attr-defined] # noqa F821
                    "min_samples_split": scenario.
                    rf_min_samples_split,  # type: ignore[attr-defined] # noqa F821
                    "min_samples_leaf": scenario.
                    rf_min_samples_leaf,  # type: ignore[attr-defined] # noqa F821
                    "max_depth": scenario.
                    rf_max_depth,  # type: ignore[attr-defined] # noqa F821
            }.items():
                if key not in model_def_kwargs:
                    model_def_kwargs[key] = value
            model_def_kwargs[
                "configspace"] = self.scenario.cs  # type: ignore[attr-defined] # noqa F821
            model_instance = RandomForestWithInstances(
                **
                model_def_kwargs  # type: ignore[arg-type] # noqa F821  # type: AbstractEPM
            )
        elif inspect.isclass(model):
            model_def_kwargs[
                "configspace"] = self.scenario.cs  # type: ignore[attr-defined] # noqa F821
            model_instance = model(
                **model_def_kwargs)  # type: ignore # noqa F821
        else:
            raise TypeError("Model not recognized: %s" % (type(model)))

        # initial acquisition function
        acq_def_kwargs = {"model": model_instance}
        if acquisition_function_kwargs is not None:
            acq_def_kwargs.update(acquisition_function_kwargs)

        acquisition_function_instance = (
            None)  # type: Optional[AbstractAcquisitionFunction]
        if acquisition_function is None:
            if scenario.transform_y in [
                    "LOG", "LOGS"
            ]:  # type: ignore[attr-defined] # noqa F821
                acquisition_function_instance = LogEI(
                    **acq_def_kwargs  # type: ignore[arg-type] # noqa F821
                )
            else:
                acquisition_function_instance = EI(
                    **acq_def_kwargs  # type: ignore[arg-type] # noqa F821
                )
        elif inspect.isclass(acquisition_function):
            acquisition_function_instance = acquisition_function(
                **acq_def_kwargs)
        else:
            raise TypeError(
                "Argument acquisition_function must be None or an object implementing the "
                "AbstractAcquisitionFunction, not %s." %
                type(acquisition_function))
        if integrate_acquisition_function:
            acquisition_function_instance = IntegratedAcquisitionFunction(
                acquisition_function=
                acquisition_function_instance,  # type: ignore
                **acq_def_kwargs,
            )

        # initialize optimizer on acquisition function
        acq_func_opt_kwargs = {
            "acquisition_function": acquisition_function_instance,
            "config_space":
            scenario.cs,  # type: ignore[attr-defined] # noqa F821
            "rng": rng,
        }
        if acquisition_function_optimizer_kwargs is not None:
            acq_func_opt_kwargs.update(acquisition_function_optimizer_kwargs)
        if acquisition_function_optimizer is None:
            for key, value in {
                    "max_steps": scenario.
                    sls_max_steps,  # type: ignore[attr-defined] # noqa F821
                    "n_steps_plateau_walk": scenario.
                    sls_n_steps_plateau_walk,  # type: ignore[attr-defined] # noqa F821
            }.items():
                if key not in acq_func_opt_kwargs:
                    acq_func_opt_kwargs[key] = value
            acquisition_function_optimizer_instance = LocalAndSortedRandomSearch(
                **acq_func_opt_kwargs  # type: ignore
            )
        elif inspect.isclass(acquisition_function_optimizer):
            acquisition_function_optimizer_instance = acquisition_function_optimizer(  # type: ignore # noqa F821
                **acq_func_opt_kwargs)  # type: ignore # noqa F821
        else:
            raise TypeError(
                "Argument acquisition_function_optimizer must be None or an object implementing the "
                "AcquisitionFunctionMaximizer, but is '%s'" %
                type(acquisition_function_optimizer))

        # initialize tae_runner
        # First case, if tae_runner is None, the target algorithm is a call
        # string in the scenario file
        tae_def_kwargs = {
            "stats": self.stats,
            "run_obj": scenario.run_obj,
            "par_factor":
            scenario.par_factor,  # type: ignore[attr-defined] # noqa F821
            "cost_for_crash":
            scenario.cost_for_crash,  # type: ignore[attr-defined] # noqa F821
            "abort_on_first_run_crash": scenario.
            abort_on_first_run_crash,  # type: ignore[attr-defined] # noqa F821
            "multi_objectives": scenario.
            multi_objectives,  # type: ignore[attr-defined] # noqa F821
        }
        if tae_runner_kwargs is not None:
            tae_def_kwargs.update(tae_runner_kwargs)

        if "ta" not in tae_def_kwargs:
            tae_def_kwargs[
                "ta"] = scenario.ta  # type: ignore[attr-defined] # noqa F821
        if tae_runner is None:
            tae_def_kwargs[
                "ta"] = scenario.ta  # type: ignore[attr-defined] # noqa F821
            tae_runner_instance = ExecuteTARunOld(
                **tae_def_kwargs
            )  # type: ignore[arg-type] # noqa F821  # type: BaseRunner
        elif inspect.isclass(tae_runner):
            tae_runner_instance = cast(
                BaseRunner, tae_runner(**tae_def_kwargs))  # type: ignore
        elif callable(tae_runner):
            tae_def_kwargs["ta"] = tae_runner
            tae_def_kwargs[
                "use_pynisher"] = scenario.limit_resources  # type: ignore[attr-defined] # noqa F821
            tae_def_kwargs[
                "memory_limit"] = scenario.memory_limit  # type: ignore[attr-defined] # noqa F821
            tae_runner_instance = ExecuteTAFuncDict(
                **tae_def_kwargs)  # type: ignore
        else:
            raise TypeError(
                "Argument 'tae_runner' is %s, but must be "
                "either None, a callable or an object implementing "
                "BaseRunner. Passing 'None' will result in the "
                "creation of target algorithm runner based on the "
                "call string in the scenario file." % type(tae_runner))

        # In case of a parallel run, wrap the single worker in a parallel
        # runner
        if n_jobs is None or n_jobs == 1:
            _n_jobs = 1
        elif n_jobs == -1:
            _n_jobs = joblib.cpu_count()
        elif n_jobs > 0:
            _n_jobs = n_jobs
        else:
            raise ValueError(
                "Number of tasks must be positive, None or -1, but is %s" %
                str(n_jobs))
        if _n_jobs > 1 or dask_client is not None:
            tae_runner_instance = DaskParallelRunner(  # type: ignore
                tae_runner_instance,
                n_workers=_n_jobs,
                output_directory=self.output_dir,
                dask_client=dask_client,
            )

        # Check that overall objective and tae objective are the same
        # TODO: remove these two ignores once the scenario object knows all its attributes!
        if tae_runner_instance.run_obj != scenario.run_obj:  # type: ignore[union-attr] # noqa F821
            raise ValueError(
                "Objective for the target algorithm runner and "
                "the scenario must be the same, but are '%s' and "
                "'%s'" %
                (tae_runner_instance.run_obj,
                 scenario.run_obj))  # type: ignore[union-attr] # noqa F821

        if intensifier is None:
            intensifier = Intensifier

        if isinstance(intensifier, AbstractRacer):
            intensifier_instance = intensifier
        elif inspect.isclass(intensifier):
            # initialize intensification
            intensifier_def_kwargs = {
                "stats": self.stats,
                "traj_logger": traj_logger,
                "rng": rng,
                "instances":
                scenario.train_insts,  # type: ignore[attr-defined] # noqa F821
                "cutoff":
                scenario.cutoff,  # type: ignore[attr-defined] # noqa F821
                "deterministic": scenario.
                deterministic,  # type: ignore[attr-defined] # noqa F821
                "run_obj_time": scenario.run_obj ==
                "runtime",  # type: ignore[attr-defined] # noqa F821
                "instance_specifics": scenario.
                instance_specific,  # type: ignore[attr-defined] # noqa F821
                "adaptive_capping_slackfactor": scenario.
                intens_adaptive_capping_slackfactor,  # type: ignore[attr-defined] # noqa F821
                "min_chall": scenario.
                intens_min_chall,  # type: ignore[attr-defined] # noqa F821
            }

            if issubclass(intensifier, Intensifier):
                intensifier_def_kwargs[
                    "always_race_against"] = scenario.cs.get_default_configuration(
                    )  # type: ignore[attr-defined] # noqa F821
                intensifier_def_kwargs[
                    "use_ta_time_bound"] = scenario.use_ta_time  # type: ignore[attr-defined] # noqa F821
                intensifier_def_kwargs[
                    "minR"] = scenario.minR  # type: ignore[attr-defined] # noqa F821
                intensifier_def_kwargs[
                    "maxR"] = scenario.maxR  # type: ignore[attr-defined] # noqa F821

            if intensifier_kwargs is not None:
                intensifier_def_kwargs.update(intensifier_kwargs)

            intensifier_instance = intensifier(
                **intensifier_def_kwargs)  # type: ignore[arg-type] # noqa F821
        else:
            raise TypeError(
                "Argument intensifier must be None or an object implementing the AbstractRacer, but is '%s'"
                % type(intensifier))

        # initialize multi objective
        # the multi_objective_algorithm_instance will be passed to the runhistory2epm object
        multi_objective_algorithm_instance = (
            None)  # type: Optional[AbstractMultiObjectiveAlgorithm]

        if scenario.multi_objectives is not None and num_obj > 1:  # type: ignore[attr-defined] # noqa F821
            # define any defaults here
            _multi_objective_kwargs = {"rng": rng, "num_obj": num_obj}

            if multi_objective_kwargs is not None:
                _multi_objective_kwargs.update(multi_objective_kwargs)

            if multi_objective_algorithm is None:
                multi_objective_algorithm_instance = MeanAggregationStrategy(
                    **_multi_objective_kwargs
                )  # type: ignore[arg-type] # noqa F821
            elif inspect.isclass(multi_objective_algorithm):
                multi_objective_algorithm_instance = multi_objective_algorithm(
                    **_multi_objective_kwargs)
            else:
                raise TypeError(
                    "Multi-objective algorithm not recognized: %s" %
                    (type(multi_objective_algorithm)))

        # initial design
        if initial_design is not None and initial_configurations is not None:
            raise ValueError(
                "Either use initial_design or initial_configurations; but not both"
            )

        init_design_def_kwargs = {
            "cs": scenario.cs,  # type: ignore[attr-defined] # noqa F821
            "traj_logger": traj_logger,
            "rng": rng,
            "ta_run_limit":
            scenario.ta_run_limit,  # type: ignore[attr-defined] # noqa F821
            "configs": initial_configurations,
            "n_configs_x_params": 0,
            "max_config_fracs": 0.0,
        }

        if initial_design_kwargs is not None:
            init_design_def_kwargs.update(initial_design_kwargs)
        if initial_configurations is not None:
            initial_design_instance = InitialDesign(**init_design_def_kwargs)
        elif initial_design is None:
            if scenario.initial_incumbent == "DEFAULT":  # type: ignore[attr-defined] # noqa F821
                init_design_def_kwargs["max_config_fracs"] = 0.0
                initial_design_instance = DefaultConfiguration(
                    **init_design_def_kwargs)
            elif scenario.initial_incumbent == "RANDOM":  # type: ignore[attr-defined] # noqa F821
                init_design_def_kwargs["max_config_fracs"] = 0.0
                initial_design_instance = RandomConfigurations(
                    **init_design_def_kwargs)
            elif scenario.initial_incumbent == "LHD":  # type: ignore[attr-defined] # noqa F821
                initial_design_instance = LHDesign(**init_design_def_kwargs)
            elif scenario.initial_incumbent == "FACTORIAL":  # type: ignore[attr-defined] # noqa F821
                initial_design_instance = FactorialInitialDesign(
                    **init_design_def_kwargs)
            elif scenario.initial_incumbent == "SOBOL":  # type: ignore[attr-defined] # noqa F821
                initial_design_instance = SobolDesign(**init_design_def_kwargs)
            else:
                raise ValueError("Don't know what kind of initial_incumbent "
                                 "'%s' is" %
                                 scenario.initial_incumbent  # type: ignore
                                 )  # type: ignore[attr-defined] # noqa F821
        elif inspect.isclass(initial_design):
            initial_design_instance = initial_design(**init_design_def_kwargs)
        else:
            raise TypeError(
                "Argument initial_design must be None or an object implementing the InitialDesign, but is '%s'"
                % type(initial_design))

        # if we log the performance data,
        # the RFRImputator will already get
        # log transform data from the runhistory
        if scenario.transform_y in [
                "LOG", "LOGS"
        ]:  # type: ignore[attr-defined] # noqa F821
            cutoff = np.log(np.nanmin([
                np.inf, np.float_(scenario.cutoff)
            ]))  # type: ignore[attr-defined] # noqa F821
            threshold = cutoff + np.log(
                scenario.par_factor)  # type: ignore[attr-defined] # noqa F821
        else:
            cutoff = np.nanmin([np.inf, np.float_(scenario.cutoff)
                                ])  # type: ignore[attr-defined] # noqa F821
            threshold = cutoff * scenario.par_factor  # type: ignore[attr-defined] # noqa F821

        num_params = len(scenario.cs.get_hyperparameters()
                         )  # type: ignore[attr-defined] # noqa F821
        imputor = RFRImputator(
            rng=rng,
            cutoff=cutoff,
            threshold=threshold,
            model=model_instance,
            change_threshold=0.01,
            max_iter=2,
        )

        r2e_def_kwargs = {
            "scenario": scenario,
            "num_params": num_params,
            "success_states": [
                StatusType.SUCCESS,
            ],
            "impute_censored_data": True,
            "impute_state": [
                StatusType.CAPPED,
            ],
            "imputor": imputor,
            "scale_perc": 5,
        }

        # TODO: consider other sorts of multi-objective algorithms
        if isinstance(multi_objective_algorithm_instance, AggregationStrategy):
            r2e_def_kwargs.update({
                "multi_objective_algorithm":
                multi_objective_algorithm_instance
            })

        if scenario.run_obj == "quality":
            r2e_def_kwargs.update({
                "success_states": [
                    StatusType.SUCCESS,
                    StatusType.CRASHED,
                    StatusType.MEMOUT,
                ],
                "impute_censored_data":
                False,
                "impute_state":
                None,
            })

        if (isinstance(intensifier_instance, (SuccessiveHalving, Hyperband))
                and scenario.run_obj == "quality"):
            r2e_def_kwargs.update({
                "success_states": [
                    StatusType.SUCCESS,
                    StatusType.CRASHED,
                    StatusType.MEMOUT,
                    StatusType.DONOTADVANCE,
                ],
                "consider_for_higher_budgets_state": [
                    StatusType.DONOTADVANCE,
                    StatusType.TIMEOUT,
                    StatusType.CRASHED,
                    StatusType.MEMOUT,
                ],
            })

        if runhistory2epm_kwargs is not None:
            r2e_def_kwargs.update(runhistory2epm_kwargs)
        if runhistory2epm is None:
            if scenario.run_obj == "runtime":
                rh2epm = RunHistory2EPM4LogCost(
                    **r2e_def_kwargs  # type: ignore
                )  # type: ignore[arg-type] # noqa F821  # type: AbstractRunHistory2EPM
            elif scenario.run_obj == "quality":
                if scenario.transform_y == "NONE":  # type: ignore[attr-defined] # noqa F821
                    rh2epm = RunHistory2EPM4Cost(
                        **r2e_def_kwargs)  # type: ignore # noqa F821
                elif scenario.transform_y == "LOG":  # type: ignore[attr-defined] # noqa F821
                    rh2epm = RunHistory2EPM4LogCost(
                        **r2e_def_kwargs)  # type: ignore # noqa F821
                elif scenario.transform_y == "LOGS":  # type: ignore[attr-defined] # noqa F821
                    rh2epm = RunHistory2EPM4LogScaledCost(
                        **r2e_def_kwargs)  # type: ignore # noqa F821
                elif scenario.transform_y == "INVS":  # type: ignore[attr-defined] # noqa F821
                    rh2epm = RunHistory2EPM4InvScaledCost(
                        **r2e_def_kwargs)  # type: ignore # noqa F821
            else:
                raise ValueError(
                    "Unknown run objective: %s. Should be either "
                    "quality or runtime." %
                    self.scenario.run_obj  # type: ignore # noqa F821
                )
        elif inspect.isclass(runhistory2epm):
            rh2epm = runhistory2epm(**
                                    r2e_def_kwargs)  # type: ignore # noqa F821
        else:
            raise TypeError(
                "Argument runhistory2epm must be None or an object implementing the RunHistory2EPM, but is '%s'"
                % type(runhistory2epm))

        smbo_args = {
            "scenario": scenario,
            "stats": self.stats,
            "initial_design": initial_design_instance,
            "runhistory": runhistory,
            "runhistory2epm": rh2epm,
            "intensifier": intensifier_instance,
            "num_run": run_id,
            "model": model_instance,
            "acq_optimizer": acquisition_function_optimizer_instance,
            "acquisition_func": acquisition_function_instance,
            "rng": rng,
            "restore_incumbent": restore_incumbent,
            "random_configuration_chooser":
            random_configuration_chooser_instance,
            "tae_runner": tae_runner_instance,
        }  # type: Dict[str, Any]

        if smbo_class is None:
            self.solver = SMBO(**
                               smbo_args)  # type: ignore[arg-type] # noqa F821
        else:
            self.solver = smbo_class(
                **smbo_args)  # type: ignore[arg-type] # noqa F821
Exemple #26
0
    def _component_builder(self, conf: typing.Union[Configuration, dict]) \
        -> typing.Tuple[AbstractAcquisitionFunction, AbstractEPM]:
        """
            Builds a new acquisition function object and EPM object
            from *conf* and returns these.

            Parameters
            ----------
            conf: typing.Union[Configuration, dict]
                configuration specifying "model" ("RF" or "GP") and
                "acq_func" ("EI", "LCB", "PI" or "LogEI"); may also carry
                optional hyperparameters for either component (e.g.
                "num_trees", "par_ei", ...), read via ``conf.get``.

            Returns
            -------
            typing.Tuple[AbstractAcquisitionFunction, AbstractEPM]

            Raises
            ------
            ValueError
                If ``conf["model"]`` or ``conf["acq_func"]`` holds an
                unknown identifier, or the configuration space has neither
                continuous nor categorical dimensions (GP branch).
        """
        types, bounds = get_types(
            self.config_space, instance_features=self.scenario.feature_array)

        if conf["model"] == "RF":
            # Scenario values act as defaults; conf entries override them.
            model = RandomForestWithInstances(
                configspace=self.config_space,
                types=types,
                bounds=bounds,
                instance_features=self.scenario.feature_array,
                seed=self.rng.randint(MAXINT),
                pca_components=conf.get("pca_dim", self.scenario.PCA_DIM),
                log_y=conf.get("log_y", self.scenario.transform_y
                               in ["LOG", "LOGS"]),
                num_trees=conf.get("num_trees", self.scenario.rf_num_trees),
                do_bootstrapping=conf.get("do_bootstrapping",
                                          self.scenario.rf_do_bootstrapping),
                ratio_features=conf.get("ratio_features",
                                        self.scenario.rf_ratio_features),
                min_samples_split=conf.get("min_samples_split",
                                           self.scenario.rf_min_samples_split),
                min_samples_leaf=conf.get("min_samples_leaf",
                                          self.scenario.rf_min_samples_leaf),
                max_depth=conf.get("max_depth", self.scenario.rf_max_depth),
            )

        elif conf["model"] == "GP":
            # Imported lazily: the GP kernels are only needed on this branch.
            from smac.epm.gp_kernels import ConstantKernel, HammingKernel, WhiteKernel, Matern

            cov_amp = ConstantKernel(
                2.0,
                constant_value_bounds=(np.exp(-10), np.exp(2)),
                prior=LognormalPrior(mean=0.0, sigma=1.0, rng=self.rng),
            )

            # NOTE(review): this elementwise comparison assumes `types` is a
            # numpy array; on a plain list `types == 0` is a single bool and
            # np.nonzero would yield wrong dimensions -- confirm get_types.
            # Type code 0 marks continuous dims, non-zero marks categoricals.
            cont_dims = np.nonzero(types == 0)[0]
            cat_dims = np.nonzero(types != 0)[0]

            if len(cont_dims) > 0:
                exp_kernel = Matern(
                    np.ones([len(cont_dims)]),
                    [(np.exp(-10), np.exp(2)) for _ in range(len(cont_dims))],
                    nu=2.5,
                    operate_on=cont_dims,
                )

            if len(cat_dims) > 0:
                ham_kernel = HammingKernel(
                    np.ones([len(cat_dims)]),
                    [(np.exp(-10), np.exp(2)) for _ in range(len(cat_dims))],
                    operate_on=cat_dims,
                )
            noise_kernel = WhiteKernel(
                noise_level=1e-8,
                noise_level_bounds=(np.exp(-25), np.exp(2)),
                prior=HorseshoePrior(scale=0.1, rng=self.rng),
            )

            if len(cont_dims) > 0 and len(cat_dims) > 0:
                # both continuous and categorical dimensions
                kernel = cov_amp * (exp_kernel * ham_kernel) + noise_kernel
            elif len(cont_dims) > 0 and len(cat_dims) == 0:
                # only continuous
                kernel = cov_amp * exp_kernel + noise_kernel
            elif len(cont_dims) == 0 and len(cat_dims) > 0:
                # only categorical (original comment wrongly said "only cont")
                kernel = cov_amp * ham_kernel + noise_kernel
            else:
                raise ValueError(
                    "Configuration space has neither continuous nor "
                    "categorical dimensions")

            # emcee convention: an even number of walkers, ~3x the number
            # of kernel hyperparameters.
            n_mcmc_walkers = 3 * len(kernel.theta)
            if n_mcmc_walkers % 2 == 1:
                n_mcmc_walkers += 1

            model = GaussianProcessMCMC(
                self.config_space,
                types=types,
                bounds=bounds,
                kernel=kernel,
                n_mcmc_walkers=n_mcmc_walkers,
                chain_length=250,
                burnin_steps=250,
                normalize_y=True,
                seed=self.rng.randint(low=0, high=10000),
            )
        else:
            # Previously fell through with `model` unbound -> NameError at
            # acquisition construction; fail fast with a clear message.
            raise ValueError("Unknown model type: %s" % conf["model"])

        if conf["acq_func"] == "EI":
            acq = EI(model=model, par=conf.get("par_ei", 0))
        elif conf["acq_func"] == "LCB":
            acq = LCB(model=model, par=conf.get("par_lcb", 0))
        elif conf["acq_func"] == "PI":
            acq = PI(model=model, par=conf.get("par_pi", 0))
        elif conf["acq_func"] == "LogEI":
            # par value should be in log-space
            acq = LogEI(model=model, par=conf.get("par_logei", 0))
        else:
            # Previously fell through with `acq` unbound -> NameError at
            # the return statement; fail fast with a clear message.
            raise ValueError(
                "Unknown acquisition function: %s" % conf["acq_func"])

        return acq, model