Example 1
 def test_uniformfloat_to_integer(self):
     f1 = UniformFloatHyperparameter("param", 1, 10, q=0.1, log=True)
     with pytest.warns(UserWarning, match="Setting quantization < 1 for Integer "
                                          "Hyperparameter 'param' has no effect"):
         f2 = f1.to_integer()
     # TODO is this a useful rounding?
     # TODO should there be any rounding, if e.g. lower=0.1
     self.assertEqual("param, Type: UniformInteger, Range: [1, 10], "
                      "Default: 3, on log-scale", str(f2))
 def test_uniformfloat_to_integer(self):
     f1 = UniformFloatHyperparameter("param", 1, 10, q=0.1, log=True)
     with warnings.catch_warnings():
         warnings.simplefilter("ignore")
         f2 = f1.to_integer()
         # TODO is this a useful rounding?
         # TODO should there be any rounding, if e.g. lower=0.1
         self.assertEqual("param, Type: UniformInteger, Range: [1, 10], "
                          "Default: 3, on log-scale", str(f2))
Example 3
    def test_log_space_conversion(self):
        lower, upper = 1e-5, 1e5
        hyper = UniformFloatHyperparameter('test', lower=lower, upper=upper, log=True)
        self.assertTrue(hyper.is_legal(hyper._transform(1.)))

        lower, upper = 1e-10, 1e10
        hyper = UniformFloatHyperparameter('test', lower=lower, upper=upper, log=True)
        self.assertTrue(hyper.is_legal(hyper._transform(1.)))
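
For context, `_transform` maps the internal normalized representation in [0, 1] back to the parameter's own scale, so this test checks that the upper end of a wide log range survives the round trip. A minimal sketch (`_transform` is a private helper, so its exact behavior may vary between releases):

from ConfigSpace.hyperparameters import UniformFloatHyperparameter

hyper = UniformFloatHyperparameter('test', lower=1e-10, upper=1e10, log=True)
print(hyper._transform(0.))                  # ~1e-10, the lower bound
print(hyper._transform(1.))                  # ~1e10, the upper bound
print(hyper.is_legal(hyper._transform(1.)))  # True if rounding is handled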
Example 4
def NB_from_cfg(params):
    # Build a Naive Bayes classifier from the sampled configuration and
    # return 1 - mean cross-validated accuracy (SMAC minimizes this cost).
    clf = nb(**params)
    return 1 - cross_val_score(clf, X, y, cv=5).mean()

#logger = logging.getLogger("SVMExample")
logging.basicConfig(level=logging.INFO)  # logging.DEBUG for debug output

# Build Configuration Space which defines all parameters and their ranges
cs = ConfigurationSpace()

# We define the Naive Bayes hyperparameters and add them to our cs

alpha = UniformFloatHyperparameter("alpha", 0.0, 5.0, default_value=1.0)

fit_prior = CategoricalHyperparameter("fit_prior", [True, False], default_value=True)

cs.add_hyperparameters([alpha, fit_prior])

# Scenario object
scenario = Scenario({"run_obj": "quality",   # we optimize quality (alternatively runtime)
                     "runcount-limit": 500,   # max. number of function evaluations; for this example set to a low number
                     "cs": cs,               # configuration space
                     "deterministic": "true"
                     })

# Example call of the function
# It returns: Status, Cost, Runtime, Additional Infos
def_value = NB_from_cfg(cs.get_default_configuration())
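
From here, the scenario is typically handed to a SMAC facade; a minimal sketch of the usual continuation (the module path follows the SMAC3 1.x facade layout and may differ in other releases):

import numpy as np
from smac.facade.smac_hpo_facade import SMAC4HPO

print("Default cost: %.4f" % def_value)

smac = SMAC4HPO(scenario=scenario,
                rng=np.random.RandomState(42),
                tae_runner=NB_from_cfg)
incumbent = smac.optimize()
print("Optimized cost: %.4f" % NB_from_cfg(incumbent))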
Example 5
    def test_uniformfloat_is_legal(self):
        lower = 0.1
        upper = 10
        f1 = UniformFloatHyperparameter("param", lower, upper, q=0.1, log=True)

        self.assertTrue(f1.is_legal(3.0))
        self.assertTrue(f1.is_legal(3))
        self.assertFalse(f1.is_legal(-0.1))
        self.assertFalse(f1.is_legal(10.1))
        self.assertFalse(f1.is_legal("AAA"))
        self.assertFalse(f1.is_legal(dict()))

        # Test legal vector values
        self.assertTrue(f1.is_legal_vector(1.0))
        self.assertTrue(f1.is_legal_vector(0.0))
        self.assertTrue(f1.is_legal_vector(0))
        self.assertTrue(f1.is_legal_vector(0.3))
        self.assertFalse(f1.is_legal_vector(-0.1))
        self.assertFalse(f1.is_legal_vector(1.1))
        self.assertRaises(TypeError, f1.is_legal_vector, "Hahaha")
Example 6
        # get instance
        data, target = generate_instances(int(instance[0]), int(instance[1]))

        cv = StratifiedKFold(n_splits=4, random_state=seed, shuffle=True)  # to make CV splits consistent
        scores = cross_val_score(clf, data, target, cv=cv)

    return 1 - np.mean(scores)


if __name__ == "__main__":
    # Build Configuration Space which defines all parameters and their ranges
    cs = ConfigurationSpace()

    # We define a few possible parameters for the SGD classifier
    alpha = UniformFloatHyperparameter(
        "alpha", 0, 1, default_value=1.0)
    l1_ratio = UniformFloatHyperparameter(
        "l1_ratio", 0, 1, default_value=0.5)
    learning_rate = CategoricalHyperparameter(
        "learning_rate", choices=['constant', 'invscaling', 'adaptive'], default_value='constant')
    eta0 = UniformFloatHyperparameter(
        "eta0", 0.00001, 1, default_value=0.1, log=True)
    # Add the parameters to configuration space
    cs.add_hyperparameters([alpha, l1_ratio, learning_rate, eta0])

    # SMAC scenario object
    scenario = Scenario({
        "run_obj": "quality",  # we optimize quality (alternative to runtime)
        "wallclock-limit": 100,  # max duration to run the optimization (in seconds)
        "cs": cs,  # configuration space
        "deterministic": True,
Example 7
# NOTE: this snippet targets older library versions: sklearn's
# KFold(n, n_folds=...) signature and ConfigSpace's `default` keyword
# (renamed `default_value` in later releases).
kf = KFold(X.shape[0], n_folds=4)

# build Configuration Space which defines all parameters and their ranges
# to illustrate different parameter types,
# we use continuous, integer and categorical parameters
cs = ConfigurationSpace()
do_bootstrapping = CategoricalHyperparameter("do_bootstrapping",
                                             ["true", "false"],
                                             default="true")
cs.add_hyperparameter(do_bootstrapping)

num_trees = UniformIntegerHyperparameter("num_trees", 10, 50, default=10)
cs.add_hyperparameter(num_trees)

frac_points_per_tree = UniformFloatHyperparameter("frac_points_per_tree",
                                                  0.001,
                                                  1,
                                                  default=1)
cs.add_hyperparameter(frac_points_per_tree)

ratio_features = UniformFloatHyperparameter("ratio_features",
                                            0.001,
                                            1,
                                            default=1)
cs.add_hyperparameter(ratio_features)

min_samples_to_split = UniformIntegerHyperparameter("min_samples_to_split",
                                                    2,
                                                    20,
                                                    default=2)
cs.add_hyperparameter(min_samples_to_split)
Example 8
    def get_hyperparameter_search_space(dataset_properties=None):
        cs = ConfigurationSpace()
        loss = CategoricalHyperparameter("loss", ["least_squares"],
                                         default_value="least_squares")
        learning_rate = UniformFloatHyperparameter(name="learning_rate",
                                                   lower=0.01,
                                                   upper=1,
                                                   default_value=0.1,
                                                   log=True)
        max_iter = UniformIntegerHyperparameter("max_iter",
                                                32,
                                                512,
                                                default_value=100)
        min_samples_leaf = UniformIntegerHyperparameter(
            name="min_samples_leaf",
            lower=1,
            upper=200,
            default_value=20,
            log=True)
        max_depth = UnParametrizedHyperparameter(name="max_depth",
                                                 value="None")
        max_leaf_nodes = UniformIntegerHyperparameter(name="max_leaf_nodes",
                                                      lower=3,
                                                      upper=2047,
                                                      default_value=31,
                                                      log=True)
        max_bins = Constant("max_bins", 256)
        l2_regularization = UniformFloatHyperparameter(
            name="l2_regularization",
            lower=1E-10,
            upper=1,
            default_value=1E-10,
            log=True)
        early_stop = CategoricalHyperparameter(
            name="early_stop",
            choices=["off", "train", "valid"],
            default_value="off")
        tol = UnParametrizedHyperparameter(name="tol", value=1e-7)
        scoring = UnParametrizedHyperparameter(name="scoring", value="loss")
        n_iter_no_change = UniformIntegerHyperparameter(
            name="n_iter_no_change", lower=1, upper=20, default_value=10)
        validation_fraction = UniformFloatHyperparameter(
            name="validation_fraction",
            lower=0.01,
            upper=0.4,
            default_value=0.1)

        cs.add_hyperparameters([
            loss, learning_rate, max_iter, min_samples_leaf, max_depth,
            max_leaf_nodes, max_bins, l2_regularization, early_stop, tol,
            scoring, n_iter_no_change, validation_fraction
        ])

        n_iter_no_change_cond = InCondition(n_iter_no_change, early_stop,
                                            ["valid", "train"])
        validation_fraction_cond = EqualsCondition(validation_fraction,
                                                   early_stop, "valid")

        cs.add_conditions([n_iter_no_change_cond, validation_fraction_cond])

        return cs
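
The two conditions mean that n_iter_no_change and validation_fraction are only active for certain values of early_stop; with the default of "off" they are inactive and absent from a configuration. A quick sketch, assuming the method is callable as shown (in some ConfigSpace versions inactive values show up as None rather than being missing):

cs = get_hyperparameter_search_space()
config = cs.get_default_configuration()

# early_stop defaults to "off", so both conditional children are inactive.
print(config.get("n_iter_no_change"))     # None
print(config.get("validation_fraction"))  # None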
Example 9
 def get_configspace():
     cs = ConfigurationSpace()
     x1 = UniformFloatHyperparameter("x1", -5, 10, default_value=0)
     x2 = UniformFloatHyperparameter("x2", 0, 15, default_value=0)
     cs.add_hyperparameters([x1, x2])
     return cs
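
A configuration space built this way can be sampled directly; a small usage sketch:

cs = get_configspace()
default = cs.get_default_configuration()
print(default["x1"], default["x2"])  # 0.0 0.0

for config in cs.sample_configuration(3):  # returns a list for size > 1
    print(config["x1"], config["x2"])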
Example 10
    def get_hyperparameter_search_space(dataset_properties: Optional[Dict] = None,
                                        min_num_groups: int = 1,
                                        max_num_groups: int = 9,
                                        min_blocks_per_groups: int = 1,
                                        max_blocks_per_groups: int = 4,
                                        min_num_units: int = 10,
                                        max_num_units: int = 1024,
                                        ) -> ConfigurationSpace:
        cs = ConfigurationSpace()

        # The number of groups composing the resnet. Each group can hold
        # N ResBlocks; num_groups is the number of such groups that are
        # stacked on top of each other.
        num_groups = UniformIntegerHyperparameter(
            "num_groups", lower=min_num_gropus, upper=max_num_groups, default_value=5)

        activation = CategoricalHyperparameter(
            "activation", choices=list(_activations.keys())
        )
        cs.add_hyperparameters([num_groups, activation])

        # We can have dropout in the network for
        # better generalization
        use_dropout = CategoricalHyperparameter(
            "use_dropout", choices=[True, False])
        cs.add_hyperparameters([use_dropout])

        use_shake_shake = CategoricalHyperparameter("use_shake_shake", choices=[True, False])
        use_shake_drop = CategoricalHyperparameter("use_shake_drop", choices=[True, False])
        shake_drop_prob = UniformFloatHyperparameter(
            "max_shake_drop_probability", lower=0.0, upper=1.0)
        cs.add_hyperparameters([use_shake_shake, use_shake_drop, shake_drop_prob])
        cs.add_condition(CS.EqualsCondition(shake_drop_prob, use_shake_drop, True))

        # Iterate up to the upper bound on the number of groups; the
        # conditions below deactivate the hyperparameters of any group
        # beyond the sampled value of num_groups.
        for i in range(0, max_num_groups + 1):

            n_units = UniformIntegerHyperparameter(
                "num_units_%d" % i,
                lower=min_num_units,
                upper=max_num_units,
            )
            blocks_per_group = UniformIntegerHyperparameter(
                "blocks_per_group_%d" % i, lower=min_blocks_per_groups,
                upper=max_blocks_per_groups)

            cs.add_hyperparameters([n_units, blocks_per_group])

            if i > 1:
                cs.add_condition(CS.GreaterThanCondition(n_units, num_groups, i - 1))
                cs.add_condition(CS.GreaterThanCondition(blocks_per_group, num_groups, i - 1))

            this_dropout = UniformFloatHyperparameter(
                "dropout_%d" % i, lower=0.0, upper=1.0
            )
            cs.add_hyperparameters([this_dropout])

            dropout_condition_1 = CS.EqualsCondition(this_dropout, use_dropout, True)

            if i > 1:

                dropout_condition_2 = CS.GreaterThanCondition(this_dropout, num_groups, i - 1)

                cs.add_condition(CS.AndConjunction(dropout_condition_1, dropout_condition_2))
            else:
                cs.add_condition(dropout_condition_1)
        return cs
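
The pattern above (an EqualsCondition and a GreaterThanCondition combined in an AndConjunction) is easier to see in isolation; a minimal self-contained sketch:

import ConfigSpace as CS
from ConfigSpace import ConfigurationSpace
from ConfigSpace.hyperparameters import (CategoricalHyperparameter,
                                         UniformFloatHyperparameter,
                                         UniformIntegerHyperparameter)

cs = ConfigurationSpace()
num_groups = UniformIntegerHyperparameter("num_groups", 1, 3, default_value=2)
use_dropout = CategoricalHyperparameter("use_dropout", choices=[True, False])
dropout_2 = UniformFloatHyperparameter("dropout_2", 0.0, 1.0)
cs.add_hyperparameters([num_groups, use_dropout, dropout_2])

# dropout_2 is active only if dropout is enabled AND num_groups > 1.
cs.add_condition(CS.AndConjunction(
    CS.EqualsCondition(dropout_2, use_dropout, True),
    CS.GreaterThanCondition(dropout_2, num_groups, 1),
))
print(cs)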
Example 11
    def get_hyperparameter_search_space(dataset_properties=None):
        cs = ConfigurationSpace()

        loss = CategoricalHyperparameter("loss", [
            "squared_loss", "huber", "epsilon_insensitive",
            "squared_epsilon_insensitive"
        ],
                                         default="squared_loss")
        penalty = CategoricalHyperparameter("penalty",
                                            ["l1", "l2", "elasticnet"],
                                            default="l2")
        alpha = UniformFloatHyperparameter("alpha",
                                           10e-7,
                                           1e-1,
                                           log=True,
                                           default=0.01)
        l1_ratio = UniformFloatHyperparameter("l1_ratio",
                                              1e-9,
                                              1.,
                                              log=True,
                                              default=0.15)
        fit_intercept = UnParametrizedHyperparameter("fit_intercept", "True")
        n_iter = UniformIntegerHyperparameter("n_iter",
                                              5,
                                              1000,
                                              log=True,
                                              default=20)
        epsilon = UniformFloatHyperparameter("epsilon",
                                             1e-5,
                                             1e-1,
                                             default=1e-4,
                                             log=True)
        learning_rate = CategoricalHyperparameter(
            "learning_rate", ["optimal", "invscaling", "constant"],
            default="optimal")
        eta0 = UniformFloatHyperparameter("eta0", 10**-7, 0.1, default=0.01)
        power_t = UniformFloatHyperparameter("power_t", 1e-5, 1, default=0.5)
        average = CategoricalHyperparameter("average", ["False", "True"],
                                            default="False")

        cs.add_hyperparameters([
            loss, penalty, alpha, l1_ratio, fit_intercept, n_iter, epsilon,
            learning_rate, eta0, power_t, average
        ])

        # TODO add passive/aggressive here, although not properly documented?
        elasticnet = EqualsCondition(l1_ratio, penalty, "elasticnet")
        epsilon_condition = InCondition(
            epsilon, loss,
            ["huber", "epsilon_insensitive", "squared_epsilon_insensitive"])
        # eta0 seems to be always active according to the source code; when
        # learning_rate is set to optimal, eta0 is the starting value:
        # https://github.com/scikit-learn/scikit-learn/blob/0.15.X/sklearn/linear_model/sgd_fast.pyx
        # eta0_and_inv = EqualsCondition(eta0, learning_rate, "invscaling")
        # eta0_and_constant = EqualsCondition(eta0, learning_rate, "constant")
        # eta0_condition = OrConjunction(eta0_and_inv, eta0_and_constant)
        power_t_condition = EqualsCondition(power_t, learning_rate,
                                            "invscaling")

        cs.add_conditions([elasticnet, epsilon_condition, power_t_condition])

        return cs
Example 12
 def test_normalfloat_to_uniformfloat(self):
     f1 = NormalFloatHyperparameter("param", 0, 10, q=0.1)
     f1_expected = UniformFloatHyperparameter("param", -30, 30, q=0.1)
     f1_actual = f1.to_uniform()
     self.assertEqual(f1_expected, f1_actual)
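
The expected [-30, 30] range is consistent with to_uniform() widening the normal distribution to a fixed window around the mean (here mu ± 3·sigma); a quick check:

from ConfigSpace.hyperparameters import NormalFloatHyperparameter

f1 = NormalFloatHyperparameter("param", mu=0, sigma=10, q=0.1)
print(f1.to_uniform())  # expected to print the uniform equivalent asserted above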
Example 13
    def test_uniformfloat(self):
        # TODO test non-equality
        # TODO test sampling from a log-distribution which has a negative
        # lower value!
        f1 = UniformFloatHyperparameter("param", 0, 10)
        f1_ = UniformFloatHyperparameter("param", 0, 10)
        self.assertEqual(f1, f1_)
        self.assertEqual(
            "param, Type: UniformFloat, Range: [0.0, 10.0], "
            "Default: 5.0", str(f1))

        # Test attributes are accessible
        self.assertEqual(f1.name, "param")
        self.assertAlmostEqual(f1.lower, 0.0)
        self.assertAlmostEqual(f1.upper, 10.0)
        self.assertEqual(f1.q, None)
        self.assertEqual(f1.log, False)
        self.assertAlmostEqual(f1.default_value, 5.0)
        self.assertAlmostEqual(f1.normalized_default_value, 0.5)

        f2 = UniformFloatHyperparameter("param", 0, 10, q=0.1)
        f2_ = UniformFloatHyperparameter("param", 0, 10, q=0.1)
        self.assertEqual(f2, f2_)
        self.assertEqual(
            "param, Type: UniformFloat, Range: [0.0, 10.0], "
            "Default: 5.0, Q: 0.1", str(f2))

        f3 = UniformFloatHyperparameter("param", 0.00001, 10, log=True)
        f3_ = UniformFloatHyperparameter("param", 0.00001, 10, log=True)
        self.assertEqual(f3, f3_)
        self.assertEqual(
            "param, Type: UniformFloat, Range: [1e-05, 10.0], Default: 0.01, "
            "on log-scale", str(f3))

        f4 = UniformFloatHyperparameter("param", 0, 10, default_value=1.0)
        f4_ = UniformFloatHyperparameter("param", 0, 10, default_value=1.0)
        # Test that an int default is converted to float
        f4__ = UniformFloatHyperparameter("param", 0, 10, default_value=1)
        self.assertEqual(f4, f4_)
        self.assertEqual(type(f4.default_value), type(f4__.default_value))
        self.assertEqual(
            "param, Type: UniformFloat, Range: [0.0, 10.0], Default: 1.0",
            str(f4))

        f5 = UniformFloatHyperparameter("param",
                                        0.1,
                                        10,
                                        q=0.1,
                                        log=True,
                                        default_value=1.0)
        f5_ = UniformFloatHyperparameter("param",
                                         0.1,
                                         10,
                                         q=0.1,
                                         log=True,
                                         default_value=1.0)
        self.assertEqual(f5, f5_)
        self.assertEqual(
            "param, Type: UniformFloat, Range: [0.1, 10.0], Default: 1.0, "
            "on log-scale, Q: 0.1", str(f5))

        self.assertNotEqual(f1, f2)
        self.assertNotEqual(f1, "UniformFloat")

        # test that meta-data is stored correctly
        f_meta = UniformFloatHyperparameter("param",
                                            0.1,
                                            10,
                                            q=0.1,
                                            log=True,
                                            default_value=1.0,
                                            meta=dict(self.meta_data))
        self.assertEqual(f_meta.meta, self.meta_data)
Example 14
    def test_uniformfloat_is_legal(self):
        lower = 0.1
        upper = 10
        f1 = UniformFloatHyperparameter("param", lower, upper, q=0.1, log=True)

        self.assertTrue(f1.is_legal(3.0))
        self.assertTrue(f1.is_legal(3))
        self.assertFalse(f1.is_legal(-0.1))
        self.assertFalse(f1.is_legal(10.1))
        self.assertFalse(f1.is_legal("AAA"))
        self.assertFalse(f1.is_legal(dict()))

        # Test legal vector values
        self.assertTrue(f1.is_legal_vector(1.0))
        self.assertTrue(f1.is_legal_vector(0.0))
        self.assertTrue(f1.is_legal_vector(0))
        self.assertTrue(f1.is_legal_vector(0.3))
        self.assertFalse(f1.is_legal_vector(-0.1))
        self.assertFalse(f1.is_legal_vector(1.1))
        self.assertRaises(TypeError, f1.is_legal_vector, "Hahaha")
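
The asymmetry being tested is worth spelling out: is_legal checks a value on the parameter's own scale, while is_legal_vector checks the internal vector representation, which for this parameter type is normalized to [0, 1]. A minimal sketch:

from ConfigSpace.hyperparameters import UniformFloatHyperparameter

f1 = UniformFloatHyperparameter("param", 0.1, 10, q=0.1, log=True)

print(f1.is_legal(3.0))         # True: 3.0 lies within [0.1, 10]
print(f1.is_legal_vector(3.0))  # False: vector values must lie in [0, 1]
print(f1.is_legal_vector(0.5))  # True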
Example 15
        def test_photon_implementation_simple(self):
            # PHOTON implementation
            self.pipe.add(PipelineElement('StandardScaler'))
            self.pipe += PipelineElement(
                'PCA', hyperparameters={'n_components': IntegerRange(5, 30)})
            self.pipe += PipelineElement('SVC',
                                         hyperparameters={
                                             'kernel':
                                             Categorical(["rbf", 'poly']),
                                             'C': FloatRange(0.5, 200)
                                         },
                                         gamma='auto')
            self.X, self.y = self.simple_classification()
            self.pipe.fit(self.X, self.y)

            # direct AUTO ML implementation
            # Build Configuration Space which defines all parameters and their ranges
            cs = ConfigurationSpace()
            n_components = UniformIntegerHyperparameter(
                "PCA__n_components", 5, 30)
            cs.add_hyperparameter(n_components)
            kernel = CategoricalHyperparameter("SVC__kernel", ["rbf", 'poly'])
            cs.add_hyperparameter(kernel)
            c = UniformFloatHyperparameter("SVC__C", 0.5, 200)
            cs.add_hyperparameter(c)

            # Scenario object
            scenario = Scenario({
                "run_obj": "quality",
                "cs": cs,
                "deterministic": "true",
                "wallclock_limit": self.time_limit,
                "limit_resources": False,
                'abort_on_first_run_crash': False
            })

            # Optimize, using a SMAC directly
            smac = SMAC4HPO(scenario=scenario,
                            rng=42,
                            tae_runner=self.objective_function_simple)
            _ = smac.optimize()

            runhistory_photon = self.smac_helper["data"].solver.runhistory
            runhistory_original = smac.solver.runhistory

            x_ax = range(
                1,
                min(len(runhistory_original._cost_per_config.keys()),
                    len(runhistory_photon._cost_per_config.keys())) + 1)
            y_ax_original = [
                runhistory_original._cost_per_config[tmp] for tmp in x_ax
            ]
            y_ax_photon = [
                runhistory_photon._cost_per_config[tmp] for tmp in x_ax
            ]

            y_ax_original_inc = [min(y_ax_original[:tmp + 1]) for tmp in x_ax]
            y_ax_photon_inc = [min(y_ax_photon[:tmp + 1]) for tmp in x_ax]

            plot = False
            if plot:
                plt.figure(figsize=(10, 7))
                plt.plot(x_ax, y_ax_original, 'g', label='Original')
                plt.plot(x_ax, y_ax_photon, 'b', label='PHOTON')
                plt.plot(x_ax, y_ax_photon_inc, 'r', label='PHOTON Incumbent')
                plt.plot(x_ax,
                         y_ax_original_inc,
                         'k',
                         label='Original Incumbent')
                plt.title('Photon Prove')
                plt.xlabel('X')
                plt.ylabel('Y')
                plt.legend(loc='best')
                plt.savefig("smac.png")

            min_len = min(len(y_ax_original), len(y_ax_photon))
            self.assertLessEqual(
                np.max(
                    np.abs(
                        np.array(y_ax_original[:min_len]) -
                        np.array(y_ax_photon[:min_len]))), 0.01)
Example 16
        def test_photon_implementation_switch(self):
            # PHOTON implementation
            self.pipe.add(PipelineElement('StandardScaler'))
            self.pipe += PipelineElement(
                'PCA', hyperparameters={'n_components': IntegerRange(5, 30)})
            estimator_switch = Switch("Estimator")
            estimator_switch += PipelineElement('SVC',
                                                hyperparameters={
                                                    'kernel':
                                                    Categorical(
                                                        ["rbf", 'poly']),
                                                    'C':
                                                    FloatRange(0.5, 200)
                                                },
                                                gamma='auto')
            estimator_switch += PipelineElement('RandomForestClassifier',
                                                hyperparameters={
                                                    'criterion':
                                                    Categorical(
                                                        ['gini', 'entropy']),
                                                    'min_samples_split':
                                                    IntegerRange(2, 4)
                                                })
            self.pipe += estimator_switch
            self.X, self.y = self.simple_classification()
            self.pipe.fit(self.X, self.y)

            # direct AUTO ML implementation

            # Build Configuration Space which defines all parameters and their ranges
            cs = ConfigurationSpace()
            n_components = UniformIntegerHyperparameter(
                "PCA__n_components", 5, 30)
            cs.add_hyperparameter(n_components)

            switch = CategoricalHyperparameter("Estimator_switch",
                                               ['svc', 'rf'])
            cs.add_hyperparameter(switch)

            kernel = CategoricalHyperparameter("SVC__kernel", ["rbf", 'poly'])
            cs.add_hyperparameter(kernel)
            c = UniformFloatHyperparameter("SVC__C", 0.5, 200)
            cs.add_hyperparameter(c)
            use_svc_kernel = InCondition(child=kernel,
                                         parent=switch,
                                         values=["svc"])
            use_svc_c = InCondition(child=c,
                                    parent=switch,
                                    values=["svc"])

            criterion = CategoricalHyperparameter(
                "RandomForestClassifier__criterion", ['gini', 'entropy'])
            cs.add_hyperparameter(criterion)
            minsplit = UniformIntegerHyperparameter(
                "RandomForestClassifier__min_samples_split", 2, 4)
            cs.add_hyperparameter(minsplit)

            use_rf_crit = InCondition(child=criterion,
                                      parent=switch,
                                      values=["rf"])
            use_rf_minsplit = InCondition(child=minsplit,
                                          parent=switch,
                                          values=["rf"])

            cs.add_conditions(
                [use_svc_c, use_svc_kernel, use_rf_crit, use_rf_minsplit])

            # Scenario object
            scenario = Scenario({
                "run_obj": "quality",
                "cs": cs,
                "deterministic": "true",
                "wallclock_limit": self.time_limit,
                "limit_resources": False,
                'abort_on_first_run_crash': False
            })

            # Optimize, using a SMAC directly
            smac = SMAC4HPO(scenario=scenario,
                            rng=42,
                            tae_runner=self.objective_function_switch)
            _ = smac.optimize()

            runhistory_photon = self.smac_helper["data"].solver.runhistory
            runhistory_original = smac.solver.runhistory

            x_ax = range(
                1,
                min(len(runhistory_original._cost_per_config.keys()),
                    len(runhistory_photon._cost_per_config.keys())) + 1)
            y_ax_original = [
                runhistory_original._cost_per_config[tmp] for tmp in x_ax
            ]
            y_ax_photon = [
                runhistory_photon._cost_per_config[tmp] for tmp in x_ax
            ]

            min_len = min(len(y_ax_original), len(y_ax_photon))
            self.assertLessEqual(
                np.max(
                    np.abs(
                        np.array(y_ax_original[:min_len]) -
                        np.array(y_ax_photon[:min_len]))), 0.01)
Example 17
    def test_against_smac(self):
        # PHOTON implementation
        self.pipe.add(PipelineElement('StandardScaler'))
        # then do feature selection using a PCA, specify which values to try in the hyperparameter search
        self.pipe += PipelineElement(
            'PCA', hyperparameters={'n_components': IntegerRange(5, 30)})
        # engage and optimize the good old SVM for Classification
        self.pipe += PipelineElement(
            'SVC',
            hyperparameters={
                'kernel': Categorical(["linear", "rbf", 'poly', 'sigmoid']),
                'C': FloatRange(0.5, 200)
            },
            gamma='auto')

        self.X, self.y = self.simple_classification()
        self.pipe.fit(self.X, self.y)

        # AUTO ML direct
        # Build Configuration Space which defines all parameters and their ranges
        cs = ConfigurationSpace()

        # We define a few possible types of SVM-kernels and add them as "kernel" to our cs
        n_components = UniformIntegerHyperparameter("PCA__n_components", 5,
                                                    30)  # , default_value=5)
        cs.add_hyperparameter(n_components)

        kernel = CategoricalHyperparameter(
            "SVC__kernel",
            ["linear", "rbf", 'poly', 'sigmoid'])  #, default_value="linear")
        cs.add_hyperparameter(kernel)

        c = UniformFloatHyperparameter("SVC__C", 0.5, 200)  #, default_value=1)
        cs.add_hyperparameter(c)

        # Scenario object
        scenario = Scenario({
            "run_obj":
            "quality",  # we optimize quality (alternatively runtime)
            "runcount-limit": 800,  # maximum function evaluations
            "cs": cs,  # configuration space
            "deterministic": "true",
            "shared_model": "false",  # !!!!
            "wallclock_limit": self.time_limit
        })

        # Optimize, using a SMAC-object
        print(
            "Optimizing! Depending on your machine, this might take a few minutes."
        )
        smac = SMAC4BO(scenario=scenario,
                       rng=np.random.RandomState(42),
                       tae_runner=self.objective_function)

        self.traurig = smac

        incumbent = smac.optimize()

        inc_value = self.objective_function(incumbent)

        print(incumbent)
        print(inc_value)

        runhistory_photon = self.smac_helper["data"].solver.runhistory
        runhistory_original = smac.solver.runhistory

        x_ax = range(
            1,
            min(len(runhistory_original.cost_per_config.keys()),
                len(runhistory_photon.cost_per_config.keys())) + 1)
        y_ax_original = [
            runhistory_original.cost_per_config[tmp] for tmp in x_ax
        ]
        y_ax_photon = [runhistory_photon.cost_per_config[tmp] for tmp in x_ax]

        y_ax_original_inc = [min(y_ax_original[:tmp + 1]) for tmp in x_ax]
        y_ax_photon_inc = [min(y_ax_photon[:tmp + 1]) for tmp in x_ax]

        plt.figure(figsize=(10, 7))
        plt.plot(x_ax, y_ax_original, 'g', label='Original')
        plt.plot(x_ax, y_ax_photon, 'b', label='PHOTON')
        plt.plot(x_ax, y_ax_photon_inc, 'r', label='PHOTON Incumbent')
        plt.plot(x_ax, y_ax_original_inc, 'k', label='Original Incumbent')
        plt.title('Photon Prove')
        plt.xlabel('X')
        plt.ylabel('Y')
        plt.legend(loc='best')
        plt.show()

        def neighbours(items, fill=None):
            before = itertools.chain([fill], items)
            after = itertools.chain(
                items,
                [fill])  # You could use itertools.zip_longest() later instead.
            next(after)
            for a, b, c in zip(before, items, after):
                yield [value for value in (a, b, c) if value is not fill]

        print("---------------")
        original_pairing = [
            sum(values) / len(values) for values in neighbours(y_ax_original)
        ]
        bias_term = np.mean([
            abs(y_ax_original_inc[t] - y_ax_photon_inc[t])
            for t in range(len(y_ax_photon_inc))
        ])
        photon_pairing = [
            sum(values) / len(values) - bias_term
            for values in neighbours(y_ax_photon)
        ]
        counter = 0
        for i, x in enumerate(x_ax):
            if abs(original_pairing[i] - photon_pairing[i]) > 0.05:
                counter += 1
        self.assertLessEqual(counter / len(x_ax), 0.15)
Example 18
    def get_hyperparameter_search_space(dataset_properties: Optional[Dict] = None,
                                        min_num_groups: int = 1,
                                        max_num_groups: int = 9,
                                        min_blocks_per_groups: int = 1,
                                        max_blocks_per_groups: int = 4,
                                        min_num_units: int = 10,
                                        max_num_units: int = 1024,
                                        ) -> ConfigurationSpace:
        cs = ConfigurationSpace()

        # Support for different shapes
        resnet_shape = CategoricalHyperparameter(
            'resnet_shape',
            choices=[
                'funnel',
                'long_funnel',
                'diamond',
                'hexagon',
                'brick',
                'triangle',
                'stairs'
            ]
        )
        cs.add_hyperparameter(resnet_shape)

        # The number of groups composing the resnet. Each group can hold
        # N ResBlocks; num_groups is the number of such groups that are
        # stacked on top of each other.
        num_groups = UniformIntegerHyperparameter(
            "num_groups", lower=min_num_gropus, upper=max_num_groups, default_value=5)

        blocks_per_group = UniformIntegerHyperparameter(
            "blocks_per_group", lower=min_blocks_per_groups, upper=max_blocks_per_groups)

        activation = CategoricalHyperparameter(
            "activation", choices=list(_activations.keys())
        )

        output_dim = UniformIntegerHyperparameter(
            "output_dim",
            lower=min_num_units,
            upper=max_num_units
        )

        cs.add_hyperparameters([num_groups, blocks_per_group, activation, output_dim])

        # We can have dropout in the network for
        # better generalization
        use_dropout = CategoricalHyperparameter(
            "use_dropout", choices=[True, False])
        cs.add_hyperparameters([use_dropout])

        use_shake_shake = CategoricalHyperparameter("use_shake_shake", choices=[True, False])
        use_shake_drop = CategoricalHyperparameter("use_shake_drop", choices=[True, False])
        shake_drop_prob = UniformFloatHyperparameter(
            "max_shake_drop_probability", lower=0.0, upper=1.0)
        cs.add_hyperparameters([use_shake_shake, use_shake_drop, shake_drop_prob])
        cs.add_condition(CS.EqualsCondition(shake_drop_prob, use_shake_drop, True))

        max_units = UniformIntegerHyperparameter(
            "max_units",
            lower=min_num_units,
            upper=max_num_units,
        )
        cs.add_hyperparameters([max_units])

        max_dropout = UniformFloatHyperparameter(
            "max_dropout", lower=0.0, upper=1.0
        )
        cs.add_hyperparameters([max_dropout])
        cs.add_condition(CS.EqualsCondition(max_dropout, use_dropout, True))

        return cs
Example 19
    def get_hyperparameter_search_space(dataset_properties=None):
        cs = ConfigurationSpace()

        # Parameterized Hyperparameters
        max_depth = UniformIntegerHyperparameter(
            name="max_depth", lower=1, upper=10, default_value=3
        )
        learning_rate = UniformFloatHyperparameter(
            name="learning_rate", lower=0.01, upper=1, default_value=0.1,
            log=True
        )
        n_estimators = Constant("n_estimators", 512)
        booster = CategoricalHyperparameter(
            "booster", ["gbtree", "dart"]
        )
        subsample = UniformFloatHyperparameter(
            name="subsample", lower=0.01, upper=1.0, default_value=1.0,
            log=False
        )
        min_child_weight = UniformIntegerHyperparameter(
            name="min_child_weight", lower=1e-10,
            upper=20, default_value=1, log=False
        )
        colsample_bytree = UniformFloatHyperparameter(
            name="colsample_bytree", lower=0.1, upper=1.0, default_value=1,
        )
        colsample_bylevel = UniformFloatHyperparameter(
            name="colsample_bylevel", lower=0.1, upper=1.0, default_value=1,
        )
        reg_alpha = UniformFloatHyperparameter(
            name="reg_alpha", lower=1e-10, upper=1e-1, log=True,
            default_value=1e-10)
        reg_lambda = UniformFloatHyperparameter(
            name="reg_lambda", lower=1e-10, upper=1e-1, log=True,
            default_value=1e-10)

        # DART Hyperparameters
        sample_type = CategoricalHyperparameter(
            'sample_type', ['uniform', 'weighted'], default_value='uniform',
        )
        normalize_type = CategoricalHyperparameter(
            'normalize_type', ['tree', 'forest'], default_value='tree',
        )
        rate_drop = UniformFloatHyperparameter(
            'rate_drop', 1e-10, 1 - (1e-10), default_value=0.5,
        )

        # Unparameterized Hyperparameters
        # https://xgboost.readthedocs.io/en/latest/parameter.html
        # minimum loss reduction required to make a further partition on a
        # leaf node of the tree
        gamma = UnParametrizedHyperparameter(
            name="gamma", value=0)
        # absolute regularization (in contrast to eta), comparable to
        # gradient clipping in deep learning - according to the internet this
        #  is most important for unbalanced data
        max_delta_step = UnParametrizedHyperparameter(
            name="max_delta_step", value=0)
        base_score = UnParametrizedHyperparameter(
            name="base_score", value=0.5)
        scale_pos_weight = UnParametrizedHyperparameter(
            name="scale_pos_weight", value=1)

        cs.add_hyperparameters([
            # Active
            max_depth, learning_rate, n_estimators, booster,
            subsample, colsample_bytree, colsample_bylevel,
            reg_alpha, reg_lambda,
            # DART
            sample_type, normalize_type, rate_drop,
            # Inactive
            min_child_weight, max_delta_step, gamma,
            base_score, scale_pos_weight
        ])

        sample_type_condition = EqualsCondition(
            sample_type, booster, 'dart',
        )
        normalize_type_condition = EqualsCondition(
            normalize_type, booster, 'dart',
        )
        rate_drop_condition = EqualsCondition(
            rate_drop, booster, 'dart',
        )

        cs.add_conditions([
            sample_type_condition, normalize_type_condition,
            rate_drop_condition,
        ])
        return cs
Example 20
    def get_hyperparameter_search_space(dataset_properties: Optional[Dict] = None,
                                        min_mlp_layers: int = 1,
                                        max_mlp_layers: int = 15,
                                        dropout: bool = True,
                                        min_num_units: int = 10,
                                        max_num_units: int = 1024,
                                        ) -> ConfigurationSpace:

        cs = ConfigurationSpace()

        # The number of hidden layers the network will have.
        # Layer blocks are meant to have the same architecture, differing only
        # by the number of units
        num_groups = UniformIntegerHyperparameter(
            "num_groups", min_mlp_layers, max_mlp_layers, default_value=5)

        activation = CategoricalHyperparameter(
            "activation", choices=list(_activations.keys())
        )
        cs.add_hyperparameters([num_groups, activation])

        # We can have dropout in the network for
        # better generalization
        if dropout:
            use_dropout = CategoricalHyperparameter(
                "use_dropout", choices=[True, False])
            cs.add_hyperparameters([use_dropout])

        for i in range(1, max_mlp_layers + 1):
            n_units_hp = UniformIntegerHyperparameter("num_units_%d" % i,
                                                      lower=min_num_units,
                                                      upper=max_num_units,
                                                      default_value=20)
            cs.add_hyperparameter(n_units_hp)

            if i > min_mlp_layers:
                # The units of layer i should only exist
                # if there are at least i layers
                cs.add_condition(
                    CS.GreaterThanCondition(
                        n_units_hp, num_groups, i - 1
                    )
                )

            if dropout:
                dropout_hp = UniformFloatHyperparameter(
                    "dropout_%d" % i,
                    lower=0.0,
                    upper=0.8,
                    default_value=0.5
                )
                cs.add_hyperparameter(dropout_hp)
                dropout_condition_1 = CS.EqualsCondition(dropout_hp, use_dropout, True)

                if i > min_mlp_layers:
                    dropout_condition_2 = CS.GreaterThanCondition(dropout_hp, num_groups, i - 1)
                    cs.add_condition(CS.AndConjunction(dropout_condition_1, dropout_condition_2))
                else:
                    cs.add_condition(dropout_condition_1)

        return cs
Example 21
dense_width = CategoricalHyperparameter("dense_width", [64, 128],
                                        default_value=128)
dense_length = UniformIntegerHyperparameter("dense_length",
                                            1,
                                            3,
                                            default_value=2)

optimizer = CategoricalHyperparameter("optimizer",
                                      ['adam', 'sgd', 'nadam', 'RMSprop'],
                                      default_value='RMSprop')
optimizer_lr = CategoricalHyperparameter("optimizer_lr",
                                         [.0001, .0003, .001, .003, .01],
                                         default_value=.0003)
learning_decay_rate = UniformFloatHyperparameter("learning_decay_rate",
                                                 0,
                                                 0.9,
                                                 default_value=.6)

cs.add_hyperparameters([
    first_kernel_size, conv_filters, n_conv, dropout, activation, dense_width,
    dense_length, optimizer, optimizer_lr, learning_decay_rate
])

scenario = Scenario({
    "run_obj": "quality",
    "runcount-limit": 128,
    "cs": cs,
    "deterministic": "true"
})
scenario.output_dir_for_this_run = "C:\\NNwork\\SMAC3out"
scenario.output_dir = "C:\\NNwork\\SMAC3out"
Example 22
    def get_hyperparameter_search_space(dataset_properties=None,
                                        optimizer='smac'):
        if optimizer == 'smac':
            cs = ConfigurationSpace()
            n_estimators = UniformIntegerHyperparameter(name="n_estimators",
                                                        lower=50,
                                                        upper=500,
                                                        default_value=50,
                                                        log=False)
            learning_rate = UniformFloatHyperparameter(name="learning_rate",
                                                       lower=0.01,
                                                       upper=2,
                                                       default_value=0.1,
                                                       log=True)
            algorithm = CategoricalHyperparameter(name="algorithm",
                                                  choices=["SAMME.R", "SAMME"],
                                                  default_value="SAMME.R")
            max_depth = UniformIntegerHyperparameter(name="max_depth",
                                                     lower=1,
                                                     upper=10,
                                                     default_value=1,
                                                     log=False)

            sampling_strategy = CategoricalHyperparameter(
                name="sampling_strategy",
                choices=["majority", "not minority", "not majority", "all"],
                default_value="not minority")
            replacement = CategoricalHyperparameter("replacement",
                                                    ["True", "False"],
                                                    default_value="False")
            cs.add_hyperparameters([
                n_estimators, learning_rate, algorithm, max_depth,
                sampling_strategy, replacement
            ])
            return cs
        elif optimizer == 'tpe':
            from hyperopt import hp
            space = {
                'n_estimators':
                hp.randint('rb_n_estimators', 451) + 50,
                'learning_rate':
                hp.loguniform('rb_learning_rate', np.log(0.01), np.log(2)),
                'algorithm':
                hp.choice('rb_algorithm', ["SAMME.R", "SAMME"]),
                'max_depth':
                hp.randint('rb_max_depth', 10) + 1,
                'sampling_strategy':
                hp.choice('rb_sampling_strategy',
                          ["majority", "not minority", "not majority", "all"]),
                'replacement':
                hp.choice('rb_replacement', ["True", "False"])
            }

            # Note: init_trial is assembled for reference but not returned
            # by this branch.
            init_trial = {
                'n_estimators': 50,
                'learning_rate': 0.1,
                'algorithm': "SAMME.R",
                'max_depth': 1,
                'sampling_strategy': "not minority",
                'replacement': "False",
            }
            return space
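
The dictionary returned by the 'tpe' branch is a standard hyperopt search space; a sketch of how it would typically be consumed, assuming the method is callable as shown (the objective below is a placeholder, not part of the original code):

from hyperopt import STATUS_OK, fmin, tpe

def objective(params):
    # Placeholder: train a model with `params` and compute a real loss here.
    return {'loss': 0.0, 'status': STATUS_OK}

space = get_hyperparameter_search_space(optimizer='tpe')
best = fmin(objective, space, algo=tpe.suggest, max_evals=10)
print(best)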
Example 23
def get_configspace_instance(algo_id='random_forest'):
    cs = ConfigurationSpace()
    if algo_id == 'random_forest':
        criterion = CategoricalHyperparameter("criterion", ["gini", "entropy"],
                                              default_value="gini")

        # The maximum number of features used in the forest is calculated as m^max_features, where
        # m is the total number of features, and max_features is the hyperparameter specified below.
        # The default is 0.5, which yields sqrt(m) features as max_features in the estimator. This
        # corresponds with Geurts' heuristic.
        max_features = UniformFloatHyperparameter("max_features",
                                                  0.,
                                                  1.,
                                                  default_value=0.5)

        max_depth = UnParametrizedHyperparameter("max_depth", "None")
        min_samples_split = UniformIntegerHyperparameter("min_samples_split",
                                                         2,
                                                         20,
                                                         default_value=2)
        min_samples_leaf = UniformIntegerHyperparameter("min_samples_leaf",
                                                        1,
                                                        20,
                                                        default_value=1)
        min_weight_fraction_leaf = UnParametrizedHyperparameter(
            "min_weight_fraction_leaf", 0.)
        max_leaf_nodes = UnParametrizedHyperparameter("max_leaf_nodes", "None")
        min_impurity_decrease = UnParametrizedHyperparameter(
            'min_impurity_decrease', 0.0)
        bootstrap = CategoricalHyperparameter("bootstrap", ["True", "False"],
                                              default_value="True")
        cs.add_hyperparameters([
            criterion, max_features, max_depth, min_samples_split,
            min_samples_leaf, min_weight_fraction_leaf, max_leaf_nodes,
            bootstrap, min_impurity_decrease
        ])
    elif algo_id == 'liblinear_svc':
        penalty = CategoricalHyperparameter("penalty", ["l1", "l2"],
                                            default_value="l2")
        loss = CategoricalHyperparameter("loss", ["hinge", "squared_hinge"],
                                         default_value="squared_hinge")
        dual = CategoricalHyperparameter("dual", ['True', 'False'],
                                         default_value='True')
        # This is set ad-hoc
        tol = UniformFloatHyperparameter("tol",
                                         1e-5,
                                         1e-1,
                                         default_value=1e-4,
                                         log=True)
        C = UniformFloatHyperparameter("C",
                                       0.03125,
                                       32768,
                                       log=True,
                                       default_value=1.0)
        multi_class = Constant("multi_class", "ovr")
        # These are set ad-hoc
        fit_intercept = Constant("fit_intercept", "True")
        intercept_scaling = Constant("intercept_scaling", 1)
        cs.add_hyperparameters([
            penalty, loss, dual, tol, C, multi_class, fit_intercept,
            intercept_scaling
        ])

        penalty_and_loss = ForbiddenAndConjunction(
            ForbiddenEqualsClause(penalty, "l1"),
            ForbiddenEqualsClause(loss, "hinge"))
        constant_penalty_and_loss = ForbiddenAndConjunction(
            ForbiddenEqualsClause(dual, "False"),
            ForbiddenEqualsClause(penalty, "l2"),
            ForbiddenEqualsClause(loss, "hinge"))
        penalty_and_dual = ForbiddenAndConjunction(
            ForbiddenEqualsClause(dual, "True"),
            ForbiddenEqualsClause(penalty, "l1"))
        cs.add_forbidden_clause(penalty_and_loss)
        cs.add_forbidden_clause(constant_penalty_and_loss)
        cs.add_forbidden_clause(penalty_and_dual)
    elif algo_id == 'lightgbm':
        n_estimators = UniformFloatHyperparameter("n_estimators",
                                                  100,
                                                  1000,
                                                  default_value=500,
                                                  q=50)
        num_leaves = UniformIntegerHyperparameter("num_leaves",
                                                  31,
                                                  2047,
                                                  default_value=128)
        max_depth = Constant('max_depth', 15)
        learning_rate = UniformFloatHyperparameter("learning_rate",
                                                   1e-3,
                                                   0.3,
                                                   default_value=0.1,
                                                   log=True)
        min_child_samples = UniformIntegerHyperparameter("min_child_samples",
                                                         5,
                                                         30,
                                                         default_value=20)
        subsample = UniformFloatHyperparameter("subsample",
                                               0.7,
                                               1,
                                               default_value=1,
                                               q=0.1)
        colsample_bytree = UniformFloatHyperparameter("colsample_bytree",
                                                      0.7,
                                                      1,
                                                      default_value=1,
                                                      q=0.1)
        cs.add_hyperparameters([
            n_estimators, num_leaves, max_depth, learning_rate,
            min_child_samples, subsample, colsample_bytree
        ])
    elif algo_id == 'adaboost':
        n_estimators = UniformIntegerHyperparameter(name="n_estimators",
                                                    lower=50,
                                                    upper=500,
                                                    default_value=50,
                                                    log=False)
        learning_rate = UniformFloatHyperparameter(name="learning_rate",
                                                   lower=0.01,
                                                   upper=2,
                                                   default_value=0.1,
                                                   log=True)
        algorithm = CategoricalHyperparameter(name="algorithm",
                                              choices=["SAMME.R", "SAMME"],
                                              default_value="SAMME.R")
        max_depth = UniformIntegerHyperparameter(name="max_depth",
                                                 lower=2,
                                                 upper=8,
                                                 default_value=3,
                                                 log=False)
        cs.add_hyperparameters(
            [n_estimators, learning_rate, algorithm, max_depth])
    elif algo_id == 'lda':
        shrinkage = CategoricalHyperparameter("shrinkage",
                                              ["None", "auto", "manual"],
                                              default_value="None")
        shrinkage_factor = UniformFloatHyperparameter("shrinkage_factor", 0.,
                                                      1., 0.5)
        n_components = UniformIntegerHyperparameter('n_components',
                                                    1,
                                                    250,
                                                    default_value=10)
        tol = UniformFloatHyperparameter("tol",
                                         1e-5,
                                         1e-1,
                                         default_value=1e-4,
                                         log=True)
        cs.add_hyperparameters(
            [shrinkage, shrinkage_factor, n_components, tol])
        cs.add_condition(EqualsCondition(shrinkage_factor, shrinkage,
                                         "manual"))
    elif algo_id == 'extra_trees':
        criterion = CategoricalHyperparameter("criterion", ["gini", "entropy"],
                                              default_value="gini")

        # The maximum number of features used in the forest is calculated as m^max_features, where
        # m is the total number of features, and max_features is the hyperparameter specified below.
        # The default is 0.5, which yields sqrt(m) features as max_features in the estimator. This
        # corresponds with Geurts' heuristic.
        max_features = UniformFloatHyperparameter("max_features",
                                                  0.,
                                                  1.,
                                                  default_value=0.5)

        max_depth = UnParametrizedHyperparameter(name="max_depth",
                                                 value="None")

        min_samples_split = UniformIntegerHyperparameter("min_samples_split",
                                                         2,
                                                         20,
                                                         default_value=2)
        min_samples_leaf = UniformIntegerHyperparameter("min_samples_leaf",
                                                        1,
                                                        20,
                                                        default_value=1)
        min_weight_fraction_leaf = UnParametrizedHyperparameter(
            'min_weight_fraction_leaf', 0.)
        max_leaf_nodes = UnParametrizedHyperparameter("max_leaf_nodes", "None")
        min_impurity_decrease = UnParametrizedHyperparameter(
            'min_impurity_decrease', 0.0)

        bootstrap = CategoricalHyperparameter("bootstrap", ["True", "False"],
                                              default_value="False")
        cs.add_hyperparameters([
            criterion, max_features, max_depth, min_samples_split,
            min_samples_leaf, min_weight_fraction_leaf, max_leaf_nodes,
            min_impurity_decrease, bootstrap
        ])
    elif algo_id == 'resnet':
        batch_size = UniformIntegerHyperparameter("train_batch_size",
                                                  32,
                                                  256,
                                                  default_value=64,
                                                  q=8)
        init_lr = UniformFloatHyperparameter('init_lr',
                                             lower=1e-3,
                                             upper=0.3,
                                             default_value=0.1,
                                             log=True)
        lr_decay_factor = UnParametrizedHyperparameter('lr_decay_factor', 0.1)
        weight_decay = UniformFloatHyperparameter('weight_decay',
                                                  lower=1e-5,
                                                  upper=1e-2,
                                                  default_value=0.0002,
                                                  log=True)
        momentum = UniformFloatHyperparameter("momentum",
                                              0.5,
                                              .99,
                                              default_value=0.9)
        nesterov = CategoricalHyperparameter('nesterov', ['True', 'False'],
                                             default_value='True')
        cs.add_hyperparameters([
            batch_size, init_lr, lr_decay_factor, weight_decay, momentum,
            nesterov
        ])
    elif algo_id == 'nas':
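        # NAS-Bench-201 topology: each cell has 6 edges, and every edge
        # selects one of the five candidate operations listed below.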
        operation = 6
        benchmark201_choices = [
            'none', 'skip_connect', 'nor_conv_1x1', 'nor_conv_3x3',
            'avg_pool_3x3'
        ]
        for i in range(operation):
            cs.add_hyperparameter(
                CategoricalHyperparameter(
                    'op_%d' % i,
                    choices=benchmark201_choices,
                    default_value=benchmark201_choices[1]))

        return cs
    else:
        raise ValueError('Invalid algorithm - %s' % algo_id)
    return cs
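# Usage sketch -- `get_configspace` is an assumed name for the builder above,
# whose `def` line falls outside this fragment.
cs_demo = get_configspace('extra_trees')
config = cs_demo.sample_configuration()  # draw one random configuration
print(config.get_dictionary())           # e.g. {'criterion': 'gini', ...}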
Example n. 24
import requests
from ConfigSpace.read_and_write import json as config_json
from ConfigSpace.hyperparameters import UniformFloatHyperparameter
from openbox.config_space import ConfigurationSpace
from openbox.config_space.util import convert_configurations_to_array

user_id = 18

cs = ConfigurationSpace()
x1 = UniformFloatHyperparameter("x1", -5, 10, default_value=0)
x2 = UniformFloatHyperparameter("x2", 0, 15, default_value=0)
cs.add_hyperparameters([x1, x2])

config_space_array = config_json.write(cs)

res = requests.post('http://127.0.0.1:8001/bo_advice/task_register/',
                    data={
                        'id': user_id,
                        'config_space_array': config_space_array
                    })
print('-----------------')
print(res)
print('-----------------')
print(res.text)
print('-----------------')
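# Optional sanity check (sketch): the JSON string written above can be read
# back into an equivalent space locally before registering it remotely.
restored = config_json.read(config_space_array)
print(restored.get_hyperparameter_names())  # ['x1', 'x2']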
Example n. 25

import logging

from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import CategoricalHyperparameter, \
    UniformFloatHyperparameter

# logger = logging.getLogger("SVMExample")
logging.basicConfig(level=logging.INFO)  # logging.DEBUG for debug output

# Build Configuration Space which defines all parameters and their ranges
cs = ConfigurationSpace()

# We define the hyperparameters of a LogisticRegression-style linear
# classifier and add them to our cs
penalty = CategoricalHyperparameter("penalty",
                                    ["l1", "l2", "elasticnet", "none"],
                                    default_value="l2")

dual = CategoricalHyperparameter("dual", [True, False], default_value=False)

tol = UniformFloatHyperparameter("tol", 0.00001, 0.1, default_value=0.0001)

C = UniformFloatHyperparameter("C", 0.001, 5.0, default_value=1.0)  # C must be > 0

fit_intercept = CategoricalHyperparameter("fit_intercept", [True, False],
                                          default_value=False)

intercept_scaling = UniformFloatHyperparameter("intercept_scaling",
                                               0.0,
                                               5.0,
                                               default_value=1.0)

solver = CategoricalHyperparameter(
    "solver", ["newton-cg", "lbfgs", "liblinear", "sag", "saga"],
    default_value="liblinear")
Example n. 26
import logging
import typing

import numpy as np

from ConfigSpace.configuration_space import Configuration, ConfigurationSpace
from ConfigSpace.hyperparameters import UniformFloatHyperparameter
from smac.facade.smac_hpo_facade import SMAC4HPO
from smac.runhistory.runhistory import RunKey
from smac.scenario.scenario import Scenario
from smac.tae.execute_func import ExecuteTAFuncArray


def fmin_smac(
        func: typing.Callable,
        x0: typing.List[float],
        bounds: typing.List[typing.Iterable[float]],
        maxfun: int = -1,
        rng: typing.Optional[typing.Union[np.random.RandomState, int]] = None,
        scenario_args: typing.Optional[typing.Mapping[str, typing.Any]] = None,
        tae_runner_kwargs: typing.Optional[typing.Dict[str,
                                                       typing.Any]] = None,
        **kwargs: typing.Any) -> typing.Tuple[Configuration, float, SMAC4HPO]:
    """
    Minimize a function func using the SMAC4HPO facade
    (i.e., a modified version of SMAC).
    This function is a convenience wrapper for the SMAC4HPO class.

    Parameters
    ----------
    func : typing.Callable
        Function to minimize.
    x0 : typing.List[float]
        Initial guess/default configuration.
    bounds : typing.List[typing.Iterable[float]]
        ``(min, max)`` pairs for each element in ``x``, defining the bounds
        for that parameter.
    maxfun : int, optional
        Maximum number of function evaluations.
    rng : np.random.RandomState, optional
        Random number generator used by SMAC.
    scenario_args : typing.Mapping[str, typing.Any], optional
        Arguments passed to the scenario.
        See smac.scenario.scenario.Scenario.
    **kwargs:
        Arguments passed to the optimizer class
        See ~smac.facade.smac_facade.SMAC

    Returns
    -------
    x : list
        Estimated position of the minimum.
    f : float
        Value of `func` at the minimum.
    s : :class:`smac.facade.smac_hpo_facade.SMAC4HPO`
        SMAC object, which enables the user to get e.g. the trajectory
        and runhistory.

    """
    # create configuration space
    cs = ConfigurationSpace()

    # Adjust zero padding
    tmplt = 'x{0:0' + str(len(str(len(bounds)))) + 'd}'

    for idx, (lower_bound, upper_bound) in enumerate(bounds):
        parameter = UniformFloatHyperparameter(name=tmplt.format(idx + 1),
                                               lower=lower_bound,
                                               upper=upper_bound,
                                               default_value=x0[idx])
        cs.add_hyperparameter(parameter)

    # create scenario
    scenario_dict = {
        "run_obj": "quality",
        "cs": cs,
        "deterministic": "true",
        "initial_incumbent": "DEFAULT",
    }

    if scenario_args is not None:
        scenario_dict.update(scenario_args)

    if maxfun > 0:
        scenario_dict["runcount_limit"] = maxfun
    scenario = Scenario(scenario_dict)

    # Handle optional tae runner arguments
    if tae_runner_kwargs is not None:
        if 'ta' not in tae_runner_kwargs:
            tae_runner_kwargs.update({'ta': func})
    else:
        tae_runner_kwargs = {'ta': func}

    smac = SMAC4HPO(scenario=scenario,
                    tae_runner=ExecuteTAFuncArray,
                    tae_runner_kwargs=tae_runner_kwargs,
                    rng=rng,
                    **kwargs)

    smac.logger = logging.getLogger(smac.__module__ + "." +
                                    smac.__class__.__name__)
    incumbent = smac.optimize()
    config_id = smac.solver.runhistory.config_ids[incumbent]
    run_key = RunKey(config_id, None, 0)
    incumbent_performance = smac.solver.runhistory.data[run_key]
    incumbent = np.array(
        [incumbent[tmplt.format(idx + 1)] for idx in range(len(bounds))],
        dtype=float)  # np.float is deprecated; use the builtin float
    return incumbent, incumbent_performance.cost, smac
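# Example usage (sketch): minimize a 2-D quadratic in ten evaluations.
# fmin_smac passes the sampled values to `func` as an array-like x.
if __name__ == "__main__":
    def quadratic(x):
        return (x[0] - 1.0) ** 2 + (x[1] + 2.0) ** 2

    x_min, cost, smac_obj = fmin_smac(quadratic,
                                      x0=[0.0, 0.0],
                                      bounds=[(-5.0, 5.0), (-5.0, 5.0)],
                                      maxfun=10,
                                      rng=3)
    print(x_min, cost)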
Example n. 27
from mosaic.external.ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import CategoricalHyperparameter, \
    UniformFloatHyperparameter, UniformIntegerHyperparameter
from ConfigSpace.conditions import InCondition

from mosaic.mosaic import Search

# Build Configuration Space which defines all parameters and their ranges
cs = ConfigurationSpace()
# We define a few possible types of SVM-kernels and add them as "kernel" to our cs
kernel = CategoricalHyperparameter("kernel",
                                   ["linear", "rbf", "poly", "sigmoid"],
                                   default_value="poly")
cs.add_hyperparameter(kernel)
# There are some hyperparameters shared by all kernels
C = UniformFloatHyperparameter("C", 0.001, 1000.0, default_value=1.0)
shrinking = CategoricalHyperparameter("shrinking", ["true", "false"],
                                      default_value="true")
cs.add_hyperparameters([C, shrinking])
# Others are kernel-specific, so we can add conditions to limit the searchspace
degree = UniformIntegerHyperparameter(
    "degree", 1, 5, default_value=3)  # Only used by kernel poly
coef0 = UniformFloatHyperparameter("coef0", 0.0, 10.0,
                                   default_value=0.0)  # poly, sigmoid
cs.add_hyperparameters([degree, coef0])
use_degree = InCondition(child=degree, parent=kernel, values=["poly"])
use_coef0 = InCondition(child=coef0, parent=kernel, values=["poly", "sigmoid"])
cs.add_conditions([use_degree, use_coef0])
# This also works for parameters that are a mix of categorical and values from a range of numbers
# For example, gamma can be either "auto" or a fixed float
gamma = CategoricalHyperparameter(
    "gamma", ["auto", "value"], default_value="auto")  # only rbf, poly, sigmoid
gamma_value = UniformFloatHyperparameter("gamma_value", 0.0001, 8., default_value=1)
cs.add_hyperparameters([gamma, gamma_value])
# Activate gamma_value only when gamma is set to "value"
cs.add_condition(InCondition(child=gamma_value, parent=gamma, values=["value"]))
Example n. 28
#!pip install smac --no-cache

# Import ConfigSpace and different types of parameters
from smac.configspace import ConfigurationSpace
from ConfigSpace.hyperparameters import CategoricalHyperparameter, \
    UniformFloatHyperparameter, UniformIntegerHyperparameter
from ConfigSpace.conditions import InCondition

# Import SMAC-utilities
from smac.tae.execute_func import ExecuteTAFuncDict
from smac.scenario.scenario import Scenario
from smac.facade.smac_facade import SMAC

cs = ConfigurationSpace()

lr = UniformFloatHyperparameter("lr", 0.0001, 0.1, default_value=0.001)
cs.add_hyperparameter(lr)

batch_size = CategoricalHyperparameter("batch_size", [128, 256], default_value=128)
cs.add_hyperparameter(batch_size)

# Scenario object
scenario = Scenario({"run_obj": "quality",   # we optimize quality (alternatively runtime)
                     "runcount-limit": 5,  # maximum function evaluations
                     "cs": cs,               # configuration space
                     "deterministic": "true",
                     #"abort_on_first_run_crash": "false"
                     })

# Optimize, using a SMAC-object
print("Optimizing! Depending on your machine, this might take a few minutes.")
Example n. 29
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformFloatHyperparameter, \
    UniformIntegerHyperparameter, CategoricalHyperparameter, \
    UnParametrizedHyperparameter, Constant
from automl.utl import json_utils

C = UniformFloatHyperparameter("C", 1e-5, 10, 1.0, log=True)
fit_intercept = UnParametrizedHyperparameter("fit_intercept", "True")
loss = CategoricalHyperparameter("loss", ["hinge", "squared_hinge"],
                                 default_value="hinge")

tol = UniformFloatHyperparameter("tol",
                                 1e-5,
                                 1e-1,
                                 default_value=1e-4,
                                 log=True)
# Note: Average could also be an Integer if > 1
average = CategoricalHyperparameter('average', ['False', 'True'],
                                    default_value='False')

cs = ConfigurationSpace()
cs.add_hyperparameters([loss, fit_intercept, tol, C, average])

json_utils.write_cs_to_json_file(cs, "PassiveAggressiveClassifier")
Example n. 30
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformFloatHyperparameter, \
    UniformIntegerHyperparameter, CategoricalHyperparameter, \
    UnParametrizedHyperparameter, Constant
from automl.utl import json_utils

cs = ConfigurationSpace()

# The smoothing parameter alpha is a non-negative float. Here it is limited
# to 100 and put on a logarithmic scale; adjust the range if you know a
# proper one, as this is just a guess.
alpha = UniformFloatHyperparameter(name="alpha",
                                   lower=1e-2,
                                   upper=100,
                                   default_value=1,
                                   log=True)

fit_prior = CategoricalHyperparameter(name="fit_prior",
                                      choices=["True", "False"],
                                      default_value="True")

cs.add_hyperparameters([alpha, fit_prior])

json_utils.write_cs_to_json_file(cs, "BernoulliNB")
Example n. 31
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.conditions import InCondition
from ConfigSpace.hyperparameters import CategoricalHyperparameter, \
    Constant, UniformFloatHyperparameter


def get_cs():
    cs = ConfigurationSpace()

    root = CategoricalHyperparameter("root", choices=["l1", "ln"])
    x1 = CategoricalHyperparameter("x1", choices=["l1", "ln"])
    x2 = CategoricalHyperparameter("x2", choices=["l1", "ln"])
    x3 = CategoricalHyperparameter("x3", choices=["l1", "ln"])
    x4 = CategoricalHyperparameter("x4", choices=["l1", "ln"])
    x5 = CategoricalHyperparameter("x5", choices=["l1", "ln"])
    x6 = CategoricalHyperparameter("x6", choices=["l1", "ln"])

    # r1_* hold the data associated with node x1
    r1_1 = UniformFloatHyperparameter("r1_1",
                                      lower=0.01,
                                      upper=0.99,
                                      log=False)
    r1_2 = UniformFloatHyperparameter("r1_2",
                                      lower=0.01,
                                      upper=0.99,
                                      log=False)
    r1_3 = UniformFloatHyperparameter("r1_3",
                                      lower=0.01,
                                      upper=0.99,
                                      log=False)

    r2_1 = UniformFloatHyperparameter("r2_1",
                                      lower=0.01,
                                      upper=0.99,
                                      log=False)
    r2_2 = UniformFloatHyperparameter("r2_2",
                                      lower=0.01,
                                      upper=0.99,
                                      log=False)
    r2_3 = UniformFloatHyperparameter("r2_3",
                                      lower=0.01,
                                      upper=0.99,
                                      log=False)

    r3_1 = UniformFloatHyperparameter("r3_1",
                                      lower=0.01,
                                      upper=0.99,
                                      log=False)
    r3_2 = UniformFloatHyperparameter("r3_2",
                                      lower=0.01,
                                      upper=0.99,
                                      log=False)
    r3_3 = Constant("r3_3", 0.5)

    r4_1 = UniformFloatHyperparameter("r4_1",
                                      lower=0.01,
                                      upper=0.99,
                                      log=False)
    r4_2 = UniformFloatHyperparameter("r4_2",
                                      lower=0.01,
                                      upper=0.99,
                                      log=False)
    r4_3 = UniformFloatHyperparameter("r4_3",
                                      lower=0.01,
                                      upper=0.99,
                                      log=False)

    r5_1 = UniformFloatHyperparameter("r5_1",
                                      lower=0.01,
                                      upper=0.99,
                                      log=False)
    r5_2 = UniformFloatHyperparameter("r5_2",
                                      lower=0.01,
                                      upper=0.99,
                                      log=False)
    r5_3 = UniformFloatHyperparameter("r5_3",
                                      lower=0.01,
                                      upper=0.99,
                                      log=False)

    r6_1 = UniformFloatHyperparameter("r6_1",
                                      lower=0.01,
                                      upper=0.99,
                                      log=False)
    r6_2 = UniformFloatHyperparameter("r6_2",
                                      lower=0.01,
                                      upper=0.99,
                                      log=False)
    r6_3 = UniformFloatHyperparameter("r6_3",
                                      lower=0.01,
                                      upper=0.99,
                                      log=False)

    r7_1 = UniformFloatHyperparameter("r7_1",
                                      lower=0.01,
                                      upper=0.99,
                                      log=False)
    r7_2 = UniformFloatHyperparameter("r7_2",
                                      lower=0.01,
                                      upper=0.99,
                                      log=False)
    r7_3 = Constant("r7_3", 0.5)

    r8_1 = UniformFloatHyperparameter("r8_1",
                                      lower=0.01,
                                      upper=0.99,
                                      log=False)
    r8_2 = UniformFloatHyperparameter("r8_2",
                                      lower=0.01,
                                      upper=0.99,
                                      log=False)
    r8_3 = Constant("r8_3", 0.5)

    r9_1 = UniformFloatHyperparameter("r9_1",
                                      lower=0.01,
                                      upper=0.99,
                                      log=False)
    r9_2 = UniformFloatHyperparameter("r9_2",
                                      lower=0.01,
                                      upper=0.99,
                                      log=False)
    r9_3 = Constant("r9_3", 0.5)

    r10_1 = UniformFloatHyperparameter("r10_1",
                                       lower=0.01,
                                       upper=0.99,
                                       log=False)
    r10_2 = UniformFloatHyperparameter("r10_2",
                                       lower=0.01,
                                       upper=0.99,
                                       log=False)
    r10_3 = Constant("r10_3", 0.5)

    r11_1 = UniformFloatHyperparameter("r11_1",
                                       lower=0.01,
                                       upper=0.99,
                                       log=False)
    r11_2 = UniformFloatHyperparameter("r11_2",
                                       lower=0.01,
                                       upper=0.99,
                                       log=False)
    r11_3 = Constant("r11_3", 0.5)

    r12_1 = UniformFloatHyperparameter("r12_1",
                                       lower=0.01,
                                       upper=0.99,
                                       log=False)
    r12_2 = UniformFloatHyperparameter("r12_2",
                                       lower=0.01,
                                       upper=0.99,
                                       log=False)
    r12_3 = Constant("r12_3", 0.5)

    r13_1 = UniformFloatHyperparameter("r13_1",
                                       lower=0.01,
                                       upper=0.99,
                                       log=False)
    r13_2 = UniformFloatHyperparameter("r13_2",
                                       lower=0.01,
                                       upper=0.99,
                                       log=False)
    r13_3 = Constant("r13_3", 0.5)

    r14_1 = UniformFloatHyperparameter("r14_1",
                                       lower=0.01,
                                       upper=0.99,
                                       log=False)
    r14_2 = UniformFloatHyperparameter("r14_2",
                                       lower=0.01,
                                       upper=0.99,
                                       log=False)
    r14_3 = Constant("r14_3", 0.5)

    cs.add_hyperparameters([
        root,
        x1,
        x2,
        x3,
        x4,
        x5,
        x6,
        r1_1,
        r1_2,
        r1_3,
        r2_1,
        r2_2,
        r2_3,
        r3_1,
        r3_2,
        r3_3,
        r4_1,
        r4_2,
        r4_3,
        r5_1,
        r5_2,
        r5_3,
        r6_1,
        r6_2,
        r6_3,
        r7_1,
        r7_2,
        r7_3,
        r8_1,
        r8_2,
        r8_3,
        r9_1,
        r9_2,
        r9_3,
        r10_1,
        r10_2,
        r10_3,
        r11_1,
        r11_2,
        r11_3,
        r12_1,
        r12_2,
        r12_3,
        r13_1,
        r13_2,
        r13_3,
        r14_1,
        r14_2,
        r14_3,
    ])

    # add condition
    cs.add_condition(InCondition(x1, root, ["l1"]))
    cs.add_condition(InCondition(x2, root, ["ln"]))
    cs.add_condition(InCondition(r1_1, root, ["l1"]))
    cs.add_condition(InCondition(r1_2, root, ["l1"]))
    cs.add_condition(InCondition(r1_3, root, ["l1"]))
    cs.add_condition(InCondition(r2_1, root, ["ln"]))
    cs.add_condition(InCondition(r2_2, root, ["ln"]))
    cs.add_condition(InCondition(r2_3, root, ["ln"]))

    cs.add_condition(InCondition(x3, x1, ["l1"]))
    cs.add_condition(InCondition(x4, x1, ["ln"]))
    cs.add_condition(InCondition(r3_1, x1, ["l1"]))
    cs.add_condition(InCondition(r3_2, x1, ["l1"]))
    cs.add_condition(InCondition(r3_3, x1, ["l1"]))
    cs.add_condition(InCondition(r4_1, x1, ["ln"]))
    cs.add_condition(InCondition(r4_2, x1, ["ln"]))
    cs.add_condition(InCondition(r4_3, x1, ["ln"]))

    cs.add_condition(InCondition(x5, x2, ["l1"]))
    cs.add_condition(InCondition(x6, x2, ["ln"]))
    cs.add_condition(InCondition(r5_1, x2, ["l1"]))
    cs.add_condition(InCondition(r5_2, x2, ["l1"]))
    cs.add_condition(InCondition(r5_3, x2, ["l1"]))
    cs.add_condition(InCondition(r6_1, x2, ["ln"]))
    cs.add_condition(InCondition(r6_2, x2, ["ln"]))
    cs.add_condition(InCondition(r6_3, x2, ["ln"]))

    cs.add_condition(InCondition(r7_1, x3, ["l1"]))
    cs.add_condition(InCondition(r7_2, x3, ["l1"]))
    cs.add_condition(InCondition(r7_3, x3, ["l1"]))
    cs.add_condition(InCondition(r8_1, x3, ["ln"]))
    cs.add_condition(InCondition(r8_2, x3, ["ln"]))
    cs.add_condition(InCondition(r8_3, x3, ["ln"]))

    cs.add_condition(InCondition(r9_1, x4, ["l1"]))
    cs.add_condition(InCondition(r9_2, x4, ["l1"]))
    cs.add_condition(InCondition(r9_3, x4, ["l1"]))
    cs.add_condition(InCondition(r10_1, x4, ["ln"]))
    cs.add_condition(InCondition(r10_2, x4, ["ln"]))
    cs.add_condition(InCondition(r10_3, x4, ["ln"]))

    cs.add_condition(InCondition(r11_1, x5, ["l1"]))
    cs.add_condition(InCondition(r11_2, x5, ["l1"]))
    cs.add_condition(InCondition(r11_3, x5, ["l1"]))
    cs.add_condition(InCondition(r12_1, x5, ["ln"]))
    cs.add_condition(InCondition(r12_2, x5, ["ln"]))
    cs.add_condition(InCondition(r12_3, x5, ["ln"]))

    cs.add_condition(InCondition(r13_1, x6, ["l1"]))
    cs.add_condition(InCondition(r13_2, x6, ["l1"]))
    cs.add_condition(InCondition(r13_3, x6, ["l1"]))
    cs.add_condition(InCondition(r14_1, x6, ["ln"]))
    cs.add_condition(InCondition(r14_2, x6, ["ln"]))
    cs.add_condition(InCondition(r14_3, x6, ["ln"]))

    return cs
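# Refactoring sketch (an assumption, not the author's code): every r*_1/r*_2
# above shares the range [0.01, 0.99], and r*_3 is either uniform (r1-r2,
# r4-r6) or the constant 0.5 (r3, r7-r14), so the block could be generated:
def make_r_params(index, third_is_constant):
    params = [UniformFloatHyperparameter("r%d_%d" % (index, j),
                                         lower=0.01, upper=0.99, log=False)
              for j in (1, 2)]
    if third_is_constant:
        params.append(Constant("r%d_3" % index, 0.5))
    else:
        params.append(UniformFloatHyperparameter("r%d_3" % index, lower=0.01,
                                                 upper=0.99, log=False))
    return params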
Example n. 32
import logging
import os
import pickle

import hpbandster.core.nameserver as hpns
from hpbandster.optimizers import BOHB
from ConfigSpace import ConfigurationSpace
from ConfigSpace.hyperparameters import CategoricalHyperparameter, \
    UniformFloatHyperparameter, UniformIntegerHyperparameter
# DQNWorker is assumed to be defined elsewhere in this project.


def dqn_bohb_wrapper(**params):

    # Setup directories where live data is logged
    logdir = params["logdir"]
    dqn_output_dir = os.path.join(logdir, 'dqn_output')
    # if not os.path.isdir(dqn_output_dir):
    #     os.makedirs(dqn_output_dir)
    params["logdir"] = dqn_output_dir

    bohb_output_dir = os.path.join(logdir, 'bohb_output')
    # if not os.path.isdir(bohb_output_dir):
    #     os.makedirs(bohb_output_dir)

    logging.basicConfig(level=logging.INFO)  # logging.DEBUG for debug output
    logger = logging.getLogger()
    logger.propagate = False  # no duplicate logging outputs
    fh = logging.FileHandler(os.path.join(logdir, 'bohb.log'))
    fh.setLevel(logging.INFO)
    fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s:%(name)s: %(message)s'))
    logger.addHandler(fh)

    # Build configuration space and define all hyperparameters
    cs = ConfigurationSpace()
    epsilon = UniformFloatHyperparameter("epsilon", 0.2, 0.9, default_value=0.6)  # initial epsilon
    epsilon_decay = UniformFloatHyperparameter("epsilon_decay", 0.2, 1, default_value=0.995)  # decay rate
    lr = UniformFloatHyperparameter("lr", 0.0005, 0.01, default_value=0.005)
    units_shared_layer1 = UniformIntegerHyperparameter("units_layer1", 8, 100, default_value=24)
    units_shared_layer2 = UniformIntegerHyperparameter("units_layer2", 8, 100, default_value=24)
    units_policy_layer = UniformIntegerHyperparameter("units_layer3", 8, 100, default_value=24)
    activ_fcn = CategoricalHyperparameter("activ_fcn", ['relu6', 'elu', 'mixed'], default_value='relu6')
    gamma = UniformFloatHyperparameter("gamma", 0.6, 0.90, default_value=0.80)
    tau = UniformFloatHyperparameter("tau", 0.5, 1., default_value=0.7)
    # update_interval = UniformIntegerHyperparameter("update_interval", 1, 300, default_value=50)
    if params["architecture"] == 'lstm' or (params["architecture"] == 'gru'):
        trace_length = UniformIntegerHyperparameter("trace_length", 1, 20, default_value=8)
        # buffer_condition = LessThanCondition(child=trace_length, parent=params["buffer_size"])
        # pa["batch_size"] = 5
        cs.add_hyperparameters([units_shared_layer1, units_shared_layer2, units_policy_layer,
                                epsilon, epsilon_decay, activ_fcn, lr, gamma, tau, trace_length])
    else:
        params.pop("batch_size")
        batch_size = UniformIntegerHyperparameter("batch_size", 1, 100, default_value=30)
        # buffer_condition = LessThanCondition(child=batch_size, parent=params["buffer_size"], value=33)
        # InCondition(child=batch_size, value=33)
        cs.add_hyperparameters([units_shared_layer1, units_shared_layer2, units_policy_layer,
                                epsilon, epsilon_decay, activ_fcn, lr, gamma, tau, batch_size])

    logger.info('##############################################')
    logger.info('Run Optimization')
    logger.info('##############################################')
    if params["array_id"] == 1:
        # Setup directories where live data is logged
        # logdir = params["logdir"]
        # dqn_output_dir = os.path.join(logdir, 'dqn_output')
        if not os.path.isdir(dqn_output_dir):
            os.makedirs(dqn_output_dir)
        # params["logdir"] = dqn_output_dir

        # bohb_output_dir = os.path.join(logdir, 'bohb_output')
        if not os.path.isdir(bohb_output_dir):
            os.makedirs(bohb_output_dir)

        # start nameserver
        NS = hpns.NameServer(run_id=params["instance_id"], nic_name=params["nic_name"],
                             working_directory=bohb_output_dir)
        ns_host, ns_port = NS.start()  # stores information for workers to find in working directory

        # BOHB is usually so cheap that we can afford to run a worker on the master node, too.
        worker = DQNWorker(nameserver=ns_host, nameserver_port=ns_port, run_id=params["instance_id"], **params)
        worker.run(background=True)

        # Create scenario object
        logger.info('##############################################')
        logger.info('Setup BOHB instance')
        logger.info('##############################################')

        logger.info('Output_dir: %s' % bohb_output_dir)
        HB = BOHB(configspace=cs,
                  run_id=params["instance_id"],
                  eta=3,
                  min_budget=params["min_resource"],
                  max_budget=params["max_resource"],
                  host=ns_host,
                  nameserver=ns_host,
                  nameserver_port=ns_port,
                  ping_interval=3600)

        res = HB.run(n_iterations=4,
                     min_n_workers=4)  # BOHB can wait until a minimum number of workers is online before starting

        # pickle result here for later analysis
        with open(os.path.join(bohb_output_dir, 'results.pkl'), 'wb') as f:
            pickle.dump(res, f)

        id2config = res.get_id2config_mapping()
        print('A total of %i unique configurations were sampled.' % len(id2config.keys()))
        print('A total of %i runs were executed.' % len(res.get_all_runs()))
        # incumbent_trajectory = res.get_incumbent_trajectory()
        # import matplotlib.pyplot as plt
        # plt.plot(incumbent_trajectory['times_finished'], incumbent_trajectory['losses'])
        # plt.xlabel('wall clock time [s]')
        # plt.ylabel('incumbent loss')
        # plt.show()

        # shutdown all workers
        HB.shutdown(shutdown_workers=True)

        # shutdown nameserver
        NS.shutdown()

    else:
        host = hpns.nic_name_to_host(params["nic_name"])

        # workers only instantiate the MyWorker, find the nameserver and start serving
        w = DQNWorker(run_id=params["instance_id"], host=host, **params)
        w.load_nameserver_credentials(bohb_output_dir)
        # run the worker in the foreground
        w.run(background=False)
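# Post-hoc usage sketch (an assumed helper, not part of the original module):
# recover the incumbent configuration from the pickled hpbandster result.
def load_incumbent(bohb_output_dir):
    with open(os.path.join(bohb_output_dir, 'results.pkl'), 'rb') as f:
        res = pickle.load(f)
    id2config = res.get_id2config_mapping()
    return id2config[res.get_incumbent_id()]['config']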
Example n. 33
import numpy as np

from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformFloatHyperparameter
from smac.scenario.scenario import Scenario


def rosenbrock_2d(x):  # signature restored; the fragment began mid-docstring
    """The 2-dimensional Rosenbrock function, a classic optimization test
    problem. It can be defined for arbitrary dimensions; the minimum is
    always at x_i = 1 with a function value of zero. All input parameters
    are continuous. The search domain for all x's is the interval [-5, 10].
    """

    x1 = x["x0"]
    x2 = x["x1"]

    val = 100. * (x2 - x1**2.)**2. + (1 - x1)**2.
    return val


if __name__ == "__main__":
    # Build Configuration Space which defines all parameters and their ranges
    cs = ConfigurationSpace()
    x0 = UniformFloatHyperparameter("x0", -5, 10, default_value=-3)
    x1 = UniformFloatHyperparameter("x1", -5, 10, default_value=-4)
    cs.add_hyperparameters([x0, x1])

    # Scenario object
    scenario = Scenario({
        "run_obj": "quality",  # we optimize quality (alternatively runtime)
        "runcount-limit": 10,  # max. number of function evaluations
        "cs": cs,  # configuration space
        "deterministic": True
    })

    # Use 'gp' or 'gp_mcmc' here
    model_type = 'gp'

    # Example call of the function
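    # The fragment breaks off here; based on SMAC's published GP example, the
    # continuation would plausibly look like this (a sketch, not the original):
    from smac.facade.smac_bo_facade import SMAC4BO

    def_value = rosenbrock_2d(cs.get_default_configuration())
    print("Default value: %.2f" % def_value)

    smac = SMAC4BO(scenario=scenario,
                   model_type=model_type,
                   rng=np.random.RandomState(42),
                   tae_runner=rosenbrock_2d)
    incumbent = smac.optimize()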
    plt.ylabel("Accuracy (%)")
    plt.title("Validation Accuracy vs Number of Iterations: Bagging") 

    for label, x, y in zip(hyper_parameters, iterations, accuracies):
        plt.text(x, y, label, fontsize=8,
                 horizontalalignment='center', verticalalignment='center')

    save_file_name = "Accuracy_vs_Number_of_Iterations_Bagging.png" 
    plt.savefig(save_file_name)
    #plt.show()


# In[ ]:


cs = ConfigurationSpace()
C = UniformFloatHyperparameter("C", 0.001, 1000.0, default_value=1.0)
cs.add_hyperparameter(C)
scenario = Scenario({"run_obj": "quality",   # we optimize quality (alternatively runtime)
                     "runcount-limit": 50,  # maximum function evaluations
                     "cs": cs,               # configuration space
                     "deterministic": "true"
                     })


# In[ ]:


smac = SMAC(scenario=scenario, rng=np.random.RandomState(42),
            tae_runner=svm_from_cfg)
incumbent = smac.optimize()
print(incumbent)
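# Sketch (assumed continuation, mirroring the standard SMAC quickstart):
# evaluate the incumbent once more to report its final score.
inc_value = svm_from_cfg(incumbent)
print("Optimized Value: %.2f" % inc_value)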