Example 1
 def setUp(self):
     self.cs = ConfigurationSpace()
     self.scenario = Scenario({
         'cs': self.cs,
         'run_obj': 'quality',
         'output_dir': ''
     })
     self.sh_intensifier_kwargs = {
         'n_seeds': 1,
         'initial_budget': 1,
         'eta': 3,
         'min_chall': 1,
         'max_budget': 100,
     }
     self.output_dirs = []
Example 2
 def setUp(self):
     current_dir = os.path.dirname(__file__)
     self.test_files_dir = os.path.join(current_dir, '..', 'test_files')
     seed = np.random.randint(1, 100000)
     self.cs = ConfigurationSpace(seed=seed)
     x1 = UniformFloatHyperparameter("x1", -5, 5, default=5)
     self.cs.add_hyperparameter(x1)
     x2 = UniformIntegerHyperparameter("x2", -5, 5, default=5)
     self.cs.add_hyperparameter(x2)
     x3 = CategoricalHyperparameter("x3",
                                    [5, 2, 0, 1, -1, -2, 4, -3, 3, -5, -4],
                                    default=5)
     self.cs.add_hyperparameter(x3)
     x4 = UniformIntegerHyperparameter("x4", -5, 5, default=5)
     self.cs.add_hyperparameter(x4)
Example 3
    def test_initializations(self):
        cs = ConfigurationSpace()
        for i in range(40):
            cs.add_hyperparameter(
                UniformFloatHyperparameter('x%d' % (i + 1), 0, 1))
        scenario = Scenario({'cs': cs, 'run_obj': 'quality'})
        hb_kwargs = {'initial_budget': 1, 'max_budget': 3}
        facade = HB4AC(scenario=scenario, intensifier_kwargs=hb_kwargs)

        self.assertIsInstance(facade.solver.initial_design,
                              RandomConfigurations)
        self.assertIsInstance(facade.solver.epm_chooser.model, RandomEPM)
        self.assertIsInstance(facade.solver.intensifier, Hyperband)
        self.assertEqual(facade.solver.intensifier.min_chall, 1)
        self.output_dirs.append(scenario.output_dir)
Example 4
 def test_exchange_sobol_for_lhd(self):
     cs = ConfigurationSpace()
     for i in range(40):
         cs.add_hyperparameter(
             UniformFloatHyperparameter('x%d' % (i + 1), 0, 1))
     scenario = Scenario({'cs': cs, 'run_obj': 'quality'})
     facade = SMAC4HPO(scenario=scenario)
     self.assertIsInstance(facade.solver.initial_design, SobolDesign)
     cs.add_hyperparameter(UniformFloatHyperparameter('x41', 0, 1))
     with self.assertRaisesRegex(
             ValueError,
             '"Sobol sequence" can only handle up to 40 dimensions. Please use a different initial design, such as '
             '"the Latin Hypercube design"',
     ):
         SMAC4HPO(scenario=scenario)
     self.output_dirs.append(scenario.output_dir)
Example 5
    def test_get_next_by_random_search(self, patch):
        def side_effect(size):
            return [ConfigurationMock()] * size

        patch.side_effect = side_effect
        cs = ConfigurationSpace()
        ei = EI(None)
        rs = RandomSearch(ei, cs)
        rval = rs._maximize(
            runhistory=None, stats=None, num_points=10, _sorted=False
        )
        self.assertEqual(len(rval), 10)
        for i in range(10):
            self.assertIsInstance(rval[i][1], ConfigurationMock)
            self.assertEqual(rval[i][1].origin, 'Random Search')
            self.assertEqual(rval[i][0], 0)
Example 6
    def config_space(self):
        """Returns the MI hyperparameter configuration space."""

        num_neighbors = UniformIntegerHyperparameter('num_neighbors',
                                                     lower=10,
                                                     upper=100,
                                                     default_value=20)
        num_features = UniformIntegerHyperparameter('num_features',
                                                    lower=2,
                                                    upper=50,
                                                    default_value=20)
        config = ConfigurationSpace()
        config.seed(self.random_state)
        config.add_hyperparameters((num_neighbors, num_features))

        return config
Example 7
 def setUp(self):
     logging.basicConfig()
     self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)
     self.logger.setLevel(logging.DEBUG)
     self.value = 0
     self.cs = ConfigurationSpace()
     self.cs.add_hyperparameters([
         UniformFloatHyperparameter('param_a', -0.2, 1.77, 1.1),
         UniformIntegerHyperparameter('param_b', -3, 10, 1),
         Constant('param_c', 'value'),
         CategoricalHyperparameter('ambigous_categorical', choices=['True', True, 5]),  # True is ambigous here
     ])
     self.test_config = Configuration(self.cs, {'param_a': 0.5,
                                                'param_b': 1,
                                                'param_c': 'value',
                                                'ambigous_categorical': 5})
Example 8
def get_variables(problem):
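    # Build a ConfigurationSpace from the problem's variables; every entry of
    # problem.dependencies() becomes an InCondition, so params[i] is only
    # active when its parent params[j] takes one of the listed values.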
    cs = ConfigurationSpace()
    params = []
    for i in range(problem.dims()):
        param = get_variable(problem, i)
        params.append(param)
        cs.add_hyperparameter(param)
    
    for (i, c) in enumerate(problem.dependencies()):
        if c is None:
            continue
        j = c['on']
        s = c['values']
        cs.add_condition(InCondition(params[i], params[j], list(s)))

    return cs
Example 9
 def setUp(self):
     self.cs = ConfigurationSpace()
     self.cs.add_hyperparameter(
         UniformFloatHyperparameter(name="x1",
                                    lower=1,
                                    upper=10,
                                    default_value=1))
     self.scenario = Scenario({
         'cs': self.cs,
         'run_obj': 'quality',
         'output_dir': '',
         'ta_run_limit': 100,
     })
     self.stats = Stats(scenario=self.scenario)
     self.rh = RunHistory()
     self.ta = ExecuteTAFuncDict(lambda x: x["x1"]**2, stats=self.stats)
Example 10
 def _create_configuration_space(self):
     config_space = ConfigurationSpace()
     min = self.parameter_domain.get_min_vector()
     max = self.parameter_domain.get_max_vector()
     for i in range(len(min)):
         param = None
         if min[i] == max[i]:
             param = UniformFloatHyperparameter(name=str(i),
                                                lower=min[i],
                                                upper=max[i] + 0.0001)
         else:
             param = UniformFloatHyperparameter(name=str(i),
                                                lower=min[i],
                                                upper=max[i])
         config_space.add_hyperparameter(param)
     return config_space
Example 11
def create_configspace():
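    # Search space for a gradient-boosted tree model (eta, min_child_weight,
    # colsample_bytree suggest XGBoost); q is the quantization step applied to
    # the float ranges.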
    cs = ConfigurationSpace()

    n_estimators = UniformFloatHyperparameter("n_estimators",
                                              100,
                                              600,
                                              default_value=200,
                                              q=50)
    eta = UniformFloatHyperparameter("eta",
                                     0.025,
                                     0.3,
                                     default_value=0.3,
                                     q=0.025)
    min_child_weight = UniformIntegerHyperparameter("min_child_weight",
                                                    1,
                                                    10,
                                                    default_value=1)
    max_depth = UniformIntegerHyperparameter("max_depth",
                                             1,
                                             14,
                                             default_value=6)
    subsample = UniformFloatHyperparameter("subsample",
                                           0.5,
                                           1,
                                           default_value=1,
                                           q=0.05)
    gamma = UniformFloatHyperparameter("gamma", 0, 1, default_value=0, q=0.1)
    colsample_bytree = UniformFloatHyperparameter("colsample_bytree",
                                                  0.5,
                                                  1,
                                                  default_value=1.,
                                                  q=0.05)
    alpha = UniformFloatHyperparameter("alpha", 0, 10, default_value=0., q=1.)
    _lambda = UniformFloatHyperparameter("lambda",
                                         1,
                                         2,
                                         default_value=1,
                                         q=0.1)
    scale_pos_weight = CategoricalHyperparameter("scale_pos_weight",
                                                 [0.01, 0.1, 1., 10, 100],
                                                 default_value=1.)

    cs.add_hyperparameters([
        n_estimators, eta, min_child_weight, max_depth, subsample, gamma,
        colsample_bytree, alpha, _lambda, scale_pos_weight
    ])
    return cs
Example 12
def maxsat(n_eval, n_variables, random_seed):
    assert n_variables in [28, 43, 60]
    if n_variables == 28:
        evaluator = MaxSAT28(random_seed)
    elif n_variables == 43:
        evaluator = MaxSAT43(random_seed)
    elif n_variables == 60:
        evaluator = MaxSAT60(random_seed)
    name_tag = 'maxsat' + str(n_variables) + '_' + datetime.now().strftime(
        "%Y-%m-%d-%H:%M:%S:%f")
    cs = ConfigurationSpace()
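    # Encode each of the n_variables binary variables as a categorical
    # hyperparameter with the string values '0' and '1'.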
    for i in range(n_variables):
        car_var = CategoricalHyperparameter('x' + str(i + 1).zfill(2),
                                            [str(elm) for elm in range(2)],
                                            default_value='0')
        cs.add_hyperparameter(car_var)
    init_points_numpy = evaluator.suggested_init.long().numpy()
    init_points = []
    for i in range(init_points_numpy.shape[0]):
        init_points.append(
            Configuration(
                cs, {
                    'x' + str(j + 1).zfill(2): str(init_points_numpy[i][j])
                    for j in range(n_variables)
                }))

    def evaluate(x):
        x_tensor = torch.LongTensor(
            [int(x['x' + str(j + 1).zfill(2)]) for j in range(n_variables)])
        return evaluator.evaluate(x_tensor).item()

    print('Began    at ' + datetime.now().strftime("%H:%M:%S"))
    scenario = Scenario({
        "run_obj": "quality",
        "runcount-limit": n_eval,
        "cs": cs,
        "deterministic": "true",
        'output_dir': os.path.join(EXP_DIR, name_tag)
    })
    smac = SMAC(scenario=scenario,
                tae_runner=evaluate,
                initial_configurations=init_points)
    smac.optimize()

    evaluations, optimum = evaluations_from_smac(smac)
    print('Finished at ' + datetime.now().strftime("%H:%M:%S"))
    return optimum
Example 13
def make_cs():
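    # Search space resembling scikit-learn's random forest parameters: each
    # '*_value' hyperparameter is only active when its categorical parent is
    # set to 'value', enforced via InCondition.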
    cs = ConfigurationSpace()
    cs.add_hyperparameter(
        UniformIntegerHyperparameter("n_estimators", 1, 30, default=10))

    max_features = CategoricalHyperparameter('max_features', ['auto', 'value'],
                                             default='auto')
    max_features_value = UniformFloatHyperparameter('max_features_value', .1,
                                                    1)
    cs.add_hyperparameters([max_features, max_features_value])
    cs.add_condition(
        InCondition(child=max_features_value,
                    parent=max_features,
                    values=['value']))

    max_depth = CategoricalHyperparameter('max_depth', [None, 'value'])
    max_depth_value = UniformIntegerHyperparameter("max_depth_value", 1, 10)
    cs.add_hyperparameters([max_depth, max_depth_value])
    cs.add_condition(
        InCondition(child=max_depth_value, parent=max_depth, values=['value']))

    min_samples_split = UniformFloatHyperparameter("min_samples_split", .1, 1)
    cs.add_hyperparameter(min_samples_split)

    min_samples_leaf = UniformFloatHyperparameter("min_samples_leaf", .1, .5)
    cs.add_hyperparameter(min_samples_leaf)

    min_weight_fraction_leaf = UniformFloatHyperparameter(
        "min_weight_fraction_leaf", 0, .5)
    cs.add_hyperparameter(min_weight_fraction_leaf)

    max_leaf_nodes = CategoricalHyperparameter('max_leaf_nodes',
                                               [None, 'value'])
    max_leaf_nodes_value = UniformIntegerHyperparameter(
        'max_leaf_nodes_value', 2, 100)
    cs.add_hyperparameters([max_leaf_nodes, max_leaf_nodes_value])
    cs.add_condition(
        InCondition(child=max_leaf_nodes_value,
                    parent=max_leaf_nodes,
                    values=['value']))

    min_impurity_split = UniformFloatHyperparameter('min_impurity_split', 0, 1)
    cs.add_hyperparameter(min_impurity_split)

    bootstrap = CategoricalHyperparameter('bootstrap', [True, False],
                                          default=True)
    cs.add_hyperparameter(bootstrap)
Example 14
 def _get_acm_cs(self):
     """
         returns a configuration space 
         designed for querying ~smac.optimizer.smbo._component_builder
         
         Returns
         ------- 
             ConfigurationSpace
     """
     
     cs = ConfigurationSpace()
     cs.seed(self.rng.randint(0,2**20))
     
     model = CategoricalHyperparameter("model", choices=("RF", "GP"))
     
     num_trees = Constant("num_trees", value=10)
     bootstrap = CategoricalHyperparameter("do_bootstrapping", choices=(True, False), default_value=True)
     ratio_features = CategoricalHyperparameter("ratio_features", choices=(3 / 6, 4 / 6, 5 / 6, 1), default_value=1)
     min_split = UniformIntegerHyperparameter("min_samples_to_split", lower=1, upper=10, default_value=2)
     min_leaves = UniformIntegerHyperparameter("min_samples_in_leaf", lower=1, upper=10, default_value=1)
     
     cs.add_hyperparameters([model, num_trees, bootstrap, ratio_features, min_split, min_leaves])
     
     inc_num_trees = InCondition(num_trees, model, ["RF"])
     inc_bootstrap = InCondition(bootstrap, model, ["RF"])
     inc_ratio_features = InCondition(ratio_features, model, ["RF"])
     inc_min_split = InCondition(min_split, model, ["RF"])
     inc_min_leavs = InCondition(min_leaves, model, ["RF"])
     
     cs.add_conditions([inc_num_trees, inc_bootstrap, inc_ratio_features, inc_min_split, inc_min_leavs])
     
     acq  = CategoricalHyperparameter("acq_func", choices=("EI", "LCB", "PI", "LogEI"))
     par_ei = UniformFloatHyperparameter("par_ei", lower=-10, upper=10)
     par_pi = UniformFloatHyperparameter("par_pi", lower=-10, upper=10)
     par_logei = UniformFloatHyperparameter("par_logei", lower=0.001, upper=100, log=True)
     par_lcb = UniformFloatHyperparameter("par_lcb", lower=0.0001, upper=0.9999)
     
     cs.add_hyperparameters([acq, par_ei, par_pi, par_logei, par_lcb])
     
     inc_par_ei = InCondition(par_ei, acq, ["EI"])
     inc_par_pi = InCondition(par_pi, acq, ["PI"])
     inc_par_logei = InCondition(par_logei, acq, ["LogEI"])
     inc_par_lcb = InCondition(par_lcb, acq, ["LCB"])
     
     cs.add_conditions([inc_par_ei, inc_par_pi, inc_par_logei, inc_par_lcb])
     
     return cs
Example 15
def fmin_smac_nopynisher(func, x0, bounds, maxfun, rng):
    """
    Minimize a function using SMAC, but without pynisher, which doesn't work
    well with benchmark_minimize_callable.
    
    This function is based on SMAC's fmin_smac.
    """
    cs = ConfigurationSpace()
    tmplt = 'x{0:0' + str(len(str(len(bounds)))) + 'd}'
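    # Zero-pad the parameter names (x01, x02, ...) so that sorting the config
    # dictionary in call_ta below restores the original order of the bounds.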
    for idx, (lower_bound, upper_bound) in enumerate(bounds):
        parameter = UniformFloatHyperparameter(
            name=tmplt.format(idx + 1),
            lower=lower_bound,
            upper=upper_bound,
            default_value=x0[idx],
        )
        cs.add_hyperparameter(parameter)

    scenario_dict = {
        "run_obj": "quality",
        "cs": cs,
        "deterministic": "true",
        "initial_incumbent": "DEFAULT",
        "runcount_limit": maxfun,
    }
    scenario = Scenario(scenario_dict)

    def call_ta(config):
        x = np.array(
            [val for _, val in sorted(config.get_dictionary().items())],
            dtype=np.float)
        return func(x)

    smac = SMAC4HPO(
        scenario=scenario,
        tae_runner=ExecuteTAFuncArray,
        tae_runner_kwargs={
            'ta': call_ta,
            'use_pynisher': False
        },
        rng=rng,
        initial_design=RandomConfigurations,
    )

    smac.optimize()
    return
Example 16
        def opt_rosenbrock():
            cs = ConfigurationSpace()

            cs.add_hyperparameter(UniformFloatHyperparameter("x1", -5, 5, default_value=-3))
            cs.add_hyperparameter(UniformFloatHyperparameter("x2", -5, 5, default_value=-4))

            scenario = Scenario({"run_obj": "quality",  # we optimize quality (alternatively runtime)
                                 "runcount-limit": 50,  # maximum function evaluations
                                 "cs": cs,  # configuration space
                                 "deterministic": "true",
                                 "intensification_percentage": 0.000000001
                                 })
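            # The near-zero intensification_percentage minimises the time SMAC
            # reserves for the intensification phase on each iteration.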

            smac = SMAC4AC(scenario=scenario, rng=np.random.RandomState(42),
                           tae_runner=rosenbrock_2d)
            incumbent = smac.optimize()
            return incumbent, smac.scenario.output_dir
Example 17
def best_hyperparams_smac():
    iteration = 1
    cs = ConfigurationSpace()
    cs.add_hyperparameters(SVD_SMAC_SPACE.values())
    scenario = Scenario({
        "run_obj": "quality",  # we optimize quality (alternatively runtime)
        "runcount-limit":
        100,  # max. number of function evaluations; for this example set to a low number
        "cs": cs,  # configuration space
        "deterministic": "true"
    })
    smac = SMAC4HPO(
        scenario=scenario,
        rng=np.random.RandomState(42),
        tae_runner=_hyperopt,
    )
    smac.optimize()
Example 18
 def test_illegal_input(self):
     """
     Testing illegal input in smbo
     """
     cs = ConfigurationSpace()
     cs.add_hyperparameter(UniformFloatHyperparameter('test', 1, 10, 5))
     scen = Scenario({'run_obj': 'quality', 'cs': cs})
     stats = Stats(scen)
     # Recorded runs but no incumbent.
     stats.ta_runs = 10
     smac = SMAC(scen, stats=stats, rng=np.random.RandomState(42))
     self.assertRaises(ValueError, smac.optimize)
     # Incumbent but no recorded runs.
     incumbent = cs.get_default_configuration()
     smac = SMAC(scen, restore_incumbent=incumbent,
                 rng=np.random.RandomState(42))
     self.assertRaises(ValueError, smac.optimize)
Example 19
def pest_control(n_eval, random_seed):
    evaluator = PestControl(random_seed)
    name_tag = 'pestcontrol_' + datetime.now().strftime("%Y-%m-%d-%H:%M:%S:%f")
    cs = ConfigurationSpace()
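    # One categorical hyperparameter per pest-control stage, each with
    # PESTCONTROL_N_CHOICE string-valued options.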
    for i in range(PESTCONTROL_N_STAGES):
        car_var = CategoricalHyperparameter(
            'x' + str(i + 1).zfill(2),
            [str(elm) for elm in range(PESTCONTROL_N_CHOICE)],
            default_value='0')
        cs.add_hyperparameter(car_var)

    init_points_numpy = sample_init_points([PESTCONTROL_N_CHOICE] *
                                           PESTCONTROL_N_STAGES, 20,
                                           random_seed).long().numpy()
    init_points = []
    for i in range(init_points_numpy.shape[0]):
        init_points.append(
            Configuration(
                cs, {
                    'x' + str(j + 1).zfill(2): str(init_points_numpy[i][j])
                    for j in range(PESTCONTROL_N_STAGES)
                }))

    def evaluate(x):
        x_tensor = torch.LongTensor([
            int(x['x' + str(j + 1).zfill(2)])
            for j in range(PESTCONTROL_N_STAGES)
        ])
        return evaluator.evaluate(x_tensor).item()

    print('Began    at ' + datetime.now().strftime("%H:%M:%S"))
    scenario = Scenario({
        "run_obj": "quality",
        "runcount-limit": n_eval,
        "cs": cs,
        "deterministic": "true",
        'output_dir': os.path.join(EXP_DIR, name_tag)
    })
    smac = SMAC(scenario=scenario,
                tae_runner=evaluate,
                initial_configurations=init_points)
    smac.optimize()

    evaluations, optimum = evaluations_from_smac(smac)
    print('Finished at ' + datetime.now().strftime("%H:%M:%S"))
    return optimum
Example 20
    def test_start_tae_return_abort(self, test_run):
        '''
            testing abort
        '''
        # Patch run-function for custom-return
        test_run.return_value = StatusType.ABORT, 12345.0, 1.2345, {}

        scen = Scenario(scenario={
            'cs': ConfigurationSpace(),
            'output_dir': ''
        },
                        cmd_args=None)
        stats = Stats(scen)
        stats.start_timing()
        eta = ExecuteTARun(ta=lambda *args: None, stats=stats)

        self.assertRaises(TAEAbortException, eta.start, config={}, instance=1)
Example 21
def branin(n_eval):
    evaluator = Branin()
    name_tag = '_'.join(
        ['branin', datetime.now().strftime("%Y-%m-%d-%H:%M:%S:%f")])
    cs = ConfigurationSpace()
    for i in range(len(evaluator.n_vertices)):
        car_var = UniformIntegerHyperparameter('x' + str(i + 1).zfill(2),
                                               0,
                                               int(evaluator.n_vertices[i]) -
                                               1,
                                               default_value=25)
        cs.add_hyperparameter(car_var)

    init_points_numpy = evaluator.suggested_init.long().numpy()
    init_points = []
    for i in range(init_points_numpy.shape[0]):
        init_points.append(
            Configuration(
                cs, {
                    'x' + str(j + 1).zfill(2): int(init_points_numpy[i][j])
                    for j in range(len(evaluator.n_vertices))
                }))

    def evaluate(x):
        x_tensor = torch.LongTensor([
            int(x['x' + str(j + 1).zfill(2)])
            for j in range(len(evaluator.n_vertices))
        ])
        return evaluator.evaluate(x_tensor).item()

    print('Began    at ' + datetime.now().strftime("%H:%M:%S"))
    scenario = Scenario({
        "run_obj": "quality",
        "runcount-limit": n_eval,
        "cs": cs,
        "deterministic": "true",
        'output_dir': os.path.join(EXP_DIR, name_tag)
    })
    smac = SMAC(scenario=scenario,
                tae_runner=evaluate,
                initial_configurations=init_points)
    smac.optimize()

    evaluations, optimum = evaluations_from_smac(smac)
    print('Finished at ' + datetime.now().strftime("%H:%M:%S"))
    return optimum
Example 22
    def getPCS(self):
        '''
        maxIter: [1, 100] maximum number of iterations, default 50
        regParam: [0, 0.2] regularization parameter, default 0
        tol: [1e-6, 1e-1] convergence tolerance of the iterative algorithm, default 1e-6
        family, link and variancePower correspond as follows:
        •   “gaussian” -> “identity”, “log”, “inverse”
        •   “binomial” -> “logit”, “probit”, “cloglog”
        •   “poisson” -> “log”, “identity”, “sqrt”
        •   “gamma” -> “inverse”, “identity”, “log”
        •   “tweedie” -> power link function specified through “linkPower”.
        The default link power in the tweedie family is 1 - variancePower.


        '''
        # Build Configuration Space which defines all parameters and their
        # ranges
        cs = ConfigurationSpace()
        maxIter = UniformIntegerHyperparameter("maxIter",
                                               1,
                                               100,
                                               default_value=50)
        regParam = UniformFloatHyperparameter("regParam",
                                              0,
                                              0.4,
                                              default_value=1e-04)
        tol = UniformFloatHyperparameter("tol",
                                         1e-06,
                                         1e-01,
                                         default_value=1e-06)
        family = CategoricalHyperparameter("family", ["gaussian", "poisson"],
                                           default_value="gaussian")
        gaussianLink = CategoricalHyperparameter(
            "gaussianLink", ["identity", "log", "inverse"],
            default_value="identity")
        poissonLink = CategoricalHyperparameter("poissonLink",
                                                ["log", "identity", "sqrt"],
                                                default_value="log")
        cs.add_hyperparameters(
            [maxIter, regParam, tol, family, gaussianLink, poissonLink])
        cs.add_condition(
            InCondition(child=gaussianLink, parent=family,
                        values=["gaussian"]))
        cs.add_condition(
            InCondition(child=poissonLink, parent=family, values=["poisson"]))
        return cs
Example 23
def get_gp(n_dimensions, rs, noise=1e-3, normalize_y=True, average_samples=False, n_iter=50):
    from smac.epm.gp_kernels import ConstantKernel, Matern, WhiteKernel

    cov_amp = ConstantKernel(
        2.0,
        constant_value_bounds=(1e-10, 2),
        prior=LognormalPrior(mean=0.0, sigma=1.0, rng=rs),
    )
    exp_kernel = Matern(
        np.ones([n_dimensions]),
        [(np.exp(-10), np.exp(2)) for _ in range(n_dimensions)],
        nu=2.5,
        prior=None,
    )
    noise_kernel = WhiteKernel(
        noise_level=noise,
        noise_level_bounds=(1e-10, 2),
        prior=HorseshoePrior(scale=0.1, rng=rs),
    )
    kernel = cov_amp * exp_kernel + noise_kernel
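    # Composite kernel: constant amplitude * Matern(nu=2.5) plus a white-noise
    # term, with priors on the amplitude and the noise level.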

    n_mcmc_walkers = 3 * len(kernel.theta)
    if n_mcmc_walkers % 2 == 1:
        n_mcmc_walkers += 1

    bounds = [(0., 1.) for _ in range(n_dimensions)]
    types = np.zeros(n_dimensions)

    configspace = ConfigurationSpace()
    for i in range(n_dimensions):
        configspace.add_hyperparameter(UniformFloatHyperparameter('x%d' % i, 0, 1))

    model = GaussianProcessMCMC(
        configspace=configspace,
        types=types,
        bounds=bounds,
        kernel=kernel,
        n_mcmc_walkers=n_mcmc_walkers,
        chain_length=n_iter,
        burnin_steps=n_iter,
        normalize_y=normalize_y,
        seed=rs.randint(low=1, high=10000),
        mcmc_sampler='emcee',
        average_samples=average_samples,
    )
    return model
Example 24
def contamination(n_eval, lamda, random_seed_pair):
    evaluator = Contamination(lamda, random_seed_pair)
    name_tag = '_'.join([
        'contamination', ('%.2E' % lamda),
        datetime.now().strftime("%Y-%m-%d-%H:%M:%S:%f")
    ])
    cs = ConfigurationSpace()
    for i in range(CONTAMINATION_N_STAGES):
        car_var = CategoricalHyperparameter('x' + str(i + 1).zfill(2),
                                            [str(elm) for elm in range(2)],
                                            default_value='0')
        cs.add_hyperparameter(car_var)

    init_points_numpy = evaluator.suggested_init.long().numpy()
    init_points = []
    for i in range(init_points_numpy.shape[0]):
        init_points.append(
            Configuration(
                cs, {
                    'x' + str(j + 1).zfill(2): str(init_points_numpy[i][j])
                    for j in range(CONTAMINATION_N_STAGES)
                }))

    def evaluate(x):
        x_tensor = torch.LongTensor([
            int(x['x' + str(j + 1).zfill(2)])
            for j in range(CONTAMINATION_N_STAGES)
        ])
        return evaluator.evaluate(x_tensor).item()

    print('Began    at ' + datetime.now().strftime("%H:%M:%S"))
    scenario = Scenario({
        "run_obj": "quality",
        "runcount-limit": n_eval,
        "cs": cs,
        "deterministic": "true",
        'output_dir': os.path.join(EXP_DIR, name_tag)
    })
    smac = SMAC(scenario=scenario,
                tae_runner=evaluate,
                initial_configurations=init_points)
    smac.optimize()

    evaluations, optimum = evaluations_from_smac(smac)
    print('Finished at ' + datetime.now().strftime("%H:%M:%S"))
    return optimum
Example 25
 def setUp(self):
     self.cs = ConfigurationSpace()
     self.scenario_dict_default = {
         "cs": self.cs,
         "run_obj": "quality",
         "output_dir": "",
         "limit_resources": True,
         "deterministic": False,
     }
     self.scenario = Scenario(self.scenario_dict_default)
     self.sh_intensifier_kwargs = {
         "n_seeds": 1,
         "initial_budget": 1,
         "eta": 3,
         "min_chall": 1,
         "max_budget": 100,
     }
     self.output_dirs = []
Example 26
    def test_crashed_cost_value(self, test_run):
        '''
            test cost on crashed runs
        '''
        # Patch run-function for custom-return
        scen = Scenario(scenario={
            'cs': ConfigurationSpace(),
            'run_obj': 'quality'
        },
                        cmd_options=None)
        stats = Stats(scen)
        stats.start_timing()
        stats.submitted_ta_runs += 1

        # Check quality
        test_run.return_value = StatusType.CRASHED, np.nan, np.nan, {}
        eta = SerialRunner(ta=lambda *args: None,
                           stats=stats,
                           run_obj='quality',
                           cost_for_crash=100)
        run_info, result = eta.run_wrapper(
            RunInfo(config={},
                    instance=1,
                    instance_specific="0",
                    cutoff=None,
                    seed=None,
                    capped=False,
                    budget=0.0))
        self.assertEqual(100, result.cost)

        # Check runtime
        eta = SerialRunner(ta=lambda *args: None,
                           stats=stats,
                           run_obj='runtime',
                           cost_for_crash=10.7)
        run_info, result = eta.run_wrapper(
            RunInfo(config={},
                    instance=1,
                    instance_specific="0",
                    cutoff=20,
                    seed=None,
                    capped=False,
                    budget=0.0))
        self.assertEqual(20.0, result.cost)
Example 27
def main():
    cs = ConfigurationSpace()
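    # Most hyperparameters below offer a single choice and are effectively
    # fixed; only optimizer_lr, learning_decay_rate and nn_type are tuned.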

    cell_size = CategoricalHyperparameter("cell_size", [128],
                                          default_value=128)  # kick up to 256
    n_cell = CategoricalHyperparameter("n_cell", [2], default_value=2)
    dropout = CategoricalHyperparameter("dropout", [0.5], default_value=0.5)

    activation = CategoricalHyperparameter("activation", ['sigmoid'],
                                           default_value='sigmoid')
    optimizer = CategoricalHyperparameter("optimizer", ['adam'],
                                          default_value='adam')
    optimizer_lr = CategoricalHyperparameter(
        "optimizer_lr", [.001, .003, .006, .01, 0.03, 0.1], default_value=.01)
    learning_decay_rate = UniformFloatHyperparameter("learning_decay_rate",
                                                     0,
                                                     0.9,
                                                     default_value=.6)

    nn_type = CategoricalHyperparameter("nn_type", ['RNN', 'LSTM', 'GRU'],
                                        default_value='LSTM')

    epochs = CategoricalHyperparameter("epochs", [10], default_value=10)

    cs.add_hyperparameters([
        cell_size, n_cell, dropout, nn_type, activation, optimizer,
        optimizer_lr, learning_decay_rate, epochs
    ])

    scenario = Scenario({
        "run_obj": "quality",
        "runcount-limit": 32,
        "cs": cs,
        "deterministic": "true"
    })
    scenario.output_dir_for_this_run = "C:\\NNwork\\HFSF\\SMAC3out"
    scenario.output_dir = "C:\\NNwork\\HFSF\\SMAC3out"
    smac = SMAC(scenario=scenario,
                rng=np.random.RandomState(23),
                tae_runner=rnn_from_cfg)

    best_model = smac.optimize()
    print_incumb(best_model)
    np.save("C:\\NNwork\\HFSF\\SMAC3out\\best.cfg", best_model)
Example 28
def build_config_space(clustering_ls=["KMeans", "DBSCAN"],
                       dim_reduction_ls=[]):
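    # One top-level categorical per algorithm family; each algorithm's own
    # parameters get encoded names and are made active only when that
    # algorithm is selected, via InCondition.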
    cs = ConfigurationSpace()

    if len(clustering_ls) > 0:
        clustering_choice = CategoricalHyperparameter(
            "clustering_choice", clustering_ls, default_value=clustering_ls[0])
        cs.add_hyperparameters([clustering_choice])

    if len(dim_reduction_ls) > 0:
        dim_reduction_choice = CategoricalHyperparameter(
            "dim_reduction_choice",
            dim_reduction_ls,
            default_value=dim_reduction_ls[0])
        cs.add_hyperparameters([dim_reduction_choice])

    for idx, string in enumerate(
            itertools.chain(clustering_ls, dim_reduction_ls)):
        algorithm = Mapper.getClass(string)

        # encode parameter names
        encoded_params = []
        for param in algorithm.params:
            encoded_string = StringUtils.encode_parameter(
                param.name, algorithm.name)
            param.name = encoded_string

        # add encoded paramters to configuration space
        cs.add_hyperparameters(algorithm.params)

        # define dependency
        for param in algorithm.params:
            cs.add_condition(
                InCondition(child=param,
                            parent=clustering_choice if
                            idx < len(clustering_ls) else dim_reduction_choice,
                            values=[string]))

        # add forbidden clauses
        for condition in algorithm.forbidden_clauses:
            cs.add_forbidden_clause(condition)

    return cs
Example 29
    def test_run(self):
        '''
            running some simple algo in aclib 2.0 style
        '''
        scen = Scenario(scenario={
            'cs': ConfigurationSpace(),
            'run_obj': 'quality',
            'output_dir': ''
        },
                        cmd_options=None)
        stats = Stats(scen)

        eta = ExecuteTARunAClib(
            ta=shlex.split("python test/test_tae/dummy_ta_wrapper_aclib.py 1"),
            stats=stats)
        status, cost, runtime, ar_info = eta.run(config={}, instance='0')
        assert status == StatusType.TIMEOUT
        assert cost == 2.0
        assert runtime == 2.0

        print(status, cost, runtime)

        eta = ExecuteTARunAClib(
            ta=shlex.split("python test/test_tae/dummy_ta_wrapper_aclib.py 2"),
            stats=stats)
        status, cost, runtime, ar_info = eta.run(config={}, instance='0')
        assert status == StatusType.SUCCESS
        assert cost == 3.0
        assert runtime == 3.0

        print(status, cost, runtime)

        eta = ExecuteTARunAClib(
            ta=shlex.split("python test/test_tae/dummy_ta_wrapper_aclib.py 2"),
            stats=stats,
            run_obj="quality")
        status, cost, runtime, ar_info = eta.run(config={}, instance='0')
        assert status == StatusType.SUCCESS
        assert cost == 2.0
        assert runtime == 3.0

        print(status, cost, runtime, ar_info)
Example 30
    def test_run(self):
        '''
            running some simple algo in old style
        '''
        scen = Scenario(scenario={
            'cs': ConfigurationSpace(),
            'run_obj': 'quality',
            'output_dir': ''
        },
                        cmd_args=None)
        stats = Stats(scen)

        eta = ExecuteTARunOld(
            ta=shlex.split("python test/test_tae/dummy_ta_wrapper.py 1"),
            stats=stats)
        status, cost, runtime, ar_info = eta.run(config={})
        assert status == StatusType.SUCCESS
        assert cost == 1.0
        assert runtime == 1.0

        print(status, cost, runtime)

        eta = ExecuteTARunOld(
            ta=shlex.split("python test/test_tae/dummy_ta_wrapper.py 2"),
            stats=stats)
        status, cost, runtime, ar_info = eta.run(config={})
        assert status == StatusType.SUCCESS
        assert cost == 2.0
        assert runtime == 2.0

        print(status, cost, runtime)

        eta = ExecuteTARunOld(
            ta=shlex.split("python test/test_tae/dummy_ta_wrapper.py 2"),
            stats=stats,
            run_obj="quality")
        status, cost, runtime, ar_info = eta.run(config={}, )
        assert status == StatusType.SUCCESS
        assert cost == 4.0
        assert runtime == 2.0

        print(status, cost, runtime, ar_info)