class SchafferTest(unittest.TestCase):
    def setUp(self):
        self.cs = ConfigurationSpace()
        self.cs.add_hyperparameter(
            UniformFloatHyperparameter("x", lower=MIN_V, upper=MAX_V))

        # Scenario object
        self.scenario = Scenario({
            "run_obj":
            "quality",  # we optimize quality (alternatively runtime)
            "runcount-limit": 50,  # max. number of function evaluations
            "cs": self.cs,  # configuration space
            "deterministic": True,
            "multi_objectives": "metric1, metric2",
            "limit_resources": False,
        })

        self.facade_kwargs = {
            "scenario": self.scenario,
            "rng": np.random.RandomState(0),
            "tae_runner": tae,
        }

    def test_AC(self):
        smac = SMAC4AC(**self.facade_kwargs)
        incumbent = smac.optimize()

        f1_inc, f2_inc = schaffer(incumbent["x"])
        f1_opt, f2_opt = get_optimum()

        f2_inc = f2_inc / UPSCALING_FACTOR

        self.assertAlmostEqual(f1_inc + f2_inc, f1_opt + f2_opt, places=1)

        return smac
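The test above relies on module-level helpers that are not shown (MIN_V, MAX_V, UPSCALING_FACTOR, schaffer, get_optimum and the tae target function). A minimal sketch of what they might look like, based on the Schaffer N.1 benchmark; the names, constants and the scaling of the second objective are assumptions:

import numpy as np

MIN_V, MAX_V = -2, 2
UPSCALING_FACTOR = 2000


def schaffer(x):
    # Schaffer N.1: two convex objectives whose Pareto set is x in [0, 2].
    # The second objective is assumed to be upscaled, matching the division
    # by UPSCALING_FACTOR in test_AC above.
    f1 = x ** 2
    f2 = (x - 2) ** 2 * UPSCALING_FACTOR
    return f1, f2


def get_optimum():
    # Grid-search the (unscaled) objectives for the smallest possible sum.
    xs = np.linspace(MIN_V, MAX_V, 10000)
    return min(((x ** 2, (x - 2) ** 2) for x in xs),
               key=lambda fs: fs[0] + fs[1])


def tae(cfg):
    # Target algorithm: return both objectives so SMAC treats the run as
    # multi-objective ("metric1" and "metric2" in the Scenario above).
    f1, f2 = schaffer(cfg["x"])
    return {"metric1": f1, "metric2": f2}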
Example #2
def addSearchSpaceGrid(
    hp: SearchSpaceGrid, disc: int, parent_disc: Hyperparameter, cs: ConfigurationSpace
) -> None:
    smac = SearchSpaceGridtoSMAC(hp, disc)
    for hyp in smac:
        cs.add_hyperparameter(hyp)
        cs.add_condition(EqualsCondition(child=hyp, parent=parent_disc, value=disc))
Example #3
        def opt_rosenbrock():
            cs = ConfigurationSpace()

            cs.add_hyperparameter(
                UniformFloatHyperparameter("x1", -5, 5, default_value=-3)
            )
            cs.add_hyperparameter(
                UniformFloatHyperparameter("x2", -5, 5, default_value=-4)
            )

            scenario = Scenario(
                {
                    "run_obj": "quality",  # we optimize quality (alternatively runtime)
                    "runcount-limit": 50,  # maximum function evaluations
                    "cs": cs,  # configuration space
                    "deterministic": True,
                    "limit_resources": True,
                    "intensification_percentage": 0.000000001,
                }
            )

            smac = SMAC4AC(
                scenario=scenario,
                rng=np.random.RandomState(42),
                tae_runner=rosenbrock_2d,
            )
            incumbent = smac.optimize()
            return incumbent, smac.scenario.output_dir
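The nested opt_rosenbrock helper above hands SMAC a rosenbrock_2d target runner that is not shown; a plausible sketch of the standard 2-D Rosenbrock objective it presumably wraps:

def rosenbrock_2d(cfg):
    # Classic 2-D Rosenbrock function; minimum of 0 at (1, 1).
    x1, x2 = cfg["x1"], cfg["x2"]
    return 100.0 * (x2 - x1 ** 2) ** 2 + (1.0 - x1) ** 2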
class TestMultiInitialDesign(unittest.TestCase):

    def setUp(self):
        self.cs = ConfigurationSpace()
        self.cs.add_hyperparameter(UniformFloatHyperparameter(
            name="x1", lower=1, upper=10, default_value=2)
        )
        self.scenario = Scenario({'cs': self.cs, 'run_obj': 'quality',
                                  'output_dir': ''})
        self.ta = ExecuteTAFuncDict(lambda x: x["x1"]**2)

    def test_multi_config_design(self):
        stats = Stats(scenario=self.scenario)
        stats.start_timing()
        self.ta.stats = stats
        tj = TrajLogger(output_dir=None, stats=stats)
        rh = RunHistory(aggregate_func=average_cost)
        self.ta.runhistory = rh
        rng = np.random.RandomState(seed=12345)

        intensifier = Intensifier(tae_runner=self.ta, stats=stats, traj_logger=tj, rng=rng, instances=[None],
                                  run_obj_time=False)

        configs = [Configuration(configuration_space=self.cs, values={"x1": 4}),
                   Configuration(configuration_space=self.cs, values={"x1": 2})]
        dc = MultiConfigInitialDesign(tae_runner=self.ta, scenario=self.scenario, stats=stats,
                                      traj_logger=tj, runhistory=rh, rng=rng, configs=configs,
                                      intensifier=intensifier, aggregate_func=average_cost)

        inc = dc.run()
        self.assertEqual(stats.ta_runs, 2)
        self.assertEqual(len(rh.data), 2)
        self.assertEqual(rh.get_cost(inc), 4)
Example #5
class TestSingleInitialDesign(unittest.TestCase):
    def setUp(self):
        self.cs = ConfigurationSpace()
        self.cs.add_hyperparameter(
            UniformFloatHyperparameter(name="x1", lower=1, upper=10,
                                       default_value=2))
        self.scenario = Scenario({
            'cs': self.cs,
            'run_obj': 'quality',
            'output_dir': ''
        })
        self.ta = ExecuteTAFuncDict(lambda x: x["x1"]**2)

    def test_single_default_config_design(self):
        stats = Stats(scenario=self.scenario)
        stats.start_timing()
        self.ta.stats = stats
        tj = TrajLogger(output_dir=None, stats=stats)
        rh = RunHistory(aggregate_func=average_cost)

        dc = DefaultConfiguration(tae_runner=self.ta,
                                  scenario=self.scenario,
                                  stats=stats,
                                  traj_logger=tj,
                                  rng=np.random.RandomState(seed=12345))

        inc = dc.run()
        self.assertTrue(stats.ta_runs == 1)
        self.assertTrue(len(rh.data) == 0)
Example #6
        def test_facade(self):
            config_space = ConfigurationSpace()
            n_components = UniformIntegerHyperparameter(
                "PCA__n_components", 5, 30)
            config_space.add_hyperparameter(n_components)
            scenario_dict = {
                "run_obj": "quality",
                "deterministic": "true",
                "cs": config_space,
                "wallclock_limit": 60
            }

            with self.assertRaises(ValueError):
                SMACOptimizer(facade="SMAC4BOO", scenario_dict=scenario_dict)

            with self.assertRaises(ValueError):
                facade = SMAC4BO(scenario=Scenario(scenario_dict))
                SMACOptimizer(facade=facade, scenario_dict=scenario_dict)

            facades = [
                "SMAC4BO", SMAC4BO, "SMAC4AC", SMAC4AC, "SMAC4HPO", SMAC4HPO,
                "BOHB4HPO", BOHB4HPO
            ]
            for facade in facades:
                SMACOptimizer(facade=facade, scenario_dict=scenario_dict)
Example #7
def get_mixed_gp(cat_dims, cont_dims, rs, noise=1e-3, normalize_y=True):
    from smac.epm.gp_kernels import ConstantKernel, Matern, WhiteKernel, HammingKernel

    cat_dims = np.array(cat_dims, dtype=int)
    cont_dims = np.array(cont_dims, dtype=int)
    n_dimensions = len(cat_dims) + len(cont_dims)
    cov_amp = ConstantKernel(
        2.0,
        constant_value_bounds=(1e-10, 2),
        prior=LognormalPrior(mean=0.0, sigma=1.0, rng=rs),
    )

    exp_kernel = Matern(
        np.ones([len(cont_dims)]),
        [(np.exp(-10), np.exp(2)) for _ in range(len(cont_dims))],
        nu=2.5,
        operate_on=cont_dims,
    )

    ham_kernel = HammingKernel(
        np.ones([len(cat_dims)]),
        [(np.exp(-10), np.exp(2)) for _ in range(len(cat_dims))],
        operate_on=cat_dims,
    )
    noise_kernel = WhiteKernel(
        noise_level=noise,
        noise_level_bounds=(1e-10, 2),
        prior=HorseshoePrior(scale=0.1, rng=rs),
    )
    kernel = cov_amp * (exp_kernel * ham_kernel) + noise_kernel

    bounds = [0] * n_dimensions
    types = np.zeros(n_dimensions)
    for c in cont_dims:
        bounds[c] = (0., 1.)
    for c in cat_dims:
        types[c] = 3
        bounds[c] = (3, np.nan)

    cs = ConfigurationSpace()
    for c in cont_dims:
        cs.add_hyperparameter(UniformFloatHyperparameter('X%d' % c, 0, 1))
    for c in cat_dims:
        cs.add_hyperparameter(
            CategoricalHyperparameter('X%d' % c, [0, 1, 2, 3]))

    model = GaussianProcess(
        configspace=cs,
        bounds=bounds,
        types=types,
        kernel=kernel,
        seed=rs.randint(low=1, high=10000),
        normalize_y=normalize_y,
    )
    return model
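A hedged usage sketch for get_mixed_gp, assuming the generic SMAC EPM train/predict interface and purely illustrative random data:

import numpy as np

rs = np.random.RandomState(1)
model = get_mixed_gp(cat_dims=[2], cont_dims=[0, 1], rs=rs)

# Two continuous columns in [0, 1] and one categorical column of small integer codes.
X = np.hstack([rs.rand(20, 2), rs.randint(0, 3, size=(20, 1))])
y = np.sin(3 * X[:, :2]).sum(axis=1, keepdims=True) + X[:, 2:]
model.train(X, y)
mean, var = model.predict(X[:5])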
Example #8
def optimize():
    # We load the iris-dataset (a widely used benchmark)
    iris = datasets.load_iris()

    #logger = logging.getLogger("SVMExample")
    logging.basicConfig(level=logging.INFO)  # logging.DEBUG for debug output

    # Build Configuration Space which defines all parameters and their ranges
    cs = ConfigurationSpace()

    # We define a few possible types of SVM-kernels and add them as "kernel" to our cs
    kernel = CategoricalHyperparameter("kernel", ["linear", "rbf", "poly", "sigmoid"], default="poly")
    cs.add_hyperparameter(kernel)

    # There are some hyperparameters shared by all kernels
    C = UniformFloatHyperparameter("C", 0.001, 1000.0, default=1.0)
    shrinking = CategoricalHyperparameter("shrinking", ["true", "false"], default="true")
    cs.add_hyperparameters([C, shrinking])

    # Others are kernel-specific, so we can add conditions to limit the searchspace
    degree = UniformIntegerHyperparameter("degree", 1, 5, default=3)     # Only used by kernel poly
    coef0 = UniformFloatHyperparameter("coef0", 0.0, 10.0, default=0.0)  # poly, sigmoid
    cs.add_hyperparameters([degree, coef0])
    use_degree = InCondition(child=degree, parent=kernel, values=["poly"])
    use_coef0 = InCondition(child=coef0, parent=kernel, values=["poly", "sigmoid"])
    cs.add_conditions([use_degree, use_coef0])

    # This also works for parameters that are a mix of categorical and values from a range of numbers
    # For example, gamma can be either "auto" or a fixed float
    gamma = CategoricalHyperparameter("gamma", ["auto", "value"], default="auto")  # only rbf, poly, sigmoid
    gamma_value = UniformFloatHyperparameter("gamma_value", 0.0001, 8, default=1)
    cs.add_hyperparameters([gamma, gamma_value])
    # We only activate gamma_value if gamma is set to "value"
    cs.add_condition(InCondition(child=gamma_value, parent=gamma, values=["value"]))
    # And again we can restrict the use of gamma in general to the choice of the kernel
    cs.add_condition(InCondition(child=gamma, parent=kernel, values=["rbf", "poly", "sigmoid"]))


    # Scenario object
    scenario = Scenario("test/test_files/svm_scenario.txt")

    # Example call of the function
    # It returns: Status, Cost, Runtime, Additional Infos
    def_value = svm_from_cfg(cs.get_default_configuration())
    print("Default Value: %.2f" % (def_value))

    # Optimize, using a SMAC-object
    print("Optimizing! Depending on your machine, this might take a few minutes.")
    smac = SMAC(scenario=scenario, rng=np.random.RandomState(42),
            tae_runner=svm_from_cfg)

    incumbent = smac.optimize()
    inc_value = svm_from_cfg(incumbent)

    print("Optimized Value: %.2f" % (inc_value))
Example #9
    def config_space(self):
        """Logistic regression hyperparameter space."""

        n_components = UniformIntegerHyperparameter(
            'n_components', lower=2, upper=50, default_value=10
        )
        # Add hyperparameters to config space.
        config = ConfigurationSpace()
        config.seed(self.random_state)
        config.add_hyperparameter(n_components)

        return config
Example #10
    def config_space(self):
        """Logistic regression hyperparameter space."""

        reg_param = UniformFloatHyperparameter(
            'reg_param', lower=1e-9, upper=1-1e-9, default_value=1e-3
        )
        # Add hyperparameters to config space.
        config = ConfigurationSpace()
        config.seed(self.random_state)
        config.add_hyperparameter(reg_param)

        return config
Example #11
    def config_space(self):
        """Logistic regression hyperparameter space."""

        alpha = UniformFloatHyperparameter(
            'alpha', lower=1e-8, upper=100, default_value=1.0
        )
        # Add hyperparameters to config space.
        config = ConfigurationSpace()
        config.seed(self.random_state)
        config.add_hyperparameter(alpha)

        return config
Example #12
    def config_space(self):
        """Returns the Fisher score hyperparameter configuration space."""

        num_features = UniformIntegerHyperparameter('num_features',
                                                    lower=2,
                                                    upper=50,
                                                    default_value=20)
        config = ConfigurationSpace()
        config.seed(self.random_state)
        config.add_hyperparameter(num_features)

        return config
Example #13
    def test_merge_foreign_data(self):
        ''' test smac.utils.merge_foreign_data '''

        scenario = Scenario(self.test_scenario_dict)
        scenario_2 = Scenario(self.test_scenario_dict)
        scenario_2.feature_dict = {"inst_new": [4]}

        # init cs
        cs = ConfigurationSpace()
        cs.add_hyperparameter(UniformIntegerHyperparameter(name='a',
                                                           lower=0,
                                                           upper=100))
        cs.add_hyperparameter(UniformIntegerHyperparameter(name='b',
                                                           lower=0,
                                                           upper=100))
        # build runhistory
        rh_merge = RunHistory()
        config = Configuration(cs, values={'a': 1, 'b': 2})

        rh_merge.add(config=config, instance_id="inst_new", cost=10, time=20,
                     status=StatusType.SUCCESS,
                     seed=None,
                     additional_info=None)

        # "d" is an instance in <scenario>
        rh_merge.add(config=config, instance_id="d", cost=5, time=20,
                     status=StatusType.SUCCESS,
                     seed=None,
                     additional_info=None)

        # build empty rh
        rh_base = RunHistory()

        merge_foreign_data(scenario=scenario, runhistory=rh_base,
                           in_scenario_list=[scenario_2], in_runhistory_list=[rh_merge])

        # both runs should be in the runhistory
        # but we should not use the data to update the cost of config
        self.assertTrue(len(rh_base.data) == 2)
        self.assertTrue(np.isnan(rh_base.get_cost(config)))

        # we should not get direct access to external run data
        runs = rh_base.get_runs_for_config(config, only_max_observed_budget=True)
        self.assertTrue(len(runs) == 0)

        rh_merge.add(config=config, instance_id="inst_new_2", cost=10, time=20,
                     status=StatusType.SUCCESS,
                     seed=None,
                     additional_info=None)

        self.assertRaises(ValueError, merge_foreign_data, **{
                          "scenario": scenario, "runhistory": rh_base,
                          "in_scenario_list": [scenario_2], "in_runhistory_list": [rh_merge]})
Example #14
def get_gp(n_dimensions,
           rs,
           noise=1e-3,
           normalize_y=True,
           average_samples=False,
           n_iter=50):
    from smac.epm.gp_kernels import ConstantKernel, Matern, WhiteKernel

    cov_amp = ConstantKernel(
        2.0,
        constant_value_bounds=(1e-10, 2),
        prior=LognormalPrior(mean=0.0, sigma=1.0, rng=rs),
    )
    exp_kernel = Matern(
        np.ones([n_dimensions]),
        [(np.exp(-10), np.exp(2)) for _ in range(n_dimensions)],
        nu=2.5,
        prior=None,
    )
    noise_kernel = WhiteKernel(
        noise_level=noise,
        noise_level_bounds=(1e-10, 2),
        prior=HorseshoePrior(scale=0.1, rng=rs),
    )
    kernel = cov_amp * exp_kernel + noise_kernel

    n_mcmc_walkers = 3 * len(kernel.theta)
    if n_mcmc_walkers % 2 == 1:
        n_mcmc_walkers += 1

    bounds = [(0., 1.) for _ in range(n_dimensions)]
    types = np.zeros(n_dimensions)

    configspace = ConfigurationSpace()
    for i in range(n_dimensions):
        configspace.add_hyperparameter(
            UniformFloatHyperparameter('x%d' % i, 0, 1))

    model = GaussianProcessMCMC(
        configspace=configspace,
        types=types,
        bounds=bounds,
        kernel=kernel,
        n_mcmc_walkers=n_mcmc_walkers,
        chain_length=n_iter,
        burnin_steps=n_iter,
        normalize_y=normalize_y,
        seed=rs.randint(low=1, high=10000),
        mcmc_sampler='emcee',
        average_samples=average_samples,
    )
    return model
    def test_initializations(self):
        cs = ConfigurationSpace()
        for i in range(40):
            cs.add_hyperparameter(UniformFloatHyperparameter('x%d' % (i + 1), 0, 1))
        scenario = Scenario({'cs': cs, 'run_obj': 'quality'})
        hb_kwargs = {'initial_budget': 1, 'max_budget': 3}
        facade = HB4AC(scenario=scenario, intensifier_kwargs=hb_kwargs)

        self.assertIsInstance(facade.solver.initial_design, RandomConfigurations)
        self.assertIsInstance(facade.solver.epm_chooser.model, RandomEPM)
        self.assertIsInstance(facade.solver.intensifier, Hyperband)
        self.assertEqual(facade.solver.intensifier.min_chall, 1)
        self.output_dirs.append(scenario.output_dir)
Example #16
    def optimize(self):

        cs = ConfigurationSpace()

        if 'optimizer' in self.params:

            optimizer = CategoricalHyperparameter('optimizer',
                                                  self.params['optimizer'])

            cs.add_hyperparameter(optimizer)

        scenario = Scenario({
            "run_obj": "quality",
            "runcount-limit": 5,
            "cutoff-time": 10,
            "cs": cs,
            "deterministic": "true"
        })

        print(cs.get_default_configuration())

        def_value = self.svm_from_config(cs.get_default_configuration())

        smac = SMAC4HPO(scenario=scenario,
                        rng=np.random.RandomState(42),
                        tae_runner=self.svm_from_config)

        incumbent = smac.optimize()

        __params = incumbent.get_dictionary()

        inc_value = self.svm_from_config(incumbent)

        print(__params)

        model = create_model(**__params)

        hist = model.fit(self.x_train, self.y_train, batch_size=128, epochs=6)

        time = smac.stats.wallclock_time_used

        loss, accuracy, f1_score, precision, recall = model.evaluate(
            self.x_test, self.y_test, verbose=0)

        del smac

        self.result = {
            'accuracy': accuracy,
            'time': time,
            'best_params': __params
        }
Example #17
    def test_initializations(self):
        cs = ConfigurationSpace()
        for i in range(40):
            cs.add_hyperparameter(
                UniformFloatHyperparameter('x%d' % (i + 1), 0, 1))
        scenario = Scenario({'cs': cs, 'run_obj': 'quality'})
        hb_kwargs = {'initial_budget': 1, 'max_budget': 3}
        facade = BOHB4HPO(scenario=scenario, intensifier_kwargs=hb_kwargs)

        self.assertIsInstance(facade.solver.initial_design,
                              RandomConfigurations)
        # ensure number of samples required is D+1
        self.assertEqual(facade.solver.epm_chooser.min_samples_model, 41)
        self.output_dirs.append(scenario.output_dir)
Example #18
def run_roar(python_path,
             w_dir,
             n_iter=5,
             input_file='../rawAllx1000.json',
             seeds=[1],
             task_ids=None,
             max_tries=10):

    from smac.configspace import ConfigurationSpace
    from ConfigSpace.hyperparameters import UniformIntegerHyperparameter
    from smac.scenario.scenario import Scenario
    from smac.facade.roar_facade import ROAR

    def test_func(cutoff):
        cutoff = cutoff.get('x1')
        print(cutoff)
        result = find_cut_off.main(python_path=python_path,
                                   w_dir=w_dir,
                                   iter=n_iter,
                                   input_file=input_file,
                                   cutoffs=[cutoff],
                                   seeds=seeds,
                                   task_ids=task_ids)
        cleaned = [x[1] for x in result if 0.0 < x[1] < 1.0]
        mean = np.mean(cleaned) if cleaned else 0.0
        mean = mean if mean != 1.0 else 0.0
        return 1.0 - mean

    cs = ConfigurationSpace()
    cutoff_parameter = UniformIntegerHyperparameter('x1',
                                                    1,
                                                    99,
                                                    default_value=50)
    cs.add_hyperparameter(cutoff_parameter)
    scenario = Scenario({
        "run_obj": "quality",  # we optimize quality (alternatively runtime)
        "runcount-limit": max_tries,  # maximum function evaluations
        "cs": cs,  # configuration space
        "deterministic": "true",
        "abort_on_first_run_crash": "false",
    })

    roar = ROAR(scenario=scenario, tae_runner=test_func, rng=1234)

    x = roar.optimize()

    cost = test_func(x)

    return x, cost, roar
Example #19
    def test_exchange_sobol_for_lhd(self):
        cs = ConfigurationSpace()
        for i in range(40):
            cs.add_hyperparameter(
                UniformFloatHyperparameter('x%d' % (i + 1), 0, 1))
        scenario = Scenario({'cs': cs, 'run_obj': 'quality'})
        facade = SMAC4HPO(scenario=scenario)
        self.assertIsInstance(facade.solver.initial_design, SobolDesign)
        cs.add_hyperparameter(UniformFloatHyperparameter('x41', 0, 1))
        with self.assertRaisesRegex(
                ValueError,
                '"Sobol sequence" can only handle up to 40 dimensions. Please use a different initial design, such as '
                '"the Latin Hypercube design"',
        ):
            SMAC4HPO(scenario=scenario)
        self.output_dirs.append(scenario.output_dir)
def get_variables(problem):
    cs = ConfigurationSpace()
    params = []
    for i in range(problem.dims()):
        param = get_variable(problem, i)
        params.append(param)
        cs.add_hyperparameter(param)
    
    for (i, c) in enumerate(problem.dependencies()):
        if c is None:
            continue
        j = c['on']
        s = c['values']
        cs.add_condition(InCondition(params[i], params[j], list(s)))

    return cs
    def _create_configuration_space(self):
        config_space = ConfigurationSpace()
        min = self.parameter_domain.get_min_vector()
        max = self.parameter_domain.get_max_vector()
        for i in range(len(min)):
            param = None
            if min[i] == max[i]:
                param = UniformFloatHyperparameter(name=str(i),
                                                   lower=min[i],
                                                   upper=max[i] + 0.0001)
            else:
                param = UniformFloatHyperparameter(name=str(i),
                                                   lower=min[i],
                                                   upper=max[i])
            config_space.add_hyperparameter(param)
        return config_space
Example #22
def maxsat(n_eval, n_variables, random_seed):
    assert n_variables in [28, 43, 60]
    if n_variables == 28:
        evaluator = MaxSAT28(random_seed)
    elif n_variables == 43:
        evaluator = MaxSAT43(random_seed)
    elif n_variables == 60:
        evaluator = MaxSAT60(random_seed)
    name_tag = 'maxsat' + str(n_variables) + '_' + datetime.now().strftime(
        "%Y-%m-%d-%H:%M:%S:%f")
    cs = ConfigurationSpace()
    for i in range(n_variables):
        car_var = CategoricalHyperparameter('x' + str(i + 1).zfill(2),
                                            [str(elm) for elm in range(2)],
                                            default_value='0')
        cs.add_hyperparameter(car_var)
    init_points_numpy = evaluator.suggested_init.long().numpy()
    init_points = []
    for i in range(init_points_numpy.shape[0]):
        init_points.append(
            Configuration(
                cs, {
                    'x' + str(j + 1).zfill(2): str(init_points_numpy[i][j])
                    for j in range(n_variables)
                }))

    def evaluate(x):
        x_tensor = torch.LongTensor(
            [int(x['x' + str(j + 1).zfill(2)]) for j in range(n_variables)])
        return evaluator.evaluate(x_tensor).item()

    print('Began    at ' + datetime.now().strftime("%H:%M:%S"))
    scenario = Scenario({
        "run_obj": "quality",
        "runcount-limit": n_eval,
        "cs": cs,
        "deterministic": "true",
        'output_dir': os.path.join(EXP_DIR, name_tag)
    })
    smac = SMAC(scenario=scenario,
                tae_runner=evaluate,
                initial_configurations=init_points)
    smac.optimize()

    evaluations, optimum = evaluations_from_smac(smac)
    print('Finished at ' + datetime.now().strftime("%H:%M:%S"))
    return optimum
Example #23
def branin(n_eval):
    evaluator = Branin()
    name_tag = '_'.join(
        ['branin', datetime.now().strftime("%Y-%m-%d-%H:%M:%S:%f")])
    cs = ConfigurationSpace()
    for i in range(len(evaluator.n_vertices)):
        car_var = UniformIntegerHyperparameter('x' + str(i + 1).zfill(2),
                                               0,
                                               int(evaluator.n_vertices[i]) -
                                               1,
                                               default_value=25)
        cs.add_hyperparameter(car_var)

    init_points_numpy = evaluator.suggested_init.long().numpy()
    init_points = []
    for i in range(init_points_numpy.shape[0]):
        init_points.append(
            Configuration(
                cs, {
                    'x' + str(j + 1).zfill(2): int(init_points_numpy[i][j])
                    for j in range(len(evaluator.n_vertices))
                }))

    def evaluate(x):
        x_tensor = torch.LongTensor([
            int(x['x' + str(j + 1).zfill(2)])
            for j in range(len(evaluator.n_vertices))
        ])
        return evaluator.evaluate(x_tensor).item()

    print('Began    at ' + datetime.now().strftime("%H:%M:%S"))
    scenario = Scenario({
        "run_obj": "quality",
        "runcount-limit": n_eval,
        "cs": cs,
        "deterministic": "true",
        'output_dir': os.path.join(EXP_DIR, name_tag)
    })
    smac = SMAC(scenario=scenario,
                tae_runner=evaluate,
                initial_configurations=init_points)
    smac.optimize()

    evaluations, optimum = evaluations_from_smac(smac)
    print('Finished at ' + datetime.now().strftime("%H:%M:%S"))
    return optimum
Example #24
def contamination(n_eval, lamda, random_seed_pair):
    evaluator = Contamination(lamda, random_seed_pair)
    name_tag = '_'.join([
        'contamination', ('%.2E' % lamda),
        datetime.now().strftime("%Y-%m-%d-%H:%M:%S:%f")
    ])
    cs = ConfigurationSpace()
    for i in range(CONTAMINATION_N_STAGES):
        car_var = CategoricalHyperparameter('x' + str(i + 1).zfill(2),
                                            [str(elm) for elm in range(2)],
                                            default_value='0')
        cs.add_hyperparameter(car_var)

    init_points_numpy = evaluator.suggested_init.long().numpy()
    init_points = []
    for i in range(init_points_numpy.shape[0]):
        init_points.append(
            Configuration(
                cs, {
                    'x' + str(j + 1).zfill(2): str(init_points_numpy[i][j])
                    for j in range(CONTAMINATION_N_STAGES)
                }))

    def evaluate(x):
        x_tensor = torch.LongTensor([
            int(x['x' + str(j + 1).zfill(2)])
            for j in range(CONTAMINATION_N_STAGES)
        ])
        return evaluator.evaluate(x_tensor).item()

    print('Began    at ' + datetime.now().strftime("%H:%M:%S"))
    scenario = Scenario({
        "run_obj": "quality",
        "runcount-limit": n_eval,
        "cs": cs,
        "deterministic": "true",
        'output_dir': os.path.join(EXP_DIR, name_tag)
    })
    smac = SMAC(scenario=scenario,
                tae_runner=evaluate,
                initial_configurations=init_points)
    smac.optimize()

    evaluations, optimum = evaluations_from_smac(smac)
    print('Finished at ' + datetime.now().strftime("%H:%M:%S"))
    return optimum
Example #25
class SchafferTest(unittest.TestCase):
    def setUp(self):
        self.cs = ConfigurationSpace()
        self.cs.add_hyperparameter(
            UniformFloatHyperparameter("x", lower=MIN_V, upper=MAX_V))

        # Scenario object
        self.scenario = Scenario({
            "run_obj":
            "quality",  # we optimize quality (alternatively runtime)
            "runcount-limit": 50,  # max. number of function evaluations
            "cs": self.cs,  # configuration space
            "deterministic": True,
            "multi_objectives": "metric1, metric2",
            "limit_resources": False,
        })

        self.facade_kwargs = {
            "scenario": self.scenario,
            "rng": np.random.RandomState(5),
            "tae_runner": tae,
        }

        self.parego_facade_kwargs = {
            "scenario": self.scenario,
            "rng": np.random.RandomState(5),
            "tae_runner": tae,
            "multi_objective_algorithm": ParEGO,
            "multi_objective_kwargs": {
                "rho": 0.05
            },
        }

    def test_facades(self):
        results = []
        for facade in [SMAC4BB, SMAC4HPO, SMAC4AC]:
            smac = facade(**self.facade_kwargs)
            incumbent = smac.optimize()

            f1_inc, f2_inc = schaffer(incumbent["x"])
            f1_opt, f2_opt = get_optimum()

            self.assertAlmostEqual(f1_inc + f2_inc, f1_opt + f2_opt, places=1)
            results.append(smac)

        return results
Example #26
    def test_illegal_input(self):
        """
        Testing illegal input in smbo
        """
        cs = ConfigurationSpace()
        cs.add_hyperparameter(UniformFloatHyperparameter('test', 1, 10, 5))
        scen = Scenario({'run_obj': 'quality', 'cs': cs})
        stats = Stats(scen)
        # Recorded runs but no incumbent.
        stats.ta_runs = 10
        smac = SMAC(scen, stats=stats, rng=np.random.RandomState(42))
        self.assertRaises(ValueError, smac.optimize)
        # Incumbent but no recorded runs.
        incumbent = cs.get_default_configuration()
        smac = SMAC(scen, restore_incumbent=incumbent,
                    rng=np.random.RandomState(42))
        self.assertRaises(ValueError, smac.optimize)
Example #27
def pest_control(n_eval, random_seed):
    evaluator = PestControl(random_seed)
    name_tag = 'pestcontrol_' + datetime.now().strftime("%Y-%m-%d-%H:%M:%S:%f")
    cs = ConfigurationSpace()
    for i in range(PESTCONTROL_N_STAGES):
        car_var = CategoricalHyperparameter(
            'x' + str(i + 1).zfill(2),
            [str(elm) for elm in range(PESTCONTROL_N_CHOICE)],
            default_value='0')
        cs.add_hyperparameter(car_var)

    init_points_numpy = sample_init_points([PESTCONTROL_N_CHOICE] *
                                           PESTCONTROL_N_STAGES, 20,
                                           random_seed).long().numpy()
    init_points = []
    for i in range(init_points_numpy.shape[0]):
        init_points.append(
            Configuration(
                cs, {
                    'x' + str(j + 1).zfill(2): str(init_points_numpy[i][j])
                    for j in range(PESTCONTROL_N_STAGES)
                }))

    def evaluate(x):
        x_tensor = torch.LongTensor([
            int(x['x' + str(j + 1).zfill(2)])
            for j in range(PESTCONTROL_N_STAGES)
        ])
        return evaluator.evaluate(x_tensor).item()

    print('Began    at ' + datetime.now().strftime("%H:%M:%S"))
    scenario = Scenario({
        "run_obj": "quality",
        "runcount-limit": n_eval,
        "cs": cs,
        "deterministic": "true",
        'output_dir': os.path.join(EXP_DIR, name_tag)
    })
    smac = SMAC(scenario=scenario,
                tae_runner=evaluate,
                initial_configurations=init_points)
    smac.optimize()

    evaluations, optimum = evaluations_from_smac(smac)
    print('Finished at ' + datetime.now().strftime("%H:%M:%S"))
    return optimum
Example #28
def fmin_smac_nopynisher(func, x0, bounds, maxfun, rng):
    """
    Minimize a function using SMAC, but without pynisher, which doesn't work
    well with benchmark_minimize_callable.
    
    This function is based on SMAC's fmin_smac.
    """
    cs = ConfigurationSpace()
    tmplt = 'x{0:0' + str(len(str(len(bounds)))) + 'd}'
    for idx, (lower_bound, upper_bound) in enumerate(bounds):
        parameter = UniformFloatHyperparameter(
            name=tmplt.format(idx + 1),
            lower=lower_bound,
            upper=upper_bound,
            default_value=x0[idx],
        )
        cs.add_hyperparameter(parameter)

    scenario_dict = {
        "run_obj": "quality",
        "cs": cs,
        "deterministic": "true",
        "initial_incumbent": "DEFAULT",
        "runcount_limit": maxfun,
    }
    scenario = Scenario(scenario_dict)

    def call_ta(config):
        x = np.array(
            [val for _, val in sorted(config.get_dictionary().items())],
            dtype=float)
        return func(x)

    smac = SMAC4HPO(
        scenario=scenario,
        tae_runner=ExecuteTAFuncArray,
        tae_runner_kwargs={
            'ta': call_ta,
            'use_pynisher': False
        },
        rng=rng,
        initial_design=RandomConfigurations,
    )

    smac.optimize()
    return
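A hypothetical call to fmin_smac_nopynisher, minimizing a simple quadratic over two bounded variables; the objective, bounds and budget below are made up:

import numpy as np

fmin_smac_nopynisher(
    func=lambda x: float(np.sum((x - 0.3) ** 2)),  # x arrives as a numpy array
    x0=[0.5, 0.5],
    bounds=[(0.0, 1.0), (0.0, 1.0)],
    maxfun=20,
    rng=np.random.RandomState(3),
)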
Example #29
def _create_config_space(dict_hyperparams):
    """Create the hyperparameters hyperspace."""
    config_space = ConfigurationSpace()

    if not isinstance(dict_hyperparams, dict):
        raise TypeError('Hyperparams must be a dictionary.')

    for name, hyperparam in dict_hyperparams.items():
        hp_type = hyperparam['type']

        if hp_type == 'int':
            hp_range = hyperparam.get('range') or hyperparam.get('values')
            hp_min = min(hp_range)
            hp_max = max(hp_range)
            hp_default = hyperparam.get('default') or hp_min
            config_space.add_hyperparameter(
                hp.UniformIntegerHyperparameter(name,
                                                hp_min,
                                                hp_max,
                                                default_value=hp_default))

        elif hp_type == 'float':
            hp_range = hyperparam.get('range') or hyperparam.get('values')
            hp_min = min(hp_range)
            hp_max = max(hp_range)
            hp_default = hyperparam.get('default') or hp_min
            config_space.add_hyperparameter(
                hp.UniformFloatHyperparameter(name,
                                              hp_min,
                                              hp_max,
                                              default_value=hp_default))

        elif hp_type == 'bool':
            hp_default = bool(hyperparam.get('default'))
            config_space.add_hyperparameter(
                hp.CategoricalHyperparameter(name, ['true', 'false'],
                                             default_value=hp_default))

        elif hp_type == 'str':
            hp_range = hyperparam.get('range') or hyperparam.get('values')
            hp_range = [_NONE if hp is None else hp for hp in hp_range]
            hp_default = hyperparam.get('default') or hp_range[0]
            hp_default = _NONE if hp_default is None else hp_default

            config_space.add_hyperparameter(
                hp.CategoricalHyperparameter(name,
                                             hp_range,
                                             default_value=hp_default))

    return config_space
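A hypothetical input illustrating the dictionary format _create_config_space expects; the parameter names and ranges below are made up:

dict_hyperparams = {
    'max_depth': {'type': 'int', 'range': [2, 10], 'default': 3},
    'learning_rate': {'type': 'float', 'range': [1e-4, 1e-1]},
    'criterion': {'type': 'str', 'values': ['gini', 'entropy'], 'default': 'gini'},
}

config_space = _create_config_space(dict_hyperparams)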
Example #30
def get_gp(n_dimensions, rs, noise=1e-3, normalize_y=True) -> GaussianProcess:
    from smac.epm.gp_kernels import ConstantKernel, Matern, WhiteKernel

    cov_amp = ConstantKernel(
        2.0,
        constant_value_bounds=(1e-10, 2),
        prior=LognormalPrior(mean=0.0, sigma=1.0, rng=rs),
    )
    exp_kernel = Matern(
        np.ones([n_dimensions]),
        [(np.exp(-10), np.exp(2)) for _ in range(n_dimensions)],
        nu=2.5,
    )
    noise_kernel = WhiteKernel(
        noise_level=noise,
        noise_level_bounds=(1e-10, 2),
        prior=HorseshoePrior(scale=0.1, rng=rs),
    )
    kernel = cov_amp * exp_kernel + noise_kernel

    bounds = [(0., 1.) for _ in range(n_dimensions)]
    types = np.zeros(n_dimensions)

    configspace = ConfigurationSpace()
    for i in range(n_dimensions):
        configspace.add_hyperparameter(
            UniformFloatHyperparameter('x%d' % i, 0, 1))

    model = GaussianProcess(
        configspace=configspace,
        bounds=bounds,
        types=types,
        kernel=kernel,
        seed=rs.randint(low=1, high=10000),
        normalize_y=normalize_y,
        n_opt_restarts=2,
    )
    return model
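A hedged usage sketch for get_gp, fitting the returned GaussianProcess on random unit-cube data through the generic SMAC EPM train/predict interface; the data is purely illustrative:

import numpy as np

rs = np.random.RandomState(1)
model = get_gp(n_dimensions=3, rs=rs)

X = rs.rand(20, 3)
y = np.sin(3 * X).sum(axis=1, keepdims=True)
model.train(X, y)
mean, var = model.predict(rs.rand(5, 3))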
Example #31
import os

import numpy as np
import fanova.visualizer
from fanova import fANOVA

from ConfigSpace import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformFloatHyperparameter

path = os.path.dirname(os.path.realpath(__file__))

# get sample data from online lda
X = np.loadtxt(path + '/example_data/online_lda/online_lda_features.csv', delimiter=",")
Y = np.loadtxt(path + '/example_data/online_lda/online_lda_responses.csv', delimiter=",")

# setting up config space:
param_file = path + '/example_data/online_lda/param-file.txt'
f = open(param_file, 'rb')

cs = ConfigurationSpace()
for row in f:
    cs.add_hyperparameter(UniformFloatHyperparameter("%s" % row[0:4].decode('utf-8'),
                                                     float(row[6:9]),
                                                     float(row[10:13]),
                                                     float(row[18:21])))
param = cs.get_hyperparameters()


# create an instance of fanova with data for the random forest and the configSpace
f = fANOVA(X = X, Y = Y, config_space = cs)

# marginal for first parameter
p_list = (0, )
res = f.quantify_importance(p_list)
print(res)

p2_list = ('Col1', 'Col2')
res2 = f.quantify_importance(p2_list)
print(res2)
p2_list = ('Col0', 'Col2')