Example #1
0
 def test_get_next_by_random_search(self, patch):
     """Random search must return the requested number of scored configs."""
     # Make the patched sampler return `size` mock configurations.
     patch.side_effect = lambda size: [ConfigurationMock()] * size
     smbo = SMAC(self.scenario, rng=1).solver
     challengers = smbo._get_next_by_random_search(10, False)
     self.assertEqual(len(challengers), 10)
     for score, config in challengers:
         self.assertIsInstance(config, ConfigurationMock)
         self.assertEqual(config.origin, 'Random Search')
         self.assertEqual(score, 0)
Example #2
0
    def test_choose_next(self):
        """choose_next on a one-entry runhistory yields a 2-d config array."""
        smbo = SMAC(self.scenario, rng=42).solver
        smbo.runhistory = RunHistory(aggregate_func=average_cost)
        # Sample one point for the model data and a separate incumbent.
        X = self.scenario.cs.sample_configuration().get_array()[None, :]
        smbo.incumbent = self.scenario.cs.sample_configuration()
        smbo.runhistory.add(smbo.incumbent, 10, 10, 1)

        Y = self.branin(X)
        challenger = smbo.choose_next(X, Y)[0]
        self.assertEqual(challenger.get_array().shape, (2, ))
Example #3
0
    def main_cli(self):
        """Main function of SMAC for the CLI interface.

        Reads command-line arguments, builds the scenario, optionally
        warm-starts the runhistory and initial configurations, runs the
        selected optimizer and always dumps the runhistory at the end.

        Raises
        ------
        ValueError
            If ``--modus`` is neither "SMAC" nor "ROAR".
        """

        cmd_reader = CMDReader()
        args_, misc_args = cmd_reader.read_cmd()

        logging.basicConfig(level=args_.verbose_level)

        root_logger = logging.getLogger()
        root_logger.setLevel(args_.verbose_level)

        scen = Scenario(args_.scenario_file, misc_args)

        rh = None
        if args_.warmstart_runhistory:
            aggregate_func = average_cost
            rh = RunHistory(aggregate_func=aggregate_func)

            # Merge runs from previous (foreign) scenarios into this one.
            scen, rh = merge_foreign_data_from_file(
                scenario=scen,
                runhistory=rh,
                in_scenario_fn_list=args_.warmstart_scenario,
                in_runhistory_fn_list=args_.warmstart_runhistory,
                cs=scen.cs,
                aggregate_func=aggregate_func)

        initial_configs = None
        if args_.warmstart_incumbent:
            # Seed the initial design with the default configuration plus
            # the final incumbent of every warm-start trajectory.
            initial_configs = [scen.cs.get_default_configuration()]
            for traj_fn in args_.warmstart_incumbent:
                trajectory = TrajLogger.read_traj_aclib_format(fn=traj_fn,
                                                               cs=scen.cs)
                initial_configs.append(trajectory[-1]["incumbent"])

        if args_.modus == "SMAC":
            optimizer = SMAC(scenario=scen,
                             rng=np.random.RandomState(args_.seed),
                             runhistory=rh,
                             initial_configurations=initial_configs)
        elif args_.modus == "ROAR":
            optimizer = ROAR(scenario=scen,
                             rng=np.random.RandomState(args_.seed),
                             runhistory=rh,
                             initial_configurations=initial_configs)
        else:
            # BUGFIX: an unknown modus previously left `optimizer` unbound
            # and crashed with a NameError inside the try/finally below.
            raise ValueError("Unknown modus: %s" % args_.modus)
        try:
            optimizer.optimize()

        finally:
            # ensure that the runhistory is always dumped in the end
            if scen.output_dir is not None:
                optimizer.solver.runhistory.save_json(
                    fn=os.path.join(scen.output_dir, "runhistory.json"))
Example #4
0
    def test_choose_next_w_empty_rh(self):
        """An empty runhistory requires an explicit incumbent_value."""
        smbo = SMAC(self.scenario, rng=42).solver
        smbo.runhistory = RunHistory(aggregate_func=average_cost)
        X = self.scenario.cs.sample_configuration().get_array()[None, :]

        Y = self.branin(X)
        # Without incumbent_value the empty runhistory must be rejected.
        self.assertRaises(ValueError, smbo.choose_next, X=X, Y=Y)

        challenger = next(smbo.choose_next(X, Y, incumbent_value=0.0))
        self.assertEqual(challenger.get_array().shape, (2, ))
Example #5
0
def main_loop(problem):
    """Tune KNN hyperparameters with SMAC for the given `problem`.

    Builds the KNN configuration space, evaluates the default
    configuration, runs SMAC and returns the incumbent configuration.
    """
    logging.basicConfig(level=logging.INFO)  # logging.DEBUG for debug output

    cs = ConfigurationSpace()

    # Number of neighbours considered by KNN.
    n_neighbors = UniformIntegerHyperparameter("n_neighbors", 2,10, default_value=5)
    cs.add_hyperparameter(n_neighbors)

    weights = CategoricalHyperparameter("weights", ["uniform","distance"], default_value="uniform")
    algorithm = CategoricalHyperparameter("algorithm", ["ball_tree", "kd_tree","brute","auto"], default_value="auto")
    cs.add_hyperparameters([weights, algorithm])

    # leaf_size is only active for the tree-based algorithms (see condition).
    leaf_size = UniformIntegerHyperparameter("leaf_size", 1, 100, default_value=50)
    cs.add_hyperparameter(leaf_size)
    use_leaf_size= InCondition(child=leaf_size, parent=algorithm, values=["ball_tree","kd_tree"])
    cs.add_condition(use_leaf_size)

    # Power parameter of the Minkowski metric.
    p = UniformIntegerHyperparameter("p", 1,3, default_value=2)
    cs.add_hyperparameter(p)

    # Scenario object
    # NOTE(review): output_dir is a hard-coded absolute path — adjust per machine.
    max_eval=100000
    scenario = Scenario({"run_obj": "quality",   # we optimize quality (alternatively runtime)
                         "runcount-limit": max_eval,  # maximum function evaluations
                         "cs": cs,                        # configuration space
                         "shared_model": True,
                         "output_dir": "/home/naamah/Documents/CatES/result_All/smac/KNN/run_{}_{}_{}".format(max_eval,
                                                                                                           datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H:%M:%S'),
                                                                                                              problem)

                         # "output_dir": "/home/naamah/Documents/CatES/result_All/smac/KNN/{}/run_{}_{}".format(problem,max_eval, datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H:%M:%S_%f')),
                         # "input_psmac_dirs":"/home/naamah/Documents/CatES/result_All/",
                         # "deterministic": "true"
                         })

    # Example call of the function
    # It returns: Status, Cost, Runtime, Additional Infos
    # NOTE(review): the objective is named svm_from_cfg although the space is
    # for KNN — confirm the function actually builds a KNN model.
    def_value = svm_from_cfg(cs.get_default_configuration())
    print("Default Value: %.2f" % (def_value))

    # Optimize, using a SMAC-object
    print("Optimizing! Depending on your machine, this might take a few minutes.")
    smac = SMAC(scenario=scenario,tae_runner=svm_from_cfg)

    incumbent = smac.optimize()

    inc_value = svm_from_cfg(incumbent)
    print("Optimized Value: %.2f" % (inc_value))

    return (incumbent)


# main_loop()
Example #6
0
    def test_choose_next_empty_X(self):
        """Empty model data must fall back to pure random search."""
        smbo = SMAC(self.scenario, rng=1).solver
        smbo.acquisition_func._compute = mock.Mock(spec=RandomForestWithInstances)
        smbo._get_next_by_random_search = mock.Mock(spec=smbo._get_next_by_random_search)
        smbo._get_next_by_random_search.return_value = [[0, 0], [0, 1], [0, 2]]

        X, Y = np.zeros((0, 2)), np.zeros((0, 1))

        challengers = smbo.choose_next(X, Y)
        self.assertEqual(challengers, [0, 1, 2])
        # Exactly one random-search call and no acquisition evaluation.
        self.assertEqual(smbo._get_next_by_random_search.call_count, 1)
        self.assertEqual(smbo.acquisition_func._compute.call_count, 0)
Example #7
0
    def main_cli(self):
        """Main function of SMAC for the CLI interface.

        Reads command-line arguments, builds the scenario, optionally
        warm-starts the runhistory and initial configurations, and runs
        the selected optimizer, logging abort/crash exceptions.

        Raises
        ------
        ValueError
            If ``--modus`` is neither "SMAC" nor "ROAR".
        """
        self.logger.info("SMAC call: %s" % (" ".join(sys.argv)))

        cmd_reader = CMDReader()
        args_, misc_args = cmd_reader.read_cmd()

        logging.basicConfig(level=args_.verbose_level)

        root_logger = logging.getLogger()
        root_logger.setLevel(args_.verbose_level)

        scen = Scenario(args_.scenario_file, misc_args, run_id=args_.seed)

        rh = None
        if args_.warmstart_runhistory:
            aggregate_func = average_cost
            rh = RunHistory(aggregate_func=aggregate_func)

            # Merge runs from previous (foreign) scenarios into this one.
            scen, rh = merge_foreign_data_from_file(
                scenario=scen,
                runhistory=rh,
                in_scenario_fn_list=args_.warmstart_scenario,
                in_runhistory_fn_list=args_.warmstart_runhistory,
                cs=scen.cs,
                aggregate_func=aggregate_func)

        initial_configs = None
        if args_.warmstart_incumbent:
            # Seed the initial design with the default configuration plus
            # the final incumbent of every warm-start trajectory.
            initial_configs = [scen.cs.get_default_configuration()]
            for traj_fn in args_.warmstart_incumbent:
                trajectory = TrajLogger.read_traj_aclib_format(fn=traj_fn,
                                                               cs=scen.cs)
                initial_configs.append(trajectory[-1]["incumbent"])

        if args_.modus == "SMAC":
            optimizer = SMAC(scenario=scen,
                             rng=np.random.RandomState(args_.seed),
                             runhistory=rh,
                             initial_configurations=initial_configs)
        elif args_.modus == "ROAR":
            optimizer = ROAR(scenario=scen,
                             rng=np.random.RandomState(args_.seed),
                             runhistory=rh,
                             initial_configurations=initial_configs)
        else:
            # BUGFIX: an unknown modus previously left `optimizer` unbound
            # and crashed with a NameError inside the try block below.
            raise ValueError("Unknown modus: %s" % args_.modus)
        try:
            optimizer.optimize()
        except (TAEAbortException, FirstRunCrashedException) as err:
            self.logger.error(err)
Example #8
0
def optimize(algorithm,
             data_location,
             max_evals=10,
             output_file='results/optimization',
             do_plotting=False):
    """Tune a model with SMAC on the data set chosen by `experiment_type`.

    Parameters keep their original meaning; `algorithm`, `data_location`,
    `output_file` and `do_plotting` are currently unused in this body.

    Raises
    ------
    ValueError
        If the global `experiment_type` is not one of the known names.
    """
    # NOTE(review): `experiment_type` and the data/model/dict names below are
    # free (module-level) variables — confirm they exist where this is used.
    if experiment_type == 'mnist':
        data = mnist_data
        dictionary = dict_classifiers
        loss_function = accuracy_score
        classification = True
    elif experiment_type == 'galaxy':
        data = gz_data
        dictionary = dict_regressors
        loss_function = lambda x, y: np.sqrt(mean_squared_error(x, y))
        classification = False  # BUGFIX: previously left unbound on this branch
    elif experiment_type == 'neal':
        data = neal_data
        dictionary = dict_classifiers
        loss_function = accuracy_score
        classification = True
    else:
        # BUGFIX: an unrecognized experiment type previously fell through and
        # crashed later with a NameError on `data`.
        raise ValueError('Unknown experiment_type: %s' % experiment_type)

    #logger = logging.getLogger("SVMExample")
    logging.basicConfig(level=logging.INFO)  # logging.DEBUG for debug output
    cs = get_config_space()
    # Scenario object
    scenario = Scenario({
        "run_obj": "quality",  # we optimize quality (alternatively runtime)
        "runcount-limit": max_evals,  # maximum function evaluations
        "cs": cs,  # configuration space
        "deterministic": "true"
    })

    # Example call of the function
    # It returns: Status, Cost, Runtime, Additional Infos
    def_value = ml_cfg(cs.get_default_configuration())
    print("Default Value: %.2f" % (def_value))

    # Optimize, using a SMAC-object
    print(
        "Optimizing! Depending on your machine, this might take a few minutes."
    )
    model = ml_cfg(data)
    smac = SMAC(scenario=scenario,
                rng=np.random.RandomState(42),
                tae_runner=model)

    incumbent = smac.optimize()

    # NOTE(review): evaluating with svm_from_cfg while optimizing ml_cfg looks
    # inconsistent — confirm which objective function is intended here.
    inc_value = svm_from_cfg(incumbent)

    print("Optimized Value: %.2f" % (inc_value))
def main():
    """Demonstrate restoring a finished SMAC run with an extended budget."""
    # Initialize scenario, using runcount_limit as budget.
    orig_scen_dict = {
        'algo': 'python cmdline_wrapper.py',
        'paramfile': 'param_config_space.pcs',
        'run_obj': 'quality',
        'runcount_limit': 25,
        'deterministic': True,
        'output_dir': 'restore_me'
    }
    original_scenario = Scenario(orig_scen_dict)
    smac = SMAC(scenario=original_scenario)
    smac.optimize()

    print(
        "\n########## BUDGET EXHAUSTED! Restoring optimization: ##########\n")

    # Now the output is in the folder 'restore_me'
    #
    # We could simply modify the scenario-object, stored in
    # 'smac.solver.scenario' and start optimization again:

    #smac.solver.scenario.ta_run_limit = 50
    #smac.optimize()

    # Or, to show the whole process of recovering a SMAC-run from the output
    # directory, create a new scenario with an extended budget:
    new_scenario = Scenario(
        orig_scen_dict,
        cmd_args={
            'runcount_limit': 50,  # overwrite these args
            'output_dir': 'restored'
        })

    # We load the runhistory, ...
    rh_path = os.path.join(original_scenario.output_dir, "runhistory.json")
    runhistory = RunHistory(aggregate_func=None)
    runhistory.load_json(rh_path, new_scenario.cs)
    # ... stats, ...
    stats_path = os.path.join(original_scenario.output_dir, "stats.json")
    stats = Stats(new_scenario)
    stats.load(stats_path)
    # ... and trajectory.
    traj_path = os.path.join(original_scenario.output_dir, "traj_aclib2.json")
    trajectory = TrajLogger.read_traj_aclib_format(fn=traj_path,
                                                   cs=new_scenario.cs)
    incumbent = trajectory[-1]["incumbent"]
    # Because we changed the output_dir, we might want to copy the old
    # trajectory-file (runhistory and stats will be complete)
    new_traj_path = os.path.join(new_scenario.output_dir, "traj_aclib2.json")
    shutil.copy(traj_path, new_traj_path)

    # Now we can initialize SMAC with the recovered objects and restore the
    # state where we left off. By providing stats and a restore_incumbent, SMAC
    # automatically detects the intention of restoring a state.
    smac = SMAC(scenario=new_scenario,
                runhistory=runhistory,
                stats=stats,
                restore_incumbent=incumbent)
    smac.optimize()
Example #10
0
    def test_comp_builder(self):
        """_component_builder must map conf dicts to model/acq classes."""
        smbo = SMAC(self.scenario, rng=42).solver

        acqf, model = smbo._component_builder({"model": "RF", "acq_func": "EI"})
        self.assertIsInstance(acqf, EI)
        self.assertIsInstance(model, RandomForestWithInstances)

        acqf, model = smbo._component_builder({"model": "GP", "acq_func": "EI"})
        self.assertIsInstance(acqf, EI)
        self.assertIsInstance(model, GaussianProcessMCMC)
Example #11
0
 def test_inject_stats_and_runhistory_object_to_TAE(self):
     """Constructing SMAC must wire stats/runhistory into the TAE runner."""
     tae = ExecuteTAFuncDict(lambda x: x**2)
     # Before construction both hooks are unset ...
     self.assertIsNone(tae.stats)
     self.assertIsNone(tae.runhistory)
     # ... and SMAC's constructor fills them in as a side effect.
     SMAC(tae_runner=tae, scenario=self.scenario)
     self.assertIsInstance(tae.stats, Stats)
     self.assertIsInstance(tae.runhistory, RunHistory)
Example #12
0
def main(b, seed):
    """Run SMAC on benchmark `b`, seeding both scenario and optimizer."""
    meta = b.get_meta_information()
    scenario = Scenario({
        "run_obj": "quality",
        "runcount-limit": meta['num_function_evals'],
        "cs": b.get_configuration_space(),
        "deterministic": "true",
        "output_dir": "./{:s}/run-{:d}".format(meta['name'], seed)})

    smac = SMAC(scenario=scenario, tae_runner=b,
                rng=np.random.RandomState(seed))
    x_star = smac.optimize()

    print("Best value found:\n {:s}".format(str(x_star)))
    print("with {:s}".format(str(b.objective_function(x_star))))
Example #13
0
    def _main_cli(self):
        """Main function of SMAC for CLI interface.

        Configures root logging, reads the scenario file and builds the
        optimizer selected via ``args.mode``.

        Returns
        -------
        instance
            optimizer (``None`` if ``args.mode`` is unrecognized)
        """
        self.logger.info("SMAC call: %s" % (" ".join(sys.argv)))

        cmd_reader = CMDReader()
        args, _ = cmd_reader.read_cmd()

        root_logger = logging.getLogger()
        root_logger.setLevel(args.verbose_level)
        logger_handler = logging.StreamHandler(stream=sys.stdout)
        # Compact format at INFO and above; timestamps when debugging.
        if root_logger.level >= logging.INFO:
            formatter = logging.Formatter("%(levelname)s:\t%(message)s")
        else:
            formatter = logging.Formatter(
                "%(asctime)s:%(levelname)s:%(name)s:%(message)s",
                "%Y-%m-%d %H:%M:%S")
        logger_handler.setFormatter(formatter)
        root_logger.addHandler(logger_handler)
        # remove default handler
        root_logger.removeHandler(root_logger.handlers[0])

        # Create defaults
        rh = None
        initial_configs = None
        stats = None
        incumbent = None

        # Create scenario-object
        scen = Scenario(args.scenario_file, [])

        if args.mode == "SMAC":
            optimizer = SMAC(scenario=scen,
                             rng=np.random.RandomState(args.seed),
                             runhistory=rh,
                             initial_configurations=initial_configs,
                             stats=stats,
                             restore_incumbent=incumbent,
                             run_id=args.seed)
        elif args.mode == "ROAR":
            optimizer = ROAR(scenario=scen,
                             rng=np.random.RandomState(args.seed),
                             runhistory=rh,
                             initial_configurations=initial_configs,
                             run_id=args.seed)
        elif args.mode == "EPILS":
            optimizer = EPILS(scenario=scen,
                              rng=np.random.RandomState(args.seed),
                              runhistory=rh,
                              initial_configurations=initial_configs,
                              run_id=args.seed)
        else:
            # NOTE(review): silently returning None hides a bad --mode value
            # from the caller; consider raising ValueError instead.
            optimizer = None

        return optimizer
Example #14
0
 def test_validation(self):
     """validate() must dispatch to Validator.validate / validate_epm."""
     # Stub trajectory reading so no real trajectory file is needed.
     with mock.patch.object(TrajLogger,
                            "read_traj_aclib_format",
                            return_value=None) as traj_mock:
         self.scenario.output_dir = "test"
         smac = SMAC(self.scenario)
         self.output_dirs.append(smac.output_dir)
         smbo = smac.solver
         # use_epm=False must call Validator.validate ...
         with mock.patch.object(Validator, "validate",
                                return_value=None) as validation_mock:
             smbo.validate(config_mode='inc',
                           instance_mode='train+test',
                           repetitions=1,
                           use_epm=False,
                           n_jobs=-1,
                           backend='threading')
             self.assertTrue(validation_mock.called)
         # ... and use_epm=True must call Validator.validate_epm.
         with mock.patch.object(Validator,
                                "validate_epm",
                                return_value=None) as epm_validation_mock:
             smbo.validate(config_mode='inc',
                           instance_mode='train+test',
                           repetitions=1,
                           use_epm=True,
                           n_jobs=-1,
                           backend='threading')
             self.assertTrue(epm_validation_mock.called)
Example #15
0
    def test_choose_next(self):
        """AutoMLSMBO.choose_next requires a non-empty runhistory."""
        configspace = ConfigurationSpace()
        configspace.add_hyperparameter(UniformFloatHyperparameter('a', 0, 1))
        configspace.add_hyperparameter(UniformFloatHyperparameter('b', 0, 1))

        func_eval_time_limit = 15
        total_walltime_limit = 15
        memory_limit = 3000

        auto = AutoMLSMBO(None, 'foo', None, func_eval_time_limit,
                          total_walltime_limit, memory_limit, None)
        auto.config_space = configspace
        smac = SMAC(Scenario({'cs': configspace,
                              'cutoff-time': func_eval_time_limit,
                              'wallclock-limit': total_walltime_limit,
                              'memory-limit': memory_limit,
                              'run-obj': 'quality'}))

        # An empty runhistory must be rejected ...
        self.assertRaisesRegex(ValueError, 'Cannot use SMBO algorithm on '
                                           'empty runhistory',
                               auto.choose_next, smac)

        # ... and a single recorded run is enough to proceed.
        config = Configuration(configspace, values={'a': 0.1, 'b': 0.2})
        smac.solver.runhistory.add(config=config, cost=0.5, time=0.5,
                                   status=StatusType.SUCCESS)

        auto.choose_next(smac)
Example #16
0
 def test_init_only_scenario_runtime(self):
     """A runtime scenario must select the log-cost model stack."""
     self.scenario.run_obj = 'runtime'
     self.scenario.cutoff = 300
     solver = SMAC(self.scenario).solver
     for attribute, expected_cls in [('model', RandomForestWithInstances),
                                     ('rh2EPM', RunHistory2EPM4LogCost),
                                     ('acquisition_func', LogEI)]:
         self.assertIsInstance(getattr(solver, attribute), expected_cls)
Example #17
0
    def optimize(self):
        """Run `n_jobs` SMAC instances in parallel and merge their results.

        Returns
        -------
        tuple
            (incumbent_config, incumbent_perf) over all workers.
        """
        for i in range(self.n_jobs):
            self.optimizer_list.append(
                SMAC(
                    scenario=Scenario(self.scenario_dict),
                    rng=np.random.RandomState(
                        None),  # Different seed for different optimizers
                    tae_runner=self.evaluator))

        processes = []
        # Shared Manager list so child processes can hand runhistories back.
        return_hist = multiprocessing.Manager().list()
        for i in range(1, self.n_jobs):
            p = multiprocessing.Process(
                target=self._optimize,
                args=[self.optimizer_list[i], return_hist])
            processes.append(p)
            p.start()
        # Optimizer 0 runs in the current process while the others fork.
        self._optimize(self.optimizer_list[0], return_hist)
        for p in processes:
            p.join()

        # Merge all runhistories: reward is 1 minus the recorded cost;
        # keep the best (config, reward) pair seen across workers.
        for runhistory in return_hist:
            runkeys = list(runhistory.data.keys())
            for key in runkeys:
                _reward = 1. - runhistory.data[key][0]
                _config = runhistory.ids_config[key[0]]
                if _config not in self.configs:
                    self.perfs.append(_reward)
                    self.configs.append(_config)
                if _reward > self.incumbent_perf:
                    self.incumbent_perf = _reward
                    self.incumbent_config = _config
        return self.incumbent_config, self.incumbent_perf
Example #18
0
def maxsat(n_eval, n_variables, random_seed):
    """Run SMAC on a MaxSAT benchmark instance.

    Parameters
    ----------
    n_eval : int
        Evaluation budget (SMAC runcount-limit).
    n_variables : int
        Problem size; must be one of 28, 43 or 60.
    random_seed : int
        Seed forwarded to the MaxSAT evaluator.

    Returns
    -------
    The optimum as computed by ``evaluations_from_smac``.
    """
    assert n_variables in [28, 43, 60]
    if n_variables == 28:
        evaluator = MaxSAT28(random_seed)
    elif n_variables == 43:
        evaluator = MaxSAT43(random_seed)
    elif n_variables == 60:
        evaluator = MaxSAT60(random_seed)
    # Unique, timestamped output tag so concurrent runs do not collide.
    name_tag = 'maxsat' + str(n_variables) + '_' + datetime.now().strftime(
        "%Y-%m-%d-%H:%M:%S:%f")
    cs = ConfigurationSpace()
    # One binary categorical per SAT variable: 'x01', 'x02', ...
    for i in range(n_variables):
        car_var = CategoricalHyperparameter('x' + str(i + 1).zfill(2),
                                            [str(elm) for elm in range(2)],
                                            default_value='0')
        cs.add_hyperparameter(car_var)
    # Warm-start SMAC with the evaluator's suggested initial points.
    init_points_numpy = evaluator.suggested_init.long().numpy()
    init_points = []
    for i in range(init_points_numpy.shape[0]):
        init_points.append(
            Configuration(
                cs, {
                    'x' + str(j + 1).zfill(2): str(init_points_numpy[i][j])
                    for j in range(n_variables)
                }))

    def evaluate(x):
        # Convert a Configuration back into the evaluator's tensor format.
        x_tensor = torch.LongTensor(
            [int(x['x' + str(j + 1).zfill(2)]) for j in range(n_variables)])
        return evaluator.evaluate(x_tensor).item()

    print('Began    at ' + datetime.now().strftime("%H:%M:%S"))
    scenario = Scenario({
        "run_obj": "quality",
        "runcount-limit": n_eval,
        "cs": cs,
        "deterministic": "true",
        'output_dir': os.path.join(EXP_DIR, name_tag)
    })
    smac = SMAC(scenario=scenario,
                tae_runner=evaluate,
                initial_configurations=init_points)
    smac.optimize()

    evaluations, optimum = evaluations_from_smac(smac)
    print('Finished at ' + datetime.now().strftime("%H:%M:%S"))
    return optimum
Example #19
0
    def __init__(self,
                 evaluator,
                 config_space,
                 time_limit=None,
                 evaluation_limit=None,
                 per_run_time_limit=600,
                 per_run_mem_limit=1024,
                 output_dir='./',
                 trials_per_iter=1,
                 seed=1):
        """Set up a single SMAC optimizer over `config_space`.

        A timestamped ``smac3_output_*`` directory is created under
        `output_dir`, and the size of the hyperparameter space is
        estimated to derive a configuration-count threshold.
        """
        super().__init__(evaluator, config_space, seed)
        self.time_limit = time_limit
        self.evaluation_num_limit = evaluation_limit
        self.trials_per_iter = trials_per_iter
        self.per_run_time_limit = per_run_time_limit
        self.per_run_mem_limit = per_run_mem_limit

        # Timestamped output directory under the requested base path.
        if not output_dir.endswith('/'):
            output_dir += '/'
        output_dir += "smac3_output_%s" % (datetime.datetime.fromtimestamp(
            time.time()).strftime('%Y-%m-%d_%H:%M:%S_%f'))
        self.scenario_dict = {
            'abort_on_first_run_crash': False,
            "run_obj": "quality",
            "cs": self.config_space,
            "deterministic": "true",
            "cutoff_time": self.per_run_time_limit,
            'output_dir': output_dir
        }

        self.optimizer = SMAC(scenario=Scenario(self.scenario_dict),
                              rng=np.random.RandomState(self.seed),
                              tae_runner=self.evaluator)
        self.trial_cnt = 0
        self.configs = list()
        self.perfs = list()
        self.incumbent_perf = -1.
        self.incumbent_config = self.config_space.get_default_configuration()
        # Estimate the size of the hyperparameter space.
        hp_num = len(self.config_space.get_hyperparameters())
        if hp_num == 0:
            self.config_num_threshold = 0
        else:
            # 80% of the distinct configs among 12500 samples approximates
            # how many unique configurations are reachable.
            _threshold = int(
                len(set(self.config_space.sample_configuration(12500))) * 0.8)
            self.config_num_threshold = _threshold
        self.logger.info('HP_THRESHOLD is: %d' % self.config_num_threshold)
Example #20
0
    def __init__(self,
                 evaluator,
                 config_space,
                 name,
                 n_jobs=4,
                 time_limit=None,
                 evaluation_limit=200,
                 per_run_time_limit=600,
                 per_run_mem_limit=1024,
                 output_dir='./',
                 trials_per_iter=1,
                 seed=1):
        """Set up `n_jobs` SMAC optimizers for parallel (pSMAC) search.

        A timestamped ``psmac3_output_*`` directory is created under
        `output_dir`, shared-model mode is enabled in the scenario, and
        the hyperparameter-space size is estimated to derive a
        configuration-count threshold.
        """
        super().__init__(evaluator, config_space, name, seed)
        self.time_limit = time_limit
        self.evaluation_num_limit = evaluation_limit
        self.trials_per_iter = trials_per_iter
        self.trials_this_run = trials_per_iter
        self.per_run_time_limit = per_run_time_limit
        self.per_run_mem_limit = per_run_mem_limit
        self.n_jobs = n_jobs

        # Timestamped output directory under the requested base path.
        # BUGFIX: removed a dead `self.output_dir = output_dir` store that
        # was overwritten below before ever being read.
        if not output_dir.endswith('/'):
            output_dir += '/'
        output_dir += "psmac3_output_%s" % (datetime.datetime.fromtimestamp(
            time.time()).strftime('%Y-%m-%d_%H:%M:%S_%f'))
        self.output_dir = output_dir
        self.scenario_dict = {
            'abort_on_first_run_crash': False,
            "run_obj": "quality",
            "cs": self.config_space,
            "deterministic": "true",
            "shared-model": True,  # PSMAC Entry
            "runcount-limit": self.evaluation_num_limit,
            "output_dir": output_dir,
            "cutoff_time": self.per_run_time_limit
        }
        self.optimizer_list = list()
        for _ in range(self.n_jobs):
            self.optimizer_list.append(
                SMAC(
                    scenario=Scenario(self.scenario_dict),
                    rng=np.random.RandomState(
                        None),  # Different seed for different optimizers
                    tae_runner=self.evaluator))
        self.trial_cnt = 0
        self.configs = list()
        self.perfs = list()
        self.incumbent_perf = float("-INF")
        self.incumbent_config = self.config_space.get_default_configuration()
        # Estimate the size of the hyperparameter space.
        hp_num = len(self.config_space.get_hyperparameters())
        if hp_num == 0:
            self.config_num_threshold = 0
        else:
            # 80% of the distinct configs among 12500 samples approximates
            # how many unique configurations are reachable.
            _threshold = int(
                len(set(self.config_space.sample_configuration(12500))) * 0.8)
            self.config_num_threshold = _threshold
        self.logger.info('HP_THRESHOLD is: %d' % self.config_num_threshold)
Example #21
0
 def test_illegal_input(self):
     """
     Testing illegal input in smbo
     """
     cs = ConfigurationSpace()
     cs.add_hyperparameter(UniformFloatHyperparameter('test', 1, 10, 5))
     scen = Scenario({'run_obj': 'quality', 'cs': cs})

     # Recorded runs but no incumbent to restore -> must fail.
     stats = Stats(scen)
     stats.ta_runs = 10
     smac = SMAC(scen, stats=stats, rng=np.random.RandomState(42))
     self.assertRaises(ValueError, smac.optimize)

     # An incumbent to restore but no recorded runs -> must fail too.
     smac = SMAC(scen, restore_incumbent=cs.get_default_configuration(),
                 rng=np.random.RandomState(42))
     self.assertRaises(ValueError, smac.optimize)
Example #22
0
def branin(n_eval):
    """Run SMAC on a discretized Branin benchmark.

    Parameters
    ----------
    n_eval : int
        Evaluation budget (SMAC runcount-limit).

    Returns
    -------
    The optimum as computed by ``evaluations_from_smac``.
    """
    evaluator = Branin()
    # Unique, timestamped output tag so concurrent runs do not collide.
    name_tag = '_'.join(
        ['branin', datetime.now().strftime("%Y-%m-%d-%H:%M:%S:%f")])
    cs = ConfigurationSpace()
    # One integer variable per dimension: 'x01', 'x02', ...
    # NOTE(review): default_value=25 may lie outside [0, n_vertices[i]-1]
    # for small vertex counts — confirm against Branin's discretization.
    for i in range(len(evaluator.n_vertices)):
        car_var = UniformIntegerHyperparameter('x' + str(i + 1).zfill(2),
                                               0,
                                               int(evaluator.n_vertices[i]) -
                                               1,
                                               default_value=25)
        cs.add_hyperparameter(car_var)

    # Warm-start SMAC with the evaluator's suggested initial points.
    init_points_numpy = evaluator.suggested_init.long().numpy()
    init_points = []
    for i in range(init_points_numpy.shape[0]):
        init_points.append(
            Configuration(
                cs, {
                    'x' + str(j + 1).zfill(2): int(init_points_numpy[i][j])
                    for j in range(len(evaluator.n_vertices))
                }))

    def evaluate(x):
        # Convert a Configuration back into the evaluator's tensor format.
        x_tensor = torch.LongTensor([
            int(x['x' + str(j + 1).zfill(2)])
            for j in range(len(evaluator.n_vertices))
        ])
        return evaluator.evaluate(x_tensor).item()

    print('Began    at ' + datetime.now().strftime("%H:%M:%S"))
    scenario = Scenario({
        "run_obj": "quality",
        "runcount-limit": n_eval,
        "cs": cs,
        "deterministic": "true",
        'output_dir': os.path.join(EXP_DIR, name_tag)
    })
    smac = SMAC(scenario=scenario,
                tae_runner=evaluate,
                initial_configurations=init_points)
    smac.optimize()

    evaluations, optimum = evaluations_from_smac(smac)
    print('Finished at ' + datetime.now().strftime("%H:%M:%S"))
    return optimum
Example #23
0
        def opt_rosenbrock():
            """Optimize the 2-d Rosenbrock function with SMAC."""
            cs = ConfigurationSpace()
            cs.add_hyperparameters([
                UniformFloatHyperparameter("x1", -5, 5, default_value=-3),
                UniformFloatHyperparameter("x2", -5, 5, default_value=-4),
            ])

            scenario = Scenario({"run_obj": "quality",  # we optimize quality (alternatively runtime)
                                 "runcount-limit": 50,  # maximum function evaluations
                                 "cs": cs,  # configuration space
                                 "deterministic": "true",
                                 "intensification_percentage": 0.000000001
                                 })

            smac = SMAC(scenario=scenario, rng=np.random.RandomState(42),
                        tae_runner=rosenbrock_2d)
            return smac.optimize()
Example #24
0
def pest_control(n_eval, random_seed):
    """Run SMAC on the pest-control benchmark and return the optimum found.

    Builds a search space of PESTCONTROL_N_STAGES categorical variables
    ('x01'..'xNN', each with PESTCONTROL_N_CHOICE string-valued choices),
    warm-starts SMAC with 20 sampled initial configurations, optimizes for
    *n_eval* evaluations, and returns the optimum reported by
    ``evaluations_from_smac``.
    """
    evaluator = PestControl(random_seed)
    name_tag = 'pestcontrol_' + datetime.now().strftime("%Y-%m-%d-%H:%M:%S:%f")

    # One categorical variable per pest-control stage.
    cs = ConfigurationSpace()
    for stage in range(PESTCONTROL_N_STAGES):
        cs.add_hyperparameter(CategoricalHyperparameter(
            'x' + str(stage + 1).zfill(2),
            [str(choice) for choice in range(PESTCONTROL_N_CHOICE)],
            default_value='0'))

    # Warm-start configurations, sampled up front as an integer matrix.
    init_matrix = sample_init_points([PESTCONTROL_N_CHOICE] *
                                     PESTCONTROL_N_STAGES, 20,
                                     random_seed).long().numpy()
    init_points = [
        Configuration(cs, {
            'x' + str(j + 1).zfill(2): str(row[j])
            for j in range(PESTCONTROL_N_STAGES)
        })
        for row in init_matrix
    ]

    def evaluate(x):
        # Translate the SMAC configuration back into the evaluator's
        # LongTensor representation and return a plain Python number.
        values = [int(x['x' + str(j + 1).zfill(2)])
                  for j in range(PESTCONTROL_N_STAGES)]
        return evaluator.evaluate(torch.LongTensor(values)).item()

    print('Began    at ' + datetime.now().strftime("%H:%M:%S"))
    scenario = Scenario({
        "run_obj": "quality",
        "runcount-limit": n_eval,
        "cs": cs,
        "deterministic": "true",
        'output_dir': os.path.join(EXP_DIR, name_tag)
    })
    smac = SMAC(scenario=scenario,
                tae_runner=evaluate,
                initial_configurations=init_points)
    smac.optimize()

    evaluations, optimum = evaluations_from_smac(smac)
    print('Finished at ' + datetime.now().strftime("%H:%M:%S"))
    return optimum
Example #25
0
 def validate_incs(self, incs: np.ndarray):
     """Validate the given incumbent configurations on the validation set.

     Builds a fresh SMAC instance over ``self.scenario`` and re-evaluates
     each configuration once per instance in ``self.val_set`` (real runs,
     no EPM), then returns the mean costs derived from the resulting
     runhistory via ``self._get_mean_costs``.
     """
     validator = SMAC(scenario=self.scenario,
                      tae_runner=self.tae,
                      rng=self.rng,
                      run_id=MAXINT,
                      **self.kwargs)
     self.logger.info('*' * 120)
     self.logger.info('Validating')
     validated_rh = validator.validate(config_mode=incs,
                                       instance_mode=self.val_set,
                                       repetitions=1,
                                       use_epm=False,
                                       n_jobs=self.n_optimizers)
     return self._get_mean_costs(incs, validated_rh)
Example #26
0
 def test_rng(self):
     """SMAC must accept None, int, or RandomState as rng and reject others."""
     # rng=None: a fresh RandomState and some integer run id are created.
     smbo = SMAC(self.scenario, rng=None).solver
     self.assertIsInstance(smbo.rng, np.random.RandomState)
     self.assertIsInstance(smbo.num_run, int)
     # rng=int: the int is used as num_run and seeds the RandomState.
     smbo = SMAC(self.scenario, rng=1).solver
     rng = np.random.RandomState(1)
     self.assertEqual(smbo.num_run, 1)
     self.assertIsInstance(smbo.rng, np.random.RandomState)
     # rng=RandomState: the object is stored as-is.
     smbo = SMAC(self.scenario, rng=rng).solver
     self.assertIsInstance(smbo.num_run, int)
     self.assertIs(smbo.rng, rng)
     # Any other type raises TypeError.  Note: assertRaisesRegex replaces
     # the assertRaisesRegexp alias, which was deprecated in Python 3.2
     # and removed in Python 3.12.
     self.assertRaisesRegex(TypeError,
                            "Unknown type <(class|type) 'str'> for argument "
                            'rng. Only accepts None, int or '
                            'np.random.RandomState',
                            SMAC, self.scenario, rng='BLA')
Example #27
0
def contamination(n_eval, lamda, random_seed_pair):
    """Run SMAC on the contamination-control benchmark; return the optimum.

    The search space has CONTAMINATION_N_STAGES binary categorical
    variables ('x01'..'xNN' with choices '0'/'1').  SMAC is warm-started
    from the evaluator's suggested initial points and run for *n_eval*
    evaluations.
    """
    evaluator = Contamination(lamda, random_seed_pair)
    name_tag = '_'.join([
        'contamination', ('%.2E' % lamda),
        datetime.now().strftime("%Y-%m-%d-%H:%M:%S:%f")
    ])

    # Binary categorical variable per stage.
    cs = ConfigurationSpace()
    for stage in range(CONTAMINATION_N_STAGES):
        cs.add_hyperparameter(
            CategoricalHyperparameter('x' + str(stage + 1).zfill(2),
                                      [str(choice) for choice in range(2)],
                                      default_value='0'))

    # Warm-start configurations from the evaluator's suggested points.
    init_matrix = evaluator.suggested_init.long().numpy()
    init_points = [
        Configuration(cs, {
            'x' + str(j + 1).zfill(2): str(row[j])
            for j in range(CONTAMINATION_N_STAGES)
        })
        for row in init_matrix
    ]

    def evaluate(x):
        # Translate the SMAC configuration into the evaluator's tensor form.
        values = [int(x['x' + str(j + 1).zfill(2)])
                  for j in range(CONTAMINATION_N_STAGES)]
        return evaluator.evaluate(torch.LongTensor(values)).item()

    print('Began    at ' + datetime.now().strftime("%H:%M:%S"))
    scenario = Scenario({
        "run_obj": "quality",
        "runcount-limit": n_eval,
        "cs": cs,
        "deterministic": "true",
        'output_dir': os.path.join(EXP_DIR, name_tag)
    })
    smac = SMAC(scenario=scenario,
                tae_runner=evaluate,
                initial_configurations=init_points)
    smac.optimize()

    evaluations, optimum = evaluations_from_smac(smac)
    print('Finished at ' + datetime.now().strftime("%H:%M:%S"))
    return optimum
Example #28
0
    def dont_test_smac_choice(self):
        """Smoke-test SMAC optimization over a lale pipeline with choices.

        Builds a planned pipeline containing algorithmic choices, converts
        it to a SMAC configuration space via ``get_smac_space``, and runs a
        one-evaluation optimization.  (The 'dont_' prefix keeps the test
        runner from collecting it.)
        """
        # Removed unused imports: sklearn.svm, sklearn.datasets,
        # cross_val_score, and ExecuteTAFuncDict were never referenced.
        import numpy as np

        # Import ConfigSpace and different types of parameters
        from smac.configspace import ConfigurationSpace

        # Import SMAC-utilities
        from smac.scenario.scenario import Scenario
        from smac.facade.smac_facade import SMAC

        # Encoder -> one of three transforms -> one of two classifiers.
        tfm = PCA() | Nystroem() | NoOp()
        planned_pipeline1 = (
            OneHotEncoder(handle_unknown='ignore', sparse=False)
            | NoOp()) >> tfm >> (LogisticRegression() | KNeighborsClassifier())

        cs: ConfigurationSpace = get_smac_space(planned_pipeline1,
                                                lale_num_grids=5)

        # Scenario object
        scenario = Scenario({
            "run_obj":
            "quality",  # we optimize quality (alternatively runtime)
            "runcount-limit": 1,  # maximum function evaluations
            "cs": cs,  # configuration space
            "deterministic": "true"
        })

        # Optimize, using a SMAC-object
        tae = test_iris_fmin_tae(planned_pipeline1, num_folds=2)
        print(
            "Optimizing! Depending on your machine, this might take a few minutes."
        )
        smac = SMAC(scenario=scenario,
                    rng=np.random.RandomState(42),
                    tae_runner=tae)

        incumbent = smac.optimize()

        inc_value = tae(incumbent)

        print("Optimized Value: %.2f" % (inc_value))
Example #29
0
 def get_smbo(intensification_perc):
     """Build an SMBO solver configured with the given intensification percentage."""
     scenario = Scenario({
         'cs': test_helpers.get_branin_config_space(),
         'run_obj': 'quality',
         'output_dir': '',
         'intensification_percentage': intensification_perc,
     })
     smac = SMAC(scenario, tae_runner=target, rng=1)
     return smac.solver
Example #30
0
 def test_abort_on_initial_design(self, patch):
     """A crash on the very first run must abort when abort_on_first_run_crash is set."""
     def target(x):
         return 5

     # The patched executor raises as soon as SMBO issues its first run.
     patch.side_effect = FirstRunCrashedException()
     scenario = Scenario({'cs': test_helpers.get_branin_config_space(),
                          'run_obj': 'quality', 'output_dir': '',
                          'abort_on_first_run_crash': 1})
     solver = SMAC(scenario, tae_runner=target, rng=1).solver
     self.assertRaises(FirstRunCrashedException, solver.run)
    b = hpobench.SinTwo()
elif benchmark == "bohachevsky":
    b = hpobench.Bohachevsky()
elif benchmark == "levy":
    b = hpobench.Levy()

# Benchmark metadata (presumably name/bounds/optimum; depends on the
# HPOlib benchmark selected above — confirm against its implementation).
info = b.get_meta_information()

# SMAC scenario for the chosen benchmark: quality objective, a budget of
# n_iters evaluations, random initial incumbent, no output directory.
scenario = Scenario({"run_obj": "quality",
                     "runcount-limit": n_iters,
                     "cs": b.get_configuration_space(),
                     "deterministic": "true",
                     "initial_incumbent": "RANDOM",
                     "output_dir": ""})

smac = SMAC(scenario=scenario, tae_runner=b)
smac.optimize()

# Reconstruct the incumbent after each of the n_iters evaluations from
# the optimizer's trajectory (t[0] is skipped; idx starts at 1).
rh = smac.runhistory
incs = []
inc = None
idx = 1
t = smac.get_trajectory()
for i in range(n_iters):

    # When run i completes the run count of the next trajectory entry,
    # that entry's incumbent becomes the current one.
    if idx < len(t) and i == t[idx].ta_runs - 1:
        inc = t[idx].incumbent
        idx += 1
    incs.append(inc)

# Offline Evaluation