def test_to_ndarray_name_last_pos():
    """Check that `HyperparameterRanges_CS(name_last_pos=...)` moves the
    named hyperparameter to the final ndarray position, by comparing its
    encoding against a `HyperparameterRanges_Impl` built with that
    hyperparameter placed last explicitly."""
    np.random.seed(123456)
    random_state = np.random.RandomState(123456)

    config_space = CS.ConfigurationSpace()
    config_space.add_hyperparameters([
        CSH.UniformFloatHyperparameter('a', lower=0., upper=1.),
        CSH.UniformIntegerHyperparameter('b', lower=2, upper=3),
        CSH.CategoricalHyperparameter('c', choices=('1', '2', '3')),
        CSH.UniformIntegerHyperparameter('d', lower=2, upper=3),
        CSH.CategoricalHyperparameter('e', choices=('1', '2'))
    ])
    hp_a = HyperparameterRangeContinuous('a',
                                         lower_bound=0.,
                                         upper_bound=1.,
                                         scaling=LinearScaling())
    hp_b = HyperparameterRangeInteger('b',
                                      lower_bound=2,
                                      upper_bound=3,
                                      scaling=LinearScaling())
    hp_c = HyperparameterRangeCategorical('c', choices=('1', '2', '3'))
    hp_d = HyperparameterRangeInteger('d',
                                      lower_bound=2,
                                      upper_bound=3,
                                      scaling=LinearScaling())
    hp_e = HyperparameterRangeCategorical('e', choices=('1', '2'))

    # For each choice of name_last_pos, the expected reference ordering with
    # that hyperparameter moved to the end (insertion order is preserved).
    expected_orders = {
        'a': [hp_b, hp_c, hp_d, hp_e, hp_a],
        'c': [hp_a, hp_b, hp_d, hp_e, hp_c],
        'd': [hp_a, hp_b, hp_c, hp_e, hp_d],
        'e': [hp_a, hp_b, hp_c, hp_d, hp_e],
    }
    for name_last_pos, ordered_hps in expected_orders.items():
        hp_ranges_cs = HyperparameterRanges_CS(config_space,
                                               name_last_pos=name_last_pos)
        hp_ranges = HyperparameterRanges_Impl(*ordered_hps)
        names = [hp.name for hp in hp_ranges.hp_ranges]
        config_cs = hp_ranges_cs.random_candidate(random_state)
        _config = config_cs.get_dictionary()
        config = (_config[name] for name in names)
        ndarr_cs = hp_ranges_cs.to_ndarray(config_cs)
        ndarr = hp_ranges.to_ndarray(config)
        assert_allclose(ndarr_cs, ndarr, rtol=1e-4)
# Example #2
def to_input(config_dict: dict,
             hp_ranges: HyperparameterRanges_CS) -> np.ndarray:
    """Encode a plain config dict as an ndarray.

    The dict is first converted to a ConfigSpace configuration over
    ``hp_ranges.config_space``, then encoded via ``hp_ranges.to_ndarray``.
    """
    config_cs = _to_config_cs(hp_ranges.config_space, config_dict)
    return hp_ranges.to_ndarray(config_cs)
# Example #3
def test_resume_hyperband_random():
    """Checkpoint/resume consistency test for HyperbandScheduler + random searcher.

    For each Hyperband type ('stopping', 'promotion'), two experiments are run:
      1. Two phases: num_trials1 trials with checkpointing, then a second
         scheduler resumed from that checkpoint up to num_trials2 trials.
      2. One phase: num_trials2 trials in a single run, no checkpoint.
    With the same fixed random seed, both must propose the same configs;
    this is asserted via assert_same_config_history at the end.
    """
    random_seed = 623478423
    num_trials1 = 20
    num_trials2 = 40
    search_options = {'random_seed': random_seed}
    scheduler_options = {
        'reward_attr': 'accuracy',
        'time_attr': 'epoch',
        'max_t': 9,
        'grace_period': 1,
        'reduction_factor': 3,
        'brackets': 1
    }

    # Note: The difficulty with HB promotion is that when configs are
    # promoted, they appear more than once in config_history (which is
    # indexed by task_id). This can lead to differences between the two
    # and one phase runs. What matters is that the same configs are
    # proposed.
    for hp_type in ['stopping', 'promotion']:
        exper_type = 'hyperband_{}_random'.format(hp_type)
        checkpoint_fname = 'tests/unittests/checkpoint_{}.ag'.format(
            exper_type)
        # First experiment: Two phases, with resume
        # Capture the task counter before phase 1 so config_history entries
        # for this experiment can be located later.
        task_id = Task.TASK_ID.value
        scheduler1 = ag.scheduler.HyperbandScheduler(
            branin_epochs_fn,
            searcher='random',
            search_options=search_options,
            checkpoint=checkpoint_fname,
            num_trials=num_trials1,
            type=hp_type,
            **scheduler_options)
        logger.info(
            "Running [{} - two phases]: num_trials={} and checkpointing".
            format(exper_type, num_trials1))
        scheduler1.run()
        scheduler1.join_jobs()
        # Phase 2: resume from the checkpoint written by scheduler1 and
        # continue until num_trials2 total trials.
        scheduler2 = ag.scheduler.HyperbandScheduler(
            branin_epochs_fn,
            searcher='random',
            search_options=search_options,
            checkpoint=checkpoint_fname,
            num_trials=num_trials2,
            type=hp_type,
            resume=True,
            **scheduler_options)
        logger.info(
            "Running [{} - two phases]: Resume from checkpoint, num_trials={}".
            format(exper_type, num_trials2))
        scheduler2.run()
        scheduler2.join_jobs()
        searcher = scheduler2.searcher
        results1 = {
            'task_id': task_id,
            'config_history': scheduler2.config_history,
            'best_reward': searcher.get_best_reward(),
            'best_config': searcher.get_best_config()
        }
        # DEBUG
        #print_config_history(results1['config_history'], task_id)

        # Second experiment: Just one phase
        task_id = Task.TASK_ID.value
        scheduler3 = ag.scheduler.HyperbandScheduler(
            branin_epochs_fn,
            searcher='random',
            search_options=search_options,
            checkpoint=None,
            num_trials=num_trials2,
            type=hp_type,
            **scheduler_options)
        logger.info("Running [{} - one phase]: num_trials={}".format(
            exper_type, num_trials2))
        scheduler3.run()
        scheduler3.join_jobs()
        searcher = scheduler3.searcher
        results2 = {
            'task_id': task_id,
            'config_history': scheduler3.config_history,
            'best_reward': searcher.get_best_reward(),
            'best_config': searcher.get_best_config()
        }
        # DEBUG
        #print_config_history(results2['config_history'], task_id)

        # Both experiments must have proposed the same configs.
        hp_ranges = HyperparameterRanges_CS(branin_epochs_fn.cs)
        assert_same_config_history(results1, results2, num_trials2, hp_ranges)
# Example #4
def test_resume_fifo_random():
    """Checkpoint/resume consistency test for FIFOScheduler + random searcher.

    Runs two experiments with the same fixed seed: (1) a two-phase run,
    num_trials1 trials with checkpointing followed by a resumed run up to
    num_trials2 trials; (2) a single one-phase run of num_trials2 trials.
    Both must produce the same config history.
    """
    random_seed = 623478423
    num_trials1 = 10
    num_trials2 = 20
    exper_type = 'fifo_random'
    checkpoint_fname = 'tests/unittests/checkpoint_{}.ag'.format(exper_type)
    search_options = {'random_seed': random_seed}

    def _make_scheduler(checkpoint, num_trials, **extra_kwargs):
        # One FIFO scheduler over branin_fn with the shared settings.
        return ag.scheduler.FIFOScheduler(branin_fn,
                                          searcher='random',
                                          search_options=search_options,
                                          checkpoint=checkpoint,
                                          num_trials=num_trials,
                                          reward_attr='accuracy',
                                          **extra_kwargs)

    def _run_to_completion(scheduler):
        scheduler.run()
        scheduler.join_jobs()

    def _collect_results(scheduler, task_id):
        # Summary of one experiment, keyed for assert_same_config_history.
        searcher = scheduler.searcher
        return {
            'task_id': task_id,
            'config_history': scheduler.config_history,
            'best_reward': searcher.get_best_reward(),
            'best_config': searcher.get_best_config()
        }

    # First experiment: Two phases, with resume
    task_id = Task.TASK_ID.value
    scheduler1 = _make_scheduler(checkpoint_fname, num_trials1)
    logger.info(
        "Running [{} - two phases]: num_trials={} and checkpointing".format(
            exper_type, num_trials1))
    _run_to_completion(scheduler1)
    scheduler2 = _make_scheduler(checkpoint_fname, num_trials2, resume=True)
    logger.info(
        "Running [{} - two phases]: Resume from checkpoint, num_trials={}".
        format(exper_type, num_trials2))
    _run_to_completion(scheduler2)
    results1 = _collect_results(scheduler2, task_id)

    # Second experiment: Just one phase
    task_id = Task.TASK_ID.value
    scheduler3 = _make_scheduler(None, num_trials2)
    logger.info("Running [{} - one phase]: num_trials={}".format(
        exper_type, num_trials2))
    _run_to_completion(scheduler3)
    results2 = _collect_results(scheduler3, task_id)

    hp_ranges = HyperparameterRanges_CS(branin_fn.cs)
    assert_same_config_history(results1, results2, num_trials2, hp_ranges)
def test_to_ndarray():
    """Compare ndarray encodings from `HyperparameterRanges_CS` against the
    reference `HyperparameterRanges_Impl` on 20 randomly generated
    configuration spaces (mixing categorical, float and int hyperparameters,
    with linear and log scalings) and assert they agree."""
    np.random.seed(123456)
    random_state = np.random.RandomState(123456)
    prob_categ = 0.3

    # Fix: the loop variable was named `iter`, shadowing the builtin.
    for trial in range(20):
        # Create ConfigurationSpace. First trial is all-numerical, second
        # all-categorical, the rest mixed with probability prob_categ.
        num_hps = np.random.randint(low=1, high=20)
        if trial == 0:
            _prob_categ = 0.
        elif trial == 1:
            _prob_categ = 1.
        else:
            _prob_categ = prob_categ
        config_space = CS.ConfigurationSpace()
        ndarray_size = 0
        _hp_ranges = dict()
        for hp_it in range(num_hps):
            name = str(hp_it)
            if np.random.random() < _prob_categ:
                # Categorical: one-hot encoded, so it contributes
                # num_choices columns to the ndarray.
                num_choices = np.random.randint(low=2, high=11)
                choices = tuple(str(i) for i in range(num_choices))
                hp = CSH.CategoricalHyperparameter(name, choices=choices)
                hp2 = HyperparameterRangeCategorical(name, choices)
                ndarray_size += num_choices
            else:
                # Numerical (float or int), one column each; half of these
                # use log scaling.
                ndarray_size += 1
                rand_coin = np.random.random()
                if rand_coin < 0.5:
                    log_scaling = (rand_coin < 0.25)
                    hp = CSH.UniformFloatHyperparameter(name=name,
                                                        lower=0.5,
                                                        upper=5.,
                                                        log=log_scaling)
                    hp2 = HyperparameterRangeContinuous(
                        name,
                        lower_bound=0.5,
                        upper_bound=5.,
                        scaling=LogScaling()
                        if log_scaling else LinearScaling())
                else:
                    log_scaling = (rand_coin < 0.75)
                    hp = CSH.UniformIntegerHyperparameter(name=name,
                                                          lower=2,
                                                          upper=10,
                                                          log=log_scaling)
                    hp2 = HyperparameterRangeInteger(
                        name=name,
                        lower_bound=2,
                        upper_bound=10,
                        scaling=LogScaling()
                        if log_scaling else LinearScaling())
            config_space.add_hyperparameter(hp)
            _hp_ranges[name] = hp2
        hp_ranges_cs = HyperparameterRanges_CS(config_space)
        # Reference ranges in the same order as the ConfigurationSpace.
        hp_ranges = HyperparameterRanges_Impl(
            *[_hp_ranges[x] for x in config_space.get_hyperparameter_names()])
        # Compare ndarrays created by both codes
        for cmp_it in range(5):
            config_cs = hp_ranges_cs.random_candidate(random_state)
            _config = config_cs.get_dictionary()
            config = (_config[name]
                      for name in config_space.get_hyperparameter_names())
            ndarr_cs = hp_ranges_cs.to_ndarray(config_cs)
            ndarr = hp_ranges.to_ndarray(config)
            assert_allclose(ndarr_cs, ndarr, rtol=1e-4)