Example #1
    def foo(self):
        self.bandit = bandit = Bandit(self.expr)
        self.algo = algo = Random(bandit)
        if hasattr(self, 'n_randints'):
            n_randints = len([
                p for p in algo.vh.params.values()
                if p.name == 'randint'])
            assert n_randints == self.n_randints

        self.trials = Trials()
        self.experiment = Experiment(self.trials, algo, asynchronous=False)  # 'async' in hyperopt's old Python 2 API
        self.experiment.run(5)
        self.output = output = []
        for trial in self.trials._trials:
            print('')
            tmp = []
            for nid in trial['misc']['idxs']:
                thing = (
                        nid,
                        trial['misc']['idxs'][nid],
                        trial['misc']['vals'][nid])
                print(thing)
                tmp.append(thing)
            tmp.sort()
            output.append(tmp)
        print(repr(output))
        print(repr(self.wanted))
        # -- think of a more robust way to test these things
        #    or, if the sampling style is to be nailed down,
        #    put it in and be sure of it.
        raise nose.SkipTest()
        assert output == self.wanted
Example #2
def nu_simple_fmin(hpo_project_key, objective, rseed=1337,
                   full_model_string=None, notebook_name=None, verbose=True,
                   stack=3, keep_temp=False, data_args=None):
    # fetch the HPO project info from the DB
    db_info = asyncio.run(Requests().get_action(parameter1=hpo_project_key, parameter2="null", url=hpo_url))[0]
    hpo_project_id = db_info["hpoProjectId"]
    algo, space = __transform_db_to_function(method=db_info["method"], config=db_info["config"])

    trials = Trials()
    best = fmin(objective, space, algo=algo, max_evals=50, trials=trials,
                rstate=np.random.RandomState(rseed), return_argmin=True)
    importances = calculate_importance(trials)
 
    # save the results via the API
    all_info = dict()
    
    all_info["best_result"] = trials.best_trial['result'] 
    all_info["best_hp"] = best
    all_info["trial_result"] = trials.results
    all_info["trial_hp"] = trials.vals
    # convert numpy int64 values so the payload is JSON-serializable
    all_info = __to_int(all_info)

    all_info["hpo_project_key"] = hpo_project_key

    # keep only the first score from each per-parameter importance dict
    tmp_importance = []
    for importance in importances:
        for scores in importance.values():
            for score in scores.values():
                tmp_importance.append(score)
                break
    all_info["importances"] = tmp_importance

    asyncio.run(Requests().post_action(request_datas=all_info, url=hpo_url))

    return best, trials
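
The __to_int helper used above is not shown here; a minimal sketch of what such a converter might look like, assuming its job is to replace numpy integer scalars that json cannot serialize (the real helper may differ):

import numpy as np

def __to_int(obj):
    # Recursively replace numpy integer scalars with plain Python ints,
    # since json serialization raises TypeError on np.int64 values.
    if isinstance(obj, dict):
        return {k: __to_int(v) for k, v in obj.items()}
    if isinstance(obj, list):
        return [__to_int(v) for v in obj]
    if isinstance(obj, np.integer):
        return int(obj)
    return obj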
Example #3
def test_failure():

    #XXX also test the Bandit.exceptions mechanism that actually catches them
    class BanditE(Exception):
        pass

    class DummyBandit(Bandit):
        param_gen = {"loss": 10}
        def __init__(self):
            super(DummyBandit, self).__init__(self.param_gen)

        def evaluate(self, config, ctrl):
            raise BanditE()

    trials = Trials()
    bandit_algo = Random(DummyBandit())
    exp = Experiment(trials, bandit_algo, asynchronous=False)  # 'async' in hyperopt's old Python 2 API

    exp.run(1)
    trials.refresh()
    assert len(trials) == 0
    assert len(trials._dynamic_trials) == 1
    assert trials._dynamic_trials[0]['state'] == JOB_STATE_ERROR
    assert trials._dynamic_trials[0]['misc']['error'] is not None

    exp.catch_bandit_exceptions = False
    nose.tools.assert_raises(BanditE, exp.run, 1)
    trials.refresh()
    assert len(trials) == 0
    assert len(trials._dynamic_trials) == 2
    assert trials._dynamic_trials[1]['state'] == JOB_STATE_ERROR
    assert trials._dynamic_trials[1]['misc']['error'] is not None
Example #4
def main():
    search_space = {}
    search_space["problem_size"] = hp.quniform("problem_size", 1, 64, 1)
    search_space["num_ranks"] = hp.quniform("num_ranks", 1, 8, 1)

    trials = Trials()

    best = fmin(fn=run_hpcg,
                space=search_space,
                algo=tpe.suggest,
                max_evals=50,
                trials=trials)

    print("------------------------------------------------")

    for trial in trials:
        print("{1}x({2}, {3}, {4}) = {0} GF".format(-1.0*trial["result"]["loss"],
                                                    trial["misc"]["vals"]["num_ranks"][0],
                                                    trial["misc"]["vals"]["problem_size"][0],
                                                    trial["misc"]["vals"]["problem_size"][0],
                                                    trial["misc"]["vals"]["problem_size"][0]))

    print("Saving pkl....")
    with open('trials.pkl', 'wb') as output:
        pickle.dump(trials, output)
    print("... done")
Example #5
    def __init__(self,
                 est,
                 X,
                 y,
                 params=None,
                 iters=500,
                 time_to_search=None,
                 cv=5,
                 cv_times=1,
                 scorer="f1",
                 verbose=1,
                 random=False,
                 foldtype="Kfold"):

        # check and get hyperopt-style parameters
        if params is None and not hasattr(est, "param_grid"):
            raise ValueError("No parameters supplied")
        elif params is None:
            params = est.param_grid

        self.params = params
        self.best_params = None
        self.__space = create_hyper(params)
        self.__initparams = est.get_params()
        # set hyper settings
        self.__algo = tpe.suggest
        self.__trial = Trials()

        # set run settings
        self.verbose = verbose
        self.iters = iters
        self.cv = cv
        self.cv_times = cv_times
        self.scorer = scorer
        self.__run = 0

        self.est = est

        self.stats = {}
        self.__init_score = cross_validation(self.est,
                                             X,
                                             y,
                                             cv=self.cv,
                                             cv_times=self.cv_times,
                                             scorer=self.scorer)
        self.best_score = self.__init_score

        self.__X = X
        self.__y = y

        self.__start_now = None

        self.__runok = True
        self.time_to_search = time_to_search

        self.random = random
        self.foldtype = foldtype
Example #6
    def test_seeding(self):
        # -- assert that the seeding works a particular way

        domain = coin_flip()
        docs = rand.suggest(list(range(10)), domain, Trials(), seed=123)
        trials = trials_from_docs(docs)
        idxs, vals = miscs_to_idxs_vals(trials.miscs)

        # Passes Nov 8 / 2013
        self.assertEqual(list(idxs["flip"]), list(range(10)))
        self.assertEqual(list(vals["flip"]), [0, 1, 0, 0, 0, 0, 0, 1, 1, 0])
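
Several of these tests build their search space with a coin_flip helper; a minimal sketch, assuming it wraps a single binary hp.randint variable labeled 'flip' (the upstream helper may differ in detail):

from hyperopt import hp
from hyperopt.base import Domain

def coin_flip():
    # One binary search variable; the sampled value doubles as the loss.
    return Domain(lambda config: config['flip'],
                  {'flip': hp.randint('flip', 2)})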
Example #7
    def test_suggest_1(self):
        print('EXPR', self.bandit.expr)
        docs = self.algo.suggest([0], Trials())
        assert len(docs) == 1
        print('DOCS', docs)
        # -- assert validity of docs
        trials = trials_from_docs(docs)
        print('TRIALS', trials)
        assert docs[0]['misc']['idxs']['flip'] == [0]
        idxs, vals = miscs_to_idxs_vals(trials.miscs)
        assert idxs['flip'] == [0]
Example #8
    def test_arbitrary_range(self):
        new_ids = [-2, 0, 7, 'a', '007']
        docs = self.algo.suggest(new_ids, Trials())
        # -- assert validity of docs
        trials = trials_from_docs(docs)
        idxs, vals = miscs_to_idxs_vals(trials.miscs)
        assert len(docs) == 5
        assert len(idxs) == 1
        assert len(vals) == 1
        print(vals)
        assert idxs['flip'] == new_ids
        assert np.all(vals['flip'] == [0, 1, 0, 1, 1])
Example #9
    def test_suggest_5(self):
        docs = self.algo.suggest(list(range(5)), Trials())
        print(docs)
        assert len(docs) == 5
        # -- assert validity of docs
        trials = trials_from_docs(docs)
        idxs, vals = miscs_to_idxs_vals(trials.miscs)
        print(idxs)
        print(vals)
        assert len(idxs) == 1
        assert len(vals) == 1
        assert idxs['flip'] == list(range(5))
        assert np.all(vals['flip'] == [1, 1, 0, 1, 0])
Example #10
def main():

    space = {
        'ltr': hp.choice('ltr', [True]),
        'shuffle': hp.choice('shuffle', [False]),
        'num_leaves': hp.choice('num_leaves', list(np.arange(8, 256, 2, dtype=int))),
        'max_depth': hp.choice('max_depth', list(np.arange(4, 64, 2, dtype=int))),
        'max_bin': hp.choice('max_bin', list(np.arange(255, 255 * 4, 5, dtype=int))),
        'min_data_in_leaf': hp.choice('min_data_in_leaf', list(np.arange(5, 100, 5, dtype=int))),
        'learning_rate': hp.uniform('learning_rate', 0.01, 0.3),
        'bagging_fraction': hp.uniform('bagging_fraction', 0.2, 1.0),
        'feature_fraction': hp.uniform('feature_fraction', 0.2, 1.0),
        'early_stopping': hp.uniform('early_stopping', 100, 1000),
    }

    trials_step = 1  # how many additional trials to do after loading saved trials. 1 = save after iteration
    max_trials = 1  # initial max_trials. put something small to not have to wait

    try:  # try to load an already saved trials object, and increase the max
        with open(BASE_PATH + SET + TRAILKEY + '.hyperopt', "rb") as f:
            trials = pickle.load(f)
        print("Found saved Trials! Loading...")
        max_trials = len(trials.trials) + trials_step
        print("Rerunning from {} trials to {} (+{}) trials".format(
            len(trials.trials), max_trials, trials_step))
    except FileNotFoundError:  # create a new trials object and start searching
        trials = Trials()

    best = fmin(fn=objective,
                space=space,
                algo=tpe.suggest,
                trials=trials,
                max_evals=max_trials)

    print("Best:", best)
    print("Num:", max_trials)

    # save the trials object
    with open(BASE_PATH + SET + TRAILKEY + ".hyperopt", "wb") as f:
        pickle.dump(trials, f)
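
Because the script reloads and re-saves the Trials pickle, the search is resumable; a hypothetical driver that grows the saved object by trials_step evaluations per call could be as simple as:

if __name__ == '__main__':
    # Each call loads the pickle if present, runs trials_step more
    # evaluations, and saves again, so an interrupted search loses at
    # most the iteration in flight.
    for _ in range(100):
        main()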
Example #11
    def test_arbitrary_range(self, N=10):
        assert N <= 10
        new_ids = [-2, 0, 7, 'a', '007', 66, 'a3', '899', 23, 2333][:N]
        docs = self.algo.suggest(new_ids, Trials())
        # -- assert validity of docs
        trials = trials_from_docs(docs)
        idxs, vals = miscs_to_idxs_vals(trials.miscs)
        assert len(docs) == N
        assert len(idxs) == 1
        assert len(vals) == 1
        print(vals)
        assert idxs['flip'] == new_ids

        # -- assert that the random seed matches that of Jan 8/2013
        assert np.all(vals['flip'] == [0, 1, 0, 0, 0, 0, 0, 1, 1, 0][:N])
Example #12
    def test_suggest_N(self, N=10):
        assert N <= 10
        docs = self.algo.suggest(list(range(N)), Trials())
        print('docs', docs)
        assert len(docs) == N
        # -- assert validity of docs
        trials = trials_from_docs(docs)
        idxs, vals = miscs_to_idxs_vals(trials.miscs)
        print('idxs', idxs)
        print('vals', vals)
        assert len(idxs) == 1
        assert len(vals) == 1
        assert idxs['flip'] == list(range(N))
        # -- seed-specific expected draws (valid for any N <= 10)
        assert np.all(vals['flip'] == [0, 1, 0, 0, 0, 0, 0, 1, 1, 0][:N])
Example #13
    def setup_backend(self, params, algo=tpe.suggest, rstate=None, show_progressbar=False, **options):
        """Special method to initialize the backend from params."""
        if rstate is None:
            try:
                rstate = np.random.default_rng()
            except AttributeError:
                rstate = np.random.RandomState()
        self.params = params

        space = as_apply({name: create_space(name, func, *args)
                          for name, (func, args, kwargs) in sorted_items(params)})

        domain = Domain(self.set_current_values, space)

        self.trials = Trials()

        self.fmin_iter = FMinIter(algo, domain, self.trials, rstate, show_progressbar=show_progressbar, **options)
Example #14
    def __init__(self, api_config):
        """Build wrapper class to use an optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)

        self.space_x = JointSpace(api_config)
        self.bounds = self.space_x.get_bounds()
        self.create_opt_prob()  # Sets up the optimization problem (needs self.bounds)
        self.max_evals = np.iinfo(np.int32).max  # NOTE: Largest possible int
        self.turbo_batch_size = None
        self.pysot_batch_size = None
        self.history = []
        self.proposals = []
        self.lb, self.ub = self.bounds[:, 0], self.bounds[:, 1]
        self.dim = len(self.bounds)

        self.turbo = Turbo1(
            f=None,
            lb=self.bounds[:, 0],
            ub=self.bounds[:, 1],
            n_init=2 * self.dim + 1,
            max_evals=self.max_evals,
            batch_size=4,  # We need to update this later
            verbose=False,
        )

        # hyperopt
        self.random = np_random

        space, self.round_to_values = tuSOTOptimizer.get_hyperopt_dimensions(
            api_config)
        self.domain = Domain(dummy_f, space, pass_expr_memo_ctrl=None)
        self.trials = Trials()

        # Some book keeping like opentuner wrapper
        self.trial_id_lookup = {}

        # Store just for data validation
        self.param_set_chk = frozenset(api_config.keys())
Example #15
    def __init__(self, api_config, random=np_random):
        """Build wrapper class to use hyperopt optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)
        self.random = random

        space, self.round_to_values = HyperoptOptimizer.get_hyperopt_dimensions(api_config)
        self.domain = Domain(dummy_f, space, pass_expr_memo_ctrl=None)
        self.trials = Trials()

        # Some book keeping like opentuner wrapper
        self.trial_id_lookup = {}

        # Store just for data validation
        self.param_set_chk = frozenset(api_config.keys())
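
Both optimizer wrappers hand a dummy_f to Domain; a plausible stand-in, assuming the wrapper drives suggest/observe itself so the objective is never evaluated through hyperopt:

def dummy_f(x):
    # Never called: the wrapper feeds suggestions and observations into
    # the Trials object manually, so the Domain only needs the search space.
    raise RuntimeError('dummy_f should never be called')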
Example #16
    def __init__(self, examples, params, algo=tpe.suggest, rstate=None, show_progressbar=False, **options):
        # default the random state per call; a RandomState in the signature
        # would be created once at import time and shared across instances
        if rstate is None:
            rstate = np.random.RandomState()
        self.init_fallback_backend()

        if not examples:
            self.current_values = {}
            return

        space = as_apply({name: create_space(name, func, *args)
                          for name, (func, args, kwargs) in sorted_items(params)})

        domain = Domain(self.set_current_values, space)

        trial_list = examples_to_trials(examples, params)

        trials = Trials()
        trials.insert_trial_docs(trial_list)

        # run one iteration of hyperparameter optimization, with values saved
        # to the self.set_current_values callback passed to Domain
        next(FMinIter(algo, domain, trials, rstate, show_progressbar=show_progressbar, **options))

        assert self.current_values is not None, self.current_values
        assert set(self.current_values.keys()) == set(params), self.current_values
Example #17
    def idxs_vals_from_ids(self, ids, seed):
        docs = self.suggest(ids, self.domain, Trials(), seed)
        trials = trials_from_docs(docs)
        idxs, vals = miscs_to_idxs_vals(trials.miscs)
        return idxs, vals
Example #18
    def setUp(self):
        self.trials = Trials()
Example #19
    def setUp(self):
        self.bandit = coin_flip()
        self.algo = RandomStop(5, self.bandit)
        self.trials = Trials()
        self.experiment = Experiment(self.trials, self.algo, asynchronous=False)  # 'async' in hyperopt's old Python 2 API