def set_finetune_space(self, config_file):
        ''' Given the original deep net architecture and a set of pretrained weights
        and biases, define the configuration space to search for finetuning parameters '''

        # we know these fields won't change, so go ahead and set them as
        # defaults now
        model_params = nt.get_model_params(config_file)
        optim_params = nt.get_optim_params(config_file)
        default_finetune_model_params = {k: model_params[k] for k in ('num_hids', 'activs', 'd', 'k')}
        default_finetune_model_params['loss_terms'] = ['cross_entropy']
        default_finetune_optim_params = {k: optim_params[k] for k in ('optim_method', 'optim_type')}

        # define the space of hyperparameters we wish to search over
        search_finetune_model_params = {'l1_reg': hp.choice('l1_reg', [None, hp.loguniform('l1_decay', log(1e-5), log(10))]),
                                        'l2_reg': hp.choice('l2_reg', [None, hp.loguniform('l2_decay', log(1e-5), log(10))])}
        search_finetune_optim_params = {'learn_rate': hp.uniform('learn_rate', 0, 1),
                                        'rho': hp.uniform('rho', 0, 1),
                                        'num_epochs': hp.qloguniform('num_epochs', log(10), log(5e3), 1),
                                        'batch_size': hp.quniform('batch_size', 128, 1024, 1),
                                        'init_method': hp.choice('init_method', ['gauss', 'fan-io']),
                                        'scale_factor': hp.uniform('scale_factor', 0, 1)}

        # combine the default and search parameters into a dictionary to define the
        # full space - this is what will be passed into the objective function
        finetune_model_params = self.merge_default_search(
            default_finetune_model_params, search_finetune_model_params)
        finetune_optim_params = self.merge_default_search(
            default_finetune_optim_params, search_finetune_optim_params)

        finetune_hyperspace = {
            'finetune_model_params': finetune_model_params, 'finetune_optim_params': finetune_optim_params}

        return finetune_hyperspace
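A minimal usage sketch for the space above, assuming a hypothetical objective function and a `tuner` instance exposing this method (both are stand-ins, not part of the original snippet):

from hyperopt import fmin, tpe, Trials

trials = Trials()
best = fmin(fn=objective,                               # hypothetical: trains one finetuning config, returns loss
            space=tuner.set_finetune_space('net.cfg'),  # hypothetical instance and config file
            algo=tpe.suggest,
            max_evals=100,
            trials=trials)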
Example #2
def run_all_dl(csvfile = saving_fp, 
                space = [hp.quniform('h1', 100, 550, 1), 
                        hp.quniform('h2', 100, 550, 1),
                        hp.quniform('h3', 100, 550, 1),
                        #hp.choice('activation', ["RectifierWithDropout", "TanhWithDropout"]),
                        hp.uniform('hdr1', 0.001, 0.3),
                        hp.uniform('hdr2', 0.001, 0.3),
                        hp.uniform('hdr3', 0.001, 0.3),
                        hp.uniform('rho', 0.9, 0.999), 
                        hp.uniform('epsilon', 1e-10, 1e-4)]):
          # maxout works well with dropout (Goodfellow et al 2013), and rectifier has worked well with image recognition (LeCun et al 1998)
          start_save(csvfile = csvfile)
          trials = Trials()
          print "Deep learning..."
          best = fmin(objective,
                      space = space,
                      algo=tpe.suggest,
                      max_evals=evals,
                      trials=trials)
          print(best)
          print(trials.losses())
          with open('output/dlbest.pkl', 'wb') as output:
            pickle.dump(best, output, -1)
          with open('output/dltrials.pkl', 'wb') as output:
            pickle.dump(trials, output, -1)
Example #3
def optimize(max_evals):
    space = (hp.uniform('x', -3.0, 3.0),
             hp.uniform('y', -3.0, 3.0))
    trials = Trials()
    best = fmin(function, space=space, algo=tpe.suggest,
                max_evals=max_evals, trials=trials)
    return best, trials
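The `function` objective is not shown in this snippet; a minimal stand-in consistent with the two-element tuple space might look like this (the quadratic loss is an assumption):

from hyperopt import STATUS_OK

def function(args):
    # args arrives as the (x, y) tuple defined by the space
    x, y = args
    return {'loss': x ** 2 + y ** 2, 'status': STATUS_OK}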
Example #4
def nn_bayes_search(train_fname, test_fname, out_fname_prefix='nn-bayes'):
    exp = ExperimentL1(train_fname=train_fname, test_fname=test_fname)
    param_keys = ['in_size', 'hid_size', 'batch_size', 'in_dropout',
                  'hid_dropout', 'nonlinearity',
                  'updates',
                  'learning_rate',
                  #'l1_reg',
                  #'l2_reg',
                  'num_epochs']
    param_space = {'in_size': exp.train_x.shape[1],
                   'hid_size': hp.quniform('hid', 10, 300, 5),
                   'batch_size': hp.quniform('bsize', 200, 5000, 50),
                   'in_dropout': hp.uniform('in_drop',  0.0, 0.5),
                   'hid_dropout': hp.uniform('hid_drop',  0.0, 0.6),
                   'updates': hp.choice('updates', [nesterov_momentum, adam]),
                   'nonlinearity': hp.choice('nonlinear',  [sigmoid, tanh, rectify]),
                   'learning_rate': hp.uniform('lr', 0.0001, 0.1),

                   #'learning_rate': 0.01,
                   #'l1_reg': hp.uniform('l1_reg', 0.0, 0.000001),
                   #'l2_reg': hp.uniform('l2_reg', 0.0, 0.000001),
                   'num_epochs': hp.quniform('epochs', 200, 1000, 50),
                   }

    bs = param_search.BayesSearch(LasagneModel, exp, model_param_keys=param_keys, model_param_space=param_space,
                     cv_out=out_fname_prefix+'-scores.pkl',
                     cv_pred_out=out_fname_prefix+'-preds.pkl',
                     refit_pred_out=out_fname_prefix+'-refit-preds.pkl',
                     dump_round=1, use_lower=0, n_folds=5)
    bs.search_by_cv(max_evals=301)
    param_search.write_cv_res_csv(bs.cv_out, bs.cv_out.replace('.pkl', '.csv'))
Example #5
def test_quadratic5_tpe():
    trials = Trials()

    # XXX: ideally this file would be deleted right after it is written,
    #      rather than removed up front here
    try:
        os.remove('spearmint.GPEIChooser.pkl')
    except OSError:
        pass

    chooser=GPEIChooser(expt_dir=os.getcwd())

    suggest = partial(hpspearmint.suggest,
                      chooser=chooser,
                      grid_size=1000,
                      grid_seed=0,
                      expt_dir=None,
                      verbose=1)

    argmin = fmin(
            fn=lambda xs: sum((x - 3) ** 2 for x in xs),
            space=[
                hp.uniform('x0', -5, 5),
                hp.uniform('x1', -5, 5),
                hp.uniform('x2', -5, 5),
                hp.uniform('x3', -5, 5),
                hp.uniform('x4', -5, 5),
                  ],
            algo=suggest,
            max_evals=50,
            trials=trials)

    print(argmin)
    assert len(trials) == 50, len(trials)
    assert abs(argmin['x0'] - 3.0) < .25, argmin
Example #6
def xgb_bayes_search(exp, 
                     param_keys=None, param_space=None,
                     num_proc=None):
    if num_proc is None:
        num_proc = 4

    if param_keys is None:
        param_keys = ['model_type', 'max_depth', 'min_child_weight', 'subsample', 'colsample_bytree',
                      'learning_rate', 'silent', 'objective', 'nthread', 'n_estimators', 'seed']
    if param_space is None:
        param_space = {'model_type': XGBClassifier, 'max_depth': hp.quniform('max_depth', 6, 9, 1),
                       'min_child_weight': hp.quniform('min_child_weight', 3, 7, 1),
                       'subsample': hp.uniform('subsample', 0.5, 1.0),
                       'colsample_bytree': hp.uniform('colsample', 0.5, 1.0),
                       'learning_rate': hp.uniform('eta', 0.01, 0.02),
                       'silent': 1, 'objective': 'binary:logistic',
                       'nthread': num_proc, 'n_estimators': 400, 'seed': 9438}
    
    
    bs = param_search.BayesSearch(SklearnModel, exp, param_keys, param_space,
                                  cv_out='xgb-bayes-scores.pkl',
                                  cv_pred_out='xgb-bayes-preds.pkl')
    best_param, best_score = bs.search_by_cv()
    param_search.write_cv_res_csv(cv_out = 'xgb-bayes-scores.pkl', 
                                  cv_csv_out = 'xgb-bayes-scores.csv')
    return best_param, best_score
Example #7
def lr_bayes_search(train_fname, test_fname, out_fname_prefix='sk-svc-bayes'):
    exp = ExperimentL1(train_fname=train_fname, test_fname=test_fname)

    param_keys = ['model_type', 'C',
                  #'loss',
                  'penalty', 'tol', 'solver', 'class_weight',
                  'random_state']

    param_space = {'model_type': LogisticRegression, 'C': hp.uniform('c', 0.1, 3),
                   #'loss': hp.choice('loss', ['hinge', 'squared_hinge']),
                   #'penalty': hp.choice('pen', ['l1', 'l2']),
                   'penalty': 'l2',
                   'tol': hp.uniform('tol', 1e-6, 3e-4),
                   'solver': hp.choice('solver', ['liblinear', 'lbfgs','newton-cg']),
                   'class_weight': hp.choice('cls_w', [None, 'auto']),
                   'random_state': hp.choice('seed', [1234, 53454, 6676, 12893]),
                   #'n_jobs': 2
                   }

    bs = param_search.BayesSearch(SklearnModel, exp, param_keys, param_space,
                                  cv_out=out_fname_prefix+'-scores.pkl',
                                  cv_pred_out=out_fname_prefix+'-preds.pkl',
                                  refit_pred_out=out_fname_prefix+'-refit-preds.pkl',
                                  dump_round=1)
    best = bs.search_by_cv(max_evals=60)
    param_search.write_cv_res_csv(bs.cv_out, bs.cv_out.replace('.pkl', '.csv'))
    return best
Example #8
def get_cnn_model(model_num, search_space):
    space = cnn_space(search_space)
    hparams = {'model_' + model_num: 'CNN',
            'word_vectors_' + model_num: ('word2vec', True),
            'delta_' + model_num: True,
            'flex_' + model_num: (True, .15),
            'filters_' + model_num: hp.quniform('filters_' + model_num, *space['filters_'], 1),
            'kernel_size_' + model_num: hp.quniform('kernel_size_' + model_num, *space['kernel_size_'], 1),
            'kernel_increment_' + model_num: hp.quniform('kernel_increment_' + model_num, *space['kernel_increment_'], 1),
            'kernel_num_' + model_num: hp.quniform('kernel_num_' + model_num, *space['kernel_num_'], 1),
            'dropout_' + model_num: hp.uniform('dropout_' + model_num, *space['dropout_']),
            'batch_size_' + model_num: hp.quniform('batch_size_' + model_num, *space['batch_size_'], 1),
            'activation_fn_' + model_num: hp.choice('activation_fn_' + model_num, space['activation_fn_'])}

    if space['no_reg']:
        hparams['regularizer_cnn_' + model_num] = hp.choice('regularizer_cnn_' + model_num, [
                (None, 0.0),
                ('l2', hp.uniform('l2_strength_cnn_' + model_num, *space['l2_'])),
                ('l2_clip', hp.uniform('l2_clip_norm_' + model_num, *space['l2_clip_']))
            ])

    else:
        hparams['regularizer_cnn_' + model_num] = hp.choice('regularizer_cnn_' + model_num, [
                ('l2', hp.uniform('l2_strength_cnn_' + model_num, *space['l2_'])),
                ('l2_clip', hp.uniform('l2_clip_norm_' + model_num, *space['l2_clip_']))
            ])

    if space['search_lr']:
        hparams['learning_rate_' + model_num] = hp.lognormal('learning_rate_' + model_num, 0, 1) / 3000
    else:
        hparams['learning_rate_' + model_num] = .0003

    return hparams
Example #9
def get_xgboost_params(name="xgboost_common"):
    return scope.get_xgb_model(
        n_estimators=scope.int(
            hp.quniform(
                get_full_name(name, "n_estimators"),
                1, 200, 1,
            ),
        ),
        max_depth=scope.int(
            hp.quniform(
                get_full_name(name, 'max_depth'),
                1, 13, 1,
            ),
        ),
        min_child_weight=scope.int(
            hp.quniform(
                get_full_name(name, 'min_child_weight'),
                1, 6, 1,
            ),
        ),
        subsample=hp.uniform(
            get_full_name(name, 'subsample'),
            0.5, 1,
        ),
        gamma=hp.uniform(
            get_full_name(name, 'gamma'),
            0.5, 1,
        ),
        nthread=1,
        seed=RANDOM_STATE,
    )
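`scope.int` ships with hyperopt, but `scope.get_xgb_model` only resolves if it has been registered with the pyll scope elsewhere in the project; a plausible registration, assuming XGBoost's sklearn wrapper, is:

from hyperopt.pyll import scope
from xgboost import XGBClassifier

@scope.define
def get_xgb_model(**kwargs):
    # invoked when hyperopt evaluates the pyll graph for a trial
    return XGBClassifier(**kwargs)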
Example #10
def xgb_model_stacking(exp_l2, out_fname_prefix, use_lower=0):
    from xgboost.sklearn import XGBClassifier
    param_keys = ['model_type', 'max_depth', 'min_child_weight', 'subsample', 'colsample_bytree',
                  'learning_rate', 'silent', 'objective', 'nthread', 'n_estimators', 'seed']
    param_space = {'model_type': XGBClassifier, 'max_depth': hp.quniform('max_depth', 2, 9, 1),
                   'min_child_weight': hp.quniform('min_child_weight', 1, 7, 1),
                   'subsample': hp.uniform('subsample', 0.1, 1.0),
                   'colsample_bytree': hp.uniform('colsample', 0.3, 1.0),
                   'learning_rate': hp.uniform('eta', 0.01, 0.02),
                   'silent': 1, 'objective': 'binary:logistic',
                   'nthread': 3, 'n_estimators': hp.quniform('n', 100, 1000, 50),
                   'seed': hp.choice('seed', [1234,53454,6676,12893])}
    # param_space = {'model_type': XGBClassifier, 'max_depth': hp.quniform('max_depth', 3, 9, 1),
    #                'min_child_weight': hp.quniform('min_child_weight', 3, 7, 1),
    #                'subsample': hp.uniform('subsample', 0.1, 1.0),
    #                'colsample_bytree': hp.uniform('colsample', 0.1, 0.6),
    #                'learning_rate': hp.uniform('eta', 0.01, 0.02),
    #                'silent': 1, 'objective': 'binary:logistic',
    #                'nthread': 4, 'n_estimators': 600, 'seed': hp.choice('seed', [1234,53454,6676,12893])}
    # l2 model output
    bs = param_search.BayesSearch(SklearnModel, exp_l2, param_keys, param_space,
                                  cv_out=out_fname_prefix+'-scores.pkl',
                                  cv_pred_out=out_fname_prefix+'-preds.pkl',
                                  refit_pred_out=out_fname_prefix+'-refit-preds.pkl',
                                  dump_round=10, use_lower=use_lower)
    best = bs.search_by_cv()
    param_search.write_cv_res_csv(bs.cv_out, bs.cv_out.replace('.pkl', '.csv'))
    return best
Example #11
def branin():
    """
    The Branin, or Branin-Hoo, function has three global minima,
    and is roughly an angular trough across a 2D input space.

        f(x, y) = a (y - b x ** 2 + c x - r ) ** 2 + s (1 - t) cos(x) + s

    The recommended values of a, b, c, r, s and t are:
        a = 1
        b = 5.1 / (4 pi ** 2)
        c = 5 / pi
        r = 6
        s = 10
        t = 1 / (8 * pi)

    Global Minima:
      [(-pi, 12.275),
       (pi, 2.275),
       (9.42478, 2.475)]

    Source: http://www.sfu.ca/~ssurjano/branin.html
    """
    x = hp.uniform('x', -5., 10.)
    y = hp.uniform('y', 0., 15.)
    pi = float(np.pi)
    loss = ((y - (old_div(5.1, (4 * pi ** 2))) * x ** 2 + 5 * x / pi - 6) ** 2 +
            10 * (1 - old_div(1, (8 * pi))) * scope.cos(x) + 10)
    return {'loss': loss,
            'loss_variance': 0,
            'status': base.STATUS_OK}
Example #12
def test_duplicate_label_is_error():
    trials = Trials()

    def fn(xy):
        x, y = xy
        return x ** 2 + y ** 2

    fmin(fn=fn, space=[hp.uniform("x", -5, 5), hp.uniform("x", -5, 5)], algo=rand.suggest, max_evals=500, trials=trials)
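As written, this call fails immediately: hyperopt rejects a space that reuses the label "x". The test presumably relies on an exception check, roughly:

import pytest
from hyperopt.exceptions import DuplicateLabel

with pytest.raises(DuplicateLabel):
    fmin(fn=fn, space=[hp.uniform("x", -5, 5), hp.uniform("x", -5, 5)],
         algo=rand.suggest, max_evals=500, trials=trials)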
Example #13
def _svm_hp_space(
        name_func,
        kernel,
        n_features=1,
        C=None,
        gamma=None,
        coef0=None,
        degree=None,
        shrinking=None,
        tol=None,
        max_iter=None,
        verbose=False,
        cache_size=_svm_default_cache_size):
    '''Generate SVM hyperparameter search space
    '''
    if kernel in ['linear', 'rbf', 'sigmoid']:
        degree_ = 1
    else:
        degree_ = (_svm_degree(name_func('degree')) 
                   if degree is None else degree)
    if kernel in ['linear']:
        gamma_ = 'auto'
    else:
        gamma_ = (_svm_gamma(name_func('gamma'), n_features=1) 
                  if gamma is None else gamma)
        gamma_ /= n_features  # make gamma independent of n_features.
    if kernel in ['linear', 'rbf']:
        coef0_ = 0.0
    elif coef0 is None:
        if kernel == 'poly':
            coef0_ = hp.pchoice(name_func('coef0'), [
                (0.3, 0),
                (0.7, gamma_ * hp.uniform(name_func('coef0val'), 0., 10.))
            ])
        elif kernel == 'sigmoid':
            coef0_ = hp.pchoice(name_func('coef0'), [
                (0.3, 0),
                (0.7, gamma_ * hp.uniform(name_func('coef0val'), -10., 10.))
            ])
        else:
            pass
    else:
        coef0_ = coef0

    hp_space = dict(
        kernel=kernel,
        C=_svm_C(name_func('C')) if C is None else C,
        gamma=gamma_,
        coef0=coef0_,
        degree=degree_,
        shrinking=(hp_bool(name_func('shrinking')) 
                   if shrinking is None else shrinking),
        tol=_svm_tol(name_func('tol')) if tol is None else tol,
        max_iter=(_svm_max_iter(name_func('maxiter'))
                  if max_iter is None else max_iter),
        verbose=verbose,
        cache_size=cache_size)
    return hp_space
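The `_svm_C`, `_svm_gamma`, `_svm_tol`, `_svm_max_iter` and `hp_bool` helpers are defined elsewhere in the library; hedged sketches in the spirit of hpsklearn's defaults (the exact bounds are assumptions):

import numpy as np
from hyperopt import hp

def _svm_C(name):
    return hp.loguniform(name, np.log(1e-5), np.log(1e5))

def _svm_gamma(name, n_features=1):
    # scaled so that gamma stays sensible for the given feature count
    return hp.loguniform(name, np.log(1e-3 / n_features), np.log(1e3 / n_features))

def _svm_tol(name):
    return hp.loguniform(name, np.log(1e-5), np.log(1e-2))

def _svm_max_iter(name):
    return hp.qloguniform(name, np.log(1e7), np.log(1e9), 1)

def hp_bool(name):
    return hp.choice(name, [False, True])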
Example #14
def sgd_regression(name,
    loss=None,            #default - 'squared_loss'
    penalty=None,         #default - 'l2'
    alpha=None,           #default - 0.0001
    l1_ratio=None,        #default - 0.15, must be within [0, 1]
    fit_intercept=None,   #default - True
    n_iter=None,          #default - 5
    shuffle=None,         #default - False
    random_state=None,    #default - None
    epsilon=None,         #default - 0.1
    learning_rate=None,   #default - 'invscaling'
    eta0=None,            #default - 0.01
    power_t=None,         #default - 0.5
    warm_start=False,
    verbose=0,
    ):

    def _name(msg):
      return '%s.%s_%s' % (name, 'sgd', msg)
    
    rval = scope.sklearn_SGDRegressor(
        loss=hp.pchoice(
            _name('loss'),
            [ (0.25, 'squared_loss'), 
              (0.25, 'huber'), 
              (0.25, 'epsilon_insensitive'), 
              (0.25, 'squared_epsilon_insensitive') ] ) if loss is None else loss,
        penalty=hp.pchoice(
            _name('penalty'),
            [ (0.40, 'l2'), 
              (0.35, 'l1'),
              (0.25, 'elasticnet') ] ) if penalty is None else penalty,
        alpha=hp.loguniform(
            _name('alpha'),
            np.log(1e-7),
            np.log(1)) if alpha is None else alpha,
        l1_ratio=hp.uniform(
            _name('l1_ratio'),
            0, 1 ) if l1_ratio is None else l1_ratio,
        fit_intercept=hp.pchoice(
            _name('fit_intercept'),
            [ (0.8, True), (0.2, False) ]) if fit_intercept is None else fit_intercept,
        epsilon=hp.loguniform(
            _name('epsilon'),
            np.log(1e-7),
            np.log(1)) if epsilon is None else epsilon,
        learning_rate='invscaling' if learning_rate is None else learning_rate,
        eta0=hp.loguniform(
            _name('eta0'),
            np.log(1e-5),
            np.log(1e-1)) if eta0 is None else eta0,
        power_t=hp.uniform(
            _name('power_t'),
            0, 1) if power_t is None else power_t,
        verbose=verbose,
        random_state=random_state,
        )
    return rval
Example #15
    def __init__(self):
        min_nb_layers = 3
        max_nb_layers = 4
        corruption = lambda name: hp.uniform(name, 0, 1)
        learning_rate = lambda name: hp.uniform(name, 0.5, 1)
        nb_neurons = lambda name: hp.quniform(name, 100, 800, 2)
        nb_epochs = lambda name: hp.quniform(name, 20, 50, 2)
        templates = {
            "corruption": corruption,
            "learning_rate": learning_rate,
            "nb_neurons": nb_neurons,
            "nb_epochs": nb_epochs
        }
        ILC_HP_Params.__init__(self, templates, min_nb_layers=min_nb_layers, max_nb_layers=max_nb_layers)
Example #16
    def set_lambda(self, max_evals=100, max_iters=100, n_folds=3, max_lambda=10):
        self.cv_indices = KFold(self.X.shape[0], n_folds=n_folds, shuffle=True)
        self.cross_max_iters = max_iters
        space = hp.choice('model', [{
                                        'lambda_1': hp.uniform('lambda_1', 0, max_lambda),
                                        'lambda_2': hp.uniform('lambda_2', 0, max_lambda)
                                    }])

        best = fmin(self.__cross_validation,
                    space=space, algo=tpe.suggest,
                    max_evals=max_evals
                    )
        self.lambda_1 = best['lambda_1']
        self.lambda_2 = best['lambda_2']
Example #17
    def set_multilayer_dropout_space(self):
        ''' defines a hyperspace for a "modern" neural network: at least two layers with dropout + ReLU '''

        # Force at least 2 layers, cuz we're modern
        min_layers = 2
        max_layers = 3

        # sets up the neural network
        nnets = [None] * (max_layers - min_layers + 1)

        for i, num_layers in enumerate(range(min_layers, max_layers + 1)):
            num_hids = [None] * num_layers
            for j in range(num_layers):
                num_hids[j] = hp.qloguniform(
                    'num_hid_%i%i' % (i, j), log(100), log(1000), 1)

            nnets[i] = num_hids

        default_mln_model_params = {
            'd': self.d, 'k': self.k, 'loss_terms': ['cross_entropy', 'dropout']}

        search_mln_model_params = {
            'arch': hp.choice('arch', nnets),
            'input_p': hp.uniform('ip', 0, 1),
            'hidden_p': hp.uniform('hp', 0, 1),
            'l1_reg': hp.choice('l1_reg', [None, hp.loguniform('l1_decay', log(1e-5), log(10))]),
            'l2_reg': hp.choice('l2_reg', [None, hp.loguniform('l2_decay', log(1e-5), log(10))])}

        default_mln_optim_params = {
            'optim_type': 'minibatch', 'optim_method': 'RMSPROP'}

        search_mln_optim_params = {
            'learn_rate': hp.uniform('learn_rate', 0, 1),
            'rho': hp.uniform('rho', 0, 1),
            'num_epochs': hp.qloguniform('num_epochs', log(1e2), log(2000), 1),
            'batch_size': hp.quniform('batch_size', 128, 1024, 1),
            'init_method': hp.choice('init_method', ['gauss', 'fan-io']),
            'scale_factor': hp.uniform('scale_factor', 0, 1)}

        # merge the default and search spaces
        mln_model_params = self.merge_default_search(
            default_mln_model_params, search_mln_model_params)
        mln_optim_params = self.merge_default_search(
            default_mln_optim_params, search_mln_optim_params)

        # define the hyperparameter space to search
        hyperspace = {'mln_model_params': mln_model_params,
                      'mln_optim_params': mln_optim_params}

        return hyperspace
Example #18
def optimize(trials):
    space = {
             'eta' : hp.uniform('eta', 0.05, 0.3),
             'max_depth' : hp.quniform('max_depth', 1, 8, 1),
             'min_child_weight' : hp.quniform('min_child_weight', 1, 6, 1),
             'subsample' : hp.uniform('subsample', 0.5, 1), 
             'gamma' : hp.uniform('gamma', 0.5, 1), 
             'colsample_bytree' : hp.uniform('colsample_bytree', 0.5, 1), 
             }

    best = fmin(score, space, algo=tpe.suggest, trials=trials, max_evals=500)
    print('-------------------------------')
    print('best parameters are: ')
    print(best)
    return best
Example #19
def get_xgboost_model(model_num):
    return {
            'model_' + model_num: 'XGBoost',
            'eta_' + model_num: hp.loguniform('eta_' + model_num,-5,0),
            'gamma_' + model_num: hp.uniform('gamma_' + model_num,0,10),
            'max_depth_' + model_num: hp.quniform('max_depth_' + model_num, 1,30,1),
            'min_child_weight_' + model_num: hp.uniform('min_child_weight_' + model_num, 0, 10),
            'max_delta_step_' + model_num: hp.uniform('max_delta_step_' + model_num, 0, 10),
            'num_round_' + model_num: hp.quniform('num_round_' + model_num, 1, 10, 1),
            'subsample_' + model_num: 1,
            'regularizer_xgb_' + model_num: hp.choice('regularizer_xgb_' + model_num,[
                ('l1', hp.loguniform('l1_strength_xgb_' + model_num, -5,5)),
                ('l2', hp.loguniform('l2_strength_xgb_' + model_num, -5,5))
            ])
        }
Example #20
def _grad_boosting_reg_loss_alpha(name):
    return hp.choice(name, [
        ('ls', 0.9), 
        ('lad', 0.9), 
        ('huber', hp.uniform(name + '.alpha', 0.85, 0.95)), 
        ('quantile', 0.5)
    ])
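Each branch pairs a loss name with an alpha value, and only the 'huber' branch searches alpha. Resolving the choice with `space_eval` makes the structure visible (the flat label keys follow hyperopt's assignment format):

from hyperopt import space_eval

space = _grad_boosting_reg_loss_alpha('reg_loss')
# index 2 selects the 'huber' branch, whose alpha is drawn from the nested uniform
print(space_eval(space, {'reg_loss': 2, 'reg_loss.alpha': 0.9}))
# -> ('huber', 0.9)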
Example #21
def test_landing_screen():

    # define an objective function
    def objective(args):
        case, val = args
        if case == 'case 1':
            return val
        else:
            return val ** 2

    # define a search space
    from hyperopt import hp
    space = hp.choice('a',
        [
            ('case 1', 1 + hp.lognormal('c1', 0, 1)),
            ('case 2', hp.uniform('c2', -10, 10))
        ])

    # minimize the objective over the space
    import hyperopt
    best = hyperopt.fmin(objective, space,
        algo=hyperopt.tpe.suggest,
        max_evals=100)

    print(best)
    # -> {'a': 1, 'c2': 0.01420615366247227}

    print(hyperopt.space_eval(space, best))
Example #22
def helper_neighbors():
    return hp.choice('neighbor_type', [
        {'ktype': 'kneighbors', 'n_neighbors': hp.quniform('num', 3,
                                                           19, 1)},
        {'ktype': 'radiusneighbors', 'radius': hp.uniform('rad', 0, 2),
         'out_label': 1}
    ])
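A quick way to sanity-check a conditional space like this is to draw raw samples from it:

from hyperopt.pyll.stochastic import sample

# each draw picks one branch and fills in its nested parameters
for _ in range(3):
    print(sample(helper_neighbors()))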
Example #23
def main():

    #Setup log
    dir_path = os.path.dirname(os.path.realpath(__file__))
    fmt = "%(levelname) -10s %(asctime)s %(module)s:%(lineno)s %(funcName)s %(message)s"
    handler = logging.FileHandler(os.path.join(dir_path, 'optimizer.log'), mode='w')
    handler.setFormatter(logging.Formatter(fmt))
    log.addHandler(handler)
    log.setLevel(logging.DEBUG)

    try:
        optimizer = Optimizer(GhoshModel, sys.argv[3], sys.argv[1], sys.argv[2])
    except Exception as e:
        log.error(e)
        return

    space = {
        LEARN: hp.uniform(LEARN, 0.0000001, 0.0001),
        KERNEL: hp.quniform(KERNEL, 3, 8, 1),
        BATCH: hp.quniform(BATCH, 4, 128, 1)
    }
    log.info("Space:")
    log.info(space)

    best = fmin(optimizer.objective,
         space=space,
         algo=tpe.suggest,
         max_evals=100)

    print(best)
    log.info(str(best))
Example #24
def _trees_max_features(name):
    return hp.pchoice(name, [
        (0.2, 'sqrt'),  # most common choice.
        (0.1, 'log2'),  # less common choice.
        (0.1, None),  # all features, less common choice.
        (0.6, hp.uniform(name + '.frac', 0., 1.))
    ])
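`hp.pchoice` weights its branches explicitly, so a few thousand raw draws should reproduce roughly the stated 20/10/10/60 split:

from collections import Counter
from hyperopt.pyll.stochastic import sample

draws = [sample(_trees_max_features('mf')) for _ in range(5000)]
# bucket the continuous branch under one key
print(Counter(d if isinstance(d, (str, type(None))) else 'frac' for d in draws))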
Example #25
    def set_lambda(self, max_evals=10, max_iters=100, n_folds=3, max_lambda=10):
        self.cv_indices = KFold(self.X.shape[0], n_folds=n_folds, shuffle=True)
        self.cross_max_iters = max_iters
        space = hp.choice("model", [{"lambda_1": hp.uniform("lambda_1", 0, max_lambda)}])

        best = fmin(self.__cross_validation, space=space, algo=tpe.suggest, max_evals=max_evals)
        self.lambda_1 = best["lambda_1"]
Example #26
    def set_old_space(self):
        ''' defines an old net from the 80s - simple sigmoid layers, nothing fancy'''

        min_layers = 1
        max_layers = 3

        # sets up the neural network
        nnets = [None] * (max_layers - min_layers + 1)

        for i, num_layers in enumerate(range(min_layers, max_layers + 1)):
            num_hids = [None] * num_layers
            for j in range(num_layers):
                num_hids[j] = hp.qloguniform(
                    'num_hid_%i%i' % (i, j), log(10), log(100), 1)

            nnets[i] = num_hids

        default_mln_model_params = {
            'd': self.d, 'k': self.k, 'loss_terms': ['cross_entropy']}

        search_mln_model_params = {
            'arch': hp.choice('arch', nnets),
            'l1_reg': hp.choice('l1_reg', [None, hp.loguniform('l1_decay', log(1e-5), log(10))]),
            'l2_reg': hp.choice('l2_reg', [None, hp.loguniform('l2_decay', log(1e-5), log(10))])}

        default_mln_optim_params = {
            'optim_type': 'minibatch', 'optim_method': 'RMSPROP'}

        search_mln_optim_params = {
            'learn_rate': hp.uniform('learn_rate', 0, 1),
            'rho': hp.uniform('rho', 0, 1),
            'num_epochs': hp.qloguniform('num_epochs', log(1e2), log(2000), 1),
            'batch_size': hp.quniform('batch_size', 128, 1024, 1),
            'init_method': hp.choice('init_method', ['gauss', 'fan-io']),
            'scale_factor': hp.uniform('scale_factor', 0, 1)}

        # merge the default and search spaces
        mln_model_params = self.merge_default_search(
            default_mln_model_params, search_mln_model_params)
        mln_optim_params = self.merge_default_search(
            default_mln_optim_params, search_mln_optim_params)

        # define the hyperparameter space to search
        hyperspace = {'mln_model_params': mln_model_params,
                      'mln_optim_params': mln_optim_params}

        return hyperspace
Example #27
    def __init__(self, max_layerc, opts):
        self.opts = opts
        self.max_layerc = max_layerc
        dpart = [
            dict(
                [('h%dm%d'%(l,maxl), hp.choice('h%dm%d'%(l,maxl), opts['hidden'])) for l in range(1,maxl+1)] +
                [('dr%dm%d'%(l,maxl), hp.choice('dr%dm%d'%(l,maxl), opts['drate'])) for l in range(0,maxl+1)]
            ) for maxl in range(1,self.max_layerc+1)]

        self.space = {
                'activation' : hp.choice('activation', opts['activation']),
                'n_batch':hp.choice('n_batch', opts['n_batch'] ),
                'opt':hp.choice('opt', opts['opt']),
                'lr':hp.uniform('lr', *opts['lr']),
                'norm':hp.uniform('norm', *opts['norm']),
                'dpart' : hp.choice('dpart', dpart),
        }
Example #28
def test_space_eval():
    space = hp.choice('a',
                      [
                          ('case 1', 1 + hp.lognormal('c1', 0, 1)),
                          ('case 2', hp.uniform('c2', -10, 10))
                      ])

    assert space_eval(space, {'a': 0, 'c1': 1.0}) == ('case 1', 2.0)
    assert space_eval(space, {'a': 1, 'c2': 3.5}) == ('case 2', 3.5)
Example #29
def test_remove_allpaths():
    z = hp.uniform('z', 0, 10)
    a = hp.choice('a', [ z + 1, z - 1])
    hps = {}
    expr_to_config(a, (True,), hps)
    aconds = hps['a']['conditions']
    zconds = hps['z']['conditions']
    assert aconds == set([(True,)]), aconds
    assert zconds == set([(True,)]), zconds
Example #30
def gauss_wave2():
    """
    Variant of the GaussWave problem in which noise is added to the score
    function, and there is an option to either have no sinusoidal variation, or
    a negative cosine with variable amplitude.

    Immediate local max is to sample x from spec and turn off the neg cos.
    Better solution is to move x a bit to the side, turn on the neg cos and turn
    up the amp to 1.
    """

    rng = np.random.RandomState(123)
    var = .1
    x = hp.uniform('x', -20, 20)
    amp = hp.uniform('amp', 0, 1)
    t = (scope.normal(0, var, rng=rng) + 2 * scope.exp(-(old_div(x, 5.0)) ** 2))
    return {'loss': - hp.choice('hf', [t, t + scope.sin(x) * amp]),
            'loss_variance': var, 'status': base.STATUS_OK}
Example #31
class AdvancedNeuralNetworkModel(NonTreeBasedModel):
    @classmethod
    def prepare_dataset(cls, train_data, test_data, categorical_features):
        (X_train, y_train, *other), (X_test, y_test) = \
            super(AdvancedNeuralNetworkModel, cls).prepare_dataset(train_data, test_data,
                                                                   categorical_features)
        return ((X_train.astype(np.float32), y_train.astype(np.float32).reshape((-1, 1)), *other),
                (X_test.astype(np.float32), y_test.astype(np.float32).reshape((-1, 1))))

    @staticmethod
    def build_estimator(hyperparams, train_data, test=False):
        device = 'cuda' if torch.cuda.is_available() else 'cpu'

        # Extract info from training data
        X, y, *_ = train_data
        in_features = X.shape[1]

        callbacks = [
            ('r2_score_valid', EpochScoring('r2',
                                            lower_is_better=False)),
            ('early_stopping', EarlyStopping(monitor='valid_loss',
                                             patience=5,
                                             lower_is_better=True)),
            ('learning_rate_scheduler', LRScheduler(policy=lr_scheduler.ReduceLROnPlateau,
                                                    monitor='valid_loss',
                                                    # The following kwargs are passed to
                                                    # the lr scheduler constructor
                                                    mode='min',
                                                    min_lr=1e-5
                                                    )),
        ]

        return NeuralNetRegressor(
            NNModule,
            criterion=nn.MSELoss,
            optimizer=torch.optim.SGD,
            max_epochs=300,
            iterator_train__shuffle=True, # Shuffle training data on each epoch
            callbacks=callbacks,
            device=device,
            train_split=CVSplit(cv=5, random_state=RANDOM_STATE),
            lr=hyperparams['lr'],
            batch_size=hyperparams['batch_size'],
            module__in_features=in_features,
            module__n_layers=hyperparams['n_layers'],
            module__n_neuron_per_layer=hyperparams['n_neuron_per_layer'],
            module__activation=getattr(F, hyperparams['activation']),
            module__p_dropout=hyperparams['p_dropout'],
            optimizer__momentum=hyperparams['momentum'],
            optimizer__weight_decay=hyperparams['weight_decay'],
            optimizer__nesterov=True,
            verbose=3,
            iterator_train__num_workers=4,
            iterator_valid__num_workers=4
        )

    hp_space = {
        'lr': hp.loguniform('learning_rate', np.log(1e-4), np.log(1e-1)),
        'batch_size': 128,
        'n_neuron_per_layer': scope.int(hp.quniform('layer_size', 10, 100, 3)),
        'activation': hp.choice('activation', ['relu', 'leaky_relu', 'selu']),
        'p_dropout': hp.uniform('p_dropout', 0.0, 0.5),
        'momentum': hp.uniform('momentum', 0.87, 0.99),
        'weight_decay': hp.loguniform('alpha', np.log(1e-7), np.log(1e-2)),
        'n_layers': hp.choice('n_layers', [2, 3, 4, 5])
    }
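A hedged sketch of how the pieces of this class fit together: draw one concrete configuration from `hp_space`, then hand it to `build_estimator` (here `train_data` is assumed to be the tuple produced by `prepare_dataset` above):

from hyperopt.pyll.stochastic import sample

hyperparams = sample(AdvancedNeuralNetworkModel.hp_space)  # one concrete config
# est = AdvancedNeuralNetworkModel.build_estimator(hyperparams, train_data)
# est.fit(X_train, y_train)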
Example #32
def main(args):
    random.seed(args.seed)
    np.random.seed(args.seed)
    rng_state = np.random.RandomState(seed=args.seed)

    fusibles = {
        'lr': hp.uniform('lr', 0.0001, 0.01),
        'beta1': hp.uniform('beta1', 0.001, 0.999),
        'beta2': hp.uniform('beta2', 0.001, 0.999),
        'weight_decay': hp.uniform('weight_decay', 0.0, 0.5),
        'gamma': hp.uniform('gamma', 0.1, 0.9),
        'step_size': hp.choice('step_size', (5, 10, 20, 40)),
    }
    nonfusibles = {
        'batch_size': hp.choice('batch_size', (1024, 2048)),
        'version': hp.choice('version', ('v2', 'v3l')),
    }

    def _run(results_dir, epochs, iters_per_epoch, params, env_vars=None):
        # Build the cmd.
        cmd = [
            'python',
            'main.py',
            '--epochs',
            str(epochs),
            '--iters-per-epoch',
            str(iters_per_epoch),
            '--dataroot',
            args.dataroot,
            '--dataset',
            args.dataset,
            '--device',
            args.device,
            '--eval',
            '--seed',
            str(args.seed),
            '--batch_size',
            str(generate_nonfusible_param(params, 'batch_size')),
            '--version',
            str(generate_nonfusible_param(params, 'version')),
        ]
        if results_dir is not None:
            cmd.extend(['--outf', results_dir])

        cmd.extend(
            generate_fusible_param_flags(
                params,
                list(fusibles.keys()),
            ))

        if args.mode == 'hfta':
            cmd.append('--hfta')
        if args.amp:
            cmd.append('--amp')

        # Launch the training process.
        succeeded = True
        try:
            logging.info('--> Running cmd = {}'.format(cmd))
            subprocess.run(
                cmd,
                stdout=subprocess.DEVNULL if results_dir is None else open(
                    os.path.join(results_dir, 'stdout.txt'),
                    'w',
                ),
                stderr=subprocess.DEVNULL if results_dir is None else open(
                    os.path.join(results_dir, 'stderr.txt'),
                    'w',
                ),
                check=True,
                cwd=os.path.join(
                    os.path.abspath(
                        os.path.expanduser(os.path.dirname(__file__))),
                    '../mobilenet/'),
                env=env_vars,
            )
        except subprocess.CalledProcessError as e:
            logging.error(e)
            succeeded = False
        return succeeded

    def try_params(ids, epochs, params, env_vars=None):
        """ Running the training process for mobiletnet classification task.

    Args:
      ids: Either a single int ID (for serial), or a list of IDs (for HFTA).
      epochs: number of epochs to run.
      params: maps hyperparameter name to its value(s). For HFTA, the values are
        provided as a list.
      env_vars: optional, dict(str, str) that includes extra environment that
        needs to be forwarded to the subprocess call

    Returns:
      result(s): A single result dict for serial or a list of result dicts for
        HFTA in the same order as ids.
      early_stop(s): Whether the training process early stopped. A single bool
        for serial or a list of bools for HFTA in the same order as ids.
    """
        epochs = int(round(epochs))
        ids_str = (','.join([str(i) for i in ids]) if isinstance(
            ids,
            (list, tuple),
        ) else str(ids))
        # Allocate result dir.
        results_dir = os.path.join(args.outdir, ids_str)
        Path(results_dir).mkdir(parents=True, exist_ok=True)
        # Run training.
        succeeded = _run(
            results_dir,
            epochs,
            args.iters_per_epoch,
            params,
            env_vars=env_vars,
        )
        if not succeeded:
            raise RuntimeError('_run failed!')
        # Gather the results.
        results_frame = pd.read_csv(os.path.join(results_dir, 'eval.csv'))
        if isinstance(ids, (list, tuple)):
            results = [{
                'acc': acc
            } for acc in results_frame['acc:top1'].tolist()]
            assert len(results) == len(ids)
            return results, [False] * len(ids)
        else:
            return {'acc': results_frame['acc:top1'][0]}, False

    def dry_run(
        B=None,
        nonfusibles_kvs=None,
        epochs=None,
        iters_per_epoch=None,
        env_vars=None,
    ):
        params = [{
            **handle_integers(sample(fusibles, rng=rng_state)),
            **nonfusibles_kvs
        } for _ in range(max(B, 1))]
        if B > 0:
            params = fuse_dicts(params)
        else:
            params = params[0]
        return _run(None, epochs, iters_per_epoch, params, env_vars=env_vars)

    tune_hyperparameters(
        space={
            **fusibles,
            **nonfusibles
        },
        try_params_callback=try_params,
        dry_run_callback=dry_run,
        mode=args.mode,
        algorithm=args.algorithm,
        nonfusibles=nonfusibles.keys(),
        dry_run_repeats=args.dry_run_repeats,
        dry_run_epochs=args.dry_run_epochs,
        dry_run_iters_per_epoch=args.dry_run_iters_per_epoch,
        metric='acc',
        goal='max',
        algorithm_configs={
            'hyperband': args.hyperband_kwargs,
            'random': args.random_kwargs,
        },
        seed=args.seed,
        outdir=args.outdir,
    )
Example #33
            y_train,
            eval_set=eval_set,
            eval_metric="auc",
            early_stopping_rounds=30)

    pred = clf.predict_proba(d_val)[:, 1]
    auc = roc_auc_score(y_val, pred)
    print("SCORE:", auc)

    return {'loss': 1 - auc, 'status': STATUS_OK}


space = {
    'max_depth': hp.quniform("max_depth", 5, 20, 1),
    'min_child_weight': hp.quniform('min_child_weight', 1, 10, 1),
    'subsample': hp.uniform('subsample', 0.7, 1),
    'colsample_bytree': hp.uniform('colsample_bytree', 0.1, 0.8),
    'colsample_bylevel': hp.uniform('colsample_bylevel', 0.1, 1.0),
    'learning_rate': hp.uniform('learning_rate', 0.01, 0.2)
}

trials = Trials()
best = fmin(fn=objective,
            space=space,
            algo=tpe.suggest,
            max_evals=30,
            trials=trials)

print(best)
for key in best:
    print(key, best[key])
Example #34
TRIAL = 200


def hyp_obj(arg):
    return (1 - arg['x'])**2 + 100 * (arg['y'] - arg['x']**2)**2


def opt_obj(trial):
    x = trial.suggest_uniform('x', -10, 10)
    y = trial.suggest_uniform('y', -10, 10)
    return (1 - x)**2 + 100 * (y - x**2)**2


# hyperopt
trials = Trials()
space = {'x': hp.uniform('x', -10, 10), 'y': hp.uniform('y', -10, 10)}
st = time.time()
best = fmin(fn=hyp_obj,
            space=space,
            algo=tpe.suggest,
            max_evals=TRIAL,
            trials=trials)
print('hyperopt elapsed time :{:.3f}(s)'.format(time.time() - st), end='\n')
hyp_res = [t['result']['loss'] for t in trials.trials]

# optuna
study = optuna.create_study()

st = time.time()
study.optimize(opt_obj, n_trials=TRIAL, n_jobs=JOB)
print('optuna elapsed time :{:.3f}(s)'.format(time.time() - st), end='\n')
Example #35
        for tree in range(len(meta_classifier.estimators_)):
            predictions.append(meta_classifier.estimators_[tree].predict(
                [features])[0])

        stddev = np.std(np.array(predictions), axis=0)
        print("hello2")
        print(stddev.shape)

        loss = np.sum(stddev**2) * -1

    return {'loss': loss, 'status': STATUS_OK, 'features': features}


space = {
    'k':
    hp.choice('k_choice', [(1.0), (hp.uniform('k_specified', 0, 1))]),
    'accuracy':
    hp.choice('accuracy_choice', [(0.0),
                                  (hp.uniform('accuracy_specified', 0, 1))]),
    'fairness':
    hp.choice('fairness_choice', [(0.0),
                                  (hp.uniform('fairness_specified', 0, 1))]),
    'privacy':
    hp.choice('privacy_choice', [(None),
                                 (hp.lognormal('privacy_specified', 0, 1))]),
    'robustness':
    hp.choice('robustness_choice',
              [(0.0), (hp.uniform('robustness_specified', 0, 1))]),
}

trials = Trials()
Example #36
#     "num_filter_1": 100,
#     "num_filter_2": 100,
#     "num_filter_3": 100
# }
# train_once(config)
# exit()

#
import numpy as np
from hyperopt import hp
from ray.tune.suggest.hyperopt import HyperOptSearch
from ray import tune  # not supported on Windows

space = {
    "lr": hp.loguniform("lr", 1e-8, 1e-2),
    "bs": hp.randint("bs", 10),
    "decay_rate": hp.uniform("decay_rate", 0.7, 0.95),
    "num_filter_1": hp.randint("num_filter_1", 100),
    "num_filter_2": hp.randint("num_filter_2", 100),
    "num_filter_3": hp.randint("num_filter_3", 100)
}

hyperopt_search = HyperOptSearch(space,
                                 max_concurrent=2,
                                 reward_attr="mean_accuracy")

analysis = tune.run(train_once, num_samples=30, search_alg=hyperopt_search)

dfs = analysis.trial_dataframes
print("dfs", dfs, "\n")

# Plot by epoch
Example #37
    # This is what it keeps track of for hyperopting.
    tune.track.log(top_1_valid=metrics['correct'],
                   minus_loss=metrics['minus_loss'],
                   plus_loss=metrics['plus_loss'])
    return metrics['minus_loss']


ops = augment_list(False)  # Get the default augmentation set.
# Define the space of our augmentations.
space = {}
for i in range(args.num_policy):
    for j in range(args.num_op):
        space['policy_%d_%d' % (i, j)] = hp.choice('policy_%d_%d' % (i, j),
                                                   list(range(0, len(ops))))
        space['prob_%d_%d' % (i, j)] = hp.uniform('prob_%d_%d' % (i, j), 0.0,
                                                  1.0)
        space['level_%d_%d' % (i, j)] = hp.uniform('level_%d_%d' % (i, j), 0.0,
                                                   1.0)

final_policy_set = []

reward_attr = 'minus_loss'

import ray
from ray import tune

# TODO boost this.
ray.init(num_gpus=4, ignore_reinit_error=True, num_cpus=28)

cv_num = 5
num_result_per_cv = 10
Example #38
from hyperopt import fmin, tpe, hp, STATUS_OK


def objective(x):
    return {'loss': x**2, 'status': STATUS_OK}


best = fmin(objective,
            space=hp.uniform('x', -10, 10),
            algo=tpe.suggest,
            max_evals=100)

print(best)
Example #39
# -*- coding: utf-8 -*-
#
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

from hyperopt import hp

common_hyperparameters = {
    'lr': hp.uniform('lr', low=1e-4, high=3e-1),
    'weight_decay': hp.uniform('weight_decay', low=0, high=3e-3),
    'patience': hp.choice('patience', [30]),
    'batch_size': hp.choice('batch_size', [32, 64, 128, 256, 512]),
}

gcn_hyperparameters = {
    'gnn_hidden_feats':
    hp.choice('gnn_hidden_feats', [32, 64, 128, 256]),
    'predictor_hidden_feats':
    hp.choice('predictor_hidden_feats', [16, 32, 64, 128, 256, 512, 1024]),
    'num_gnn_layers':
    hp.choice('num_gnn_layers', [1, 2, 3, 4, 5]),
    'residual':
    hp.choice('residual', [True, False]),
    'batchnorm':
    hp.choice('batchnorm', [True, False]),
    'dropout':
    hp.uniform('dropout', low=0., high=0.6)
}

gat_hyperparameters = {
    'gnn_hidden_feats':
Example #40
# prepare the dataset
dt_preds = pd.read_csv(os.path.join(PREDS_DIR, "preds_val_%s_%s_%s.csv" % (dataset_name, method_name, cls_method)), sep=";")

print('Optimizing parameters...')
cost_weights = [(1,1), (2,1), (3,1), (5,1), (10,1), (20,1)]
c_postpone_weight = 0
for c_miss_weight, c_action_weight in cost_weights:
    
    c_miss = c_miss_weight / (c_miss_weight + c_action_weight)
    c_action = c_action_weight / (c_miss_weight + c_action_weight)
    
    costs = np.matrix([[lambda x: 0,
                          lambda x: c_miss],
                         [lambda x: c_action + c_postpone_weight * (x['prefix_nr']-1) / x['case_length'],
                          lambda x: c_action + c_postpone_weight * (x['prefix_nr']-1) / x['case_length'] +
                          (x['prefix_nr']-1) / x['case_length'] * c_miss
                         ]])
    
    space = {'conf_threshold': hp.uniform("conf_threshold", 0, 1)}
    trials = Trials()
    best = fmin(evaluate_model_cost, space, algo=tpe.suggest, max_evals=50, trials=trials)

    best_params = hyperopt.space_eval(space, best)

    outfile = os.path.join(PARAMS_DIR, "optimal_confs_%s_%s_%s_%s_%s_%s.pickle" % (dataset_name, method_name, cls_method, 
                                                                                   c_miss_weight, c_action_weight, 
                                                                                   c_postpone_weight))
    # write to file
    with open(outfile, "wb") as fout:
        pickle.dump(best_params, fout)
Example #41
import os
from rlpy.Domains import InfCartPoleBalance
from rlpy.Agents import SARSA, Q_LEARNING
from rlpy.Representations import *
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import numpy as np
from hyperopt import hp

param_space = {
    'discretization':
    hp.quniform("discretization", 5, 40, 1),
    'discover_threshold':
    hp.loguniform("discover_threshold", np.log(1e-2), np.log(1e1)),
    'lambda_':
    hp.uniform("lambda_", 0., 1.),
    'boyan_N0':
    hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
    'initial_learn_rate':
    hp.loguniform("initial_learn_rate", np.log(1e-3), np.log(1))
}


def make_experiment(exp_id=1,
                    path="./Results/Temp/{domain}/{agent}/{representation}/",
                    discover_threshold=.01256,
                    lambda_=0.81,
                    boyan_N0=9811.2337,
                    initial_learn_rate=.15,
                    discretization=22.):
    opt = {}
Example #42
def main(args):

    ray.init(num_cpus=args.rayNumCpu, num_gpus=args.rayNumGpu)

    t_loader, v_loader = get_loaders(train_batch_size=16,
                                     num_workers=1,
                                     data_folder=args.dataFolder,
                                     cuda_available=torch.cuda.is_available())
    pinned_obj_dict['data_loader_train'] = pin_in_object_store(t_loader)
    pinned_obj_dict['data_loader_valid'] = pin_in_object_store(v_loader)
    pinned_obj_dict['args'] = pin_in_object_store(args)

    trainable_name = 'hyp_search_train'
    register_trainable(trainable_name, TrainerClass)

    reward_attr = "acc"

    #############################
    # Define hyperband scheduler
    #############################
    hpb = AsyncHyperBandScheduler(time_attr="training_iteration",
                                  reward_attr=reward_attr,
                                  grace_period=40,
                                  max_t=300)

    ##############################
    # Define hyperopt search algo
    ##############################
    space = {
        'lr': hp.uniform('lr', 0.001, 0.1),
        'optimizer':
        hp.choice("optimizer",
                  ['SGD', 'Adam'
                   ]),  #, 'Adadelta']), # Adadelta gets the worst results
        'batch_accumulation': hp.choice("batch_accumulation", [4, 8, 16])
    }
    hos = HyperOptSearch(space, max_concurrent=4, reward_attr=reward_attr)

    #####################
    # Define experiments
    #####################
    exp_name = "resnet152_hyp_search_hyperband_hyperopt_{}".format(
        time.strftime("%Y-%m-%d_%H.%M.%S"))
    exp = Experiment(
        name=exp_name,
        run=trainable_name,
        num_samples=args.numSamples,  # the number of experiments
        resources_per_trial={
            "cpu": args.trialNumCpu,
            "gpu": args.trialNumGpu
        },
        checkpoint_freq=args.checkpointFreq,
        checkpoint_at_end=True,
        stop={
            reward_attr: 0.95,
            "training_iteration": args.
            trainingIteration,  # how many times a specific config will be trained
        })

    ##################
    # Run tensorboard
    ##################
    if args.runTensorBoard:
        thread = threading.Thread(target=launch_tensorboard, args=[exp_name])
        thread.start()
        launch_tensorboard(exp_name)

    ##################
    # Run experiments
    ##################
    run_experiments(exp, search_alg=hos, scheduler=hpb, verbose=False)
Example #43
def tune_xgb(train_data, test_data):
    dtrain = lgb.Dataset(train_data[features], labels)
    dtest = lgb.Dataset(test_data[features], labels_val, reference=dtrain)

    tune_result_file = "./hyper_lgb_log/tune_" + model_name + ".csv"
    f_w = open(tune_result_file, 'wb')

    def objective(args):
        params = {
            'task': 'train',
            'boosting_type': args['boosting_type'],
            'objective': args['objective'],
            'metric': {"mae"},
            'num_leaves': int(args['num_leaves']),
            'min_sum_hessian_in_leaf': args['min_sum_hessian_in_leaf'],
            'min_data_in_leaf': int(args['min_data_in_leaf']),
            'max_depth': -1,
            'learning_rate': args['learning_rate'],
            'feature_fraction': args['feature_fraction'],
            'verbose': 1,
        }

        print "training..."
        print params
        model = xgb_train(dtrain,
                          dtest,
                          params,
                          offline=True,
                          verbose=False,
                          num_boost_round=int(args['n_estimators']))
        model.save_model('dump_lgb_model_txt')
        print "predicting..."
        pred_y = xgb_predict(model, test_data[features])
        test_y = dtest.get_label()
        mae = mean_absolute_error(pred_y, test_y)

        xgb_log.write(str(args))
        xgb_log.write('\n')
        xgb_log.write(str(mae))
        xgb_log.write('\n')
        return mae

    # Searching space
    space = {
        #'boosting_type': hp.choice("boosting_type", ["gbdt","rf","dart","goss"]),
        #'objective': hp.choice("objective", ['regression_l2','regression_l1','huber','poisson','fair','regression']),
        'boosting_type': hp.choice("boosting_type", ["gbdt"]),
        'objective': hp.choice("objective", ['regression']),
        'num_leaves': hp.quniform("num_leaves", 100, 500, 5),
        'min_sum_hessian_in_leaf': hp.uniform("min_sum_hessian_in_leaf", 0.001,
                                              10),
        'min_data_in_leaf': hp.quniform("min_data_in_leaf", 10, 100, 5),
        'learning_rate': hp.uniform("learning_rate", 0.01, 0.2),
        'feature_fraction': hp.uniform("feature_fraction", 0.5, 0.9),
        'n_estimators': hp.quniform("n_estimators", 50, 200, 5),
    }
    best_sln = fmin(objective, space, algo=tpe.suggest, max_evals=150)
    #best_sln = fmin(objective, space, algo=hyperopt.anneal.suggest, max_evals=300)
    pickle.dump(best_sln, f_w, True)
    mae = objective(best_sln)
    xgb_log.write(str(mae) + '\n')
    f_w.close()
Example #44
def process_video(video, params):
    os.makedirs(os.path.join(output_path, 'results', video), exist_ok=True)
    current_path = os.path.join(input_path, video, "saliency")
    pr = Processing(current_path,
                    os.path.join(output_path, 'results', video),
                    os.path.join(output_path, 'supplements', video),
                    parameters=params)
    pr.optimize()


if __name__ == '__main__':
    # optuna to optimize
    tpe_trials = Trials()

    space_dnf = (hp.uniform('tau_dt', 0.0001, 1),
                 hp.uniform('h', -1, 0),
                 hp.uniform('gi', 0, 10),
                 hp.uniform('excitation_amplitude', 0.0001, 5),
                 hp.uniform('excitation_sigma', 0.0001, 1))
    best = fmin(evaluate,
                space_dnf,
                algo=tpe.suggest,
                trials=tpe_trials,
                max_evals=700)

    full_results = pd.DataFrame({
        'loss': [x['loss'] for x in tpe_trials.results],
        'iteration':
        tpe_trials.idxs_vals[0]['tau_dt'],
        'tau_dt':
        tpe_trials.idxs_vals[1]['tau_dt'],
Example #45
                                              random_state=0,
                                              test_size=TEST_SIZE)
        x_train, y_train, x_val, y_val, x_test, label_binarizer, \
            clips_per_sample = load_data(train_idx, val_idx)
        '''
        dropout_coeff   = float(params["dropout_coeff"])
        reg_coeff       = float(params["reg_coeff"])
        reg_coeff2      = float(params["reg_coeff2"])
        conv_depth      = int(params["conv_depth"])
        num_hidden      = int(params["num_hidden"])
        conv_width      = int(params["conv_width"])
        conv_height     = int(params["conv_height"])
        conv_count      = int(params["conv_count"])
        '''
        hyperopt_space = {
            "dropout_coeff": hp.uniform("dropout_coeff", 0, 0.6),
            "reg_coeff": hp.uniform("reg_coeff", -10, -3),
            "reg_coeff2": hp.uniform("reg_coeff2", -10, -3),
            "conv_depth": hp.quniform("conv_depth", 16, 32, 1),
            "num_hidden": hp.quniform("num_hidden", 32, 64, 1),
            "conv_width": hp.quniform("conv_width", 2, 5, 1),
            "conv_height": hp.quniform("conv_height", 6, 12, 1),
            "conv_count": hp.choice("conv_count", [3, 4]),
        }

        best = fmin(fn=train_model,
                    space=hyperopt_space,
                    algo=tpe.suggest,
                    max_evals=HYPEROPT_EVALS)
        print("best params:", best)
Example #46
seed = 7
test_size = 0.33
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    Y,
                                                    test_size=test_size,
                                                    random_state=seed)

scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train.values)
X_test_scaled = scaler.transform(X_test.values)

#### Hyperopt parameters ####

param = {
    'max_depth': hp.choice('max_depth', np.arange(3, 8 + 1, dtype=int)),
    'learning_rate': hp.uniform('learning_rate', 0.01, 0.2),
    'n_estimators': hp.choice('n_estimators', np.arange(150,
                                                        400 + 1,
                                                        dtype=int)),
    'gamma': hp.choice('gamma', np.arange(0, 5 + 1, dtype=int)),
    'colsample_bytree': hp.uniform('colsample_bytree', 0.3, 1),
    'dropout1': hp.choice('dropout1', [0.2, 0.3, 0.4, 0.5]),
    'dropout2': hp.choice('dropout2', [0.2, 0.3, 0.4, 0.5]),
    'dropout3': hp.choice('dropout3', [0.2, 0.3, 0.4, 0.5]),
    'epochs': hp.choice('epochs', [20, 30, 40, 50]),
    'batch_size': hp.choice('batch_size', [32, 64, 128]),
    'weight_clf': hp.uniform('weight_clf', 0, 1),
    'weight_mlp': hp.uniform('weight_mlp', 0, 1)
}
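# weight_clf / weight_mlp suggest blending an XGBoost model with an MLP; a
# minimal hypothetical sketch of how such weights could be consumed (the
# function and its inputs are assumptions, not code from this example):
import numpy as np

def blend_predictions(p_clf, p_mlp, weight_clf, weight_mlp):
    w = np.array([weight_clf, weight_mlp], dtype=float)
    w /= w.sum()  # normalize so the blended output stays a probability
    return w[0] * p_clf + w[1] * p_mlp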

Example #47
        'params': params,
        "estimators": n_estimators,
        'status': STATUS_OK
    }


#num_leaves_distrib = hp.choice('num_leaves', np.arange(7, 4096, dtype = int)) #Better one
#max_depth_distrib = hp.quniform('max_depth', 2, 63,1)
max_depth_distrib = hp.choice('max_depth', np.arange(1, 20, dtype=int))

learning_rate_distrib = hp.quniform('learning_rate', 0.1, 0.3, 0.01)
subsample_for_bin_distrib = hp.quniform('subsample_for_bin', 20000, 300000,
                                        20000)
min_child_samples_distrib = hp.quniform('min_child_samples', 20, 250, 5)
reg_alpha_distrib = hp.quniform('reg_alpha', 0.0, 10, 1)
reg_lambda_distrib = hp.uniform('reg_lambda', 0, 10)

colsample_by_tree_distrib = hp.uniform('colsample_by_tree', 0.6, 1.0)
min_child_weight_distrib = hp.quniform('min_child_weight', 1, 20, 1)

#learning_rate_distrib = hp.loguniform('learning_rate', np.log(0.01), np.log(0.2))

# Define the search space (Omitted boosting_type dart and goss)
space = {
    'colsample_bytree': colsample_by_tree_distrib,
    'max_depth': max_depth_distrib,
    'learning_rate': learning_rate_distrib,
    'subsample_for_bin': subsample_for_bin_distrib,
    'min_child_samples': min_child_samples_distrib,
    'reg_alpha': reg_alpha_distrib,
    'reg_lambda': reg_lambda_distrib,
Example #48
# Model Specs
model_specs = {
    'input_specs': input_specs,
    'output_specs': output_specs,
    'hidden_activation': 'prelu',
    'bn': True,
    'devices': devices,
    'parallel_models': parallel_models,
    'precision': precision,
    'sequential_block': False,
    'optimizer': Adam(learning_rate=0.001),
    'output_activation': 'softmax'
}
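# (hyperparam_space below uses bare uniform/choice, which presumably come from
#  "from hyperopt.hp import uniform, choice" earlier in the original file)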
hyperparam_space = {
    'dropout_rate': uniform('dropout_rate', 0., 0.4),
    'kernel_regularizer_l1': uniform('kernel_regularizer_l1', 0., 0.001),
    'kernel_regularizer_l2': uniform('kernel_regularizer_l2', 0., 0.001),
    'bias_regularizer_l1': uniform('bias_regularizer_l1', 0., 0.001),
    'bias_regularizer_l2': uniform('bias_regularizer_l2', 0., 0.001),
    'compression': uniform('compression', 0.3, 0.98),
    'i_n_layers': choice('i_n_layers', np.arange(1, 2)),
    'c_n_layers': choice('c_n_layers', np.arange(1, 2))
}
hyperparam_space.update(
    {'loss': choice('loss', ['mse', 'categorical_crossentropy'])})

# Make/remove important paths
path_modes = ['rm', 'make']
for path in [prep_data_path, inference_data_path, results_path]:
    path_management(path, modes=path_modes)
Example #49
# ---------------------------------------- 04 ----------------------------------------
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)


dtrain = xgb.DMatrix(data=X_train, label=y_train, missing=-999.0)
dtest = xgb.DMatrix(data=X_test, label=y_test, missing=-999.0)

DATA = xgb.DMatrix(data=X, label=y, missing=-999.0)
# print(np.isnan(dtrain).any())
# print(111)
# print(np.isnan(dtest).any())
evallist = [(dtest, 'eval'), (dtrain, 'train')]

space = {"max_depth": hp.randint("max_depth", 15),
         "n_estimators": hp.randint("n_estimators", 300),
         'learning_rate': hp.uniform('learning_rate', 1e-3, 5e-1),
         'gamma': hp.randint('gamma', 5),
         "subsample": hp.randint("subsample", 5),
         "min_child_weight": hp.randint("min_child_weight", 6),
         }
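# The raw hp.randint draws above are shifted/scaled into usable ranges by
# argsDict_tranform below (e.g. max_depth 0..14 becomes 5..19).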
# ---------------------------------------- 03 ----------------------------------------


def argsDict_tranform(argsDict, isPrint=False):
    # map the raw hp.randint/hp.uniform draws onto the ranges XGBoost actually uses
    argsDict["max_depth"] = argsDict["max_depth"] + 5                    # randint(15) -> 5..19
    argsDict["gamma"] = argsDict["gamma"] * 0.1                          # randint(5)  -> 0.0..0.4
    argsDict['n_estimators'] = argsDict['n_estimators'] + 150            # randint(300) -> 150..449
    argsDict["learning_rate"] = argsDict["learning_rate"] * 0.02 + 0.05  # uniform(1e-3, 0.5) -> ~0.05..0.06
    argsDict["subsample"] = argsDict["subsample"] * 0.1 + 0.5            # randint(5)  -> 0.5..0.9
    argsDict["min_child_weight"] = argsDict["min_child_weight"] + 1      # randint(6)  -> 1..6
    if isPrint:
        print(argsDict)
    return argsDict
Example #50
    def hypertuning(self, bounds, **kwargs):
        space = [hp.uniform(name, a, b) for name, a, b in bounds]
        best = fmin(self.func, space=space, algo=tpe.suggest, **kwargs)

        return best
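# A minimal usage sketch (assumed, not from the original code): bounds is an
# iterable of (name, low, high) triples, and self.func must accept the sampled
# list and return a scalar loss, e.g.:
#
#   best = obj.hypertuning([('x', -5.0, 5.0), ('y', 0.0, 1.0)], max_evals=100)
#   # best -> {'x': ..., 'y': ...}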
Example #51
import numpy as np
import xgboost as xgb
from hyperopt import hp
from sklearn.decomposition import PCA
from imblearn.pipeline import Pipeline
from imblearn.under_sampling import AllKNN

from config import random_seed
from utils.python_utils import quniform_int

steps = [
    ('undersampler', AllKNN(random_state = random_seed)),
    ('pca', PCA(n_components=50,  random_state=random_seed)),
    ('xgb', xgb.XGBClassifier(n_estimators=1000, silent=True, nthread=3, seed=random_seed))
]

model = Pipeline(steps=steps)

params_space = {
    'pca__n_components': quniform_int('n_components', 20, 200, 10),
    'xgb__max_depth': quniform_int('max_depth', 10, 30, 1),
    'xgb__min_child_weight': hp.quniform('min_child_weight', 1, 20, 1),
    'xgb__subsample': hp.uniform('subsample', 0.8, 1),
    'xgb__n_estimators': quniform_int('n_estimators', 1000, 10000, 50),
    'xgb__learning_rate': hp.loguniform('learning_rate', np.log(0.0001), np.log(0.5)) - 0.0001,
    'xgb__gamma': hp.loguniform('gamma', np.log(0.0001), np.log(5)) - 0.0001,
    'xgb__colsample_bytree': hp.quniform('colsample_bytree', 0.5, 1, 0.05)
}
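# quniform_int comes from utils.python_utils and is not shown; a plausible
# implementation (an assumption, not the project's actual code) wraps
# hp.quniform in hyperopt's scope.int so the sampled floats arrive as ints:
from hyperopt import hp
from hyperopt.pyll import scope

def quniform_int(label, low, high, q):
    return scope.int(hp.quniform(label, low, high, q))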
Example #52
    p = int(p)
    d = int(d)
    q = int(q)

    #print(p, d, q)
    try:
        model = ARIMA(data[:-10], order=(p, d, q))
        result = model.fit()
        #result.predict(start=n+1, end=n+10)
        #print(result.aic)
        return result.aic
    except ValueError:
        #print("not use")
        return np.inf


# hp.randint("p", 4) samples integers 0..3
space1 = (hp.randint("p", 4), hp.randint("d", 4), hp.randint("q", 4))
# alternative space definitions, kept commented out: each reassignment would
# overwrite the tuple above, and a single uniform is incompatible with the
# three-argument objective
#space1 = [("p", hp.randint("p", 4)), ("d", hp.randint("d", 4)),
#          ("q", hp.randint("q", 4))]
#space1 = hp.uniform('p', -10, 10)

fmin(fn=arima, space=space1, algo=tpe.suggest, max_evals=100)

# implemented in Python 2; reportedly not working as-is in Python 3
from hyperopt import fmin, tpe, hp
best = fmin(fn=lambda x: x**2,
            space=hp.uniform('x', -10, 10),
            algo=tpe.suggest,
            max_evals=100)
print(best)
Example #53
    def get_hyperparameter_search_space(dataset_properties=None,
                                        optimizer='smac'):
        if optimizer == 'smac':
            cs = ConfigurationSpace()

            hidden_size = UniformIntegerHyperparameter("hidden_size",
                                                       100,
                                                       500,
                                                       default_value=200)
            activation = CategoricalHyperparameter(
                "activation", ["identity", "logistic", "tanh", "relu"],
                default_value="relu")
            solver = CategoricalHyperparameter("solver", ["sgd", "adam"],
                                               default_value="adam")

            alpha = UniformFloatHyperparameter("alpha",
                                               1e-7,
                                               1.,
                                               log=True,
                                               default_value=0.0001)

            learning_rate = CategoricalHyperparameter(
                "learning_rate", ["adaptive", "invscaling", "constant"],
                default_value="constant")

            learning_rate_init = UniformFloatHyperparameter(
                "learning_rate_init",
                1e-4,
                3e-1,
                default_value=0.001,
                log=True)

            tol = UniformFloatHyperparameter("tol",
                                             1e-5,
                                             1e-1,
                                             log=True,
                                             default_value=1e-4)
            momentum = UniformFloatHyperparameter("momentum",
                                                  0.6,
                                                  1,
                                                  q=0.05,
                                                  default_value=0.9)

            nesterovs_momentum = CategoricalHyperparameter(
                "nesterovs_momentum", [True, False], default_value=True)
            beta1 = UniformFloatHyperparameter("beta1",
                                               0.6,
                                               1,
                                               default_value=0.9)
            power_t = UniformFloatHyperparameter("power_t",
                                                 1e-5,
                                                 1,
                                                 log=True,
                                                 default_value=0.5)
            cs.add_hyperparameters([
                hidden_size, activation, solver, alpha, learning_rate,
                learning_rate_init, tol, momentum, nesterovs_momentum, beta1,
                power_t
            ])

            learning_rate_condition = EqualsCondition(learning_rate, solver,
                                                      "sgd")
            momentum_condition = EqualsCondition(momentum, solver, "sgd")
            nesterovs_momentum_condition = EqualsCondition(
                nesterovs_momentum, solver, "sgd")
            beta1_condition = EqualsCondition(beta1, solver, "adam")

            power_t_condition = EqualsCondition(power_t, learning_rate,
                                                "invscaling")

            cs.add_conditions([
                learning_rate_condition, momentum_condition,
                nesterovs_momentum_condition, beta1_condition,
                power_t_condition
            ])

            return cs
        elif optimizer == 'tpe':
            space = {
                'hidden_size':
                hp.randint("mlp_hidden_size", 450) + 50,
                'activation':
                hp.choice('mlp_activation',
                          ["identity", "logistic", "tanh", "relu"]),
                'solver':
                hp.choice('mlp_solver', [("sgd", {
                    'learning_rate':
                    hp.choice('mlp_learning_rate', [
                        ("adaptive", {}), ("constant", {}),
                        ("invscaling", {
                            'power_t': hp.uniform('mlp_power_t', 1e-5, 1)
                        })
                    ]),
                    'momentum':
                    hp.uniform('mlp_momentum', 0.6, 1),
                    'nesterovs_momentum':
                    hp.choice('mlp_nesterovs_momentum', [True, False])
                }), ("adam", {
                    'beta1': hp.uniform('mlp_beta1', 0.6, 1)
                })]),
                'alpha':
                hp.loguniform('mlp_alpha', np.log(1e-7), np.log(1e-1)),
                'learning_rate_init':
                hp.loguniform('mlp_learning_rate_init', np.log(1e-6),
                              np.log(1e-1)),
                'tol':
                hp.loguniform('mlp_tol', np.log(1e-5), np.log(1e-1))
            }

            return space
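# The tpe branch nests conditional parameters as ('value', {...}) tuples; a
# hypothetical helper (not part of this codebase) to flatten one sampled
# configuration into plain keyword arguments:
from hyperopt.pyll.stochastic import sample

def flatten_mlp_config(config):
    flat = dict(config)
    solver, solver_params = flat.pop('solver')
    flat['solver'] = solver
    for key, value in solver_params.items():
        if isinstance(value, tuple):  # e.g. ('invscaling', {'power_t': ...})
            flat[key] = value[0]
            flat.update(value[1])
        else:
            flat[key] = value
    return flat

# e.g. flatten_mlp_config(sample(space)) -> kwargs suitable for sklearn's MLP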
Example #54
            'bagging_temperature':
            hyperopt.hp.uniform('bagging_temperature', 0.0, 100),
            'random_strength':
            hyperopt.hp.uniform('random_strength', 0.0, 100),
            'scale_pos_weight':
            hyperopt.hp.uniform('scale_pos_weight', 1.0, 16.0),
            'l2_leaf_reg':
            hp.loguniform('l2_leaf_reg', 0, np.log(10)),
            'n_clusters':
            scope.int(hp.quniform('n_clusters', 2, 6, 1)),
            'n_estimators':
            hp.choice('n_estimators', [500, 1000])
        }

    elif cls_method == "rf":
        space = {
            'max_features': hp.uniform('max_features', 0, 1),
            'n_estimators': hp.choice('n_estimators', [500, 1000]),
            'n_clusters': scope.int(hp.quniform('n_clusters', 2, 6, 1)),
        }

    elif cls_method == "xgboost":
        space = {
            'learning_rate': hp.uniform("learning_rate", 0, 1),
            'subsample': hp.uniform("subsample", 0.5, 1),
            'max_depth': scope.int(hp.quniform('max_depth', 4, 30, 1)),
            'n_estimators': hp.choice('n_estimators', [500, 1000]),
            'colsample_bytree': hp.uniform("colsample_bytree", 0.5, 1),
            'n_clusters': scope.int(hp.quniform('n_clusters', 2, 6, 1)),
            'min_child_weight':
            scope.int(hp.quniform('min_child_weight', 1, 6, 1))
        }
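# (scope.int above is hyperopt's integer cast, typically imported with
#  "from hyperopt.pyll import scope"; it turns the quantized float draws from
#  hp.quniform into ints at evaluation time)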
Example #55
    def get_hyperparameter_search_space(dataset_properties=None,
                                        optimizer='smac'):
        if optimizer == 'smac':
            epsilon = CategoricalHyperparameter("epsilon",
                                                [1e-4, 1e-3, 1e-2, 1e-1, 1],
                                                default_value=1e-4)
            C = UniformFloatHyperparameter("C",
                                           0.03125,
                                           32768,
                                           log=True,
                                           default_value=1.0)
            # No linear kernel here, because we have liblinear
            kernel = CategoricalHyperparameter(
                name="kernel",
                choices=["rbf", "poly", "sigmoid"],
                default_value="rbf")
            degree = UniformIntegerHyperparameter("degree",
                                                  2,
                                                  5,
                                                  default_value=3)
            gamma = UniformFloatHyperparameter("gamma",
                                               3.0517578125e-05,
                                               8,
                                               log=True,
                                               default_value=0.1)
            coef0 = UniformFloatHyperparameter("coef0", -1, 1, default_value=0)
            # probability is no hyperparameter, but an argument to the SVM algo
            shrinking = CategoricalHyperparameter("shrinking",
                                                  ["True", "False"],
                                                  default_value="True")
            tol = UniformFloatHyperparameter("tol",
                                             1e-5,
                                             1e-1,
                                             default_value=1e-3,
                                             log=True)
            # cache size is not a hyperparameter, but an argument to the program!
            max_iter = UnParametrizedHyperparameter("max_iter", 2000)

            cs = ConfigurationSpace()
            cs.add_hyperparameters([
                epsilon, C, kernel, degree, gamma, coef0, shrinking, tol,
                max_iter
            ])

            degree_depends_on_poly = EqualsCondition(degree, kernel, "poly")
            coef0_condition = InCondition(coef0, kernel, ["poly", "sigmoid"])
            cs.add_condition(degree_depends_on_poly)
            cs.add_condition(coef0_condition)

            return cs
        elif optimizer == 'tpe':
            from hyperopt import hp
            coef0 = hp.uniform("libsvm_coef0", -1, 1)
            space = {
                'C':
                hp.loguniform('libsvm_C', np.log(0.03125), np.log(32768)),
                'gamma':
                hp.loguniform('libsvm_gamma', np.log(3.0517578125e-5),
                              np.log(8)),
                'shrinking':
                hp.choice('libsvm_shrinking', ["True", "False"]),
                'tol':
                hp.loguniform('libsvm_tol', np.log(1e-5), np.log(1e-1)),
                'max_iter':
                hp.choice('libsvm_max_iter', [2000]),
                'kernel':
                hp.choice('libsvm_kernel',
                          [("poly", {
                              'degree': hp.randint('libsvm_degree', 4) + 2,
                              'coef0': coef0
                          }), ("rbf", {}), ("sigmoid", {
                              'coef0': coef0
                          })])
            }

            init_trial = {
                'C': 1,
                'gamma': 0.1,
                'shrinking': "True",
                'tol': 1e-3,
                'max_iter': 2000,
                'kernel': ("rbf", {})
            }

            return space
Example #56

import numpy as np
from hyperopt import fmin, tpe, hp, Trials
from hyperopt import STATUS_OK


def my_fcn(x):
    return np.sin(x[0] * (x[1]**2 - x[2]) / x[3]) * np.cos(x[0])


x_mins_dict = fmin(
    fn=my_fcn,
    space=[
        hp.uniform('x_1', -100, 100),  # search range for x[0] from -100 to 100
        hp.uniform('x_2', -200, 100),  # search range for x[1] from -200 to 100
        hp.uniform('x_3', 0, 50),  # search range for x[2] from 0 to 50
        hp.uniform('x_4', -100, -20)  # search range for x[3] from -100 to -20
    ],
    algo=tpe.suggest,
    max_evals=500  # stop searching after 500 iterations
)
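# Trials and STATUS_OK are imported above but unused here; passing
# trials=Trials() to fmin would record per-iteration results for inspection.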


print(x_mins_dict)  # the output of fmin is a dictionary
# index by key rather than relying on dict ordering, so the list lines up with x[0]..x[3]
x_mins = [x_mins_dict[k] for k in ('x_1', 'x_2', 'x_3', 'x_4')]
print("my_fcn(x_mins) = " + str(my_fcn(x_mins)))  # print function result at x_mins
Example #57
    def get_hyperparameter_search_space(dataset_properties=None,
                                        optimizer='smac'):
        if optimizer == 'smac':
            cs = ConfigurationSpace()
            n_estimators = UniformIntegerHyperparameter(
                name="n_estimators", lower=50, upper=500,
                default_value=50, log=False)
            max_features = UniformFloatHyperparameter(
                "max_features", 0., 1., default_value=0.5)
            bootstrap = CategoricalHyperparameter(
                "bootstrap", ["True", "False"], default_value="True")
            bootstrap_features = CategoricalHyperparameter(
                "bootstrap_features", ["True", "False"], default_value="False")
            sampling_strategy = CategoricalHyperparameter(
                name="sampling_strategy",
                choices=["majority", "not minority", "not majority", "all"],
                default_value="not minority")
            replacement = CategoricalHyperparameter(
                "replacement", ["True", "False"], default_value="False")
            max_depth = UniformIntegerHyperparameter(
                name="max_depth", lower=1, upper=10, default_value=1, log=False)
            cs.add_hyperparameters([
                n_estimators, max_features, bootstrap, bootstrap_features,
                sampling_strategy, replacement, max_depth
            ])
            return cs
        elif optimizer == 'tpe':
            from hyperopt import hp
            space = {
                'n_estimators':
                hp.randint('bal_bagging_n_estimators', 451) + 50,
                'max_features':
                hp.uniform('bal_bagging_max_features', 0, 1),
                'bootstrap':
                hp.choice('bal_bagging_bootstrap', ["True", "False"]),
                'bootstrap_features':
                hp.choice('bal_bagging_bootstrap_features', ["True", "False"]),
                'sampling_strategy':
                hp.choice('bal_bagging_sampling_strategy',
                          ["majority", "not minority", "not majority", "all"]),
                'replacement':
                hp.choice('bal_bagging_replacement', ["True", "False"]),
                'max_depth':
                hp.randint('bal_bagging_max_depth', 10) + 1
            }
            init_trial = {
                'n_estimators': 10,
                'max_features': 0.5,
                'bootstrap': "True",
                'bootstrap_features': "False",
                'sampling_strategy': "not minority",
                'replacement': "False",
                'max_depth': 1
            }
            return space
Example #58
        f = obj_MVRSM(x)
        #print('Objective value: ', f)
        return {'loss': f, 'status': STATUS_OK}

    # Two algorithms used within HyperOpt framework (random search and TPE)
    algo = rand.suggest
    algo2 = partial(tpe.suggest, n_startup_jobs=rand_evals)

    # Define search space for HyperOpt
    var = [None] * d  # variables for HyperOpt and random search
    for i in range(d):
        if i < num_int:
            var[i] = hp.quniform('var_d' + str(i), lb[i], ub[i], 1)  # integer variables
        else:
            var[i] = hp.uniform('var_c' + str(i), lb[i], ub[i])  # continuous variables

    print("Start HyperOpt trials")
    for i in range(n_trials):
        current_time = time.time()  # time when starting the HO and RS algorithm

        trials_HO = Trials()
        time_start = time.time()  # Start timer
        hypOpt = fmin(hyp_obj,
                      var,
                      algo2,
                      max_evals=max_evals,
                      trials=trials_HO)  # Run HyperOpt
        total_time_HypOpt = time.time() - time_start  # End timer
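        # Note: hp.quniform returns floats even for the integer variables, so
        # the objective (hyp_obj -> obj_MVRSM) presumably rounds the first
        # num_int entries before use.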
Example #59
    return score


# define a search space

space = []

i = 0
for y in range(h):
    for x in range(w):
        si = str(i)
        #space.append([hp.quniform("r-"+si, 0, 255, 1), hp.quniform("g-"+si, 0, 255, 1), hp.quniform("b-"+si, 0, 255, 1)])
        #space.append([hp.uniform("r-"+si, 0, 255), hp.uniform("g-"+si, 0, 255), hp.uniform("b-"+si, 0, 255)])
        space.append([
            hp.uniform("r-" + si, 0, 1),
            hp.uniform("g-" + si, 0, 1),
            hp.uniform("b-" + si, 0, 1)
        ])
        i += 1

# minimize the objective over the space

# alternative: algo=hyperopt.rand.suggest for random search
best = fmin(objective, space, algo=tpe.suggest, max_evals=1000)

print(best)
# -> a flat dict with one best value per label, e.g. {'r-0': ..., 'g-0': ..., 'b-0': ..., ...}
print(space_eval(space, best))
# -> the same values arranged back into the nested [r, g, b] structure of the space
Example #60
        'params': params
    }


def get_args():
    parser = argparse.ArgumentParser(description='Parameters optimization options.')
    parser.add_argument('--db_name', type=str, default='poker', help='Name of mongo database where to store data')
    parser.add_argument('--exp_name', type=str, default='exp_{}'.format(datetime.now().strftime("%d.%m.%Y-%H:%M")),
                        help='Experiment name.')
    parser.add_argument('--n_jobs', type=int,  default=cpu_count(), help='Number of workers. Negative for all cpus')
    parser.add_argument('--num_evals', type=int, default=10000000, help='Number of evaluations')
    args = parser.parse_args()
    return args


SPACE = [[hp.uniform('p1_1', 1., 2.), hp.uniform('p1_2', 0.3, 1.5)],
         [hp.uniform('p2_1', 0.3, 1.5), hp.uniform('p2_2', 0.5, 1.5)],
         [hp.uniform('p3_1', 0.5, 2.), hp.uniform('p3_2', 0.2, 1.)],
         [hp.uniform('p4_1', 0.8, 3.), hp.uniform('p4_2', 0.3, 1.5)],
         [hp.uniform('p5_1', 0.5, 2.), hp.uniform('p5_2', 0.2, 1.)],
         [hp.uniform('p6_1', 0.3, 1.5), hp.uniform('p6_2', 0.3, 1.5)],
         hp.uniform('p7', 0.5, 2),
         [hp.uniform('p8_1', 0.5, 2.), hp.uniform('p8_2', 0.2, 1.)],
         [hp.uniform('p9_1', 0.3, 1.), hp.uniform('p9_2', 0.3, 1.5)],
]
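# fmin preserves this nesting: the objective receives nine entries, most of
# them two-element lists, matching the structure defined above.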


if __name__ == '__main__':
    args = get_args()

    # create temp directory