Example #1
    def fit(self, X, y):
        class1_percentile = sum(y<1) / len(y) * 100
        class2_percentile = sum(y<2) / len(y) * 100
        class3_percentile = sum(y<3) / len(y) * 100
        threshold1_prior = np.percentile(X, class1_percentile)
        threshold2_prior = np.percentile(X, class2_percentile)
        threshold3_prior = np.percentile(X, class3_percentile)
        threshold2_delta_prior = threshold2_prior - threshold1_prior
        threshold3_delta_prior = threshold3_prior - threshold2_prior
        prior_std = (np.percentile(X, 99) - np.percentile(X, 1)) / 3
        space = {
            "threshold1": hp.normal("threshold1", threshold1_prior, prior_std),
            "threshold2_delta": hp.normal("threshold2_delta", threshold2_delta_prior, prior_std),
            "threshold3_delta": hp.normal("threshold3_delta", threshold3_delta_prior, prior_std)
        }

        partial_run = partial(self._run_trial, X, y)

        trials = Trials()
        fmin(partial_run, space=space,
             algo=tpe.suggest,
             max_evals=self.n_iter, rstate=np.random.RandomState(self.random_state), trials=trials)
        
        self.coef_ = trials.best_trial["result"]["coef"]
        return self
def bo_tpe_svr(X, y):
    starttime = datetime.datetime.now()

    def objective(params):
        params = {
            'C': abs(float(params['C'])),
            "kernel": str(params['kernel']),
            'epsilon': abs(float(params['epsilon'])),
        }
        clf = SVR(gamma='scale', **params)
        score = -np.mean(
            cross_val_score(
                clf, X, y, cv=3, n_jobs=-1, scoring="neg_mean_squared_error"))

        return {'loss': score, 'status': STATUS_OK}

    space = {
        'C': hp.normal('C', 0, 50),
        "kernel": hp.choice('kernel', ['poly', 'rbf', 'sigmoid']),
        'epsilon': hp.normal('epsilon', 0, 1),
    }

    trials_svr = Trials()
    best_svr = fmin(fn=objective,
                    space=space,
                    algo=tpe.suggest,
                    max_evals=20,
                    trials=trials_svr)
    print("SVM MSE score:%.4f" % min(trials_svr.losses()))
    endtime = datetime.datetime.now()
    process_time_svr = endtime - starttime
    print("程序执行时间(秒):{}".format(process_time_svr))
    print("最佳超参数值集合:", best_svr)
    save_model_object(best_svr, 'BO-TPE', 'SVR', 'SVR')
    return min(trials_svr.losses()), process_time_svr, best_svr
def pyll_example():
    import hyperopt.pyll
    from hyperopt.pyll import scope
    from hyperopt import fmin, tpe, hp, STATUS_OK, Trials

    @scope.define  # this decorator is required, otherwise scope has no attribute 'foo'
    def foo(a, b=0):
        # print the values of a and b
        print('running foo a={},b={}'.format(a, b))
        return a + b / 2

    # -- this will print 0, foo is called as usual.
    print(foo(0))

    # In a search-space description you can use `foo` just like ordinary Python.
    # These two calls do not actually invoke foo; they only record that foo
    # needs to be called when the graph is evaluated.

    space1 = scope.foo(hp.uniform('a', 0, 10))
    space2 = scope.foo(hp.uniform('a', 0, 10), hp.normal('b', 0, 1))

    # -- this will print a pyll.Apply node

    # print("space1=", space1)
    # -- this will draw a sample by running foo()
    # print(hyperopt.pyll.stochastic.sample(space1))
    print(hyperopt.pyll.stochastic.sample(space2))
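
A space built this way can be passed to fmin like any other. Below is a minimal, hypothetical sketch (not part of the original snippet); it assumes foo has already been registered via @scope.define as shown above.

def pyll_fmin_example():
    from hyperopt.pyll import scope
    from hyperopt import fmin, tpe, hp, Trials

    # graph node: hyperopt will evaluate foo(a, b) for each sampled a, b
    space = scope.foo(hp.uniform('a', 0, 10), hp.normal('b', 0, 1))

    def objective(value):
        # `value` is the evaluated graph, i.e. foo(a, b) for this sample
        return value ** 2

    trials = Trials()
    best = fmin(objective, space=space, algo=tpe.suggest,
                max_evals=10, trials=trials)
    print(best)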
Example #4
 def parse_search_space(self, learner_space):
     '''
     search space is dictionary
     {'n_estimators': ('uniform', 1, 1000, 'discrete')}
     '''
     search_space = dict()
     for k, v in learner_space.items():
         if v[2] == 'samples':
             v = (v[0], v[1], min(100, self.X.shape[0] // len(self.kf) - 1), v[3])
         if v[3] == 'discrete':
             search_space[k] = hp.quniform(k, v[1], v[2], 1)
         elif v[0] == 'uniform':
             search_space[k] = hp.uniform(k, v[1], v[2])
         elif v[0] == 'loguniform':
             search_space[k] = hp.loguniform(k, v[1], v[2])
         elif v[0] == 'normal':
             search_space[k] = hp.normal(k, v[1], v[2])
         elif v[0] == 'lognormal':
             search_space[k] = hp.lognormal(k, v[1], v[2])
         elif v[0] == 'quniform':
             search_space[k] = hp.quniform(k, v[1], v[2], v[3])
         elif v[0] == 'qloguniform':
             search_space[k] = hp.qloguniform(k, v[1], v[2], v[3])
         elif v[0] == 'qnormal':
             search_space[k] = hp.qnormal(k, v[1], v[2], v[3])
         elif v[0] == 'qlognormal':
             search_space[k] = hp.qlognormal(k, v[1], v[2], v[3])
     return search_space
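
For illustration, a hedged usage sketch of the tuple format described in the docstring (the parameter names here are made up):

# learner_space = {
#     'n_estimators': ('uniform', 1, 1000, 'discrete'),      # -> hp.quniform('n_estimators', 1, 1000, 1)
#     'learning_rate': ('loguniform', -5, 0, 'continuous'),  # -> hp.loguniform('learning_rate', -5, 0)
# }
# search_space = self.parse_search_space(learner_space)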
Example #5
 def normal_from_bounds(label, left_bound, right_bound, quantization=None):
     mean = (left_bound + right_bound) / 2.0
     sigma = (right_bound - left_bound) / 4.0
     hp_variable = (hp.normal(label, mean, sigma) if quantization is None
                    else hp.qnormal(label, mean, sigma, quantization))
     dist = stats.norm(mean, sigma)
     return Parameter(label, mean, hp_variable, dist.logpdf, dist.cdf)
 def _infer_hp_space(key,value):
     if isinstance(value,tuple) and len(value) == 2:
         return hp.uniform(key,value[0],value[1])
     elif isinstance(value,dict):
         return hp.normal(key,value["mu"],value["sigma"])
     else:
         raise Exception(f"Space {key} for optimization is not recognized: {value}; it should be a tuple or a dict")
Example #7
def main():

    SPACE = {
        'mu': hp.normal('mu', 3.5, 2),
        'sigma': hp.lognormal('sigma', 2, 4)
    }

    def objective(hps):
        return {
            'loss': evaluate(hps),
            'status': STATUS_OK,
            # -- store other results like this
            'eval_time': time.time(),
            'other_stuff': {
                'type': None,
                'value': [0, 1, 2]
            },
            # -- attachments are handled differently
            'attachments': {
                'time_module': pickle.dumps(time.time)
            }
        }

    trials = Trials()

    best = fmin(objective,
                space=SPACE,
                algo=tpe.suggest,
                max_evals=30,
                trials=trials)

    print(best)
def blja_test():
    space = {
        'n_estimators': hp.qnormal('n_estimators', 1000, 200, 10),
        'learning_rate': hp.normal('learning_rate', 0.1, 0.05)
    }
    for x in range(1000):
        print(pyll.stochastic.sample(space))
Example #9
def create_space(name, func, *args):
    """Create a hyperopt space for the given parameter."""
    _coconut_match_to = func
    _coconut_case_check_0 = False
    if _coconut_match_to == "choice":
        _coconut_case_check_0 = True
    if _coconut_case_check_0:
        return hp.choice(name, *args)
    if not _coconut_case_check_0:
        if _coconut_match_to == "randrange":
            _coconut_case_check_0 = True
        if _coconut_case_check_0:
            start, stop, step = args
            if step != 1:
                raise ValueError("the hyperopt backend only supports a randrange step size of 1")
# despite being called randint, hp.randint is exclusive
            return start + hp.randint(name, stop - start)
    if not _coconut_case_check_0:
        if _coconut_match_to == "uniform":
            _coconut_case_check_0 = True
        if _coconut_case_check_0:
            return hp.uniform(name, *args)
    if not _coconut_case_check_0:
        if _coconut_match_to == "normalvariate":
            _coconut_case_check_0 = True
        if _coconut_case_check_0:
            return hp.normal(name, *args)
    raise TypeError("insufficiently specified parameter {_coconut_format_0}".format(_coconut_format_0=(name)))
Example #10
def build_dist_func_instance(hp_name, func, args, hp_size=None):
    '''
    args:
        hp_name: the name of the hyperparameter associated with this func
        func: name of hyperopt dist func 
        args: list of float values

    processing:
        instantiate the named dist func with specified args

    return:
        instance of hyperopt dist func
    '''
    if func == "choice":
        dist = hp.choice(hp_name, args)

    elif func == "randint":
        max_value = 65535 if len(args) == 0 else args[0]

        # specify "size=None" to workaround hyperopt bug
        if hp_size:
            # let size default to () (error if we try to set it explicitly)
            dist = hp.randint(hp_name, max_value)
        else:
            dist = hp.randint(hp_name, max_value, size=None)

    elif func == "uniform":
        arg_check(func, args, count=2)
        dist = hp.uniform(hp_name, *args)

    elif func == "normal":
        arg_check(func, args, count=2)
        dist = hp.normal(hp_name, *args)

    elif func == "loguniform":
        arg_check(func, args, count=2)
        dist = hp.loguniform(hp_name, *args)

    elif func == "lognormal":
        arg_check(func, args, count=2)
        dist = hp.lognormal(hp_name, *args)

    elif func == "quniform":
        arg_check(func, args, count=3)
        dist = hp.quniform(hp_name, *args)

    elif func == "qnormal":
        arg_check(func, args, count=3)
        dist = hp.qnormal(hp_name, *args)

    elif func == "qloguniform":
        arg_check(func, args, count=3)
        dist = hp.qloguniform(hp_name, *args)

    elif func == "qlognormal":
        arg_check(func, args, count=3)
        dist = hp.qlognormal(hp_name, *args)

    return dist
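
A hedged usage sketch (it assumes arg_check merely validates the number of arguments):

lr = build_dist_func_instance('lr', 'lognormal', [-6.0, 1.0])
batch_size = build_dist_func_instance('batch_size', 'quniform', [32.0, 512.0, 32.0])
space = {'lr': lr, 'batch_size': batch_size}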
Example #11
def optimize(random_state=23):
    space = {
        f'{i}'.zfill(3): hp.normal(f'{i}', VAL, 1.5)
        for i, VAL in enumerate(INITIAL_EMB)
    }

    best = fmin(score, space, algo=tpe.suggest, max_evals=5000)
    return best
Example #12
 def normal_from_bounds(label, left_bound, right_bound, quantization=None):
     mean = (left_bound + right_bound) / 2.0
     sigma = (right_bound - left_bound) / 4.0
     hp_variable = (
         hp.normal(label, mean, sigma) if quantization is None else hp.qnormal(label, mean, sigma, quantization)
     )
     dist = stats.norm(mean, sigma)
     return Parameter(label, mean, hp_variable, dist.logpdf, dist.cdf)
Example #13
 def test_read_normal(self):
     # 0 float
     # 1   hyperopt_param
     # 2     Literal{l0eg_alpha}
     # 3     normal
     # 4       Literal{0.0}
     # 5       Literal{1.0}
     normal = hp.normal("l0eg_alpha", 0.0, 1.0).inputs()[0].inputs()[1]
     ret = self.pyll_reader.read_normal(normal, "l0eg_alpha")
     expected = configuration_space.NormalFloatHyperparameter(
         "l0eg_alpha", 0.0, 1.0)
     self.assertEqual(expected, ret)
Example #14
 def __SpaceBuild(self,SpaceDic):
     """
     建立参数空间
     输入为参数空间Dictionary
     """
     from hyperopt import fmin, tpe, hp
     self.spaceparams={}
     self.choice={}
     for key in SpaceDic.keys():
         if self.SpaceDic[key]['type']=='choice':
             self.spaceparams.update({key:hp.choice(key,self.SpaceDic[key]['Content'])})
         if self.SpaceDic[key]['type']=='uniform':
             self.spaceparams.update({key:hp.uniform(key,*self.SpaceDic[key]['Content'])})#[0],self.SpaceDic[key]['Content'][1]
         if self.SpaceDic[key]['type']=='normal': 
             self.spaceparams.update({key:hp.normal(key,*self.SpaceDic[key]['Content'])}) #[0],self.SpaceDic[key]['Content'][1]
Example #15
def many_dists():
    a = hp.choice('a', [0, 1, 2])
    b = hp.randint('b', 10)
    c = hp.uniform('c', 4, 7)
    d = hp.loguniform('d', -2, 0)
    e = hp.quniform('e', 0, 10, 3)
    f = hp.qloguniform('f', 0, 3, 2)
    g = hp.normal('g', 4, 7)
    h = hp.lognormal('h', -2, 2)
    i = hp.qnormal('i', 0, 10, 2)
    j = hp.qlognormal('j', 0, 2, 1)
    k = hp.pchoice('k', [(.1, 0), (.9, 1)])
    z = a + b + c + d + e + f + g + h + i + j + k
    return {'loss': scope.float(scope.log(1e-12 + z ** 2)),
            'status': base.STATUS_OK}
Example #16
 def get_hp_space():
     space_training = {'batch_size': hopt_wrapper.quniform_int('batch_size', 50, 500, 1),
                       'temporal_order': hopt_wrapper.qloguniform_int('temporal_order', log(3), log(20), 1)
                       }
     space_regularization = {'dropout_probability': hp.choice('dropout', [
         0.0,
         hp.normal('dropout_probability', 0.5, 0.1)
     ]),
         'weight_decay_coeff': hp.choice('weight_decay_coeff', [
             0.0,
             hp.uniform('a', 1e-4, 1e-4)
         ])
     }
     space_training.update(space_regularization)
     return space_training
Example #17
def many_dists():
    a = hp.choice("a", [0, 1, 2])
    b = hp.randint("b", 10)
    bb = hp.randint("bb", 12, 25)
    c = hp.uniform("c", 4, 7)
    d = hp.loguniform("d", -2, 0)
    e = hp.quniform("e", 0, 10, 3)
    f = hp.qloguniform("f", 0, 3, 2)
    g = hp.normal("g", 4, 7)
    h = hp.lognormal("h", -2, 2)
    i = hp.qnormal("i", 0, 10, 2)
    j = hp.qlognormal("j", 0, 2, 1)
    k = hp.pchoice("k", [(0.1, 0), (0.9, 1)])
    z = a + b + bb + c + d + e + f + g + h + i + j + k
    return {"loss": scope.float(scope.log(1e-12 + z ** 2)), "status": base.STATUS_OK}
Example #18
File: ModalBuild.py  Project: ZZY18/sgpy
 def __SpaceBuild(self):
     """
     Build the search space.
     """
     from hyperopt import fmin, tpe, hp
     self.spaceparams={}
     self.choice={}
     for key in self.params.keys():
         if self.params[key]['type']=='choice':   
             self.choice.update({key:self.params[key]['Content']})
             self.spaceparams.update({key:hp.choice(key,self.params[key]['Content'])})
         if self.params[key]['type']=='uniform': 
             self.spaceparams.update({key:hp.uniform(key,self.params[key]['Content'][0],self.params[key]['Content'][1])})
         if self.params[key]['type']=='normal': 
             self.spaceparams.update({key:hp.normal(key,self.params[key]['Content'][0],self.params[key]['Content'][1])}) 
Example #19
def do_test(folds):
    df = load_train()
    space = {
        'n_estimators': hp.qnormal('n_estimators', 1000, 200, 10),
        'learning_rate': hp.normal('learning_rate', 0.1, 0.05)
    }
    trials = Trials()
    best = fmin(lambda s: simple_cross_val(folds, s, df, trials),
                space=space,
                algo=tpe.suggest,
                trials=trials,
                max_evals=100)

    print(best)
    print(get_the_best_loss(trials))
def get_hp_space():
    space_training = {
        'temporal_order':
        hopt_wrapper.qloguniform_int('temporal_order', log(3), log(20), 1)
    }

    space_regularization = {
        'dropout_probability':
        hp.choice('dropout',
                  [0.0, hp.normal('dropout_probability', 0.5, 0.1)]),
        'weight_decay_coeff':
        hp.choice('weight_decay_coeff',
                  [0.0, hp.uniform('a', 1e-4, 1e-4)]),
    }

    space_training.update(space_regularization)
    return space_training
Example #21
def do_test(folds):
    df = load_train()
    space = {
        'n_estimators': hp.qnormal('n_estimators', 1000, 200, 10),
        'learning_rate': hp.normal('learning_rate', 0.1, 0.05),
        'gamma': hp.choice('gamma', [0, 0.1, 0.01, 0.2]),
        'max_depth': hp.choice('max_depth', [2, 3, 4, 5]),
        'min_child_weight': hp.choice('min_child_weight', [1, 2, 3])
    }
    trials = Trials()
    best = fmin(lambda s: man_id_cross_val(folds, s, df, trials),
                space=space,
                algo=tpe.suggest,
                trials=trials,
                max_evals=10000)

    print(best)
    print(get_the_best_loss(trials))
Example #22
def svr_sigmoid(name,
                C=None,
                epsilon=None,
                gamma=None,
                coef0=None,
                shrinking=None,
                tol=None,
                max_iter=None,
                verbose=False,
                random_state=None,
                cache_size=_svc_default_cache_size):
    """
    Return a pyll graph with hyperparameters that will construct
    a sklearn.svm.SVR model with a sigmoid kernel.

    """
    def _name(msg):
        return '%s.%s_%s' % (name, 'sigmoid', msg)

    # -- tanh(K(x, y) + coef0)
    coef0nz = hp.choice(_name('coef0nz'), [0, 1])
    coef0 = hp.normal(_name('coef0'), 0.0, 1.0)
    sigm_coef0 = coef0nz * coef0

    rval = scope.sklearn_SVR(
        kernel='sigmoid',
        C=_svc_C(name + '.sigmoid') if C is None else C,
        epsilon=hp.lognormal(
            _name("epsilon"),
            np.log(1e-3),
            np.log(1e3)),
        gamma=_svc_gamma(name + '.sigmoid') if gamma is None else gamma,
        coef0=sigm_coef0 if coef0 is None else coef0,
        shrinking=hp_bool(
            _name('shrinking')) if shrinking is None else shrinking,
        tol=_svc_tol(name + '.sigmoid') if tol is None else tol,
        max_iter=(_svc_max_iter(name + '.sigmoid')
                  if max_iter is None else max_iter),
        verbose=verbose,
        random_state=_random_state(_name('rstate'), random_state),
        cache_size=cache_size)
    return rval
Example #23
def get_space(args, btc_usd_frame, init_c1, init_c2, init_c3):
    if args.coefficients:
        c1 = args.coefficients[0]
        c2 = args.coefficients[1]
        c3 = args.coefficients[2]
    else:
        c1 = hp.normal('c1', init_c1['mean'], init_c1['sigma'])
        c2 = hp.lognormal('c2', init_c2['mean'], init_c2['sigma'])
        c3 = hp.lognormal('c3', init_c3['mean'], init_c3['sigma'])

    return {
        'c1': c1,
        'c2': c2,
        'c3': c3,
        'current_price': args.current_price,
        'total_btc': args.total_btc,
        'num_runs': args.num_runs,
        'num_weeks': args.num_weeks,
        'btc_usd_frame': btc_usd_frame
    }
Example #24
def convert_params(params):
    """

    :param params: hyperparameter dictionary as defined in params.py
    :return: search space for hyperopt
    """
    space = []
    for param in params:
        if params[param].get_type() == 'integer':
            space.append(
                hp.quniform(params[param].get_name(), params[param].get_min(),
                            params[param].get_max(), params[param].interval))
        elif params[param].get_type() == 'continuous':
            if params[param].get_scale(
            ) == 'linear' and params[param].get_dist() == 'uniform':
                space.append(
                    hp.uniform(params[param].get_name(),
                               params[param].get_min(),
                               params[param].get_max()))
            elif params[param].get_scale(
            ) == 'linear' and params[param].get_dist() == 'normal':
                space.append(
                    hp.normal(params[param].get_name(),
                              params[param].get_min(),
                              params[param].get_max()))
            elif params[param].get_scale() == 'log' and params[param].get_dist(
            ) == 'uniform':
                space.append(
                    hp.loguniform(params[param].get_name(),
                                  params[param].get_min(),
                                  params[param].get_max()))
            elif params[param].get_scale() == 'log' and params[param].get_dist(
            ) == 'normal':
                space.append(
                    hp.lognormal(params[param].get_name(),
                                 params[param].get_min(),
                                 params[param].get_max()))

    return space
Example #25
def svr_sigmoid(name,
                C=None,
                epsilon=None,
                gamma=None,
                coef0=None,
                shrinking=None,
                tol=None,
                max_iter=None,
                verbose=False,
                random_state=None,
                cache_size=_svc_default_cache_size):
    """
    Return a pyll graph with hyperparameters that will construct
    a sklearn.svm.SVR model with a sigmoid kernel.

    """
    def _name(msg):
        return '%s.%s_%s' % (name, 'sigmoid', msg)

    # -- tanh(K(x, y) + coef0)
    coef0nz = hp.choice(_name('coef0nz'), [0, 1])
    coef0 = hp.normal(_name('coef0'), 0.0, 1.0)
    sigm_coef0 = coef0nz * coef0

    rval = scope.sklearn_SVR(
        kernel='sigmoid',
        C=_svc_C(name + '.sigmoid') if C is None else C,
        epsilon=hp.lognormal(_name("epsilon"), np.log(1e-3), np.log(1e3)),
        gamma=_svc_gamma(name + '.sigmoid') if gamma is None else gamma,
        coef0=sigm_coef0 if coef0 is None else coef0,
        shrinking=hp_bool(_name('shrinking'))
        if shrinking is None else shrinking,
        tol=_svc_tol(name + '.sigmoid') if tol is None else tol,
        max_iter=(_svc_max_iter(name +
                                '.sigmoid') if max_iter is None else max_iter),
        verbose=verbose,
        random_state=_random_state(_name('rstate'), random_state),
        cache_size=cache_size)
    return rval
Example #26
    def createHyperoptSpace(self, lockedValues=None):
        name = self.root

        if lockedValues is None:
            lockedValues = {}

        if 'anyOf' in self.config or 'oneOf' in self.config:
            data = []
            if 'anyOf' in self.config:
                data = self.config['anyOf']
            else:
                data = self.config['oneOf']

            subSpaces = [
                Hyperparameter(param, self, name + "." +
                               str(index)).createHyperoptSpace(lockedValues)
                for index, param in enumerate(data)
            ]
            for index, space in enumerate(subSpaces):
                space["$index"] = index

            choices = hp.choice(self.hyperoptVariableName, subSpaces)

            return choices
        elif 'enum' in self.config:
            if self.name in lockedValues:
                return lockedValues[self.name]

            choices = hp.choice(self.hyperoptVariableName, self.config['enum'])
            return choices
        elif 'constant' in self.config:
            if self.name in lockedValues:
                return lockedValues[self.name]

            return self.config['constant']
        elif self.config['type'] == 'object':
            space = {}
            for key in self.config['properties'].keys():
                config = self.config['properties'][key]
                space[key] = Hyperparameter(
                    config, self,
                    name + "." + key).createHyperoptSpace(lockedValues)
            return space
        elif self.config['type'] == 'number':
            if self.name in lockedValues:
                return lockedValues[self.name]

            mode = self.config.get('mode', 'uniform')
            scaling = self.config.get('scaling', 'linear')

            if mode == 'uniform':
                min = self.config.get('min', 0)
                max = self.config.get('max', 1)
                rounding = self.config.get('rounding', None)

                if scaling == 'linear':
                    if rounding is not None:
                        return hp.quniform(self.hyperoptVariableName, min, max,
                                           rounding)
                    else:
                        return hp.uniform(self.hyperoptVariableName, min, max)
                elif scaling == 'logarithmic':
                    if rounding is not None:
                        return hp.qloguniform(self.hyperoptVariableName,
                                              math.log(min), math.log(max),
                                              rounding)
                    else:
                        return hp.loguniform(self.hyperoptVariableName,
                                             math.log(min), math.log(max))
            if mode == 'randint':
                max = self.config.get('max', 1)
                return hp.randint(self.hyperoptVariableName, max)

            if mode == 'normal':
                mean = self.config.get('mean', 0)
                stddev = self.config.get('stddev', 1)
                rounding = self.config.get('rounding', None)

                if scaling == 'linear':
                    if rounding is not None:
                        return hp.qnormal(self.hyperoptVariableName, mean,
                                          stddev, rounding)
                    else:
                        return hp.normal(self.hyperoptVariableName, mean,
                                         stddev)
                elif scaling == 'logarithmic':
                    if rounding is not None:
                        return hp.qlognormal(self.hyperoptVariableName,
                                             math.log(mean), math.log(stddev),
                                             rounding)
                    else:
                        return hp.lognormal(self.hyperoptVariableName,
                                            math.log(mean), math.log(stddev))
Example #27
             "contamination": 0.05,
             "transform": hp.choice(
                 "transform_linear",
                 [None, decomp.PCA(n_components=20)])},
            {"kernel": "rbf",
             "nu": hp.uniform("nu_rbf", 0, 1),
             "gamma": hp.choice("gamma_rbf",
                                ["auto",
                                 hp.lognormal("gamma_rbf_float", np.log(1 / 2000), 1)]),
             "contamination": 0.05,
             "transform": hp.choice(
                 "transform_rbf",
                 [None, decomp.PCA(n_components=20)])},
            {"kernel": "sigmoid",
             "nu": hp.uniform("nu_sigmoid", 0, 1),
             "coef0": hp.normal("coef0", 0, 1),
             "gamma": hp.choice("gamma_sigmoid",
                                ["auto",
                                 hp.lognormal("gamma_sigmoid_float", np.log(1 / 2000), 1)]),
             "contamination": 0.05,
             "transform": hp.choice(
                 "transform_sigmoid",
                 [None, decomp.PCA(n_components=20)])},
        ]
    )
)

lof_exp = Experiment(
    clf=getattr(importlib.import_module("pyod.models.lof"), "LOF"),
    clfname="LOF",
    parameters=hp.choice(
Example #28
def create_space(name, func, *args):
    """Create a hyperopt space for the given parameter."""
    _coconut_case_match_to_0 = func, args
    _coconut_case_match_check_0 = False
    _coconut_match_set_name_choices = _coconut_sentinel
    if (_coconut.isinstance(_coconut_case_match_to_0, _coconut.abc.Sequence)
        ) and (_coconut.len(_coconut_case_match_to_0)
               == 2) and (_coconut_case_match_to_0[0]
                          == "choice") and (_coconut.isinstance(
                              _coconut_case_match_to_0[1],
                              _coconut.abc.Sequence)) and (_coconut.len(
                                  _coconut_case_match_to_0[1]) == 1):
        _coconut_match_set_name_choices = _coconut_case_match_to_0[1][0]
        _coconut_case_match_check_0 = True
    if _coconut_case_match_check_0:
        if _coconut_match_set_name_choices is not _coconut_sentinel:
            choices = _coconut_case_match_to_0[1][0]
    if _coconut_case_match_check_0:
        return hp.choice(name, choices)
    if not _coconut_case_match_check_0:
        _coconut_match_set_name_start = _coconut_sentinel
        _coconut_match_set_name_stop = _coconut_sentinel
        _coconut_match_set_name_step = _coconut_sentinel
        if (_coconut.isinstance(
                _coconut_case_match_to_0, _coconut.abc.Sequence)) and (
                    _coconut.len(_coconut_case_match_to_0)
                    == 2) and (_coconut_case_match_to_0[0]
                               == "randrange") and (_coconut.isinstance(
                                   _coconut_case_match_to_0[1],
                                   _coconut.abc.Sequence)) and (_coconut.len(
                                       _coconut_case_match_to_0[1]) == 3):
            _coconut_match_set_name_start = _coconut_case_match_to_0[1][0]
            _coconut_match_set_name_stop = _coconut_case_match_to_0[1][1]
            _coconut_match_set_name_step = _coconut_case_match_to_0[1][2]
            _coconut_case_match_check_0 = True
        if _coconut_case_match_check_0:
            if _coconut_match_set_name_start is not _coconut_sentinel:
                start = _coconut_case_match_to_0[1][0]
            if _coconut_match_set_name_stop is not _coconut_sentinel:
                stop = _coconut_case_match_to_0[1][1]
            if _coconut_match_set_name_step is not _coconut_sentinel:
                step = _coconut_case_match_to_0[1][2]
        if _coconut_case_match_check_0:
            if step != 1:
                raise ValueError(
                    "the hyperopt backend only supports a randrange step size of 1 (use bb.choice(name, range(start, stop, step)) instead)"
                )


# despite being called randint, hp.randint is exclusive
            return start + hp.randint(name, stop - start)
    if not _coconut_case_match_check_0:
        _coconut_match_set_name_a = _coconut_sentinel
        _coconut_match_set_name_b = _coconut_sentinel
        if (_coconut.isinstance(
                _coconut_case_match_to_0, _coconut.abc.Sequence)) and (
                    _coconut.len(_coconut_case_match_to_0)
                    == 2) and (_coconut_case_match_to_0[0]
                               == "uniform") and (_coconut.isinstance(
                                   _coconut_case_match_to_0[1],
                                   _coconut.abc.Sequence)) and (_coconut.len(
                                       _coconut_case_match_to_0[1]) == 2):
            _coconut_match_set_name_a = _coconut_case_match_to_0[1][0]
            _coconut_match_set_name_b = _coconut_case_match_to_0[1][1]
            _coconut_case_match_check_0 = True
        if _coconut_case_match_check_0:
            if _coconut_match_set_name_a is not _coconut_sentinel:
                a = _coconut_case_match_to_0[1][0]
            if _coconut_match_set_name_b is not _coconut_sentinel:
                b = _coconut_case_match_to_0[1][1]
        if _coconut_case_match_check_0:
            return hp.uniform(name, a, b)
    if not _coconut_case_match_check_0:
        _coconut_match_set_name_mu = _coconut_sentinel
        _coconut_match_set_name_sigma = _coconut_sentinel
        if (_coconut.isinstance(
                _coconut_case_match_to_0, _coconut.abc.Sequence)) and (
                    _coconut.len(_coconut_case_match_to_0)
                    == 2) and (_coconut_case_match_to_0[0]
                               == "normalvariate") and (_coconut.isinstance(
                                   _coconut_case_match_to_0[1],
                                   _coconut.abc.Sequence)) and (_coconut.len(
                                       _coconut_case_match_to_0[1]) == 2):
            _coconut_match_set_name_mu = _coconut_case_match_to_0[1][0]
            _coconut_match_set_name_sigma = _coconut_case_match_to_0[1][1]
            _coconut_case_match_check_0 = True
        if _coconut_case_match_check_0:
            if _coconut_match_set_name_mu is not _coconut_sentinel:
                mu = _coconut_case_match_to_0[1][0]
            if _coconut_match_set_name_sigma is not _coconut_sentinel:
                sigma = _coconut_case_match_to_0[1][1]
        if _coconut_case_match_check_0:
            return hp.normal(name, mu, sigma)
    raise TypeError("invalid parameter {_coconut_format_0}".format(
        _coconut_format_0=(name)))
Example #29
            return {
                'status': 'fail',  # e.g. when the loss is nan
                'loss': np.inf
            }

        return {
            'status': 'ok',  # or 'fail' if nan loss
            'loss': -loss,
            'loss_variance': std,
        }

#TODO: Declare your hyperparameter priors here:

    space = {
        **{
            's%d' % (i, ): hp.normal('s%d' % (i, ), -2, 5)
            for i in range(len(cur_std) - 1)
        },
        **{
            's%d' % (len(cur_std) - 1, ):
            hp.normal('s%d' % (len(cur_std) - 1, ), 3, 8)
        }
    }

    trials = Trials()

    best = fmin(run_trial,
                space=space,
                algo=tpe.suggest,
                max_evals=2500,
                trials=trials,
Example #30
    Distribution looks like round(exp(uniform(lower, upper)) / q) * q
    """
    assert lower < upper, "Lower bound of uniform greater than upper bound"
    return hp.qloguniform(name, lower, upper, q)


def normal(name, mu_sigma):
    """
    Function to create a hyperopt normal variable
    Input
    ------------------
    name - Variable name
    mu_sigma - Tuple of (mean, standard deviation).
    """
    mu, sigma = mu_sigma
    return hp.normal(name, mu, sigma)


def qnormal(name, mu_sigma_q):
    """
    Function to create a hyperopt qnormal variable
    Input
    ------------------
    name - Variable name
    mu_sigma_q - Tuple of (mean, standard deviation, q value).

    Distribution looks like round(normal(mu, sigma) / q) * q
    """
    mu, sigma, q = mu_sigma_q
    return hp.qnormal(name, mu, sigma, q)

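For illustration, a hedged sketch of how these tuple-style wrappers might be called (the parameter names are made up):

space = {
    'learning_rate': normal('learning_rate', (0.1, 0.05)),
    'n_estimators': qnormal('n_estimators', (1000, 200, 10)),
}
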
    X_train, y_train, X_test, y_test)

# Hyperopt on cnn
space4cnn = {
    # "drop_out_1": hp.normal("drop_out_1", 0.176, 0.10),  # 0.25
    # "drop_out_2": hp.normal("drop_out_2", 0.30, 0.10),   # 0.5
    # "drop_out_3": hp.normal("drop_out_3", 0.143, 0.10)   # 0.5

    # "drop_out_1": hp.normal("drop_out_1", 0.25, 0.10),  # 0.25
    # "drop_out_2": hp.normal("drop_out_2", 0.25, 0.10),   # 0.5
    # "drop_out_3": hp.normal("drop_out_3", 0.5, 0.20)   # 0.5
    # "momentum"  : hp.normal("momentum", 0.9, 0.1),
    # "decay" : hp.normal("decay", 1e-6),
    # "n_filter_1": hp.choice("n_filter_1", [32, 64]),
    # "perspect_size_1" : hp.choice("perspect_size", [3,5,8,16,32])
    "dense": hp.normal("dense", 512, 150),
}

print(
    str(space4cnn.keys()) + ' changed' +
    strftime("%Y-%m-%d %H:%M:%S", gmtime()))


def f(params):
    # do1 = params["drop_out_1"]
    # do2 = params["drop_out_2"]
    # do3 = params["drop_out_3"]
    # if (do1<0):
    #     do1 = 0.12
    # if (do2<0):
    #     do2 = 0.30
            space=hp.uniform('x', -2, 2),
            algo=tpe.suggest,
            max_evals=100)
print(best)
'''
hp.choice(label, options), where options should be a Python list or tuple.

hp.normal(label, mu, sigma), where mu and sigma are the mean and standard deviation.

hp.uniform(label, low, high), where low and high are the lower and upper bounds of the range.
'''
from hyperopt.pyll.stochastic import sample

space = {
    'x': hp.uniform('x', 0, 1),
    'y': hp.normal('y', 0, 1),
    'name': hp.choice('name', ['alice', 'bob']),
}
# draw one sample from the space
print(sample(space))
'''
Capturing information with Trials: it would be great to see what happens inside the hyperopt black box. The Trials object lets us do exactly that.
'''
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials

fspace = {'x': hp.uniform('x', -5, 5)}


def f(params):
    x = params['x']
    val = x**2
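    # Hedged sketch, not necessarily the original code: the snippet is cut off
    # above; a typical continuation returns the loss and runs fmin with a
    # Trials object so that every evaluation can be inspected afterwards.
    return {'loss': val, 'status': STATUS_OK}


trials = Trials()
best = fmin(fn=f, space=fspace, algo=tpe.suggest, max_evals=50, trials=trials)
print('best:', best)

# each evaluation is recorded in the Trials object
print(trials.trials[0]['misc']['vals'], trials.trials[0]['result'])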
Example #33
def test_dist_norm():
    p1 = param_space("x", dist=scipy.stats.distributions.norm(2,3))
    p2 = param_space("x", dist=hp.normal("x", 2, 3))
    assert str(p1) == str(p2)
Example #34
                        model_vars_map2columns=model_vars_map2columns)

    initial_condition_dict['S'] = contry_pop_dict[country]

    master_fitter = GeneticMasterFitter(
        data=data.get_data_after_category_specification(
            chosen_categories_dict=chosen_categories_dict),
        model_class=model_class,
        initial_condition_dict=initial_condition_dict,
        metric=mse,
        iterations_cma=1000000,
        sigma_cma=1,
        popsize=popsize,
        restarts=restarts)
    coefs = master_fitter.fit_model(init_params=init_params)
    init_params_bayes = OrderedDict([(k, hp.normal(k, np.abs(v),
                                                   np.abs(v / 5)))
                                     for k, v in coefs.items()])

    master_fitter = BayesMasterFitter(
        data=data.get_data_after_category_specification(
            chosen_categories_dict=chosen_categories_dict),
        model_class=model_class,
        initial_condition_dict=initial_condition_dict,
        metric=mse,
        iterations=bayes_iter)
    coefs = master_fitter.fit_model(init_params=init_params_bayes)

    # init_params = coefs#{'b': 0.001, 'd': 0.04, 'm': 0.02}
    #
    # master_fitter = MFitter(
    #     data=data.get_data_after_category_specification(chosen_categories_dict=chosen_categories_dict),
Example #35
# Copyright (c) 2009 IW.
# All rights reserved.
#
# Author: liuqingwei <*****@*****.**>
# Date:   2018/7/5
# coding=utf-8
"""test the opt by optimizing a quadratic func.
"""

from hyperopt import hp, fmin, tpe, rand, space_eval

space = [hp.uniform('x', 0, 1), hp.normal('y', 0, 1)]


def f(args):
    x, y = args
    return x**2 + y**2


best = fmin(f, space, algo=tpe.suggest, max_evals=50)

print("random: {}".format(best))
Example #36
        def get_hyperopt_space(config):
            if config['model'] == 'simple_lstm':
                return {
                    "allocate|hidden_size":
                    hp.quniform("hidden_size", 32, 700, 2),
                    "allocate|embedding_size":
                    hp.quniform("embedding_size", 32, 700, 2),
                    "allocate|bidirectional":
                    hp.choice("bidirectional", [True, False]),
                    "allocate|num_layers":
                    hp.quniform("num_layers", 1, 5, 1),
                    "allocate|penalize_all_steps":
                    hp.choice("penalize_all_steps", [True, False])
                }
            elif config['model'] == 'awd_rnn':
                return {
                    "allocate|hidden_size":
                    hp.quniform("hidden_size", 32, 1024, 4),
                    "allocate|embedding_size":
                    hp.quniform("embedding_size", 32, 1024, 4),
                    "allocate|num_layers":
                    hp.quniform("num_layers", 1, 6, 1),
                    "allocate|penalize_all_steps":
                    hp.choice("penalize_all_steps", [True, False]),
                    "allocate|dropouto":
                    hp.normal("dropouto", 0.3, 0.2),
                    "allocate|dropouth":
                    hp.normal("dropouth", 0.3, 0.2),
                    "allocate|dropouti":
                    hp.normal("dropouti", 0.3, 0.2),
                    "allocate|dropoute":
                    hp.normal("dropoute", 0.0, 0.13),
                    # "allocate|wdrop": hp.normal("wdrop", 0.0, 0.1),
                    "allocate|ar_alpha":
                    hp.normal("ar_alpha", 2, 3),
                    "allocate|weight_decay":
                    hp.lognormal("weight_decay", -13, 5),
                    "allocate|lr":
                    hp.lognormal('lr', -6, 1),
                    "nested|tokens_config":
                    hp.choice('tokens_config', [{
                        'allocate|tokenizer':
                        'standard_tokenizer',
                        'nested|tokenization_method':
                        hp.choice('tokenization_method', [{
                            'allocate|tokenization':
                            'char'
                        }, {
                            'allocate|tokenization':
                            'word',
                            'allocate|per_class_vocab_size':
                            hp.uniform('per_class_vocab_size', 1000, 10000)
                        }])
                    }, {
                        'allocate|tokenizer':
                        'youtokentome',
                        'allocate|vocab_size':
                        hp.uniform('vocab_size', 50, 50000)
                    }])
                }

            elif config['model'] == 'vdcnn':
                return {
                    "allocate|embedding_size":
                    hp.quniform("embedding_size", 32, 1024, 4),
                    "allocate|dropout":
                    hp.normal("dropout", 0.3, 0.2),
                    "allocate|apply_shortcut":
                    hp.choice("apply_shortcut", [True, False]),
                    "allocate|k":
                    hp.normal("k", 8, 2),
                    "allocate|dense_nlayers":
                    hp.normal("dense_nlayers", 3, 1),
                    "allocate|dense_nfeatures":
                    hp.normal("dense_nfeatures", 2048, 900),
                    "allocate|conv1_nblocks":
                    hp.uniform("conv1_nblocks", 0, 10),
                    "allocate|conv2_nblocks":
                    hp.uniform("conv2_nblocks", 0, 10),
                    "allocate|conv3_nblocks":
                    hp.uniform("conv3_nblocks", 0, 5),
                    "allocate|conv4_nblocks":
                    hp.uniform("conv4_nblocks", 0, 5),
                    "allocate|conv0_nfmaps":
                    hp.normal("conv0_nfmaps", 64, 20),
                    "allocate|conv1_nfmaps":
                    hp.normal("conv1_nfmaps", 64, 20),
                    "allocate|conv2_nfmaps":
                    hp.normal("conv2_nfmaps", 128, 30),
                    "allocate|conv3_nfmaps":
                    hp.normal("conv3_nfmaps", 256, 50),
                    "allocate|conv4_nfmaps":
                    hp.normal("conv4_nfmaps", 512, 100),
                    "allocate|weight_decay":
                    hp.lognormal("weight_decay", -13, 5),
                    "allocate|lr":
                    hp.lognormal('lr', -6, 1),
                    "nested|tokens_config":
                    hp.choice('tokens_config', [{
                        'allocate|tokenizer':
                        'standard_tokenizer',
                        'nested|tokenization_method':
                        hp.choice('tokenization_method', [{
                            'allocate|tokenization':
                            'char'
                        }, {
                            'allocate|tokenization':
                            'word',
                            'allocate|per_class_vocab_size':
                            hp.uniform('per_class_vocab_size', 1000, 10000)
                        }])
                    }, {
                        'allocate|tokenizer':
                        'youtokentome',
                        'allocate|vocab_size':
                        hp.uniform('vocab_size', 50, 50000)
                    }])
                }
            elif config['model'] == 'bert':
                return {
                    "allocate|hidden_dropout":
                    hp.normal("hidden_dropout", 0.0, 0.2),
                    "allocate|att_dropout":
                    hp.normal("att_dropout", 0.0, 0.2),
                    "allocate|hidden_size":
                    hp.quniform("hidden_size", 32, 1024, 4),
                    "allocate|n_bert_layers":
                    hp.uniform("n_bert_layers", 1, 8),
                    "allocate|n_att_heads":
                    hp.uniform("n_att_heads", 1, 8),
                    "allocate|intermediate_dense_size":
                    hp.quniform("intermediate_dense_size", 32, 1024, 4),
                    "allocate|penalize_all_steps":
                    hp.choice("penalize_all_steps", [True, False]),
                    "allocate|weight_decay":
                    hp.lognormal("weight_decay", -13, 5),
                    "allocate|lr":
                    hp.lognormal('lr', -6, 1),
                    "nested|tokens_config":
                    hp.choice('tokens_config', [{
                        'allocate|tokenizer':
                        'standard_tokenizer',
                        'nested|tokenization_method':
                        hp.choice('tokenization_method', [{
                            'allocate|tokenization':
                            'char'
                        }, {
                            'allocate|tokenization':
                            'word',
                            'allocate|per_class_vocab_size':
                            hp.uniform('per_class_vocab_size', 1000, 10000)
                        }])
                    }, {
                        'allocate|tokenizer':
                        'youtokentome',
                        'allocate|vocab_size':
                        hp.uniform('vocab_size', 50, 50000)
                    }])
                }
            else:
                raise NotImplementedError()
Example #37
from hyperopt import hp, fmin, rand, tpe, space_eval
from hyperopt.pyll.stochastic import sample


def q(args):
    x, y = args
    return x ** 2 + y ** 2


if __name__ == '__main__':
    space = [hp.uniform('x', 0, 1), hp.normal('y', 0, 1)]
    for i in range(0, 100):
        print(sample(space))