Example 1
def sgd(
    name,
    loss=None,  # default - 'hinge'
    penalty=None,  # default - 'l2'
    alpha=None,  # default - 0.0001
    l1_ratio=None,  # default - 0.15, must be within [0, 1]
    fit_intercept=None,  # default - True
    n_iter=None,  # default - 5
    shuffle=None,  # default - False
    random_state=None,  # default - None
    epsilon=None,
    n_jobs=1,  # default - 1 (-1 means all CPUs)
    learning_rate=None,  # default - 'invscaling'
    eta0=None,  # default - 0.01
    power_t=None,  # default - 0.5
    class_weight=None,
    warm_start=False,
    verbose=False,
):
    def _name(msg):
        return '%s.%s_%s' % (name, 'sgd', msg)

    rval = scope.sklearn_SGDClassifier(
        loss=hp.pchoice(_name('loss'), [(0.25, 'hinge'), (0.25, 'log'),
                                        (0.25, 'modified_huber'),
                                        (0.05, 'squared_hinge'),
                                        (0.05, 'perceptron'),
                                        (0.05, 'squared_loss'),
                                        (0.05, 'huber'),
                                        (0.03, 'epsilon_insensitive'),
                                        (0.02, 'squared_epsilon_insensitive')])
        if loss is None else loss,
        penalty=hp.pchoice(_name('penalty'), [(0.40, 'l2'), (0.35, 'l1'),
                                              (0.25, 'elasticnet')])
        if penalty is None else penalty,
        alpha=hp.loguniform(_name('alpha'), np.log(1e-7), np.log(1))
        if alpha is None else alpha,
        l1_ratio=hp.uniform(_name('l1_ratio'), 0, 1)
        if l1_ratio is None else l1_ratio,
        fit_intercept=hp.pchoice(_name('fit_intercept'), [(0.8, True),
                                                          (0.2, False)])
        if fit_intercept is None else fit_intercept,
        learning_rate='invscaling' if learning_rate is None else learning_rate,
        eta0=hp.loguniform(_name('eta0'), np.log(1e-5), np.log(1e-1))
        if eta0 is None else eta0,
        power_t=hp.uniform(_name('power_t'), 0, 1)
        if power_t is None else power_t,
        n_jobs=n_jobs,
        verbose=verbose,
    )
    return rval
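This and the examples below assume the enclosing module has already imported numpy as np and hyperopt's hp, and registered sklearn_SGDClassifier with hyperopt's pyll scope so that the returned expression constructs a classifier lazily. A minimal sketch of that assumed setup follows, plus a quick way to sanity-check the prior by drawing one configuration; the 'demo' label and the sampling snippet are illustrative and not part of the original module.

import numpy as np
from hyperopt import hp
from hyperopt.pyll import scope
from hyperopt.pyll.stochastic import sample
from sklearn.linear_model import SGDClassifier

@scope.define
def sklearn_SGDClassifier(**kwargs):
    # Deferred constructor: only evaluated once hyperopt has chosen
    # concrete values for every hyperparameter in the expression.
    return SGDClassifier(**kwargs)

# Draw one unfitted classifier from the prior defined by sgd() above.
space = sgd('demo')
print(sample(space))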
Example 2
def sgd(name,
        loss=None,  # default - 'hinge'
        penalty=None,  # default - 'l2'
        alpha=None,  # default - 0.0001
        l1_ratio=None,  # default - 0.15, must be within [0, 1]
        fit_intercept=True,  # default - True
        n_iter=5,  # default - 5
        shuffle=True,  # default - True
        random_state=None,  # default - None
        epsilon=None,
        n_jobs=1,  # default - 1 (-1 means all CPUs)
        learning_rate=None,  # default - 'optimal'
        eta0=None,  # default - 0.0
        power_t=None,  # default - 0.5
        class_weight='choose',
        warm_start=False,
        verbose=False):

    def _name(msg):
        return '%s.%s_%s' % (name, 'sgdc', msg)

    rval = scope.sklearn_SGDClassifier(
        loss=hp.pchoice(_name('loss'), [
            (0.25, 'hinge'),
            (0.25, 'log'),
            (0.25, 'modified_huber'),
            (0.05, 'squared_hinge'),
            (0.05, 'perceptron'),
            (0.05, 'squared_loss'),
            (0.05, 'huber'),
            (0.03, 'epsilon_insensitive'),
            (0.02, 'squared_epsilon_insensitive')
        ]) if loss is None else loss,
        penalty=_sgd_penalty(_name('penalty')) if penalty is None else penalty,
        alpha=_sgd_alpha(_name('alpha')) if alpha is None else alpha,
        l1_ratio=(_sgd_l1_ratio(_name('l1ratio'))
                  if l1_ratio is None else l1_ratio),
        fit_intercept=fit_intercept,
        n_iter=n_iter,
        learning_rate=(_sgdc_learning_rate(_name('learning_rate'))
                       if learning_rate is None else learning_rate),
        eta0=_sgd_eta0(_name('eta0')) if eta0 is None else eta0,
        power_t=_sgd_power_t(_name('power_t')) if power_t is None else power_t,
        class_weight=(_class_weight(_name('clsweight'))
                      if class_weight == 'choose' else class_weight),
        n_jobs=n_jobs,
        verbose=verbose,
        random_state=_random_state(_name('rstate'), random_state),
    )
    return rval
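This variant factors its priors into shared helpers (_sgd_penalty, _sgd_alpha, _sgd_l1_ratio, _sgdc_learning_rate, _sgd_eta0, _sgd_power_t, _class_weight, _random_state) whose definitions are not shown here. The sketch below reconstructs plausible bodies for them, mirroring the inline priors of Example 1 where those exist; the learning-rate, class-weight, and random-state helpers are guesses, and the real module may define any of them differently.

import numpy as np
from hyperopt import hp

def _sgd_penalty(name):
    return hp.pchoice(name, [(0.40, 'l2'), (0.35, 'l1'), (0.25, 'elasticnet')])

def _sgd_alpha(name):
    return hp.loguniform(name, np.log(1e-7), np.log(1))

def _sgd_l1_ratio(name):
    return hp.uniform(name, 0, 1)

def _sgd_eta0(name):
    return hp.loguniform(name, np.log(1e-5), np.log(1e-1))

def _sgd_power_t(name):
    return hp.uniform(name, 0, 1)

def _sgdc_learning_rate(name):
    # Hypothetical prior; the original helper's choices/weights are not shown.
    return hp.pchoice(name, [(0.50, 'optimal'),
                             (0.25, 'invscaling'),
                             (0.25, 'constant')])

def _class_weight(name):
    # Hypothetical: choose between unweighted and balanced classes.
    return hp.choice(name, [None, 'balanced'])

def _random_state(name, random_state):
    # Hypothetical: draw a small seed only if the caller did not pin one.
    return hp.randint(name, 5) if random_state is None else random_state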
Example 3
def sgd(name,
    loss=None,            # default - 'hinge'
    penalty=None,         # default - 'l2'
    alpha=None,           # default - 0.0001
    l1_ratio=None,        # default - 0.15, must be within [0, 1]
    fit_intercept=None,   # default - True
    n_iter=None,          # default - 5
    shuffle=None,         # default - False
    random_state=None,    # default - None
    epsilon=None,
    n_jobs=1,             # default - 1 (-1 means all CPUs)
    learning_rate=None,   # default - 'invscaling'
    eta0=None,            # default - 0.01
    power_t=None,         # default - 0.5
    class_weight=None,
    warm_start=False,
    verbose=False,
    ):

    def _name(msg):
        return '%s.%s_%s' % (name, 'sgd', msg)

    rval = scope.sklearn_SGDClassifier(
        loss=hp.pchoice(
            _name('loss'),
            [(0.25, 'hinge'),
             (0.25, 'log'),
             (0.25, 'modified_huber'),
             (0.05, 'squared_hinge'),
             (0.05, 'perceptron'),
             (0.05, 'squared_loss'),
             (0.05, 'huber'),
             (0.03, 'epsilon_insensitive'),
             (0.02, 'squared_epsilon_insensitive')]) if loss is None else loss,
        penalty=hp.pchoice(
            _name('penalty'),
            [(0.40, 'l2'),
             (0.35, 'l1'),
             (0.25, 'elasticnet')]) if penalty is None else penalty,
        alpha=hp.loguniform(
            _name('alpha'),
            np.log(1e-7),
            np.log(1)) if alpha is None else alpha,
        l1_ratio=hp.uniform(
            _name('l1_ratio'),
            0, 1) if l1_ratio is None else l1_ratio,
        fit_intercept=hp.pchoice(
            _name('fit_intercept'),
            [(0.8, True), (0.2, False)]) if fit_intercept is None else fit_intercept,
        learning_rate='invscaling' if learning_rate is None else learning_rate,
        eta0=hp.loguniform(
            _name('eta0'),
            np.log(1e-5),
            np.log(1e-1)) if eta0 is None else eta0,
        power_t=hp.uniform(
            _name('power_t'),
            0, 1) if power_t is None else power_t,
        n_jobs=n_jobs,
        verbose=verbose,
    )
    return rval
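Finally, a sketch of how a space like this is typically handed to hyperopt's optimizer. Note that these examples target older scikit-learn releases (loss names such as 'log' and 'squared_loss', and the n_iter constructor argument in Example 2, have since been renamed or removed), so the driver below assumes a correspondingly old scikit-learn. The dataset, objective, label, and evaluation budget are illustrative choices, not part of the original code, and the scope registration sketched after Example 1 is assumed.

from hyperopt import fmin, tpe, Trials
from sklearn.datasets import load_digits
from sklearn.model_selection import cross_val_score

X, y = load_digits(return_X_y=True)

def objective(clf):
    # fmin evaluates the pyll graph before calling the objective, so clf is
    # an already-constructed (unfitted) SGDClassifier for this trial.
    return 1.0 - cross_val_score(clf, X, y, cv=3).mean()

trials = Trials()
best = fmin(fn=objective,
            space=sgd('sgd_demo'),
            algo=tpe.suggest,
            max_evals=25,
            trials=trials)
print(best)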