Example #1
import time

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split


def fit_single(
    solver,
    X,
    y,
    penalty="l2",
    single_target=True,
    C=1,
    max_iter=10,
    skip_slow=False,
    dtype=np.float64,
):
    if skip_slow and solver == "lightning" and penalty == "l1":
        print("skipping l1 logistic regression with solver lightning.")
        return

    print("Solving %s logistic regression with penalty %s, solver %s." %
          ("binary" if single_target else "multinomial", penalty, solver))

    if solver == "lightning":
        from lightning.classification import SAGAClassifier

    if single_target or solver not in ["sag", "saga"]:
        multi_class = "ovr"
    else:
        multi_class = "multinomial"
    X = X.astype(dtype)
    y = y.astype(dtype)
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        random_state=42,
                                                        stratify=y)
    n_samples = X_train.shape[0]
    n_classes = np.unique(y_train).shape[0]
    test_scores = [1]
    train_scores = [1]
    accuracies = [1 / n_classes]
    times = [0]

    if penalty == "l2":
        alpha = 1.0 / (C * n_samples)
        beta = 0
        lightning_penalty = None
    else:
        alpha = 0.0
        beta = 1.0 / (C * n_samples)
        lightning_penalty = "l1"

    for this_max_iter in range(1, max_iter + 1, 2):
        print("[%s, %s, %s] Max iter: %s" % (
            "binary" if single_target else "multinomial",
            penalty,
            solver,
            this_max_iter,
        ))
        if solver == "lightning":
            lr = SAGAClassifier(
                loss="log",
                alpha=alpha,
                beta=beta,
                penalty=lightning_penalty,
                tol=-1,
                max_iter=this_max_iter,
            )
        else:
            lr = LogisticRegression(
                solver=solver,
                multi_class=multi_class,
                C=C,
                penalty=penalty,
                fit_intercept=False,
                tol=0,
                max_iter=this_max_iter,
                random_state=42,
            )

        # Touch the training data once so the CPU cache is in a
        # comparable state for every fit call.
        X_train.max()
        t0 = time.perf_counter()

        lr.fit(X_train, y_train)
        train_time = time.perf_counter() - t0

        scores = []
        for (X, y) in [(X_train, y_train), (X_test, y_test)]:
            try:
                y_pred = lr.predict_proba(X)
            except NotImplementedError:
                # lightning's predict_proba is not implemented for
                # n_classes > 2; _predict_proba is a helper defined
                # elsewhere in the benchmark script.
                y_pred = _predict_proba(lr, X)
            score = log_loss(y, y_pred, normalize=False) / n_samples
            score += 0.5 * alpha * np.sum(lr.coef_**2) + beta * np.sum(
                np.abs(lr.coef_))
            scores.append(score)
        train_score, test_score = tuple(scores)

        y_pred = lr.predict(X_test)
        accuracy = np.sum(y_pred == y_test) / y_test.shape[0]
        test_scores.append(test_score)
        train_scores.append(train_score)
        accuracies.append(accuracy)
        times.append(train_time)
    return lr, times, train_scores, test_scores, accuracies
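
Example usage: a minimal sketch, assuming the imports above plus scikit-learn's digits dataset (the dataset and solver choice are illustrative, not part of the original snippet):

from sklearn.datasets import load_digits

X, y = load_digits(return_X_y=True)
# Fit saga with growing iteration budgets and collect the learning curves.
lr, times, train_scores, test_scores, accuracies = fit_single(
    "saga", X, y, penalty="l2", single_target=True, max_iter=10
)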
Example #2
# Requires the same imports as Example #1 (time, numpy as np,
# train_test_split, LogisticRegression, and log_loss).
def fit_single(solver, X, y, penalty='l2', single_target=True, C=1,
               max_iter=10, skip_slow=False):
    if skip_slow and solver == 'lightning' and penalty == 'l1':
        print('skipping l1 logistic regression with solver lightning.')
        return

    print('Solving %s logistic regression with penalty %s, solver %s.'
          % ('binary' if single_target else 'multinomial',
             penalty, solver))

    if solver == 'lightning':
        from lightning.classification import SAGAClassifier

    if single_target or solver not in ['sag', 'saga']:
        multi_class = 'ovr'
    else:
        multi_class = 'multinomial'

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42,
                                                        stratify=y)
    n_samples = X_train.shape[0]
    n_classes = np.unique(y_train).shape[0]
    test_scores = [1]
    train_scores = [1]
    accuracies = [1 / n_classes]
    times = [0]

    if penalty == 'l2':
        alpha = 1. / (C * n_samples)
        beta = 0
        lightning_penalty = None
    else:
        alpha = 0.
        beta = 1. / (C * n_samples)
        lightning_penalty = 'l1'

    for this_max_iter in range(1, max_iter + 1, 2):
        print('[%s, %s, %s] Max iter: %s' %
              ('binary' if single_target else 'multinomial',
               penalty, solver, this_max_iter))
        if solver == 'lightning':
            lr = SAGAClassifier(loss='log', alpha=alpha, beta=beta,
                                penalty=lightning_penalty,
                                tol=-1, max_iter=this_max_iter)
        else:
            lr = LogisticRegression(solver=solver,
                                    multi_class=multi_class,
                                    C=C,
                                    penalty=penalty,
                                    fit_intercept=False, tol=1e-24,
                                    max_iter=this_max_iter,
                                    random_state=42,
                                    )
        t0 = time.perf_counter()
        lr.fit(X_train, y_train)
        train_time = time.perf_counter() - t0

        scores = []
        for (X, y) in [(X_train, y_train), (X_test, y_test)]:
            try:
                y_pred = lr.predict_proba(X)
            except NotImplementedError:
                # lightning's predict_proba is not implemented for
                # n_classes > 2; _predict_proba is a helper defined
                # elsewhere in the benchmark script.
                y_pred = _predict_proba(lr, X)
            score = log_loss(y, y_pred, normalize=False) / n_samples
            score += (0.5 * alpha * np.sum(lr.coef_ ** 2) +
                      beta * np.sum(np.abs(lr.coef_)))
            scores.append(score)
        train_score, test_score = tuple(scores)

        y_pred = lr.predict(X_test)
        accuracy = np.sum(y_pred == y_test) / y_test.shape[0]
        test_scores.append(test_score)
        train_scores.append(train_score)
        accuracies.append(accuracy)
        times.append(train_time)
    return lr, times, train_scores, test_scores, accuracies
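
The returned lists are aligned index by index (a baseline entry, then one entry per iteration budget), so a convergence plot falls out directly. A sketch under that assumption; matplotlib, the digits dataset, and this particular call are illustrative, not part of the original snippet:

import matplotlib.pyplot as plt
from sklearn.datasets import load_digits

X, y = load_digits(return_X_y=True)
lr, times, train_scores, test_scores, accuracies = fit_single(
    'saga', X, y, penalty='l2', single_target=True, max_iter=10
)
# Each entry of times is the wall time of a fresh fit at that iteration
# budget, so it serves directly as the x axis.
plt.plot(times, train_scores, label='train objective')
plt.plot(times, test_scores, label='test objective')
plt.xlabel('training time (s)')
plt.ylabel('penalized log loss')
plt.legend()
plt.show()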
Example #3
import os
import warnings

import numpy as np
from lightning.classification import SAGAClassifier
from sklearn.base import clone
from sklearn.metrics import f1_score
from sklearn.preprocessing import LabelEncoder
from sklearn.utils.class_weight import compute_sample_weight

# load_csr is a project-specific helper (defined elsewhere) that loads a
# sparse feature matrix and, with return_y=True, its label vector.


def saga_cv(which, alphas, l1_ratio):

    if which == 'cdcp':
        n_folds = 3
        path = os.path.join("data", "process", "erule", "folds", "{}", "{}")
    elif which == 'ukp':
        n_folds = 5
        path = os.path.join("data", "process", "ukp-essays", "folds", "{}",
                            "{}")
    else:
        raise ValueError("unknown dataset: %r" % which)

    clf_link = SAGAClassifier(loss='smooth_hinge',
                              penalty='l1',
                              tol=1e-4,
                              max_iter=100,
                              random_state=0,
                              verbose=0)
    clf_prop = clone(clf_link)

    link_scores = np.zeros((n_folds, len(alphas)))
    prop_scores = np.zeros_like(link_scores)

    for k in range(n_folds):
        X_tr_link, y_tr_link = load_csr(path.format(k, 'train.npz'),
                                        return_y=True)
        X_te_link, y_te_link = load_csr(path.format(k, 'val.npz'),
                                        return_y=True)

        X_tr_prop, y_tr_prop = load_csr(path.format(k, 'prop-train.npz'),
                                        return_y=True)
        X_te_prop, y_te_prop = load_csr(path.format(k, 'prop-val.npz'),
                                        return_y=True)

        le = LabelEncoder()
        y_tr_prop_enc = le.fit_transform(y_tr_prop)
        y_te_prop_enc = le.transform(y_te_prop)

        link_sw = compute_sample_weight('balanced', y_tr_link)

        for j, alpha in enumerate(alphas):

            # Split the total regularization strength between the l2 term
            # (alpha) and the l1 term (beta) according to l1_ratio.
            beta = alpha * l1_ratio
            alpha *= 1 - l1_ratio
            clf_link.set_params(alpha=alpha, beta=beta)
            clf_prop.set_params(alpha=alpha, beta=beta)

            clf_link.fit(X_tr_link, y_tr_link, sample_weight=link_sw)
            y_pred_link = clf_link.predict(X_te_link)

            clf_prop.fit(X_tr_prop, y_tr_prop_enc)
            y_pred_prop = clf_prop.predict(X_te_prop)

            with warnings.catch_warnings():
                # Silence undefined-metric warnings from f1_score when a
                # class is never predicted.
                warnings.simplefilter('ignore')
                link_f = f1_score(y_te_link, y_pred_link, average='binary')
                prop_f = f1_score(y_te_prop_enc, y_pred_prop, average='macro')

            link_scores[k, j] = link_f
            prop_scores[k, j] = prop_f

    return link_scores, prop_scores
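
Example usage: a hypothetical invocation (the alpha grid and l1_ratio are illustrative, and the fold files under data/process/... must already exist on disk):

alphas = np.logspace(-6, -2, 5)
link_scores, prop_scores = saga_cv('cdcp', alphas, l1_ratio=0.5)

# Average the per-fold F1 scores and pick the strongest alpha per task.
best_link_alpha = alphas[link_scores.mean(axis=0).argmax()]
best_prop_alpha = alphas[prop_scores.mean(axis=0).argmax()]
print(best_link_alpha, best_prop_alpha)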