Example #1
def test_fused():
    """
    Test that it can solve a problem with fused lasso regularization
    using only the L1-prox and an appropriate L linear operator.
    """
    for alpha in np.logspace(-3, 3, 3):

        def logloss(x):
            return logistic._logistic_loss(x, X, y, 0.)

        def fprime_logloss(x):
            return logistic._logistic_loss_and_grad(x, X, y, 0.)[1]

        def l1_prox(x, step_size):
            return np.fmax(x - step_size, 0) - np.fmax(-x - step_size, 0)

        L = sparse.diags([1, -1], [0, 1], shape=(n_features - 1, n_features))
        # solve the problem using the fused lasso proximal operator
        # (only for reference)
        opt_proximal = proximal_gradient(
            logloss, fprime_logloss, prox_tv1d, np.zeros(n_features),
            tol=1e-24, max_iter=10000, alpha=alpha)

        opt_primal_dual = primal_dual(
            logloss, fprime_logloss, None, l1_prox, L, np.zeros(n_features),
            beta=alpha, verbose=True, step_size_y=1)
        assert opt_primal_dual.success
        np.testing.assert_allclose(
            opt_proximal.x, opt_primal_dual.x, atol=1e-1)
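
The L used above is worth unpacking: it is the first-difference operator, so composing the plain L1 prox with it reproduces the fused-lasso (1-D total-variation) penalty named in the docstring. A minimal standalone sketch (not part of the test suite; D mirrors the L built above on a toy dimension):

import numpy as np
from scipy import sparse

n = 5  # small toy dimension, independent of the tests above
D = sparse.diags([1, -1], [0, 1], shape=(n - 1, n))
x = np.random.randn(n)
# each row of D computes one first difference: (D x)_i = x_i - x_{i+1}
np.testing.assert_allclose(D.dot(x), x[:-1] - x[1:])
# so ||D x||_1 is exactly the fused-lasso / 1-D total-variation penalty
np.testing.assert_allclose(np.abs(D.dot(x)).sum(), np.abs(np.diff(x)).sum())
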
Example #2
def test_lasso():
    for alpha in np.logspace(-3, 3, 3):

        def logloss(x):
            return logistic._logistic_loss(x, X, y, 0.)

        def fprime_logloss(x):
            return logistic._logistic_loss_and_grad(x, X, y, 0.)[1]

        def l1_prox(x, step_size):
            return np.fmax(x - step_size, 0) - np.fmax(-x - step_size, 0)

        L = np.eye(n_features)
        opt_proximal = proximal_gradient(
            logloss, fprime_logloss, l1_prox, np.zeros(n_features),
            tol=1e-24, alpha=alpha)
        opt_primal_dual = primal_dual(
            logloss, fprime_logloss, l1_prox, None, L, np.zeros(n_features),
            alpha=alpha)
        assert opt_primal_dual.success
        np.testing.assert_allclose(
            opt_proximal.x, opt_primal_dual.x, rtol=1e-1)

        # same thing but using the other operator
        opt_primal_dual2 = primal_dual(
            logloss, fprime_logloss, None, l1_prox, L, np.zeros(n_features),
            beta=alpha)
        np.testing.assert_allclose(
            opt_proximal.x, opt_primal_dual2.x, atol=1e-3)
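
The l1_prox used in these tests is soft-thresholding written with np.fmax. A small standalone sketch (independent of the tests) checking that it matches the usual closed-form proximal operator of the L1 norm, sign(x) * max(|x| - t, 0):

import numpy as np

def soft_threshold(x, t):
    # closed-form proximal operator of t * ||.||_1
    return np.sign(x) * np.maximum(np.abs(x) - t, 0)

z = np.random.randn(20)
np.testing.assert_allclose(
    np.fmax(z - 0.3, 0) - np.fmax(-z - 0.3, 0),
    soft_threshold(z, 0.3))
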
Example #3
def test_optimize():

    def logloss(x):
        return logistic._logistic_loss(x, X, y, 1.)

    def fprime_logloss(x):
        return logistic._logistic_loss_and_grad(x, X, y, 1.)[1]


    # check that it raises an exception when max_iter_backtracking
    # is negative
    tools.assert_raises(ValueError,
        proximal_gradient, logloss, fprime_logloss, None,
        np.zeros(n_features), max_iter_backtracking=-1)

    opt = proximal_gradient(
        logloss, fprime_logloss, None, np.zeros(n_features),
        tol=1e-3)
    assert opt.success
    sol_scipy = optimize.fmin_l_bfgs_b(
        logloss, np.zeros(n_features), fprime=fprime_logloss)[0]
    np.testing.assert_allclose(sol_scipy, opt.x, rtol=1e-1)
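
A quick sanity sketch (not part of the test): both proximal_gradient and fmin_l_bfgs_b rely on fprime_logloss being the true gradient of logloss, which can be verified with scipy's finite-difference comparison, reusing the names defined above:

from scipy import optimize
import numpy as np

# check_grad returns the norm of the difference between the analytic
# gradient and a finite-difference approximation; it should be small
err = optimize.check_grad(logloss, fprime_logloss, np.random.randn(n_features))
print('finite-difference gradient error:', err)
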
Example #4
def test_sklearn():
    for alpha in np.logspace(-3, 3, 3):

        def logloss(x):
            return logistic._logistic_loss(x, X, y, 0.)

        def fprime_logloss(x):
            return logistic._logistic_loss_and_grad(x, X, y, 0.)[1]

        def g_prox(x, step_size):
            """
            L1 proximal operator
            """
            return np.fmax(x - step_size * alpha, 0) - \
                np.fmax(- x - step_size * alpha, 0)

        clf = logistic.LogisticRegression(
            penalty='l1', fit_intercept=False, C=1 / alpha)
        clf.fit(X, y)
        opt = proximal_gradient(
            logloss, fprime_logloss, g_prox, np.zeros(n_features),
            tol=1e-3)
        assert opt.success
        np.testing.assert_allclose(clf.coef_.ravel(), opt.x, rtol=1e-1)
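
What this comparison relies on: with fit_intercept=False, sklearn's L1-penalized LogisticRegression with C = 1 / alpha minimizes the same penalized objective, logloss(x) + alpha * ||x||_1, as the proximal-gradient call. A small follow-up sketch (intended to run inside the loop above, reusing alpha, clf and opt) that makes the check explicit by evaluating that objective at both solutions:

def penalized_obj(w):
    # the common objective: logistic loss plus alpha times the L1 norm
    return logistic._logistic_loss(w, X, y, 0.) + alpha * np.abs(w).sum()

# the two objective values should nearly coincide when the test passes
print(penalized_obj(clf.coef_.ravel()), penalized_obj(opt.x))
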
Example #5
f, ax = plt.subplots(2, 3, sharey=False)
all_alphas = [1e-6, 1e-3, 1e-1]
xlim = [0.02, 0.02, 0.1]
for i, alpha in enumerate(all_alphas):

    max_iter = 5000
    trace_three = Trace(lambda x: obj_fun(x) + alpha * TV(x))
    out_tos = three_split(
        obj_fun, grad, prox_tv1d_rows, prox_tv1d_cols, np.zeros(n_features),
        alpha=alpha, beta=alpha, g_prox_args=(n_rows, n_cols), h_prox_args=(n_rows, n_cols),
        callback=trace_three, max_iter=max_iter, tol=1e-16)

    trace_gd = Trace(lambda x: obj_fun(x) + alpha * TV(x))
    out_gd = proximal_gradient(
        obj_fun, grad, prox_tv2d, np.zeros(n_features),
        alpha=alpha, g_prox_args=(n_rows, n_cols, 1000, 1e-1),
        max_iter=max_iter, callback=trace_gd)

    ax[0, i].set_title(r'$\lambda=%s$' % alpha)
    ax[0, i].imshow(out_tos.x.reshape((n_rows, n_cols)),
                    interpolation='nearest', cmap=plt.cm.Blues)
    ax[0, i].set_xticks(())
    ax[0, i].set_yticks(())

    fmin = min(np.min(trace_three.values), np.min(trace_gd.values))
    scale = (np.array(trace_three.values) - fmin)[0]
    prox_split, = ax[1, i].plot(
        np.array(trace_three.times), (np.array(trace_three.values) - fmin) / scale,
        lw=4, marker='o', markevery=10,
        markersize=10, color=colors[0])
    prox_gd, = ax[1, i].plot(
Example #6
"""
L1-regularized logistic regression
==================================

Implementation of L1-regularized logistic regression
using copt.
"""
import numpy as np
from sklearn.linear_model import logistic
from copt import proximal_gradient

n_samples, n_features = 100, 10
X = np.random.randn(n_samples, n_features)
y = np.random.randn(n_samples)
alpha = 1.


def logloss(x):
    return logistic._logistic_loss(x, X, y, 1.)


def fprime_logloss(x):
    return logistic._logistic_loss_and_grad(x, X, y, 1.)[1]


def L1_prox(x, step_size):
    return np.fmax(x - step_size * alpha, 0) - \
        np.fmax(- x - step_size * alpha, 0)


out = proximal_gradient(logloss, fprime_logloss, L1_prox, np.zeros(n_features))
print('Solution', out)
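
A short follow-up (assuming, as the tests above do, that the returned object exposes .x and .success attributes): inspect convergence and count how many coefficients the L1 penalty drove exactly to zero.

print('converged:', out.success)
print('nonzero coefficients:', (out.x != 0).sum(), 'out of', n_features)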