Example #1
def test_multitarget():
    # Ensure that estimators receiving a multidimensional y do the right thing
    Y = np.vstack([y, y**2]).T
    n_targets = Y.shape[1]
    estimators = [
        linear_model.LassoLars(),
        linear_model.Lars(),
        # regression test for gh-1615
        linear_model.LassoLars(fit_intercept=False),
        linear_model.Lars(fit_intercept=False),
    ]

    for estimator in estimators:
        estimator.fit(X, Y)
        Y_pred = estimator.predict(X)
        alphas, active, coef, path = (estimator.alphas_, estimator.active_,
                                      estimator.coef_, estimator.coef_path_)
        for k in range(n_targets):
            estimator.fit(X, Y[:, k])
            y_pred = estimator.predict(X)
            assert_array_almost_equal(alphas[k], estimator.alphas_)
            assert_array_almost_equal(active[k], estimator.active_)
            assert_array_almost_equal(coef[k], estimator.coef_)
            assert_array_almost_equal(path[k], estimator.coef_path_)
            assert_array_almost_equal(Y_pred[:, k], y_pred)
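
For context, a minimal standalone sketch of the multi-target behaviour this test relies on (the synthetic data and the alpha value are made up for illustration): with a two-column Y, the fitted attributes become per-target sequences.

import numpy as np
from sklearn import linear_model

rng = np.random.RandomState(0)
X_demo = rng.randn(20, 5)
y_demo = X_demo.dot(rng.randn(5))
Y_demo = np.vstack([y_demo, y_demo ** 2]).T  # two targets

est = linear_model.LassoLars(alpha=0.01)
est.fit(X_demo, Y_demo)
print(est.coef_.shape)   # (2, 5): one row of coefficients per target
print(len(est.alphas_))  # 2: one alpha sequence per target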
Example #2
def test_lasso_lars_path_length():
    # Test that the path length of the LassoLars is right
    lasso = linear_model.LassoLars()
    lasso.fit(X, y)
    lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
    lasso2.fit(X, y)
    assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
    # Also check that the sequence of alphas is always decreasing
    assert np.all(np.diff(lasso.alphas_) < 0)
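
As a quick usage sketch (diabetes data, as in the other examples on this page, and alpha=0 to trace the full path), the monotonicity asserted above can be checked directly on a fitted model:

import numpy as np
from sklearn import datasets, linear_model

X_demo, y_demo = datasets.load_diabetes(return_X_y=True)
model = linear_model.LassoLars(alpha=0.0)
model.fit(X_demo, y_demo)
# The regularization path starts at the largest alpha and strictly decreases.
assert np.all(np.diff(model.alphas_) < 0)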
Example #3
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
    # Create an ill-conditioned situation in which the LARS has to go
    # far in the path to converge, and check that LARS and coordinate
    # descent give the same answers
    # Note: it used to be the case that Lars had to use the 'drop for good'
    # strategy here, but this is no longer the case with the
    # equality_tolerance checks
    X = [[1e20, 1e20, 0], [-1e-32, 0, 0], [1, 1, 1]]
    y = [10, 10, 1]
    alpha = .0001

    def objective_function(coef):
        return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef))**2 +
                alpha * linalg.norm(coef, 1))

    lars = linear_model.LassoLars(alpha=alpha, normalize=False)
    assert_warns(ConvergenceWarning, lars.fit, X, y)
    lars_coef_ = lars.coef_
    lars_obj = objective_function(lars_coef_)

    coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False)
    cd_coef_ = coord_descent.fit(X, y).coef_
    cd_obj = objective_function(cd_coef_)

    assert lars_obj < cd_obj * (1. + 1e-8)
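
The same convergence warning can be reproduced without the assert_warns test helper; here is a sketch using pytest.warns, replaying the setup above (this assumes a scikit-learn version that still accepts the normalize parameter, as the tests on this page do):

import numpy as np
import pytest
from sklearn import linear_model
from sklearn.exceptions import ConvergenceWarning

X_ill = np.array([[1e20, 1e20, 0], [-1e-32, 0, 0], [1, 1, 1]])
y_ill = np.array([10., 10., 1.])
# LARS needs many steps on this ill-conditioned design and warns.
with pytest.warns(ConvergenceWarning):
    linear_model.LassoLars(alpha=.0001, normalize=False).fit(X_ill, y_ill)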
Example #4
def test_lars_lstsq():
    # Test that Lars gives the least-squares solution at the end
    # of the path
    X1 = 3 * X  # use un-normalized dataset
    clf = linear_model.LassoLars(alpha=0.)
    clf.fit(X1, y)
    # Avoid FutureWarning about default value change when numpy >= 1.14
    rcond = None if LooseVersion(np.__version__) >= '1.14' else -1
    coef_lstsq = np.linalg.lstsq(X1, y, rcond=rcond)[0]
    assert_array_almost_equal(clf.coef_, coef_lstsq)
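
A self-contained variant of the same property, on synthetic well-conditioned data (fit_intercept=False keeps the comparison with plain least squares direct; rcond=None assumes NumPy >= 1.14, sidestepping the version check above):

import numpy as np
from sklearn import linear_model

rng = np.random.RandomState(42)
A = rng.randn(50, 4)
b = A.dot([1.0, -2.0, 0.5, 3.0]) + 0.01 * rng.randn(50)

clf_demo = linear_model.LassoLars(alpha=0.0, fit_intercept=False)
clf_demo.fit(A, b)
# At alpha=0 the end of the LARS path is the least-squares solution.
assert np.allclose(clf_demo.coef_, np.linalg.lstsq(A, b, rcond=None)[0],
                   atol=1e-6)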
Example #5
def test_rank_deficient_design():
    # consistency test that checks that LARS Lasso handles rank
    # deficient input data (with rank < n_features) in the same way
    # as coordinate descent Lasso
    y = [5, 0, 5]
    for X in ([[5, 0], [0, 5], [10, 10]],
              [[10, 10, 0], [1e-32, 0, 0], [0, 0, 1]]):
        # To be able to use the coefs to compute the objective function,
        # we need to turn off normalization
        lars = linear_model.LassoLars(.1, normalize=False)
        coef_lars_ = lars.fit(X, y).coef_
        obj_lars = (1. /
                    (2. * 3.) * linalg.norm(y - np.dot(X, coef_lars_))**2 +
                    .1 * linalg.norm(coef_lars_, 1))
        coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
        coef_cd_ = coord_descent.fit(X, y).coef_
        obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_))**2 +
                  .1 * linalg.norm(coef_cd_, 1))
        assert obj_lars < obj_cd * (1. + 1e-8)
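
A side check, not part of the original test: the second design matrix above is numerically rank deficient, which is what exercises the degenerate path handling.

import numpy as np

X_deficient = np.array([[10, 10, 0], [1e-32, 0, 0], [0, 0, 1]])
# The 1e-32 entry falls below the default singular-value tolerance, so the
# numerical rank is 2, i.e. less than n_features = 3.
print(np.linalg.matrix_rank(X_deficient))  # 2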
Example #6
def test_lasso_lars_vs_lasso_cd():
    # Test that LassoLars and Lasso using coordinate descent give the
    # same results.
    X = 3 * diabetes.data

    alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
    lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
    for c, a in zip(lasso_path.T, alphas):
        if a == 0:
            continue
        lasso_cd.alpha = a
        lasso_cd.fit(X, y)
        error = linalg.norm(c - lasso_cd.coef_)
        assert error < 0.01

    # similar test, with the estimator classes
    for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
        clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
        clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
                                  normalize=False).fit(X, y)
        err = linalg.norm(clf1.coef_ - clf2.coef_)
        assert err < 1e-3

    # same test, with normalized data
    X = diabetes.data
    alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
    lasso_cd = linear_model.Lasso(fit_intercept=False,
                                  normalize=True,
                                  tol=1e-8)
    for c, a in zip(lasso_path.T, alphas):
        if a == 0:
            continue
        lasso_cd.alpha = a
        lasso_cd.fit(X, y)
        error = linalg.norm(c - lasso_cd.coef_)
        assert error < 0.01
Example #7
def test_lasso_lars_vs_R_implementation():
    # Test that the scikit-learn LassoLars implementation agrees with the
    # LassoLars implementation available in R (lars library) under the
    # following scenarios:
    # 1) fit_intercept=False and normalize=False
    # 2) fit_intercept=True and normalize=True

    # Let's generate the data used in bug report 7778
    y = np.array(
        [-6.45006793, -3.51251449, -8.52445396, 6.12277822, -19.42109366])
    x = np.array(
        [[0.47299829, 0, 0, 0, 0], [0.08239882, 0.85784863, 0, 0, 0],
         [0.30114139, -0.07501577, 0.80895216, 0, 0],
         [-0.01460346, -0.1015233, 0.0407278, 0.80338378, 0],
         [-0.69363927, 0.06754067, 0.18064514, -0.0803561, 0.40427291]])

    X = x.T

    ###########################################################################
    # Scenario 1: Let's compare R vs scikit-learn when fit_intercept=False and
    # normalize=False
    ###########################################################################
    #
    # The R result was obtained using the following code:
    #
    # library(lars)
    # model_lasso_lars = lars(X, t(y), type="lasso", intercept=FALSE,
    #                         trace=TRUE, normalize=FALSE)
    # r = t(model_lasso_lars$beta)
    #

    r = np.array([
        [0, 0, 0, 0, 0, -79.810362809499026, -83.528788732782829,
         -83.777653739190711, -83.784156932888934, -84.033390591756657],
        [0, 0, 0, 0, -0.476624256777266, 0, 0, 0, 0, 0.025219751009936],
        [0, -3.577397088285891, -4.702795355871871, -7.016748621359461,
         -7.614898471899412, -0.336938391359179, 0, 0, 0.001213370600853,
         0.048162321585148],
        [0, 0, 0, 2.231558436628169, 2.723267514525966, 2.811549786389614,
         2.813766976061531, 2.817462468949557, 2.817368178703816,
         2.816221090636795],
        [0, 0, -1.218422599914637, -3.457726183014808, -4.021304522060710,
         -45.827461592423745, -47.776608869312305, -47.911561610746404,
         -47.914845922736234, -48.039562334265717]])

    model_lasso_lars = linear_model.LassoLars(alpha=0,
                                              fit_intercept=False,
                                              normalize=False)
    model_lasso_lars.fit(X, y)
    skl_betas = model_lasso_lars.coef_path_

    assert_array_almost_equal(r, skl_betas, decimal=12)
    ###########################################################################

    ###########################################################################
    # Scenario 2: Let's compare R vs scikit-learn when fit_intercept=True and
    # normalize=True
    #
    # Note: when normalize=True, R returns the coefficients in their
    # original units, i.e. rescaled back, whereas scikit-learn does not,
    # so we need to rescale them before comparing the results.
    ###########################################################################
    #
    # The R result was obtained using the following code:
    #
    # library(lars)
    # model_lasso_lars2 = lars(X, t(y), type="lasso", intercept=TRUE,
    #                           trace=TRUE, normalize=TRUE)
    # r2 = t(model_lasso_lars2$beta)

    r2 = np.array([
        [0, 0, 0, 0, 0],
        [0, 0, 0, 8.371887668009453, 19.463768371044026],
        [0, 0, 0, 0, 9.901611055290553],
        [0, 7.495923132833733, 9.245133544334507, 17.389369207545062,
         26.971656815643499],
        [0, 0, -1.569380717440311, -5.924804108067312, -7.996385265061972]])

    model_lasso_lars2 = linear_model.LassoLars(alpha=0, normalize=True)
    model_lasso_lars2.fit(X, y)
    skl_betas2 = model_lasso_lars2.coef_path_

    # Rescale the coefficients returned by scikit-learn before comparing
    # them against the R result (see the note above)
    temp = X - np.mean(X, axis=0)
    normx = np.sqrt(np.sum(temp**2, axis=0))
    skl_betas2 /= normx[:, np.newaxis]

    assert_array_almost_equal(r2, skl_betas2, decimal=12)
Example #8
def test_lasso_lars_vs_lasso_cd_positive():
    # Test that LassoLars and Lasso using coordinate descent give the
    # same results when using the positive option

    # This test is basically a copy of the one above, with the positive
    # option added. However, for the middle part (the comparison of
    # coefficient values over a range of alphas) we had to make some
    # adaptations; see below.

    # not normalized data
    X = 3 * diabetes.data

    alphas, _, lasso_path = linear_model.lars_path(X,
                                                   y,
                                                   method='lasso',
                                                   positive=True)
    lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
    for c, a in zip(lasso_path.T, alphas):
        if a == 0:
            continue
        lasso_cd.alpha = a
        lasso_cd.fit(X, y)
        error = linalg.norm(c - lasso_cd.coef_)
        assert error < 0.01

    # The range of alphas chosen for coefficient comparison here is restricted
    # compared with the test above, which has no positive option. This is
    # because the Lars-Lasso algorithm does not converge to the least-squares
    # solution for small alphas; see 'Least Angle Regression' by Efron et al.
    # (2004). The coefficients typically agree up to the smallest alpha
    # reached by the Lars-Lasso algorithm and start to diverge thereafter. See
    # https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff

    for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
        clf1 = linear_model.LassoLars(fit_intercept=False,
                                      alpha=alpha,
                                      normalize=False,
                                      positive=True).fit(X, y)
        clf2 = linear_model.Lasso(fit_intercept=False,
                                  alpha=alpha,
                                  tol=1e-8,
                                  normalize=False,
                                  positive=True).fit(X, y)
        err = linalg.norm(clf1.coef_ - clf2.coef_)
        assert err < 1e-3

    # normalized data
    X = diabetes.data
    alphas, _, lasso_path = linear_model.lars_path(X,
                                                   y,
                                                   method='lasso',
                                                   positive=True)
    lasso_cd = linear_model.Lasso(fit_intercept=False,
                                  normalize=True,
                                  tol=1e-8,
                                  positive=True)
    for c, a in zip(lasso_path.T[:-1], alphas[:-1]):  # don't include alpha=0
        lasso_cd.alpha = a
        lasso_cd.fit(X, y)
        error = linalg.norm(c - lasso_cd.coef_)
        assert error < 0.01
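
Finally, a standalone sketch of the positive option exercised throughout this example (diabetes data and an arbitrary alpha, chosen only for illustration): the constraint keeps every coefficient non-negative.

import numpy as np
from sklearn import datasets, linear_model

X_demo, y_demo = datasets.load_diabetes(return_X_y=True)
clf_pos = linear_model.LassoLars(alpha=0.05, positive=True)
clf_pos.fit(X_demo, y_demo)
# positive=True restricts the LARS path to non-negative coefficients.
assert np.all(clf_pos.coef_ >= 0)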