# Imports assumed by this excerpt; in the full test module they sit at the
# top of the file.
import warnings

import numpy as np

from sklearn.linear_model import ElasticNetCV, MultiTaskElasticNetCV
from sklearn.utils.testing import (assert_almost_equal, assert_equal,
                                   assert_greater, ignore_warnings)


def test_enet_path():
    # We use a large number of samples and of informative features so that
    # the l1_ratio selected is more toward ridge than lasso
    X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
                                         n_informative_features=100)
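    # build_dataset is a local helper that draws a random linear problem
    # with a held-out test split (see the sketch at the end of this file)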
    max_iter = 150

    # Here we have a small number of iterations, and thus the
    # ElasticNet might not converge. This is to speed up tests
    clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
                       l1_ratio=[0.5, 0.7], cv=3,
                       max_iter=max_iter)
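    # ignore_warnings wraps fit so that convergence warnings triggered by
    # the capped max_iter do not pollute the test output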
    ignore_warnings(clf.fit)(X, y)
    # Well-conditioned settings, we should have selected our
    # smallest penalty
    assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
    # that is closer to ridge than to lasso
    assert_equal(clf.l1_ratio_, min(clf.l1_ratio))

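    # Same settings again, but precomputing the Gram matrix to speed up the
    # repeated coordinate-descent fits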
    clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
                       l1_ratio=[0.5, 0.7], cv=3,
                       max_iter=max_iter, precompute=True)
    ignore_warnings(clf.fit)(X, y)

    # Well-conditioned settings, we should have selected our
    # smallest penalty
    assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
    # that is closer to ridge than to lasso
    assert_equal(clf.l1_ratio_, min(clf.l1_ratio))

    # We are in well-conditioned settings with low noise: we should
    # have a good test-set performance
    assert_greater(clf.score(X_test, y_test), 0.99)

    # Multi-output/target case
    X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
    clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
                                cv=3, max_iter=max_iter)
    ignore_warnings(clf.fit)(X, y)
    # We are in well-conditioned settings with low noise: we should
    # have a good test-set performance
    assert_greater(clf.score(X_test, y_test), 0.99)
    assert_equal(clf.coef_.shape, (3, 10))

    # Mono-output and multi-output fits should select the same
    # cross-validated alpha_ and l1_ratio_
    X, y, _, _ = build_dataset(n_features=10)
    clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
    clf1.fit(X, y)
    clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
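    # y[:, np.newaxis] reshapes y to (n_samples, 1) so the multi-task
    # estimator sees a single target column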
    clf2.fit(X, y[:, np.newaxis])
    assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
    assert_almost_equal(clf1.alpha_, clf2.alpha_)


def test_enet_path():
    X, y, X_test, y_test = build_dataset()
    max_iter = 150

    with warnings.catch_warnings():
        # Here we have a small number of iterations, and thus the
        # ElasticNet might not converge. This is to speed up tests
        warnings.simplefilter("ignore", UserWarning)
        clf = ElasticNetCV(n_alphas=5,
                           eps=2e-3,
                           l1_ratio=[0.9, 0.95],
                           cv=3,
                           max_iter=max_iter)
        clf.fit(X, y)
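        # the trailing 2 is the number of decimal places for the comparison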
        assert_almost_equal(clf.alpha_, 0.002, 2)
        assert_equal(clf.l1_ratio_, 0.95)

        clf = ElasticNetCV(n_alphas=5,
                           eps=2e-3,
                           l1_ratio=[0.9, 0.95],
                           cv=3,
                           max_iter=max_iter,
                           precompute=True)
        clf.fit(X, y)
    assert_almost_equal(clf.alpha_, 0.002, 2)
    assert_equal(clf.l1_ratio_, 0.95)

    # low-noise setting: held-out test-set performance should be high
    assert_greater(clf.score(X_test, y_test), 0.99)


def test_enet_path():
    # We use a large number of samples and of informative features so that
    # the l1_ratio selected is more toward ridge than lasso
    X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
                                         n_informative_features=100)
    max_iter = 150

    with warnings.catch_warnings():
        # Here we have a small number of iterations, and thus the
        # ElasticNet might not converge. This is to speed up tests
        warnings.simplefilter("ignore", UserWarning)
        clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7], cv=3,
                           max_iter=max_iter)
        clf.fit(X, y)
        # Well-conditioned settings, we should have selected our
        # smallest penalty
        assert_almost_equal(clf.alpha_, min(clf.alphas_))
        # Non-sparse ground truth: we should have selected an elastic-net
        # that is closer to ridge than to lasso
        assert_equal(clf.l1_ratio_, min(clf.l1_ratio))

        clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7], cv=3,
                           max_iter=max_iter, precompute=True)
        clf.fit(X, y)

    # Well-conditioned settings, we should have selected our
    # smallest penalty
    assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
    # that is closer to ridge than to lasso
    assert_equal(clf.l1_ratio_, min(clf.l1_ratio))

    # We are in well-conditioned settings with low noise: we should
    # have a good test-set performance
    assert_greater(clf.score(X_test, y_test), 0.99)


def test_enet_path():
    # build an ill-posed linear regression problem with many noisy features and
    # comparatively few samples
    n_samples, n_features, max_iter = 50, 200, 50
    random_state = np.random.RandomState(0)
    w = random_state.randn(n_features)
    w[10:] = 0.0  # only the first 10 features affect the target
    X = random_state.randn(n_samples, n_features)
    y = np.dot(X, w)
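    # noiseless targets: y is an exact linear function of the 10 active
    # features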

    clf = ElasticNetCV(n_alphas=10, eps=1e-3, l1_ratio=0.95, cv=5,
                       max_iter=max_iter)
    clf.fit(X, y)
    assert_almost_equal(clf.alpha_, 0.002, 2)

    clf = ElasticNetCV(n_alphas=10, eps=1e-3, l1_ratio=0.95, cv=5,
                       max_iter=max_iter, precompute=True)
    clf.fit(X, y)
    assert_almost_equal(clf.alpha_, 0.002, 2)

    # evaluate on a freshly drawn test set from the same noiseless model
    X_test = random_state.randn(n_samples, n_features)
    y_test = np.dot(X_test, w)
    assert clf.score(X_test, y_test) > 0.99


def test_enet_path():
    # We use a large number of samples and of informative features so that
    # the l1_ratio selected is more toward ridge than lasso
    X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
                                         n_informative_features=100)
    max_iter = 150

    with warnings.catch_warnings():
        # Here we have a small number of iterations, and thus the
        # ElasticNet might not converge. This is to speed up tests
        warnings.simplefilter("ignore", UserWarning)
        clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7], cv=3,
                           max_iter=max_iter)
        clf.fit(X, y)
        # Well-conditioned settings, we should have selected our
        # smallest penalty
        assert_almost_equal(clf.alpha_, min(clf.alphas_))
        # Non-sparse ground truth: we should have selected an elastic-net
        # that is closer to ridge than to lasso
        assert_equal(clf.l1_ratio_, min(clf.l1_ratio))

        clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7], cv=3,
                           max_iter=max_iter, precompute=True)
        clf.fit(X, y)

    # Well-conditioned settings, we should have selected our
    # smallest penalty
    assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
    # that is closer to ridge than to lasso
    assert_equal(clf.l1_ratio_, min(clf.l1_ratio))

    # We are in well-conditioned settings with low noise: we should
    # have a good test-set performance
    assert_greater(clf.score(X_test, y_test), 0.99)


def test_enet_path():
    # build an ill-posed linear regression problem with many noisy features and
    # comparatively few samples
    n_samples, n_features, max_iter = 50, 200, 50
    random_state = np.random.RandomState(0)
    w = random_state.randn(n_features)
    w[10:] = 0.0  # only the first 10 features affect the target
    X = random_state.randn(n_samples, n_features)
    y = np.dot(X, w)

    clf = ElasticNetCV(n_alphas=10,
                       eps=1e-3,
                       l1_ratio=0.95,
                       cv=5,
                       max_iter=max_iter)
    clf.fit(X, y)
    assert_almost_equal(clf.alpha_, 0.002, 2)

    clf = ElasticNetCV(n_alphas=10,
                       eps=1e-3,
                       l1_ratio=0.95,
                       cv=5,
                       max_iter=max_iter,
                       precompute=True)
    clf.fit(X, y)
    assert_almost_equal(clf.alpha_, 0.002, 2)

    # evaluate on a freshly drawn test set from the same noiseless model
    X_test = random_state.randn(n_samples, n_features)
    y_test = np.dot(X_test, w)
    assert clf.score(X_test, y_test) > 0.99


def test_enet_path():
    X, y, X_test, y_test = build_dataset()
    max_iter = 50

    clf = ElasticNetCV(n_alphas=10, eps=1e-3, l1_ratio=0.95, cv=5,
                       max_iter=max_iter)
    clf.fit(X, y)
    assert_almost_equal(clf.alpha_, 0.002, 2)

    clf = ElasticNetCV(n_alphas=10, eps=1e-3, l1_ratio=0.95, cv=5,
                       max_iter=max_iter, precompute=True)
    clf.fit(X, y)
    assert_almost_equal(clf.alpha_, 0.002, 2)

    # low-noise setting: held-out test-set performance should be high
    assert clf.score(X_test, y_test) > 0.99


def test_enet_path():
    X, y, X_test, y_test = build_dataset()
    max_iter = 150

    with warnings.catch_warnings():
        # Here we have a small number of iterations, and thus the
        # ElasticNet might not converge. This is to speed up tests
        warnings.simplefilter("ignore", UserWarning)
        clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.9, 0.95], cv=3,
                           max_iter=max_iter)
        clf.fit(X, y)
        assert_almost_equal(clf.alpha_, 0.002, 2)
        assert_equal(clf.l1_ratio_, 0.95)

        clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.9, 0.95], cv=3,
                           max_iter=max_iter, precompute=True)
        clf.fit(X, y)
    assert_almost_equal(clf.alpha_, 0.002, 2)
    assert_equal(clf.l1_ratio_, 0.95)

    # low-noise setting: held-out test-set performance should be high
    assert_greater(clf.score(X_test, y_test), 0.99)
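

# The tests above rely on a build_dataset helper defined elsewhere in this
# module. A minimal sketch of its assumed behavior; the names and defaults
# here are illustrative, not the canonical implementation:
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
                  n_targets=1):
    """Build a noiseless random linear problem plus a held-out test split.

    Only the first n_informative_features ground-truth coefficients are
    nonzero, so the ground truth is sparse unless n_informative_features
    equals n_features.
    """
    random_state = np.random.RandomState(0)
    if n_targets > 1:
        w = random_state.randn(n_features, n_targets)
    else:
        w = random_state.randn(n_features)
    w[n_informative_features:] = 0.0
    X = random_state.randn(n_samples, n_features)
    y = np.dot(X, w)
    X_test = random_state.randn(n_samples, n_features)
    y_test = np.dot(X_test, w)
    return X, y, X_test, y_test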