Example #1
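These snippets rely on module-level imports and fixtures (X, X_, y, w, mask, mask_) that the listing omits. The sketch below is a hypothetical stand-in for that setup, not nilearn's actual test scaffolding: the fixture values, the random design, and the import path are assumptions. Note that the two source modules use opposite suffix conventions (in Example #1, X is the masked 2D array and X_ the Nifti image; in Example #4 the roles are swapped), and that sklearn.utils.extmath.norm has been removed from recent scikit-learn releases, where np.linalg.norm is a drop-in replacement.

# Hypothetical module-level setup (names, values, and import paths are
# assumptions; the real test modules build these fixtures with their own
# helpers, and get_gradient_matrix is one such test-local helper).
import numpy as np
import scipy as sp
import scipy.linalg
import nibabel
from numpy.testing import assert_almost_equal
from sklearn.linear_model import Lasso
from sklearn.utils import extmath          # extmath.norm: removed upstream
from nilearn.decoding.space_net import BaseSpaceNet

rng = np.random.RandomState(42)
dim = (4, 4, 4)
mask = np.ones(dim, dtype=bool)            # boolean mask over the volume
n_samples, n_features = 10, int(mask.sum())
X = rng.randn(n_samples, n_features)       # masked 2D design matrix
w = rng.randn(n_features)                  # ground-truth weights
y = np.dot(X, w) + 0.1 * rng.randn(n_samples)

# Nifti counterparts expected by BaseSpaceNet.
mask_ = nibabel.Nifti1Image(mask.astype(np.uint8), np.eye(4))
data_4d = np.zeros(dim + (n_samples,))
data_4d[mask] = X.T                        # unmask each sample into 3D
X_ = nibabel.Nifti1Image(data_4d, np.eye(4))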
def test_tikhonov_regularization_vs_graph_net():
    # Test for one of the extreme cases of Graph-Net: That is, with
    # l1_ratio = 0 (pure Smooth), we compare Graph-Net's performance
    # with the analytical solution for Tikhonov Regularization

    # XXX A small dataset here (this test is very lengthy)
    G = get_gradient_matrix(w.size, mask)
    optimal_model = np.dot(
        sp.linalg.pinv(np.dot(X.T, X) + y.size * np.dot(G.T, G)),
        np.dot(X.T, y))
    graph_net = BaseSpaceNet(mask=mask_,
                             alphas=1. * X.shape[0],
                             l1_ratios=0.,
                             max_iter=400,
                             fit_intercept=False,
                             screening_percentile=100.,
                             standardize=False)
    graph_net.fit(X_, y.copy())
    coef_ = graph_net.coef_[0]
    graph_net_perf = 0.5 / y.size * extmath.norm(
        np.dot(X, coef_) - y) ** 2\
        + 0.5 * extmath.norm(np.dot(G, coef_)) ** 2
    optimal_model_perf = 0.5 / y.size * extmath.norm(
        np.dot(X, optimal_model) - y) ** 2\
        + 0.5 * extmath.norm(np.dot(G, optimal_model)) ** 2
    assert_almost_equal(graph_net_perf, optimal_model_perf, decimal=1)
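The optimal_model above is the closed-form minimizer of the pure-smooth objective 0.5/n * ||Xw - y||^2 + 0.5 * ||Gw||^2: setting the gradient X^T(Xw - y)/n + G^T G w to zero gives (X^T X + n G^T G) w = X^T y, which is exactly the pinv expression in the test. A self-contained check of that identity on toy data (all names below are local to the sketch):

# Verify the Tikhonov closed form on a toy problem: the minimizer of
#   f(w) = 0.5/n * ||Xw - y||^2 + 0.5 * ||Gw||^2
# solves (X^T X + n G^T G) w = X^T y, so its gradient vanishes there.
import numpy as np
import scipy.linalg

rng = np.random.RandomState(0)
n, p = 20, 5
X = rng.randn(n, p)
y = rng.randn(n)
G = np.diff(np.eye(p), axis=0)             # 1D finite-difference operator
w_star = np.dot(scipy.linalg.pinv(np.dot(X.T, X) + n * np.dot(G.T, G)),
                np.dot(X.T, y))
grad = (np.dot(X.T, np.dot(X, w_star) - y) / n
        + np.dot(G.T, np.dot(G, w_star)))
assert np.allclose(grad, 0.)               # first-order optimality holds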
def test_max_alpha__squared_loss():
    """Tests that models with L1 regularization over the theoretical bound
    are full of zeros, for logistic regression"""
    l1_ratios = np.linspace(0.1, 1, 3)
    reg = BaseSpaceNet(mask=mask_, max_iter=10, penalty="graph-net",
                       is_classif=False)
    for l1_ratio in l1_ratios:
        reg.l1_ratios = l1_ratio
        reg.alphas = np.max(np.dot(X.T, y)) / l1_ratio
        reg.fit(X_, y)
        assert_almost_equal(reg.coef_, 0.)
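The "theoretical bound" is the standard lasso critical value: for the objective 1/(2n) * ||Xw - y||^2 + alpha * ||w||_1, the zero vector is optimal exactly when alpha >= max_j |x_j^T y| / n, so any alpha at or above that kills every coefficient. The sketch below shows the same effect with plain scikit-learn Lasso on toy data (SpaceNet's own alpha scaling may differ by the factor of n, which is why the test's value sits comfortably above the bound):

# At or above the critical alpha, the lasso solution is identically zero.
import numpy as np
from sklearn.linear_model import Lasso

rng = np.random.RandomState(0)
n, p = 30, 8
X = rng.randn(n, p)
y = rng.randn(n)
alpha_max = np.max(np.abs(np.dot(X.T, y))) / n
lasso = Lasso(alpha=alpha_max, fit_intercept=False).fit(X, y)
assert np.allclose(lasso.coef_, 0.)        # all coefficients vanish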
Example #4
def test_lasso_vs_graph_net():
    # Test for one of the extreme cases of Graph-Net: That is, with
    # l1_ratio = 1 (pure Lasso), we compare Graph-Net's performance with
    # Scikit-Learn lasso
    # NOTE: the `normalize` parameter was removed in scikit-learn 1.2;
    # on recent versions drop it and standardize the data beforehand.
    lasso = Lasso(max_iter=100, tol=1e-8, normalize=False)
    graph_net = BaseSpaceNet(mask=mask, alphas=1. * X_.shape[0],
                             l1_ratios=1, is_classif=False,
                             penalty="graph-net", max_iter=100)
    lasso.fit(X_, y)
    graph_net.fit(X, y)
    lasso_perf = 0.5 / y.size * extmath.norm(np.dot(
        X_, lasso.coef_) - y) ** 2 + np.sum(np.abs(lasso.coef_))
    graph_net_perf = 0.5 * ((graph_net.predict(X) - y) ** 2).mean()
    np.testing.assert_almost_equal(graph_net_perf, lasso_perf, decimal=3)
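With l1_ratios=1 the spatial term drops out, so Graph-Net and scikit-learn's Lasso minimize the same program (up to alpha scaling), which is why the test compares attained objective values rather than coefficient vectors. For reference, a sketch of the KKT condition that any lasso solution satisfies, checked on toy data (all names are local to the sketch):

# Lasso first-order (KKT) condition for f(w) = 1/(2n)||Xw - y||^2 + a||w||_1:
# the residual correlation X^T(y - Xw)/n equals a*sign(w_j) on the active
# set and lies within [-a, a] elsewhere.
import numpy as np
from sklearn.linear_model import Lasso

rng = np.random.RandomState(1)
n, p = 50, 10
X = rng.randn(n, p)
y = np.dot(X, rng.randn(p)) + rng.randn(n)
a = 0.1
w = Lasso(alpha=a, fit_intercept=False,
          tol=1e-12, max_iter=10000).fit(X, y).coef_
corr = np.dot(X.T, y - np.dot(X, w)) / n
active = w != 0
assert np.allclose(corr[active], a * np.sign(w[active]), atol=1e-6)
assert np.all(np.abs(corr[~active]) <= a + 1e-6)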