Example #1
def test_graph_net_and_tv_same_for_pure_l1_another_test(decimal=1):
    ###############################################################
    # graph_net_solver and tvl1_solver should give same results
    # when l1_ratio = 1.
    ###############################################################

    dim = (3, 3, 3)
    X, y, _, mask = _make_data(masked=True, dim=dim)
    X, mask = to_niimgs(X, dim)
    alpha = .1
    l1_ratio = 1.
    max_iter = 20

    for standardize in [True, False]:
        sl = BaseSpaceNet(alphas=alpha,
                          l1_ratios=l1_ratio,
                          penalty="graph-net",
                          max_iter=max_iter,
                          mask=mask,
                          is_classif=False,
                          standardize=standardize,
                          verbose=0).fit(X, y)
        tvl1 = BaseSpaceNet(alphas=alpha,
                            l1_ratios=l1_ratio,
                            penalty="tv-l1",
                            max_iter=max_iter,
                            mask=mask,
                            is_classif=False,
                            standardize=standardize,
                            verbose=0).fit(X, y)

        # should be exactly the same (except for numerical errors)
        np.testing.assert_array_almost_equal(sl.coef_, tvl1.coef_,
                                             decimal=decimal)
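The test above rests on the fact that the Graph-Net and TV-L1 penalties both collapse to a plain L1 term when l1_ratio = 1. A minimal NumPy sketch of that identity (the penalty forms and scaling below are illustrative, not nilearn's solver code):

import numpy as np

def graph_net_penalty(w, grad_w, alpha, l1_ratio):
    # illustrative Graph-Net penalty: L1 term plus a squared-gradient smoother
    return alpha * (l1_ratio * np.abs(w).sum()
                    + (1. - l1_ratio) * .5 * (grad_w ** 2).sum())

def tvl1_penalty(w, grad_w, alpha, l1_ratio):
    # illustrative TV-L1 penalty: L1 term plus isotropic total variation
    return alpha * (l1_ratio * np.abs(w).sum()
                    + (1. - l1_ratio) * np.sqrt((grad_w ** 2).sum(axis=0)).sum())

w = np.random.RandomState(0).randn(27)
grad_w = np.random.RandomState(1).randn(3, 27)  # stand-in for the spatial gradient of w
# with l1_ratio = 1 both penalties reduce to alpha * ||w||_1, so the solvers should agree
assert np.isclose(graph_net_penalty(w, grad_w, .1, 1.),
                  tvl1_penalty(w, grad_w, .1, 1.))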
Example #2
def test_get_params():
    # Issue #12 (on github) reported that our objects
    # get_params() methods returned empty dicts.

    for penalty in ["graph-net", "tv-l1"]:
        for is_classif in [True, False]:
            kwargs = {}
            for param in ["max_iter", "alphas", "l1_ratios", "verbose",
                          "tol", "mask", "memory", "fit_intercept", "alphas"]:
                m = BaseSpaceNet(
                    mask='dummy',
                    penalty=penalty,
                    is_classif=is_classif,
                    **kwargs)
                try:
                    params = m.get_params()
                except AttributeError:
                    if "get_params" in traceback.format_exc():
                        params = m._get_params()
                    else:
                        raise

                assert_true(param in params,
                            msg="%s doesn't have parameter '%s'." % (
                                m, param))
Example #3
def test_graph_net_and_tvl1_same_for_pure_l1(max_iter=100, decimal=2):
    ###############################################################
    # graph_net_solver and tvl1_solver should give same results
    # when l1_ratio = 1.
    ###############################################################

    X, y, _, mask = _make_data()
    alpha = .1
    unmasked_X = np.rollaxis(X, -1, start=0)
    unmasked_X = np.array([x[mask] for x in unmasked_X])

    # results should be exactly the same for pure lasso
    a = tvl1_solver(unmasked_X, y, alpha, 1., mask, loss="mse",
                    max_iter=max_iter)[0]
    b = _graph_net_squared_loss(unmasked_X, y, alpha, 1.,
                                max_iter=max_iter,
                                mask=mask)[0]

    mask = nibabel.Nifti1Image(mask.astype(float), np.eye(4))
    X = nibabel.Nifti1Image(X.astype(float), np.eye(4))
    for standardize in [True, False]:
        sl = BaseSpaceNet(
            alphas=alpha, l1_ratios=1., mask=mask, penalty="graph-net",
            max_iter=max_iter, standardize=standardize).fit(X, y)
        tvl1 = BaseSpaceNet(
            alphas=alpha, l1_ratios=1., mask=mask, penalty="tv-l1",
            max_iter=max_iter, standardize=standardize).fit(X, y)

        # Should be exactly the same (except for numerical errors).
        # However because of the TV-L1 prox approx, results might be 'slightly'
        # different.
        np.testing.assert_array_almost_equal(a, b, decimal=decimal)
        np.testing.assert_array_almost_equal(sl.coef_, tvl1.coef_,
                                             decimal=decimal)
Example #4
def test_get_params():
    # Issue #12 (on github) reported that our objects
    # get_params() methods returned empty dicts.

    for penalty in ["graph-net", "tv-l1"]:
        for is_classif in [True, False]:
            kwargs = {}
            for param in [
                    "max_iter", "alphas", "l1_ratios", "verbose", "tol",
                    "mask", "memory", "fit_intercept"
            ]:
                m = BaseSpaceNet(mask='dummy',
                                 penalty=penalty,
                                 is_classif=is_classif,
                                 **kwargs)
                try:
                    params = m.get_params()
                except AttributeError:
                    if "get_params" in traceback.format_exc():
                        params = m._get_params()
                    else:
                        raise

                assert param in params, "%s doesn't have parameter '%s'." % (
                    m, param)
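Both variants of this test rely on the scikit-learn convention that get_params() is derived from the constructor signature, provided each __init__ argument is stored under an attribute of the same name. A self-contained sketch with a toy estimator (ToyEstimator is hypothetical, not a nilearn class):

from sklearn.base import BaseEstimator

class ToyEstimator(BaseEstimator):
    # get_params() works out of the box because every __init__ argument
    # is stored under an attribute of the same name
    def __init__(self, alphas=None, l1_ratios=.5, max_iter=100):
        self.alphas = alphas
        self.l1_ratios = l1_ratios
        self.max_iter = max_iter

params = ToyEstimator().get_params()
assert all(name in params for name in ("alphas", "l1_ratios", "max_iter"))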
Example #5
def test_tikhonov_regularization_vs_graph_net():
    # Test for one of the extreme cases of Graph-Net: That is, with
    # l1_ratio = 0 (pure Smooth), we compare Graph-Net's performance
    # with the analytical solution for Tikhonov Regularization

    # XXX A small dataset here (this test is very lengthy)
    G = get_gradient_matrix(w.size, mask)
    optimal_model = np.dot(
        sp.linalg.pinv(np.dot(X.T, X) + y.size * np.dot(G.T, G)),
        np.dot(X.T, y))
    graph_net = BaseSpaceNet(mask=mask_,
                             alphas=1. * X.shape[0],
                             l1_ratios=0.,
                             max_iter=400,
                             fit_intercept=False,
                             screening_percentile=100.,
                             standardize=False)
    graph_net.fit(X_, y.copy())
    coef_ = graph_net.coef_[0]
    graph_net_perf = 0.5 / y.size * extmath.norm(
        np.dot(X, coef_) - y) ** 2\
        + 0.5 * extmath.norm(np.dot(G, coef_)) ** 2
    optimal_model_perf = 0.5 / y.size * extmath.norm(
        np.dot(X, optimal_model) - y) ** 2\
        + 0.5 * extmath.norm(np.dot(G, optimal_model)) ** 2
    assert_almost_equal(graph_net_perf, optimal_model_perf, decimal=1)
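The `optimal_model` above is the closed-form minimizer of 0.5 / n * ||Xw - y||^2 + 0.5 * ||Gw||^2, i.e. the solution of the normal equations (X.T X + n * G.T G) w = X.T y. A quick sanity check of that identity on toy data (random X, y and a 1D finite-difference G, not the test's fixtures):

import numpy as np

rng = np.random.RandomState(0)
n, p = 20, 5
X, y = rng.randn(n, p), rng.randn(n)
G = np.diff(np.eye(p), axis=0)  # simple 1D finite-difference operator

# closed form, as in the test above
w_closed = np.dot(np.linalg.pinv(np.dot(X.T, X) + n * np.dot(G.T, G)),
                  np.dot(X.T, y))

# same minimizer, obtained as an augmented least-squares problem
A = np.vstack([X / np.sqrt(n), G])
b = np.concatenate([y / np.sqrt(n), np.zeros(G.shape[0])])
w_lstsq = np.linalg.lstsq(A, b, rcond=None)[0]

np.testing.assert_array_almost_equal(w_closed, w_lstsq)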
Example #6
def test_max_alpha__squared_loss():
    """Tests that models with L1 regularization over the theoretical bound
    are full of zeros, for logistic regression"""
    l1_ratios = np.linspace(0.1, 1, 3)
    reg = BaseSpaceNet(mask=mask_, max_iter=10, penalty="graph-net",
                       is_classif=False)
    for l1_ratio in l1_ratios:
        reg.l1_ratios = l1_ratio
        reg.alphas = np.max(np.dot(X.T, y)) / l1_ratio
        reg.fit(X_, y)
        assert_almost_equal(reg.coef_, 0.)
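The bound is easiest to check against scikit-learn's plain Lasso, whose objective 0.5 / n * ||y - Xw||^2 + alpha * ||w||_1 has an all-zero solution whenever alpha >= max|X.T y| / n. A toy sketch (random data, not the test's fixtures; fit_intercept=False keeps the bound exact):

import numpy as np
from sklearn.linear_model import Lasso

rng = np.random.RandomState(0)
X, y = rng.randn(50, 10), rng.randn(50)

alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
lasso = Lasso(alpha=alpha_max, fit_intercept=False).fit(X, y)
np.testing.assert_array_almost_equal(lasso.coef_, 0.)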
Example #7
def test_lasso_vs_graph_net():
    # Test for one of the extreme cases of Graph-Net: That is, with
    # l1_ratio = 1 (pure Lasso), we compare Graph-Net's performance with
    # Scikit-Learn lasso
    lasso = Lasso(max_iter=100, tol=1e-8)
    graph_net = BaseSpaceNet(mask=mask, alphas=1. * X_.shape[0],
                             l1_ratios=1, is_classif=False,
                             penalty="graph-net", max_iter=100)
    lasso.fit(X_, y)
    graph_net.fit(X, y)
    lasso_perf = 0.5 / y.size * extmath.norm(np.dot(
        X_, lasso.coef_) - y) ** 2 + np.sum(np.abs(lasso.coef_))
    graph_net_perf = 0.5 * ((graph_net.predict(X) - y) ** 2).mean()
    np.testing.assert_almost_equal(graph_net_perf, lasso_perf, decimal=3)
Example #8
def test_params_correctly_propagated_in_constructors_biz():
    for penalty, is_classif, alpha, l1_ratio in itertools.product(
            ["graph-net", "tv-l1"], [True, False], [.4, .01], [.5, 1.]):
        cvobj = BaseSpaceNet(
            mask="dummy", penalty=penalty, is_classif=is_classif, alphas=alpha,
            l1_ratios=l1_ratio)
        assert_equal(cvobj.alphas, alpha)
        assert_equal(cvobj.l1_ratios, l1_ratio)
Example #9
def test_tikhonov_regularization_vs_graph_net():
    # Test for one of the extreme cases of Graph-Net: That is, with
    # l1_ratio = 0 (pure Smooth), we compare Graph-Net's performance
    # with the analytical solution for Tikhonov Regularization

    # XXX A small dataset here (this test is very lengthy)
    G = get_gradient_matrix(w.size, mask)
    optimal_model = np.dot(sp.linalg.pinv(
        np.dot(X.T, X) + y.size * np.dot(G.T, G)), np.dot(X.T, y))
    graph_net = BaseSpaceNet(
        mask=mask_, alphas=1. * X.shape[0], l1_ratios=0., max_iter=400,
        fit_intercept=False,
        screening_percentile=100., standardize=False)
    graph_net.fit(X_, y.copy())
    coef_ = graph_net.coef_[0]
    graph_net_perf = 0.5 / y.size * extmath.norm(
        np.dot(X, coef_) - y) ** 2\
        + 0.5 * extmath.norm(np.dot(G, coef_)) ** 2
    optimal_model_perf = 0.5 / y.size * extmath.norm(
        np.dot(X, optimal_model) - y) ** 2\
        + 0.5 * extmath.norm(np.dot(G, optimal_model)) ** 2
    assert_almost_equal(graph_net_perf, optimal_model_perf, decimal=1)
Example #10
def test_params_correctly_propagated_in_constructors():
    for (penalty, is_classif, n_alphas, l1_ratio, n_jobs,
         cv, perc) in itertools.product(["graph-net", "tv-l1"],
                                        [True, False], [.1, .01],
                                        [.5, 1.], [1, -1], [2, 3],
                                        [5, 10]):
        cvobj = BaseSpaceNet(
            mask="dummy", n_alphas=n_alphas, n_jobs=n_jobs, l1_ratios=l1_ratio,
            cv=cv, screening_percentile=perc, penalty=penalty,
            is_classif=is_classif)
        assert_equal(cvobj.n_alphas, n_alphas)
        assert_equal(cvobj.l1_ratios, l1_ratio)
        assert_equal(cvobj.n_jobs, n_jobs)
        assert_equal(cvobj.cv, cv)
        assert_equal(cvobj.screening_percentile, perc)
Example #12
def test_tv_regression_3D_image_doesnt_crash():
    rng = check_random_state(42)
    dim = (3, 4, 5)
    W_init = np.zeros(dim)
    W_init[2:3, 3:, 1:3] = 1

    n = 10
    p = dim[0] * dim[1] * dim[2]
    X = np.ones((n, 1)) + W_init.ravel().T
    X += rng.randn(n, p)
    y = np.dot(X, W_init.ravel())
    alpha = 1.
    X, mask = to_niimgs(X, dim)

    for l1_ratio in [0., .5, 1.]:
        BaseSpaceNet(mask=mask, alphas=alpha, l1_ratios=l1_ratio,
                     penalty="tv-l1", is_classif=False, max_iter=10).fit(X, y)
Example #13
def test_tv_regression_simple():
    rng = check_random_state(42)
    dim = (4, 4, 4)
    W_init = np.zeros(dim)
    W_init[2:3, 1:2, -2:] = 1
    n = 10
    p = np.prod(dim)
    X = np.ones((n, 1)) + W_init.ravel().T
    X += rng.randn(n, p)
    y = np.dot(X, W_init.ravel())
    X, mask = to_niimgs(X, dim)
    print("%s %s" % (X.shape, mask.get_data().sum()))
    alphas = [.1, 1.]

    for l1_ratio in [1.]:
        for debias in [True]:
            BaseSpaceNet(mask=mask, alphas=alphas, l1_ratios=l1_ratio,
                         penalty="tv-l1", is_classif=False, max_iter=10,
                         debias=debias).fit(X, y)
def test_baseestimator_invalid_l1_ratio():
    with pytest.raises(ValueError):
        BaseSpaceNet(l1_ratios=2.)
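A hypothetical sketch of the kind of validation the last test expects, with the check performed in __init__ so that pytest.raises catches it at construction time (ToySpaceNet is not nilearn code):

import pytest

class ToySpaceNet:
    def __init__(self, l1_ratios=.5):
        # reject l1 ratios outside the valid [0, 1] interval
        if not 0. <= l1_ratios <= 1.:
            raise ValueError(
                "l1_ratios must be in the interval [0, 1]; got %s" % l1_ratios)
        self.l1_ratios = l1_ratios

def test_toy_invalid_l1_ratio():
    with pytest.raises(ValueError):
        ToySpaceNet(l1_ratios=2.)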