Example #1
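These snippets exercise nilearn's private decoding helpers and, as listed, rely on module-level imports from the original test files. A hedged preamble that should make them runnable (the module paths are assumptions based on nilearn's decoding package layout and may shift between releases; `_make_data` in Example #2 is a fixture defined next to the original tests, not a library import):

import numpy as np
from sklearn.utils import check_random_state

# Assumed locations of nilearn's private decoding helpers
from nilearn.decoding.fista import mfista
from nilearn.decoding.objective_functions import (_gradient_id, _squared_loss,
                                                  _squared_loss_grad)
from nilearn.decoding.proximal_operators import _prox_l1
from nilearn.decoding.space_net_solvers import (
    _squared_loss_and_spatial_grad, _squared_loss_and_spatial_grad_derivative,
    _tvl1_objective, _tvl1_objective_from_gradient)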
def test_input_args_and_kwargs():
    rng = np.random.RandomState(42)
    p = 125
    noise_std = 1e-1
    sig = np.zeros(p)
    sig[[0, 2, 13, 4, 25, 32, 80, 89, 91, 93, -1]] = 1
    sig[:6] = 2
    sig[-7:] = 2
    sig[60:75] = 1
    y = sig + noise_std * rng.randn(*sig.shape)
    X = np.eye(p)
    mask = np.ones((p,), dtype=bool)  # np.bool was removed from NumPy
    alpha = .01
    alpha_ = alpha * X.shape[0]
    l1_ratio = .2
    l1_weight = alpha_ * l1_ratio
    f1 = lambda w: _squared_loss(X, y, w, compute_grad=False)
    f1_grad = lambda w: _squared_loss(X, y, w, compute_grad=True,
                                      compute_energy=False)
    f2_prox = lambda w, l, *args, **kwargs: (_prox_l1(w, l * l1_weight),
                                             dict(converged=True))
    total_energy = lambda w: f1(w) + l1_weight * np.sum(np.abs(w))
    for cb_retval in [0, 1]:
        for verbose in [0, 1]:
            for dgap_factor in [1., None]:
                best_w, objective, init = mfista(
                    f1_grad, f2_prox, total_energy, 1., p,
                    dgap_factor=dgap_factor,
                    callback=lambda _: cb_retval, verbose=verbose,
                    max_iter=100)
                assert best_w.shape == mask.shape
                assert isinstance(objective, list)
                assert isinstance(init, dict)
                for key in ["w", "t", "dgap_tol", "stepsize"]:
                    assert key in init
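The f2_prox above wraps _prox_l1, the proximal operator of the l1 penalty, which is elementwise soft-thresholding. A minimal self-contained sketch of that operator (soft_threshold is a hypothetical name, not nilearn API; _prox_l1 may differ in details such as in-place updates):

import numpy as np

def soft_threshold(w, tau):
    # prox of tau * ||.||_1: shrink each coordinate toward zero by tau,
    # zeroing out anything whose magnitude is below tau
    return np.sign(w) * np.maximum(np.abs(w) - tau, 0.0)

The cb_retval loop feeds mfista callbacks that return 0 or 1; by the usual solver-loop convention (an assumption here, not documented API), a truthy return requests early termination. A hypothetical callback built on that convention:

def stop_after(n_calls):
    # Hypothetical helper: returns a callback that starts reporting "stop"
    # (truthy) once it has been invoked n_calls times.
    state = {"calls": 0}

    def callback(variables):  # the single argument is whatever mfista passes
        state["calls"] += 1
        return state["calls"] >= n_calls

    return callback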
Example #2
def test_same_energy_calculus_pure_lasso():
    rng = check_random_state(42)
    X, y, w, mask = _make_data(rng=rng, masked=True)

    # check funcvals
    f1 = _squared_loss(X, y, w)
    f2 = _squared_loss_and_spatial_grad(X, y, w.ravel(), mask, 0.)
    assert f1 == f2

    # check derivatives
    g1 = _squared_loss_grad(X, y, w)
    g2 = _squared_loss_and_spatial_grad_derivative(X, y, w.ravel(), mask, 0.)
    np.testing.assert_array_equal(g1, g2)
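Written out, the identity these checks assert: with zero spatial weight the smooth graph-net energy collapses to the plain squared loss (the form below is the conventional one; exact scaling conventions are nilearn's):

\[
f_\lambda(w) \;=\; \tfrac{1}{2}\,\lVert y - Xw \rVert_2^2 \;+\; \tfrac{\lambda}{2}\,\lVert \nabla w \rVert_2^2,
\qquad
f_0(w) \;=\; \tfrac{1}{2}\,\lVert y - Xw \rVert_2^2,
\]

and correspondingly \(\nabla f_0(w) = X^\top (Xw - y)\), which is what the derivative comparison verifies.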
Example #3
def test_tvl1_from_gradient(size=5, n_samples=10, random_state=42):
    rng = np.random.RandomState(random_state)
    shape = [size] * 3
    n_voxels = np.prod(shape)
    X = rng.randn(n_samples, n_voxels)
    y = rng.randn(n_samples)
    w = rng.randn(*shape)
    mask = np.ones_like(w, dtype=bool)  # np.bool was removed from NumPy
    for alpha in [0., 1e-1, 1e-3]:
        for l1_ratio in [0., .5, 1.]:
            gradid = _gradient_id(w, l1_ratio=l1_ratio)
            np.testing.assert_equal(
                _tvl1_objective(X, y, w.copy().ravel(), alpha, l1_ratio,
                                mask),
                _squared_loss(X, y, w.copy().ravel(), compute_grad=False)
                + alpha * _tvl1_objective_from_gradient(gradid))
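Spelled out, the asserted decomposition of the TV-l1 objective (the penalty form below is the standard TV-l1 energy; the exact weighting inside _gradient_id is nilearn's convention):

\[
J(w) \;=\; \tfrac{1}{2}\,\lVert y - Xw \rVert_2^2 \;+\; \alpha\,\big[(1 - \rho)\,\mathrm{TV}(w) \;+\; \rho\,\lVert w \rVert_1\big],
\qquad \rho = \texttt{l1\_ratio},
\]

where the bracketed penalty is what the test evaluates via _tvl1_objective_from_gradient(gradid), with gradid the stacked spatial gradient and identity returned by _gradient_id(w, l1_ratio=l1_ratio).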