Example #1
def test_prox_tvl1_approximates_prox_l1_for_lasso(size=15,
                                                  random_state=42,
                                                  decimal=4,
                                                  dgap_tol=1e-7):

    rng = np.random.RandomState(random_state)

    l1_ratio = 1.  # pure LASSO
    for ndim in range(3, 4):
        shape = [size] * ndim
        z = rng.randn(*shape)
        for weight in np.logspace(-10, 10, num=10):
            # use prox_tvl1 approximation to prox_l1
            a = _prox_tvl1(z.copy(),
                           weight=weight,
                           l1_ratio=l1_ratio,
                           dgap_tol=dgap_tol,
                           max_iter=10)[0][-1].ravel()

            # use exact closed-form soft shrinkage formula for prox_l1
            b = _prox_l1(z.copy(), weight)[-1].ravel()

            # results should be close in l-infinity norm
            np.testing.assert_almost_equal(np.abs(a - b).max(),
                                           0.,
                                           decimal=decimal)
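For reference, the "exact closed-form soft shrinkage formula" that the comparison above relies on is plain soft thresholding. A minimal standalone sketch of that operator, assuming nothing from nilearn (the name soft_threshold is illustrative):

import numpy as np

def soft_threshold(z, weight):
    # closed-form prox of weight * ||.||_1: shrink every entry towards zero
    return np.sign(z) * np.maximum(np.abs(z) - weight, 0.)

# e.g. soft_threshold(np.array([-2., .1, 3.]), 1.) gives array([-1., 0., 2.])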
Example #2
def test_input_args_and_kwargs():
    rng = np.random.RandomState(42)
    p = 125
    noise_std = 1e-1
    sig = np.zeros(p)
    sig[[0, 2, 13, 4, 25, 32, 80, 89, 91, 93, -1]] = 1
    sig[:6] = 2
    sig[-7:] = 2
    sig[60:75] = 1
    y = sig + noise_std * rng.randn(*sig.shape)
    X = np.eye(p)
    mask = np.ones((p,)).astype(bool)
    alpha = .01
    alpha_ = alpha * X.shape[0]
    l1_ratio = .2
    l1_weight = alpha_ * l1_ratio
    f1 = lambda w: _squared_loss(X, y, w, compute_grad=False)
    f1_grad = lambda w: _squared_loss(X, y, w, compute_grad=True,
                                      compute_energy=False)
    f2_prox = lambda w, l, *args, **kwargs: (_prox_l1(w, l * l1_weight),
                                             dict(converged=True))
    total_energy = lambda w: f1(w) + l1_weight * np.sum(np.abs(w))
    for cb_retval in [0, 1]:
        for verbose in [0, 1]:
            for dgap_factor in [1., None]:
                best_w, objective, init = mfista(
                    f1_grad, f2_prox, total_energy, 1., p,
                    dgap_factor=dgap_factor,
                    callback=lambda _: cb_retval, verbose=verbose,
                    max_iter=100)
                assert_equal(best_w.shape, mask.shape)
                assert_true(isinstance(objective, list))
                assert_true(isinstance(init, dict))
                for key in ["w", "t", "dgap_tol", "stepsize"]:
                    assert_true(key in init)
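To clarify the roles of the three callables handed to mfista above (a gradient of the smooth term, a prox of the nonsmooth term, and a total-energy function used to track the objective), here is a bare proximal-gradient (ISTA) loop built from the same kind of triple. This is an illustrative sketch only, not nilearn's mfista, which also handles the dgap_factor, callback and verbose arguments exercised by the test:

import numpy as np

rng = np.random.RandomState(42)
p = 125
X, y = np.eye(p), rng.randn(p)
l1_weight = .01 * p * .2  # same alpha_ * l1_ratio scaling as in the test

f1_grad = lambda w: X.T.dot(X.dot(w) - y)  # gradient of .5 * ||y - Xw||^2
f2_prox = lambda w, step: np.sign(w) * np.maximum(
    np.abs(w) - step * l1_weight, 0.)      # prox of step * l1_weight * ||.||_1
total_energy = lambda w: (.5 * np.sum((X.dot(w) - y) ** 2)
                          + l1_weight * np.sum(np.abs(w)))

w = np.zeros(p)
stepsize = 1.  # safe here: the Lipschitz constant of f1_grad is 1 since X = I
for _ in range(100):
    w = f2_prox(w - stepsize * f1_grad(w), stepsize)
print(total_energy(w))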
Example #3
def test_prox_l1_nonexpansiveness(n_features=10):
    rng = np.random.RandomState(42)
    x = rng.randn(n_features, 1)
    tau = 0.3
    s = _prox_l1(x.copy(), tau)
    p = x - s  # projection + shrinkage = id

    # We should have ||s(a) - s(b)||^2 <= ||a - b||^2 - ||p(a) - p(b)||^2
    # for all a and b (this is firm non-expansiveness).
    for (a, b), (pa, pb), (sa, sb) in zip(
            *[itertools.product(z.ravel(), z.ravel()) for z in [x, p, s]]):
        assert_true((sa - sb) ** 2 <= (a - b) ** 2 - (pa - pb) ** 2)
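The same property can also be checked in a compact vectorized form: soft shrinkage s and its residual p = id - s satisfy the firm non-expansiveness inequality for every pair of inputs. A standalone illustrative sketch that writes the shrinkage out directly instead of calling _prox_l1:

import numpy as np

rng = np.random.RandomState(42)
x = rng.randn(10)
tau = .3
s = np.sign(x) * np.maximum(np.abs(x) - tau, 0.)  # soft shrinkage
p = x - s                                         # residual: x clipped to [-tau, tau]

# ||s(a) - s(b)||^2 <= ||a - b||^2 - ||p(a) - p(b)||^2 for every pair (a, b),
# checked over all coordinate pairs at once via broadcasting
lhs = (s[:, None] - s[None, :]) ** 2
rhs = (x[:, None] - x[None, :]) ** 2 - (p[:, None] - p[None, :]) ** 2
assert np.all(lhs <= rhs + 1e-12)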
Example #4
def test_input_args_and_kwargs():
    rng = np.random.RandomState(42)
    p = 125
    noise_std = 1e-1
    sig = np.zeros(p)
    sig[[0, 2, 13, 4, 25, 32, 80, 89, 91, 93, -1]] = 1
    sig[:6] = 2
    sig[-7:] = 2
    sig[60:75] = 1
    y = sig + noise_std * rng.randn(*sig.shape)
    X = np.eye(p)
    mask = np.ones((p,)).astype(bool)
    alpha = .01
    alpha_ = alpha * X.shape[0]
    l1_ratio = .2
    l1_weight = alpha_ * l1_ratio
    f1 = lambda w: _squared_loss(X, y, w, compute_grad=False)
    f1_grad = lambda w: _squared_loss(
        X, y, w, compute_grad=True, compute_energy=False)
    f2_prox = lambda w, l, *args, **kwargs: (_prox_l1(w, l * l1_weight),
                                             dict(converged=True))
    total_energy = lambda w: f1(w) + l1_weight * np.sum(np.abs(w))
    for cb_retval in [0, 1]:
        for verbose in [0, 1]:
            for dgap_factor in [1., None]:
                best_w, objective, init = mfista(f1_grad,
                                                 f2_prox,
                                                 total_energy,
                                                 1.,
                                                 p,
                                                 dgap_factor=dgap_factor,
                                                 callback=lambda _: cb_retval,
                                                 verbose=verbose,
                                                 max_iter=100)
                assert best_w.shape == mask.shape
                assert isinstance(objective, list)
                assert isinstance(init, dict)
                for key in ["w", "t", "dgap_tol", "stepsize"]:
                    assert key in init