def test_3D__gradient_id():
    """Check the output shape of _gradient_id on a 3-D image.

    BUG FIX: the original test used a 2-D array despite the "3D" name
    (the 2-D case is already covered by test_2D__gradient_id); use a
    genuine 3-D volume instead.
    """
    img = np.array([[[1, 3], [4, 2]], [[1, 0], [2, 5]]])
    for l1_ratio in [0., .1, .3, .5, .7, .9, 1.]:
        gid = _gradient_id(img, l1_ratio)

        # _gradient_id stacks one gradient component per axis plus an
        # identity component, hence ndim + 1 leading entries.
        np.testing.assert_array_equal(
            gid.shape, [img.ndim + 1] + list(img.shape))
Example #2
0
def test_3D__gradient_id():
    """Check the output shape of _gradient_id on a 3-D image.

    BUG FIX: the original test used a 2-D array despite the "3D" name
    (the 2-D case is already covered by test_2D__gradient_id); use a
    genuine 3-D volume instead.
    """
    img = np.array([[[1, 3], [4, 2]], [[1, 0], [2, 5]]])
    for l1_ratio in [0., .1, .3, .5, .7, .9, 1.]:
        gid = _gradient_id(img, l1_ratio)

        # _gradient_id stacks one gradient component per axis plus an
        # identity component, hence ndim + 1 leading entries.
        np.testing.assert_array_equal(
            gid.shape, [img.ndim + 1] + list(img.shape))
Example #3
0
def test_2D__gradient_id():
    """Check _gradient_id's output shape and identity part on a 2-D image."""
    img = np.array([[1, 3], [4, 2]])
    l1_ratios = (0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0)
    for l1_ratio in l1_ratios:
        gid = _gradient_id(img, l1_ratio)

        # One gradient component per axis plus the identity component.
        expected_shape = [img.ndim + 1] + list(img.shape)
        np.testing.assert_array_equal(gid.shape, expected_shape)

        # The last component is the identity term, scaled by l1_ratio.
        np.testing.assert_array_equal(l1_ratio * img, gid[-1])
Example #4
0
def test_1D__gradient_id():
    """Check _gradient_id's output shape and identity part on 1-D signals."""
    for length in (1, 2, 10):
        signal = np.arange(length)
        for l1_ratio in (0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0):
            gid = _gradient_id(signal, l1_ratio=l1_ratio)

            # One gradient component per axis plus the identity component.
            np.testing.assert_array_equal(
                gid.shape, [signal.ndim + 1] + list(signal.shape))

            # The last component is the identity term, scaled by l1_ratio.
            np.testing.assert_array_equal(l1_ratio * signal, gid[-1])
def test_1D__gradient_id():
    """Check _gradient_id's output shape and identity part on 1-D signals."""
    sizes = [1, 2, 10]
    ratios = [0., .1, .3, .5, .7, .9, 1.]
    for size in sizes:
        img = np.arange(size)
        for ratio in ratios:
            gid = _gradient_id(img, l1_ratio=ratio)

            # Expect ndim + 1 components: gradients plus identity.
            shape_wanted = [img.ndim + 1] + list(img.shape)
            np.testing.assert_array_equal(gid.shape, shape_wanted)

            # Identity part (last component) is the l1_ratio-scaled input.
            np.testing.assert_array_equal(ratio * img, gid[-1])
def test_grad_div_adjoint_arbitrary_ndim(size=5, max_ndim=5):
    """Check that <D x, y> == <x, D^T y> for random vectors x and y,
    i.e. that -_div_id is the adjoint of _gradient_id, in 1..max_ndim-1
    dimensions."""
    rng = check_random_state(42)

    for ndim in range(1, max_ndim):
        shape = (size,) * ndim
        x = rng.normal(size=shape)
        # y lives in the operator's range: ndim gradient components + identity.
        y = rng.normal(size=[ndim + 1] + list(shape))
        for l1_ratio in (0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0):
            lhs = np.sum(_gradient_id(x, l1_ratio=l1_ratio) * y)
            rhs = -np.sum(x * _div_id(y, l1_ratio=l1_ratio))
            np.testing.assert_almost_equal(lhs, rhs)
Example #7
0
def test_grad_div_adjoint_arbitrary_ndim(size=5, max_ndim=5):
    """Adjointness check: <D x, y> must equal <x, D^T y> (with
    D^T = -_div_id) for random x, y across dimensionalities."""
    rng = check_random_state(42)
    ratios = [0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0]

    for ndim in range(1, max_ndim):
        shape = tuple([size] * ndim)
        x = rng.normal(size=shape)
        # Range of the operator has ndim + 1 components (gradients + identity).
        y = rng.normal(size=[ndim + 1] + list(shape))
        for l1_ratio in ratios:
            inner_dx_y = np.sum(_gradient_id(x, l1_ratio=l1_ratio) * y)
            inner_x_dty = -np.sum(x * _div_id(y, l1_ratio=l1_ratio))
            np.testing.assert_almost_equal(inner_dx_y, inner_x_dty)
Example #8
0
def test_tvl1_from_gradient(size=5, n_samples=10, random_state=42):
    """Check that the TV-L1 objective computed directly equals the squared
    loss plus alpha times the penalty recomputed from the gradient+identity
    operator output.
    """
    rng = np.random.RandomState(random_state)
    shape = [size] * 3
    n_voxels = np.prod(shape)
    X = rng.randn(n_samples, n_voxels)
    y = rng.randn(n_samples)
    w = rng.randn(*shape)
    # BUG FIX: np.bool was removed in NumPy 1.24 (deprecated since 1.20);
    # the builtin bool is the correct dtype here.
    mask = np.ones_like(w).astype(bool)
    for alpha in [0., 1e-1, 1e-3]:
        for l1_ratio in [0., .5, 1.]:
            gradid = _gradient_id(w, l1_ratio=l1_ratio)
            assert_equal(
                _tvl1_objective(X, y,
                                w.copy().ravel(), alpha, l1_ratio, mask),
                _squared_loss(X, y, w.copy().ravel(), compute_grad=False) +
                alpha * _tvl1_objective_from_gradient(gradid))
Example #9
0
def test_tvl1_from_gradient(size=5, n_samples=10, random_state=42):
    """Check that the TV-L1 objective computed directly equals the squared
    loss plus alpha times the penalty recomputed from the gradient+identity
    operator output.
    """
    rng = np.random.RandomState(random_state)
    shape = [size] * 3
    n_voxels = np.prod(shape)
    X = rng.randn(n_samples, n_voxels)
    y = rng.randn(n_samples)
    w = rng.randn(*shape)
    # BUG FIX: np.bool was removed in NumPy 1.24 (deprecated since 1.20);
    # the builtin bool is the correct dtype here.
    mask = np.ones_like(w).astype(bool)
    for alpha in [0., 1e-1, 1e-3]:
        for l1_ratio in [0., .5, 1.]:
            gradid = _gradient_id(w, l1_ratio=l1_ratio)
            assert_equal(_tvl1_objective(
                X, y, w.copy().ravel(), alpha, l1_ratio, mask),
                _squared_loss(X, y, w.copy().ravel(),
                              compute_grad=False
                              ) + alpha * _tvl1_objective_from_gradient(
                    gradid))