def forward(self, x, weights):
        r"""Solve the total variation problem and return the solution.

        Arguments
        ---------
            x: :class:`torch:torch.Tensor`
                A tensor with shape ``(n,)`` holding the input signal.
            weights: :class:`torch:torch.Tensor`
                The edge weights.

                Shape ``(n-1,)``, or ``(1,)`` if all weights are equal.

        Returns
        -------
        :class:`torch:torch.Tensor`
            The solution to the total variation problem, with the same shape as ``x``, i.e. ``(n,)``.
        """
        self.equal_weights = weights.size() == (1, )
        if self.equal_weights:
            opt = tv1_1d(x.numpy().ravel(), weights.numpy()[0], **self.tv_args)
        else:
            opt = tv1w_1d(x.numpy().ravel(),
                          weights.numpy().ravel(), **self.tv_args)
        opt = torch.Tensor(opt).view_as(x)

        self.save_for_backward(opt)
        return opt
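For reference, the same two code paths can be exercised directly against prox_tv's ``tv1_1d`` and ``tv1w_1d``. This is a minimal sketch, assuming double-precision inputs and calling the solvers outside the autograd wrapper (the class that owns ``forward`` is not shown in this snippet):

import torch
from prox_tv import tv1_1d, tv1w_1d

x = torch.randn(10, dtype=torch.float64)

# Uniform weights: a single-element tensor, so the scalar tv1_1d path applies.
uniform_w = torch.tensor([0.5], dtype=torch.float64)
opt_uniform = torch.as_tensor(tv1_1d(x.numpy().ravel(), uniform_w.numpy()[0]))

# Per-edge weights: one positive weight per neighbouring pair, shape (n - 1,).
per_edge_w = torch.rand(x.numel() - 1, dtype=torch.float64) + 0.1
opt_weighted = torch.as_tensor(tv1w_1d(x.numpy().ravel(), per_edge_w.numpy().ravel()))

print(opt_uniform.shape, opt_weighted.shape)  # both torch.Size([10])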
Example #2
from prox_tv import tv1_1d, tv1w_1d


def solve_and_refine(x, w, equal_weights=True, **tv_args):
    """Solve the 1-D total variation problem for uniform or per-edge weights."""
    if equal_weights:
        # A single weight penalises every edge equally.
        opt = tv1_1d(x.reshape(-1), w[0], **tv_args)
    else:
        # One weight per neighbouring pair of samples.
        opt = tv1w_1d(x.reshape(-1), w.reshape(-1), **tv_args)
    return opt
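A quick usage sketch for the helper above; the weight values and signal length are illustrative, not taken from the original snippet.

import numpy as np

x = 100 * np.random.randn(50)

# Uniform penalty: a one-element weight array takes the scalar path.
sol_uniform = solve_and_refine(x, np.array([1.0]), equal_weights=True)

# Per-edge penalties: n - 1 positive weights take the weighted path.
sol_weighted = solve_and_refine(x, np.random.rand(49) + 0.1, equal_weights=False)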
import numpy as np
from prox_tv import tv1_1d, tv1w_1d


def test_tv1w_1d():
    # Both solver backends should return the same solution.
    methods = ('tautstring', 'pn')
    for _ in range(20):
        dimension = np.random.randint(10, 1000)
        x = 100 * np.random.randn(dimension)
        w = 20 * np.random.rand(dimension - 1)
        solutions = [tv1w_1d(x, w, method=method) for method in methods]
        for i in range(len(solutions) - 1):
            assert np.allclose(solutions[i], solutions[i + 1])
def test_tv1w_1d_uniform_weights_small_input():
    for _ in range(1000):
        dimension = np.random.randint(2, 4)
        x = 100 * np.random.randn(dimension)
        w1 = np.random.rand()
        w = np.ones(dimension - 1) * w1
        solw = tv1w_1d(x, w)
        sol = tv1_1d(x, w1)
        assert np.allclose(solw, sol)
Example #7
def prox(Z, s):
    """Weighted total variation prox operator applied along the row dimension.

    ``w`` (edge weights) and ``shape`` are captured from the enclosing scope,
    which is not included in this snippet.
    """
    return ptv.tv1w_1d(Z.T, s * w).reshape(shape).T
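The ``prox`` closure above captures ``w`` and ``shape`` from its enclosing scope, which is not shown here. As a self-contained illustration of how such a weighted TV prox is typically used, here is a minimal proximal-gradient sketch on a 1-D signal; the quadratic data term, step size, and weight values are assumptions for the example, not taken from the original code:

import numpy as np
import prox_tv as ptv

rng = np.random.default_rng(0)
n = 200
target = np.repeat([0.0, 1.0, -1.0, 2.0], n // 4)  # piecewise-constant signal
y = target + 0.3 * rng.standard_normal(n)          # noisy observation
w = np.full(n - 1, 0.5)                             # per-edge TV weights (illustrative)

def prox_tv1w(z, step):
    # argmin_u 0.5 * ||u - z||^2 + step * sum_i w_i * |u_{i+1} - u_i|
    return ptv.tv1w_1d(z, step * w)

# Proximal gradient descent on 0.5 * ||u - y||^2 + weighted TV penalty.
u = y.copy()
step = 0.5
for _ in range(50):
    grad = u - y                        # gradient of the quadratic data term
    u = prox_tv1w(u - step * grad, step)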
import time

import matplotlib.pyplot as plt
import numpy as np
import prox_tv as ptv

# Generate impulse (blocky) signal
N = 1000
s = np.zeros((N, 1))
s[N // 4:N // 2] = 1
s[N // 2:3 * N // 4] = -1
s[3 * N // 4:-N // 8] = 2

# Introduce noise
n = s + 0.5 * np.random.randn(*np.shape(s))

# Generate weights
lam = np.linspace(0, 2, N - 1)

# Filter using weighted TV-L1
print('Filtering signal with weighted TV-L1...')
start = time.time()
f = ptv.tv1w_1d(n, lam)
end = time.time()
print('Elapsed time ' + str(end - start))

# Plot results
plt.subplot(4, 1, 1)
plt.title('Weighted TVL1 filtering')
plt.plot(s)
plt.ylabel('Original')
plt.grid(True)

plt.subplot(4, 1, 2)
plt.plot(n)
plt.ylabel('Noisy')
plt.grid(True)
Example #9
### Weighted TV-L1 filtering

# Generate impulse (blocky) signal (_blockysignal is a helper defined
# elsewhere in the original source)
s = _blockysignal()
N = np.size(s)  # N is not defined in this excerpt; derive it from the signal length

# Introduce noise
n = s + 0.5 * np.random.randn(*np.shape(s))

# Generate weights
lam = np.linspace(0, 2, N - 1)

# Filter using weighted TV-L1
print('Filtering signal with weighted TV-L1...')
start = time.time()
f = ptv.tv1w_1d(n, lam)
end = time.time()
print('Elapsed time ' + str(end - start))

# Plot results
plt.subplot(4, 1, 1)
plt.title('Weighted TVL1 filtering')
plt.plot(s)
plt.ylabel('Original')
plt.grid(True)

plt.subplot(4, 1, 2)
plt.plot(n)
plt.ylabel('Noisy')
plt.grid(True)