def forward(self, x, weights_row, weights_col):
    r"""Solve the total variation problem and return the solution.

    Arguments
    ---------
    x: :class:`torch:torch.Tensor`
        A tensor with shape ``(m, n)`` holding the input signal.
    weights_row: :class:`torch:torch.Tensor`
        The horizontal edge weights. Tensor of shape ``(m, n - 1)``, or
        ``(1,)`` if all weights are equal.
    weights_col: :class:`torch:torch.Tensor`
        The vertical edge weights. Tensor of shape ``(m - 1, n)``, or
        ``(1,)`` if all weights are equal.

    Returns
    -------
    :class:`torch:torch.Tensor`
        The solution to the total variation problem, of shape ``(m, n)``.
    """
    opt = tv1w_2d(x.numpy(), weights_col.numpy(), weights_row.numpy(),
                  **self.tv_args)
    if self.refine:
        opt = self._refine(opt, x, weights_row, weights_col)
    opt = torch.Tensor(opt).view_as(x)
    self.save_for_backward(opt)
    return opt
def solve_and_refine(x, w_col, w_row, refine=True, **tv_args):
    opt = tv1w_2d(x, w_col, w_row, **tv_args)
    if refine:
        opt = TotalVariationBase._refine(opt, x, w_row, w_col)
    return opt
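A minimal standalone sketch of the underlying call, assuming `prox_tv` is installed. Since `solve_and_refine` depends on `TotalVariationBase`, which is not shown here, the sketch calls `prox_tv.tv1w_2d` directly; the data and weight values are made up, and the array shapes follow the docstring above (input `(m, n)`, column weights `(m - 1, n)`, row weights `(m, n - 1)`).

import numpy as np
import prox_tv as ptv

m, n = 8, 10
x = np.random.randn(m, n)          # hypothetical input signal
w_col = 0.5 * np.ones((m - 1, n))  # weights on vertical edges
w_row = 0.5 * np.ones((m, n - 1))  # weights on horizontal edges

# Weighted 2D TV-L1 denoising; argument order mirrors the helper above.
opt = ptv.tv1w_2d(x, w_col, w_row)
assert opt.shape == x.shape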
def test_tv1w_2d_emengd():
    r"""Issue reported by emengd

    Make the solver fail due to missing checks on integer arguments.
    """
    a = -np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) / 10.
    sol1 = tv1w_2d(a, np.array([[1, 1, 1], [1, 1, 1]]),
                   np.array([[1, 1], [1, 1], [1, 1]]), max_iters=100)
    sol2 = tv1_2d(a, 1)
    assert np.allclose(sol1, sol2, atol=1e-3)
def test_tv1w_2d_uniform_weights():
    for _ in range(20):
        x = _generate2D()
        rows = len(x)
        cols = len(x[0])
        w1 = np.random.rand()
        w_rows = np.ones([rows - 1, cols]) * w1
        w_cols = np.ones([rows, cols - 1]) * w1
        solw = tv1w_2d(x, w_rows, w_cols, max_iters=5000)
        solw1 = tv1_2d(x, w1, max_iters=5000)
        assert np.allclose(solw, solw1, atol=1e-3)
def test_tv1w_2d_uniform_weights():
    for _ in range(20):
        rows = np.random.randint(1e1, 3e1)
        cols = np.random.randint(1e1, 3e1)
        x = 100 * np.random.randn(rows, cols)
        w1 = np.random.rand()
        w_rows = np.ones([rows - 1, cols]) * w1
        w_cols = np.ones([rows, cols - 1]) * w1
        solw = tv1w_2d(x, w_rows, w_cols, max_iters=5000)
        solw1 = tv1_2d(x, w1, max_iters=5000)
        assert np.allclose(solw, solw1, atol=1e-3)
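The `_generate2D` helper used by the first variant of this test is not defined in this section. Judging from the inline version just above, a plausible reconstruction is the following; the name is taken from the surrounding tests and the size bounds are an assumption based on the inline variant.

import numpy as np

def _generate2D():
    # Hypothetical reconstruction of the test helper: a random 2D signal
    # with between 10 and 30 rows/columns, matching the inline variant above.
    rows = np.random.randint(1e1, 3e1)
    cols = np.random.randint(1e1, 3e1)
    return 100 * np.random.randn(rows, cols)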
def test_tv1_tv1w_2d():
    """Tests that 2D-TV1w == 2D-TV1 for uniform weights"""
    for _ in range(20):
        x = _generate2D()
        rows = len(x)
        cols = len(x[0])
        w = 20 * np.random.rand()
        w_cols = w * np.ones((rows - 1, cols))
        w_rows = w * np.ones((rows, cols - 1))
        solution1 = tv1_2d(x, w, max_iters=5000)
        solutionp = tv1w_2d(x, w_cols, w_rows, max_iters=5000)
        assert np.allclose(solution1, solutionp, atol=1e-3)
def test_tv1_2d():
    methods = ('yang', 'condat', 'chambolle-pock')
    for _ in range(20):
        rows = np.random.randint(1e1, 3e1)
        cols = np.random.randint(1e1, 3e1)
        x = 100 * np.random.randn(rows, cols)
        w = 20 * np.random.rand()
        solutions = [tv1_2d(x, w, method=method, max_iters=5000)
                     for method in methods]
        solutions.append(tvp_2d(x, w, w, 1, 1, max_iters=5000))
        w_cols = w * np.ones((rows - 1, cols))
        w_rows = w * np.ones((rows, cols - 1))
        solutions.append(tv1w_2d(x, w_cols, w_rows, max_iters=5000))
        for i in range(1, len(solutions)):
            assert np.allclose(solutions[i], solutions[0], atol=1e-3)
# Load image
X = io.imread('colors.png')
X = ski.img_as_float(X)
X = color.rgb2gray(X)

# Introduce noise
noiseLevel = 0.01
N = util.random_noise(X, mode='speckle', var=noiseLevel)

# Gradient in columns
W1 = 0.01 * np.cumsum(np.ones((X.shape[0] - 1, X.shape[1])), 1)
W2 = 0.01 * np.ones((X.shape[0], X.shape[1] - 1))

print('Solving 2D weighted TV...')
start = time.time()
FW = ptv.tv1w_2d(N, W1, W2)
end = time.time()
print('Elapsed time ' + str(end - start))

plt.subplot(3, 4, 1)
io.imshow(W1)
plt.title('Weights along columns')
plt.subplot(3, 4, 5)
io.imshow(W2)
plt.title('Weights along rows')
plt.subplot(3, 4, 9)
io.imshow(FW)
plt.title('Filter result')

# Gradient in rows
W1 = 0.01 * np.ones((X.shape[0] - 1, X.shape[1]))
W2 = 0.01 * np.cumsum(np.ones((X.shape[0], X.shape[1] - 1)), 0)

print('Solving 2D weighted TV...')
start = time.time()
FW = ptv.tv1w_2d(N, W1, W2)
end = time.time()
print('Elapsed time ' + str(end - start))
def forward(self, x, weights_row, weights_col):
    r"""Solve the total variation problem and return the solution.

    Arguments
    ---------
    x: :class:`torch:torch.Tensor`
        A tensor with shape ``(m, n)`` holding the input signal.
    weights_row: :class:`torch:torch.Tensor`
        The horizontal edge weights. Tensor of shape ``(m, n - 1)``, or
        ``(1,)`` if all weights are equal.
    weights_col: :class:`torch:torch.Tensor`
        The vertical edge weights. Tensor of shape ``(m - 1, n)``, or
        ``(1,)`` if all weights are equal.

    Returns
    -------
    :class:`torch:torch.Tensor`
        The solution to the total variation problem, of shape ``(m, n)``.
    """
    self.refine = True
    self.tv_args = {}
    self.average_connected = True

    def _linearize(y, weights_row, weights_col):
        """Compute a linearization of the graph-cut function at the given point.

        Arguments
        ---------
        y : numpy.ndarray
            The point where the linearization is computed, shape ``(m, n)``.
        weights_row : numpy.ndarray
            The non-negative row weights, with shape ``(m, n - 1)``.
        weights_col : numpy.ndarray
            The non-negative column weights, with shape ``(m - 1, n)``.

        Returns
        -------
        numpy.ndarray
            The linearization of the graph-cut function at ``y``.
        """
        diffs_col = np.sign(y[1:, :] - y[:-1, :])
        diffs_row = np.sign(y[:, 1:] - y[:, :-1])
        f = np.zeros_like(y)  # The linearization.
        f[:, 1:] += diffs_row * weights_row
        f[:, :-1] -= diffs_row * weights_row
        f[1:, :] += diffs_col * weights_col
        f[:-1, :] -= diffs_col * weights_col
        return f

    def _refine(opt, x, weights_row, weights_col):
        """Refine the solution by solving an isotonic regression.

        The weights can either be two-dimensional tensors, or of shape (1,).
        """
        idx = np.argsort(opt.ravel())  # Will pick an arbitrary order cone.
        ordered_vec = np.zeros_like(idx, dtype=float)
        ordered_vec[idx] = np.arange(np.size(opt))
        f = _linearize(ordered_vec.reshape(opt.shape),
                       weights_row.cpu().detach().numpy(),
                       weights_col.cpu().detach().numpy())
        opt_idx = isotonic(
            (x.view(-1).cpu().detach().numpy() - f.ravel())[idx])
        opt = np.zeros_like(opt_idx)
        opt[idx] = opt_idx
        return opt

    opt = tv1w_2d(x.cpu().detach().numpy(),
                  weights_col.cpu().detach().numpy(),
                  weights_row.cpu().detach().numpy(), **self.tv_args)
    if self.refine:
        opt = _refine(opt, x, weights_row, weights_col)
    opt = torch.Tensor(opt).view_as(x)
    self.save_for_backward(opt)
    return opt.to(x.device)  # move the result back to the input's device
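The `isotonic` function called by `_refine` is imported from elsewhere and is not shown in this section. Assuming it performs an L2 isotonic regression (projection onto the cone of non-decreasing vectors), a minimal pool-adjacent-violators sketch of that operation looks like the following; the name `pava_non_decreasing` and the monotonicity direction are assumptions, not the library's actual implementation.

import numpy as np

def pava_non_decreasing(y):
    # Minimal pool-adjacent-violators sketch: L2 projection of y onto
    # the cone of non-decreasing vectors (assumed behaviour of isotonic).
    y = np.asarray(y, dtype=float)
    vals, lens = [], []  # running blocks: (mean value, block length)
    for v in y:
        vals.append(v)
        lens.append(1)
        # Merge the last two blocks while their means violate monotonicity.
        while len(vals) > 1 and vals[-2] > vals[-1]:
            total = vals[-1] * lens[-1] + vals[-2] * lens[-2]
            lens[-2] += lens[-1]
            vals[-2] = total / lens[-2]
            vals.pop()
            lens.pop()
    return np.repeat(vals, lens)

# Example: projecting [3, 1, 2] yields the non-decreasing fit [2, 2, 2].
print(pava_non_decreasing([3, 1, 2]))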
def flatten_color(im, output_folder=None, iterations=4, exp_factor=10):
    import time
    import numpy as np
    import prox_tv as ptv
    from skimage import color
    import json

    im = np.asfarray(im)
    row, col, ch = im.shape
    shape_2d = (row, col)
    im_lab = np.asfarray(color.rgb2lab(im))

    # Hyperparameters:
    # iterations: default 2 or 4
    # exp_factor: degree of flattening (changes according to image type)
    h = 1  # neighbourhood (best 1)

    start = time.time()
    iterations_ = iterations

    # Memory allocation
    grady_l = np.zeros((row, col, h))
    grady_a = np.zeros((row, col, h))
    grady_b = np.zeros((row, col, h))
    gradx_l = np.zeros((row, col, h))
    gradx_a = np.zeros((row, col, h))
    gradx_b = np.zeros((row, col, h))
    F = np.zeros((row, col, ch))

    while iterations > 0:
        i = 0  # only the immediate neighbourhood
        grady_l[:, :, i] = im_lab[:, :, 0] - np.roll(
            im_lab[:, :, 0].reshape(shape_2d), i + 1, axis=0)
        grady_a[:, :, i] = im_lab[:, :, 1] - np.roll(
            im_lab[:, :, 1].reshape(shape_2d), i + 1, axis=0)
        grady_b[:, :, i] = im_lab[:, :, 2] - np.roll(
            im_lab[:, :, 2].reshape(shape_2d), i + 1, axis=0)
        gradx_l[:, :, i] = im_lab[:, :, 0] - np.roll(
            im_lab[:, :, 0].reshape(shape_2d), i + 1, axis=1)
        gradx_a[:, :, i] = im_lab[:, :, 1] - np.roll(
            im_lab[:, :, 1].reshape(shape_2d), i + 1, axis=1)
        gradx_b[:, :, i] = im_lab[:, :, 2] - np.roll(
            im_lab[:, :, 2].reshape(shape_2d), i + 1, axis=1)

        # Edge weights decay exponentially with the local Lab gradient energy.
        weight_y = 10**exp_factor * np.exp(
            -1 * (np.sum(grady_l**2, axis=2) + np.sum(grady_b**2, axis=2) +
                  np.sum(grady_a**2, axis=2)) / 2.0)
        weight_x = 10**exp_factor * np.exp(
            -1 * (np.sum(gradx_l**2, axis=2) + np.sum(gradx_b**2, axis=2) +
                  np.sum(gradx_a**2, axis=2)) / 2.0)

        print('TV-l1 flattening by proximal algo: Iteration ' +
              str(iterations_ - iterations + 1))

        # Weighted 2D TV per channel: image | column weights | row weights.
        F[:, :, 0] = ptv.tv1w_2d(im[:, :, 0].reshape(shape_2d),
                                 weight_y[1:, :], weight_x[:, 1:])
        F[:, :, 1] = ptv.tv1w_2d(im[:, :, 1].reshape(shape_2d),
                                 weight_y[1:, :], weight_x[:, 1:])
        F[:, :, 2] = ptv.tv1w_2d(im[:, :, 2].reshape(shape_2d),
                                 weight_y[1:, :], weight_x[:, 1:])

        # Update for the next pass.
        im = F
        im_lab = color.rgb2lab(F)
        iterations -= 1

        print(np.min(F.ravel()))
        print(np.max(F.ravel()))

        # Notify if exp_factor is too large.
        if np.max(F.ravel()) >= 1.2:
            with open(output_folder + 'error_log.json', 'w') as outfile:
                json.dump('Overflattening: Decrease exp_factor hyperparameter!',
                          outfile)
        assert np.max(F.ravel()) < 1.2, \
            'Overflattening: Decrease exp_factor hyperparameter!'

    end = time.time()
    print('Elapsed time ' + str(end - start))
    return F
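A minimal usage sketch for the helper above. The image file name is made up, and `output_folder` should end with a path separator, since the function concatenates it directly with the error-log file name.

import skimage as ski
from skimage import io

# Hypothetical RGB input image with values in [0, 1].
im = ski.img_as_float(io.imread('input.png'))

flat = flatten_color(im, output_folder='./', iterations=4, exp_factor=10)
io.imsave('flattened.png', flat.clip(0.0, 1.0))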