def _test_tv1_methods(x, w):
    """For given input signal and weight, all TV1 methods must be similar"""
    methods = ('classictautstring', 'linearizedtautstring', 'hybridtautstring',
               'pn', 'condat', 'dp', 'condattautstring', 'kolmogorov')
    solutions = [tv1_1d(x, w, method=method) for method in methods]
    solutions.append(tv1_1d(x, w, method='hybridtautstring', maxbacktracks=1.2))
    for i in range(1, len(solutions)):
        assert np.allclose(solutions[0], solutions[i], atol=1e-3)
def prox_total_variation(M, mu):
    if M.ndim == 1 or M.shape[1] == 1:
        return (tv1_1d(M - M.min(), mu) + M.min()).reshape(M.shape)
    elif M.ndim == 2:
        # Apply the 1d prox to each column, then transpose back so the
        # result keeps the shape of M (the original iterated over
        # M.shape[0], which only works for square inputs).
        return np.array([
            tv1_1d(M[:, i] - M[:, i].min(), mu) + M[:, i].min()
            for i in range(M.shape[1])
        ]).T
    elif M.ndim == 3:
        return np.array([[
            tv1_1d(M[:, j, k] - M[:, j, k].min(), mu) + M[:, j, k].min()
            for k in range(M.shape[2])
        ] for j in range(M.shape[1])]).transpose(2, 0, 1)
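# A minimal usage sketch for prox_total_variation above (illustration only;
# it assumes `numpy` and `from prox_tv import tv1_1d` are in scope where the
# function is defined). The prox is applied independently along axis 0 and
# the output keeps the input's shape.
import numpy as np

rng = np.random.RandomState(0)
M2 = rng.randn(50, 3)                    # 2d input: one signal per column
assert prox_total_variation(M2, mu=1.0).shape == M2.shape
M3 = rng.randn(50, 4, 5)                 # 3d input: signals along first axis
assert prox_total_variation(M3, mu=1.0).shape == M3.shape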
def forward(self, x, weights):
    r"""Solve the total variation problem and return the solution.

    Arguments
    ---------
    x: :class:`torch:torch.Tensor`
        A tensor with shape ``(n,)`` holding the input signal.
    weights: :class:`torch:torch.Tensor`
        The edge weights. Shape ``(n-1,)``, or ``(1,)`` if all weights
        are equal.

    Returns
    -------
    :class:`torch:torch.Tensor`
        The solution to the total variation problem, with the same
        shape ``(n,)`` as the input.
    """
    self.equal_weights = weights.size() == (1, )
    if self.equal_weights:
        opt = tv1_1d(x.numpy().ravel(), weights.numpy()[0], **self.tv_args)
    else:
        opt = tv1w_1d(x.numpy().ravel(), weights.numpy().ravel(),
                      **self.tv_args)
    opt = torch.Tensor(opt).view_as(x)
    self.save_for_backward(opt)
    return opt
def backward(ctx, grad_output):
    """Compute the gradient of loss + mu*reg using a prox step.

    The gradient is derived as the additive update that would be used in
    a proximal gradient descent:

        G(u) = (u - prox(u - eps * nabla(loss)(u), eps * lbda)) / eps

    with a small eps (here hard-coded to 1e-10).
    """
    loss, reg, u, lbda = ctx.saved_tensors
    device = u.device

    eps = 1e-10
    grad, = torch.autograd.grad(loss, u, only_inputs=True,
                                retain_graph=True)

    x = (u - eps * grad).data
    lbda = lbda.data
    prox_x = check_tensor(
        np.array([prox_tv.tv1_1d(xx, eps * lbda) for xx in x]),
        device=device,
    )
    grad_u = (u - prox_x) / eps
    grad_lbda = reg.clone()
    return (torch.ones(0), grad_u, grad_lbda)
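# Hedged numeric sketch (not part of the original code): for a simple
# quadratic loss loss(u) = 0.5 * ||u - y||^2, the update direction G(u)
# defined in the docstring above can be computed directly with numpy and
# prox_tv; the variable names here are illustrative.
import numpy as np
import prox_tv

rng = np.random.RandomState(0)
y = rng.randn(100)
u = rng.randn(100)
lbda, eps = 0.5, 1e-10

grad_loss = u - y                                  # nabla(loss)(u) for this loss
prox_x = prox_tv.tv1_1d(u - eps * grad_loss, eps * lbda)
G = (u - prox_x) / eps                             # prox-gradient update direction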
def timeVariation(data):
    '''Signal low-frequency oscillation removal using a total variation method.

    @param data (np.array(float)): flux values of a light curve
    @return (np.array(float)): flux values without noise
    '''
    return ptv.tv1_1d(data, 20)
def analysis_primal_iter_algo(x_train, x_test, A, D, L, lbda, all_n_layers,
                              type_, max_iter=300, device=None,
                              net_kwargs=None, verbose=1):
    """Iterative-algo solver for the analysis TV problem."""
    net_kwargs = dict() if net_kwargs is None else net_kwargs

    name = 'ISTA' if type_ == 'ista' else 'FISTA'
    max_iter = all_n_layers[-1]  # run as many iterations as the deepest network
    step_size = 1.0 / np.linalg.norm(A, ord=2) ** 2

    _, u0_test, _ = init_vuz(A, D, x_test)
    _, u0_train, _ = init_vuz(A, D, x_train)

    momentum = None if type_ == 'ista' else type_

    if verbose > 0:
        print(f"[analysis {name} iterative] training loss")
    params = dict(
        grad=lambda z: analysis_primal_grad(z, A, x_train),
        obj=lambda z: analysis_primal_obj(z, A, D, x_train, lbda),
        prox=lambda z, s: np.array([tv1_1d(z_, lbda * s) for z_ in z]),
        x0=u0_train, momentum=momentum, restarting=None,
        max_iter=max_iter, step_size=step_size, early_stopping=False,
        debug=True, verbose=verbose,
    )
    _, train_loss = fista(**params)

    if verbose > 0:
        print(f"[analysis {name} iterative] testing loss")
    params = dict(
        grad=lambda z: analysis_primal_grad(z, A, x_test),
        obj=lambda z: analysis_primal_obj(z, A, D, x_test, lbda),
        prox=lambda z, s: np.array([tv1_1d(z_, lbda * s) for z_ in z]),
        x0=u0_test, momentum=momentum, restarting=None,
        max_iter=max_iter, step_size=step_size, early_stopping=False,
        debug=True, verbose=verbose,
    )
    _, test_loss = fista(**params)

    train_loss = train_loss[[0] + all_n_layers]
    test_loss = test_loss[[0] + all_n_layers]

    if verbose > 0:
        print(f"\r[{name}] iterations finished "
              f"train-loss={train_loss[-1]:.6e} test-loss={test_loss[-1]:.6e}")

    return train_loss, test_loss
def solve_and_refine(x, w, equal_weights=True, **tv_args):
    if equal_weights:
        opt = tv1_1d(x.reshape(-1), w[0], **tv_args)
    else:
        opt = tv1w_1d(x.reshape(-1), w.reshape(-1), **tv_args)
    return opt
def test_tv1w_1d_uniform_weights_small_input():
    for _ in range(1000):
        dimension = np.random.randint(2, 4)
        x = 100 * np.random.randn(dimension)
        w1 = np.random.rand()
        w = np.ones(dimension - 1) * w1
        solw = tv1w_1d(x, w)
        sol = tv1_1d(x, w1)
        assert np.allclose(solw, sol)
def test_tv1_1d():
    methods = ('tautstring', 'pn', 'condat', 'dp')
    for _ in range(20):
        dimension = np.random.randint(10, 30)
        x = 100 * np.random.randn(dimension)
        w = 20 * np.random.rand()
        solutions = [tv1_1d(x, w, method=method) for method in methods]
        for i in range(1, len(solutions)):
            assert np.allclose(solutions[0], solutions[i], atol=1e-3)
def test_tvgen_1d():
    """Tests that the general solver returns correct 1d solutions"""
    for _ in range(20):
        dimension = np.random.randint(10, 30)
        x = 100 * np.random.randn(dimension)
        w = 20 * np.random.rand()
        specific = tv1_1d(x, w)
        general = tvgen(x, [w], [1], [1])
        assert np.allclose(specific, general, atol=1e-3)
def backward_step(self, current_state):
    # x = ptv.tv1_1d(current_state, self.gamma, method='hybridtautstring')
    # w = np.concatenate(([0], [self.gamma] * (len(current_state) - 2)))
    x = ptv.tv1_1d(
        current_state,
        w=float(self.weights * self.gamma),
        method='hybridtautstring',
    )
    return x
def compute_prox_tv_errors(network, x, lbda):
    """Return the sub-optimality gap of the prox-tv at each iteration."""
    if not isinstance(network, ListaTV):
        raise ValueError("network should be of type {'ListaTV'}.")

    if not hasattr(network, 'training_loss_'):
        warnings.warn("network has not been trained before computing "
                      "prox_tv_errors.")

    x = check_tensor(x, device=network.device)
    _, u, _ = init_vuz(network.A, network.D, x, inv_A=network.inv_A_,
                       device=network.device)

    l_diff_loss = []
    for layer_id in range(network.n_layers):
        layer_params = network.parameter_groups[f'layer-{layer_id}']

        # retrieve parameters
        Wx = layer_params['Wx']
        Wu = layer_params['Wu']

        # Get the correct prox depending on the layer_id and learn_prox
        mul_lbda = layer_params.get('threshold', 1.0 / network.l_)
        mul_lbda = max(0, mul_lbda)
        if network.learn_prox == LEARN_PROX_PER_LAYER:
            prox_tv = network.prox_tv[layer_id]
        else:
            prox_tv = network.prox_tv

        # apply one 'iteration'
        u_half = u.matmul(Wu) + x.matmul(Wx)
        u_half_npy = u_half.detach().cpu().numpy()

        # prox-tv as applied by the network
        z_k = prox_tv(u_half, lbda * mul_lbda)
        u = torch.cumsum(z_k, dim=1)
        approx_prox_u_npy = u.detach().cpu().numpy()

        # exact prox-tv with taut-string algorithm
        lbda_npy = float(lbda * mul_lbda)
        prox_u_npy = np.array([tv1_1d(u_, lbda_npy) for u_ in u_half_npy])

        # log sub-optimality of the prox applied by the network
        diff_loss = (
            loss_prox_tv_analysis(u_half_npy, approx_prox_u_npy, lbda_npy)
            - loss_prox_tv_analysis(u_half_npy, prox_u_npy, lbda_npy))
        l_diff_loss.append(diff_loss)

    return l_diff_loss
def _step(self):
    self.x = cho_solve_banded(
        (self.c, False),
        self.y + sum(self.ρ[i] * self.D[i].T @ (self.α[i] + self.u[i])
                     for i in range(self.r)),
        check_finite=False,
    )
    for i in range(self.r):
        Dx = self.D[i] @ self.x
        for j in range(self.x.shape[1]):
            self.α[i][:, j] = ptv.tv1_1d(Dx[:, j] - self.u[i][:, j],
                                         self.λ[i] / self.ρ[i])
        self.u[i] += self.α[i] - Dx
def prox_FL(a, beta, lamda):
    """Fused Lasso prox.

    It is calculated as the total variation prox + soft-thresholding on the
    solution, as in http://ieeexplore.ieee.org/abstract/document/6579659/
    """
    Y = np.empty_like(a)
    n = a.shape[1] ** 2
    for i in range(n):
        solution = tv1_1d(a.flat[i::n], beta)
        # fused-lasso (soft-thresholding on the solution)
        solution = soft_thresholding_sign(solution, lamda)
        Y.flat[i::n] = solution
    return Y
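# `soft_thresholding_sign` is not defined in this snippet; a minimal sketch of
# the standard soft-thresholding operator it presumably implements (this
# definition is an assumption, named after the call above):
import numpy as np

def soft_thresholding_sign(x, lamda):
    # shrink each entry toward zero by lamda, keeping its sign
    return np.sign(x) * np.maximum(np.abs(x) - lamda, 0)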
def _prox_tv_multi(z, lbda, step_size):
    """Apply the 1d TV prox to each temporal component.

    Parameters
    ----------
    z : array, shape (n_atoms, n_times_valid), temporal components
    lbda : float, the temporal regularization parameter
    step_size : float, the step-size for the gradient descent

    Return
    ------
    prox_z : array, shape (n_atoms, n_times_valid), the valid approximated
        temporal components
    """
    return np.vstack([tv1_1d(z_k, lbda * step_size) for z_k in z])
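# Hedged usage sketch for _prox_tv_multi (illustration only): each row of z
# is denoised independently with the 1d TV prox, with weight lbda * step_size.
import numpy as np

z = np.random.randn(4, 300)              # (n_atoms, n_times_valid)
prox_z = _prox_tv_multi(z, lbda=0.5, step_size=0.1)
assert prox_z.shape == z.shape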
def tv_diff(data, lambd):
    """Computes time derivative at endpoint.

    Approximates the time derivative of `data` with a second-order backward
    finite difference formula. Assuming the time series contains noise,
    datapoints are filtered using Total Variation Regularization.

    Args:
        data: Uniformly-spaced time series.
        lambd: Non-negative regularization parameter.

    Returns:
        Time derivative at endpoint of filtered `data`.
    """
    u = ptv.tv1_1d(data, lambd)
    return (3 * u[-1] - 4 * u[-2] + u[-3]) / 2
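# Hedged usage sketch for tv_diff (illustration only): on a noisy signal that
# is flat near its end, the TV filtering suppresses the noise, so the endpoint
# derivative estimate is near 0, whereas the same finite-difference formula
# applied to the raw data is dominated by noise. Note the formula assumes
# unit sample spacing; divide by dt for other grids.
import numpy as np

rng = np.random.RandomState(0)
data = np.concatenate([np.zeros(100), np.ones(100)]) + 0.2 * rng.randn(200)

raw = (3 * data[-1] - 4 * data[-2] + data[-3]) / 2  # noise-dominated
smooth = tv_diff(data, lambd=2.0)                   # close to 0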
def forward(ctx, x, lbda):
    # Convert input to numpy array to use the prox_tv library
    device = x.device
    x = x.detach().cpu().data

    # The regularization can be learnable or a float
    if isinstance(lbda, torch.Tensor):
        lbda = lbda.detach().cpu().data

    # Get back a tensor for the output and save it for the backward pass
    output = check_tensor(
        np.array([prox_tv.tv1_1d(xx, lbda) for xx in x]),
        device=device, requires_grad=True,
    )
    z = output - torch.functional.F.pad(output, (1, 0))[..., :-1]
    ctx.save_for_backward(torch.sign(z))
    return output
def transform(self, x, lbda, output_layer=None):
    if output_layer is None:
        output_layer = self.n_layers
    _, u0, _ = init_vuz(self.A, self.D, x)
    params = dict(
        grad=lambda z: analysis_primal_grad(z, self.A, x),
        obj=lambda z: analysis_primal_obj(z, self.A, self.D, x, lbda),
        prox=lambda z, s: np.array([tv1_1d(z_, lbda * s) for z_ in z]),
        x0=u0, momentum=self.momentum, step_size=self.step_size,
        restarting=None, max_iter=output_layer, early_stopping=False,
        debug=True, verbose=self.verbose,
    )
    return fista(**params)[0]
def update_trend(X, z_hat, d_hat, reg_trend=0.1, ds_init=None, debug=False,
                 solver_kwargs=dict(), sample_weights=None, verbose=0):
    """Estimate the trend in the time domain.

    Parameters
    ----------
    X : array, shape (n_trials, n_times)
        The data for sparse coding.
    z_hat : array, shape (n_atoms, n_trials, n_times - n_times_atom + 1)
        The current estimate of the code.
    d_hat : array, shape (n_atoms, n_times_atom)
        The current estimate of the atoms.
    reg_trend : float
        Regularization parameter of the TV denoising used for the trend.
    debug : bool
        If True, check grad.
    solver_kwargs : dict
        Parameters for the solver.
    sample_weights : array, shape (n_trials, n_times)
        Weights applied on the cost function.
    verbose : int
        Verbosity level.

    Returns
    -------
    trend : array, shape (n_trials, n_times)
        The estimated trend.
    """
    conv_part = construct_X(z_hat, d_hat)
    trend = np.zeros(X.shape)
    to_analyse = X - conv_part
    for i in range(X.shape[0]):
        trend[i] = tv1_1d(to_analyse[i], reg_trend)
    return trend
            # sord = ordinator1d(
            #     np.abs(imspace_true[ii, jj, :]), k=10,
            #     forward=S.forward_wvlt, inverse=S.inverse_wvlt,
            #     chunksize=10, pdf=None, pdf_metric=None,
            #     sparse_metric=None, disp=False)
            sord = relaxed_ordinator(
                np.abs(imspace_true[ii, jj, :]), lam=.05, k=10,
                unsparsify=S.inverse_wvlt, norm=False, warm=False,
                transform_shape=None, disp=False)
            roi_cnt += 1
            tqdm.write('ROI counter: %d' % roi_cnt)
        else:
            # Sorting doesn't suppress motion!
            sord = np.argsort(np.abs(imspace_true[ii, jj, :]))

        recon_sort[ii, jj, sord] = ptv.tv1_1d(  # pylint: disable=E1137
            np.abs(imspace_u[ii, jj, sord]), w)

plt.plot(np.abs(imspace_true[ctr[0], ctr[1], :]))
plt.plot(np.abs(recon_sort[ctr[0], ctr[1], :]))
plt.plot(np.abs(recon_l1[ctr[0], ctr[1], :]))
plt.show()

# Take a look
ims = [
    imspace_u,
    # recon,
    recon_l1,
    recon_l2,
    recon_sort,
    imspace_true,
    # np.abs(imspace_true - recon0),
def admm_sep_1(i_ll, tol=1e-2, max_iter=1000):
    """Computation for one view of data."""
    obj_value = obj_value_1
    i, ll = i_ll
    W_i = W[(i, ll)]
    W_i_h = np.random.uniform(size=W_i.shape)
    W_i_t = np.random.uniform(size=W_i.shape)
    Theta = np.random.uniform(size=W_i.shape)
    Phi = np.random.uniform(size=W_i.shape)
    d_i = W_i.shape[0]
    loss_0 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, i, ll)
    loss_1 = loss_0 + 1
    l5 = 1e32
    iter = 0
    while (np.sum(np.absolute(W_i - W_i_h) + np.absolute(W_i - W_i_t)) > tol
           or abs(loss_0 - loss_1) > tol) and iter <= max_iter:
        # step 1, parallel
        tmp = (W_i_h + W_i_t) / 2 - (Theta + Phi) / (2.0 * nu)
        for j in range(d_i):
            W_i[j, :] = ptv.tv1_1d(tmp[j, :], 1.0 / (2 * nu))
        l1 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, i, ll)
        if l1 >= l5 + tol_dif:
            test_admm(lam, mu, nu, 1, l1, l5)
            break
        # step 2
        W_i_t = np.sign(W_i + Theta / nu) * np.maximum(
            np.absolute(W_i + Theta / nu) - 1.0 * lam / nu, 0)
        l2 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, i, ll)
        if l2 >= l1 + tol_dif:
            test_admm(lam, mu, nu, 2, l2, l1)
            break
        # step 3, parallel
        for t in range(T):
            ind = np.ravel_multi_index((i, t), (N, T))
            A = mu * (N - 1) * UTU[ind] + nu * np.eye(d_i)
            b = Phi[:, t] + mu * np.dot(US[ind], P[ind][:, ll]) + nu * W_i[:, t]
            W_i_h[:, t] = np.linalg.solve(A, b)
        l3 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, i, ll)
        if l3 >= l2 + tol_dif:
            test_admm(lam, mu, nu, 3, l3, l2)
            break
        # step 4
        Theta = Theta + nu * (W_i - W_i_t)
        l4 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, i, ll)
        if l4 <= l3 - tol_dif:
            test_admm(lam, mu, nu, 4, l4, l3)
        # step 5
        Phi = Phi + nu * (W_i - W_i_h)
        l5 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, i, ll)
        if l5 <= l4 - tol_dif:
            test_admm(lam, mu, nu, 5, l5, l4)
        iter += 1
        if iter % 10 == 0:
            loss_1 = loss_0
            loss_0 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, i, ll)
    if iter > max_iter:
        warnings.warn(str(lam) + ' ' + str(mu) + ' ' + str(nu) +
                      'warning: does not converge!')
    return W_i_t
def prox(logy, s):
    """Total variation prox operator."""
    return ptv.tv1_1d(logy, s * α_tv)
def _prox(x, step_size):
    return np.array([tv1_1d(x_, lbda_t * step_size) for x_ in x])
# Demo fragment; `randn`, `shape` and `grid` come from a star import
# (e.g. `from pylab import *`) in the original script.

# Generate impulse (blocky) signal
N = 1000
s = np.zeros((N, 1))
s[N // 4:N // 2] = 1          # integer slicing (float indices break on Python 3)
s[N // 2:3 * N // 4] = -1
s[3 * N // 4:-N // 8] = 2

# Introduce noise
n = s + 0.5 * randn(*shape(s))

# Filter using TV-L1
lam = 20
print('Filtering signal with TV-L1...')
start = time.time()
f = ptv.tv1_1d(n, lam)
end = time.time()
print('Elapsed time ' + str(end - start))

# Plot results
plt.subplot(3, 1, 1)
plt.title('TVL1 filtering')
plt.plot(s)
plt.ylabel('Original')
grid(True)

plt.subplot(3, 1, 2)
plt.plot(n)
plt.ylabel('Noisy')
grid(True)
def initial_signal(frame, filtering=False, filtered_frequency=None):
    x_values, y_values = frame[0], frame[1]
    if filtering:
        import prox_tv as ptv
        y_values = ptv.tv1_1d(y_values, filtered_frequency)
    return dict(x=x_values, y=y_values)
def prox(x, step_size):
    return np.r_[[tv1_1d(x_, lbda * step_size) for x_ in x]]
def admm_sep_2(i_ll, P, SVD_ijt, USP, tol=1e-2, max_iter=1000,
               with_init=with_init, lam=lam, mu=mu, nu=nu, A_inv=A_inv,
               cvx_val=None):
    """Computation for one view of data."""
    print('para:', lam, mu, nu)
    _cvx_conv = False
    obj_value = obj_value_2
    i, ll = i_ll
    W_i = W[(i, ll)].copy()
    W_i_h = W_h[(i, ll)].copy()
    W_i_t = W_t[(i, ll)].copy()
    Theta = Theta_all[(i, ll)].copy()
    Phi = Phi_all[(i, ll)].copy()
    d_i = W_i.shape[0]
    loss_0 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, i, ll, SVD_ijt,
                       lam, mu, nu)
    loss_1 = loss_0 + 1
    _conv = True
    iter = 0
    s_time = time.time()
    while iter <= max_iter:
        # step 1, parallel
        b_0 = -Phi - Theta + nu * (W_i_h + W_i_t)
        for t in range(T):
            ind = np.ravel_multi_index((i, t), (N, T))
            b = USP[(i, t, ll)] + b_0[:, t]
            if not big_data:
                W_i[:, t] = np.dot(A_inv[ind], b)
            else:
                tmp = A_inv[str(i) + '/' + str(t)]
                W_i[:, t] = np.dot(tmp, b)
        # step 2
        W_i_t = np.sign(W_i + Theta / nu) * np.maximum(
            np.absolute(W_i + Theta / nu) - 1.0 * lam / nu, 0)
        # step 3, parallel
        tmp = W_i + 1.0 * Phi / nu
        for j in range(d_i):
            W_i_h[j, :] = ptv.tv1_1d(tmp[j, :], 1.0 * mu / nu)
        # step 4
        Theta = Theta + nu * (W_i - W_i_t)
        # step 5
        Phi = Phi + nu * (W_i - W_i_h)
        iter += 1
        if iter % 10 == 0:
            loss_1 = loss_0
            loss_0 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, i, ll,
                               SVD_ijt, lam, mu, nu)
            if (np.sum(np.absolute(W_i - W_i_h) + np.absolute(W_i - W_i_t))
                    / (np.sum(abs(W_i)) + 1e-2) < tol
                    and abs(loss_0 - loss_1) / loss_0 < tol and iter > 500):
                if test and _cvx_conv:
                    tmp = val_config.time
                    val_config.time = tmp + time.time() - s_time
                    break
                if not test:
                    break
        if test and not _cvx_conv:
            if (abs(org_f(W_i, i, ll, SVD_ijt) - cvx_val) / cvx_val < 0.1
                    or org_f(W_i, i, ll, SVD_ijt) <= cvx_val):
                _cvx_conv = True
        if iter > max_iter:
            warnings.warn(str(lam) + ' ' + str(mu) + ' ' + str(nu) +
                          'warning: does not converge!')
            _conv = False
        if d_i > 1000 and iter % 100 == 0:
            print(time.time() - s_time)
    T_dif_i = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, i, ll, SVD_ijt,
                        lam, mu, nu, out_put=out_put, choice='final')
    return W_i, _conv, T_dif_i, W_i_t, W_i_h, Theta, Phi
def admm_sep_3(i_ll, tol=1e-2, max_iter=1000):
    """Computation for one view of data."""
    i, ll = i_ll
    obj_value = obj_value_3
    W_i = W[(i, ll)]
    W_i_h = np.random.uniform(size=W_i.shape)
    W_i_t = np.random.uniform(size=W_i.shape)
    Theta = np.random.uniform(size=W_i.shape)
    Phi = np.random.uniform(size=W_i.shape)
    # r, N, T
    Psi = dict()
    for t in range(T):
        r = SVD_x[np.ravel_multi_index((i, t), (N, T))][2].shape[1]
        Psi[t] = np.random.uniform(size=(r, N))
        Psi[t][:, i] = 0
    d_i = W_i.shape[0]
    l6 = 1e32
    loss_0 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, Psi, i, ll)
    loss_1 = loss_0 + 1
    iter = 0
    _conv = True
    while (np.sum(np.absolute(W_i - W_i_h) + np.absolute(W_i - W_i_t)) > tol
           or abs(loss_0 - loss_1) > tol) and iter <= max_iter:
        # step 1, parallel
        for t in range(T):
            U_1it = SVD_x[np.ravel_multi_index((i, t), (N, T))][2]
            ind = np.ravel_multi_index((i, t), (N, T))
            A = mu * (N - 1) * UTU[ind] + 2 * nu * np.eye(d_i)
            b = (-Phi[:, t] - Theta[:, t]
                 + mu * np.dot(US[ind], P[ind][:, ll])
                 + nu * (W_i_h[:, t] + W_i_t[:, t])
                 - np.dot(U_1it, np.sum(Psi[t], axis=1).reshape((-1, 1))
                          ).reshape((-1, )))
            W_i[:, t] = np.linalg.solve(A, b)
        l1 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, Psi, i, ll)
        if l1 >= l6 + tol_dif:
            test_admm(lam, mu, nu, 1, l1, l6)
            break
        # step 2
        W_i_t = np.sign(W_i + Theta / nu) * np.maximum(
            np.absolute(W_i + Theta / nu) - 1.0 * lam / nu, 0)
        l2 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, Psi, i, ll)
        if l2 >= l1 + tol_dif:
            test_admm(lam, mu, nu, 2, l2, l1)
            break
        # step 3, parallel
        tmp = W_i + Phi / nu
        for j in range(d_i):
            W_i_h[j, :] = ptv.tv1_1d(tmp[j, :], 1.0 / nu)
        l3 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, Psi, i, ll)
        if l3 >= l2 + tol_dif:
            test_admm(lam, mu, nu, 3, l3, l2)
            break
        # step 4, parallel
        for t in range(T):
            Sigma_iti, U_1it = SVD_x[np.ravel_multi_index((i, t), (N, T))][1:3]
            tmp = np.dot(np.transpose(U_1it), W_i[:, t])
            for j in range(i + 1, N):
                Psi[t][:, j] = Psi[t][:, j] + mu * (tmp - np.dot(
                    Sigma_iti,
                    SVD_ijt[np.ravel_multi_index((i, j, t), (N, N, T))][0][:, ll]))
            for w in range(i):
                Psi[t][:, w] = Psi[t][:, w] + mu * (tmp - np.dot(
                    Sigma_iti,
                    SVD_ijt[np.ravel_multi_index((w, i, t), (N, N, T))][1][:, ll]))
        l4 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, Psi, i, ll)
        if l4 <= l3 - tol_dif:
            test_admm(lam, mu, nu, 4, l4, l3)
            break
        # step 5
        Theta = Theta + nu * (W_i - W_i_t)
        l5 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, Psi, i, ll)
        if l5 <= l4 - tol_dif:
            test_admm(lam, mu, nu, 5, l5, l4)
        # step 6
        Phi = Phi + nu * (W_i - W_i_h)
        l6 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, Psi, i, ll)
        if l6 <= l5 - tol_dif:
            test_admm(lam, mu, nu, 6, l6, l5)
        iter += 1
        if iter % 10 == 0:
            loss_1 = loss_0
            loss_0 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, Psi, i, ll)
    if iter > max_iter:
        # warnings.warn(str(lam) + ' ' + str(mu) + ' ' + str(nu)
        #               + 'warning: does not converge!')
        _conv = False
    return W_i_t, _conv
def get_trend_init(X, trend_reg):
    trend_init = np.zeros_like(X)
    for i in range(X.shape[0]):
        trend_init[i] = tv.tv1_1d(X[i], trend_reg)
    return trend_init
    s[int(3*N/4):int(-N/8)] = 2
    return s


### TV-L1 filtering

# Generate impulse (blocky) signal
s = _blockysignal()

# Introduce noise
n = s + 0.5*np.random.rand(*np.shape(s))

# Filter using TV-L1
lam = 20
print('Filtering signal with TV-L1...')
start = time.time()
f = ptv.tv1_1d(n, lam)
end = time.time()
print('Elapsed time ' + str(end-start))

# Plot results
plt.subplot(3, 1, 1)
plt.title('TVL1 filtering')
plt.plot(s)
plt.ylabel('Original')
plt.grid(True)

plt.subplot(3, 1, 2)
plt.plot(n)
plt.ylabel('Noisy')
plt.grid(True)