def analysis_dual_iter_algo(x_train, x_test, A, D, L, lbda, all_n_layers,
                            type_, max_iter=300, device=None, net_kwargs=None,
                            verbose=1):
    """ Chambolle-style dual solver (ISTA/FISTA on the dual) for the analysis
    TV problem. """
    net_kwargs = dict() if net_kwargs is None else net_kwargs

    Psi_A = np.linalg.pinv(A).dot(D)
    inv_AtA = np.linalg.pinv(A.dot(A.T))

    def _grad(v, x):
        return analysis_dual_grad(v, A, D, x, Psi_A=Psi_A)

    def _obj(v, x):
        # evaluate the dual iterate on the primal objective
        v = np.atleast_2d(v)
        u = v_to_u(v, x, A=A, D=D, inv_AtA=inv_AtA)
        return analysis_primal_obj(u, A, D, x, lbda)

    def _prox(v, step_size):
        # XXX step_size is only here to homogenize the API
        v = np.atleast_2d(v)
        return np.clip(v, -lbda, lbda)

    v0_test, _, _ = init_vuz(A, D, x_test)
    v0_train, _, _ = init_vuz(A, D, x_train)

    # run the iterative solver up to the deepest unrolled depth
    max_iter = all_n_layers[-1]
    step_size = 1.0 / np.linalg.norm(Psi_A, ord=2) ** 2

    momentum = None if type_ == 'ista' else 'fista'
    name = 'ISTA' if type_ == 'ista' else 'FISTA'

    if verbose > 0:
        print(f"[{name} iterative] training loss")
    params = dict(
        grad=lambda v: _grad(v, x_train),
        obj=lambda v: _obj(v, x_train),
        prox=_prox, x0=v0_train, momentum=momentum, restarting=None,
        max_iter=max_iter, step_size=step_size, early_stopping=False,
        debug=True, verbose=verbose,
    )
    _, train_loss = fista(**params)

    if verbose > 0:
        print(f"[{name} iterative] testing loss")
    params = dict(
        grad=lambda v: _grad(v, x_test),
        obj=lambda v: _obj(v, x_test),
        prox=_prox, x0=v0_test, momentum=momentum, restarting=None,
        max_iter=max_iter, step_size=step_size, early_stopping=False,
        debug=True, verbose=verbose,
    )
    _, test_loss = fista(**params)

    # keep only the losses at the depths that match the unrolled networks
    train_loss = train_loss[[0] + all_n_layers]
    test_loss = test_loss[[0] + all_n_layers]

    if verbose > 0:
        print(f"\r[{name}] iterations finished "
              f"train-loss={train_loss[-1]:.6e} "
              f"test-loss={test_loss[-1]:.6e}")

    return train_loss, test_loss
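# A minimal usage sketch for the solver above (illustrative, not part of the
# API): it assumes `synthetic_1d_dataset` -- the dataset helper used by the
# tests in this repository -- and an arbitrary train/test split and lbda.
def _demo_analysis_dual_iter_algo():
    x, _, _, L, D, A = synthetic_1d_dataset(n=200, s=0.5, snr=0.0, seed=0)
    x_train, x_test = x[:100], x[100:]
    return analysis_dual_iter_algo(x_train, x_test, A, D, L, lbda=0.1,
                                   all_n_layers=[10, 20, 50], type_='fista')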
def analysis_learned_taut_string(x_train, x_test, A, D, L, lbda, all_n_layers,
                                 type_=None, max_iter=300, device=None,
                                 net_kwargs=None, verbose=1):
    """ NN-algo solver (LPGD with taut-string prox) for the analysis TV
    problem. """
    net_kwargs = dict() if net_kwargs is None else net_kwargs

    params = None
    l_loss = []

    def record_loss(l_loss, u_train, u_test):
        l_loss.append(dict(
            train_loss=analysis_primal_obj(u_train, A, D, x_train, lbda),
            test_loss=analysis_primal_obj(u_test, A, D, x_test, lbda),
        ))
        return l_loss

    _, u0_train, _ = init_vuz(A, D, x_train)
    _, u0_test, _ = init_vuz(A, D, x_test)
    record_loss(l_loss, u0_train, u0_test)

    for n, n_layers in enumerate(all_n_layers):
        # Declare the network for the given number of layers. Warm-start the
        # first layers with the parameters learned by the previous, shallower
        # networks, if any.
        algo = LearnTVAlgo(algo_type='lpgd_taut_string', A=A,
                           n_layers=n_layers, max_iter=max_iter,
                           device=device, initial_parameters=params,
                           verbose=verbose, **net_kwargs)

        # train
        t0_ = time.time()
        algo.fit(x_train, lbda=lbda)
        delta_ = time.time() - t0_

        # save the parameters to warm-start the next network
        params = algo.export_parameters()

        # get the train and test errors
        u_train = algo.transform(x_train, lbda, output_layer=n_layers)
        u_test = algo.transform(x_test, lbda, output_layer=n_layers)
        l_loss = record_loss(l_loss, u_train, u_test)

        if verbose > 0:
            train_loss = l_loss[-1]['train_loss']
            test_loss = l_loss[-1]['test_loss']
            print(f"\r[{algo.name}|layers#{n_layers:3d}] model fitted "
                  f"{delta_:4.1f}s train-loss={train_loss:.4e} "
                  f"test-loss={test_loss:.4e}")

    df = pd.DataFrame(l_loss)
    return df['train_loss'].values, df['test_loss'].values
def analysis_learned_algo(x_train, x_test, A, D, L, lbda, all_n_layers, type_,
                          max_iter=300, device=None, net_kwargs=None,
                          verbose=1):
    """ NN-algo solver for the analysis TV problem. """
    net_kwargs = dict() if net_kwargs is None else net_kwargs
    params = None

    _, u0_train, _ = init_vuz(A, D, x_train)
    _, u0_test, _ = init_vuz(A, D, x_test)
    train_loss_init = analysis_primal_obj(u0_train, A, D, x_train, lbda)
    test_loss_init = analysis_primal_obj(u0_test, A, D, x_test, lbda)
    train_loss, test_loss = [train_loss_init], [test_loss_init]

    algo_type = 'origtv' if ('untrained' in type_) else type_

    for n_layers in all_n_layers:
        # declare network
        algo = LearnTVAlgo(algo_type=algo_type, A=A, n_layers=n_layers,
                           max_iter=max_iter, device=device,
                           initial_parameters=params, verbose=verbose,
                           **net_kwargs)

        t0_ = time.time()
        if 'untrained' not in type_:
            algo.fit(x_train, lbda=lbda)
        delta_ = time.time() - t0_

        # save parameters
        params = algo.export_parameters()

        # get train and test error
        u_train = algo.transform(x_train, lbda, output_layer=n_layers)
        train_loss_ = analysis_primal_obj(u_train, A, D, x_train, lbda)
        train_loss.append(train_loss_)

        u_test = algo.transform(x_test, lbda, output_layer=n_layers)
        test_loss_ = analysis_primal_obj(u_test, A, D, x_test, lbda)
        test_loss.append(test_loss_)

        if verbose > 0:
            print(f"\r[{algo.name}|layers#{n_layers:3d}] model fitted "
                  f"{delta_:4.1f}s train-loss={train_loss_:.4e} "
                  f"test-loss={test_loss_:.4e}")

    return np.array(train_loss), np.array(test_loss)
def _get_init(self, x, lbda, force_numpy=False):
    x0, u0, _ = init_vuz(self.net_solver.A, self.net_solver.D, x, lbda)
    if force_numpy:
        return np.array(x0), np.array(u0)
    return x0, u0
def forward(self, x, lbda, output_layer=None):
    """ Forward pass of the network. """
    output_layer = self.check_output_layer(output_layer)

    # initialize variables
    _, u, _ = init_vuz(self.A, self.D, x, lbda, inv_A=self.inv_A_,
                       device=self.device)

    for layer_id in range(output_layer):
        layer_params = self.parameter_groups[f'layer-{layer_id}']
        # retrieve parameters
        h = layer_params['h']
        hth = layer_params['hth']
        mul_lbda = layer_params.get('threshold', 1.0 / self.l_)
        mul_lbda = check_tensor(mul_lbda, device=self.device)
        # apply one 'iteration'
        u = hth_id_u_tensor(hth, u) + htx_tensor(h, x)
        u = ProxTV_l1.apply(u, lbda * mul_lbda)

    return u
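# A minimal NumPy analogue of one unrolled layer above (a sketch, not the
# learned module). The assumption is that `hth_id_u_tensor` / `htx_tensor` /
# `ProxTV_l1` implement a (learned) gradient step on 0.5 * ||u A - x||^2
# followed by a TV prox; here the fixed matrix A and `tv1_1d` (from the
# prox_tv package used elsewhere in this repository) stand in for the learned
# parameters and the differentiable prox.
def _lpgd_layer_sketch(u, x, A, step, lbda):
    import numpy as np
    from prox_tv import tv1_1d
    u = u - step * (u.dot(A) - x).dot(A.T)  # explicit gradient step
    return np.array([tv1_1d(u_, lbda * step) for u_ in u])  # row-wise TV prox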
def analysis_primal_dual_iter_algo(x_train, x_test, A, D, L, lbda,
                                   all_n_layers, type_, max_iter=300,
                                   device=None, net_kwargs=None, verbose=1):
    """ Condat-Vu solver for the analysis TV problem. """
    net_kwargs = dict() if net_kwargs is None else net_kwargs

    # run the iterative solver up to the deepest unrolled depth
    max_iter = all_n_layers[-1]

    # Condat-Vu parameters
    rho = 1.0
    sigma = 0.5
    L_A = np.linalg.norm(A, ord=2) ** 2
    L_D = np.linalg.norm(D, ord=2) ** 2
    tau = 1.0 / (L_A / 2.0 + sigma * L_D)

    v0_test, u0_test, _ = init_vuz(A, D, x_test, force_numpy=True)
    v0_train, u0_train, _ = init_vuz(A, D, x_train, force_numpy=True)

    if verbose > 0:
        print("[Condat-Vu iterative] training loss")
    params = dict(
        grad=lambda u: analysis_primal_grad(u, A, x_train),
        obj=lambda u: analysis_primal_obj(u, A, D, x_train, lbda),
        prox=lambda t: _soft_th_numpy(t, lbda / sigma),
        psi=lambda u: u.dot(D), adj_psi=lambda v: v.dot(D.T),
        v0=v0_train, z0=u0_train, lbda=lbda, sigma=sigma, tau=tau, rho=rho,
        max_iter=max_iter, early_stopping=False, debug=True, verbose=verbose,
    )
    _, _, train_loss = condatvu(**params)

    if verbose > 0:
        print("[Condat-Vu iterative] testing loss")
    params = dict(
        grad=lambda u: analysis_primal_grad(u, A, x_test),
        obj=lambda u: analysis_primal_obj(u, A, D, x_test, lbda),
        prox=lambda t: _soft_th_numpy(t, lbda / sigma),
        psi=lambda u: u.dot(D), adj_psi=lambda v: v.dot(D.T),
        v0=v0_test, z0=u0_test, lbda=lbda, sigma=sigma, tau=tau, rho=rho,
        max_iter=max_iter, early_stopping=False, debug=True, verbose=verbose,
    )
    _, _, test_loss = condatvu(**params)

    # keep only the losses at the depths that match the unrolled networks
    train_loss = train_loss[[0] + all_n_layers]
    test_loss = test_loss[[0] + all_n_layers]

    if verbose > 0:
        print(f"\r[Condat-Vu] iterations finished "
              f"train-loss={train_loss[-1]:.4e} "
              f"test-loss={test_loss[-1]:.4e}")

    return train_loss, test_loss
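# Why these step sizes: Condat-Vu converges when
# 1 / tau - sigma * ||D||_2^2 >= ||A||_2^2 / 2, and the choice
# tau = 1 / (L_A / 2 + sigma * L_D) above saturates this bound. A small
# self-contained check on random matrices (illustration only):
def _check_condatvu_step_sizes(seed=0):
    import numpy as np
    rng = np.random.RandomState(seed)
    A, D = rng.randn(5, 20), rng.randn(20, 19)
    sigma = 0.5
    L_A = np.linalg.norm(A, ord=2) ** 2
    L_D = np.linalg.norm(D, ord=2) ** 2
    tau = 1.0 / (L_A / 2.0 + sigma * L_D)
    # equality up to floating-point rounding
    return 1.0 / tau - sigma * L_D >= L_A / 2.0 - 1e-10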
def synthesis_iter_algo(x_train, x_test, A, D, L, lbda, all_n_layers, type_,
                        max_iter=300, device=None, net_kwargs=None,
                        verbose=1):
    """ Iterative-algo solver for the synthesis TV problem. """
    net_kwargs = dict() if net_kwargs is None else net_kwargs

    name = 'ISTA' if type_ == 'ista' else 'FISTA'
    max_iter = all_n_layers[-1]

    LA = L.dot(A)
    step_size = 1.0 / np.linalg.norm(LA, ord=2) ** 2

    _, _, z0_test = init_vuz(A, D, x_test)
    _, _, z0_train = init_vuz(A, D, x_train)

    momentum = None if type_ == 'ista' else type_

    if verbose > 0:
        print(f"[{name} iterative] training loss")
    params = dict(
        grad=lambda z: synthesis_primal_grad(z, A, L, x_train),
        obj=lambda z: synthesis_primal_obj(z, A, L, x_train, lbda),
        prox=lambda z, s: pseudo_soft_th_numpy(z, lbda, s),
        x0=z0_train, momentum=momentum, restarting=None, max_iter=max_iter,
        step_size=step_size, early_stopping=False, debug=True,
        verbose=verbose,
    )
    _, train_loss = fista(**params)

    if verbose > 0:
        print(f"[{name} iterative] testing loss")
    params = dict(
        grad=lambda z: synthesis_primal_grad(z, A, L, x_test),
        obj=lambda z: synthesis_primal_obj(z, A, L, x_test, lbda),
        prox=lambda z, s: pseudo_soft_th_numpy(z, lbda, s),
        x0=z0_test, momentum=momentum, restarting=None, max_iter=max_iter,
        step_size=step_size, early_stopping=False, debug=True,
        verbose=verbose,
    )
    _, test_loss = fista(**params)

    train_loss = train_loss[[0] + all_n_layers]
    test_loss = test_loss[[0] + all_n_layers]

    if verbose > 0:
        print(f"[{name}] iterations finished "
              f"train-loss={train_loss[-1]:.4e} "
              f"test-loss={test_loss[-1]:.4e}")

    return train_loss, test_loss
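# Sketch of the synthesis reparametrization assumed above (row convention,
# matching `u.dot(D)` elsewhere in this file): with L the discrete
# integration operator (u = z @ L) and D the first-difference operator,
# u @ D recovers z[:, 1:]. The TV penalty on u thus becomes an l1 penalty on
# all but the first coefficient of z, which is consistent with a 'pseudo'
# soft-thresholding that leaves that coefficient unpenalized (an assumption
# about `pseudo_soft_th_numpy`).
def _check_synthesis_reparametrization(n=5, seed=0):
    import numpy as np
    rng = np.random.RandomState(seed)
    L_int = np.triu(np.ones((n, n)))                  # u = z @ L_int (cumsum)
    D_diff = np.eye(n, n - 1, -1) - np.eye(n, n - 1)  # (u @ D)_j = u_{j+1} - u_j
    z = rng.randn(3, n)
    u = z.dot(L_int)
    np.testing.assert_allclose(u.dot(D_diff), z[:, 1:], atol=1e-12)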
def analysis_primal_iter_algo(x_train, x_test, A, D, L, lbda, all_n_layers,
                              type_, max_iter=300, device=None,
                              net_kwargs=None, verbose=1):
    """ Iterative-algo solver for the analysis TV problem. """
    net_kwargs = dict() if net_kwargs is None else net_kwargs

    name = 'ISTA' if type_ == 'ista' else 'FISTA'
    max_iter = all_n_layers[-1]

    step_size = 1.0 / np.linalg.norm(A, ord=2) ** 2

    _, u0_test, _ = init_vuz(A, D, x_test)
    _, u0_train, _ = init_vuz(A, D, x_train)

    momentum = None if type_ == 'ista' else type_

    if verbose > 0:
        print(f"[analysis {name} iterative] training loss")
    params = dict(
        grad=lambda z: analysis_primal_grad(z, A, x_train),
        obj=lambda z: analysis_primal_obj(z, A, D, x_train, lbda),
        prox=lambda z, s: np.array([tv1_1d(z_, lbda * s) for z_ in z]),
        x0=u0_train, momentum=momentum, restarting=None, max_iter=max_iter,
        step_size=step_size, early_stopping=False, debug=True,
        verbose=verbose,
    )
    _, train_loss = fista(**params)

    if verbose > 0:
        print(f"[analysis {name} iterative] testing loss")
    params = dict(
        grad=lambda z: analysis_primal_grad(z, A, x_test),
        obj=lambda z: analysis_primal_obj(z, A, D, x_test, lbda),
        prox=lambda z, s: np.array([tv1_1d(z_, lbda * s) for z_ in z]),
        x0=u0_test, momentum=momentum, restarting=None, max_iter=max_iter,
        step_size=step_size, early_stopping=False, debug=True,
        verbose=verbose,
    )
    _, test_loss = fista(**params)

    train_loss = train_loss[[0] + all_n_layers]
    test_loss = test_loss[[0] + all_n_layers]

    if verbose > 0:
        print(f"\r[{name}] iterations finished "
              f"train-loss={train_loss[-1]:.6e} "
              f"test-loss={test_loss[-1]:.6e}")

    return train_loss, test_loss
def test_untrained_analysis_lista(lbda, parametrization, n):
    """ Test that an untrained unrolled network matches the Condat-Vu
    iterates. """
    rng = check_random_state(None)
    x, _, _, _, D, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)

    v0, u0, _ = init_vuz(A, D, x)

    n_layers = 10
    rho = 1.0
    sigma = 0.5
    L_D = np.linalg.norm(D.dot(D.T), ord=2)
    L_A = np.linalg.norm(A.dot(A.T), ord=2)
    tau = 1.0 / (L_A / 2.0 + sigma * L_D**2)

    lista = LearnTVAlgo(algo_type=parametrization, A=A, n_layers=n_layers,
                        device='cpu')

    loss_untrained_condat = [analysis_primal_obj(u0, A, D, x, lbda)]
    for n_layer_ in range(1, n_layers + 1):
        z = lista.transform(x=x, lbda=lbda, output_layer=n_layer_)
        loss_untrained_condat.append(analysis_primal_obj(z, A, D, x, lbda))
    loss_untrained_condat = np.array(loss_untrained_condat)

    v0, u0, _ = init_vuz(A, D, x, force_numpy=True)
    params = dict(
        grad=lambda u: analysis_primal_grad(u, A, x),
        obj=lambda u: analysis_primal_obj(u, A, D, x, lbda),
        prox=lambda z: pseudo_soft_th_numpy(z, lbda, 1.0 / sigma),
        psi=lambda u: u.dot(D), adj_psi=lambda v: v.dot(D.T),
        v0=v0, z0=u0, lbda=lbda, sigma=sigma, tau=tau, rho=rho,
        max_iter=n_layers, early_stopping=False, debug=True, verbose=0,
    )
    _, _, loss_condat = condatvu(**params)

    np.testing.assert_allclose(loss_condat, loss_untrained_condat, atol=1e-20)
def test_coherence_init(lbda, seed):
    rng = check_random_state(seed)
    x, _, _, L, D, A = synthetic_1d_dataset()

    # coherence with the default initialization
    v0, u0, z0 = init_vuz(A, D, x, v0=None)
    cost_1 = synthesis_primal_obj(z0, A, L, x, lbda)
    cost_2 = analysis_primal_obj(u0, A, D, x, lbda)
    cost_3 = analysis_primal_obj(v_to_u(v0, x, A, D), A, D, x, lbda)
    np.testing.assert_allclose(cost_1, cost_2)
    np.testing.assert_allclose(cost_1, cost_3)

    # coherence with a random dual initialization
    v0 = rng.randn(*v0.shape)
    v0, u0, z0 = init_vuz(A, D, x, v0=v0)
    cost_1 = synthesis_primal_obj(z0, A, L, x, lbda)
    cost_2 = analysis_primal_obj(u0, A, D, x, lbda)
    cost_3 = analysis_primal_obj(v_to_u(v0, x, A, D), A, D, x, lbda)
    np.testing.assert_allclose(cost_1, cost_2)
    np.testing.assert_allclose(cost_1, cost_3)
def test_coherence_analysis_loss(parametrization, lbda, n):
    """ Test the coherence of the loss function between the learned and the
    fixed algorithms. """
    rng = check_random_state(None)
    x, _, _, _, D, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)

    _, _, z = init_vuz(A, D, x)
    z_ = check_tensor(z, device='cpu')

    cost = analysis_primal_obj(z, A, D, x, lbda=lbda)
    ltv = LearnTVAlgo(algo_type=parametrization, A=A, n_layers=10,
                      device='cpu')
    cost_ref = ltv._loss_fn(x, lbda, z_)

    np.testing.assert_allclose(cost_ref, cost, atol=1e-30)
def transform(self, x, lbda, output_layer=None):
    if output_layer is None:
        output_layer = self.n_layers
    _, _, z0 = init_vuz(self.A, self.D, x)
    params = dict(
        grad=lambda z: synthesis_primal_grad(z, self.A, self.L, x),
        obj=lambda z: synthesis_primal_obj(z, self.A, self.L, x, lbda),
        prox=lambda z, mu: pseudo_soft_th_numpy(z, lbda, mu),
        x0=z0, momentum=self.momentum, step_size=self.step_size,
        restarting=None, max_iter=output_layer, early_stopping=False,
        debug=True, verbose=self.verbose,
    )
    return fista(**params)[0]
def transform(self, x, lbda, output_layer=None):
    if output_layer is None:
        output_layer = self.n_layers
    _, u0, _ = init_vuz(self.A, self.D, x)
    params = dict(
        grad=lambda z: analysis_primal_grad(z, self.A, x),
        obj=lambda z: analysis_primal_obj(z, self.A, self.D, x, lbda),
        prox=lambda z, s: np.array([tv1_1d(z_, lbda * s) for z_ in z]),
        x0=u0, momentum=self.momentum, step_size=self.step_size,
        restarting=None, max_iter=output_layer, early_stopping=False,
        debug=True, verbose=self.verbose,
    )
    return fista(**params)[0]
def test_coherence_training_analysis_loss(parametrization, lbda, n):
    """ Test the coherence of the loss function between the learned and the
    fixed algorithms during training. """
    rng = check_random_state(None)
    x, _, _, _, D, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)

    _, u0, _ = init_vuz(A, D, x)
    train_loss = [analysis_primal_obj(u0, A, D, x, lbda)]
    train_loss_ = [analysis_primal_obj(u0, A, D, x, lbda)]

    for n_layers in range(1, 10):
        lista = LearnTVAlgo(algo_type=parametrization, A=A,
                            n_layers=n_layers, max_iter=10)
        lista.fit(x, lbda=lbda)
        train_loss_.append(lista.training_loss_[-1])
        u = lista.transform(x, lbda, output_layer=n_layers)
        train_loss.append(analysis_primal_obj(u, A, D, x, lbda))

    np.testing.assert_allclose(train_loss_, train_loss, atol=1e-30)
def test_untrained_synthesis_lista(lbda, parametrization, n):
    """ Test that an untrained unrolled network matches the ISTA
    iterates. """
    rng = check_random_state(None)
    x, _, _, L, D, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)

    _, _, z0 = init_vuz(A, D, x)

    n_layers = 10
    LA = L.dot(A)
    step_size = 1.0 / np.linalg.norm(LA, ord=2) ** 2

    lista = LearnTVAlgo(algo_type=parametrization, A=A, n_layers=n_layers,
                        device='cpu')

    loss_untrained_lista = [synthesis_primal_obj(z=z0, A=A, L=L, x=x,
                                                 lbda=lbda)]
    for n_layer_ in range(1, n_layers + 1):
        z_hat = lista.transform(x=x, lbda=lbda, output_layer=n_layer_)
        loss_untrained_lista.append(
            synthesis_primal_obj(z=z_hat, A=A, L=L, x=x, lbda=lbda))
    loss_untrained_lista = np.array(loss_untrained_lista)

    params = dict(
        grad=lambda z: synthesis_primal_grad(z, A, L, x),
        obj=lambda z: synthesis_primal_obj(z, A, L, x, lbda),
        prox=lambda z, s: pseudo_soft_th_numpy(z, lbda, s),
        x0=z0, momentum=None, restarting=None, max_iter=n_layers,
        step_size=step_size, early_stopping=False, debug=True, verbose=0,
    )
    _, loss_ista = fista(**params)

    np.testing.assert_allclose(loss_ista, loss_untrained_lista, atol=1e-20)
params = dict(t_r=t_r, h=h, n_times_valid=n_times_valid, name='Iterative-z',
              max_iter_z=int(args.iter_mult * args.max_iter_z),
              solver_type='fista-z-step', verbose=1)
ta_iter = TA(**params)

t0 = time.time()
_, _, _ = ta_iter.prox_t(y_test, args.temp_reg)
print(f"ta_iterative.prox_t finished: {time.time() - t0:.2f}s")
loss_ta_iter = ta_iter.l_loss_prox_t

n_samples = nx * ny * nz
y_test_ravel = y_test.reshape(n_samples, args.n_time_frames)
_, u0, _ = init_vuz(H, D, y_test_ravel, args.temp_reg)
loss_ta_learn = [_obj_t_analysis(u0, y_test_ravel, h, args.temp_reg)]

init_net_params = None
params = dict(t_r=t_r, h=h, n_times_valid=n_times_valid,
              net_solver_training_type='recursive', name='Learned-z',
              solver_type='learn-z-step', verbose=1,
              max_iter_training_net=args.max_training_iter)
for i, n_layers in enumerate(all_layers):
    params['max_iter_z'] = n_layers