import time

import numpy as np
import pandas as pd

# NOTE: the project-local helpers used throughout this excerpt (LearnTVAlgo,
# synthetic_1d_dataset, init_vuz, the *_primal_obj / *_primal_grad losses,
# condatvu, fista, check_random_state, check_tensor, pseudo_soft_th_numpy,
# compute_prox_tv_errors) are assumed to be imported from the project's own
# modules; the exact import paths are omitted in this excerpt.


def test_init_parameters(n, lbda, parametrization):
    """ Test that a network initialized from exported parameters reproduces
    the outputs and losses of the network that exported them. """
    rng = check_random_state(27)
    x, _, _, L, _, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)

    n_layers = 5

    # limit the number of inner layers for origtv to avoid long computations
    kwargs = {}
    if parametrization == 'origtv':
        kwargs['n_inner_layers'] = 5

    lista_1 = LearnTVAlgo(algo_type=parametrization, A=A, n_layers=n_layers,
                          max_iter=10, net_solver_type='one_shot', **kwargs)
    lista_1.fit(x, lbda=lbda)
    params = lista_1.export_parameters()

    # layer-by-layer loss of the fitted network
    loss_lista_1 = []
    for n_layer_ in range(n_layers + 1):
        z_1 = lista_1.transform(x=x, lbda=lbda, output_layer=n_layer_)
        loss_lista_1.append(synthesis_primal_obj(z=z_1, A=A, L=L, x=x,
                                                 lbda=lbda))
    loss_lista_1 = np.array(loss_lista_1)

    # second network initialized from the exported parameters, without fitting
    lista_2 = LearnTVAlgo(algo_type=parametrization, A=A, n_layers=n_layers,
                          initial_parameters=params, max_iter=10, **kwargs)

    loss_lista_2 = []
    for n_layer_ in range(n_layers + 1):
        z_2 = lista_2.transform(x=x, lbda=lbda, output_layer=n_layer_)
        loss_lista_2.append(synthesis_primal_obj(z=z_2, A=A, L=L, x=x,
                                                 lbda=lbda))
    loss_lista_2 = np.array(loss_lista_2)

    np.testing.assert_allclose(z_1, z_2)
    np.testing.assert_allclose(loss_lista_1, loss_lista_2)
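
# Hedged usage sketch: running the test above directly with one illustrative
# parameter combination. In the actual suite, (n, lbda, parametrization)
# would typically be supplied by @pytest.mark.parametrize decorators; the
# values below are assumptions ('origtv' is the only parametrization this
# file treats specially).
if __name__ == '__main__':
    test_init_parameters(n=10, lbda=0.5, parametrization='origtv')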

def run_experiment(methods, x_train, x_test, A, lbda, n_layers):
    """ Experiment launcher: fit each learned method on the training set and
    collect its prox-TV approximation errors on the test set. """
    print("=" * 80)

    l_diff_loss = []
    for name, type_, kwargs, _, _, _ in methods:
        print(f"[main script] running {name}")
        print("-" * 80)

        # 'untrained' variants share the 'origtv' architecture but skip the
        # training step below.
        algo_type = 'origtv' if ('untrained' in type_) else type_
        # NOTE: `args` and `device` are module-level globals of the main
        # script (argparse namespace and computation device).
        network = LearnTVAlgo(algo_type=algo_type, A=A, n_layers=n_layers,
                              max_iter=args.max_iter, device=device,
                              verbose=1, **kwargs)

        if 'untrained' not in type_:
            network.fit(x_train, lbda=lbda)

        diff_loss = compute_prox_tv_errors(network, x_test, lbda)
        l_diff_loss.append(diff_loss)
        print("=" * 80)

    return l_diff_loss
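
# Hedged usage sketch for `run_experiment`. Each entry of `methods` is a
# 6-tuple of which only the first three fields (display name, algo type,
# network kwargs) are consumed here; the trailing fields are placeholders
# for metadata used elsewhere in the script. The method names, kwargs, data
# split, and the `args`/`device` globals below are all illustrative
# assumptions, not values from the original script.
if __name__ == '__main__':
    import argparse
    args = argparse.Namespace(max_iter=300)
    device = 'cpu'

    x, _, _, _, _, A = synthetic_1d_dataset(n=150, s=0.5, snr=0.0, seed=29)
    x_train, x_test = x[:100], x[100:]

    methods = [
        ('learned TV (taut-string)', 'lpgd_taut_string', dict(),
         None, None, None),
        ('untrained TV', 'untrained-origtv', dict(n_inner_layers=20),
         None, None, None),
    ]
    l_diff_loss = run_experiment(methods, x_train, x_test, A,
                                 lbda=0.5, n_layers=20)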

def analysis_learned_taut_string(x_train, x_test, A, D, L, lbda, all_n_layers,
                                 type_=None, max_iter=300, device=None,
                                 net_kwargs=None, verbose=1):
    """ NN-algo solver for the analysis TV problem, with the prox-TV step
    computed by the taut-string algorithm. """
    net_kwargs = dict() if net_kwargs is None else net_kwargs

    params = None
    l_loss = []

    def record_loss(l_loss, u_train, u_test):
        l_loss.append(dict(
            train_loss=analysis_primal_obj(u_train, A, D, x_train, lbda),
            test_loss=analysis_primal_obj(u_test, A, D, x_test, lbda),
        ))
        return l_loss

    _, u0_train, _ = init_vuz(A, D, x_train)
    _, u0_test, _ = init_vuz(A, D, x_test)
    record_loss(l_loss, u0_train, u0_test)

    for n_layers in all_n_layers:

        # Declare a network for the given number of layers. Warm-init the
        # first layers with parameters learned by the previous networks,
        # if any.
        algo = LearnTVAlgo(algo_type='lpgd_taut_string', A=A,
                           n_layers=n_layers, max_iter=max_iter,
                           device=device, initial_parameters=params,
                           verbose=verbose, **net_kwargs)

        # train
        t0_ = time.time()
        algo.fit(x_train, lbda=lbda)
        delta_ = time.time() - t0_

        # save parameters
        params = algo.export_parameters()

        # get train and test error
        u_train = algo.transform(x_train, lbda, output_layer=n_layers)
        u_test = algo.transform(x_test, lbda, output_layer=n_layers)
        l_loss = record_loss(l_loss, u_train, u_test)

        if verbose > 0:
            # report the loss of the network just fitted (l_loss[0] holds
            # the init loss, so the last entry is the current one)
            train_loss = l_loss[-1]['train_loss']
            test_loss = l_loss[-1]['test_loss']
            print(f"\r[{algo.name}|layers#{n_layers:3d}] model fitted "
                  f"{delta_:4.1f}s train-loss={train_loss:.4e} "
                  f"test-loss={test_loss:.4e}")

    df = pd.DataFrame(l_loss)
    return df['train_loss'].values, df['test_loss'].values

def analysis_learned_algo(x_train, x_test, A, D, L, lbda, all_n_layers, type_,
                          max_iter=300, device=None, net_kwargs=None,
                          verbose=1):
    """ Generic NN-algo solver for the analysis TV problem; handles both
    trained networks and their 'untrained' counterparts. """
    net_kwargs = dict() if net_kwargs is None else net_kwargs

    params = None

    _, u0_train, _ = init_vuz(A, D, x_train)
    _, u0_test, _ = init_vuz(A, D, x_test)
    train_loss_init = analysis_primal_obj(u0_train, A, D, x_train, lbda)
    test_loss_init = analysis_primal_obj(u0_test, A, D, x_test, lbda)
    train_loss, test_loss = [train_loss_init], [test_loss_init]

    # 'untrained' variants share the 'origtv' architecture but skip fitting.
    algo_type = 'origtv' if ('untrained' in type_) else type_

    for n_layers in all_n_layers:

        # declare network, warm-initialized from the previous one if any
        algo = LearnTVAlgo(algo_type=algo_type, A=A, n_layers=n_layers,
                           max_iter=max_iter, device=device,
                           initial_parameters=params, verbose=verbose,
                           **net_kwargs)

        t0_ = time.time()
        if 'untrained' not in type_:
            algo.fit(x_train, lbda=lbda)
        delta_ = time.time() - t0_

        # save parameters
        params = algo.export_parameters()

        # get train and test error
        u_train = algo.transform(x_train, lbda, output_layer=n_layers)
        train_loss_ = analysis_primal_obj(u_train, A, D, x_train, lbda)
        train_loss.append(train_loss_)

        u_test = algo.transform(x_test, lbda, output_layer=n_layers)
        test_loss_ = analysis_primal_obj(u_test, A, D, x_test, lbda)
        test_loss.append(test_loss_)

        if verbose > 0:
            print(f"\r[{algo.name}|layers#{n_layers:3d}] model fitted "
                  f"{delta_:4.1f}s train-loss={train_loss_:.4e} "
                  f"test-loss={test_loss_:.4e}")

    return np.array(train_loss), np.array(test_loss)
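
# Hedged usage sketch shared by `analysis_learned_taut_string` and
# `analysis_learned_algo`: train networks of increasing depth, each
# warm-started from the parameters of the previous one, and collect the
# train/test loss curves. The data split and the depth schedule below are
# illustrative assumptions.
if __name__ == '__main__':
    x, _, _, L, D, A = synthetic_1d_dataset(n=150, s=0.5, snr=0.0, seed=29)
    x_train, x_test = x[:100], x[100:]
    train_loss, test_loss = analysis_learned_algo(
        x_train, x_test, A, D, L, lbda=0.5, all_n_layers=[5, 10, 20, 40],
        type_='lpgd_taut_string', max_iter=300, device='cpu')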

def test_coherence_analysis_loss(parametrization, lbda, n):
    """ Test that the network's internal loss function matches the reference
    NumPy analysis objective. """
    rng = check_random_state(None)
    x, _, _, _, D, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)

    _, _, z = init_vuz(A, D, x)
    z_ = check_tensor(z, device='cpu')

    cost = analysis_primal_obj(z, A, D, x, lbda=lbda)
    ltv = LearnTVAlgo(algo_type=parametrization, A=A, n_layers=10,
                      device='cpu')
    cost_ref = ltv._loss_fn(x, lbda, z_)
    np.testing.assert_allclose(cost_ref, cost, atol=1e-30)

def test_untrained_analysis_lista(lbda, parametrization, n):
    """ Test that an untrained analysis network reproduces the iterates of
    the Condat-Vu algorithm, layer by layer. """
    rng = check_random_state(None)
    x, _, _, _, D, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)

    v0, u0, _ = init_vuz(A, D, x)

    n_layers = 10
    rho = 1.0
    sigma = 0.5
    L_D = np.linalg.norm(D.dot(D.T), ord=2)
    L_A = np.linalg.norm(A.dot(A.T), ord=2)
    tau = 1.0 / (L_A / 2.0 + sigma * L_D ** 2)

    lista = LearnTVAlgo(algo_type=parametrization, A=A, n_layers=n_layers,
                        device='cpu')

    # layer-by-layer loss of the untrained network
    loss_untrained_condat = [analysis_primal_obj(u0, A, D, x, lbda)]
    for n_layer_ in range(1, n_layers + 1):
        z = lista.transform(x=x, lbda=lbda, output_layer=n_layer_)
        loss_untrained_condat.append(analysis_primal_obj(z, A, D, x, lbda))
    loss_untrained_condat = np.array(loss_untrained_condat)

    # reference: iteration-by-iteration loss of the Condat-Vu algorithm
    v0, u0, _ = init_vuz(A, D, x, force_numpy=True)
    params = dict(
        grad=lambda u: analysis_primal_grad(u, A, x),
        obj=lambda u: analysis_primal_obj(u, A, D, x, lbda),
        prox=lambda z: pseudo_soft_th_numpy(z, lbda, 1.0 / sigma),
        psi=lambda u: u.dot(D),
        adj_psi=lambda v: v.dot(D.T),
        v0=v0, z0=u0, lbda=lbda, sigma=sigma, tau=tau, rho=rho,
        max_iter=n_layers, early_stopping=False, debug=True, verbose=0,
    )
    _, _, loss_condat = condatvu(**params)

    np.testing.assert_allclose(loss_condat, loss_untrained_condat, atol=1e-20)
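
# Helper sketch (an illustration, not part of the original module): the step
# sizes used in the test above follow the usual Condat-Vu recipe with
# rho = 1. Factoring them out makes the parameter choice explicit.
def condatvu_step_sizes(A, D, sigma=0.5):
    """Return (tau, sigma) primal/dual step sizes for Condat-Vu, computed
    exactly as in test_untrained_analysis_lista: L_A bounds the Lipschitz
    constant of the data-fit gradient and L_D is the spectral norm of
    D D^T."""
    L_D = np.linalg.norm(D.dot(D.T), ord=2)
    L_A = np.linalg.norm(A.dot(A.T), ord=2)
    tau = 1.0 / (L_A / 2.0 + sigma * L_D ** 2)
    return tau, sigma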

def test_coherence_training_analysis_loss(parametrization, lbda, n):
    """ Test that the training loss recorded during fitting matches the loss
    recomputed from the transformed output. """
    rng = check_random_state(None)
    x, _, _, _, D, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)

    _, u0, _ = init_vuz(A, D, x)
    train_loss = [analysis_primal_obj(u0, A, D, x, lbda)]
    train_loss_ = [analysis_primal_obj(u0, A, D, x, lbda)]

    for n_layers in range(1, 10):
        lista = LearnTVAlgo(algo_type=parametrization, A=A,
                            n_layers=n_layers, max_iter=10)
        lista.fit(x, lbda=lbda)
        # loss recorded by the solver vs. loss recomputed from the output
        train_loss_.append(lista.training_loss_[-1])
        u = lista.transform(x, lbda, output_layer=n_layers)
        train_loss.append(analysis_primal_obj(u, A, D, x, lbda))

    np.testing.assert_allclose(train_loss_, train_loss, atol=1e-30)

def test_untrained_synthesis_lista(lbda, parametrization, n):
    """ Test that an untrained synthesis network reproduces the iterates of
    ISTA, layer by layer. """
    rng = check_random_state(None)
    x, _, _, L, D, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)

    _, _, z0 = init_vuz(A, D, x)

    n_layers = 10
    LA = L.dot(A)
    # step size: inverse Lipschitz constant of the synthesis gradient
    step_size = 1.0 / np.linalg.norm(LA, ord=2) ** 2

    lista = LearnTVAlgo(algo_type=parametrization, A=A, n_layers=n_layers,
                        device='cpu')

    # layer-by-layer loss of the untrained network
    loss_untrained_lista = [
        synthesis_primal_obj(z=z0, A=A, L=L, x=x, lbda=lbda)
    ]
    for n_layer_ in range(1, n_layers + 1):
        z_hat = lista.transform(x=x, lbda=lbda, output_layer=n_layer_)
        loss_untrained_lista.append(
            synthesis_primal_obj(z=z_hat, A=A, L=L, x=x, lbda=lbda))
    loss_untrained_lista = np.array(loss_untrained_lista)

    # reference: plain ISTA (the fista solver with momentum disabled)
    params = dict(
        grad=lambda z: synthesis_primal_grad(z, A, L, x),
        obj=lambda z: synthesis_primal_obj(z, A, L, x, lbda),
        prox=lambda z, s: pseudo_soft_th_numpy(z, lbda, s),
        x0=z0, momentum=None, restarting=None, max_iter=n_layers,
        step_size=step_size, early_stopping=False, debug=True, verbose=0,
    )
    _, loss_ista = fista(**params)

    np.testing.assert_allclose(loss_ista, loss_untrained_lista, atol=1e-20)
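
# Helper sketch (an illustration, not part of the original module): the step
# size used in the test above is the inverse Lipschitz constant of the smooth
# part of the synthesis objective 0.5 * ||L A z - x||_2^2 plus the l1 term,
# whose gradient z -> (LA)^T (LA z - x) is ||LA||_2^2-Lipschitz.
def synthesis_lipschitz(A, L):
    """Lipschitz constant of the synthesis data-fit gradient: the squared
    spectral norm of LA, matching the 1 / step_size used in the test."""
    return np.linalg.norm(L.dot(A), ord=2) ** 2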