def test_init_parameters(n, lbda, parametrization):
    """ Test that a network initialized from exported parameters reproduces
    the same estimates and losses. """
    rng = check_random_state(27)
    x, _, _, L, _, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)

    n_layers = 5

    # limit the number of inner layers for 'origtv' to avoid long computations
    kwargs = {}
    if parametrization == 'origtv':
        kwargs['n_inner_layers'] = 5

    lista_1 = LearnTVAlgo(algo_type=parametrization, A=A, n_layers=n_layers,
                          max_iter=10, net_solver_type='one_shot', **kwargs)
    lista_1.fit(x, lbda=lbda)
    params = lista_1.export_parameters()

    loss_lista_1 = []
    for n_layer_ in range(n_layers + 1):
        z_1 = lista_1.transform(x=x, lbda=lbda, output_layer=n_layer_)
        loss_lista_1.append(synthesis_primal_obj(z=z_1, A=A, L=L, x=x,
                                                 lbda=lbda))
    loss_lista_1 = np.array(loss_lista_1)

    lista_2 = LearnTVAlgo(algo_type=parametrization, A=A, n_layers=n_layers,
                          initial_parameters=params, max_iter=10, **kwargs)

    loss_lista_2 = []
    for n_layer_ in range(n_layers + 1):
        z_2 = lista_2.transform(x=x, lbda=lbda, output_layer=n_layer_)
        loss_lista_2.append(synthesis_primal_obj(z=z_2, A=A, L=L, x=x,
                                                 lbda=lbda))
    loss_lista_2 = np.array(loss_lista_2)

    np.testing.assert_allclose(z_1, z_2)
    np.testing.assert_allclose(loss_lista_1, loss_lista_2)

def test_soft_thresholding(seed, shape, lbda):
    """ Test that the torch and numpy soft-thresholding operators agree. """
    z = check_random_state(seed).randn(*shape)
    prox_z_ref = pseudo_soft_th_tensor(torch.Tensor(z), lbda, step_size=1.0)
    prox_z_ref = prox_z_ref.numpy()
    prox_z = pseudo_soft_th_numpy(z, lbda, step_size=1.0)
    np.testing.assert_allclose(prox_z_ref, prox_z, rtol=1e-2)

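# For reference, a minimal NumPy sketch of the plain soft-thresholding
# operator compared in the test above. This helper is illustrative only and
# not part of the tested API: the package's `pseudo_soft_th_numpy` /
# `pseudo_soft_th_tensor` may deviate from it (e.g. by leaving some
# coefficients unpenalized). It assumes `import numpy as np`, as used
# throughout this file.
def _soft_thresholding_sketch(z, lbda, step_size=1.0):
    """ Element-wise prox of `step_size * lbda * ||.||_1`. """
    return np.sign(z) * np.maximum(np.abs(z) - lbda * step_size, 0.0)
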
def test_coherence_analysis_loss(parametrization, lbda, n):
    """ Test coherence regarding the loss function between learnt and fixed
    algorithms. """
    rng = check_random_state(None)
    x, _, _, _, D, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)

    _, _, z = init_vuz(A, D, x)
    z_ = check_tensor(z, device='cpu')
    cost = analysis_primal_obj(z, A, D, x, lbda=lbda)
    ltv = LearnTVAlgo(algo_type=parametrization, A=A, n_layers=10,
                      device='cpu')
    cost_ref = ltv._loss_fn(x, lbda, z_)
    np.testing.assert_allclose(cost_ref, cost, atol=1e-30)

def test_untrained_analysis_lista(lbda, parametrization, n):
    """ Test that an untrained analysis network matches the Condat-Vu
    iterations. """
    rng = check_random_state(None)
    x, _, _, _, D, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)

    v0, u0, _ = init_vuz(A, D, x)

    n_layers = 10
    rho = 1.0
    sigma = 0.5
    L_D = np.linalg.norm(D.dot(D.T), ord=2)
    L_A = np.linalg.norm(A.dot(A.T), ord=2)
    tau = 1.0 / (L_A / 2.0 + sigma * L_D**2)

    lista = LearnTVAlgo(algo_type=parametrization, A=A, n_layers=n_layers,
                        device='cpu')

    loss_untrained_condat = [analysis_primal_obj(u0, A, D, x, lbda)]
    for n_layer_ in range(1, n_layers + 1):
        z = lista.transform(x=x, lbda=lbda, output_layer=n_layer_)
        loss_untrained_condat.append(analysis_primal_obj(z, A, D, x, lbda))
    loss_untrained_condat = np.array(loss_untrained_condat)

    v0, u0, _ = init_vuz(A, D, x, force_numpy=True)
    params = dict(
        grad=lambda u: analysis_primal_grad(u, A, x),
        obj=lambda u: analysis_primal_obj(u, A, D, x, lbda),
        prox=lambda z: pseudo_soft_th_numpy(z, lbda, 1.0 / sigma),
        psi=lambda u: u.dot(D),
        adj_psi=lambda v: v.dot(D.T),
        v0=v0, z0=u0, lbda=lbda, sigma=sigma, tau=tau, rho=rho,
        max_iter=n_layers, early_stopping=False, debug=True, verbose=0,
    )
    _, _, loss_condat = condatvu(**params)

    np.testing.assert_allclose(loss_condat, loss_untrained_condat, atol=1e-20)

def test_coherence_training_analysis_loss(parametrization, lbda, n):
    """ Test coherence regarding the loss function between learnt and fixed
    algorithms. """
    rng = check_random_state(None)
    x, _, _, _, D, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)

    _, u0, _ = init_vuz(A, D, x)
    train_loss = [analysis_primal_obj(u0, A, D, x, lbda)]
    train_loss_ = [analysis_primal_obj(u0, A, D, x, lbda)]
    for n_layers in range(1, 10):
        lista = LearnTVAlgo(algo_type=parametrization, A=A,
                            n_layers=n_layers, max_iter=10)
        lista.fit(x, lbda=lbda)
        train_loss_.append(lista.training_loss_[-1])
        u = lista.transform(x, lbda, output_layer=n_layers)
        train_loss.append(analysis_primal_obj(u, A, D, x, lbda))
    np.testing.assert_allclose(train_loss_, train_loss, atol=1e-30)

def test_analysis_subgrad(n, lbda):
    """ Test the sub-gradient of the analysis LASSO. """
    rng = check_random_state(None)
    x, u, _, _, D, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)
    u = rng.rand(*u.shape)
    n_atoms = D.shape[0]

    def finite_grad(u):
        def f(u):
            u = u.reshape(n, n_atoms)
            # the actual considered loss is not normalized but for
            # convenience we want to check the sample-loss average
            return analysis_primal_obj(u, A, D, x, lbda=lbda) * n
        grad = approx_fprime(xk=u.ravel(), f=f, epsilon=1.0e-6)
        return grad.reshape(n, n_atoms)

    grad_ref = finite_grad(u)
    grad_test = analysis_primal_subgrad(u, A, D, x, lbda)

    np.testing.assert_allclose(grad_ref, grad_test, atol=1e-5)  # bad precision

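# For context, a minimal sketch of the closed-form sub-gradient that the
# finite-difference check above targets, written for the un-normalised
# analysis objective 0.5 * ||u A - x||_F^2 + lbda * ||u D||_1. The
# normalisation convention of the package's `analysis_primal_subgrad` is an
# assumption here, so this helper is illustrative only.
def _analysis_subgrad_sketch(u, A, D, x, lbda):
    """ One valid sub-gradient, taking sign(0) = 0 for the l1 term. """
    return (u.dot(A) - x).dot(A.T) + lbda * np.sign(u.dot(D)).dot(D.T)
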
def test_synthesis_grad(n, m):
    """ Test the gradient of the synthesis LASSO data-fit term. """
    rng = check_random_state(None)
    x, _, z, L, D, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)
    z = rng.rand(*z.shape)
    n_atoms = D.shape[0]

    def finite_grad(z):
        def f(z):
            z = z.reshape(n, n_atoms)
            # the actual considered loss is not normalized but for
            # convenience we want to check the sample-loss average
            return synthesis_primal_obj(z, A, L, x, lbda=0.0) * n
        grad = approx_fprime(xk=z.ravel(), f=f, epsilon=1e-6)
        return grad.reshape(n, n_atoms)

    grad_ref = finite_grad(z)
    grad_test = synthesis_primal_grad(z, A, L, x)

    np.testing.assert_allclose(grad_ref, grad_test, rtol=5e-2)  # bad precision

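# Similarly, a minimal sketch of the closed-form gradient targeted by the
# check above, for the un-normalised data-fit term 0.5 * ||z L A - x||_F^2
# (lbda=0.0 in the test removes the l1 part). The normalisation convention
# of the package's `synthesis_primal_grad` is an assumption here, so this
# helper is illustrative only.
def _synthesis_grad_sketch(z, A, L, x):
    """ Gradient of 0.5 * ||z L A - x||_F^2 with respect to z. """
    return (z.dot(L).dot(A) - x).dot(A.T).dot(L.T)
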
def test_untrained_synthesis_lista(lbda, parametrization, n):
    """ Test that an untrained synthesis network matches the ISTA
    iterations. """
    rng = check_random_state(None)
    x, _, _, L, D, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)

    _, _, z0 = init_vuz(A, D, x)

    n_layers = 10
    LA = L.dot(A)
    step_size = 1.0 / np.linalg.norm(LA, ord=2) ** 2

    lista = LearnTVAlgo(algo_type=parametrization, A=A, n_layers=n_layers,
                        device='cpu')

    loss_untrained_lista = [
        synthesis_primal_obj(z=z0, A=A, L=L, x=x, lbda=lbda)
    ]
    for n_layer_ in range(1, n_layers + 1):
        z_hat = lista.transform(x=x, lbda=lbda, output_layer=n_layer_)
        loss_untrained_lista.append(
            synthesis_primal_obj(z=z_hat, A=A, L=L, x=x, lbda=lbda))
    loss_untrained_lista = np.array(loss_untrained_lista)

    params = dict(
        grad=lambda z: synthesis_primal_grad(z, A, L, x),
        obj=lambda z: synthesis_primal_obj(z, A, L, x, lbda),
        prox=lambda z, s: pseudo_soft_th_numpy(z, lbda, s),
        x0=z0, momentum=None, restarting=None, max_iter=n_layers,
        step_size=step_size, early_stopping=False, debug=True, verbose=0,
    )
    _, loss_ista = fista(**params)

    np.testing.assert_allclose(loss_ista, loss_untrained_lista, atol=1e-20)

def test_coherence_init(lbda, seed):
    """ Test that the v, u and z initializations returned by `init_vuz` all
    yield the same cost. """
    rng = check_random_state(seed)
    x, _, _, L, D, A = synthetic_1d_dataset()

    v0 = None
    v0, u0, z0 = init_vuz(A, D, x, v0=v0)
    cost_1 = synthesis_primal_obj(z0, A, L, x, lbda)
    cost_2 = analysis_primal_obj(u0, A, D, x, lbda)
    cost_3 = analysis_primal_obj(v_to_u(v0, x, A, D), A, D, x, lbda)
    np.testing.assert_allclose(cost_1, cost_2)
    np.testing.assert_allclose(cost_1, cost_3)

    v0 = rng.randn(*v0.shape)
    v0, u0, z0 = init_vuz(A, D, x, v0=v0)
    cost_1 = synthesis_primal_obj(z0, A, L, x, lbda)
    cost_2 = analysis_primal_obj(u0, A, D, x, lbda)
    cost_3 = analysis_primal_obj(v_to_u(v0, x, A, D), A, D, x, lbda)
    np.testing.assert_allclose(cost_1, cost_2)
    np.testing.assert_allclose(cost_1, cost_3)

def test_analysis_dual_grad(n, lbda):
    """ Test the gradient of the dual analysis objective. """
    rng = check_random_state(None)
    x, _, _, _, D, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)
    eps = 1e-3
    v_dim = D.shape[1]
    v = np.clip(rng.randn(n, v_dim), -(lbda - eps), (lbda - eps))
    Psi_A = np.linalg.pinv(A).dot(D)

    # finite-difference gradient w.r.t. v
    def finite_grad(v):
        def f(v):
            v = v.reshape(n, v_dim)
            # the actual considered loss is not normalized but for
            # convenience we want to check the sample-loss average
            return analysis_dual_obj(v, A, D, x, lbda, Psi_A=Psi_A) * n
        grad = approx_fprime(xk=v.ravel(), f=f, epsilon=1.0e-6)
        return grad.reshape(n, v_dim)

    grad_ref = finite_grad(v)
    grad_test = analysis_dual_grad(v, A, D, x, Psi_A=Psi_A)

    np.testing.assert_allclose(grad_ref, grad_test, atol=1e-4)  # bad precision

###########################################################################
# Define variables and data

# Define variables
n_samples = 1000 + 1  # training samples can't be 0
n_samples_testing = n_samples - 1
n_atoms = 40
n_dim = 40
s = 0.1
snr = 0.0
all_n_layers = logspace_layers(n_layers=10, max_depth=args.max_iter)
ticks_layers = np.array([0] + all_n_layers)
seed = args.seed if args.seed is not None else np.random.randint(0, 1000)
rng = check_random_state(seed)
print(f'Seed used = {seed}')  # noqa: E999

# Generate data
results = synthetic_1d_dataset(n_atoms=n_atoms, n_dim=n_dim, n=n_samples,
                               s=s, snr=snr, seed=seed)
x, _, z, L, D, A = results

x_train = x[n_samples_testing:, :]
x_test = x[:n_samples_testing, :]

###########################################################################
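
###########################################################################
# Illustrative usage sketch (not part of the original script): fit a learned
# network on the split defined above, using only the `LearnTVAlgo` calls
# exercised in the tests (`fit`, `transform`) and `synthesis_primal_obj`.
# The `lbda` value and the 'origtv' / `n_inner_layers` choices are
# placeholder assumptions.
lbda = 0.1  # assumed regularisation level, for illustration only
network = LearnTVAlgo(algo_type='origtv', A=A, n_layers=all_n_layers[-1],
                      n_inner_layers=5, max_iter=args.max_iter)
network.fit(x_train, lbda=lbda)
z_test = network.transform(x=x_test, lbda=lbda,
                           output_layer=all_n_layers[-1])
test_loss = synthesis_primal_obj(z=z_test, A=A, L=L, x=x_test, lbda=lbda)
print(f'Test loss after {all_n_layers[-1]} layers: {test_loss:.4e}')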