Example 1
def test_learn_prox_gradient(learn_prox):
    # Check that the parameter_groups' parameters are used correctly.

    n_atoms = 2
    n_samples, n_dims = 5, 3
    n_layers = 4
    lbda = .2
    max_iter = 10

    x = check_tensor(np.random.randn(n_samples, n_dims))
    A = np.random.randn(n_atoms, n_dims)

    network = ListaTV(A, n_layers, learn_prox=learn_prox,
                      n_inner_layers=5, max_iter=max_iter)
    network._compute_gradient(x, lbda, layer_id=n_layers)
    for group_name, group_params in network.parameter_groups.items():
        for name, p in group_params.items():
            assert p.requires_grad
            assert p.grad is not None, (
                f"{group_name}:{name} gradient is null"
            )

    # Check that initial_parameters are registered correctly.
    params = network.export_parameters()

    network_clone = ListaTV(A, n_layers, learn_prox=learn_prox,
                            n_inner_layers=5, initial_parameters=params)
    network_clone._compute_gradient(x, lbda, layer_id=n_layers)
    for group_name, group_params in network_clone.parameter_groups.items():
        for name, p in group_params.items():
            # layer-0's 'Wu' is excluded from the gradient check here
            if group_name != 'layer-0' or name != 'Wu':
                assert p.requires_grad
                assert p.grad is not None, (
                    f"{group_name}:{name} gradient is null"
                )
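The gradient-flow check above generalises to any torch module: run one backward pass, then assert that every registered parameter received a gradient. A minimal self-contained sketch of the same pattern, with a toy torch.nn.Module standing in for ListaTV (all names below are illustrative):

import torch


class TinyUnrolledNet(torch.nn.Module):
    """ Toy stand-in for ListaTV: n_layers layers, one weight each. """

    def __init__(self, n_dims, n_layers=2):
        super().__init__()
        self.parameter_groups = {}
        for i in range(n_layers):
            W = torch.nn.Parameter(torch.randn(n_dims, n_dims))
            self.register_parameter(f'layer-{i}-W', W)
            self.parameter_groups[f'layer-{i}'] = {'W': W}

    def forward(self, x):
        for group_params in self.parameter_groups.values():
            x = torch.tanh(x @ group_params['W'])
        return x


net = TinyUnrolledNet(n_dims=3)
net(torch.randn(5, 3)).sum().backward()
for group_name, group_params in net.parameter_groups.items():
    for name, p in group_params.items():
        assert p.grad is not None, f"{group_name}:{name} gradient is null"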
Example 2
    def forward(self, x, lbda, output_layer=None):
        """ Forward pass of the network. """
        output_layer = self.check_output_layer(output_layer)

        # initialize variables
        _, u, _ = init_vuz(self.A,
                           self.D,
                           x,
                           lbda,
                           inv_A=self.inv_A_,
                           device=self.device)

        for layer_id in range(output_layer):
            layer_params = self.parameter_groups[f'layer-{layer_id}']
            # retrieve parameters
            h = layer_params['h']
            hth = layer_params['hth']
            mul_lbda = layer_params.get('threshold', 1.0 / self.l_)
            mul_lbda = check_tensor(mul_lbda, device=self.device)

            # apply one 'iteration'
            u = hth_id_u_tensor(hth, u) + htx_tensor(h, x)
            u = ProxTV_l1.apply(u, lbda * mul_lbda)

        return u
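Each loop iteration is one proximal-gradient step: a linear update of u followed by a prox with a step-dependent threshold. A hedged single-layer sketch, where plain matrix products stand in for hth_id_u_tensor / htx_tensor and elementwise soft-thresholding stands in for the exact TV prox ProxTV_l1:

import torch


def unrolled_layer_sketch(u, x, W_u, W_x, threshold):
    """ One layer: u <- prox(u W_u + x W_x, threshold) (illustrative).

    W_u plays the role of (Id - h^T h / L) and W_x that of h^T / L; the
    exact TV prox used by the network is replaced here by elementwise
    soft-thresholding purely for brevity.
    """
    u = u @ W_u + x @ W_x
    return torch.sign(u) * torch.clamp(u.abs() - threshold, min=0.0)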
Example 3
def test_loss_coherence():
    """ Test the loss function coherence. """
    t_r = 1.0
    n_time_hrf = 30
    n_time_valid = 100
    n_time = n_time_valid + n_time_hrf - 1
    n_samples = 10
    h = double_gamma_hrf(t_r, n_time_hrf)
    u = np.random.randn(n_samples, n_time_valid)
    x = np.random.randn(n_samples, n_time)
    u_ = check_tensor(u)
    x_ = check_tensor(x)
    lbda = 0.1
    kwargs = dict(h=h, n_times_valid=n_time_valid, n_layers=10)
    net_solver = LpgdTautStringHRF(**kwargs)
    loss = float(net_solver._loss_fn(x_, lbda, u_))
    loss_ = _obj_t_analysis(u, x, h, lbda)
    np.testing.assert_allclose(loss, loss_, rtol=1e-3)
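The reference `_obj_t_analysis` evaluates the analysis objective: a least-squares fit of the convolution h * u against x, plus an l1 penalty on the first differences of u. A hedged numpy sketch of that formula (the real implementation may normalise differently):

import numpy as np


def obj_t_analysis_sketch(u, x, h, lbda):
    # 0.5 * ||h * u - x||_2^2 + lbda * ||diff(u)||_1, summed over samples;
    # the full convolution matches n_time = n_time_valid + n_time_hrf - 1.
    hu = np.array([np.convolve(h, u_i) for u_i in u])
    data_fit = 0.5 * np.sum((hu - x) ** 2)
    reg = lbda * np.sum(np.abs(np.diff(u, axis=1)))
    return data_fit + reg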
Example 4
    def _compute_loss(self, x, lbda):
        """ Return the loss evolution in the forward pass of x. """
        _, u0 = self._get_init(x, lbda)
        x_ = check_tensor(x, device=self.net_solver.device)
        with torch.no_grad():
            # include the initial loss so the curve starts at layer 0;
            # computing it under no_grad avoids detaching by hand
            l_loss = [self.net_solver._loss_fn(x_, lbda, u0).cpu().numpy()]
            for n_layers in range(self.net_solver.n_layers):
                u = self.net_solver(x_, lbda, output_layer=n_layers + 1)
                loss_ = self.net_solver._loss_fn(x_, lbda, u)
                l_loss.append(loss_.cpu().numpy())
        return np.array(l_loss)
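A hypothetical use of this helper, plotting the per-layer loss decay of a fitted solver (`tracker` names whatever object owns _compute_loss; it and x are placeholders here):

import matplotlib.pyplot as plt

l_loss = tracker._compute_loss(x, lbda=0.1)
plt.semilogy(l_loss - l_loss.min() + 1e-12)  # loss gap, one point per layer
plt.xlabel('layer')
plt.ylabel('loss gap')
plt.show()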
Example 5
    def __init__(self,
                 h,
                 n_times_valid,
                 n_layers,
                 learn_th=False,
                 use_moreau=False,
                 max_iter=100,
                 net_solver_type="recursive",
                 initial_parameters=None,
                 name="LPGD - Taut-string",
                 verbose=0,
                 device=None):
        if device is not None and 'cuda' in device:
            warnings.warn("Cannot use LpgdTautString on cuda device. "
                          "Falling back to CPU.")
            device = 'cpu'

        self.use_moreau = use_moreau

        self.h = np.array(h)
        self.h_ = check_tensor(h, device=device)
        self.l_ = lipsch_cst_from_kernel(h, n_times_valid)

        self.A = make_toeplitz(h, n_times_valid).T
        self.A_ = check_tensor(self.A, device=device)
        self.inv_A_ = torch.pinverse(self.A_)
        self.D = (np.eye(n_times_valid, k=-1) -
                  np.eye(n_times_valid, k=0))[:, :-1]

        super().__init__(n_layers=n_layers,
                         learn_th=learn_th,
                         max_iter=max_iter,
                         net_solver_type=net_solver_type,
                         initial_parameters=initial_parameters,
                         name=name,
                         verbose=verbose,
                         device=device)
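The per-layer step size 1 / self.l_ comes from lipsch_cst_from_kernel: the Lipschitz constant of the gradient of the quadratic data fit, i.e. the squared spectral norm of the convolution operator built from h. A hedged dense-matrix sketch (the actual helper may use a cheaper estimate, e.g. power iteration):

import numpy as np


def lipsch_cst_from_kernel_sketch(h, n_times_valid):
    # Full-convolution Toeplitz matrix of the kernel h, consistent with
    # the shapes implied by make_toeplitz above (assumed layout).
    h = np.asarray(h)
    A = np.zeros((len(h) + n_times_valid - 1, n_times_valid))
    for j in range(n_times_valid):
        A[j:j + len(h), j] = h
    # The gradient of 0.5 * ||A u - x||^2 is A^T (A u - x), whose
    # Lipschitz constant is the squared spectral norm of A.
    return np.linalg.norm(A, ord=2) ** 2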
Example 6
def test_coherence_analysis_loss(parametrization, lbda, n):
    """ Test coherence regarding the loss function between learnt and fixed
    algorithms. """
    rng = check_random_state(None)
    x, _, _, _, D, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)
    _, _, z = init_vuz(A, D, x)
    z_ = check_tensor(z, device='cpu')

    cost = analysis_primal_obj(z, A, D, x, lbda=lbda)
    ltv = LearnTVAlgo(algo_type=parametrization,
                      A=A,
                      n_layers=10,
                      device='cpu')
    cost_ref = float(ltv._loss_fn(check_tensor(x, device='cpu'), lbda, z_))

    np.testing.assert_allclose(cost_ref, cost, atol=1e-30)
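For context, analysis_primal_obj evaluates the analysis-formulation primal objective on numpy arrays. A hedged sketch consistent with the shapes used above (any normalisation by the number of samples in the real function may differ):

import numpy as np


def analysis_primal_obj_sketch(z, A, D, x, lbda):
    # 0.5 * ||z A - x||_F^2 + lbda * ||z D||_1, with A the synthesis
    # operator and D the first-difference analysis operator.
    residual = z.dot(A) - x
    return 0.5 * np.sum(residual ** 2) + lbda * np.sum(np.abs(z.dot(D)))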
Example 7
    n_atoms = 10
    n_dim = 5
    n_samples = 1  # assumed value; set earlier in the full script
    lmbd = args.lmbd
    max_iter = 100

    # loss_prox and loss_subgradient are callables defined earlier in the
    # full script, one per differentiation strategy.
    params = {
        'prox': dict(loss=loss_prox, color='b'),
        'subgradient': dict(loss=loss_subgradient, color='g'),
    }

    # Layer with backprop for prox_tv
    prox = ProxTV(prox='prox_tv', n_dim=n_dim)

    # get a random vector that we try to regress
    x = torch.randn(n_samples, n_atoms)
    lmbd_ = check_tensor(lmbd)

    # compute the starting point
    if args.starting_point == 'mean':
        z0 = x.numpy().mean() * np.ones(x.shape)
    elif args.starting_point == 'random':
        z0 = np.random.randn(1, n_atoms)
    elif args.starting_point == 'perturb':
        z0 = x.numpy() + .1 * np.random.randn(*x.shape)
    else:
        raise NotImplementedError()

    # Compute and display the prox of x (our target)
    prox_x = prox(x, lmbd_)
    ax = plt.subplot(111)
    ax.plot(x.data[0], 'r', linewidth=2)
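A hypothetical version of the inner loop this script runs for each entry of params: gradient descent on z, back-propagating through the ProxTV layer and recording the loss (the objective and learning rate below are assumptions, not the script's actual settings):

z = torch.tensor(z0, dtype=x.dtype, requires_grad=True)
optimizer = torch.optim.SGD([z], lr=1e-2)
loss_history = []
for _ in range(max_iter):
    optimizer.zero_grad()
    # regress the target prox_x through the differentiable prox layer
    loss = 0.5 * ((prox(z, lmbd_) - prox_x) ** 2).sum()
    loss.backward()
    optimizer.step()
    loss_history.append(float(loss))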