Example No. 1
def test_init_parameters(n, lbda, parametrization):
    """ Test the gradient of z. """
    rng = check_random_state(27)
    x, _, _, L, _, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)
    n_layers = 5

    # limit the number of inner layers for origtv to avoid long computations
    kwargs = {}
    if parametrization == 'origtv':
        kwargs['n_inner_layers'] = 5

    lista_1 = LearnTVAlgo(algo_type=parametrization, A=A, n_layers=n_layers,
                          max_iter=10, net_solver_type='one_shot', **kwargs)
    lista_1.fit(x, lbda=lbda)
    params = lista_1.export_parameters()

    loss_lista_1 = []
    for n_layer_ in range(n_layers + 1):
        z_1 = lista_1.transform(x=x, lbda=lbda, output_layer=n_layer_)
        loss_lista_1.append(synthesis_primal_obj(z=z_1, A=A, L=L, x=x,
                                                 lbda=lbda))
    loss_lista_1 = np.array(loss_lista_1)

    lista_2 = LearnTVAlgo(algo_type=parametrization, A=A, n_layers=n_layers,
                          initial_parameters=params, max_iter=10, **kwargs)

    loss_lista_2 = []
    for n_layer_ in range(n_layers + 1):
        z_2 = lista_2.transform(x=x, lbda=lbda, output_layer=n_layer_)
        loss_lista_2.append(synthesis_primal_obj(z=z_2, A=A, L=L, x=x,
                                                 lbda=lbda))
    loss_lista_2 = np.array(loss_lista_2)

    np.testing.assert_allclose(z_1, z_2)
    np.testing.assert_allclose(loss_lista_1, loss_lista_2)
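
Note: these test functions take their arguments (n, lbda, parametrization)
from pytest parametrization. Below is a minimal sketch of how such a grid
could be wired up; only 'origtv' is known from the snippet above, and the
numeric values are illustrative assumptions, not the project's actual grid.

import pytest

@pytest.mark.parametrize('n', [1, 10])
@pytest.mark.parametrize('lbda', [0.1, 0.9])
@pytest.mark.parametrize('parametrization', ['origtv'])
def test_init_parameters(n, lbda, parametrization):
    ...  # body as in the example above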
Example No. 2
def test_coherence_analysis_loss(parametrization, lbda, n):
    """ Test coherence regarding the loss function between learnt and fixed
    algorithms. """
    rng = check_random_state(None)
    x, _, _, _, D, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)
    _, _, z = init_vuz(A, D, x)
    z_ = check_tensor(z, device='cpu')

    cost = analysis_primal_obj(z, A, D, x, lbda=lbda)
    ltv = LearnTVAlgo(algo_type=parametrization,
                      A=A,
                      n_layers=10,
                      device='cpu')
    cost_ref = ltv._loss_fn(x, lbda, z_)

    np.testing.assert_allclose(cost_ref, cost, atol=1e-30)
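
analysis_primal_obj itself is not shown in these excerpts. Judging from how
the tests call it (row-major samples, u.dot(A) and u.dot(D), and the
per-sample averaging noted in Example No. 5), a plausible sketch is the one
below; the exact form is an assumption, not the project's definition.

import numpy as np

def analysis_primal_obj_sketch(u, A, D, x, lbda):
    """Assumed form: mean over samples of 0.5*||uA - x||^2 + lbda*||uD||_1."""
    residual = u.dot(A) - x                    # one sample per row
    data_fit = 0.5 * np.sum(residual ** 2)
    penalty = lbda * np.sum(np.abs(u.dot(D)))  # analysis (TV-like) penalty
    return (data_fit + penalty) / x.shape[0]   # per-sample average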
Example No. 3
def test_untrained_analysis_lista(lbda, parametrization, n):
    """ Test the gradient of z. """
    rng = check_random_state(None)
    x, _, _, _, D, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)
    v0, u0, _ = init_vuz(A, D, x)

    n_layers = 10
    rho = 1.0
    sigma = 0.5
    L_D = np.linalg.norm(D.dot(D.T), ord=2)
    L_A = np.linalg.norm(A.dot(A.T), ord=2)
    tau = 1.0 / (L_A / 2.0 + sigma * L_D**2)

    lista = LearnTVAlgo(algo_type=parametrization,
                        A=A,
                        n_layers=n_layers,
                        device='cpu')
    loss_untrained_condat = [analysis_primal_obj(u0, A, D, x, lbda)]
    for n_layer_ in range(1, n_layers + 1):
        z = lista.transform(x=x, lbda=lbda, output_layer=n_layer_)
        loss_untrained_condat.append(analysis_primal_obj(z, A, D, x, lbda))
    loss_untrained_condat = np.array(loss_untrained_condat)

    v0, u0, _ = init_vuz(A, D, x, force_numpy=True)
    params = dict(
        grad=lambda u: analysis_primal_grad(u, A, x),
        obj=lambda u: analysis_primal_obj(u, A, D, x, lbda),
        prox=lambda z: pseudo_soft_th_numpy(z, lbda, 1.0 / sigma),
        psi=lambda u: u.dot(D),
        adj_psi=lambda v: v.dot(D.T),
        v0=v0,
        z0=u0,
        lbda=lbda,
        sigma=sigma,
        tau=tau,
        rho=rho,
        max_iter=n_layers,
        early_stopping=False,
        debug=True,
        verbose=0,
    )
    _, _, loss_condat = condatvu(**params)

    np.testing.assert_allclose(loss_condat, loss_untrained_condat, atol=1e-20)
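
The fixed algorithm matched here is the Condat-Vu primal-dual solver. For
reference, below is a generic, self-contained sketch of one iteration for
min_u 0.5*||uA - x||^2 + lbda*||uD||_1, using the same row-vector
conventions and step sizes as the test (and ignoring any per-sample
normalization). The prox of the conjugate of lbda*||.||_1 is a clip onto
[-lbda, lbda]. This is a textbook sketch, not the project's condatvu
implementation.

import numpy as np

def condatvu_step(u, v, A, D, x, lbda, tau, sigma, rho=1.0):
    """One Condat-Vu iteration (primal u, dual v)."""
    grad_u = (u.dot(A) - x).dot(A.T)             # gradient of the smooth term
    u_next = u - tau * (grad_u + v.dot(D.T))     # primal descent step
    v_tmp = v + sigma * (2 * u_next - u).dot(D)  # dual step at extrapolated point
    v_next = np.clip(v_tmp, -lbda, lbda)         # prox of (lbda*||.||_1)^*
    # over-relaxation; rho=1.0 recovers the plain iteration
    return rho * u_next + (1 - rho) * u, rho * v_next + (1 - rho) * v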
Example No. 4
def test_coherence_training_analysis_loss(parametrization, lbda, n):
    """ Test coherence regarding the loss function between learnt and fixed
    algorithms. """
    rng = check_random_state(None)
    x, _, _, _, D, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)

    _, u0, _ = init_vuz(A, D, x)
    train_loss = [analysis_primal_obj(u0, A, D, x, lbda)]
    train_loss_ = [analysis_primal_obj(u0, A, D, x, lbda)]

    for n_layers in range(1, 10):
        lista = LearnTVAlgo(algo_type=parametrization,
                            A=A,
                            n_layers=n_layers,
                            max_iter=10)
        lista.fit(x, lbda=lbda)
        train_loss_.append(lista.training_loss_[-1])
        u = lista.transform(x, lbda, output_layer=n_layers)
        train_loss.append(analysis_primal_obj(u, A, D, x, lbda))

    np.testing.assert_allclose(train_loss_, train_loss, atol=1e-30)
Example No. 5
def test_analysis_subgrad(n, lbda):
    """ Test the sub-gradient of LASSO. """
    rng = check_random_state(None)
    x, u, _, _, D, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)
    u = rng.rand(*u.shape)
    n_atoms = D.shape[0]

    def finite_grad(u):
        def f(u):
            u = u.reshape(n, n_atoms)
            # the actual considered loss is not normalized but for
            # convenience we want to check the sample-loss average
            return analysis_primal_obj(u, A, D, x, lbda=lbda) * n

        grad = approx_fprime(xk=u.ravel(), f=f, epsilon=1.0e-6)
        return grad.reshape(n, n_atoms)

    grad_ref = finite_grad(u)
    grad_test = analysis_primal_subgrad(u, A, D, x, lbda)

    np.testing.assert_allclose(grad_ref, grad_test, atol=1e-5)  # bad precision
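
The finite-difference pattern above recurs in Examples No. 6 and 9. Below
is a small self-contained helper that factors it out for any matrix-shaped
variable; the epsilon and tolerance defaults mirror the values used in
these tests.

import numpy as np
from scipy.optimize import approx_fprime

def check_matrix_grad(f, grad, w0, epsilon=1e-6, atol=1e-4):
    """Compare an analytic gradient against a finite-difference estimate.

    f maps a matrix to a scalar; grad maps it to a matrix of the same shape.
    """
    shape = w0.shape
    grad_ref = approx_fprime(w0.ravel(), lambda w: f(w.reshape(shape)),
                             epsilon).reshape(shape)
    np.testing.assert_allclose(grad_ref, grad(w0), atol=atol)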
Example No. 6
def test_synthesis_grad(n, m):
    """ Test the gradient of LASSO. """
    rng = check_random_state(None)
    x, _, z, L, D, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)
    z = rng.rand(*z.shape)
    n_atoms = D.shape[0]

    def finite_grad(z):
        def f(z):
            z = z.reshape(n, n_atoms)
            # the actual considered loss is not normalized but for
            # convenience we want to check the sample-loss average
            return synthesis_primal_obj(z, A, L, x, lbda=0.0) * n

        grad = approx_fprime(xk=z.ravel(), f=f, epsilon=1e-6)
        return grad.reshape(n, n_atoms)

    grad_ref = finite_grad(z)
    grad_test = synthesis_primal_grad(z, A, L, x)

    np.testing.assert_allclose(grad_ref, grad_test, rtol=5e-2)  # bad precision
Example No. 7
def test_untrained_synthesis_lista(lbda, parametrization, n):
    """ Test the gradient of z. """
    rng = check_random_state(None)
    x, _, _, L, D, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)
    _, _, z0 = init_vuz(A, D, x)

    n_layers = 10
    LA = L.dot(A)
    step_size = 1.0 / np.linalg.norm(LA, ord=2)**2

    lista = LearnTVAlgo(algo_type=parametrization,
                        A=A,
                        n_layers=n_layers,
                        device='cpu')
    loss_untrained_lista = [
        synthesis_primal_obj(z=z0, A=A, L=L, x=x, lbda=lbda)
    ]
    for n_layer_ in range(1, n_layers + 1):
        z_hat = lista.transform(x=x, lbda=lbda, output_layer=n_layer_)
        loss_untrained_lista.append(
            synthesis_primal_obj(z=z_hat, A=A, L=L, x=x, lbda=lbda))
    loss_untrained_lista = np.array(loss_untrained_lista)

    params = dict(
        grad=lambda z: synthesis_primal_grad(z, A, L, x),
        obj=lambda z: synthesis_primal_obj(z, A, L, x, lbda),
        prox=lambda z, s: pseudo_soft_th_numpy(z, lbda, s),
        x0=z0,
        momentum=None,
        restarting=None,
        max_iter=n_layers,
        step_size=step_size,
        early_stopping=False,
        debug=True,
        verbose=0,
    )
    _, loss_ista = fista(**params)

    np.testing.assert_allclose(loss_ista, loss_untrained_lista, atol=1e-20)
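
For reference, the fixed-point iteration the untrained network is expected
to reproduce is plain ISTA on the synthesis objective: a gradient step on
0.5*||z L A - x||^2 followed by soft-thresholding. The sketch below uses an
ordinary elementwise soft-threshold; the project's pseudo_soft_th_numpy may
behave differently (for instance by leaving one coefficient unpenalized),
so treat this as a generic illustration.

import numpy as np

def soft_thresholding(z, mu):
    """Elementwise soft-threshold: the prox of mu*||.||_1."""
    return np.sign(z) * np.maximum(np.abs(z) - mu, 0.0)

def ista_synthesis(z0, A, L, x, lbda, step_size, max_iter):
    """Plain ISTA for min_z 0.5*||z L A - x||^2 + lbda*||z||_1 (sketch)."""
    LA = L.dot(A)
    z = z0.copy()
    for _ in range(max_iter):
        grad = (z.dot(LA) - x).dot(LA.T)  # gradient of the smooth term
        z = soft_thresholding(z - step_size * grad, step_size * lbda)
    return z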
Example No. 8
def test_coherence_init(lbda, seed):
    """ Test that the (v, u, z) initializations yield coherent costs. """
    rng = check_random_state(seed)
    x, _, _, L, D, A = synthetic_1d_dataset()

    v0 = None
    v0, u0, z0 = init_vuz(A, D, x, v0=v0)
    cost_1 = synthesis_primal_obj(z0, A, L, x, lbda)
    cost_2 = analysis_primal_obj(u0, A, D, x, lbda)
    cost_3 = analysis_primal_obj(v_to_u(v0, x, A, D), A, D, x, lbda)

    np.testing.assert_allclose(cost_1, cost_2)
    np.testing.assert_allclose(cost_1, cost_3)

    v0 = rng.randn(*v0.shape)
    v0, u0, z0 = init_vuz(A, D, x, v0=v0)
    cost_1 = synthesis_primal_obj(z0, A, L, x, lbda)
    cost_2 = analysis_primal_obj(u0, A, D, x, lbda)
    cost_3 = analysis_primal_obj(v_to_u(v0, x, A, D), A, D, x, lbda)

    np.testing.assert_allclose(cost_1, cost_2)
    np.testing.assert_allclose(cost_1, cost_3)
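
The coherence asserted here rests on the synthesis/analysis change of
variables u = zL: with the row-vector conventions used throughout, both
primal objectives take the same value on a consistent (v, u, z) triple.
A toy illustration under the standard 1D-TV choice where L integrates and
D differentiates (an assumption about this project's operators):

import numpy as np

rng = np.random.RandomState(0)
z = rng.randn(3, 5)                # synthesis codes
u = np.cumsum(z, axis=1)           # u = zL with L an "integration" operator
# Finite differences of u recover the codes past the first coefficient,
# which is why the synthesis l1 penalty matches the analysis TV penalty.
np.testing.assert_allclose(np.diff(u, axis=1), z[:, 1:])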
Example No. 9
def test_analysis_dual_grad(n, lbda):
    """ Test the gradient of dual analysis. """
    rng = check_random_state(None)
    x, _, _, _, D, A = synthetic_1d_dataset(n=n, s=0.5, snr=0.0, seed=rng)
    eps = 1e-3
    v_dim = D.shape[1]
    # draw a dual point strictly inside the feasible lbda-ball so the dual
    # objective stays finite and differentiable around v
    v = np.clip(rng.randn(n, v_dim), -(lbda - eps), (lbda - eps))
    Psi_A = np.linalg.pinv(A).dot(D)

    # Finite grad v
    def finite_grad(v):
        def f(v):
            v = v.reshape(n, v_dim)
            # the actual considered loss is not normalized but for
            # convenience we want to check the sample-loss average
            return analysis_dual_obj(v, A, D, x, lbda, Psi_A=Psi_A) * n

        grad = approx_fprime(xk=v.ravel(), f=f, epsilon=1.0e-6)
        return grad.reshape(n, v_dim)

    grad_ref = finite_grad(v)
    grad_test = analysis_dual_grad(v, A, D, x, Psi_A=Psi_A)

    np.testing.assert_allclose(grad_ref, grad_test, atol=1e-4)  # bad precision
Example No. 10
def run_experiment(max_iter,
                   max_iter_ref=1000,
                   lmbd=0.1,
                   seed=None,
                   net_solver_type='recursive',
                   n_jobs=1,
                   device=None):
    # Define variables
    n_samples_train = 1000
    n_samples_testing = 1000
    n_samples = n_samples_train + n_samples_testing
    n_atoms = 8
    n_dim = 5
    s = 0.2
    snr = 0.0

    # Layers that are sampled
    all_n_layers = logspace_layers(n_layers=10, max_depth=40)

    timestamp = datetime.now()

    print(__doc__)
    print('*' * 80)
    print(f"Script started on: {timestamp.strftime('%Y/%m/%d %Hh%M')}")

    if seed is None:
        seed = np.random.randint(0, 1000)
    print(f'Seed used = {seed}')

    # Store meta data of the problem
    meta_pb = dict(n_atoms=n_atoms,
                   n_dim=n_dim,
                   s=s,
                   snr=snr,
                   seed=seed,
                   n_samples_train=n_samples_train,
                   n_samples_testing=n_samples_testing)

    # Generate data
    x, _, z, L, D, A = synthetic_1d_dataset(n_atoms=n_atoms,
                                            n_dim=n_dim,
                                            n=n_samples,
                                            s=s,
                                            snr=snr,
                                            seed=seed)

    x_train = x[n_samples_testing:, :]
    x_test = x[:n_samples_testing, :]

    learning_parameters = dict(net_solver_type=net_solver_type,
                               max_iter=max_iter)

    methods = {
        'lista_synthesis': {
            'label': 'Synthesis LISTA',
            'network': CoupledIstaLASSO,
            'extra_args': dict(**learning_parameters),
            'style': dict(color='tab:orange', marker='*', linestyle='-')
        },
        'lpgd_taut': {
            'label': 'Analysis LPGD - taut-string',
            'network': LpgdTautString,
            'extra_args': dict(**learning_parameters),
            'style': dict(color='tab:red', marker='*', linestyle='-.')
        },
        'ista_synthesis': {
            'label': 'Synthesis ISTA',
            'network': IstaSynthesis,
            'extra_args': dict(momentum=None),
            'style': dict(color='tab:orange', marker='s', linestyle='--')
        },
        'fista_synthesis': {
            'label': 'Synthesis FISTA',
            'network': IstaSynthesis,
            'extra_args': dict(momentum='fista'),
            'style': dict(color='tab:orange', marker='*', linestyle='--')
        },
        'ista_analysis': {
            'label': 'Analysis ISTA',
            'network': IstaAnalysis,
            'extra_args': dict(momentum=None),
            'style': dict(color='tab:red', marker='s', linestyle='--')
        },
        'fista_analysis': {
            'label': 'Analysis FISTA',
            'network': IstaAnalysis,
            'extra_args': dict(momentum='fista'),
            'style': dict(color='tab:red', marker='*', linestyle='--')
        },
        # reference cost: 'all_n_layers' overrides the default layer
        # schedule with a single long run
        'reference': {
            'label': 'Analysis FISTA',
            'network': IstaAnalysis,
            'extra_args': dict(momentum='fista'),
            'style': dict(color='tab:red', marker='*', linestyle='--'),
            'all_n_layers': [max_iter_ref]
        }
    }

    # for i, learn_prox in enumerate(['none', 'global', 'per-layer']):
    for i, learn_prox in enumerate(['none']):
        # for n_inner_layers, marker in [(10, '*'), (50, 's'), (100, 'h'),
        #                               (300, 'o'), (500, '>')]:
        for n_inner_layers, marker in [(50, 's'), (20, '*')]:
            methods[f'lpgd_lista_{learn_prox}_{n_inner_layers}'] = {
                'label': f'LPGD - LISTA[{learn_prox}-{n_inner_layers}]',
                'network': ListaTV,
                'extra_args': dict(n_inner_layers=n_inner_layers,
                                   learn_prox=learn_prox,
                                   **learning_parameters),
                'style': dict(color=f'C{i}', marker=marker, linestyle='-'),
            }

    # launch all experiments
    print("=" * 80)
    t0 = time.time()
    results = Parallel(n_jobs=n_jobs)(
        delayed(run_one)(x_train,
                         x_test,
                         A,
                         D,
                         L,
                         lmbd=lmbd,
                         key=k,
                         network=m['network'],
                         extra_args=m['extra_args'],
                         all_n_layers=m.get('all_n_layers', all_n_layers),
                         device=device,
                         meta=meta_pb) for k, m in methods.items())

    # concatenate all results as a big list. Also update style and label
    # here to avoid recomputing the results when changing the style only.
    log = []

    for records in results:
        for rec in records:
            k = rec['key']
            m = methods.get(k)
            if m is None:
                # 'per-layer' variants reuse the matching 'none' entry
                from copy import deepcopy
                m = deepcopy(methods[k.replace('per-layer', 'none')])
                m['style']['color'] = 'C1'
                m['label'] = m['label'].replace('none', 'per-layer')

            rec.update(style=m['style'], label=m['label'])
            log.append(rec)

    # Save the computations in a pickle file
    df = pd.DataFrame(log)
    t_tag = timestamp.strftime('%Y-%m-%d_%Hh%M')
    tag = f'{t_tag}_{lmbd}_{seed}'
    df.to_pickle(OUTPUT_DIR / f'{SCRIPT_NAME}_{tag}.pkl')

    delta_t = time.strftime("%H h %M min %S s", time.gmtime(time.time() - t0))
    print("=" * 80)
    print("Script runs in: {}".format(delta_t))
Example No. 11
    n_samples_testing = n_samples - 1
    n_atoms = 40
    n_dim = 40
    s = 0.1
    snr = 0.0
    all_n_layers = logspace_layers(n_layers=10, max_depth=args.max_iter)
    ticks_layers = np.array([0] + all_n_layers)

    seed = args.seed if args.seed is not None else np.random.randint(0, 1000)
    rng = check_random_state(seed)
    print(f'Seed used = {seed}')  # noqa: E999

    # Generate data
    results = synthetic_1d_dataset(n_atoms=n_atoms,
                                   n_dim=n_dim,
                                   n=n_samples,
                                   s=s,
                                   snr=snr,
                                   seed=seed)
    x, _, z, L, D, A = results

    x_train = x[n_samples_testing:, :]
    x_test = x[:n_samples_testing, :]

    ###########################################################################
    # Main experiment
    methods = [
        ('Synthesis primal PGD', synthesis_iter_algo, 'ista', dict(),
         'tab:blue', 's', 'dashed'),
        ('Synthesis primal APGD', synthesis_iter_algo, 'fista', dict(),
         'tab:blue', 's', 'solid'),
        ('Analysis primal PGD', analysis_primal_iter_algo, 'ista', dict(),