Example #1
# Imports assumed from the adopty project (module paths are inferred); the
# examples below rely on the same names.
import numpy as np

from adopty.datasets import make_coding
from adopty.lista import Lista


def get_steps(n_dim, n_atoms, n_samples, n_test, n_layers, reg, rng, max_iter):
    lista_kwargs = dict(
        n_layers=n_layers,
        max_iter=max_iter)
    x, D, z = make_coding(n_samples=n_samples + n_test, n_atoms=n_atoms,
                          n_dim=n_dim, random_state=rng)
    x_test = x[n_samples:]
    x = x[:n_samples]

    # Reference cost: a very deep (1000-layer) network approximates the
    # optimal lasso cost (computed but not returned here).
    c_star = Lista(D, 1000).score(x_test, reg)
    L = np.linalg.norm(D, ord=2) ** 2  # Lipschitz constant
    network = Lista(D, **lista_kwargs,
                    parametrization='step', per_layer='oneshot')
    init_score = network.score(x_test, reg)
    print(init_score)
    network.fit(x, reg)
    print(network.score(x_test, reg))
    steps = network.get_parameters(name='step_size')
    z_ = network.transform(x, reg)
    # Unique support patterns over the training codes
    supports = np.unique(z_ != 0, axis=0)
    S_pca = []
    for support in supports:
        D_s = D[support]
        G_s = D_s.T.dot(D_s)
        # Largest eigenvalue of the Gram matrix restricted to the support
        S_pca.append(np.linalg.eigvalsh(G_s)[-1])
    L_s = np.max(S_pca)
    return steps, L, L_s, S_pca
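A minimal driver for get_steps; every value below is an illustrative assumption, not taken from the source.

# Hypothetical driver (illustrative values only).
rng = np.random.RandomState(42)
steps, L, L_s, S_pca = get_steps(n_dim=64, n_atoms=100, n_samples=1000,
                                 n_test=500, n_layers=20, reg=0.1,
                                 rng=rng, max_iter=100)
# 1/L is the safe ISTA step; 1/L_s is the larger step allowed once the
# iterates stay on their supports.
print(1 / L, 1 / L_s)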
Example #2
def get_steps(n_dim, n_atoms, n_samples, n_test, n_layers, reg, rng, max_iter):
    lista_kwargs = dict(n_layers=n_layers, max_iter=max_iter)
    x, D, z = make_coding(n_samples=n_samples + n_test,
                          n_atoms=n_atoms,
                          n_dim=n_dim,
                          random_state=rng)
    x_test = x[n_samples:]
    x = x[:n_samples]

    c_star = Lista(D, 1000).score(x_test, reg)
    L = np.linalg.norm(D, ord=2)**2  # Lipschitz constant
    network = Lista(D,
                    **lista_kwargs,
                    parametrization='step',
                    per_layer='oneshot')
    init_score = network.score(x_test, reg)
    print(init_score)
    network.fit(x, reg)
    print(network.score(x_test, reg))
    steps = network.get_parameters(name='step_size')
    # Largest sparse eigenvalue of each sample's support, at every layer
    L_s = np.zeros((n_layers, n_samples))
    for layer in range(1, n_layers + 1):
        z_ = network.transform(x, reg, output_layer=layer)
        supports = z_ != 0
        S_pca = []
        for support in supports:
            idx = np.where(support)[0]
            D_s = D[idx]
            G_s = D_s.T.dot(D_s)
            S_pca.append(np.linalg.eigvalsh(G_s)[-1])
        L_s[layer - 1] = np.array(S_pca)
    return steps, L, L_s, S_pca
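With the per-layer L_s, the learned step of each layer can be compared with both 1/L and the support-restricted step 1/L_s. A hedged sketch, assuming steps holds one scalar per layer:

# Illustrative comparison; assumes `steps` contains one step size per layer.
steps, L, L_s, _ = get_steps(n_dim=64, n_atoms=100, n_samples=1000,
                             n_test=500, n_layers=20, reg=0.1,
                             rng=np.random.RandomState(0), max_iter=100)
for layer, step in enumerate(steps):
    print(f"layer {layer}: step={float(step):.3f}, 1/L={1 / L:.3f}, "
          f"1/max(L_s)={1 / L_s[layer].max():.3f}")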
Example #3
def compute_loss(n_sample, parametrization):
    # Relies on enclosing-scope variables: x, x_test, D, reg, n_layers,
    # max_iter and training (the per-layer strategy).
    x_train = x[:n_sample]
    lista = Lista(D,
                  n_layers,
                  parametrization=parametrization,
                  max_iter=max_iter,
                  per_layer=training,
                  verbose=1)
    lista.fit(x_train, reg)
    sc = lista.score(x_test, reg)
    print(n_sample, parametrization, sc)
    return sc
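compute_loss closes over script-level names rather than taking them as arguments. A hypothetical enclosing scope that would make it runnable (values are assumptions):

# Hypothetical script-level scope for compute_loss; values are illustrative.
from adopty.datasets import make_coding
from adopty.lista import Lista

n_layers, max_iter, reg, training = 20, 100, 0.1, 'recursive'
x, D, _ = make_coding(n_samples=1500, n_atoms=100, n_dim=64, random_state=0)
x, x_test = x[:1000], x[1000:]

sc = compute_loss(500, 'coupled')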
Example #4
def run_one(parametrization, data, reg, n_layer, max_iter, n_samples, n_test,
            n_atoms, n_dim, per_layer, random_state):

    # try to avoid having dangling memory
    torch.cuda.empty_cache()

    # Spread the computations across the available GPUs. This strategy
    # might fail, and some GPUs might be overloaded, if some workers are
    # re-spawned.
    if N_GPU == 0:
        device = None
    else:
        pid = os.getpid()
        device = f"cuda:{pid % N_GPU}"

    tag = f"[{parametrization} - {n_layer}]"
    current_time = time.time() - START
    msg = f"\r{tag} started at T={current_time:.0f} sec (device={device})"
    print(colorify(msg, BLUE))

    if data == "images":
        x, D, _ = make_image_coding(n_samples=n_samples + n_test,
                                    n_atoms=n_atoms,
                                    normalize=True,
                                    random_state=random_state)
    elif data == "simulations":
        x, D, _ = make_coding(n_samples=n_samples + n_test,
                              n_atoms=n_atoms,
                              n_dim=n_dim,
                              normalize=True,
                              random_state=random_state)
    else:
        raise ValueError(f"unknown data source '{data}'")

    x_test = x[n_samples:]
    x = x[:n_samples]

    network = Lista(D,
                    n_layers=n_layer,
                    parametrization=parametrization,
                    max_iter=max_iter,
                    per_layer=per_layer,
                    device=device,
                    name=parametrization)
    network.fit(x, reg)
    loss = network.score(x_test, reg)
    training_loss = network.training_loss_

    duration = time.time() - START - current_time
    msg = (f"\r{tag} done in {duration:.0f} sec "
           f"at T={current_time:.0f} sec")
    print(colorify(msg, GREEN))

    return (parametrization, data, reg, n_layer, max_iter, n_samples, n_test,
            n_atoms, n_dim, random_state, loss, training_loss)
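run_one is self-contained enough to be dispatched to worker processes; it assumes the script-level imports os, time and torch, plus the globals N_GPU, START, colorify, BLUE and GREEN. A hedged sketch of a driver using joblib as an assumed backend; the source's actual runner is not shown in this excerpt:

# Hypothetical driver; the benchmark's real dispatch code is not shown here.
from itertools import product
from joblib import Parallel, delayed

results = Parallel(n_jobs=4)(
    delayed(run_one)(parametrization, 'simulations', 0.1, n_layer,
                     max_iter=300, n_samples=1000, n_test=500, n_atoms=100,
                     n_dim=64, per_layer='recursive', random_state=27)
    for parametrization, n_layer in product(['coupled', 'step'], [10, 20, 30])
)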
Example #5
def test_save(parametrization, learn_th):
    n_dim = 20
    n_atoms = 10
    n_samples = 1000
    random_state = 42

    reg = .1
    n_layers = 4

    if "step" in parametrization and not learn_th:
        pytest.skip(msg="For parametrization 'step' and 'coupled_step', "
                    "learn_th need to be set to True.")

    # Generate a problem
    x, D, z = make_coding(n_samples=n_samples,
                          n_atoms=n_atoms,
                          n_dim=n_dim,
                          random_state=random_state)

    lista = Lista(D,
                  n_layers,
                  parametrization=parametrization,
                  learn_th=learn_th,
                  max_iter=15)
    lista.fit(x, reg)
    parameters = lista.export_parameters()

    lista_ = Lista(D,
                   n_layers,
                   parametrization=parametrization,
                   learn_th=learn_th,
                   initial_parameters=parameters)
    parameters_ = lista_.export_parameters()
    assert np.all([
        np.allclose(pl[k], pl_[k]) for pl, pl_ in zip(parameters, parameters_)
        for k in pl
    ])

    cost_lista = lista.score(x, reg)
    cost_lista_ = lista_.score(x, reg)
    assert np.allclose(cost_lista, cost_lista_)

    z_lista = lista.transform(x, reg)
    z_lista_ = lista_.transform(x, reg)
    atol = abs(z_lista).max() * 1e-6
    assert np.allclose(z_lista, z_lista_, atol=atol)
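Since test_save takes parametrization and learn_th as arguments, it is presumably driven by pytest.mark.parametrize. A plausible decorator, with the grid inferred from the parametrizations seen elsewhere in these examples (the original decorators are not shown here):

# Assumed parameter grid; an illustration of how the test would be driven.
import pytest

@pytest.mark.parametrize('learn_th', [True, False])
@pytest.mark.parametrize('parametrization',
                         ['coupled', 'alista', 'hessian', 'step',
                          'coupled_step', 'first_step'])
def test_save(parametrization, learn_th):
    ...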
Example #6
def get_params(n_samples, n_atoms, n_dim, rng, max_iter, training, n_layers,
               reg):
    # Note: `reg` is added to the signature here; the original excerpt used
    # it without defining it (presumably a script-level global).
    x, D, z = make_coding(n_samples=n_samples,
                          n_atoms=n_atoms,
                          n_dim=n_dim,
                          random_state=rng)

    lista = Lista(D,
                  n_layers,
                  parametrization='coupled',
                  max_iter=max_iter,
                  per_layer=training,
                  verbose=1)
    lista.fit(x, reg)

    W_list = lista.get_parameters('W_coupled')
    thresholds = lista.get_parameters('threshold')
    return D, W_list, thresholds
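A small driver for get_params; values are illustrative, and the inspection only prints parameter norms since the exact shapes returned by get_parameters are not shown in this excerpt:

# Illustrative call; all values are assumptions.
import numpy as np

D, W_list, thresholds = get_params(n_samples=1000, n_atoms=100, n_dim=64,
                                   rng=0, max_iter=100,
                                   training='recursive', n_layers=20,
                                   reg=0.1)
for layer, (W, th) in enumerate(zip(W_list, thresholds)):
    print(layer, np.linalg.norm(np.asarray(W)), np.asarray(th).ravel()[:3])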
Example #7
File: viz.py, Project: tabeworks/adopty
        # (Excerpt begins inside a loop over output layers; z_hat was
        # presumably computed as lista.transform(x, reg, output_layer=i_layer),
        # as in the second loop below.)
        x_hat = z_hat.dot(D)
        path.append(z_hat.dot(D))
        z0 = np.zeros((1, n_atoms))
        z0[0] = 1
        z_hat = lista.transform(x, reg, z0=z0, output_layer=i_layer)
        x_hat2 = z_hat.dot(D)
        path2.append(z_hat.dot(D))
    path = np.array(path)
    path2 = np.array(path2)
    plt.plot(path[:, :n_display, 0], path[:, :n_display, 1], 'C1')
    plt.scatter(x_hat[:n_display, 0], x_hat[:n_display, 1], c='C1')
    plt.plot(path2[:, :n_display, 0], path2[:, :n_display, 1], 'C4')
    plt.scatter(x_hat2[:n_display, 0], x_hat2[:n_display, 1], c='C4')

    lista = Lista(D, n_layers, parametrization="hessian", max_iter=10000)
    lista.fit(x_train, reg)
    cmap = plt.get_cmap('viridis')
    path = []
    for i_layer in range(1, n_layers + 1):
        z_hat = lista.transform(x, reg, output_layer=i_layer)
        x_hat = z_hat.dot(D)
        path.append(z_hat.dot(D))
        # plt.scatter(x_hat[:n_display, 0], x_hat[:n_display, 1],
        #             c=np.array([cmap(i_layer / n_layers)]))
    path = np.array(path)
    plt.plot(path[:, :n_display, 0], path[:, :n_display, 1], 'C2')
    plt.scatter(x_hat[:n_display, 0], x_hat[:n_display, 1], c='C2')

    for k, dk in enumerate(D):
        plt.arrow(0, 0, z_hat[0][k] * dk[0], z_hat[0][k] * dk[1], color="r")
Example #8
    def get_trained_lista(D, x, reg, n_layers, max_iter):
        # Fit an unrolled network with n_layers layers and penalty reg
        lista = Lista(D, n_layers, max_iter=max_iter)
        lista.fit(x, reg)
        return lista
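A usage sketch for the helper above, under the same assumed adopty imports; values are illustrative:

# Illustrative usage (values are assumptions).
from adopty.datasets import make_coding

x, D, _ = make_coding(n_samples=1000, n_atoms=100, n_dim=64, random_state=0)
lista = get_trained_lista(D, x, reg=0.1, n_layers=30, max_iter=100)
print(lista.score(x, 0.1))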
Example #9
    saved_model = {}
    for parametrization in ['alista', 'hessian', 'coupled']:
        lista = Lista(D,
                      n_layers,
                      parametrization=parametrization,
                      max_iter=5000,
                      device=device)

        z_hat_test = lista.transform(x_test, reg)
        cost_test = cost_lasso(z_hat_test, D, x_test, reg)
        print(
            format_cost.format("Un-trained[{}]".format(parametrization),
                               "test", cost_test))

        # Train and evaluate the network
        lista.fit(x, reg, tol=0)
        z_hat_test = lista.transform(x_test, reg)
        cost_test = cost_lasso(z_hat_test, D, x_test, reg)
        print(
            format_cost.format("Trained[{}]".format(parametrization), "test",
                               cost_test))
        saved_model[parametrization] = lista

        z_hat = lista.transform(x, reg)  # training-set codes (unused below)
        # Training curve relative to the optimal lasso cost c_star
        plt.semilogy(lista.training_loss_ - c_star, label=parametrization)

    plt.legend()

    # Save the figure if it is not displayed
    if mpl.get_backend() == 'agg':
        plt.savefig("output.pdf", dpi=300)
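The fitted networks kept in saved_model can be reused without refitting; a short sketch using the same cost_lasso helper as above:

# Illustrative reuse of the fitted networks stored in saved_model.
for name, lista in saved_model.items():
    z_hat_test = lista.transform(x_test, reg)
    print(name, cost_lasso(z_hat_test, D, x_test, reg))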
Example #10
for parametrization in ['first_step', 'step']:
    for per_layer in ['oneshot', 'recursive']:
        #######################################################################
        #  Training
        #
        name = "{} - {}".format(parametrization, per_layer)
        print(80 * "=" + "\n" + name + "\n" + 80 * "=")
        network = Lista(D,
                        **lista_kwargs,
                        name=name,
                        parametrization=parametrization,
                        per_layer=per_layer)
        init_score = network.score(x_test, reg)
        print(init_print.format(init_score))
        network.fit(x, reg)
        losses = np.array([
            network.score(x_test, reg, output_layer=layer + 1)
            for layer in range(n_layers)
        ])
        final_score = network.score(x_test, reg)
        print(final_print.format(final_score, init_score - final_score))

        networks[name] = network

        #######################################################################
        # Compute maximal sparsity eigenvalue
        #
        L_s_list = []
        for layer in range(n_layers):
            z_ = network.transform(x, reg, output_layer=layer + 1)