Code example #1
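Note: the snippets below are excerpts from the adopty project and rely on imports and module-level constants defined in the surrounding scripts. A plausible shared prelude (the adopty.lista path appears verbatim in example #8; the adopty.datasets path for make_coding is an assumption) would be:

import numpy as np
import matplotlib.pyplot as plt

from adopty.lista import Lista            # unrolled (L)ISTA network
from adopty.datasets import make_coding   # assumed module path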
def get_steps(n_dim, n_atoms, n_samples, n_test, n_layers, reg, rng, max_iter):
    lista_kwargs = dict(n_layers=n_layers, max_iter=max_iter)
    x, D, z = make_coding(n_samples=n_samples + n_test,
                          n_atoms=n_atoms,
                          n_dim=n_dim,
                          random_state=rng)
    x_test = x[n_samples:]
    x = x[:n_samples]

    # Reference loss (unused below): an untrained 1000-layer network
    # behaves like 1000 ISTA iterations (cf. example #6)
    c_star = Lista(D, 1000).score(x_test, reg)
    L = np.linalg.norm(D, ord=2)**2  # Lipschitz constant
    network = Lista(D,
                    **lista_kwargs,
                    parametrization='step',
                    per_layer='oneshot')
    init_score = network.score(x_test, reg)
    print(init_score)
    network.fit(x, reg)
    print(network.score(x_test, reg))
    steps = network.get_parameters(name='step_size')
    L_s = np.zeros((n_layers, n_samples))
    for layer in range(1, n_layers + 1):
        z_ = network.transform(x, reg, output_layer=layer)
        supports = z_ != 0  # per-sample support at this layer
        S_pca = []
        for support in supports:
            idx = np.where(support)[0]
            D_s = D[idx]  # atoms active on this support
            G_s = D_s.T.dot(D_s)
            # Top eigenvalue (eigvalsh sorts ascending): the Lipschitz
            # constant of the problem restricted to this support
            S_pca.append(np.linalg.eigvalsh(G_s)[-1])
        L_s[layer - 1] = np.array(S_pca)
    return steps, L, L_s, S_pca  # note: S_pca holds the last layer's values only
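A minimal driver for get_steps might look as follows. All argument values are illustrative, and steps is assumed to be a list of per-layer scalar step sizes as returned by get_parameters:

# Hypothetical usage; parameter values are made up for illustration.
steps, L, L_s, S_pca = get_steps(n_dim=64, n_atoms=100, n_samples=1000,
                                 n_test=500, n_layers=20, reg=0.5,
                                 rng=0, max_iter=300)
print("ISTA step (1 / L):", 1 / L)
print("learned steps:", steps)
# Largest step compatible with every sample's support at each layer
print("support-limited steps (1 / max L_s):", 1 / L_s.max(axis=1))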
Code example #2
def get_steps(n_dim, n_atoms, n_samples, n_test, n_layers, reg, rng, max_iter):
    lista_kwargs = dict(
        n_layers=n_layers,
        max_iter=max_iter)
    x, D, z = make_coding(n_samples=n_samples + n_test, n_atoms=n_atoms,
                          n_dim=n_dim, random_state=rng)
    x_test = x[n_samples:]
    x = x[:n_samples]

    # Reference loss (unused below): an untrained 1000-layer network
    # behaves like 1000 ISTA iterations (cf. example #6)
    c_star = Lista(D, 1000).score(x_test, reg)
    L = np.linalg.norm(D, ord=2) ** 2  # Lipschitz constant
    network = Lista(D, **lista_kwargs,
                    parametrization='step', per_layer='oneshot')
    init_score = network.score(x_test, reg)
    print(init_score)
    network.fit(x, reg)
    print(network.score(x_test, reg))
    steps = network.get_parameters(name='step_size')
    z_ = network.transform(x, reg)  # codes at the final layer
    supports = np.unique(z_ != 0, axis=0)  # unique support patterns
    S_pca = []
    for support in supports:
        D_s = D[support]  # atoms active on this support
        G_s = D_s.T.dot(D_s)
        # Top eigenvalue (eigvalsh sorts ascending): the Lipschitz
        # constant restricted to this support
        S_pca.append(np.linalg.eigvalsh(G_s)[-1])
    L_s = np.max(S_pca)  # worst case over the observed supports
    return steps, L, L_s, S_pca
Code example #3
def compute_loss(n_sample, parametrization):
    # Closure extracted from get_curve (example #5): x, x_test, D, reg,
    # n_layers, max_iter and training come from the enclosing scope.
    x_train = x[:n_sample]
    lista = Lista(D,
                  n_layers,
                  parametrization=parametrization,
                  max_iter=max_iter,
                  per_layer=training,
                  verbose=1)
    lista.fit(x_train, reg)
    sc = lista.score(x_test, reg)
    print(n_sample, parametrization, sc)
    return sc
Code example #4
def run_one(parametrization, data, reg, n_layer, max_iter, n_samples, n_test,
            n_atoms, n_dim, per_layer, random_state):

    # try to avoid having dangling memory
    torch.cuda.empty_cache()

    # Spread the computations over the available GPUs. This strategy
    # might fail and some GPUs might get overloaded if some workers are
    # re-spawned.
    if N_GPU == 0:
        device = None
    else:
        pid = os.getpid()
        device = f"cuda:{pid % N_GPU}"

    tag = f"[{parametrization} - {n_layer}]"
    current_time = time.time() - START
    msg = f"\r{tag} started at T={current_time:.0f} sec (device={device})"
    print(colorify(msg, BLUE))

    if data == "images":
        x, D, _ = make_image_coding(n_samples=n_samples + n_test,
                                    n_atoms=n_atoms,
                                    normalize=True,
                                    random_state=random_state)
    elif data == "simulations":
        x, D, _ = make_coding(n_samples=n_samples + n_test,
                              n_atoms=n_atoms,
                              n_dim=n_dim,
                              normalize=True,
                              random_state=random_state)
    else:
        raise ValueError(f"unknown data source: {data}")

    x_test = x[n_samples:]
    x = x[:n_samples]

    network = Lista(D,
                    n_layers=n_layer,
                    parametrization=parametrization,
                    max_iter=max_iter,
                    per_layer=per_layer,
                    device=device,
                    name=parametrization)
    network.fit(x, reg)
    loss = network.score(x_test, reg)
    training_loss = network.training_loss_

    duration = time.time() - START - current_time
    msg = (f"\r{tag} done in {duration:.0f} sec "
           f"at T={current_time:.0f} sec")
    print(colorify(msg, GREEN))

    return (parametrization, data, reg, n_layer, max_iter, n_samples, n_test,
            n_atoms, n_dim, random_state, loss, training_loss)
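Because run_one takes every experimental knob as an argument and returns a flat result tuple, it composes naturally with joblib for grid searches. A sketch with made-up grid values (N_GPU, START and colorify are module-level objects in the original script):

from itertools import product
from joblib import Parallel, delayed

grid = list(product(['coupled', 'step'],   # parametrization
                    [4, 8, 16, 32]))       # n_layer
results = Parallel(n_jobs=4)(
    delayed(run_one)(parametrization, 'simulations', 0.5, n_layer,
                     max_iter=300, n_samples=1000, n_test=500,
                     n_atoms=100, n_dim=64, per_layer='recursive',
                     random_state=42)
    for parametrization, n_layer in grid)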
Code example #5
def get_curve(n_dim, n_atoms, n, n_samples, n_layers, reg, rng, max_iter,
              training):
    x, D, z = make_coding(n_samples=n,
                          n_atoms=n_atoms,
                          n_dim=n_dim,
                          random_state=rng)
    x_test = x[n_samples[-1]:]
    c_star = Lista(D, 1000).score(x_test, reg)

    @delayed
    def compute_loss(n_sample, parametrization):
        x_train = x[:n_sample]
        lista = Lista(D,
                      n_layers,
                      parametrization=parametrization,
                      max_iter=max_iter,
                      per_layer=training,
                      verbose=1)
        lista.fit(x_train, reg)
        sc = lista.score(x_test, reg)
        print(n_sample, parametrization, sc)
        return sc

    params = ['coupled', 'step']
    op_list = Parallel(n_jobs=n_jobs)(
        compute_loss(n_sample, param)
        for (param, n_sample) in product(params, n_samples))
    loss_lista = op_list[:len(n_samples)]   # results for 'coupled'
    loss_slista = op_list[len(n_samples):]  # results for 'step'
    np.save('lista.npy', loss_lista)
    np.save('slista.npy', loss_slista)
    np.save('c_star.npy', c_star)
    return np.array(loss_lista), np.array(loss_slista), c_star
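A hypothetical call (n_jobs is a module-level constant inside get_curve), plotting sample-complexity curves relative to the Lasso optimum c_star; all values are illustrative:

n_samples = [100, 300, 1000, 3000]
loss_lista, loss_slista, c_star = get_curve(
    n_dim=64, n_atoms=100, n=5000, n_samples=n_samples, n_layers=20,
    reg=0.5, rng=0, max_iter=300, training='recursive')
plt.semilogy(n_samples, loss_lista - c_star, label="LISTA ('coupled')")
plt.semilogy(n_samples, loss_slista - c_star, label="SLISTA ('step')")
plt.xlabel('training samples')
plt.ylabel('test loss - c*')
plt.legend()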
Code example #6
def test_lista_init(reg, n_layers, parametrization, learn_th):

    if parametrization == "alista":
        pytest.skip("ALISTA is not initialized to match ISTA.")
    if "step" in parametrization and not learn_th:
        pytest.skip("For parametrizations 'step' and 'coupled_step', "
                    "learn_th needs to be set to True.")
    n_dim = 20
    n_atoms = 10
    n_samples = 1000
    random_state = 42

    # Generate a problem
    x, D, z = make_coding(n_samples=n_samples,
                          n_atoms=n_atoms,
                          n_dim=n_dim,
                          random_state=random_state)

    z_hat, cost_ista, _ = ista(D, x, reg, max_iter=n_layers)

    lista = Lista(D, n_layers, parametrization=parametrization)
    cost_lista = lista.score(x, reg)
    assert np.isclose(cost_ista[n_layers], cost_lista)
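This test takes its configuration as arguments, which suggests pytest.mark.parametrize decorators; a plausible decoration (the project's actual grids may differ) would be:

@pytest.mark.parametrize('reg', [0.1, 0.5])
@pytest.mark.parametrize('n_layers', [1, 5, 10])
@pytest.mark.parametrize('learn_th', [True, False])
@pytest.mark.parametrize('parametrization',
                         ['coupled', 'alista', 'step', 'coupled_step'])
def test_lista_init(reg, n_layers, parametrization, learn_th):
    ...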
Code example #7
def get_params(n_samples, n_atoms, n_dim, rng, max_iter, training, n_layers,
               reg):
    x, D, z = make_coding(n_samples=n_samples,
                          n_atoms=n_atoms,
                          n_dim=n_dim,
                          random_state=rng)

    lista = Lista(D,
                  n_layers,
                  parametrization='coupled',
                  max_iter=max_iter,
                  per_layer=training,
                  verbose=1)
    lista.fit(x, reg)

    W_list = lista.get_parameters('W_coupled')
    thresholds = lista.get_parameters('threshold')
    return D, W_list, thresholds
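One natural use of the returned parameters, sketched below with illustrative values, is to measure how far the learned weights drift from the dictionary across layers (this assumes each W_coupled entry has the same shape as D):

D, W_list, thresholds = get_params(n_samples=1000, n_atoms=100, n_dim=64,
                                   rng=0, max_iter=300,
                                   training='recursive', n_layers=20,
                                   reg=0.5)
for layer, W in enumerate(W_list):
    drift = np.linalg.norm(W - D) / np.linalg.norm(D)
    print(f"layer {layer}: relative distance to D = {drift:.3f}")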
Code example #8
File: viz.py (project: tabeworks/adopty)
from adopty.lista import Lista
    n_dim = 2
    n_atoms = 3
    n_samples = 1
    n_display = 1

    reg = 0.6

    x, D, z = make_coding(n_samples=n_samples, n_atoms=n_atoms, n_dim=n_dim)
    x_train = np.random.randn(1000, n_dim)

    x = D[:1]

    n_layers = 5
    plot_coding(x[:n_display], D)
    lista = Lista(D, 40000, parametrization="hessian")
    path = []
    path2 = []
for i_layer in range(1, n_layers + 1):
    # Path starting from the default initialization z0 = 0
    z_hat = lista.transform(x, reg, output_layer=i_layer)
    x_hat = z_hat.dot(D)
    path.append(x_hat)
    # Path starting from a different initial point z0
    z0 = np.zeros((1, n_atoms))
    z0[0] = 1
    z_hat = lista.transform(x, reg, z0=z0, output_layer=i_layer)
    x_hat2 = z_hat.dot(D)
    path2.append(x_hat2)
    path = np.array(path)
    path2 = np.array(path2)
    plt.plot(path[:, :n_display, 0], path[:, :n_display, 1], 'C1')
    plt.scatter(x_hat[:n_display, 0], x_hat[:n_display, 1], c='C1')
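The excerpt computes path2 and x_hat2 (the trajectory started from the alternative z0) but plots only the first path; the symmetric plotting lines would presumably be:

plt.plot(path2[:, :n_display, 0], path2[:, :n_display, 1], 'C2')
plt.scatter(x_hat2[:n_display, 0], x_hat2[:n_display, 1], c='C2')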
Code example #9
File: plot_loss_layers.py (project: tabeworks/adopty)
n_layers = 16
reg = 0.5
rng = 0
x, D, z = make_coding(n_samples=n_samples,
                      n_atoms=n_atoms,
                      n_dim=n_dim,
                      random_state=rng)
x_train = x[:n_samples_train]
x_test = x[n_samples_train:]
# Train
saved_model = {}
parametrizations = ['coupled', 'step', 'alista']
for parametrization in parametrizations:
    lista = Lista(D,
                  n_layers,
                  parametrization=parametrization,
                  max_iter=100,
                  device=device)
    t0 = time()
    lista.fit(x_train, reg)
    print('Fitting model "{}" took {:3.0f} sec'.format(
        parametrization,
        time() - t0))
    saved_model[parametrization] = lista
plt.figure()
plt.xlabel('layers')
plt.ylabel('Test loss')
c_star = cost_lasso(Lista(D, 1000).transform(x_test, reg), D, x_test, reg)
for parametrization in parametrizations:
    lista = saved_model[parametrization]
    loss_list = []
Code example #10
    def get_trained_lista(D, x, reg, n_layers, max_iter):

        lista = Lista(D, n_layers, max_iter=max_iter)
        lista.fit(x, reg)
        return lista
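The commented-out @mem.cache in example #11 hints that this helper was meant to be memoized with joblib; a sketch of that setup (the cache directory is an arbitrary choice):

from joblib import Memory

mem = Memory('.cache', verbose=0)
get_trained_lista = mem.cache(get_trained_lista)  # re-runs only on new arguments
lista = get_trained_lista(D, x, reg, n_layers=30, max_iter=1000)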
Code example #11
x_min = min(-2, x_m)  # assumed, by symmetry with y_min below
x_max = max(2, x_M)
y_min = min(-2, y_m)
y_max = max(2, y_M)
n_samples = 500

x_list = np.linspace(x_min, x_max, n_samples)
y_list = np.linspace(y_min, y_max, n_samples)

grid = np.meshgrid(x_list, y_list)
x_plot = np.c_[grid[0].ravel(), grid[1].ravel()]

n_reg = 100
for enum, reg in enumerate(np.linspace(0, 1, n_reg)[1:]):
    print(enum / n_reg)
    x_r = x_d * reg
    z_lasso = Lista(D, 1000).transform(x_plot, reg)
    loss_fit = loss_lasso(z_lasso, x_plot, reg)

    ista = Lista(D, n_layers)
    z_ista = ista.transform(x_plot, reg)
    z_ista_train = ista.transform(x, reg)
    loss_ista = loss_lasso(z_ista, x_plot, reg)
    avg_loss = np.mean(loss_lasso(z_ista_train, x, reg))
    print('Ista training loss: {}'.format(avg_loss))

    # @mem.cache
    def get_trained_lista(D, x, reg, n_layers, max_iter):

        lista = Lista(D, n_layers, max_iter=max_iter)
        lista.fit(x, reg)
        return lista
Code example #12
def get_c_star(x, D, z, reg, n_iter=10000, device=None):
    ista = Lista(D, n_iter, parametrization="coupled", device=device)
    return ista.score(x, reg, z0=z)
Code example #13
    x, D, z = make_coding(n_samples=n_samples, n_atoms=n_atoms, n_dim=n_dim)
    reg = .5
    n_layers = 3

    x_test = np.random.randn(*x.shape)
    x_test /= np.max(abs(x_test.dot(D.T)), axis=1, keepdims=True)

    format_cost = "{}: {} cost = {:.3e}"
    c_star = get_c_star(x, D, z, reg, device=device)

    saved_model = {}
    for parametrization in ['alista', 'hessian', 'coupled']:
        lista = Lista(D,
                      n_layers,
                      parametrization=parametrization,
                      max_iter=5000,
                      device=device)

        z_hat_test = lista.transform(x_test, reg)
        cost_test = cost_lasso(z_hat_test, D, x_test, reg)
        print(
            format_cost.format("Un-trained[{}]".format(parametrization),
                               "test", cost_test))

        # Train and evaluate the network
        lista.fit(x, reg, tol=0)
        z_hat_test = lista.transform(x_test, reg)
        cost_test = cost_lasso(z_hat_test, D, x_test, reg)
        print(
            format_cost.format("Trained[{}]".format(parametrization), "test",
                               cost_test))
Code example #14
def test_save(parametrization, learn_th):
    n_dim = 20
    n_atoms = 10
    n_samples = 1000
    random_state = 42

    reg = .1
    n_layers = 4

    if "step" in parametrization and not learn_th:
        pytest.skip(msg="For parametrization 'step' and 'coupled_step', "
                    "learn_th need to be set to True.")

    # Generate a problem
    x, D, z = make_coding(n_samples=n_samples,
                          n_atoms=n_atoms,
                          n_dim=n_dim,
                          random_state=random_state)

    lista = Lista(D,
                  n_layers,
                  parametrization=parametrization,
                  learn_th=learn_th,
                  max_iter=15)
    lista.fit(x, reg)
    parameters = lista.export_parameters()

    lista_ = Lista(D,
                   n_layers,
                   parametrization=parametrization,
                   learn_th=learn_th,
                   initial_parameters=parameters)
    parameters_ = lista_.export_parameters()
    assert np.all([
        np.allclose(pl[k], pl_[k]) for pl, pl_ in zip(parameters, parameters_)
        for k in pl
    ])

    cost_lista = lista.score(x, reg)
    cost_lista_ = lista_.score(x, reg)
    assert np.allclose(cost_lista, cost_lista_)

    z_lista = lista.transform(x, reg)
    z_lista_ = lista_.transform(x, reg)
    atol = abs(z_lista).max() * 1e-6
    assert np.allclose(z_lista, z_lista_, atol=atol)
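Since export_parameters returns a plain list of per-layer dictionaries (the assertion above iterates over them as such), a trained network can also be persisted to disk; a sketch assuming the exported values are numpy arrays and therefore picklable:

import pickle

with open('lista_params.pkl', 'wb') as f:
    pickle.dump(lista.export_parameters(), f)
with open('lista_params.pkl', 'rb') as f:
    params = pickle.load(f)
lista_restored = Lista(D, n_layers, parametrization=parametrization,
                       learn_th=learn_th, initial_parameters=params)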
Code example #15
File: plot_steps.py (project: tabeworks/adopty)
lista_kwargs = dict(n_layers=n_layers, max_iter=3000)

init_print = 'Initial loss: {:.2e}'
final_print = 'Final loss: {:.2e}, delta = {:.2e}'

#######################################################################
#  Generate samples
#
x, D, z = make_coding(n_samples=n_samples + n_test,
                      n_atoms=n_atoms,
                      n_dim=n_dim,
                      random_state=rng)
x_test = x[n_samples:]
x = x[:n_samples]

c_star = Lista(D, 1000).score(x_test, reg)
L = np.linalg.norm(D, ord=2)**2  # Lipschitz constant

networks = {}

for parametrization in ['first_step', 'step']:
    for per_layer in ['oneshot', 'recursive']:
        #######################################################################
        #  Training
        #
        name = "{} - {}".format(parametrization, per_layer)
        print(80 * "=" + "\n" + name + "\n" + 80 * "=")
        network = Lista(D,
                        **lista_kwargs,
                        name=name,
                        parametrization=parametrization,