Example #1
def freeze_A_to_solution_and_fit():
    # used to debug infs
    # from tests.test_vi import test_elbo_components, test_q_E_logstick

    SCALE = 1.

    N = 500
    X = generate_gg_blocks_dataset(N, 0.05)

    model = InfiniteIBP(1.5, 6, 0.1, 0.05, 36)
    model.phi.data[:4] = SCALE * gg_blocks()
    model.init_z(N)
    model.train()

    visualize_A_save(model.phi.detach().numpy(), 0)
    visualize_nu_save(model.nu.detach().numpy(), 0)

    # this Adam optimizer is never stepped in the CAVI loop below; see the
    # sketch after this example for the presumable gradient phase
    optimizer = torch.optim.Adam(model.parameters(), 0.1)

    for i in range(20):
        model.cavi(X)
        print("[Epoch {:<3}] ELBO = {:.3f}".format(i + 1,
                                                   model.elbo(X).item()))

    print("CHANGE OF REGIME")

    visualize_A_save(model.phi.detach().numpy(), 20)
    visualize_nu_save(model.nu.detach().numpy(), 20)
    import ipdb
    ipdb.set_trace()
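
    # Sketch (added, not in the original): after the "CHANGE OF REGIME" print,
    # the unused Adam optimizer above presumably takes over from CAVI with
    # gradient ascent on the ELBO, mirroring Example #2:
    for i in range(1000):
        optimizer.zero_grad()
        loss = -model.elbo(X)
        loss.backward()
        optimizer.step()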
Example #2
def show_that_ADVI_init_doesnt_matter():
    SCALE = 1.

    N = 500
    X = generate_gg_blocks_dataset(N, 0.05)

    model = InfiniteIBP(1.5, 6, 0.1, 0.05, 36)
    model.phi.data[:4] = SCALE * gg_blocks()
    model.init_z(N)
    model.train()

    visualize_A_save(model.phi.detach().numpy(), 0)
    visualize_nu_save(model.nu.detach().numpy(), 0)

    optimizer = torch.optim.Adam(model.parameters(), 0.003)

    for i in range(1000):
        optimizer.zero_grad()
        loss = -model.elbo(X)
        print("[Epoch {:<3}] ELBO = {:.3f}".format(i + 1, -loss.item()))
        loss.backward()

        optimizer.step()

        assert loss.item() != np.inf, "loss is inf"

    visualize_A_save(model.phi.detach().numpy(), 1000)
    visualize_nu_save(model.nu.detach().numpy(), 1000)
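
    # Sketch (added): the in-loop assert above only trips on +inf; a check
    # with torch.isfinite also catches NaN and -inf:
    assert torch.isfinite(loss).item(), "loss is not finite"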
Example #3
def vae_test():

    N = 1500
    X = generate_gg_blocks_dataset(N, 0.05)

    model = InfiniteIBP_VAE(1.5, 6, 0.1, 0.05, 36)
    model.train()

    nu = model.nu(X)
    visualize_A_save(model.phi.detach().numpy(), 0)
    visualize_nu_save(nu.detach().numpy(), 0)

    optimizer = torch.optim.Adam([{'params': [model._tau]},
                                  {'params': model.nu.parameters()},
                                  {'params': [model._phi_var, model.phi], 'lr': 0.003}], lr=0.1)

    elbo_array = []
    iter_count = 0
    for j in range(15):
        for i in range(1000):
            optimizer.zero_grad()
            loss = -model.elbo(X)

            print("[Epoch {:<3}] ELBO = {:.3f}".format(i + 1, -loss.item()))
            loss.backward()

            optimizer.step()

            iter_count += 1
            assert loss.item() != np.inf, "loss is inf"
            elbo_array.append(-loss.item())

        visualize_A_save(model.phi.detach().numpy(), iter_count)
        visualize_nu_save(model.most_recent_nu.detach().numpy(), iter_count)
        model.nu.apply(init_weights)

    plt.plot(np.arange(len(elbo_array)), np.array(elbo_array))
    plt.show()
    import ipdb
    ipdb.set_trace()
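
# Sketch (added): init_weights is applied above via model.nu.apply(init_weights)
# but is not defined in this snippet. A common pattern, assuming the nu network
# is built from nn.Linear layers:
import torch.nn as nn

def init_weights(m):
    if isinstance(m, nn.Linear):
        nn.init.xavier_uniform_(m.weight)
        if m.bias is not None:
            nn.init.zeros_(m.bias)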
Example #4
def find_a_better_scheme():
    SCALE = 1.

    N = 500
    X = generate_gg_blocks_dataset(N, 0.05)

    model = InfiniteIBP(1.5, 6, 0.1, 0.05, 36)
    model.init_z(N)
    model.train()

    visualize_A_save(model.phi.detach().numpy(), 0)
    visualize_nu_save(model.nu.detach().numpy(), 0)

    optimizer = torch.optim.Adam([{'params': [model._nu, model._tau]},
                                  {'params': [model._phi_var, model.phi], 'lr': 0.003}], lr=0.1)

    elbo_array = []
    iter_count = 0
    for j in range(6):
        for i in range(1000):
            optimizer.zero_grad()
            loss = -model.elbo(X)
            print("[Epoch {:<3}] ELBO = {:.3f}".format(i + 1, -loss.item()))
            loss.backward()

            optimizer.step()

            iter_count += 1
            assert loss.item() != np.inf, "loss is inf"
            elbo_array.append(-loss.item())

        visualize_A_save(model.phi.detach().numpy(), iter_count)
        visualize_nu_save(model.nu.detach().numpy(), iter_count)
        model._nu.data = torch.randn(model._nu.shape)  # re-randomize nu between rounds

    plt.plot(np.arange(len(elbo_array)), np.array(elbo_array))
    plt.show()
    import ipdb
    ipdb.set_trace()
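
# Sketch (added): the two parameter groups above give nu/tau the default
# lr=0.1 while phi and its variance train at 0.003. A self-contained
# illustration of per-group learning rates with dummy parameters:
import torch

fast = torch.nn.Parameter(torch.zeros(3))
slow = torch.nn.Parameter(torch.zeros(3))
opt = torch.optim.Adam([{'params': [fast]},                # default lr
                        {'params': [slow], 'lr': 0.003}],  # per-group override
                       lr=0.1)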
Example #5
def check_gradient_of_tau_after_some_updates():
    from matplotlib import pyplot as plt

    SCALE = 1.

    N = 500
    X = generate_gg_blocks_dataset(N, 0.05)

    model = InfiniteIBP(1.5, 6, 0.1, 0.05, 36)
    model.init_z(N)
    model.train()

    optimizer = torch.optim.Adam([{'params': [model._nu, model._tau]},
                                  {'params': [model._phi_var, model.phi], 'lr': 0.003}], lr=0.1)

    elbo_array = []
    iter_count = 0
    # values = np.zeros((6 * 3000, 12))
    for j in range(6):
        for i in range(1000):
            optimizer.zero_grad()
            loss = -model.elbo(X)
            print("[Epoch {:<3}] ELBO = {:.3f}".format(i + 1, -loss.item()))
            loss.backward()

            optimizer.step()

            assert loss.item() != np.inf, "loss is inf"
            elbo_array.append(-loss.item())
            # values[iter_count] = model.tau.detach().numpy().reshape((-1,))
            iter_count += 1

        # plt.figure()
        # for i in range(12):
        #     plt.plot(np.arange(3000), values[j*3000:(j + 1)*3000, i])
        # plt.savefig('tau_set_{}.png'.format(j))

        visualize_A_save(model.phi.detach().numpy(), iter_count)
        visualize_nu_save(model.nu.detach().numpy(), iter_count)
        model._nu.data = torch.randn(model._nu.shape)
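
# Sketch (added): writing to ._nu.data works, but the more idiomatic reset
# copies in place under no_grad:
with torch.no_grad():
    model._nu.copy_(torch.randn_like(model._nu))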
Example #6
def freeze_A_to_solution_and_fit():
    # used to debug infs
    # from tests.test_vi import test_elbo_components, test_q_E_logstick

    SCALE = 1.

    N = 500
    X = generate_gg_blocks_dataset(N, 0.05)

    model = InfiniteIBP(1.5, 6, 0.1, 0.05, 36)
    model.phi.data[:4] = SCALE * gg_blocks()
    model.init_z(N)
    model.train()

    visualize_A_save(model.phi.detach().numpy(), 0)
    visualize_nu_save(model.nu.detach().numpy(), 0)

    optimizer = torch.optim.Adam(model.parameters(), 0.1)

    for i in range(1000):
        optimizer.zero_grad()
        loss = -model.elbo(X)
        print("[Epoch {:<3}] ELBO = {:.3f}".format(i + 1, -loss.item()))
        loss.backward()

        # zero out the grad on phi, phi_var
        model.phi.grad.zero_()
        model._phi_var.grad.zero_()

        optimizer.step()
        assert loss.item() != np.inf, "loss is inf"

    print("CHANGE OF REGIME")

    visualize_A_save(model.phi.detach().numpy(), 1000)
    visualize_nu_save(model.nu.detach().numpy(), 1000)

    for param_group in optimizer.param_groups:
        param_group['lr'] = 0.001

    for i in range(500):
        optimizer.zero_grad()
        loss = -model.elbo(X)
        print("[Epoch {:<3}] ELBO = {:.3f}".format(i + 1, -loss.item()))
        loss.backward()

        optimizer.step()
        assert loss.item() != np.inf, "loss is inf"

    visualize_A_save(model.phi.detach().numpy(), 1500)
    visualize_nu_save(model.nu.detach().numpy(), 1500)
    import ipdb; ipdb.set_trace()
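
# Sketch (added): instead of zeroing phi's gradients after every backward()
# as above, the freeze can be declared up front; parameters whose grad stays
# None are skipped by the optimizer:
model.phi.requires_grad_(False)
model._phi_var.requires_grad_(False)
# ... first training regime ...
model.phi.requires_grad_(True)    # unfreeze for the second regime
model._phi_var.requires_grad_(True)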
Example #7
def find_a_better_scheme(nu_resets=False, tempering=False):
    from matplotlib import pyplot as plt

    print("")
    print("")
    print("")
    print("NU RESETS:", nu_resets)
    print("TEMPERING:", tempering)

    N = 500
    sigma_n = 0.05
    print("DATA SIZE:", N)
    print("DATA NOISE:", sigma_n)
    X = generate_gg_blocks_dataset(N, sigma_n)

    alpha = 1.5
    K = 6
    D = 36
    sigma_a = 0.1
    print("ALPHA:", alpha)
    print("K:", K)
    print("SIGMA N:", sigma_n)
    print("SIGMA A:", sigma_a)

    model = InfiniteIBP(alpha, K, sigma_a, sigma_n, D)
    model.init_z(N)

    if tempering:
        print("INIT TEMPERING PARAMS")
        M = 10
        print("NUM TEMPERATURES:", M)
        model.init_r_and_T(N, M)

    model.train()

    visualize_A_save(model.phi.detach().numpy(), 0)
    visualize_nu_save(model.nu.detach().numpy(), 0)

    if tempering:
        print("Initializing optimizer with tempering params included")
        optimizer = torch.optim.Adam([{'params': [model._nu, model._tau, model._r]},
                                      {'params': [model._phi_var, model.phi], 'lr': 0.003}], lr=0.1)
    else:
        optimizer = torch.optim.Adam([{'params': [model._nu, model._tau]},
                                      {'params': [model._phi_var, model.phi], 'lr': 0.003}], lr=0.1)

    elbo_array = []
    iter_count = 0
    for j in range(6):
        for i in range(1000):
            optimizer.zero_grad()

            elbo = model.elbo(X)
            loss = -elbo
            if tempering:
                # optimize the tempered objective, but log the plain ELBO
                loss = -model.elbo_tempered(X)

            print("[Epoch {:<3}] ELBO = {:.3f}".format(i + 1, elbo.item()))
            loss.backward()

            optimizer.step()

            iter_count += 1
            assert loss.item() != np.inf, "loss is inf"
            elbo_array.append(elbo.item())

        visualize_A_save(model.phi.detach().numpy(), iter_count)
        visualize_nu_save(model.nu.detach().numpy(), iter_count)
        
        if nu_resets:
            model._nu.data = torch.randn(model._nu.shape)

    plt.plot(np.arange(len(elbo_array)), np.array(elbo_array))
    plt.show()
    import ipdb; ipdb.set_trace()
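
# Sketch (added): overlaying a moving average on the ELBO trace makes the
# effect of nu resets easier to see; the window size here is arbitrary:
w = 100
smooth = np.convolve(np.array(elbo_array), np.ones(w) / w, mode='valid')
plt.plot(np.arange(len(elbo_array)), elbo_array, alpha=0.3)
plt.plot(np.arange(w - 1, len(elbo_array)), smooth)
plt.show()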
Example #8
import torch

from src.vi import InfiniteIBP
from src.utils import register_hooks, visualize_A, visualize_A_save, visualize_nu_save
from src.data import generate_gg_blocks, generate_gg_blocks_dataset, gg_blocks

N = 500
X = generate_gg_blocks_dataset(N, 0.05)
model = InfiniteIBP(1.5, 6, 0.1, 0.05, 36)
model.init_z(N)
model.train()
visualize_A_save(model.phi.detach().numpy(), 0)
visualize_nu_save(model.nu.detach().numpy(), 0)

optimizer = torch.optim.Adam([{'params': [model._nu, model._tau]},
                              {'params': [model._phi_var, model.phi], 'lr': 0.003}], lr=0.1)


elbo_array = []
iter_count = 0
for j in range(10):
    for i in range(1000):
        optimizer.zero_grad()
        loss = -model.elbo(X)
        print("[Epoch {:<3}] ELBO = {:.3f}".format(i + 1, -loss.item()))
        loss.backward()

        optimizer.step()