Example #1
import torch
import torch.nn.functional as F
from torch import optim


def VAELoss(target, reconstruction, z_mu, z_logvar):
    # Reconstruction term: summed binary cross-entropy, averaged over the batch
    recon_loss = F.binary_cross_entropy(
        reconstruction, target, reduction='sum') / target.size(0)
    # Closed-form KL divergence from N(z_mu, exp(z_logvar)) to the N(0, I) prior
    kl_loss = torch.mean(
        0.5 * torch.sum(torch.exp(z_logvar) + z_mu**2 - 1.0 - z_logvar, 1))

    return recon_loss + kl_loss
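The kl_loss line is the closed-form KL divergence between the diagonal Gaussian posterior q(z|x) = N(z_mu, exp(z_logvar)) and the standard normal prior, summed over latent dimensions and averaged over the batch:

$$\mathrm{KL}\big(\mathcal{N}(\mu, \mathrm{diag}(e^{\log\sigma^2}))\,\big\|\,\mathcal{N}(0, I)\big) = \frac{1}{2}\sum_{j}\left(e^{\log\sigma_j^2} + \mu_j^2 - 1 - \log\sigma_j^2\right)$$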
Q_solver = optim.Adam(Q.parameters(), lr=lr)
P_solver = optim.Adam(P.parameters(), lr=lr)
T_solver = optim.Adam(T.parameters(), lr=lr)

for it in range(1000000):
    X = sample_X(mb_size)
    eps = torch.randn(mb_size, eps_dim)
    z = torch.randn(mb_size, z_dim)

    # Optimize VAE
    z_sample = Q(torch.cat([X, eps], 1))
    X_sample = P(z_sample)
    T_sample = T(torch.cat([X, z_sample], 1))

    # In AVB, T(X, z) estimates the log density ratio log q(z|X) - log p(z),
    # so -E[T] stands in for the (intractable) negative KL term of the ELBO
    disc = torch.mean(-T_sample)
    loglike = -F.binary_cross_entropy(X_sample, X,
                                      reduction='sum') / mb_size

    # Negative ELBO: minimize -(E[log p(X|z)] - E[T])
    elbo = -(disc + loglike)

    elbo.backward()
    Q_solver.step()
    P_solver.step()
    reset_grad()

    # Discriminator T(X, z)
    z_sample = Q(torch.cat([X, eps], 1))
    T_q = torch.sigmoid(T(torch.cat([X, z_sample], 1)))
    T_prior = torch.sigmoid(T(torch.cat([X, z], 1)))

    # Logistic regression loss: distinguish posterior samples from prior samples
    T_loss = -torch.mean(torch.log(T_q) + torch.log(1. - T_prior))

    T_loss.backward()
    T_solver.step()
    reset_grad()
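Every phase in these loops ends with a reset_grad() call that the snippets never define. A minimal sketch of what it presumably does, assuming Q, P and T are ordinary nn.Module instances (the helper and the nets list are this snippet's convention, not a PyTorch API):

nets = [Q, P, T]

def reset_grad():
    # Clear accumulated gradients on all networks between phases,
    # since each phase calls backward() on a different loss
    for net in nets:
        net.zero_grad()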
Example #3
    # Tail of the sample_X minibatch helper; its definition is truncated above
    if include_y:
        return X, Y
    return X


encoder_optim = optim.Adam(encoder.parameters(), lr=lr)
decoder_optim = optim.Adam(decoder.parameters(), lr=lr)
discriminator_optim = optim.Adam(discriminator.parameters(), lr=lr)

for it in range(1000000):
    X = sample_X(mb_size).to(0)
    X = X.view(X.shape[0], -1)
    """ Reconstruction phase """
    z_sample = encoder(X)
    X_sample = decoder(z_sample)

    recon_loss = F.binary_cross_entropy(X_sample, X)

    recon_loss.backward()
    decoder_optim.step()
    encoder_optim.step()
    reset_grad()
    """ Regularization phase """
    # Discriminator
    z_real = torch.randn(mb_size, z_dim).to(0)
    z_fake = encoder(X).detach()  # detach: this phase updates only the discriminator

    D_real = discriminator(z_real)
    D_fake = discriminator(z_fake)

    D_loss = -torch.mean(torch.log(D_real) + torch.log(1 - D_fake))

    D_loss.backward()
    discriminator_optim.step()
    reset_grad()
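The loop stops after the discriminator update; a full adversarial autoencoder also has a generator step in which the encoder is updated to fool the discriminator. A minimal sketch of that missing step, using the same names as above:

    # Generator phase: push the encoder's codes toward the prior
    # by maximizing log D(encoder(X))
    z_fake = encoder(X)
    D_fake = discriminator(z_fake)

    G_loss = -torch.mean(torch.log(D_fake))

    G_loss.backward()
    encoder_optim.step()
    reset_grad()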


Q_solver = optim.Adam(Q.parameters(), lr=lr)
P_solver = optim.Adam(P.parameters(), lr=lr)
D_solver = optim.Adam(D.parameters(), lr=lr)


for it in range(1000000):
    X = sample_X(mb_size)

    """ Reconstruction phase """
    z_sample = Q(X)
    X_sample = P(z_sample)

    recon_loss = F.binary_cross_entropy(X_sample, X)

    recon_loss.backward()
    P_solver.step()
    Q_solver.step()
    reset_grad()

    """ Regularization phase """
    # Discriminator
    z_real = torch.randn(mb_size, z_dim)
    z_fake = Q(X).detach()  # detach: this phase updates only the discriminator

    D_real = D(z_real)
    D_fake = D(z_fake)

    D_loss = -torch.mean(torch.log(D_real) + torch.log(1 - D_fake))

    D_loss.backward()
    D_solver.step()
    reset_grad()
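torch.log(D_real) and torch.log(1 - D_fake) go to -inf once the discriminator saturates at 0 or 1. A common hedge, not part of the original snippets, is to have D output raw logits and use the numerically stable logits-based BCE instead. A drop-in sketch for the D_loss line above, assuming D no longer applies a final sigmoid:

    logits_real = D(z_real)
    logits_fake = D(z_fake)

    # Real codes get target 1, encoder codes get target 0
    D_loss = (F.binary_cross_entropy_with_logits(logits_real, torch.ones_like(logits_real))
              + F.binary_cross_entropy_with_logits(logits_fake, torch.zeros_like(logits_fake)))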