import torch

# Constant +1/-1 scalars used as gradient targets in the WGAN critic update.
one = torch.FloatTensor([1])
mone = -1 * one
one_var = torch.autograd.Variable(one)
mone_var = torch.autograd.Variable(mone)

fix_noise = torch.FloatTensor(100, NOISE_DIM).normal_(0, 1)
fix_noise_var = torch.autograd.Variable(fix_noise)

if torch.cuda.is_available():
    NetG = NetG.cuda()
    NetD = NetD.cuda()
    one_var = one_var.cuda()
    mone_var = mone_var.cuda()
    fix_noise_var = fix_noise_var.cuda()
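
# The fixed noise above is typically reused across epochs to render comparable
# preview samples, e.g. (an assumed call): samples = NetG(fix_noise_var)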

bar = eup.ProgressBar(EPOCHS, len(train_loader), "D Loss:%.3f;G Loss:%.3f")
for epoch in range(1, EPOCHS + 1):
    for index, (image, label) in enumerate(train_loader):
        real = image
        real_var = torch.autograd.Variable(real)
        noise = torch.randn(real_var.size(0), NOISE_DIM)
        noise_var = torch.autograd.Variable(noise)

        if torch.cuda.is_available():
            real_var = real_var.cuda()
            noise_var = noise_var.cuda()

        # Weight clipping keeps the critic (approximately) Lipschitz.
        for param in NetD.parameters():
            param.data.clamp_(-CLAMP_NUM, CLAMP_NUM)

        NetD.zero_grad()
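
        # A minimal sketch of how the iteration usually continues from here
        # (the excerpt stops above); optimizerD/optimizerG and the critic's
        # scalar output are assumptions, not part of the original snippet.
        errD_real = NetD(real_var).mean()
        errD_real.backward(mone_var)         # ascend on D(real)
        fake_var = NetG(noise_var).detach()  # block gradients into G
        errD_fake = NetD(fake_var).mean()
        errD_fake.backward(one_var)          # descend on D(fake)
        optimizerD.step()

        # Generator step: push D(G(z)) up. (Full WGAN training updates G only
        # once per several critic steps.)
        NetG.zero_grad()
        errG = NetD(NetG(noise_var)).mean()
        errG.backward(mone_var)
        optimizerG.step()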
Example #2
Predict_y = torch.zeros(NUM_CLASSES, 1)  # assumed init; the loop header below
for i in range(NUM_CLASSES - 1):         # is truncated in the source
    temp = torch.ones(NUM_CLASSES, 1) + i
    Predict_y = torch.cat([Predict_y, temp], 0)

Predict_y = one_hot(Predict_y.long())
# predict_noise is not defined in the excerpt; this shape is an assumption.
predict_noise = torch.FloatTensor(Predict_y.size(0), NOISE_DIM).normal_(0, 1)

if torch.cuda.is_available():
    NetD = NetD.cuda()
    NetG = NetG.cuda()
    predict_noise = predict_noise.cuda()
    criterion.cuda()
    Predict_y = Predict_y.cuda()

Predict_Noise_var = torch.autograd.Variable(predict_noise)
Predict_y_var = torch.autograd.Variable(Predict_y)

bar = eup.ProgressBar(EPOCHS, len(dataLoader), "D Loss:%.3f, G Loss:%.3f")
for epoch in range(1, EPOCHS + 1):
    # Decay both learning rates by 10x every ten epochs.
    if epoch % 10 == 0:
        optimizerG.param_groups[0]['lr'] /= 10
        optimizerD.param_groups[0]['lr'] /= 10

    for img_real, label_real in dataLoader:
        mini_batch = label_real.shape[0]

        label_true = torch.ones(mini_batch)
        label_false = torch.zeros(mini_batch)
        label = one_hot(label_real.long().squeeze())
        noise = torch.FloatTensor(mini_batch, NOISE_DIM).normal_(0, 1)

        if torch.cuda.is_available():
            label_true = label_true.cuda()
            label_false = label_false.cuda()
            label = label.cuda()
            noise = noise.cuda()
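
        # A minimal sketch of the remaining conditional-GAN step (the excerpt
        # ends above); the NetD(img, y)/NetG(z, y) signatures, criterion, and
        # optimizer names are assumptions.
        real_var = torch.autograd.Variable(img_real)
        noise_var = torch.autograd.Variable(noise)
        label_var = torch.autograd.Variable(label)
        true_var = torch.autograd.Variable(label_true)
        false_var = torch.autograd.Variable(label_false)
        if torch.cuda.is_available():
            real_var = real_var.cuda()
            noise_var = noise_var.cuda()
            label_var = label_var.cuda()

        NetD.zero_grad()
        d_real = NetD(real_var, label_var).squeeze()
        fake = NetG(noise_var, label_var)
        d_fake = NetD(fake.detach(), label_var).squeeze()
        d_loss = criterion(d_real, true_var) + criterion(d_fake, false_var)
        d_loss.backward()
        optimizerD.step()

        NetG.zero_grad()
        g_loss = criterion(NetD(fake, label_var).squeeze(), true_var)
        g_loss.backward()
        optimizerG.step()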
Example #3
# The top of this function is truncated; the signature and encoder loop are
# reconstructed to mirror the decoder loop (the pair count of 2 is assumed).
def sparse_loss(autoencoder, values):
    loss = 0
    for i in range(2):
        fc_layer = list(autoencoder.encoder.children())[2 * i]
        relu = list(autoencoder.encoder.children())[2 * i + 1]
        values = relu(fc_layer(values))  # apply the fetched ReLU, too
        loss += kl_divergence(0.3, values)
    for i in range(2):
        fc_layer = list(autoencoder.decoder.children())[2 * i]
        relu = list(autoencoder.decoder.children())[2 * i + 1]
        values = relu(fc_layer(values))
        loss += kl_divergence(0.3, values)
    return loss
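

# kl_divergence is called above but never defined in the excerpt. A plausible
# stand-in (an assumption, not the original): KL divergence between a target
# Bernoulli rate rho and each unit's mean activation, squashed into (0, 1).
def kl_divergence(rho, values):
    rho_hat = torch.mean(torch.sigmoid(values), dim=0)
    return torch.sum(rho * torch.log(rho / rho_hat)
                     + (1 - rho) * torch.log((1 - rho) / (1 - rho_hat)))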


model = autoencoder().cuda() if torch.cuda.is_available() else autoencoder()
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(),
                             lr=learning_rate,
                             weight_decay=1e-5)
proBar = eup.ProgressBar(num_epochs, len(dataloader), "loss:%.3f")
for epoch in range(1, num_epochs + 1):
    for data in dataloader:
        img, _ = data
        img = img.view(img.size(0), -1)
        img = torch.autograd.Variable(img)
        if torch.cuda.is_available():
            img = img.cuda()
        # ===================forward=====================
        output = model(img)
        mse_loss = criterion(output, img)
        kl_loss = sparse_loss(model, img)
        loss = mse_loss + kl_loss * 1e-3

        # ===================backward====================
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
Example #4
if torch.cuda.is_available():  # assumed guard for the indented lines below
    z = z.cuda()
    z_test = z_test.cuda()

optimizerD = torch.optim.Adam(NetD.parameters(), lr=LEARNING_RATE_D, betas=(0.5, 0.999), weight_decay=0)
optimizerG = torch.optim.Adam(NetG.parameters(), lr=LEARNING_RATE_G, betas=(0.5, 0.999), weight_decay=0)

transform = tv.transforms.Compose([
    tv.transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)),
    tv.transforms.ToTensor(),
    tv.transforms.Normalize([0.5] * 3, [0.5] * 3)
])

dataset = tv.datasets.ImageFolder(root=DATA_PATH, transform=transform)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)

bar = eup.ProgressBar(EPOCH, len(dataloader), "D Loss:%.3f;G Loss:%.3f")
for epoch in range(1, EPOCH + 1):
    for i, data_batch in enumerate(dataloader, 0):
        for p in NetD.parameters():
            p.requires_grad = True

        NetD.zero_grad()
        images, labels = data_batch
        current_batch_size = images.size(0)
        images = images.cuda() if torch.cuda.is_available() else images
        # x and y are preallocated input/target Variables (their definitions
        # precede this excerpt); resize_/copy_ reuses their storage per batch.
        x.data.resize_as_(images).copy_(images)
        y.data.resize_(current_batch_size).fill_(1)
        y_pred = NetD(x)
        errD_real = criterion(y_pred, y)
        errD_real.backward()
        D_real = y_pred.data.mean()
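
        # A minimal sketch of the rest of the iteration (truncated in the
        # source); z is assumed to be the preallocated noise Variable.
        z.data.normal_(0, 1)
        fake = NetG(z)
        y.data.fill_(0)
        errD_fake = criterion(NetD(fake.detach()), y)
        errD_fake.backward()
        optimizerD.step()

        # Generator step: freeze D and push D(G(z)) toward the "real" label.
        for p in NetD.parameters():
            p.requires_grad = False
        NetG.zero_grad()
        y.data.fill_(1)
        errG = criterion(NetD(fake), y)
        errG.backward()
        optimizerG.step()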
Example #5
INPUT_IMAGE_PATH = "./inputs/Z.jpg"
density_img = si.imread(INPUT_IMAGE_PATH, True)
lut_2d = epnu.generate_lut(density_img)

visualizer = epnu.GANDemoVisualizer('GAN 2D Example Visualization of {}'.format(INPUT_IMAGE_PATH))
generator = SimpleMLP(input_size=z_dim, hidden_size=50, output_size=DIMENSION)
discriminator = SimpleMLP(input_size=DIMENSION, hidden_size=100, output_size=1)

if torch.cuda.is_available():
    generator = generator.cuda()
    discriminator = discriminator.cuda()

criterion = torch.nn.BCELoss()
d_optimizer = torch.optim.Adadelta(discriminator.parameters(), lr=1)
g_optimizer = torch.optim.Adadelta(generator.parameters(), lr=1)
proBar = eup.ProgressBar(1, iterations, "D Loss:(real/fake) %.3f/%.3f,G Loss:%.3f")

for train_iter in range(1, iterations + 1):
    for d_index in range(3):
        # 1. Train D on real+fake
        discriminator.zero_grad()

        #  1A: Train D on real
        real_samples = epnu.sample_2d(lut_2d, bs)
        d_real_data = torch.autograd.Variable(torch.Tensor(real_samples))
        if torch.cuda.is_available():
            d_real_data = d_real_data.cuda()
        d_real_decision = discriminator(d_real_data)
        labels = torch.autograd.Variable(torch.ones(bs))
        if torch.cuda.is_available():
            labels = labels.cuda()
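
        # A minimal sketch of the remainder of the D step and the G step
        # (the excerpt ends above); z_dim and bs come from the setup code.
        d_real_error = criterion(d_real_decision.squeeze(), labels)
        d_real_error.backward()

        # 1B: Train D on fake samples drawn from the generator.
        d_gen_input = torch.autograd.Variable(torch.randn(bs, z_dim))
        fake_labels = torch.autograd.Variable(torch.zeros(bs))
        if torch.cuda.is_available():
            d_gen_input = d_gen_input.cuda()
            fake_labels = fake_labels.cuda()
        d_fake_data = generator(d_gen_input).detach()
        d_fake_error = criterion(discriminator(d_fake_data).squeeze(), fake_labels)
        d_fake_error.backward()
        d_optimizer.step()

    # 2. Train G to make D classify its output as real.
    generator.zero_grad()
    g_input = torch.autograd.Variable(torch.randn(bs, z_dim))
    real_labels = torch.autograd.Variable(torch.ones(bs))
    if torch.cuda.is_available():
        g_input = g_input.cuda()
        real_labels = real_labels.cuda()
    g_error = criterion(discriminator(generator(g_input)).squeeze(), real_labels)
    g_error.backward()
    g_optimizer.step()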