Example #1
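The examples below share a small helper, `to_var`, that none of the listings define. It evidently wraps a tensor for autograd and moves it to the GPU when one is available. A minimal sketch under that assumption (the original may instead use the old `torch.autograd.Variable` API; the default of `requires_grad=True` is inferred from the gradient-penalty calls that differentiate with respect to these inputs):

import torch

def to_var(x, requires_grad=True):
    # Hypothetical reconstruction of the helper used throughout the examples.
    if torch.cuda.is_available():
        x = x.cuda()
    # Integer tensors (e.g. class labels) cannot carry gradients.
    return x.requires_grad_(requires_grad) if x.is_floating_point() else x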
        images = torch.from_numpy(pacman_data[0, i:i + batch_size, :, :, :])

        images = to_var(images)

        use_penalty = True
        print "up", use_penalty

        inf = I(images)

        print "inference mean and stdv", inf.mean(), inf.std()

        outputs = D(images, inf)

        d_loss_real = gan_loss(pre_sig=outputs,
                               real=True,
                               D=True,
                               use_penalty=use_penalty,
                               grad_inp=images,
                               gamma=1.0) + gan_loss(pre_sig=outputs,
                                                     real=True,
                                                     D=True,
                                                     use_penalty=use_penalty,
                                                     grad_inp=inf,
                                                     gamma=1.0)

        # Joint discriminator D(x, z): the penalty above is applied to
        # gradients w.r.t. both the data x and the inferred code z.

        real_score = outputs

        #================GENERATOR PHASE=========================

        z = to_var(torch.randn(batch_size, 64))
        z_col_lst = []
        for i in range(0, 2):
            z_row_lst = []
            for j in range(0, 2):
                xs = images[:, :, i * seg_length:(i + 1) * seg_length,
                            j * seg_length:(j + 1) * seg_length]
                z_volume = inf_bot(xs, take_pre=True)
                z_row_lst.append(z_volume)
            z_col = torch.cat(z_row_lst, 3)
            z_col_lst.append(z_col)
        z_bot = torch.cat(z_col_lst, 2)
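        # z_bot is a single stitched latent volume: each image patch was
        # encoded by inf_bot, per-row codes were joined along the width
        # (dim 3) and the rows stacked along the height (dim 2). It plays
        # the role of a "real" sample for the top-level discriminator below.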

        # Feed discriminator z inferred from data ("real")
        d_out_top_real = d_top(z_bot)
        d_loss_top_real = gan_loss(pre_sig=d_out_top_real,
                                   real=True,
                                   D=True,
                                   use_penalty=True,
                                   grad_inp=z_bot,
                                   gamma=1.0)
        print('disc score real:', d_out_top_real)
        # print "d loss top real", d_loss_top_real

        # Update discriminator with "real"
        d_top.zero_grad()
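        # retain_graph=True: the forward graph is reused by loss terms
        # computed later in this iteration, so this backward must not free it.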
        d_loss_top_real.backward(retain_graph=True)
        d_top_optimizer.step()

        # Reconstruct x through lower level z (sanity check)
        rec_images_bot = torch.zeros(batch_size, NUM_CHANNELS, IMAGE_LENGTH,
                                     IMAGE_LENGTH)
        for seg in range(0, ns):
            i = seg // ns_per_dim
g_optimizer = torch.optim.Adam(G.parameters(), lr=0.0001, betas=(0.5, 0.99))

for epoch in range(200):
    for i in range(0, 19000, batch_size):

        images = torch.from_numpy(pacman_data[0, i:i + batch_size, :, :, :])

        images = to_var(images)

        use_penalty = True
        print "up", use_penalty

        outputs = D(images)
        d_loss_real = gan_loss(pre_sig=outputs,
                               real=True,
                               D=True,
                               use_penalty=use_penalty,
                               grad_inp=images,
                               gamma=1.0)

        real_score = outputs

        z = to_var(torch.randn(batch_size, 64))
        fake_images = G(z)
        outputs = D(fake_images)

        d_loss_fake = gan_loss(pre_sig=outputs,
                               real=False,
                               D=True,
                               use_penalty=use_penalty,
                               grad_inp=fake_images,
                               gamma=1.0)
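        # Hierarchical phase: encode five consecutive frames with the
        # bottom-level inference net IB, concatenate the codes, and train
        # the top-level discriminator Dh / generator Gh in that code space.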
        pacman_frame_lst = []

        for t in range(0, 5):
            pacman_frames = to_var(
                torch.from_numpy(pacman_data[t, i:i + 128, :, :, :]))
            enc = IB(pacman_frames, take_pre=True).view(128, -1)
            pacman_frame_lst.append(enc)

        real = to_var(torch.cat(pacman_frame_lst, 1).data)

        real_score = Dh(real)

        d_loss_real = gan_loss(pre_sig=real_score,
                               real=True,
                               D=True,
                               use_penalty=True,
                               grad_inp=real,
                               gamma=1.0)

        Dh.zero_grad()
        d_loss_real.backward()

        dh_optimizer.step()

        #GENERATION ===========================
        z_raw = to_var(torch.randn(128, 128))

        gen_val = Gh(z_raw)

        print(gen_val.size())
Example #5
    for i, (images, true_labels) in enumerate(data_loader):

        print "iteration", i

        batch_size = images.size(0)

        #images = to_var(images.view(batch_size, -1))
        images = to_var(images)

        true_labels = to_var(true_labels, False)

        outputs, y_label_pred_real = D(images)
        d_loss_real, grad_p_real = gan_loss(pre_sig=outputs,
                                            real=True,
                                            D=True,
                                            use_penalty=args.use_penalty,
                                            grad_inp=images,
                                            gamma=args.gamma)

        class_loss, class_acc = D.compute_loss_acc(y_label_pred_real,
                                                   true_labels)

        acc_class_clean.append(class_acc)

        image_class_adv, _ = gen_adv_example(
            D.label_classifier, images, lambda p: (nll(p, true_labels), None),
            args.epsilon)

        pred_labels = D.label_classifier(image_class_adv)

        class_loss_adv, class_acc_adv_curr = D.compute_loss_acc(
            pred_labels, true_labels)
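`gen_adv_example` is not shown in the listing. Judging from its call site, it behaves like an FGSM-style attack: it differentiates the supplied loss through the classifier and perturbs the input by `epsilon` along the sign of the gradient. A hypothetical sketch under that assumption:

import torch

def gen_adv_example(classifier, x, loss_fn, epsilon):
    # loss_fn maps predictions to (loss, aux), matching the lambda above.
    x_adv = x.detach().clone().requires_grad_(True)
    loss, _ = loss_fn(classifier(x_adv))
    grad = torch.autograd.grad(loss, x_adv)[0]
    # One signed gradient step, detached from the attack graph.
    x_adv = (x_adv + epsilon * grad.sign()).detach()
    return x_adv, grad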
checkpoint_i = 1
inception_i = 1
start_time = timer()
for epoch in range(200):    
    print('epoch:', epoch)
    for i, (images, _) in enumerate(data_loader):
        images = to_var(images)
        # print 'min:', images.min()
        # print 'max:', images.max()
        # print 'min:', var_to_np255(images).min()
        # print 'max:', var_to_np255(images).max()
        # raise Exception()
        
        # Real images
        d_out_real = disc(images)
        d_loss_real = gan_loss(pre_sig=d_out_real, real=True, D=True, use_penalty=True, grad_inp=images, gamma=1.0)
        
        # Update for real images
        disc.zero_grad()
        d_loss_real.backward()
        disc_optimizer.step()
        
        # Generated images
        z = to_var(torch.randn(batch_size, nz))
        fake_images = gen(z)
        d_out_fake = disc(fake_images)
        d_loss_fake = gan_loss(pre_sig=d_out_fake, real=False, D=True, use_penalty=True, grad_inp=fake_images, gamma=1.0)
        g_loss_fake = gan_loss(pre_sig=d_out_fake, real=False, D=False, use_penalty=False, grad_inp=None, gamma=None, bgan=True)
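        # Note the asymmetry: the discriminator update uses the penalized
        # cross-entropy loss, while bgan=True gives the generator a
        # boundary-seeking objective computed from the same d_out_fake.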

        # Update for fake images
        disc.zero_grad()
Example #7
        print "images min max", images.min(), images.max()

        z_bot_lst = []
        for seg in range(0, ns):
            # Infer lower level z from data
            i = seg // ns_per_dim
            j = seg % ns_per_dim
            xs = images[:, :, i * seg_length:(i + 1) * seg_length,
                        j * seg_length:(j + 1) * seg_length]
            zs = inf_bot(xs)

            # Feed discriminator real data
            d_out_bot = d_bot(xs, zs)
            print('d_out_bot.size():', d_out_bot.size())
            # d_loss_bot = ((d_out_bot - real_labels)**2).mean()
            d_loss_bot = gan_loss(pre_sig=d_out_bot, real=True, D=True, use_penalty=True, grad_inp=xs, gamma=1.0) + \
                gan_loss(pre_sig=d_out_bot, real=True, D=True, use_penalty=True, grad_inp=zs, gamma=1.0)

            # Generator loss pushing real data toward boundary
            # g_loss_bot = 1.0 * ((d_out_bot - boundary_labels)**2).mean()
            g_loss_bot = gan_loss(pre_sig=d_out_bot,
                                  real=True,
                                  D=False,
                                  use_penalty=False,
                                  grad_inp=None,
                                  gamma=None,
                                  bgan=True)

            # Add z norm penalty
            if Z_NORM_MULT is not None:
                g_loss_bot += Z_NORM_MULT * zs.norm(2)
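All of the examples lean on a shared `gan_loss` helper that none of the listings define. From the call sites one can reconstruct its contract: `pre_sig` is the discriminator's pre-sigmoid output, `real` and `D` select which side of which player's loss to compute, `use_penalty` adds a gradient penalty of weight `gamma` taken with respect to `grad_inp`, and `bgan=True` switches the generator to a boundary-seeking objective. The sketch below is a plausible reconstruction under those assumptions, not the original implementation (Example #5's variant evidently also returns the penalty term separately):

import torch
import torch.nn.functional as F

def gan_loss(pre_sig, real, D, use_penalty=False, grad_inp=None,
             gamma=None, bgan=False):
    # Hypothetical reconstruction from the call sites in the examples above.
    if D:
        # Discriminator: cross-entropy against the real/fake label.
        target = torch.ones_like(pre_sig) if real else torch.zeros_like(pre_sig)
        loss = F.binary_cross_entropy_with_logits(pre_sig, target)
        if use_penalty:
            # R1/R2-style gradient penalty; grad_inp must have
            # requires_grad=True before the discriminator's forward pass.
            grads = torch.autograd.grad(pre_sig.sum(), grad_inp,
                                        create_graph=True)[0]
            loss = loss + gamma * grads.pow(2).reshape(
                grads.size(0), -1).sum(1).mean()
    elif bgan:
        # Boundary-seeking generator loss (Hjelm et al., 2017):
        # 0.5 * (log D - log(1 - D))^2, via logsigmoid for stability.
        loss = 0.5 * (F.logsigmoid(pre_sig)
                      - F.logsigmoid(-pre_sig)).pow(2).mean()
    else:
        # Non-saturating generator loss.
        loss = F.binary_cross_entropy_with_logits(pre_sig,
                                                  torch.ones_like(pre_sig))
    return loss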