Example #1
                (test_relevance[:, :, bp:-bp, bp:-bp], real_test_relevance[:, :, bp:-bp, bp:-bp]))

            printdata = {'test_prob': test_prob.item(), 'real_test_prob': real_test_prob.item(),
                         'test_result': fake_double_check.item(), 'real_test_result': real_doublecheck_prop.item(),
                         # 'min_test_rel': torch.min(test_relevance), 'max_test_rel': torch.max(test_relevance),
                         'min_test_rel': fake_tripple_check.item(), 'max_test_rel': torch.max(test_relevance),
                         # 'min_real_rel': torch.min(real_test_relevance), 'max_real_rel': torch.max(real_test_relevance)}
                         'min_real_rel': real_tripplecheck_prop.item(), 'max_real_rel': torch.max(real_test_relevance)}

            if not opt.cont:
                log_epoch = epoch
            else:
                log_epoch = epoch + opt.cont

            img_name = logger.log_images(
                test_fake_cat.detach(), test_relevance_cat.detach(), test_fake.size(0),
                log_epoch, n_batch, len(dataloader), printdata, noLabel=opt.nolabel
            )

            # show images inline
            if opt.imgcat:
                comment = '{:.4f}-{:.4f}'.format(printdata['test_prob'], printdata['real_test_prob'])
                subprocess.call([
                    os.path.expanduser('~/.iterm2/imgcat'),
                    os.path.join(outf, opt.dataset,
                                 'epoch_{}_batch_{}_{}.png'.format(epoch, n_batch, comment))
                ])

            status = logger.display_status(epoch, opt.epochs, n_batch, len(dataloader), d_error_total, g_err,
                                           prediction_real, prediction_fake)

    Logger.epoch += 1

    # do checkpointing
    torch.save(generator.state_dict(),
               '{}/generator_epoch_{}.pth'.format(checkpointdir, log_epoch))
    break

Example #2
#print("ts shape ",test_samples.shape)
test_samples = y.permute(0, 1, 3, 2).numpy()
test_samples = images_to_vectors(test_samples)
test_samples = np.reshape(test_samples, (-1, 28, 28))
print(test_samples.shape)
fi, batch_mask = noisy_images(test_samples)
test_samples_nhwc = vectors_to_images(test_samples)
test_samples = images_to_vectors(test_samples)
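
# The reshaping helpers used above (images_to_vectors, vectors_to_images,
# noisy_images) are not shown in this listing. A minimal sketch of plausible
# implementations, assuming 28x28 MNIST images flattened to 784-dimensional
# vectors; the masked region matches the commented-out mask further below,
# but the original helpers may differ:
import numpy as np

def images_to_vectors(images):
    # Flatten (N, 28, 28) or (N, 28, 28, 1) images to (N, 784) row vectors.
    return np.reshape(images, (-1, 28 * 28))

def vectors_to_images(vectors):
    # Unflatten (N, 784) vectors to NHWC images of shape (N, 28, 28, 1).
    return np.reshape(vectors, (-1, 28, 28, 1))

def noisy_images(images):
    # Zero a central 10x10 patch; return the corrupted images and the
    # binary mask (1 = kept pixel, 0 = removed pixel).
    mask = np.ones_like(images, dtype=np.float32)
    mask[:, 9:19, 9:19] = 0.0
    return images * mask, mask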

logger = Logger(model_name='test', data_name='generator_output')
logger1 = Logger(model_name='test', data_name='true_image')
logger2 = Logger(model_name='test', data_name='noisy_image')

logger1.log_images(test_samples_nhwc, 1, 1, 1, 1, '1', format='NHWC')

test_noise_reshape = images_to_vectors(fi)
test_noise_reshape = vectors_to_images(test_noise_reshape)
logger2.log_images(test_noise_reshape, 1, 1, 1, 1, '1', format='NHWC')

#batch_mask = np.ones((BATCH_SIZE,28,28),np.float32)
#batch_mask[:,9:19,9:19] = 0.0
batch_mask = images_to_vectors(batch_mask)
images = tf.placeholder(tf.float32, shape=(BATCH_SIZE, 28 * 28))
recovered_images = tf.reshape(G_sample, (BATCH_SIZE, 28 * 28))
contextual_loss = tf.reduce_sum(
    tf.contrib.layers.flatten(
        tf.abs(tf.multiply(images, batch_mask) - tf.multiply(G_sample, batch_mask))), 1)

perceptual_loss = G_loss
complete_loss = contextual_loss + 0.001 * perceptual_loss
grad_complete_loss = tf.gradients(complete_loss, Z)
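
# The gradients above are typically consumed by a small loop that refines the
# latent code Z directly. A minimal sketch, assuming test_samples holds one
# full batch; session and NOISE_SIZE follow Example #6's naming, and zhats,
# lr, and the step count are assumptions, not names from this example:
zhats = np.random.uniform(-1.0, 1.0, size=(BATCH_SIZE, NOISE_SIZE))
lr = 0.01
for step in range(1000):
    loss_val, grads = session.run(
        [complete_loss, grad_complete_loss],
        feed_dict={Z: zhats, images: test_samples})
    zhats = np.clip(zhats - lr * grads[0], -1.0, 1.0)  # plain gradient step on Z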
Example #3
            # discriminator.eval()
            test_result = discriminator(test_fake)
            test_relevance = discriminator.relprop()
            #
            # test_relevance = vectors_to_images(test_relevance)
            test_fake = vectors_to_images(test_fake)

            # set ngpu back to opt.ngpu
            if opt.ngpu > 1:
                discriminator.setngpu(opt.ngpu)
            # discriminator.train()

            # Add up relevance of all color channels
            # test_relevance = torch.sum(test_relevance, 1, keepdim=True)

            logger.log_images(test_fake.detach(), test_relevance.detach(), 1,
                              epoch, n_batch, len(dataloader))

            # show images inline
            subprocess.call([
                os.path.expanduser('~/.iterm2/imgcat'),
                '{}/mnist/hori_epoch_{}_batch_{}.png'.format(outf, epoch, n_batch)
            ])

            # Display status Logs
            logger.display_status(epoch, num_epochs, n_batch, num_batches,
                                  d_error, g_error, d_pred_real, d_pred_fake)

    # do checkpointing
    torch.save(generator.state_dict(), '%s/generator.pth' % checkpointdir)
    torch.save(discriminator.state_dict(), '%s/discriminator.pth' % checkpointdir)
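
# To reuse these checkpoints, instantiate the same architectures and load the
# saved state dicts. A minimal sketch; the Generator and Discriminator class
# names are assumptions, not shown in this example:
generator = Generator()
generator.load_state_dict(torch.load('%s/generator.pth' % checkpointdir))
generator.eval()  # inference mode: freezes dropout/batch-norm behavior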
Example #4
        gen_iterations += 1

        print(
            '[%d/%d][%d/%d][%d] Loss_D: %f Loss_G: %f Loss_D_real: %f Loss_D_fake: %f'
            % (epoch, opt.niter, i, len(dataloader), gen_iterations,
               errD.item(), errG.item(), errD_real.item(), errD_fake.item()))

        if gen_iterations % 500 == 0:
            real_cpu = real_cpu.mul(0.5).add(0.5)
            vutils.save_image(real_cpu,
                              '{0}/real_samples.png'.format(opt.experiment))
            with torch.no_grad():  # replaces the deprecated volatile=True
                fake = netG(fixed_noise)
            fake = fake.mul(0.5).add(0.5)
            vutils.save_image(
                fake,
                '{0}/fake_samples_{1}.png'.format(opt.experiment,
                                                  gen_iterations))

            test_relevance = netD.relprop()
            test_relevance_p = torch.sum(test_relevance[0], 0, keepdim=True)
            logger.log_images(fake[0].data.unsqueeze(0),
                              test_relevance_p.unsqueeze(0), 1, epoch,
                              gen_iterations, len(dataloader))

    # do checkpointing
    torch.save(netG.state_dict(),
               '{0}/netG_epoch_{1}.pth'.format(opt.experiment, epoch))
    torch.save(netD.state_dict(),
               '{0}/netD_epoch_{1}.pth'.format(opt.experiment, epoch))
Example #5
        logger.log(d_training_loss, g_training_loss, epoch, n_batch,
                   num_batches)

        print(
            '[%d/%d][%d/%d] Loss_D: %f Loss_G: %f Loss_D_real: %f Loss_D_fake: %f'
            % (epoch, num_epochs, n_batch, num_batches, d_training_loss,
               g_training_loss, d_loss_real, d_loss_fake))

        # Display Progress every few batches
        if n_batch % 100 == 0 or n_batch == num_batches:

            test_fake = generator(test_noise)
            if opt.ngpu > 1:
                discriminator.setngpu(1)  # run the test pass on a single GPU
            discriminator.eval()
            test_result = discriminator(test_fake)
            discriminator.train()
            test_relevance = discriminator.relprop()
            if opt.ngpu > 1:
                discriminator.setngpu(opt.ngpu)  # restore the multi-GPU setting
            # Add up relevance of all color channels
            test_relevance = torch.sum(test_relevance, 1, keepdim=True)
            # print('Test fake', test_fake.shape, 'test_rel', test_relevance.shape)
            logger.log_images(test_fake.data, test_relevance, num_test_samples,
                              epoch, n_batch, num_batches)

            status = logger.display_status(epoch, num_epochs, n_batch,
                                           num_batches, d_training_loss,
                                           g_training_loss, d_prediction_real,
                                           d_prediction_fake)
Example #6
        X_batch = images_to_vectors(y)

        feed_dict = {X: X_batch, Z: Z_batch}

        _, d_error, d_pred_real, d_pred_fake = session.run(
            [D_opt, D_loss, D_real, D_fake], feed_dict=feed_dict)

        # 2. Train Generator
        feed_dict = {Z: noise(BATCH_SIZE, NOISE_SIZE)}

        _, g_error = session.run([G_opt, G_loss], feed_dict=feed_dict)

        if n_batch % 100 == 0:
            display.clear_output(True)
            # Generate images from test noise
            test_images = session.run(G_sample,
                                      feed_dict={Z: noise(16, NOISE_SIZE)})
            test_images = vectors_to_images(test_images)
            # Log Images
            logger.log_images(test_images,
                              16,
                              epoch,
                              n_batch,
                              num_batches,
                              '1',
                              format='NHWC')
            # Log Status
            logger.display_status(epoch, num_epochs, n_batch, num_batches,
                                  d_error, g_error, d_pred_real, d_pred_fake)
save_path = saver.save(session, "./init_gan_weights/final_weights")
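
# The noise() helper used above is not shown in this listing. A minimal
# sketch, assuming standard Gaussian latent vectors (the original may sample
# uniformly instead):
import numpy as np

def noise(batch_size, noise_size):
    # One latent vector per row, drawn i.i.d. from N(0, 1).
    return np.random.normal(0.0, 1.0, size=(batch_size, noise_size))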