Example No. 1
            test_result = discriminator(test_fake)
            test_relevance = discriminator.relprop()
            #
            # test_relevance = vectors_to_images(test_relevance)
            test_fake = vectors_to_images(test_fake)

            # set ngpu back to opt.ngpu
            if (opt.ngpu > 1):
                discriminator.setngpu(opt.ngpu)
            # discriminator.train()

            # Add up relevance of all color channels
            # test_relevance = torch.sum(test_relevance, 1, keepdim=True)

            logger.log_images(test_fake.detach(), test_relevance.detach(), 1,
                              epoch, n_batch, len(dataloader))

            # show images inline
            subprocess.call([
                os.path.expanduser('~/.iterm2/imgcat'),
                outf + '/mnist/hori_epoch_' + str(epoch) + '_batch_' +
                str(n_batch) + '.png'
            ])

            # Display status Logs
            logger.display_status(epoch, num_epochs, n_batch, num_batches,
                                  d_error, g_error, d_pred_real, d_pred_fake)

    # do checkpointing
    torch.save(generator.state_dict(), '%s/generator.pth' % checkpointdir)
    torch.save(discriminator.state_dict(), '%s/discriminator.pth' % checkpointdir)
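A minimal reload sketch for the checkpoints saved above, assuming the same generator and discriminator architectures have already been constructed and that checkpointdir points at the directory used during training:

import torch

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
generator.load_state_dict(
    torch.load('%s/generator.pth' % checkpointdir, map_location=device))
discriminator.load_state_dict(
    torch.load('%s/discriminator.pth' % checkpointdir, map_location=device))
# Switch to eval mode for inference / relevance inspection.
generator.eval()
discriminator.eval()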
Example No. 2
                log_epoch = epoch
            else:
                log_epoch = epoch + opt.cont

            img_name = logger.log_images(
                test_fake_cat.detach(), test_relevance_cat.detach(), test_fake.size(0),
                log_epoch, n_batch, len(dataloader), printdata, noLabel=opt.nolabel
            )

            # show images inline
            if opt.imgcat:
                comment = '{:.4f}-{:.4f}'.format(printdata['test_prob'], printdata['real_test_prob'])
                subprocess.call([os.path.expanduser('~/.iterm2/imgcat'),
                                 outf + '/' + opt.dataset + '/epoch_' + str(epoch) + '_batch_' + str(n_batch) + '_' + comment + '.png'])

            status = logger.display_status(epoch, opt.epochs, n_batch, len(dataloader), d_error_total, g_err,
                                           prediction_real, prediction_fake)

    Logger.epoch += 1

    # do checkpointing
    torch.save(generator.state_dict(),
               '{}/generator_epoch_{}.pth'.format(checkpointdir, log_epoch))
    torch.save(discriminator.state_dict(),
               '{}/discriminator_epoch_{}.pth'.format(checkpointdir, log_epoch))

    dic_compare = torch.load('{}/discriminator_epoch_{}.pth'.format(checkpointdir, log_epoch),
                             map_location='cuda:0' if torch.cuda.is_available() else 'cpu')


    def compare_models(dict1, dict2):
        # Count parameter tensors that differ between the two state dicts.
        models_differ = 0
        for key_item_1, key_item_2 in zip(dict1.items(), dict2.items()):
            if torch.equal(key_item_1[1], key_item_2[1]):
                continue
            models_differ += 1
        return models_differ
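A short usage sketch for the helper above, assuming dic_compare was loaded as shown and the trained discriminator is still in memory; a count of 0 means every parameter tensor round-tripped through the checkpoint unchanged.

    # Usage sketch (illustrative): compare in-memory weights with the reloaded checkpoint.
    mismatches = compare_models(discriminator.state_dict(), dic_compare)
    print('Mismatched parameter tensors:', mismatches)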
Example No. 3
        logger.log(d_training_loss, g_training_loss, epoch, n_batch,
                   num_batches)

        print(
            '[%d/%d][%d/%d] Loss_D: %f Loss_G: %f Loss_D_real: %f Loss_D_fake %f'
            % (epoch, num_epochs, n_batch, num_batches, d_training_loss,
               g_training_loss, d_loss_real, d_loss_fake))

        # Display Progress every few batches
        if n_batch % 100 == 0 or n_batch == num_batches:

            test_fake = generator(test_noise)
            if (opt.ngpu > 1):
                discriminator.setngpu(1)
            discriminator.eval()
            test_result = discriminator(test_fake)
            discriminator.train()
            test_relevance = discriminator.relprop()
            if (opt.ngpu > 1):
                discriminator.setngpu(opt.ngpu)
            # Add up relevance of all color channels
            test_relevance = torch.sum(test_relevance, 1, keepdim=True)
            # print('Test fake', test_fake.shape, 'test_rel', test_relevance.shape)
            logger.log_images(test_fake.data, test_relevance, num_test_samples,
                              epoch, n_batch, num_batches)

            status = logger.display_status(epoch, num_epochs, n_batch,
                                           num_batches, d_training_loss,
                                           g_training_loss, d_prediction_real,
                                           d_prediction_fake)
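The relevance step in these examples follows one fixed pattern: drop to a single GPU, run the discriminator on the fake batch in eval mode, call relprop(), restore the original settings, and sum relevance over the color channels. A hypothetical helper capturing that pattern is sketched below; compute_relevance is not part of the original code and only reuses the setngpu() and relprop() methods shown above.

import torch

def compute_relevance(discriminator, fake_batch, ngpu):
    # Forward the batch so the discriminator caches its activations,
    # then propagate relevance back to the input (LRP).
    if ngpu > 1:
        discriminator.setngpu(1)        # relprop runs on a single GPU
    discriminator.eval()
    _ = discriminator(fake_batch)
    relevance = discriminator.relprop()
    discriminator.train()
    if ngpu > 1:
        discriminator.setngpu(ngpu)     # restore the multi-GPU setting
    # Sum over color channels to get one relevance heatmap per image.
    return torch.sum(relevance, 1, keepdim=True)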