required=True,
                    help='CUDA device id; -1 selects CPU mode')
args = parser.parse_args()

if args.cuda != '-1':
    os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda
    device = 'cuda:0'
else:
    device = 'cpu'
path = args.path

images = np.empty((len(os.listdir(path)), 32, 32, 3))

n = 0
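# Keep only PNG/JPG files and rescale pixels from [0, 255] to [-1, 1]; unused
# slots are dropped and the array is transposed to NCHW after the loop.
# Note: scipy.misc.imread was removed in SciPy >= 1.2; imageio.imread is a
# drop-in replacement on newer installations.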
for fname in os.listdir(path):
    if fname.endswith(('png', 'jpg')):
        images[n] = 2 * (scipy.misc.imread(os.path.join(path, fname)) /
                         255) - 1
        n += 1
images = images[:n].transpose((0, 3, 1, 2))
print(images.shape)
data = torch.tensor(images)
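# utils.inception_score is expected to return the mean and standard deviation
# of the Inception Score computed over `splits` splits of the images.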
mean, std = utils.inception_score(data,
                                  device=device,
                                  batch_size=32,
                                  resize=True,
                                  splits=10)
print('Inception score: {0:.3f} +- {1:.3f}'.format(mean, std))
with open(os.path.join(path, 'inception.txt'), 'w') as f:
    f.write('Inception score: {0:.3f} +- {1:.3f}'.format(mean, std))
Example #2
               os.path.join(opt.dir_out, opt.exper_name) +
               "/fake_image_epoches{}_batchAll.png".format(epoch),
               nrow=int(np.sqrt(opt.batchSize)),
               normalize=True)

    # =====================
    # Save training models
    # =====================
    save_checkpoint_wo_step(
        generator, device,
        os.path.join(opt.save_checkpoints_dir, opt.exper_name, "G",
                     'G_final.pth'))
    save_checkpoint_wo_step(
        discriminator, device,
        os.path.join(opt.save_checkpoints_dir, opt.exper_name, "D",
                     'D_final.pth'))

    # Create a GIF of the generated images (uncomment as needed, since it increases the data size).
    # fake_images_historys.append(fake_imgs[0].transpose(0,1).transpose(1,2).cpu().clone().numpy())
    # save_image_historys_gif(fake_images_historys, os.path.join(opt.dir_out, opt.exper_name) + "/fake_image_epoches{}.gif".format( epoch ))

generator.eval()
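# After training, draw 1024 latent vectors and report the Inception Score of
# the corresponding generated samples.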
score_z = Variable(Tensor(np.random.normal(0, 1,
                                           (1024, opt.nz, 1, 1)))).to(device)
score_imgs = generator(score_z)
score_mean, score_std = inception_score(imgs=score_imgs,
                                        cuda=cuda,
                                        batch_size=32,
                                        resize=True,
                                        splits=1)
print(score_mean, score_std)
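Example #3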
import torchvision.datasets as dset
from torchvision.utils import save_image
from torchvision import datasets
from torch.utils.data import DataLoader

import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F

from architecture import Feature_extractor, Generator, weights_init
from utils import squared_distances, emd, gen_noise, sinkhorn_divergence, inception_score, IgnoreLabelDataset

img_size = 32
channels = 3
latent_dim = 100

img_shape = (channels, img_size, img_size)
PATH = 'results/umbSD_euclidean_reg_100.0_epoch975.pth'
print('loading checkpoint:', PATH)
netG = Generator(img_size, channels).cuda()
checkpoint = torch.load(PATH)

netG.load_state_dict(checkpoint['generator_state_dict'])

netG.eval()
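# Sample 20,000 latent vectors and score the generated images; everything is
# generated in a single batch, so this assumes enough GPU memory for it.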
z = Variable(gen_noise(20000, latent_dim, cuda=True))
gen_imgs = netG(z)
print ("Calculating Inception Score...")
print (inception_score(gen_imgs, cuda=True, batch_size=32, resize=True, splits=10))
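Example #4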
        netG.eval()
        show_z_b.normal_()
        ## show eval images
        fake_img = netG(show_z_b)  #[N,1,28,28] torch.(cuda).tensor
        vutils.save_image((.5 * fake_img + .5).detach().cpu(),
                          './exp/' + opt.exp + '/results/random/' +
                          str(epoch).zfill(4) + '.png')
        ## show eval fixed images
        fake_img = netG(fix_show_z)
        vutils.save_image((.5 * fake_img + .5).detach().cpu(),
                          './exp/' + opt.exp + '/results/fix/' +
                          str(epoch).zfill(4) + '.png')

    # inception, diversity, FID scores.
    if epoch % 25 == 0:
        is_score.append(inception_score(netIncept, netG, device, opt.z_dim))
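        # Log the Inception Score and optimization statistics for this epoch to loss.txt.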
        with open('./exp/' + opt.exp + '/loss.txt', 'a') as append_File:
            print(
                '[%d], Inception Score:%.4f, D loss:%.4f, G loss:%.4f, G approx:%.4f ,Ave Update:%.4f, G norm:%.4f, D norm:%.4f'
                % (epoch, is_score[-1], loss_D[-1], loss_G[-1], approx_G,
                   grad_G[-1], normG, normD),
                file=append_File)
        print('IS score: %.4f' % is_score[-1])
        best_is = max(is_score[-1], best_is)

        fig = plt.figure()
        plt.plot(25 * (np.arange(epoch // 25 + 1)), is_score, label='IS')
        plt.xlabel('Epoch, update for each approximator G')
        plt.legend()
        fig.savefig('./exp/' + opt.exp + '/InceptionScore.png')
        plt.close()
Example #5
        fake_logits, fake_feats = get_inception_features(
            inception,
            fakes.detach().cpu(),
            batch_size=EVAL_BATCH_SIZE,
            device=device,
            hook=get_avgpool,
        )
        fake_logits = F.softmax(fake_logits, dim=1)
        fake_softmax.append(fake_logits)
        fake_features.append(fake_feats)

    fake_features = torch.cat(fake_features, dim=0)
    fake_softmax = torch.cat(fake_softmax, dim=0)

    # calculate inception score
    inc_score = inception_score(fake_softmax)

    # calculate fid
    mu_fake = fake_features.mean(dim=0)
    sigma_fake = get_covariance(fake_features)
    fid_score = fid(mu_real, mu_fake, sigma_real, sigma_fake)

    writer.add_scalar("scores/FID", fid_score.item(), global_step=global_step)
    writer.add_scalar("scores/IS", inc_score.item(), global_step=global_step)
    del inception

    # save best checkpoint
    if fid_score.item() < best_fid:
        torch.save(gen.state_dict(), os.path.join(logdir, "gen_best.pth"))
        best_fid = fid_score.item()