import numpy as np

import fid


def ComputeFID(reconstructions, images):
    # Map from [-1, 1] to [0, 255] and cast to uint8
    reconstructions = ((reconstructions + 1) / 2.0) * 255.0
    reconstructions = reconstructions.astype(np.uint8)
    images = ((images + 1) / 2.0) * 255.0
    images = images.astype(np.uint8)

    # NHWC -> NCHW, as expected by the Inception feature extractor
    images = np.transpose(images, (0, 3, 1, 2))
    reconstructions = np.transpose(reconstructions, (0, 3, 1, 2))

    FID = fid.get_fid(images, reconstructions)
    return FID
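A minimal usage sketch (the shapes and value range are assumptions, not stated in the original): both arguments are float arrays in [-1, 1] with layout (N, H, W, 3).

real = np.random.uniform(-1, 1, size=(64, 32, 32, 3)).astype(np.float32)
recon = np.random.uniform(-1, 1, size=(64, 32, 32, 3)).astype(np.float32)
print(ComputeFID(recon, real))  # lower means the two image sets are closer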
Example #2
    def measure_fis(self, epoch, tun_grid, sample_size=1000):
        for i in range(self.num_classes):
            _, batch_images = self.data_loader.load_data(
                domain=None,
                batch_size=sample_size,
                is_testing=True,
                convertRGB=False)
            _, batch_images_i = self.data_loader.load_data(
                domain=i,
                batch_size=sample_size,
                is_testing=True,
                convertRGB=True)
            # Un-normalize from [-1, 1] to [0, 255]
            batch_images_i = (batch_images_i + 1.) * 127.5
            # NHWC -> NCHW for the FID feature extractor
            batch_images_i = np.transpose(batch_images_i, (0, 3, 1, 2))
            labels1_all_i = to_categorical([i] * sample_size,
                                           num_classes=self.num_classes)
            zs1_, zs2_, zs3_, zs4_ = self.g_enc.predict(batch_images)
            _fake_i = self.g_dec.predict(
                [zs1_, zs2_, zs3_, zs4_, labels1_all_i])
            # Un-normalize the generator output and convert grayscale -> RGB
            fake_i = np.zeros((sample_size, self.img_rows, self.img_cols, 3))
            for j in range(sample_size):
                _fake_i[j] = (_fake_i[j] + 1.) * 127.5
                fake_i[j] = cv2.cvtColor(_fake_i[j], cv2.COLOR_GRAY2RGB)
            fake_i = np.transpose(fake_i, (0, 3, 1, 2))
            fid_i = get_fid(fake_i, batch_images_i)
            print(i, fid_i)
            # Update the tuning grid: the current hyper-parameter combination
            # must match exactly one row
            row_mask = ((tun_grid['d_gan_loss_w'] == self.d_gan_loss_w)
                        & (tun_grid['d_cl_loss_w'] == self.d_cl_loss_w)
                        & (tun_grid['g_gan_loss_w'] == self.g_gan_loss_w)
                        & (tun_grid['g_cl_loss_w'] == self.g_cl_loss_w)
                        & (tun_grid['rec_loss_w'] == self.rec_loss_w)
                        & (tun_grid['adam_lr'] == self.adam_lr)
                        & (tun_grid['epoch'] == epoch))
            assert tun_grid[row_mask].shape[0] == 1

            tun_grid.loc[row_mask, 'lab' + str(i) + '_FIS'] = fid_i
        # store
        tun_grid.to_csv(str(sys.argv[0]).split('.')[0] + '_fis.csv',
                        index=False)
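The grid-update idiom above (build a boolean row mask, assert it matches exactly one row, then assign through .loc) is generic pandas; a self-contained sketch with hypothetical columns:

import pandas as pd

grid = pd.DataFrame({'adam_lr': [1e-4, 2e-4],
                     'epoch': [10, 10],
                     'lab0_FIS': [float('nan')] * 2})
mask = (grid['adam_lr'] == 2e-4) & (grid['epoch'] == 10)
assert grid[mask].shape[0] == 1    # exactly one matching configuration
grid.loc[mask, 'lab0_FIS'] = 42.0  # write the score into that single row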
Example #3
                torch.save(
                    {
                        'epoch': epoch + 1,
                        'VAE_model': model.module.state_dict(),
                        'optimizer': optimizer.state_dict()
                    }, opt.model_path + f"model_{str(epoch+1)}.tar")

                # Calculate FID
                fid = "N/A"
                if opt.calc_fid:
                    fn = lambda x: model.module.decode(x).cpu()
                    generate_fid_samples(fn,
                                         epoch,
                                         opt.n_samples,
                                         opt.n_hidden,
                                         opt.fid_path_samples,
                                         device=device)
                    fid = get_fid(opt.fid_path_samples,
                                  opt.fid_path_pretrained)
                print('====> Epoch: {} Average loss: {:.4f} FID: {}'.format(
                    epoch, avg_loss, fid))

                # Log results
                logger.log({"Epoch": epoch, "Avg Loss": avg_loss, "FID": fid})

    tmp_epoch = 0
    for m in opt.load_path:
        epoch = load_model(m)

        # Quick fix so that loading multiple models does not overwrite results
        epoch = epoch if epoch > tmp_epoch else tmp_epoch + 1
        tmp_epoch = epoch

        if opt.calc_fid:
Example #4
    for m in opt.load_path:
        epoch = load_model(m)

        # Quick fix so that loading multiple models does not overwrite results
        epoch = epoch if epoch > tmp_epoch else tmp_epoch + 1
        tmp_epoch = epoch

        if opt.calc_fid:
            fn = lambda x: netEG.module.decode(x).cpu()
            generate_fid_samples(fn,
                                 epoch,
                                 opt.n_samples,
                                 opt.n_hidden,
                                 opt.fid_path_recons,
                                 device=device)
            fid = get_fid(opt.fid_path_recons, opt.fid_path_pretrained)
        if opt.test_recons:
            fn = lambda x: netEG(x.to(device))[0]
            gen_reconstructions(
                fn,
                test_loader,
                epoch,
                opt.test_results_path_recons,
                nrow=1,
                path_for_originals=opt.test_results_path_originals)
            print("Generated reconstructions")
        if opt.test_samples:
            fn = lambda x: netEG.module.decode(x).cpu()
            generate_samples(fn,
                             epoch,
                             5,
Example #5
def prep(image):
    # Add a batch dimension if a single HWC image was given
    if len(image.shape) == 3:
        image = np.expand_dims(image, axis=0)

    # Map from [-1, 1] to [0, 255]
    image = (image * 0.5 + 0.5) * 255

    # NHWC -> NCHW
    return np.moveaxis(image, 3, 1)


if args.parser == 'compare':
    with open(args.images[0], 'rb') as f:
        image_1 = prep(load(f))

    with open(args.images[1], 'rb') as f:
        image_2 = prep(load(f))

    score = get_fid(image_1, image_2)

    print('Calculated FID: %.3f' % score)
else:
    # Pair each .pickle file in the test directory with exists_next's payload,
    # keeping only entries whose exists_next flag is truthy
    files = [(ff[0], ff[1][1]) for ff in [(path.join(args.test_path[0], f),
                                           exists_next(args.test_path[0], f))
                                          for f in listdir(args.test_path[0])
                                          if f.split('.')[-1] == 'pickle']
             if ff[1][0]]
    count = min(len(files), args.test_count[0] or len(files))

    with open(args.generator[0], 'rb') as f:
        generator_weights, _ = load(f)

    generator = get_generator()
Example #6
def getFID(netG):
    # FID between 10k real samples and 10k samples drawn from the generator
    realdata = getRealData(10000)
    fakedata = getFakedata(10000, netG)
    FID_val = fid.get_fid(realdata, fakedata)
    return FID_val
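For reference, FID implementations such as the fid.get_fid calls in these examples typically fit a Gaussian to Inception activations of each image set and compute the Fréchet distance between the two fits. A minimal sketch of that final step (act1 and act2 are assumed to be activation arrays already extracted by an Inception network, one row per image):

import numpy as np
from scipy import linalg

def frechet_distance(act1, act2):
    # Fit a Gaussian (mean, covariance) to each set of activations
    mu1, sigma1 = act1.mean(axis=0), np.cov(act1, rowvar=False)
    mu2, sigma2 = act2.mean(axis=0), np.cov(act2, rowvar=False)
    # ||mu1 - mu2||^2 + Tr(sigma1 + sigma2 - 2 * sqrt(sigma1 @ sigma2))
    covmean = linalg.sqrtm(sigma1 @ sigma2)
    if np.iscomplexobj(covmean):  # numerical noise can yield tiny imaginary parts
        covmean = covmean.real
    diff = mu1 - mu2
    return diff @ diff + np.trace(sigma1 + sigma2 - 2 * covmean)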
Example #7
def train(epoch):
    model.train()
    train_recon_enc_loss = 0
    train_recon_dec_loss = 0

    for batch_idx, (data, _) in tqdm(enumerate(train_loader)):
        # create labels
        fake_label = np.random.choice(a=[0.1, 0.9], p=[0.95, 0.05])
        real_label = np.random.choice(a=[0.1, 0.9], p=[0.05, 0.95])
        data = data.to(device)

        ### Discriminator ###

        netD.zero_grad()

        label = torch.full((data.size()[0], ), real_label, device=device)
        # Forward pass real batch through D
        output, sim_real = netD(data)
        # Calculate loss on all-real batch
        errD_real = criterion(output, label)
        # Calculate gradients for D in backward pass
        errD_real.backward()

        D_x = output.mean().item()

        ## Train with all-fake batch
        # Generate batch of latent vectors
        noise = torch.randn(data.size()[0], 128, device=device)
        # Generate fake image batch with G
        fake = model.module.decode(noise)
        label.fill_(fake_label)
        # Classify all fake batch with D
        output, _ = netD(fake.detach())
        # Calculate D's loss on the all-fake batch
        errD_fake = criterion(output, label)

        # Calculate the gradients for this batch
        errD_fake.backward()
        # Update D
        optimizerD.step()

        ### Decoder ###

        model.zero_grad()
        label.fill_(real_label)  # fake labels are real for generator cost

        # Freeze the encoder and unfreeze the decoder; requires_grad_ on an
        # nn.Module recursively toggles requires_grad on its parameters
        for m in (model.module.features, model.module.x_to_mu,
                  model.module.x_to_logvar):
            m.requires_grad_(False)
        for m in (model.module.preprocess, model.module.deconv1,
                  model.module.act1, model.module.deconv2, model.module.act2,
                  model.module.deconv3, model.module.act3,
                  model.module.deconv4, model.module.activation):
            m.requires_grad_(True)
        recon_batch, mu, logvar = model(data)

        # Since we just updated D, perform another forward pass of all-fake batch through D
        output_fake, _ = netD(fake)

        # should add this too
        output_recon, sim_recon = netD(recon_batch)

        # Calculate G's loss based on this output
        errG_fake = criterion(output_fake, label)
        errG_recon = criterion(output_recon, label)
        # Calculate gradients for G
        errG_fake.backward()
        # Retain the reconstruction graph: recon_dec_loss.backward() below
        # backpropagates through it again
        errG_recon.backward(retain_graph=True)
        recon_dec_loss = reconstruction_loss(recon_x=recon_batch.to(device),
                                             x=data,
                                             mu=mu.to(device),
                                             logvar=logvar.to(device),
                                             is_gen=True,
                                             sim_real=sim_real,
                                             sim_recon=sim_recon)
        recon_dec_loss.backward()
        optimizer.step()

        ### Encoder ###
        model.zero_grad()

        # Unfreeze the encoder and freeze the decoder for the encoder update
        for m in (model.module.features, model.module.x_to_mu,
                  model.module.x_to_logvar):
            m.requires_grad_(True)
        for m in (model.module.preprocess, model.module.deconv1,
                  model.module.act1, model.module.deconv2, model.module.act2,
                  model.module.deconv3, model.module.act3,
                  model.module.deconv4, model.module.activation):
            m.requires_grad_(False)

        recon_batch, mu, logvar = model(data)

        recon_enc_loss = reconstruction_loss(recon_x=recon_batch.to(device),
                                             x=data,
                                             mu=mu.to(device),
                                             logvar=logvar.to(device),
                                             is_gen=False)
        recon_enc_loss.backward()

        train_recon_enc_loss += recon_enc_loss.item()
        train_recon_dec_loss += recon_dec_loss.item()

        optimizer.step()

    # Calculate FID score
    generate_samples(epoch, 100)
    fid = get_fid(opt.save_path + '/fid_results/', opt.fid_path_pretrained)

    # Log epoch statistics
    avg_recon_enc_loss = train_recon_enc_loss / len(train_loader.dataset)
    avg_recon_dec_loss = train_recon_dec_loss / len(train_loader.dataset)

    log({
        "Epoch": epoch,
        "Average Dec Recon loss": avg_recon_dec_loss,
        "Average Enc Recon loss": avg_recon_enc_loss,
        "D(x)": D_x,
        "FID": fid
    })
    print(
        f'====> Epoch: {epoch} Average Dec_Recon loss: {avg_recon_dec_loss:.4f} Average Enc_Recon loss: {avg_recon_enc_loss:.4f} D(x): {D_x:.4f} FID: {fid}'
    )
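The alternating freeze/unfreeze blocks in train() can be factored into a small helper. This is a hypothetical refactoring, not part of the original code:

from typing import Iterable

from torch import nn


def set_trainable(modules: Iterable[nn.Module], flag: bool) -> None:
    # requires_grad_ recursively toggles requires_grad on each module's parameters
    for m in modules:
        m.requires_grad_(flag)

# Decoder phase would then read:
#   set_trainable(encoder_parts, False)
#   set_trainable(decoder_parts, True)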
Example #8
            model_path = opt.save_path + "/models/model_%.tar"
            # Remove the checkpoint from five epochs ago to bound disk usage
            if os.path.isfile(model_path.replace('%', str(epoch - 5))):
                os.remove(model_path.replace('%', str(epoch - 5)))
            torch.save(
                {
                    'epoch': epoch + 1,
                    "encoder_decoder_model": model.module.state_dict(),
                    "discriminator_model": netD.state_dict(),
                    'encoder_decoder_optimizer': optimizer.state_dict(),
                    'discriminator_optimizer': optimizerD.state_dict(),
                }, model_path.replace('%', str(epoch + 1)))

    # Generate a cluster of images from reconstructions and samples
    elif opt.load_model and not opt.fid:
        generate_reconstructions(epoch,
                                 results_path="quick_results",
                                 singles=False,
                                 fid=False)
        generate_samples(epoch,
                         n_samples=80,
                         results_path="quick_results",
                         singles=False)
    # Generate images for FID analysis
    elif opt.load_model and opt.fid:
        generate_samples(start_epoch,
                         n_samples=1000,
                         results_path="fid_results")
        print(
            get_fid(opt.save_path + '/fid_results',
                    "/home/shared/evaluation/fid/fid_stats_celeba.npz"))