예제 #1
0
def reconstruction_loss_lpips(fake_imgs, real_imgs):
    """Summed LPIPS (VGG) perceptual loss over three fake/real pairings.

    Pairs fake_imgs[0] and fake_imgs[1] with real_imgs[0], and fake_imgs[2]
    with real_imgs[1], accumulating F.sum of each LPIPS map — following the
    official implementation, which uses F.sum rather than F.mean.

    Args:
        fake_imgs: sequence with at least 3 generated image batches.
        real_imgs: sequence with at least 2 reference image batches.

    Returns:
        Scalar loss variable.
    """
    with nn.parameter_scope("VGG"):
        metric = LPIPS(model="vgg", params_dir="./")
        loss = F.sum(metric(fake_imgs[0], real_imgs[0]))
        # Remaining (fake, real) pairings accumulated onto the first term.
        for fake, real in ((fake_imgs[1], real_imgs[0]),
                           (fake_imgs[2], real_imgs[1])):
            loss = loss + F.sum(metric(fake, real))
    return loss
예제 #2
0
import numpy as np
import matplotlib.pylab as plt
from matplotlib import cm
from tqdm import tqdm
from time import time
from os.path import join
import os
import sys
import lpips
from GAN_hessian_compute import hessian_compute, get_full_hessian
from torchvision.transforms import ToPILImage
from torchvision.utils import make_grid
from GAN_utils import loadBigGAN, loadStyleGAN2, BigGAN_wrapper, StyleGAN2_wrapper, loadStyleGAN, StyleGAN_wrapper
from hessian_analysis_tools import plot_spectra, compute_hess_corr
from lpips import LPIPS
# Fix: torch is used below (randperm / save) but was never imported in this
# script — without this line the cell fails with NameError.
import torch

ImDist = LPIPS(net="squeeze")
datadir = r"E:\OneDrive - Washington University in St. Louis\HessNetArchit\StyleGAN"
os.makedirs(datadir, exist_ok=True)
#%%
# Load StyleGAN and grab its weight dictionary.
StyleGAN = loadStyleGAN()
SD = StyleGAN.state_dict()
#%%
# Build a control model: shuffle each weight tensor element-wise, preserving
# its shape and value distribution but destroying learned structure.
shuffled_SD = {}
for name, Weight in SD.items():
    idx = torch.randperm(Weight.numel())
    W_shuf = Weight.view(-1)[idx].view(Weight.shape)
    shuffled_SD[name] = W_shuf
#%%
# Persist the shuffled state dict for later loading as a baseline model.
torch.save(shuffled_SD, join(datadir, "StyleGAN_shuffle.pt"))
#%%
예제 #3
0
def get_lpips() -> nn.Module:
    """Return the module-level LPIPS instance, constructing it on first use."""
    global lpips
    if lpips is not None:
        return lpips
    # Lazy construction keeps module import cheap; verbose=False silences
    # the weight-loading chatter.
    lpips = LPIPS(verbose=False)
    return lpips
예제 #4
0
def evaluation(model_name, weights, data_loader, use_cuda=False):
    '''
    Evaluate a model.

    Parameters
    ----------
    model_name : str
        Name of the model to be evaluated.
    weights : str
        Path to weights for the initialisation of the model. If None, weights
        are randomly initialized.
    data_loader : torch.utils.data.dataloader.DataLoader
        Data loader of the dataset.
    use_cuda : bool, optional
        Whether or not to use CUDA. The default is False.

    Raises
    ------
    NameError
        If the model is not recognised.

    Returns
    -------
    df : pandas.DataFrame
        Data frame with the evaluation data.

    '''
    # Zhang16 and Su20 share identical post-processing: predicted ab channels
    # are resized to 256x256 and concatenated with the input L channel before
    # Lab -> RGB conversion.
    if model_name in ("Zhang16", "Su20"):
        model = (Zhang16 if model_name == "Zhang16" else Su20)(weights=weights)
        resize = transforms.Resize((256, 256))
        process_output = lambda output, data: lab2rgb(
            torch.cat((data.cpu(), resize(z2ab(output.cpu()))), dim=1))
    elif model_name == "Collage":
        model = Collage(weights=weights)
        process_output = lambda output, data: lab2rgb(
            torch.cat((data, output), dim=1).cpu())
    else:
        raise NameError(model_name)
    if use_cuda:
        print("Using GPU.")
        model.cuda()
    else:
        print("Using CPU.")
    model.eval()

    # One row per dataset sample, filled incrementally below.
    df = pd.DataFrame(columns=['name', 'nrmse', 'ssim', 'psnr', 'lpips'],
                      index=range(len(data_loader.dataset)))
    idx = 0
    lpips_loss = LPIPS(net='alex')
    # Pure inference: disable autograd so no computation graph is kept alive
    # across the loop (the original built gradients for every batch).
    with torch.no_grad():
        for ite, (names, (data, target)) in enumerate(tqdm(data_loader)):
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            images_true = lab2rgb(torch.cat((data, target), dim=1).cpu())
            images_test = process_output(output, data)
            # LPIPS expects inputs scaled to [-1, 1].
            lpips_values = lpips_loss(images_true * 2. - 1.,
                                      images_test * 2. - 1.)
            for i, name in enumerate(names):
                # HWC numpy images for the skimage metrics.
                image_true = images_true[i].permute(1, 2, 0).numpy()
                image_test = images_test[i].permute(1, 2, 0).numpy()
                nrmse = normalized_root_mse(image_true, image_test)
                ssim = structural_similarity(image_true,
                                             image_test,
                                             data_range=1.,
                                             multichannel=True)
                psnr = peak_signal_noise_ratio(image_true,
                                               image_test,
                                               data_range=1.)
                df.loc[idx, 'name'] = name
                df.loc[idx, 'nrmse'] = nrmse
                df.loc[idx, 'ssim'] = ssim
                df.loc[idx, 'psnr'] = psnr
                df.loc[idx, 'lpips'] = lpips_values[i].item()
                idx += 1

    return df
예제 #5
0
    def run(
            self,
            image,
            need_align=False,
            start_lr=0.1,
            final_lr=0.025,
            latent_level=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
                          11],  # for ffhq (0~17)
            step=100,
            mse_weight=1,
            pre_latent=None):
        """Fit a W+ latent code that reconstructs `image` through the generator.

        Only the latent layers listed in `latent_level` are optimized; the
        remaining layers stay fixed at their initial value. The objective is
        LPIPS perceptual loss plus `mse_weight` * pixel MSE, minimized with
        Adam whose learning rate is scheduled from `start_lr` toward
        `final_lr` over `step` iterations.

        Parameters
        ----------
        image : path to the source image file.
        need_align : if True, run face alignment first; otherwise load as RGB.
        start_lr, final_lr : learning-rate schedule endpoints.
        latent_level : indices of W+ layers to optimize.
            NOTE(review): mutable default argument — safe only if callers
            never mutate it in place.
        step : number of optimization iterations.
        mse_weight : weight of the MSE term relative to the perceptual term.
        pre_latent : optional path to a saved .npy latent used as the start
            point; when None, the mean of 4096 random style samples is used.

        Returns
        -------
        (src image array, reconstructed image array, fitted latent array).
        Side effects: writes src/dst images and the latent .npy under
        ``self.output_path``.
        """

        # Load (and optionally face-align) the source image.
        if need_align:
            src_img = run_alignment(image)
        else:
            src_img = Image.open(image).convert("RGB")

        generator = self.generator
        generator.train()

        percept = LPIPS(net='vgg')
        # on PaddlePaddle, lpips's default eval mode means no gradients.
        percept.train()

        # Number of random style samples used to estimate the mean W latent.
        n_mean_latent = 4096

        # Preprocess to a 256x256 CHW tensor; Normalize with mean=std=127.5
        # maps pixel values from [0, 255] to [-1, 1].
        transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(256),
            transforms.Transpose(),
            transforms.Normalize([127.5, 127.5, 127.5], [127.5, 127.5, 127.5]),
        ])

        imgs = paddle.to_tensor(transform(src_img)).unsqueeze(0)

        if pre_latent is None:
            # Start from the mean W latent, tiled over batch and all layers.
            with paddle.no_grad():
                noise_sample = paddle.randn(
                    (n_mean_latent, generator.style_dim))
                latent_out = generator.style(noise_sample)

                latent_mean = latent_out.mean(0)

            latent_in = latent_mean.detach().clone().unsqueeze(0).tile(
                (imgs.shape[0], 1))
            latent_in = latent_in.unsqueeze(1).tile(
                (1, generator.n_latent, 1)).detach()

        else:
            # Resume from a previously fitted latent stored as .npy.
            latent_in = paddle.to_tensor(np.load(pre_latent)).unsqueeze(0)

        # Partition layers into optimized (var_levels) and frozen
        # (const_levels) sets.
        var_levels = list(latent_level)
        const_levels = [
            i for i in range(generator.n_latent) if i not in var_levels
        ]
        assert len(var_levels) > 0
        if len(const_levels) > 0:
            # Keep a detached copy of the frozen layers; latent_in now holds
            # only the optimized slice.
            latent_fix = latent_in.index_select(paddle.to_tensor(const_levels),
                                                1).detach().clone()
            latent_in = latent_in.index_select(paddle.to_tensor(var_levels),
                                               1).detach().clone()

        # Enable gradients on the optimized latent slice.
        latent_in.stop_gradient = False

        optimizer = optim.Adam(parameters=[latent_in], learning_rate=start_lr)

        pbar = tqdm(range(step))

        for i in pbar:
            # Advance the learning-rate schedule.
            t = i / step
            lr = get_lr(t, step, start_lr, final_lr)
            optimizer.set_lr(lr)

            if len(const_levels) > 0:
                # Re-interleave optimized and frozen layer latents back into
                # the full n_latent stack, in layer order.
                latent_dict = {}
                for idx, idx2 in enumerate(var_levels):
                    latent_dict[idx2] = latent_in[:, idx:idx + 1]
                for idx, idx2 in enumerate(const_levels):
                    latent_dict[idx2] = (latent_fix[:, idx:idx + 1]).detach()
                latent_list = []
                for idx in range(generator.n_latent):
                    latent_list.append(latent_dict[idx])
                latent_n = paddle.concat(latent_list, 1)
            else:
                latent_n = latent_in

            img_gen, _ = generator([latent_n],
                                   input_is_latent=True,
                                   randomize_noise=False)

            batch, channel, height, width = img_gen.shape

            # Average-pool the generator output down to 256x256 so it matches
            # the preprocessed target resolution.
            if height > 256:
                factor = height // 256

                img_gen = img_gen.reshape((batch, channel, height // factor,
                                           factor, width // factor, factor))
                img_gen = img_gen.mean([3, 5])

            # Perceptual term plus weighted pixel term.
            p_loss = percept(img_gen, imgs).sum()
            mse_loss = F.mse_loss(img_gen, imgs)
            loss = p_loss + mse_weight * mse_loss

            optimizer.clear_grad()
            loss.backward()
            optimizer.step()

            pbar.set_description(
                (f"perceptual: {p_loss.numpy()[0]:.4f}; "
                 f"mse: {mse_loss.numpy()[0]:.4f}; lr: {lr:.4f}"))

        # Final forward pass with the fitted latent.
        img_gen, _ = generator([latent_n],
                               input_is_latent=True,
                               randomize_noise=False)
        dst_img = make_image(img_gen)[0]
        dst_latent = latent_n.numpy()[0]

        # Persist source image, reconstruction, and latent code.
        os.makedirs(self.output_path, exist_ok=True)
        save_src_path = os.path.join(self.output_path, 'src.fitting.png')
        cv2.imwrite(save_src_path,
                    cv2.cvtColor(np.asarray(src_img), cv2.COLOR_RGB2BGR))
        save_dst_path = os.path.join(self.output_path, 'dst.fitting.png')
        cv2.imwrite(save_dst_path, cv2.cvtColor(dst_img, cv2.COLOR_RGB2BGR))
        save_npy_path = os.path.join(self.output_path, 'dst.fitting.npy')
        np.save(save_npy_path, dst_latent)

        return np.asarray(src_img), dst_img, dst_latent
예제 #6
0
import torch
import torch.nn.functional as F
import numpy as np
import matplotlib.pylab as plt
from matplotlib import cm
from tqdm import tqdm
from time import time
from os.path import join
import os
import sys
import lpips
from GAN_hessian_compute import hessian_compute, get_full_hessian
from torchvision.transforms import ToPILImage
from torchvision.utils import make_grid
from lpips import LPIPS
# Fix: loadStyleGAN / StyleGAN_wrapper are used below but were never imported
# in this script (GAN_utils is part of the same project and is imported by
# sibling scripts) — without this line the last cell fails with NameError.
from GAN_utils import loadStyleGAN, StyleGAN_wrapper

# Squeeze-net LPIPS as the perceptual image distance, moved to GPU.
ImDist = LPIPS(net="squeeze")
ImDist.cuda()
rootdir = r"E:\OneDrive - Washington University in St. Louis\Hessian_summary\App\Inversion"
os.makedirs(rootdir, exist_ok=True)
#%%
# Precomputed average Hessian eigen-decomposition of StyleGAN.
data = np.load(
    r"E:\OneDrive - Washington University in St. Louis\Hessian_summary\StyleGAN\H_avg_StyleGAN.npz"
)
evc, eva = data["evc_avg"], data["eva_avg"],
# Eigenvectors / eigenvalues as float GPU tensors for downstream optimization.
evc_tsr = torch.from_numpy(evc).cuda().float()
eva_tsr = torch.from_numpy(eva).cuda().float()
#%%
from torch.optim import Adam, SGD
SGAN = loadStyleGAN()
G = StyleGAN_wrapper(SGAN)
#%%
예제 #7
0
# Snapshot the learned linear deformator's weight matrix (columns are
# latent-space direction vectors).
Wmat = deformator.linear.weight.detach().clone()
#%
# Columns selected by annot_dict are the previously annotated axes;
# transpose so each row becomes one direction vector.
prev_axes = Wmat[:, list(annot_dict.values())].T.cpu().numpy()
np.savez(join(savedir, "prev_axes.npz"),
         basis=Wmat.cpu().numpy(),
         annot_dict=annot_dict,
         prev_axes=prev_axes)
#%%
# Walk along direction #20 around a random reference latent: 5 evenly spaced
# steps in [-9, 9] added to refvec, then render and display the image strip.
refvec = torch.randn(1, 512).cuda()
movvecs = refvec + torch.linspace(-9, 9, 5).cuda().unsqueeze(1) @ Wmat[:,
                                                                       20:21].T
imgs = G((movvecs).unsqueeze(2).unsqueeze(3))  # generator expects NCHW-style 4D latents
show_imgrid([torch.clamp((imgs + 1) / 2, 0, 1)])
#%%
from lpips import LPIPS
# Frozen squeeze-net LPIPS used purely as a fixed image-distance metric.
ImDist = LPIPS(net="squeeze").cuda()
ImDist.requires_grad_(False)


#%%
class PGGAN_wrapper2():  # nn.Module
    """
    model = loadPGGAN(onlyG=False)
    G = PGGAN_wrapper(model.avgG)

    model = loadPGGAN()
    G = PGGAN_wrapper(model)
    """
    def __init__(
        self,
        PGGAN,
예제 #8
0
from torchvision.transforms import Resize, ToTensor
import matplotlib.pylab as plt
from GAN_utils import StyleGAN2_wrapper, loadStyleGAN2
from lpips import LPIPS
from load_hessian_data import load_Haverage
from torch_utils import show_imgrid, save_imgrid
def MSE(im1, im2, mask=None):
    """Per-sample mean squared error between two image batches.

    im1, im2: tensors of shape [N, C, H, W].
    mask: optional [N, H, W] spatial mask; when given, the channel-averaged
    squared error is averaged only over masked pixels (weighted by mask).

    Returns a length-N tensor of per-sample errors.
    """
    sq_err = (im1 - im2) ** 2
    if mask is None:
        # Plain MSE over channel and spatial dimensions.
        return sq_err.mean(dim=[1, 2, 3])
    # Average over channels first, then take the mask-weighted spatial mean.
    weighted = sq_err.mean(dim=1) * mask
    return weighted.sum(dim=[1, 2]) / mask.sum(dim=[1, 2])
#%
# Spatial VGG-LPIPS: with .spatial = True the network returns a per-pixel
# distance map (singleton channel dim) instead of one scalar per image pair.
D = LPIPS(net="vgg")
D.cuda()
D.requires_grad_(False)  # frozen: used purely as a fixed metric
D.spatial = True
def mask_LPIPS(im1, im2, mask=None):
    """Masked spatial LPIPS distance between two image batches.

    Uses the module-level spatial LPIPS network `D`, whose output is a
    per-pixel distance map with a singleton channel dimension.
    mask: optional [N, H, W] weights; the result is the mask-weighted mean.

    Returns a length-N tensor of per-sample distances.
    """
    distmap = D(im1, im2)  # note there is a singleton channel dimension
    if mask is None:
        # Plain mean over channel and spatial dims.
        return distmap.mean([1, 2, 3])
    # Drop the singleton channel, weight spatially, normalize by mask mass.
    weighted = (distmap[:, 0, :, :] * mask).sum([1, 2])
    return weighted / mask.sum([1, 2])

#%%
# Source photos and output directory for the photo-editing experiments.
imroot = r"E:\OneDrive - Washington University in St. Louis\GAN_photoedit\src"
resdir = r"E:\OneDrive - Washington University in St. Louis\GAN_photoedit\results"
예제 #9
0
from GAN_utils import loadBigGAN, loadBigBiGAN, loadStyleGAN2, BigGAN_wrapper, BigBiGAN_wrapper, StyleGAN2_wrapper, upconvGAN

import torch
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
from numpy.linalg import norm
import matplotlib.pylab as plt
from time import time
from os.path import join
from imageio import imwrite, imsave
rootdir = r"E:\OneDrive - Washington University in St. Louis\Hessian_summary"
"""Note the loading and visualization is fully deterministic, reproducible."""
#%%
from lpips import LPIPS
# Fix: the net must be passed by keyword. lpips.LPIPS's first positional
# parameter is `pretrained`, so LPIPS("squeeze") bound "squeeze" to
# `pretrained` and silently kept the default AlexNet backbone — every other
# script in this project uses LPIPS(net="squeeze").
ImDist = LPIPS(net="squeeze")
#%% BigGAN
# Load BigGAN-deep-256 and grab its class-embedding matrix.
BGAN = loadBigGAN("biggan-deep-256").cuda()
BG = BigGAN_wrapper(BGAN)
EmbedMat = BG.BigGAN.embeddings.weight.cpu().numpy()
figdir = join(rootdir, 'BigGAN')
Hessdir = join(rootdir, 'BigGAN')
# Precomputed average Hessian spectra, split into noise and class subspaces.
data = np.load(join(Hessdir, "H_avg_1000cls.npz"))
eva_BG = data['eigvals_avg']
evc_BG = data['eigvects_avg']
evc_nois = data['eigvects_nois_avg']
evc_clas = data['eigvects_clas_avg']
eva_nois = data['eigvals_nois_avg']
eva_clas = data['eigvals_clas_avg']
evc_clas_f = np.vstack((
    np.zeros((128, 128)),