Code example #1
        elif codes_arr.shape[1] == 4096: # FC6GAN
            norms_all = np.linalg.norm(codes_arr[:, :], axis=1)
            ax2.scatter(generations, norms_all, s=5, color="magenta", label="all", alpha=0.2)
        ax2.set_ylabel("L2 Norm", color="red", fontsize=14)
        plt.legend()
    plt.title("Optimization Trajectory of Score\n" + title_str)
    plt.legend()
    if show:
        plt.show()
    return figh
#%% Select GAN
BGAN = BigGAN.from_pretrained("biggan-deep-256")
BGAN.eval().cuda()
for param in BGAN.parameters():
    param.requires_grad_(False)
G = BigGAN_wrapper(BGAN)
#%% Alternative: FC6 upconvGAN (run this cell instead of the BigGAN cell above)
G = upconvGAN("fc6")
G.eval().cuda()
for param in G.parameters():
    param.requires_grad_(False)
#%%
# net = tv.alexnet(pretrained=True)
from insilico_Exp import TorchScorer, ExperimentEvolve
scorer = TorchScorer("alexnet")
scorer.select_unit(("alexnet", "fc6", 2))
#%%
imgs = G.visualize(torch.randn(3, 256).cuda()).cpu()  # 3 random 256-d codes (BigGAN latent size; the fc6 GAN expects 4096-d codes)
scores = scorer.score_tsr(imgs)
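#%%
# A minimal sketch of the evolution loop that the pieces above enable, using the external `cma`
# package as the optimizer (an assumption for illustration; the scripts import their own
# ExperimentEvolve class for the real experiments). The 256-d latent assumes the BigGAN wrapper
# is the active G.
import cma
import numpy as np
es = cma.CMAEvolutionStrategy(256 * [0.0], 0.06)
for gen in range(10):
    codes = es.ask()                                  # candidate latent codes (list of ndarrays)
    imgs = G.visualize(torch.tensor(np.array(codes), dtype=torch.float32).cuda()).cpu()
    scores = scorer.score_tsr(imgs)                   # activation of the selected unit per image
    es.tell(codes, [-float(s) for s in scores])       # CMA-ES minimizes, so negate the scores
    print("gen %02d  best score %.3f" % (gen, float(np.max(scores))))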

#%%
Code example #2
#%%
BGAN = loadBigGAN()
SD = BGAN.state_dict()
#%%
shuffled_SD = {}
for name, Weight in SD.items():  # permute each weight tensor: same value distribution, learned structure destroyed
    idx = torch.randperm(Weight.numel())
    W_shuf = Weight.view(-1)[idx].view(Weight.shape)
    shuffled_SD[name] = W_shuf
#%%
torch.save(shuffled_SD, join(datadir, "BigGAN_shuffle.pt"))
# print(name, Weight.shape, Weight.mean().item(), Weight.std().item())
#%%
BGAN_sf = loadBigGAN()
BGAN_sf.load_state_dict(torch.load(join(datadir, "BigGAN_shuffle.pt")))
G_sf = BigGAN_wrapper(BGAN_sf)
#%%
img = BGAN_sf.generator(torch.randn(1, 256).cuda() * 0.05, 0.7).cpu()
ToPILImage()((1 + img[0]) / 2).show()
#%%
triali = 0
savedir = r"E:\OneDrive - Washington University in St. Louis\HessNetArchit\BigGAN\ctrl_Hessians"
for triali in tqdm(range(1, 100)):
    feat = torch.cat(
        (torch.randn(128).cuda(),
         BGAN_sf.embeddings.weight[:, triali].clone()),
        dim=0)
    eigvals, eigvects, H = hessian_compute(
        G_sf,
        feat,
        ImDist,
        hessian_method="BP")
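#%%
# Follow-up sketch (an illustration, not part of the excerpt): compare the control spectrum from
# the last iteration above with the averaged spectrum of the trained BigGAN (`eva_BG` from
# H_avg_1000cls.npz, loaded as in the other scripts in this collection).
import numpy as np
import matplotlib.pylab as plt
plt.semilogy(np.sort(np.abs(eigvals))[::-1], label="weight-shuffled BigGAN")
plt.semilogy(np.sort(np.abs(eva_BG))[::-1], label="trained BigGAN (class-averaged)")
plt.xlabel("eigenvalue rank")
plt.ylabel("|eigenvalue|")
plt.legend()
plt.show()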
Code example #3
# import models  # from PerceptualSimilarity folder
# ImDist = models.PerceptualLoss(model='net-lin', net='squeeze', use_gpu=1, gpu_ids=[0])
import numpy as np
import lpips
ImDist = lpips.LPIPS(net='squeeze')
# model_vgg = models.PerceptualLoss(model='net-lin', net='vgg', use_gpu=1, gpu_ids=[0])
ImDist.cuda()
for param in ImDist.parameters():
    param.requires_grad_(False)
#%%
# BGAN = BigGAN.from_pretrained("biggan-deep-256")
# for param in BGAN.parameters():
#     param.requires_grad_(False)
# embed_mat = BGAN.embeddings.parameters().__next__().data
# BGAN.cuda()
from GAN_utils import BigGAN_wrapper, loadBigGAN
BGAN = loadBigGAN()
G = BigGAN_wrapper(BGAN)


#%%
def LExpMap(refvect, tangvect, ticks=11, lims=(-1, 1)):
    """Linear map: sample `ticks` points refvect + t * tangvect for t in `lims`."""
    refvect, tangvect = refvect.reshape(1, -1), tangvect.reshape(1, -1)
    steps = np.linspace(lims[0], lims[1], ticks)[:, np.newaxis]
    interp_vects = steps @ tangvect + refvect
    return interp_vects


def SExpMap(refvect, tangvect, ticks=11, lims=(-1, 1)):
    """Spherical map: rotate from refvect toward tangvect along the great circle they span,
    sweeping the angle over lims * pi/2."""
    refvect, tangvect = refvect.reshape(1, -1), tangvect.reshape(1, -1)
    steps = np.linspace(lims[0], lims[1], ticks)[:, np.newaxis] * np.pi / 2
    interp_vects = np.cos(steps) @ refvect + np.sin(steps) @ tangvect
    return interp_vects
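#%%
# Usage sketch (hypothetical values, not from the excerpt): interpolate a BigGAN latent along a
# random unit direction with LExpMap and render the row with the wrapper's visualize_batch_np
# helper, as used elsewhere in these scripts.
refvec = 0.06 * np.random.randn(1, 256)
tangvec = np.random.randn(1, 256)
tangvec /= np.linalg.norm(tangvec)
interp_codes = LExpMap(refvec, tangvec, ticks=7, lims=(-2, 2))  # (7, 256) codes along the line
imgs = G.visualize_batch_np(interp_codes)                       # row of rendered frames in [0, 1]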
Code example #4
elif args.G == "fc6":
    Hdata = np.load(Hdir_fc6)
#%%
"""with a correct cmaes or initialization, BigGAN can match FC6 activation."""
#%% Select GAN
from GAN_utils import BigGAN_wrapper, upconvGAN, loadBigGAN
from insilico_Exp import TorchScorer, ExperimentEvolve
if args.G == "BigGAN":
    if sys.platform == "linux":
        BGAN = get_BigGAN(version="biggan-deep-256")
    else:
        BGAN = BigGAN.from_pretrained("biggan-deep-256")
    BGAN.eval().cuda()
    for param in BGAN.parameters():
        param.requires_grad_(False)
    G = BigGAN_wrapper(BGAN)
elif args.G == "fc6":
    G = upconvGAN("fc6")
    G.eval().cuda()
    for param in G.parameters():
        param.requires_grad_(False)
#%%
# net = tv.alexnet(pretrained=True)
scorer = TorchScorer(args.net)
if args.RFresize:
    from torch_net_utils import receptive_field, receptive_field_for_unit
    from layer_hook_utils import get_module_names, register_hook_by_module_names, layername_dict
    rf_dict = receptive_field(scorer.model.features, (3, 227, 227),
                              device="cuda")
    layername = layername_dict[args.net]
    layer_name_map = {layer: str(i + 1) for i, layer in enumerate(layername)}
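#%%
# Hypothetical continuation sketch: rf_dict is keyed by layer-index strings, so map the scored
# layer's name to its index and look up the receptive field of a central unit. `args.layer` and
# the (6, 6) unit position are illustrative assumptions, not from the excerpt.
layer_key = layer_name_map[args.layer]
rf_range = receptive_field_for_unit(rf_dict, layer_key, (6, 6))
print("Receptive field of unit (6, 6) in %s:" % args.layer, rf_range)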
Code example #5
    vis_eigen_action(evc_avg[:, -eigidx - 1],
                     ref_codes,
                     BBG,
                     figdir=figdir,
                     namestr="BBG_eig%d_sph" % (eigidx + 1),
                     maxdist=0.4,
                     rown=7,
                     sphere=True,
                     transpose=False,
                     RND=RND)
    # if eigidx==10:break

#%% BigGAN
"""BigGAN"""
BGAN = loadBigGAN()
BG = BigGAN_wrapper(BGAN)
EmbedMat = BG.BigGAN.embeddings.weight.cpu().numpy()
figdir = join(axesdir, 'BigGAN')
os.makedirs(figdir, exist_ok=True)
data = np.load(join(rootdir, 'BigGAN', "H_avg_1000cls.npz"))
eva_BG = data['eigvals_avg']
evc_BG = data['eigvects_avg']
H_BG = data['H_avg']
evc_nois = data['eigvects_nois_avg']
evc_clas = data['eigvects_clas_avg']
eva_nois = data['eigvals_nois_avg']
eva_clas = data['eigvals_clas_avg']
# Pad class-space eigenvectors with zeros in the 128 noise dimensions to get full 256-d directions.
evc_clas_f = np.vstack((
    np.zeros((128, 128)),
    evc_clas,
))
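#%%
# Complementary padding sketch (an assumption mirroring evc_clas_f above): the noise-space
# eigenvectors occupy the first 128 of the 256 latent dimensions, so they get zero-padded in the
# class dimensions instead.
evc_nois_f = np.vstack((
    evc_nois,
    np.zeros((128, 128)),
))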
Code example #6
    cc_21 = np.corrcoef(vHv21, eigvals1)[0, 1]
    cclog_21 = np.corrcoef(np.log(np.abs(vHv21) + 1E-8),
                           np.log(np.abs(eigvals1) + 1E-8))[0, 1]
    if show:
        print("Applying eigvec 1->2: corr %.3f (lin) %.3f (log)" %
              (cc_12, cclog_12))
        print("Applying eigvec 2->1: corr %.3f (lin) %.3f (log)" %
              (cc_21, cclog_21))
    return cc_12, cclog_12, cc_21, cclog_21


#%%
saveroot = r"E:\Cluster_Backup"
#%%
BGAN = loadBigGAN()
G = BigGAN_wrapper(BGAN)
savedir = join(saveroot, "ImDist_cmp\\BigGAN")
os.makedirs(savedir, exist_ok=True)
SSIM_stat_col = []
MSE_stat_col = []
for idx in range(100):
    refvec = G.sample_vector(1, device="cuda")  # 0.1 * torch.randn(1, 256)
    eigvals_PS, eigvects_PS, H_PS = hessian_compute(G,
                                                    refvec,
                                                    ImDist,
                                                    hessian_method="BP")
    eigvals_SSIM, eigvects_SSIM, H_SSIM = hessian_compute(G,
                                                          refvec,
                                                          D,
                                                          hessian_method="BP")
    eigvals_MSE, eigvects_MSE, H_MSE = hessian_compute(G,
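#%%
# The MSE comparison above is cut off in this excerpt; it needs an image-distance callable with
# the same interface as ImDist. A minimal sketch of such a metric (an assumption; the script's own
# MSE object is not shown here):
import torch.nn as nn
class MSEDist(nn.Module):
    """Pixel-wise mean squared error between two image batches, usable as a drop-in distance."""
    def forward(self, im1, im2):
        return ((im1 - im2) ** 2).mean()
# e.g. MSE = MSEDist().cuda(), then hessian_compute(G, refvec, MSE, hessian_method="BP")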
Code example #7
from numpy.linalg import norm
import matplotlib.pylab as plt
from time import time
import os
from os.path import join
from torchvision.utils import make_grid
from torchvision.transforms import ToPILImage
#%%
rootdir = r"E:\OneDrive - Washington University in St. Louis\Hessian_summary"
compressdir = r"E:\OneDrive - Washington University in St. Louis\GANcompress"
figdir = r"E:\OneDrive - Washington University in St. Louis\GANcompress\BigGAN"
os.makedirs(figdir, exist_ok=True)
os.makedirs(compressdir, exist_ok=True)
#%% BigGAN
BGAN = loadBigGAN()
BG = BigGAN_wrapper(BGAN)
EmbedMat = BG.BigGAN.embeddings.weight.cpu().numpy()
data = np.load(join(rootdir, 'BigGAN', "H_avg_1000cls.npz"))
eva_BG = data['eigvals_avg']
evc_BG = data['eigvects_avg']
H_BG = data['H_avg']
#%%
cutoff = 100
samp_n = 10
classid = np.random.randint(0, 1000, samp_n)
refvec = np.vstack((0.7 * np.random.randn(128, samp_n),
                    EmbedMat[:, classid])).T
refvec_proj = refvec @ evc_BG[:, -cutoff:] @ evc_BG[:, -cutoff:].T  # project onto the span of the top `cutoff` eigenvectors
orig_img = BG.visualize_batch_np(refvec)
proj_img = BG.visualize_batch_np(refvec_proj)
mtg = make_grid(torch.cat((orig_img, proj_img)), nrow=samp_n)
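#%%
# Follow-up sketch (assumption: ImDist, the LPIPS metric used in the other scripts here, is in
# scope): quantify how much the rank-100 projection changes the rendered images.
with torch.no_grad():
    dsim = ImDist(orig_img.cuda(), proj_img.cuda())
print("mean LPIPS, original vs. projected: %.3f" % dsim.mean().item())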
Code example #8
RGB_std  = torch.tensor([0.229, 0.224, 0.225]).view(1,-1,1,1).cuda()
preprocess = Compose([lambda img: (F.interpolate(img, (224, 224), mode='bilinear', align_corners=True) - RGB_mean) / RGB_std])
preprocess_resize = Compose([lambda img: F.interpolate(img, (224, 224), mode='bilinear', align_corners=True) ])
#%%
import torch.nn as nn
from GAN_utils import BigGAN_wrapper
# class BigGAN_wrapper():#nn.Module
#     def __init__(self, BigGAN, space="class"):
#         self.BigGAN = BigGAN
#         self.space = space
#
#     def visualize(self, code, scale=1.0):
#         imgs = self.BigGAN.generator(code, 0.6) # Matlab version default to 0.7
#         return torch.clamp((imgs + 1.0) / 2.0, 0, 1) * scale

G = BigGAN_wrapper(BGAN)
# H = get_full_hessian()
#%%
savedir = r"E:\iclr2021\Results"
savedir = r"E:\OneDrive - Washington University in St. Louis\HessGANCmp"
#%%
T00 = time()
for class_id in [17, 79, 95, 107, 224, 346, 493, 542, 579, 637, 667, 754, 761, 805, 814, 847, 856, 941, 954, 968]:
    classvec = embed_mat[:, class_id:class_id+1].cuda().T
    noisevec = torch.from_numpy(truncated_noise_sample(1, 128, 0.6)).cuda()
    ref_vect = torch.cat((noisevec, classvec, ), dim=1).detach().clone()
    mov_vect = ref_vect.detach().clone().requires_grad_(True)
    #%%
    imgs1 = G.visualize(ref_vect)
    imgs2 = G.visualize(mov_vect)
    dsim = ImDist(imgs1, imgs2)
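    #%%
    # Sketch of what the commented-out get_full_hessian step amounts to (an assumption about its
    # internals, spelled out with plain torch.autograd; slow, for illustration only): differentiate
    # the image dissimilarity twice with respect to the moving latent vector.
    grad = torch.autograd.grad(dsim.sum(), mov_vect, create_graph=True)[0]  # (1, 256) gradient
    H = torch.stack([torch.autograd.grad(grad[0, i], mov_vect, retain_graph=True)[0][0]
                     for i in range(grad.shape[1])])                        # (256, 256) Hessian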
Code example #9
    import sys
    sys.path.append(r"D:\Github\PerceptualSimilarity")
    sys.path.append(r"E:\Github_Projects\PerceptualSimilarity")
    import models  # from PerceptualSimilarity folder
    ImDist = models.PerceptualLoss(model='net-lin', net='squeeze', use_gpu=1, gpu_ids=[0])
    for param in ImDist.parameters():
        param.requires_grad_(False)

    from GAN_utils import BigGAN_wrapper
    BGAN = BigGAN.from_pretrained("biggan-deep-256")
    BGAN.cuda()
    BGAN.eval()
    for param in BGAN.parameters():
        param.requires_grad_(False)
    EmbedMat = BGAN.embeddings.weight
    G = BigGAN_wrapper(BGAN)

    data = np.load("N:\Hess_imgs\summary\Hess_mat.npz")
    refvec = data["vect"]
    evc_clas = data['eigvects_clas']
    evc_clas_tsr = torch.from_numpy(data['eigvects_clas'][:, ::-1].copy()).float().cuda()
    eva_clas = data['eigvals_clas'][::-1]
    evc_nois = data['eigvects_nois']
    evc_nois_tsr = torch.from_numpy(data['eigvects_nois'][:, ::-1].copy()).float().cuda()
    eva_nois = data['eigvals_nois'][::-1]
    reftsr = torch.tensor(refvec).float().cuda()
    refimg = G.visualize(reftsr)
    ToPILImage()(refimg[0, :].cpu()).show()
    #%%
    # targ_val = np.array([0.12, 0.24, 0.36, 0.48, 0.6])
    targ_val = np.array([0.1, 0.2, 0.3, 0.4, 0.5])
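    #%%
    # Illustration sketch (hypothetical step size, not from the excerpt): perturb the reference
    # code along the top class-space eigenvector and measure the LPIPS distance to the reference
    # image, presumably the quantity that the targ_val array above refers to.
    step = 0.5
    pert = reftsr.clone().view(1, -1)
    pert[0, 128:] = pert[0, 128:] + step * evc_clas_tsr[:, 0]  # class dims are the last 128
    with torch.no_grad():
        d = ImDist(refimg, G.visualize(pert))
    print("dsim at step %.2f along the top class eigenvector: %.3f" % (step, d.item()))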
Code example #10
eva_FI, evc_FI, H_FI = hessian_compute(G, feat, ImDist, hessian_method="ForwardIter")
print("%.2f sec" % (time() - T0))  # 325.83 sec
T0 = time()
eva_BP, evc_BP, H_BP = hessian_compute(G, feat, ImDist, hessian_method="BP")
print("%.2f sec" % (time() - T0))  # 2135.00 sec
#%% BigGAN
from GAN_utils import loadBigGAN, BigGAN_wrapper
BGAN = loadBigGAN()
BGAN.cuda().eval()
G = BigGAN_wrapper(BGAN)
feat = 0.05 * torch.randn(1, 256).detach().clone().cuda()
EPS = 1E-2
T0 = time()
eva_BI, evc_BI, H_BI = hessian_compute(G, feat, ImDist, hessian_method="BackwardIter")
print("%.2f sec" % (time() - T0))  # 70.57 sec
T0 = time()
eva_FI, evc_FI, H_FI = hessian_compute(G, feat, ImDist, hessian_method="ForwardIter")
print("%.2f sec" % (time() - T0))  # 67.02 sec
T0 = time()
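#%%
# Follow-up sketch (an illustration, not part of the excerpt): the two iterative estimates can be
# cross-checked the same way the vHv correlation check in these scripts compares two Hessians, by
# applying one method's eigenvectors to the other method's Hessian matrix.
import numpy as np
vHv = np.diag(evc_FI.T @ H_BI @ evc_FI)
print("corr(vHv, eva_FI) = %.3f" % np.corrcoef(vHv, eva_FI)[0, 1])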
Code example #11
                     transpose=False)
    #%%
    vis_eigen_action(eigvect_avg[:, -5],
                     None,
                     figdir=figdir,
                     page_B=50,
                     maxdist=20,
                     rown=5,
                     transpose=False)

    #%% BigGAN on ImageNet Class Specific
    from GAN_utils import BigGAN_wrapper, loadBigGAN
    from pytorch_pretrained_biggan import BigGAN
    from torchvision.transforms import ToPILImage
    BGAN = loadBigGAN("biggan-deep-256").cuda()
    BG = BigGAN_wrapper(BGAN)
    EmbedMat = BG.BigGAN.embeddings.weight.cpu().numpy()
    #%%
    figdir = r"E:\OneDrive - Washington University in St. Louis\Hessian_summary\BigGAN"
    Hessdir = r"E:\OneDrive - Washington University in St. Louis\Hessian_summary\BigGAN"
    data = np.load(join(Hessdir, "H_avg_1000cls.npz"))
    eva_BG = data['eigvals_avg']
    evc_BG = data['eigvects_avg']
    evc_nois = data['eigvects_nois_avg']
    evc_clas = data['eigvects_clas_avg']
    #%%
    imgs = BG.render(np.random.randn(1, 256) * 0.06)
    #%%
    eigi = 5
    refvecs = np.vstack((0.5 * np.random.randn(128, 10),
                         EmbedMat[:, np.random.randint(0, 1000, 10)])).T  # noise dims first, class embedding last
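    #%%
    # Likely continuation sketch (hypothetical arguments, following the vis_eigen_action call
    # pattern shown earlier): visualize how eigenvector `eigi` of the averaged Hessian moves these
    # reference codes.
    vis_eigen_action(evc_BG[:, -eigi], refvecs, BG, figdir=figdir,
                     namestr="BigGAN_eig%d" % eigi, maxdist=0.4, rown=7,
                     sphere=False, transpose=False)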