"ForwardIter": "frwlancz"
}
if len(args.idx_rg) == 2:
    id_str, id_end = args.idx_rg[0], args.idx_rg[1]
    id_end = min(id_end, codes_all.shape[0])
else:
    print("doing it all! ")
    id_str, id_end = 0, codes_all.shape[0]

t0 = time()
for imgi in range(id_str, id_end):  #range(pasu_codes.shape[0] - 1, 0, -1):
    code = codes_all[imgi, :]
    feat = torch.from_numpy(code[np.newaxis, :])
    feat.requires_grad_(False)
    if hessian_method == "BackwardIter":
        metricHVP = GANHVPOperator(G, feat, model_squ)
        eigvals, eigvects = lanczos(
            metricHVP, num_eigenthings=800,
            use_gpu=True)  # takes 113 sec on K20x cluster,
        eigvects = eigvects.T  # note: lanczos returns eigenvectors as rows, unlike np.linalg.eigh, which returns them as columns
        # This spectrum corresponds closely to that of the full Hessian, since both use the same computation graph.
    elif hessian_method == "ForwardIter":
        metricHVP = GANForwardMetricHVPOperator(G,
                                                feat,
                                                model_squ,
                                                preprocess=lambda img: img,
                                                EPS=args.EPS)  #1E-3,)
        eigvals, eigvects = lanczos(
            metricHVP,
            num_eigenthings=800,
            use_gpu=True,
            max_steps=200,
            tol=1e-6,
        )  # the original snippet is truncated here; the call is closed following the "ForwardIter" branch of hessian_compute (Example #5)
        eigvects = eigvects.T
Example #2
#%% Compute Hessian decomposition and get the vectors
print("Computing Hessian Decomposition Through Lanczos decomposition")
t0 = time()
feat = torch.from_numpy(sphere_norm * PC1_vect).float().requires_grad_(False).cuda()
eigenvals, eigenvecs = compute_hessian_eigenthings(G, feat, model_squ,
    num_eigenthings=800, mode="lanczos", use_gpu=True)
print("%.2f sec"% (time() - t0))  # 31.75 secs for 300 eig, 87.52 secs for 800 eigs. 
eigenvals = eigenvals[::-1]
eigenvecs = eigenvecs[::-1,:]
#% Angle with PC1 vector
innerprod2PC1 = PC1_vect @ eigenvecs.T
print("EigenDecomposition of Hessian of Image Similarity Metric\nEigen value: max %.3E min %.3E std %.3E \nEigen vector: Innerproduct max %.3E min %.3E std %.3E"%
      (eigenvals.max(), eigenvals.min(), eigenvals.std(), innerprod2PC1.max(), innerprod2PC1.min(), innerprod2PC1.std()))
#%% Use the metric on the PC space. Compute the vHv value for the PC vectors
GHVP = GANHVPOperator(G, torch.tensor(sphere_norm * PC1_vect).float(), model_squ, use_gpu=True)
PC_vHv_vals = []
PC_vecs_tsr = torch.tensor(PC_vectors).float().cuda()
for i in range(PC_vecs_tsr.shape[0]):
    PC_vHv_vals.append(GHVP.vHv_form(PC_vecs_tsr[i, :]).item())
print("vHv of top PC space: mean %.2E max %.2E (rank in spectrum %d)"%(np.mean(PC_vHv_vals), np.max(PC_vHv_vals), 1 + sum(eigenvals > np.mean(PC_vHv_vals))))
# vHv of top PC space: mean 1.14E-04 max 1.75E-04 (rank in spectrum 235)
# So the vHv values in the top-PC subspace are typically small.
#%% Create images along the spectrum
save_indiv = True
save_row = False
vec_norm = sphere_norm
ang_step = 180 / 10
theta_arr_deg = ang_step * np.linspace(-5, 5, 11)  # i.e. ang_step * np.arange(-5, 6)
theta_arr = theta_arr_deg / 180 * np.pi
img_list_all = []
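#%% (The snippet above is truncated before the loop that uses theta_arr. Below is a minimal,
#    hypothetical sketch of one way such a loop could look, assuming `G.visualize`, `PC1_vect`,
#    and `eigenvecs` from the cells above; it is not the original code, and the eigenvector
#    indices are an arbitrary illustrative choice.)
ref_unit = PC1_vect.flatten() / np.linalg.norm(PC1_vect)
for eigi in (1, 2, 4, 8, 16):  # hypothetical eigenvector indices to visualize
    eig_vec = eigenvecs[eigi, :].flatten()
    # orthogonalize against the reference direction so the theta rotation stays on the sphere
    eig_unit = eig_vec - (eig_vec @ ref_unit) * ref_unit
    eig_unit = eig_unit / np.linalg.norm(eig_unit)
    img_row = []
    for theta in theta_arr:
        vec = vec_norm * (np.cos(theta) * ref_unit + np.sin(theta) * eig_unit)
        img = G.visualize(torch.from_numpy(vec[np.newaxis, :]).float().cuda())
        img_row.append(img.detach().cpu())
    img_list_all.append(img_row)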
Example #3
            H = get_full_hessian(dsim, mov_z)
            del dsim
            torch.cuda.empty_cache()
            eigvals, eigvects = np.linalg.eigh(H)
        elif args.method == "ForwardIter":
            G.select_trunc(truncation, mean_latent)
            SGhvp = GANForwardMetricHVPOperator(G, ref_z, ImDist, preprocess=lambda img: img, EPS=5E-2, )
            eigenvals, eigenvecs = lanczos(SGhvp, num_eigenthings=250, max_steps=200, tol=1e-5, )
            eigenvecs = eigenvecs.T
            sort_idx = np.argsort(np.abs(eigenvals))
            eigvals = np.abs(eigenvals[sort_idx])
            eigvects = eigenvecs[:, sort_idx]
            H = eigvects @ np.diag(eigvals) @ eigvects.T
        elif args.method == "BackwardIter":
            G.select_trunc(truncation, mean_latent)
            SGhvp = GANHVPOperator(G, ref_z, ImDist, preprocess=lambda img: img)
            eigenvals, eigenvecs = lanczos(SGhvp, num_eigenthings=250, max_steps=200, tol=1e-5, )
            eigenvecs = eigenvecs.T
            sort_idx = np.argsort(np.abs(eigenvals))
            eigvals = np.abs(eigenvals[sort_idx])
            eigvects = eigenvecs[:, sort_idx]
            H = eigvects @ np.diag(eigvals) @ eigvects.T
        print("Computing Hessian Completed, %.1f sec" %(time()-T00))
#%%
        plt.figure(figsize=[7,5])
        plt.subplot(1, 2, 1)
        plt.plot(eigvals[::-1])
        plt.ylabel("eigenvalue")
        plt.xlim(right=latent)
        plt.subplot(1, 2, 2)
        plt.plot(np.log10(eigvals[::-1]))
Example #4
# alexnet = tv.models.alexnet(pretrained=True).cuda()
# for param in alexnet.parameters():
#     param.requires_grad_(False)

#%% Load the pasupathy codes
from scipy.io import loadmat
code_path = r"E:\OneDrive - Washington University in St. Louis\ref_img_fit\Pasupathy\pasu_fit_code.mat"
out_dir = r"E:\OneDrive - Washington University in St. Louis\ref_img_fit\Pasupathy\Nullspace"
data = loadmat(code_path)
pasu_codes = data['pasu_code']
#%% Compute the Hessian around a certain Pasupathy image.
t0 = time()
for imgi, code in enumerate(pasu_codes[:, :]):
    feat = torch.from_numpy(code[np.newaxis, :])
    feat.requires_grad_(False)
    metricHVP = GANHVPOperator(G, feat, model_squ)
    eigvals, eigvects = lanczos(metricHVP, num_eigenthings=800, use_gpu=True)
    print("Finish computing img %d %.2f sec passed, max %.2e min %.2e 10th %.1e 50th %.e 100th %.1e" % (imgi,
        time() - t0, max(np.abs(eigvals)), min(np.abs(eigvals)), eigvals[-10], eigvals[-50], eigvals[-100]))
    np.savez(join(out_dir, "pasu_%03d.npz" % imgi), eigvals=eigvals, eigvects=eigvects, code=code)
#%%
imgi, imgj = 0, 1
with np.load(join(out_dir, "pasu_%03d.npz" % imgi)) as data:
    basisi = data["eigvects"]
    eigvi = data["eigvals"]
    codei = data["code"]

with np.load(join(out_dir, "pasu_%03d.npz" % imgj)) as data:
    basisj = data["eigvects"]
    eigvj = data["eigvals"]
    codej = data["code"]
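#%% (A minimal sketch, not original code: one way to compare the two Hessian eigenframes loaded
#    above, assuming `eigvects` was saved with eigenvectors as rows, as returned by lanczos.)
import matplotlib.pyplot as plt
cross_prod = basisi @ basisj.T  # pairwise inner products between the unit eigenvectors
plt.matshow(np.abs(cross_prod), cmap="viridis")
plt.colorbar()
plt.title("|<v_i, v_j>| between Hessian eigenframes of pasu img %d and img %d" % (imgi, imgj))
plt.show()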
Example #5
def hessian_compute(G,
                    feat,
                    ImDist,
                    hessian_method="BackwardIter",
                    cutoff=None,
                    preprocess=lambda img: img,
                    EPS=1E-2,
                    device="cuda"):
    """Higher level API for GAN hessian compute
    Parameters:
        G: GAN, usually wrapped up by a custom class. Equipped with a `visualize` function that takes a torch vector and
           output a torch image
        feat: a latent code as input to the GAN.
        ImDist: the image distance function. Support dsim = ImDist(img1, img2). takes in 2 torch images and output a
           scalar distance. Pass gradient.
       hessian_method: Currently, "BP" "ForwardIter" "BackwardIter" are supported
       preprocess: or post processing is the operation on the image generated by GAN. Default to be an identity map.
            `lambda img: F.interpolate(img, (256, 256), mode='bilinear', align_corners=True)` is a common choice.
        cutoff: For iterative methods, "ForwardIter" "BackwardIter" this specify how many eigenvectors it's going to
            compute.
    """
    if cutoff is None: cutoff = feat.numel() // 2 - 1
    if 'to' in dir(ImDist): ImDist.to(device)
    if hessian_method == "BackwardIter":
        metricHVP = GANHVPOperator(G, feat, ImDist, preprocess=preprocess)
        eigvals, eigvects = lanczos(
            metricHVP, num_eigenthings=cutoff,
            use_gpu=True)  # takes 113 sec on K20x cluster,
        eigvects = eigvects.T  # note: lanczos returns eigenvectors as rows, unlike np.linalg.eigh, which returns them as columns
        H = eigvects @ np.diag(eigvals) @ eigvects.T
        # This spectrum corresponds closely to that of the full Hessian, since both use the same computation graph.
    elif hessian_method == "ForwardIter":
        metricHVP = GANForwardMetricHVPOperator(G,
                                                feat,
                                                ImDist,
                                                preprocess=preprocess,
                                                EPS=EPS)  # 1E-3,)
        eigvals, eigvects = lanczos(
            metricHVP,
            num_eigenthings=cutoff,
            use_gpu=True,
            max_steps=200,
            tol=1e-6,
        )
        eigvects = eigvects.T
        H = eigvects @ np.diag(eigvals) @ eigvects.T
        # EPS=1E-2, max_steps=20 takes 84 sec on K20x cluster.
        # The resulting Hessian is not as close to the full (BP) Hessian as the "BackwardIter" result.
    elif hessian_method == "BP":  # 240 sec on cluster
        ref_vect = feat.detach().clone().float().to(device)
        mov_vect = ref_vect.float().detach().clone().requires_grad_(True)
        imgs1 = G.visualize(ref_vect)
        imgs2 = G.visualize(mov_vect)
        dsim = ImDist(preprocess(imgs1), preprocess(imgs2))
        H = get_full_hessian(
            dsim, mov_vect
        )  # 122 sec for a 256d hessian, # 240 sec on cluster for 4096d hessian
        eigvals, eigvects = np.linalg.eigh(H)
    else:
        raise NotImplementedError
    return eigvals, eigvects, H
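#%% (A minimal usage sketch of hessian_compute. `G` and the image distance `ImDist` are assumed
#    to be defined as described in the docstring above; the 4096-d latent is a hypothetical
#    choice matching the latent dimension used elsewhere in these examples. Note the "BP" branch
#    computes the full Hessian and is slow for high-dimensional latents.)
feat = torch.randn(1, 4096).cuda()
eigvals_bp, eigvects_bp, H_bp = hessian_compute(G, feat, ImDist, hessian_method="BP")
eigvals_fi, eigvects_fi, H_fi = hessian_compute(G, feat, ImDist, hessian_method="ForwardIter",
                                                cutoff=200, EPS=1E-2)
print("Top eigenvalue  BP: %.3e   ForwardIter: %.3e" % (eigvals_bp.max(), eigvals_fi.max()))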
Example #6
                                                             -1).mean(axis=1)

        return neuron_objective


# for name, hk in feat_dict.items():
#     hk.close()
#%%
import torchvision as tv
# VGG = tv.models.vgg16(pretrained=True)
alexnet = tv.models.alexnet(pretrained=True).cuda()
for param in alexnet.parameters():
    param.requires_grad_(False)
#%% This does not work: the local 2nd-order derivative is 0.
feat = torch.randn((4096), dtype=torch.float32).requires_grad_(False).cuda()
GHVP = GANHVPOperator(G, feat, model_squ)
GHVP.apply(torch.randn((4096)).requires_grad_(False).cuda())
#%%
weight = torch.randn(512, 32, 32).cuda()
objective = FeatLinModel(VGG,
                         layername='features_19',
                         type="weight",
                         weight=weight)
activHVP = GANHVPOperator(G, 5 * feat, objective, activation=True)
#%
activHVP.apply(5 * torch.randn((4096)).requires_grad_(False).cuda())
#%%
feat = torch.randn(4096).cuda()
feat.requires_grad_(True)
objective = FeatLinModel(VGG,
                         layername='features_4',