Code Example #1
import torch

def compute_minEig(SKS, eps=1e-16):
    # Smallest eigenvalue of the symmetric matrix SKS via LOBPCG; expand adds
    # a leading batch dimension, since lobpcg accepts batched (*, N, N) input.
    minEig = torch.lobpcg(SKS.expand(1, -1, -1),
                          k=1,
                          largest=False,
                          method="ortho")[0]
    # scipy alternative (eps is only used here):
    # minEig = min(0, eigh(SKS, eigvals_only=True, subset_by_index=[0, 0])) - eps
    return minEig
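A minimal usage sketch with hypothetical data: querying the smallest eigenvalue of a small symmetric positive semi-definite matrix.

import torch

A = torch.randn(50, 20)
SKS = A @ A.t() + 1e-3 * torch.eye(50)   # 50 x 50 symmetric PSD matrix
print(compute_minEig(SKS))               # approximately 1e-3, the smallest eigenvalue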
Code Example #2
def get_top_k_eigenvalues(matrix_, num_eigs):
    # Try the iterative LOBPCG solver first; fall back to a dense
    # eigendecomposition if it fails (e.g. when the matrix is too small).
    try:
        return torch.lobpcg(matrix_.expand(1, -1, -1),
                            k=num_eigs,
                            largest=True,
                            method="ortho")[0].type(torch.complex64)
    except Exception:
        # torch.symeig was removed from recent PyTorch releases;
        # torch.linalg.eigvalsh is the dense equivalent (ascending eigenvalues).
        return torch.sort(torch.linalg.eigvalsh(matrix_),
                          descending=True)[0][0:num_eigs].type(torch.complex64)
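A minimal usage sketch with hypothetical data: the three largest eigenvalues of a random symmetric PSD matrix, returned as complex64 exactly as the helper above does.

import torch

A = torch.randn(60, 20)
M = A @ A.t()                          # 60 x 60 symmetric PSD test matrix
print(get_top_k_eigenvalues(M, 3))     # three largest eigenvalues as complex64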
Code Example #3
 def max_frequency(self, lap_type: str = "sym"):
     lap = self.L(lap_type)
     if self._max_fs is not None:
         max_fs = self._max_fs
     else:
         max_fs = torch.lobpcg(lap.to_torch_sparse_coo_tensor(), k=1, largest=True)[
             0
         ].item()
         if self.cache:
             self._max_fs = max_fs
     return max_fs
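Example #3 calls LOBPCG on a sparse Laplacian held by a graph object; below is a hedged standalone sketch of that core call, using a hypothetical path-graph Laplacian built by hand as a sparse COO tensor.

import torch

# unnormalized Laplacian of a path graph on n nodes, as a sparse COO tensor
n = 50
i = torch.arange(n - 1)
indices = torch.cat([torch.stack([i, i + 1]),          # upper off-diagonal
                     torch.stack([i + 1, i]),          # lower off-diagonal
                     torch.stack([torch.arange(n), torch.arange(n)])], dim=1)
values = torch.cat([-torch.ones(2 * (n - 1)),
                    torch.tensor([1.0] + [2.0] * (n - 2) + [1.0])])
lap = torch.sparse_coo_tensor(indices, values, (n, n)).coalesce()

max_fs = torch.lobpcg(lap, k=1, largest=True)[0].item()
print(max_fs)   # close to 4.0, the largest Laplacian eigenvalue of a long path graph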
Code Example #4
File: eig_lobpcg.py Project: jurajHasik/peps-torch
    def forward(self, M, k, tol):
        r"""
        :param M: square symmetric matrix :math:`N \times N`
        :param k: desired rank (must be smaller than :math:`N`)
        :param tol: residual tolerance of the stopping criterion
        :type M: torch.tensor
        :type k: int
        :type tol: float
        :return: eigenvalues D, leading k eigenvectors U
        :rtype: torch.tensor, torch.tensor

        **Note:** `depends on scipy`

        Return the leading k eigenpairs of a matrix M, where M is symmetric,
        :math:`M=M^T`, by computing the symmetric decomposition :math:`M=UDU^T`
        up to rank k. The partial eigendecomposition is done through the LOBPCG method.
        """
        # (optional) verify hermiticity
        M_asymm_norm = torch.norm(M - M.t())
        assert M_asymm_norm / torch.abs(M).max() < 1.0e-8, "M is not symmetric"

        # X (tensor, optional) - the input tensor of size (∗,m,n) where k <= n <= m.
        #                        When specified, it is used as initial approximation of eigenvectors.
        #                        X must be a dense tensor.
        # iK (tensor, optional) - the input tensor of size (∗,m,m). When specified, it will be used
        #                         as preconditioner.
        # tol (float, optional) - residual tolerance for stopping criterion. Default is
        #                         feps ** 0.5 where feps is smallest non-zero floating-point
        #                         number of the given input tensor A data type.
        # niter (int, optional) - maximum number of iterations. When reached, the iteration
        #                         process is hard-stopped and the current approximation of eigenpairs
        #                         is returned. For infinite iteration but until convergence criteria is met,
        #                         use -1.
        # tracker (callable, optional) - a function for tracing the iteration process. When specified,
        #                                it is called at each iteration step with LOBPCG instance as an argument.
        #                                The LOBPCG instance holds the full state of the iteration process in
        #                                the following attributes:
        #     iparams, fparams, bparams - dictionaries of integer, float, and boolean valued input parameters,
        #                                 respectively
        #     ivars, fvars, bvars, tvars - dictionaries of integer, float, boolean, and Tensor valued
        #                                  iteration variables, respectively.
        #     A, B, iK - input Tensor arguments.
        #     E, X, S, R - iteration Tensor variables.
        # ortho_fparams, ortho_bparams (ortho_iparams,) - various parameters to LOBPCG algorithm when
        #                                                 using (default) method="ortho".
        D, U = torch.lobpcg(M, k=k,
            X=None, n=None, iK=None, largest=True, tol=tol, niter=None,
            tracker=None, ortho_iparams=None, ortho_fparams=None, ortho_bparams=None)

        self.save_for_backward(D, U)
        return D, U
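The tracker argument documented above takes any callable that is invoked with the live LOBPCG instance at every iteration. A small hedged sketch follows (standalone, not from the project above; it relies on the E and R iteration variables listed in the comments).

import torch

def print_residuals(worker):
    # called once per LOBPCG iteration; E and R are the current eigenvalue
    # approximations and residuals held on the solver state
    print("eigenvalues:", worker.E, "residual norm:", torch.norm(worker.R).item())

M = torch.randn(100, 100)
M = M @ M.t()                                  # symmetric PSD test matrix
D, U = torch.lobpcg(M, k=1, largest=True, tracker=print_residuals)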
Code Example #5
    def forward(ctx, A):
        # T is the torch module (aliased in the source file)
        #n = int(len(S) ** 0.5)
        #A = S.reshape(n, n)
        print('A=', A)
        n = len(A)
        k = n
        if 1:  # full dense eigendecomposition; the lobpcg branch below is disabled
            e, v = T.symeig(-A, eigenvectors=True)
            e = -e[:k]
            v = v[:, :k]
        else:
            e, v = T.lobpcg(A, k=k)
        r = T.cat((T.flatten(v), e), 0)
        ctx.save_for_backward(e, v, A)
        print('r=', r)
        return e, v
Code Example #6
File: benchmark.py Project: rfeinman/Torch-ARPACK
def eigen_solve(A, mode):
    """solve for eigenpairs using a specified method"""
    if mode == 'arpack_eigsh':
        return arpack.eigsh(A, largest=True, tol=1e-4)
    elif mode == 'arpack_eigsh_mkl':
        return arpack.eigsh_mkl(A, largest=True, tol_dps=4)
    elif mode == 'torch_eigh':
        return torch.linalg.eigh(A)
    elif mode == 'torch_lobpcg':
        return torch.lobpcg(A, k=1, largest=True, tol=1e-4)
    elif mode == 'scipy_eigsh':
        # For some reason scipy's eigsh requires slightly smaller tolerance
        # (1e-5 vs 1e-4) to reach equivalent accuracy
        return splinalg.eigsh(A.numpy(), k=1, which="LA", tol=1e-5)
    elif mode == 'scipy_lobpcg':
        X = A.new_empty(A.size(0), 1).normal_()
        return splinalg.lobpcg(A.numpy(), X.numpy(), largest=True, tol=1e-4)
    else:
        raise ValueError(f"unknown eigen-solver mode: {mode}")
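A hypothetical driver for eigen_solve, exercising only the torch-backed modes so the arpack and scipy backends above are never reached:

import torch

B = torch.randn(500, 100, dtype=torch.float64)
A = B @ B.t()                          # 500 x 500 symmetric PSD test matrix

for mode in ('torch_eigh', 'torch_lobpcg'):
    out = eigen_solve(A, mode)
    # eigh returns the full ascending spectrum; lobpcg returns only the largest eigenpair
    top = out[0][-1].item() if mode == 'torch_eigh' else out[0].item()
    print(mode, top)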
Code Example #7
def evaluate_with_lambda(net: torch.nn.Module,
                         dataloader: torch.utils.data.DataLoader, criterion):
    net.eval()
    avg_loss = AverageMeter()
    avg_acc = AverageMeter()
    avg_acc5 = AverageMeter()
    device = next(net.parameters()).device
    num_data = 0
    total_loss = 0
    K_mat = torch.zeros(num_parameters(net),
                        num_parameters(net),
                        device=device)
    for data, label in dataloader:
        data, label = data.to(device), label.to(device)
        output = net(data)
        loss = criterion(output, label)
        batch_size = data.size(0)
        num_data += batch_size
        total_loss = total_loss + loss * batch_size
    total_loss = total_loss / num_data
    total_loss.backward()
    g = extract_grad_vec(net)
    for data, label in dataloader:
        net.zero_grad()
        data, label = data.to(device), label.to(device)
        output = net(data)
        loss = criterion(output, label)
        loss.backward()
        g_i = extract_grad_vec(net)
        d = (g_i - g).to(device)
        K_mat.addr_(d, d)
        # K_mat += d.ger(d) / len(dataloader)

        prec1, prec5 = accuracy(output.data, label.data, topk=(1, 5))
        avg_loss.update(loss.item(), data.size(0))
        avg_acc.update(prec1.item())
        avg_acc5.update(prec5.item())
    s, v = torch.lobpcg(K_mat, k=1)
    return avg_loss.avg, avg_acc.avg, avg_acc5.avg, s.item()
Code Example #8
File: moments.py Project: rish-raghu/otdd
def compute_label_stats(data,
                        targets=None,
                        indices=None,
                        classnames=None,
                        online=True,
                        batch_size=100,
                        to_tensor=True,
                        eigen_correction=False,
                        eigen_correction_scale=1.0,
                        nworkers=0,
                        diagonal_cov=False,
                        embedding=None,
                        device=None,
                        dtype=torch.FloatTensor):
    """
    Computes mean/covariance of examples grouped by label. Data can be passed as
    a pytorch dataset or a dataloader. Uses dataloader to avoid loading all
    classes at once.

    Arguments:
        data (pytorch Dataset or Dataloader): data to compute stats on
        targets (Tensor, optional): If provided, will use this target array to
            avoid re-extracting targets.
        indices (array-like, optional): If provided, filtering is based on these
            indices (useful if e.g. dataloader has subsampler)
        eigen_correction (bool, optional):  If ``True``, will shift the covariance
            matrix's diagonal by :attr:`eigen_correction_scale` to ensure PSD'ness.
        eigen_correction_scale (numeric, optional): Magnitude of eigenvalue
            correction (used only if :attr:`eigen_correction` is True)

    Returns:
        M (dict): Dictionary with sample means (Tensors) indexed by target class
        S (dict): Dictionary with sample covariances (Tensors) indexed by target class
    """

    device = process_device_arg(device)
    M = {}  # Means
    S = {}  # Covariances

    ## We need to get all targets in advance, in order to filter.
    ## Here we assume targets is the full dataset targets (ignoring subsets, etc)
    ## so we need to find effective targets.
    if targets is None:
        targets, classnames, indices = extract_data_targets(data)
    else:
        assert (indices
                is not None), "If targets are provided, so must be indices"
    if classnames is None:
        classnames = sorted([a.item() for a in torch.unique(targets)])

    effective_targets = targets[indices]

    if nworkers > 1:
        import torch.multiprocessing as mp  # Ugly, sure. But useful.
        mp.set_start_method('spawn', force=True)
        M = mp.Manager().dict()  # Alternatively, M = {}; M.share_memory
        S = mp.Manager().dict()
        processes = []
        for i, c in enumerate(classnames):  # No. of processes
            label_indices = indices[effective_targets == i]
            p = mp.Process(target=_single_label_stats,
                           args=(data, i, c, label_indices, M, S),
                           kwargs={
                               'device': device,
                               'online': online
                           })
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
    else:
        for i, c in enumerate(classnames):
            label_indices = indices[effective_targets == i]
            μ, Σ, n = _single_label_stats(data,
                                          i,
                                          c,
                                          label_indices,
                                          device=device,
                                          dtype=dtype,
                                          embedding=embedding,
                                          online=online,
                                          diagonal_cov=diagonal_cov)
            M[i], S[i] = μ, Σ

    if to_tensor:
        ## Warning: this assumes classes are *exactly* {0,...,n}, might break things
        ## downstream if data is missing some classes
        M = torch.stack(
            [μ.to(device) for i, μ in sorted(M.items()) if μ is not None],
            dim=0)
        S = torch.stack(
            [Σ.to(device) for i, Σ in sorted(S.items()) if Σ is not None],
            dim=0)

    ### Shift the Covariance matrix's diagonal to ensure PSD'ness
    if eigen_correction:
        logger.warning('Applying eigenvalue correction to Covariance Matrix')
        λ = eigen_correction_scale
        for i in range(S.shape[0]):
            if eigen_correction == 'constant':
                S[i] += torch.diag(λ * torch.ones(S.shape[1], device=device))
            elif eigen_correction == 'jitter':
                S[i] += torch.diag(
                    λ *
                    torch.ones(S.shape[1], device=device).uniform_(0.99, 1.01))
            elif eigen_correction == 'exact':
                s, v = torch.symeig(S[i])
                print(s.min())
                s, v = torch.lobpcg(S[i], largest=False)
                print(s.min())
                s = torch.eig(S[i], eigenvectors=False).eigenvalues
                print(s.min())
                pdb.set_trace()
                s_min = s.min()
                if s_min <= 1e-10:
                    S[i] += torch.diag(λ * torch.abs(s_min) *
                                       torch.ones(S.shape[1], device=device))
                raise NotImplementedError
    return M, S
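A compact, self-contained sketch of the behavior documented above (per-class means and covariances with a constant diagonal shift, i.e. the 'constant' eigenvalue correction); the data, labels, and helper name are hypothetical:

import torch

def label_stats_sketch(features, labels, eigen_correction_scale=1e-3):
    # features: (N, D) float tensor, labels: (N,) integer tensor
    M, S = {}, {}
    for c in labels.unique().tolist():
        x = features[labels == c]                              # samples of class c
        mu = x.mean(dim=0)
        xc = x - mu
        cov = xc.t() @ xc / max(x.shape[0] - 1, 1)             # sample covariance
        cov += eigen_correction_scale * torch.eye(x.shape[1])  # shift diagonal to keep PSD
        M[c], S[c] = mu, cov
    return M, S

feats = torch.randn(200, 8)
labs = torch.randint(0, 3, (200,))
means, covs = label_stats_sketch(feats, labs)
print(means[0].shape, covs[0].shape)   # torch.Size([8]) torch.Size([8, 8])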
Code Example #9
def r_kernel(samples1,samples2,score_p_func,threshold=30,num_selection=None,**kwargs):
    # for kernel_smooth active slice
    samples1 = samples1.clone().detach()
    samples1.requires_grad_()
    if 'lobpcg' in kwargs:
        flag_lobpcg=kwargs['lobpcg']
    else:
        flag_lobpcg=False

    if 'fix_sample' in kwargs:
        flag_fix_sample = kwargs['fix_sample']
    else:
        flag_fix_sample = False

    if 'kernel' in kwargs:
        kernel = kwargs['kernel']
    else:
        kernel = SE_kernel_multi

    if flag_fix_sample:
        # samples1 = samples2
        samples2=samples1.clone().detach()
        samples2.requires_grad_()
    else:
        samples2=samples2.clone().detach()
        samples2.requires_grad_()

    score_p=score_p_func(samples1) # N
    score_p=torch.autograd.grad(score_p.sum(),samples1)[0] # N x D

    with torch.no_grad():
        score_p = score_p.reshape((score_p.shape[0], 1, score_p.shape[1]))  # N x 1 x D
        # median distance
        median_dist = median_heruistic(samples1[0:100, :], samples1[0:100, :].clone())
        bandwidth = 2 * torch.pow(0.5 * median_dist, 0.5)
        kernel_hyper_KSD = {
            'bandwidth': 2*bandwidth
        }

        K = kernel(torch.unsqueeze(samples1, dim=1), torch.unsqueeze(samples2, dim=0)
                   , kernel_hyper=kernel_hyper_KSD)  # N x N

        Term1 = (torch.unsqueeze(K, dim=-1) * score_p)  # N x N x D

        Term2 = repulsive_SE_kernel_multi(samples1, samples2, kernel_hyper=kernel_hyper_KSD,
                                 K=torch.unsqueeze(K, dim=-1))  # N x N xD

        force = torch.mean(Term1 + 1* Term2, dim=0)  # N x D
        score_diff_1 = force.unsqueeze(-1)  # 1000 x D x 1
        score_diff_2 = force.unsqueeze(-2)  # 1000 x 1 x D

        score_mat_kernel = torch.matmul(score_diff_1, score_diff_2)
        mean_score_mat = score_mat_kernel.mean(0)  # D x D

        # torch.lobpcg requires the matrix size to exceed 3*k, hence the size guard
        if flag_lobpcg and num_selection is not None and (num_selection * 3) < mean_score_mat.shape[0]:
            # approximate eigenvectors
            eig_value_appro, eig_vec_appro = torch.lobpcg(mean_score_mat, k=num_selection, niter=10)
            r=eig_vec_appro.t()
        else:
            eig_value, eig_vec = torch.symeig(mean_score_mat, eigenvectors=True)

            # Select the eigenvectors with the largest eigenvalues
            r_th = threshold
            r_candidate = eig_vec.t()
            r_candidate = r_candidate.flip(0)
            eig_value_cand = eig_value.flip(0)
            max_eig_value = eig_value_cand[0]
            if num_selection is None:
                for d in range(samples1.shape[-1]):
                    eig_cand = eig_value_cand[d]
                    if torch.abs(max_eig_value / (eig_cand + 1e-10)) < r_th:
                        r_comp = r_candidate[d, :].unsqueeze(0)  # 1x D
                        if d == 0:
                            r = r_comp
                        else:
                            r = torch.cat((r, r_comp), dim=0)  # n x D
            else:
                for d in range(num_selection):
                    r_comp = r_candidate[d, :].unsqueeze(0)  # 1x D
                    if d == 0:
                        r = r_comp
                    else:
                        r = torch.cat((r, r_comp), dim=0)  # n x D
    return r
Code Example #10
def Poincare_g_kernel_SVD(samples1,samples2,score_p_func,**kwargs):
    # active slice with kernel smooth estimated score_q
    samples1 = samples1.clone().detach()
    samples1.requires_grad_()
    if 'lobpcg' in kwargs:
        # enable fast approximation for eigenvalue decomposition
        flag_lobpcg=kwargs['lobpcg']
    else:
        flag_lobpcg=False


    if 'fix_sample' in kwargs:
        flag_fix_sample=kwargs['fix_sample']
    else:
        flag_fix_sample=False

    if 'kernel' in kwargs:
        kernel=kwargs['kernel']
    else:
        kernel=SE_kernel_multi

    if 'r' in kwargs:
        r=kwargs['r']
    else:
        r=torch.eye(samples1.shape[-1]) # default value for r (eye initialization)

    if flag_fix_sample: # samples1=samples2

        samples2=samples1.clone().detach()
        samples2.requires_grad_()
    else:
        samples2=samples2.clone().detach()
        samples2.requires_grad_()

    num_r=r.shape[0]
    median_dist = median_heruistic(samples1[0:100, :], samples1[0:100, :].clone())
    bandwidth = 2 * torch.pow(0.5 * median_dist, 0.5)
    kernel_hyper_KSD = {
        'bandwidth': 1*bandwidth
    }
    score_p=score_p_func(samples1)# N
    score_p=torch.autograd.grad(score_p.sum(),samples1,create_graph=True,retain_graph=True)[0]# N x D


    # compute kernel matrix
    K = kernel(torch.unsqueeze(samples1, dim=1), torch.unsqueeze(samples2, dim=0),kernel_hyper=kernel_hyper_KSD) # N x N
    K_exp=K.unsqueeze(-1)
    score_p_exp = score_p.unsqueeze(-2)  # N x 1 x D
    samples1_exp = samples1.unsqueeze(-2)  # N x 1 x D
    samples2_exp = samples2.unsqueeze(-3)  # 1 x N2 x D

    for d in range(num_r):
        r_candidate = r[d, :].unsqueeze(0).unsqueeze(0)  # 1 x 1 x D


        Term1=torch.einsum('ijk,pqr,ilr->q',K_exp,r_candidate,score_p_exp)
        # valid only for the RBF kernel: its derivative is written out explicitly here,
        Term2_c1=-2./(bandwidth**2+1e-9)*torch.einsum('ijk,imr,pqr->m',K_exp,samples1_exp,r_candidate)
        Term2_c2=-2./(bandwidth**2+1e-9)*torch.einsum('ijk,mjr,pqr->m',K_exp,samples2_exp,r_candidate)
        Term2=Term2_c1-Term2_c2
        force = (Term1 + 1*Term2)  # 1
        grad_force = torch.autograd.grad(force.sum(), samples2, retain_graph=True)[0]  # sam2 x D
        H = grad_force.unsqueeze(-1) * grad_force.unsqueeze(-2)  # sam2 x D x D
        H = H.mean(0)  # D x D


        if flag_lobpcg:
            _, eig_vec_appro = torch.lobpcg(H.clone().detach(), k=1, niter=30) # approximated eigen decomposition
            if d == 0:
                g_comp = eig_vec_appro.t()  # 1 x D
                g = g_comp
            else:
                g_comp = eig_vec_appro.t()  # 1 x D
                g = torch.cat((g, g_comp), dim=0)  # r x D
        else:
            eig_value, eig_vec = torch.symeig(H.clone().detach(), eigenvectors=True) # eigenvalue decomposition
            vec_candidate = eig_vec.t().flip(0)
            if d == 0:
                g_comp = vec_candidate[0, :].unsqueeze(0)  # 1 x D
                g = g_comp
            else:
                g_comp = vec_candidate[0, :].unsqueeze(0)  # 1 x D
                g = torch.cat((g, g_comp), dim=0)  # r x D

    return g
Code Example #11
def wrapper_for_recNystrom(similarity_matrix,
                           K,
                           num_imp_samples,
                           runs=1,
                           mode="normal",
                           normalize="rows",
                           expand=False):
    eps = 1e-3
    mult = 1.5
    error_list = []
    abs_error_list = []
    n = len(similarity_matrix)
    list_of_available_indices = list(range(len(similarity_matrix)))
    avg_min_eig = 0

    for r in range(runs):
        if mode == "eigI" and expand:
            new_num = int(np.sqrt(num_imp_samples * n))
            sample_indices_bar = np.sort(
                random.sample(list_of_available_indices, new_num))
            # smallest eigenvalue of a sampled principal submatrix, used below
            # as the eigenvalue correction passed to recursiveNystrom
            min_eig_A = torch.lobpcg(similarity_matrix[np.ix_(sample_indices_bar, sample_indices_bar)].expand(1, -1, -1),
                                     k=1, largest=False, method="ortho")[0]
            min_eig_A = min(0, min_eig_A) - eps

        C, W, error, minEig = recursiveNystrom(similarity_matrix, num_imp_samples, \
            correction=True, minEig=mult*min_eig_A, expand_eigs=True, eps=eps)

        # rank_l_K = (C @ W) @ C.T
        rank_l_K = torch.matmul(torch.matmul(C, W), torch.transpose(C, 0, 1))

        # if np.iscomplexobj(rank_l_K):
        #     rank_l_K = np.absolute(rank_l_K)

        # if normalize == "rows":
        #     rank_l_K = utils.row_norm_matrix(rank_l_K)
        #     similarity_matrix = utils.row_norm_matrix(similarity_matrix)
        #     pass
        # if normalize == "laplacian":
        #     rank_l_K = utils.laplacian_norm_matrix(similarity_matrix, rank_l_K)
        #     pass
        if normalize == "original":
            pass

        abs_error = torch.norm(K - rank_l_K) / torch.norm(K)
        error_list.append(error.detach().cpu().numpy())
        abs_error_list.append(abs_error.detach().cpu().numpy())
        avg_min_eig += min_eig_A
        if r < runs - 1:
            del rank_l_K
        pass

    avg_min_eig = avg_min_eig / len(error_list)
    avg_error = np.sum(np.array(error_list)) / len(error_list)
    avg_abs_error = np.sum(np.array(abs_error_list)) / len(abs_error_list)

    return avg_error, avg_abs_error, avg_min_eig, rank_l_K


# K = np.random.random((1000,600))
# K = torch.from_numpy(K).to(0)
# gamma = 40
# K = torch.cdist(K, K)
# K = torch.exp(-gamma*K)
# # print(K)
# s = 100
# #"""
# avg_error, avg_abs_error, avg_min_eig, rank_l_K = \
#     wrapper_for_recNystrom(K, K, s, runs=10, mode="eigI", normalize="original", expand=True)

# print(avg_error, avg_abs_error, avg_min_eig)
#"""
Code Example #12
File: utils.py Project: skepsun/SAGN_with_SLE
def compute_spectral_emb(adj, K):
    A = to_scipy(adj.to("cpu"))
    L = from_scipy(sp.csgraph.laplacian(A, normed=True))
    _, spectral_emb = torch.lobpcg(L, K)
    return spectral_emb.to(adj.device)
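Example #12 depends on the project's to_scipy/from_scipy helpers. Below is a hedged standalone sketch of the same idea with a hypothetical random graph, a dense Laplacian for simplicity, and the smallest eigenpairs requested explicitly (largest=False is this sketch's choice; the project code above relies on the default).

import scipy.sparse as sp
from scipy.sparse import csgraph
import torch

A = sp.random(200, 200, density=0.05, random_state=0, format="csr")
A = A + A.T                                   # symmetric adjacency pattern
A.data[:] = 1.0                               # unweighted edges
L = csgraph.laplacian(A, normed=True)         # normalized graph Laplacian

L_dense = torch.from_numpy(L.toarray())
eigvals, spectral_emb = torch.lobpcg(L_dense, k=8, largest=False)
print(spectral_emb.shape)                     # torch.Size([200, 8])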
Code Example #13
def main(argv):
    start_time = time.time()

    num_cores = multiprocessing.cpu_count()

    step = 50
    norm_type = "original"
    expand_eigs = True
    mode = "eigI"
    runs_ = 3
    code_mode = "CPU"
    from utils import read_file, read_mat_file

    """
    20ng2_new_K_set1.mat  oshumed_K_set1.mat  recipe_K_set1.mat  recipe_trainData.mat  twitter_K_set1.mat  twitter_set1.mat
    """
    # approximation_type = "leverage_"
    approximation_type = "uniform_"

    if approximation_type == "leverage_":
        from recursiveNystromGPU import wrapper_for_recNystrom as simple_nystrom
        pass
    if approximation_type == "uniform_":
        from Nystrom import simple_nystrom
        pass

    filename = "stsb"
    #similarity_matrix = read_file(pred_id_count=id_count, file_=filename+".npy")
    print("Reading file ...")
    # similarity_matrix = read_mat_file(file_="WordMoversEmbeddings/mat_files/20ng2_new_K_set1.mat", version="v7.3")
    similarity_matrix = read_file("../GYPSUM/"+filename+"_predicts_0.npy")

    print("File read. Beginning preprocessing ...")
    #number_of_runs = id_count / step
    error_list = []
    abs_error_list = []
    avg_min_eig_vec = []

    # check for similar rows or columns
    unique_rows, indices = np.unique(similarity_matrix, axis=0, return_index=True)
    similarity_matrix_O = similarity_matrix[indices][:, indices]
    similarity_matrix = (similarity_matrix_O + similarity_matrix_O.T) / 2.0
    if filename == "rte":
        similarity_matrix = 1-similarity_matrix

    if code_mode == "GPU":
        import torch
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        similarity_matrix = torch.from_numpy(similarity_matrix).to(device)
    print("Preprocessing done.")
    # print()

    similarity_matrix_O = deepcopy(similarity_matrix)
    # similarity_matrix_O = similarity_matrix

    # """
    def top_level_function(id_count):
        error, abs_error, avg_min_eig, rank_l_k = simple_nystrom(\
                    similarity_matrix, similarity_matrix_O, id_count, runs=runs_, \
                    mode='eigI', normalize=norm_type, expand=expand_eigs)
        # processed_list = [error, abs_error, avg_min_eig, rank_l_k]
        print(error, abs_error)
        return error, abs_error, avg_min_eig

    id_count = 500 #len(similarity_matrix) #1000
    inputs = tqdm(range(10, id_count, 10))


    # for k in tqdm(range(10, id_count, 10)):
    #     error, abs_error, avg_min_eig, _ = simple_nystrom(similarity_matrix, similarity_matrix_O, k, runs=runs_, mode='eigI', normalize=norm_type, expand=expand_eigs)
    #     error_list.append(error)
    #     abs_error_list.append(abs_error)
    #     avg_min_eig_vec.append(avg_min_eig)
    #     del _
    #     pass

    print("Beginning approximation parallely ...")
    e = Parallel(n_jobs=num_cores, backend="threading")(map(delayed(top_level_function), inputs))
    print("Approximation done. Beginning write out to files.")

    for i in range(len(e)):
        tuple_out = e[i]
        error_list.append(tuple_out[0])
        abs_error_list.append(tuple_out[1])
        avg_min_eig_vec.append(tuple_out[2])    

    # print(len(error_list), len(abs_error_list), len(avg_min_eig_vec))
    error_list = np.array(error_list)
    abs_error_list = np.array(abs_error_list)

    print("check for difference: ", np.linalg.norm(error_list-abs_error_list))
    # min_eig = np.real(np.min(np.linalg.eigvals(similarity_matrix)))
    if mode == "GPU":
        min_eig = torch.lobpcg(similarity_matrix.expand(1, -1, -1), k=1, largest=False, method="ortho")[0].cpu().numpy()
    else:
        min_eig = np.real(np.min(np.linalg.eigvals(similarity_matrix)))
        min_eig = round(min_eig, 2)

    if mode == "GPU":
        avg_min_eig_vec = [x.cpu().numpy() for x in avg_min_eig_vec]
    else:
        pass

    # display
    # sns.set()
    # flatui = ["#9b59b6", "#3498db", "#95a5a6", "#e74c3c", "#34495e", "#2ecc71"]
    # cmap = ListedColormap(sns.color_palette(flatui).as_hex())

    x_axis = list(range(10, id_count, 10))
    # fig, ax = plt.subplots(figsize=(15, 8))
    # for label in (ax.get_xticklabels() + ax.get_yticklabels()):
    #   label.set_fontsize(16)

    plt.rc('axes', titlesize=13)
    plt.rc('axes', labelsize=13)
    plt.rc('xtick', labelsize=13)
    plt.rc('ytick', labelsize=13)
    plt.rc('legend', fontsize=11)

    STYLE_MAP = {"symmetrized error": {"color": "#4d9221",  "marker": "s", "markersize": 7, 'label': 'avg error wrt similarity matrix', 'linewidth': 1},
                 "true error": {"color": "#7B3294",  "marker": ".", "markersize": 7, 'label': 'avg error wrt true matrix', 'linewidth': 1},
                 "min eig": {"color": "#f1a340", "marker": ".", "markersize": 7, 'label': "True minimum eigenvalue = "+str(min_eig), 'linewidth': 1},
                }

    def plot_me():
        plt.gcf().clear()
        scale_ = 0.55
        new_size = (scale_ * 10, scale_ * 8.5)
        plt.gcf().set_size_inches(new_size)
        sim_error_pairs = [(x, y) for x, y in zip(x_axis, error_list)]
        true_error_pairs = [(x, y) for x, y in zip(x_axis, abs_error_list)]
        arr1 = np.array(sim_error_pairs)
        arr2 = np.array(true_error_pairs)
        print(arr1.shape, arr2.shape)
        plt.plot(arr1[:, 0], arr1[:, 1], **STYLE_MAP['symmetrized error'])
        plt.plot(arr2[:, 0], arr2[:, 1], **STYLE_MAP['true error'])
        plt.locator_params(axis='x', nbins=6)
        plt.xlabel("Number of landmark samples")
        plt.ylabel("Approximation error")
        plt.title("Plot of average errors using Nystrom on "+filename+" BERT", fontsize=13)
        plt.tight_layout()
        plt.legend(loc='upper right')
        plt.savefig("./test1.pdf")
        # plt.savefig("figures/final_"+approximation_type+"_nystrom_errors_"+filename+".pdf")
        # plt.close()

        
        # plt.gcf().clear()

        # scale_ = 0.55
        # new_size = (scale_ * 10, scale_ * 8.5)
        # plt.gcf().set_size_inches(new_size)
        # eigval_estimate_pairs = [(x, y) for x, y in zip(x_axis, list(np.squeeze(avg_min_eig_vec)))]
        # arr1 = np.array(eigval_estimate_pairs)
        # plt.plot(arr1[:, 0], arr1[:, 1], **STYLE_MAP['min eig'])
        # plt.locator_params(axis='x', nbins=6)
        # plt.xlabel("Number of landmark samples")
        # plt.ylabel("Minimum eigenvalue estimate")
        # plt.tight_layout(rect=[0, 0.03, 1, 0.95])
        # plt.legend(loc='upper right')
        # plt.title("Plot of minimum eigenvalue estimate", fontsize=13)
        # plt.savefig("./test2.pdf")
        # # plt.savefig("figures/final_"+approximation_type+filename+"_min_eigenvalue_estimate.pdf")
        # plt.close()        

    # plt.plot(x, error_list, label="average errors")
    # plt.plot(x, abs_error_list, label="average errors wrt original similarity_matrix")
    # plt.xlabel("number of reduced samples", fontsize=20)
    # plt.ylabel("error score", fontsize=20)
    # plt.legend(loc="upper right", fontsize=20)
    # plt.title("plot of average errors using Nystrom on "+filename+" BERT", fontsize=20)
    # if mode == "eigI":
    #     plt.savefig("figures/"+approximation_type+"nystrom_errors_new_"+mode+"_"+norm_type+"_"+str(int(expand_eigs))+"_"+filename+".pdf")
    # else:
    #     plt.savefig("figures/"+approximation_type+"nystrom_errors_new_"+mode+"_"+norm_type+"_"+filename+".pdf")
    # plt.clf()

    # plt.plot(x, np.squeeze(avg_min_eig_vec), label="average min eigenvalues")
    # plt.xlabel("number of reduced samples", fontsize=20)
    # plt.ylabel("minimum eigenvalues", fontsize=20)
    # plt.legend(loc="upper right", fontsize=20)
    # plt.title("plot of average eigenvalues for original values: "+str(min_eig), fontsize=20) 
    # plt.savefig("figures/"+approximation_type+filename+"_min_eigenvalue_estimate.pdf")
    # #"""
    plot_me()
    end_time = time.time()
    print("total time for execution:", end_time-start_time)