Example #1
import numpy as np

import power_iteration


def merw_inverse_p_distance(A, alfa=0.7):
    # Dominant eigenvalue/eigenvector of the adjacency matrix A.
    ev, v = power_iteration.power_iteration(A)
    D_v = np.diag(v)
    I = np.identity(len(v))
    matr = alfa * A / ev
    # P_d = M D_v^{-1} (I - M)^{-1} D_v, with M = alfa * A / ev.
    P_d = np.dot(np.dot(matr, np.linalg.inv(D_v)), np.dot(np.linalg.inv(I - matr), D_v))
    return P_d
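A minimal usage sketch, assuming power_iteration.power_iteration(A) from this repository returns the pair (dominant eigenvalue, dominant eigenvector), as all the examples on this page do:

import numpy as np

# Triangle graph: non-bipartite, so the dominant eigenvalue is strictly
# largest in magnitude and power iteration converges.
A = np.array([[0., 1., 1.],
              [1., 0., 1.],
              [1., 1., 0.]])
P_d = merw_inverse_p_distance(A, alfa=0.5)
print(P_d.shape)  # (3, 3)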
Example #2
def run_method(method, *argv):
    """
    Pure syntactic sugar: dispatches to a power_iteration routine by name.
    method: string, one of 'trace', 'tracefamily', 'poweriter',
        'poweriter-eigvecs'; argv is forwarded unchanged.
    """
    from power_iteration import get_hessian_trace, power_iteration, get_trace_family, power_iteration_eigenvecs

    if method == 'trace':
        get_hessian_trace(*argv)
    elif method == 'tracefamily':
        get_trace_family(*argv)
    elif method == 'poweriter':
        power_iteration(*argv)
    elif method == 'poweriter-eigvecs':
        power_iteration_eigenvecs(*argv)
    else:
        raise NotImplementedError(
            "Only 'trace', 'tracefamily', 'poweriter' and "
            "'poweriter-eigvecs' are supported.")
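For instance (the arguments are hypothetical and are simply forwarded via *argv; note that run_method discards the callee's return value, so it only suits routines that print or save their results):

run_method('poweriter', A)          # equivalent to power_iteration(A)
run_method('poweriter-eigvecs', A)  # equivalent to power_iteration_eigenvecs(A)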
Example #3
from math import sqrt

import numpy as np
from numpy import linalg as la  # assumed alias; scipy.linalg would also work
from tqdm import tqdm

from power_iteration import power_iteration


def kernel_gram(method, X, sigma=1, k=3, d=1, e=11, beta=0.1, Nystrom=False, subA=4, A=None, neighbours=None):
    """
    Computes the Gram matrix of X for the chosen kernel,
    with a Nystrom approximation if Nystrom is True.
    sigma : float, bandwidth of the RBF kernel
    k : length for the k-spectrum kernel
    d, e, beta : parameters of the LA (local alignment) kernel
    A, subA, neighbours : alphabet, substring length and neighbours
        of the mismatch kernel
    """
    # kernel(...) is the pairwise kernel function defined elsewhere in this module.
    n = X.shape[0]

    if not Nystrom:
        # Full Gram matrix: fill the diagonal, then mirror the lower triangle.
        K = np.zeros((n, n))
        for i in tqdm(range(n)):
            K[i, i] = kernel(method, X[i,], X[i,], sigma, k, d, e, beta, subA, A, neighbours)
            for j in range(i):
                K[i, j] = K[j, i] = kernel(method, X[i,], X[j,], sigma, k, d, e, beta, subA, A, neighbours)
    else:
        # Randomly draw m landmark indices without replacement (assumes n >= m).
        m = 100
        np.random.seed(42)
        ind = np.random.choice(n, size=m, replace=False)
        Xm = X[ind,]
        # Kmm: Gram matrix of the landmarks.
        Kmm = np.zeros((m, m))
        for i in range(m):
            Kmm[i, i] = kernel(method, Xm[i,], Xm[i,], sigma, k, d, e, beta, subA, A, neighbours)
            for j in range(i):
                Kmm[i, j] = Kmm[j, i] = kernel(method, Xm[i,], Xm[j,], sigma, k, d, e, beta, subA, A, neighbours)

        # Knm and Kmn: cross-kernels between all points and the landmarks.
        Knm = np.zeros((n, m))
        Kmn = np.zeros((m, n))
        for i in range(n):
            for j in range(m):
                Knm[i, j] = Kmn[j, i] = kernel(method, X[i,], Xm[j,], sigma, k, d, e, beta, subA, A, neighbours)
        # Nystrom approximation: K ~= Knm Kmm^{-1} Kmn.
        K = np.dot(Knm, np.dot(la.inv(Kmm), Kmn))

    if method == "LA":
        # Shift the spectrum by the most negative eigenvalue so the
        # LA Gram matrix becomes positive semi-definite.
        eig = -power_iteration(-K)[0]
        if eig > 0:
            min_neg_eig = 0
        else:
            min_neg_eig = eig
        K = K - min_neg_eig * np.eye(n)

    # Cosine normalization: K_norm[i, j] = K[i, j] / sqrt(K[i, i] * K[j, j]).
    K_norm = np.zeros((n, n))
    for i in range(n):
        K_norm[i, i] = 1
        for j in range(i):
            K_norm[i, j] = K_norm[j, i] = K[i, j] / (sqrt(K[i, i]) * sqrt(K[j, j]))

    return K_norm
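A hedged usage sketch: X is assumed to be an array of n sequences, and the only method string confirmed above is "LA"; the remaining strings recognised by kernel() live elsewhere in the module.

K = kernel_gram("LA", X, d=1, e=11, beta=0.1)  # exact n x n normalized Gram matrix
K_ny = kernel_gram("LA", X, Nystrom=True)      # Nystrom approximation with 100 landmarks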
Example #4
import numpy as np

import power_iteration


def merw_simrank(A, iterations=6):
    neighbours_indices = power_iteration.compute_neighbours(A)
    scores = np.identity(np.shape(A)[0])
    eigenvalue, eigenvector = power_iteration.power_iteration(A)
    # compute_merw_consts is defined elsewhere in this module.
    consts = compute_merw_consts(eigenvalue, eigenvector, len(A))
    for i in range(iterations):
        old_scores = scores.copy()
        for j in range(len(A)):
            for k in range(len(A[j])):
                if j != k:
                    const = consts[j][k]
                    tmp_score = 0
                    # Sum over neighbour pairs, reweighted by the dominant eigenvector.
                    for k1 in neighbours_indices[j]:
                        for j1 in neighbours_indices[k]:
                            tmp_score += old_scores[j1][k1] / eigenvector[j1] * eigenvector[k1]
                    scores[j][k] = const * tmp_score
    return scores
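As in Example #1, a small sketch under the same assumptions about power_iteration; compute_merw_consts must also be in scope:

A = np.array([[0., 1., 1.],
              [1., 0., 1.],
              [1., 1., 0.]])
S = merw_simrank(A, iterations=6)  # S[j][k]: MERW SimRank similarity of nodes j and k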
Example #5
import numpy as np
import power_iteration

def sym_norm_me_graph_laplacian(A):
    # L = I - A / lambda_max, with lambda_max the dominant eigenvalue of A.
    ev, v = power_iteration.power_iteration(A)
    I = np.identity(len(A))
    return I - A / ev
Example #6
import numpy as np
import power_iteration

def me_combinatorial_graph_laplacian(A):
    # L = D_v^2 - D_v A D_v / lambda_max, with D_v = diag(dominant eigenvector).
    ev, v = power_iteration.power_iteration(A)
    D_v = np.diag(v)
    return np.power(D_v, 2) - np.dot(D_v, np.dot(A, D_v)) / ev
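A quick sanity check for the two Laplacians, again assuming power_iteration.power_iteration returns (dominant eigenvalue, dominant eigenvector):

A = np.array([[0., 1., 1.],
              [1., 0., 1.],
              [1., 1., 0.]])
L_sym = sym_norm_me_graph_laplacian(A)        # equals I - A/2 for the triangle graph
L_comb = me_combinatorial_graph_laplacian(A)
print(np.allclose(L_sym, L_sym.T), np.allclose(L_comb, L_comb.T))  # True True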