Example #1
def regularized_eigenvectors(A, d, K, alpha):

    import numpy as np
    import math
    from scipy.sparse.linalg import eigsh as largest_eigsh

    d = np.array(d)
    n = A.shape[0]
    # Compute the degrees and other related metrics
    dalpha = np.power(d, -alpha)

    d1alpha = np.power(d, (alpha - 1))
    invDalpha = np.diag(dalpha[:, 0])
    invD1alpha = np.diag(d1alpha[:, 0])

    # Compute the affinity matrix L_alpha
    B = (A - d.dot(d.T) / d.T.dot(np.ones((n, 1)))) / math.sqrt(n)
    L_alpha = invDalpha.dot(B).dot(invDalpha)

    # Compute the dominant eigenvectors of L_alpha
    evals_large_sparse, evecs_large_sparse = largest_eigsh(L_alpha,
                                                           K - 1,
                                                           which='LM')

    #Normalize the dominant eigenvectors
    normalized_evecs = invD1alpha.dot(evecs_large_sparse)

    return normalized_evecs
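
A minimal usage sketch (my own, not from the source): build a small symmetric affinity matrix A and its degree column vector d, then call the function above.

import numpy as np

rng = np.random.default_rng(0)
A = rng.random((50, 50))
A = (A + A.T) / 2                    # symmetrize
d = A.sum(axis=1, keepdims=True)     # degrees as an (n, 1) column, as the code expects
V = regularized_eigenvectors(A, d, K=4, alpha=0.5)
print(V.shape)                       # (50, 3): K-1 normalized eigenvectors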
Example #2
def sample_dual_dpp(L, q, k=None):
    '''
    Wrapper function for the sample_dual_dpp Matlab code written by Alex Kulesza
    Given a kernel matrix L, returns a sample from a k-DPP.
    
    L is the kernel matrix
    q is the number of used eigenvalues
    k is the number of elements in the sample from the DPP
    '''
    # Matlab link
    global mtb
    if mtb is None:
        import matlab_wrapper
        mtb = matlab_wrapper.MatlabSession()

    # Extract the feature matrix from the kernel
    evals, evecs = largest_eigsh(L, q, which='LM')
    B = np.dot(evecs, np.diag(evals))

    # load values in Matlab and get sample
    mtb.put('B', B)

    if k is not None:
        k = np.array([[k]])  # Matlab only understands matrices
        mtb.put('k', k)
        mtb.eval("dpp_sample = sample_dual_dpp(B,decompose_kernel(B'*B),k)")
    else:
        mtb.eval("dpp_sample = sample_dual_dpp(B,decompose_kernel(B'*B))")

    dpp_sample = mtb.get('dpp_sample')
    return dpp_sample.astype(int)
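
A sketch of the module-level context the wrapper assumes (inferred from the body above, not shown in the source): numpy and eigsh imports plus a lazily created Matlab session handle; Kulesza's sample_dual_dpp.m and decompose_kernel.m must be on the Matlab path.

import numpy as np
from scipy.sparse.linalg import eigsh as largest_eigsh

mtb = None  # Matlab session, created on first call (see the `global mtb` above)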
Example #3
def sample_dual_dpp(L, q, k=None):
    '''
    Wrapper function for the sample_dual_dpp Matlab code written by Alex Kulesza
    Given a kernel matrix L, returns a sample from a k-DPP.
    
    L is the kernel matrix
    q is the number of used eigenvalues
    k is the number of elements in the sample from the DPP
    '''
    # Matlab link
    global mtb
    if mtb is None:
        mtb = initialize_mtb()

    # Extract the feature matrix from the kernel
    evals, evecs = largest_eigsh(L, q, which='LM')
    B = np.dot(evecs, np.diag(evals))

    # load values in Matlab and get sample
    mtb.put('B', B)
    print("sampling {} items from a dual DPP of size {}...".format(k, len(L))),
    start_time = time.time()
    sys.stdout.flush()
    if k is not None:
        k = np.array([[k]])  # Matlab only understands matrices
        mtb.put('k', k)
        mtb.eval("dpp_sample = sample_dual_dpp(B,decompose_kernel(B'*B),k)")
    else:
        mtb.eval("dpp_sample = sample_dual_dpp(B,decompose_kernel(B'*B))")

    dpp_sample = mtb.get('dpp_sample')
    print("done! took {} seconds".format(round(time.time() - start_time, 2)))
    sys.stdout.flush()
    return dpp_sample.astype(int)
Example #4
def subgrad(z, mu, k, gamma_t):
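    # note: relies on module-level globals S, T, V and the helper proj() defined elsewhere in the source project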
    nx = len(z)
    uns_index = [i for i in range(nx) if mu[i] < T[i]]
    val = 0.0
    for i in uns_index:
        val = val + (1 - mu[i] / T[i]) * S[i]
    a, b = largest_eigsh(val, 1)  # compute the largest eigenvalue

    val = 0.0
    subg = [0.0] * nx
    for i in range(nx):
        subg[i] = math.pow(float(b.T * V[i].T), 2.0)
    for i in range(nx):
        subg[i] = z[i] - subg[i] / float(T[i])  # subgradient at mu

    musol = np.array(mu) - gamma_t * np.array(subg)
    # compute projection and find mu_{t+1}
    musol[musol < 0] = 0
    for i in range(nx):
        if musol[i] > T[i]:
            musol[i] = T[i]

    zsol = np.array(z) + gamma_t * np.array(mu)
    zsol = proj(zsol, nx, k)  # compute projection and find z_{t+1}

    lb = a[0] + np.dot(mu, z)  # lower bound

    zub = [0] * nx
    index = np.argsort(-np.array(mu))
    for i in range(k):
        ind = index[i]
        zub[ind] = 1
    ub = a[0] + np.dot(mu, zub)  # upper bound

    return lb, ub, a, musol, zsol
def truncation(n, k):
    start = datetime.datetime.now()
    LB = [0] * n
    for i in range(n):
        a = A[i]
        a = abs(a)
        sindex = np.argsort(-a)
        b = [0] * n
        for j in range(k):
            b[sindex[0, j]] = a[0, sindex[0, j]]

        bnorm = np.linalg.norm(b, 2)
        b = b / bnorm

        b = np.matrix(b)

        LB[i] = (b * A * b.T)[0, 0]
    LB1 = max(max(LB), max(np.diag(A)))

    a, b = largest_eigsh(A, 1)
    b = b[:, 0]
    b = abs(b)
    sindex = np.argsort(-b)
    x = [0] * n
    for j in range(k):
        x[sindex[j]] = b[sindex[j]]
    xnorm = np.linalg.norm(x, 2)
    x = x / xnorm
    x = np.matrix(x)
    LB2 = (x * A * x.T)[0, 0]

    end = datetime.datetime.now()
    elapsed = (end - start).seconds

    return elapsed, max(LB1, LB2)
def EDC(w, Ares, k):
    wadj = np.tensordot(w, Ares, axes=1)
    v, u = largest_eigsh(wadj, k + 1, which='LM')
    v_abs = abs(v)
    u = np.real(u[:, v_abs.argsort()])
    v = v[v_abs.argsort()]
    return v, u
def SC(matrix,k):
    evals, evecs = largest_eigsh(matrix,k,which='LM')
    evals_abs = abs(evals)
    evecs = evecs[:,evals_abs.argsort()]
    evals = evals[evals_abs.argsort()]
    model = KMeans(n_clusters=k).fit(np.real(evecs))
    label = model.predict(np.real(evecs))
    return label
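
A small usage sketch for SC (my own; the planted two-block adjacency matrix is illustrative, and SC's dependencies — numpy, scipy's eigsh, sklearn's KMeans — are assumed to be in scope):

import numpy as np

rng = np.random.default_rng(0)
W = np.zeros((40, 40))
W[:20, :20] = rng.random((20, 20)) < 0.6   # dense block 1
W[20:, 20:] = rng.random((20, 20)) < 0.6   # dense block 2
W = np.triu(W, 1)
W = W + W.T                                # symmetric adjacency, zero diagonal
print(SC(W, 2))                            # should separate indices 0-19 from 20-39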
Example #8
 def K(self, X):
     n = len(X)
     H = self.H(X)
     D = self.GeoDesicMatrix(X)
     K = -0.5 * np.matmul(np.matmul(H, D), H)
     M = np.block([[np.zeros([n, n]), 2 * K], [np.eye(n), -4 * K]])
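     # note: M is generally not symmetric, though eigsh expects a symmetric operator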
     evals_large_sparse, evec = largest_eigsh(M, 1, which='LM')
     c = evals_large_sparse[0]
     return K + 2 * c * K + 0.5 * c * c * H
Example #9
def calc_eigenvalues(graph, num_ev=100):
    num_ev = min(100, num_ev)
    print_f("Extracting adjacency matrix!")
    adj_mat = adjacency(graph, weight=None)
    print_f("Starting calculation of {} Eigenvalues".format(num_ev))
    evals_large_sparse, evecs_large_sparse = largest_eigsh(adj_mat, num_ev * 2, which='LM')
    print_f("Finished calculating Eigenvalues")
    weights = sorted([float(x) for x in evals_large_sparse], reverse=True)[:num_ev]
    graph.gp["top_eigenvalues"] = graph.new_graph_property("vector<float>", weights)
    return graph
Example #10
 def calc_eigenvalues(self, num_ev=100):
     num_ev = min(100, num_ev)
     self.debug_msg("Extracting adjacency matrix!")
     A = adjacency(self.graph, weight=None)
     self.debug_msg("Starting calculation of {} Eigenvalues".format(num_ev))
     evals_large_sparse, evecs_large_sparse = largest_eigsh(A,
                                                            num_ev * 2,
                                                            which='LM')
     self.debug_msg("Finished calculating Eigenvalues")
     evs = sorted([float(x) for x in evals_large_sparse],
                  reverse=True)[:num_ev]
     self.graph.graph_properties[
         "top_eigenvalues"] = self.graph.new_graph_property("object", evs)
 def wam(self,w,n_init=10):
     
     evals, evecs = largest_eigsh(np.tensordot(w,self.Alist,axes=1),self.k, which='LM')
     evals_abs = abs(evals)
     evecs = evecs[:,evals_abs.argsort()]
     evals = evals[evals_abs.argsort()]
     
     if self.method =='gmm':
         evecs_mean = np.zeros((self.k,self.k))
         label = GaussianMixture(n_components=self.k,n_init=n_init).fit_predict(np.real(evecs[:,:]))
     else:
         label = KMeans(n_clusters=self.k,n_init=n_init).fit_predict(np.real(evecs[:,:]))
     return label
Example #13
def compute_egiv(P, al, ga):
    n = int(P.max())
    v = np.ones(n)
    print("Computing the super-spacey random surfer vector")
    x = sf.shift_fix(P, v, al, ga, n)
    xT = x.T
    print("Generating Transition Matrix: P[x]")
    RT = tc.tran_matrix(P, x)
    # A = mymult(output, b, RT, xT)  # check accuracy of this
    # placeholder: the all-ones matrix below ignores the computed RT
    A = np.ones((n, n))
    print("Solving the eigenvector problem for P[x]")
    eigenvalue, eigenvector = largest_eigsh(A, 2, which='LM')
    return (eigenvector, RT, x)
Example #14
    def computeLmaxes_Thresh(self, nSteps):
        self.LM = np.zeros(nSteps)
        self.LM_vect = {}
        for n_i in range(nSteps):
            print("computing step " + str(n_i))
            self.setThresholdMask(nSteps=nSteps, step_i=n_i)
            self.computeNewAreaFromThresholdMask(n_i)
            self.computeFitnessFromThreshold(n_i)
            self.computeLandscapeMatrix()

            evals_large_sparse, evecs_large_sparse = largest_eigsh(self.M,
                                                                   1,
                                                                   which='LA')
            self.LM[n_i] = evals_large_sparse[0]
            self.LM_vect[n_i] = evecs_large_sparse
Example #15
def getmainEig(dim, X):
    print("Computing means of data set")
    means = mean(X.T, axis=1)
    print("Computing covariance matrix")
    X_ = (X.T - dot(ones((X.shape[0], 1)), matrix(means.T)).T) / std(X.T)
    covmat = cov(X_)
    print("Computing eigenvalues")
    eigvalues, eigvectors = largest_eigsh(covmat, k=754)
    # Sort the eigenvalues and eigenvectors in descending order
    indices = eigvalues.argsort()[::-1]
    eigvalues = eigvalues[indices]
    eigvectors = eigvectors[:, indices].T
    error = abs(sum(eigvalues[dim:])) / abs(sum(eigvalues))
    print("With", dim, "dimensions, information loss is approximately", error * 100, "%")
    display_patches(eigvectors[0:dim])
    return eigvectors, eigvalues
Example #16
def validcut(z, n):
    sel_index = [i for i in range(n) if z[i] == 1]
    uns_index = [i for i in range(n) if z[i] == 0]

    val = 0
    for i in sel_index:
        val = val + S[i]

    a, b = largest_eigsh(val, 1)  # compute the largest eigenvalue
    nu = a[0]
    mu = [0] * n
    for i in uns_index:
        mu[i] = T[i]
    return nu, mu
    def K(self, X):
        self.nbrs_ = NearestNeighbors(n_neighbors=self.n_neighbors,
                                      algorithm=self.neighbors_algorithm,
                                      n_jobs=self.n_jobs)

        random_state = check_random_state(self.random_state)
        X = check_array(X, dtype=float)
        self.nbrs_.fit(X)
        n = len(X)
        M = locally_linear_embedding(
                self.nbrs_, self.n_neighbors,
                eigen_solver=self.eigen_solver,
                max_iter=self.max_iter,
                random_state=random_state, reg=self.reg, n_jobs=self.n_jobs)
        lambdamax, evec = largest_eigsh(M, 1, which='LM')
        L = lambdamax * np.eye(n) - M
        # Center L (disabled in the source):
        #   m = len(L)
        #   e = 1 / np.sqrt(m) * np.ones([m, 1])
        #   S = np.eye(len(L)) - np.matmul(e.T, e)
        #   L = np.matmul(S, L, S)
        return L
 def weight_multi(self,init = False,label=[],n_init=10):
     w = np.empty(self.L)
     
     for i in range(len(self.Alist)):
         
         if init:
             evals, evecs = largest_eigsh(self.Alist[i].astype(float),self.k, which='LM')
             evals_abs = abs(evals)
             evecs = evecs[:,evals_abs.argsort()]
             evals = evals[evals_abs.argsort()]
             
             if self.method == 'gmm':
                 evecs_mean = np.zeros((self.k,self.k))
                 label = GaussianMixture(n_components=self.k,n_init=n_init).fit_predict(np.real(evecs[:,:]))
             else:
                 label = KMeans(n_clusters=self.k,n_init=n_init).fit_predict(np.real(evecs[:,:]))
 
         w[i] = self.weight_single(self.Alist[i],label)
     
     w = np.maximum(w,np.zeros(self.L))
     if np.sum(w)==0:
         w = np.random.randint(low=1,high=10,size=self.L)
     return w/np.sum(w)
# 0, and we can change it to many other things: say, set it to the value 2, or implement a for loop
# to iterate the function and get the required values.
import numpy as np
from time import perf_counter as clock  # time.clock was removed in Python 3.8
from scipy.linalg import eigh as largest_eigh
from scipy.sparse.linalg import eigsh as largest_eigsh

N = 5000
k = 10
X = np.random.random((N, N)) - 2.5
X = np.dot(X, X.T)  # create a symmetric matrix

# Benchmark the dense routine
start = clock()
evals_large, evecs_large = largest_eigh(X, subset_by_index=[N - k, N - 1])
elapsed = (clock() - start)
print("eigh elapsed time: ", elapsed)
print(X)
# Benchmark the sparse routine
start = clock()
evals_large_sparse, evecs_large_sparse = largest_eigsh(X, k, which='LM')
elapsed = (clock() - start)
print("eigsh elapsed time: ", elapsed)


# approach 2: NumPy universal functions
import numpy as np
import time
import sys  # to inspect the memory occupied by a list vs. a numpy array
N = int(input('Please enter the dimension you want (N*N): '))
d = np.random.random((N, N)) - 0.5

print(d)
start = time.time()
Example #20
def select(rule, x, A, b, loss, args, iteration):
    if rule is None:
      return None, args

    """ Adaptive selection rules """
    n_params = x.size
    block_size = args["block_size"]
    it = iteration
    lipschitz = loss.lipschitz

    if "Tree" not in rule:
      assert block_size > 0
    else:
      assert block_size == -1
      
    g_func = loss.g_func

    
    if rule == "all":
       """ select all coordinates """
       block = np.arange(n_params)

    elif rule == "Random":
       """ randomly select a coordinate"""
       all_block = np.random.permutation(n_params)
       block = all_block[:block_size]

       #block = np.unravel_index(block,  (n_features, n_classes))


    elif rule in ["Perm", "Cyclic"]:
      """Select next coordinate"""
      
      if iteration % n_params == 0:
         args["perm_coors"] = np.random.permutation(n_params)

      emod = it % int((n_params/block_size))
      block = args["perm_coors"][emod*block_size: (emod + 1)*block_size]
      
      #block = np.unravel_index(block,  (n_features, n_classes))

    elif rule == "Lipschitz":
      """non-uniform sample based on lipschitz values"""
      L = lipschitz

      block = np.random.choice(x.size, block_size, replace=False,
                               p=L/L.sum())
    
    elif rule in ["GS"]:
      """ select coordinates based on largest gradients"""
      g = g_func(x, A, b, block=None)
      s = np.abs(g)

      block = np.argsort(s, axis=None)[-block_size:]

    elif rule in ["GSDLi", "GSD"]:
      """ select coordinates based on largest individual lipschitz"""
      L = lipschitz
      g = g_func(x, A, b, block=None)

      s = np.abs(g) / np.sqrt(L)
                                     
      block = np.argsort(s, axis=None)[-block_size:]

    elif rule in ["GSDHb"]:
      """ select coordinates based on the uper bound of the hessian"""
      g = g_func(x, A, b, block=None)

      if "GSD_L" not in args:
        Hb = loss.Hb_func(x, A, b, block=None)
        
        args["GSD_L"] = np.sum(np.abs(Hb), 1)

      s = np.abs(g) / np.sqrt(args["GSD_L"])
                                     
      block = np.argsort(s, axis=None)[-block_size:]

    elif rule in ["GSQ-IHT", "IHT"]:
      """ select coordinates based on largest individual lipschitz"""
      L = lipschitz
      if "Hb_IHT" not in args:
        args["Hb_IHT"] = loss.Hb_func(x, A, b, block=None)

        #args["mu_IHT"] = 1. / np.max(np.linalg.eigh(args["Hb_IHT"])[0])
        args["mu_IHT"] = 1. / largest_eigsh(args["Hb_IHT"], 1, which='LM')[0]
      Hb = args["Hb_IHT"]
      mu = args["mu_IHT"]

      G = g_func(x, A, b, block=None)

      d = G / np.sqrt(L)
      d_old = d.copy()

      for i in range(10):

        d = d - mu*(G + Hb.dot(d))
        ind = np.argsort(np.abs(d))
        d[ind[:-block_size]]= 0

        if np.linalg.norm(d_old - d) < 1e-10:

          block = ind[-block_size:]
          break
        #print "norm diff: %.3f" % np.linalg.norm(d_old - d)
        d_old = d.copy()
        block = ind[-block_size:]
      #block = np.where(d != 0)
      return np.array(block), args

    elif rule == "gsq-nn":
      """ select coordinates based on largest individual lipschitz"""
      g = g_func(x, A, b, block=None)
      L = lipschitz
      d = -g / L

      x_new = x + d
      neg = x_new < 0

      pos = (1 - neg).astype(bool)
      
      # SANITY CHECK
      assert x.size == (neg.sum() + pos.sum())

      s = np.zeros(x.size)
      d = -g[pos] / L[pos]
      s[pos] = g[pos] * d + (L[pos]/2.) * d**2

      d = - x[neg]
      s[neg] = g[neg] * d + (L[neg]/2.) * d**2
                        
      block = np.argsort(s, axis=None)[:block_size]
    
    elif rule in ["GSDTree", "GSTree","RTree", "GSLTree"]:

      """ select coordinates that form a forest based on BGS or BGSC """
      g_func = loss.g_func

      
      if "GSDTree" == rule:
        lipschitz = np.sum(np.abs(A), 1)
        score_list = np.abs(g_func(x, A, b, None)) / np.sqrt(lipschitz)
        sorted_indices = np.argsort(score_list)[::-1] 

      elif "GSLTree" == rule:
        lipschitz = lipschitz
        score_list = np.abs(g_func(x, A, b, None)) / np.sqrt(lipschitz)
        sorted_indices = np.argsort(score_list)[::-1]  

      elif "GSTree" == rule:
        score_list = np.abs(g_func(x, A, b, None))    
        sorted_indices = np.argsort(score_list)[::-1]  

      elif "RTree" == rule:
        sorted_indices = np.random.permutation(np.arange(A.shape[0]))

      block = ta.get_tree_slow(sorted_indices, adj=A)
      
      if iteration == 0:
        xr =  np.random.randn(*x.shape)
        xE, _ = ur.update("bpExact", xr.copy(), 
                                 A, b, loss, copy.deepcopy(args), block, iteration=iteration)
        xG, _ = ur.update("bpGabp", xr.copy(), 
                                A, b, loss, copy.deepcopy(args) , block, iteration=iteration)

        np.testing.assert_array_almost_equal(xE, xG, 3)

        print("Exact vs GaBP Test passed...")


    elif rule == "GSExactTree":
      """ select coordinates based on largest individual lipschitz"""

      g = g_func(x, A, b, block=None)

      s = np.abs(g)
      
      block_size = int(loss.n_params**(1./3))
      block = np.argsort(s, axis=None)[-block_size:]

    elif rule == "GSLExactTree":
      """ select coordinates based on largest individual lipschitz"""

      l = lipschitz
      g = g_func(x, A, b, block=None)

      s = np.abs(g) / np.sqrt(l)
      
      block_size = int(loss.n_params**(1./3))
      block = np.argsort(s, axis=None)[-block_size:]


    elif rule in ["TreePartitions", "RedBlackTree", 
                  "TreePartitionsRandom", 
                  "RedBlackTreeRandom"]:
      """ select coordinates that form a forest based on BGS or BGSC """
           
      g_func = loss.g_func 

      if "graph_blocks" not in args:       
        yb = args["data_y"]
        unlabeled = np.where(yb == 0)[0]
        Wb = args["data_W"][unlabeled][:, unlabeled]

        #################### GET GRAPH BLOCKS
        if args["data_lattice"] == False:     
          if rule == "RedBlackTree":
            graph_blocks = ta.get_rb_general_graph(Wb, L=lipschitz)

          elif rule == "TreePartitions":     
            graph_blocks = ta.get_tp_general_graph(Wb, L=lipschitz)

          elif rule == "RedBlackTreeRandom":
            graph_blocks = ta.get_rb_general_graph(Wb, L=np.ones(lipschitz.size))

          elif rule == "TreePartitionsRandom":     
            graph_blocks = ta.get_tp_general_graph(Wb, L=np.ones(lipschitz.size))


          else:

            raise ValueError("%s - No" % rule)

        if args["data_lattice"] == True:     
          if rule == "RedBlackTree":
            graph_blocks = ta.get_rb_indices(args["data_nrows"], 
                                             args["data_ncols"])

          elif rule == "TreePartitions":     
            graph_blocks = ta.get_tp_indices(args["data_nrows"], 
                                             args["data_ncols"])

          else:
            raise ValueError("%s - No" % rule)

          graph_blocks = ta.remove_labeled_nodes(graph_blocks, args["data_y"])

        
        #################### SANITY CHECK
        if rule in ["RedBlackTree", "RedBlackTreeRandom"]:
          # Assert all blocks have diagonal dependencies
          for tmp_block in graph_blocks:
            tmp = A[tmp_block][:, tmp_block]
            assert np.all(tmp == np.diag(np.diag(tmp)))

        elif rule in ["TreePartitions","TreePartitionsRandom"]:
          # Assert all blocks are forests/acyclic
          for tmp_block in graph_blocks:
            W_tmp = (Wb[tmp_block][:, tmp_block] != 0).astype(int)
            assert ta.isForest(W_tmp) 
        else:
          raise ValueError("%s - No" % rule)

        args["graph_blocks"] = cycle(graph_blocks)
       
        
      block = next(args["graph_blocks"])

      block.sort()  

      # check if block is diag
      # tmp = A[block][:, block]
      # assert np.all(tmp == np.diag(np.diag(tmp)))  

      if iteration == 0:
        x = np.random.randn(A.shape[1])
        xr =  np.random.randn(*x.shape)
        xE, _ = ur.update("bpExact", xr.copy(), 
                                 A, b, loss, copy.deepcopy(args), block, iteration)
        xG, _ = ur.update("bpGabp", xr.copy(), 
                                A, b, loss, copy.deepcopy(args) , block, iteration)

        np.testing.assert_array_almost_equal(xE, xG, 3)
        print("Exact vs GaBP Test passed...")

    else:
      raise ValueError("selection rule %s doesn't exist" % rule)

    
    if "Tree" not in rule:
      assert block_size == block.size


    assert np.unique(block).size == block.size
    block.sort()
    return block, args
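
A hypothetical driver for select() (my own stub; the LSLoss class, dimensions, and block_size are assumptions illustrating the loss interface the function touches — .lipschitz and .g_func — with numpy assumed imported as np):

import numpy as np

class LSLoss:
    """Least-squares stand-in exposing the attributes select() uses."""
    def __init__(self, A):
        self.lipschitz = np.sum(A ** 2, axis=0)   # per-coordinate Lipschitz constants
    def g_func(self, x, A, b, block=None):
        return A.T @ (A @ x - b)                  # full gradient

A = np.random.randn(40, 20)
b = np.random.randn(40)
x = np.zeros(20)
block, args = select("Random", x, A, b, LSLoss(A), {"block_size": 5}, iteration=0)
print(block)  # five randomly chosen coordinate indices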
	n = i
	
	conv = 1

	times[i-2][0] = i

	# Benchmark the dense routine
	start = clock()
	evals_large, evecs_large = largest_eigh(H[0:n,0:n], subset_by_index=[n-k, n-1])
	elapsed = (clock() - start)
	times[i-2][1] = elapsed
# 	print "eigh elapsed time: ", elapsed

	# Benchmark the sparse routine
	start = clock()
	evals_large_sparse, evecs_large_sparse = largest_eigsh(H[0:n,0:n], k, which='LM')
	elapsed = (clock() - start)
	times[i-2][2] = elapsed
# 	print "eigsh elapsed time: ", elapsed

	start = clock()

	phi0 = np.random.rand(n)
	# print la.eig(H)[1].T
	CayleyN = (np.identity(n)-0.5*H[0:n,0:n])
	CayleyP = (np.identity(n)+0.5*H[0:n,0:n])

	while(conv > eps):
		phi1 = la.solve(CayleyP,CayleyN.dot(phi0))
		mu = math.sqrt(phi1.dot(phi1))
		phi1 = phi1/mu  
                    inSize = U.shape[0]
                    outSize = inSize
                    resSize = 1000
                    a = 0.9         # leaking rate
                    K = 0.99         # spectral radius
                    reg = 1e-6       # regularization coefficient
                    input_scaling = 1
                    N_c = 100


                    # generation of random weights
                    Win = (np.random.rand(resSize,1+inSize)-0.5) * input_scaling
                    W = np.random.rand(resSize,resSize)-0.5

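                    # top eigenvalue of W @ W.T is the squared largest singular value,
                    # so rhoW below is the spectral norm of W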
                    largest_eigvals, _ = largest_eigsh(W @ W.T, 1, which='LM')
                    rhoW = np.sqrt(largest_eigvals[0])
                    W = W/rhoW*(K-1+a)/a
                    X = np.zeros((resSize,U.shape[1]))
                    x = np.zeros([resSize,1])

                    for t in range(U.shape[1]):
                        u = U[:,t:t+1]
                        x = (1-a) * x + a * np.tanh( Win @ np.vstack((1,u)) + W @ x )
                        X[:,t:t+1] = x


                    # offline train
                    U_train = U[:,train_start : train_start + num_train]
                    X_train = X[:,train_start : train_start + num_train]
                    Y_train = U[:,train_start + 1 : train_start + num_train + 1]
Example #23
    def generateEmbeddedCoordinates(self, distanceMatrix, numCoords=0):
        """ Computes the diffusion coordinates from the distance matrix supplied.
        The diffusion kernel is $k(x,y) = exp{ distance(x,y)^2 / epsilon }
        SVD is used to compuet the singular values of a matrix that is conjugated
        to the kernel. Then the corrsponding vectors are rescaled to get the
        eigen vectors. (Original version: Miro Kramar, 2013, in MATLAB)
        
        NOTES: - The first eigenvalue is always +1, so it is not returned.

        Rachel Levanger, 2014."""
        if numCoords==0:
            numCoords=distanceMatrix.shape[0] - 2

        min_coords = min(numCoords, distanceMatrix.shape[1] - 2)
        if min_coords < numCoords:
            w.warn('Number of coordinates truncated to max value of [ndim(distmat)-2].')
            numCoords = min_coords

        numCoords = numCoords + 1 # Make up for zero-based indexing

        matrix_size = distanceMatrix.shape
        K = np.zeros(matrix_size)

        # Construct the kernel
        for i in range(0,matrix_size[0]):
            for j in range(0,matrix_size[1]):
                K[i,j] = np.exp(-((distanceMatrix[i,j])/self.epsilon))
                
        # Construct the symmetric conjugate (by \sqrt(pi) ) of the kernel 
        dx = sum(K)
        dx_square_root = np.sqrt(dx)
        
        P = np.zeros(matrix_size)
        for i in range(0,matrix_size[0]):
            for j in range(0,matrix_size[1]):
                P[i,j] = np.divide(K[i,j], dx_square_root[i] * dx_square_root[j] )
                
        # Get the largest eigenvalues and corresponding eigenvectors
        EigenValues, EigenVectors = largest_eigsh(P, numCoords, which="LM")

        # Transform the eigen vectors to get the (right) eigenvectors of
        # k(x,y)/dx(x) 

        d = sum(dx)
        pi_sqrt = np.sqrt(np.divide(dx, d))

        for i in range(0,numCoords):
            for j in range(0,EigenVectors.shape[0]):
                EigenVectors[j,i] = np.divide(EigenVectors[j,i], pi_sqrt[j])

        # Sort by eigenvalue descending
        I = EigenValues.argsort()[::-1]
        EigenValues = EigenValues[I]
        EigenVectors = EigenVectors[:,I]

        # Account for sign changes in eigenvectors and scale by eigenvalues
        for i in range(0,EigenValues.shape[0]):
            EigenVectors[:,i] = EigenVectors[:,i]*EigenValues[i]
            if EigenVectors[0,i] < 0:
                EigenVectors[:,i] = EigenVectors[:,i]*(-1)

        return EigenVectors[:,1:], EigenValues[1:]
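
For reference, a vectorized equivalent of the kernel and conjugate construction above (my own rewrite, with a hypothetical function name; same math as the double loops):

import numpy as np

def diffusion_kernel_conjugate(distanceMatrix, epsilon):
    K = np.exp(-distanceMatrix / epsilon)   # kernel, as in the double loop above
    dx = K.sum(axis=0)                      # matches dx = sum(K)
    P = K / np.sqrt(np.outer(dx, dx))       # symmetric conjugate by sqrt(pi)
    return K, P, dx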
Example #24
 def run(self):
     L = nx.normalized_laplacian_matrix(self.graph)
     evalues, evectors = largest_eigsh(L, k=self.parameters['dim'])
     self.embeddings = {str(n):v for n,v in zip(self.graph.nodes, evectors)}
Example #25
import numpy as np
from time import perf_counter as clock  # time.clock was removed in Python 3.8
from scipy.linalg import eigh as largest_eigh
from scipy.sparse.linalg import eigsh as largest_eigsh

np.set_printoptions(suppress=True)
np.random.seed(0)
N=500
k=10
X = np.random.random((N,N)) - 0.5
X = np.dot(X, X.T) #create a symmetric matrix

# Benchmark the dense routine
start = clock()
evals_large, evecs_large = largest_eigh(X, subset_by_index=[N - k, N - 1])
elapsed = (clock() - start)
print("eigh elapsed time: ", elapsed)

# Benchmark the sparse routine
start = clock()
evals_large_sparse, evecs_large_sparse = largest_eigsh(X, k, which='LM')
elapsed = (clock() - start)
print "eigsh elapsed time: ", elapsed
Example #26
    def compute_eigen_functions(self, domain, num_eigens):

        X = self.compute_k_matrix(domain)
        return largest_eigsh(X, num_eigens, which='LM')