Example No. 1
    def addforwardscale(self):
        """This method adds a unit gain node to all nodes with an out-degree
        of 1; now all of these nodes should have an out-degree of 2.
        Therefore all nodes with pointers should have 2 or more edges pointing
        away from them.

        It uses the number of dummy variables to construct these gain,
        connection and variable name matrices.

        """
        m_graph = nx.DiGraph()
        # Construct the graph with connections
        for u in range(self.nodummy_nodes):
            for v in range(self.nodummy_nodes):
                if (self.nodummyconnection[u, v] != 0):
                    m_graph.add_edge(self.nodummyvariablelist[v],
                                     self.nodummyvariablelist[u],
                                     weight=self.nodummygain[u, v])
        # Add connections where out degree == 1
        counter = 1
        for node in list(m_graph.nodes()):  # snapshot: the loop adds nodes
            if m_graph.out_degree(node) == 1:
                nameofscale = 'DV' + str(counter)
                m_graph.add_edge(node, nameofscale, weight=1.0)
                counter = counter + 1

        self.scaledforwardconnection = transpose(
            nx.to_numpy_matrix(m_graph, weight=None))
        self.scaledforwardgain = transpose(
            nx.to_numpy_matrix(m_graph, weight='weight'))
        self.scaledforwardvariablelist = m_graph.nodes()
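A minimal standalone sketch of the same dummy-node idea (toy graph and names of my own, not the class's no-dummy matrices):

import networkx as nx
from numpy import transpose

m_graph = nx.DiGraph()
m_graph.add_edge('A', 'B', weight=2.0)   # 'A' has out-degree 1
counter = 1
for node in list(m_graph.nodes()):       # snapshot, since the loop adds nodes
    if m_graph.out_degree(node) == 1:
        m_graph.add_edge(node, 'DV' + str(counter), weight=1.0)
        counter += 1
print(transpose(nx.to_numpy_matrix(m_graph, weight='weight')))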
Example No. 2
    def energy(self):
        e = 0.0
        data = self.data
        for i,p in self.graph.node.iteritems():
            thisval = data[:,i]
            if np.isnan(p['eta']):
                marg = p['marginal']
                e -= np.log((thisval*marg + (1-thisval)*(1-marg))).sum()
            else:
                delta = p['delta']
                eta = p['eta']
                parval = data[:,self.graph.predecessors(i)[0]]
                prob = thisval*(parval*(1-delta) + (1-parval)*eta) + \
                        (1-thisval)*(parval*delta + (1-parval)*(1-eta))
                prob = np.clip(prob, 1e-300, 1.0)  # keep the result; np.clip is not in-place
                e -= np.log(prob).sum()

        mat = np.array(nx.to_numpy_matrix(self.graph),dtype=np.int32)
        if self.template:
            tempmat = np.array(nx.to_numpy_matrix(self.template),dtype=np.int32)
        else:
            tempmat = np.zeros_like(mat)
        e += self.priorweight * float(np.abs(mat - tempmat).sum())

        return e
Example No. 3
    def addforwardScale(self):
        """This method should add a unit gain node to all nodes with an out-degree
        of 1; now all of these nodes should have an out-degree of 2. Therefore
        all nodes with pointers should have 2 or more edges pointing away from 
        them.
        
        It uses the no dummy variables to construct these gain, connection
        and variable name matrices. """

        M = nx.DiGraph()
        #construct the graph with connections
        for u in range(self.nodummyN):
            for v in range(self.nodummyN):
                if (self.nodummyconnection[u, v] != 0):
                    M.add_edge(self.nodummyvariablelist[v], self.nodummyvariablelist[u],
                               weight=self.nodummygain[u, v])
        
        
        #now add connections where out degree == 1
        counter = 1
        
        for node in list(M.nodes()):  # snapshot: the loop adds nodes
            if M.out_degree(node) == 1:
                nameofscale = 'DV' + str(counter)
                M.add_edge(node, nameofscale, weight=1.0)
                counter = counter + 1

        self.scaledforwardconnection = transpose(nx.to_numpy_matrix(M, weight=None))
        self.scaledforwardgain = transpose(nx.to_numpy_matrix(M, weight='weight'))
        self.scaledforwardvariablelist = M.nodes()
Example No. 4
def map_flows(catalog):
    import analysis as trans
    fm = trans.FlowMapper()

    read_exceptions = {}
    for i,fn in enumerate(os.listdir('.\\repository_data\\')):
        print i, fn
        try:
            sys = catalog.read(''.join(['.\\repository_data\\',fn]))
        except Exception as e:
            read_exceptions[fn] = e
            print '\t',e.message
            continue  # don't add a system that failed to read
        fm.add_system(sys)
        if i > 5:
            break

    graph = fm.transformation_graph()
    fm.stats()
    nx.draw_graphviz(graph,prog='dot',root='energy')
    print nx.to_numpy_matrix(graph) > 0
#    pdg = nx.to_pydot(graph)
#    pdg.write_png('transform.png')
#    nx.graphviz_layout(graph,prog='neato')
#    nx.draw_graphviz(graph)
    plt.show()
 def test_weight_keyword(self):
     WP4 = nx.Graph()
     WP4.add_edges_from( (n,n+1,dict(weight=0.5,other=0.3)) for n in range(3) )
     P4 = path_graph(4)
     A = nx.to_numpy_matrix(P4)
     np_assert_equal(A, nx.to_numpy_matrix(WP4,weight=None))
     np_assert_equal(0.5*A, nx.to_numpy_matrix(WP4))
     np_assert_equal(0.3*A, nx.to_numpy_matrix(WP4,weight='other'))
Example No. 6
 def test_round_trip(self):
     W_ = W.from_networkx(self.known_nx)
     np.testing.assert_allclose(W_.sparse.toarray(), self.known_amat)
     nx2 = W_.to_networkx()
     np.testing.assert_allclose(nx.to_numpy_matrix(nx2), self.known_amat)
     nxsquare = self.known_W.to_networkx()
     np.testing.assert_allclose(self.known_W.sparse.toarray(), nx.to_numpy_matrix(nxsquare))
     W_square = W.from_networkx(nxsquare)
     np.testing.assert_allclose(self.known_W.sparse.toarray(), W_square.sparse.toarray())
 def test_numpy_multigraph(self):
     G=nx.MultiGraph()
     G.add_edge(1,2,weight=7)
     G.add_edge(1,2,weight=70)
     A=nx.to_numpy_matrix(G)
     assert_equal(A[1,0],77)
     A=nx.to_numpy_matrix(G,multigraph_weight=min)
     assert_equal(A[1,0],7)
     A=nx.to_numpy_matrix(G,multigraph_weight=max)
     assert_equal(A[1,0],70)
Example No. 8
def original_generate_token_graph():
    corp = []
    sentences = []      # Initialize an empty list of sentences
    
    input_folders = [ sub_dir for sub_dir in listdir(dataset_folder) if isdir(join(dataset_folder, sub_dir)) ]
    
    for folder in input_folders:
        dir_path = dataset_folder + os.sep + folder + os.sep
        files = [ f for f in listdir(dir_path) if isfile(join(dir_path,f)) ]
        
        for file in files:
            file_path = dir_path + file
            file_name, file_extension = splitext(file_path)
            doc = ""
            
            if file_extension == ".pdf":
                doc = convert_pdf_to_txt(file_path)
            elif file_extension == ".docx":
                doc = convert_docx_to_txt(file_path)
            else:
                continue
                
            if doc != "":
                doc = doc.decode("utf8")
                #doc = words_to_phrases(doc)
                doc = doc.lower()
                doc = doc_to_wordlist(doc,True)
                corp = it.chain(corp,doc)
                #sentences += doc_to_sentences(doc, tokenizer, remove_stopwords=False)
    
    corp = list(corp)
    graph = nx.Graph()
    weights = Counter()
    edges = set()
    window = corp[0:5]
    
    for tup in it.permutations(window,2):
        weights[tup] += 1
    for i in range(3,len(corp)-2):
        for j in range(i-2,i+2):
            weights[(corp[j],corp[i+2])] += 1
            weights[(corp[i+2],corp[j])] += 1
            edges.add((corp[i+2],corp[j]))
            
    for e in edges:
        graph.add_edge(e[0], e[1], weight=weights[e])
    
    print graph
    nx.write_weighted_edgelist(graph, "graph.g")
    print nx.to_numpy_matrix(graph)
    np.savetxt("graph.adj", nx.to_numpy_matrix(graph))
    print "finished"
Example No. 9
def plot_weight_distribution(brain, output_file=None, **kwargs):
    """
    Plot a histogram of the edge weights using matplotlib.
    Requires that the brain has been thresholded beforehand; NaNs are ignored when plotting.

    Parameters
    ----------
    brain: maybrain.brain.Brain
        An instance of the `Brain` class
    output_file: str
        If set, the figure is saved by calling fig.savefig(output_file) from matplotlib.
    kwargs
        keyword arguments if you need to pass them to matplotlib's hist()

    Returns
    -------
    fig, ax : tuple
        if output_file is None, this returns (fig, ax) from the figure created
    """
    fig, ax = plt.subplots()

    if isinstance(brain, nx.Graph):
        arr = np.copy(nx.to_numpy_matrix(brain, nonedge=np.nan))
    else:
        arr = np.copy(nx.to_numpy_matrix(brain.G, nonedge=np.nan))

    upper_values = np.triu_indices(np.shape(arr)[0], k=1)
    weights = np.array(arr[upper_values])

    # If directed, also add the lower-triangular part of the adjacency matrix
    if not isinstance(brain, nx.Graph) and brain.directed:
        below_values = np.tril_indices(np.shape(arr)[0], k=-1)
        weights = np.append(weights, np.array(arr[below_values]))

    # Removing NaNs for correct plotting
    weights = weights[~np.isnan(weights)]

    # the histogram of the data
    ax.hist(weights, **kwargs)
    ax.set_title('Weights')

    # Tweak spacing to prevent clipping of ylabel
    fig.tight_layout()

    # If outputfile is defined, fig is correctly closed,
    # otherwise returned so others can add more information to it
    if output_file is not None:
        fig.savefig(output_file)
        plt.close(fig)
    else:
        return fig, ax
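A brief usage sketch (my own illustration; the function accepts a plain nx.Graph, and the random weights here stand in for a real thresholded brain):

import networkx as nx
import numpy as np
import matplotlib.pyplot as plt

G = nx.erdos_renyi_graph(30, 0.2, seed=0)
for u, v in G.edges():
    G[u][v]['weight'] = np.random.rand()   # illustrative weights
fig, ax = plot_weight_distribution(G, bins=10)
plt.show()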
Example No. 10
def stakeholder_analysis(args):
	
	G, nodes = create_graph_from_csv((args.input_file)[0])
	print "\n\nAdjacency Matrix:\n"
	print nx.to_numpy_matrix(G)
	print "\n"
	status, cycles = get_cycles(G, (args.you_stakeholder)[0])

	if status == 1:
		print "No cycles found. Did you mispell the name of your system (" + (args.you_stakeholder)[0] + ")?\n"
	else:
		loopWeight = calculate_cycles_weights(G, cycles)
		endAveragesSorted = get_stakeholders_importances(loopWeight, cycles, nodes)
		if args.print_graph:
			draw_graph(G, "graph.png")
def directed_weighted_clustering(g,weightString):
	
	
	n = g.number_of_nodes()
	from numpy import linalg as LA
	#adjacency matrix
	A = nx.to_numpy_matrix(g,nodelist=g.nodes(),weight=None)
	A2 = LA.matrix_power(A,2)
	AT = A.T
	Asum = AT + A
	cVector = np.asmatrix(np.ones(n))  # row of ones: the dot products below then sum to degrees
	
	kin = {i:np.dot(AT[i],cVector.T) for i in range(n)}
	kout = {i:np.dot(A[i],cVector.T)for i in range(n)}
	kparallel = {i:np.dot(Asum[i],cVector.T)for i in range(n)}

	#print "kin"
	#print kin
	#weight matrix
	W = nx.to_numpy_matrix(g,nodelist=g.nodes(),weight=weightString)
	WT = W.T
	W2 = LA.matrix_power(W,2)
	W3 = LA.matrix_power(W,3)
			
	WWTW =  W*WT*W
	WTW2 = WT*W2
	W2WT = W2*WT
	
	ccycle = {i:0 for i in range(n)}
	cmiddle = {i:0 for i in range(n)}
	cin = {i:0 for i in range(n)}
	cout = {i:0 for i in range(n)}

	for i in range(n):
			
			if kin[i]*kout[i]  - kparallel[i] > 0:
				ccycle[i] = W3[i,i] / float((kin[i]*kout[i] - kparallel[i]))
				cmiddle[i] = WWTW[i,i] / float((kin[i]*kout[i] - kparallel[i]))
			if kin[i] > 1: 
				cin[i] = WTW2[i,i] / float((kin[i]*(kin[i]-1)))
			if kout[i] > 1: 
				cout[i] = W2WT[i,i] / float((kout[i]*(kout[i]-1))) 
	#print type((np.mean(ccycle.values()),np.mean(cmiddle.values()),np.mean(cin.values()),np.mean(cout.values())))
	#print "here"
	#input()
	#return (np.mean(ccycle.values()),np.mean(cmiddle.values()),np.mean(cin.values()),np.mean(cout.values()))
	return (ccycle,cmiddle,cin,cout)
Example No. 12
    def brute_iso (self) :
        print("--- BRUTE ---")
        # quick pre-checks: identical graphs are isomorphic; graphs differing in
        # node count, edge count, or colour-class balance cannot be
        if (self.isEqual()) :
            return True
        if (self.l1 != self.l2) :
            return False
        if (len(self.g1.edges()) != len(self.g2.edges())) :
            return False
        if (not self.is_balanced()):
            return False
        start = time.clock()
        # compute all permutations on color classes
        dictionary = list(chain.from_iterable(self.cc2))
        permut = self.partial_permutations(self.cc1)
        g1_permutations = self.translate_permutations(permut, dictionary)
        # print("dictionary: ", dictionary)
        # print("permut: ", permut)
        # print("g1_permutations: ", g1_permutations)
        # print("g1_Nodes: ", self.g1.nodes())
        # print("g2_Nodes: ", self.g2.nodes())
        #print(g1_permutations)
        #elapsed = time.clock()
        #elapsed = elapsed - start
        #num_perm = len(g1_permutations)
        #print("Time spent in (generating permutations) is: ", elapsed)
        #print("Number of permutations generated: ", num_perm)
        #progress = math.floor(num_perm/10)
        ad_mat_g2 = nx.to_numpy_matrix(self.g2)
        #i=0
        # compare each permutation of G with H

        for perms in g1_permutations:
            #i = i+1
            '''
            if i % progress == 0:
                print("10%% of brute_iso is done.")
            '''
            ad_mat_g1 = nx.to_numpy_matrix(self.g1, perms)
            # print(nx.to_numpy_matrix(self.g1))
            # print("ad_mat_g1: ")
            # print(ad_mat_g1)
            # print("ad_mat_g2: ")
            # print(ad_mat_g2)
            if (np.array_equal(ad_mat_g1, ad_mat_g2)):
                return True
        return False
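The core trick above is that nx.to_numpy_matrix(G, nodelist) reorders rows and columns by the given node order; a standalone sketch of that permutation test on two tiny graphs of my own:

import itertools
import networkx as nx
import numpy as np

g1 = nx.path_graph(3)                    # 0-1-2
g2 = nx.from_edgelist([(0, 2), (2, 1)])  # a relabelled path
a2 = nx.to_numpy_matrix(g2)
found = any(np.array_equal(nx.to_numpy_matrix(g1, nodelist=list(perm)), a2)
            for perm in itertools.permutations(g1.nodes()))
print(found)  # True: some ordering of g1's nodes reproduces g2's matrix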
Example No. 13
def adj_matrix(G,nodelist=None):
    """Return adjacency matrix of G.

    Parameters
    ----------
    G : graph
       A NetworkX graph 

    nodelist : list, optional       
       The rows and columns are ordered according to the nodes in nodelist.
       If nodelist is None, then the ordering is produced by G.nodes().

    Returns
    -------
    A : numpy matrix
      Adjacency matrix representation of G.

    Notes
    -----
    If you want a pure Python adjacency matrix representation try
    networkx.convert.to_dict_of_dicts which will return a
    dictionary-of-dictionaries format that can be addressed as a
    sparse matrix.

    See Also
    --------
    to_numpy_matrix
    to_dict_of_dicts
    """
    return nx.to_numpy_matrix(G,nodelist=nodelist)
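For instance (a quick check of my own; the exact repr depends on the NumPy version):

>>> G = nx.path_graph(3)
>>> adj_matrix(G)
matrix([[0., 1., 0.],
        [1., 0., 1.],
        [0., 1., 0.]])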
Example No. 14
    def test_adjacency_interface_numpy(self):
        A=nx.to_numpy_matrix(self.Gs)
        pos=nx.drawing.layout._fruchterman_reingold(A)
        pos=nx.drawing.layout._sparse_fruchterman_reingold(A)

        pos=nx.drawing.layout._fruchterman_reingold(A,dim=3)
        assert_equal(pos.shape,(6,3))
Example No. 15
def summarize_precoth(dwi_network_file, fdg_stats_file, subject_id):
    import os.path as op
    import scipy.io as sio
    import networkx as nx

    fdg = sio.loadmat(fdg_stats_file)
    dwi_ntwk = nx.read_gpickle(dwi_network_file)

    # Thal L-1 R-2
    # Cortex 3 and 4
    # Prec L-5 R-6
    titles = ["subjid"]
    fdg_avg = ["LTh_CMR_avg","RTh_CMR_avg","LCo_CMR_avg","RCo_CMR_avg","LPre_CMR_avg","RPre_CMR_avg"]
    f_avg = [fdg["func_mean"][0][0],fdg["func_mean"][1][0],fdg["func_mean"][2][0],
               fdg["func_mean"][3][0],fdg["func_mean"][4][0],fdg["func_mean"][5][0]]

    fdg_max = ["LTh_CMR_max","RTh_CMR_max","LCo_CMR_max","RCo_CMR_max","LPre_CMR_max","RPre_CMR_max"]
    f_max = [fdg["func_max"][0][0],fdg["func_max"][1][0],fdg["func_max"][2][0],
               fdg["func_max"][3][0],fdg["func_max"][4][0],fdg["func_max"][5][0]]

    fdg_min = ["LTh_CMR_min","RTh_CMR_min","LCo_CMR_min","RCo_CMR_min","LPre_CMR_min","RPre_CMR_min"]
    f_min = [fdg["func_min"][0][0],fdg["func_min"][1][0],fdg["func_min"][2][0],
               fdg["func_min"][3][0],fdg["func_min"][4][0],fdg["func_min"][5][0]]

    fdg_std = ["LTh_CMR_std","RTh_CMR_std","LCo_CMR_std","RCo_CMR_std","LPre_CMR_std","RPre_CMR_std"]
    f_std = [fdg["func_stdev"][0][0],fdg["func_stdev"][1][0],fdg["func_stdev"][2][0],
               fdg["func_stdev"][3][0],fdg["func_stdev"][4][0],fdg["func_stdev"][5][0]]

    fdg_titles = fdg_avg + fdg_max + fdg_min + fdg_std

    dwi = nx.to_numpy_matrix(dwi_ntwk, weight="weight")

    l_thal = ["LTh_RTh","LTh_LCo","LTh_RCo","LTh_LPre","LTh_RPre"]
    l_th   = [dwi[0,1], dwi[0,2], dwi[0,3], dwi[0,4], dwi[0,5]]
    r_thal = ["RTh_LCo","RTh_RCo","RTh_LPre","RTh_RPre"]
    r_th   = [dwi[1,2], dwi[1,3], dwi[1,4], dwi[1,5]]
    l_co   = ["LCo_RCo","LCo_LPre","LCo_RPre"]
    l_cor  = [dwi[2,3], dwi[2,4], dwi[2,5]]
    r_co   = ["RCo_LPre","RCo_RPre"]
    r_cor  = [dwi[3,4], dwi[3,5]]
    l_pre  = ["LPre_RPre"]
    l_prec = [dwi[4,5]]
    conn_titles = l_thal + r_thal + l_co + r_co + l_pre

    all_titles = titles + fdg_titles + conn_titles
    volume_titles = ["VoxLTh","VoxRTh","VoxLCo", "VoxRCo", "VoxLPre", "VoxRPre"]
    all_titles = all_titles + volume_titles
    volumes = fdg["number_of_voxels"]

    all_data = f_avg + f_max + f_min + f_std + l_th + r_th + l_cor + r_cor + l_prec + volumes[:,0].tolist()

    out_file = op.abspath(subject_id + "_precoth.csv")
    f = open(out_file, "w")
    title_str = ",".join(all_titles) + "\n"
    f.write(title_str)
    all_data = map(float, all_data)
    data_str = subject_id + "," + ",".join(format(x, "10.5f") for x in all_data) + "\n"
    f.write(data_str)
    f.close()
    return out_file
def normalized_min_cut(graph):
    """Clusters graph nodes according to normalized minimum cut algorithm.
    All nodes must have at least 1 edge. Uses zero as decision boundary. 
    
    Parameters
    -----------
        graph: a networkx graph to cluster
        
    Returns
    -----------
        vector containing -1 or 1 for every node

    References
    ----------
        J. Shi and J. Malik, *Normalized Cuts and Image Segmentation*, 
        IEEE Transactions on Pattern Analysis and Machine Learning, vol. 22, pp. 888-905
    """
    m_adjacency = np.array(nx.to_numpy_matrix(graph))

    D = np.diag(np.sum(m_adjacency, 0))
    D_half_inv = np.diag(1.0 / np.sqrt(np.sum(m_adjacency, 0)))
    M = np.dot(D_half_inv, np.dot((D - m_adjacency), D_half_inv))

    (w, v) = np.linalg.eig(M)
    #find index of second smallest eigenvalue
    index = np.argsort(w)[1]

    v_partition = v[:, index]
    v_partition = np.sign(v_partition)
    return v_partition
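A small sanity check (my sketch): the two halves of a barbell graph should receive opposite signs.

import networkx as nx
import numpy as np

G = nx.barbell_graph(5, 0)            # two 5-cliques joined by a single edge
partition = normalized_min_cut(G)
print(partition[:5], partition[5:])   # expect one sign per clique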
Example No. 17
def kmeans_cluster(G, graph_name, num_clusters):
    subgraphs = []
    #Find a way to figure out clusters number automatically
    write_directory = os.path.join(Constants.KMEANS_PATH,graph_name)
    if not os.path.exists(write_directory):
        os.makedirs(write_directory)
    nodeList = G.nodes()
    matrix_data = nx.to_numpy_matrix(G, nodelist = nodeList)
    kmeans = KMeans(init='k-means++', n_clusters=num_clusters, n_init=10)   
    kmeans.fit(matrix_data)
    label = kmeans.labels_
    clusters = {}
    
    for nodeIndex, nodeLabel in enumerate(label):
        if nodeLabel not in clusters:
            clusters[nodeLabel] = []
        clusters[nodeLabel].append(nodeList[nodeIndex])
        
    #countNodes is used to test whether we have all the nodes in the clusters 
    countNodes = 0    
    for clusterIndex, subGraphNodes in enumerate(clusters.keys()):
        subgraph = G.subgraph(clusters[subGraphNodes])
        subgraphs.append(subgraph)
        nx.write_gexf(subgraph, os.path.join(write_directory,graph_name+str(clusterIndex)+Constants.GEXF_FORMAT))
        #countNodes = countNodes + len(clusters[subGraphNodes])
    return num_clusters
Example No. 18
def normalized_laplacian(G,nodelist=None):
    """Return normalized Laplacian of G as a numpy matrix.

    See Spectral Graph Theory by Fan Chung-Graham.
    CBMS Regional Conference Series in Mathematics, Number 92,
    1997.

    """
    # FIXME: this isn't the most efficient way to do this...
    try:
        import numpy as np
    except ImportError:
        raise ImportError, \
          "normalized_laplacian() requires numpy: http://scipy.org/ "
    n=G.order()
    I=np.identity(n)
    A=np.asarray(networkx.to_numpy_matrix(G,nodelist=nodelist))
    d=np.sum(A,axis=1)
    L=I*d-A
    osd=np.zeros(len(d))
    for i in range(len(d)):
        if d[i]>0: osd[i]=np.sqrt(1.0/d[i])
    T=I*osd
    L=np.dot(T,np.dot(L,T))
    return L
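A quick check (my sketch; the module imports `networkx` under its full name, as used above). Eigenvalues of the normalized Laplacian always lie in [0, 2]:

import numpy as np
import networkx

G = networkx.path_graph(4)
L = normalized_laplacian(G)
print(np.linalg.eigvalsh(L))  # all values in [0, 2]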
Example No. 19
def graphToCSV(G,graphtype, section, test):
    directory = "Datarows/"+graphtype+"/"
    if not os.path.exists(directory):
        os.makedirs(directory)
    writer_true = csv.writer(open(directory+section+"_true.csv", "a"))
    writer_false = csv.writer(open(directory+section+"_false.csv", "a"))
    A = nx.to_numpy_matrix(G)
    A = np.reshape(A, -1)
    arrGraph = np.squeeze(np.asarray(A))

    nb_nodes = 0
    for node in nx.nodes_iter(G):
        if len(G.neighbors(node))>0:
            nb_nodes += 1

    meta_info = [test,nb_nodes,G.number_of_edges(),nx.number_connected_components(G)]
    # Keep the true and false classes the same size #
    if test:
        if os.path.getsize(directory+section+"_true.csv") <= os.path.getsize(directory+section+"_false.csv"):
            writer_true.writerow(np.append(arrGraph, meta_info))
            return True
        else:
            return False
    else:
        if os.path.getsize(directory+section+"_false.csv") <= os.path.getsize(directory+section+"_true.csv"):
            writer_false.writerow(np.append(arrGraph, meta_info))
            return True
        else:
            return False
Example No. 20
def draw_adjacency_matrix(G, node_order=None, partitions=[], colors=[]):
    """
    - G is a networkx graph
    - node_order (optional) is a list of nodes, where each node in G
          appears exactly once
    - partitions is a list of node lists, where each node in G appears
          in exactly one node list
    - colors is a list of strings indicating what color each
          partition should be
    If partitions is specified, the same number of colors needs to be
    specified.
    """
    adjacency_matrix = nx.to_numpy_matrix(G, dtype=bool, nodelist=node_order)

    #Plot adjacency matrix in toned-down black and white
    fig = pyplot.figure(figsize=(5, 5)) # in inches
    pyplot.imshow(adjacency_matrix,
                  cmap="Greys",
                  interpolation="none")

    # The rest is just if you have sorted nodes by a partition and want to
    # highlight the module boundaries
    assert len(partitions) == len(colors)
    ax = pyplot.gca()
    for partition, color in zip(partitions, colors):
        current_idx = 0
        for module in partition:
            ax.add_patch(patches.Rectangle((current_idx, current_idx),
                                          len(module), # Width
                                          len(module), # Height
                                          facecolor="none",
                                          edgecolor=color,
                                          linewidth="1"))
            current_idx += len(module)
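An illustrative call (my sketch; splits the karate club by its 'club' node attribute, assuming NetworkX 2.x attribute access):

import networkx as nx
import numpy as np
from matplotlib import pyplot, patches

G = nx.karate_club_graph()
mr_hi = [n for n in G if G.nodes[n]['club'] == 'Mr. Hi']
officer = [n for n in G if G.nodes[n]['club'] == 'Officer']
draw_adjacency_matrix(G, node_order=mr_hi + officer,
                      partitions=[[mr_hi, officer]], colors=['red'])
pyplot.show()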
Example No. 21
    def __init__(self, G_list, max_num_nodes, features='id'):
        self.max_num_nodes = max_num_nodes
        self.adj_all = []
        self.len_all = []
        self.feature_all = []

        for G in G_list:
            adj = nx.to_numpy_matrix(G)
            # the diagonal entries are 1 since they denote node probability
            self.adj_all.append(
                    np.asarray(adj) + np.identity(G.number_of_nodes()))
            self.len_all.append(G.number_of_nodes())
            if features == 'id':
                self.feature_all.append(np.identity(max_num_nodes))
            elif features == 'deg':
                degs = np.sum(np.array(adj), 1)
                degs = np.expand_dims(np.pad(degs, [0, max_num_nodes - G.number_of_nodes()],
                                             'constant'),
                                      axis=1)
                self.feature_all.append(degs)
            elif features == 'struct':
                degs = np.sum(np.array(adj), 1)
                degs = np.expand_dims(np.pad(degs, [0, max_num_nodes - G.number_of_nodes()],
                                             'constant'),
                                      axis=1)
                clusterings = np.array(list(nx.clustering(G).values()))
                clusterings = np.expand_dims(np.pad(clusterings, 
                                                    [0, max_num_nodes - G.number_of_nodes()],
                                                    'constant'),
                                             axis=1)
                self.feature_all.append(np.hstack([degs, clusterings]))
Example No. 22
def clustering_coefficient(G,  nbunch = None):
    
    G = G.to_undirected()
    selfloops = G.selfloop_edges()
    G.remove_edges_from(selfloops) # exclude selfloops
    
    if nbunch is not None:
        nodes = np.sort(G.nodes())
        nbunch = np.sort(nbunch)
        fnodes = np.setdiff1d(nodes,nbunch)
        nodelist = np.append(nbunch, fnodes)

    else:
        nbunch = G.nodes()
        nodelist = G.nodes()
        
    n = len(nbunch)
    W = nx.to_numpy_matrix(G, nodelist = nodelist) # we take into account all links
    A = np.matrix(1. * (W > 0))
    degree = G.degree(nbunch = nbunch)
    degree = np.array([degree[i] for i in nbunch])
    Td = degree * (degree - 1) 
    indices = np.where(Td == 0)
    Td[indices] = 1.
    triangles = np.diag(A**3)[:n]
    cc = triangles / Td
    cc = cc.tolist()
    
    return cc,degree
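A sanity-check sketch: on an unweighted graph the result should agree with nx.clustering, node for node in G.nodes() order (assumes an older NetworkX where G.selfloop_edges() still exists, as the function itself requires):

import networkx as nx
import numpy as np

G = nx.karate_club_graph()
cc, degree = clustering_coefficient(G)
print(np.ravel(cc)[:5])
print([nx.clustering(G)[n] for n in range(5)])  # should match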
Example No. 23
def Net2AdjMatrix(red,sep,form):

  Red = GetNetX(red,sep,form)
  genesDIC = Red[1]
  G = Red[0]
  sep = separador(sep)
  
  name = red.split('.')
  output = name[0]+'.txt'
  SALIDA = open(output,"w")  
  
  
  nodes = []

  for i in G.nodes():
    nodes.append(genesDIC[i-1])
    #print i
  
  Q = nx.to_numpy_matrix(G,weight='w')  
  
  SALIDA.write(sep)  
  for i in nodes:
    SALIDA.write(("%s"+sep) % i)  
  SALIDA.write("\n")  

  for i in range(len(Q)):
    SALIDA.write(("%s"+sep) %  nodes[i])  
    for j in range(len(Q)):
      SALIDA.write(("%s"+sep) %  Q.item((i,j)))     
    SALIDA.write("\n")

  SALIDA.close()     
def simulate():
  data = get_data()

  adjacency = data["adjacency"]
  t = 10
  t_f = 100
  t = np.linspace(0, t, num=t_f).astype(np.float32)

  # a = 0.
  # b = 100.
  # r = np.array([
  #     [a, 0.],
  #     [a+2.,0.],
  # ])
  # v = np.array([
  #     [0.,10.],
  #     [0., -10.],
  # ])
  #
  # w = np.array([
  #   [0,1],
  #   [1,0]
  # ]).astype(np.float32)

  n = 5
  G = nx.grid_2d_graph(n,n)
  N = 25
  w = nx.to_numpy_matrix(G)*10
  r = np.random.rand(N,3)
  d = r.shape[-1]
  v = r*0.
  k=1.
  return sim_particles(t,r,v,w)
Example No. 25
def process_graph_component(graph):
    #print("Processing component of size {}".format(graph.size()))
    X = nx.to_numpy_matrix(graph)
    n_clusters = min(10, int(graph.size()/20))
    sp = SpectralClustering(
            n_clusters=n_clusters, n_components=min(1000,graph.size()), affinity='precomputed').fit(X)
    return sp, X
Example No. 26
def generate_systems(prop_step=5, o_size=20):
    """ Generate population of systems
    """
    para_range = np.linspace(0, 1, prop_step)
    o_range = np.random.uniform(0, 3, o_size)

    systs = []
    for p in para_range:
        systs.append([])

        # setup network
        graph = nx.gnp_random_graph(20, p)

        dim = len(graph.nodes())
        Bvec = np.random.uniform(0, 5, size=dim)

        for omega in o_range:
            # setup dynamical system
            system_config = get_base_config(
                nx.to_numpy_matrix(graph), Bvec, omega)

            systs[-1].append(DW({
                'graph': graph,
                'system_config': system_config
            }))

    return systs, para_range
    def mapper_sh(self, _, node):

        sub = self.G.subgraph([x for x in self.G.neighbors(node)]+[node])
        self.A = nx.to_numpy_matrix(sub)
        self.p = structural_holes._calcProportionalTieStrengths(self.A)

        constraint = {"C-Index": 0.0, "C-Size": 0.0, "C-Density": 0.0, "C-Hierarchy": 0.0}

        Vi = structural_holes._neighborsIndexes(sub, node, self.options.includeOutLinks, self.options.includeInLinks)

        # i is the node we are calculating constraint for
        # and is thus the ego of the ego net
        i = sub.nodes().index(node)

        if not self.options.wholeNetwork:
            # need to recalculate p w/r/t this node
            pq = structural_holes._calcProportionaTieStrengthWRT(sub, i)
        else:
            # don't need to calculate p w/r/t any node
            pq = self.p

        for j in Vi:
            Pij = self.p[i, j]
            constraint["C-Size"] += Pij ** 2
            innerSum = 0.0
            for q in Vi:
                if q == j or q == i: continue
                innerSum += self.p[i, q] * pq[q, j]

            constraint["C-Hierarchy"] += innerSum ** 2
            constraint["C-Density"] += 2 * Pij * innerSum

        constraint["C-Index"] = constraint["C-Size"] + constraint["C-Density"] + constraint["C-Hierarchy"]
        yield None, {node:constraint}
def bipart(net):
        print 6.7 
        A = nx.to_numpy_matrix(net)
        A[A!=0.0] = 1
        expA = linalg.expm(A)
        chA = linalg.coshm(A)
        return ((np.trace(chA)/np.trace(expA),'bipartitivity'),)
Example No. 29
def bipartivity_exact(G, focal=None):
    G_MAT=NX.to_numpy_matrix(G) # Convert NX network to adjacency matrix
    ei,ev=N.linalg.eig(G_MAT)   # Calculate eigenvalues and eigenvectors

    SC_even=0       # Sum of the contributions from even closed walks in G
    SC_all=0        # Sum of the contributions of all closed walks in G

    if focal is None:
        ''' Formulas described on page 2
        First code block calculates bipartivity of G globally'''
        for j in range(0,G.number_of_nodes()):
            SC_even=SC_even+N.cosh(ei[j].real)
            SC_all=SC_all+N.e**(ei[j].real)
        # Proportion of even closed walks over all closed walks
        B=SC_even/SC_all    
    else:
        # If the focal node is passed as a str or object, map it to its index
        if not isinstance(focal, int):
            n=G.nodes()
            focal=n.index(focal)
        '''Second code block calculates the contribution of 'focal' to bipartivity'''
        for j in range(0,G.number_of_nodes()):
            SC_even=SC_even+((ev[focal,j].real)**2)*(N.cosh(ei[j].real))
            SC_all=SC_all+((ev[focal,j].real)**2)*(N.e**(ei[j].real))
            
        B=SC_even/SC_all    # Proportion of even CW for focal node, ie how much
                            # the focal node contributes to bipartivity.
    return B
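A quick check (my sketch): an even cycle is bipartite, an odd one is not.

import networkx as NX
import numpy as N

print(bipartivity_exact(NX.cycle_graph(6)))  # ~1.0: even cycles are bipartite
print(bipartivity_exact(NX.cycle_graph(3)))  # < 1: the odd cycle is penalized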
Example No. 30
def main():
    
    options,args = parseCommandLine()
    
    pickleFileName = args[0];
    outputFileName = None
    if (len(args) == 2):
        outputFileName = args[1]
                

    print ("Loading %s...\n") % (pickleFileName)
    g = nx.read_gpickle(pickleFileName);
    
    # Convert graph to matrix form
    for u,v,d in g.edges_iter(data=True):
        g.edge[u][v]['weight'] = g.edge[u][v]['number_of_fibers']
    cmat = nx.to_numpy_matrix(g)
    
    mean = np.mean(cmat)
    std = np.std(cmat) 
    print('Total number of connections: %d') % (np.sum(cmat))
    print('Connection Matrix Mean: %f Std: %f' ) % (mean, std)

    
    # Compute binarized stats
    binarized_cmat= np.zeros(cmat.shape)
    binarized_cmat[cmat>0] = 1
    print('Binarized connection matrix Mean: %f Std: %f' ) % (np.mean(binarized_cmat),
                                                                np.std(binarized_cmat))

    
    if outputFileName != None :
        f = open(outputFileName, 'at')
        f.write( ('%f,%f\n') % (mean, std) )
        f.close() 
Example No. 31
 def test_identity_graph_matrix(self):
     "Conversion from graph to matrix to graph."
     A = nx.to_numpy_matrix(self.G1)
     self.identity_conversion(self.G1, A, nx.Graph())
Example No. 32
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt

# load karate network 
zkc = nx.karate_club_graph()
order = sorted(list(zkc.nodes()))

# input parameters
A = nx.to_numpy_matrix(zkc, nodelist=order)
I = np.eye(zkc.number_of_nodes())
A_hat = A + I
D_hat = np.array(np.sum(A_hat, axis=1))
D_hat = [x[0] for x in D_hat]
D_hat = np.matrix(np.diag(D_hat))

# initializing weight
# Normal Distribution or Gaussian Distribution
W_1 = np.random.normal(loc=0, scale=1, size=(zkc.number_of_nodes(), 4))  
W_2 = np.random.normal(loc=0, size=(W_1.shape[1], 2))
# loc is Mean (“centre”) of the distribution
# scale is Standard deviation (spread or “width”) of the distribution
# size is Output shape

# GCN Model
def relu(x):
    return np.maximum(x, 0)

def gcn_layer(A, D, X, W):
    return relu(D**-1*A*X*W)
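The layers are then chained into the forward pass this snippet builds toward (a sketch; the feature matrix is the identity I, as W_1's input size implies):

H_1 = gcn_layer(A_hat, D_hat, I, W_1)    # hidden layer, shape (34, 4)
H_2 = gcn_layer(A_hat, D_hat, H_1, W_2)  # output layer, shape (34, 2)

# one low-dimensional vector per node
feature_representations = {node: np.array(H_2)[node] for node in zkc.nodes()}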
Example No. 33
def get_adjacency_matrix(ts, L, edge_type):
    G = nx.Graph(list(GetEdges(ts, L, edge_type)))
    A = np.array(nx.to_numpy_matrix(G, nodelist=[i for i in range(n_node)]))
    return A
Example No. 34
from networkx import karate_club_graph, to_numpy_matrix
import numpy as np

zkc = karate_club_graph()  # zkc: the dataset
# print(zkc)
# print(zkc.edges)
# Output:
#     Zachary's Karate Club
#     [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8), (0, 10), (0, 11), (0, 12), (0, 13), (0, 17), (0, 19), (0, 21), (0, 31), (1, 2), (1, 3), (1, 7), (1, 13), (1, 17), (1, 19), (1, 21), (1, 30), (2, 3), (2, 7), (2, 8), (2, 9), (2, 13), (2, 27), (2, 28), (2, 32), (3, 7), (3, 12), (3, 13), (4, 6), (4, 10), (5, 6), (5, 10), (5, 16), (6, 16), (8, 30), (8, 32), (8, 33), (9, 33), (13, 33), (14, 32), (14, 33), (15, 32), (15, 33), (18, 32), (18, 33), (19, 33), (20, 32), (20, 33), (22, 32), (22, 33), (23, 25), (23, 27), (23, 29), (23, 32), (23, 33), (24, 25), (24, 27), (24, 31), (25, 31), (26, 29), (26, 33), (27, 33), (28, 31), (28, 33), (29, 32), (29, 33), (30, 32), (30, 33), (31, 32), (31, 33), (32, 33)]

order = sorted(list(zkc.nodes()))  # sort the node list
# print(sorted(list(zkc.nodes())))
"""
    函数原型:to_numpy_matrix(G, nodelist=None, dtype=None, order=None, multigraph_weight=<built-in function sum>, weight='weight')
    功能:以NumPy矩阵的形式返回图邻接矩阵。 
"""
A = to_numpy_matrix(zkc, nodelist=order)  # A: adjacency matrix of the zkc dataset
I = np.eye(zkc.number_of_nodes())  # I: identity matrix
# print(A)
# print(zkc.number_of_nodes())
# print(I)

A_hat = A + I  # A_hat: adjacency matrix with self-loops added
D_hat = np.array(np.sum(A_hat, axis=0))[0]  # D_hat: degree sequence
'''the [0] removes the extra outer bracket from the np.array() result'''
D_hat = np.matrix(np.diag(D_hat))  # D_hat: degree matrix
# print(D_hat)
"""
    函数原型:numpy.random.normal(loc=0.0, scale=1.0, size=None)
    参数:
        loc:float
            此概率分布的均值(对应着整个分布的中心centre)
Example No. 35
    def __init__(self,
                 n_sample,
                 n,
                 k=2,
                 mode='invariant',
                 type_graph='normal',
                 type_label='shortest_path',
                 transform_data=True):
        """
        Input:
          n_sample : number of samples
          n : number of nodes
          k : tensorization inside networks, 2 or 3
          mode : 'invariant' or 'equivariant'
          type_graphs:
            'normal': normal edge weights (symmetrized, with absolute value)
            'SBM': stochastic block model with K=sqrt(log(n))+1 communities
          type_label:
            'shortest_path': diameter (longest shortest path) if invariant, longest shortest path from each node if equivariant
            'eigen': second largest eigenvalue is invariant, second eigenvector if equivariant
        Data:
          self.equi (n_sample * n^k * b(k+2)): data, equivariant basis form
          self.label (n_sample if invariant, n_sample * n if equivariant)
        """
        self.n = n
        self.n_sample = n_sample
        self.mode = mode
        self.k = k

        if type_graph == 'normal':
            self.Ws = torch.randn(n_sample, n, n)
            for i in range(n_sample):
                self.Ws[i, :, :] = torch.abs(self.Ws[i, :, :] +
                                             self.Ws[i, :, :].t())
        elif type_graph == 'SBM':
            self.Ws, _ = torch.tensor(
                SBM(n_sample,
                    n,
                    K=int(np.sqrt(np.log(n))) + 1,
                    connectivity=(0.85, 4 * np.log(n) / n)))
        elif type_graph == 'special':
            choice = np.random.randint(5, size=n_sample)
            weights = torch.abs(
                torch.randn(n_sample, n,
                            n)) + 0.01  #*(torch.randn(n_sample))[:,None,None])
            self.Ws = torch.zeros(n_sample, n, n)
            for i in range(n_sample):
                weight = weights[i, :, :] + weights[i, :, :].t()
                if choice[i] == 0:
                    A = nx.complete_graph(n)
                elif choice[i] == 1:
                    A = nx.cycle_graph(n)
                elif choice[i] == 2:
                    A = nx.path_graph(n)
                elif choice[i] == 3:
                    A = nx.star_graph(n - 1)
                elif choice[i] == 4:
                    A = nx.wheel_graph(n)
                self.Ws[i, :, :] = torch.tensor(
                    nx.to_numpy_matrix(A)).float() * weight

        if mode == 'equivariant':
            self.label = torch.zeros(n_sample, n)
        else:
            self.label = torch.zeros(n_sample)
        for i in range(n_sample):
            if type_label == 'shortest_path':
                if mode == 'equivariant':
                    self.label[i, :] = torch.tensor(
                        np.max(dijkstra(self.Ws[i, :, :], directed=False),
                               axis=0))
                else:
                    self.label[i] = np.max(
                        dijkstra(self.Ws[i, :, :], directed=False))
            elif type_label == 'eigen':
                A = self.Ws[i, :, :].numpy()
                eig_values, eig_vec = eigh(A.dot(A), eigvals=[n - 1, n - 1])
                if mode == 'equivariant':
                    self.label[i, :] = torch.tensor(eig_vec).t()
                else:
                    self.label[i] = torch.tensor(eig_values)

        print('Create equivariant basis...')
        self.equi = transforminput(self.Ws, k)
        print('Done.')
Example No. 36
 def test_identity_digraph_matrix(self):
     """Conversion from digraph to matrix to digraph."""
     A = nx.to_numpy_matrix(self.G2)
     self.identity_conversion(self.G2, A, nx.DiGraph())
Example No. 37
def spectral_layout(G, weight='weight', scale=1, center=None, dim=2):
    """Position nodes using the eigenvectors of the graph Laplacian.

    Parameters
    ----------
    G : NetworkX graph or list of nodes
        A position will be assigned to every node in G.

    weight : string or None   optional (default='weight')
        The edge attribute that holds the numerical value used for
        the edge weight.  If None, then all edge weights are 1.

    scale : number (default: 1)
        Scale factor for positions.

    center : array-like or None
        Coordinate pair around which to center the layout.

    dim : int
        Dimension of layout.

    Returns
    -------
    pos : dict
        A dictionary of positions keyed by node

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> pos = nx.spectral_layout(G)

    Notes
    -----
    Directed graphs will be considered as undirected graphs when
    positioning the nodes.

    For larger graphs (>500 nodes) this will use the SciPy sparse
    eigenvalue solver (ARPACK).
    """
    # handle some special cases that break the eigensolvers
    import numpy as np

    G, center = _process_params(G, center, dim)

    if len(G) <= 2:
        if len(G) == 0:
            pos = np.array([])
        elif len(G) == 1:
            pos = np.array([center])
        else:
            pos = np.array([np.zeros(dim), np.array(center) * 2.0])
        return dict(zip(G, pos))
    try:
        # Sparse matrix
        if len(G) < 500:  # dense solver is faster for small graphs
            raise ValueError
        A = nx.to_scipy_sparse_matrix(G, weight=weight, dtype='d')
        # Symmetrize directed graphs
        if G.is_directed():
            A = A + np.transpose(A)
        pos = _sparse_spectral(A, dim)
    except (ImportError, ValueError):
        # Dense matrix
        A = nx.to_numpy_matrix(G, weight=weight)
        # Symmetrize directed graphs
        if G.is_directed():
            A = A + np.transpose(A)
        pos = _spectral(A, dim)

    pos = rescale_layout(pos, scale) + center
    pos = dict(zip(G, pos))
    return pos
Example No. 38
def fruchterman_reingold_layout(G,
                                k=None,
                                pos=None,
                                fixed=None,
                                iterations=50,
                                weight='weight',
                                scale=1,
                                center=None,
                                dim=2):
    """Position nodes using Fruchterman-Reingold force-directed algorithm.

    Parameters
    ----------
    G : NetworkX graph or list of nodes
        A position will be assigned to every node in G.

    k : float (default=None)
        Optimal distance between nodes.  If None the distance is set to
        1/sqrt(n) where n is the number of nodes.  Increase this value
        to move nodes farther apart.

    pos : dict or None  optional (default=None)
        Initial positions for nodes as a dictionary with node as keys
        and values as a coordinate list or tuple.  If None, then use
        random initial positions.

    fixed : list or None  optional (default=None)
        Nodes to keep fixed at initial position.

    iterations : int  optional (default=50)
        Number of iterations of spring-force relaxation

    weight : string or None   optional (default='weight')
        The edge attribute that holds the numerical value used for
        the edge weight.  If None, then all edge weights are 1.

    scale : number (default: 1)
        Scale factor for positions. Not used unless `fixed is None`.

    center : array-like or None
        Coordinate pair around which to center the layout.
        Not used unless `fixed is None`.

    dim : int
        Dimension of layout.

    Returns
    -------
    pos : dict
        A dictionary of positions keyed by node

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> pos = nx.spring_layout(G)

    # The same using longer but equivalent function name
    >>> pos = nx.fruchterman_reingold_layout(G)
    """
    import numpy as np

    G, center = _process_params(G, center, dim)

    if fixed is not None:
        nfixed = dict(zip(G, range(len(G))))
        fixed = np.asarray([nfixed[v] for v in fixed])

    if pos is not None:
        # Determine size of existing domain to adjust initial positions
        dom_size = max(coord for pos_tup in pos.values() for coord in pos_tup)
        if dom_size == 0:
            dom_size = 1
        shape = (len(G), dim)
        pos_arr = np.random.random(shape) * dom_size + center
        for i, n in enumerate(G):
            if n in pos:
                pos_arr[i] = np.asarray(pos[n])
    else:
        pos_arr = None

    if len(G) == 0:
        return {}
    if len(G) == 1:
        return {nx.utils.arbitrary_element(G.nodes()): center}

    try:
        # Sparse matrix
        if len(G) < 500:  # sparse solver for large graphs
            raise ValueError
        A = nx.to_scipy_sparse_matrix(G, weight=weight, dtype='f')
        if k is None and fixed is not None:
            # We must adjust k by domain size for layouts not near 1x1
            nnodes, _ = A.shape
            k = dom_size / np.sqrt(nnodes)
        pos = _sparse_fruchterman_reingold(A, k, pos_arr, fixed, iterations,
                                           dim)
    except:
        A = nx.to_numpy_matrix(G, weight=weight)
        if k is None and fixed is not None:
            # We must adjust k by domain size for layouts not near 1x1
            nnodes, _ = A.shape
            k = dom_size / np.sqrt(nnodes)
        pos = _fruchterman_reingold(A, k, pos_arr, fixed, iterations, dim)
    if fixed is None:
        pos = rescale_layout(pos, scale=scale) + center
    pos = dict(zip(G, pos))
    return pos
Example No. 39
 def generate_sigma(self):
     import networkx as nx
     from scipy.linalg import inv, schur, eig
     from scipy.sparse import lil_matrix
     from numpy import asarray_chkfinite, isfinite, inf
     from aux import SparseBlocks, eigenvector_from_eigenvalue, all_elements_are_unique
     from scipy import argsort, dot, eye, hstack, vstack, zeros, complex128, split, asarray, diag, array
     # should be able to turn zplus off, but then i need better eigenvalue comparison
     E = self.p.E + self.p.zplus
     Ndim = len(self.p.tuple_canvas_coordinates)
     block = self.graph.order() / 2
     I = eye(block)
     Zeros = zeros((block, block))
     Hlead = nx.to_numpy_matrix(self.graph,
                                nodelist=self.nodelist,
                                dtype=complex128)
     self.Hlead = Hlead
     #import pudb; pudb.set_trace()
     # add 4*self.t0 because the matrix from graph lacks diagonal (no self-loops)
     try:
         H00 = asarray_chkfinite(Hlead[:block, :block]) + 4 * self.p.t0 * I
         self.H00 = H00
     except ValueError:
         print 'H00 contains infs or NaNs'
         import pudb
         pudb.set_trace()
     try:
         H01 = asarray_chkfinite(Hlead[:block, block:])
         self.H01 = H01
     except ValueError:
         print 'H01 contains infs or NaNs'
         import pudb
         pudb.set_trace()
     inv_H01 = inv(H01)
     while not isfinite(inv_H01).all():
         print 'inv_H01 contains infs or NaNs, repeating'
         inv_H01 = inv(H01)
         #import pudb; pudb.set_trace()
     self.inv_H01 = inv_H01
     CompanionMatrix_array = vstack(
         (hstack((Zeros, I)),
          hstack((dot(-inv_H01,
                      H01.conj().T), dot(inv_H01, E * I - H00)))))
     if not isfinite(CompanionMatrix_array).all():
         print 'CompanionMatrix contains infs or NaNs'
         import pudb
         pudb.set_trace()
     #CompanionMatrix_array =vstack((hstack((dot(inv_H01,E*I-H00),dot(-inv_H01,H01.conj().T))),hstack((I,Zeros))))
     # the following 'complex' might be superfluous and only for real input matrices.
     T, Z, number_sorted = schur(CompanionMatrix_array, sort='iuc')
     eigenvalues = diag(T)
     # propagating_eigenvalues = []
     # propagating_eigenvectors = []
     # for eigenvalue in eigenvalues:
     #     if abs(abs(eigenvalue)-1) < 0.01:
     #         propagating_eigenvalues.append(eigenvalue)
     #         eigenvector = eigenvector_from_eigenvalue(CompanionMatrix_array, eigenvalue)
     #         propagating_eigenvectors.append(eigenvector)
     # prop_eig_array = array(propagating_eigenvectors).T
     if not all_elements_are_unique(eigenvalues):
         print "--------------WARNING!!!!!---------------"
         print "One or more eigenvalues are identical, please rotate eigenvectors, I don't know how to do that"
     #v,d = eig(CompanionMatrix_array)
     #ndx = argsort(v)
     #S=d[:,ndx]
     #v=v[ndx]
     #Sleft,Sright = split(S,2,axis=1)
     #S4,S3 = split(Sright,2,axis=0)
     #S2,S1 = split(Sleft,2,axis=0)
     #S2 =S[:block*self.p.multi,:block*self.p.multi]
     #S1= S[block*self.p.multi:,:block*self.p.multi]
     #self.S2 = S2
     #self.S1 = S1
     #self.S4 = S4
     #self.S3 = S3
     #print 'S1 shape: ',S1.shape
     #print 'S2 shape: ',S2.shape
     # sort eigenvalues and Z according to acending abs(eigenvalue), TODO: do better sorting, now it
     # depends on luck. sort using the calulated eigenvectors above
     # sorting_indices = abs(eigenvalues).argsort()
     #T = T[:,sorting_indices][sorting_indices,:]
     #Z = Z[:,sorting_indices][sorting_indices,:]
     Zleft, Zright = split(Z, 2, axis=1)
     #S4,S3 = split(Sright,2,axis=0)
     Z11, Z21 = split(Zleft, 2, axis=0)
     self.Z11 = Z11
     self.Z21 = Z21
     if self.index == 0:
         SigmaRet = dot(H01, dot(Z21, inv(Z11)))
     else:
         SigmaRet = dot(H01.conj(), dot(Z21, inv(Z11)))
     self.SigmaRet = SigmaRet
     print '- SimgaRet (', self.index, ') shape: ', SigmaRet.shape
     sigma = lil_matrix((self.p.multi * Ndim, self.p.multi * Ndim),
                        dtype=complex128)
     print '- sigma shape: ', sigma.shape
     #implement polarization like (for spin up) reshape(-1,2) --> [:,1] = 0 --> reshape(shape(SigmaRet))
     if self.index == 0:
         if 'up' in self.current:
             print 'Up polarized input'
             SigmaRet.reshape(-1, 2)[:, 1] = 0
         elif 'down' in self.current:
             print 'Down polarized input'
             SigmaRet.reshape(-1, 2)[:, 0] = 0
         sigma[0:SigmaRet.shape[0], 0:SigmaRet.shape[1]] = SigmaRet
     elif self.index == 1:
         sigma[-SigmaRet.shape[0]:, -SigmaRet.shape[1]:] = SigmaRet
     self.sigma = sigma
     return sigma
Example No. 40
 def test_identity_weighted_digraph_array(self):
     """Conversion from weighted digraph to array to weighted digraph."""
     A = nx.to_numpy_matrix(self.G4)
     A = np.asarray(A)
     self.identity_conversion(self.G4, A, nx.DiGraph())
Example No. 41
 def test_identity_weighted_graph_matrix(self):
     """Conversion from weighted graph to matrix to weighted graph."""
     A = nx.to_numpy_matrix(self.G3)
     self.identity_conversion(self.G3, A, nx.Graph())
Example No. 42
 def test_identity_digraph_array(self):
     """Conversion from digraph to array to digraph."""
     A = nx.to_numpy_matrix(self.G2)
     A = np.asarray(A)
     self.identity_conversion(self.G2, A, nx.DiGraph())
def rank_nodes_by_divrank(
    graph: nx.Graph,
    r: Optional[np.ndarray] = None,
    lambda_: float = 0.5,
    alpha: float = 0.5,
) -> Dict[str, float]:
    """
    Rank nodes in a network using the DivRank algorithm that attempts to
    balance between node centrality and diversity.

    Args:
        graph
        r: The "personalization vector"; by default, ``r = ones(1, n)/n``
        lambda_: Float in [0.0, 1.0]
        alpha: Float in [0.0, 1.0] that controls the strength of self-links.

    Returns:
        Mapping of node to score ordered by descending divrank score

    References:
        Mei, Q., Guo, J., & Radev, D. (2010, July). Divrank: the interplay of
        prestige and diversity in information networks. In Proceedings of the
        16th ACM SIGKDD international conference on Knowledge discovery and data
        mining (pp. 1009-1018). ACM.
        http://clair.si.umich.edu/~radev/papers/SIGKDD2010.pdf
    """
    # check function arguments
    if len(graph) == 0:
        LOGGER.warning("`graph` is empty")
        return {}

    # specify the order of nodes to use in creating the matrix
    # and then later recovering the values from the order index
    nodes_list = [node for node in graph]
    # create adjacency matrix, i.e.
    # n x n matrix where entry W_ij is the weight of the edge from V_i to V_j
    W = nx.to_numpy_matrix(graph, nodelist=nodes_list, weight="weight").A
    n = W.shape[1]
    # create flat prior personalization vector if none given
    if r is None:
        r = np.array([n * [1 / float(n)]])
    # Specify some constants
    max_iter = 1000
    diff = 1e10
    tol = 1e-3
    pr = np.array([n * [1 / float(n)]])
    # Get p0(v -> u), i.e. transition probability prior to reinforcement
    tmp = np.reshape(np.sum(W, axis=1), (n, 1))
    idx_nan = np.flatnonzero(tmp == 0)
    W0 = W / np.tile(tmp, (1, n))
    W0[idx_nan, :] = 0
    del W

    # DivRank algorithm
    i = 0
    while i < max_iter and diff > tol:
        W1 = alpha * W0 * np.tile(pr, (n, 1))
        W1 = W1 - np.diag(W1[:, 0]) + (1 - alpha) * np.diag(pr[0, :])
        tmp1 = np.reshape(np.sum(W1, axis=1), (n, 1))
        P = W1 / np.tile(tmp1, (1, n))
        P = ((1 - lambda_) * P) + (lambda_ * np.tile(r, (n, 1)))
        pr_new = np.dot(pr, P)
        i += 1
        diff = np.sum(np.abs(pr_new - pr)) / np.sum(pr)
        pr = pr_new

    # sort nodes by divrank score
    results = sorted(
        ((i, score) for i, score in enumerate(pr.flatten().tolist())),
        key=operator.itemgetter(1),
        reverse=True,
    )

    # replace node number by node value
    divranks = {nodes_list[result[0]]: result[1] for result in results}

    return divranks
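Usage sketch (my own; besides `networkx as nx` and `numpy as np`, the function needs `operator`, the `typing` names, and a module-level `LOGGER` in scope):

import logging
import operator
from typing import Dict, Optional

import networkx as nx
import numpy as np

LOGGER = logging.getLogger(__name__)

G = nx.karate_club_graph()
divranks = rank_nodes_by_divrank(G)
print(sorted(divranks.items(), key=operator.itemgetter(1), reverse=True)[:3])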
Example No. 44
 def test_identity_graph_array(self):
     "Conversion from graph to array to graph."
     A = nx.to_numpy_matrix(self.G1)
     A = np.asarray(A)
     self.identity_conversion(self.G1, A, nx.Graph())
Example No. 45
    def _encode_to_numeric(self):

        # Unique_layers = set(n[1] for n in self.core_network.nodes())
        # individual_adj = defaultdict(list)

        # for layer in unique_layers:

        if self.network_type != "multiplex":
            new_edges = []
            nmap = {}
            n_count = 0
            n1 = []
            n2 = []
            w = []

            if self.directed:
                simple_graph = nx.DiGraph()
            else:
                simple_graph = nx.Graph()

            for edge in self.core_network.edges(data=True):
                node_first = edge[0]
                node_second = edge[1]
                if node_first not in nmap:
                    nmap[node_first] = n_count
                    n_count += 1
                if node_second not in nmap:
                    nmap[node_second] = n_count
                    n_count += 1
                try:
                    weight = float(edge[2]['weight'])
                except:
                    weight = 1

                simple_graph.add_edge(nmap[node_first],
                                      nmap[node_second],
                                      weight=weight)
            vectors = nx.to_scipy_sparse_matrix(simple_graph)
            self.numeric_core_network = vectors
            self.node_order_in_matrix = simple_graph.nodes()

        else:
            unique_layers = set(n[1] for n in self.core_network.nodes())
            individual_adj = []
            all_nodes = []
            for layer in unique_layers:
                layer_nodes = [
                    n for n in self.core_network.nodes() if n[1] == layer
                ]
                H = self.core_network.subgraph(layer_nodes)
                adj = nx.to_numpy_matrix(H)
                all_nodes += list(H.nodes())
                individual_adj.append(adj)

            whole_mat = []
            for en, adj_mat in enumerate(individual_adj):
                cross = np.identity(adj_mat.shape[0])
                one_row = []
                for j in range(len(individual_adj)):
                    if j < en or j > en:
                        one_row.append(cross)
                    if j == en:
                        one_row.append(adj_mat)

                whole_mat.append(np.hstack(one_row))

            vectors = np.vstack(whole_mat)
            self.numeric_core_network = vectors
            self.node_order_in_matrix = all_nodes
Example No. 46
	ix=[dictEigval[k] for k in kEig]
	return eigval[ix],eigvec[:,ix]

def checkResult(Lbar,eigvec,eigval,k):
	"""
	"input
	"matrix Lbar and k eig values and k eig vectors
	"print norm(Lbar*eigvec[:,i]-lamda[i]*eigvec[:,i])
	"""
	check=[np.dot(Lbar,eigvec[:,i])-eigval[i]*eigvec[:,i] for i in range(0,k)]
	length=np.array([np.linalg.norm(e) for e in check])/np.spacing(1)
	print("Lbar*v-lamda*v are %s*%s" % (length,np.spacing(1)))

g=nx.karate_club_graph()
nodeNum=len(g.nodes())
m=nx.to_numpy_matrix(g)
Lbar=getNormLaplacian(m)
k=2
kEigVal,kEigVec=getKSmallestEigVec(Lbar,k)
print("k eig val are %s" % kEigVal)
print("k eig vec are %s" % kEigVec)
checkResult(Lbar,kEigVec,kEigVal,k)

# Skip k-means: use the simple sign test on the eigenvector to assign node membership

clusterA=[i for i in range(0,nodeNum) if kEigVec[i,1]>0]
clusterB=[i for i in range(0,nodeNum) if kEigVec[i,1]<0]

#draw graph
colList=dict.fromkeys(g.nodes())
for node,score in colList.items():
Example No. 47
import networkx as nx
import numpy as np
import csv
# from math import inf

G = nx.DiGraph()

nodes = ['a', 'b', 'c', 'd']
arcs = [('a', 'b', 1), ('a', 'd', 10), ('b', 'a', 13), ('b', 'c', 2),
        ('c', 'b', 12), ('c', 'd', 3), ('d', 'a', 4), ('d', 'c', 11)]

G.add_nodes_from(nodes)
G.add_weighted_edges_from(arcs)

adj = np.array(nx.to_numpy_matrix(G), dtype=float)

adj[0][2] = np.inf
adj[1][2] = np.inf
adj[2][0] = np.inf
np.set_printoptions(infstr='∞', precision=0)

with open('adj1.csv', 'w') as f:
    csv.writer(f).writerows(adj)
    print('', file=f)

print(adj)
Example No. 48
    def infer_trajectory(self,
                         init_node: int,
                         labels=None,
                         cutoff: Optional[float] = None,
                         is_plot: bool = True,
                         path: Optional[str] = None):
        '''Infer the trajectory.        

        Parameters
        ----------
        init_node : int
            The initial node for the inferred trajectory.
        cutoff : float, optional
            The threshold for filtering edges with scores less than cutoff.
        is_plot : boolean, optional
            Whether to plot or not.
        path : string, optional
            The path to save the figure to; the figure is not saved if it is None.

        Returns
        ----------
        G : nx.Graph 
            The modified graph that indicates the inferred trajectory.
        w : np.array
            \([N,k]\) The modified \(\\tilde{w}\).
        pseudotime : np.array
            \([N,]\) The pseudotime based on projected trajectory.      
        '''
        # select edges
        if len(self.edges) == 0:
            milestone_net = select_edges = []
            G = nx.Graph()
            G.add_nodes_from(self.G.nodes)
        else:
            if self.no_loop:
                G = nx.maximum_spanning_tree(self.G)
            else:
                G = self.G
            if cutoff is None:
                cutoff = 0.01
            graph = nx.to_numpy_matrix(G)
            graph[graph <= cutoff] = 0
            G = nx.from_numpy_array(graph)
            connected_comps = nx.node_connected_component(G, init_node)
            subG = G.subgraph(connected_comps)
            if len(subG.edges) > 0:
                milestone_net = self.build_milestone_net(subG, init_node)
                if self.no_loop is False and milestone_net.shape[0] < len(
                        G.edges):
                    warnings.warn(
                        "The directed graph shown is a minimum spanning tree of the estimated trajectory backbone to avoid arbitrary assignment of the directions."
                    )
                select_edges = milestone_net[:, :2]
                select_edges_score = graph[select_edges[:, 0], select_edges[:,
                                                                            1]]
                if select_edges_score.max() - select_edges_score.min() == 0:
                    select_edges_score = select_edges_score / select_edges_score.max(
                    )
                else:
                    select_edges_score = (select_edges_score -
                                          select_edges_score.min()) / (
                                              select_edges_score.max() -
                                              select_edges_score.min()) * 3
            else:
                milestone_net = select_edges = []

        # modify w_tilde
        w = self.modify_wtilde(self.w_tilde, select_edges)

        # compute pseudotime
        pseudotime = self.comp_pseudotime(milestone_net, init_node, w)

        if is_plot:
            fig, ax = plt.subplots(1, 1, figsize=(20, 10))

            cmap = matplotlib.cm.get_cmap('viridis')
            colors = [
                plt.cm.jet(float(i) / self.NUM_CLUSTER)
                for i in range(self.NUM_CLUSTER)
            ]
            if np.sum(pseudotime > -1) > 0:
                norm = matplotlib.colors.Normalize(vmin=np.min(
                    pseudotime[pseudotime > -1]),
                                                   vmax=np.max(pseudotime))
                sc = ax.scatter(*self.embed_z[pseudotime > -1, :].T,
                                norm=norm,
                                c=pseudotime[pseudotime > -1],
                                s=16,
                                alpha=0.5)
                plt.colorbar(sc, ax=[ax], location='right')
            else:
                norm = None

            if np.sum(pseudotime == -1) > 0:
                ax.scatter(*self.embed_z[pseudotime == -1, :].T,
                           c='gray',
                           s=16,
                           alpha=0.4)

            for i in range(len(select_edges)):
                points = self.embed_z[
                    np.sum(w[:, select_edges[i, :]] > 0, axis=-1) == 2, :]
                points = points[points[:, 0].argsort()]
                try:
                    x_smooth, y_smooth = _get_smooth_curve(
                        points, self.embed_mu[select_edges[i, :], :])
                except Exception:
                    x_smooth, y_smooth = self.embed_mu[select_edges[
                        i, :], 0], self.embed_mu[select_edges[i, :], 1]
                ax.plot(x_smooth,
                        y_smooth,
                        '-',
                        linewidth=1 + select_edges_score[0, i],
                        color="black",
                        alpha=0.8,
                        path_effects=[
                            pe.Stroke(linewidth=1 + select_edges_score[0, i] +
                                      1.5,
                                      foreground='white'),
                            pe.Normal()
                        ],
                        zorder=1)

                delta_x = self.embed_mu[select_edges[i, 1], 0] - x_smooth[-2]
                delta_y = self.embed_mu[select_edges[i, 1], 1] - y_smooth[-2]
                length = np.sqrt(delta_x**2 + delta_y**2) * 50
                ax.arrow(
                    self.embed_mu[select_edges[i, 1], 0] - delta_x / length,
                    self.embed_mu[select_edges[i, 1], 1] - delta_y / length,
                    delta_x / length,
                    delta_y / length,
                    color='black',
                    alpha=1.0,
                    shape='full',
                    lw=0,
                    length_includes_head=True,
                    head_width=0.02,
                    zorder=2)

            for i in range(len(self.CLUSTER_CENTER)):
                ax.scatter(
                    *self.embed_mu[i:i + 1, :].T,
                    c=[colors[i]],
                    edgecolors='white',  # linewidths=10,
                    norm=norm,
                    s=250,
                    marker='*',
                    label=str(i))
                ax.text(self.embed_mu[i, 0],
                        self.embed_mu[i, 1],
                        '%02d' % i,
                        fontsize=16)

            plt.setp(ax, xticks=[], yticks=[])
            box = ax.get_position()
            ax.set_position([
                box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9
            ])
            ax.legend(loc='upper center',
                      bbox_to_anchor=(0.5, -0.05),
                      fancybox=True,
                      shadow=True,
                      ncol=5)

            ax.set_title('Trajectory')
            if path is not None:
                plt.savefig(path, dpi=300)
            plt.show()
        return G, w, pseudotime
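
A hypothetical call, assuming a fitted object `model` that exposes this method (the object name and argument values are illustrative, not from the source):

# Hypothetical usage of infer_trajectory on a fitted `model`.
G, w, pseudotime = model.infer_trajectory(init_node=0,
                                          cutoff=0.05,
                                          is_plot=True,
                                          path='trajectory.png')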
Ejemplo n.º 49
0
Archivo: main.py Proyecto: kpj/OsciPy
def main(inp):
    """
    Main interface
    """
    if inp is None:
        # generate basis of system
        graph = nx.gnp_random_graph(10, 0.6)
        dim = len(graph.nodes())
        assert nx.is_connected(graph), 'Graph not connected'

        orig_A = nx.to_numpy_matrix(graph)
        orig_B = np.random.uniform(10, 20, size=dim)

        nz = np.nonzero(orig_A)
        orig_A[nz] = np.random.uniform(2, 5, size=len(nz[0]))

        print('Original A:\n', orig_A)
        print('Original B:', orig_B)

        omega = 3
        OMEGA_list = [2.9, 3.05, 3.1, 3.2]  #np.arange(3.7, 4.3, 0.05)

        # generate solutions
        data = []
        for OMEGA in tqdm(OMEGA_list):
            runs = []
            for i in trange(dim):
                mask = np.ones(dim, dtype=bool)
                mask[i] = 0
                Bvec = orig_B.copy()
                Bvec[mask] = 0

                syst = System(orig_A, Bvec, omega, OMEGA)
                sols, ts = syst.solve(0.01, 100)

                pdiffs = Reconstructor.extract_phase_differences(
                    sols, ts, syst.Phi)
                #print(pdiffs)
                #System.plot_solution(syst.Phi(ts), sols, ts)

                if pdiffs is not None:
                    runs.append(pdiffs)

            if len(runs) > 0:
                data.append(((OMEGA, omega), runs))

        # cache results
        fname = '{}_{}'.format(datetime.now().strftime('%Y%m%d%H%M%S'), dim)
        np.save('cache/{}'.format(fname), {
            'data': data,
            'ts': ts,
            'orig_A': orig_A,
            'orig_B': orig_B
        })
    else:
        data, ts = inp.item()['data'], inp.item()['ts']
        orig_A, orig_B = inp.item()['orig_A'], inp.item()['orig_B']
        dim = orig_A.shape[0]

        print('Original A:\n', orig_A)
        print('Original B:', orig_B)

    # reconstruct parameters
    recon = Reconstructor(ts, data, dim)
    rec_A, rec_B = recon.reconstruct()

    print('Reconstructed A:\n', rec_A)
    print('Reconstructed B:', rec_B)

    # plot result
    bundle = DictWrapper({
        'orig_A': orig_A,
        'orig_B': orig_B,
        'rec_A': rec_A,
        'rec_B': rec_B
    })
    show_reconstruction_overview(bundle, verbose=dim < 20)
Ejemplo n.º 50
0
# Loads the class labels
class_labels = np.loadtxt('../data/karate_labels.txt',
                          delimiter=',',
                          dtype=np.int32)
idx_to_class_label = dict()
for i in range(class_labels.shape[0]):
    idx_to_class_label[class_labels[i, 0]] = class_labels[i, 1]

y = list()
for node in G.nodes():
    y.append(idx_to_class_label[node])

y = np.array(y)
n_class = 2

adj = nx.to_numpy_matrix(G)  # Obtains the adjacency matrix
adj = normalize_adjacency(adj)  # Normalizes the adjacency matrix

############## Task 12
# Set the feature of all nodes to the same value
features = np.eye(n)  # Generates node features
# features = np.ones((n,1))

# Yields indices to split data into training and test sets
idx = np.random.RandomState(seed=42).permutation(n)
idx_train = idx[:int(0.8 * n)]
idx_test = idx[int(0.8 * n):]

# Transforms the numpy matrices/vectors to torch tensors
features = torch.FloatTensor(features)
y = torch.LongTensor(y)
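
The normalize_adjacency used above is not shown in this snippet. A common choice in GCN pipelines is the symmetric normalization D^{-1/2}(A + I)D^{-1/2}; the sketch below assumes that form and is not necessarily the original implementation.

import numpy as np

def normalize_adjacency(A):
    """Sketch of a GCN-style symmetric normalization (assumed; the
    original implementation is not shown in this snippet)."""
    A = np.asarray(A, dtype=float) + np.eye(A.shape[0])
    d_inv_sqrt = 1.0 / np.sqrt(A.sum(axis=1))
    return A * d_inv_sqrt[:, None] * d_inv_sqrt[None, :]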
Ejemplo n.º 51
0
def run(session, config, model, loader, verbose=False):
    total_cost = 0.

    num_ = 0.

    auc = 0.
    f1_score = F1score(model.batch_size)
    prediction_l = [0.] * model.batch_size
    prediction_n_l = [0.] * model.batch_size
    t_auc = 0.
    t_num = 0.

    def _add_list(x, y):
        for idx in range(len(x)):
            x[idx] += y[idx]
        return x

    time_consume = 0.
    # this_max_node = nx.to_numpy_matrix(loader.graph_now).shape[0]
    # print this_max_node
    normal_adj = model.normalize_adj(nx.to_numpy_matrix(loader.graph_now))
    feature_h0 = model.degree_feature(nx.to_numpy_matrix(loader.graph_now))
    # print "feature_h0", feature_h0
    for batch in loader.generate_batch_data(batchsize=model.batch_size,
                                            mode=model.mode):

        batch_id, batch_num, nodelist1, nodelist2, negative_list = batch

        if model.mode == "Train":
            # for idx in range(model.batch_size):
            #
            #     if nodelist1[idx] > this_max_node:
            #         print nodelist1[idx], this_max_node
            #     if nodelist2[idx] > this_max_node:
            #         print nodelist2[idx], this_max_node

            feed = {
                model.input_x: nodelist1,
                model.input_y: nodelist2,
                model.negative_sample: negative_list,
                model.input_adj: normal_adj,
                model.feature_h0: feature_h0
            }
            out = [
                model.cost, model.optimizer, model.auc_result, model.auc_opt,
                model.prediction, model.prediction_n, model.test1, model.test2
            ]

            begin_time = time.time()
            output = session.run(out, feed)
            time_consume += time.time() - begin_time
            cost, _, auc, _, prediction, prediction_n, test1, test2 = output

            # print 'test1', test1
            # print 'test2', test2
            prediction_l = _add_list(prediction_l, prediction)
            prediction_n_l = _add_list(prediction_n_l, prediction_n)

        if model.mode == "Valid":
            # print "nodelist1", np.asarray(nodelist1 * 2).shape
            # print np.asarray(nodelist2 + negative_list).shape
            # print np.asarray([1] * (model.batch_size / 2) + [0] * (model.batch_size /2)).shape

            feed = {
                model.input_x:
                np.asarray(nodelist1 * 2),
                model.input_y:
                np.asarray(nodelist2 + negative_list),
                model.label_xy:
                np.asarray([1] * (model.batch_size // 2) + [0] *
                           (model.batch_size // 2)),
                model.input_adj:
                normal_adj,
                model.feature_h0:
                feature_h0
            }

            out = [
                model.cost, model.optimizer, model.auc_result, model.auc_opt,
                model.prediction
            ]

            begin_time = time.time()
            output = session.run(out, feed)
            time_consume += time.time() - begin_time
            # print output
            cost, _, auc, _, prediction = output

            half = len(prediction) // 2  # integer division for Python 3
            for idx in range(half):
                if prediction[idx] > prediction[idx + half]:
                    t_auc += 1
                t_num += 1
            # print prediction
        # print "TEST",prediction
        if model.mode == "Train":
            auc = 0.
            total_cost += cost
        else:
            f1_score.add_f1(
                np.asarray([1] * (model.batch_size // 2) + [0] *
                           (model.batch_size // 2)), prediction)
            cost = 0.
            total_cost += cost

        num_ += 1.
        if verbose and batch_id % int(
                batch_num / 5.) == 1 and model.mode == "Valid":
            INFO_LOG(
                "{}/{}, cost: {}, auc: {}, f1_score: {}".format(
                    batch_id, batch_num, total_cost / num_, auc,
                    f1_score.return_f1_score()), True)
    if num_ == 0:
        INFO_LOG("===failed graph===" + str(loader.present_graph), True)
    # if model.mode == "Train":
    #     print("prediction_l", [x / batch_num for x in prediction_l])
    #     print("prediction_l_n", [x / batch_num for x in prediction_n_l])
    # else:
    #     print("valid prediction", f1_score.return_predict_mean())
    if not model.mode == "Train":
        # print "auc", t_auc / t_num
        auc = t_auc / t_num

    return total_cost / num_, {
        "auc": auc,
        "f1_score": f1_score.return_f1_score(),
        "time_consume": time_consume
    }
Ejemplo n.º 52
0
def rank_nodes_by_divrank(graph, r=None, lambda_=0.5, alpha=0.5):
    """
    Rank nodes in a network using the [DivRank]_ algorithm that attempts to
    balance between node centrality and diversity.

    Args:
        graph (:class:`networkx.Graph <networkx.Graph>`):
        r (:class:`numpy.array`): the "personalization vector";
            by default, ``r = ones(1, n)/n``
        lambda_ (float): must be in [0.0, 1.0]
        alpha (float): controls the strength of self-links;
            must be in [0.0, 1.0]

    Returns:
        Dict[str, float]: mapping of each node to its DivRank score

    References:
        .. [DivRank] Mei, Q., Guo, J., & Radev, D. (2010, July). Divrank: the interplay
           of prestige and diversity in information networks. In Proceedings of the
           16th ACM SIGKDD international conference on Knowledge discovery and data
           mining (pp. 1009-1018). ACM. http://clair.si.umich.edu/~radev/papers/SIGKDD2010.pdf
    """
    # check function arguments
    if len(graph) == 0:
        LOGGER.warning('``graph`` is empty!')
        return {}

    # create adjacency matrix, i.e.
    # n x n matrix where entry W_ij is the weight of the edge from V_i to V_j
    W = nx.to_numpy_matrix(graph, weight='weight').A
    n = W.shape[1]

    # create flat prior personalization vector if none given
    if r is None:
        r = np.array([n * [1 / float(n)]])

    # Specify some constants
    max_iter = 1000
    diff = 1e+10
    tol = 1e-3

    pr = np.array([n * [1 / float(n)]])

    # Get p0(v -> u), i.e. transition probability prior to reinforcement
    tmp = np.reshape(np.sum(W, axis=1), (n, 1))
    idx_nan = np.flatnonzero(tmp == 0)
    W0 = W / np.tile(tmp, (1, n))
    W0[idx_nan, :] = 0

    del W

    # DivRank algorithm
    i = 0
    while i < max_iter and diff > tol:
        W1 = alpha * W0 * np.tile(pr, (n, 1))
        W1 = W1 - np.diag(W1[:, 0]) + (1 - alpha) * np.diag(pr[0, :])
        tmp1 = np.reshape(np.sum(W1, axis=1), (n, 1))
        P = W1 / np.tile(tmp1, (1, n))
        P = ((1 - lambda_) * P) + (lambda_ * np.tile(r, (n, 1)))
        pr_new = np.dot(pr, P)
        i += 1
        diff = np.sum(np.abs(pr_new - pr)) / np.sum(pr)
        pr = pr_new

    # sort nodes by divrank score
    results = sorted(((i, score) for i, score in enumerate(pr.flatten().tolist())),
                     key=itemgetter(1), reverse=True)

    # replace node number by node value
    nodes_list = list(graph.nodes())
    divranks = {nodes_list[result[0]]: result[1] for result in results}

    return divranks
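
A minimal usage sketch, assuming the same imports the function relies on (networkx as nx and operator.itemgetter):

g = nx.karate_club_graph()
scores = rank_nodes_by_divrank(g)
top5 = sorted(scores.items(), key=itemgetter(1), reverse=True)[:5]
print(top5)  # the five highest-DivRank nodes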
Ejemplo n.º 53
0
    # A = nx.adjacency_matrix(G).toarray()

    K = 4
    import os

    DATA_PATH = '../data/vk/'
    ego_paths = [f for f in os.listdir(DATA_PATH) if f.endswith(".ego")]
    # ego_paths = ego_paths[:2]
    inits = ['cond_new']
    Fss = []
    initFss = []
    itersLLHs = []
    for indx, ego in enumerate(ego_paths):
        D = pickle.load(open('../data/vk/{}'.format(ego), 'rb'))  # pickle/open replace Python 2's cPickle/file()
        G = nx.Graph(D)
        A = np.array(nx.to_numpy_matrix(G))
        Fs = []
        initFs = []
        itersLLH = []
        for init in inits:
            bigClam = BigClam(A,
                              K,
                              initF=init,
                              debug_output=False,
                              LLH_output=False,
                              eps=1e-4,
                              iter_output=1,
                              processesNo=1)
            res = bigClam.fit()
            initFs.append(bigClam.initFmode)
            itersLLH.append(bigClam.LLH_output_vals)
Ejemplo n.º 54
0
    def connectivity(self):
        connectivity = nx.to_numpy_matrix(self._graph)
        connectivity = np.array(connectivity, dtype=int)
        return connectivity
Ejemplo n.º 55
0
Archivo: p4.py Proyecto: fsoest/netdyn
plt.savefig('local_epidemic.png', dpi=300)
# %%
# Global pandemic
import pandas as pd
countries = pd.read_csv('countriesToCountries.csv')
# %%
import networkx as nx
g = nx.DiGraph()
g.add_nodes_from(countries['country departure'].unique())
for i in range(len(countries)):
    g.add_edge(countries['country departure'].iloc[i],
               countries['country arrival'].iloc[i],
               weight=countries['number of routes'].iloc[i])
#nx.draw(g)
# %%
adj_matrix = nx.to_numpy_matrix(g)
# Set self links to zero
for i in range(np.shape(adj_matrix)[0]):
    adj_matrix[i, i] = 0
# %%
adj_matrix = adj_matrix.astype(np.float64)
for i in range(np.shape(adj_matrix)[0]):
    adj_matrix[i] = adj_matrix[i] / adj_matrix[i].sum()

# %%
weighted = 1 - np.log(adj_matrix)
weighted = np.where(weighted == np.inf, 0, weighted)
ng = nx.from_numpy_matrix(weighted,
                          create_using=nx.DiGraph,
                          parallel_edges=False)
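
Since the weights above are 1 - log(p) of the route fractions, adding weights along a path corresponds to multiplying probabilities, so weighted shortest paths in `ng` approximate the most probable spreading routes (the "effective distance" idea). A minimal sketch, assuming the graph built above:

# Node labels in `ng` are integer positions in the order of g.nodes();
# map them back to country names for readable output.
names = list(g.nodes())
dist = nx.shortest_path_length(ng, source=0, weight='weight')
closest = sorted(dist.items(), key=lambda kv: kv[1])[1:6]
print([(names[i], round(d, 2)) for i, d in closest])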
Ejemplo n.º 56
0
def hub_matrix(G,nodelist=None):
    """Return the HITS hub matrix."""
    M=nx.to_numpy_matrix(G,nodelist=nodelist)
    return M*M.T
Ejemplo n.º 57
0
    betw = nx.algorithms.centrality.betweenness_centrality(g)
    print("\nBetweenness:\n ")
    # b_i = sum over h != j != i of sigma_hj(i) / sigma_hj
    for b in sorted(betw.items(), key=lambda x: x[1], reverse=True):
        print(f"Node: {b}")

    print("")


# Adjacency matrix
nodos_ordenados = np.sort(g.nodes())
print("Adjacency matrix:")
print(nx.to_numpy_matrix(g, nodelist=nodos_ordenados))
A = nx.adjacency_matrix(g)
print(A.todense())

# Graph nodes
print("")
print("Graph nodes:")
print(g.nodes)
print("")
medidasCentralidad()

sp = dict(nx.all_pairs_shortest_path(g))
print("Camino para llegar de una ciudad a otra, Baja Sur a Yucatán;")
#orig = str(input("Ciudad Origen: "))
#dstn = str(input("Ciudad Destino: "))
orig = "BS2"
Ejemplo n.º 58
0
        if u == v:
            nodes_in_selfloops.append(u)
    return nodes_in_selfloops

# Check whether number of self loops equals the number of nodes in self loops
# (assert throws an error if the statement evaluates to False)
assert T.number_of_selfloops() == len(find_selfloop_nodes(T))


# Network visualization ------------------------------------------------------------------------------------------------
# Matrix plot
m = nv.MatrixPlot(T)
m.draw()
plt.show()
# Convert T to a matrix format: A
A = nx.to_numpy_matrix(T)
# Convert A back to the NetworkX form as a directed graph: T_conv
T_conv = nx.from_numpy_matrix(A, create_using=nx.DiGraph())
# Check that the `category` metadata field is lost from each node
for n, d in T_conv.nodes(data=True):
    assert 'category' not in d.keys()

# Circos plot
c = nv.CircosPlot(T)
c.draw()
plt.show()

# Arc plot
a = ArcPlot(T, node_order='category', node_color='category')
a.draw()
plt.show()
Ejemplo n.º 59
0
    def ChangeCommunityColorAndInstantiateHierarchy(self, level=-1):
        self.g = self.Graphwidget.Graph_data().DrawHighlightedGraph(
            self.Graphwidget.EdgeSliderValue)
        self.ColorNodesBasedOnCorrelation = False
        self.partition = cm.best_partition(self.g)
        self.induced_graph = cm.induced_graph(self.partition, self.g)

        if not (level == -1):
            dendo = cm.generate_dendrogram(self.g)
            g = cm.partition_at_level(dendo, level)
            self.induced_graph1 = cm.induced_graph(g, self.g)
            self.partition = g
            self.induced_graph = self.induced_graph1

        # The induced graph stores the community-level adjacency matrix
        # before the correlation strength is calculated. Only the lower
        # triangle of the matrix is needed; the remaining values can be
        # discarded, since computing them is expensive.

        # self.Find_InterModular_Edge_correlativity()
        self.Matrix = nx.to_numpy_matrix(self.induced_graph)

        # Triggering a new window with the same color
        # If the Gray out option is clicked then gray out the nodes without the colors
        self.ColorForCommunities(len(set(self.partition.values())))
        self.ColorForVisit(self.partition)

        nodes1 = [
            item for item in self.Graphwidget.scene().items()
            if isinstance(item, Node)
        ]
        count = 0
        for community in set(self.partition.values()):
            # Ensure each community is assigned its own color
            list_nodes = [
                nodes for nodes in self.partition.keys()
                if self.partition[nodes] == community
            ]

            for node in nodes1:
                if node.counter - 1 in list_nodes:
                    node.PutColor(self.clut[count])
            count = count + 1

        for node in nodes1:
            node.allnodesupdate()
            break

        clut = self.clut
        Max = self.Graphwidget.Max
        Graph = self.Graphwidget
        Matrix = self.Matrix
        ma = np.ma.masked_equal(Matrix, 0.0)
        Min1 = ma.min()
        Max1 = Matrix.max()
        Pos = self.Find_Initial_Positions()
        """
        Generates a new window so that you can access the views related to community 
        analysis
        """
        def newwindow():
            for i in reversed(range(self.Graphwidget.hbox.count())):
                self.Graphwidget.hbox.itemAt(i).widget().close()

            community = CommunityWidget(
                self.Graphwidget, self.induced_graph,
                self.Graphwidget.correlationTableObject, clut, Max, Matrix, ma,
                Min1, Max1, Pos)
            Dendogram = dendogram(self.Graphwidget, self.g, clut)

            self.Graphwidget.hbox.setContentsMargins(0, 0, 0, 0)

            self.Graphwidget.hbox.addWidget(community)
            self.Graphwidget.hbox.setContentsMargins(0, 0, 0, 0)

            self.Graphwidget.hbox.addWidget(Dendogram)
            self.Graphwidget.hbox.setContentsMargins(0, 0, 0, 0)

            self.communityObject = community
            self.dendogramObject = Dendogram

            self.Graphwidget.hbox.setContentsMargins(0, 0, 0, 0)
            self.Graphwidget.wid.setContentsMargins(0, 0, 0, 0)

            self.Graphwidget.wid.setLayout(self.Graphwidget.hbox)

        newwindow()
        self.Graphwidget.CommunityColorAndDict.emit(self.ColorToBeSentToVisit,
                                                    self.partition)
Ejemplo n.º 60
0
def authority_matrix(G,nodelist=None):
    """Return the HITS authority matrix."""
    M=nx.to_numpy_matrix(G,nodelist=nodelist)
    return M.T*M
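
For reference, the HITS authority scores are, up to normalization, the principal eigenvector of this matrix, just as hub scores come from the hub matrix of example n.º 56. A minimal sketch, assuming the function above is in scope:

import networkx as nx
import numpy as np

G = nx.gnp_random_graph(8, 0.4, seed=1, directed=True)
vals, vecs = np.linalg.eig(authority_matrix(G))
v = np.abs(np.real(np.asarray(vecs)[:, np.argmax(vals.real)]))
print(v / v.sum())  # compare with the authority scores from nx.hits(G)[1]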