Code Example #1
File: asn2.py Project: suzywho/Million-Song-Database
def make_connections(n,density=0.25):
	"""
	This function will return a random adjacency matrix of size
	n x n. You read the matrix like this:
	
	if matrix[2,7] = 1, then cities '2' and '7' are connected.
	if matrix[2,7] = 0, then the cities are _not_ connected.
	
	:param n: number of cities
	:param density: controls the ratio of 1s to 0s in the matrix
	
	:returns: an n x n adjacency matrix
	"""
	
	import networkx
	import numpy
	
	# Generate a random adjacency matrix and use it to build a networkx graph
	a=numpy.int32(numpy.triu((numpy.random.random_sample(size=(n,n))<density)))
	G=networkx.from_numpy_matrix(a)
	
	# If the network is not connected (i.e., some cities cannot be reached
	# from others), generate a new one. Keep doing this until we get a
	# connected one.
	# Yes, there are more elegant ways to do this, but I'm demonstrating
	# while loops!
	while not networkx.is_connected(G):
		a=numpy.int32(numpy.triu((numpy.random.random_sample(size=(n,n))<density)))
		G=networkx.from_numpy_matrix(a)
	
	# Cities should be connected to themselves.
	numpy.fill_diagonal(a,1)
	
	return a + numpy.triu(a,1).T
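A minimal usage sketch of the function above (the size and density are arbitrary):

adj = make_connections(10, density=0.3)
print(adj.diagonal())        # all ones: cities are connected to themselves
print((adj == adj.T).all())  # True: the returned matrix is symmetric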
Code Example #2
 def calc_covariance(self, method="graphlassocv", values="cov"):
     """
     Calculate the covariance matrix used to build the graph.

     Parameters
     ----------
     method: string
         Type of algorithm for covariance, graphlassocv
     values: string
         Type of values for matrix for graph
         cov: covariance_
         pre: precision_
     """
     if method == "graphlassocv":
         self._model = covariance.GraphLassoCV()
     else:
         raise NotImplementedError
     self._model_name = method
     self._model.fit(self._data)
     if values == "cov":
         self._graph = nx.from_numpy_matrix(self._model.covariance_)
     elif values == "pre":
         self._graph = nx.from_numpy_matrix(self._model.precision_)
     else:
         raise NotImplementedError
     self._modeled = True
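A minimal standalone sketch of the same flow, assuming an older scikit-learn (GraphLassoCV was renamed GraphicalLassoCV in scikit-learn 0.22):

import numpy as np
import networkx as nx
from sklearn import covariance

X = np.random.RandomState(0).randn(60, 5)        # 60 samples, 5 variables
model = covariance.GraphLassoCV()
model.fit(X)
graph = nx.from_numpy_matrix(model.covariance_)  # or model.precision_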
Code Example #3
File: reconstruction.py Project: kpj/OsciPy
def plot_reconstruction_result(res):
    """ Plot original and reconstructed graph plus time series
    """
    fig = plt.figure(figsize=(32, 8))
    gs = mpl.gridspec.GridSpec(1, 4)

    # original graph
    orig_ax = plt.subplot(gs[0])
    plot_graph(nx.from_numpy_matrix(res.A.orig), orig_ax)
    orig_ax.set_title('Original graph')

    # time series
    ax = plt.subplot(gs[1:3])
    sns.tsplot(
        time='time', value='theta',
        unit='source', condition='oscillator',
        estimator=np.mean, legend=False,
        data=compute_solutions(res),
        ax=ax)
    ax.set_title(r'$A_{{err}} = {:.2}, B_{{err}} = {:.2}$'.format(*compute_error(res)))

    # reconstructed graph
    rec_ax = plt.subplot(gs[3])
    tmp = res.A.rec
    tmp[abs(tmp) < 1e-1] = 0
    plot_graph(nx.from_numpy_matrix(tmp), rec_ax)
    rec_ax.set_title('Reconstructed graph')

    plt.tight_layout()
    save(fig, 'reconstruction_overview')
Code Example #4
    def test_from_numpy_matrix_parallel_edges(self):
        """Tests that the :func:`networkx.from_numpy_matrix` function
        interprets integer weights as the number of parallel edges when
        creating a multigraph.

        """
        A = np.matrix([[1, 1], [1, 2]])
        # First, with a simple graph, each integer entry in the adjacency
        # matrix is interpreted as the weight of a single edge in the graph.
        expected = nx.DiGraph()
        edges = [(0, 0), (0, 1), (1, 0)]
        expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges])
        expected.add_edge(1, 1, weight=2)
        actual = nx.from_numpy_matrix(A, parallel_edges=True,
                                      create_using=nx.DiGraph())
        assert_graphs_equal(actual, expected)
        actual = nx.from_numpy_matrix(A, parallel_edges=False,
                                      create_using=nx.DiGraph())
        assert_graphs_equal(actual, expected)
        # Now each integer entry in the adjacency matrix is interpreted as the
        # number of parallel edges in the graph if the appropriate keyword
        # argument is specified.
        edges = [(0, 0), (0, 1), (1, 0), (1, 1), (1, 1)]
        expected = nx.MultiDiGraph()
        expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges])
        actual = nx.from_numpy_matrix(A, parallel_edges=True,
                                      create_using=nx.MultiDiGraph())
        assert_graphs_equal(actual, expected)
        expected = nx.MultiDiGraph()
        expected.add_edges_from(set(edges), weight=1)
        # The sole self-loop (edge 0) on vertex 1 should have weight 2.
        expected[1][1][0]['weight'] = 2
        actual = nx.from_numpy_matrix(A, parallel_edges=False,
                                      create_using=nx.MultiDiGraph())
        assert_graphs_equal(actual, expected)
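A minimal sketch of the behavior this test asserts, runnable under networkx 2.x (from_numpy_matrix was removed in networkx 3.0 in favor of from_numpy_array):

import numpy as np
import networkx as nx

A = np.matrix([[1, 1], [1, 2]])
G = nx.from_numpy_matrix(A, parallel_edges=True, create_using=nx.MultiDiGraph())
# The entry A[1, 1] == 2 becomes two parallel self-loops on node 1,
# so the multigraph holds 5 edges in total.
print(G.number_of_edges())  # 5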
Code Example #5
File: nbs.py Project: bpinsard/nipype
    def _run_interface(self, runtime):

        if not have_cv:
            raise ImportError("cviewer library is not available")

        THRESH = self.inputs.threshold
        K = self.inputs.number_of_permutations
        TAIL = self.inputs.t_tail
        edge_key = self.inputs.edge_key
        details = edge_key + '-thresh-' + str(THRESH) + '-k-' + str(K) + '-tail-' + TAIL + '.pck'

        # Fill in the data from the networks
        X = ntwks_to_matrices(self.inputs.in_group1, edge_key)
        Y = ntwks_to_matrices(self.inputs.in_group2, edge_key)

        PVAL, ADJ, _ = nbs.compute_nbs(X, Y, THRESH, K, TAIL)

        iflogger.info('p-values:')
        iflogger.info(PVAL)

        pADJ = ADJ.copy()
        for idx, _ in enumerate(PVAL):
            x, y = np.where(ADJ == idx + 1)
            pADJ[x, y] = PVAL[idx]

        # Create networkx graphs from the adjacency matrix
        nbsgraph = nx.from_numpy_matrix(ADJ)
        nbs_pval_graph = nx.from_numpy_matrix(pADJ)

        # Relabel nodes because they should not start at zero for our convention
        nbsgraph = nx.relabel_nodes(nbsgraph, lambda x: x + 1)
        nbs_pval_graph = nx.relabel_nodes(nbs_pval_graph, lambda x: x + 1)

        if isdefined(self.inputs.node_position_network):
            node_ntwk_name = self.inputs.node_position_network
        else:
            node_ntwk_name = self.inputs.in_group1[0]

        node_network = nx.read_gpickle(node_ntwk_name)
        iflogger.info('Populating node dictionaries with attributes from %s',
                      node_ntwk_name)

        for nid, ndata in node_network.nodes(data=True):
            nbsgraph.nodes[nid].update(ndata)
            nbs_pval_graph.nodes[nid].update(ndata)

        path = op.abspath('NBS_Result_' + details)
        iflogger.info(path)
        nx.write_gpickle(nbsgraph, path)
        iflogger.info('Saving output NBS edge network as %s', path)

        pval_path = op.abspath('NBS_P_vals_' + details)
        iflogger.info(pval_path)
        nx.write_gpickle(nbs_pval_graph, pval_path)
        iflogger.info('Saving output p-value network as %s', pval_path)
        return runtime
Code Example #6
def get_connection_densities(network, community_affiliation):
    #================================
    # Get density of within and between module connections
    #================================
    """
    inputs:
    network: adjacency_matrix (NumPy array)
    community_affiliation: array that indicates which community/module a node belongs to

    outputs:
    density of connections within modules
    density of connections between modules
    """

    import networkx as nx
    import numpy as np
    
    network[network > 0] = 1. # binarizing the network
    
    G = nx.from_numpy_matrix(network) # original network
    for node in G.nodes():
        G.node[node]['community'] = community_affiliation[node]

    within_weights = list()
    between_weights = list()

    for edge in G.edges():
        if G.node[edge[0]]['community'] == G.node[edge[1]]['community']:
            within_weights.append(G.edge[edge[0]][edge[1]]['weight'])
        else:
            between_weights.append(G.edge[edge[0]][edge[1]]['weight'])

    connected_G = nx.from_numpy_matrix(np.ones(shape=network.shape)) # fully-connected network
    full_within_weights = list()
    full_between_weights = list()

    for node in connected_G.nodes():
        connected_G.node[node]['community'] = community_affiliation[node]

    for edge in connected_G.edges():
        if connected_G.node[edge[0]]['community'] == connected_G.node[edge[1]]['community']:
            full_within_weights.append(connected_G.edge[edge[0]][edge[1]]['weight'])
        else:
            full_between_weights.append(connected_G.edge[edge[0]][edge[1]]['weight'])

    within_density = sum(within_weights)/sum(full_within_weights)
    between_density = sum(between_weights)/sum(full_between_weights)
    
    return(within_density, between_density)
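A hypothetical usage sketch (note that the function binarizes its input in place, and the G.node/G.edge accesses above require networkx 1.x):

import numpy as np

network = np.array([[0., 1., 1., 0.],
                    [1., 0., 0., 0.],
                    [1., 0., 0., 1.],
                    [0., 0., 1., 0.]])
community_affiliation = [0, 0, 1, 1]   # two modules of two nodes each
within, between = get_connection_densities(network, community_affiliation)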
Code Example #7
File: passes_nx.py Project: mhong19414/anelka
def df_to_graph(df):
    G = nx.from_numpy_matrix(df.values, create_using=nx.DiGraph())
    G = nx.relabel_nodes(G, dict(enumerate(df.columns)))
    weights = nx.get_edge_attributes(G, 'weight')
    invW = dict([(k, 1/float(v)) for (k,v) in weights.items()])
    nx.set_edge_attributes(G, 'distance', invW)
    return G
Code Example #8
 def identity_conversion(self, G, A, create_using):
     GG = nx.from_numpy_matrix(A, create_using=create_using)
     self.assert_equal(G, GG)
     GW = nx.to_networkx_graph(A, create_using=create_using)
     self.assert_equal(G, GW)
     GI = create_using.__class__(A)
     self.assert_equal(G, GI)
Code Example #9
File: idtxl_io.py Project: SimonStreicher/IDTxl
def export_networkx_graph(adjacency_matrix, weights):
    """Export networkx graph object for an inferred network.

    Export a weighted, directed graph object from the network of inferred
    (multivariate) interactions (e.g., multivariate TE), using the networkx
    class for directed graphs (DiGraph). Multiple options for the weight are
    available (see documentation of method get_adjacency_matrix for details).

    Args:
        adjacency_matrix : 2D numpy array
            adjacency matrix to be exported, returned by get_adjacency_matrix()
            method of Results() class
        weights : str
            weights for the adjacency matrix (see documentation of method
            get_adjacency_matrix for details)

    Returns: DiGraph instance
        directed graph of networkx package's DiGraph() class
    """
    # use 'weights' parameter (string) as networkx edge property name and use
    # adjacency matrix entries as edge property values
    custom_type = [(weights, type(adjacency_matrix[0, 0]))]
    custom_npmatrix = np.matrix(adjacency_matrix, dtype=custom_type)
    return nx.from_numpy_matrix(custom_npmatrix, create_using=nx.DiGraph())
Code Example #10
def closenessCentrality(A):
    H = nx.from_numpy_matrix(A)
    length = list(nx.all_pairs_shortest_path_length(H))
    print(length)
    distanceMatrix = []
    rows = len(length)
    for i in range(0, rows):
        x = length[i]
        y = x[1]
        for j in range(0, rows):
            distanceMatrix.append(y[j])

    a = np.array(distanceMatrix)
    a = a.reshape(rows, rows)
    result1 = []
    rows = a.shape[0]
    cols = a.shape[1]
    for r in range(0, rows):
        total = 0
        for c in range(0, cols):
            if r != c:
                total += a[r][c]
        result1.append((rows - 1) / total)
    return result1
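A quick sanity check: for a connected graph, the values above should agree with networkx's built-in closeness_centrality (this check is a sketch, not part of the original code):

import numpy as np
import networkx as nx

A = np.array([[0, 1, 1],
              [1, 0, 0],
              [1, 0, 0]])
print(closenessCentrality(A))  # [1.0, 0.666..., 0.666...]
print(list(nx.closeness_centrality(nx.from_numpy_matrix(A)).values()))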
Code Example #11
def coword_network(mesh_df, start, end,topic_count=0):
        """
        constructs a coword network for the years supplied;
        nodes will be labelled by topic, have a 'weight' of co-occurrence,
        a 'start_year' attribute,
        and an 'end_year' attribute which is the end year of the search

        Parameters
        ----------------
        mesh_df: a dataframe with at least the topics and years columns
        start: start year
        end: end year
        topic_count: the number of the topics to use
        (not too big, otherwise the coword matrix will be huge)
        """

        # determine the number of topics to count
        all_topics = [t for top in mesh_df.topics.dropna() for t in top]
        topic_collection = collections.Counter(all_topics)
        if topic_count > 0 and topic_count < len(topic_collection):
            common_topics = [k[0] for k in topic_collection.most_common(topic_count)]
        else:
            common_topics = sorted(topic_collection.keys())

        cow_df = coword_matrix_years(mesh_df, start, end, common_topics)
        cow_nx = nx.from_numpy_matrix(cow_df.as_matrix())
        col_names = cow_df.columns.tolist()
        labels = {col_names.index(l): l for l in col_names}
        start_year = {i: start for i in range(0, len(col_names))}
        end_year = {i: end for i in range(0, len(col_names))}
        nx.set_node_attributes(cow_nx, 'start_year', start_year)
        nx.set_node_attributes(cow_nx, 'end_year', end_year)
        nx.relabel_nodes(cow_nx, labels, copy=False)
        return cow_nx
Code Example #12
File: cbpktst.py Project: smkia/cbpktst
def compute_clusters_statistic(test_statistic, proximity_matrix, verbose=False):
    """Given a test statistic for each unit and a boolean proximity
    matrix among units, compute the cluster statistic using the
    connected components graph algorithm. It works for sparse
    proximity matrices as well.

    Returns the clusters and their associated cluster statistic.
    """
    # Build a graph from the proximity matrix:
    if issparse(proximity_matrix):
        graph = from_scipy_sparse_matrix(proximity_matrix)
    else:
        graph = from_numpy_matrix(proximity_matrix)

    # Compute connected components:
    clusters = connected_components(graph)
    if verbose: print("Nr. of clusters: %s. Clusters sizes: %s" % (len(clusters), np.array([len(cl) for cl in clusters])))
    # Compute the cluster statistic:
    cluster_statistic = np.zeros(len(clusters))
    for i, cluster in enumerate(clusters):
        cluster_statistic[i] = test_statistic[cluster].sum()

    # final cleanup to prepare easy-to-use results:
    idx = np.argsort(cluster_statistic)[::-1]
    clusters = np.array([np.array(cl, dtype=np.int) for cl in clusters], dtype=np.object)[idx]
    if clusters[0].dtype == np.object: # THIS FIXES A NUMPY BUG (OR FEATURE?)
        # The bug: it seems not possible to create ndarray of type
        # np.object from arrays all of the *same* length and desired
        # dtype, i.e. dtype!=np.object. In this case the desired dtype
        # is automatically changed into np.object. Example:
        # array([array([1], dtype=int)], dtype=object)
        clusters = clusters.astype(np.int)

    cluster_statistic = cluster_statistic[idx]
    return clusters, cluster_statistic
Code Example #13
File: visual.py Project: sen48/SemanticCore
def plot_clustered_graph(dist_matrix, labels=None):
    plt.close('all')
    plt.figure(1)
    plt.clf()
    n_clusters = max(labels) + 1
    print('n_clusters = {}'.format(n_clusters))
    g_g = nx.Graph()
    for k in range(0, n_clusters):
        class_members = labels == k
        class_dist = dist_matrix[class_members].T[class_members]
        g = nx.from_numpy_matrix(class_dist)
        g_g = nx.disjoint_union(g_g, g)

    # color nodes the same in each connected subgraph
    for g in nx.connected_component_subgraphs(g_g):
        c = [random.random()] * nx.number_of_nodes(g)  # random color...
        nx.draw(g,
                node_size=40,
                node_color=c,
                vmin=0.0,
                vmax=1.0,
                with_labels=False)
    plt.savefig("atlas.png", dpi=75)
    plt.show()
Code Example #14
def draw_network_by_years(df, start_year, end_year, trim):

    """ Constructs and draws the co-word networks for the years
    Parameters
    -----------------------------------------
    df: WoS references
    start_year:
    end_year:
    trim: degree of nodes to include in the graph

    Returns
    ----------------------------------
    coword networkx object
    """

    df_sub = df[(df.PY > start_year) & (df.PY <= end_year)]
    keys = keyword_counts(df_sub)

    print('Calculating co-word matrix')
    coword_df = coword_matrix(df_sub, keys.keys())

    coword_array = coword_df.as_matrix()
    np.fill_diagonal(coword_array, 0)
    coword_net  = nx.from_numpy_matrix(coword_array)
    col_names = coword_df.columns.tolist()
    labels = {col_names.index(l):l for l in col_names}
    nx.set_node_attributes(coword_net, 'keyword', labels)
    nx.set_node_attributes(coword_net, 'between_central', nx.betweenness_centrality(coword_net))
    if trim > 0:
        coword_net = trim_nodes(coword_net, trim)
        labels = {n:labels[n] for n in coword_net.nodes()}

    return coword_net
Code Example #15
File: grandom.py Project: liupenggl/dpr
def r_perturbR(g,R):
    '''Random perturbation with per-edge (variable) parameters'''
    A=nx.to_scipy_sparse_matrix(g)
    B=sparse.triu(A).toarray()
    #print B
    n=len(g)
    i = 0
    ts=0

    while i<n:
        j=i+1
        while j<n:
            if(B[i,j]==1):
                if R[i,j]<1:
                    B[i,j] = stats.bernoulli.rvs(R[i,j])  # R[i,j]: success probability of the Bernoulli trial
                else:
                    B[i, j] = stats.bernoulli.rvs(1)  # always 1; could be dropped
                ts=ts + 1
                #print "+",ts, ":", i, ",", j, ",", B[i, j]
            else:
                if R[i,j]<1:
                    B[i,j] = stats.bernoulli.rvs(R[i,j])  # R[i,j]: success probability of the Bernoulli trial
                else:
                    B[i, j] = stats.bernoulli.rvs(0)  # always 0; could be dropped
                ts=ts + 1
                #print "-",ts, ":", i, ",", j, ",", B[i, j]
            j = j + 1
        i=i+1

    return nx.from_numpy_matrix(B,create_using=nx.Graph())  # rebuild and return a Graph-type object
Code Example #16
def atoms_to_nxgraph(atoms, cutoff):
    ni, nj = neighbour_list('ij', atoms, cutoff)
    adjacency_matrix = np.zeros((len(atoms), len(atoms))).astype(np.int)
    for i, j in zip(ni, nj):
        adjacency_matrix[i,j] = 1
    graph = nx.from_numpy_matrix(np.array(adjacency_matrix))
    return graph
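A hypothetical usage sketch, assuming neighbour_list comes from matscipy.neighbours (ase.neighborlist.neighbor_list has the same call shape):

from ase.build import molecule

water = molecule('H2O')
graph = atoms_to_nxgraph(water, cutoff=1.2)  # O-H bonds are ~0.96 Angstrom
print(graph.number_of_nodes(), graph.number_of_edges())  # 3 nodes, 2 edges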
Code Example #17
File: grandom.py Project: liupenggl/dpr
def r_perturbSa(g,p=None):
    '''Random perturbation with a fixed parameter; p is the success probability of the Bernoulli trial'''
    A=nx.to_scipy_sparse_matrix(g)
    B=sparse.triu(A).toarray()
    #print B
    n=len(g)
    e_num=len(g.edges())  # number of edges present in the graph

    q = e_num * (1 - p) / ((n * (n - 1)) / 2 - e_num)
    #print q
    i = 0
    ts=0
    listp=stats.bernoulli.rvs(p,size=e_num)
    listp=listp.tolist()
    listq=stats.bernoulli.rvs(q,size=(n * (n - 1)) / 2 - e_num)
    listq=listq.tolist()

    while i<n:
        j=i+1  # skip the zeros on the diagonal
        while j<n:
            if(B[i,j]==1):
                B[i,j] = listp.pop()  # p: success probability of the Bernoulli trial
                #ts=ts + 1
                # print "+",ts, ":", i, ",", j, ",", B[i, j]
            else:
                B[i,j] = listq.pop()  # q: success probability of the Bernoulli trial
                #ts=ts + 1
                # print "-",ts, ":", i, ",", j, ",", B[i, j]
            j = j + 1
        i=i+1

    return nx.from_numpy_matrix(B,create_using=nx.Graph())  # rebuild and return a Graph-type object
Code Example #18
File: util.py Project: maybefeicun/TextRank4ZH
def sort_sentences(sentences, words, sim_func = get_similarity, pagerank_config = {'alpha': 0.85,}):
    """将句子按照关键程度从大到小排序

    Keyword arguments:
    sentences         --  列表,元素是句子
    words             --  二维列表,子列表和sentences中的句子对应,子列表由单词组成
    sim_func          --  计算两个句子的相似性,参数是两个由单词组成的列表
    pagerank_config   --  pagerank的设置
    """
    sorted_sentences = []
    _source = words
    sentences_num = len(_source)        
    graph = np.zeros((sentences_num, sentences_num))
    
    for x in xrange(sentences_num):
        for y in xrange(x, sentences_num):
            similarity = sim_func( _source[x], _source[y] )
            graph[x, y] = similarity
            graph[y, x] = similarity
            
    nx_graph = nx.from_numpy_matrix(graph)
    scores = nx.pagerank(nx_graph, **pagerank_config)              # this is a dict
    sorted_scores = sorted(scores.items(), key = lambda item: item[1], reverse=True)

    for index, score in sorted_scores:
        item = AttrDict(index=index, sentence=sentences[index], weight=score)
        sorted_sentences.append(item)

    return sorted_sentences
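A minimal sketch of the ranking core, with a hypothetical similarity matrix standing in for the sim_func output:

import numpy as np
import networkx as nx

sim = np.array([[0.0, 0.3, 0.1],
                [0.3, 0.0, 0.6],
                [0.1, 0.6, 0.0]])
scores = nx.pagerank(nx.from_numpy_matrix(sim), alpha=0.85)
print(sorted(scores.items(), key=lambda kv: kv[1], reverse=True))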
Code Example #19
def GetGraphFromGobnilpFile(gobnilpResult):
    matrix = ""
    with open(gobnilpResult, "r") as gobnilpResultFile:
        for line in gobnilpResultFile:
            matrix += line.strip('\n') + "; "
    matrix = matrix[:-2]
    return networkx.from_numpy_matrix(numpy.matrix(matrix), create_using=networkx.DiGraph())
Code Example #20
File: Util.py Project: weka-lishihui/TextAnalysis
def sortSentences(sentences, words, sim_func = getSimilarity, pagerank_config = {'alpha': 0.85,}):
    '''
    :param sentences: list of sentences whose weights will be computed
    :param words: word list for each sentence in sentences; a two-dimensional list
    :param sim_func: name of the function used to compute the similarity of two sentences
    :param pagerank_config:
    :return:
    '''
    sortedSentences = []
    _source = words
    sentencesNum = len(_source)   # size of the graph
    graph = np.zeros((sentencesNum, sentencesNum))

    for x in xrange(sentencesNum):
        for y in xrange(x, sentencesNum):
            similarity = sim_func( _source[x], _source[y] )
            graph[x, y] = similarity
            graph[y, x] = similarity

    nx_graph = nx.from_numpy_matrix(graph)
    scores = nx.pagerank(nx_graph, **pagerank_config)              # this is a dict
    sorted_scores = sorted(scores.items(), key = lambda item: item[1], reverse=True)

    for index, score in sorted_scores:
        item = AttrDict(sentence=sentences[index], weight=score)
        sortedSentences.append(item)

    return sortedSentences
Code Example #21
File: AdjMatrix_Analisys.py Project: Joan93/BigData
def run_main(file):

    NumberOfStations=465
    print file
    adjmatrix = np.loadtxt(file,delimiter=' ',dtype=np.dtype('int32'))

    # for i in range (0,NumberOfStations):
    #     if(adjmatrix[i,i]==1):
    #         print "posicion: ["+str(i)+","+str(i)+"]"


    g = nx.from_numpy_matrix(adjmatrix, create_using = nx.MultiGraph())
    degree = g.degree()
    density = nx.density(g)
    degree_centrality = nx.degree_centrality(g)
    closeness_centrality = nx.closeness_centrality(g)
    betweenness_centrality = nx.betweenness_centrality(g)

    print degree
    print density
    print degree_centrality
    print closeness_centrality
    print betweenness_centrality
    #nx.draw(g)
#    np.savetxt(OutputFile, Matrix, delimiter=' ',newline='\n',fmt='%i')
Code Example #22
    def __init__(self, graph, communities=None):
        """ initialize partition of graph, with optional communities

        Parameters
        ----------
        graph : networkx graph
        communities : list of sets, optional
            a list of sets with nodes in each set
            if communities is None, will initialize with
            one community per node

        Returns
        -------
        part : WeightedPartition object
        """
        # assert graph has edge weights, and no negative weights
        mat = nx.adjacency_matrix(graph).todense()
        if mat.min() < 0:
            raise ValueError("Graph has invalid negative weights")

        self.graph = nx.from_numpy_matrix(mat)
        if communities is None:
            self._communities = self._init_communities_from_nodes()
        else:
            self.set_communities(communities)
        self.total_edge_weight = graph.size(weight="weight")
        self.degrees = graph.degree(weight="weight")
Code Example #23
File: fragments.py Project: gratelpy/gratelpy
def get_all_substance_combinations_with_cycles(alpha, beta):
    try:
        import numpy
        alpha = numpy.array(alpha)
        beta = numpy.array(beta)
    except ImportError:
        print('This method requires that alpha and beta are NumPy arrays. '
              'NumPy does not appear to be installed. Please install NumPy.')
        raise

    # alpha, beta are stoichiometry matrices as used throughout code

    # number of reactions = number of columns of alpha
    no_rxn = alpha.shape[1]
    # number of substances = number of rows of alpha
    no_sub = alpha.shape[0]

    # check that alpha and beta have matching shapes
    if no_rxn != beta.shape[1] or no_sub != beta.shape[0]:
        raise ValueError('alpha and beta must have the same shape')

    # get substance adjacency matrix
    subs_adj = get_substance_adjacency(alpha, beta)

    # get directed substance graph
    subs_G = nx.from_numpy_matrix(subs_adj, create_using=nx.DiGraph())

    # get cycles in substance graph
    subs_cycles = nx.simple_cycles(subs_G)
    # remove substance index repetitions
    for c_i in range(len(subs_cycles)):
        subs_cycles[c_i] = list(set(subs_cycles[c_i]))
Code Example #24
File: code.py Project: kirk86/Task-1
def edge_betweeness_centrality(X):
    """
    based on networkx function: edge_betweenness_centrality
    """
    XX = np.zeros(X.shape)
    for i, value in enumerate(X):
        adj_mat = value.reshape((int(np.sqrt(len(value))), -1))
        adj_mat = (adj_mat - np.min(adj_mat)) / (np.max(adj_mat) - np.min(adj_mat))
        adj_mat = 1 - adj_mat

#        th = np.mean(adj_mat) + 0.1
#        adj_mat = np.where(adj_mat < th, adj_mat, 0.)

        percent, th, adj_mat, triu = percentage_removed(adj_mat, 0.43) # 43 #63 #73
        print("percent = {0}, threshold position = {1}, threshold = {2}\n".format(percent, th, triu[th]))

        g = nx.from_numpy_matrix(adj_mat)
        print "Graph Nodes = {0}, Graph Edges = {1} ".format(g.number_of_nodes(), g.number_of_edges())
        print "\nEdge kept ratio, {0}".format(float(g.number_of_edges())/((g.number_of_nodes()*(g.number_of_nodes()-1))/2))

        bet_cent = nx.edge_betweenness_centrality(g, weight = 'weight', normalized = True)
        edge_cent = np.zeros(adj_mat.shape)

        for k in bet_cent:
            edge_cent[k[0],k[1]] = bet_cent[k]
        XX[i] = edge_cent.reshape(-1)
        print "graph {0} => mean {1}, min {2}, max {3}".format(i, np.mean(XX[i]), np.min(XX[i]), np.max(XX[i]))

    return XX
Code Example #25
File: ex3_em.py Project: maagaard/dmup
def coauth():
	authors = pd.read_csv('coauthors.csv', header=None, index_col=False, sep='\t')
	author_counts = pd.read_csv('coauthors.csv', header=0, index_col=False, sep='\t')
	matrix = np.matrix(authors.as_matrix())
	matrix_counts = np.matrix(author_counts.as_matrix())
	# print matrix
	# print authors
	# print matrix[0,1]

	author_dict = {}
	sums = []

	for row in matrix_counts[0:,1:]:
		sums.append(sum(row))
		
	count = 0

	for row in matrix:
		count += 1
		# author_dict[row[0]] = 
		# for author_dict[row[0]] = sum(row[0,1:])

	max_index = sums.index(max(sums))

	# print max_index
	# print matrix[max_index+1]
	# print sum(matrix_counts[max_index,1:])


	# G = nx.Graph()
	print matrix_counts[0:,1:]
	dt=[('weight',int),('cost',int)]
	A = np.matrix(matrix_counts[0:,1:], dt)

	G=nx.from_numpy_matrix(A)
Code Example #26
File: code.py Project: kirk86/Task-1
def node_closeness_centrality(X):
    """
    based on networkx function: closeness_centrality
    """
    XX = np.zeros((X.shape[0], int(np.sqrt(X.shape[1]))))
    for i, value in enumerate(X):
        adj_mat = value.reshape((int(np.sqrt(len(value))), -1))
        adj_mat = (adj_mat - np.min(adj_mat)) / (np.max(adj_mat) - np.min(adj_mat))
        adj_mat = 1 - adj_mat

#        th = np.mean(adj_mat) - 0.23
#        adj_mat = np.where(adj_mat < th, adj_mat, 0.)

        percent, th, adj_mat, triu = percentage_removed(adj_mat, 0.27) # in this context the percentage
        print("percent = {0}, threshold position = {1}, threshold = {2}\n".format(percent, th, triu[th]))

        g = nx.from_numpy_matrix(adj_mat)
        print "Graph Nodes = {0}, Graph Edges = {1} ".format(g.number_of_nodes(), g.number_of_edges())
        print "\nEdge kept ratio, {0}".format(float(g.number_of_edges())/((g.number_of_nodes()*(g.number_of_nodes()-1))/2))

        deg_cent = nx.closeness_centrality(g, normalized=True)
        node_cent = np.zeros(g.number_of_nodes())

        for k in deg_cent:
            node_cent[k] = deg_cent[k]
        XX[i] = node_cent
        print "graph {0} => mean {1}, min {2}, max {3}".format(i, np.mean(XX[i]), np.min(XX[i]), np.max(XX[i]))

    return XX
Code Example #27
File: code.py Project: kirk86/Task-1
def node_current_flow_closeness_centrality(X):
    """
    based on networkx function: current_flow_closeness_centrality
    """
    XX = np.zeros((X.shape[0], int(np.sqrt(X.shape[1]))))
    for i, value in enumerate(X):
        adj_mat = value.reshape((int(np.sqrt(len(value))), -1))
        adj_mat = (adj_mat - np.min(adj_mat)) / (np.max(adj_mat) - np.min(adj_mat))
        adj_mat = 1 - adj_mat

#        th = np.mean(adj_mat) - 0.05
#        adj_mat = np.where(adj_mat < th, adj_mat, 0.)

        percent, th, adj_mat, triu = percentage_removed(adj_mat, 0.64) #74
        print("percent = {0}, threshold position = {1}, threshold = {2}\n".format(percent, th, triu[th]))

        g = nx.from_numpy_matrix(adj_mat)
        print "Graph Nodes = {0}, Graph Edges = {1} ".format(g.number_of_nodes(), g.number_of_edges())
        print "\nEdge kept ratio, {0}".format(float(g.number_of_edges())/((g.number_of_nodes()*(g.number_of_nodes()-1))/2))

        deg_cent = nx.current_flow_closeness_centrality(g)
        node_cent = np.zeros(g.number_of_nodes())

        for k in deg_cent:
            node_cent[k] = deg_cent[k]
        XX[i] = node_cent
        print "graph {0} => mean {1}, min {2}, max {3}".format(i, np.mean(XX[i]), np.min(XX[i]), np.max(XX[i]))
#    XX = XX*100
    ss = StandardScaler()
    XX = ss.fit_transform(XX.T).T

    return XX
Code Example #28
File: test_util.py Project: denfromufa/py2cytoscape
    def test_networkx_matrix(self):
        print('\n---------- Matrix Test Start -----------\n')

        g = nx.barabasi_albert_graph(30, 2)
        nodes = g.nodes()
        edges = g.edges()
        print(edges)

        mx1 = nx.adjacency_matrix(g)
        fp = tempfile.NamedTemporaryFile()
        file_name = fp.name
        sp.savetxt(file_name, mx1.toarray(), fmt='%d')

        # Load it back to matrix
        mx2 = sp.loadtxt(file_name)
        fp.close()

        g2 = nx.from_numpy_matrix(mx2)
        cyjs_g = util.from_networkx(g2)

        #print(json.dumps(cyjs_g, indent=4))

        self.assertIsNotNone(cyjs_g)
        self.assertIsNotNone(cyjs_g['data'])
        self.assertEqual(len(nodes), len(cyjs_g['elements']['nodes']))
        self.assertEqual(len(edges), len(cyjs_g['elements']['edges']))

        # Make sure all edges are reproduced
        print(set(edges))
        diff = compare_edge_sets(set(edges), cyjs_g['elements']['edges'])
        self.assertEqual(0, len(diff))
Code Example #29
    def clustering_function_mean_shift(data):
        def mean_shift(data):
            K = number_of_points  # n is the number of points
            L = number_of_dimensions   # d is the number of dimensions.
            k = number_of_neighbors # number of neighbors
            f = glasslab_cluster.cluster.FAMS(data, seed = 100) #FAMS Fast Adaptive Mean Shift
            pilot = f.RunFAMS(K, L, k)
            modes = f.GetModes()
            umodes = glasslab_cluster.utils.uniquerows(modes)
            labels = numpy.zeros(modes.shape[0])
            for i, m in enumerate(umodes):
                labels[numpy.all(modes == m, axis = 1)] = i
            return umodes, labels, pilot
        means, sub_labels, pilot = mean_shift(data)
        print 'means.shape' + str(means.shape)
        distance_matrix = scipy.spatial.distance.pdist(means)
        print "distance matrix min max:", distance_matrix.min(), distance_matrix.max()
        distance_matrix[distance_matrix > threshold] = 0
        H = networkx.from_numpy_matrix(scipy.spatial.distance.squareform(distance_matrix))
        connected_components = networkx.connected_components(H)
        print len(connected_components), "components:", map(len, connected_components)
        def merge_cluster(pattern, lbl_composites):
            try:
                pattern.shape #test if pattern is a NUMPY array, convert if list
            except:
                pattern = numpy.array(pattern)
            for i, composite in enumerate(lbl_composites):
                for label in composite:
                    if label != i:
                        pattern[numpy.where(pattern == label)] = i
            return pattern

        labels = merge_cluster(sub_labels, connected_components)  # modify in order to merge means ...
        return labels
Code Example #30
File: sleuth_out.py Project: eproche/mrSleuthy
def draw_spring_graph(results, thresh, sep):
	"""Draw graph in spring layoout, 
	force-directed algorithm puts similar image nodes close to eachother
	Assumes symmetric split with the two categories (artificial/natural + indoor/outdoor for ComCon)"""
	img_colors = ['green'] * (sep/2) #add or modify indices to get new colors 
	img_colors2 = ['red'] * (sep/2)
	sce_colors = ['blue'] * ((len(results) - sep)/2)
	sce_colors2 = ['yellow'] * ((len(results) - sep)/2)
	node_colors = img_colors + img_colors2 + sce_colors +sce_colors2
	res2 = np.copy(results)
	low_val = res2 < thresh
	res2[low_val] = 0
	graph = nx.from_numpy_matrix(res2)
	pos = nx.spring_layout(graph)
	nx.draw_networkx_nodes(graph, pos=pos, node_color = node_colors)
	nx.draw_networkx_edges(graph, pos=pos)
	# xs = [] # Add labels (looks pretty messy with large graph)
	# ys = []
	# for i in range(len(pos)):
	# 	xs.append(pos[i][0])
	# 	ys.append(pos[i][1])
	# for label, x, y, in zip(names, xs, ys):
	# 	plt.annotate(
	# 	label,
	# 	xy = (x, y), xytext = (-10, 35),
	# 	textcoords = 'offset points', ha = 'right', va = 'bottom',
	# 	bbox = dict(boxstyle = 'round,pad=0.5', fc = 'green', alpha = 0.7),
		# arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
	plt.show()
Code Example #31
 def make_pytorch_graph(self, np_distance_matrix, verts):
     G = nx.from_numpy_matrix(np_distance_matrix)
     disp = self.count_dispersion(verts)
     G = from_networkx(G)
     G = self.set_features_to_vertices(G, verts)
     return G, disp
Code Example #32
    print(cnt_avg_g1)

    dir = 'seed_' + str(np.int(i))
    h5file.create_group(dir)
    h5file.create_dataset(dir + '/adjacency_matrix',
                          data=adj_g1,
                          compression='gzip',
                          compression_opts=9)
    h5file.create_dataset(dir + '/average_count', data=cnt_avg_g1)
    h5file.flush()

h5file.flush()
h5file.close()

# %%
input_file = 'GA_seed_13_0.05_50.h5'
h5file = h5py.File(input_file, "r")

folder = 'seed_' + str(448)
adj1 = h5file[folder + "/adjacency_matrix"].value

g1 = nx.from_numpy_matrix(adj1)
nx.draw(g1, node_size=10, alpha=0.5)
#plt.savefig('torus.png')
#plt.savefig('disordered.png')
plt.show()

# %%
h5file.close()
# %%
Code Example #33
def log_graph(adj, batch_num_nodes, writer, epoch, batch_idx, assign_tensor=None):
    plt.switch_backend('agg')
    fig = plt.figure(figsize=(8,6), dpi=300)

    for i in range(len(batch_idx)):
        ax = plt.subplot(2, 2, i+1)
        num_nodes = batch_num_nodes[batch_idx[i]]
        adj_matrix = adj[batch_idx[i], :num_nodes, :num_nodes].cpu().data.numpy()
        G = nx.from_numpy_matrix(adj_matrix)
        nx.draw(G, pos=nx.spring_layout(G), with_labels=True, node_color='#336699',
                edge_color='grey', width=0.5, node_size=300,
                alpha=0.7)
        ax.xaxis.set_visible(False)

    plt.tight_layout()
    fig.canvas.draw()

    data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    writer.add_image('graphs', data, epoch)

    # log a label-less version
    #fig = plt.figure(figsize=(8,6), dpi=300)
    #for i in range(len(batch_idx)):
    #    ax = plt.subplot(2, 2, i+1)
    #    num_nodes = batch_num_nodes[batch_idx[i]]
    #    adj_matrix = adj[batch_idx[i], :num_nodes, :num_nodes].cpu().data.numpy()
    #    G = nx.from_numpy_matrix(adj_matrix)
    #    nx.draw(G, pos=nx.spring_layout(G), with_labels=False, node_color='#336699',
    #            edge_color='grey', width=0.5, node_size=25,
    #            alpha=0.8)

    #plt.tight_layout()
    #fig.canvas.draw()

    #data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
    #data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    #writer.add_image('graphs_no_label', data, epoch)

    # colored according to assignment
    assignment = assign_tensor.cpu().data.numpy()
    fig = plt.figure(figsize=(8,6), dpi=300)

    num_clusters = assignment.shape[2]
    all_colors = np.array(range(num_clusters))

    for i in range(len(batch_idx)):
        ax = plt.subplot(2, 2, i+1)
        num_nodes = batch_num_nodes[batch_idx[i]]
        adj_matrix = adj[batch_idx[i], :num_nodes, :num_nodes].cpu().data.numpy()

        label = np.argmax(assignment[batch_idx[i]], axis=1).astype(int)
        label = label[: batch_num_nodes[batch_idx[i]]]
        node_colors = all_colors[label]

        G = nx.from_numpy_matrix(adj_matrix)
        nx.draw(G, pos=nx.spring_layout(G), with_labels=False, node_color=node_colors,
                edge_color='grey', width=0.4, node_size=50, cmap=plt.get_cmap('Set1'),
                vmin=0, vmax=num_clusters-1,
                alpha=0.8)

    plt.tight_layout()
    fig.canvas.draw()

    data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    writer.add_image('graphs_colored', data, epoch)
Code Example #34
from networkx import from_numpy_matrix, set_node_attributes, relabel_nodes, DiGraph
from numpy import matrix
from data import DISTANCES, DEMANDS
import sys

sys.path.append("../../")
from vrpy import VehicleRoutingProblem

# Transform distance matrix to DiGraph
A = matrix(DISTANCES, dtype=[("cost", int)])
G = from_numpy_matrix(A, create_using=DiGraph())

# Set demands
set_node_attributes(G, values=DEMANDS, name="demand")

# Relabel depot
G = relabel_nodes(G, {0: "Source", 17: "Sink"})

if __name__ == "__main__":

    prob = VehicleRoutingProblem(G, load_capacity=15)
    prob.solve()
    print(prob.best_value)
    print(prob.best_routes)
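A small sketch of the structured-dtype trick used above, adapted from the from_numpy_matrix docstring: with a compound dtype, the field names become edge-attribute keys instead of the default 'weight':

from numpy import matrix
from networkx import from_numpy_matrix

dt = [("weight", float), ("cost", int)]
B = matrix([[(1.0, 2)]], dtype=dt)
H = from_numpy_matrix(B)
print(H[0][0]["cost"])  # 2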
Code Example #35
File: All_calc_data1.py Project: dugzzuli/taskPython
def draw_net(net,i):
    graph = nx.from_numpy_matrix(net)
    # nx.draw(graph, pos=nx.random_layout(graph), node_color='b', edge_color='r', with_labels=True, font_size=18,
    #         node_size=20)
    nx.write_pajek(graph,i+'Pajek.net')
Code Example #36
File: main_network.py Project: Leo02016/Miscgan
def save_graph(args, R, edges):
    filename = args.filename
    A = np.load('{}/org_network.npy'.format(args.output_dir))
    network_B = np.load('{}/{}.npy'.format(args.output_dir, filename))
    # netgan = np.load('{}/netgan_network.npy'.format(args.output_dir))[1]
    tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True
    sess = tf.Session(config=tfconfig)
    n = R[0, 0].shape[1]
    # if netgan.shape[0] < n:
    #     # Net = np.zeros((n, n))
    #     R[0, 0] = R[0, 0][:netgan.shape[0], :netgan.shape[1]]
    # netgan_copy = netgan
    # netgan_disp = [netgan]
    # for i in range(1, args.layer):
    #     adjacent_matrix = tf.placeholder(tf.float32, shape=netgan_copy.shape)
    #     R_matrix = tf.placeholder(tf.float32, shape=R[i - 1, 0].shape)
    #     netgan_copy = sess.run(tf.matmul(tf.matmul(R_matrix, adjacent_matrix), tf.transpose(R_matrix)),
    #                       feed_dict={R_matrix: R[i - 1, 0].todense(), adjacent_matrix: netgan_copy})
    #     DD = np.sort(netgan_copy.flatten())[::-1]
    #     threshold = DD[edges[0, i]]
    #     network_C = np.array([[0 if netgan_copy[i, j] < threshold else 1 for i in range(netgan_copy.shape[0])] for j in
    #                           range(netgan_copy.shape[1])], dtype=np.int8)
    #     netgan_disp.append(network_C)
    for l in range(5):
        # netgan_G = nx.from_numpy_matrix(netgan_disp[l])
        # netgan_pos = nx.spring_layout(netgan_G)

        G = nx.from_numpy_matrix(A)
        pos = nx.spring_layout(G)
        gen_G = nx.from_numpy_matrix(network_B)
        gen_pos = nx.spring_layout(gen_G)
        nx.draw_networkx_nodes(gen_G,
                               gen_pos,
                               node_size=10,
                               edge_vmin=0.0,
                               edge_vmax=0.1,
                               node_color='blue',
                               alpha=0.8)
        # nx.draw_networkx_edges(gen_G, gen_pos,  alpha=0.7)
        plt.savefig('./{}/generated_graph_l_{}_without_edge'.format(
            args.output_dir, l + args.starting_layer),
                    dpi=1000)
        plt.close()

        nx.draw_networkx_nodes(G,
                               pos,
                               node_size=10,
                               edge_vmin=0.0,
                               edge_vmax=0.1,
                               node_color='blue',
                               alpha=0.8)
        # nx.draw_networkx_edges(G, pos, alpha=0.7)
        plt.savefig('./{}/original_graph_l_{}_without_edge'.format(
            args.output_dir, l + args.starting_layer),
                    dpi=1000)
        plt.close()

        nx.draw_networkx_nodes(gen_G,
                               gen_pos,
                               node_size=10,
                               edge_vmin=0.0,
                               edge_vmax=0.1,
                               node_color='blue',
                               alpha=0.8)
        nx.draw_networkx_edges(gen_G, gen_pos, alpha=0.1)
        plt.savefig('./{}/generated_graph_l_{}_with_edge'.format(
            args.output_dir, l + args.starting_layer),
                    dpi=1000)
        plt.close()

        nx.draw_networkx_nodes(G,
                               pos,
                               node_size=10,
                               edge_vmin=0.0,
                               edge_vmax=0.1,
                               node_color='blue',
                               alpha=0.8)
        nx.draw_networkx_edges(G, pos, alpha=0.1)
        plt.savefig('./{}/original_graph_l_{}_with_edge'.format(
            args.output_dir, l + args.starting_layer),
                    dpi=1000)
        plt.close()
Code Example #37
def to_networkx_graph(data, create_using=None, multigraph_input=False):
    """Make a NetworkX graph from a known data structure.

    The preferred way to call this is automatically
    from the class constructor

    >>> d = {0: {1: {'weight':1}}} # dict-of-dicts single edge (0,1)
    >>> G = nx.Graph(d)

    instead of the equivalent

    >>> G = nx.from_dict_of_dicts(d)

    Parameters
    ----------
    data : object to be converted

        Current known types are:
         any NetworkX graph
         dict-of-dicts
         dict-of-lists
         container (e.g. set, list, tuple) of edges
         iterator (e.g. itertools.chain) that produces edges
         generator of edges
         Pandas DataFrame (row per edge)
         numpy matrix
         numpy ndarray
         scipy sparse matrix
         pygraphviz agraph

    create_using : NetworkX graph constructor, optional (default=nx.Graph)
        Graph type to create. If graph instance, then cleared before populated.

    multigraph_input : bool (default False)
        If True and  data is a dict_of_dicts,
        try to create a multigraph assuming dict_of_dict_of_lists.
        If data and create_using are both multigraphs then create
        a multigraph from a multigraph.

    """
    # NX graph
    if hasattr(data, "adj"):
        try:
            result = from_dict_of_dicts(
                data.adj,
                create_using=create_using,
                multigraph_input=data.is_multigraph(),
            )
            if hasattr(data, "graph"):  # data.graph should be dict-like
                result.graph.update(data.graph)
            if hasattr(data, "nodes"):  # data.nodes should be dict-like
                # result.add_node_from(data.nodes.items()) possible but
                # for custom node_attr_dict_factory which may be hashable
                # will be unexpected behavior
                for n, dd in data.nodes.items():
                    result._node[n].update(dd)
            return result
        except Exception as e:
            raise nx.NetworkXError(
                "Input is not a correct NetworkX graph.") from e

    # pygraphviz  agraph
    if hasattr(data, "is_strict"):
        try:
            return nx.nx_agraph.from_agraph(data, create_using=create_using)
        except Exception as e:
            raise nx.NetworkXError(
                "Input is not a correct pygraphviz graph.") from e

    # dict of dicts/lists
    if isinstance(data, dict):
        try:
            return from_dict_of_dicts(data,
                                      create_using=create_using,
                                      multigraph_input=multigraph_input)
        except Exception:
            try:
                return from_dict_of_lists(data, create_using=create_using)
            except Exception as e:
                raise TypeError("Input is not known type.") from e

    # Pandas DataFrame
    try:
        import pandas as pd

        if isinstance(data, pd.DataFrame):
            if data.shape[0] == data.shape[1]:
                try:
                    return nx.from_pandas_adjacency(data,
                                                    create_using=create_using)
                except Exception as e:
                    msg = "Input is not a correct Pandas DataFrame adjacency matrix."
                    raise nx.NetworkXError(msg) from e
            else:
                try:
                    return nx.from_pandas_edgelist(data,
                                                   edge_attr=True,
                                                   create_using=create_using)
                except Exception as e:
                    msg = "Input is not a correct Pandas DataFrame edge-list."
                    raise nx.NetworkXError(msg) from e
    except ImportError:
        msg = "pandas not found, skipping conversion test."
        warnings.warn(msg, ImportWarning)

    # numpy matrix or ndarray
    try:
        import numpy

        if isinstance(data, (numpy.matrix, numpy.ndarray)):
            try:
                return nx.from_numpy_matrix(data, create_using=create_using)
            except Exception as e:
                raise nx.NetworkXError(
                    "Input is not a correct numpy matrix or array.") from e
    except ImportError:
        warnings.warn("numpy not found, skipping conversion test.",
                      ImportWarning)

    # scipy sparse matrix - any format
    try:
        import scipy

        if hasattr(data, "format"):
            try:
                return nx.from_scipy_sparse_matrix(data,
                                                   create_using=create_using)
            except Exception as e:
                raise nx.NetworkXError(
                    "Input is not a correct scipy sparse matrix type.") from e
    except ImportError:
        warnings.warn("scipy not found, skipping conversion test.",
                      ImportWarning)

    # Note: most general check - should remain last in order of execution
    # Includes containers (e.g. list, set, dict, etc.), generators, and
    # iterators (e.g. itertools.chain) of edges

    if isinstance(data, (Collection, Generator, Iterator)):
        try:
            return from_edgelist(data, create_using=create_using)
        except Exception as e:
            raise nx.NetworkXError("Input is not a valid edge list") from e

    raise nx.NetworkXError("Input is not a known data type for conversion.")
Code Example #38
File: Graphing.py Project: mrakovic/Graph_lit_texts
unique_list = list(set(unique_list))

#unique_list=sorted(unique_list , key = len, reverse=True)[68:368] # prune list to get rid of very long entities

adj_mat = pd.DataFrame(0, index=unique_list, columns=unique_list)

for i in range(len(cumb_cooc)):
    for j in range(len(cumb_cooc[i])):
        for k in range(len(cumb_cooc[i])):
            adj_mat[cumb_cooc[i][j]][cumb_cooc[i][k]] += 1

### Make a basic graph
A = np.matrix(adj_mat)

G = nx.from_numpy_matrix(A)
labels = adj_mat.columns.values

### Create dictionary to map terms from node numbers
terms_dict = {}
for i in range(len(nx.nodes(G))):
    terms_dict[list(nx.nodes(G))[i]] = adj_mat.columns.values[i]

### Relabel nodes to terms
nx.relabel_nodes(G, mapping=terms_dict, copy=False)

### Prune the graph by removing nodes with very long names and very short names ###

G.remove_nodes_from(sorted(G.nodes, key=len, reverse=True)[0:68])
G.remove_nodes_from(
    sorted(G.nodes, key=len, reverse=True)[270:len(G.nodes) - 1])
Code Example #39
        array_for_adj[username_list_dict.get(username_list[parselist])][username_list_dict.get(Parenttweet_user)] = \
            array_for_adj[username_list_dict.get(username_list[parselist])][username_list_dict.get(Parenttweet_user)] + 1

# Transpose of the adjacency matrix as part of the spectral clustering algorithm.

Atranspose = np.transpose(adjacency_mat)
laplacian = np.subtract(degree_mat, adjacency_mat)

(eigval, eigvect) = np.linalg.eigh(laplacian)
eigval = np.array(eigval)
eigval = eigval.astype(int)

ei = np.argsort(eigval)
# Saving the eigen values into a text file.
np.savetxt("eigenvalues.csv", eigval, delimiter=" ")
G = nx.from_numpy_matrix(eigvect)
nx.draw_networkx(G,with_labels=True)

firstkmat = eigvect[ei[::-1][0:4]]
firstkmat = np.transpose(firstkmat)

# Clustering using kmeans++ and the number of clusters are chosen from the eigen value plot.
kmeans = KMeans(n_clusters=np.alen(firstkmat[0]), init='k-means++', max_iter=100, precompute_distances=True)

kmeans.fit(firstkmat)
labels = kmeans.predict(firstkmat)

clusters_dict = {}
def get_key(val):
    for key, value in username_list_dict.items():
         if val == value:
Code Example #40
def Nx(ts):
    return nx.from_numpy_matrix(mmread(InputDir + '/adjacency' + str(ts)).toarray())
Code Example #41
names2=[character[i] for i in values.argsort()]
print(names2)
values2=np.sort(values)
print(values2)
plt.bar(names2, values2)
plt.xticks(rotation=45)
plt.yticks(rotation=90)
plt.show()
"""
A = np.matrix(EdgesValues)
A = np.delete(A, 1, 0)
A = np.delete(A, 1, 1)
A = np.delete(A, 0, 0)
A = np.delete(A, 0, 1)
print(A)
G = nx.from_numpy_matrix(A, create_using=nx.MultiDiGraph())
##Edge calculation
colors = [A[x, y] for x, y in G.edges()]
##label design
labels = {}
print(character)
character.pop(0)
character.pop(0)
print(character)
for i, char in enumerate(character):
    labels[i] = char
fig = plt.figure()
nx.draw(G,
        pos=nx.nx_agraph.graphviz_layout(G),
        labels=labels,
        with_labels=True,
Code Example #42
def cmat(
    track_file,
    roi_file,
    resolution_network_file,
    matrix_name,
    matrix_mat_name,
    endpoint_name,
    intersections=False,
):
    """ Create the connection matrix for each resolution using fibers and ROIs. """
    import scipy.io as sio

    stats = {}
    iflogger.info("Running cmat function")
    # Identify the endpoints of each fiber
    en_fname = op.abspath(endpoint_name + "_endpoints.npy")
    en_fnamemm = op.abspath(endpoint_name + "_endpointsmm.npy")

    iflogger.info("Reading Trackvis file %s", track_file)
    fib, hdr = nb.trackvis.read(track_file, False)
    stats["orig_n_fib"] = len(fib)

    roi = nb.load(roi_file)
    # Preserve on-disk type unless scaled
    roiData = np.asanyarray(roi.dataobj)
    roiVoxelSize = roi.header.get_zooms()
    (endpoints, endpointsmm) = create_endpoints_array(fib, roiVoxelSize)

    # Output endpoint arrays
    iflogger.info("Saving endpoint array: %s", en_fname)
    np.save(en_fname, endpoints)
    iflogger.info("Saving endpoint array in mm: %s", en_fnamemm)
    np.save(en_fnamemm, endpointsmm)

    n = len(fib)
    iflogger.info("Number of fibers: %i", n)

    # Create empty fiber label array
    fiberlabels = np.zeros((n, 2))
    final_fiberlabels = []
    final_fibers_idx = []

    # Add node information from specified parcellation scheme
    path, name, ext = split_filename(resolution_network_file)
    if ext == ".pck":
        gp = nx.read_gpickle(resolution_network_file)
    elif ext == ".graphml":
        gp = nx.read_graphml(resolution_network_file)
    else:
        raise TypeError("Unable to read file:", resolution_network_file)
    nROIs = len(gp.nodes())

    # add node information from parcellation
    if "dn_position" in gp.nodes[list(gp.nodes())[0]]:
        G = gp.copy()
    else:
        G = nx.Graph()
        for u, d in gp.nodes(data=True):
            G.add_node(int(u), **d)
            # compute a position for the node based on the mean position of the
            # ROI in voxel coordinates (segmentation volume )
            xyz = tuple(
                np.mean(
                    np.where(
                        np.flipud(roiData) == int(d["dn_correspondence_id"])),
                    axis=1,
                ))
            G.nodes[int(u)]["dn_position"] = tuple([xyz[0], xyz[2], -xyz[1]])

    if intersections:
        iflogger.info("Filtering tractography from intersections")
        intersection_matrix, final_fiber_ids = create_allpoints_cmat(
            fib, roiData, roiVoxelSize, nROIs)
        finalfibers_fname = op.abspath(endpoint_name +
                                       "_intersections_streamline_final.trk")
        stats["intersections_n_fib"] = save_fibers(hdr, fib, finalfibers_fname,
                                                   final_fiber_ids)
        intersection_matrix = np.matrix(intersection_matrix)
        I = G.copy()
        H = nx.from_numpy_matrix(np.matrix(intersection_matrix))
        H = nx.relabel_nodes(
            H, lambda x: x + 1)  # relabel nodes so they start at 1
        I.add_weighted_edges_from(
            ((u, v, d["weight"]) for u, v, d in H.edges(data=True)))

    dis = 0
    for i in range(endpoints.shape[0]):

        # ROI start => ROI end
        try:
            startROI = int(roiData[endpoints[i, 0, 0], endpoints[i, 0, 1],
                                   endpoints[i, 0, 2]])
            endROI = int(roiData[endpoints[i, 1, 0], endpoints[i, 1, 1],
                                 endpoints[i, 1, 2]])
        except IndexError:
            iflogger.error(
                "AN INDEXERROR EXCEPTION OCCURED FOR FIBER %s. "
                "PLEASE CHECK ENDPOINT GENERATION",
                i,
            )
            break

        # Filter
        if startROI == 0 or endROI == 0:
            dis += 1
            fiberlabels[i, 0] = -1
            continue

        if startROI > nROIs or endROI > nROIs:
            iflogger.error(
                "Start or endpoint of fiber terminate in a voxel which is labeled higher"
            )
            iflogger.error(
                "than is expected by the parcellation node information.")
            iflogger.error("Start ROI: %i, End ROI: %i", startROI, endROI)
            iflogger.error("This needs bugfixing!")
            continue

        # Update fiber label
        # switch the rois in order to enforce startROI < endROI
        if endROI < startROI:
            tmp = startROI
            startROI = endROI
            endROI = tmp

        fiberlabels[i, 0] = startROI
        fiberlabels[i, 1] = endROI

        final_fiberlabels.append([startROI, endROI])
        final_fibers_idx.append(i)

        # Add edge to graph
        if G.has_edge(startROI,
                      endROI) and "fiblist" in G.edges[startROI, endROI]:
            G.edges[startROI, endROI]["fiblist"].append(i)
        else:
            G.add_edge(startROI, endROI, fiblist=[i])

    # create a final fiber length array
    finalfiberlength = []
    if intersections:
        final_fibers_indices = final_fiber_ids
    else:
        final_fibers_indices = final_fibers_idx

    for idx in final_fibers_indices:
        # compute length of fiber
        finalfiberlength.append(length(fib[idx][0]))

    # convert to array
    final_fiberlength_array = np.array(finalfiberlength)

    # make final fiber labels as array
    final_fiberlabels_array = np.array(final_fiberlabels, dtype=int)

    iflogger.info(
        "Found %i (%f percent out of %i fibers) fibers that start or "
        "terminate in a voxel which is not labeled. (orphans)",
        dis,
        dis * 100.0 / n,
        n,
    )
    iflogger.info("Valid fibers: %i (%f%%)", n - dis, 100 - dis * 100.0 / n)

    numfib = nx.Graph()
    numfib.add_nodes_from(G)
    fibmean = numfib.copy()
    fibmedian = numfib.copy()
    fibdev = numfib.copy()
    for u, v, d in G.edges(data=True):
        G.remove_edge(u, v)
        di = {}
        if "fiblist" in d:
            di["number_of_fibers"] = len(d["fiblist"])
            idx = np.where((final_fiberlabels_array[:, 0] == int(u))
                           & (final_fiberlabels_array[:, 1] == int(v)))[0]
            di["fiber_length_mean"] = float(
                np.mean(final_fiberlength_array[idx]))
            di["fiber_length_median"] = float(
                np.median(final_fiberlength_array[idx]))
            di["fiber_length_std"] = float(np.std(
                final_fiberlength_array[idx]))
        else:
            di["number_of_fibers"] = 0
            di["fiber_length_mean"] = 0
            di["fiber_length_median"] = 0
            di["fiber_length_std"] = 0
        if u != v:  # Fix for self-loop problem
            G.add_edge(u, v, **di)
            if "fiblist" in d:
                numfib.add_edge(u, v, weight=di["number_of_fibers"])
                fibmean.add_edge(u, v, weight=di["fiber_length_mean"])
                fibmedian.add_edge(u, v, weight=di["fiber_length_median"])
                fibdev.add_edge(u, v, weight=di["fiber_length_std"])

    iflogger.info("Writing network as %s", matrix_name)
    nx.write_gpickle(G, op.abspath(matrix_name))

    numfib_mlab = nx.to_numpy_matrix(numfib, dtype=int)
    numfib_dict = {"number_of_fibers": numfib_mlab}
    fibmean_mlab = nx.to_numpy_matrix(fibmean, dtype=np.float64)
    fibmean_dict = {"mean_fiber_length": fibmean_mlab}
    fibmedian_mlab = nx.to_numpy_matrix(fibmedian, dtype=np.float64)
    fibmedian_dict = {"median_fiber_length": fibmedian_mlab}
    fibdev_mlab = nx.to_numpy_matrix(fibdev, dtype=np.float64)
    fibdev_dict = {"fiber_length_std": fibdev_mlab}

    if intersections:
        path, name, ext = split_filename(matrix_name)
        intersection_matrix_name = op.abspath(name + "_intersections") + ext
        iflogger.info("Writing intersection network as %s",
                      intersection_matrix_name)
        nx.write_gpickle(I, intersection_matrix_name)

    path, name, ext = split_filename(matrix_mat_name)
    if not ext == ".mat":
        ext = ".mat"
        matrix_mat_name = matrix_mat_name + ext

    iflogger.info("Writing matlab matrix as %s", matrix_mat_name)
    sio.savemat(matrix_mat_name, numfib_dict)

    if intersections:
        intersect_dict = {"intersections": intersection_matrix}
        intersection_matrix_mat_name = op.abspath(name +
                                                  "_intersections") + ext
        iflogger.info("Writing intersection matrix as %s",
                      intersection_matrix_mat_name)
        sio.savemat(intersection_matrix_mat_name, intersect_dict)

    mean_fiber_length_matrix_name = op.abspath(name +
                                               "_mean_fiber_length") + ext
    iflogger.info("Writing matlab mean fiber length matrix as %s",
                  mean_fiber_length_matrix_name)
    sio.savemat(mean_fiber_length_matrix_name, fibmean_dict)

    median_fiber_length_matrix_name = op.abspath(name +
                                                 "_median_fiber_length") + ext
    iflogger.info(
        "Writing matlab median fiber length matrix as %s",
        median_fiber_length_matrix_name,
    )
    sio.savemat(median_fiber_length_matrix_name, fibmedian_dict)

    fiber_length_std_matrix_name = op.abspath(name + "_fiber_length_std") + ext
    iflogger.info(
        "Writing matlab fiber length deviation matrix as %s",
        fiber_length_std_matrix_name,
    )
    sio.savemat(fiber_length_std_matrix_name, fibdev_dict)

    fiberlengths_fname = op.abspath(endpoint_name + "_final_fiberslength.npy")
    iflogger.info("Storing final fiber length array as %s", fiberlengths_fname)
    np.save(fiberlengths_fname, final_fiberlength_array)

    fiberlabels_fname = op.abspath(endpoint_name + "_filtered_fiberslabel.npy")
    iflogger.info("Storing all fiber labels (with orphans) as %s",
                  fiberlabels_fname)
    np.save(fiberlabels_fname, np.array(fiberlabels, dtype=np.int32))

    fiberlabels_noorphans_fname = op.abspath(endpoint_name +
                                             "_final_fiberslabels.npy")
    iflogger.info("Storing final fiber labels (no orphans) as %s",
                  fiberlabels_noorphans_fname)
    np.save(fiberlabels_noorphans_fname, final_fiberlabels_array)

    iflogger.info("Filtering tractography - keeping only no orphan fibers")
    finalfibers_fname = op.abspath(endpoint_name + "_streamline_final.trk")
    stats["endpoint_n_fib"] = save_fibers(hdr, fib, finalfibers_fname,
                                          final_fibers_idx)
    stats["endpoints_percent"] = (float(stats["endpoint_n_fib"]) /
                                  float(stats["orig_n_fib"]) * 100)
    stats["intersections_percent"] = (float(stats["intersections_n_fib"]) /
                                      float(stats["orig_n_fib"]) * 100)

    out_stats_file = op.abspath(endpoint_name + "_statistics.mat")
    iflogger.info("Saving matrix creation statistics as %s", out_stats_file)
    sio.savemat(out_stats_file, stats)
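The per-edge bookkeeping above follows a common pattern: accumulate fiber indices per (startROI, endROI) pair, then reduce each list to summary edge weights. A minimal self-contained sketch of that pattern (hypothetical labels and lengths, not data from this pipeline):

import networkx as nx
import numpy as np

labels = [(1, 2), (1, 2), (2, 3)]       # (startROI, endROI) per fiber
lengths = np.array([10.0, 14.0, 7.0])   # hypothetical fiber lengths

G = nx.Graph()
for i, (u, v) in enumerate(labels):
    if G.has_edge(u, v):
        G[u][v]["fiblist"].append(i)
    else:
        G.add_edge(u, v, fiblist=[i])

for u, v, d in G.edges(data=True):
    idx = np.array(d["fiblist"])
    d["number_of_fibers"] = len(idx)
    d["fiber_length_mean"] = float(np.mean(lengths[idx]))

print(nx.to_numpy_matrix(G, weight="number_of_fibers"))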
コード例 #43
# Now when I create my network/graph, I can visualize it with...
# nx.draw(Graph, pos = pos_dict)
# plt.show()

#######################################################################################
# Now I tried to build the network/graph in a few different ways, the third is still
# not entirely correct... below are my three attempts with detailed explanation:
#######################################################################################

### Attempt 1: Simply build from 'adj_mat.txt'

A = np.genfromtxt('adj_mat.txt', delimiter=",",
                  skip_header=1)  # Load in adj_mat
adj_mat = A > 0  # my adj_mat has values that correspond to edge length attribute.
# These don't matter, we just want 1's
G1 = nx.from_numpy_matrix(adj_mat,
                          create_using=None)  # Generate graph from adj_mat

print(adj_mat)

#plt.figure(1)
#nx.draw(G1, pos=pos_dict)
#plt.show()

# ISSUE 1: No idea why the output is weird... this should work
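# A likely cause (hedged guess): nx.from_numpy_matrix labels nodes 0..n-1,
# while pos_dict (defined earlier in the post) may be keyed 1..n, so the
# drawing comes out scrambled. Relabeling the nodes would align the two:

#G1 = nx.relabel_nodes(G1, lambda x: x + 1)
#nx.draw(G1, pos=pos_dict)
#plt.show()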

#######################################################################################

### Attempt 2: Load in "connectivity list" as .txt file from Matlab
# 'conn_list.txt' : specifies the nodes that node_i is connected to,
#                   where node_i is given by the row number
# Ex:
コード例 #44
ファイル: write_graph.py プロジェクト: schmidtj/store
DATATYPE = "clipped"  # May use: "clipped" | "real", referring to whether or not a threshold was used

start_time = time.time()
print "Started: " + datetime.datetime.fromtimestamp(start_time).strftime(
    '%Y-%m-%d %H:%M:%S')
output_string = ""

# load output from parseCSV
distances = loadtxt(settings.OUTPUT + "\\" + DATATYPE + "_distances+SD" +
                    str(settings.NUM_STANDARD_DEVIATIONS) + ".dat")
csv_reader = csv.DictReader(
    open(settings.OUTPUT + "\\dict_data_selective_attributes.dat", "U"))

# Create a networkx graph
G = nx.from_numpy_matrix(distances)

# Add node data tags / attributes
fieldnames = csv_reader.fieldnames
counter = 0
for row in csv_reader:
    if row == "":
        continue
    for index in range(len(settings.ATTRIBUTE_STRING_KEYS)):
        G.node[counter][settings.ATTRIBUTE_STRING_KEYS[index][1]] = row[
            settings.ATTRIBUTE_STRING_KEYS[index][0]]
    counter += 1

# Add edge data, change color of edges above the average weight

# output_string += str(G.edges()) + "\n"
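The attribute loop above can also be written with nx.set_node_attributes; a hedged sketch, assuming networkx >= 2 and hypothetical field names:

import networkx as nx
import numpy as np

G = nx.from_numpy_matrix(np.array([[0.0, 1.2], [1.2, 0.0]]))
rows = [{'name': 'a'}, {'name': 'b'}]  # stand-in for the CSV rows
nx.set_node_attributes(G, {i: r['name'] for i, r in enumerate(rows)}, 'name')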
コード例 #45
        BRAIN_LABELS.append(row[0])
print('Read: ' + filename)

FUNC_IDS = []
filename = os.path.join(NOTEBOOK_INPUT_DIR, 'input/brain_functional_ids.txt')
with open(filename, 'r') as f:
    reader = csv.reader(f, delimiter=',')
    for row in reader:
        FUNC_IDS.append(row[0])
print('Read: ' + filename)

filename = os.path.join(NOTEBOOK_INPUT_DIR, 'input/brain_graph_68.txt')
adj = np.loadtxt(open(filename, "rb"), dtype=int, delimiter=',', skiprows=0)
# Remove diagonal elements to have a real adjacency matrix
adj = adj - np.diag(np.diag(adj))
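# e.g. [[2, 1], [1, 3]] -> [[0, 1], [1, 0]]: np.diag(np.diag(adj)) keeps only the diagonal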
AG = nx.from_numpy_matrix(adj)
print('Read: ' + filename, '\n')
print('Average adjacency matrix:', adj.shape)

FUNC_ZONES = 7
FUNC_NODE_COUT = AG.number_of_nodes()
X_SPACING = 3
Y_SPACING = 5


def create_node_data(layer_id,
                     input_id,
                     weight,
                     brain_lab=BRAIN_LABELS,
                     brain_func_ids=FUNC_IDS):
    data = {}
コード例 #46
# creating a step function
fs = np.ones((n * n, 1))
fs[n * n // 2:, 0] = -1.0
fs = fs + np.random.uniform(low=-0.3, high=0.3, size=(n * n, 1))

# creating a spike function
nodes = np.random.randint(low=0, high=n * n - 1, size=5)
fspk = np.zeros((n * n, 1))
fspk[nodes] = 1.0

# for rendering purpose only
X = np.asarray([[i, j] for j in range(0, n) for i in range(0, n)], dtype=float)
X[:, 0] = X[:, 0] / n
X[:, 1] = X[:, 1] / n
G = ntx.from_numpy_matrix(A.toarray())

my_glog = glog.g_log(Adj=A)

#######################################
######   Smoothing the Spike   ########
#######################################

###### smoothing a spike with the ARMA smoother ########
fspk_smoothed = my_glog.smooth(fspk, smooth_type='ARMA', plambda=5)

fig1, axs = plt.subplots(1, 2, sharex=True, sharey=True)
ax = axs[0]
ax.set_title('graph spike signal')
ax.set_facecolor("white")
ax.axes.get_xaxis().set_visible(False)
コード例 #47
ファイル: graphs.py プロジェクト: LauraGwilliams/jr-tools
def plot_graph(X,
               directional=False,
               prune=None,
               negative_weights=True,
               weights_scale=10,
               iterations=1000,
               fixed=None,
               init_pos=None,
               node_size=100,
               node_color=None,
               node_alpha=.5,
               edge_curve=False,
               edge_width=None,
               edge_width_scale=1,
               edge_color=None,
               pos=None,
               edge_alpha=.5,
               self_edge=False,
               wlim=[.1, 2],
               clim=None,
               ax=None,
               final_pos='auto',
               arrowstyle='-'):
    """
    Parameters
    ----------
    X : connectivity matrix shape(n_nodes, n_nodes)
    prune : significant connections (p_values < .05)

    Returns
    -------
    G : the network
    nodes: Paths Collection of all nodes
    """
    import copy
    import networkx as nx
    from sklearn.decomposition import PCA
    X = copy.deepcopy(X)

    # default parameters
    n_nodes = len(X)
    if not directional:
        np.fill_diagonal(X, np.diag(X) / 2)
        X = (X + X.T) / 2.
        # for ii in range(n_nodes - 1):
        #     for jj in range(ii + 1, n_nodes):
        #         X[ii, jj] = 0.
    if negative_weights:
        weights = np.abs(X * weights_scale)
    else:
        # only use positive connections
        weights = X * weights_scale
        weights *= weights > 0.

    # --- network shape
    # # ----- TODO first and last nodes need to be empty
    if directional:
        G = nx.from_numpy_matrix(weights, create_using=nx.DiGraph())
    else:
        G = nx.from_numpy_matrix(weights, create_using=nx.MultiGraph())
    # ---- bias for t0 left
    if init_pos is None:
        r = np.linspace(-np.pi, np.pi, n_nodes)
        init_pos = np.vstack((np.cos(r), np.sin(r)))
        # init_pos += np.random.randn(*init_pos.shape) / 1000.
    init_pos = dict(zip(range(n_nodes), init_pos.T))
    # ---- compute graph
    if pos is None:
        pos = nx.spring_layout(G,
                               pos=init_pos,
                               iterations=iterations,
                               fixed=fixed)

    # ATTRIBUTES
    # ---- nodes color
    if node_color is None:
        node_color = plt.cm.rainbow
    if isinstance(node_color, mcol.LinearSegmentedColormap):
        node_color = plt.get_cmap(node_color)
        node_color = np.array(
            [node_color(float(ii) / n_nodes) for ii in range(n_nodes)])
    elif np.ndim(node_color) == 1 or isinstance(node_color, str):
        node_color = [node_color] * n_nodes

    # ---- node size
    if isinstance(node_size, (float, int)):
        node_size = [node_size] * n_nodes

    # ---- edge width
    if edge_width is None:
        edge_width = np.abs(weights) / weights_scale
        edge_width[edge_width < wlim[0]] = wlim[0]
        edge_width[edge_width > wlim[1]] = wlim[1]
    if isinstance(edge_width, (float, int)):
        edge_width = edge_width * np.ones_like(weights)

    # ---- edge color
    if clim is None:
        clim = np.min(X), np.max(X)
    if edge_color is None:
        edge_color = white_black
    if isinstance(edge_color, mcol.LinearSegmentedColormap):
        cmap = plt.get_cmap(edge_color)
        edge_color = (X - float(clim[0])) / float(np.ptp(clim))
        edge_color[edge_color > 1.] = 1.
        edge_color[edge_color < 0.] = 0.
        edge_color = cmap(edge_color)
    elif isinstance(edge_color, str) or np.ndim(edge_color) == 1:
        edge_color = np.tile(edge_color, weights.shape)
    else:
        raise ValueError('unknown edge color')

    # ---- add attributes to graph
    for ii in G.nodes():
        G.node[ii]['pos'] = pos[ii]
        G.node[ii]['color'] = node_color[ii]
        G.node[ii]['size'] = node_size[ii]

    for (ii, jj) in G.edges():
        if directional:
            G.edge[ii][jj]['width'] = edge_width[ii, jj]
            G.edge[ii][jj]['color'] = edge_color[ii, jj]
        else:
            G.edge[ii][jj][0]['width'] = edge_width[ii, jj]
            G.edge[ii][jj][0]['color'] = edge_color[ii, jj]

    # ---- prune graph for plotting
    if prune is None:
        prune = np.zeros_like(X)
    for (ii, jj) in G.edges():
        if prune[ii, jj]:
            G.remove_edge(ii, jj)

    outdeg = G.degree()
    to_remove = [n for n in outdeg if outdeg[n] == 0]
    G.remove_nodes_from(to_remove)

    # ---- Rotate graph for horizontal axis
    xy = np.squeeze([pos[xy] for xy in pos if xy not in to_remove])
    if n_nodes > 1:
        if final_pos == 'auto':
            pca = PCA(whiten=False)
            pca.fit(xy)
            xy = pca.transform(xy)
        elif final_pos == 'horizontal':
            # center
            center = np.tile(xy[0, :], (xy.shape[0], 1))
            xy -= center
            # flip
            xy[:, 0] *= -1
            # polar coordinate
            angles = np.arctan2(xy[:, 1], xy[:, 0])
            radius = np.sqrt(np.sum(xy**2, axis=1))
            angles = angles - angles[-1]
            xy = np.vstack(
                (np.cos(angles) * radius, np.sin(angles) * radius)).T
            xy += center
    xy_ = np.zeros((n_nodes, 2))
    xy_[np.array([ii for ii in range(n_nodes) if ii not in to_remove]), :] = xy
    pos = dict(zip(range(n_nodes), xy_))

    # update G nodes pos
    for ii, xy in zip(G.nodes(), xy_):
        G.node[ii]['pos'] = xy

    # plot
    if ax is None:
        fig, ax = plt.subplots(1)

    node_color = [G.node[node]['color'] for node in G.nodes()]
    node_size = [G.node[node]['size'] for node in G.nodes()]
    if directional:
        edge_color = [G.edge[ii][jj]['color'] for (ii, jj) in G.edges()]
        edge_width = [G.edge[ii][jj]['width'] for (ii, jj) in G.edges()]
    else:
        edge_color = [G.edge[ii][jj][0]['color'] for (ii, jj) in G.edges()]
        edge_width = [G.edge[ii][jj][0]['width'] for (ii, jj) in G.edges()]

    draw_net = draw_curve_network if edge_curve else nx.draw_networkx_edges
    if self_edge is True:
        self_edge = np.max(node_size)
    edges = draw_net(G,
                     pos,
                     ax=ax,
                     edge_color=edge_color,
                     width=np.array(edge_width) * edge_width_scale,
                     self_edge=self_edge,
                     edge_alpha=edge_alpha,
                     arrowstyle=arrowstyle)
    if edge_alpha is not None and not edge_curve:
        edge_colors = edges.get_edgecolors()
        edge_colors[:, 3] = edge_alpha
        edges.set_edgecolors(edge_colors)
    nodes = nx.draw_networkx_nodes(G,
                                   pos,
                                   ax=ax,
                                   alpha=node_alpha,
                                   node_color=node_color,
                                   node_size=node_size)
    ax.autoscale()
    ax.set_aspect('equal')
    ax.set_axis_off()

    return G, nodes, edges
コード例 #48
def Nx(ts):
    return nx.from_numpy_matrix(
        np.load(InputDir + '/adjacency' + str(ts) + '.npy'))
コード例 #49
def link_prediction(n_appeared, p_appeared, n_disappeared, p_disappeared,
                    n_new, p_new, n_lost, p_lost, is_train, is_valid, is_test):

    probability_appeared_InputDir, num_appeared_InputDir = get_appeared_InputDirs(
        p_appeared, n_appeared)
    appeared_edge_pred_set_list, appeared_edge_true_set_list, recall_appeared_edge, precision_appeared_edge, f1_score_appeared_edge = get_component_result(
        "edge", probability_appeared_InputDir, num_appeared_InputDir,
        all_node_num, is_train, is_valid, is_test)

    probability_disappeared_InputDir, num_disappeared_InputDir = get_disappeared_InputDirs(
        p_disappeared, n_disappeared)
    disappeared_edge_pred_set_list, disappeared_edge_true_set_list, recall_disappeared_edge, precision_disappeared_edge, f1_score_disappeared_edge = get_component_result(
        "edge", probability_disappeared_InputDir, num_disappeared_InputDir,
        all_node_num + n_expanded, is_train, is_valid, is_test)

    probability_new_InputDir, num_new_InputDir = get_new_InputDirs(
        p_new, n_new)
    new_edge_pred_set_list, new_edge_true_set_list, recall_new_edge, precision_new_edge, f1_score_new_edge = get_component_result(
        "edge", probability_new_InputDir, num_new_InputDir,
        all_node_num + n_expanded, is_train, is_valid, is_test)

    probability_lost_InputDir, num_lost_InputDir = get_lost_InputDirs(
        p_lost, n_lost)
    lost_node_pred_set_list, lost_node_true_set_list, recall_lost_node, precision_lost_node, f1_score_lost_node = get_component_result(
        "node", probability_lost_InputDir, num_lost_InputDir,
        all_node_num + n_expanded, is_train, is_valid, is_test)
    lost_edge_pred_set_list, lost_edge_true_set_list, recall_lost_edge, precision_lost_edge, f1_score_lost_edge = get_edge_connected_lost_node(
        probability_lost_InputDir, lost_node_pred_set_list,
        lost_node_true_set_list, is_train, is_valid, is_test)

    # Compute the combined results
    # (link set at t) + (appeared link set) + (new link set) - (disappeared link set) - (lost link set)
    ts_list = get_ts_list(probability_appeared_InputDir, is_train, is_valid,
                          is_test)
    ts_c_pred_A = []
    for i, ts in enumerate(ts_list):
        ts_train, ts_test, ts_all = TsSplit(ts, L)
        t_edge_set = set()
        for edge in nx.from_numpy_matrix(
                mmread(MakeSample_node_prediction_lost_InputDir +
                       '/adjacency' + str(ts_train[-1])).toarray()).edges:
            t_edge_set.add(frozenset({edge[0], edge[1]}))

        appeared_edge_pred_set = appeared_edge_pred_set_list[i]
        appeared_edge_true_set = appeared_edge_true_set_list[i]
        assert len(
            t_edge_set
            & appeared_edge_true_set) == 0, "the link set at t and the appeared link set must not overlap"
        assert len(
            t_edge_set
            & appeared_edge_pred_set) == 0, "the link set at t and the appeared link set must not overlap"

        disappeared_edge_pred_set = disappeared_edge_pred_set_list[i]
        disappeared_edge_true_set = disappeared_edge_true_set_list[i]
        assert len(t_edge_set & disappeared_edge_true_set) == len(
            disappeared_edge_true_set), "the link set at t should contain the disappeared link set"
        assert len(t_edge_set & disappeared_edge_pred_set) == len(
            disappeared_edge_pred_set), "the link set at t should contain the disappeared link set"

        new_edge_pred_set = new_edge_pred_set_list[i]
        new_edge_true_set = new_edge_true_set_list[i]
        assert len(t_edge_set
                   & new_edge_true_set) == 0, "the link set at t and the new link set must not overlap"
        assert len(t_edge_set
                   & new_edge_pred_set) == 0, "the link set at t and the new link set must not overlap"

        lost_node_pred_set = lost_node_pred_set_list[i]
        lost_edge_pred_set = lost_edge_pred_set_list[i]
        lost_edge_true_set = lost_edge_true_set_list[i]
        assert len(t_edge_set & lost_edge_true_set) == len(
            lost_edge_true_set), "the link set at t should contain the lost link set"
        assert len(t_edge_set & lost_edge_pred_set) == len(
            lost_edge_pred_set), "the link set at t should contain the lost link set"

        pred_set = [set() for _ in range(16)]

        # appeared : disappeared : new : lost
        # do nothing 0000
        pred_set[0] = t_edge_set
        # only lost uses the best method 0001
        pred_set[1] = t_edge_set - lost_edge_pred_set
        pred_set[1] = delete_lost_node(pred_set[1], lost_node_pred_set)
        # only new uses the best method 0010
        pred_set[2] = t_edge_set | new_edge_pred_set
        # only lost and new use the best method 0011
        pred_set[3] = (t_edge_set | new_edge_pred_set) - lost_edge_pred_set
        pred_set[3] = delete_lost_node(pred_set[3], lost_node_pred_set)
        # only disappeared uses the best method 0100
        pred_set[4] = t_edge_set - disappeared_edge_pred_set
        # disappeared and lost use the best method 0101
        pred_set[5] = (t_edge_set -
                       disappeared_edge_pred_set) - lost_edge_pred_set
        pred_set[5] = delete_lost_node(pred_set[5], lost_node_pred_set)
        # disappeared and new use the best method 0110
        pred_set[6] = (t_edge_set
                       | new_edge_pred_set) - disappeared_edge_pred_set
        # disappeared, new and lost use the best method 0111
        pred_set[7] = ((t_edge_set | new_edge_pred_set) -
                       disappeared_edge_pred_set) - lost_edge_pred_set
        pred_set[7] = delete_lost_node(pred_set[7], lost_node_pred_set)
        # only appeared uses the best method 1000
        pred_set[8] = t_edge_set | appeared_edge_pred_set
        # appeared and lost use the best method 1001
        pred_set[9] = (t_edge_set
                       | appeared_edge_pred_set) - lost_edge_pred_set
        pred_set[9] = delete_lost_node(pred_set[9], lost_node_pred_set)
        # appeared and new use the best method 1010
        pred_set[10] = (t_edge_set
                        | appeared_edge_pred_set) | new_edge_pred_set
        # appeared, new and lost use the best method 1011
        pred_set[11] = ((t_edge_set | appeared_edge_pred_set)
                        | new_edge_pred_set) - lost_edge_pred_set
        pred_set[11] = delete_lost_node(pred_set[11], lost_node_pred_set)
        # only appeared and disappeared use the best method 1100
        pred_set[12] = (t_edge_set
                        | appeared_edge_pred_set) - disappeared_edge_pred_set
        # appeared, disappeared and lost use the best method 1101
        pred_set[13] = ((t_edge_set | appeared_edge_pred_set) -
                        disappeared_edge_pred_set) - lost_edge_pred_set
        pred_set[13] = delete_lost_node(pred_set[13], lost_node_pred_set)
        # appeared, disappeared and new use the best method 1110
        pred_set[14] = ((t_edge_set | appeared_edge_pred_set)
                        | new_edge_pred_set) - disappeared_edge_pred_set
        # appeared, disappeared, new and lost use the best method 1111
        pred_set[15] = ((
            (t_edge_set | appeared_edge_pred_set) | new_edge_pred_set) -
                        disappeared_edge_pred_set) - lost_edge_pred_set
        pred_set[15] = delete_lost_node(pred_set[15], lost_node_pred_set)

        pred_A_list = []
        for c_idx in range(16):
            pred_G = nx.Graph()
            pred_G.add_edges_from(
                [tuple(froset) for froset in pred_set[c_idx]])
            pred_A = np.array(
                nx.to_numpy_matrix(
                    pred_G,
                    nodelist=[
                        node for node in range(all_node_num + n_expanded)
                    ]))
            pred_A_list.append(pred_A)
        ts_c_pred_A.append(pred_A_list)

    return np.array(ts_c_pred_A)
コード例 #50
def ST(model):

    model.st_saver.restore(model.session, "/data/wuning/learnAstar/beijingComplete/pre_all_train_neural_network_epoch49.ckpt")
    OSMadj = pickle.load(open("/data/wuning/map-matching/fmm-master/fmm-master/example/ofoOSMadjMat", "rb"))
    trainData = pickle.load(open("/data/wuning/map-matching/taxiTrainData_", "rb"))
    trainTimeData = pickle.load(open("/data/wuning/map-matching/taxiTrainDataTime_", "rb"))
    trainUserData = pickle.load(open("/data/wuning/map-matching/taxiTrainDataUser_", "rb"))
    historyData = pickle.load(open("/data/wuning/map-matching/userIndexedHistoryAttention", "rb"))
    maskData = pickle.load(open("/data/wuning/map-matching/taxiTrainDataMask", "rb"))
    graphData = pickle.load(open("/data/wuning/map-matching/allGraph", "rb"))

    trainData = pickle.load(open("/data/wuning/map-matching/fmm-master/fmm-master/example/OSMBeijingCompleteTrainData_", "rb"))
#    variable_names = [v.name for v in tf.trainable_variables()]
#    print(variable_names)
    location_embeddings = model.session.run(tf.get_default_graph().get_tensor_by_name("st_network/location_embedding/embeddings:0"))
    print(np.array(location_embeddings).shape)

    adj = np.matrix(graphData)[:block_num, :block_num]
    print(adj.shape)
    G = nx.from_numpy_matrix(adj, create_using=nx.DiGraph())
    inv_G = nx.from_numpy_matrix(adj.T, create_using=nx.DiGraph())
    train_size = 1500
    losses = []                                    
    for episode in range(PRE_EPISODE):
      for tra_bat, hour_bat, day_bat, his_bat, his_hour_bat, his_day_bat, his_mask_bat in generate_batch(maskData[:train_size], historyData, trainData[:train_size], trainTimeData[:train_size], trainUserData[:train_size]):
        counter = 0
        heuristics_batches = []
        if(len(tra_bat[0]) < 5):
          continue
#        print(tra_bat.shape, tra_mask_bat.shape, hour_bat.shape, day_bat.shape, his_bat.shape, his_hour_bat.shape, his_day_bat.shape, his_mask_bat.shape)
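        # For each prefix length k, the heuristic target is simply the number
        # of remaining steps to the destination, len(tra_bat[0]) - k.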
        for k in range(len(tra_bat[0]) - 1, 0, -1):
          item_heu_batch = []
          for ite in tra_bat:
            item_heu_batch.append(float(len(tra_bat[0]) - k))      

          item_known_batch = tra_bat[:, :k]
          item_des_batch = tra_bat[:, -1]

          heuristics_batches.append([item_known_batch, item_des_batch, item_heu_batch])
        feed_data = {}
        for heu_batch in heuristics_batches:

#            print(np.array(heu_batch[0]).shape)
#            print(np.array(heu_batch[1]).shape)
#            print(np.array(heu_batch[2]).shape)
#            print(np.array(heu_batch[3]).shape)
#            print(np.array(heu_batch[4]).shape)
#            print(np.array(heu_batch[5]).shape)
#            print(np.array(heu_batch[6]).shape)
#            print(np.array(heu_batch[7]).shape)
#            print(np.array(heu_batch[8]).shape)

            model.optimizer.run(feed_dict = {
                model.st_known_:heu_batch[0],
                model.st_destination_:np.array(heu_batch[1])[:, np.newaxis],
                model.heuristics_input:heu_batch[2],
#                model.src_bias_mat:heu_batch[3],
#                model.des_bias_mat:heu_batch[4],
#                model.src_embedding:heu_batch[5],
#                model.des_embedding:heu_batch[6],
#                model.src_mask:heu_batch[7],
#                model.des_mask:heu_batch[8]
                }
            )
#                _ = model.st_all_optimizer.run(feed_dict=policy_feed_data)
            heuristics_cost = model.heuristics_cost.eval(feed_dict={
                model.st_known_:heu_batch[0],
                model.st_destination_:np.array(heu_batch[1])[:, np.newaxis],
                model.heuristics_input:heu_batch[2],
#                model.src_bias_mat:heu_batch[3],
#                model.des_bias_mat:heu_batch[4],
#                model.src_embedding:heu_batch[5],
#                model.des_embedding:heu_batch[6],
#                model.src_mask:heu_batch[7],
#                model.des_mask:heu_batch[8]
            })
        heuristics = model.heuristics.eval(feed_dict={
                model.st_known_:heu_batch[0],
                model.st_destination_:np.array(heu_batch[1])[:, np.newaxis],
                model.heuristics_input:heu_batch[2],
#                model.src_bias_mat:heu_batch[3],
#                model.des_bias_mat:heu_batch[4],
#                model.src_embedding:heu_batch[5],
#                model.des_embedding:heu_batch[6],
#                model.src_mask:heu_batch[7],
#                model.des_mask:heu_batch[8]
        })
#        print("heuristics:", heuristics)
        heuristics_batches = []
        losses.append(heuristics_cost)
        print("loss:", heuristics_cost, "counter:", counter)
      print(losses)
      print("heuristics:", heuristics)
      model.all_saver.save(model.session, "/data/wuning/AstarRNN/train_complete_heuristics_ST_two_task_epoch{}.ckpt".format( episode))
コード例 #51
    def _gen(self, gname: str, gen_id: int) -> nx.Graph:
        g = self.input_graph

        # fix BTER to use the directory..
        CP.print_blue('Starting BTER...')

        graph_path = f'./src/bter/{g.name}_{self.trial}.mat'
        np.savetxt(graph_path, nx.to_numpy_matrix(g), fmt='%d')

        matlab_code = [
            'mex -largeArrayDims tricnt_mex.c;',
            'mex -largeArrayDims ccperdegest_mex.c;',
            f"G = dlmread('{g.name}_{self.trial}.mat');", 'G = sparse(G);',
            f"graphname = '{g.name}_{self.trial}';", '',
            'nnodes = size(G, 1);', 'nedges = nnz(G) / 2;',
            r"fprintf('nodes: %d edges: %d\n', nnodes, nedges);", '',
            'nd = accumarray(nonzeros(sum(G,2)),1);',
            "maxdegree = find(nd>0,1,'last');",
            r"fprintf('Maximum degree: %d\n', maxdegree);", '',
            '[ccd,gcc] = ccperdeg(G);',
            r"fprintf('Global clustering coefficient: %.2f\n', gcc);", '',
            r"fprintf('Running BTER...\n');", 't1=tic;',
            '[E1,E2] = bter(nd,ccd);', 'toc(t1);',
            r"fprintf('Number of edges created by BTER: %d\n', size(E1,1) + size(E2,1));",
            '',
            "fprintf('Turning edge list into adjacency matrix (including dedup)...');",
            't2=tic;', 'G_bter = bter_edges2graph(E1,E2);', 'toc(t2);',
            r"fprintf('Number of edges in dedup''d graph: %d\n', nnz(G)/2);",
            '', 'G_bter = full(G_bter);',
            r"dlmwrite('{}_{}_bter.mat', G_bter, ' ');".format(
                g.name, self.trial), 'quit;'
        ]

        matlab_code_filename = f'{g.name}_{self.trial}_code.m'
        matlab_code_path = f'./src/bter/{matlab_code_filename}'

        print('\n'.join(matlab_code), file=open(matlab_code_path, 'w'))

        output_path = f'./src/bter/{g.name}_{self.trial}_bter.mat'

        start_time = time()
        completed_process = sub.run(
            f'cd src/bter; cat {matlab_code_filename} | matlab -nosplash -nodesktop',
            shell=True,
            stdout=sub.DEVNULL,
            stderr=sub.DEVNULL)
        CP.print_blue(f'BTER ran in {round(time() - start_time, 3)} secs')

        if completed_process.returncode != 0 or not check_file_exists(
                output_path):
            CP.print_blue('BTER failed!')
            raise Exception('Generation failed!')

        else:
            bter_mat = np.loadtxt(output_path, dtype=int)
            g_bter = nx.from_numpy_matrix(bter_mat, create_using=nx.Graph())
            g_bter.name = gname

        g_bter.gen_id = gen_id
        delete_files(graph_path, output_path, matlab_code_path)

        return g_bter
コード例 #52
def Time_diff(model):
#    model.st_saver.restore(model.session, "/data/wuning/learnAstar/pre_all_train_neural_network_epoch49.ckpt")
#/data/wuning/learnAstar/beijingComplete/pre_all_train_neural_network_epoch
    model.st_saver.restore(model.session, "/data/wuning/learnAstar/beijingComplete/pre_all_train_neural_network_epoch49.ckpt")
    OSMadj = pickle.load(open("/data/wuning/map-matching/fmm-master/fmm-master/example/ofoOSMadjMat", "rb"))
    trainData = pickle.load(open("/data/wuning/map-matching/taxiTrainData_", "rb"))
    trainTimeData = pickle.load(open("/data/wuning/map-matching/taxiTrainDataTime_", "rb"))
    trainUserData = pickle.load(open("/data/wuning/map-matching/taxiTrainDataUser_", "rb"))
    historyData = pickle.load(open("/data/wuning/map-matching/userIndexedHistoryAttention", "rb"))
    maskData = pickle.load(open("/data/wuning/map-matching/taxiTrainDataMask", "rb"))
    graphData = pickle.load(open("/data/wuning/map-matching/allGraph", "rb"))

    trainData = pickle.load(open("/data/wuning/map-matching/fmm-master/fmm-master/example/OSMBeijingCompleteTrainData_", "rb"))
#    variable_names = [v.name for v in tf.trainable_variables()]
#    print(variable_names)
    location_embeddings = model.session.run(tf.get_default_graph().get_tensor_by_name("st_network/location_embedding/embeddings:0"))
    print(np.array(location_embeddings).shape)

    adj = np.matrix(graphData)[:block_num, :block_num]
    print(adj.shape)
    G = nx.from_numpy_matrix(adj, create_using=nx.DiGraph())
    inv_G = nx.from_numpy_matrix(adj.T, create_using=nx.DiGraph())
    train_size = 1500
                                        
    for episode in range(PRE_EPISODE):
      for tra_bat, hour_bat, day_bat, his_bat, his_hour_bat, his_day_bat, his_mask_bat in generate_batch(maskData[:train_size], historyData, trainData[:train_size], trainTimeData[:train_size], trainUserData[:train_size]):
        counter = 0
        heuristics_batches = []
        if(len(tra_bat[0]) < 5):
          continue
#        print(tra_bat.shape, tra_mask_bat.shape, hour_bat.shape, day_bat.shape, his_bat.shape, his_hour_bat.shape, his_day_bat.shape, his_mask_bat.shape)
        eval_policy = model.session.run([model.st_all_prob],feed_dict={
          model.st_known_:tra_bat[:, :-1],
          model.st_destination_:tra_bat[:, -1][:, np.newaxis],
          model.st_output_:tra_bat[:, 1:],
#          model.trans_mat:batch[3]
          model.st_time:hour_bat,
          model.st_day:day_bat,
 #         model.padding_mask:tra_mask_bat,
          model.his_tra:his_bat,
          model.his_time:his_hour_bat,
          model.his_day:his_day_bat,
          model.his_padding_mask:his_mask_bat
        })
        eval_policy = np.array(eval_policy[0])
        wait_next_actions = np.argsort(-eval_policy, axis=2)[:, :, :NEXT_ACTION_NUM]
        policy_value =  -np.sort(-eval_policy, axis=2)[:, :, :NEXT_ACTION_NUM]
        sum_heu_batch = []
        for k in range(wait_next_actions.shape[1], 0, -1):
            item_heu_batch = []
            item_known_batch = []
            item_action_batch = []
            item_des_batch = []
            for l in range(0, NEXT_ACTION_NUM):
                item_heu_batch.extend(policy_value[:, k - 1, l].tolist())
                item_known_batch.extend(tra_bat[:, :-1][:, :k])
                item_action_batch.extend(wait_next_actions[:, k - 1, l].tolist())
                item_des_batch.extend(tra_bat[:, -1])
#                print(np.array(item_known_batch).shape, np.array(item_heu_batch).shape, np.array(item_action_batch).shape, np.array(item_des_batch).shape)
#            print(np.array(item_known_batch).shape, np.array(item_action_batch)[:, np.newaxis].shape)
            item_known_batch_ = np.concatenate((item_known_batch, np.array(item_action_batch)[:, np.newaxis]), axis=1)
            if not k == wait_next_actions.shape[1]:
#                    if len(sum_heu_batch) == 0:
#                        sum_heu_batch = np.zeros(np.array(item_heu_batch).shape)
#                    sum_heu_batch += item_heu_batch
                last_adj = []
                src_adj = []
                des_adj = []
                last_emb = []
                src_emb = []
                des_emb = []
                last_mask = []
                des_mask = []
                src_mask = []
                for last, src, des in zip(np.array(item_known_batch)[:, -1], item_action_batch, item_des_batch):
                  item_1, item_2, item_3, item_4, item_5, item_6, _, _ = generate_sub_graph(location_embeddings, G, inv_G, 10, src=src, des=des)
                  l_item_1, l_item_2, l_item_3 = generate_one_graph(location_embeddings, G, 10, src=last)
                  src_adj.append(item_1)
                  des_adj.append(item_2)
                  des_emb.append(item_3)
                  src_emb.append(item_4)  
                  des_mask.append(item_5)
                  src_mask.append(item_6)
                  last_adj.append(l_item_1)
                  last_emb.append(l_item_2)
                  last_mask.append(l_item_3)
#                print("mask:", np.array(src_mask).shape, np.array(des_mask).shape, np.array(src_emb).shape, np.array(des_emb).shape, np.array(src_adj).shape, np.array(des_adj).shape)  
                item_heu_batch += 0.92 * model.heuristics.eval(
                    feed_dict={
                        model.st_known_:item_known_batch_,
                        model.st_destination_:np.array(item_des_batch)[:, np.newaxis],
                        model.src_bias_mat:src_adj,
                        model.des_bias_mat:des_adj,
                        model.src_embedding:src_emb,
                        model.des_embedding:des_emb,
                        model.src_mask:src_mask,
                        model.des_mask:des_mask
                    }
                )
#                    print(sum_heu_batch)
                heuristics_batches.append([item_known_batch, item_des_batch, item_heu_batch, last_adj, des_adj, last_emb, des_emb, last_mask, des_mask])
        feed_data = {}
        for heu_batch in heuristics_batches:

#            print(np.array(heu_batch[0]).shape)
#            print(np.array(heu_batch[1]).shape)
#            print(np.array(heu_batch[2]).shape)
#            print(np.array(heu_batch[3]).shape)
#            print(np.array(heu_batch[4]).shape)
#            print(np.array(heu_batch[5]).shape)
#            print(np.array(heu_batch[6]).shape)
#            print(np.array(heu_batch[7]).shape)
#            print(np.array(heu_batch[8]).shape)

            model.optimizer.run(feed_dict = {
                model.st_known_:heu_batch[0],
                model.st_destination_:np.array(heu_batch[1])[:, np.newaxis],
                model.heuristics_input:heu_batch[2],
                model.src_bias_mat:heu_batch[3],
                model.des_bias_mat:heu_batch[4],
                model.src_embedding:heu_batch[5],
                model.des_embedding:heu_batch[6],
                model.src_mask:heu_batch[7],
                model.des_mask:heu_batch[8]
                }
            )
#                _ = model.st_all_optimizer.run(feed_dict=policy_feed_data)
            heuristics_cost = model.heuristics_cost.eval(feed_dict={
                model.st_known_:heu_batch[0],
                model.st_destination_:np.array(heu_batch[1])[:, np.newaxis],
                model.heuristics_input:heu_batch[2],
                model.src_bias_mat:heu_batch[3],
                model.des_bias_mat:heu_batch[4],
                model.src_embedding:heu_batch[5],
                model.des_embedding:heu_batch[6],
                model.src_mask:heu_batch[7],
                model.des_mask:heu_batch[8]
            })
        heuristics = model.heuristics.eval(feed_dict={
                model.st_known_:heu_batch[0],
                model.st_destination_:np.array(heu_batch[1])[:, np.newaxis],
                model.heuristics_input:heu_batch[2],
                model.src_bias_mat:heu_batch[3],
                model.des_bias_mat:heu_batch[4],
                model.src_embedding:heu_batch[5],
                model.des_embedding:heu_batch[6],
                model.src_mask:heu_batch[7],
                model.des_mask:heu_batch[8]
        })
#        print("heuristics:", heuristics)
        heuristics_batches = []

        print("loss:", heuristics_cost, "counter:", counter)
      print("heuristics:", heuristics)
      model.all_saver.save(model.session, "/data/wuning/AstarRNN/train_complete_heuristics_TD1_two_task_epoch{}.ckpt".format( episode))
コード例 #53
def node_weighted_condense(A, num_thresholds=8, threshold_distribution=None):
    """Returns a series of node_weighted condensed graphs (DAGs) [1]_ and their original nx_graphs.

    Parameters
    ----------
    A: numpy array
        Adjacency matrix, as square 2d numpy array
    num_thresholds: int, default: 8
        Number of thresholds and resultant sets of node-weighted Directed Acyclic Graphs
    threshold_distribution: float, optional
        If a float is given, distributes the thresholds exponentially, with an exponent equal to the float input.

    Returns
    -------
    largest_condensed_graphs: list of networkX Graphs
        list of node weighted condensed networkx graphs reduced from unweighted digraphs determined by thresholds. (See note)
    nx_graphs: list of networkX Graphs
        list of unweighted graphs produced from applying thresholds to the original weighted network

    Examples
    --------
    Graphing the resultant network is recommended, as otherwise this is difficult to visualize...

    a = np.array([
        [0, 0.2, 0, 0, 0],
        [0, 0, 0, 0.7, 0],
        [0, 0.4, 0, 0, 0],
        [0, 0, 0.1, 0, 1.0],
        [0, 0, 0, 0, 0],
    ])
    condensed_networks, base_binary_networks = hc.node_weighted_condense(a)
    for network in condensed_networks:
        print(nx.to_numpy_array(network))

    Notes
    ------
    TODO: As multiple independent graphs may form from applying threshold cutoffs to a weighted graph,
    only the largest is considered. This might be worth considering in re-evaluating the meaning of
    weighted network hierarchy coordinate evaluations. (See pages 7, 8 of [1]_, supplementary material)

    A threshold_distribution of None results in a linear distribution; otherwise
     the exponential distribution is sampled from exp(x) \in (0, 1)

    .. [1] "On the origins of hierarchy in complex networks."
     Corominas-Murtra, Bernat, Joaquín Goñi, Ricard V. Solé, and Carlos Rodríguez-Caso,
     Proceedings of the National Academy of Sciences 110, no. 33 (2013)
    """

    # Establishing Thresholds
    if num_thresholds == 1 or np.isclose(np.max(A) - np.min(A), 0, 1e-2):
        nx_graphs = [nx.from_numpy_matrix(A, create_using=nx.DiGraph)]
    else:
        if threshold_distribution is None:
            try:
                thresholds = list(np.round(np.arange(np.min(A), np.max(A), (np.max(A) - np.min(A)) / num_thresholds), 4))  # linear distribution
            except:
                thresholds = [np.max(A)]*num_thresholds
        else:
            thresholds = distribute(dist=threshold_distribution, end_value_range=(np.min(A), np.max(A)), n=num_thresholds)
        # Converting to binary nx_graphs according to thresholds:
        nx_graphs = [nx.from_numpy_matrix(np.where(A > threshold, 1, 0), create_using=nx.DiGraph) for threshold in thresholds]
    nx_graphs = [graph for graph in nx_graphs if not nx.is_empty(graph)]  # eliminates empty graphs
    # TODO: Possibly better to count empty graphs as a 0
    condensed_graphs = [nx.condensation(nx_graphs[index]) for index in range(len(nx_graphs))]
    largest_condensed_graphs = []
    for condensed_graph in condensed_graphs:
        largest_condensed_graphs.append(nx.convert_node_labels_to_integers(
            max(weakly_connected_component_subgraphs(condensed_graph, copy=True), key=len)))
        # networkx.weakly_connected_component_subgraphs comes from the networkx 1.10 documentation and has since been discontinued.
        # For ease of access and future networkx compatibility, it was copied directly to this file before the class declaration.
        members = nx.get_node_attributes(largest_condensed_graphs[-1], 'members')
        node_weights = [len(w) for w in members.values()]
        for node_index in range(len(node_weights)):
            largest_condensed_graphs[-1].nodes[node_index]["weight"] = node_weights[node_index]

    return largest_condensed_graphs, nx_graphs
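For intuition about the condensation step used above, a minimal sketch that is independent of this helper:

import networkx as nx
import numpy as np

# A 3-cycle plus a sink: the cycle is one strongly connected component,
# so the condensation is a 2-node DAG whose 'members' record the originals.
A = np.array([[0, 1, 0, 0],
              [0, 0, 1, 0],
              [1, 0, 0, 1],
              [0, 0, 0, 0]])
G = nx.from_numpy_matrix(A, create_using=nx.DiGraph)
C = nx.condensation(G)
print(C.nodes(data=True))  # the node weights above are len(members)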
コード例 #54
            nx.draw_networkx_nodes(g, pos=pos, node_size=80, nodelist=prot0, node_color='steelblue', label='S = 0')
            nx.draw_networkx_nodes(g, pos=pos, node_size=80, nodelist=prot1, node_color='gold', label='S = 1')
            nx.draw_networkx_nodes(g, pos=pos, node_size=80, nodelist=prot2, node_color='firebrick', label='S = 2')
            nx.draw_networkx_edges(g, pos=pos)
            plt.legend(loc="upper left", scatterpoints=1, prop={'size': 15})
            plt.tight_layout()
            plt.savefig('results/'+synthetic_case+'.eps', bbox_inches='tight', format='eps')
            plt.show()

    if number_class == 'binary':

        # Correct the graph with emd
        print("Correcting the graph with EMD")
        new_adj, s, gamma, M = total_repair_emd(
            g, metric='euclidean', case='weighted', log=False)
        new_g = nx.from_numpy_matrix(new_adj)

        # Filter out the smallest weights to keep a reasonable density
        list_edge = [(u, v) for (u, v, d) in new_g.edges(data=True) if d['weight'] <= 0.5]
        new_g.remove_edges_from(list_edge)

        # Coefficient of assortativity
        dict_s = {i: s[i] for i in range(0, len(s))}
        nx.set_node_attributes(new_g, dict_s, 's')
        ass_rep.append(nx.attribute_assortativity_coefficient(new_g, 's'))

        # Density
        density_old.append(nx.density(g))
        density_rep.append(nx.density(new_g))
    elif number_class == "multi":
        X0 = []
コード例 #55
ファイル: heatmap_plotly.py プロジェクト: Lawreros/m2g-paper
    plt.setp(ax.get_yticklabels(), fontsize=6)

    plt.colorbar(im, aspect=30)
    ax.set_title("Averaged Connections")

    fig.tight_layout()

    plt.savefig(f'{localpath}/con_avg/heatmap.png', dpi=1000)

    plt.show()

##### END

if PLOTLY:
    m = np.asmatrix(heatmap, dtype=float)
    Q = nx.from_numpy_matrix(m)
    Q.remove_node(0)

    nx.write_gml(Q, 'avg_edges.gml')

    G = ig.Graph.Read_GML('avg_edges.gml')

    V = list(G.vs)

    labels = [v['label'] for v in V]

    G.es.attributes()  # the edge attributes

    E = [e.tuple for e in G.es]  #list of edges

    # Get the list of Contestant countries
コード例 #56
    def train(self,
              text,
              window=2,
              lower=False,
              speech_tag_filter=True,
              vertex_source='all_filters',
              edge_source='no_stop_words'):

        self.text = text
        self.word_index = {}
        self.index_word = {}
        self.keywords = []
        self.graph = None
        self.words_copy = []

        (_, self.words_no_filter, self.words_no_stop_words,
         self.words_all_filters) = self.seg.segment(
             text=text, lower=lower, speech_tag_filter=speech_tag_filter)

        if vertex_source == 'no_filter':
            vertex_source = self.words_no_filter
        elif vertex_source == 'no_stop_words':
            vertex_source = self.words_no_stop_words
        else:
            vertex_source = self.words_all_filters

        if edge_source == 'no_filter':
            edge_source = self.words_no_filter
        elif edge_source == 'all_filters':
            edge_source = self.words_all_filters
        else:
            edge_source = self.words_no_stop_words

        for words in vertex_source:
            for word in words:
                self.words_copy.append(word)
        #print self.words_copy
        index = 0
        for words in vertex_source:
            for word in words:
                #print word
                if not word in self.word_index:
                    #print word
                    self.word_index[word] = index
                    self.index_word[index] = word
                    index += 1

        words_number = index  # number of words

        self.graph = np.zeros((words_number, words_number))

        for word_list in edge_source:
            for w1, w2 in self.combine(word_list, window):
                if not w1 in self.word_index:
                    continue
                if not w2 in self.word_index:
                    continue
                index1 = self.word_index[w1]
                index2 = self.word_index[w2]
                self.graph[index1][index2] = 1.0
                self.graph[index2][index1] = 1.0


#         for x in xrange(words_number):
#             row_sum = np.sum(self.graph[x, :])
#             if row_sum > 0:
#                 self.graph[x, :] = self.graph[x, :] / row_sum

        nx_graph = nx.from_numpy_matrix(self.graph)
        scores = nx.pagerank(nx_graph)  # this is a dict
        for word in scores:
            #print word,scores[word],self.words_copy.count(word)
            scores[word] = scores[word] * self.words_copy.count(
                self.index_word[word])
        sorted_scores = sorted(scores.items(),
                               key=lambda item: item[1],
                               reverse=True)
        #print sorted_scores
        for index, _ in sorted_scores:
            self.keywords.append(self.index_word[index])
            #print(self.index_word[index],_ )
        return [(self.index_word[index], _) for index, _ in sorted_scores]
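The core of train() is a sliding-window co-occurrence matrix followed by PageRank; a minimal standalone sketch with hypothetical tokens:

import networkx as nx
import numpy as np

tokens = ['graph', 'rank', 'graph', 'word', 'rank']
vocab = {w: i for i, w in enumerate(dict.fromkeys(tokens))}
adj = np.zeros((len(vocab), len(vocab)))
window = 2
for i, w1 in enumerate(tokens):
    for w2 in tokens[i + 1:i + window]:
        if w1 != w2:
            adj[vocab[w1], vocab[w2]] = adj[vocab[w2], vocab[w1]] = 1.0

scores = nx.pagerank(nx.from_numpy_matrix(adj))
print(sorted(vocab, key=lambda w: scores[vocab[w]], reverse=True))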
コード例 #57
ファイル: buck_microgrid.py プロジェクト: asokraju/kristools
    def __init__(self, dt=1e-5):
        super(Buck_micrgrid, self).__init__()

        # parameters
        self.Vs = np.array([400, 400, 400, 400])
        self.L = np.diag(np.array(
            [1.0, 1.0, 1.0,
             1.0]))  #np.diag(np.array([1.8, 2.0, 3.0, 2.2])*1e-3)
        self.C = np.diag(np.array(
            [1.0, 1.0, 1.0,
             1.0]))  #np.diag(np.array([2.2, 1.9, 2.5, 1.7])*1e-3)
        self.R = np.diag(np.array(
            [1.0, 1.0, 1.0, 1.0]))  #np.diag(np.array([1.5, 2.3, 1.7, 2.1])*0)
        self.G = np.diag(np.array(
            [0.1, 0.1, 0.1, 0.1]))  # np.diag(1/np.array([16.7, 50, 16.7, 20]))

        self.Lt = np.diag(np.array([2.1, 2, 3, 2.2]) * 1e-3)
        self.Rt = np.diag(np.array([7, 5, 8, 6]) * 1e-2)
        """
        W = inv(diag([0.4 0.2 0.15 0.25]));
        D = 100*[1 -1 0 0; -1 2 -1 0; 0 -1 2 -1; 0 0 -1 1];
        B = [-1 0 0 -1;
              1 -1 0 0; 
              0 1 -1 0; 
              0 0 1 1  ];
        """
        #Graph structure
        self.inc_mat = np.array([[-1, 0, 0, -1], [1, -1, 0, 0], [0, 1, -1, 0],
                                 [0, 0, 1, 1]])
        self.adj_mat = (np.dot(self.inc_mat, self.inc_mat.T) -
                        2 * np.eye(4)).astype(int)
        self.Graph = nx.from_numpy_matrix(self.adj_mat)
        self.pos = nx.spring_layout(self.Graph)  #networkx.random_layout(G)
        self.options = {
            'node_color': 'red',
            'node_size': 1300,
            'width': 1,
            'arrowstyle': '-|>',
            'arrowsize': 12,
            'pos': self.pos
        }

        #step size; since L and C are very low, the ode becomes stiff
        #For the default parameters the step size should in the order of 1e-6
        self.T = dt

        #the steady-state equilibrium of the system is
        self.Vdes = np.array([230, 230, 230, 230])
        self.Itdes = -np.dot(np.linalg.inv(self.Rt),
                             np.dot(self.inc_mat.T, self.Vdes))
        self.Ides = np.dot(self.G, self.Vdes) - np.dot(self.inc_mat,
                                                       self.Itdes)

        self.udes = (1 / self.Vs) * (np.dot(self.R, self.Ides) + self.Vdes)
        self.action_des = 2 * self.udes - 1
        if any(self.Vs <= self.Vdes):
            raise ValueError(
                "for a buck converter the desired voltage should be less than the source voltage: Vdes < Vs"
            )

        # The control action is the duty ratio, which lies between 0 and 1
        # (assuming the switching frequency is very high).
        # However, RL algorithms work with symmetric control actions,
        # so we transform the action space to lie between -1 and 1:
        #   action = 2*duty-ratio - 1
        #   duty-ratio = 0.5*(action + 1)
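        # e.g. action = -1 -> duty ratio 0, action = 0 -> 0.5, action = +1 -> 1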
        #lists to save the states and actions
        self.state_trajectory = []
        self.action_trajectory = []
        self.count_steps = 0  # counts the number of steps taken

        self.action_space = spaces.Box(low=np.array([-1, -1, -1, -1]),
                                       high=np.array([+1, +1, +1, +1],
                                                     dtype=np.float64))

        low_obs = np.full(shape=(12, ), fill_value=-np.inf, dtype=np.float64)
        high_obs = np.full(shape=(12, ), fill_value=np.inf, dtype=np.float64)

        self.observation_space = spaces.Box(low=low_obs,
                                            high=high_obs,
                                            dtype=np.float64)

        self._get_state()
コード例 #58
def networkx_from_matrix_and_list(adj, names):
    G = nx.from_numpy_matrix(adj)
    # relabel_nodes is a module-level function, and names is indexed, not called
    G = nx.relabel_nodes(G, {ind: names[ind] for ind in range(len(names))})
    return G
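A hedged usage sketch of the helper as fixed above:

import numpy as np
import networkx as nx

adj = np.array([[0, 1], [1, 0]])
G = networkx_from_matrix_and_list(adj, ['a', 'b'])
print(list(G.nodes()))  # ['a', 'b']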
コード例 #59
ax = fig.add_subplot(1, 1, 1)
data = [d for n, d in G.out_degree() if d > 0]
powerlaw.plot_pdf(data, linear_bins=False, color='b')
ax.set_xlabel('out-degree')
ax.set_ylabel('p(out-degree)')
set_size(4, 4, ax)

data = [d for n, d in G.in_degree() if d > 0]
powerlaw.plot_pdf(data, linear_bins=False, color='b')

data = [d for n, d in G.degree() if d > 0]
powerlaw.plot_pdf(data, linear_bins=False, color='b')

p0 = np.average([i[1] for i in G.out_degree()]) / (nonodes - 1)
ws_mat = watts_strogatz(L=nonodes, p0=p0, beta=0.5, directed=True)
G_ws_mat = nx.from_numpy_matrix(ws_mat, create_using=nx.DiGraph())
G_ws_mat.number_of_edges()

p = noedges / nonodes / (nonodes - 1)
G_rd = nx.generators.gnp_random_graph(nonodes, p=p, directed=True)
G_rd.number_of_edges()


def calGlobalEfficiency(G, lst_nodes, N):  #N = 0,
    #if N == 0: N = G.number_of_nodes()
    #if lst_nodes == None: lst_nodes = G.nodes()
    shortest_path = nx.shortest_path(G)
    acc = 0.0
    for i in lst_nodes:
        for j in lst_nodes:
            if i != j and (j in shortest_path[i]):
コード例 #60
 def keyword(self,
             text,
             num=6,
             score_min=0.025,
             win_size=3,
             type_sim="total",
             type_encode="avg",
             config={
                 "alpha": 0.86,
                 "max_iter": 100
             }):
     """
         关键词抽取, textrank of word2vec cosine
     :param text: str, doc. like "大漠帝国是历史上存在的国家吗?你知不知道?嗯。"
     :param num: int, length of sentence like 6
     :param win_size: int, windows size of combine. like 2
     :param type_sim: str, type of simiilarity. like "total", "cosine"
     :param config: dict, config of pagerank. like {"alpha": 0.86, "max_iter":100}
     :return: list, result of keyword. like [(0.020411696169510562, '手机'), (0.016149784106276977, '夏普')]
     """
      # split into sentences
     if type(text) == str:
         self.sentences = cut_sentence(text)
     elif type(text) == list:
         self.sentences = text
     else:
         raise RuntimeError("text type must be list or str")
      # tokenize with macropodus_cut
     self.macropodus_word = [
         macropodus_cut(sentence) for sentence in self.sentences
     ]
      # remove stop words, etc.
     self.sentences_word = [[
         w for w in mw if w not in self.stop_words.values()
     ] for mw in self.macropodus_word]
      # build the graph vertices
     word2index = {}
     index2word = {}
     word_index = 0
     for sent_words in self.sentences_word:
         for word in sent_words:
             if not word in word2index:  # index
                 word2index[word] = word_index
                 index2word[word_index] = word
                 word_index += 1
     graph_words = np.zeros((word_index, word_index))
      # build the graph edges, based on the cosine similarity of word pairs
     for sent_words in self.sentences_word:
         for cw_1, cw_2 in self.cut_window(sent_words, win_size=win_size):
             if cw_1 in word2index and cw_2 in word2index:
                 idx_1, idx_2 = word2index[cw_1], word2index[cw_2]
                 score_w2v_cosine = self.similarity(cw_1,
                                                    cw_2,
                                                    type_sim=type_sim,
                                                    type_encode=type_encode)
                 graph_words[idx_1][idx_2] = score_w2v_cosine
                 graph_words[idx_2][idx_1] = score_w2v_cosine
      # build the similarity graph from the matrix
     w2v_cosine_sim = nx.from_numpy_matrix(graph_words)
     # nx.pagerank
     sens_scores = nx.pagerank(w2v_cosine_sim, **config)
      # sort by score
     sen_rank = sorted(sens_scores.items(),
                       key=lambda x: x[1],
                       reverse=True)
      # keep top-k, guarding against out-of-range
     topk = min(len(sen_rank), num)
      # return the words and their scores
     return [(sr[1], index2word[sr[0]]) for sr in sen_rank
             if len(index2word[sr[0]]) > 1 and score_min <= sr[1]][0:topk]