Example #1
def drow_l_s():  # adjacency list
    inciliststr = stext4.get('1.0', 'end-1c')
    incidencelist = inciliststr.split('\n')
    P = nx.parse_adjlist(incidencelist, nodetype=int)

    nx.draw_networkx(P)
    plt.show()
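For reference, nx.parse_adjlist expects one line per node: the node label first, followed by its neighbours. A minimal sketch with hard-coded lines in place of the Tkinter widget text:

import networkx as nx
import matplotlib.pyplot as plt

lines = ["1 2 3", "2 3", "3"]  # node followed by its neighbours
P = nx.parse_adjlist(lines, nodetype=int)
print(sorted(P.edges()))  # [(1, 2), (1, 3), (2, 3)]
nx.draw_networkx(P)
plt.show()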
Example #2
def overall_call(original_file, attributes_file, start_point_id, length_path,
                 set_progress_funk):
    data = pd.read_csv(attributes_file)
    list_of_adjacencies = data[['fid', 'fid_2']]
    list_of_adjacencies = np.array(list_of_adjacencies)
    lines = prepare(list_of_adjacencies)
    G = nx.parse_adjlist(lines, nodetype=str)
    l_dataframe = pd.read_csv(length_path)
    l_dataframe = l_dataframe.astype({'id': 'str'})
    last_vertex = distance_attr(G, str(start_point_id), l_dataframe)
    rank_set(G, str(start_point_id), str(last_vertex), set_progress_funk)
    iter_set_values(G, str(start_point_id))
    dataframe = make_dataframe(G)
    rivers = pd.read_csv(original_file)
    rivers = rivers.astype({'fid': 'str'})
    data_merged = pd.merge(rivers, dataframe, on='fid')
    rows_count = data_merged.shape[0]
    df_dict = {}
    for i in range(rows_count):
        df_dict[int(data_merged.iloc[i]['fid'])] = [
            int(data_merged.iloc[i]['Rank']),
            int(data_merged.iloc[i]['Value']),
            int(data_merged.iloc[i]['Distance'])
        ]
    return df_dict
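For context, parse_adjlist here receives one line per 'fid' listing its adjacent 'fid_2' values. A minimal sketch of that conversion (a hypothetical stand-in for the `prepare` helper, which is not shown in the snippet):

import numpy as np
import networkx as nx

# group fid_2 values by fid into space-separated adjacency lines
pairs = np.array([["1", "2"], ["1", "3"], ["2", "3"]])
adjacency = {}
for fid, fid2 in pairs:
    adjacency.setdefault(fid, []).append(fid2)
lines = [" ".join([fid] + nbrs) for fid, nbrs in adjacency.items()]
G = nx.parse_adjlist(lines, nodetype=str)
print(sorted(G.edges()))  # [('1', '2'), ('1', '3'), ('2', '3')]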
Example #3
def draw_graph_from_adjlist(adjlist, prog='dot', **kwargs):
    lines = [' '.join(str(x) for x in line) for line in adjlist]
    g = nx.parse_adjlist(lines, create_using=nx.DiGraph(), nodetype=int)
    pos = nx.drawing.nx_pydot.graphviz_layout(g, prog=prog)
    nx.draw(g, pos, with_labels=True, arrows=True, **kwargs)
    #plt.show()
    return pos
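Note that nx.drawing.nx_pydot.graphviz_layout requires the pydot package and a Graphviz installation. A usage sketch, assuming both are available:

# each inner list is a node followed by its out-neighbours
pos = draw_graph_from_adjlist([[1, 2, 3], [2, 3], [3]], prog='dot')
print(pos[1])  # (x, y) coordinates assigned by Graphviz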
Example #4
def create_graph_enzymes(file):
    
    with open(file, 'r') as f:
        lines = f.read().splitlines()
    
    # get the indices of the vertex labels, adjacency list and class
    idx_vertex = lines.index("#v - vertex labels")
    idx_adj_list = lines.index("#a - adjacency list")
    idx_clss = lines.index("#c - Class")
    
    # node label    
    vl = [int(ivl) for ivl in lines[idx_vertex+1:idx_adj_list]]
    
    adj_list = lines[idx_adj_list+1:idx_clss]
    sources = list(range(1,len(adj_list)+1))

    for i in range(len(adj_list)):
        if not adj_list[i]:
            adj_list[i] = str(sources[i])
        else:
            adj_list[i] = str(sources[i])+","+adj_list[i]

    g = nx.parse_adjlist(adj_list, nodetype=int, delimiter=",")
    
    for i in range(1, g.number_of_nodes()+1):
        g.nodes[i]['labels'] = np.array(vl[i-1])  # g.nodes: g.node was removed in NetworkX 2.4
    
    c = int(lines[idx_clss+1])
    
    return g, c
Example #5
    def from_adjlist(self, filename, delimiter=None, nodetype=int):
        """ Loads a Cluster from its adjacency list stored in a file `filename`
        """
        with open(filename, 'r') as f:
            graph = nx.parse_adjlist(lines=f,
                                     nodetype=nodetype,
                                     delimiter=delimiter)

        self.graph = graph
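Because nx.parse_adjlist accepts any iterable of strings, the open file handle can be passed directly without reading the file into memory first. A minimal sketch (file name hypothetical):

import networkx as nx

# 'cluster.adjlist' is assumed to contain lines such as "a b c"
with open('cluster.adjlist') as f:
    graph = nx.parse_adjlist(f, nodetype=str)
print(graph.number_of_nodes())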
Example #6
def MCL(cdr3, edgelist=None, mcl_hyper=[1.2, 2], outfile=None):
    """
    Perform clustering on a network of CDR3 amino acid sequences with
    a known Hamming distance, using the Markov clustering (MCL) algorithm.
    For more info about the inflation and expansion parameters,
    visit: https://micans.org/mcl/


    Parameters
    ----------
    cdr3 : iterable
        CDR3 amino acid sequences; used to build the edgelist when
        `edgelist` is None.
    edgelist : set, optional
        Tab-separated edgelist. The default is None.
    mcl_hyper : list, optional
        MCL hyperparameters: inflation and expansion.
        The default is [1.2,2].
    outfile : str, optional
        Name of outfile. The default is None.

    Returns
    -------
    clusters : pd.DataFrame
        pd.DataFrame containing two columns: 'CDR3' and 'cluster'.
        The first column contains CDR3 sequences, the second column
        contains the corresponding cluster ids.
    """
    if edgelist is None:
        edgelist = create_edgelist(cdr3)

    try:
        G = nx.parse_adjlist(edgelist, nodetype=str)
        m = nx.to_scipy_sparse_array(G)
    
        # Run MCL
        result = mcl.run_mcl(m, inflation=mcl_hyper[0], expansion=mcl_hyper[1])
        mcl_output = mcl.get_clusters(result)
        identifiers = list(G.nodes())
    
        # Map cluster ids back to seqs
        cluster_ids = dict()
        for i in range(len(mcl_output)):
            cluster_ids[i] = list(identifiers[i] for i in mcl_output[i])
    
        # Generate nodelist
        clusters = {"CDR3": [], "cluster": []}
        for c in cluster_ids:
            for seq in cluster_ids[c]:
                clusters["CDR3"].append(seq)
                clusters["cluster"].append(c)
        clusters = pd.DataFrame(data=clusters)
    
        # Write to file
        if outfile is not None:
            clusters.to_csv(outfile, sep="\t", index=False)
    except nx.NetworkXError:
        clusters = pd.DataFrame({"CDR3": [], "cluster": []})

    return clusters
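A hedged usage sketch: when an edgelist is supplied, create_edgelist is skipped, so the function can be driven with hand-written adjacency lines (the sequences below are made up; this assumes the markov_clustering package is importable as mcl, as the snippet implies):

clusters = MCL(cdr3=None,
               edgelist=["CASSLGFF CASSLGYF", "CASSLGYF CASSLGVF"])
print(clusters)  # one row per CDR3 sequence with its cluster id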
Example #7
    def fromDict(d: Dict):
        adj_list = d["adj_list"]
        graph = nx.parse_adjlist(adj_list, create_using=nx.DiGraph)

        seed_id = d["seed_id"]
        params = d["params"]

        social_graph = SocialGraph(graph, seed_id, params)

        return social_graph
Example #8
 def read_file(self, file):
     graph_list = []
     with open(file, 'r') as graph:
         i = 0
         for line in graph.readlines():
             if i > 0:
                 graph_list.append(str(i) + ' ' + line)
             i += 1
     Adjlist = nx.parse_adjlist(graph_list, nodetype=int)
     return Adjlist
Example #9
def readFromFile():
    with open("files/adjList.txt", 'r') as f:
        tab = f.readlines()

    ll = []
    x = 0
    for i in tab:
        ii = str(x) + " " + i[1:-2].replace(',', '')
        ll.append(ii)
        x += 1
    return nx.parse_adjlist(ll, nodetype=int)
Example #10
 def read_graph(self, filename):
     lines = []
     with open(filename, 'r') as graph:
         num_data = graph.readline().split(' ')
         num_node, num_edge = int(num_data[0]), int(num_data[1])  # not used
         i = 0
         for line in graph:
             i = i + 1
             lines.append(str(i) + ' ' + line[:-2])
     G = nx.parse_adjlist(lines[:-1], nodetype=int)
     return G
Example #11
def readFromFile():
    with open("files/adjList.txt", 'r') as f:
        tab = f.readlines()

    adjlist = []
    x = 0
    for i in tab:
        i = str(x) + " " + i[1:-2].replace(',', '')
        adjlist.append(i)
        x += 1

    return nx.parse_adjlist(adjlist)
Example #12
def generate_adj_network_viz(networks):
    '''
    generates a networkx graph from an adjacency list and saves it as a pyplot drawing
    :param networks: a list of lists, where each inner list is a network of characters; the
    first character is the 'protagonist' or POV holder
    :return: None, saves to png files
    '''
    # TODO remove the hardcoding of test indices
    for index in range(1, 50, 5):
        # this is janky, because nx expects a single string of space-separated names
        curr_graph = nx.parse_adjlist([' '.join(networks[index])])
        nx.draw_networkx(curr_graph)
        plt.savefig('Chapter {} network.png'.format(index))
Example #13
def get_fastg_digraph(fastg_name):
    """ scans through fastg headers as an adjacency list
        builds and returns a nx directed graph using adjacencies
        note: no connections are created between each node and its 
        rc node - we need to take care to maintain these 
    """
    lines = []
    with open(fastg_name, 'r') as fp:
        for name, seq, qual in readfq(fp):
            # drop the trailing ';' and turn the ':'/',' separators into spaces
            name = re.sub('[:,]', ' ', name[:-1])
            lines.append(name)
    G = nx.DiGraph()
    return nx.parse_adjlist(lines, create_using=G)
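To illustrate the header rewrite: a FASTG-style header such as 'EDGE_1:EDGE_2,EDGE_3;' names a node and its successors, and the re.sub call turns it into a space-separated adjacency line (the header value here is hypothetical):

import re

name = "EDGE_1:EDGE_2,EDGE_3;"
print(re.sub('[:,]', ' ', name[:-1]))  # EDGE_1 EDGE_2 EDGE_3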
Example #14
def get_fastg_digraph(fastg_name):
    """ scans through fastg headers as an adjacency list
        builds and returns a nx directed graph using adjacencies
        note: no connections are created between each node and its
        rc node - we need to take care to maintain these
    """
    lines = []
    with open(fastg_name, 'r') as fp:
        for name, seq, qual in readfq(fp):
            # drop the trailing ';' and turn the ':'/',' separators into spaces
            name = re.sub('[:,]', ' ', name[:-1])
            lines.append(name)
    G = nx.DiGraph()
    return nx.parse_adjlist(lines, create_using=G)
Example #15
def louvain(cdr3, edgelist=None):
    if edgelist is None:
        edgelist = create_edgelist(cdr3)

    try:
        G = nx.parse_adjlist(edgelist, nodetype=str)
        partition = community.best_partition(G)
    except nx.NetworkXError:
        # parsing failed: return an empty result frame directly
        return pd.DataFrame({"CDR3": [], "cluster": []})

    return pd.DataFrame.from_dict(
        partition, orient="index", columns=["cluster"]
        ).reset_index().rename(columns={'index': 'CDR3'})
Example #16
 def test_parse_adjlist(self):
     lines = ["1 2 5", "2 3 4", "3 5", "4", "5"]
     nx.parse_adjlist(lines, nodetype=int)  # smoke test
     with pytest.raises(TypeError):
         nx.parse_adjlist(lines, nodetype="int")
     lines = ["1 2 5", "2 b", "c"]
     with pytest.raises(TypeError):
         nx.parse_adjlist(lines, nodetype=int)
Example #17
    def calculate_cetrality(self, data):

        logger.info("Starting calculating centrality")
        adj_list = []

        for user in data:
            adj_list.append(str(user.user_oid_i) + " " + str(user.user_oid_j))

        g = nx.parse_adjlist(adj_list, nodetype=int)
        logger.info(str(len(adj_list)))
        # compute degree centrality
        degree_centrality = nx.degree_centrality(g)
        logger.info("Finish calculating centrality")

        return degree_centrality
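parse_adjlist treats the first token of each line as the source and every remaining token as a neighbour, so one "i j" line per user pair, as built above, behaves like a plain edge list. A small check:

import networkx as nx

g = nx.parse_adjlist(["1 2", "2 3", "1 3"], nodetype=int)
print(nx.degree_centrality(g))  # {1: 1.0, 2: 1.0, 3: 1.0}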
Example #18
    def calculate_cetrality(self, data):

        logger.info("Starting calculating centrality")
        adj_list = []

        for user in data:
            adj_list.append(str(user.user_oid_i) + " " + str(user.user_oid_j))

        g = nx.parse_adjlist(adj_list, nodetype=int)
        logger.info(str(len(adj_list)))
        # compute degree centrality
        degree_centrality = nx.degree_centrality(g)
        logger.info("Finish calculating centrality")

        return degree_centrality
Example #19
 def test_graph_conversion_to_text(self):
     """
     convert graph to text, as adjacency list.
     :return:
     """
     yt_script.convert_graph_to_text(self.MOCK_GRAPH, self.MOCK_FILE_OUTPUT)
     with open(self.MOCK_FILE_OUTPUT) as f:
         output = f.read()
         output = output.split('\n')
     result_graph = nx.parse_adjlist(output)
     for node in self.MOCK_GRAPH.nodes():
         self.assertIn(node, result_graph.nodes())
     for edge in self.MOCK_GRAPH.edges():
         try:
             self.assertIn(edge, result_graph.edges())
         except AssertionError:
             edge = (edge[1], edge[0])
             self.assertIn(edge, result_graph.edges())
Example #20
 def test_graph_conversion_to_text(self):
     """
     convert graph to text, as adjacency list.
     :return:
     """
     yt_script.convert_graph_to_text(self.MOCK_GRAPH, self.MOCK_FILE_OUTPUT)
     with open(self.MOCK_FILE_OUTPUT) as f:
         output = f.read()
         output = output.split('\n')
     result_graph = nx.parse_adjlist(output)
     for node in self.MOCK_GRAPH.nodes():
         self.assertIn(node, result_graph.nodes())
     for edge in self.MOCK_GRAPH.edges():
         try:
             self.assertIn(edge, result_graph.edges())
         except AssertionError:
             edge = (edge[1], edge[0])
             self.assertIn(edge, result_graph.edges())
Example #21
def main():
    maximum_word_length = input("Enter max word length: ")
    # re-prompt until the input is an integer between 1 and 16
    while not maximum_word_length.isdigit() or not 1 <= int(maximum_word_length) <= 16:
        maximum_word_length = input("please input a real integer value: ")

    maximum_word_length = int(maximum_word_length)
    board = random_board_2d()
    display_2d_board(board)
    board = unravel_board(board) #change this soon
    #words have an n+1 length for an n length path
    word_list = make_word_list('word_lists/words')
    
    #make the graph
    adj = build_adjacency_list(board)
    G = nx.parse_adjlist(adj)
    substrings = find_valid_paths(G,board,maximum_word_length)
    valid_words = check_substrings_for_words(word_list,substrings)
    print(len(valid_words))
    for word in valid_words:
        print(word)
Example #22
    def get_centrality_by_user(self, users):

        # build the adjacency list as strings
        adj_list = []

        for user in users:
            followers = ""
            user_followers = [follower.user_id for follower in user.followers.all()]
            if user.followers:
                followers = " " + " ".join(str(x) for x in user_followers)
            adj_list.append(str(user.user_id) + followers)

        # build the graph from the strings
        g = nx.parse_adjlist(adj_list, nodetype=int)

        # compute degree centrality
        degree_centrality = nx.degree_centrality(g)
        # closeness_centrality = nx.closeness_centrality(g)
        # betweenness_centrality = nx.betweenness_centrality(g)
        # eigenvector_centrality = nx.eigenvector_centrality(g)

        return degree_centrality
Example #23
def get(path):
    with open(path, 'r') as f:
        tab = f.readlines()

    rows = []
    for i in tab:
        rows.append(i[2:-2].replace(' ', ''))

    adj_list = []
    for k, v in enumerate(rows):
        adj_list.append([])
        for key, value in enumerate(v):
            if int(value) == 1:
                adj_list[k].append(key)

    lines = []
    for k, v in enumerate(adj_list):
        lines.append(str(k))
        for i in v:
            lines[k] += " " + str(i)

    G = nx.parse_adjlist(lines)
    return G
Example #24
 def draw_graph_from_adjacency_list(self, adjacency_list):
     self.graph = nx.parse_adjlist(adjacency_list, nodetype=int)
     self.draw_graph()
Example #25
def bnb(inst, cutoff, seed):
    graph = nx.Graph()
    adj_list = []
    v = 0
    e = 0
    with open(inst, 'r') as readGraph:
        i = 0
        for line in readGraph.readlines():
            if i == 0:
                tmp = line.split()
                v, e = int(tmp[0]), int(tmp[1])
                i += 1

            else:
                adj_list.append(str(i) + ' ' + line)
                i += 1

    graph = nx.parse_adjlist(adj_list, nodetype=int)

    graphDup = graph.copy()
    vDup = maxDeg(graphDup)
    f = []
    f.append((vDup[0], 0, (-1, -1)))
    f.append((vDup[0], 1, (-1, -1)))

    best = []
    current = []
    sol = []
    ub = graph.number_of_nodes()

    s = time.time()
    end = s
    t = end - s

    while f != [] and t < cutoff:
        (vx, state, parent) = f.pop()
        bt = False
        if state == 0:
            for i in list(graphDup.neighbors(vx)):
                current.append((i, 1))
                graphDup.remove_node(i)
        elif state == 1:
            graphDup.remove_node(vx)
        else:
            pass

        current.append((vx, state))
        currentSz = vcsz(current)

        if graphDup.number_of_edges() == 0:
            if currentSz < ub:
                best = current.copy()

                ub = currentSz
                sol.append([time.time() - s, currentSz])
            bt = True
        else:
            currlb = currentSz + lb(graphDup)
            if currlb < ub:
                vy = maxDeg(graphDup)
                f.append((vy[0], 0, (vx, state)))
                f.append((vy[0], 1, (vx, state)))
            else:
                bt = True

        if bt:
            if f:
                #nnp = f[-1][2]
                if f[-1][2] in current:
                    i = current.index(f[-1][2]) + 1
                    while i < len(current):
                        currN, currState = current.pop()
                        graphDup.add_node(currN)

                        currvcn = list(map(lambda t: t[0], current))
                        for n in graph.neighbors(currN):
                            if n in graphDup.nodes() and n not in currvcn:
                                graphDup.add_edge(n, currN)
                elif f[-1][2] == (-1, -1):
                    current.clear()
                    graphDup = graph.copy()

        end = time.time()
        t = end - s

    # keep only vertices that are in the cover; removing from `best`
    # while iterating over it would skip elements
    best = [v for v in best if v[1] != 0]

    vc = []
    for v, d in best:
        vc.append(v)

    output_file(inst, cutoff, len(vc), vc, sol)
Example #26
def create_graph(from_seconds=30):
    D = nx.DiGraph()
    return nx.parse_adjlist(extract_nodes(from_seconds), create_using=D)
Example #27
        for record in tweet:
            users_topico.append(record['user']['id'])

        users_filter = db.user.find({"id": {"$in": users_topico}})

        # build the adjacency list as strings
        adj_list = []

        for user in users_filter:
            followers = ""
            if user['followers']:
                followers = " " + " ".join(str(x) for x in user['followers'])
            adj_list.append(str(user['id']) + followers)

        # build the graph from the strings
        g = nx.parse_adjlist(adj_list, nodetype=int)

        # compute degree centrality
        degree_centrality = nx.degree_centrality(g)
        # closeness_centrality = nx.closeness_centrality(g)
        # betweenness_centrality = nx.betweenness_centrality(g)
        # eigenvector_centrality = nx.eigenvector_centrality(g)

        # find the user with the highest centrality
        max_user = max(degree_centrality.keys(), key=lambda k: degree_centrality[k])
        user = db.user.find_one({u'id': max_user})

if __name__ == '__main__':

    if len(sys.argv) != 2:
        print "Missing topic"
Example #28
def draw_graph_from_adjlist(adjlist):
    lines = [' '.join(str(x) for x in line) for line in adjlist]
    g = nx.parse_adjlist(lines, create_using=nx.DiGraph(), nodetype=int)
    pos = nx.drawing.nx_pydot.graphviz_layout(g, prog='dot')
    nx.draw(g, pos, with_labels=True, arrows=True)
    plt.show()
Example #29
def read_txt(f):
	graph = nx.parse_adjlist(f)
	return process_graph(graph)
Example #30
#!/usr/bin/python
import sys
import csv
import networkx as nx

# for line in sys.stdin:
#     print line
    # edge_list = [node[0].replace("\t",",") for node in line]
    # G = nx.parse_adjlist(edge_list, delimiter=",", nodetype=int)
    # print G
input_path = sys.argv[1]  # argv[0] is this script; the data file is expected as the first argument

with open(input_path, "r") as connect:
    connections = csv.reader(connect, delimiter="\n")
    edge_list = [node[0].replace("\t",",") for node in connections]
    G = nx.parse_adjlist(edge_list, delimiter=",", nodetype=int)
    # print edge_list
    for node in nx.nodes(G):
        for non_con in nx.non_neighbors(G, node):
            for result in nx.common_neighbors(G, node, non_con):
                print('{}\t{}\t{}'.format(node, non_con, 1))
Example #31
def load_network(input_data):
    # Create country_decade feature
    dt_collect = input_data[['eventid', 'country_txt', 'iyear', 'gname']].copy()
    dt_collect["decade"] = (dt_collect['iyear'] // 10) * 10
    dt_collect['country_decade'] = dt_collect[
        'country_txt'] + '_' + dt_collect['decade'].map(str) + 's'
    dt_collect = dt_collect[dt_collect.gname != 'Unknown']

    # Create a country_decade edgelist
    gnames_country_decade = dt_collect.groupby(['gname',
                                                'country_decade']).agg({
                                                    'eventid':
                                                    'count'
                                                }).reset_index()
    gnames_country_decade_edgelist = pd.merge(gnames_country_decade,
                                              gnames_country_decade,
                                              on='country_decade',
                                              how='left')
    gnames_country_decade_edgelist.drop(['eventid_x', 'eventid_y'],
                                        axis=1,
                                        inplace=True)
    gnames_country_decade_edgelist.columns = [
        'source', 'country_decade', 'target'
    ]
    gnames_country_decade_edgelist = gnames_country_decade_edgelist[
        gnames_country_decade_edgelist.source !=
        gnames_country_decade_edgelist.target]

    G_country_decade = nx.from_pandas_edgelist(gnames_country_decade_edgelist,
                                               source='source',
                                               target='target')

    # Create edgelist from the related column
    dt_collect = input_data['related'].dropna()
    gname_event_mapping = input_data[['eventid', 'gname']].drop_duplicates()
    gname_event_mapping.eventid = gname_event_mapping.eventid.astype(str)

    G_related = nx.parse_adjlist(dt_collect.values,
                                 delimiter=', ')  # attacks that are related
    df_related = nx.to_pandas_edgelist(G_related)
    df_related.replace(' ', '', regex=True, inplace=True)
    df_source_gname = pd.merge(df_related,
                               gname_event_mapping,
                               how='left',
                               left_on='source',
                               right_on='eventid')
    df_source_gname.rename(columns={'gname': 'gname_source'}, inplace=True)
    df_target_gname = pd.merge(df_source_gname,
                               gname_event_mapping,
                               how='left',
                               left_on='target',
                               right_on='eventid')
    df_target_gname.rename(columns={'gname': 'gname_target'}, inplace=True)

    # Filtering and cleaning
    gnames_relations_edgelist = df_target_gname[
        df_target_gname.gname_source != df_target_gname.gname_target]
    gnames_relations_edgelist = gnames_relations_edgelist[
        gnames_relations_edgelist.gname_source != 'Unknown']
    gnames_relations_edgelist = gnames_relations_edgelist[
        gnames_relations_edgelist.gname_target != 'Unknown']
    gnames_relations_edgelist = gnames_relations_edgelist[[
        'gname_source', 'gname_target'
    ]]
    gnames_relations_edgelist.dropna(inplace=True)

    G_rel = nx.from_pandas_edgelist(gnames_relations_edgelist,
                                    source='gname_source',
                                    target='gname_target')

    # Merging two graphs
    G = nx.compose(G_country_decade, G_rel)

    return G
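The 'related' column is parsed directly as an adjacency list with ', ' as the delimiter. A minimal illustration with made-up event ids:

import networkx as nx

related = ["197001010001, 197001010002", "197001010002, 197001010003"]
G_related = nx.parse_adjlist(related, delimiter=', ')
print(sorted(G_related.edges()))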
Example #32
    def get_user_to_friends_graph(self, user):
        query = self.user_friends_graph_collection.find_one({'user': user})
        graph = nx.parse_adjlist(query['adj_list'])

        return graph
Example #33
def load_network(input_data):
    # Create country_decade feature
    dt_collect = input_data[["eventid", "country_txt", "iyear", "gname"]]
    dt_collect["decade"] = (dt_collect["iyear"] // 10) * 10
    dt_collect["country_decade"] = (dt_collect["country_txt"] + "_" +
                                    dt_collect["decade"].map(str) + "s")
    dt_collect = dt_collect[dt_collect.gname != "Unknown"]

    # Create a country_decade edgelist
    gnames_country_decade = (dt_collect.groupby(["gname",
                                                 "country_decade"]).agg({
                                                     "eventid":
                                                     "count"
                                                 }).reset_index())
    gnames_country_decade_edgelist = pd.merge(gnames_country_decade,
                                              gnames_country_decade,
                                              on="country_decade",
                                              how="left")
    gnames_country_decade_edgelist.drop(["eventid_x", "eventid_y"],
                                        axis=1,
                                        inplace=True)
    gnames_country_decade_edgelist.columns = [
        "source", "country_decade", "target"
    ]
    gnames_country_decade_edgelist = gnames_country_decade_edgelist[
        gnames_country_decade_edgelist.source !=
        gnames_country_decade_edgelist.target]

    G_country_decade = nx.from_pandas_edgelist(gnames_country_decade_edgelist,
                                               source="source",
                                               target="target")

    # Create edgelist from the related column
    dt_collect = input_data["related"]
    dt_collect.dropna(inplace=True)
    gname_event_mapping = input_data[["eventid", "gname"]].drop_duplicates()
    gname_event_mapping.eventid = gname_event_mapping.eventid.astype(str)

    G_related = nx.parse_adjlist(dt_collect.values,
                                 delimiter=", ")  # attacks that are related
    df_related = nx.to_pandas_edgelist(G_related)
    df_related.replace(" ", "", regex=True, inplace=True)
    df_source_gname = pd.merge(
        df_related,
        gname_event_mapping,
        how="left",
        left_on="source",
        right_on="eventid",
    )
    df_source_gname.rename(columns={"gname": "gname_source"}, inplace=True)
    df_target_gname = pd.merge(
        df_source_gname,
        gname_event_mapping,
        how="left",
        left_on="target",
        right_on="eventid",
    )
    df_target_gname.rename(columns={"gname": "gname_target"}, inplace=True)

    # Filtering and cleaning
    gnames_relations_edgelist = df_target_gname[
        df_target_gname.gname_source != df_target_gname.gname_target]
    gnames_relations_edgelist = gnames_relations_edgelist[
        gnames_relations_edgelist.gname_source != "Unknown"]
    gnames_relations_edgelist = gnames_relations_edgelist[
        gnames_relations_edgelist.gname_target != "Unknown"]
    gnames_relations_edgelist = gnames_relations_edgelist[[
        "gname_source", "gname_target"
    ]]
    gnames_relations_edgelist.dropna(inplace=True)

    G_rel = nx.from_pandas_edgelist(gnames_relations_edgelist,
                                    source="gname_source",
                                    target="gname_target")

    # Merging two graphs
    G = nx.compose(G_country_decade, G_rel)

    return G
Example #34
 def __init__(self, adjlist, root=0):
     self.__root = root
     self.__graph = nx.parse_adjlist(adjlist,
                                     create_using=nx.DiGraph,
                                     nodetype=int)
Example #35
			for (node, status, time_step) in new_infected: 
				state_dict[node] = (status, time_step)

			if len(new_infected) == 0:
				break
		
		for key, (a, b) in state_dict.items():
			if a == 1:
				print("%s %s %s" % (iteration, key, b))

urls = ["http://people.seas.harvard.edu/~yaron/cs284rFall2013/data/epinions.txt"]  # also available: "http://people.seas.harvard.edu/~yaron/cs284rFall2013/data/enron.txt", "http://people.seas.harvard.edu/~yaron/cs284rFall2013/data/livejournal.txt"
for url in urls:
	line_array = download_graphs(url)
	graph = nx.parse_adjlist(line_array, nodetype=int)
	voter_model(graph, 1000)
	title = url

	#make_graphs(graph, title)
Example #36
 def test_clustering(self):
     adjlist = self.test_case[0].strip().split('\n')
     g = networkx.parse_adjlist(adjlist, create_using=PrecedenceGraph())
     for a, b in zip(self.test_case[1], g.clustering()):
         self.assertSetEqual(set(a), set(b))
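create_using also works with custom graph subclasses, as with PrecedenceGraph above: parse_adjlist fills the supplied instance and returns it. A minimal sketch with a stand-in subclass:

import networkx as nx

class MyGraph(nx.Graph):
    pass

g = nx.parse_adjlist(["a b", "b c"], create_using=MyGraph())
print(type(g).__name__, sorted(g.edges()))  # MyGraph [('a', 'b'), ('b', 'c')]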
Example #37
    def load(self, filename=None):
        '''
        Load adjacency list and data from file. This will remove the graph 
        created by the editor in the current session.
        
        Lines at the beginning starting with # are ignored. 
        The adjacency list for the matrix is first.
        The line following DATA that contains a list of all the nodes with 
        attributes must be on a single line (no line breaks). 
        It should be the output of graph.nodes(data=True).
        Lines after this line are ignored.
        
        Example file:
        #weighted_graph.py
        # GMT Fri Aug 16 22:24:35 2013
        #
        N0 N1 N2 N3
        N1 
        N2 
        N3 

        DATA
        [('N0', {'weight': -3, 'parent': -1}), ('N1', {'weight': -3, 'parent': 
        0}), ('N2', {'weight': -3, 'parent': 0}), ('N3', {'weight': -3, 
        'parent': 0})]
        '''
        if not filename: 
            # open file options
            options = {}
            options['defaultextension'] = '.txt'
            options['filetypes'] = [('all files', '.*'), ('text files', '.txt')]
            filename = tkFileDialog.askopenfilename(**options)

        if filename == '': # canceled
            return
        # attempt to open file
        try:
            adjfile = open(filename, 'r')
            self.info = filename
        except IOError:
            if self.gui:
                tkMessageBox.showwarning('Open file',
                                         'Cannot open file %s. ' % filename
                                         + 'Aborting operation; please try again.')
            raise IOError('failed to open file')
        # get the adjacency list
        adjlist = [] # list of strings
        try:
            while 1:
                line = adjfile.readline()
                if not line: # 'DATA' line should come before EOF
                    raise IOError('failed to load - possibly no graph data')
                if line[:4] == 'DATA' or line[:4] == 'data':
                    break
                if line[0] == '#' or line == '' or line == '\n': # skip
                    continue
                adjlist.append(line[:-1]) # don't want \n char
            self.graph = nx.parse_adjlist(adjlist) # got graph
            self.num_nodes = len(self.graph.nodes(data=False))            
            if not is_forest(self.graph):
                raise ValueError('not a forest (disjoint union of trees)')
            missing_nodes = self.missing_nodes()
            if missing_nodes != set([]):
                raise ValueError('missing node(s)')
            # get the node data- must be preceded by a line starting with 'DATA'
            while 1:
                node_data = adjfile.readline()
                if not node_data: # EOF
                    raise IOError('no data')
                if node_data != '' and node_data != '\n':
                    break
            # use eval to convert node_data (string) to a list; make sure it is a list
            if node_data.strip()[0] != '[' or node_data.strip()[-1] != ']':
                raise ValueError('cannot parse data')
            # add node attributes (weight, parent)
            for node in eval(node_data):
                for attr in node[1].keys():
                    self.graph.node[node[0]][attr] = node[1][attr] 
                    # node[1] is a dict of attr
            # update self.nodes (keep in order)
            self.nodes = []
            for index in range(self.num_nodes):
                self.nodes.append('N%i' % index)
        except Exception as error:
            if self.gui:
                tkMessageBox.showwarning('Loading', 'Loading failed - %s%s\n%s'\
                                %(type(error),filename, traceback.format_exc()))
            traceback.print_exc()
            return
        # update graph control options
        if self.gui:
            self.n_parent_opt = range(-1, self.num_nodes)
            self.n_parentmenu = OptionMenu(self.frame, self.n_parent_var, 
                                           *self.n_parent_opt)
            self.n_parentmenu.grid(row=1, column=1)
            self.e_node_opt = range(self.num_nodes)
            self.e_nodemenu = OptionMenu(self.frame, self.e_node_var, 
                                         *self.e_node_opt)        
            self.e_nodemenu.grid(row=4, column=1)        
            self.e_parent_opt = ['same']
            self.e_parent_opt.extend(range(-1, self.num_nodes))
            self.e_parentmenu = OptionMenu(self.frame, self.e_parent_var, 
                                           *self.e_parent_opt)                
            self.e_parentmenu.grid(row=4, column=2)
            
            self.del_node.destroy()
            self.del_node = Label(self.frame,text='Node #%i'%(self.num_nodes-1))
            self.del_node.grid(row=6, column=1)

            self.update_graph()
    print('Graph data successfully loaded from %s' % filename)
Example #38
 def convert_networkx(self):
     '''Saves as a networkx graph object'''
     adj_list = [' '.join(map(str, i)) for i in self.graph]
     self.G = nx.parse_adjlist(adj_list)
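A short usage sketch of the same conversion outside the class (graph contents hypothetical):

import networkx as nx

graph = [[0, 1, 2], [1, 2], [2]]                  # node followed by neighbours
adj_list = [' '.join(map(str, i)) for i in graph]
G = nx.parse_adjlist(adj_list)                    # node labels stay strings
print(sorted(G.edges()))                          # [('0', '1'), ('0', '2'), ('1', '2')]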