def read_net(self, net_file, net_type):
    '''
    Read from file
    Create networkX graph object (directed or undirected)
    '''
    if net_file is None or not os.path.isfile(net_file):
        stderr_write(['WARN: Missing net file. Empty network created.'])
        if net_type == 'undir':
            self.g = nx.Graph()
            self.directed = False
        else:
            self.g = nx.DiGraph()
            self.directed = True
        return
    if net_type == 'undir':
        self.g = nx.parse_edgelist(file_line_iterator(net_file),
                                   data=(('weight', float),),
                                   create_using=nx.Graph())
        self.directed = False
    else:
        self.g = nx.parse_edgelist(file_line_iterator(net_file),
                                   data=(('weight', float),),
                                   create_using=nx.DiGraph())
        self.directed = True
def add_edge(G, val): """ :param G: The original graph, used to check whether the new graph is a valid network. :param val: estimated cost of choosing each vertex. :return: possible best result """ best_graph = nx.Graph() edge_list = [] new_edge = "" node = val.index(max(val)) new_edge += str(node) max_val = -float("inf") max_neighbour = 0 for neighbour in G.neighbors(node): if val[neighbour] > max_val and neighbour != node: max_val = val[neighbour] max_neighbour = neighbour new_edge += " " + str(max_neighbour) new_edge += " " + str(G.get_edge_data(node, max_neighbour).get("weight")) edge_list.append(new_edge) new_graph = nx.parse_edgelist(edge_list, nodetype=int, data=(('weight', float), )) val[node], val[max_neighbour] = -float("inf"), -float("inf") if is_valid_network(G, new_graph): # if average_pairwise_distance_fast(new_graph) < best_so_far: # best_so_far = average_pairwise_distance_fast(new_graph) best_graph = copy.deepcopy(new_graph) return best_graph while True: new_edge = "" max_val = -float("inf") max_neighbour = 0 max_node = 0 check = False for node in new_graph.nodes(): for neighbour in G.neighbors(node): if val[neighbour] > max_val: max_val = val[neighbour] max_neighbour = neighbour max_node = node check = True if not check: break new_edge += str(max_node) new_edge += " " + str(max_neighbour) new_edge += " " + str( G.get_edge_data(max_node, max_neighbour).get("weight")) edge_list.append(new_edge) new_graph = nx.parse_edgelist(edge_list, nodetype=int, data=(('weight', float), )) val[max_node], val[max_neighbour] = -float("inf"), -float("inf") if is_valid_network(G, new_graph): best_graph = copy.deepcopy(new_graph) break return best_graph
def _generate_graph(self, directed=True):
    """Builds a networkx graph from the existing json_graph"""
    if directed:
        graph = nx.DiGraph()
    else:
        graph = nx.Graph()
    graph = nx.parse_edgelist(
        [i["source"] + "," + i["target"] for i in self.json_graph["links"]],
        delimiter=",",
        nodetype=str,
        create_using=graph)
    self.g = graph
    return self
def test_1(self):
    orbit = nx.parse_edgelist(
        ['COM)B', 'B)C', 'C)D', 'D)E', 'E)F', 'B)G',
         'G)H', 'D)I', 'E)J', 'J)K', 'K)L'],
        delimiter=')')
    self.assertEqual(total_orbit_count(orbit), 42)
def test_2(self):
    orbit = nx.parse_edgelist(
        ['COM)B', 'B)C', 'C)D', 'D)E', 'E)F', 'B)G',
         'G)H', 'D)I', 'E)J', 'J)K', 'K)L', 'K)YOU', 'I)SAN'],
        delimiter=')')
    self.assertEqual(shortest_orbit_transfer(orbit), 4)
def test_solveMaze ( filenum, prefix=None, verbose=False ): command = prefix if prefix else "." command += "/solveMaze {}tests/test{}.txt {}queries/query{}.txt".format(prefix if prefix else "",filenum,prefix if prefix else "",filenum) if verbose: print (command) try: with open("{}queries/query{}.txt".format(prefix if prefix else "",filenum), "r") as qfile: source = int(qfile.readline()) target = int(qfile.readline()) with open("{}edgelists/edgelist{}.txt".format(prefix if prefix else "",filenum), "r") as edgelistfile: mazeGraph = nx.read_edgelist(edgelistfile,nodetype=int) except EnvironmentError: # parent of IOError, OSError print ("edgelists/edgelist{}.txt missing".format(filenum)) try: result = subprocess.check_output(command, shell=True).decode('ascii').strip() lines = result.split('\n') resultGraph = nx.parse_edgelist(lines,nodetype=int) for edge in resultGraph.edges: # print(edge) assert edge in mazeGraph.edges, "The edge {} is not part of the original graph.".format(edge) assert(approx.local_node_connectivity(resultGraph,source,target)==1), "The edges you returned do not connect the source and target nodes listed in queries/query{}.txt.".format(filenum) return True except subprocess.CalledProcessError as e: # print (e.output) print ("Calling ./solveMaze returned non-zero exit status.") except AssertionError as e: print (result) print (e.args[0]) return False
def read_input_file(path, max_size=None):
    """
    Parses and validates an input file

    :param path: str, a path
    :return: networkx Graph if the input is well formed, AssertionError thrown otherwise
    """
    with open(path, "r") as fo:
        n = fo.readline().strip()
        assert n.isdigit()
        n = int(n)
        lines = fo.read().splitlines()

    # validate lines
    for line in lines:
        tokens = line.split(" ")

        assert len(tokens) == 3
        assert tokens[0].isdigit() and int(tokens[0]) < n
        assert tokens[1].isdigit() and int(tokens[1]) < n
        assert bool(re.match(r"(^\d+\.\d{1,3}$|^\d+$)", tokens[2]))
        assert 0 < float(tokens[2]) < 100

    G = nx.parse_edgelist(lines, nodetype=int, data=(("weight", float),))
    G.add_nodes_from(range(n))

    assert nx.is_connected(G)

    if max_size is not None:
        assert len(G) <= max_size

    return G
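# A minimal usage sketch (not from the original project): read_input_file above expects a
# node count on the first line followed by "u v weight" lines, with node ids below n and
# weights strictly between 0 and 100 with at most three decimals. The invented lines below
# satisfy those checks and are parsed the same way the function parses them.
import networkx as nx

sample_lines = ["0 1 12.5", "1 2 3.25", "0 3 99.999"]
G = nx.parse_edgelist(sample_lines, nodetype=int, data=(("weight", float),))
G.add_nodes_from(range(4))
assert nx.is_connected(G)
assert G[0][1]["weight"] == 12.5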
def test_grid_to_navigation_graph_without_data():
    # 012
    # 345
    grid = Grid(region=None, cols=3, rows=2)
    graph = grid_to_navigation_graph(grid)
    expected = nx.parse_edgelist(
        """
        0 1 KEY_RIGHT
        0 3 KEY_DOWN
        1 0 KEY_LEFT
        1 2 KEY_RIGHT
        1 4 KEY_DOWN
        2 1 KEY_LEFT
        2 5 KEY_DOWN
        3 0 KEY_UP
        3 4 KEY_RIGHT
        4 1 KEY_UP
        4 3 KEY_LEFT
        4 5 KEY_RIGHT
        5 2 KEY_UP
        5 4 KEY_LEFT
        """.split("\n"),
        create_using=nx.DiGraph(),
        nodetype=int,
        data=[("key", str)])
    assert sorted(expected.edges(data=True)) == sorted(graph.edges(data=True))
def test_grid_to_navigation_graph():
    grid = Grid(region=None, data=["ABC", "DEF"])
    graph = grid_to_navigation_graph(grid)
    expected = nx.parse_edgelist(
        """
        A B KEY_RIGHT
        A D KEY_DOWN
        B A KEY_LEFT
        B C KEY_RIGHT
        B E KEY_DOWN
        C B KEY_LEFT
        C F KEY_DOWN
        D A KEY_UP
        D E KEY_RIGHT
        E B KEY_UP
        E D KEY_LEFT
        E F KEY_RIGHT
        F C KEY_UP
        F E KEY_LEFT
        """.split("\n"),
        create_using=nx.DiGraph(),
        data=[("key", str)])
    assert sorted(expected.edges(data=True)) == sorted(graph.edges(data=True))
    assert graph["A"]["B"] == {"key": "KEY_RIGHT"}
    assert graph["B"] == {
        "A": {"key": "KEY_LEFT"},
        "C": {"key": "KEY_RIGHT"},
        "E": {"key": "KEY_DOWN"},
    }
def main(): pos_dict = {} # stores cities and their coords edge_dict = {} # stores edges and their distances list_edge = [] # used to create initial networkx graph with open('data.csv', newline='') as f: reader = csv.reader(f, dialect='unix') # if second value in csv is int, add pos dict, else add edge dict for row in reader: if str.isdigit(row[1]): pos_dict[row[0]] = (int(row[1]), int(row[2])) else: edge_dict[(row[0], row[1])] = float(row[2]) list_edge.append(','.join(row)) # create nx graph using edgelist from csv file G = nx.parse_edgelist(list_edge, delimiter=',', data=(('weight', float),)) # show original map show_graph(G, pos_dict, edge_dict, 'Game of Nodes Map') js = JonSnow(G, 'Trader Town', 'The Wall', pos_dict).search() js_edgelist = [] for i in range(len(js) - 1): js_edgelist.append((js[i], js[i + 1])) print(js_edgelist) # show path Jon Snow took show_graph(G, pos_dict, edge_dict, 'Jon Snow A* Search', js_edgelist, 'Jon Snow', 'b') ww = WhiteWalker(G, pos_dict).search() print(ww) # show path white walkers took show_graph(G, pos_dict, edge_dict, 'White Walker DFS on All Nodes', ww, 'White Walkers', 'r')
def get_embeddings(self, inst, th=1): G = nx.parse_edgelist(self._compose_edge_list(inst.dist_mat, th), create_using=nx.DiGraph(), nodetype=None, data=[('weight', float)]) if self._embedding == 'deepwalk': model = DeepWalk(G, walk_length=10, num_walks=80, workers=1) model.train(window_size=5, iter=3) elif self._embedding == 'node2vec': model = Node2Vec(G, walk_length=10, num_walks=80, p=0.25, q=4, workers=1) # init model model.train(window_size=5, iter=3) # train model elif self._embedding == 'line': model = LINE(G, embedding_size=128, order='second') # init model,order can be ['first','second','all'] model.train(batch_size=1024, epochs=50, verbose=2) # train model elif self._embedding == 'sdne': model = SDNE(G, hidden_size=[256, 128]) # init model model.train(batch_size=3000, epochs=40, verbose=2) # train model elif self._embedding == 'struc2vec': model = Struc2Vec(G, 10, 80, workers=4, verbose=40, ) # init model model.train(window_size=5, iter=3) # train model else: return self._normalise(inst) ebds = model.get_embeddings() coords = [] for i in range(inst.n): coords.append(ebds[str(i)]) return np.array(coords)
def show(self, filename=''): """ Uses the networkx/matplotlib.pyplot modules to graphically show what network was created. Nodes should have labels. Shows the resultant graph in a temporary window. If [filename] is provided, instead saves result in [filename] """ try: import networkx except ImportError: print "Please install networkx via 'easy_install networkx', 'pip install networkx' or some other method." print "You will not have access to the full functionality of this module until then" sys.exit(1) try: import matplotlib.pyplot as plt except ImportError: print "Please install matplotlib via 'easy_install matplotlib', 'pip install matplotlib' or some other method." print "You will not have access to the full functionality of this module until then" sys.exit(1) string_edges = map(lambda x: "%s %s" % (x[0], x[1]), self.edge_list) graph = networkx.parse_edgelist(string_edges) networkx.draw_circular(graph,prog='neato',width=1,node_size=300,font_size=14,overlap='scalexy') if filename: plt.savefig(filename) else: plt.show()
def build_graph(self):
    """
    convert the edge list to what `nx` can take in
    example of edge line: 1 2\n
    :return:
    """
    if os.path.exists(self.cf.graph_cache):
        self.logging.info("load graph from cache file ...")
        G = nx.read_gpickle(path=self.cf.graph_cache)
        self.logging.info("load {} nodes and {} edges from data".format(
            len(G.nodes), len(G.edges())))
        return G
    else:
        with open(self.cf.edge_file) as handler:
            edge_lines = handler.readlines()
        edge_lines = list(map(lambda x: x.strip(), edge_lines))
        edge_counter = Counter(edge_lines)
        weighted_edges = []
        for edge, weight in edge_counter.items():
            # freshness between 2 nodes is initialized to 1
            weighted_edges.append("{} {} 1".format(edge, weight))
        G = nx.parse_edgelist(weighted_edges, nodetype=int,
                              data=(('weight', float), ('freshness', float)))
        self.logging.info("load {} nodes and {} edges from data".format(
            len(G.nodes), len(G.edges())))
        # save to cache file
        nx.write_gpickle(G, path=self.cf.graph_cache)
        return G
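# Illustration of the Counter-based weighting above (file contents invented): repeated
# edge lines collapse into a single edge whose weight is the repetition count, with
# freshness initialised to 1.
from collections import Counter
import networkx as nx

edge_lines = ["1 2", "1 2", "1 2", "2 3"]
weighted = ["{} {} 1".format(edge, count) for edge, count in Counter(edge_lines).items()]
G = nx.parse_edgelist(weighted, nodetype=int,
                      data=(("weight", float), ("freshness", float)))
assert G[1][2]["weight"] == 3.0 and G[1][2]["freshness"] == 1.0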
def convert_mtx(file_in, file_out1, file_out2):
    with open(file_out1, 'w') as fout1:
        with open(file_out2, 'w') as fout2:
            with open(file_in, 'r') as fin:
                lines = fin.readlines()
                comment = True
                i = 0
                while comment:
                    if lines[i][0] == "%":
                        i += 1
                        continue
                    else:
                        comment = False
                graph = nx.parse_edgelist(lines[i + 1:],
                                          create_using=nx.Graph(),
                                          data=False)
                remove_self_loops(graph)
                fout1.write(str(graph.number_of_nodes()))
                fout1.write(" ")
                fout1.write(str(graph.number_of_edges()))
                fout1.write("\n")
                for edge in graph.edges():
                    fout1.write(str(edge[0]))
                    fout1.write(" ")
                    fout1.write(str(edge[1]))
                    fout1.write("\n")
                    fout2.write(str(edge[0]))
                    fout2.write(" ")
                    fout2.write(str(edge[1]))
                    fout2.write("\n")
def load_graph():
    Data = open('musae_facebook_edges.csv', "r")
    next(Data, None)  # skip the first line in the input file
    Graphtype = nx.Graph()
    G = nx.parse_edgelist(Data, delimiter=',',
                          create_using=Graphtype, nodetype=str)
    return G
def read_edgelist(filename):
    """Read edgelist.

    Parameters
    ----------
    filename : string
        Edgelist filename

    Returns
    -------
    Networkx Graph
        Networkx Graph formed from input file
    """
    # Split file extension
    name, extension = os.path.splitext(filename)

    # Read file
    if extension == '.edgelist':
        with open(filename, 'r') as edgelist:
            lines = edgelist.readlines()
        G = nx.parse_edgelist(lines[1:])
    else:
        G = nx.read_edgelist(filename)

    return G
def parse_edgelist(graph):
    """Create a `networkx.DiGraph` from a string specification of the graph.

    This is useful when you want to specify part of the keyboard's navigation
    graph programmatically using `stbt.grid_to_navigation_graph` (for the parts
    of the keyboard that are laid out in a grid and behave regularly) but you
    still need to specify some extra edges that behave differently. For
    example::

        letters = stbt.Grid(...)
        space_bar = stbt.Keyboard.parse_edgelist('''
            C SPACE KEY_DOWN
            V SPACE KEY_DOWN
            B SPACE KEY_DOWN
            SPACE C KEY_UP
            SPACE V KEY_UP
            SPACE B KEY_UP
        ''')
        keyboard = stbt.Keyboard(networkx.compose_all([
            stbt.grid_to_navigation_graph(letters),
            space_bar]))

    :param str graph: See the `Keyboard` constructor.

    :returns: A new `networkx.DiGraph` instance.
    """
    return nx.parse_edgelist(graph.split("\n"),
                             create_using=nx.DiGraph(),
                             data=[("key", str)])
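# A short standalone illustration (not from the stb-tester sources) of what the helper
# above returns: each whitespace-separated line becomes one directed edge whose third
# token is stored under the "key" attribute.
import networkx as nx

g = nx.parse_edgelist(["A B KEY_RIGHT", "B A KEY_LEFT"],
                      create_using=nx.DiGraph(), data=[("key", str)])
assert g["A"]["B"]["key"] == "KEY_RIGHT"
assert g["B"]["A"]["key"] == "KEY_LEFT"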
def parse_graph(parse__graph_str_list, graph_type=nx.DiGraph): """ :param parse__graph_str_list: :param graph_type: :param vocabulary: :return: """ parse__graph_list = [] for graph_str in parse__graph_str_list: graph = nx.parse_edgelist(lines=graph_str, nodetype=str, create_using=graph_type) parse__graph_list.append(graph) """ # Building dictionary TODO: Test this nodes = graph.nodes() size = len(vocabulary) # TODO: Using dictionary may cause excessive usage memory consumption for node in nodes: if node not in vocabulary.keys(): size = size + 1 vocabulary[node] = size parse__graph_list.append(graph) """ return parse__graph_list
def test_parse_edgelist_with_data_dict(example_graph):
    G = example_graph
    H = nx.parse_edgelist(
        ["1 2 {'weight': 3}", "2 3 {'weight': 27}", "3 4 {'weight': 3.0}"],
        nodetype=int
    )
    assert nodes_equal(G.nodes, H.nodes)
    assert edges_equal(G.edges(data=True), H.edges(data=True))
def expectation_CM(edgelist):
    # Find highest layer to loop over. Also functions to confirm data is structured coming in.
    m = max(edgelist['layer'])

    # p: an empty list to contain graph objects appended each loop
    p = list()

    for i in range(0, m + 1):
        # sub contains the two node columns from the edgelist for a given layer i
        sub = edgelist[edgelist['layer'] == i][['node1', 'node2']]

        # NetworkX.parse_edgelist() works on a list of strings of format "x y".
        # Here we preprocess for that call.
        lines = list()

        # Iterate over the rows of sub and join the two node values with a " " delimiter
        for _, row in sub.iterrows():
            lines.append(" ".join([str(row['node1']), str(row['node2'])]))

        # Finally, call NetworkX.parse_edgelist on the list of formatted node string pairs
        graph = nx.parse_edgelist(lines)

        # Graph.degree() is an array-like object but has no value extraction method, so we
        # wrap it as a dictionary, extract the values from there, then convert to a list so
        # we can finally wrap it in a NumPy array.
        degrees = np.array(list(dict(graph.degree()).values()))
        degree_total = degrees.sum()

        # NumPy.dot() returns a scalar if we don't shape the vectors beforehand. By reshaping
        # the two vectors to dimensions n x 1 and 1 x n, the resulting matrix is n x n.
        expected = np.dot(degrees.reshape(degrees.size, 1),
                          degrees.reshape(1, degrees.size)) / degree_total

        # Append the resulting NetworkX graph object to the list p
        p.append(nx.from_numpy_matrix(np.asarray(expected), False))

    return p
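# Hypothetical input for expectation_CM above (column names taken from the function): a
# single layer containing one edge between nodes 0 and 1. Both degrees are 1 and the total
# degree is 2, so every entry of the expected matrix is k_i * k_j / 2m = 0.5.
import pandas as pd

edges = pd.DataFrame({"layer": [0], "node1": [0], "node2": [1]})
expected_graphs = expectation_CM(edges)  # one nx.Graph built from the 2x2 expectation matrix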
def test_edgelist(filenum, prefix=None, verbose=False): command = prefix if prefix else "." command += "/edgelist {}tests/test{}.txt".format(prefix if prefix else "", filenum) if verbose: print(command) try: with open( "{}answers/answer{}.txt".format(prefix if prefix else "", filenum), "r") as outfile: answerGraph = nx.read_edgelist(outfile) except EnvironmentError: # parent of IOError, OSError print("answers/answer{}.txt missing".format(filenum)) try: result = subprocess.check_output(command, shell=True).decode('ascii') lines = result.split('\n') resultGraph = nx.parse_edgelist(lines) assert answerGraph.nodes == resultGraph.nodes, "The nodes in your graph don't match the nodes in the graph in answers/answer{}.txt.".format( filenum) assert answerGraph.edges == resultGraph.edges, "The edge list doesn't match answers/answer{}.txt.".format( filenum) return True except subprocess.CalledProcessError as e: # print (e.output) print("Calling ./edgelist returned non-zero exit status.") except AssertionError as e: print(result) print(e.args[0]) return False
def test_parse_edgelist_with_data_list(example_graph):
    G = example_graph
    H = nx.parse_edgelist(
        ["1 2 3", "2 3 27", "3 4 3.0"], nodetype=int, data=(("weight", float),)
    )
    assert nodes_equal(G.nodes, H.nodes)
    assert edges_equal(G.edges(data=True), H.edges(data=True))
def create_graph_mutag(file):
    with open(file, 'r') as f:
        lines = f.read().splitlines()

    # get the indices of the vertex labels, adj list and class
    idx_vertex = lines.index("#v - vertex labels")
    idx_edge = lines.index("#e - edge labels")
    idx_clss = lines.index("#c - Class")

    # node labels
    vl = [int(ivl) for ivl in lines[idx_vertex + 1:idx_edge]]

    edge_list = lines[idx_edge + 1:idx_clss]
    g = nx.parse_edgelist(edge_list, nodetype=int,
                          data=(('weight', float),), delimiter=",")

    for i in range(1, g.number_of_nodes() + 1):
        g.node[i]['labels'] = np.array(vl[i - 1])

    c = int(lines[idx_clss + 1])

    return g, c
def from_edgelist(self, filename, delimiter=None, nodetype=int):
    """
    Loads a Cluster from its edge list stored in a file `filename`
    """
    with open(filename, 'r') as f:
        self.graph = nx.parse_edgelist(lines=f, nodetype=nodetype,
                                       delimiter=delimiter)
def get_commits_graph(path): context = snap.TTableContext() e_schema = snap.Schema() e_schema.Add(snap.TStrTAttrPr("source", snap.atStr)) e_schema.Add(snap.TStrTAttrPr("target", snap.atStr)) e_schema.Add(snap.TStrTAttrPr("weight", snap.atStr)) n_schema = snap.Schema() n_schema.Add(snap.TStrTAttrPr("id", snap.atStr)) n_schema.Add(snap.TStrTAttrPr("username", snap.atStr)) n_schema.Add(snap.TStrTAttrPr("size", snap.atStr)) edgetable = snap.TTable.LoadSS(e_schema, path + '{}_edges.csv'.format(pname), context, ",", snap.TBool(True)) nodetable = snap.TTable.LoadSS(n_schema, path + '{}_nodes.csv'.format(pname), context, ",", snap.TBool(True)) edgeattrv = snap.TStrV() nodeattrv = snap.TStrV() net = snap.ToNetwork(snap.PNEANet, edgetable, "source", "target", edgeattrv, nodetable, "id", nodeattrv, snap.aaFirst) snap.DelSelfEdges(net) snap.SaveEdgeList(net, 'temp/commits_temp_edgelist.csv') Data = open('temp/commits_temp_edgelist.csv', 'r') Graphtype = nx.Graph() G = nx.parse_edgelist(Data, delimiter='\t', create_using=Graphtype, nodetype=int, data=(('weight', float),), comments='#') return G
def import_ppi(self) -> None:
    """Generates a dictionary mapping unique integer identifiers to HGNC symbols
    extracted from the PPI relationship list and compiles the relations into an nx.Graph.
    """
    nodes = set()
    relations = self.__read_ppis()
    for rel in relations:
        nodes.add(rel[0])
        nodes.add(rel[2])

    # Make a dictionary relating symbol to identifier
    node_mapper = {symbol: index + 1 for index, symbol in enumerate(nodes)}
    self.nodes = {
        node_id: {SYMBOL: symbol, MOLECULE: PROTEIN}
        for symbol, node_id in node_mapper.items()
    }

    edge_list = [
        f"{node_mapper[rel[0]]} {node_mapper[rel[2]]} {rel[1].replace(' ', '_')}"
        for rel in relations
    ]
    self.graph = nx.parse_edgelist(edge_list, delimiter=" ", nodetype=int,
                                   data=(('rel_type', str),))
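# Minimal illustration (relation names invented) of how the generated "id id relation"
# strings are consumed: the third token of each line ends up in the rel_type edge attribute.
import networkx as nx

g = nx.parse_edgelist(["1 2 binds", "2 3 inhibits"], delimiter=" ",
                      nodetype=int, data=(("rel_type", str),))
assert g[1][2]["rel_type"] == "binds"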
def create_graph(f_name: str, n: int = 2, sep: str = " ") -> Graph:
    """Creates a graph from a file

    Args:
        f_name: The filename/path
        n: The number of lines at the top of the file to skip
        sep: The value separator in the file

    Returns:
        G: The graph
    """
    data = open(f_name, "r")
    for i in range(n):
        next(data, None)  # skip a line of the input file

    # Define the graph type
    Graphtype = nx.Graph()
    # Define the graph
    G = nx.parse_edgelist(data, delimiter=sep,
                          create_using=Graphtype, nodetype=int)
    return G
def make_graph(V, e):
    '''
    Generate an unimprovable complete graph on |V| vertices.

    Args:
        V: # of vertices
        e: smallest edge length

    Returns:
        G: undirected graph with V vertices and min(edge length) = e
    '''
    def edge_maker(s, d):
        c1 = s
        for c2 in range(s + d, V, d):
            edges.append(str(c1) + ' ' + str(c2) + ' ' + str(format(e * d, '.3f')))
            c1 = c2

    edges = []
    for i in range(V):
        for j in range(1, V):
            if i < j:
                edge_maker(i, j)

    G = nx.parse_edgelist(edges, nodetype=int, data=(("weight", float),))
    G.add_nodes_from(range(V))
    return G
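# Quick check (assumed usage) of make_graph above: chaining edges of every stride d
# between vertices i < j yields the complete graph on V vertices, and each edge (i, j)
# gets length e * (j - i), rounded to three decimals.
G = make_graph(4, 1.0)
assert G.number_of_edges() == 4 * 3 // 2
assert G[0][3]["weight"] == 3.0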
def main():
    """Entry point for the Chinese Whispers command-line interface."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--weighting', choices=WEIGHTING.keys(), default='lin')
    parser.add_argument('--delimiter', default='\t')
    parser.add_argument('--iterations', type=int, default=20)
    parser.add_argument('--seed', type=int, default=None)
    parser.add_argument('--version', action='version',
                        version='Chinese Whispers v' + version)
    parser.add_argument('edges', type=argparse.FileType('r', encoding='UTF-8'))
    args = parser.parse_args()

    lines = (line.rstrip() for line in args.edges)

    # noinspection PyPep8Naming
    G = nx.parse_edgelist(lines, delimiter=args.delimiter, comments='\n',
                          data=[('weight', float)])

    chinese_whispers(G, args.weighting, args.iterations, args.seed)

    for label, elements in aggregate_clusters(G).items():
        label = str(label)
        length = str(len(elements))
        elements = ', '.join(elements)
        print('\t'.join((label, length, elements)))
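# Example input lines for the CLI above (contents invented): tab-delimited
# "word word weight" rows, parsed exactly as main() does before clustering.
import networkx as nx

lines = ["apple\tbanana\t1.0", "banana\tcherry\t0.5"]
G = nx.parse_edgelist(lines, delimiter="\t", comments="\n", data=[("weight", float)])
assert G["apple"]["banana"]["weight"] == 1.0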
def fr(fn):
    with open(fn) as f:
        n, e = map(int, f.readline().strip().split(" "))
        G = nx.parse_edgelist(f.readlines(), create_using=nx.DiGraph(), nodetype=int)
        assert G.number_of_nodes() == n
        assert G.number_of_edges() == e
        return G
def fr(fn):
    with open(fn) as f:
        _t = f.read().strip().split("\n\n")
        n = int(_t[0].strip())
        lst = [l.strip().split("\n") for l in _t[1:]]
        N = [nx.parse_edgelist(l, create_using=nx.DiGraph(), nodetype=int) for l in lst]
        assert len(N) == n
        return N
def df_to_nxgraph(df):
    graph_csv = df.to_csv(sep=" ", index=False, header=False)
    with open("./graph.csv", "w") as f:
        f.write(graph_csv)
    graph = nx.parse_edgelist(graph_csv.split("\n"), nodetype=int,
                              data=(('weight', float),))
    return graph
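# Hypothetical usage of df_to_nxgraph above: a three-column DataFrame of (u, v, weight)
# rows round-trips into a weighted undirected graph (and is also written to ./graph.csv
# as a side effect).
import pandas as pd

df = pd.DataFrame([[1, 2, 0.5], [2, 3, 1.5]])
g = df_to_nxgraph(df)
assert g[1][2]["weight"] == 0.5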
def fr(f):
    with open(f) as f:
        t = f.read().strip().split("\n\n")
        n = int(t[0])
        lines = [x.split("\n")[1:] for x in t[1:]]
        N = [nx.parse_edgelist(l, create_using=nx.DiGraph(), nodetype=int) for l in lines]
        assert len(N) == n
        return N
def fr(fn):
    with open(fn) as f:
        lines = f.readlines()
        n, e = map(int, lines[0].strip().split(" "))
        G = nx.parse_edgelist(lines[1:], create_using=nx.Graph(), nodetype=int)
        [G.add_node(i) for i in set(range(1, n + 1)) - set(G.nodes())]
        assert len(G.edges()) == e
        return G
def fr(f):
    with open(f) as f:
        lst = re.split("\n\d+ \d+\n", f.read())
        lst = [l.strip().split("\n") for l in lst[1:]]
        first_edges = [map(int, l[0].split(" "))[:2] for l in lst]
        N = [nx.parse_edgelist(l, create_using=nx.DiGraph(), nodetype=int,
                               data=(('weight', int),)) for l in lst]
        return N, first_edges
def read_mir_target_sif(sif_f):
    with open(sif_f, 'rb') as handle:
        lines = handle.read().splitlines()
    edges = [x.split() for x in lines]
    edges_data = ["%s %s %s" % (x[0], x[2], x[1]) for x in edges]
    g = nx.parse_edgelist(edges_data, nodetype=str, data=(('strand', float),))
    return g
def allFiles(groupName):
    fdRead = open('%s' % groupName, 'r')        # open file with edge list
    lines = fdRead.readlines()                  # read all lines
    fdRead.close()                              # close file
    G = nx.parse_edgelist(lines, nodetype=int)  # build graph from edge list file
    nx.write_adjlist(G, 'adj_list.txt')         # write graph as adjacency list to file
    getPartitions(G)                            # get partitions of the graph
    edgesInCommunitiesGraph('adj_list.txt', 'partitions.txt')  # add edges to graph of partitions
def fr(f):
    with open(f) as f:
        n = int(f.readline().strip())
        lst = []
        for _ in range(n):
            _l = []
            n, e = map(int, f.readline().strip().split(" "))
            for __ in range(e):
                _l.append(f.readline().strip())
            lst.append(_l)
        return [nx.parse_edgelist(l, create_using=nx.DiGraph(), nodetype=int) for l in lst]
def graphfromsif(sif):
    s = pd.read_table(sif, header=None, delim_whitespace=True)
    s = s[[0, 2, 1]]
    s = s.values.tolist()

    def formatfunction(lista):
        return "{} {} {}".format(lista[0], lista[1], lista[2])

    reformat = []
    for elem in s:
        reformat.append(formatfunction(elem))

    g = nx.parse_edgelist(reformat, nodetype=str, data=(('weight', float),))
    return g
def readFromFileW(path):
    with open(path, 'r') as f:
        tab = f.readlines()
    edgelist = []
    x = 0
    for i in tab:
        i = str(x) + " " + i[1:-2].replace(',', ' ' + str(x) + ' ').replace('(', " {'weight' : ").replace(')', '}')
        k = i.rsplit('} ')
        edge = []
        for z in k:
            if z[-1] != '}':
                z += '}'
            edgelist.append(z)
        x += 1
    return nx.parse_edgelist(edgelist, nodetype=int)
def handle_file(path): i = open(path, "r") lines = i.readlines() g = nx.parse_edgelist(lines[1:], nodetype=int, data=(('weight', float),)) x1, x2, vol = lines[0].rstrip("\n").split(" ") i.close() x1 = int(x1) x2 = int(x2) vol = float(vol) g.add_node(0) g.add_weighted_edges_from([(0, x1, 0), (0, x2, 0)]) calculate(g, vol) # create directed graph for presentation result = nx.DiGraph() result.add_nodes_from(nx.nodes(g)) for x1, x2 in nx.edges(g): sx1, sx2 = tuple(sorted((x1, x2))) if g[x1][x2]['current'] > 0: result.add_edge(sx1, sx2, current=g[x1][x2]['current']) else: result.add_edge(sx2, sx1, current=-g[x1][x2]['current']) pos = graphviz_layout(result, prog='sfdp',) nx.draw_networkx_nodes(result, pos=pos, node_size=250, node_color='white') edges = nx.edges(result) currents_dict = nx.get_edge_attributes(result, 'current') currents_list = tuple(currents_dict[e] for e in edges) widths_list = tuple(0.3 + 4 * x/max(currents_list) for x in currents_list) colors = ("green", "yellow", "red") colors_list = [colors[(math.floor(len(colors)*x/max(widths_list) - 0.1))] for x in widths_list] for key, val in currents_dict.items(): currents_dict[key] = "{:.1f}".format(val) nx.draw_networkx_edges(result, pos=pos, edgelist=edges, width=widths_list, edge_color=colors_list) bbox_props = dict(boxstyle="square,pad=0", fc="none", ec='none', lw=2) nx.draw_networkx_edge_labels(result, pos=pos, edge_labels=currents_dict, font_size=8, bbox=bbox_props) nx.draw_networkx_labels(result, pos=pos, font_size=8) plt.get_current_fig_manager().full_screen_toggle() plt.show()
def inputConvert():
    fdread = open('txtGraphs/Edges.csv', 'r')   # open .csv file
    fdWrite = open('edgeList.txt', 'w')         # open write file
    for line in fdread:                         # for every line in the csv file
        ids = line.split('\t')                  # split line on tabs
        ids[0] = int(ids[0])                    # convert element 0 to int
        ids[1] = int(ids[1])                    # convert element 1 to int
        fdWrite.write("{} {}\n".format(ids[0], ids[1]))  # write elements 0 and 1 to file
    fdread.close()                              # close file
    fdWrite.close()                             # close file
    fdRead = open('edgeList.txt', 'r')          # open file with edge list
    lines = fdRead.readlines()                  # read all lines
    fdRead.close()                              # close file
    G = nx.parse_edgelist(lines, nodetype=int)  # build graph from edge list file
    nx.write_adjlist(G, 'adj_list.txt')         # write graph as adjacency list to file
    getPartitions(G)                            # get partitions of the graph
    edgesInCommunitiesGraph('adj_list.txt', 'partitions.txt')  # add edges to graph of partitions
def edge2w(edgelist, nodetype=str):
    """
    Create a PySAL W object from an edgelist

    Parameters
    ----------
    edgelist: iterable of edge strings
    nodetype: type for node (str, int, float)

    Returns
    -------
    W: PySAL W

    Example
    -------
    >>> lines = ["1 2", "2 3", "3 4", "4 5"]
    >>> w = edge2w(lines)
    >>> w.n
    5
    >>> w.neighbors["2"]
    ['1', '3']
    >>> w = edge2w(lines, nodetype=int)
    >>> w.neighbors[2]
    [1, 3]
    >>> lines = ["1 2 {'weight':1.0}", "2 3 {'weight':0.5}", "3 4 {'weight':3.0}"]
    >>> w = edge2w(lines, nodetype=int)
    >>> w.neighbors[2]
    [1, 3]
    >>> w.weights[2]
    [1.0, 0.5]
    """
    G = nx.parse_edgelist(edgelist, nodetype=nodetype)
    return dwg2w(G)
def show(self,filename=''): try: import networkx except ImportError: print "Please install networkx via 'easy_install networkx', 'pip install networkx' or some other method." print "You will not have access to the full functionality of this module until then" sys.exit(0) try: import matplotlib.pyplot as plt except ImportError: print "Please install matplotlib via 'easy_install matplotlib', 'pip install matplotlib' or some other method." print "You will not have access to the full functionality of this module until then" sys.exit(0) string_edges = map(lambda x: "%s %s" % (x[0], x[1]), self.mst) graph = networkx.parse_edgelist(string_edges) networkx.draw_circular(graph,prog='neato',width=1,node_size=300,font_size=14,overlap='scalexy') if filename: plt.savefig(filename) else: plt.show()
def fr(fn):
    with open(fn) as f:
        f.readline()
        G = nx.parse_edgelist(f.readlines(), create_using=nx.DiGraph(),
                              nodetype=int, data=(('weight', int),))
        return G
import networkx as nx import pickle import operator ################# SETTING UP OF NETWORK ################# edge_list = pickle.load(open("ingredients.edgelist", "rb")) G = nx.parse_edgelist(edge_list, nodetype=str, data=(("weight", int),)) ################# INPUTTING RECIPE ################# print "Enter ingredients in the recipe" print "with the ingredient to substitute FIRST" print "and ingredients separated by COMMAS: ", filename = raw_input() # get individual ingredients, all separated by a comma temp_ingreds = filename.split(",") ingreds = [] for i in temp_ingreds: # remove whitespace, replace space with _, make all lowercase ingreds.append(i.strip().replace(" ", "_").lower()) ################# ANALYSIS OF NETWORK ################# ### setting up variables ### # ingredient to substitute x = ingreds[0] # potential substitutions
def parse_ucinet(lines): """Parse UCINET format graph from string or iterable. Currently only the 'fullmatrix', 'nodelist1' and 'nodelist1b' formats are supported. Parameters ---------- lines : string or iterable Data in UCINET format. Returns ------- G : NetworkX graph See Also -------- read_ucinet() References ---------- See UCINET User Guide or http://www.analytictech.com/ucinet/help/hs5000.htm for full format information. Short version on http://www.analytictech.com/networks/dataentry.htm """ from numpy import genfromtxt, reshape, insert, isnan G = nx.MultiDiGraph() if not is_string_like(lines): s = '' for line in lines: if type(line) == bytes: s += line.decode('utf-8') else: s += line lines = s lexer = shlex.shlex(lines.lower()) lexer.whitespace += ',=' lexer.whitespace_split = True number_of_nodes = 0 number_of_matrices = 0 nr = 0 # number of rows (rectangular matrix) nc = 0 # number of columns (rectangular matrix) ucinet_format = 'fullmatrix' # Format by default labels = {} # Contains labels of nodes row_labels_embedded = False # Whether labels are embedded in data or not cols_labels_embedded = False diagonal = True # whether the main diagonal is present or absent KEYWORDS = ('format', 'data:', 'labels:') # TODO remove ':' in keywords while lexer: try: token = next(lexer) except StopIteration: break # print "Token : %s" % token if token.startswith('n'): if token.startswith('nr'): nr = int(get_param("\d+", token, lexer)) number_of_nodes = max(nr, nc) elif token.startswith('nc'): nc = int(get_param("\d+", token, lexer)) number_of_nodes = max(nr, nc) elif token.startswith('nm'): number_of_matrices = int(get_param("\d+", token, lexer)) else: number_of_nodes = int(get_param("\d+", token, lexer)) nr = number_of_nodes nc = number_of_nodes elif token.startswith("diagonal"): diagonal = get_param("present|absent", token, lexer) elif token.startswith("format"): ucinet_format = get_param("""^(fullmatrix|upperhalf|lowerhalf|nodelist1|nodelist2|nodelist1b|\ edgelist1|edgelist2|blockmatrix|partition)$""", token, lexer) # TODO : row and columns labels elif token.startswith("row"): # Row labels pass elif token.startswith("column"): # Columns labels pass elif token.startswith("labels"): token = next(lexer) i = 0 while token not in KEYWORDS: if token.startswith('embedded'): row_labels_embedded = True cols_labels_embedded = True break else: labels[i] = token.replace('"', '') # for labels with embedded spaces i += 1 try: token = next(lexer) except StopIteration: break elif token.startswith('data'): break data_lines = lines.lower().split("data:", 1)[1] # Generate edges params = {} if cols_labels_embedded: # params['names'] = True labels = dict(zip(range(0, nc), data_lines.splitlines()[1].split())) # params['skip_header'] = 2 # First character is \n if row_labels_embedded: # Skip first column # TODO rectangular case : labels can differ from rows to columns # params['usecols'] = range(1, nc + 1) pass if ucinet_format == 'fullmatrix': # In Python3 genfromtxt requires bytes string try: data_lines = bytes(data_lines, 'utf-8') except TypeError: pass # Do not use splitlines() because it is not necessarily written as a square matrix data = genfromtxt([data_lines], case_sensitive=False, **params) if cols_labels_embedded or row_labels_embedded: # data = insert(data, 0, float('nan')) data = data[~isnan(data)] mat = reshape(data, (max(number_of_nodes, nr), -1)) G = nx.from_numpy_matrix(mat, create_using=nx.MultiDiGraph()) elif ucinet_format in ('nodelist1', 'nodelist1b'): # Since genfromtxt only accepts square 
matrix... s = '' for i, line in enumerate(data_lines.splitlines()): row = line.split() if row: if ucinet_format == 'nodelist1b' and row[0] == '0': pass else: for neighbor in row[1:]: if ucinet_format == 'nodelist1': source = row[0] else: source = str(i) s += source + ' ' + neighbor + '\n' G = nx.parse_edgelist(s.splitlines(), nodetype=str if row_labels_embedded and cols_labels_embedded else int, create_using=nx.MultiDiGraph()) if not row_labels_embedded or not cols_labels_embedded: nx.relabel_nodes(G, dict(zip(list(G.nodes()), [i-1 for i in G.nodes()])), copy=False) elif ucinet_format == 'edgelist1': G = nx.parse_edgelist(data_lines.splitlines(), nodetype=str if row_labels_embedded and cols_labels_embedded else int, create_using=nx.MultiDiGraph()) if not row_labels_embedded or not cols_labels_embedded: nx.relabel_nodes(G, dict(zip(list(G.nodes()), [i-1 for i in G.nodes()])), copy=False) # Relabel nodes if labels: try: if len(list(G.nodes())) < number_of_nodes: G.add_nodes_from(labels.values() if labels else range(0, number_of_nodes)) nx.relabel_nodes(G, labels, copy=False) except KeyError: pass # Nodes already labelled return G
guid_1 = [str(key) for key in guid_1] guid_2 = [str(key) for key in guid_2] distance = [(1 - float(key)) for key in tcs] # In[335]: adj_list=[] for i in range(0,len(guid_1)): adj_list.append(guid_1[i] + ' ' + guid_2[i] + ' ' + str(distance[i])) # In[361]: G = nx.parse_edgelist(adj_list, nodetype = str, data=(('weight',float),)) # In[362]: paths = nx.shortest_path(G,'SEED-1','SEED-2',weight='weight') # In[363]: path_distance = 0 for i in range(0,len(paths)-2): node1 = paths[i] node2 = paths[i+1] path_distance += G[node1][node2]['weight'] print "Final path distance"
import networkx as nx import matplotlib.pyplot as plt import csv, ast #Create graph G = nx.MultiDiGraph(summary="Albus mutant gene regulatory network") #Read edges list edges = [] with open("albus_edges.tsv") as f: edges = f.readlines() G = nx.parse_edgelist(edges) #G = nx.read_edgelist('albus_edges.tsv', nodetype=str, delimiter='\t', data=(('weight',float),('IsDirected',bool),('Source',str))) print G.edges() for edge in G.edges(): print G.get_edge_data(*edge) #Read nodes list with open("albus_nodes.tsv") as tsv: for line in csv.reader(tsv, delimiter="\t"): G.add_node(line[0], ast.literal_eval(line[1])) #remove nodes without edges outdeg = G.degree() to_remove = [n for n in outdeg if outdeg[n] == 0] G.remove_nodes_from(to_remove) #Draw the graph nodes = []
#!/usr/bin/env python import os, os.path import gzip import networkx as nx import urllib import jointrw data_path = 'data/wiki-Talk.txt.gz' if __name__ == '__main__': if not os.path.exists(data_path): print "pulling data..." d = os.path.dirname(data_path) if not os.path.exists(d): os.makedirs(d) urllib.urlretrieve('http://snap.stanford.edu/data/wiki-Talk.txt.gz',data_path) print 'reading data...' f = gzip.open(data_path,'r') digraph = nx.DiGraph() nx.parse_edgelist(f, comments="#", delimiter="\t", nodetype = int, create_using=digraph) f.close() print 'random walk...' jointrw.main(digraph, digraph.order() // 100, also_recip=False)
import networkx as nx

f = open("stats_chord.txt", 'r')
lignes = f.readlines()
f.close()

for ligne in lignes:
    ligne = ligne.split(',')
    for i in ligne:
        i = i.replace("\n", "")
    G = nx.parse_edgelist(ligne, nodetype=int)
    print nx.is_chordal(G)
def fr(f):
    t = open(f).readlines()
    _, e = map(int, t[0].strip().split(" "))
    G = nx.parse_edgelist(t[1:], create_using=nx.DiGraph(), nodetype=int)
    assert len(G.edges()) == e
    return G
cur2 = conn.cursor() # I took the middle of house to not have a 0.0 distance between two direct neighboor houses (Reihenhaeuser) cur2.execute("Select t1.osm_id, t2.osm_id, ST_Distance(ST_Centroid(t1.way), ST_Centroid(t2.way)) from planet_osm_polygon as t1, planet_osm_polygon as t2 where not t1.building = '' and not t2.building = '' and ST_Within(ST_Centroid(t1.way), %s) and ST_Within(ST_Centroid(t2.way), %s) and t1.osm_id != t2.osm_id;", [row[1], row[1]]) # cur2.execute("Select t1.osm_id, t2.osm_id, ST_Distance(ST_Centroid(t1.way), ST_Centroid(t2.way)) as dist from planet_osm_polygon as t1, planet_osm_polygon as t2 where not t1.building = '' and not t2.building = '' and ST_Within(ST_Centroid(t1.way), %s) and ST_Within(ST_Centroid(t2.way), %s) and t1.osm_id != t2.osm_id order by dist DESC;", [row[1], row[1]]) #cur2.execute("Select t1.osm_id, t2.osm_id, ST_Distance(t1.way, t2.way) as dist from planet_osm_polygon as t1, planet_osm_polygon as t2 where not t1.building = '' and not t2.building = '' and ST_Within(ST_Centroid(t1.way), %s) and ST_Within(ST_Centroid(t2.way), %s) and t1.osm_id != t2.osm_id order by dist DESC;", [row[1], row[1]]) graph = [] for row2 in cur2: # generates connections between the houses graph.append(str(row2[0]) + " " + str(row2[1]) + " {'weight':" + str(row2[2]) + "}") #print row2[0], "\t", row2[1], "\t", row2[2] cur3 = conn.cursor() cur3.execute("Select t1.osm_id, t2.osm_id, ST_Distance(ST_Centroid(t1.way), t2.way) from planet_osm_polygon as t1, planet_osm_point as t2 where not t1.building = '' and t2.osm_id = %s and t2.power = 'transformer' and ST_Within(ST_Centroid(t1.way), %s);", [row[0], row[1]]) for row3 in cur3: # generate connections between transformer and every house graph.append(str(row3[0]) + " " + str(row3[1]) + " {'weight':" + str(row3[2]) + "}") G = nx.parse_edgelist(graph, nodetype = int) minspantree = G.edges() for edge in minspantree: (startnode, endnode) = edge cur4 = conn.cursor() cur4.execute("Insert into networkx_test (power, way, startosm_id, endosm_id) VALUES ('line', ST_MakeLine((SELECT ST_Centroid(way) as way from planet_osm_polygon WHERE osm_id = %s UNION Select way from planet_osm_point WHERE osm_id = %s), (SELECT ST_Centroid(way) as way from planet_osm_polygon WHERE osm_id = %s UNION Select way from planet_osm_point WHERE osm_id = %s)), %s, %s);", [startnode, startnode, endnode, endnode, startnode, endnode]) conn.commit() print "line " + str(i) + " (transformer_id: " + str(row[0]) + ") added" i = i + 1 conn.commit() cur4.close() cur3.close()
def fr(f):
    with open(f) as f:
        l = f.read().strip().split("\n")[1:]
        G = nx.parse_edgelist(l, create_using=nx.DiGraph(), nodetype=int)
        return G
def createGraphFromEdgeList(lines):
    G = nx.parse_edgelist(lines, delimiter=',', nodetype=int)
    return G
def extractAnnotationData(pmcid, annDir): ''' Extracts data from annotations into a dictionary ''' def parseBoolean(s): return True if s == 'true' else False pdir = os.path.join(annDir, pmcid) fsections = os.path.join(pdir, 'sections.txt') with open(fsections) as f: real_sections = [l[:-1] for l in f] sections = filter(lambda s: not s.startswith('fig'), real_sections) ftitles = os.path.join(pdir, 'titles.txt') with open(ftitles) as f: titles = map(lambda x: x[1], filter(lambda x: not x[0].startswith('fig'), zip(real_sections, [parseBoolean(l[:-1]) for l in f]))) fcitations = os.path.join(pdir, 'citations.txt') with open(fcitations) as f: citations = map(lambda x: x[1], filter(lambda x: not x[0].startswith('fig'), zip(real_sections, [parseBoolean(l[:-1]) for l in f]))) fdocnums = os.path.join(pdir, 'docnums.txt') with open(fdocnums) as f: docnums = map(lambda x: x[1], filter(lambda x: not x[0].startswith('fig'), zip(real_sections, [int(l[:-1]) for l in f]))) fpostags = os.path.join(pdir, 'pos.txt') with open(fpostags) as f: postags = {int(k):v.split(' ') for k,v in [l[:-1].split('\t') for l in f]} fdeps = os.path.join(pdir, 'deps.txt') with open(fdeps) as f: # The file has an edge_list format to be parsed by networkx lines = [l[:-1].split('\t') for l in f] # Group the entries by their sentence index gb = it.groupby(lines, operator.itemgetter(0)) # Create a networkx graph from the edge list for each sentence deps = {int(k):nx.parse_edgelist([x[1] for x in v], nodetype=int) for k, v in gb} fdiscourse = os.path.join(pdir, 'disc.txt') with open(fdiscourse) as f: # Parse the tsv. Cols: 1-starting sentence, 2-finishin sentence + 1, 3-Tree-like dict lines = [l[:-1].split('\t') for l in f] disc = dict() for s, e, t in lines: s, e = int(s), int(e)-1 # The second number is deliberately +1 try: t = eval(t) disc[(s, e)] = t except: print "Error parsing discourse in %s for ix: %i-%i" % (fdiscourse, s, e) fmentions = os.path.join(pdir, 'mention_intervals.txt') with open(fmentions) as f: indices = defaultdict(list) for line in f: line = line[:-1] tokens = [t for t in line.split(' ') if t != ''] ix = int(tokens[0]) if ix < len(real_sections): if real_sections[ix].startswith('fig'): continue ix = map_2_filtered_ix(ix, real_sections) intervals = [] for t in tokens[1:]: x = t.split('%', 3) grounding_id = x[3].split(':')[0] word = x[2].lower() if grounding_id.lower() not in not_permited_context and word not in not_permited_words: intervals.append((int(x[0]), int(x[1]), x[3])) # Merge succesive intervals merged = [] if len(intervals) > 0: prev = intervals[0] for curr in intervals[1:]: if prev[1] == curr[0]: x = (prev[0], curr[1]) merged.append(x) prev = x else: merged.append(prev) merged.append(curr) prev = curr # Pick only one interval per line # if len(merged) > 1: # merged = [merged[0]] indices[ix] += merged tuples = [] counter = 0 for i, s in enumerate(real_sections): if s.startswith('fig'): counter += 1 tuples.append((i, s, i-counter)) #mentions = [indices[i] for i in xrange(max(indices.keys())+1)] mentions = [indices[j] for i, s, j in tuples if not s.startswith('fig')] # Do the mention counts # First count the reach mentions ctxCounts = Counter(n[2].upper() for n in it.chain(*[m for m in mentions if m])) # Normalize it total = sum(ctxCounts.values()) for key in ctxCounts: ctxCounts[key] /= total fmanual_context_intervals = os.path.join(pdir, 'manual_context_intervals.txt') with open(fmanual_context_intervals) as f: manual_context_intervals = {} for l in f: l = l[:-1] line, interval, cid = l.split() 
interval = interval.split('-') manual_context_intervals[cid] = int(interval[0]) # Do the manual event intervals fsentences = os.path.join(pdir, 'sentences.txt') with open(fsentences) as f: sentences = map(lambda x: x[1], filter(lambda x: not x[0].startswith('fig'), zip(real_sections, [l[:-1] for l in f]))) manual_event_triggers = {i:find_evt_anchors(s, triggers) for i, s in enumerate(sentences)} return { 'real_sections':real_sections, 'sections':sections, 'titles':titles, 'citations':citations, 'mentions':mentions, 'docnums':docnums, 'postags':postags, 'deps':deps, 'disc':disc, 'manual_context_intervals':manual_context_intervals, 'manual_event_triggers':manual_event_triggers, 'sentences':sentences, 'ctxCounts':ctxCounts }
short = a elif o in ("-p", "--petersen"): petersen= True else: assert False, "unhandled option" if test: if __test(): print "tutte.py: Test passed" sys.exit() else: print "tutte.py: Test failed" sys.exit(1) if petersen: G = nx.petersen_graph() elif short: lines = map(lambda e: re.sub('--',' ',e), short.split()) G = nx.parse_edgelist(lines) else: lines= sys.stdin.readlines() G = nx.parse_edgelist(lines) print tutte_poly(G, output) if __name__ == "__main__": import sys import getopt main(sys.argv)
def read_from_file(self, path): """ Reads a LOMAP Markov object from a given file """ ## # Open and read the file ## try: with open(path, 'r') as f: lines = f.read().splitlines() except: raise FileError('Problem opening file %s for reading.' % path) line_cnt = 0 ## # Part-1: Model attributes ## # Name of the model try: m = re.match(r'name (.*$)', lines[line_cnt]) self.name = m.group(1) line_cnt += 1 except: raise FileError("Line 1 must be of the form: 'name name_of_the_transition_system', read: '%s'." % lines[line_cnt]) # Initial distribution of the model # A dictionary of the form {'state_label': probability} try: m = re.match(r'init (.*$)', lines[line_cnt]) self.init = eval(m.group(1)) line_cnt += 1 except: raise FileError("Line 2 must give the initial distribution of the form {'state_label': 1}, read: '%s'." % lines[line_cnt]) # Initial distribution sum must be equal to 1 init_prob_sum = 0 for init in self.init: init_prob_sum += self.init[init] if init_prob_sum != 1: raise FileError('Initial distribution of a Markov model must sum to 1, you have %f.' % (init_prob_sum)) ## # End of part-1 ## if(lines[line_cnt] != ';'): raise FileError("Expected ';' after model attributes, read: '%s'." % (line_cnt, lines[line_cnt])) line_cnt += 1 ## # Part-2: State attributes ## # We store state attributes in a dict keyed by states as # we haven't defined them yet state_attr = dict(); try: while(line_cnt < len(lines) and lines[line_cnt] != ';'): m = re.search('(\S*) (.*)$', lines[line_cnt]); exec("state_attr['%s'] = %s" % (m.group(1),m.group(2))); line_cnt += 1 line_cnt+=1 except: raise FileError('Problem parsing state attributes.') ## # Part-3: Edge list with attributes ## try: self.g = nx.parse_edgelist(lines[line_cnt:], comments='#', create_using=nx.MultiDiGraph()) except: raise FileError('Problem parsing definitions of the transitions.') # Add state attributes to nodes of the graph try: for node in state_attr.keys(): # Reset label of the node self.g.node[node]['label'] = node for key in state_attr[node].keys(): # Copy defined attributes to the node in the graph # This is a shallow copy, we don't touch state_attr[node][key] afterwards self.g.node[node][key] = state_attr[node][key] # Define custom node label self.g.node[node]['label'] = r'%s\n%s: %s' % (self.g[node]['label'], key, state_attr[node][key]) except: raise FileError('Problem setting state attributes.')
                if F_z < F_y:
                    unexplored = set([node])
                    break
                else:
                    unexplored.add(node)
        return L, None

    for node in G.nodes():
        if labels[node] is None:
            L, label = stream(node)
            if label is None:
                num_labels += 1
                for node in L:
                    labels[node] = num_labels
            else:
                for node in L:
                    labels[node] = num_labels
    return labels


if __name__ == '__main__':
    lines = ["1 2 3", "2 3 27", "3 4 3.0", "3 5 2.0"]
    G = networkx.parse_edgelist(lines, nodetype=int, data=(('weight', float),))
    print watershedCuts(G)