def xnet2graphml(filename, outfilename='/tmp/mygraph.graphml'):
    """Convert a .xnet file to .graphml format.

    Parameters
    ----------
    filename : str
        Path of the input .xnet file.
    outfilename : str
        Path of the .graphml file to write.
    """
    g = xnet2igraph(filename)
    # Rename position attributes to the x/y names expected by graphml tools.
    if 'x' not in g.vertex_attributes() and 'posx' in g.vertex_attributes():
        g.vs['x'] = g.vs['posx']
        del g.vs['posx']
    if 'y' not in g.vertex_attributes() and 'posy' in g.vertex_attributes():
        g.vs['y'] = g.vs['posy']
        del g.vs['posy']
    # Convert float vertex attributes to strings before writing.
    for attr in g.vertex_attributes():
        if type(g.vs[0][attr]) != float:
            continue
        g.vs[attr] = np.array(g.vs[attr]).astype(str).tolist()
    import igraph
    igraph.write(g, outfilename)
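# Usage sketch for xnet2graphml() above (assumption, not from the original
# source): 'mygraph.xnet' is a hypothetical input file, and numpy (np) plus an
# xnet2igraph() helper are assumed to be available in the module, as the
# function itself assumes.
if __name__ == '__main__':
    import igraph
    xnet2graphml('mygraph.xnet', outfilename='/tmp/mygraph.graphml')
    # Read the result back to confirm the conversion produced valid GraphML.
    g_check = igraph.read('/tmp/mygraph.graphml', format='graphml')
    igraph.summary(g_check)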
def main():
    parser = argparse.ArgumentParser(description="Convert an igraph to a csc object")
    parser.add_argument("graph_fn", action="store",
                        help="The name of the igraph to read from disk. *Must be gml format!")
    parser.add_argument("-g", "--gen_graph", action="store_true",
                        help="Generate a new ER graph")
    parser.add_argument("-n", "--num_nodes", action="store", type=int,
                        help="The number of nodes in the ER graph")
    parser.add_argument("-p", "--probability", action="store", type=float,
                        help="The probability of connectivity of each node to another in the graph")
    parser.add_argument("-s", "--save", action="store_true",
                        help="Save conversion to disk")
    parser.add_argument("-f", "--save_fn", action="store", default="csc_matlab",
                        help="Save file name")
    parser.add_argument("-t", "--test", action="store_true", help="Run test only!")

    result = parser.parse_args()

    if result.test:
        test()
        exit(1)

    if os.path.exists(result.graph_fn):
        g = igraph.read(result.graph_fn, format="gml")
    elif result.gen_graph or result.num_nodes or result.probability:
        assert (result.gen_graph and result.num_nodes and result.probability), \
            "You must set all ER parameters i.e. n, p"
        g = igraph.Graph.Erdos_Renyi(n=result.num_nodes, p=result.probability)
        igraph.write(g, result.save_fn + ".gml", format="gml")
    else:
        sys.stderr.write("Invalid path %s ... and all (i.e. n, p, g) ER parameters "
                         "not set so no action taken.\nEXITING NOW!\n" % result.graph_fn)
        exit(-1)

    igraph_to_csc(g, result.save, result.save_fn)
def gml2core(gmlfolder, svgfolder):
    svgtype = ['_weakGaint', '_strongGaint', '_weakGaintSPT', '_strongGaintSPT']
    # Create the output folder before opening files inside it.
    if os.path.exists(svgfolder) == 0:
        os.mkdir(svgfolder)
    fw = open(svgfolder + 'vecount.txt', 'w')
    for file in os.listdir(gmlfolder):
        if os.path.splitext(file)[1] == '.gml':
            print 'Reading graph from ...', file
            try:
                gmlfile = open(gmlfolder + file)
                g = ig.Graph.Read_GML(gmlfile)
                gmlfile.close()
                gg = clus.VertexClustering.giant(g.clusters(mode='weak'))
                print g.vcount(), gg.vcount()
                # es=ig.EdgeSeq(gg)
                vs = ig.VertexSeq(gg)
                # Collect vertices with no in- or out-edges first, then delete
                # them in one call so indices stay valid during iteration.
                to_delete = [v.index for v in vs
                             if v.indegree() < 1 or v.outdegree() < 1]
                gg.delete_vertices(to_delete)
                print gg.vcount()
                ig.write(gg, svgfolder + file + svgtype[1] + '.gml')
                # ig.Graph.write_svg(gg, svgfolder+file+svgtype[1]+'.svg', layout='large')
            except Exception, e:
                print gmlfolder + file, ' failed', e
                pass
def main():
    # Head of output console
    print("-----------------------------------------------------")
    print("-------------------Program Start---------------------")
    print("-----------------------------------------------------")

    # Record time
    startTime = time.time()

    # Get args
    args = get_args(sys.argv[1:], "CSV to GML script")

    # Set input and output streams
    filename_in = args.filename_in
    filename_out = args.filename_out

    # Import data
    toleratio = args.toleratio
    data, data_trimmed = readin(filename_in, toleratio)
    print("Data imported.")

    # Get the names
    dimension_names = [data[1:]]  # unused in this program
    node_names = [vector[0] for vector in data][1:]  # remove 'gene_id'

    # Get adjacency matrix
    print("-----------------------------------------------------")
    print("Computing adj_matrix...")
    sim_meas = args.sim_meas
    edge_ratio = args.edge_ratio
    print("Similarity measure: " + sim_meas)
    adjMatrix = getAdjacency(data_trimmed, sim_meas, edge_ratio)
    print("Done.")

    # Now we can convert the adjacency matrix to a graph.
    vertices = node_names
    edges = []
    n = len(adjMatrix)
    for i in range(0, n):
        # Only the upper triangle, so each undirected edge is added once.
        for j in range(i + 1, n):
            if adjMatrix[i][j] == 1:
                edges.append((i, j))
    print("-----------------------------------------------------")
    print("Start Generating Graph...")
    # Pass n explicitly so isolated nodes keep their labels.
    g = Graph(n=len(vertices), vertex_attrs={"label": vertices}, edges=edges, directed=False)
    print("Graph Generated. There are " + str(len(vertices)) + " vertices and "
          + str(len(edges)) + " edges.")

    # Export to gml file
    print("-----------------------------------------------------")
    print("Start Writing to GML file...")
    ig.write(g, format="gml", filename=filename_out)
    print("Writing Completed. Output file is named " + filename_out)

    # Tail of output console
    # Report time
    print("Finished in %0.4f seconds" % (time.time() - startTime))
    print("-----------------------------------------------------")
    print("------------------Program Finish---------------------")
    print("-----------------------------------------------------")
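# The getAdjacency() helper used above is not part of this snippet. Below is a
# minimal, hypothetical sketch of what such a helper could look like, assuming
# data_trimmed is a list of numeric row vectors, sim_meas names the similarity
# ('pearson' here), and edge_ratio is the fraction of strongest pairs to keep
# as edges. It is illustrative only, not the original implementation.
import numpy as np

def getAdjacency_sketch(data_trimmed, sim_meas, edge_ratio):
    X = np.asarray(data_trimmed, dtype=float)
    if sim_meas == 'pearson':
        sim = np.abs(np.corrcoef(X))  # |correlation| between rows
    else:
        raise ValueError('unsupported similarity measure: %s' % sim_meas)
    np.fill_diagonal(sim, 0.0)  # no self-edges
    # Keep roughly the top edge_ratio fraction of pairwise similarities.
    triu = sim[np.triu_indices_from(sim, k=1)]
    threshold = np.percentile(triu, 100.0 * (1.0 - edge_ratio))
    return (sim >= threshold).astype(int).tolist()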
def write(self, filename):
    '''Save the graph in graphml format.'''
    #startBudgetYear = int(str(budgetYears[0]))
    #endBudgetYear = int(str(budgetYears[-1]))
    #filename = '%s/result/%d-%d.%s' % (root_folder(), startBudgetYear, endBudgetYear, format) if extra == None else '%s/result/%d-%d.%s.%s' % (root_folder(), startBudgetYear, endBudgetYear, extra, format)
    igraph.write(self.g, filename, format='graphml')
def multi_run(node_list, run_time, model_type):
    i = 0
    graph_list = []
    while i < run_time:
        if model_type == 4:
            IG = model_4(node_list)
        elif model_type == 5:
            IG = model_5(node_list)
        graph_list.append(IG)
        i += 1
    ig.write(graph_list[0],
             r'G:\HFS\WeiboData\HFSWeiboGML\test\simulation_' + str(model_type) + '.gml')
    return graph_list
def main() -> None:
    parser = cli_parser()
    args = parser.parse_args()

    output_path: Path = args.output
    if output_path is None:
        output_path = Path('./model.graphml')
        log.info(f'Using default output path {output_path.absolute()}')
    else:
        if output_path.is_dir():
            raise RuntimeError(f'Specified output path {output_path.absolute()} is a directory.')
        if output_path.exists():
            log.warning(f'Will overwrite file {output_path.absolute()}')

    network = model.generate(vars(args))

    with open(output_path.absolute(), 'w') as f:
        ig.write(network, f, format='graphml')
def do_experiment(node_file_path, gml_file_path, runtime):
    # File name without directory or extension, used to label the output files.
    name = os.path.splitext(os.path.basename(gml_file_path))[0]
    nodes = read_list_nodes_from_file(node_file_path, 2)
    BA_list = multi_run(nodes, 30, 5, name)
    FAN_list = multi_run(nodes, 30, 4, name)
    RG = real_data(gml_file_path)
    ig.write(BA_list[0],
             r'G:\HFS\WeiboData\HFSWeiboGML\test\simulation_BA_' + name + '.gml')
    ig.write(FAN_list[0],
             r'G:\HFS\WeiboData\HFSWeiboGML\test\simulation_Fans_' + name + '.gml')
    ig.write(RG, r'G:\HFS\WeiboData\HFSWeiboGML\test\real_' + name + '.gml')
def convert_graph(linein, lineout):
    datastruct = np.load(linein)
    # The attribute where the timeseries is stored.
    data = datastruct['roi_data']
    numrois = data.shape[0]
    cormtx = np.corrcoef(data)
    cormtx = abs(cormtx)
    # Subtract out the diagonal.
    cormtx = cormtx - np.eye(numrois)
    # Map each (i, j) pair to its correlation value.
    edge_dict = defaultdict(float)
    for i in range(0, numrois):
        for j in range(0, numrois):
            edge_dict[(i, j)] = cormtx[i, j]
    # Add the edges to the graph, then attach the correlations as edge weights.
    new_graph = ig.Graph(n=numrois, directed=False)
    new_graph += list(edge_dict.keys())
    new_graph.es["weight"] = list(edge_dict.values())
    ig.write(new_graph, lineout, format="graphml")
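# Quick way to exercise convert_graph() above with synthetic data. Assumption:
# the function only needs an .npz file containing an 'roi_data' array of shape
# (n_rois, n_timepoints); the file names here are hypothetical.
import numpy as np

def make_test_input(path='/tmp/fake_roi_data.npz', n_rois=10, n_timepoints=100):
    # Random time series, one row per ROI.
    roi_data = np.random.randn(n_rois, n_timepoints)
    np.savez(path, roi_data=roi_data)
    return path

# convert_graph(make_test_input(), '/tmp/fake_corr_graph.graphml')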
visual_style['edge_width'] = [10 * float(w) for w in graph.es['weight']]
ig.plot(graph, filename, **visual_style)

plot(tg)

#%% EDGES
print('UNDIRECTED')
for idx, e in enumerate(tg2.es):
    print(idx, e.tuple, e['weight'])
print('DIRECTED')
for idx, e in enumerate(tg.es):
    print(idx, e.tuple, e['weight'])

#%% VERTEX
print('UNDIRECTED')
for idx, v in enumerate(tg2.vs):
    print(idx, v.index, v.degree(), v['label'])
#print('DIRECTED')
#for idx, v in enumerate(tg.vs):
#    print(idx, v.index, v['label'])

#%%
ig.write(tg2, 'test-undirected.gml')
def real_data(file_path):
    # File name without directory or extension.
    name = os.path.splitext(os.path.basename(file_path))[0]
    IG = ig.Graph.Read_GML(file_path)
    IG = IG.subgraph_edges(IG.es.select(plzftype_in=['2', '8']))
    ig.write(IG, r'G:\HFS\WeiboData\HFSWeiboGML\test\real_' + name + '.gml')
    return IG
def SR():
    nodeAttrFolder = r'G:\HFS\WeiboData\HFSWeiboNodeData'
    gmlFolder = r'G:\HFS\WeiboData\HFSWeiboGML'
    mentionlist = wt.csv2list_new(csvfilepath=r"G:\HFS\WeiboData\HFSWeiboStatNet\Stat\Mentioncntlist.txt")
    meta = wt.csv2list(r'G:\HFS\WeiboData\Statistics\meta_successed308.txt')
    metadic = wt.list2dict(meta)
    print metadic

    netAttr_R = []
    netAttr_S = []
    indegree_R = []
    indegree_S = []
    outdegree_R = []
    outdegree_S = []

    netAttr_R_writer = csv.writer(file(r'G:\HFS\WeiboData\HFSWeiboStatNet\RS\netAttr_R.csv', 'w'))
    netAttr_S_writer = csv.writer(file(r'G:\HFS\WeiboData\HFSWeiboStatNet\RS\netAttr_S.csv', 'w'))
    indegree_R_writer = csv.writer(file(r'G:\HFS\WeiboData\HFSWeiboStatNet\RS\indegree_R.csv', 'w'))
    indegree_S_writer = csv.writer(file(r'G:\HFS\WeiboData\HFSWeiboStatNet\RS\indegree_S.csv', 'w'))
    outdegree_R_writer = csv.writer(file(r'G:\HFS\WeiboData\HFSWeiboStatNet\RS\outdegree_R.csv', 'w'))
    outdegree_S_writer = csv.writer(file(r'G:\HFS\WeiboData\HFSWeiboStatNet\RS\outdegree_S.csv', 'w'))

    errorlist = []
    i = 0
    for filef in os.listdir(gmlFolder):
        # try:
        i += 1
        if i < 3000:
            filename = os.path.splitext(filef)[0]
            forwordcnt = 10000000
            try:
                forwordcnt = metadic.get(str(filename))[4]
            except:
                pass
            if float(forwordcnt) < 2001:
                print i, filename, time.clock()
                IG_r = real_data(gmlFolder + '\\' + filename + '.gml')  # real_data(r"G:\MyPapers\CMO\testData4Cui\3343740313561521.gml")
                # Two-step network growth, one node at a time.
                nodes = read_list_nodes_from_file(nodeAttrFolder + '\\' + filename + '.repost', start_line=2)
                IG_s = model_4(nodes)
                mention_list = find_mention_list_from_file(mentionlist, filename)
                IG_s = mention_model_1(IG_s, mention_list, len(IG_r.vs))
                gmlf_s = gmlFolder + '\\SimulationGml\\sim' + filename + '.gml'
                ig.write(IG_s, gmlf_s)
                # IG_r = clus.VertexClustering.giant(IG_r.clusters(mode='weak'))
                ra = analysisNet(IG_r)  # (ig.VertexClustering.giant(IG_r.clusters(mode='weak')))
                sa = analysisNet(IG_s)
                ra.insert(0, filename)
                sa.insert(0, filename)
                netAttr_R.append(ra)
                netAttr_S.append(sa)
                rIndegree = IG_r.indegree()
                sIndegree = IG_s.indegree()
                rOutdegree = IG_r.outdegree()
                sOutdegree = IG_s.outdegree()
                rIndegree.insert(0, filename)
                sIndegree.insert(0, filename)
                rOutdegree.insert(0, filename)
                sOutdegree.insert(0, filename)
                indegree_R.append(rIndegree)
                indegree_S.append(sIndegree)
                outdegree_R.append(rOutdegree)
                outdegree_S.append(sOutdegree)
                netAttr_R_writer.writerow(ra)
                netAttr_S_writer.writerow(sa)
                indegree_R_writer.writerow(rIndegree)
                indegree_S_writer.writerow(sIndegree)
                outdegree_R_writer.writerow(rOutdegree)
                outdegree_S_writer.writerow(sOutdegree)
        else:
            pass
        # except:
        #     errorlist.append(filef)
        #     print 'error==========================:', filef
    print 'all the errors==========================:', len(errorlist), errorlist
def save_graph(self, graph_path):
    # igraph infers the output format from the extension of graph_path.
    write(self.graph, graph_path)
mention_list = read_mention_list_from_file(r"G:\HFS\WeiboData\HFSWeiboStatNet\Stat\test\Mentioncntlist.txt", 3)
print "mention list================"
print len(mention_list)
print sum(mention_list)

nodes = read_list_nodes_from_file(r"G:\HFS\WeiboData\HFSWeiboNodeData\3343740313561521.repost", 2)
IG_s = model_4(nodes)
print "without mention ================"
print len(IG_s.vs)
print len(IG_s.es)

IG_s = mention_model_1(IG_s, mention_list, len(IG_r.vs))
print "with mention ================"
print len(IG_s.vs)
print len(IG_s.es)

gmlf_s = 'temp.gml'
ig.write(IG_s, gmlf_s)

print IG_r.vs.attributes()
ss = IG_s.vs.attributes()
print ss
# for v in IG_s.vs:
#     print v['label']
#wt.list_2_Distribution(IG_s.indegree(), IG_r.indegree())
#degree_list = multi_run(nodes, 1, 5)
#all_degree = []
#for degree in degree_list:
def export_graph(graph, filename):
    """Saves graph to disk as gzipped GraphML (graphmlz)."""
    print("Exporting...")
    igraph.write(graph, filename, format="graphmlz")
    print("Exported file to {}".format(filename))
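# Round-trip sketch for export_graph() above (assumption: 'example.graphmlz'
# is a hypothetical file name). With format="graphmlz" the output is
# gzip-compressed GraphML, so it can be read back with the same format string.
if __name__ == '__main__':
    g = igraph.Graph.Erdos_Renyi(n=50, p=0.1)
    export_graph(g, 'example.graphmlz')
    g2 = igraph.read('example.graphmlz', format='graphmlz')
    igraph.summary(g2)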
def write_graph(self, G, dest_filename):
    ig.write(G, dest_filename, format="graphml")
#igraph.summary(c.Graph)
#c.make_Graph(15, 200, prob = 0.1)
#igraph.summary(c.Graph)
#print len(c.clique_list)
#g = c.Graph.copy()
#d = DataPolishing(g)
#igraph.summary(d.Graph)
#print len(d.Graph.maximal_cliques(min = 3))
#igraph.write(d.Graph, "randam_clique_5000.gml")
#d.data_polish(polish_ratio = pr)
#igraph.summary(d.Graph)
#print len(d.Graph.maximal_cliques(min = 3))
#igraph.write(d.Graph, "polished_clique_5000.gml")
#print "recall = " , c.recall(d.Graph)
#print "precision = " , c.precision(d.Graph)
#print "accuracy = " , c.accuracy(d.Graph)

g = igraph.read("twitter_graph.gml")
a = DataPolishing(g)
print "original:"
igraph.summary(a.Graph)
print len(a.Graph.maximal_cliques(min=3))
a.data_polish(polish_ratio=pr)
print "polished"
igraph.summary(a.Graph)
print len(a.Graph.maximal_cliques(min=3))
igraph.write(a.Graph, "polished_twitter_grapht.gml")
if item["name"] in actors and item["name"] not in dic.keys(): roles.append("a") #g.vs["role"][counter] = "a" elif item["name"] in dic.keys() and item["name"] not in actors: #g.vs["role"][counter] = "d" roles.append("d") else: roles.append("b") g.vs["role"] = roles color_dict = {"a": "red", "d": "blue", "b": "green"} g.vs["color"] = [color_dict[role] for role in g.vs["role"]] # if eid >= 0: # print(eid) # g.es[eid]["weight"] += 1 # else: # g.add_edge(i, j, weight=1) # # # # pl=gr.plot(g); # pl.add(g); # pl._windows_hacks=True; # pl.show() gr.write( g, "E:/Related to Univesity/Arshad/term 1/Network Science(Teimourpour)/exercise4(project)/graph2.gml" )
def save_cell_graph(self, filename='cell_graph.gml'):
    """Save the cell-to-cell connectivity graph in a GML file."""
    igraph.write(self.__cell_graph, filename, format='gml')
    print 'Saved cell-to-cell connectivity data in', filename
def analyze_json(worker):
    """
    Take in a set of json community detection results files and a graphml file
    representing the raw graph, and output a graphml file that contains, as
    attributes, the results of the algorithms.

    Args:
        worker: Named tuple of json_path, raw_graph_path, output_path, timeout
    """
    signal.signal(signal.SIGALRM, __handle_timeout)
    signal.setitimer(signal.ITIMER_REAL, worker.timeout)

    print('Loading raw Graphml file truth file: %s' % worker.raw_graph_path)
    if worker.raw_graph_path is not None:
        G = igraph.load(worker.raw_graph_path)
    else:
        print("ERROR: Not able to load graph")
        return

    try:
        for json_path in worker.json_path:
            with open(json_path) as f:
                data = json.load(f)
            (name, algorithm) = data['job_name'].split('--')[:2]
            algo_name = 'algo_%s' % algorithm

            # Only if we are pulling least/most frequent:
            # calculate the number of nodes in each community.
            if worker.pick_least_frequent or worker.pick_most_frequent:
                community_counts = {}
                for node in data['membership']:
                    for community in node:
                        if community in community_counts:
                            community_counts[community] += 1
                        else:
                            community_counts[community] = 1

            # Add property to graph
            for node in G.vs():
                # Get cover array
                # TODO: Fix this hacky way to turn node id (i.e. "n1") into node index (i.e. 1)
                try:
                    community_array = data['membership'][int(node['id'][1:])]
                except IndexError:
                    community_array = []

                if worker.pick_least_frequent:
                    least_frequent_community = __get_least_frequent_community(
                        community_array, community_counts, reverse=False)
                    if least_frequent_community is None:
                        least_frequent_community = -1
                    G.vs[node.index][algo_name] = str(least_frequent_community)
                elif worker.pick_most_frequent:
                    least_frequent_community = __get_least_frequent_community(
                        community_array, community_counts, reverse=True)
                    if least_frequent_community is None:
                        least_frequent_community = -1
                    G.vs[node.index][algo_name] = str(least_frequent_community)
                else:
                    G.vs[node.index][algo_name] = ','.join(
                        [str(x) for x in community_array])
    except TimeoutError as t:
        print("\t+Timeout ERROR: was analyzing: ", data['job_name'])
        signal.alarm(0)
        return
    except Exception as e:
        print(e)
        traceback.print_exc(file=sys.stdout)
        return

    graphml_file_output = os.path.join(worker.output_path, "%s.graphml" % name)
    print("Writing Graph: %s" % graphml_file_output)
    igraph.write(G, graphml_file_output)
#%%
## EDGES
idxi = 0
idxf = 7
print('UNDIRECTED')
for idx, e in enumerate(g2.es[idxi:idxf]):
    print(idx, e.tuple, e['weight'])
print('DIRECTED')
for idx, e in enumerate(g.es[idxi:idxf]):
    print(idx, e.tuple, e['weight'])

#%%
'''
SAVE
'''
ig.write(g, 'forks-directed.gml')
ig.write(g2, 'users-undirected.gml')
#ig.write(g, 'users-directed.gml')
#ig.write(rcg, 'repos-CG-undirected.gml')
#! /usr/bin/python
import json
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from networkx.readwrite import json_graph
import igraph

g = igraph.read("weighted_karate.gml")
h = g.community_leading_eigenvector(clusters=2)
[a, b] = h.subgraphs()
igraph.write(a, "a", format="gml")
igraph.write(b, "b", format="gml")

c = nx.read_gml("a")
data = json_graph.node_link_data(c)
with open('c.json', 'w') as f:
    json.dump(data, f, indent=4)

d = nx.read_gml("b")
data = json_graph.node_link_data(d)
with open('d.json', 'w') as f:
    json.dump(data, f, indent=4)

#with open('rgraph.json', 'w') as f:
#    json.dump(data, f, indent=4)
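# Optional check (assumption: run after the script above has produced c.json).
# json_graph.node_link_graph() is the inverse of node_link_data(), so reloading
# the JSON should give a graph with the same node and edge counts as subgraph "a".
with open('c.json') as f:
    c_back = json_graph.node_link_graph(json.load(f))
print(c_back.number_of_nodes(), c_back.number_of_edges())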