def get_commits_graph(path):
    context = snap.TTableContext()
    e_schema = snap.Schema()
    e_schema.Add(snap.TStrTAttrPr("source", snap.atStr))
    e_schema.Add(snap.TStrTAttrPr("target", snap.atStr))
    e_schema.Add(snap.TStrTAttrPr("weight", snap.atStr))
    n_schema = snap.Schema()
    n_schema.Add(snap.TStrTAttrPr("id", snap.atStr))
    n_schema.Add(snap.TStrTAttrPr("username", snap.atStr))
    n_schema.Add(snap.TStrTAttrPr("size", snap.atStr))
    edgetable = snap.TTable.LoadSS(e_schema, path + '{}_edges.csv'.format(pname), context, ",", snap.TBool(True))
    nodetable = snap.TTable.LoadSS(n_schema, path + '{}_nodes.csv'.format(pname), context, ",", snap.TBool(True))
    edgeattrv = snap.TStrV()
    nodeattrv = snap.TStrV()
    net = snap.ToNetwork(snap.PNEANet, edgetable, "source", "target", edgeattrv, nodetable, "id", nodeattrv, snap.aaFirst)
    snap.DelSelfEdges(net)
    snap.SaveEdgeList(net, 'temp/commits_temp_edgelist.csv')
    Data = open('temp/commits_temp_edgelist.csv', 'r')
    Graphtype = nx.Graph()
    G = nx.parse_edgelist(Data, delimiter='\t', create_using=Graphtype, nodetype=int, data=(('weight', float),), comments='#')
    return G
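# A minimal alternative sketch (not from the original code): instead of round-tripping
# through a temporary edge-list file, the SNAP network can be copied into a NetworkX graph
# directly. The helper name snap_to_nx is hypothetical; edge weights are ignored here, and
# isolated nodes are kept, unlike the file-based conversion above.
import networkx as nx

def snap_to_nx(net):
    G = nx.Graph()
    for NI in net.Nodes():
        G.add_node(NI.GetId())
    for EI in net.Edges():
        G.add_edge(EI.GetSrcNId(), EI.GetDstNId())
    return G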
def parseGraph(filename="./GG-NE/test.tsv"):
    edgefilename = filename
    # A file containing the graph, where each row contains an edge,
    # and each edge is represented with the source and dest node ids,
    # the edge attributes, and the source and destination node attributes,
    # separated by a tab.
    context = snap.TTableContext()
    # When loading strings from different files, it is important to use the same context
    # so that SNAP knows that the same string has been seen before in another table.
    schema = snap.Schema()
    schema.Add(snap.TStrTAttrPr("srcID", snap.atInt))
    schema.Add(snap.TStrTAttrPr("dstID", snap.atInt))
    schema.Add(snap.TStrTAttrPr("weight", snap.atFlt))
    table = snap.TTable.LoadSS(schema, edgefilename, context, "\t", snap.TBool(False))

    # In this example, we add the "weight" edge attribute to the network
    # and no node attributes.
    edgeattrv = snap.TStrV()
    edgeattrv.Add("weight")
    srcnodeattrv = snap.TStrV()
    dstnodeattrv = snap.TStrV()

    # G will be an object of type snap.PNEANet
    G = snap.ToNetwork(snap.PNEANet, table, "srcID", "dstID", srcnodeattrv, dstnodeattrv, edgeattrv, snap.aaFirst)

    # Attach the species label to each node as an integer attribute.
    labels = pd.read_table("SS-Butterfly_labels.tsv")
    G.AddIntAttrN("label")
    for index, row in labels.iterrows():
        G.AddIntAttrDatN(row["# Node_ID"], row["Species"], "label")
    return G
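# Hedged usage sketch (not part of the original function): once parseGraph() has attached
# the integer "label" attribute, it can be read back per node with GetIntAttrDatN. The file
# path is the example default; nodes absent from the labels file return SNAP's default
# integer value.
G = parseGraph("./GG-NE/test.tsv")
for NI in G.Nodes():
    print(NI.GetId(), G.GetIntAttrDatN(NI.GetId(), "label"))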
def getLblGraph(self, fileName):
    context = snap.TTableContext()
    schema = snap.Schema()
    schema.Add(snap.TStrTAttrPr("srcLabel", snap.atStr))
    schema.Add(snap.TStrTAttrPr("srcId", snap.atInt))
    schema.Add(snap.TStrTAttrPr("dstLabel", snap.atStr))
    schema.Add(snap.TStrTAttrPr("dstId", snap.atInt))
    table = snap.TTable.LoadSS(schema, fileName, context, " ", snap.TBool(False))
    #print table

    # Keep both labels as edge attributes; no node attributes are added.
    edgeattrv = snap.TStrV()
    edgeattrv.Add("srcLabel")
    edgeattrv.Add("dstLabel")
    # edgeattrv.Add("edgeattr2")
    srcnodeattrv = snap.TStrV()
    # srcnodeattrv.Add("srcLabel")
    dstnodeattrv = snap.TStrV()
    # srcnodeattrv.Add("dstLabel")

    # The return value will be an object of type snap.PNEANet
    return snap.ToNetwork(snap.PNEANet, table, "srcId", "dstId", srcnodeattrv, dstnodeattrv, edgeattrv, snap.aaFirst)
def main(args):
    if len(args) < 3:
        print(get_usage())
        sys.exit(1)

    root = sys.argv[1]
    mid_date = sys.argv[2]
    mid_ticks = utils.date_to_ticks(mid_date)

    file_cache = {
        TCOLLAB: None,
        TPULL: None,
        TREPO: None,
        TFOLLOW: None,
        TWATCH: None,
        TFORK: None
    }
    for file in os.listdir(root):
        if file.endswith(".tsv"):
            file_cache[file] = os.path.join(root, file)
            print(file_cache[file])

    for key, val in file_cache.items():
        if val is None:
            print("One of the required files was not found.")
            print(get_usage())
            sys.exit(1)

    t = testutils.Timer(ENABLE_TIMER)
    context = snap.TTableContext()

    S1 = snap.Schema()
    S1.Add(snap.TStrTAttrPr("userid1", snap.atStr))
    S1.Add(snap.TStrTAttrPr("userid2", snap.atStr))
    S1.Add(snap.TStrTAttrPr("created_at", snap.atInt))
    Tfollow = snap.TTable.LoadSS("Tfollow", S1, file_cache[TFOLLOW], context, '\t', snap.TBool(False))
    t.show("load follow")

    S2 = snap.Schema()
    S2.Add(snap.TStrTAttrPr("userid", snap.atStr))
    S2.Add(snap.TStrTAttrPr("owner", snap.atStr))
    S2.Add(snap.TStrTAttrPr("name", snap.atStr))
    S2.Add(snap.TStrTAttrPr("created_at", snap.atInt))
    Tcollab = snap.TTable.LoadSS("Tcollab", S2, file_cache[TCOLLAB], context, '\t', snap.TBool(False))
    t.show("load collab")

    S3 = snap.Schema()
    S3.Add(snap.TStrTAttrPr("userid", snap.atStr))
    S3.Add(snap.TStrTAttrPr("owner", snap.atStr))
    S3.Add(snap.TStrTAttrPr("name", snap.atStr))
    S3.Add(snap.TStrTAttrPr("pullid", snap.atInt))
    S3.Add(snap.TStrTAttrPr("status", snap.atStr))
    S3.Add(snap.TStrTAttrPr("created_at", snap.atInt))
    Tpull = snap.TTable.LoadSS("Tpull", S3, file_cache[TPULL], context, '\t', snap.TBool(False))
    t.show("load pull")

    # If (u, v) collaborated on the same repository - determined by the (owner, name) pair -
    # they are added as collaborators.
    # TODO: Better column renaming
    V = snap.TStrV()
    V.Add("created_at")
    Tcollab.Order(V, "", snap.TBool(False), snap.TBool(True))
    V.Clr()
    V.Add("owner")
    V.Add("name")
    V.Add("userid")
    Tcollab.Group(V, "UserRepoId")
    V.Clr()
    V.Add("UserRepoId")
    Tcollab.Unique(V)

    Tcollab_merge = Tcollab.SelfJoin("owner")
    Tcollab_merge.SelectAtomic("Tcollab_1.name", "Tcollab_2.name", snap.EQ)
    Tcollab_merge.SelectAtomic("Tcollab_1.userid", "Tcollab_2.userid", snap.NEQ)
    # BUGBUG - Commenting out this line means created_at is not present in Tcollab_merge.
    # However, ProjectInPlace will not complain and will silently exclude created_at from the
    # result. This leads to the Index:-1 error in SelectAtomicIntConst on created_at later in the code.
    Tcollab_merge.ColMin("Tcollab_1.created_at", "Tcollab_2.created_at", "created_at")
    V = snap.TStrV()
    V.Add("Tcollab_1.userid")
    V.Add("Tcollab_2.userid")
    V.Add("created_at")
    Tcollab_merge.ProjectInPlace(V)
    Tcollab_merge.Rename("Tcollab_1.userid", "userid1")
    Tcollab_merge.Rename("Tcollab_2.userid", "userid2")
    t.show("merge collab", Tcollab_merge)
    #testutils.dump(Tcollab_merge, 50)

    # If (u, v) worked on the same pull request in the same repository, they are added
    # as (soft) collaborators.
    V = snap.TStrV()
    V.Add("created_at")
    Tpull.Order(V, "", snap.TBool(False), snap.TBool(True))
    V.Clr()
    V.Add("owner")
    V.Add("name")
    V.Add("userid")
    Tpull.Group(V, "UserRepoId")
    V.Clr()
    V.Add("UserRepoId")
    Tpull.Unique(V)

    Tpull_merge = Tpull.SelfJoin("owner")
    Tpull_merge.SelectAtomic("Tpull_1.name", "Tpull_2.name", snap.EQ)
    Tpull_merge.SelectAtomic("Tpull_1.pullid", "Tpull_2.pullid", snap.EQ)
    Tpull_merge.SelectAtomic("Tpull_1.userid", "Tpull_2.userid", snap.NEQ)
    Tpull_merge.ColMin("Tpull_1.created_at", "Tpull_2.created_at", "created_at")
    V = snap.TStrV()
    V.Add("Tpull_1.userid")
    V.Add("Tpull_2.userid")
    V.Add("created_at")
    Tpull_merge.ProjectInPlace(V)
    Tpull_merge.Rename("Tpull_1.userid", "userid1")
    Tpull_merge.Rename("Tpull_2.userid", "userid2")
    t.show("merge pull", Tpull_merge)

    # BUGBUG: UnionAll is returning an unexpected result at this point
    #Tmerge = Tcollab_merge.UnionAll(Tpull_merge, "Tmerge")
    Tmerge = Tpull_merge

    # Select the base and delta tables from the merged table.
    Tbase = snap.TTable.New(Tmerge, "Base")
    Tdelta = snap.TTable.New(Tmerge, "Delta")
    Tbase.SelectAtomicIntConst("created_at", mid_ticks, snap.LTE)
    Tdelta.SelectAtomicIntConst("created_at", mid_ticks, snap.GTE)
    # TODO: Union Tbase with collab and pull to include (userid, owner) edges
    t.show("collab union")

    # Convert the base and delta tables to graphs.
    Gbase = snap.ToNetwork(snap.PNEANet, Tbase, "userid1", "userid2", snap.aaFirst)
    Gdelta = snap.ToNetwork(snap.PNEANet, Tdelta, "userid1", "userid2", snap.aaFirst)
    t.show("base graph", Gbase)
    t.show("delta graph", Gdelta)

    NITERS = 20
    total_preck = 0
    print("Userid\tPrec@%d\tAverage Index" % (N_TOP_RECOS))

    # Random walk with restarts
    # BUGBUG: Returns the same id every time
    # userid = Gbase.GetRndNId()
    for i in range(NITERS):
        # Randomly choose a starting node
        userid = random.choice([node.GetId() for node in Gbase.Nodes()])
        user = Gbase.GetNI(userid)

        # Perform a random walk with restarts on the base graph
        HT = snap.TIntFltH()
        snap.GetRndWalkRestart_PNEANet(Gbase, ALPHA, userid, HT)
        HT.SortByDat(False)

        j = 0
        cnt = 0
        preck = 0
        average_index = -1

        # Calculate precision
        while cnt < N_TOP_RECOS and j < HT.Len():
            recoid = HT.GetKey(j)
            pagerank = HT.GetDat(recoid)
            #print recoid, pagerank
            if recoid != userid:
                # If the edge is not in the base graph but is present in the delta graph,
                # we made an accurate prediction.
                if not Gbase.IsEdge(userid, recoid) and Gdelta.IsNode(userid) and Gdelta.IsNode(recoid) and (Gdelta.IsEdge(userid, recoid) or Gdelta.IsEdge(recoid, userid)):
                    preck += 1
                cnt += 1
            j += 1

        # Calculate the average index
        try:
            node = Gdelta.GetNI(userid)
            edges = [nid for nid in node.GetOutEdges()] + [nid for nid in node.GetInEdges()]
            #print edges
            #print([HT.GetKeyId(nid) for nid in edges])
            index = 0
            for nid in edges:
                index += HT.GetKeyId(nid)
            average_index = index / len(edges)
        except:
            # Node not present in the delta graph implies no new edges were formed
            pass

        total_preck += preck
        print("%d\t%d\t%f" % (userid, preck, average_index))

    #rank = snap.TTable.New("Rank", HT, "User", PAGE_RANK_ATTRIBUTE, context, snap.TBool(True))
    print("Average Precision@%d = %f" % (N_TOP_RECOS, total_preck / float(NITERS)))
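# The script above relies on utils.date_to_ticks(mid_date) to turn the date argument into an
# integer comparable with the created_at column. That helper is not shown in this example; a
# plausible minimal version, assuming "YYYY-MM-DD" input and Unix-epoch seconds in
# created_at, is sketched below (hypothetical, the real utils module may differ).
import calendar
import time

def date_to_ticks(date_str):
    # Parse an ISO date string and return seconds since the Unix epoch (UTC).
    return calendar.timegm(time.strptime(date_str, "%Y-%m-%d"))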
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print("Usage: " + sys.argv[0] + " <srcfile>")
        sys.exit(1)

    srcfile = sys.argv[1]

    context = snap.TTableContext()
    t = testutils.Timer()
    r = testutils.Resource()

    # Load a previously saved binary TTable.
    FIn = snap.TFIn(srcfile)
    table = snap.TTable.Load(FIn, context)
    t.show("load bin", table)
    r.show("__loadbin__")

    # Use the first two schema columns as the source and destination node ids.
    S = list(map(lambda x: x.GetVal1(), table.GetSchema()))
    assert (len(S) >= 2)

    graph = snap.ToNetwork(snap.PNEANet, table, S[0], S[1], snap.aaFirst)
    t.show("create graph", graph)
    r.show("__creategraph__")
    print("graph type", type(graph))

    # Convert the graph back to an edge table.
    table2 = snap.TTable.GetEdgeTable(graph, "1", context)
    t.show("table from graph", table2)
    r.show("__tablefromgraph__")
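# For reference (not part of the script above): a binary table file like <srcfile> can be
# produced by saving any TTable with TFOut, mirroring the TFIn/TTable.Load calls used here.
# The output path "table.bin" is only an example.
FOut = snap.TFOut("table.bin")
table.Save(FOut)
FOut.Flush()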
                              snap.TBool(False))
print("node_rows", node_table.GetNumValidRows())

srcattrv = snap.TStrV()
srcattrv.Add("edgeattr1")
dstattrv = snap.TStrV()
dstattrv.Add("edgeattr1")
edgeattrv = snap.TStrV()
edgeattrv.Add("edgeattr1")
nodeattrv = snap.TStrV()
nodeattrv.Add("name")

# Edge table only, with explicit src/dst/edge attribute vectors.
net1 = snap.ToNetwork(snap.PNEANet, edge_table, "srcID", "dstID", srcattrv, dstattrv, edgeattrv, snap.aaFirst)
print("nodes1", net1.GetNodes())
print("edges1", net1.GetEdges())

# Edge table only, no attributes.
net2 = snap.ToNetwork(snap.PNEANet, edge_table, "srcID", "dstID", snap.aaFirst)
print("nodes2", net2.GetNodes())
print("edges2", net2.GetEdges())

# Edge table only, with edge attributes.
net3 = snap.ToNetwork(snap.PNEANet, edge_table, "srcID", "dstID", edgeattrv, snap.aaFirst)
print("nodes3", net3.GetNodes())
print("edges3", net3.GetEdges())

# Edge table plus node table with node attributes.
net4 = snap.ToNetwork(snap.PNEANet, edge_table, "srcID", "dstID", edgeattrv, node_table, "nodeID", nodeattrv, snap.aaFirst)
print("nodes4", net4.GetNodes())
table = snap.TTable.LoadSS(schema, edgefilename, context, " ", snap.TBool(False))

# In this example, we add the "timestamp" edge attribute to the network
# and no node attributes.
edgeattrv = snap.TStrV()
edgeattrv.Add("timestamp")
# edgeattrv.Add("edgeattr2")
srcnodeattrv = snap.TStrV()
# srcnodeattrv.Add("srcnodeattr1")
dstnodeattrv = snap.TStrV()

# G will be an object of type snap.PNEANet
G = snap.ToNetwork(snap.PNEANet, table, "srcID", "dstID", srcnodeattrv, dstnodeattrv, edgeattrv, snap.aaFirst)

graph = MG.MultiGraph()
graph.setGraph(G)

## Node level attributes
NodeA = NodeA.NodeAttribute(G)
## Edge level attributes
EdgeA = EdgeA.EdgeAttribute(G)

graph.walkNodes()
graph.walkEdges()

##### init edge attributes
attribute_type = 1
useredges.to_csv('temp/mergededges.csv', index=None)

# Build graph from temp files using the SNAP library
context = snap.TTableContext()
e_schema = snap.Schema()
e_schema.Add(snap.TStrTAttrPr("source", snap.atStr))
e_schema.Add(snap.TStrTAttrPr("target", snap.atStr))
n_schema = snap.Schema()
n_schema.Add(snap.TStrTAttrPr("username", snap.atStr))
edgetable = snap.TTable.LoadSS(e_schema, 'temp/mergededges.csv', context, ",", snap.TBool(True))
nodetable = snap.TTable.LoadSS(n_schema, 'temp/mergednodes.csv', context, ",", snap.TBool(True))
edgeattrv = snap.TStrV()
nodeattrv = snap.TStrV()
nodeattrv.Add("username")
net = snap.ToNetwork(snap.PNEANet, edgetable, "source", "target", edgeattrv, nodetable, "username", nodeattrv, snap.aaFirst)

# Need to remove self-edges to compute the rich-club coefficient
snap.DelSelfEdges(net)

# Store the results
name = str(pid) + '_usergraph'
snap.SaveEdgeListNet(net, outpath + name + '.csv', 'Network of issues, PR and commits')
generateTables(outpath, name, net)
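# The self-edge removal above is motivated by the rich-club coefficient. One way to actually
# compute it (a sketch, not part of the original pipeline) is to export the SNAP network to a
# plain edge list and hand it to networkx; the temp path below is hypothetical, and the node
# ids in the edge list are SNAP's internal integer ids rather than usernames.
import networkx as nx

snap.SaveEdgeList(net, 'temp/usergraph_edgelist.txt')
G_nx = nx.read_edgelist('temp/usergraph_edgelist.txt', nodetype=int, data=False)
rich_club = nx.rich_club_coefficient(G_nx, normalized=False)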
schema.Add(snap.TStrTAttrPr("OwnerUserId", snap.atInt))
schema.Add(snap.TStrTAttrPr("AcceptedAnswerId", snap.atInt))
schema.Add(snap.TStrTAttrPr("CreationDate", snap.atInt))
schema.Add(snap.TStrTAttrPr("Score", snap.atInt))
schema.Add(snap.TStrTAttrPr("Tag", snap.atStr))

table = snap.TTable.LoadSS("1", schema, srcfile, context, "\t", snap.TBool(False))
t.show("load posts text", table)
r.show("__loadpoststext__")

questions = snap.TTable.New("2", table.GetSchema(), context)
table.SelectAtomicStrConst("Tag", "python", snap.EQ, questions)
t.show("selected tag = 'python'", questions)
r.show("__selectedtagpython__")

qa = questions.Join("AcceptedAnswerId", table, "Id")
graph = snap.ToNetwork(snap.PNEANet, qa, "2.OwnerUserId", "1.OwnerUserId", snap.aaFirst)
t.show("join", qa)
r.show("__join__")
t.show("graph", graph)
r.show("__graph__")

PRankH = snap.TIntFltH()
snap.GetPageRank(graph, PRankH, 0.85, 1e-4, 100)
prtable = snap.TTable.New("PR", PRankH, "UserId", "PageRank", context, snap.TBool(True))
t.show("pagerank", prtable)
r.show("__pagerank__")

FOut = snap.TFOut(dstfile)
prtable.Save(FOut)
def main(args):
    if len(args) < 1:
        print("python github-join.py <path_to_tsv_file>")
        sys.exit(1)

    filename = args[0]
    t = testutils.Timer(ENABLE_TIMER)

    context = snap.TTableContext()
    S = snap.Schema()
    S.Add(snap.TStrTAttrPr("userid", snap.atStr))
    S.Add(snap.TStrTAttrPr("owner", snap.atStr))
    S.Add(snap.TStrTAttrPr("name", snap.atStr))
    S.Add(snap.TStrTAttrPr("pullid", snap.atInt))
    S.Add(snap.TStrTAttrPr("status", snap.atStr))
    S.Add(snap.TStrTAttrPr("created_at", snap.atInt))
    Tpull = snap.TTable.LoadSS("Tpull", S, filename, context, '\t', snap.TBool(False))
    t.show("load pull")

    V = snap.TStrV()
    V.Add("created_at")
    Tpull.Order(V, "", snap.TBool(False), snap.TBool(True))
    V.Clr()
    V.Add("owner")
    V.Add("name")
    V.Add("userid")
    Tpull.Group(V, "TagId")
    V.Clr()
    V.Add("TagId")
    Tpull.Unique(V)
    t.show("Unique", Tpull)

    Tpull_merge = Tpull.SelfJoin("owner")
    t.show("Merge", Tpull_merge)

    # Things work fine up to this point
    Tpull_merge.SelectAtomic("Tpull_1.name", "Tpull_2.name", snap.EQ)
    Tpull_merge.SelectAtomic("Tpull_1.pullid", "Tpull_2.pullid", snap.EQ)
    Tpull_merge.SelectAtomic("Tpull_1.userid", "Tpull_2.userid", snap.NEQ)
    Tpull_merge.ColMin("Tpull_1.created_at", "Tpull_2.created_at", "created_at")
    V = snap.TStrV()
    V.Add("Tpull_1.userid")
    V.Add("Tpull_2.userid")
    V.Add("created_at")
    Tpull_merge.ProjectInPlace(V)
    Tpull_merge.Rename("Tpull_1.userid", "userid1")
    Tpull_merge.Rename("Tpull_2.userid", "userid2")

    # Copy Tpull_merge to form two tables, base and delta. Select all rows in base
    # with created_at < x and all rows in delta with created_at > x.
    Tbase = snap.TTable.New(Tpull_merge, "Base")
    Tdelta = snap.TTable.New(Tpull_merge, "Delta")
    #Tbase.SelectAtomicIntConst("created_at", x, snap.LTE)
    #Tdelta.SelectAtomicIntConst("created_at", x, snap.GTE)

    G = snap.ToNetwork(snap.PNEANet, Tbase, "userid1", "userid2", snap.aaFirst)
    t.show("graph", G)
schema.Add(snap.TStrTAttrPr("OwnerUserId", snap.atInt))
schema.Add(snap.TStrTAttrPr("AcceptedAnswerId", snap.atInt))
schema.Add(snap.TStrTAttrPr("CreationDate", snap.atInt))
schema.Add(snap.TStrTAttrPr("Score", snap.atInt))
schema.Add(snap.TStrTAttrPr("Tag", snap.atStr))

table = snap.TTable.LoadSS(schema, srcfile, context, "\t", snap.TBool(False))
t.show("load posts text", table)
r.show("__loadpoststext__")

questions = snap.TTable.New(table.GetSchema(), context)
table.SelectAtomicStrConst("Tag", "python", snap.EQ, questions)
t.show("selected tag = 'python'", questions)
r.show("__selectedtagpython__")

qa = questions.Join("AcceptedAnswerId", table, "Id")
graph = snap.ToNetwork(snap.PNEANet, qa, "OwnerUserId-2", "OwnerUserId-1", snap.aaFirst)
t.show("join", qa)
r.show("__join__")
t.show("graph", graph)
r.show("__graph__")

PRankH = snap.TIntFltH()
snap.GetPageRank(graph, PRankH, 0.85, 1e-4, 100)
prtable = snap.TTable.New(PRankH, "UserId", "PageRank", context, snap.TBool(True))
t.show("pagerank", prtable)
r.show("__pagerank__")

FOut = snap.TFOut(dstfile)
prtable.Save(FOut)
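# Round-trip check (a sketch, not in the original script): after flushing the TFOut, the
# saved PageRank table can be read back with TFIn and TTable.Load, matching the load
# pattern used elsewhere in these examples.
FOut.Flush()
FIn = snap.TFIn(dstfile)
prtable2 = snap.TTable.Load(FIn, context)
print("pagerank rows", prtable2.GetNumValidRows())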