def main(args):
    if len(args) < 3:
        print(get_usage())
        sys.exit(1)

    votes = sys.argv[1]
    outFile = sys.argv[2]

    t = testutils.Timer(ENABLE_TIMER)
    context = snap.TTableContext()

    VoteS = snap.Schema()
    VoteS.Add(snap.TStrTAttrPr("UserId", snap.atInt))
    VoteS.Add(snap.TStrTAttrPr("AdminId", snap.atInt))
    TVotes = snap.TTable.LoadSS("WikiVotes", VoteS, votes, context, '\t', snap.TBool(False))
    t.show("load Votes", TVotes)

    GroupBy = snap.TStrV()
    GroupBy.Add("UserId")
    JointTable = TVotes.SelfSimJoinPerGroup(GroupBy, "AdminId", DISTANCE_ATTRIBUTE, snap.Jaccard, 0.5)
    t.show("SimJoinPerGroup complete", JointTable)

    JointTable.SelectAtomic("WikiVotes_1.UserId", "WikiVotes_2.UserId", snap.NEQ)
    t.show("Select complete", JointTable)

    testutils.dump(JointTable, 20)
    JointTable.SaveSS(outFile)
def get_activity_graph(path, atype):
    context = snap.TTableContext()

    e_schema = snap.Schema()
    e_schema.Add(snap.TStrTAttrPr("source", snap.atStr))
    e_schema.Add(snap.TStrTAttrPr("target", snap.atStr))
    e_schema.Add(snap.TStrTAttrPr("count", snap.atStr))

    n_schema = snap.Schema()
    n_schema.Add(snap.TStrTAttrPr("id_user", snap.atStr))
    n_schema.Add(snap.TStrTAttrPr("id", snap.atStr))

    # Note: `p` is assumed to be defined in the enclosing scope (e.g. a project identifier).
    edgetable = snap.TTable.LoadSS(e_schema, path + '{}_{}_edges_reduced.csv'.format(p, atype), context, ",", snap.TBool(True))
    nodetable = snap.TTable.LoadSS(n_schema, path + '{}_{}_nodes_reduced.csv'.format(p, atype), context, ",", snap.TBool(True))

    edgeattrv = snap.TStrV()
    nodeattrv = snap.TStrV()
    net = snap.ToNetwork(snap.PNEANet, edgetable, "source", "target", edgeattrv, nodetable, "id", nodeattrv, snap.aaFirst)
    snap.DelSelfEdges(net)
    snap.SaveEdgeList(net, 'temp/{}_temp_edgelist.csv'.format(atype))

    Data = open('temp/{}_temp_edgelist.csv'.format(atype), 'r')
    Graphtype = nx.Graph()
    G = nx.parse_edgelist(Data, delimiter='\t', create_using=Graphtype, nodetype=int, data=(('weight', float),), comments='#')
    return G
def getLblGraph(self, fileName):
    context = snap.TTableContext()
    schema = snap.Schema()
    schema.Add(snap.TStrTAttrPr("srcLabel", snap.atStr))
    schema.Add(snap.TStrTAttrPr("srcId", snap.atInt))
    schema.Add(snap.TStrTAttrPr("dstLabel", snap.atStr))
    schema.Add(snap.TStrTAttrPr("dstId", snap.atInt))
    table = snap.TTable.LoadSS(schema, fileName, context, " ", snap.TBool(False))
    #print table

    edgeattrv = snap.TStrV()
    edgeattrv.Add("srcLabel")
    edgeattrv.Add("dstLabel")
    # edgeattrv.Add("edgeattr2")

    srcnodeattrv = snap.TStrV()
    # srcnodeattrv.Add("srcLabel")

    dstnodeattrv = snap.TStrV()
    # dstnodeattrv.Add("dstLabel")

    # net will be an object of type snap.PNEANet
    return snap.ToNetwork(snap.PNEANet, table, "srcId", "dstId", srcnodeattrv, dstnodeattrv, edgeattrv, snap.aaFirst)
def parseGraph(filename="./GG-NE/test.tsv"):
    # A file containing the graph, where each row contains an edge
    # and each edge is represented with the source and dest node ids,
    # the edge attributes, and the source and destination node attributes
    # separated by a tab.
    edgefilename = filename

    # When loading strings from different files, it is important to use the same context
    # so that SNAP knows that the same string has been seen before in another table.
    context = snap.TTableContext()

    schema = snap.Schema()
    schema.Add(snap.TStrTAttrPr("srcID", snap.atInt))
    schema.Add(snap.TStrTAttrPr("dstID", snap.atInt))
    schema.Add(snap.TStrTAttrPr("weight", snap.atFlt))
    table = snap.TTable.LoadSS(schema, edgefilename, context, "\t", snap.TBool(False))

    # In this example, we add the edge weight attribute to the network,
    # but no src or dst node attributes.
    edgeattrv = snap.TStrV()
    edgeattrv.Add("weight")
    srcnodeattrv = snap.TStrV()
    dstnodeattrv = snap.TStrV()

    # G will be an object of type snap.PNEANet
    G = snap.ToNetwork(snap.PNEANet, table, "srcID", "dstID", srcnodeattrv, dstnodeattrv, edgeattrv, snap.aaFirst)

    # Attach species labels as an integer node attribute
    labels = pd.read_table("SS-Butterfly_labels.tsv")
    G.AddIntAttrN("label")
    for index, row in labels.iterrows():
        G.AddIntAttrDatN(row["# Node_ID"], row["Species"], "label")
    return G
def load_mode_to_graph(mode, filename, Graph, context):
    modeId = mode + 'Id'
    schema = snap.Schema()
    schema.Add(snap.TStrTAttrPr(modeId, snap.atStr))
    schema.Add(snap.TStrTAttrPr("datasetId", snap.atStr))
    modenet = snap.TTable.LoadSS(schema, filename, context, "\t", snap.TBool(False))
    snap.LoadModeNetToNet(Graph, mode, modenet, modeId, snap.TStrV())
def ttable():
    t0 = t()
    context = snap.TTableContext()
    schema = snap.Schema()
    schema.Add(snap.TStrTAttrPr("Col1", snap.atInt))
    schema.Add(snap.TStrTAttrPr("Col2", snap.atInt))
    table = snap.TTable.LoadSS(schema, NW.twitter, context, "\t", snap.TBool(False))
    reportTime(t0, "TTABLE")
def test_table_merge(self):
    context = snap.TTableContext()
    schema = snap.Schema()
    schema.Add(snap.TStrTAttrPr("Col1", snap.atInt))
    schema.Add(snap.TStrTAttrPr("Col2", snap.atInt))
    schema.Add(snap.TStrTAttrPr("Col3", snap.atFlt))
    filename = "data/data-table.txt"
    grade_table = snap.TTable.LoadSS(schema, filename, context, "\t", snap.TBool(False))
    int_vec = snap.TIntV()
def load_physician_referral_data(infilename):
    """
    Load the US physician referral data from the specified zipfile

    Parameters:
        infilename - path name of zipfile to load from

    Return value:
        SNAP TNGraph object built from the data
    """
    tmpdir = tempfile.mkdtemp()
    try:
        archive = zipfile.ZipFile(infilename, 'r')
        archive.extract('physician-shared-patient-patterns-2014-days30.txt', tmpdir)
        filename = os.path.join(tmpdir, "physician-shared-patient-patterns-2014-days30.txt")
        archive.close()
        context = snap.TTableContext()
        schema = snap.Schema()
        ## schema.Add(snap.TStrTAttrPr("NPI_1", snap.atInt))
        ## schema.Add(snap.TStrTAttrPr("NPI_2", snap.atInt))
        # The above 2 lines worked with SNAP 4.0.0 on VLSCI, but now using SNAP 4.1.0
        # on hpc.ics.usi.ch we find that all ids are -1, so the graph is wrong.
        # Cannot work out why, so changed to string instead of int to try to fix it:
        schema.Add(snap.TStrTAttrPr("NPI_1", snap.atStr))
        schema.Add(snap.TStrTAttrPr("NPI_2", snap.atStr))
        ## schema.Add(snap.TStrTAttrPr("count", snap.atInt))
        ## schema.Add(snap.TStrTAttrPr("unique_bene", snap.atInt))
        ## schema.Add(snap.TStrTAttrPr("same_day_count", snap.atInt))
        # The above 3 lines also worked fine with SNAP 4.0.0 before, but now fail on
        # SNAP 4.1.0 (seems to be due to spaces in CSV fields, not inexplicable like the
        # first two, which have no spaces). We are not using them at the moment anyway,
        # so it is easier to just make them (unused) strings:
        schema.Add(snap.TStrTAttrPr("count", snap.atStr))
        schema.Add(snap.TStrTAttrPr("unique_bene", snap.atStr))
        schema.Add(snap.TStrTAttrPr("same_day_count", snap.atStr))
        table = snap.TTable.LoadSS(schema, filename, context, ",", snap.TBool(False))
        G = snap.ToGraph(snap.PNGraph, table, "NPI_1", "NPI_2", snap.aaFirst)
    finally:
        cleanup_tmpdir(tmpdir)
    return G
def test_table_getitem(self):
    context = snap.TTableContext()
    schema = snap.Schema()
    schema.Add(snap.TStrTAttrPr("Col1", snap.atInt))
    schema.Add(snap.TStrTAttrPr("Col2", snap.atInt))
    schema.Add(snap.TStrTAttrPr("Col3", snap.atFlt))
    filename = "data/data-table.txt"
    grade_table = snap.TTable.LoadSS(schema, filename, context, "\t", snap.TBool(False))
    int_vec = snap.TIntV()
    int_vec.Add(0)
    int_vec.Add(1)
    int_vec.Add(2)
    int_vec.Add(3)
    int_vec.Add(4)
    # unsure about next line!
    col2_vec = grade_table[]
def main(args):
    if len(args) < 3:
        print(get_usage())
        sys.exit(1)

    yelp = sys.argv[1]
    outFile = sys.argv[2]

    t = testutils.Timer(ENABLE_TIMER)
    context = snap.TTableContext()

    YelpS = snap.Schema()
    YelpS.Add(snap.TStrTAttrPr("Name", snap.atStr))
    YelpS.Add(snap.TStrTAttrPr("City", snap.atStr))
    YelpS.Add(snap.TStrTAttrPr("State", snap.atStr))
    YelpS.Add(snap.TStrTAttrPr("Latitude", snap.atFlt))
    YelpS.Add(snap.TStrTAttrPr("Longitude", snap.atFlt))
    TYelp = snap.TTable.LoadSS("Yelp", YelpS, yelp, context, '\t', snap.TBool(True))
    t.show("load Yelp", TYelp)

    Cols = snap.TStrV()
    Cols.Add("Latitude")
    Cols.Add("Longitude")

    # Get all businesses within 2 kilometers of each other
    # (the distance threshold passed to SelfSimJoin below)
    JointTable = TYelp.SelfSimJoin(Cols, DISTANCE_ATTRIBUTE, snap.Haversine, 2)
    t.show("SimJoin complete", JointTable)

    ProjectionV = snap.TStrV()
    ProjectionV.Add("Yelp_1.Name")
    ProjectionV.Add("Yelp_1.City")
    ProjectionV.Add("Yelp_1.State")
    ProjectionV.Add("Yelp_2.Name")
    ProjectionV.Add("Yelp_2.City")
    ProjectionV.Add("Yelp_2.State")
    ProjectionV.Add(DISTANCE_ATTRIBUTE)
    JointTable.ProjectInPlace(ProjectionV)
    t.show("Project complete")

    testutils.dump(JointTable, 100)
    JointTable.SaveSS(outFile)
def load_crossnet_to_graph(context, edgeId, srcName, dstName, filepath, Graph, prefix="miner"):
    srcId = srcName + "SrcId"
    dstId = dstName + "DstId"
    schema = snap.Schema()
    schema.Add(snap.TStrTAttrPr(edgeId, snap.atStr))
    schema.Add(snap.TStrTAttrPr("datasetId", snap.atStr))
    schema.Add(snap.TStrTAttrPr(srcId, snap.atStr))
    schema.Add(snap.TStrTAttrPr(dstId, snap.atStr))
    crossnet = snap.TTable.LoadSS(schema, filepath, context, DELIMITER, snap.TBool(False))
    crossName = prefix + "-" + dstName + "-" + srcName
    Graph.AddCrossNet(srcName, dstName, crossName, False)
    snap.LoadCrossNetToNet(Graph, srcName, dstName, crossName, crossnet, srcId, dstId, snap.TStrV())
def main():
    S = snap.Schema()
    context = snap.TTableContext()
    S.Add(snap.TStrTAttrPr("Animal", snap.atStr))
    S.Add(snap.TStrTAttrPr("Size", snap.atStr))
    S.Add(snap.TStrTAttrPr("Location", snap.atStr))
    S.Add(snap.TStrTAttrPr("Number", snap.atInt))
    Animals = snap.TTable.LoadSS("Animals", S, "/dfs/ilfs2/0/ringo/tests/animals.txt", context, '\t', snap.TBool(False))

    # Get animals with Size == "big"
    pred_size = snap.TAtomicPredicate(snap.atStr, snap.TBool(True), snap.EQ, "Size", "", 0, 0, "big")
    node_size = snap.TPredicateNode(pred_size)

    # Get animals with Location == "Australia"
    pred_location = snap.TAtomicPredicate(snap.atStr, snap.TBool(True), snap.EQ, "Location", "", 0, 0, "Australia")
    node_location = snap.TPredicateNode(pred_location)

    # Size == "big" and Location == "Australia"
    node1 = snap.TPredicateNode(snap.AND)
    node1.AddLeftChild(node_size)
    node1.AddRightChild(node_location)

    # Get animals with Animal == Location (fabricated to show a non-const case)
    pred_animal_location = snap.TAtomicPredicate(snap.atStr, snap.TBool(False), snap.EQ, "Animal", "Location")
    node2 = snap.TPredicateNode(pred_animal_location)

    # (Size == "big" and Location == "Australia") or Animal == Location
    node_root = snap.TPredicateNode(snap.OR)
    node_root.AddLeftChild(node1)
    node_root.AddRightChild(node2)

    pred = snap.TPredicate(node_root)
    Animals.Select(pred)
    testutils.dump(Animals)
def load_chchse(path):
    # Load the table
    context = snap.TTableContext()
    schema = snap.Schema()
    schema.Add(snap.TStrTAttrPr("STITCH 1", snap.atStr))
    schema.Add(snap.TStrTAttrPr("STITCH 2", snap.atStr))
    schema.Add(snap.TStrTAttrPr("Polypharmacy Side Effect", snap.atStr))
    schema.Add(snap.TStrTAttrPr("Side Effect Name", snap.atStr))
    table = snap.TTable.LoadSS(schema, path, context, ",", snap.TBool(True))

    # Reformat the string CIDs as integer columns
    raw_cid1s = snap.TStrV()
    cid1s = snap.TIntV()
    table.ReadStrCol("STITCH 1", raw_cid1s)
    for raw_cid in raw_cid1s:
        cid = format_cid(raw_cid)
        cid1s.Add(cid)
    table.StoreIntCol("cid1", cid1s)

    raw_cid2s = snap.TStrV()
    cid2s = snap.TIntV()
    table.ReadStrCol("STITCH 2", raw_cid2s)
    for raw_cid in raw_cid2s:
        cid = format_cid(raw_cid)
        cid2s.Add(cid)
    table.StoreIntCol("cid2", cid2s)

    # Save table as binary
    #cache_path = "../cache/ChChSe-decagon_table.tsv"
    #table.Save(snap.TFOut(cache_path))

    # TEST: check the number of side effect types
    seVec = snap.TStrV()
    table.ReadStrCol("Side Effect Name", seVec)
    print len(set(list(seVec)))

    return table
def ttableToTmmnet():
    # Load table
    t0 = t()
    context = snap.TTableContext()
    schema = snap.Schema()
    schema.Add(snap.TStrTAttrPr("srcID", snap.atInt))
    schema.Add(snap.TStrTAttrPr("dstID", snap.atInt))
    edge_table = snap.TTable.LoadSS(schema, NW.small, context, "\t", snap.TBool(False))
    t1 = reportTime(t0, "TTABLE")

    # Convert table to TMMNet
    mmnet = snap.TMMNet.New()
    edgeattrv = snap.TStrV()
    edgeattrv.Add("edgeattr1")
    CrossG = snap.LoadCrossNetToNet(mmnet, "Mode1", "Mode2", "Cross1", edge_table, "srcID", "dstID", edgeattrv)
    reportTime(t1, "convert TTABLE to CrossNet")
#def Save(self, *args):
#    self().Save(*args)

if __name__ == '__main__':
    if len(sys.argv) < 3:
        print "Usage: " + sys.argv[0] + " <srcfile> <dstfile>"
        sys.exit(1)

    srcfile = sys.argv[1]
    dstfile = sys.argv[2]

    context = snap.TTableContext()
    t = testutils.Timer()
    r = testutils.Resource()

    schema = snap.Schema()
    schema.Add(snap.TStrTAttrPr("Src", snap.atInt))
    schema.Add(snap.TStrTAttrPr("Dst", snap.atInt))
    table = snap.TTable.LoadSS(schema, srcfile, context, "\t", snap.TBool(False))
    t.show("load text", table)
    r.show("__loadtext__")

    FOut = snap.TFOut(dstfile)
    table.Save(FOut)
    t.show("save bin", table)
    r.show("__savebin__")
def main(args):
    if len(args) < 3:
        print(get_usage())
        sys.exit(1)

    root = sys.argv[1]
    mid_date = sys.argv[2]
    mid_ticks = utils.date_to_ticks(mid_date)

    file_cache = {
        TCOLLAB: None,
        TPULL: None,
        TREPO: None,
        TFOLLOW: None,
        TWATCH: None,
        TFORK: None
    }
    for file in os.listdir(root):
        if file.endswith(".tsv"):
            file_cache[file] = os.path.join(root, file)
            print file_cache[file]
    for key, val in file_cache.iteritems():
        if val == None:
            print("One of the required files not found.")
            print(get_usage())
            sys.exit(1)

    t = testutils.Timer(ENABLE_TIMER)
    context = snap.TTableContext()

    S1 = snap.Schema()
    S1.Add(snap.TStrTAttrPr("userid1", snap.atStr))
    S1.Add(snap.TStrTAttrPr("userid2", snap.atStr))
    S1.Add(snap.TStrTAttrPr("created_at", snap.atInt))
    Tfollow = snap.TTable.LoadSS("Tfollow", S1, file_cache[TFOLLOW], context, '\t', snap.TBool(False))
    t.show("load follow")

    S2 = snap.Schema()
    S2.Add(snap.TStrTAttrPr("userid", snap.atStr))
    S2.Add(snap.TStrTAttrPr("owner", snap.atStr))
    S2.Add(snap.TStrTAttrPr("name", snap.atStr))
    S2.Add(snap.TStrTAttrPr("created_at", snap.atInt))
    Tcollab = snap.TTable.LoadSS("Tcollab", S2, file_cache[TCOLLAB], context, '\t', snap.TBool(False))
    t.show("load collab")

    S3 = snap.Schema()
    S3.Add(snap.TStrTAttrPr("userid", snap.atStr))
    S3.Add(snap.TStrTAttrPr("owner", snap.atStr))
    S3.Add(snap.TStrTAttrPr("name", snap.atStr))
    S3.Add(snap.TStrTAttrPr("pullid", snap.atInt))
    S3.Add(snap.TStrTAttrPr("status", snap.atStr))
    S3.Add(snap.TStrTAttrPr("created_at", snap.atInt))
    Tpull = snap.TTable.LoadSS("Tpull", S3, file_cache[TPULL], context, '\t', snap.TBool(False))
    t.show("load pull")

    # If (u, v) collaborated on the same repository - determined by the (owner, name)
    # pair - they are added as collaborators.
    #TODO: Better column renaming
    V = snap.TStrV()
    V.Add("created_at")
    Tcollab.Order(V, "", snap.TBool(False), snap.TBool(True))
    V.Clr()
    V.Add("owner")
    V.Add("name")
    V.Add("userid")
    Tcollab.Group(V, "UserRepoId")
    V.Clr()
    V.Add("UserRepoId")
    Tcollab.Unique(V)

    Tcollab_merge = Tcollab.SelfJoin("owner")
    Tcollab_merge.SelectAtomic("Tcollab_1.name", "Tcollab_2.name", snap.EQ)
    Tcollab_merge.SelectAtomic("Tcollab_1.userid", "Tcollab_2.userid", snap.NEQ)
    # BUGBUG - Commenting this line will mean created_at is not present in Tcollab_merge.
    # However, the ProjectInPlace will not complain and silently exclude created_at from the
    # result. This leads to the Index:-1 error in SelectAtomicIntConst on created_at later in the code.
    Tcollab_merge.ColMin("Tcollab_1.created_at", "Tcollab_2.created_at", "created_at")
    V = snap.TStrV()
    V.Add("Tcollab_1.userid")
    V.Add("Tcollab_2.userid")
    V.Add("created_at")
    Tcollab_merge.ProjectInPlace(V)
    Tcollab_merge.Rename("Tcollab_1.userid", "userid1")
    Tcollab_merge.Rename("Tcollab_2.userid", "userid2")
    t.show("merge collab", Tcollab_merge)
    #testutils.dump(Tcollab_merge, 50)

    # If (u, v) worked on the same pull request on the same repository, they are added
    # as (soft) collaborators.
    V = snap.TStrV()
    V.Add("created_at")
    Tpull.Order(V, "", snap.TBool(False), snap.TBool(True))
    V.Clr()
    V.Add("owner")
    V.Add("name")
    V.Add("userid")
    Tpull.Group(V, "UserRepoId")
    V.Clr()
    V.Add("UserRepoId")
    Tpull.Unique(V)

    Tpull_merge = Tpull.SelfJoin("owner")
    Tpull_merge.SelectAtomic("Tpull_1.name", "Tpull_2.name", snap.EQ)
    Tpull_merge.SelectAtomic("Tpull_1.pullid", "Tpull_2.pullid", snap.EQ)
    Tpull_merge.SelectAtomic("Tpull_1.userid", "Tpull_2.userid", snap.NEQ)
    Tpull_merge.ColMin("Tpull_1.created_at", "Tpull_2.created_at", "created_at")
    V = snap.TStrV()
    V.Add("Tpull_1.userid")
    V.Add("Tpull_2.userid")
    V.Add("created_at")
    Tpull_merge.ProjectInPlace(V)
    Tpull_merge.Rename("Tpull_1.userid", "userid1")
    Tpull_merge.Rename("Tpull_2.userid", "userid2")
    t.show("merge pull", Tpull_merge)

    # BUGBUG: UnionAll is returning unexpected result at this point
    #Tmerge = Tcollab_merge.UnionAll(Tpull_merge, "Tmerge")
    Tmerge = Tpull_merge

    # Select the base and delta tables from the merged table.
    Tbase = snap.TTable.New(Tmerge, "Base")
    Tdelta = snap.TTable.New(Tmerge, "Delta")
    Tbase.SelectAtomicIntConst("created_at", mid_ticks, snap.LTE)
    Tdelta.SelectAtomicIntConst("created_at", mid_ticks, snap.GTE)
    #TODO: Union Tbase with collab and pull to include (userid, owner) edge
    t.show("collab union")

    # Convert the base and delta tables to graphs
    Gbase = snap.ToNetwork(snap.PNEANet, Tbase, "userid1", "userid2", snap.aaFirst)
    Gdelta = snap.ToNetwork(snap.PNEANet, Tdelta, "userid1", "userid2", snap.aaFirst)
    t.show("base graph", Gbase)
    t.show("delta graph", Gdelta)

    NITERS = 20
    total_preck = 0
    print("Userid\tPrec@%d\tAverage Index" % (N_TOP_RECOS))

    # Random walk with restarts
    # BUGBUG: Returns the same id every time
    # userid = Gbase.GetRndNId()
    for i in range(NITERS):
        # Randomly choose a starting node
        userid = random.choice([node.GetId() for node in Gbase.Nodes()])
        user = Gbase.GetNI(userid)

        # Perform random walk with restarts on the base graph
        HT = snap.TIntFltH()
        snap.GetRndWalkRestart_PNEANet(Gbase, ALPHA, userid, HT)
        HT.SortByDat(False)

        j = 0
        cnt = 0
        preck = 0
        average_index = -1

        # Calculate precision
        while cnt < N_TOP_RECOS and j < HT.Len():
            recoid = HT.GetKey(j)
            pagerank = HT.GetDat(recoid)
            #print recoid, pagerank
            if recoid != userid:
                # If the edge is not in the base graph but is present in the delta graph,
                # we made an accurate prediction.
                if not Gbase.IsEdge(userid, recoid) and Gdelta.IsNode(userid) and Gdelta.IsNode(recoid) and \
                        (Gdelta.IsEdge(userid, recoid) or Gdelta.IsEdge(recoid, userid)):
                    preck += 1
                cnt += 1
            j += 1

        # Calculate average index
        try:
            node = Gdelta.GetNI(userid)
            edges = [nid for nid in node.GetOutEdges()] + [nid for nid in node.GetInEdges()]
            #print edges
            #print([HT.GetKeyId(nid) for nid in edges])
            index = 0
            for nid in edges:
                index += HT.GetKeyId(nid)
            average_index = index / len(edges)
        except:
            # Node not present in delta graph implies no new edges formed
            pass

        total_preck += preck
        print("%d\t%d\t%f" % (userid, preck, average_index))

    #rank = snap.TTable.New("Rank", HT, "User", PAGE_RANK_ATTRIBUTE, context, snap.TBool(True))
    print("Average Precision@%d = %f" % (N_TOP_RECOS, total_preck / float(NITERS)))
# Tests the Union operation

import sys
sys.path.append("../use-cases")

import snap
import testutils
import pdb

P1 = snap.TStrTAttrPr("col1", snap.atInt)
P2 = snap.TStrTAttrPr("col2", snap.atInt)
S = snap.Schema()
S.Add(P1)
S.Add(P2)

Context = snap.TTableContext()
T1 = snap.TTable.LoadSS("1", S, "test2.tsv", Context)
testutils.dump(T1)

V = snap.TStrV()
V.Add("col1")
T2 = T1.Project(V, "2")
testutils.dump(T2)

V = snap.TStrV()
V.Add("col2")
T3 = T1.Project(V, "3")
testutils.dump(T3)

T3.Rename("col2", "col1")
T4 = T2.Union(T3, "4")
testutils.dump(T4)
import sys
import os
import snap

# Load tables
context = snap.TTableContext()

friends_file = "friends.txt"
schema = snap.Schema()
schema.Add(snap.TStrTAttrPr("Student1ID", snap.atInt))
schema.Add(snap.TStrTAttrPr("Student2ID", snap.atInt))
friends_table = snap.TTable.LoadSS(schema, friends_file, context, "\t", snap.TBool(False))

enrollments_file = "enrollments.txt"
schema = snap.Schema()
schema.Add(snap.TStrTAttrPr("StudentID", snap.atInt))
schema.Add(snap.TStrTAttrPr("CourseID", snap.atInt))
enrollments_table = snap.TTable.LoadSS(schema, enrollments_file, context, "\t", snap.TBool(False))

advisors_file = "advisors.txt"
schema = snap.Schema()
schema.Add(snap.TStrTAttrPr("StudentID", snap.atInt))
schema.Add(snap.TStrTAttrPr("ProfID", snap.atInt))
advisors_table = snap.TTable.LoadSS(schema, advisors_file, context, "\t", snap.TBool(False))

prereqs_file = "prereqs.txt"
schema = snap.Schema()
schema.Add(snap.TStrTAttrPr("CourseID", snap.atInt))
schema.Add(snap.TStrTAttrPr("PrereqCourseID", snap.atInt))
prereqs_table = snap.TTable.LoadSS(schema, prereqs_file, context, "\t", snap.TBool(False))
import snap

edgefilename = "imdb_actor_edges.tsv"
nodefilename = "imdb_actors_key.tsv"

context = snap.TTableContext()

edgeschema = snap.Schema()
edgeschema.Add(snap.TStrTAttrPr("srcID", snap.atStr))
edgeschema.Add(snap.TStrTAttrPr("dstID", snap.atStr))
edgeschema.Add(snap.TStrTAttrPr("edgeattr1", snap.atStr))

nodeschema = snap.Schema()
nodeschema.Add(snap.TStrTAttrPr("nodeID", snap.atStr))
nodeschema.Add(snap.TStrTAttrPr("name", snap.atStr))
nodeschema.Add(snap.TStrTAttrPr("movies", snap.atStr))
nodeschema.Add(snap.TStrTAttrPr("main_genre", snap.atStr))
nodeschema.Add(snap.TStrTAttrPr("genres", snap.atStr))

edge_table = snap.TTable.LoadSS(edgeschema, edgefilename, context, "\t", snap.TBool(False))
print "edge_rows", edge_table.GetNumValidRows()
node_table = snap.TTable.LoadSS(nodeschema, nodefilename, context, "\t", snap.TBool(False))
print "node_rows", node_table.GetNumValidRows()

srcattrv = snap.TStrV()
srcattrv.Add("edgeattr1")
dstattrv = snap.TStrV()
dstattrv.Add("edgeattr1")
import snap
import Centrality as centrality

# A file containing the graph, where each row contains an edge
# and each edge is represented with the source and dest node ids,
# the edge attributes, and the source and destination node attributes
# separated by a tab.
edgefilename = "../bellydynamic-data/CollegeMsg.txt"
graphName = "CollegeMsg"

if __name__ == '__main__':
    # When loading strings from different files, it is important to use the same context
    # so that SNAP knows that the same string has been seen before in another table.
    context = snap.TTableContext()

    schema = snap.Schema()
    schema.Add(snap.TStrTAttrPr("srcID", snap.atStr))
    schema.Add(snap.TStrTAttrPr("dstID", snap.atStr))
    schema.Add(snap.TStrTAttrPr("timestamp", snap.atInt))
    # schema.Add(snap.TStrTAttrPr("edgeattr2", snap.atStr))
    # schema.Add(snap.TStrTAttrPr("srcnodeattr1", snap.atStr))
    # schema.Add(snap.TStrTAttrPr("srcnodeattr2", snap.atStr))
    # schema.Add(snap.TStrTAttrPr("dstnodeattr1", snap.atStr))
    # schema.Add(snap.TStrTAttrPr("dstnodeattr2", snap.atStr))
    table = snap.TTable.LoadSS(schema, edgefilename, context, " ", snap.TBool(False))

    # In this example, we add the timestamp edge attribute to the network,
    # and no src or dst node attributes.
    edgeattrv = snap.TStrV()
    edgeattrv.Add("timestamp")
config = ConfigParser.ConfigParser()
config.readfp(open(args.config_file))

if args.loglevel:
    numeric_level = getattr(logging, args.loglevel.upper(), None)
    logging.basicConfig(level=numeric_level)

context = snap.TTableContext()

# Construct the graph
logging.info('Building Multi-Modal Network')
Graph = snap.TMMNet.New()

# Loading Modes
try:
    chemical_mode_file = config.get('Modes', 'Chemical')
    cmschema = snap.Schema()
    cmschema.Add(snap.TStrTAttrPr("ChemicalId", snap.atStr))
    cmschema.Add(snap.TStrTAttrPr("datasetId", snap.atStr))
    chemical_mode = snap.TTable.LoadSS(cmschema, chemical_mode_file, context, "\t", snap.TBool(False))
    logging.info('Done loading Chemical Mode')
    snap.LoadModeNetToNet(Graph, "Chemical", chemical_mode, "ChemicalId", snap.TStr64V())
except ConfigParser.NoOptionError:
    logging.info('Skipping Chemical Mode')

try:
    function_mode_file = config.get('Modes', 'Function')
    fmschema = snap.Schema()
    fmschema.Add(snap.TStrTAttrPr("FunctionId", snap.atStr))
    fmschema.Add(snap.TStrTAttrPr("datasetId", snap.atStr))
    function_mode = snap.TTable.LoadSS(fmschema, function_mode_file, context, "\t", snap.TBool(False))
    logging.info('Done loading Function Mode')
    snap.LoadModeNetToNet(Graph, "Function", function_mode, "FunctionId", snap.TStr64V())
if dstdir is not None:
    try:
        os.makedirs(dstdir)
    except OSError:
        pass

context = snap.TTableContext()
t = testutils.Timer(ENABLE_TIMER)

# a) Compute authority scores

# Load posts
# >>> t1 = ringo.load('posts.tsv')
S = snap.Schema()
S.Add(snap.TStrTAttrPr("PostId", snap.atInt))
S.Add(snap.TStrTAttrPr("UserId", snap.atInt))
S.Add(snap.TStrTAttrPr("AnswerId", snap.atInt))
t1 = snap.TTable.LoadSS("t1", S, os.path.join(srcdir, POSTS_FILE), context, '\t', snap.TBool(False))
t.show("load posts", t1)

# Load tags
# >>> t2 = ringo.load('tags.tsv')
#S = snap.Schema()
#S.Add(snap.TStrTAttrPr("PostId", snap.atInt))
#S.Add(snap.TStrTAttrPr("Tag", snap.atStr))
#t2 = snap.TTable.LoadSS("t2", S, os.path.join(srcdir, TAGS_FILE), context, '\t', snap.TBool(False))
#t.show("load tags", t2)

# Select
import snap

#tsv_file = "data-table1.txt"
tsv_file = "data/data-table.txt"

print("writing table")
context = snap.TTableContext()
schema = snap.Schema()
schema.Add(snap.TStrTAttrPr("srcID", snap.atInt))
schema.Add(snap.TStrTAttrPr("dstID", snap.atInt))
schema.Add(snap.TStrTAttrPr("distance", snap.atFlt))
#schema.Add(snap.TStrTAttrPr("distance", snap.atStr))
table = snap.TTable.LoadSS(schema, tsv_file, context, "\t", snap.TBool(False))

#FOut = snap.TFOut(table_file)
#table.Save(FOut)
#FOut.Flush()

# Iterate over the rows of the table and print each row's attributes
tmp = table.BegRI()
while tmp < table.EndRI():
    #print(str(tmp))
    #print(tmp.GetFltAttr('distance'))
    print(tmp.GetIntAttr('srcID'), tmp.GetIntAttr('dstID'), tmp.GetFltAttr('distance'))
    #print(tmp.GetIntAttr('srcID'), tmp.GetIntAttr('dstID'), tmp.GetStrAttr('distance'))
    tmp.Next()
import sys
import snap
import testutils

if __name__ == '__main__':
    if len(sys.argv) < 2:
        print """Usage: """ + sys.argv[0] + """ <srcfile>
    srcfile: posts.tsv file from StackOverflow dataset"""
        sys.exit(1)

    srcfile = sys.argv[1]

    context = snap.TTableContext()
    t = testutils.Timer()
    r = testutils.Resource()

    schema = snap.Schema()
    schema.Add(snap.TStrTAttrPr("Id", snap.atInt))
    schema.Add(snap.TStrTAttrPr("OwnerUserId", snap.atInt))
    schema.Add(snap.TStrTAttrPr("AcceptedAnswerId", snap.atInt))
    schema.Add(snap.TStrTAttrPr("CreationDate", snap.atInt))
    schema.Add(snap.TStrTAttrPr("Score", snap.atInt))
    table = snap.TTable.LoadSS("1", schema, srcfile, context, "\t", snap.TBool(False))
    t.show("load text", table)
    r.show("__loadtext__")

    table = table.IsNextK("CreationDate", 1, "OwnerUserId")
    t.show("isnextk", table)
    r.show("__isnextk__")
    exit(1)

postsFile = sys.argv[1]
tagsFile = sys.argv[2]
commentsFile = sys.argv[3]
destFile = sys.argv[4] if len(sys.argv) >= 5 else None

context = snap.TTableContext()
t = testutils.Timer(ENABLE_TIMER)

# a) Compute authority scores

# Load posts
# >>> posts = ringo.load('posts.tsv')
S = snap.Schema()
S.Add(snap.TStrTAttrPr("PostId", snap.atInt))
S.Add(snap.TStrTAttrPr("UserId", snap.atInt))
S.Add(snap.TStrTAttrPr("AcceptedAnswerId", snap.atInt))
S.Add(snap.TStrTAttrPr("CreationDate", snap.atStr))
posts = snap.TTable.LoadSS("t1", S, postsFile, context, '\t', snap.TBool(False))
t.show("load posts", posts)

# Load tags
# >>> tags = ringo.load('tags.tsv')
S = snap.Schema()
S.Add(snap.TStrTAttrPr("PostId", snap.atInt))
S.Add(snap.TStrTAttrPr("Tag", snap.atStr))
tags = snap.TTable.LoadSS("t2", S, tagsFile, context, '\t', snap.TBool(False))
t.show("load tags", tags)
yearFile = sys.argv[2]
dstDir = sys.argv[3] if len(sys.argv) >= 4 else None
if dstDir is not None:
    try:
        os.makedirs(dstDir)
    except OSError:
        pass

context = snap.TTableContext()
t = testutils.Timer(ENABLE_TIMER)

# Load data
# >>> authors = ringo.load('authors.tsv')
S = snap.Schema()
S.Add(snap.TStrTAttrPr("Key", snap.atStr))
S.Add(snap.TStrTAttrPr("Author", snap.atStr))
authors = snap.TTable.LoadSS("1", S, authorFile, context, '\t', snap.TBool(False))
t.show("load authors table", authors)

# >>> year = ringo.load('year.tsv')
S = snap.Schema()
S.Add(snap.TStrTAttrPr("Key", snap.atStr))
S.Add(snap.TStrTAttrPr("Year", snap.atInt))
year = snap.TTable.LoadSS("2", S, yearFile, context, '\t', snap.TBool(False))
t.show("load year table", year)

# Select
# >>> year.select('Year >= 2005')
year.SelectAtomicIntConst("Year", 2005, snap.GTE)
t.show("select", year)
import snap

graphfilename = r"C:\Python27\HW1\wiki-vote.txt"

schema = snap.Schema()
context = snap.TTableContext()
schema.Add(snap.TStrTAttrPr("srcID", snap.atStr))
schema.Add(snap.TStrTAttrPr("dstID", snap.atStr))
sample_table = snap.TTable.LoadSS(schema, graphfilename, context, "\t", snap.TBool(False))

# graph will be an object of type snap.PNGraph
graph = snap.ToGraph(snap.PNGraph, sample_table, "srcID", "dstID", snap.aaFirst)

# number of nodes with degree greater than 0
Count = snap.CntNonZNodes(graph)
print "Count of nodes with degree greater than 0 is %d" % Count

# number of nodes with out-degree 0
Count = snap.CntOutDegNodes(graph, 0)
print "Count of nodes with out-degree 0 is %d" % Count

# number of nodes with in-degree 0
Count = snap.CntInDegNodes(graph, 0)
print "Count of nodes with in-degree 0 is %d" % Count

# number of unique directed edges
Count = snap.CntUniqDirEdges(graph)
print "Count of directed edges is %d" % Count

# number of unique undirected edges
Count = snap.CntUniqUndirEdges(graph)
print "Count of undirected edges is %d" % Count

# number of self edges
Count = snap.CntSelfEdges(graph)
print "Count of self edges is %d" % Count
prnodes = pd.read_csv(prpath + str(pid) + '_pr_nodes_reduced.csv')
predges = pd.read_csv(prpath + str(pid) + '_pr_edges_reduced.csv')[['source', 'target']]

# Merge into single nodes and edges files
usernodes = pd.DataFrame(list(issuesnodes['id']) + list(gitpnodes['username']) + list(prnodes['id']),
                         columns=['username']).drop_duplicates()
useredges = pd.concat([r2, issuesedges, predges]).drop_duplicates()
usernodes.to_csv('temp/mergednodes.csv', index=None)
useredges.to_csv('temp/mergededges.csv', index=None)

# Build graph from temp files using the SNAP library
context = snap.TTableContext()
e_schema = snap.Schema()
e_schema.Add(snap.TStrTAttrPr("source", snap.atStr))
e_schema.Add(snap.TStrTAttrPr("target", snap.atStr))
n_schema = snap.Schema()
n_schema.Add(snap.TStrTAttrPr("username", snap.atStr))
edgetable = snap.TTable.LoadSS(e_schema, 'temp/mergededges.csv', context, ",", snap.TBool(True))
nodetable = snap.TTable.LoadSS(n_schema, 'temp/mergednodes.csv', context, ",", snap.TBool(True))
edgeattrv = snap.TStrV()
nodeattrv = snap.TStrV()
nodeattrv.Add("username")
# The final aggregation-policy argument was truncated in the source;
# snap.aaFirst is assumed here, matching the other ToNetwork calls above.
net = snap.ToNetwork(snap.PNEANet, edgetable, "source", "target", edgeattrv,
                     nodetable, "username", nodeattrv, snap.aaFirst)
if __name__ == '__main__':
    if len(sys.argv) < 3:
        print "Usage: " + sys.argv[0] + " <srcfile1> <srcfile2>"
        sys.exit(1)

    srcfile1 = sys.argv[1]
    srcfile2 = sys.argv[2]

    context = snap.TTableContext()
    t = testutils.Timer()
    r = testutils.Resource()

    # Load the first table from a binary file
    FIn = snap.TFIn(srcfile1)
    t1 = snap.TTable.Load(FIn, context)
    t.show("load bin", t1)
    r.show("__loadbin__")

    # Load the second table from a text file
    schema = snap.Schema()
    schema.Add(snap.TStrTAttrPr("Index", snap.atInt))
    t2 = snap.TTable.LoadSS(schema, srcfile2, context, "\t", snap.TBool(False))
    t.show("load text", t2)
    r.show("__loadtext__")

    # Join the two tables on Src == Index
    t3 = t1.Join("Src", t2, "Index")
    t.show("join", t3)
    r.show("__join__")
import sys
sys.path.append("../utils")

import snap
import testutils

if __name__ == '__main__':
    srcfile = '/dfs/ilfs2/0/ringo/StackOverflow_joined/debug.tsv'
    context = snap.TTableContext()

    print "Loading table..."
    schema = snap.Schema()
    schema.Add(snap.TStrTAttrPr("Val", snap.atInt))
    table = snap.TTable.LoadSS("1", schema, srcfile, context, "\t", snap.TBool(False))

    print "Selecting rows with val == 0 in place..."
    table.SelectAtomicIntConst("Val", 0, snap.EQ)

    print "Number of rows in result: %d" % table.GetNumValidRows()
    print "10 first rows of table:"
    testutils.dump(table, 10)