def read_graph(filename):
    # dispatch on the extension: .anet files use the project's custom loader,
    # anything else is read as a plain edge list
    if filename.endswith('.anet'):
        graph = load_anet_graph(filename)
    else:
        graph = Graph.Read_Edgelist(filename)

    return graph
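
A minimal usage sketch (load_anet_graph is assumed to be defined elsewhere in the same project, and the file name here is hypothetical):

# hedged example: any non-.anet path falls through to Read_Edgelist
g = read_graph('network.edgelist')
print(g.summary())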
Example No. 2
def main():
    #Check input parameter alpha
    if len(sys.argv)!=2:
        print("Enter value of alpha as parameter")
        return
    alpha = float(sys.argv[1])
    #Convert alpha to a filename-friendly suffix for the output file
    alph = 0
    if alpha == 0.5:
        alph = 5
    elif alpha == 0.0:
        alph = 0
    elif alpha == 1.0:
        alph = 1
    
    #Input Graph
    graph = Graph.Read_Edgelist('data/fb_caltech_small_edgelist.txt')
    attr = pd.read_csv('data/fb_caltech_small_attrlist.csv')

    #Initialize weights and attributes
    graph.es['weight'] = 1
    attr_names = attr.keys()
    for idx, row in attr.iterrows():
        for name in attr_names:
            graph.vs[idx][name] = row[name]

    #Similarity Matrix
    sim_matrix = compute_similarity_matrix(graph)

    #Phase 1
    community_list = phase1(graph,alpha,sim_matrix)
    print('Communities after Phase 1:')
    print(len(set(community_list)),"Communities")
    phase1_output = ''
    for x in VertexClustering(graph,community_list):
        if x:
            phase1_output += ','.join(map(str,x))
            phase1_output += "\n"
    phase1_output = phase1_output.rstrip('\n')  # strip only the trailing newline

    #Phase 2
    community_list, mapped_clusters = phase2(graph,community_list,alpha,sim_matrix)
    print(mapped_clusters)
    print('Communities after Phase 2:')
    print(len(set(community_list)),"Communities")
    phase2_output = ''
    for cluster in VertexClustering(graph,community_list):
        if cluster:
            original_vertices = []
            for vertex in cluster:
                original_vertices.extend(mapped_clusters[vertex])
            phase2_output += ','.join(map(str,original_vertices))
            phase2_output += '\n'
            print(cluster)
    phase2_output = phase2_output.rstrip('\n')  # strip only the trailing newline

    with open("communities_" + str(alph) + ".txt", 'w') as out_file:
        out_file.write(phase2_output)
    return
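
compute_similarity_matrix is not shown in this snippet; in SAC-1-style attributed clustering it is commonly the pairwise cosine similarity of the vertex attribute vectors. A minimal sketch under that assumption (the function name and attribute handling here are mine, not the original author's):

import numpy as np

def compute_similarity_matrix_sketch(graph):
    # stack each vertex's attribute values into one row of a matrix
    names = graph.vs.attribute_names()
    attrs = np.array([[v[n] for n in names] for v in graph.vs], dtype=float)
    # cosine similarity = dot product of L2-normalized rows
    norms = np.linalg.norm(attrs, axis=1)
    norms[norms == 0] = 1.0  # guard against all-zero attribute rows
    normed = attrs / norms[:, None]
    return normed.dot(normed.T)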
Example No. 3
def import_graph(graph_loc):
    if isinstance(graph_loc, Graph):
        return graph_loc
    extension = graph_loc.split('.')[-1]
    if extension == 'edgelist':
        return Graph.Read_Edgelist(graph_loc)
    elif extension == 'gml':
        return Graph.Read_GML(graph_loc)
Example No. 4
def import_graph(self, graph_loc):
    extension = graph_loc.split('.')[-1]
    if extension == 'edgelist':
        return Graph.Read_Edgelist(graph_loc)
    elif extension == 'gml':
        return Graph.Read_GML(graph_loc)
    elif extension == 'graphml':
        return Graph.Read(graph_loc)
    elif extension == 'dl':
        with open(graph_loc, 'r') as to_read:
            data_reached = False
            edge_list = []
            for line in to_read:
                if data_reached:
                    # split() also strips the trailing newline from the tokens
                    edge = line.split()[0:2]
                    # skip duplicate edges in either orientation
                    if edge in edge_list or [edge[1], edge[0]] in edge_list:
                        continue
                    edge_list.append(edge)
                elif line == 'data:\n':
                    data_reached = True
        return Graph.TupleList(edge_list, directed=False)
Example No. 5

        # reconstructed (an assumption): the head of this snippet was truncated,
        # and the orphaned csv_options block matches graph-tool's load_graph_from_csv
        g = gt.load_graph_from_csv(G.graph['edgelist'],
                                   directed=False,
                                   csv_options={
                                       "delimiter": " ",
                                       "quotechar": '"'
                                   })
        block = gt.minimize_nested_blockmodel_dl(
            g,
            B_min=G.graph['number_communities'],
            B_max=G.graph['number_communities'])
        num_block = block.levels[0].get_B()
        block = block.levels[0].get_blocks()
        partition = [0 for i in range(G.number_of_nodes())]
        for i in range(G.number_of_nodes()):  #for every node
            partition[i] = block[i]
        zsbm.append(ami(partition, G.graph['labels_communities']))

        igraph = ig.Read_Edgelist(G.graph['edgelist'])
        part = igraph.community_infomap()
        partition = [0 for i in range(G.number_of_nodes())]
        for i in range(G.number_of_nodes()):
            for j in range(len(part)):
                if i in part[j]:
                    partition[i] = j
        zinfomap.append(ami(partition, G.graph['labels_communities']))

        Y = community.best_partition(G.to_undirected())
        # Louvain heuristics; see https://perso.crans.org/aynaud/communities/api.html
        partition = [0 for i in range(G.number_of_nodes())]
        for k in range(G.number_of_nodes()):
            partition[k] = Y[k]
        zmaxmod.append(ami(partition, G.graph['labels_communities']))
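
This fragment relies on names bound earlier in its source script; plausible imports (an assumption, inferred from the calls above) would be:

import graph_tool.all as gt                     # minimize_nested_blockmodel_dl
from igraph import Graph as ig                  # matches the ig.Read_Edgelist call
import community                                # python-louvain: best_partition
from sklearn.metrics import adjusted_mutual_info_score as ami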
Example No. 6
#
# Actual script
#

# 1
g_names = [
    'corticalCat', 'corticalHuman', 'corticalMonkey', 'usAirports', 'euroroad',
    'hamster'
]

g_giants = []
for g_name in g_names:
    g_path = path.join('networks', g_name + '.txt')
    with open(g_path, 'rb') as g_file:
        giant = Graph.Read_Edgelist(g_file, directed=False)
        giant = giant.components().giant()
        giant.simplify()
        g_giants.append(giant)

print('\nAssortativity')
print('=============\n')
for g_name, giant in zip(g_names, g_giants):
    print(g_name, '= {:.4f}'.format(giant.assortativity_degree()))
print('done')

Example No. 7

# 2
direct = raw_input('Input the folder under which the files will be analyzed\n')
if not os.path.exists(direct):
    print "No such directory: " + direct
if not os.path.exists(direct + "/output_others_code"):
    os.mkdir(direct + "/output_others_code")
time_cost = []
perm = []
modu = []
community_number = []
max_community_size = []
average_community_size = []
for filename in os.listdir(direct):
    if os.path.isfile(direct + "/" + filename):
        print "Processing:" + direct + "/" + filename + "\n"
        start = datetime.datetime.now()
        g = Graph.Read_Edgelist(direct + "/" + filename, directed=False)
        "community = Graph.community_spinglass(g)"
        "dendrogram = Graph.community_fastgreedy(g)"
        "community = dendrogram.as_clustering()"
        "community = Graph.community_infomap(g)"
        community = Graph.community_label_propagation(g)
        "community = Graph.community_multilevel(g)"
        end = datetime.datetime.now()
        t = end - start
        time_cost.append(t.seconds)
        "TODO:perm"
        perm.append(permanence_igraph.permanence_igraph(g, community))
        modu.append(g.modularity(community))
        community_number.append(len(community))
        max_community = 0
        average_community = 0
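
The snippet is cut off before max_community and average_community are filled in; a plausible completion of that step, assuming community is an igraph VertexClustering as above, might be:

        # hedged sketch: per-community sizes from the clustering
        sizes = [len(c) for c in community]
        max_community_size.append(max(sizes))
        average_community_size.append(float(sum(sizes)) / len(sizes))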
Example No. 8
    parser.add_argument("-n", "--networkid", required=True, type=int,
    help="0 --> model1\t4 --> real1\n1 --> model2\t5 --> real2\n2 --> model3\t6 --> real3\n3 --> model4\t7 --> real4\n")
    parser.add_argument("-o", "--outfile", required=True, type=str)
    parser.add_argument("-b", "--batchsize", default=500, type=int)
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()

    with open(args.outfile, "w") as outfile:
        fm = networkname[args.networkid]
        # rewrite the comma-separated edge file with spaces for Read_Edgelist
        with open(databasepath + fm + '.csv', 'r') as rawfile:
            els = rawfile.read().replace(',', ' ')
        with open(fm + '.csv.space', 'w') as nfile:
            nfile.write(els)
        print("Formatting Done.")

        print("Reading Graph : " + fm)
        with open(fm + '.csv.space', 'r') as elfile:
            rgraph = Graph.Read_Edgelist(elfile, directed=False)
        for i in range(len(rgraph.vs)):
            rgraph.vs[i]["name"] = i

        print("Greedy PageRank...")
        s_index = greedyPageRank(rgraph,args.batchsize)
        fstr = fm + "," + ",".join(map(str,s_index)) + "\n"
        outfile.write(fstr)
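
greedyPageRank is defined elsewhere in the source; purely as a hypothetical stand-in, a batch selection by PageRank score could look like:

def greedy_pagerank_stub(graph, batchsize):
    # rank all vertices by PageRank and return the top `batchsize` indices
    scores = graph.pagerank()
    ranked = sorted(range(graph.vcount()), key=lambda v: scores[v], reverse=True)
    return ranked[:batchsize]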
Example No. 9

# clean up
try:
    os.remove(datadir + "D_" + network + ".hdf5")
except OSError:
    print("No clean up necessary.")

nland = int(sys.argv[2])
datatype = 'i4' # 4 byte (32 bit integer)

# if graph_driver == 'igraph':
# load from igraph
print("Loading data...")
start = T.time()
edgelist = datadir + "com-" + network + ".ungraph.remapped.txt"
# G2 = Graph.Read(edgelist,format="edgelist", directed=False)
G2 = Graph.Read_Edgelist(edgelist, directed=False)
# G2 = Graph.Load(datadir + "igraph_graph_" + network,format="graphmlz")
print("Done Loading! %.3f" %(T.time()-start))
# G2 = Graph.Load("igraph_edgelist",format="edgelist")
# G2 = Graph.as_undirected(G2) # avoid inf when calculating distances
# nodes = G2.vs.indices
nnodes = G2.vcount() #len(nodes)
# print("Nodes ids are in order = ", np.amax(nodes) + 1 == G2.vcount())

# get node degrees for each index
print("Get degrees for each node...")
degrees = G2.vs.degree()
print("Creating node degree map (after remapping)...")
n2d = np.zeros((nnodes,1),dtype=datatype)
# n2d[:,0] = nodes
n2d[:,0] = degrees
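
With the degree map in place, one plausible next step (an assumption; the original continues beyond this excerpt, and np is the numpy import already used above) is picking the nland highest-degree vertices as landmarks:

# hedged sketch: take the nland highest-degree vertices as landmarks
landmarks = np.argsort(n2d[:, 0])[::-1][:nland]
print("Highest landmark degree: %d" % n2d[landmarks[0], 0])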
Example No. 10
"""
	Simple script for builiding batches of test data from simple settings text files.
"""
from igraph import Graph
import os
import subprocess

file = open("girvan_kout", "r")
FNULL = open(os.devnull, 'w')

for line in file:
    print line
    if not line.startswith('#'):
        params = line.split(',')
        if not os.path.exists(params[0]):
            os.makedirs(params[0])
        print line
        for i in xrange(int(params[2])):
            filename = params[0].strip() + params[1].strip() + "_" + str(
                i) + ".gml"
            print(params[3])
            subprocess.call(params[3], shell=True)
            g = Graph.Read_Edgelist("network.dat", directed=False)
            g.simplify()
            # write to gml
            g.write_gml(filename)
            # move community.dat to filename.coms
            print(filename)
            os.rename("community.dat", filename + ".coms")
Example No. 11
'''
Created on 2014/9/29

@author: free
'''
from igraph import Graph
g = Graph.Read_Edgelist("../as19971108_only_edge.txt", directed=False)
community_1 = Graph.community_infomap(g)
print g.modularity(community_1)
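
For comparison, other igraph algorithms can be scored the same way; a brief sketch (not part of the original):

# hedged sketch: score Louvain-style multilevel on the same graph
community_2 = g.community_multilevel()
print g.modularity(community_2)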

Example No. 12

# program executes from here

length_of_arguments = len(sys.argv)  #checking length of arguments in sys
if (length_of_arguments < 2):
    print('please enter the alpha value')
    sys.exit(1)

alphavalue = sys.argv[1]  #picking the first argument value i.e the alpha value
alphavalue = float(alphavalue)  #converting alpha value to float
#print(alphavalue)

# read the graph as an edge list from the given file
graphobtained = Graph.Read_Edgelist(
    'data/fb_caltech_small_edgelist.txt', directed=False
)  # [Reference: https://igraph.org/c/doc/igraph-Foreign.html]

#reading a dataframe of the attributes for each node.
attributes = pd.read_csv(
    'data/fb_caltech_small_attrlist.csv'
)  #[Reference Link : https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html]

# set all the vertex attributes column-wise; the attribute CSV has 65 columns,
# and assigning to the vertex sequence sets the value for every vertex at once
# [Reference: https://igraph.org/python/doc/igraph.VertexSeq-class.html]
for column in attributes.keys():
    graphobtained.vs[column] = attributes[column]
Example No. 13
def read_data(self):
    """Get the data"""
    self.g = Graph.Read_Edgelist("data/fb_caltech_small_edgelist.txt")
    self.g = self.g.as_undirected()
    self.df = pd.read_csv("data/fb_caltech_small_attrlist.csv")
    self.preprocessing()
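
preprocessing is defined elsewhere in this class; a plausible sketch, assuming its role is to copy the attribute columns onto the vertices:

def preprocessing(self):
    # hypothetical sketch: attach each attribute column to the vertex sequence
    for col in self.df.columns:
        self.g.vs[col] = self.df[col].tolist()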
Example No. 14
def createGraph():
    attributes = pd.read_csv('data/fb_caltech_small_attrlist.csv').to_numpy()
    graph = Graph.Read_Edgelist('data/fb_caltech_small_edgelist.txt',
                                directed=False)
    return attributes, graph
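
A minimal usage sketch:

# hedged example
attributes, graph = createGraph()
print(graph.vcount(), "vertices;", attributes.shape[1], "attribute columns")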