Example #1
def main():
    random.seed(13)  # the seed can affect the search path
    gfile = sys.argv[1]  # data graph file given as GraphML
    qfiles = sys.argv[2:]  # list of graphs (GraphML) to query in the data graph

    print("---- Graph: %s ----" % (gfile))

    st = time.time()
    G = igraph.load(gfile, format="graphml")
    load_time = time.time() - st

    print("---- times ----")
    print("load time: %.2f" % load_time)

    total_time = 0.0
    for qfile in qfiles:
        print("---- Query: %s ----" % (qfile))
        st = time.time()
        Q = igraph.load(qfile, format="graphml")
        loadQ_time = time.time() - st

        iso_time, num_matches = vf2(G, Q)

        query_time = loadQ_time + iso_time

        print("query time : %.2f" % query_time)
        print("num matches: %d" % (num_matches))

        total_time += query_time

    print("total query time: %.2f" % (total_time))
    print("total time: %.2f" % (total_time + load_time))
    print("\n")
Example #2
    def process_file(self, filename):
        """Loads a graph from the given file, runs the clustering
        algorithm on it and prints the clusters to the standard
        output."""
        self.log.info("Processing %s..." % filename)

        _, ext = os.path.splitext(filename)
        if ext == ".txt":
            graph = load(filename, format=self.options.format, directed=False)
        else:
            graph = load(filename, format=self.options.format)

        # If the graph has weights and we want to ignore them, delete them
        if self.options.no_weights and "weight" in graph.edge_attributes():
            del graph.es["weight"]

        # If the graph is directed, we have to make it undirected
        if graph.is_directed():
            self.log.warning("Converting directed graph to undirected.")
            if igraph_version < "0.6":
                graph.to_undirected(collapse=False)
                self.log.warning("Mutual edges will be collapsed into a single edge.")
                self.log.warning("Weights will be lost!")
            else:
                graph.to_undirected(combine_edges="sum")

        # Sanity checks
        if not graph.is_simple():
            self.log.warning("Removing self-loops and collapsing multiple edges...")
            if igraph_version < "0.6":
                graph.simplify(reduce_attributes="sum")
            else:
                graph.simplify(combine_edges="sum")

        # Set up the "name" attribute properly
        if "label" in graph.vertex_attributes():
            graph.vs["name"] = graph.vs["label"]
            del graph.vs["label"]
        elif "name" not in graph.vertex_attributes():
            graph.vs["name"] = [str(i) for i in xrange(graph.vcount())]

        # Run the algorithm, get the result generator
        self.log.info("Calculating clusters, please wait...")
        algorithm = HLC(graph, self.options.min_size)
        results = algorithm.run(self.options.threshold)

        # Print the optimal threshold if we determined it automatically
        if self.options.threshold is None:
            self.log.info("Threshold = %.6f" % algorithm.last_threshold)
            self.log.info("D = %.6f" % algorithm.last_partition_density)

        # Print the results
        if self.options.output:
            outfile = open(self.options.output, "w")
            self.log.info("Saving results to %s..." % self.options.output)
        else:
            outfile = sys.stdout
        for community in results:
            print("\t".join(graph.vs[community]["name"]), file=outfile)
Example #3
def main():

    G = igraph.load('/Users/ytesfaye/tmp/GRAPHS/flights.graphml')
    #snap_home, filename = setup(G)

    vc = cesna(G)
    print(vc)
Example #4
def __prepare__(data_dir):
    """
    TEMPLATE COMMENT: prepare the data into graphml format.
    """

    vertex_filename = os.path.join(data_dir, VERTEX_DATA_FILE)
    edge_filename = os.path.join(data_dir, EDGE_DATA_FILE)

    g = igraph.load(edge_filename)

    vertex_file = open(vertex_filename)
    reader = csv.DictReader(vertex_file)

    for case in reader:
        caseid = int(case['caseid'])
        v = g.vs[caseid]

        v['caseid'] = case['caseid']
        v['usid'] = case['usid']
        v['parties'] = case['parties']
        v['year'] = case['year']

    vertex_file.close()

    # Case IDs are 1-indexed, so we delete the 0th vertex as it was extraneous.
    g.delete_vertices([0])

    graph_file = os.path.join(data_dir, GRAPH_NAME + GRAPH_TYPE)
    g.write_graphml(graph_file)
Example #5
def load_data():
    start = time.time()
    try:
        print("Loading data from /data pickles and hfd5 adj matrices")
        f = h5py.File('data/cosponsorship_data.hdf5', 'r')
        for chamber in ['house', 'senate']:
            for congress in SUPPORTED_CONGRESSES:
                adj_matrix_lookup[(chamber, congress)] = np.asarray(f[chamber + str(congress)])

                igraph_graph = igraph.load("data/" + chamber + str(congress) + "_igraph.pickle", format="pickle")
                igraph_graph_lookup[(chamber, congress, False)] = igraph_graph

                nx_graph = nx.read_gpickle("data/" + chamber + str(congress) + "_nx.pickle")
                nx_graph_lookup[(chamber, congress, False)] = nx_graph
    except IOError as e:
        print("Loading data from cosponsorship files")
        f = h5py.File("data/cosponsorship_data.hdf5", "w")
        for chamber in ['house', 'senate']:
            for congress in SUPPORTED_CONGRESSES:
                print("Starting %s %s" % (str(congress), chamber))
                adj_matrix = load_adjacency_matrices(congress, chamber)
                data = f.create_dataset(chamber + str(congress), adj_matrix.shape, dtype='f')
                data[0: len(data)] = adj_matrix

                # igraph
                get_cosponsorship_graph(congress, chamber, False).save("data/" + chamber + str(congress) + "_igraph.pickle", "pickle")
                # networkx
                nx.write_gpickle(get_cosponsorship_graph_nx(congress, chamber, False), "data/" + chamber + str(congress) + "_nx.pickle")

                print("Done with %s %s" % (str(congress), chamber))
    print("Data loaded in %d seconds" % (time.time() - start))
Example #6
def __prepare__(data_dir):
    """
    TEMPLATE COMMENT: prepare the data into graphml format.
    """

    vertex_filename = os.path.join(data_dir, VERTEX_DATA_FILE)
    edge_filename = os.path.join(data_dir, EDGE_DATA_FILE)

    g = igraph.load(edge_filename)

    vertex_file = open(vertex_filename)
    reader = csv.DictReader(vertex_file)

    for case in reader:
        caseid = int(case['caseid'])
        v = g.vs[caseid]

        v['caseid']  = case['caseid']
        v['usid']    = case['usid']
        v['parties'] = case['parties']
        v['year']    = case['year']

    vertex_file.close()

    # Case IDs are 1-indexed, so we delete the 0th vertex as it was extraneous.
    g.delete_vertices([0])

    graph_file = os.path.join(data_dir, GRAPH_NAME + GRAPH_TYPE)
    g.write_graphml(graph_file)
Example #7
def load(fn):
    """
    Load a graph from file with name fn.

    Returns a sonet.graph.Graph
    """
    return Graph(ig.load(fn))
Example #8
def run_graph(graph_file, output_file, neighborhood, flag = None):
    print "==== New graph ===="
    print "Input:", graph_file
    print "Output:", output_file
    print "Neighborhood:", neighborhood
    print ""

    print "Loading graph"
    print "Start time: %s" % (datetime.now())
    g = ig.load(graph_file)
    print "End time: %s" % (datetime.now())

    gflag = None
    if flag is None:
        print "Creating flag complex"
        print "Start time: %s" % (datetime.now())
        gedges = list(g.get_edgelist())
        gedgelist = map(list,gedges)
        gflag = sh.flag(gedgelist,4)
        print "End time: %s" % (datetime.now())
    else:
        print "Using passed in flag complex"
        gflag = flag

    print "Finding the local homology"
    print "Start time: %s" % (datetime.now())
    graph = graph_file.split("/")[-1]
    ofile = open(output_file, 'w')  # open once; re-opening with 'w' inside the loop would overwrite earlier neighborhoods
    for n in range(neighborhood+1):
        ofile.write("Local Homology (neighborhood=%d) of flag complex generated by %s\n" % (n, graph))
        locHomTable(gflag, {}, n, ofile)
    ofile.close()
    print "End time: %s" % (datetime.now())
    print "\n"

    return gflag
Example #9
def networking(nodes, edges):
    #creates graph
    net = nx.Graph()

    #adds nodes to graph
    for n in nodes:
        net.add_node(str(n.id1))

    for s1 in edges.keys():
        for s2 in edges[s1].keys():
            #net.add_edge(int(s1.id1),int(s2.id1), length=edges[s1][s2]["cosine"])
            net.add_edge(str(s1.id1),
                         str(s2.id1),
                         weight=edges[s1][s2]["cosine"])

    #prints number of nodes in net
    print(net.number_of_nodes())

    #exports .graphml and exports .svg to browser
    nx.write_graphml(net, "graph.graphml")
    nx.write_gml(net, "graph.gml")
    graph = igraph.load("graph.gml")
    layout = graph.layout("fr")
    igraph.Graph.write_svg(graph,
                           fname="static/graph_image.svg",
                           layout=layout,
                           width=4000,
                           height=4000)

    return net
Example #10
def main():

    G = igraph.load('/Users/ytesfaye/tmp/GRAPHS/flights.graphml')
    #snap_home, filename = setup(G)

    vc = cesna(G)
    print(vc)
Example #11
def load(fn):
    """
    Load a graph from file with name fn.

    Returns a sonet.graph.Graph
    """
    return Graph(ig.load(fn))
Example #12
def _draw(args):
    import igraph
    g = igraph.load('_'.join(args.screen_name) + EDG_EXT)
    sys.stdout.write('%s Handles\n%s Follow Relationships\n' %
                     (g.vcount(), g.ecount()))
    sys.stdout.write('Avg Shortest Path = %.6f\n' % g.average_path_length())
    sys.stdout.write('In-Degree Distribution mean = %.6f, sd = %.6f\n' %
                     (g.degree_distribution(mode=igraph.IN).mean,
                      g.degree_distribution(mode=igraph.IN).sd))
    sys.stdout.write('Clustering Coefficient = %.6f\n' %
                     g.transitivity_undirected())
    sys.stdout.write('Degree Assortativity = %.6f\n' %
                     g.assortativity_degree())
    width = height = int(750 + g.vcount() * 4.73 - g.vcount()**2 * 1.55e-3)
    comp = g.clusters(igraph.STRONG if args.strong else igraph.WEAK)
    sys.stdout.write('Cluster Modularity = %.6f\n' % comp.modularity)
    gc = comp.giant()
    gc_size = gc.vcount()
    sys.stdout.write('Fraction Handles in Giant Component = %.2f%%\n' %
                     (100.0 * gc_size / g.vcount()))
    if len(args.screen_name) > 1 and not args.strong:
        RGB_dict = dict(zip(SHAPES, _palette(len(SHAPES))))
        for v in g.vs:
            v['color'] = RGB_dict[v['shape']]
    else:
        for v in g.vs:
            v['color'] = (.8, .8, .8)
        if gc.ecount() > 0:
            sys.stdout.write('GC Diameter %i (unweighted)\n' % gc.diameter())
            gc_vert = comp[comp.sizes().index(gc_size)]
            cim = gc.community_infomap(edge_weights=gc.es['weight'],
                                       vertex_weights=gc.vs['lfr'])
            RGB_tuples = _palette(max(cim.membership) + 1)
            for v in g.vs:
                gc_v = gc.vs.select(id_eq=v['id'])
                if len(gc_v) > 0:
                    v['color'] = RGB_tuples[cim.membership[gc_v[0].index]]
            g.es['color'] = [g.vs['color'][e.target] for e in g.es]
    E = max(g.es['weight'])
    g.es['width'] = [(not args.transparent) * max(1, 10 * e['weight'] / E)
                     for e in g.es]
    g.es['arrow_size'] = [max(1, 3 * e['weight'] / E) for e in g.es]
    ID = max(g.vs.indegree()) or 1
    g.vs['label_size'] = [max(12, 36 * v.indegree() / ID) for v in g.vs]
    for v in g.vs:
        if v['type'] == 'mention':
            v['label_color'] = 'blue'
    for v in g.vs:
        filename = '%s/%s%s' % (FDAT_DIR, v['user_id'], FDAT_EXT)
        if not os.path.isfile(filename):
            v['color'] = (0, 0, 0)
    filename = '_'.join(args.screen_name) + \
      ('_s' if args.strong else '_w') + \
      ('_t' if args.transparent else '') + \
      '.' + args.format
    igraph.plot(g,
                filename,
                layout=g.layout(args.layout),
                bbox=(width, height),
                margin=50)
Example #13
def make_navigationoptions(args):
    if not args.prefix:
        args.prefix = os.path.splitext(os.path.basename(args.dataset))[0]

    dataset = np.loadtxt(args.dataset)

    graph = ig.load(args.graph)

    vd = cPickle.load(open(args.clustering, 'rb'))
    cl = vd.as_clustering()

    index = FLANN()
    index.load_index(args.index, dataset)

    # Find adjacent communities
    k = args.nn
    knn, dists = index.nn_index(dataset, k+1)
    knn = knn[:, 1:]
    dists = dists[:, 1:]
    membership = np.array(cl.membership)

    adjacencies = {}
    for community in cl:
        for node in community:
            mode_label = mode(membership[knn[node]])
            mode_freq = mode_label[1][0]
            mode_label = mode_label[0][0]

            if mode_label != membership[node] and sum(graph.are_connected(node, nid) for nid in knn[node]) > (k/2):
                adjacencies.setdefault(membership[node], []).append(int(mode_label))

    # Make options
    cPickle.dump([options.NavigationOption(source, target, membership, args.index, args.dataset, args.nn)
        for source, targets in adjacencies.iteritems()
            for target in set(targets)], open(args.prefix + '-navigation-options.pl', 'wb'))
Example #14
    def __prepare__(self):
        """
        """
        #convert gml to graphml
        G = igraph.load(os.path.join(self.raw_data_path, "football.gml"))
        #must delete the id attribute since graphml uses it as a reserved attribute and gml does not
        del G.vs['id']
        G.write_graphml(self.graph_path)
Example #15
    def __prepare__(self):
        """
        """
        #convert gml to graphml
        G = igraph.load(os.path.join(self.raw_data_path, "football.gml"))
        #must delete the id attribute since graphml uses it as a reserved attribute and gml does not
        del G.vs['id']
        G.write_graphml(self.graph_path)
Example #16
def main(argv=None):
	if argv is None:
		argv = sys.argv
	if len(argv) < 5 or len(argv) > 6:
		print "Usage: %s songGraph.pickle exclusionList NumNoms src [commStruct]"% argv[0]
		return
	global GIVE
	exclusionRaw = open(argv[2]).readlines()
	exclusionList = []
	for edge in exclusionRaw:
		exclusionList.append(edge.strip())
	numNoms = int(argv[3])
	
	if len(argv) == 6:
		# songReport = cPickle.load(open(argv[5]))
		# commStruct = songReport.vcG
		commStruct = cPickle.load(open(argv[5]))
		g = commStruct.graph.copy()
	else:
		commStruct = None
		g = igraph.load(argv[1])

	if argv[4] == 'None':
		src = None
		paths = None
	else:
		src = argv[4]
	
	
	omras2DB = pg.connect(dbname=DBNAME, host=HOST, user=USER)
	try:
		currentSession = omras2DB.query("SELECT session_no FROM sessions WHERE is_current = true").getresult()[0][0]
		nomSession = int(currentSession)
		print "%s :: Nominations will be inserted into session %i"%(argv[0], nomSession)
	except IndexError:
		print "%s :: no current session. turn on the radio dameon and try again."%argv[0]
		return
	
	print "%s:: removed %i edges from graph."%(argv[0], len(exclusionList))
	g = g.delete_vertices(g.vs.select(hashkey_in=exclusionList))

	toDelete = g.vs.select(duration=None)
	print "%s:: removed %i vertices from graph due to unknown duration."%(argv[0], len(toDelete))
	g = g.delete_vertices(toDelete)

	toDelete = g.vs.select(hashkey=None)
	print "%s:: removed %i vertices from graph due to unknown hashkey."%(argv[0], len(toDelete))
	g = g.delete_vertices(toDelete)
	
	deadEnds = g.vs.select(_outdegree=0)

	
	
	try:
		src = g.vs.select(hashkey= src)[0].index
	except Exception, err:
		print "%s :: ERROR :: trouble dereferencing src uid, nominees may be incorrectly filtered. \n\tmsg:%s\n\tproceeding w/o src"%(argv[0], str(err))
		src = None
Example #17
def new_top_k(net, k):

    print('method called')

    top_k_net = nx.Graph()

    top_k = []

    #loops through edges in network
    for e in net.edges:
        #defines end points for edges
        source, target = e

        #sorts edges for both end points
        sorted_source = sorted(net.edges(source, data=True),
                               key=lambda t: t[2].get('cosine', 1),
                               reverse=True)
        sorted_target = sorted(net.edges(target, data=True),
                               key=lambda t: t[2].get('cosine', 1),
                               reverse=True)

        #checks if number of edges is within K already
        if k < (len(sorted_source)):
            top_source = sorted_source[:k]
        else:
            top_source = sorted_source
        if k < (len(sorted_target)):
            top_target = sorted_target[:k]
        else:
            top_target = sorted_target

        #adds edge to top_k_net
        for s in top_source:
            s_source, s_target, s_cos = s
            for t in top_target:
                t_source, t_target, t_cos = t
                if t_source == s_target and s_source == t_target:
                    top_k_net.add_edge(str(s_source),
                                       str(s_target),
                                       weight=s_cos)

    #adds nodes
    for n in nx.nodes(net):
        top_k_net.add_node(n)

    #export
    nx.write_graphml(top_k_net, "graph.graphml")
    nx.write_gml(top_k_net, "graph.gml")
    graph = igraph.load("graph.gml")
    layout = graph.layout("fr")
    igraph.Graph.write_svg(graph,
                           fname="static/graph_image.svg",
                           layout=layout,
                           width=4000,
                           height=4000)

    return top_k_net
Example #18
def main():
    import optparse

    p = optparse.OptionParser(usage="usage: %prog [-s SOURCE] [-h] file")
    p.add_option('-s', '--source', metavar='SOURCE', dest='source',
                 help='Specify a graph to use as source for attributes '+ \
                 '(this will disable API calls)')

    opts, files = p.parse_args()

    if not files:
        p.error("Give me a file, please ;-)")
    fn = files[0]

    lang, date, type_ = explode_dump_filename(fn)

    groups = ('bot', 'sysop', 'bureaucrat', 'checkuser', 'steward', 'import',
              'transwiki', 'uploader', 'ipblock-exempt', 'oversight',
              'founder', 'rollbacker', 'accountcreator', 'autoreviewer',
              'abusefilter')
    g = ig.load(fn)
    if opts.source:
        sourceg = ig.load(opts.source)
        for destv in g.vs:
            try:
                sourcev = sourceg.vs.select(username=destv['username'])[0]
            except IndexError:
                print destv['username'], 'not found in source'
                for group in groups:
                    destv[group] = None
                continue
            for group in groups:
                destv[group] = sourcev[group]

    else:
        for group in groups:
            addGroupAttribute(g, lang, group)

        print 'BLOCKED ACCOUNTS'
        addBlockedAttribute(g, lang)

    print 'ANONYMOUS USERS'
    g.vs['anonymous'] = map(isip, g.vs['username'])
    g.write("%swiki-%s%s_rich.pickle" % (lang, date, type_), format="pickle")
Example #19
def main():
    import optparse

    p = optparse.OptionParser(usage="usage: %prog [-s SOURCE] [-h] file")
    p.add_option('-s', '--source', metavar='SOURCE', dest='source',
                 help='Specify a graph to use as source for attributes '+ \
                 '(this will disable API calls)')

    opts, files = p.parse_args()

    if not files:
        p.error("Give me a file, please ;-)")
    fn = files[0]

    lang, date, type_ = explode_dump_filename(fn)

    groups = ('bot', 'sysop', 'bureaucrat', 'checkuser', 'steward', 'import',
              'transwiki', 'uploader', 'ipblock-exempt', 'oversight',
              'founder', 'rollbacker', 'accountcreator', 'autoreviewer',
              'abusefilter')
    g = ig.load(fn)
    if opts.source:
        sourceg = ig.load(opts.source)
        for destv in g.vs:
            try:
                sourcev = sourceg.vs.select(username=destv['username'])[0]
            except IndexError:
                print destv['username'], 'not found in source'
                for group in groups:
                    destv[group] = None
                continue
            for group in groups:
                destv[group] = sourcev[group]

    else:
        for group in groups:
            addGroupAttribute(g, lang, group)

        print 'BLOCKED ACCOUNTS'
        addBlockedAttribute(g, lang)

    print 'ANONYMOUS USERS'
    g.vs['anonymous'] = map(isip, g.vs['username'])
    g.write("%swiki-%s%s_rich.pickle" % (lang, date, type_), format="pickle")
Example #20
def main():
    facebook_graph = igraph.load('../data/fb_caltech_small_edgelist.txt')
    final_communities = sac1(facebook_graph)
    file = open('communities.txt', 'w+')

    for c in final_communities:
        community = map(lambda x: str(x), c)
        file.write(", ".join(community) + "\n")

    file.close()
Example #21
  def loadFromIgraph(self, filename, gformat="graphml"):
    """
    Load a sparse matrix from igraph as a numpy pickle

    Positional arguments:
    ====================
    filename - the file name/path to where you want to save the graph
    gformat - the format which you want to use to save the graph. Choices:
    """
    self.spcscmat = igraph.load(filename, format=gformat)
Example #22
    def loadFromIgraph(self, filename, gformat="graphml"):
        """
    Load a sparse matrix from igraph as a numpy pickle

    Positional arguments:
    ====================
    filename - the file name/path to where you want to save the graph
    gformat - the format which you want to use to save the graph. Choices:
    """
        self.graph = igraph.load(filename, format=gformat)
Example #23
def main():
    facebook_graph = igraph.load('./data/fb_caltech_small_edgelist.txt')
    final_communities = sac1(facebook_graph)
    file = open('communities.txt', 'w+')

    for c in final_communities:
        community = map(lambda x: str(x), c)
        file.write(", ".join(community) + "\n")

    file.close()
Example #24
def get_graph():

    data_dir = os.path.join(os.path.dirname(__file__), "data")
    graph_path = os.path.join(data_dir, "out_1.graphml")

    if not os.path.exists(graph_path):
        __download__(data_dir)
        __prepare__(data_dir)

    return igraph.load(graph_path)
Example #25
def max_net(net, max_size):

    #creates new list of edges to form filtered network
    new_component_total = []

    #identifies component and sorts edges by weight
    for node in nx.nodes(net):

        component = nx.node_connected_component(net, node)

        #list of all edges in a component
        component_edges = []

        #adds to list of component edges
        for n in component:
            edges = sorted(net.edges(n, data=True),
                           key=lambda t: t[2].get('weight', 1),
                           reverse=True)
            component_edges.extend(edges)

        #if component size is > max_size, edges are removed until it is <= max_size and a new component is generated
        if (len(component) > max_size):
            new_component = make_new_component(
                remove_last_edge(net, component_edges, node, max_size),
                max_size)
            new_component_total.extend(new_component)

        #if a component is small enough, new_component_total is extended with all of its edges
        else:
            new_component_total.extend(component_edges)

    #removes duplicates from edge list
    component_final = []
    for edge in new_component_total:
        if edge not in component_final:
            component_final.append(edge)

    #creates a new network with filtered components
    max_size_component_net = nx.Graph(component_final)

    for n in nx.nodes(net):
        max_size_component_net.add_node(n)

    nx.write_graphml(max_size_component_net, "graph.graphml")
    nx.write_gml(max_size_component_net, "graph.gml")
    graph = igraph.load("graph.gml")
    layout = graph.layout("fr")
    igraph.Graph.write_svg(graph,
                           fname="static/graph_image.svg",
                           layout=layout,
                           width=4000,
                           height=4000)

    return max_size_component_net
Example #26
def load_trials(dir, rho):
    files = os.listdir(dir)
    ext = '.gml'
    rword = 'rho=' + str(rho) + '_'
    trialint = lambda s: int(s[:-len(ext)].split('_')[-1].split('=')[-1])  # str.strip(ext) would strip characters, not the suffix
    files = list(filter(lambda s: s.endswith(ext) and (rword in s), files))
    files.sort(key=trialint)
    gs = []
    for fn in files:
        gs.append(ig.load(dir + '/' + fn))
    return gs
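A usage sketch with hypothetical file names matching the rho=..._trial=... pattern the filter expects:

# e.g. results/graph_rho=0.5_trial=0.gml, results/graph_rho=0.5_trial=1.gml, ...
graphs = load_trials('results', 0.5)  # list of igraph Graphs, sorted by trial number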
Example #27
    def setUp(self):
        self.G = igraph.load("karate.gml")

        membership = [
            [0, 1, 2, 3, 7, 11, 12, 13, 17, 19, 21],
            [4, 5, 6, 10, 16],
            [8, 9, 14, 15, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33]]
        cover = igraph.VertexCover(self.G, membership)
        metrics = VertexCoverMetric.run_analysis(cover, weights=None)
        metrics.report()
        self.comm_metrics = metrics.comm_metrics
Example #28
def get_graph():

    data_dir = os.path.join(os.path.dirname(__file__), "data")
    graph_path = os.path.join(data_dir, "out_1.graphml")

    if not os.path.exists(graph_path):
        __download__(data_dir)
        __prepare__(data_dir)


    return igraph.load(graph_path)
Example #29
def main():
    import igraph
    if len(argv) == 2:
        g = igraph.load(argv[1])
        print g.summary()
        c_im = g.community_infomap()
        s_im = igraph_surprise(g, c_im)
        print c_im.summary()
        print "Infomap: Surprise = %s, Modularity = %s" % (s_im, c_im.q)
        print

        c_fg = g.as_undirected().community_fastgreedy().as_clustering()
        s_fg = igraph_surprise(g, c_fg)
        print c_fg.summary()
        print "Fast Greedy: Surprise = %s, Modularity = %s" % (s_fg, c_fg.q)
        print


        if not g.is_directed():
            c_mls = g.community_multilevel(return_levels=True)
        else:
            c_mls = g.as_undirected().community_multilevel(return_levels=True)
        
        for i, c_ml in enumerate(c_mls):
            s_ml = igraph_surprise(g, c_ml)
            print c_ml.summary()
            print "Multi-Level at Level %s: Surprise = %s, Modularity = %s" % (i, s_ml, c_ml.q)
            print

    elif len(argv) == 3:
        g = igraph.Graph.Read_Edgelist(argv[1])
        
        # load partition data into dict with node id as index
        tmp = dict([ map(int,l.split()) for l in open(argv[2]).readlines() ])

        # re-map to 0 based partition id's
        zmap = dict((v, k) for k,v in enumerate(set(tmp.values())))
        p = dict([(k, zmap[v]) for k,v in tmp.iteritems() ])

        # load partition data into clustering object
        # to re-use igraph functions
        vc = igraph.VertexClustering(g, [ p[v.index] for v in g.vs ])

        print g.summary()
        print vc.summary()

        # get S
        s = igraph_surprise(g, vc)
        print "Surprise = %s, Modularity = %s" % (s, vc.q)
    else:
        print "Usage: %s graph-file" % argv[0]
        print "    or %s network-edge-list partition-file" % argv[0]
Example #30
def get_graph():

    data_dir = os.path.join(os.path.dirname(__file__), "data")
    graph_path = "nba.graphml"
    full_path = os.path.join(data_dir, graph_path)

    if not os.path.exists(data_dir):
        __download__(data_dir)

    if not os.path.exists(full_path):
        __prepare__(data_dir, graph_path)

    return igraph.load(full_path)
Example #31
def get_graph():

    data_dir = os.path.join(os.path.dirname(__file__), "data")
    graph_path = "nba.graphml"
    full_path = os.path.join(data_dir, graph_path)

    if not os.path.exists(data_dir):
        __download__(data_dir)

    if not os.path.exists(full_path):
        __prepare__(data_dir, graph_path)

    return igraph.load(full_path)
Example #32
    def setUp(self):
        self.G = igraph.load("karate.gml")

        membership = [[0, 1, 2, 3, 7, 11, 12, 13, 17, 19, 21],
                      [4, 5, 6, 10, 16],
                      [
                          8, 9, 14, 15, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29,
                          30, 31, 32, 33
                      ]]
        cover = igraph.VertexCover(self.G, membership)
        metrics = VertexCoverMetric.run_analysis(cover, weights=None)
        metrics.report()
        self.comm_metrics = metrics.comm_metrics
Example #33
    def get_graph(self):
        '''
        Returns the graph loaded in memory
        '''

        if not os.path.exists(self.raw_data_path):
            os.mkdir(self.raw_data_path)
            self.__download__()

        if not os.path.exists(self.graph_path):
            self.__prepare__()

        return igraph.load(self.graph_path)
Example #34
    def get_graph(self):
        '''
        Returns the graph loaded in memory
        '''

        if not os.path.exists(self.raw_data_path):
            os.mkdir(self.raw_data_path)
            self.__download__()

        if not os.path.exists(self.graph_path):
            self.__prepare__()

        return igraph.load(self.graph_path)
Example #35
def get_graph():
    """
    Downloads and prepares the graph from DOWNLOAD_URL
    """
    data_dir = os.path.join(os.path.dirname(__file__), "data")
    graph_path = os.path.join(data_dir, GRAPH_NAME + GRAPH_TYPE)

    if not os.path.exists(graph_path):
        __download__(data_dir)
        __prepare__(data_dir)

    G = igraph.load(graph_path)
    multigraph_to_weights(G)
    return G
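__download__ is not shown in any of these snippets. A minimal sketch, assuming a module-level DOWNLOAD_URL and a raw file name RAW_FILE (both hypothetical):

import os
import urllib.request

def __download__(data_dir):
    # Fetch the raw dataset into data_dir so __prepare__ can convert it.
    if not os.path.exists(data_dir):
        os.makedirs(data_dir)
    urllib.request.urlretrieve(DOWNLOAD_URL, os.path.join(data_dir, RAW_FILE))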
Example #36
def get_graph():
    """
    Downloads and prepares the network science collaboration graph
    """
    data_dir = os.path.join(os.path.dirname(__file__), "data")
    graph_path = os.path.join(data_dir, GRAPH_NAME + GRAPH_TYPE)

    if not os.path.exists(graph_path):
        __download__(data_dir)
        __prepare__(data_dir)
    else:
        print(graph_path, "already exists. Using old file.")

    return igraph.load(graph_path)
Example #37
def get_graph():
    """
    Downloads and prepares the network science collaboration graph
    """
    data_dir = os.path.join(os.path.dirname(__file__), "data")
    graph_path = os.path.join(data_dir, GRAPH_NAME + GRAPH_TYPE)

    if not os.path.exists(graph_path):
        __download__(data_dir)
        __prepare__(data_dir)
    else:
        print(graph_path, "already exists. Using old file.")

    return igraph.load(graph_path)
Example #38
    def __prepare__(self):

        data = os.path.join(self.raw_data_path, DATA_DIR, "malaria.edgelist")
        mod_data = os.path.join(self.raw_data_path, DATA_DIR, "mod_malaria.edgelist")

        #we just need to remove the third column which has 1's in it
        #so igraph can read it as an edgelist
        with open(data, 'r') as f:
            with open(mod_data, 'w') as new:
                for line in f:
                    new.write(line[:-2] + '\n')

        G = igraph.load(mod_data)
        G.write_graphml(self.graph_path)
Example #39
def get_graph():
    """
    TEMPLATE COMMENT: Downloads and prepares a graph
    """
    data_dir = os.path.join(os.path.dirname(__file__), "data")
    graph_path = os.path.join(data_dir, GRAPH_NAME + GRAPH_TYPE)

    if not os.path.exists(graph_path):
        __download__(data_dir)
        __prepare__(data_dir)
    else:
        print(graph_path, "already exists. Using old file.")

    return igraph.load(graph_path)
Example #40
def get_graph():
    """
    TEMPLATE COMMENT: Downloads and prepares a graph
    """
    data_dir = os.path.join(os.path.dirname(__file__), "data")
    graph_path = os.path.join(data_dir, GRAPH_NAME + GRAPH_TYPE)

    if not os.path.exists(graph_path):
        __download__(data_dir)
        __prepare__(data_dir)
    else:
        print(graph_path, "already exists. Using old file.")

    return igraph.load(graph_path)
Example #41
def main():
    parser = argparse.ArgumentParser(
        description=('Generate test graphs'),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('graph_out', metavar='graph-out')
    parser.add_argument('trace_out', metavar='trace-out')
    parser.add_argument('--progressbar', action='store_true')
    parser.add_argument('--verbose', '-v', action='count', default=0)

    parser.add_argument('--node-count',
                        '-nc',
                        type=int,
                        dest='node_count',
                        default=100)
    parser.add_argument('--network-type',
                        choices=[x for x in graphs_map.iterkeys()],
                        default='star')
    parser.add_argument('--trace-count', type=int, default=50)

    arguments = parser.parse_args()

    arguments.verbose = min(len(helpers.LEVELS), arguments.verbose)
    logging.getLogger('compnet').setLevel(helpers.LEVELS[arguments.verbose])

    show_progress = arguments.progressbar

    # g = graphs_map[arguments.network_type](arguments.node_count)
    # g = igraph.Graph.Barabasi(900, 9)
    # for n in g.vs:
    #     n['closeness'] = g.closeness(n)
    #     n['name'] = 'V%d' % n.index

    # g.save(arguments.graph_out)
    g = igraph.load(arguments.graph_out)

    pairs = [
        random.sample(xrange(0, g.vcount()), 2)
        for x in xrange(0, arguments.trace_count)
    ]
    pairs = [[g.vs[x[0]]['name'], g.vs[x[1]]['name']] for x in pairs]

    traces = []
    for p in pairs:
        trace = random.choice(g.get_all_shortest_paths(p[0], p[1]))
        trace = [g.vs[x]['name'] for x in trace]
        traces.append(trace)

    helpers.save_to_json(arguments.trace_out, traces)
Example #42
def main():
    """Main function"""

    if not os.path.exists(NETWORK_FILE):
        print "Input network file is not present!"
        sys.exit(1)

    graph = ig.load(NETWORK_FILE)
    graph['name'] = 'meneame'

    general_analysis(graph)

    community_analysis(graph)

    save_degree_distribution(graph, IMAGES_FOLDER)
    save_weights_distribution(graph, IMAGES_FOLDER)
Example #43
    def setUp(self):
        self.G=igraph.load("karate.gml")
        membership=[
                    [0,1,2,3,7,11,12,13,17,19,21],
                    [4,5,6,10,16],
                    [8,9,14,15,18,20,22,23,24,25,26,27,28,29,30,31,32,33]]
        weights=[5,7,4,5,8,7,2,1,1,6,7,4,9,6,8,2,2,1,2,5,6,5,7,7,3,4,4,6,7,7,5,7,4,8,5,4,5,3,1,6,4,3,3,3,1,6,2,7,8,8,1,7,5,7,5,4,7,3,7,5,8,9,4,2,8,8,6,3,6,6,8,5,6,7,5,7,7,7]
        self.G.es['weight'] = weights

        self.cover=igraph.VertexCover(self.G, membership)

        self.comm_metrics = None
        if False:  # metric computation disabled in this test fixture; flip to True to run it
          vcm = VertexCoverMetric()
          metrics=vcm.run_analysis(self.cover, weights = None)
          self.comm_metrics = metrics.comm_metrics
Example #44
    def get(self, id):
        """Retrieves the dataset with the given ID from Nexus.

        Dataset IDs are formatted as follows: the name of a dataset on its own
        means that a single network should be returned if the dataset contains
        a single network, or multiple networks should be returned if the dataset
        contains multiple networks. When the name is followed by a dot and a
        network ID, only a single network will be returned: the one that has the
        given network ID. When the name is followed by a dot and a star, a
        dictionary mapping network IDs to networks will be returned even if the
        original dataset contains a single network only.

        E.g., getting C{"karate"} would return a single network since the
        Zachary karate club dataset contains one network only. Getting
        C{"karate.*"} on the other hand would return a dictionary with one
        entry that contains the Zachary karate club network.

        @param id: the ID of the dataset to retrieve.
        @return: an instance of L{Graph} (if a single graph has to be returned)
          or a dictionary mapping network IDs to instances of L{Graph}.
        """
        from igraph import load

        dataset_id, network_id = self._parse_dataset_id(id)

        params = dict(format="Python-igraph", id=dataset_id)
        response = self._get_response("/api/dataset", params, compressed=True)
        response = self._ensure_uncompressed(response)
        result = load(response, format="pickle")

        if network_id is None:
            # If result contains a single network only, return that network.
            # Otherwise return the whole dictionary
            if not isinstance(result, dict):
                return result
            if len(result) == 1:
                return result[result.keys()[0]]
            return result

        if network_id == "*":
            # Return a dict no matter what
            if not isinstance(result, dict):
                result = {dataset_id: result}  # key by the actual dataset ID, not the literal string "dataset_id"
            return result

        return result[network_id]
Example #45
    def get(self, id):
        """Retrieves the dataset with the given ID from Nexus.

        Dataset IDs are formatted as follows: the name of a dataset on its own
        means that a single network should be returned if the dataset contains
        a single network, or multiple networks should be returned if the dataset
        contains multiple networks. When the name is followed by a dot and a
        network ID, only a single network will be returned: the one that has the
        given network ID. When the name is followed by a dot and a star, a
        dictionary mapping network IDs to networks will be returned even if the
        original dataset contains a single network only.

        E.g., getting C{"karate"} would return a single network since the
        Zachary karate club dataset contains one network only. Getting
        C{"karate.*"} on the other hand would return a dictionary with one
        entry that contains the Zachary karate club network.

        @param id: the ID of the dataset to retrieve.
        @return: an instance of L{Graph} (if a single graph has to be returned)
          or a dictionary mapping network IDs to instances of L{Graph}.
        """
        from igraph import load

        dataset_id, network_id = self._parse_dataset_id(id)

        params = dict(format="Python-igraph", id=dataset_id)
        response = self._get_response("/api/dataset", params, compressed=True)
        response = self._ensure_uncompressed(response)
        result = load(response, format="pickle")

        if network_id is None:
            # If result contains a single network only, return that network.
            # Otherwise return the whole dictionary
            if not isinstance(result, dict):
                return result
            if len(result) == 1:
                return result[list(result.keys())[0]]
            return result

        if network_id == "*":
            # Return a dict no matter what
            if not isinstance(result, dict):
                result = {dataset_id: result}  # key by the actual dataset ID, not the literal string "dataset_id"
            return result

        return result[network_id]
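Assuming a connector instance nexus exposing the get method above, the three ID forms described in the docstring would be used as follows (the network ID "1" is hypothetical):

karate = nexus.get("karate")      # dataset with one network -> a single Graph
one = nexus.get("karate.1")       # explicit network ID -> that network only
all_nets = nexus.get("karate.*")  # always a dict mapping network IDs to Graphs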
Example #46
def main(f_path):

    global centrality_calculation_by_igraph, centrality_calculation_by_networkx

    G = igraph.load(f_path)

    centrality_calculation_by_igraph = c.centrality_calculation_by_igraph(G)

    G = nx.DiGraph(G.get_edgelist())

    centrality_calculation_by_networkx = c.centrality_calculation_by_networkx(
        G)

    # cluster_coefficient_calculation = c.cluster_coefficient_calculation(G)

    return (centrality_calculation_by_igraph,
            centrality_calculation_by_networkx)
Example #47
def make_navigationoptions(args):
    if not args.prefix:
        args.prefix = os.path.splitext(os.path.basename(args.dataset))[0]

    dataset = np.loadtxt(args.dataset)

    graph = ig.load(args.graph)

    vd = cPickle.load(open(args.clustering, 'rb'))
    cl = vd.as_clustering()

    index = FLANN()
    index.load_index(args.index, dataset)

    # Find adjacent communities
    k = args.nn
    knn, dists = index.nn_index(dataset, k + 1)
    knn = knn[:, 1:]
    dists = dists[:, 1:]
    membership = np.array(cl.membership)

    adjacencies = {}
    for community in cl:
        for node in community:
            mode_label = mode(membership[knn[node]])
            mode_freq = mode_label[1][0]
            mode_label = mode_label[0][0]

            if mode_label != membership[node] and sum(
                    graph.are_connected(node, nid)
                    for nid in knn[node]) > (k / 2):
                adjacencies.setdefault(membership[node],
                                       []).append(int(mode_label))

    # Make options
    cPickle.dump([
        options.NavigationOption(source, target, membership, args.index,
                                 args.dataset, args.nn)
        for source, targets in adjacencies.iteritems()
        for target in set(targets)
    ], open(args.prefix + '-navigation-options.pl', 'wb'))
Example #48
def igraph_to_networkx(netx):
    '''Returns a networkx Graph built from the given igraph instance (or graph file path), copying all vertices, edges, and attributes'''
    if isinstance(netx,str):
        iG=igraph.load(netx)
    else:
        iG=netx
    netG=networkx.Graph()
    int_to_id={}

    for i in range(len(iG.vs)):
        attr=iG.vs[i].attributes()
        int_to_id[i]=attr["id"]

        netG.add_node(attr["id"],attr_dict=attr)
        del attr["id"]
    #print(int_to_id)
    for i in range(len(iG.es)):
        m=int_to_id[iG.es[i].source]
        n=int_to_id[iG.es[i].target]
        netG.add_edge(m,n,iG.es[i].attributes())
    
    return netG
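A usage sketch (hypothetical file name); the function accepts either a path or an already-loaded graph, and assumes every vertex carries an "id" attribute, as GraphML provides:

nx_g = igraph_to_networkx("graph.graphml")               # load from file, then convert
nx_g = igraph_to_networkx(igraph.load("graph.graphml"))  # or convert an existing Graph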
Example #49
def result():
    print(request.form)
    username=request.form.get("username")
    password=request.form.get("password")
    analysis_id=int(request.form.get("analysis_id"))
    top_k=request.form.get("top_k")
    max_component_size=request.form.get("max_component_size")
    filter = request.form.get("filters") 


    spectra_list, matches = sim.compare(pimport.import_pimp(username, password, analysis_id))
    #generates network by passing spectra_list(nodes) and matches(edges)
    network = networking.networking(spectra_list, matches)

    #applies filters
    if filter is not None:
        if top_k != "":
            network = networking.new_top_k(network, (int(top_k))) 
            
        if max_component_size != "":
            network = networking.max_net(network, (int(max_component_size)))
    #networking.graphml(network, "graph.graphml")
    

    #export
    nx.write_graphml(network, "uploads/graph.graphml")
    nx.write_gml(network, "graph.gml")
    graph = igraph.load("graph.gml")
    layout = graph.layout("kk")
    igraph.Graph.write_svg(graph,
                           fname="static/graph_image.svg",
                           layout=layout,
                           width=4000,
                           height=4000)


    graph_file = "graph_image.svg"

    return render_template("result.html", graph_image = graph_file)
Example #50
    def __prepare__(self):
        vertex_filename = os.path.join(self.raw_data_path, VERTEX_DATA_FILE)
        edge_filename = os.path.join(self.raw_data_path, EDGE_DATA_FILE)

        g = igraph.load(edge_filename)

        vertex_file = open(vertex_filename)
        reader = csv.DictReader(vertex_file)

        for case in reader:
            caseid = int(case['caseid'])
            v = g.vs[caseid]

            v['caseid'] = case['caseid']
            v['usid'] = case['usid']
            v['parties'] = case['parties']
            v['year'] = case['year']

        vertex_file.close()

        # Case IDs are 1-indexed, so we delete the 0th vertex as it was extraneous.
        g.delete_vertices([0])
        g.write_graphml(self.graph_path)
Example #51
def example():
	"""
	Performs the example outlined in the README. Draws the graph of one dataset.
	"""
	g = ig.load("sp_data_school_day_1_g.graphml") # whichever file you would like

	# Assigning colors to genders for plotting
	colorDict = {"M": "blue", "F": "pink", "Unknown": "black"}

	for vertex in g.vs:
		# each vertex is labeled as its classname and colored as its gender.
		vertex["label"] = vertex["classname"]
		vertex["color"] = colorDict[vertex["gender"]]


	layout = g.layout("fr") # Fruchterman-Reingold layout

	# If Cairo is improperly installed, raises TypeError: plotting not available
	try:
		ig.plot(g, layout=layout)
	except TypeError as e:
		print "It looks like Cairo isn't properly installed. Refer to the wiki."
		exit(1)
Example #52
def main():
    ### Read initial graph
    #g = igraph.load('data_repo/Imputed_data_False9_0509.graphmlz') ### This file contains the weekday 9am link level travel time for SF, imputed data collected from a month worth of Google Directions API
    g = igraph.load('data_repo/London_Directed/London_0621.graphmlz')
    print(g.summary())

    mode = 'ALL'  ### 'IN', 'OUT', 'ALL'
    degree_dist = g.degree_distribution(mode=mode)
    loc = []
    val = []
    for bn in degree_dist.bins():
        loc.append(bn[0])
        val.append(bn[2])
    print(loc, val)

    plt.bar(loc, val, width=1, color='w', edgecolor='black', log=True)
    for i, v in enumerate(val):
        plt.text(loc[i] - 0.25, v, str(v), color='blue', fontweight='bold')  # anchor labels at the bin location, not the list index
    plt.xlim([-1, (int(max(loc)) + 1)])
    plt.title('Degree distribution of mode {}'.format(mode))
    plt.xlabel('Degree')
    plt.ylabel('Vertices count')
    plt.show()
Example #53
def label_igraph_network(network_path):
    g = i.load(network_path)
    g = g.as_undirected()
    print 'Get giant component'
    g = g.clusters(mode='weak').giant()
    simple = g.is_simple()
    print 'IS SIMPLE? %s' % simple
    if not simple:
        g.simplify(multiple=True, loops=True, combine_edges='ignore')

    try:
        for vs in g.vs:
            vs['name'] = 'LBL%s' % vs['name']
    except KeyError:
        for vs in g.vs:
            vs['name'] = 'LBL%s' % vs.index

    edge_list = helpers.degree_labeling_network(g, 1.1)

    vs = list(set([y for t in [(x[0], x[1]) for x in edge_list] for y in t]))
    labeled_g = helpers.load_as_inferred_links_nofile(vs, edge_list)

    return labeled_g
Example #54
    def __prepare__(self):
        vertex_filename = os.path.join(self.raw_data_path, VERTEX_DATA_FILE)
        edge_filename = os.path.join(self.raw_data_path, EDGE_DATA_FILE)

        g = igraph.load(edge_filename)

        vertex_file = open(vertex_filename)
        reader = csv.DictReader(vertex_file)

        for case in reader:
            caseid = int(case['caseid'])
            v = g.vs[caseid]

            v['caseid']  = case['caseid']
            v['usid']    = case['usid']
            v['parties'] = case['parties']
            v['year']    = case['year']

        vertex_file.close()

        # Case IDs are 1-indexed, so we delete the 0th vertex as it was extraneous.
        g.delete_vertices([0])
        g.write_graphml(self.graph_path)
Example #55
    def process_file(self, filename):
        """Loads a graph from the given file, runs the clustering
        algorithm on it and prints the clusters to the standard
        output."""
        self.log.info("Processing %s..." % filename)

        graph = load(filename, format=self.options.format)

        # If the graph has weights and we want to ignore them, delete them
        if self.options.no_weights and "weight" in graph.edge_attributes():
            del graph.es["weight"]

        # If the graph is directed, we have to make it undirected
        if graph.is_directed():
            graph.to_undirected(combine_edges="sum")
            self.log.warning("Converted directed graph to undirected.")

        # Set up the "name" attribute properly
        if "label" in graph.vertex_attributes():
            graph.vs["name"] = graph.vs["label"]
            del graph.vs["label"]
        elif "name" not in graph.vertex_attributes():
            graph.vs["name"] = [str(i) for i in xrange(graph.vcount())]

        # Run the algorithm, get the result generator
        self.log.info("Calculating clusters, please wait...")
        algorithm = HLC(graph, self.options.min_size)
        results = algorithm.run(self.options.threshold)

        # Print the optimal threshold if we determined it automatically
        if self.options.threshold is None:
            self.log.info("Threshold = %.6f" % algorithm.last_threshold)
            self.log.info("D = %.6f" % algorithm.last_partition_density)

        # Print the results
        for community in results:
            print "\t".join(graph.vs[community]["name"])
Example #56
def main(argv=None):
	if argv is None:
		argv = sys.argv
	if len(argv) < 2 or len(argv) > 3:
		print "Usage: %s songGraph.pkl [vertexDendrogram.pickle]"% argv[0]
		return
	if len(argv) == 3:
		vertClustPkl = argv[2]
	else:
		vertClustPkl = None
	
	s = shout.Shout()
	print "Using libshout version %s" % shout.version()
	
	s.host = 'doc.gold.ac.uk'
	s.port = 8000
	s.user = '******'
	s.password = '******'
	s.mount = "/vRad.mp3"
	s.name = "Steerable Optimized Self-Organizing Radio"
	s.genre = "CompNets"
	s.url = "http://radio.benfields.net"
	s.format = 'mp3' # | 'vorbis'
	#s.protocol = 'icy' | 'xaudiocast' | 'http'
	# s.public = 0 | 1
	# s.audio_info = { 'key': 'val', ... }
	#  (keys are shout.SHOUT_AI_BITRATE, shout.SHOUT_AI_SAMPLERATE,
	#	shout.SHOUT_AI_CHANNELS, shout.SHOUT_AI_QUALITY)
	currentTracklist = []
	try:
		graphLocation = argv[1]
		loadedGraph = igraph.load(graphLocation, format="pickle")
		ms = copy.deepcopy(loadedGraph)
	except Exception, err:
		print "trouble loading the graph at {0} should point to the graph (as pickle).  Sort it out and try again.".format(argv[1])
		return
Example #57
    def __prepare__(self):

        G = igraph.load(os.path.join(self.raw_data_path, "netscience.gml"))
        del G.vs['id'] #graphml uses the id field, so we must remove it
        G.write_graphml(self.graph_path)
Example #58
    def get_books(self):
        g = ig.load("territories/data/polbooks.gml")
        g.simplify(loops=False)
        return g