Example #1
    def renderPage(self, extension):
        uid = self.request.get('uid', None)
        network = None

        strpage = "Error."
        supported_extensions = ['gdf', 'graphml']
        if uid is not None and extension in supported_extensions:
            session = sessionmanager.getsession(self)
            if session and session['me']['id'] == uid:
                q = db.GqlQuery("SELECT * FROM Network WHERE uid = :1", uid)
                networks = q.fetch(1)

                if networks:
                    network = networks[0]
                    graph = loadGraph(network.getnodes(), network.getedges())

                    strpage = ''
                    if extension == 'gdf':
                        for line in nx.generate_gdf(graph):
                            strpage += line + '\n'
                        self.response.headers['Content-Type'] = "text/gdf"

                    if extension == 'graphml':
                        for line in nx.generate_graphml(graph):
                            strpage += line + '\n'
                        self.response.headers['Content-Type'] = "xml/graphml"

        self.response.out.write(strpage)
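
NetworkX itself ships generate_graphml but has no GDF writer, so the nx.generate_gdf call above must come from a project-local helper or patch. A minimal sketch of what such a generator could look like, with an assumed two-column GDF layout rather than the project's actual code:

def generate_gdf(graph):
    # GDF (the GUESS/Gephi text format) is a node table followed by an edge table.
    yield 'nodedef>name VARCHAR'
    for node in graph.nodes():
        yield str(node)
    yield 'edgedef>node1 VARCHAR,node2 VARCHAR'
    for u, v in graph.edges():
        yield '{0},{1}'.format(u, v)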
Example #2
File: susamuru.py Project: derlem/susamuru
def at_vdt_etg(limit=None):
    at_vdts_map = construct_at_dt_map_from_file(AT_VDTS_FILENAME)
    with open(AT_VDT_ETG_FILENAME, mode='w') as at_vdt_etg_file:
        writer = csv.writer(at_vdt_etg_file,
                            delimiter=DELIMITER,
                            quotechar=ETG_QUOTE_CHAR,
                            quoting=csv.QUOTE_MINIMAL)
        at_vdts_size = len(at_vdts_map)
        percentage = 0
        page_count = 0
        init_datetime = datetime.datetime.now()
        for ambiguation_term_title, valid_disambiguation_terms in at_vdts_map.items():
            row_items = []
            for vdt in valid_disambiguation_terms:
                row_items.append(ambiguation_term_title)
                row_items.append(vdt.title())
                etg = get_etg(vdt)
                etg_graphml = list(generate_graphml(etg, prettyprint=False))[0]

                row_items.append(etg_graphml)
                writer.writerow(row_items)
                row_items = []
            page_count += 1
            percentage = (page_count * 100.0) / at_vdts_size
            curr_time = datetime.datetime.now()
            date_time = curr_time - init_datetime
            print(date_time)
            print("% [", percentage, "] of pages processed.", " ", date_time,
                  " has passed.")
            etc = (date_time / percentage) * (100 - percentage)
            print(" ETC: ", etc)
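
The list(generate_graphml(etg, prettyprint=False))[0] pattern above relies on the generator yielding the whole un-pretty-printed document as a single element. Joining the lines is equivalent and does not depend on that implementation detail; a one-line sketch, not part of the original project:

etg_graphml = '\n'.join(generate_graphml(etg, prettyprint=False))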
Example #3
    def renderPage(self, extension):
        uid = self.request.get("uid", None)
        network = None

        strpage = "Error."
        supported_extensions = ["gdf", "graphml"]
        if uid is not None and extension in supported_extensions:
            session = sessionmanager.getsession(self)
            if session and session["me"]["id"] == uid:
                q = db.GqlQuery("SELECT * FROM Network WHERE uid = :1", uid)
                networks = q.fetch(1)

                if networks:
                    network = networks[0]
                    graph = loadGraph(network.getnodes(), network.getedges())

                    strpage = ""
                    if extension == "gdf":
                        for line in nx.generate_gdf(graph):
                            strpage += line + "\n"
                        self.response.headers["Content-Type"] = "text/gdf"

                    if extension == "graphml":
                        for line in nx.generate_graphml(graph):
                            strpage += line + "\n"
                        self.response.headers["Content-Type"] = "xml/graphml"

        self.response.out.write(strpage)
Example #4
def main(out_dir,
         direction,
         num_graphs,
         graph_params,
         eta,
         require_connected=True,
         require_invertible=True,
         require_stationary=True):
    graph_params = yaml.safe_load(graph_params)
    graphs = dict()
    for i in range(num_graphs):
        i_d = str(uuid4())
        graph = make_graph(graph_params,
                           eta,
                           require_connected=require_connected,
                           require_invertible=require_invertible,
                           require_stationary=require_stationary)
        graphs[i_d] = '\n'.join(nx.generate_graphml(graph))

    out_file = Path(out_dir) / 'undirected_sw_graphs.yaml'
    out_file.write_text(yaml.dump(graphs))

    out_path = Path(out_dir) / 'watts_strogatz'
    out_path.mkdir(parents=True, exist_ok=True)
    graph_params_str = '_'.join([
        f'{param}_{graph_params[param]}'
        for param in sorted(graph_params.keys())
    ])
    out_file = out_path / f'{direction}_{graph_params_str}.yaml'
    out_file.write_text(yaml.dump(graphs))
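
The YAML file written above maps graph ids to GraphML strings, and nx.parse_graphml reverses the '\n'.join(nx.generate_graphml(graph)) step. A minimal loading sketch, with the file layout assumed from the code above:

import yaml
import networkx as nx

def load_graphs(path):
    with open(path) as f:
        serialized = yaml.safe_load(f)
    # parse_graphml is the inverse of '\n'.join(generate_graphml(g))
    return {gid: nx.parse_graphml(text) for gid, text in serialized.items()}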
Example #5
def main(out_dir,
         direction,
         num_graphs,
         num_nodes,
         graph_params,
         eta,
         require_connected=True,
         require_invertible=True,
         require_stationary=True):
    graph_params = yaml.safe_load(graph_params)
    graphs = dict()
    for _ in range(num_graphs):
        i_d = str(uuid4())

        graph_params['n'] = num_nodes

        graph = make_random_graph(graph_params,
                                  eta,
                                  require_connected=require_connected,
                                  require_invertible=require_invertible,
                                  require_stationary=require_stationary)
        graphs[i_d] = '\n'.join(nx.generate_graphml(graph))

    out_path = Path(out_dir) / 'erdos_renyi'
    out_path.mkdir(parents=True, exist_ok=True)

    out_file = out_path / f'{direction}_p_{graph_params["p"]}.yaml'
    out_file.write_text(yaml.dump(graphs))
Example #6
    def write_service_graphs(service):
        graphsdir = 'graphs'
        try:
            os.makedirs(graphsdir)
        except OSError as exc:
            # only swallow the error if the directory already exists
            if exc.errno == errno.EEXIST and os.path.isdir(graphsdir):
                pass
            else:
                raise

        service.build_topology_graph(level=3, bridges=False)

        for lvl in range(0, 4):
            g = service.build_topology_graph(level=lvl, bridges=False)
            nx.write_graphml(
                g,
                os.path.join(graphsdir,
                             "{0}-lvl{1}.graphml".format(service.id, lvl)))
            g = service.build_topology_graph(level=lvl, bridges=True)
            nx.write_graphml(
                g,
                os.path.join(graphsdir,
                             "{0}-lvl{1}-br.graphml".format(service.id, lvl)))

        g = service.build_topology_graph(level=3,
                                         bridges=True,
                                         vdu_inner_connections=False)
        service.complete_graph = nx.generate_graphml(g,
                                                     encoding='utf-8',
                                                     prettyprint=True)
        nx.write_graphml(
            g,
            os.path.join(graphsdir,
                         "{0}-lvl3-complete.graphml".format(service.id)))
Example #7
 def hashtag_user_graph(self, request, queryset):
     graph = Tweet.objects.hashtag_user_graph(queryset)
     response = HttpResponse(
         '\n'.join(nx.generate_graphml(graph)),
         content_type='text/graphml'
     )
     response['Content-Disposition'] = 'attachment; filename=hashtags-users.graphml'
     return response
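
The view above (and the next example) joins the whole GraphML document in memory before responding. For large graphs, Django's StreamingHttpResponse can hand generate_graphml's lines to the client as they are produced; a sketch along those lines, not code from the original project:

from django.http import StreamingHttpResponse
import networkx as nx

def graphml_attachment(graph, filename='graph.graphml'):
    response = StreamingHttpResponse(
        (line + '\n' for line in nx.generate_graphml(graph)),
        content_type='application/xml',
    )
    response['Content-Disposition'] = 'attachment; filename=%s' % filename
    return response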
Example #8
File: admin.py Project: Solfasol/aura
 def download_common_followers_graph(self, request, queryset):
     graph = Community.objects.build_common_followers_graph(queryset)
     response = HttpResponse(
         '\n'.join(nx.generate_graphml(graph)),
         content_type='application/xml'  # the payload is GraphML (XML), not CSV
     )
     response['Content-Disposition'] = 'attachment; filename="common-followers-%s.graphml"' % '-'.join([c.name for c in queryset])
     return response
Example #9
def to_graphml(nodes: List[str], edges: List[Edge]) -> str:
    G = nx.Graph()
    G.add_nodes_from(nodes)
    G.add_edges_from([(e.source, e.target) for e in edges])

    graph_str = "\n".join(nx.generate_graphml(G, prettyprint=False))

    return graph_str
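
A quick usage sketch for to_graphml above, with a stand-in namedtuple in place of the project's Edge class, which is not shown here:

from collections import namedtuple

Edge = namedtuple('Edge', ['source', 'target'])
xml = to_graphml(['a', 'b', 'c'], [Edge('a', 'b'), Edge('b', 'c')])
print(xml)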
Example #10
def getdag(request):
    name = request.GET.get('name', '')

    if (name == ''): return HttpResponse("DAG not specified.")
    logger.info('WORKFLOW: Received request for DAG %s', name)

    g = fetchdag(name)
    s = '\n'.join(nx.generate_graphml(g))
    return HttpResponse(s)
Example #11
    def query(self, opts):
        if opts.log_level:
            LOGGER.setLevel(opts.log_level)

        if opts.id:
            #   if opts.id is provided, query a egocentric network
            nodes, links = self.egocentric(opts)
        else:
            #   otherwise a sampled network
            nodes, links = self.sociocentric(opts)

        G = self.generateGraph([{'id': n} for n in nodes], links, opts)

        self.__debugGraph(G)
        self.densifyGraph(G, opts.limit)

        self.__debugGraph(G)
        node_data, metrics = self.getGraphDetails(G, opts)

        #   if optimize > 1, remove nodes causing trouble
        try:
            for ob in node_data:
                n = ob.get('id')
                if n:
                    for k,v in ob.items():
                        if k!='id':
                            G.nodes[n][k] = v
                else:
                    LOGGER.debug("No 'id' found for {}".format(ob))
        except Exception as e:
            LOGGER.error("{} occurred".format(e))
            LOGGER.error("{}".format(node_data))
            raise e
        
        '''
        Check if coordinates need adjusting based on ?_x or ?_y result value
        '''
        for _,v in G.nodes(data=True):
            if '_x' in v or '_y' in v:
                LOGGER.debug("self.adjustPositions")
                self.adjustPositions(G)
                break

        if opts.format == NetworkBuilder.GRAPHML:

            res = '\n'.join(nx.generate_graphml(G, prettyprint=True))

        else:

            res = nx.readwrite.json_graph.cytoscape_data(G)
            res['metrics'] = metrics

        return res
Example #12
async def respond_graphml(
        gen: AsyncIterator[JsonElement]) -> AsyncGenerator[str, None]:
    # Note: this is a very inefficient way of creating a response, since it creates the graph in memory
    # on the server side, so we can reuse the networkx code.
    # This functionality could be reimplemented in a streaming way.
    def no_nested_props(js: Json) -> Json:
        reported: Json = value_in_path_get(js, NodePath.reported, {})
        res = {
            k: v
            for k, v in reported.items()
            if v is not None and not isinstance(v, (dict, list))
        }
        return res

    graph = await result_to_graph(gen, no_nested_props)
    for line in generate_graphml(graph):
        yield line
Example #13
 def serialize_graph(self,
                     format: GraphFormat = GraphFormat.GRAPHML) -> str:
     """
     Serialize a given graph into GraphML string or return None
     if graph is not found
     :return:
     """
     graph = self.storage.extract_graph(self.graph_id)
     graph_string = None
     if graph is not None:
         if format == GraphFormat.GRAPHML:
             graph_string = '\n'.join(nx.generate_graphml(graph))
         elif format == GraphFormat.JSON_NODELINK:
             json_object = nx.readwrite.node_link_data(graph)
             graph_string = json.dumps(json_object)
         elif format == GraphFormat.CYTOSCAPE:
             json_object = nx.readwrite.cytoscape_data(graph)
             graph_string = json.dumps(json_object)
         else:
             raise PropertyGraphQueryException(
                 graph_id=self.graph_id,
                 node_id=None,
                 msg=f"Unsupported export graph format {format.name}")
     return graph_string
Example #14
 def get_graphml_for_culture(self, node_list):
     trait_subgraph = self.graph.subgraph(node_list)
     linefeed = chr(10)
     return linefeed.join(nx.generate_graphml(trait_subgraph))
Example #15
 def get_graphml_for_culture(self, node_list):
     trait_subgraph = self.graph.subgraph(node_list)
     linefeed = chr(10)
     return linefeed.join(nx.generate_graphml(trait_subgraph))
Example #16
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 30 22:19:24 2018

@author: Ran_the_User
"""

import networkx as nx
#import matplotlib.pyplot as plt
#%matplotlib wx

G = nx.readwrite.graphml.read_graphml('bokeh_server_flow.graphml')

nx.draw(G, with_labels=True, node_size=700,
        node_shape='p')  # one of 'so^>v<dph8'.
#G.nodes()

linefeed = chr(10)  # linefeed=\n
s = linefeed.join(nx.generate_graphml(G))
G2 = nx.parse_graphml(s)  # round-trip the GraphML string back into a graph
Example #17
 def to_graphml(self, output_path="data/graph.ml"):
     with open(output_path, "w") as f:
         # generate_graphml yields lines, so join them before writing
         f.write("\n".join(nx.generate_graphml(compact_graph(self), encoding="utf-8")))
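
Since generate_graphml only yields text that then has to be joined and written by hand, the method above can also lean on NetworkX's own file writer, which handles encoding itself. A sketch assuming the project's compact_graph helper:

def to_graphml(self, output_path="data/graph.ml"):
    nx.write_graphml(compact_graph(self), output_path, encoding="utf-8")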
Example #18
def centrality():
	start_time = datetime.now()
	#TODO add config file read
	#TODO support cross network calculations (author_node --is--> author_node)
	## >Get the REQUIRED parameters
	req_params = {}
	for entry in req_param_list:
		if request.args.get(entry) is not None:
			req_params[entry] = urllib2.unquote(request.args.get(entry)).replace('\'', '')
		else:
			ret_string = {'error': 'Required parameter missing: ' + entry}
			inf_sup.append_to_log(log_filename, str(ret_string))
			return jsonify(ret_string)
	#TODO Validate start_date, end_date
	## >Verify the metric is valid
	if req_params['metric'] not in metric_list:
		ret_string = {'error': 'Invalid metric requested'}
		inf_sup.append_to_log(log_filename, str(ret_string))
		return jsonify(ret_string)

	## >Verify the start date is before the end date
	if int(req_params['start_date']) > int(req_params['end_date']):
		ret_string = {'error': 'End date before start date'}
		inf_sup.append_to_log(log_filename, str(ret_string))
		return jsonify(ret_string)

	## >Get the OPTIONAL parameters
	opt_params = {}
	for entry in opt_param_list:
		if request.args.get(entry) is not None:
			opt_params[entry] = urllib2.unquote(request.args.get(entry)).replace('\'', '')
		else:
			opt_params[entry] = None
	#TODO validate the optional parameters

	## >Get the FORMAT parameters
	for_params = {}
	for entry in format_param_list:
		if request.args.get(entry) is not None:
			for_params[entry] = urllib2.unquote(request.args.get(entry)).replace('\'', '')
		else:
			for_params[entry] = None
	params = dict(req_params.items() + opt_params.items() + for_params.items())

	## >Build the mongo query
	mongo_query = {}
	mongo_query['PostDate'] = {'$gte': params['start_date'], '$lte': params['end_date']}
	mongo_query['Network'] = params['network']

	for param, value in opt_params.iteritems():
		if value is not None:
			if param == 'type':
				mongo_query['Type'] = opt_params['type']
			if param == 'twit_collect':
				mongo_query['Meta.sources'] = {'$in': [opt_params['twit_collect']]}
			if param == 'matched_project':
				mongo_query['Matching'] = {'$elemMatch': {'ProjectId': opt_params['matched_project']}}
			if param == 'matched_topic':
				#TODO
				pass
			if param == 'scored_project':
				#TODO
				pass
			if param == 'scored_topic':
				#TODO
				pass

	## >Check if there are any matches
	if author_collection.find(mongo_query).count() == 0:
		ret_string = {'error': 'No connections found matching the criteria'}
		inf_sup.append_to_log(log_filename, str(ret_string))
		return jsonify(ret_string)
	else:
		## >Map/reduce the A-->A connections
		a2a_map = Code("""
				function () {
					emit({"author": this.Author, "connection": this.Connection},
						{"count": 1}
						);
					}
				""")
		a2a_reduce = Code("""
				function (key, values) {
					var count = 0;
					values.forEach(function(v) {
						count += v['count'];
						});
					return {"count": count};
				}
				""")
		a2a_result = author_collection.map_reduce(a2a_map, a2a_reduce, "a2a_results", query=mongo_query).find()

	## >Build the author list
	author_list = []
	for a2a_count in a2a_result:
		con_author = a2a_count['_id']['author'].replace('&', '&amp;')
		con_connect = a2a_count['_id']['connection'].replace('&', '&amp;')
		if (len(con_author) > 0) and (len(con_connect) > 0):
			author_list.append((con_author, con_connect, int(a2a_count['value']['count'])))

	## >Influence Calculations
	if len(author_list) > 0:
		## >Create a blank graph
		G = nx.DiGraph()

		## >Add the edges to the graph
		G.add_weighted_edges_from(author_list)

		## >Run the requested metric, on the graph 'G'
		try:
			calc_metric, stats = inf.run_metric(params['metric'], G, 'weight', True)
		except:
			try:
				if params['metric'] == 'pagerank':
					calc_metric, stats = inf.run_metric('pagerank_norm', G, 'weight', True)
				else:
					return jsonify({'error': 'Error calculating metric'})
			except:
				return jsonify({'error': 'Pagerank did not converge'})
	else:
		ret_string = {'error': 'No connections found matching the criteria'}
		inf_sup.append_to_log(log_filename, str(ret_string))
		return jsonify(ret_string)

	## >Build the dictionary to return
	data_results = {}

	## >Append the metric data
	data_results['metrics'] = calc_metric

	## >If graph requested
	if for_params['return_graph'] is not None:
		if for_params['return_graph'].lower() == 'true':
			## >If format = data
			if for_params['format'] is None:
				## >Append the graph data
				data_results['graph'] = nx.to_edgelist(G, nodelist=None)
			## >If format = graphml
			elif for_params['format'].lower() == 'graphml':
				## >Create the graphml filename
				graphml_name = inf_sup.create_filename(params)
				## >Get the graphml data
				graphml_data = '\n'.join(nx.generate_graphml(G))
				## >Add the versioning
				graphml_final = '<?xml version="1.0" encoding="UTF-8"?>' + "\n"
				h = HTMLParser.HTMLParser()

				for line in graphml_data.split("\n"):
					## >Unescape the html content
					line = h.unescape(line)
					## >For each node add appropriate metric data into the graphml
					if '<node id="' in line:
						graphml_final += (line.replace('/>', '>') + "\n")
						node_name = line.partition('"')[-1].rpartition('"')[0]
						graphml_final += '      <data key="d1">' + str(calc_metric[node_name]) + '</data>' + "\n"
						graphml_final += '    </node>' + "\n"
					else:
						graphml_final += line + "\n"
						## >Add the key for the metric attribute
						if '<key' in line:
							graphml_final += '  <key attr.name="' + params['metric'] + '" attr.type="float" for="node" id="d1" />'

				if app.debug is True:
					## >Write out the graphml for testing
					graphml_name = inf_sup.create_filename(params)
					with open(graphml_name, 'w') as output_file:
						for line in graphml_final:
							output_file.write(line.encode('utf-8'))
					if not output_file.closed:
						output_file.close()

				## >Create the appropriate response to return the graphml
				response = make_response(graphml_final)
				response.headers["Content-Type"] = 'text/xml'
				response.headers["Content-Disposition"] = 'attachment; filename=%s' % (graphml_name,)
				return response

	## >To the log
	statistics = {}
	statistics['api_query'] = params
	statistics['mongo_query'] = mongo_query
	statistics['influence_metric'] = params['metric']
	statistics['metric_runtime'] = stats
	statistics['full_runtime'] = str(datetime.now() - start_time)
	statistics['graph_nodes'] = G.order()
	statistics['graph_edges'] = G.size()
	inf_sup.append_to_log(log_filename, str(statistics))

	if app.debug is True:
		### >Write out the influence for testing
		graphml_name = inf_sup.create_filename(params)
		influence_file = graphml_name.replace('.graphml', '.txt')
		with open(influence_file, 'w') as output_file:
			graph_list = calc_metric.items()
			for item in graph_list:
				output_file.write(item[0].encode('utf_8') + "," + str(item[1]) + '\n')
		if not output_file.closed:
			output_file.close()

	return jsonify(result=data_results)
Example #19
 def to_graphml(self):
     # Stringify all entities in graph, because GraphML exporter doesn't like non-string objects.
     g = nx.DiGraph()
     for s, p, e in self.relations():
         # add_edge takes two endpoints plus keyword attributes,
         # so store the predicate as an edge attribute
         g.add_edge(str(s), str(e), predicate=str(p))
     return '\n'.join(nx.generate_graphml(g))
Example #20
def draw(gr, impacts={}, changes={},
         return_svg=False, return_graphml=False,
         save_svg=True, save_graphml=False):
    """ Use GraphViz/Dot to draw a topology, optionally highlighting
     elements of interest to ensure they stand out in the diagram."""
    topo_type = gr.graph.get('type', 'unknown')  # e.g. 'IS-IS'
    topo_region = gr.graph.get('region', 'unknown')  # e.g. 'EU'
    if 'timestamp' in gr.graph:
        topo_time = time.gmtime(gr.graph['timestamp'])
        topo_time = time.strftime("%Y-%m-%d_%H-%M-%S", topo_time) + "_GMT"
    else:
        topo_time = "unknown"
    font_labels = 'Helvetica'
    colors = {}
    colors['main'] = 'lightgray' # root window background
    colors['borders'] = 'lightslategray' # borders around nodes+clusters
    colors['pop'] = 'beige' # cluster/POP background
    colors['node'] = 'linen' # regular node fill color
    # List of normal link colors (cycle through all these colors to vary
    # link colors for ease of tracing individual links)
    colors['links'] = ['midnightblue', 'black', 'saddlebrown', 'purple']
    # We really want the next colors to stand out, to show interesting things: 
    colors['spof'] = 'lightsalmon' # node is a single point of failure
    colors['isolated'] = 'yellow' # node would be isolated by SPOF failure
    colors['removed'] = 'red' # element has been removed since previous topo
    colors['added'] = '#01DF01' # (green) element has been added since previous topo
    #
    # If we got a topology diff, we need to re-add the nodes+edges that are now
    # missing since the previous capture, or they won't be drawn in the current
    # topology diagram.
    if 'nodes_removed' in changes:
        for node in changes['nodes_removed']:
            gr.add_node(node)
    if 'edges_removed' in changes:
        for edge in changes['edges_removed']:
            gr.add_edge(edge[0], edge[1], weight=1)
    #
    # Use PyDot/GraphViz to draw the topology.  The variable options here
    # (e.g. nodesep) are specified in the Dot language.
    dotgraph = pydot.Dot(
        "IS-IS Topology",
        graph_type='graph',
        bgcolor = colors['main'],
        nodesep = '0.25',
        ratio = '0.25',
        # 'ortho': edges horiz+vert w/ sharp corners, as in a circuit schematic
#        splines = 'ortho',
        rankdir = 'UD',  # UD=up/down, LR=left/right
        center = 'true',
        remincross = 'true',
        # simplify: draw only one edge between connected nodes
        simplify = 'true')
    #
    legend = pydot.Cluster(
        "Legend",
        label = "LEGEND\n" +\
         "Topology: " + topo_type +"\n" +\
         "Region: " + topo_region + "\n" +\
         "Recorded: " + topo_time + " \n\n\
         Note: Except for red (removed) and green (added),\n\
         link colors vary for ease of traceability.\n\n\
         (SPOF: Single Point of Failure)\n\n",
        fontsize = 16,
        parent_graph = dotgraph,
        style = 'filled',
        color = colors['borders'],
        fillcolor = "white")
    if impacts != {}:
        legend.add_node(pydot.Node(
            "Potential\nIsolation",
            style = 'filled',
            fillcolor = colors['isolated']))
        legend.add_node(pydot.Node(
            "Potential\nSPOF",
            style = 'filled',
            fillcolor = colors['spof']))
    if changes != {}:
        legend.add_node(pydot.Node(
            "Added",
            style = 'filled',
            fillcolor = colors['added']))
        legend.add_node(pydot.Node(
            "Removed\n(Failed?)",
            style = 'filled',
            fillcolor = colors['removed']))
    legend.add_node(pydot.Node(
        "Router",
        style = 'filled',
        fillcolor = colors['node']))
    dotgraph.add_subgraph(legend)
    #
    for node in gr.nodes():
        # Assuming the first 3 char's of the node name is the POP/location
        cluster_name = node[0:3]
        # create subgraph for the POP if it's not there already;
        # otherwise reuse the existing cluster so the node lands in the right POP
        existing = dotgraph.get_subgraph(cluster_name)
        if existing == []:
            subgr = pydot.Cluster(
                cluster_name,
                parent_graph = dotgraph,
                style = 'rounded',
                label = cluster_name,
                font = font_labels,
                fontsize = '16',
                color = colors['borders'],
                fillcolor = colors['pop'])
        else:
            subgr = existing[0]
        if 'nodes_removed' in changes and node in changes['nodes_removed']:
            nodefill = colors['removed']
        elif 'spofs' in impacts and node in impacts['spofs']:
            nodefill = colors['spof']
        elif 'isolated' in impacts and node in impacts['isolated']:
            nodefill = colors['isolated']
        elif 'nodes_added' in changes and node in changes['nodes_added']:
            nodefill = colors['added']
        else:
            nodefill = colors['node']
        subgr.add_node(pydot.Node(
            node,
            URL = url_node(node),
            font = font_labels,
            fontsize = '10',
            style = "filled",
            color = colors['borders'],
            fillcolor = nodefill))
        dotgraph.add_subgraph(subgr)
    xcolor = 0  # link color selector; link colors vary for ease of tracing
    for edge in gr.edges():
        if 'edges_removed' in changes and edge in changes['edges_removed']:
            ecolor = colors['removed']
        elif 'edges_added' in changes and edge in changes['edges_added']:
            ecolor = colors['added']
        else:
            ecolor = colors['links'][xcolor]
        dotedge = pydot.Edge(edge, color = ecolor)
        dotgraph.add_edge(dotedge)
        xcolor += 1
        if xcolor >= len(colors['links']):
            xcolor = 0
    # We could draw a JPG/GIF/PNG/etc, but:
    # Generating SVG/DOT/XDOT is faster, smaller output, and easier to
    # manipulate on-the-fly for interactivity (e.g. Raphael, canviz).
    topos = {}
    if return_svg:
        topos['svg'] = dotgraph.create(prog='dot', format='svg')
    if return_graphml:
        topos['graphml'] = '\n'.join(nx.generate_graphml(gr))
    if save_svg:
        fname = "%s_%s_%s.svg" % (topo_time, topo_type, topo_region)
        dotgraph.write(fname, prog='dot', format='svg')
    if save_graphml:
        fname = "%s_%s_%s.graphml" % (topo_time, topo_type, topo_region)
        nx.write_graphml(gr, fname)
    return topos
Example #21
        g.node['moo']['voltage'] = 110

if (out != ''):
    nx.write_graphml(g, out)

if (json):
    print("---------------LINK DATA------------------")
    d0 = json_graph.node_link_data(g)
    print(d0)
    print("------------ADJACENCY DATA----------------")
    d1 = json_graph.adjacency_data(g)
    print(d1)
    print("------------------------------------------")

if (prGraph):
    s = '\n'.join(nx.generate_graphml(g))
    print(s)

if (plot or save != ''):
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        print('matplotlib not found')
        exit(-1)

    nx.draw_networkx(g)
    plt.axis('off')
    if (save != ''):
        plt.savefig(save)
    if (plot):
        plt.show()
Example #22
def graph2graphml(graph):
    return "\n".join(networkx.generate_graphml(graph)) + "\n"
Example #23
File: netx.py Project: epurdy/elegans
def print_graphml(net):
    strg = '\n'.join(nx.generate_graphml(net))
    print(strg)
Example #24
File: netx.py Project: epurdy/elegans
def to_graphml(net):
    return '\n'.join(nx.generate_graphml(net))
Example #25
def graph2graphml(graph):
    return '\n'.join(networkx.generate_graphml(graph)) + '\n'
Example #26
def centrality():
    start_time = datetime.now()
    # TODO add config file read
    # TODO support cross network calculations (author_node --is--> author_node)
    ## >Get the REQUIRED parameters
    req_params = {}
    for entry in req_param_list:
        if request.args.get(entry) is not None:
            req_params[entry] = urllib2.unquote(request.args.get(entry)).replace("'", "")
        else:
            ret_string = "Required parameter missing: " + entry
            return jsonify(result=ret_string)
            # TODO Validate start_date, end_date
            ## >Verify the metric is valid
    if req_params["metric"] not in metric_list:
        return jsonify(result="Invalid metric requested")

        ## >Verify the start date is before the end date
    if int(req_params["start_date"]) > int(req_params["end_date"]):
        return jsonify(result="End date before start date")

        ## >Get the OPTIONAL parameters
    opt_params = {}
    for entry in opt_param_list:
        if request.args.get(entry) is not None:
            opt_params[entry] = urllib2.unquote(request.args.get(entry)).replace("'", "")
        else:
            opt_params[entry] = None
            # TODO validate the optional parameters

            ## >Get the FORMAT parameters
    for_params = {}
    for entry in format_param_list:
        if request.args.get(entry) is not None:
            for_params[entry] = urllib2.unquote(request.args.get(entry)).replace("'", "")
        else:
            for_params[entry] = None
    params = dict(req_params.items() + opt_params.items() + for_params.items())

    ## >Build the mongo query
    mongo_query = {}
    mongo_query["PostDate"] = {"$gte": params["start_date"], "$lte": params["end_date"]}
    mongo_query["Network"] = params["network"]

    for param, value in opt_params.iteritems():
        if value is not None:
            if param == "type":
                mongo_query["Type"] = opt_params["type"]
            if param == "twit_collect":
                mongo_query["Meta.sources"] = {"$in": [opt_params["twit_collect"]]}
            if param == "matched_project":
                mongo_query["Matching"] = {"$elemMatch": {"ProjectId": opt_params["matched_project"]}}
            if param == "matched_topic":
                # TODO
                pass
            if param == "scored_project":
                # TODO
                pass
            if param == "scored_topic":
                # TODO
                pass

                ## >Check if there are any matches
    if author_collection.find(mongo_query).count() == 0:
        return "No connections found matching the criteria"
    else:
        ## >Map/reduce the A-->A connections
        a2a_map = Code(
            """
				function () {
					emit({"author": this.Author, "connection": this.Connection},
						{"count": 1}
						);
					}
				"""
        )
        a2a_reduce = Code(
            """
				function (key, values) {
					var count = 0;
					values.forEach(function(v) {
						count += v['count'];
						});
					return {"count": count};
				}
				"""
        )
        a2a_result = author_collection.map_reduce(a2a_map, a2a_reduce, "a2a_results", query=mongo_query).find()

        ## >Build the author list
    author_list = []
    for a2a_count in a2a_result:
        author_list.append(
            (
                a2a_count["_id"]["author"].replace("&", "/x26"),
                a2a_count["_id"]["connection"].replace("&", "/x26"),
                int(a2a_count["value"]["count"]),
            )
        )

        ## >Influence Calculations
    if len(author_list) > 0:
        ## >Create a blank graph
        G = nx.DiGraph()

        ## >Add the edges to the graph
        G.add_weighted_edges_from(author_list)

        ## >Run the requested metric, on the graph 'G'
        calc_metric, stats = inf.run_metric(params["metric"], G, "weight", True)
    else:
        return jsonify(result="Parameters produced no graph/metrics")

        ## >Build the dictionary to return
    data_results = {}

    ## >Append the metric data
    data_results["metrics"] = calc_metric

    ## >If graph requested
    if for_params["return_graph"] is not None:
        if for_params["return_graph"].lower() == "true":
            ## >If format = data
            if for_params["format"] is None:
                ## >Append the graph data
                data_results["graph"] = nx.to_edgelist(G, nodelist=None)
                ## >If format = graphml
            elif for_params["format"].lower() == "graphml":
                ## >Create the graphml filename
                graphml_name = inf_sup.create_filename(params)
                ## >Get the graphml data
                graphml_data = "\n".join(nx.generate_graphml(G))
                ## >Add the versioning
                graphml_final = '<?xml version="1.0" encoding="UTF-8"?>' + "\n"
                h = HTMLParser.HTMLParser()

                for line in graphml_data.split("\n"):
                    ## >Unescape the html content
                    line = h.unescape(line)
                    ## >For each node add appropriate metric data into the graphml
                    if '<node id="' in line:
                        graphml_final += line.replace("/>", ">") + "\n"
                        node_name = line.partition('"')[-1].rpartition('"')[0]
                        graphml_final += '      <data key="d1">' + str(calc_metric[node_name]) + "</data>" + "\n"
                        graphml_final += "    </node>" + "\n"
                    else:
                        graphml_final += line + "\n"
                        ## >Add the key for the metric attribute
                        if "<key" in line:
                            graphml_final += (
                                '  <key attr.name="' + params["metric"] + '" attr.type="float" for="node" id="d1" />'
                            )

                if app.debug is True:
                    ## >Write out the graphml for testing
                    with open(graphml_name, "w") as output_file:
                        for line in graphml_final:
                            output_file.write(line.encode("utf-8"))
                    if not output_file.closed:
                        output_file.close()

                        ## >Create the appropriate response to return the graphml
                response = make_response(graphml_final)
                response.headers["Content-Type"] = "text/xml"
                response.headers["Content-Disposition"] = "attachment; filename=%s" % (graphml_name,)
                return response

    if app.debug is True:
        ## >If debug mode add the query parameters
        data_results["query"] = params
        ## >And add statistics about the process
        statistics = {}
        statistics["runtime"] = str(datetime.now() - start_time)
        data_results["stats"] = statistics
        ## >Add the mongo query used
        data_results["query"] = mongo_query
    return jsonify(result=data_results)
Example #27
 def create_graphml(self, graph):
     """Write graphml with networkx."""
     graphml = '\n'.join(nx.generate_graphml(graph))
     return graphml
Example #28
def restore(dataset, timestamp=None, fmt='dot'):
    """Restore reconstructs the network topology at a specific time in the past.

    Restore replays gossip messages from a dataset and reconstructs
    the network as it would have looked like at the specified
    timestamp in the past. The network is then printed to stdout using
    the format specified with `--fmt`.

    """
    if timestamp is None:
        timestamp = time.time()

    cutoff = timestamp - 2 * 7 * 24 * 3600
    channels = {}
    nodes = {}

    # Some target formats do not support UTF-8 aliases.
    codec = 'UTF-8' if fmt in ['dot'] else 'ASCII'

    for m in tqdm(dataset, desc="Replaying gossip messages"):
        if isinstance(m, ChannelAnnouncement):

            channels[f"{m.short_channel_id}/0"] = {
                "source": m.node_ids[0].hex(),
                "destination": m.node_ids[1].hex(),
                "timestamp": 0,
                "features": m.features.hex(),
            }

            channels[f"{m.short_channel_id}/1"] = {
                "source": m.node_ids[1].hex(),
                "destination": m.node_ids[0].hex(),
                "timestamp": 0,
                "features": m.features.hex(),
            }

        elif isinstance(m, ChannelUpdate):
            scid = f"{m.short_channel_id}/{m.direction}"
            chan = channels.get(scid, None)
            ts = m.timestamp

            if ts > timestamp:
                # Skip this update, it's in the future.
                continue

            if ts < cutoff:
                # Skip updates that cannot possibly keep this channel alive
                continue

            if chan is None:
                raise ValueError(
                    f"Could not find channel with short_channel_id {scid}")

            if chan["timestamp"] > ts:
                # Skip this update, it's outdated.
                continue

            chan["timestamp"] = ts
            chan["fee_base_msat"] = m.fee_base_msat
            chan["fee_proportional_millionths"] = m.fee_proportional_millionths
            chan["htlc_minimum_msat"] = m.htlc_minimum_msat
            if m.htlc_maximum_msat:
                chan["htlc_maximum_msat"] = m.htlc_maximum_msat
            chan["cltv_expiry_delta"] = m.cltv_expiry_delta
        elif isinstance(m, NodeAnnouncement):
            node_id = m.node_id.hex()

            old = nodes.get(node_id, None)
            if old is not None and old["timestamp"] > m.timestamp:
                continue

            alias = m.alias.replace(b'\x00', b'').decode(codec, 'ignore')
            nodes[node_id] = {
                "id": node_id,
                "timestamp": m.timestamp,
                "features": m.features.hex(),
                "rgb_color": m.rgb_color.hex(),
                "alias": alias,
                "addresses": ",".join([str(a) for a in m.addresses]),
                "out_degree": 0,
                "in_degree": 0,
            }

    # Cleanup pass: drop channels that haven't seen an update in 2 weeks
    todelete = []
    for scid, chan in tqdm(channels.items(), desc="Pruning outdated channels"):
        if chan["timestamp"] < cutoff:
            todelete.append(scid)
        else:
            node = nodes.get(chan["source"], None)
            if node is None:
                continue
            else:
                node["out_degree"] += 1
            node = nodes.get(chan["destination"], None)
            if node is None:
                continue
            else:
                node["in_degree"] += 1

    for scid in todelete:
        del channels[scid]

    nodes = [
        n for n in nodes.values() if n["in_degree"] > 0 or n['out_degree'] > 0
    ]

    if len(channels) == 0:
        print(
            "ERROR: no channels are left after pruning, make sure to select a "
            "timestamp that is covered by the dataset.")
        sys.exit(1)

    g = nx.DiGraph()
    for n in nodes:
        g.add_node(n["id"], **n)

    for scid, c in channels.items():
        g.add_edge(c["source"], c["destination"], scid=scid, **c)

    if fmt == 'dot':
        print(nx.nx_pydot.to_pydot(g))

    elif fmt == 'gml':
        for line in nx.generate_gml(g):
            print(line)

    elif fmt == 'graphml':
        for line in nx.generate_graphml(g,
                                        named_key_ids=True,
                                        edge_id_from_attribute='scid'):
            print(line)

    elif fmt == 'json':
        print(json.dumps(json_graph.adjacency_data(g)))
Example #29
def addwf(request):

    fileinfo = None
    jobinfo = None
    sticky = False
    inherit = None

    post = request.POST
    dagName = post['dag']
    name = post['name']
    user = post['user']
    state = post['state']
    filejson = post['fileinfo']
    jobjson = post['jobinfo']
    description = post['description']
    wfuuid = uuid.uuid1()

    if (filejson != ''):
        try:
            fileinfo = json.loads(filejson)
            logger.info('WORKFLOW: fileinfo %s', fileinfo)
        except:  # non-JSON string... Identify:
            if (filejson == 'sticky'):
                sticky = True
                logger.info('WORKFLOW: sticky filename set')
            if ('inherit' in filejson):
                try:
                    inherit = filejson.split(':')[1]
                    logger.info('WORKFLOW: filename inheritance %s', inherit)
                except:
                    pass

    if (jobjson != ''):
        jobinfo = json.loads(jobjson)  # print(jobjson) # print(jobinfo)

    ts_def = timezone.now()

    myDAG = dag.objects.get(name=dagName)
    rootName = myDAG.root
    nvert = myDAG.nvertices

    # Create a Workflow object and populate it:
    wf = workflow()
    wf.ts_def = ts_def
    wf.uuid = wfuuid
    wf.dag = dagName
    wf.name = name
    wf.user = user
    wf.state = state
    wf.nvertices = nvert
    wf.description = description
    # ATTN: we'll save the WF a bit later ( we would like to get the root job uuid).

    g = nx.DiGraph()

    # +++++++++ JOBS
    for dv in dagVertex.objects.filter(dag=dagName):
        g.add_node(dv.name, wf=dagName)

        # defaults:
        payload = dv.payload
        env = dv.env
        if (jobinfo):  # need to overwrite some attributes
            for k in jobinfo.keys():
                if (k == dv.name):
                    try:
                        payload = jobinfo[k]["payload"]
                    except:
                        pass
                    try:
                        env = json.dumps(jobinfo[k]["env"])
                    except:
                        pass

        j = job(
            uuid=uuid.uuid1(),
            wfuuid=wfuuid,
            jobtype=dv.jobtype,
            payload=payload,
            env=env,
            priority=dv.priority,
            state='template',
            ts_def=ts_def,
            timelimit=dv.timelimit,
            name=dv.name,
        )

        if (dv.name == rootName): wf.rootuuid = j.uuid
        j.save()
    # +++++++++ END JOBS

    wf.save()  # can do it now since needed rootuuid, now we have it

    # +++++++++ DATA
    for de in dagEdge.objects.filter(dag=dagName):

        # even in a multigraph, an edge can only be associated with one
        # source and one target. A source and a target can be connected
        # by multiple edges in a multigraph, however. So check for
        # the former condition.

        s_cand = job.objects.filter(wfuuid=wfuuid, name=de.source)
        t_cand = job.objects.filter(wfuuid=wfuuid, name=de.target)

        if (len(s_cand) != 1):
            return HttpResponse("addwf: inconsistent graph - source")
        if (len(t_cand) != 1):
            return HttpResponse("addwf: inconsistent graph - target")

        sourceuuid = s_cand[0].uuid
        targetuuid = t_cand[0].uuid

        # a "dataset" - a file in this case - is created by default with a name formed
        # from its UUID and predefined extension (as per type object)
        # It can, however, be overridden by supplemental information found in "fileinfo"

        # Defaults:

        d_uuid = str(uuid.uuid1())
        dt = ''
        try:
            dt = datatype.objects.get(name=de.datatype)
        except:
            delstat = ''
            if delwf(wfuuid):
                delstat = 'OK'
            else:
                delstat = 'FAIL'
            return HttpResponse(
                'Failed to get datatype, deleting workflow. Clean up status: %s'
                % delstat)

        ext = dt.ext  # expect that the dot is included

        dirpath = de.dirpath

        filename = ''
        if (sticky):
            filename = de.name
        elif (inherit):
            filename = inherit + ':' + dagName + ':' + de.source + ':' + de.datatag + ext
        else:
            filename = d_uuid + ext

        comment = de.comment  # default comment inherited from DAG

        # Optional overrides: client may send JSON data with overriding data
        if (fileinfo):
            for k in fileinfo.keys():
                if ((de.source + ":" + de.target) == k):
                    try:
                        filename = fileinfo[k]["name"]
                    except:
                        pass
                    try:
                        dirpath = fileinfo[k]["dirpath"]
                    except:
                        pass
                    try:
                        comment = fileinfo[k]["comment"]
                    except:
                        pass

        logger.info('WORKFLOW: filename %s', filename)

        d = dataset(
            uuid=d_uuid,
            wfuuid=wfuuid,
            source=de.source,
            target=de.target,
            sourceuuid=sourceuuid,
            targetuuid=targetuuid,
            name=filename,
            state='template',
            dirpath=dirpath,
            comment=comment,
            datatype=de.datatype,
            datatag=de.datatag,
            wf=name,
        )

        d.save()
        g.add_edge(de.source, de.target)

        # +++ AUGMENT JOB ENVIRONMENT WITH DATASET INFO
        fullname = dirpath + filename

        #print('source:',de.source,' target:', de.target,' datatag:', de.datatag, ' full name:',fullname)

        for jid in (sourceuuid, targetuuid):
            j4env = job.objects.get(
                uuid=jid)  # MUST work as per comments above, we checked
            j4env.augmentEnv({de.datatag: fullname
                              })  # Note that the filename is set in the env..
            j4env.save()

    # +++++++++ END DATA

    # Now that the WF is created, it can be set into defined state
    # (hence ready for execution) if so desired. This method
    # toggles both wf and its "root job"

    if (state == 'defined'): wf2defined(wf)

    # Finish up and send a message to the client
    s = '\n'.join(nx.generate_graphml(g))  # this can be in fact something else
    return HttpResponse(s)
Example #30
def to_graphml(g):
    # generate_graphml yields lines; join them into one GraphML string
    return '\n'.join(generate_graphml(g))
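
The pattern shared by most of these examples, as one self-contained round trip using only plain NetworkX:

import networkx as nx

G = nx.path_graph(3)
text = '\n'.join(nx.generate_graphml(G))  # serialize to an in-memory string
H = nx.parse_graphml(text)  # and back to a graph
assert sorted(H.nodes()) == ['0', '1', '2']  # node ids are read back as strings
nx.write_graphml(G, 'graph.graphml')  # or write straight to a file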