Code Example #1
def export(G, D3, Sigma, GEXF, name='Graph'):
    if D3 == True:
        print("Starting D3 Export for", name)
        # D3
        # Print JSON
        f = open(name + 'D3.json', 'w')
        f.write(json_graph.dumps(G))
        print("D3 Exported")

    if Sigma == True:
        print("Starting Sigma Export for", name)
        # Sigma
        graphThing = json.loads(json_graph.dumps(G))
        for link in graphThing["links"]:
            link["source"] = link["sources"]
            del link["sources"]
            link["target"] = link["targets"]
            del link["targets"]
        graphThing["edges"] = graphThing["links"]
        del graphThing["links"]
        # Print JSON
        f = open(name + 'Sigma.json', 'w')
        f.write(json.dumps(graphThing, indent=2))
        print("Exporting for Sigma")

    if GEXF == True:
        print("Starting GEXF export for", name)
        # Print GEXF
        nx.write_gexf(G, name + ".gexf", prettyprint=True)
        print("Exporting GEXF")

    if not D3 and not Sigma and not GEXF:
        print("Not doin' nuthin'")
Code Example #2
def main():
    #read the recipes and make a dictionary with the name of the recipe and its ingredients
    r=csv.reader(open("recipes.csv",'r'))
    data={}
    
    for row in r:
        data[row[2]]=row[1]
    #stevia will be the node that the whole graph is based into
    G=nx.Graph()
    G.add_node("stevia",weight=1000,link="")
    
    #for every recipe gather its ingredients
    for row in data:
        ingr=data[row].split(';')
        for i in ingr:
            #if the node is already in the graph, increase the weight
            if(i in G.nodes()):
                G.node[i]["weight"]+=1
            #else add the ingredient as a new node,initialize its weight, add a link as an example recipe and add the edge
            else:
                G.add_node(i)
                G.add_edge(i,"stevia")
                G.node[i]["weight"]=1
                G.node[i]["link"]=row
    f = open('graph.json', 'w')
    f.write(json_graph.dumps(G))
    f.close()
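On current NetworkX the `G.node[i]` attribute access used above is spelled `G.nodes[i]`, and `json_graph.dumps` is gone. A hedged sketch of the same ingredient-counting loop under those assumptions, keeping the CSV column indexes from the original:

import csv
import json
import networkx as nx
from networkx.readwrite import json_graph

def main_modern():
    with open("recipes.csv", newline='') as fh:
        data = {row[2]: row[1] for row in csv.reader(fh)}

    G = nx.Graph()
    G.add_node("stevia", weight=1000, link="")
    for recipe, ingredients in data.items():
        for i in ingredients.split(';'):
            if i in G:
                G.nodes[i]["weight"] += 1        # existing ingredient: bump its weight
            else:
                G.add_node(i, weight=1, link=recipe)
                G.add_edge(i, "stevia")

    with open('graph.json', 'w') as f:
        json.dump(json_graph.node_link_data(G), f)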
Code Example #3
File: NLProcessor.py Project: venkatvi/EYB
def visualizeNetwork(nodes, edges, json_file):
	#visualize as graph
	B = nx.Graph()
	B.add_nodes_from(nodes)
	for edge, weight in edges.iteritems():
		nodes=edge.split("#")
		error =0
		for node in nodes:
			if node in B:
				if not 'degree' in B.node[node]:
					B.node[node]['degree'] = 1
				else:
					B.node[node]['degree'] += 1
			else:
				print "No node found: " + node
				error = 1
				break;
		if error == 0:
			B.add_edges_from([(nodes[0], nodes[1])]) 
			B[nodes[0]][nodes[1]]['value'] = weight

	#write network data into json
	dumps = json_graph.dumps(B)

	with open(json_file, 'w') as file:
		file.write(dumps);

	return B
Code Example #4
File: convert.py Project: huihuifan/jq1
def graphmltojson(graphfile, outfile):
    """
	Converts GraphML file to json while adding communities/modularity groups
	using python-louvain. JSON output is usable with D3 force layout.
	Usage:
	>>> python convert.py -i mygraph.graphml -o outfile.json
	"""

    G = nx.read_graphml(graphfile)
    G = nx.Graph(G)
    #G = nx.DiGraph.to_undirected(G)

    #karate = Nexus.get(G)
    #cl = karate.community_fastgreedy()
    #k = 57
    #cl.as_clustering(k).membership

    #finds best community using louvain
    partition = community.best_partition(G)

    #adds partition/community number as node attribute 'group' (used by the D3 layout)
    for n, d in G.nodes_iter(data=True):
        d['group'] = partition[n]

    node_link = json_graph.node_link_data(G)
    json = json_graph.dumps(node_link)

    # Write to file
    fo = open(outfile, "w")
    fo.write(json)
    fo.close()
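`G.nodes_iter` was removed in NetworkX 2.0, and `json_graph.dumps` went with it. A sketch of the same GraphML-to-JSON conversion against the current API, still assuming the `community` module from python-louvain; a built-in alternative is noted in the comments:

import json
import networkx as nx
from networkx.readwrite import json_graph
import community  # python-louvain

def graphmltojson_modern(graphfile, outfile):
    G = nx.Graph(nx.read_graphml(graphfile))
    partition = community.best_partition(G)    # maps node -> community id
    for n, d in G.nodes(data=True):            # nodes_iter() was removed in NetworkX 2.0
        d['group'] = partition[n]
    # Alternative without python-louvain (NetworkX >= 2.8):
    # for gid, nodes in enumerate(nx.community.louvain_communities(G)):
    #     for n in nodes:
    #         G.nodes[n]['group'] = gid
    with open(outfile, 'w') as fo:
        json.dump(json_graph.node_link_data(G), fo)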
Code Example #5
File: convert.py Project: huihuifan/jq1
def graphmltojson(graphfile, outfile):
	"""
	Converts GraphML file to json while adding communities/modularity groups
	using python-louvain. JSON output is usable with D3 force layout.
	Usage:
	>>> python convert.py -i mygraph.graphml -o outfile.json
	"""
	
	G = nx.read_graphml(graphfile)
	G = nx.Graph(G)
	#G = nx.DiGraph.to_undirected(G)

	#karate = Nexus.get(G)
	#cl = karate.community_fastgreedy()
	#k = 57
	#cl.as_clustering(k).membership

	#finds best community using louvain
	partition = community.best_partition(G)
 
	#adds partition/community number as node attribute 'group' (used by the D3 layout)
	for n,d in G.nodes_iter(data=True):
		d['group'] = partition[n]
 
	node_link = json_graph.node_link_data(G)
	json = json_graph.dumps(node_link)
	
	# Write to file
	fo = open(outfile, "w")
	fo.write(json);
	fo.close()
Code Example #6
 def data(self, **kw):
     try:
         with closing(open('cache.json', 'r')) as data_file:
             print 'Reading from cache'
             return data_file.read()
     except IOError:
         print 'Fetching data'
         with closing(open('cache.json', 'w')) as data_file:
             foaf_graph = None
             try:
                 with closing(open('graph_cache.json', 'r')) as graph_file:
                     print 'Reading from graph cache'
                     foaf_graph = jg.load(graph_file)
             except IOError:
                 foaf_graph = retrieve_foaf(FBTOKEN)
             clusters = community.best_partition(foaf_graph)
             degree_distribution = get_histograms(foaf_graph)
             cluster_counts = get_cluster_counts(clusters)
             top10 = get_top_degree(foaf_graph, 10)
             foaf_json_graph = json.loads(jg.dumps(foaf_graph))
             ob = foaf_graph.degree()
             infos = {
                 'graph':foaf_json_graph,
                 'clusters':clusters,
                 'cluster_counts':cluster_counts,
                 'degree_distribution':degree_distribution,
                 'degree':foaf_graph.degree(),
                 'top10':top10
             }
             foaf_data = json.dumps(infos)
             data_file.write(foaf_data)
             return foaf_data
Code Example #7
def GraphtoCSV(nxGraph, node_keys=['id'], edge_keys=['source','target'], measures=None):
    """convert nxGraph to a node list and edge list
    :param node_keys: is a list of keys in node json
    :param edge_keys: is a list of keys in edge json
    :param measures: is a the name of a centrality measure in networkx, 
    e.g. 'betweenness_centrality', or any measure that returns scores for nodes
    returns two CSVs, a node list and edges list with node_keys
    and edge_keys, respectively as columns."""
    data = json.loads(json_graph.dumps(nxGraph))
    nodes = data.get('nodes')
    edges = data.get('links')
    # if measures:
    #     methods = nx.__dict__#{'betweeness_centrality': nx.betweenness_centrality}
    #     props= {}
    #     for m in measures:
    #         import pdb; pdb.set_trace()
    #         score = methods[m](nxGraph)
    #         props[m] = score
    #     for node in nodes:
    #         for prop in props:
    #             node[prop] = props[prop][node['id']]

    node_f = csv.writer(open("nodes.csv", "wb+"))
    node_row = node_keys
    if measures:
        node_row = node_row + measures
    node_f.writerow(node_row)
    for n in nodes:
        node_f.writerow([n.get(key) for key in node_row])
    edge_f = csv.writer(open("edges.csv", "wb+"))
    edge_row = edge_keys
    edge_f.writerow(edge_row)
    for e in edges:
        edge_f.writerow([e.get(key) for key in edge_row])
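The `"wb+"` mode used for the csv writers above is Python 2 specific; on Python 3 csv files are opened in text mode with `newline=''`. A sketch of the same node/edge export, with the optional `measures` handling implemented by looking the measure up on `nx` by name (an assumption about what the commented-out block intended):

import csv
import networkx as nx
from networkx.readwrite import json_graph

def graph_to_csv(G, node_keys=('id',), edge_keys=('source', 'target'), measures=()):
    data = json_graph.node_link_data(G)
    nodes, edges = data['nodes'], data['links']
    # Attach centrality scores, e.g. measures=('betweenness_centrality',)
    for m in measures:
        scores = getattr(nx, m)(G)             # e.g. nx.betweenness_centrality(G)
        for node in nodes:
            node[m] = scores[node['id']]

    with open('nodes.csv', 'w', newline='') as fh:
        writer = csv.writer(fh)
        cols = list(node_keys) + list(measures)
        writer.writerow(cols)
        writer.writerows([n.get(k) for k in cols] for n in nodes)

    with open('edges.csv', 'w', newline='') as fh:
        writer = csv.writer(fh)
        writer.writerow(edge_keys)
        writer.writerows([e.get(k) for k in edge_keys] for e in edges)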
Code Example #8
File: views.py Project: snowshine09/MURI
def prepareNetwork(request):
    if request.method == 'POST':
        response = {}
        response['nodes'] = []
        response['links'] = []
        node_types = request.POST.getlist('entities[]', None)
        events_id = request.POST.getlist('events_id[]', None)

        if node_types == None or events_id == None:
            return

        graph   = nx.DiGraph()

        events = Entity.objects.filter(id__in=events_id)
        linked_entities = list(events.select_subclasses())

        for eve in events:
            entities = list(chain(eve.findTargets(), eve.findSources()))
            linked_entities += entities
        for entity in linked_entities:
            graph.add_node(entity.id, entity.getKeyAttr())

        relations = Relationship.objects.filter( Q(source__in=linked_entities) & Q(target__in=linked_entities) )
        for relation in relations:
            graph.add_edge(relation.source.id, relation.target.id, relation.getAllAttr())

        return HttpResponse(json_graph.dumps(graph), mimetype='application/json')
    return
Code Example #9
def generate_scale_free_power_law_graph(num, exp, seed):
    '''
	this function generates a scale free with power law
	graph and write it into a file with .net format
	'''
    sequence = create_degree_sequence(num, powerlaw_sequence, exponent=exp)
    graph = nx.configuration_model(sequence, seed=seed)
    loops = graph.selfloop_edges()
    json_str = json_graph.dumps(graph)
    dict_graph = json.loads(json_str)
    output_file = open('scale_free_power_law.net', 'w')
    # write nodes
    total_node_num = len(dict_graph['nodes'])
    output_file.write('*Vertices ' + str(total_node_num) + '\n')
    count = 1
    for item in dict_graph['nodes']:
        # +1 coz id starts with 0, it should start from 1
        output_file.write('  ' + str(count) + '  ' + str(item['id'] + 1) +
                          '\n')
        count = count + 1
    # write edges, links
    output_file.write('*Edges' + '\n')
    for item in dict_graph['links']:
        # +1 coz source and target starts with 0, it should start from 1
        output_file.write(
            str(item['source'] + 1) + ' ' + str(item['target'] + 1) + ' 1' +
            '\n')
    output_file.close()
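`create_degree_sequence` was removed in NetworkX 2.0, and NetworkX can also write Pajek `.net` files directly, so the hand-rolled writer above is only needed if the exact vertex numbering matters. A rough sketch under those assumptions (the integer rounding of the power-law sequence is an approximation of the old helper, not its exact behaviour):

import networkx as nx
from networkx.utils import powerlaw_sequence

def generate_scale_free_power_law_graph_modern(num, exp, seed, path='scale_free_power_law.net'):
    # Draw a power-law degree sequence, round to integers and make the sum even
    # so that configuration_model accepts it.
    sequence = [max(1, int(round(d))) for d in powerlaw_sequence(num, exponent=exp, seed=seed)]
    if sum(sequence) % 2:
        sequence[0] += 1
    graph = nx.configuration_model(sequence, seed=seed)
    nx.write_pajek(graph, path)   # vertices are numbered 1..n in the .net output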
Code Example #10
File: graph.py Project: Koolboyman/pokecrystal
 def to_d3(self):
     """ Exports to d3.js because we're gangster like that.
     """
     import networkx.readwrite.json_graph as json_graph
     content = json_graph.dumps(self)
     fh = open("crystal/crystal.json", "w")
     fh.write(content)
     fh.close()
Code Example #11
def show_min_path(name1, name2, levels=0):

    path = min_path(name1, name2)

    H = color_path(path, 'blue')
    for i in range(levels):
        H = expand_graph(H)

    data = json_graph.dumps(H)
    return data
Code Example #12
def show_min_path(name1, name2, levels=0):

	path=min_path(name1, name2)
	
	H=color_path(path,'blue')
	for i in range(levels):
		H=expand_graph(H)

	data=json_graph.dumps(H)
	return data
Code Example #13
File: browser.py Project: cite-architecture/pandect
    def _badger_get_host_dest_info(self, source_vertex_attr_value,\
                                       source_vertex_attr_type=None,\
                                       target_vertex_attr_type=None):
        badger_graph = nx.Graph()
        source_vertex_attr_value_idx = None
        target_vertex_attr_value_idx = None

        if ("urn-cptl-HOST-ipv4" == source_vertex_attr_type and\
                  "urn-cptl-HOST-hostname" == target_vertex_attr_type):
            file = self.pandect_data_dir + "/test/resources" + "/dst.url-uniq"
            source_vertex_attr_value_idx = 0
            target_vertex_attr_value_idx = 1
        elif ("urn-cptl-HOST-ipv4" == source_vertex_attr_type and\
                  "urn-cptl-HOST-ipv4" == target_vertex_attr_type):
            file = self.pandect_data_dir + "/test/resources" + "/dst.ip-uniq"
            source_vertex_attr_value_idx = 0
            target_vertex_attr_value_idx = 1
        elif ("urn-cptl-HOST-ipv4" == source_vertex_attr_type and\
                  "urn-cptl-HOST-tag-tldcount" == target_vertex_attr_type):
            file = self.pandect_data_dir + "/test/resources" + "/dst.url-uniq.tldcount"
            source_vertex_attr_value_idx = 0
            target_vertex_attr_value_idx = 1
        elif ("urn-cptl-HOST-ipv4" == source_vertex_attr_type and\
                  "urn-cptl-HOST-tag-cccount" == target_vertex_attr_type):
            file = self.pandect_data_dir + "/test/resources" + "/dst.cc-uniq.cccount"
            source_vertex_attr_value_idx = 0
            target_vertex_attr_value_idx = 1            
        else:
            raise Exception("Unknown!")

        # We have the selected vertex urn, add a node for that in 
        #  the return graph.  (e.g. HOST_4)
        badger_graph.add_node(0, {source_vertex_attr_type:source_vertex_attr_value})
        i = 1

        f = open(file)
        lines = f.readlines()
        for line in lines:
            line = line.rstrip("\n")
            line_pcs = line.split(":")
            vertex_attr_value = line_pcs[source_vertex_attr_value_idx]
            target_attr_value = line_pcs[target_vertex_attr_value_idx]
            # Add a name for every hostname and an edge from the 
            #  selected_vertex_urn to that hostname
            if vertex_attr_value == source_vertex_attr_value:
                if "urn-cptl-HOST-tag-tldcount" == target_vertex_attr_type or\
                        "urn-cptl-HOST-tag-cccount" == target_vertex_attr_type:
                    target_attr_value += "," + line_pcs[2]
                badger_graph.add_node(i, {target_vertex_attr_type:target_attr_value})    
                badger_graph.add_edge(0, i)

            i = i + 1
        f.close()
        result = json_graph.dumps(badger_graph)
        return result
Code Example #14
def run(date=""):
	ER=pd.DataFrame.from_csv('/Users/wgmueller/Desktop/Data/enron/weeks.csv',sep="\t")
	sub_ER=ER[(ER['V2'] == int(date))] #& (ER['sender'] == 155)]
	g=nx.Graph(zip(sub_ER['sender'],sub_ER['receiver']))
	ec=nx.eigenvector_centrality(g)
	deg=g.degree()
	nx.set_node_attributes(g,'degree',deg)
	bc=nx.betweenness_centrality(g)
	nx.set_node_attributes(g,'eigcen',ec)
	nx.set_node_attributes(g,'betweenness',bc)
	return json_graph.dumps(g)
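The attribute calls above use the NetworkX 1.x argument order `set_node_attributes(G, name, values)`; from 2.0 the signature is `set_node_attributes(G, values, name)`, and `pd.DataFrame.from_csv` has been removed from pandas. A sketch against the current APIs; the `V2`/`sender`/`receiver` column names are kept from the example and are specific to that dataset:

import json
import pandas as pd
import networkx as nx
from networkx.readwrite import json_graph

def run_modern(csv_path, date):
    er = pd.read_csv(csv_path, sep='\t')
    sub = er[er['V2'] == int(date)]
    g = nx.Graph(list(zip(sub['sender'], sub['receiver'])))
    nx.set_node_attributes(g, dict(g.degree()), 'degree')
    nx.set_node_attributes(g, nx.eigenvector_centrality(g), 'eigcen')
    nx.set_node_attributes(g, nx.betweenness_centrality(g), 'betweenness')
    return json.dumps(json_graph.node_link_data(g))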
Code Example #15
File: topic_tree.py Project: kcl-ddh/bacalhau
    def to_json(self, filepath):
        """Serializes the TopicTree to JSON Graph format and writes it
        to a file.

        `filepath` is a file path or File-like object."""
        if isinstance(filepath, basestring):
            json_file = open(filepath, "w")
        else:
            json_file = filepath
        json_file.write(json_graph.dumps(self))
        if isinstance(filepath, basestring):
            json_file.close()
Code Example #16
def concept_nbh(name, levels):
	G=json_graph.load(open("static/local_instance.json"))
	G=G.subgraph(get_id(name))
	
	#the user might not know his numbers very well
	try:
		levels=int(levels)
	except:
		levels=0

	for i in range(levels):
		G=expand_graph(G)

	data=json_graph.dumps(G)
	return data
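`json_graph.load` was removed along with `dumps`; on current NetworkX the cached instance is read back with `json.load` plus `node_link_graph`. A sketch under that assumption, where `get_id` and `expand_graph` are the project's own helpers used above:

import json
from networkx.readwrite import json_graph

def concept_nbh_modern(name, levels):
    with open("static/local_instance.json") as fh:
        G = json_graph.node_link_graph(json.load(fh))
    G = G.subgraph(get_id(name))

    # the user might not know his numbers very well
    try:
        levels = int(levels)
    except (TypeError, ValueError):
        levels = 0
    for _ in range(levels):
        G = expand_graph(G)

    return json.dumps(json_graph.node_link_data(G))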
Code Example #17
def export_json(G, path):
    jsonString= json_graph.dumps(G)
    jsonData = json.loads(jsonString)
    nodes = jsonData['nodes']
    links = jsonData['links']
    
    res = {}
    res['nodes'] = nodes
    res['links'] = links
    jsonString = json.dumps(res)
    jsonString = jsonString.replace('id', 'name')
    jsonString = jsonString.replace('weight', 'value')
    with open(path, 'w') as f:
     f.write(jsonString)
     f.flush()
Code Example #18
def concept_nbh(name, levels):
    G = json_graph.load(open("static/local_instance.json"))
    G = G.subgraph(get_id(name))

    #the user might not know his numbers very well
    try:
        levels = int(levels)
    except:
        levels = 0

    for i in range(levels):
        G = expand_graph(G)

    data = json_graph.dumps(G)
    return data
Code Example #19
File: disagree.py Project: jflanigan/AMRICA
def xlang_main(args):
  """ Disagreement graphs for aligned cross-language language. """
  src_amr_fh = codecs.open(args.src_amr, encoding='utf8')
  tgt_amr_fh = codecs.open(args.tgt_amr, encoding='utf8')
  src2tgt_fh = codecs.open(args.align_src2tgt, encoding='utf8')
  tgt2src_fh = codecs.open(args.align_tgt2src, encoding='utf8')
  gold_aligned_fh = None
  if args.align_in:
    gold_aligned_fh = codecs.open(args.align_in, encoding='utf8')
  (json_fh, align_fh) = open_output_files(args)

  amrs_same_sent = []
  aligner = Amr2AmrAligner(num_best=args.num_align_read, num_best_in_file=args.num_aligned_in_file, src2tgt_fh=src2tgt_fh, tgt2src_fh=tgt2src_fh)
  while True:
    (src_amr_line, src_comments) = amr_metadata.get_amr_line(src_amr_fh)
    if src_amr_line == "":
      break
    (tgt_amr_line, tgt_comments) = amr_metadata.get_amr_line(tgt_amr_fh)
    src_amr = amr_metadata.AmrMeta.from_parse(src_amr_line, src_comments, consts_to_vars=True)
    tgt_amr = amr_metadata.AmrMeta.from_parse(tgt_amr_line, tgt_comments, consts_to_vars=True)
    (cur_id, src_sent) = get_sent_info(src_amr.metadata)
    (tgt_id, tgt_sent) = get_sent_info(tgt_amr.metadata, dflt_id=cur_id)
    assert cur_id == tgt_id

    smatchgraphs = hilight_disagreement([tgt_amr], src_amr, args.num_restarts, aligner=aligner, gold_aligned_fh=gold_aligned_fh)
    amr_graphs = get_disagreement_graphs(smatchgraphs, aligner=aligner,
      unmatch_dead_nodes=(gold_aligned_fh == None))

    if json_fh:
      json_fh.write(json_graph.dumps(amr_graphs[0]) + '\n')
    if align_fh:
      align_fh.write("""# ::id %s\n# ::src_snt %s\n# ::tgt_snt %s\n""" % (cur_id, src_sent, tgt_sent))
      align_fh.write('\n'.join(smatchgraphs[0].get_text_alignments()) + '\n\n')
    if (args.verbose):
      print("ID: %s\n Sentence: %s\n Sentence: %s\n Score: %f" % (cur_id, src_sent, tgt_sent, amr_graphs[0][1]))

    if args.outdir != None:
      ag = nx.to_agraph(amr_graphs[0][0])
      ag.graph_attr['label'] = "%s\n%s" % (src_sent, tgt_sent)
      ag.layout(prog=args.layout)
      ag.draw('%s/%s.png' % (args.outdir, cur_id))

  src_amr_fh.close()
  tgt_amr_fh.close()
  src2tgt_fh.close()
  tgt2src_fh.close()
  gold_aligned_fh and gold_aligned_fh.close()
  close_output_files(json_fh, align_fh)
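Besides `json_graph.dumps`, the `nx.to_agraph` call near the end of this example has also moved: current NetworkX exposes it as `networkx.drawing.nx_agraph.to_agraph`, and it still requires pygraphviz. A minimal sketch of just the PNG rendering step under that assumption:

from networkx.drawing.nx_agraph import to_agraph

def draw_disagreement_png(graph, label, outdir, cur_id, layout='dot'):
    # graph is a NetworkX graph; rendering requires pygraphviz.
    ag = to_agraph(graph)
    ag.graph_attr['label'] = label
    ag.layout(prog=layout)
    ag.draw('%s/%s.png' % (outdir, cur_id))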
Code Example #20
File: graphserialize.py Project: mfrey/archive
  def write_json_graph(self, file_name, network):
    """ Write a JSON representation of a graph to a file. 

    Parameters
    ----------
    file_name : The name of the file to which the JSON representation will be written

    network: The graph which will be written

    """
    try:
      export_file = open(file_name, "w")
      export_file.write(json_graph.dumps(network))
      export_file.close()
    except IOError:
      self.logger.fatal('an IO error exception occurred while writing a JSON representation of a graph')
Code Example #21
def show_nca(name1, name2, levels=0):
	nca_list=nca(name1, name2)
	G=json_graph.load(open("static/local_instance.json"))
	H=nx.DiGraph()
	for each in nca_list:
		anc_path=nx.compose(color_path(each[0],'green'),color_path(each[1],'yellow'))
		H=nx.compose(H,anc_path)

	for i in range(levels):
		H=expand_graph(H)

	for each in nca_list:
		H.node[each[0][0]]['color']='red' #color the nca different

	data=json_graph.dumps(H)
	return data
Code Example #22
File: models.py Project: bceskavich/grov
	def create_graph(self):
		# Initiate Graph
		g = nx.DiGraph()

		twitter_ids = [u.twitter_id for u in User.query.all()]

		for user in User.query.all():
			amnt = 1
			if len(Connection.query.filter_by(twitter_id=user.twitter_id).all()) > 0:
				amnt += len(Connection.query.filter_by(twitter_id=user.twitter_id).all())
			g.add_node(user.twitter_id, {'label':'@' + user.username, 'amnt':amnt, 'class':'user', 'avatar':user.avatar()})

		for conn in Connection.query.all():
			if conn.twitter_id in twitter_ids:
				pass
			elif conn.rel == 0:
				amnt = len(Connection.query.filter_by(twitter_id=conn.twitter_id).all())
				g.add_node(conn.twitter_id, {
					'label':'Anonymous Follower',
					'user_id':conn.user_id,
					'amnt':amnt,
					'class':'anonymous' })
			elif conn.rel == 1:
				amnt = len(Connection.query.filter_by(twitter_id=conn.twitter_id).all())
				g.add_node(conn.twitter_id, {
					'label':'Anonymous Friend',
					'user_id':conn.user_id,
					'amnt':amnt,
					'class':'anonymous' })


		friends = Connection.query.filter_by(rel=1)
		followers = Connection.query.filter_by(rel=0)

		for i in friends:
			user = User.query.get(i.user_id)
			g.add_edge(i.twitter_id, user.twitter_id)

		for i in followers:
			user = User.query.get(i.user_id)
			g.add_edge(user.twitter_id, i.twitter_id)

		data = json_graph.dumps(g, indent=1, encoding='utf-8')
		src = os.path.join(basedir, 'app/static/json/graphdata.json')
		f = file(src, 'w')
		f.write(data)
		f.close()
Code Example #23
def show_nca(name1, name2, levels=0):
    nca_list = nca(name1, name2)
    G = json_graph.load(open("static/local_instance.json"))
    H = nx.DiGraph()
    for each in nca_list:
        anc_path = nx.compose(color_path(each[0], 'green'),
                              color_path(each[1], 'yellow'))
        H = nx.compose(H, anc_path)

    for i in range(levels):
        H = expand_graph(H)

    for each in nca_list:
        H.node[each[0][0]]['color'] = 'red'  #color the nca different

    data = json_graph.dumps(H)
    return data
Code Example #24
File: utils.py Project: pmcgannon22/gma-ui
def analysis(request, msgs, group_info):
    members = group_info[u'members']
    member_map = request.session['member_map']

    (total_messages, total_likes, likes_given, likes_rec, like_graph, prank) = basics_count(msgs, [m[u'user_id'] for m in members])

    like_ratio, msg_percentage = {},{}
    total_group_msgs = float(group_info[u'messages'][u'count'])
    for member in members:
        m = member[u'user_id']
        msg_percentage[m] = float(total_messages[m])/total_group_msgs
        try:
            like_ratio[m] = float(likes_rec[m])/float(likes_given[m])
        except:
            like_ratio[m] = 0.0
    #pers = {member[u'user_id']: (1-msg_percentage[member[u'user_id']]) for member in members}
    #avg = sum(pers.values())/len(pers)
    #pers = {n: (pers[n] if n in pers else avg) for n in like_graph.nodes_iter()}
    #pagerank = nx.pagerank(like_graph, alpha=.95, personalization=pers)
    return GroupAnalysis(msgs_per=total_messages, likes_rec=likes_rec, likes_give=likes_given,
                    prank=prank,
                    msg_perc=msg_percentage, ratio=like_ratio, like_network=json_graph.dumps(like_graph))
Code Example #25
File: generators.py Project: gmoben/adviseme
    def get_json(self):
        from networkx.readwrite import json_graph
        from networkx.relabel import relabel_nodes
        import json

        mod_trees = []
        for t in self.trees:
            mapping = dict()
            for n in t.node.keys():
                if isinstance(n, Course):
                    mapping[n] = n.__unicode__()
            mod_tree = relabel_nodes(t, mapping)
            mod_trees.append(mod_tree)

        n_depth = max([self.max_depth(t) for t in self.trees])
        n_width = 0
        for nbd in [self.nodes_by_depth(t) for t in self.trees]:
            nbd_max = max([len(d) for d in nbd])
            n_width += nbd_max

        return json.dumps({'trees': [json.loads(json_graph.dumps(mt)) for mt in mod_trees],
                           'n_depth': n_depth, 'n_width': n_width})
Code Example #26
File: DatingGraph.py Project: malliwi88/DatingGraph
    def generateNXModel(self, outFile):
        nodeLookup = {}
        index = 1
        for r in self.relationship:
            if not nodeLookup.has_key(r['firstCelebrity']):
                self.nxg.add_node(index, {'name' : r['firstCelebrity']})
                nodeLookup[r['firstCelebrity']] = index
                index = index +  1
            if not nodeLookup.has_key(r['secondCelebrity']):
                self.nxg.add_node(index, {'name' : r['secondCelebrity']})
                nodeLookup[r['secondCelebrity']] = index
                index = index +  1
        for r in self.relationship:
           self.nxg.add_edge(nodeLookup[r['firstCelebrity']], 
                             nodeLookup[r['secondCelebrity']], 
                             {'verb' : r['verb'], 
                              'timeFrame' : r['timeFrame'], 
                              'relationType' : r['relationType'],
                              'level' : r['level']})

        # Dump Json for NetworkX graph
        with open(outFile + '.json','w') as fp:
            fp.write(json_graph.dumps(self.nxg,indent=4) + '\n')        
Code Example #27
def GraphtoCSV(nxGraph,
               node_keys=['id'],
               edge_keys=['source', 'target'],
               measures=None):
    """convert nxGraph to a node list and edge list
    :param node_keys: is a list of keys in node json
    :param edge_keys: is a list of keys in edge json
    :param measures: is a the name of a centrality measure in networkx, 
    e.g. 'betweenness_centrality', or any measure that returns scores for nodes
    returns two CSVs, a node list and edges list with node_keys
    and edge_keys, respectively as columns."""
    data = json.loads(json_graph.dumps(nxGraph))
    nodes = data.get('nodes')
    edges = data.get('links')
    # if measures:
    #     methods = nx.__dict__#{'betweeness_centrality': nx.betweenness_centrality}
    #     props= {}
    #     for m in measures:
    #         import pdb; pdb.set_trace()
    #         score = methods[m](nxGraph)
    #         props[m] = score
    #     for node in nodes:
    #         for prop in props:
    #             node[prop] = props[prop][node['id']]

    node_f = csv.writer(open("nodes.csv", "wb+"))
    node_row = node_keys
    if measures:
        node_row = node_row + measures
    node_f.writerow(node_row)
    for n in nodes:
        node_f.writerow([n.get(key) for key in node_row])
    edge_f = csv.writer(open("edges.csv", "wb+"))
    edge_row = edge_keys
    edge_f.writerow(edge_row)
    for e in edges:
        edge_f.writerow([e.get(key) for key in edge_row])
Code Example #28
def graphmltojson(graphfile, outfile):
    """
	Converts GraphML file to json while adding communities/modularity groups
	using python-louvain. JSON output is usable with D3 force layout.
	Usage:
	>>> python convert.py -i mygraph.graphml -o outfile.json
	"""

    G = nx.read_graphml(graphfile)

    #finds best community using louvain
    partition = community.best_partition(G)

    #adds partition/community number as attribute named 'modularitygroup'
    for n, d in G.nodes_iter(data=True):
        d['modularitygroup'] = partition[n]

    node_link = json_graph.node_link_data(G)
    json = json_graph.dumps(node_link)

    # Write to file
    fo = open(outfile, "w")
    fo.write(json)
    fo.close()
Code Example #29
File: convert.py Project: Purdom/dh-graphs
def graphmltojson(graphfile, outfile):
	"""
	Converts GraphML file to json while adding communities/modularity groups
	using python-louvain. JSON output is usable with D3 force layout.
	Usage:
	>>> python convert.py -i mygraph.graphml -o outfile.json
	"""
	
	G = nx.read_graphml(graphfile)	

	#finds best community using louvain
	partition = community.best_partition(G)

	#adds partition/community number as attribute named 'modularitygroup'
	for n,d in G.nodes_iter(data=True):
		d['modularitygroup'] = partition[n]

	node_link = json_graph.node_link_data(G)
	json = json_graph.dumps(node_link)
	
	# Write to file
	fo = open(outfile, "w")
	fo.write(json);
	fo.close()
Code Example #30
File: networkxtest.py Project: imuhata8ri/kemonotag
def save(G, fname):
    from networkx.readwrite import json_graph
    data = json_graph.dumps(G, sort_keys=True,indent=2)
    f = open(fname, 'w')
    #json.dump(data, f)
    f.write(data)
Code Example #31
 def graph(self, **kw):
     with closing(open('graph_cache.json', 'w')) as graph_file:
         foaf_graph = retrieve_foaf(FBTOKEN)                             
         foaf_graph_json = jg.dumps(foaf_graph)
         graph_file.write(foaf_graph_json)
         return foaf_graph_json
Code Example #32
                    DG,currid=gNodeAdd2(DG,tmpfinal,currid,assessment['assessment_method'],w)
                else:
                    DG,currid=gNodeAdd2(DG,tmpcont,currid,assessment['assessment_method'],w)

#--------
if format=='gexf':
    import networkx.readwrite.gexf as gf
    writer=gf.GEXFWriter(encoding='utf-8',prettyprint=True,version='1.1draft')
    writer.add_graph(DG)
    scraperwiki.utils.httpresponseheader("Content-Type", "text/xml")
    from xml.etree.cElementTree import tostring
    print tostring(writer.xml)
else: #format=='json'
    jdata = json_graph.tree_data(DG,root=1)#json_graph.node_link_data(DG)
    scraperwiki.utils.httpresponseheader("Content-Type", "text/json")
    print json_graph.dumps(jdata)

import scraperwiki, gviz_api

#Keep the API key [private - via http://blog.scraperwiki.com/2011/10/19/tweeting-the-drilling/
import os, cgi
try:
    qsenv = dict(cgi.parse_qsl(os.getenv("QUERY_STRING")))
    key=qsenv["KEY"]
    if 'progID' in qsenv: progID=qsenv['progID']
    else: progID='6'
    if 'full' in qsenv: full=qsenv['full']
    else: full=''
    if 'format' in qsenv: format=qsenv['format']
    else: format='json'
    if 'typ' in qsenv: typ=qsenv['typ']
    else: typ='prog'
    if 'awardID' in qsenv: awardID=qsenv['awardID']
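The fragment above serializes a tree with `json_graph.tree_data(DG, root=1)` before dumping it; on current NetworkX the dump side is plain `json.dumps`, and `tree_graph` reads the payload back. A small self-contained sketch:

import json
import networkx as nx
from networkx.readwrite import json_graph

DG = nx.DiGraph([(1, 2), (1, 3), (3, 4)])            # a directed tree rooted at node 1
payload = json.dumps(json_graph.tree_data(DG, root=1))
roundtrip = json_graph.tree_graph(json.loads(payload))
assert sorted(roundtrip.edges()) == sorted(DG.edges())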
Code Example #33
 def to_json(self):
     self.json["molecular_graph"] = json_graph.dumps(self.molecular_graph)
     self.json["type"] = self.type
     self.json["class"] = self.__class__.__name__
     self.json["atoms"] = [a.get_json() for a in self.get_atoms_as_list()]
Code Example #34
def make_results_array(parasenttok,myarray_ke,gr_ke_sample,\
                               paras,number_of_words,
                               countTrueSent,countAvSentLen,\
                               nodes,edges,gr_se_sample,edges_over_sents,\
                               ranked_global_weights,reorganised_array,threshold_ke,\
                               len_headings,\
                               countAssQSent,countTitleSent,\
                               b_last,len_body,len_refs,refsheaded,late_wc,appendixheaded,\
                               introheaded,i_first,i_last,i_toprank,countIntroSent,percent_body_i,\
                               conclheaded,c_first,c_last,c_toprank,countConclSent,percent_body_c,\
                               keylemmas,keywords,fivemostfreq,bigram_keyphrases,trigram_keyphrases,quadgram_keyphrases,\
                               scoresNfreqs,avfreqsum,\
                               kls_in_ass_q_long,sum_freq_kls_in_ass_q_long,\
                               kls_in_ass_q_short,sum_freq_kls_in_ass_q_short,\
                               kls_in_tb_index, sum_freq_kls_in_tb_index,\
                               all_bigrams):
    """
    Return the result of the text & sentence analytics
    @return: A dictionary containing various elements of the text analytics
    """
    essay = OrderedDict()
    '''
    @todo: What limit should we implement for the top ranked sentence? Hard-coded? Threshold-based? Parameter to process?
    '''
    top_ranked_global_weights = ranked_global_weights[:15]

    # Index sentence ID by rank
    mylist2 = {}
    for idx, val in enumerate(top_ranked_global_weights):
        mylist2[val[1]] = idx

    # list of lemmas of every sentence
    myLemmas = []
    for (a, b, c, d, e) in reorganised_array:
        myLemmas.append([x for (w, x) in e])

    # Restructure parasenttok with text,ID, structure tag (and score?)
    reorpar = []
    inc = 0
    for par in parasenttok:
        newpar = []
        for sent in par:
            newsent = {
                'text': sent,  ## sentence text
                'id': inc,  ## sentence ID
                'tag': reorganised_array[inc][2],  ## structural tag
                'lemma': myLemmas[inc]  ## sentence's lemmas
            }
            if inc in mylist2:
                newsent['rank'] = mylist2[inc]  ## rank if in top 15
            newpar.append(newsent)
            inc += 1
        reorpar.append(newpar)

    ### Add version of data structure
    essay['version'] = ANALYTICS_VERSION

    ### Add paragraph/sentence structure
    essay['parasenttok'] = parasenttok

    ### Add data on sentences
    se_data = OrderedDict()
    mylist2 = []
    for (a, b, c, d, e) in top_ranked_global_weights:
        mylist2.append((a, b, c))
    se_data['se_ranked'] = mylist2
    se_data['se_parasenttok'] = reorpar
    essay['se_data'] = se_data

    ### Add statistics on essay
    se_stats = OrderedDict()
    se_stats['paras'] = paras
    se_stats['len_body'] = len_body
    se_stats['len_headings'] = len_headings
    se_stats['all_sents'] = sum(w for w in [len(x) for x in parasenttok])
    se_stats['countTrueSent'] = countTrueSent
    se_stats['number_of_words'] = number_of_words
    se_stats['countAvSentLen'] = countAvSentLen
    se_stats['countAssQSent'] = countAssQSent  # new
    se_stats['countTitleSent'] = countTitleSent  # new
    essay['se_stats'] = se_stats

    se_graph = OrderedDict()
    se_graph['nodes'] = nodes
    se_graph['edges'] = edges
    se_graph['edges_over_sents'] = edges_over_sents
    essay['se_graph'] = se_graph

    ##se_sample_graph = OrderedDict()
    ##se_sample_graph['gr_se_sample'] = gr_se_sample
    ##essay['se_sample_graph'] = se_sample_graph
    essay['se_sample_graph'] = json_graph.dumps(gr_se_sample)

    body = OrderedDict()
    body['late_wc'] = late_wc  # new
    body['b_last'] = b_last  # new
    essay['body'] = body

    ### Add section feedback
    intro = OrderedDict()
    intro['introheaded'] = introheaded
    intro['i_first'] = i_first
    intro['i_last'] = i_last
    intro['countIntroSent'] = countIntroSent
    intro['percent_body_i'] = percent_body_i
    intro['i_toprank'] = i_toprank  # var name changed
    essay['intro'] = intro

    concl = OrderedDict()
    concl['conclheaded'] = conclheaded
    concl['c_first'] = c_first
    concl['c_last'] = c_last
    concl['countConclSent'] = countConclSent
    concl['percent_body_c'] = percent_body_c
    concl['c_toprank'] = c_toprank  # var name changed
    essay['concl'] = concl

    refs = OrderedDict()
    refs['len_refs'] = len_refs  # new
    refs['refsheaded'] = refsheaded  # var name changed
    essay['refs'] = refs

    appendix = OrderedDict()
    appendix['appendixheaded'] = appendixheaded  # new
    essay['appendix'] = appendix

    ke_data = OrderedDict()
    ke_data['myarray_ke'] = myarray_ke
    ke_data['fivemostfreq'] = fivemostfreq
    ke_data['keylemmas'] = keylemmas
    ke_data['threshold_ke'] = threshold_ke  # new
    ke_data['keywords'] = keywords
    ke_data['all_bigrams'] = all_bigrams
    ke_data['bigram_keyphrases'] = bigram_keyphrases
    ke_data['trigram_keyphrases'] = trigram_keyphrases
    ke_data['quadgram_keyphrases'] = quadgram_keyphrases
    ke_data['kls_in_ass_q_long'] = kls_in_ass_q_long
    ke_data['kls_in_ass_q_short'] = kls_in_ass_q_short
    ke_data['kls_in_tb_index'] = kls_in_tb_index  # new
    ke_data['scoresNfreqs'] = scoresNfreqs
    essay['ke_data'] = ke_data

    ##ke_sample_graph = OrderedDict()
    ##ke_sample_graph['gr_ke_sample'] = gr_ke_sample
    ##essay['ke_sample_graph'] = ke_sample_graph
    essay['ke_sample_graph'] = json_graph.dumps(gr_ke_sample)

    ke_stats = OrderedDict()
    ke_stats['avfreqsum'] = avfreqsum
    ke_stats['sum_freq_kls_in_ass_q_long'] = sum_freq_kls_in_ass_q_long  # var name changed
    ke_stats['sum_freq_kls_in_ass_q_short'] = sum_freq_kls_in_ass_q_short  # var name changed
    ke_stats['sum_freq_kls_in_tb_index'] = sum_freq_kls_in_tb_index  # new
    essay['ke_stats'] = ke_stats

    #print '\n\nThis is essay[ke_stats][bigram_keyphrases]\n'
    #print essay['ke_data']
    #print essay['ke_stats']
    #print essay['struc_feedback']
    #print essay['ranked']

    #pprint.pprint(essay)
    return essay
Code Example #35
File: dmlib.py Project: mabodo/MapReduceWords
def json_network():
    #returns the string of the json of the graph previously loaded
    return json_graph.dumps(GRAPH)
Code Example #36
File: disagree.py Project: jfainberg/AMRICA
def monolingual_main(args):
  """ Disagreement graphs for different annotations of a single sentence. """
  infile = codecs.open(args.infile, encoding='utf8')
  gold_aligned_fh = None
  if args.align_in:
    gold_aligned_fh = codecs.open(args.align_in, encoding='utf8')
  (json_fh, align_fh) = open_output_files(args)

  amrs_same_sent = []
  cur_id = ""
  while True:
    (amr_line, comments) = amr_metadata.get_amr_line(infile)
    cur_amr = None
    if amr_line:
      cur_amr = amr_metadata.AmrMeta.from_parse(amr_line, comments,
      consts_to_vars=(gold_aligned_fh != None or align_fh != None))
      get_sent_info(cur_amr.metadata)
      if 'annotator' not in cur_amr.metadata:
        cur_amr.metadata['annotator'] = ''
      if not cur_id:
        cur_id = cur_amr.metadata['id']

    if cur_amr is None or cur_id != cur_amr.metadata['id']:
      gold_amr = amrs_same_sent[0]
      test_amrs = amrs_same_sent[1:]
      if len(test_amrs) == 0:
        test_amrs = [gold_amr] # single AMR view case
        args.num_restarts = 1 # TODO make single AMR view more efficient
      smatchgraphs = hilight_disagreement(test_amrs, gold_amr,
        args.num_restarts, gold_aligned_fh=gold_aligned_fh)
      amr_graphs = get_disagreement_graphs(smatchgraphs, unmatch_dead_nodes=(gold_aligned_fh == None))
      gold_anno = gold_amr.metadata['annotator']
      sent = gold_amr.metadata['tok']

      if (args.verbose):
        print("ID: %s\n Sentence: %s\n gold anno: %s" % (cur_id, sent, gold_anno))

      for (ind, a) in enumerate(test_amrs):
        (g, score) = amr_graphs[ind]
        test_anno = a.metadata['annotator']
        if json_fh:
          json_fh.write(json_graph.dumps(g) + '\n')
        if align_fh:
          sg = smatchgraphs[ind][0]
          align_fh.write("""# ::id %s\n# ::tok %s\n# ::gold_anno %s\n# ::test_anno %s\n""" % \
            (cur_id, sent, gold_anno, test_anno))
          align_fh.write('\n'.join(sg.get_text_alignments()) + '\n\n')
        if (args.verbose):
          print("  annotator %s score: %d" % (test_anno, score))

        ag = nx.to_agraph(g)
        ag.graph_attr['label'] = sent
        ag.layout(prog=args.layout)
        ag.draw('%s/%s_annotated_%s_%s.png' % (args.outdir, cur_id, gold_anno, test_anno))

      amrs_same_sent = []
      if cur_amr is not None:
        cur_id = cur_amr.metadata['id']
      else:
        break

    amrs_same_sent.append(cur_amr)

  infile.close()
  gold_aligned_fh and gold_aligned_fh.close()
  close_output_files(json_fh, align_fh)
Code Example #37
def labeled_digraph2d3(graph, html_file_name='index.html'):
    """Export to SVG embedded in HTML, animated with d3.js

    Example
    =======
    From C{examples/transys/machine_examples.py} call:

    >>> m = garage_counter_with_state_vars()

    Then export to html:

    >>> m.save('index.html', 'html')

    See Also
    ========
    FSM, BA, Mealy

    @param graph: labeled graph to export
    @type graph: L{LabeledDiGraph}
    """
    file_path = inspect.getfile(inspect.currentframe())
    dir_path = os.path.dirname(os.path.abspath(file_path))

    d3_file_name = os.path.join(dir_path, 'd3.v3.min.js')
    d3_file = open(d3_file_name)
    d3_js = d3_file.read()

    s = """
    <!DOCTYPE html>
    <meta charset="utf-8">
    <style>

    .node {
      stroke: black;
      stroke-width: 1.5px;
    }

    .link {
      stroke: #999;
      stroke-opacity: .6;
    }

    .end-arrow {
        fill            : gray;
        stroke-width    : 1px;
    }

    </style>

    <script>
    """

    # embed d3.js to create single .html,
    # instead of bunch of files
    s += d3_js

    s += """
    </script>
    <body>

    <script>
    var width = 960,
        height = 500;

    var color = d3.scale.category20();

    var force = d3.layout.force()
        .charge(-120)
        .linkDistance(200)
        .size([width, height]);

    var svg = d3.select("body").append("svg")
        .attr("width", width)
        .attr("height", height);

    svg.append('svg:defs').append('svg:marker')
        .attr('id', 'end-arrow')
        .attr('viewBox', '0 -5 10 10')
        .attr('refX', 15)
        .attr('refY', 0)
        .attr('markerWidth', 4)
        .attr('markerHeight', 4)
        .attr('orient', 'auto')
      .append('svg:path')
        .attr('d', 'M0,-5L10,0L0,5')
        .attr('class', 'end-arrow');

    var graph = """

    # embed to avoid browser local file-loading restrictions
    try:
        s += json_graph.dumps(graph)
    except:
        # better error msg for numpy array
        import json
        data = json_graph.node_link_data(graph)
        s += json.dumps(data, default=lambda x: str(x))

    s += ';'

    s += """
    function draw(graph){
      force
          .nodes(graph.nodes)
          .links(graph.links)
          .start();

      var link = svg.append("svg:g").selectAll("path")
          .data(graph.links)
        .enter().append("svg:path")
          .attr("class", "link")
          .style("stroke-width", 10)
          .style("fill", "none")
          .style("marker-end", 'url(#end-arrow)');

      link.append("title")
          .text(function(d) {
          	return """

    # edge labels (shown when mouse on edge)
    if hasattr(graph, '_transition_label_def') and \
    hasattr(graph, '_transition_dot_label_format'):
        transition_label_def = graph._transition_label_def
        transition_label_format = graph._transition_dot_label_format
        s += _format_label(transition_label_def, transition_label_format)
    else:
        s += '" ";'

    s += """});

      var node = svg.selectAll(".node")
          .data(graph.nodes)
       .enter().append("g")
          .attr("class", "node")
          .call(force.drag);

      node.append("circle")
          .attr("r", 30)
          .style("fill", "#66CC00")

      node.append("text")
          .attr("dx", 0)
          .attr("dy", 0)
          .attr("fill", "red")
          .text(function(d) { return d.id});

      node.append("title")
          .style("fill", "gray")
          .text(function(d) { return """

    # edge labels (shown when mouse on edge)
    if hasattr(graph, '_state_label_def') and \
    hasattr(graph, '_state_dot_label_format'):
        state_label_def = graph._state_label_def
        state_label_format = graph._state_dot_label_format
        s += _format_label(state_label_def, state_label_format)
    else:
        s += '" ";'

    s += """});

      force.on("tick", function() {
        link.attr("d", function(d) {
            var dx = d.target.x -d.source.x,
                dy = d.target.y -d.source.y,
                dr = Math.sqrt(dx * dx + dy * dy);
            return "M" +
                d.source.x + "," +
                d.source.y + "A" +
                dr + "," + dr + " 0 0,1 " +
                d.target.x + "," +
                d.target.y;
        });

        node.attr("cx", function(d) { return d.x; })
            .attr("cy", function(d) { return d.y; })
            .attr("transform", function(d) {
                return "translate(" + d.x + "," + d.y + ")";
            });
      });
    };

    draw(graph)

    </script>
    </body>
    """

    html_file = open(html_file_name, 'w')
    html_file.write(s)
    return True
Code Example #38
    def do_GET(self):
        try:
            #TODO: handle "/" and return index.html
            if self.path == "/":
                self.path = "/index.html"

            pathparts = self.path.split("/")

            if pathparts[1] == "json":

                if pathparts[2] == "overlay":
                    try:
                        overlay_id = pathparts[3]
                    except IndexError:
                        data = json.dumps(self.server.get_anm().overlays())
                        self.send_response(200)
                        self.send_header('Content-type', 'text/json')
                        self.end_headers()
                        self.wfile.write(data)

                    overlay_graph = self.server.get_overlay(
                        overlay_id)._graph.copy()
                    graphics_graph = self.server.get_overlay(
                        "graphics")._graph.copy()
                    overlay_graph = ank.stringify_netaddr(overlay_graph)
                    # JSON writer doesn't handle 'id' already present in nodes
                    #for n in graph:
                    #del graph.node[n]['id']

                    #TODO: only update, don't over write if already set
                    for n in overlay_graph:
                        overlay_graph.node[n].update({
                            'x':
                            graphics_graph.node[n]['x'],
                            'y':
                            graphics_graph.node[n]['y'],
                            'asn':
                            graphics_graph.node[n]['asn'],
                            'device_type':
                            graphics_graph.node[n]['device_type'],
                        })

                    # remove leading space
                    x = (overlay_graph.node[n]['x'] for n in overlay_graph)
                    y = (overlay_graph.node[n]['y'] for n in overlay_graph)
                    x_min = min(x)
                    y_min = min(y)
                    for n in overlay_graph:
                        overlay_graph.node[n]['x'] += -x_min
                        overlay_graph.node[n]['y'] += -y_min

# strip out graph data
                    overlay_graph.graph = {}

                    data = json_graph.dumps(overlay_graph, indent=4)
                    self.send_response(200)
                    self.send_header('Content-type', 'text/json')
                    self.end_headers()
                    self.wfile.write(data)
                    return

                if pathparts[2] == "ip":
                    ip_tree = self.server.get_ip()

                    data = ip_tree.json()
                    self.send_response(200)
                    self.send_header('Content-type', 'text/json')
                    self.end_headers()
                    self.wfile.write(data)
                    return


# server up overlay
            else:
                #TODO: use os path join here
                stripped_path = self.path[
                    1:]  #TODO: See how BaseHTTPServer does this for example
                file_location = os.path.join(os.getcwd(), "ank_vis",
                                             stripped_path)
                #note that this potentially makes every file on your computer readable by the internet
                f = open(file_location, "r")
                print "Serving", stripped_path
                mimetype, encoding = mimetypes.guess_type(file_location)

                self.send_response(200)
                self.send_header('Content-type', mimetype)
                self.end_headers()
                self.wfile.write(f.read())
                f.close()
                return

            #TODO: if .js transfer as MIME type script

        except IOError:
            print "not found", self.path
Code Example #39
File: convert_graph.py Project: Ahsanzia/galaxytools
def write_json(graph, outfile):
    json_dict=json_graph.node_link_data(graph)
    json_string=json_graph.dumps(json_dict)
    outfile.write(json_string)
Code Example #40
 def to_json(self):
     self.json["molecular_graph"] = json_graph.dumps(self.molecular_graph)
     self.json["type"] = self.type
     self.json["class"]=self.__class__.__name__
     self.json["atoms"] = [a.get_json() for a in self.get_atoms_as_list()]
Code Example #41
def Write_JSON(g, output_path):
    filename = "etddata.json"
    with open(os.path.join(output_path,filename), "w") as f:
        data = json_graph.dumps(g)
        f.write(data)
	print "Wrote file at "+os.path.join(output_path,filename)
Code Example #42
def process_flight(segment_info, tail_number, aircraft_info={}, achieved_flight_record={},
                   requested=[], required=[], include_flight_attributes=True,
                   additional_modules=[], pre_flight_kwargs={}, force=False,
                   initial={}, reprocess=False):
    '''
    Processes the HDF file (segment_info['File']) to derive the required_params (Nodes)
    within python modules (settings.NODE_MODULES).

    Note: For Flight Data Services, the definitive API is located here:
        "PolarisTaskManagement.test.tasks_mask.process_flight"

    :param segment_info: Details of the segment to process
    :type segment_info: dict
    :param aircraft: Aircraft specific attributes
    :type aircraft: dict
    :param achieved_flight_record: See API Below
    :type achieved_flight_record: Dict
    :param requested: Derived nodes to process (dependencies will also be
        evaluated).
    :type requested: List of Strings
    :param required: Nodes which are required, otherwise an exception will be
        raised.
    :type required: List of Strings
    :param include_flight_attributes: Whether to include all flight attributes
    :type include_flight_attributes: Boolean
    :param additional_modules: List of module paths to import.
    :type additional_modules: List of Strings
    :param pre_flight_kwargs: Keyword arguments for the pre-flight analysis hook.
    :type pre_flight_kwargs: dict
    :param force: Ignore errors raised while deriving nodes.
    :type force: bool
    :param initial: Initial content for nodes to avoid reprocessing (excluding parameter nodes which are saved to the hdf).
    :type initial: dict
    :param reprocess: Force reprocessing of all Nodes (including derived Nodes already saved to the HDF file).

    :returns: See below:
    :rtype: Dict

    Sample segment_info
    --------------------
    {
        'File':  # Path to HDF5 file to process
        'Start Datetime':  # Datetime of the origin of the data (at index 0)
        'Segment Type': # segment type obtained from split segments e.g. START_AND_STOP
    }

    Sample aircraft_info
    --------------------
    {
        'Tail Number':  # Aircraft Registration
        'Identifier':  # Aircraft Ident
        'Manufacturer': # e.g. Boeing
        'Manufacturer Serial Number': #MSN
        'Model': # e.g. 737-808-ER
        'Series': # e.g. 737-800
        'Family': # e.g. 737
        'Frame': # e.g. 737-3C
        'Main Gear To Altitude Radio': # Distance in metres
        'Wing Span': # Distance in metres
    }

    Sample achieved_flight_record
    -----------------------------
    {
        # Simple values first, e.g. string, int, float, etc.
        'AFR Flight ID': # e.g. 1
        'AFR Flight Number': # e.g. 1234
        'AFR Type': # 'POSITIONING'
        'AFR Off Blocks Datetime': # datetime(2015,01,01,13,00)
        'AFR Takeoff Datetime': # datetime(2015,01,01,13,15)
        'AFR Takeoff Pilot': # 'Joe Bloggs'
        'AFR Takeoff Gross Weight': # weight in kg
        'AFR Takeoff Fuel': # fuel in kg
        'AFR Landing Datetime': # datetime(2015,01,01,18,45)
        'AFR Landing Pilot': # 'Joe Bloggs'
        'AFR Landing Gross Weight': # weight in kg
        'AFR Landing Fuel': # weight in kg
        'AFR On Blocks Datetime': # datetime(2015,01,01,19,00)
        'AFR V2': # V2 used at takeoff in kts
        'AFR Vapp': # Vapp used in kts
        'AFR Vref': # Vref used in kts
        # More complex data that needs to be looked up next:
        'AFR Takeoff Airport':  {
            'id': 4904, # unique id
            'name': 'Athens Intl Airport Elefterios Venizel',
            'code': {'iata': 'ATH', 'icao': 'LGAV'},
            'latitude': 37.9364,
            'longitude': 23.9445,
            'location': {'city': u'Athens', 'country': u'Greece'},
            'elevation': 266, # ft
            'magnetic_variation': 'E003186 0106',
            }
           },
        'AFR Landing Aiport': {
            'id': 1, # unique id
            'name': 'Athens Intl Airport Elefterios Venizel',
            'code': {'iata': 'ATH', 'icao': 'LGAV'},
            'latitude': 37.9364,
            'longitude': 23.9445,
            'location': {'city': u'Athens', 'country': u'Greece'},
            'elevation': 266, # ft
            'magnetic_variation': 'E003186 0106',
            }
           },
        'AFR Destination Airport': None, # if not required, or exclude this key
        'AFR Takeoff Runway': {
            'id': 1,
            'identifier': '21L',
            'magnetic_heading': 212.6,
            'strip': {
                'id': 1,
                'length': 13123,
                'surface': 'ASP',
                'width': 147},
            'start': {
                'elevation': 308,
                'latitude': 37.952425,
                'longitude': 23.970422},
            'end': {
                'elevation': 279,
                'latitude': 37.923511,
                'longitude': 23.943261},
            'glideslope': {
                'angle': 3.0,
                'elevation': 282,
                'latitude': 37.9473,
                'longitude': 23.9676,
                'threshold_distance': 999},
            'localizer': {
                'beam_width': 4.5,
                'elevation': 256,
                'frequency': 111100,
                'heading': 213,
                'latitude': 37.919281,
                'longitude': 23.939294},
            },
        'AFR Landing Runway': {
            'id': 1,
            'identifier': '21L',
            'magnetic_heading': 212.6,
            'strip': {
                'id': 1,
                'length': 13123,
                'surface': 'ASP',
                'width': 147},
            'start': {
                'elevation': 308,
                'latitude': 37.952425,
                'longitude': 23.970422},
            'end': {
                'elevation': 279,
                'latitude': 37.923511,
                'longitude': 23.943261},
            'glideslope': {
                'angle': 3.0,
                'elevation': 282,
                'latitude': 37.9473,
                'longitude': 23.9676,
                'threshold_distance': 999},
            'localizer': {
                'beam_width': 4.5,
                'elevation': 256,
                'frequency': 111100,
                'heading': 213,
                'latitude': 37.919281,
                'longitude': 23.939294},
            },
    }

    Sample Return
    -------------
    {
        'flight':[Attribute('name value')],
        'kti':[GeoKeyTimeInstance('index name latitude longitude')]
            if lat/long available
            else [KeyTimeInstance('index name')],
        'kpv':[KeyPointValue('index value name slice')]
    }

    sample flight Attributes:

    [
        Attribute('Takeoff Airport', {'id':1234, 'name':'Int. Airport'},
        Attribute('Approaches', [4567,7890]),
        ...
    ],

    '''
    
    hdf_path = segment_info['File']
    if 'Start Datetime' not in segment_info:
        import pytz
        segment_info['Start Datetime'] = datetime.utcnow().replace(tzinfo=pytz.utc)
    logger.info("Processing: %s", hdf_path)

    if aircraft_info:
        # Aircraft info has already been provided.
        logger.info(
            "Using aircraft_info dictionary passed into process_flight '%s'." %
            aircraft_info)
    else:
        aircraft_info = get_aircraft_info(tail_number)

    aircraft_info['Tail Number'] = tail_number

    # go through modules to get derived nodes
    node_modules = additional_modules + settings.NODE_MODULES
    derived_nodes = get_derived_nodes(node_modules)

    if requested:
        requested = \
            list(set(requested).intersection(set(derived_nodes)))
    else:
        # if requested isn't set, try using ALL derived_nodes!
        logger.info("No requested nodes declared, using all derived nodes")
        requested = derived_nodes.keys()

    # include all flight attributes as requested
    if include_flight_attributes:
        requested = list(set(
            requested + get_derived_nodes(
                ['analysis_engine.flight_attribute']).keys()))
    
    initial = process_flight_to_nodes(initial)
    for node_name in requested:
        initial.pop(node_name, None)

    # open HDF for reading
    with hdf_file(hdf_path) as hdf:
        hdf.start_datetime = segment_info['Start Datetime']
        if hooks.PRE_FLIGHT_ANALYSIS:
            logger.info("Performing PRE_FLIGHT_ANALYSIS action '%s' with options: %s",
                        hooks.PRE_FLIGHT_ANALYSIS.func_name, pre_flight_kwargs)
            hooks.PRE_FLIGHT_ANALYSIS(hdf, aircraft_info, **pre_flight_kwargs)
        else:
            logger.info("No PRE_FLIGHT_ANALYSIS actions to perform")
        # Track nodes.
        param_names = hdf.valid_lfl_param_names() if reprocess else hdf.valid_param_names()
        node_mgr = NodeManager(
            segment_info, hdf.duration, param_names,
            requested, required, derived_nodes, aircraft_info,
            achieved_flight_record)
        # calculate dependency tree
        process_order, gr_st = dependency_order(node_mgr, draw=False)
        if settings.CACHE_PARAMETER_MIN_USAGE:
            # find params used more than
            for node in gr_st.nodes():
                if node in node_mgr.derived_nodes:
                    # this includes KPV/KTIs but they'll be ignored by HDF
                    qty = len(gr_st.predecessors(node))
                    if qty > settings.CACHE_PARAMETER_MIN_USAGE:
                        hdf.cache_param_list.append(node)
            logger.info("HDF set to cache parameters: %s",
                        hdf.cache_param_list)

        # derive parameters
        ktis, kpvs, sections, approaches, flight_attrs = \
            derive_parameters(hdf, node_mgr, process_order, params=initial, force=force)

        # geo locate KTIs
        ktis = geo_locate(hdf, ktis)
        ktis = _timestamp(segment_info['Start Datetime'], ktis)

        # geo locate KPVs
        kpvs = geo_locate(hdf, kpvs)
        kpvs = _timestamp(segment_info['Start Datetime'], kpvs)

        # Store version of FlightDataAnalyser
        hdf.analysis_version = __version__
        # Store dependency tree
        hdf.dependency_tree = json_graph.dumps(gr_st)
        # Store aircraft info
        hdf.set_attr('aircraft_info', aircraft_info)
        hdf.set_attr('achieved_flight_record', achieved_flight_record)

    return {
        'flight': flight_attrs,
        'kti': ktis,
        'kpv': kpvs,
        'approach': approaches,
        'phases': sections,
    }
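
The dependency tree stored above is just the JSON string returned by json_graph.dumps(gr_st), so it can be turned back into a NetworkX graph with the matching json_graph.loads helper (these string helpers exist only in the older NetworkX 1.x releases that provide dumps; later releases use node_link_data/node_link_graph with the standard json module). A minimal round-trip sketch, with a toy graph standing in for gr_st:

import networkx as nx
from networkx.readwrite import json_graph

# Toy stand-in for the dependency tree graph (gr_st) derived above.
gr_st = nx.DiGraph()
gr_st.add_edge('Airspeed', 'Airspeed Max')

# Same string that is stored as hdf.dependency_tree above.
serialised = json_graph.dumps(gr_st)

# Rebuild the graph from the stored string.
restored = json_graph.loads(serialised)
print(restored.nodes())
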
Code example #43
def local_mega_graph():
    H = json_graph.load(open("static/local_instance.json"))
    data = json_graph.dumps(H)
    return data
Code example #44
File: topic_tree.py  Project: jmiguelv/bacalhau
 def to_json(self, filepath):
     """Serializes the TopicTree to JSON Graph format."""
     json_file = open(filepath, 'w')
     json_file.write(json_graph.dumps(self))
     json_file.close()
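
A brief usage sketch (the TopicTree class itself is not shown here, so a plain NetworkX graph stands in for it; the file name is illustrative): to_json writes the node-link style JSON that json_graph.dumps emits, so the resulting file can be inspected, or handed to a front end, with nothing more than the standard json module.

import json

import networkx as nx
from networkx.readwrite import json_graph

# Stand-in for a TopicTree instance (assumed to behave like a NetworkX graph).
tree = nx.Graph()
tree.add_edge('topic', 'document')

# Equivalent of tree.to_json('topic_tree.json').
with open('topic_tree.json', 'w') as json_file:
    json_file.write(json_graph.dumps(tree))

# The file holds plain JSON with 'nodes' and 'links' lists.
with open('topic_tree.json') as json_file:
    data = json.load(json_file)
print(len(data['nodes']), len(data['links']))
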
Code example #45
def process_flight(hdf_path,
                   tail_number,
                   aircraft_info={},
                   start_datetime=None,
                   achieved_flight_record={},
                   requested=[],
                   required=[],
                   include_flight_attributes=True,
                   additional_modules=[]):
    '''
    Processes the HDF file (hdf_path) to derive the required_params (Nodes)
    within python modules (settings.NODE_MODULES).

    Note: For Flight Data Services, the definitive API is located here:
        "PolarisTaskManagement.test.tasks_mask.process_flight"

    :param hdf_path: Path to HDF File
    :type hdf_path: String
    :param tail_number: Aircraft registration (tail number), used to look up
        aircraft info when aircraft_info is not provided.
    :type tail_number: String
    :param aircraft_info: Aircraft specific attributes
    :type aircraft_info: dict
    :param start_datetime: Datetime of the origin of the data (at index 0)
    :type start_datetime: Datetime
    :param achieved_flight_record: See API Below
    :type achieved_flight_record: Dict
    :param requested: Derived nodes to process (dependencies will also be
        evaluated).
    :type requested: List of Strings
    :param required: Nodes which are required, otherwise an exception will be
        raised.
    :type required: List of Strings
    :param include_flight_attributes: Whether to include all flight attributes
    :type include_flight_attributes: Boolean
    :param additional_modules: List of module paths to import.
    :type additional_modules: List of Strings

    :returns: See below:
    :rtype: Dict

    Sample aircraft_info
    --------------------
    {
        'Tail Number':  # Aircraft Registration
        'Identifier':  # Aircraft Ident
        'Manufacturer': # e.g. Boeing
        'Manufacturer Serial Number': #MSN
        'Model': # e.g. 737-808-ER
        'Series': # e.g. 737-800
        'Family': # e.g. 737
        'Frame': # e.g. 737-3C
        'Main Gear To Altitude Radio': # Distance in metres
        'Wing Span': # Distance in metres
    }

    Sample achieved_flight_record
    -----------------------------
    {
        # Simple values first, e.g. string, int, float, etc.
        'AFR Flight ID': # e.g. 1
        'AFR Flight Number': # e.g. 1234
        'AFR Type': # 'POSITIONING'
        'AFR Off Blocks Datetime': # datetime(2015, 1, 1, 13, 0)
        'AFR Takeoff Datetime': # datetime(2015, 1, 1, 13, 15)
        'AFR Takeoff Pilot': # 'Joe Bloggs'
        'AFR Takeoff Gross Weight': # weight in kg
        'AFR Takeoff Fuel': # fuel in kg
        'AFR Landing Datetime': # datetime(2015, 1, 1, 18, 45)
        'AFR Landing Pilot': # 'Joe Bloggs'
        'AFR Landing Gross Weight': # weight in kg
        'AFR Landing Fuel': # weight in kg
        'AFR On Blocks Datetime': # datetime(2015, 1, 1, 19, 0)
        'AFR V2': # V2 used at takeoff in kts
        'AFR Vapp': # Vapp used in kts
        'AFR Vref': # Vref used in kts
        # More complex data that needs to be looked up next:
        'AFR Takeoff Airport':  {
            'id': 4904, # unique id
            'name': 'Athens Intl Airport Elefterios Venizel',
            'code': {'iata': 'ATH', 'icao': 'LGAV'},
            'latitude': 37.9364,
            'longitude': 23.9445,
            'location': {'city': u'Athens', 'country': u'Greece'},
            'elevation': 266, # ft
            'magnetic_variation': 'E003186 0106',
            },
        'AFR Landing Airport': {
            'id': 1, # unique id
            'name': 'Athens Intl Airport Elefterios Venizel',
            'code': {'iata': 'ATH', 'icao': 'LGAV'},
            'latitude': 37.9364,
            'longitude': 23.9445,
            'location': {'city': u'Athens', 'country': u'Greece'},
            'elevation': 266, # ft
            'magnetic_variation': 'E003186 0106',
            },
        'AFR Destination Airport': None, # if not required, or exclude this key
        'AFR Takeoff Runway': {
            'id': 1,
            'identifier': '21L',
            'magnetic_heading': 212.6,
            'strip': {
                'id': 1,
                'length': 13123,
                'surface': 'ASP',
                'width': 147},
            'start': {
                'elevation': 308,
                'latitude': 37.952425,
                'longitude': 23.970422},
            'end': {
                'elevation': 279,
                'latitude': 37.923511,
                'longitude': 23.943261},
            'glideslope': {
                'angle': 3.0,
                'elevation': 282,
                'latitude': 37.9473,
                'longitude': 23.9676,
                'threshold_distance': 999},
            'localizer': {
                'beam_width': 4.5,
                'elevation': 256,
                'frequency': 111100,
                'heading': 213,
                'latitude': 37.919281,
                'longitude': 23.939294},
            },
        'AFR Landing Runway': {
            'id': 1,
            'identifier': '21L',
            'magnetic_heading': 212.6,
            'strip': {
                'id': 1,
                'length': 13123,
                'surface': 'ASP',
                'width': 147},
            'start': {
                'elevation': 308,
                'latitude': 37.952425,
                'longitude': 23.970422},
            'end': {
                'elevation': 279,
                'latitude': 37.923511,
                'longitude': 23.943261},
            'glideslope': {
                'angle': 3.0,
                'elevation': 282,
                'latitude': 37.9473,
                'longitude': 23.9676,
                'threshold_distance': 999},
            'localizer': {
                'beam_width': 4.5,
                'elevation': 256,
                'frequency': 111100,
                'heading': 213,
                'latitude': 37.919281,
                'longitude': 23.939294},
            },
    }

    Sample Return
    -------------
    {
        'flight':[Attribute('name value')],
        'kti':[GeoKeyTimeInstance('index name latitude longitude')]
            if lat/long available
            else [KeyTimeInstance('index name')],
        'kpv':[KeyPointValue('index value name slice')]
    }

    sample flight Attributes:

    [
        Attribute('Takeoff Airport', {'id':1234, 'name':'Int. Airport'}),
        Attribute('Approaches', [4567,7890]),
        ...
    ],

    '''
    if start_datetime is None:
        import pytz
        start_datetime = datetime.utcnow().replace(tzinfo=pytz.utc)
    logger.info("Processing: %s", hdf_path)

    if aircraft_info:
        # Aircraft info has already been provided.
        logger.info(
            "Using aircraft_info dictionary passed into process_flight '%s'." %
            aircraft_info)
    else:
        aircraft_info = get_aircraft_info(tail_number)

    aircraft_info['Tail Number'] = tail_number

    # go through modules to get derived nodes
    node_modules = additional_modules + settings.NODE_MODULES
    derived_nodes = get_derived_nodes(node_modules)

    if requested:
        requested = \
            list(set(requested).intersection(set(derived_nodes)))
    else:
        # if requested isn't set, try using ALL derived_nodes!
        logger.info("No requested nodes declared, using all derived nodes")
        requested = derived_nodes.keys()

    # include all flight attributes as requested
    if include_flight_attributes:
        requested = list(
            set(requested +
                get_derived_nodes(['analysis_engine.flight_attribute']).keys())
        )

    # open HDF for reading
    with hdf_file(hdf_path) as hdf:
        hdf.start_datetime = start_datetime
        if hooks.PRE_FLIGHT_ANALYSIS:
            logger.info("Performing PRE_FLIGHT_ANALYSIS actions: %s",
                        hooks.PRE_FLIGHT_ANALYSIS.func_name)
            hooks.PRE_FLIGHT_ANALYSIS(hdf, aircraft_info)
        else:
            logger.info("No PRE_FLIGHT_ANALYSIS actions to perform")
        # Track nodes. Assume that all params in HDF are from LFL(!)
        node_mgr = NodeManager(start_datetime, hdf.duration,
                               hdf.valid_param_names(), requested, required,
                               derived_nodes, aircraft_info,
                               achieved_flight_record)
        # calculate dependency tree
        process_order, gr_st = dependency_order(node_mgr, draw=False)
        if settings.CACHE_PARAMETER_MIN_USAGE:
            # find params used more than CACHE_PARAMETER_MIN_USAGE times and cache them
            for node in gr_st.nodes():
                if node in node_mgr.derived_nodes:
                    # this includes KPV/KTIs but they'll be ignored by HDF
                    qty = len(gr_st.predecessors(node))
                    if qty > settings.CACHE_PARAMETER_MIN_USAGE:
                        hdf.cache_param_list.append(node)
            logger.info("HDF set to cache parameters: %s",
                        hdf.cache_param_list)

        # derive parameters
        kti_list, kpv_list, section_list, approach_list, flight_attrs = \
            derive_parameters(hdf, node_mgr, process_order)

        # geo locate KTIs
        kti_list = geo_locate(hdf, kti_list)
        kti_list = _timestamp(start_datetime, kti_list)

        # geo locate KPVs
        kpv_list = geo_locate(hdf, kpv_list)
        kpv_list = _timestamp(start_datetime, kpv_list)

        # Store version of FlightDataAnalyser
        hdf.analysis_version = __version__
        # Store dependency tree
        hdf.dependency_tree = json_graph.dumps(gr_st)
        # Store aircraft info
        hdf.set_attr('aircraft_info', aircraft_info)
        hdf.set_attr('achieved_flight_record', achieved_flight_record)

    return {
        'flight': flight_attrs,
        'kti': kti_list,
        'kpv': kpv_list,
        'approach': approach_list,
        'phases': section_list,
    }
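
A minimal driver sketch for the signature documented above. The HDF file name, tail number and requested node names are purely illustrative, and the module-level imports this function relies on (settings, hooks, hdf_file, NodeManager and friends) are assumed to be available as in its home module.

results = process_flight(
    'flight.hdf5',                  # hdf_path (illustrative)
    'G-ABCD',                       # tail_number (illustrative)
    requested=['FDR Takeoff Runway', 'Altitude Max'],
)

# The return value follows the Sample Return structure above.
for attribute in results['flight']:
    print(attribute)
print('%d KTIs, %d KPVs' % (len(results['kti']), len(results['kpv'])))
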
Code example #46
File: mongodb_interface.py  Project: venkatvi/EYB
			B.add_edges_from([(category, author)])

	for category, cookbooks in categoryCookBook.iteritems():
		if not 'degree' in B.node[category]:
			B.node[category]['degree'] = 1		
		else:
			B.node[category]['degree'] += 1
		for cookbook in cookbooks:
			if not 'degree' in B.node[cookbook]:
				B.node[cookbook]['degree'] = 1
			else:
				B.node[cookbook]['degree'] += 1
			B.add_edges_from([(category, cookbook)])

	for cookbook, author in cookBookAuthor.iteritems():
		if not 'degree' in B.node[cookbook]:
			B.node[cookbook]['degree'] = 1		
		else:
			B.node[cookbook]['degree'] += 1
		if not 'degree' in B.node[author]:
			B.node[author]['degree'] = 1
		else:
			B.node[author]['degree'] += 1
		B.add_edges_from([(cookbook, author)])


	json_file = options.rootPath + "/coquere/ingredientNets/data/"+options.cuisine+"_dataStats.json"	
	dumps = json_graph.dumps(B)
	with open(json_file, 'w') as file:
		file.write(dumps)		
Code example #47
 def to_json(cls, g):
     return json_graph.dumps(g)