Example #1
def generateProfToProfGraph(profs,professionsCat,file_prefix):
	print "graph prof to prof"
	dotString=""
	idEdge=0
	
	if export_gexf :
		gexf_file=gexf.Gexf("Paul Girard medialab Sciences Po","IEP professers linked by common institutions "+file_prefix)
		graph=gexf_file.addGraph("undirected","static")
		
		for name,formations,professions,id in profs :
			graph.addNode(str(id),name)
		
		
	for prof1, prof2 in [(p1,p2) for i,p1 in enumerate(profs) for p2 in profs[i:] if p1!=p2] :
		weight=0
		labels=[]
		# for every profession of prof 1
		for profID in prof1[2] :
			# if the profession also appears in prof2's professions
			if profID in prof2[2] :
				# increment the edge weight and add the profession to the label
				weight+=1
				labels.append(professionsCat[profID][3] if not professionsCat[profID][3]=="" else professionsCat[profID][2])
		# if we found common professions
		if weight>0 :
			# add an edge
			if export_gexf  :
				graph.addEdge(idEdge,str(prof1[3]),str(prof2[3]),weight=str(weight),label=" | ".join(labels))
			if export_dot :
				dotString+=getDotLinkString(str(prof1[3])+"-"+prof1[0],str(prof2[3])+"-"+prof2[0],str(weight)," | ".join(labels))
			idEdge+=1
	
Example #2
    def to_gexf(self):
        edges = set()
        champ = self.champ
        k = self.K
        gexf_file = gexf.Gexf('Graph similarity', champ + str(k))
        gph = gexf_file.addGraph("undirected", "static", 'gr')
        intensAtt = gph.addNodeAttribute("intensity", "0.0", "float")
        clussAtt = gph.addNodeAttribute("cluster", "100", "int")

        with open(
                '../data/graph_{}_{}/user_names_info.txt'.format(
                    champ, str(k)), 'r') as f:
            for line in f:
                line = line[:-1]
                line = line.split(',')
                n = gph.addNode(line[0], line[1])
                #, attributes={'intensity':line[2], 'cluster':line[3]})
                n.addAttribute(intensAtt, line[2])
                n.addAttribute(clussAtt, line[3])

        with open('../data/graph_{}_{}/graph.txt'.format(champ, str(k)),
                  'r') as f:
            eid = 0
            for line in f:
                line = line[:-1]
                line = line.split(';')
                u1 = line[0]
                for u2, val in [el.split(',') for el in line[1:]]:
                    if (u1, u2) not in edges and (u2, u1) not in edges:
                        eid += 1
                        edges.add((u1, u2))
                        gph.addEdge(eid, u1, u2, weight=val)

        file = open('./graph_{}_{}/graphe.gexf'.format(champ, str(k)), 'wb')
        gexf_file.write(file)
Example #3
def generate_timely_graph(visited: List[str], string2candidate: Dict[str, CandidateFeature]):
	#generate id
	id2candidate: Dict[str, int] = {}
	for counter in range(len(visited)):
		id2candidate[visited[counter]] = counter

	import gexf

	gexf = gexf.Gexf('Felix Neutatz', '28-11-2012')
	# make a directed dynamic graph
	graph = gexf.addGraph('directed', 'dynamic', '28-11-2012')
	edge_counter = 0
	current_time = 0

	id2candidate['__root__'] = -1
	graph.addNode(-1, '__root__', start=str(0.0))

	for v_i in range(len(visited)):
		id2candidate[visited[v_i]] = v_i
		graph.addNode(v_i, visited[v_i], start=str(current_time))

		current_candidate = string2candidate[visited[v_i]]
		current_time += string2candidate[str(current_candidate)].runtime_properties['execution_time']
		if not isinstance(current_candidate, RawFeature):
			for p in current_candidate.parents:
				graph.addEdge(edge_counter, id2candidate[str(p)], id2candidate[str(current_candidate)], start=str(current_time))
				edge_counter += 1
		else:
			graph.addEdge(edge_counter, id2candidate['__root__'], id2candidate[str(current_candidate)], start=str(current_time))
			edge_counter += 1

	gexf.write(open('/tmp/dynamic_graph.gexf', "wb+"))
Example #4
    def get_gexf_groupsonly(self, groups, author="", title=""):
        g = gexf.Gexf(author, title)
        graph = g.addGraph("undirected", "static", "groups")
        nb_members_att = graph.addNodeAttribute("nb_members", "0")

        # add nodes
        for group in groups.values():

            graph.addNode(group.id,
                          group.name).addAttribute(nb_members_att,
                                                   str(group.nb_members))

        for (k, v) in self.iteritems():
            (id1, id2) = k.split("_")
            # add edge
            graph.addEdge(k, id1, id2, str(len(v)))

        return g
Example #5
def write_gexf(r_g, w, uinfo, champ, k):
    edges = set()
    file = open('../data/graph_{}_{}/graphe_reduit.gexf'.format(champ, k),
                'wb')
    gexf_file = gexf.Gexf('Reduced Graph similarity', champ + str(k))
    gph = gexf_file.addGraph("undirected", "static", 'gr')
    intensAtt = gph.addNodeAttribute("intensity", "0.0", "float")
    clusAtt = gph.addNodeAttribute("cluster", "100", "int")
    for k, v in uinfo.items():
        n = gph.addNode(k, v[0])
        n.addAttribute(intensAtt, str(v[1]))
        n.addAttribute(clusAtt, str(v[2]))
    eid = 0
    for (k, v) in r_g:
        if (k, v) not in edges and (v, k) not in edges:
            edges.add((k, v))
            gph.addEdge(eid, k, v, weight=w[eid])
        eid += 1
    gexf_file.write(file)
Example #6
		file.write("digrpah output {\n"+dotString+"}")
		
		
################### INST TO INST #########################
#
#
##########################################################
def generateInstToInstGraph(profs,professionsCat,file_prefix):
	print "graph inst to inst"
	dotString=""
	idEdge=0
	
	if export_gexf :
		gexf_file=gexf.Gexf("Paul Girard medialab Sciences Po","Institutions linked by common IEP professers "+file_prefix)
		graph=gexf_file.addGraph("undirected","static")
		idAttInstCat2=graph.addNodeAttribute("cat2","","String")
		idAttInstCat1=graph.addNodeAttribute("cat1","","String")
		
		for cat,group1,group2,name,id in professionsCat.values() :
			n=graph.addNode(str(id),name if not name=="" else group2)
			n.addAttribute(idAttInstCat2,group2)
			n.addAttribute(idAttInstCat1,group1)
	
	
	for inst1,inst2 in [(inst1,inst2) for i,inst1 in enumerate(professionsCat.values()) for inst2 in professionsCat.values()[i:] if inst1!=inst2] :
		weight=0
		labels=[]
		idinst1=inst1[4]
		idinst2=inst2[4]
		
		# for every professor
		for prof in profs :
			if idinst1 in prof[2] and idinst2 in prof[2] :
			# if both inst1 and inst2 appear in the prof's professions
				# increment the edge weight and add the prof to the label
				weight+=1
				labels.append(prof[0])
		# if we found common professors
		if weight>0 :
			# add an edge
			node1=inst1[3] if not inst1[3]=="" else inst1[2]
			node2=inst2[3] if not inst2[3]=="" else inst2[2]
Example #7
    def get_gexf_groups_members(self, groups, members, author="", title=""):
        g = gexf.Gexf(author, title)
        graph = g.addGraph("undirected", "static", "groups members")
        nb_members_att = graph.addNodeAttribute("nb_members", "1")
        type_att = graph.addNodeAttribute("type", "group", "String")

        # add nodes
        for group in groups.values():
            n = graph.addNode("group_" + group.id, group.name)
            n.addAttribute(nb_members_att, str(group.nb_members))
            n.addAttribute(type_att, "group")

        for member in members.values():
            n = graph.addNode("member_" + member.id, member.name)
            n.addAttribute(type_att, "member")

            for group_id in member:
                graph.addEdge(group_id + "_" + member.id,
                              "member_" + member.id, "group_" + group_id)

        return g
Example #8
    def export_graph(self, filename=None):
        pop = self._poplist.get(T=0)
        gexf_elt = gexf.Gexf('William Schueller', 'Naming Games AL')
        G = gexf_elt.addGraph('undirected', 'dynamic',
                              'meaning space exploration')

        def color_of_node(pop, m):
            nag = 0
            for ag in pop._agentlist:
                if m in ag._vocabulary.get_known_meanings():
                    nag += 1
                ag._vocabulary.del_cache()
            val = nag / float(len(pop._agentlist))
            if val == 0:
                return (1, 0, 0)
            elif val == 1.:
                return (0, 1, 0)
            else:
                return (1 - val, 1 - val, 1)

        def transform_color(col_val):
            if col_val == 0:
                return 0
            elif col_val == 1.:
                return 1
            elif 0 < col_val < 1.:
                return 0.5 + col_val * 0.3

        pop = self._poplist.get(T=0)
        mG = pop.env.meaning_graph
        node_list = mG.nodes()
        edge_list = mG.edges()
        for m in node_list:
            G.addNode(m, m)
        e_id = 0
        for e in edge_list:
            G.addEdge(e_id, *e)
            e_id += 1
        id_col = G.addNodeAttribute('node_color',
                                    '',
                                    mode='dynamic',
                                    type='string',
                                    force_id='color')
        id_col2 = G.addNodeAttribute('colorfloat',
                                     '',
                                     mode='dynamic',
                                     type='float')
        id_col3 = G.addNodeAttribute('4color',
                                     '',
                                     mode='dynamic',
                                     type='float')
        id_col4 = G.addNodeAttribute('srtheo',
                                     '',
                                     mode='dynamic',
                                     type='float')
        for t_index in range(len(self._T) - 1):
            t = self._T[t_index + 1]
            t_m = self._T[t_index]
            lt = np.log(self._T[t_index + 1])
            lt_m = np.log(self._T[t_index])
            pop = self._poplist.get(T=t)
            t1 = t_m
            t2 = t
            for m in node_list:
                col = color_of_node(pop, m)
                colsrtheo = ngmeth.srtheo(pop=pop, m=m)
                color = str(colors.rgb2hex(col))
                #color = str(col)
                #color = str(col[0])
                G._nodes[m].addAttribute(
                    id=id_col,
                    value=color)  #,start=str(float(t_m)),end=str(float(t)))
                G._nodes[m].addAttribute(id=id_col2,
                                         value=str(1 - col[0]),
                                         start=str(float(t1)),
                                         end=str(float(t2)))
                G._nodes[m].addAttribute(id=id_col3,
                                         value=str(transform_color(1 -
                                                                   col[0])),
                                         start=str(float(t1)),
                                         end=str(float(t2)))
                G._nodes[m].addAttribute(id=id_col4,
                                         value=str(colsrtheo),
                                         start=str(float(t1)),
                                         end=str(float(t2)))
        if filename is None:
            filename = str(uuid.uuid1())
        if len(filename) < 5 or filename[-5:] != '.gexf':
            filename = filename + '.gexf'
        with open(filename, "w") as output_file:
            output_file.write("<?xml version='1.0' encoding='utf-8'?>\n")
            gexf_elt.write(output_file)
Example #9
#!/usr/bin/python

# Converts Adrian's graph format to the GEXF dynamic graph format so it can be
# imported by Gephi and similar packages.
#
# Uses pygexf (can be installed by pip)
# https://pythonhosted.org/pygexf/users.html
#

import gexf

src_file = "../simulations/data/graphs/seattle-busses-contacts-86400h-1180n.txt"
out_file = "../simulations/data/graphs/seattle-busses-contacts-86400h-1180n.gexf"

ge = gexf.Gexf("guille", "Seattle bus contacts")
graph = ge.addGraph("undirected", "dynamic", "seattle")

#nodes from 0 to 1179

for node in range(1180):
    graph.addNode(str(node), str(node))

print("Graph created, reading edges...")

with open(src_file, "r") as f:
    edge_id = 0
    #skip header
    next(f)
    for line in f:
        n1, n2, start_time, duration = line.split(":")
        end_time = int(start_time) + int(duration)
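        # The example is truncated here; the lines below are a minimal sketch of
        # the remaining steps, reusing only calls already shown in these examples
        # (addEdge with start/end times, Gexf.write on an open file object).
        graph.addEdge(edge_id, n1, n2, start=start_time, end=str(end_time))
        edge_id += 1

print("Edges read, writing GEXF...")
with open(out_file, "wb") as f_out:
    ge.write(f_out)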
Example #10
import csv
import gexf
import networkx

monthreader = csv.reader(
    open(r"C:\Users\CRAZY\PycharmProjects\Small_Projects\NMProject\MonthData.csv", "rb"))

rowno = 0
for row in monthreader:
    if rowno == 0:
        rowno += 1
        continue
    if rowno == 1:
        rowno += 1
        month = row[2]
        G = networkx.DiGraph()
        G.add_node(row[0])
        G.add_node(row[1])
        G.add_edge(row[0], row[1], weight=row[3])
    else:
        if row[2] == month:
            if not G.has_node(row[0]):
                G.add_node(row[0])
            G.add_node(row[1])
            G.add_edge(row[0], row[1], weight=row[3])

gexf = gexf.Gexf('Your Name','28-11-2012')
#make an undirected dynamic graph
graph = gexf.addGraph('undirected','dynamic','28-11-2012',timeformat='date')
#you add nodes with a unique id
graph.addNode(Source,"source_id")
#make edge with unique id, the edge has time duration from start to end
graph.addEdge(edge_id,Source,Target,start = Date , end = Date)
#write the gexf format to fileout
gexf.write(fileOut)
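A self-contained sketch of the calls outlined above, with concrete placeholder values; the ids, labels, times and the output filename are illustrative assumptions, and only pygexf calls already used in these examples appear:

import gexf

g = gexf.Gexf('Your Name', 'dynamic graph example')
graph = g.addGraph('undirected', 'dynamic', 'example')
# nodes are added with a unique id, a label and the time they appear
graph.addNode("n1", "node one", start=str(0.0))
graph.addNode("n2", "node two", start=str(0.0))
# an edge with a unique id, visible between its start and end times
graph.addEdge(0, "n1", "n2", start=str(1.0), end=str(5.0))
# write the GEXF XML to an open file object
with open("example_dynamic.gexf", "wb") as fileOut:
    g.write(fileOut)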
Example #11
    print "left movie %s of %s" % (i, len(movs))
    f1 = movieFeatures(m1)
    if not f1:
        continue
    for m2 in movs[i + 1:]:
        print "  right movie %s" % m2
        f2 = movieFeatures(m2)
        if not f2:
            continue
        intersect = len(f1.intersection(f2))
        union = len(f1.union(f2))
        frac = float(intersect) / union if union else 0
        shared[(m1, m2)] = (decimal.Decimal("%.4f" % frac), intersect)

edgeId = 0
doc = gexf.Gexf("drewp", "tropes")
out = doc.addGraph("undirected", "static", "common tropes")
for (m1, m2), (frac, count) in shared.items():
    n1 = out.addNode(m1, movieName(m1))
    n2 = out.addNode(m2, movieName(m2))
    if count:
        out.addEdge(edgeId, m1, m2, weight=count)
    edgeId += 1
doc.write(open("out.gexf", "w"))

d3graph = {"nodes": [], "links": []}
for m in movs:
    d3graph['nodes'].append({'name': movieName(m)})
for (m1, m2), (frac, count) in shared.items():
    if count:
        d3graph['links'].append({
Example #12
Convert a graph in Json Graph Streaming format to a dynamic GEXF graph. 
Usage: json2gexf.py json_file gexf_file 
'''

try:
    import json
except ImportError:
    try:
        import simplejson as json
    except ImportError:
        raise ImportError("Requires either simplejson or Python 2.6!")

import gexf
import sys

xml = gexf.Gexf("pygephi - Graph Streaming",
                "https://github.com/panisson/pygephi_graphstreaming")
graph = xml.addGraph("undirected", "dynamic", "a Json generated graph")

json_file_name = sys.argv[-2]
json_file = open(json_file_name)
file_content = json_file.read()

event_types = set(['an', 'cn', 'dn', 'ae', 'ce', 'de'])

node_properties = set(['label', 'r', 'g', 'b'])
edge_properties = set(['source', 'target', 'directed', 'label', 'r', 'g', 'b'])


def inject_node_property(node, attr_id, value):
    if attr_id in ['r', 'g', 'b']:
        color = int(value * 255)
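The example stops inside inject_node_property; the sketch below shows the kind of event loop such a converter needs. It assumes one JSON object per line with 'an' (add node) and 'ae' (add edge) events keyed by element id and carrying 'label', 'source', 'target' and a timestamp 't'; these field names are assumptions about the streaming input, not taken from the original script:

for line in file_content.splitlines():
    if not line.strip():
        continue
    event = json.loads(line)
    for event_type, elements in event.items():
        if event_type == 'an':  # add-node events
            for node_id, props in elements.items():
                graph.addNode(str(node_id), props.get('label', str(node_id)),
                              start=str(props.get('t', 0)))
        elif event_type == 'ae':  # add-edge events
            for edge_id, props in elements.items():
                graph.addEdge(edge_id, str(props['source']), str(props['target']),
                              start=str(props.get('t', 0)))

with open(sys.argv[-1], 'wb') as out_f:
    xml.write(out_f)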
Example #13
def main():

    parser = argparse.ArgumentParser(
        description='Graphs for Lone Wolf by Emanuele Ruffaldi 2018')
    parser.add_argument('--outputs', default="pdf png svg")
    parser.add_argument('--outputpath',
                        help="path where files will be generated")
    parser.add_argument('--inputpath',
                        help="path containing the book files from projectaon")
    parser.add_argument('--contentlink',
                        default="https://www.projectaon.org/en/xhtml/lw/")
    parser.add_argument('--clusters',
                        action="store_true",
                        help="make clusters containing all books")
    parser.add_argument('--book',
                        default=-1,
                        type=int,
                        help="book to work with, -1 means all")
    parser.add_argument('--save-gexf', action="store_true")
    parser.add_argument('--target', default="_blank")
    args = parser.parse_args()
    #<p class="choice">If you wish to use your Kai Discipline of Sixth Sense, <a href="sect141.htm">turn to 141</a>.</p>
    # files: sect#.htm
    #/Users/eruffaldi/Downloads/en/xhtml/

    #for x in os.listdir(sys.argv[1]):
    #   if x.startswith("sect") and x.endswith(".htm"):

    outputs = args.outputs.split(" ")
    output = args.outputpath
    input = args.inputpath
    link = args.contentlink
    clusters = args.clusters
    print "Cluster Mode", args.clusters

    oo = open("script.sh", "w")
    booki = 0
    gg = None
    if clusters:
        outname = os.path.join(input, "all" + ".dot")
        print "generating", outname
        outfile = open(outname, "wb")
        outfile.write("digraph G {\n")
        if args.save_gexf:
            gg = None if not gexf else gexf.Gexf("Emanuele Ruffaldi",
                                                 "Lone Wolf")
    bookplots = []
    bookplots_names = []
    allstats = []
    for dirname in os.listdir(input):
        fx = os.path.join(input, dirname)
        if not os.path.isdir(fx):
            continue
        booki += 1
        if args.book != -1 and args.book != booki:
            continue
        if not clusters:
            outname = os.path.abspath(os.path.join(input, dirname + ".dot"))
            print "generating", outname
            outfile = open(outname, "wb")
            outfile.write("digraph G {\n")
            if args.save_gexf:
                gg = None if gexf is None else gexf.Gexf(
                    "Emanuele Ruffaldi", "Lone Wolf")
        else:
            outfile.write("subgraph cluster_%d {\n" % booki)
        outfile.write("\tlabel=\"%s\"\n" % titles[booki - 1])

        #https://gephi.org/gexf/format/schema.html
        if args.save_gexf and gg is not None:
            graph = gg.addGraph("directed", "static", "VI graph")
        else:
            graph = DummmyGraph()

        allpairs = OrderedDict()
        incoming = defaultdict(set)
        outgoing = defaultdict(set)
        sommer = set()
        pagetype = defaultdict(set)
        pagelink = dict()
        ancestors = defaultdict(set)
        randomnodes = dict()
        for i in range(1, 500):
            #name = int(x[4:-4])
            name = "sect%d.htm" % i
            fp = os.path.join(fx, name)
            if not os.path.isfile(fp):
                break
            pagelink[i] = name  #os.path.abspath(fp)
            y = open(fp, "rb").read()
            if y.find("COMBAT SKILL") >= 0 and y.find("ENDURANCE") >= 0:
                pagetype[i].add("combat")
            if y.find("Sommerswerd") >= 0:
                sommer.add(i)
            israndom = (y.find("If your total is") >= 0
                        or y.find("If the number you have picked is") >= 0
                        ) and y.find("Random") >= 0
            if israndom:
                extractrandom(i, y, randomnodes)

            for p in re.findall("<a href=\"sect(\d+)\.htm\">", y):
                p = int(p)
                allpairs[(i, p)] = 1
                incoming[p].add(i)
                outgoing[i].add(p)
                ancestors[p] |= ancestors[i]
        last = i - 1

        if booki == 5:
            special = [(331, 373)]
            for x, y in special:
                incoming[y].add(x)
                outgoing[x].add(y)
                allpairs[(x, y)] = 1
        elif booki == 7:
            special = [(100, 34)]
            for x, y in special:
                incoming[y].add(x)
                outgoing[x].add(y)
                allpairs[(x, y)] = 1

        # we want to estimate if an edge is backward, that is a given page (node) goes back to another
        # one that has been visited earlier
        backward = set()
        roots = [i for i in range(1, last + 1) if len(incoming[i]) == 0]

        s = analyze(outgoing, incoming, roots, last, backward, booki)
        s["book"] = booki

        outgoing_norandom = dict([(k, v) for k, v in outgoing.iteritems()
                                  if k not in randomnodes])
        shortest = analyzeshortest(outgoing_norandom, last)
        if len(shortest) == 0:
            shortest = analyzeshortest(outgoing, last)
            s["mindist"] = -len(shortest)
        else:
            s["mindist"] = len(shortest)
        allstats.append(s)
        outgoing_dag, incoming_dag = makedag(outgoing, incoming, backward)
        print "shortest:"

        print "apply shortest", len(shortest)
        for i in range(1, len(shortest)):
            allpairs[(shortest[i - 1], shortest[i])] = 2
        # TBD print "book",booki,dirname,s
        dg = nx.from_dict_of_lists(outgoing_dag, nx.DiGraph())
        if not nx.is_directed_acyclic_graph(dg):
            # special
            if booki == 2:
                for a, b in [(15, 244), (172, 64)]:
                    outgoing_dag[a].remove(b)
                    dg.remove_edge(a, b)
            elif booki == 6:
                for a, b in [(105, 79)]:
                    outgoing_dag[a].remove(b)
                    dg.remove_edge(a, b)
            print "cycles", list(nx.simple_cycles(dg))
        if nx.is_directed_acyclic_graph(dg):
            ordered = nx.topological_sort(dg)
            #invordered = dict([(x,i) for i,x in ordered])

            # topological sort
            cp = count_dag_paths(ordered, incoming_dag, outgoing_dag, 1, last)
            totalpaths = cp[1]
            s["totalpaths"] = totalpaths
            if False:
                # Something for estimating the mandatory nodes, actually few of them
                necessarynodes = set(
                    [k for k, v in cp.iteritems() if v >= totalpaths])
                necessarynodes.remove(1)
                s["needednodes1"] = len(necessarynodes)
                necessarynodes = set()
                for c in outgoing[1]:
                    totalpathsc = cp[c]
                    necessarynodesc = set(
                        [k for k, v in cp.iteritems() if v >= totalpathsc])
                    necessarynodes |= necessarynodesc
                s["needednodes2"] = len(necessarynodes)

            deadscore = computedeathscore(booki, incoming_dag, outgoing_dag,
                                          ordered, last, randomnodes, pagetype)
            print "allthis", list(deadscore.iteritems())
            print deadscore[1]
        else:
            deadscore = {}
            if False:
                incoming = incoming_dag
                outgoing = outgoing_dag
                for a, b in backward:
                    del allpairs[(a, b)]
            s["totalpaths"] = 0
            s["needednodes1"] = 0
            s["needednodes2"] = 0

        if len(deadscore) > 0 and len(shortest) > 0:
            ts = range(0, len(shortest))
            bookplots.append(plt.plot(ts, [deadscore[x] for x in shortest])[0])
            bookplots_names.append("Book %d" % booki)
            plt.ylabel('Death probability')
            plt.xlabel('Steps in shortest')
            # only combat steps
            tsc = [
                istep for istep in ts if "combat" in pagetype[shortest[istep]]
            ]
            plt.scatter(tsc, [deadscore[shortest[istep]] for istep in tsc],
                        marker="*",
                        c="r")
            outpathdead = os.path.abspath(
                os.path.join(output, dirname + ".shortest.png"))
            plt.legend(bookplots, bookplots_names)
            print "making", outpathdead
            plt.savefig(outpathdead, format='png')

        pagedict = defaultdict(dict)
        npages = dict()
        # title, defaultValue=None, type="integer", mode="static", force_id=""
        gat = graph.addNodeAttribute("type", "", "string")
        gac = graph.addNodeAttribute("combat", "false", "boolean")
        gas = graph.addNodeAttribute("Sommerswerd", "false", "boolean")
        gal = graph.addNodeAttribute("url", "", "string")
        #graph.addNodeAttribute("type","" ".join(pagetype[i]))
        npages[1] = graph.addNode("1", "Page-1")
        for i in range(2, last + 1):
            npages[i] = graph.addNode(str(i), "Page-%d" % i)
            if len(pagetype[i]) != 0:
                npages[i].addAttribute(gat, " ".join(pagetype[i]))
            o = outgoing[i]
            ww = dict()
            if "combat" in pagetype[i]:
                npages[i].addAttribute(gac, "true")
                ww["shape"] = "tripleoctagon"
                ww["fillcolor"] = "orange"
                ww["style"] = "filled"
                if i in sommer:
                    ww["fillcolor"] = "magenta"
                else:
                    ww["fillcolor"] = "orange"
            else:
                npages[i].addAttribute(gac, "false")
                ww["shape"] = "circle"
            if len(o) == 0:
                ww["fillcolor"] = "red"
                ww["style"] = "filled"
                ww["shape"] = "Mcircle"
            elif len(incoming[i]) == 0:
                ww["fillcolor"] = "green"
                ww["style"] = "filled"
            pagedict[i].update(ww)

        for s in sommer:
            npages[s].addAttribute(gas, "true")
        for i in range(1, last + 1):
            for o in outgoing[i]:
                # id, source, target,
                graph.addEdge((i, o), str(i), str(o))

        # THE ONLY non dead end
        if booki == 3:
            pagedict[61]["fillcolor"] = "orange"

        for i in range(1, last + 1):
            pagedict[i][
                "label"] = "\"%d\"" % i  #/%d/%d\"" % (i,distancefromroot[i],maxincoming[i])
            pagedict[i]["URL"] = "\"%s/%s/%s\"" % (link, dirname, pagelink[i])
            pagedict[i]["target"] = args.target
            npages[i].addAttribute(gal, pagedict[i]["URL"])

        pagedict[1].update(dict(fillcolor="green", style="filled"))
        pagedict[last].update(dict(fillcolor="green", style="filled"))

        for i in range(1, last + 1):
            ww = pagedict[i]
            if len(ww) > 0:
                outfile.write(" b%dp%d [%s];\n" % (booki, i, ",".join(
                    ["%s=%s" % (x, y) for x, y in ww.iteritems()])))

        for k, v in allpairs.iteritems():
            pfrom, pto = k
            sw = {}
            if (pfrom, pto) in backward:
                sw["arrowhead"] = "inv"
                sw["arrowtail"] = "inv"
            else:
                ww = deadscore.get(pto)
                if ww is not None:
                    ##%2x%2x%2x
                    sw["color"] = color2rgbhex(colormap(ww, 0.0, 1.0))
                    sw["label"] = "\"%0.3f\"" % ww

            if v == 2:
                sw["penwidth"] = 3.0
            if pfrom in randomnodes:
                sw["style"] = "dashed"
            outfile.write("b%dp%d -> b%dp%d [%s];\n" %
                          (booki, pfrom, booki, pto, ",".join(
                              ["%s=%s" % (x, y) for x, y in sw.iteritems()])))

        outfile.write("}\n")

        if not clusters:
            for t in outputs:
                oo.write(" ".join([
                    "dot", "-T" + t, outname, "-o" +
                    os.path.join(output, dirname + "." + t)
                ]) + "\n")
            if args.save_gexf:
                og = open(outname[0:-4] + ".gexf", "wb")
                gg.write(og)

            if True:
                outfile = os.path.join(args.outputpath, dirname + ".html")
                print("OUTFILE", outfile)
                oframe = open(outfile, "w")
                oframe.write("""
                    <script src="common.js">
</script>
                    <iframe src="%s.svg" width=45%% height=100%%   id="gframe" style="border:none;"></iframe>
    <iframe src="%s/%s/sect1.htm" name="book" width=45%% height=100%% id="bframe" onLoad="changed(this.contentWindow.location);" style="border:none;"></iframe>
                    """ % (dirname, args.contentlink, dirname))
                oframe.close()

            # close graph

            #subprocess.call(["dot", "-T" + t,outname,"-o"+os.path.join(output,dirname+"."+t)],shell=True)
    if clusters:
        outfile.write("}\n")
        for t in outputs:
            oo.write(" ".join([
                "dot", "-T" + t, outname, "-o" +
                os.path.join(output, dirname + "." + t)
            ]) + "\n")
        if args.save_gexf:
            og = open(outname[0:-4] + ".gexf", "wb")
            gg.write(og)
        if True:
            oframe = open(os.path.join(args.outputpath, "all.html"), "w")
            oframe.write("""
                <iframe src="all.pdf" width=50%% height=100%% style="border:none;"></iframe>
<iframe name="book" width=50%% height=100%% style="border:none;"></iframe>
                """)
            oframe.close()

    print "created script.sh"
    oo.close()
    if True:
        print "\t".join(["Book", "Shortest", "Total Paths"])
        for i, a in enumerate(allstats):
            print "%d\t%d\t%d" % (a["book"], a["mindist"], a["totalpaths"]
                                  )  #,a["maxdist"])
Example #14
#
#################################
 	
			
# load categories

verbose=0
# profession
file=open("indata/code_prof.csv")
professionsName,professionsGroup=loadCategory(file)
#print professions
#if verbose :
#	for id,vals in professionsCat.iteritems() :
#		print id+"|"+"|".join(vals)
if verbose :
	for id,[name,groupID,depth] in professionsName.iteritems() :
		print professionsGroup[groupID][0]+" > "+str(id)+" - "+name
	for id,[name,groupID,depth] in professionsGroup.iteritems() :
		
		print (professionsGroup[groupID][0] if not groupID==-1 else "-1")+" > "+str(id)+" - "+name
#print professionsCat.keys()

# formation

# extra

years=("2008","1996","1986","1970")

for year in years :
	# load prof
	gexf_file=gexf.Gexf("medialab Sciences Po - Paul Girard - Marie Scot","Institutions and professers IEP "+year)