def get_parameterized_intercitation_dag(self,old_node,new_node,dag):
        desc = nx.descendants(dag,old_node)
        desc.add(old_node)
        anc = nx.ancestors(dag,new_node)
        anc.add(new_node)

        # Intersect lineages to get the ancestor/descendant ("ad") tree
        intersect = desc.intersection(anc)

        if len(intersect) == 0:
            print("No common intercitations between", old_node, "and", new_node)
        else:
          rev_dag = nx.reverse(dag,copy=True)
          # Strength of weighting due to impact (# citations)
          impact_param = 1.0

          #Strength of weighting due to network relevance of paper's citations
          network_relevance_param = 1.0

          #Strength of weighting due to redundancy in citation network
          network_robustness_param = 1.0

          sum_citations = sum([pow(dag.in_degree(w),impact_param) for w in intersect])

          #Store importance score
          importance_dict = {}
          for w in intersect:
            importance_dict[w] = pow(dag.in_degree(w),impact_param)

          #Calculate network relevance
          net_relevance = {}
          for w in intersect:
            cited_reach_cnt = 0
            for cited in dag.neighbors(w):
              #If we can reach old node through cited node add to count
              if (nx.has_path(dag,cited,old_node)):
                cited_reach_cnt += 1
            net_relevance[w] = pow(float(cited_reach_cnt)/dag.out_degree(w),network_relevance_param)


          #Calculate network robustness
          net_robustness = {}
          for w in intersect:
            citer_alt_path = 0
            cited_alt_path = 0
            for citer in rev_dag.neighbors(w):
              #If we can reach old node through citer node (without using that citation as a link)
              if (nx.has_path(dag,citer,old_node)):
                citer_alt_path += 1
            for cited in dag.neighbors(w):
              if (nx.has_path(rev_dag,cited,new_node)):
                cited_alt_path += 1
            net_robustness[w] = pow(float(cited_alt_path + citer_alt_path)/(dag.out_degree(w) + dag.in_degree(w)),network_robustness_param)
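A minimal sketch of the lineage intersection used above, on a toy citation DAG (the graph and node names are assumptions; the method then scores each node in the intersection):

import networkx as nx

dag = nx.DiGraph([("old", "a"), ("a", "new"), ("old", "b"), ("b", "new"), ("old", "new")])
desc = nx.descendants(dag, "old") | {"old"}
anc = nx.ancestors(dag, "new") | {"new"}
intersect = desc & anc    # nodes lying on some path from "old" to "new"
print(sorted(intersect))  # ['a', 'b', 'new', 'old']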
Example #2
def check_road_path(road_graph, u, v):
    sp = nx.shortest_path(road_graph, u, v)
    if len(sp) >= 20:
        print("path too long")
        return None
    print("shortest path length", len(sp))
    print("shortest path", sp)
    for i in range(1, len(sp) - 1):
        v1, v2 = sp[i], sp[i + 1]
        print(v1, v2)
        # temporarily remove each edge and look for a detour ("fix path")
        road_graph.remove_edge(v1, v2)
        if nx.has_path(road_graph, v1, v2):
            fp = nx.shortest_path(road_graph, v1, v2)
            if 3 < len(fp) < 8:
                print("fix path length", len(fp))
                print("fix path", fp)
        if nx.has_path(road_graph, u, v):
            sp2 = nx.shortest_path(road_graph, u, v)
            if len(sp2) <= 20 and u in sp2 and v in sp2:
                print("new shortest path length", len(sp2))
                print("new shortest path", sp2)
        road_graph.add_edge(v1, v2)
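Hypothetical usage on a small grid standing in for a road network (the graph choice is an assumption):

import networkx as nx

road_graph = nx.grid_2d_graph(5, 5)
check_road_path(road_graph, (0, 0), (4, 4))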
Example #3
def verify(prog, src_name, dst_name):
    src = prog.subs.find(src_name)
    dst = prog.subs.find(dst_name)
    if src is None or dst is None:
        return None

    graphs = GraphsBuilder()
    graphs.run(prog)
    cg = graphs.callgraph

    if nx.has_path(cg, src.id.number, dst.id.number):
        return ('calls', nx.shortest_path(cg, src.id.number, dst.id.number))

    calls = CallsitesCollector(graphs.callgraph, src.id.number, dst.id.number)

    for sub in prog.subs:
        calls.run(sub)
        cfg = graphs.callgraph.nodes[sub.id.number]['cfg']
        for s in calls.srcs:
            for d in calls.dsts:
                if s != d and nx.has_path(cfg, s, d):
                    return ('sites', nx.shortest_path(cfg, s, d))
        calls.clear()

    return None
Example #4
    def enter_Call(self, jmp):
        callee = direct(jmp.target[0])
        if callee:
            if nx.has_path(self.callgraph, callee.number, self.src):
                self.srcs.append(self.caller)
            if nx.has_path(self.callgraph, callee.number, self.dst):
                self.dsts.append(self.caller)
Example #5
	def get_patterns_b(self):
		roots = []
		for n in list(G.nodes()):
			if not nx.has_path(G, n, '1'):
				G.remove_node(n)
			elif list(G.predecessors(n)) == []:
				roots.append(n)
		if roots == []:
			print('\n******No Pattern******\n')
		else:
			print('\n******Patterns******\n')
			print('\nExtracted Pattern <%i>' % len(roots))
		i = 0
		for n in roots:
			pattern = []
			if nx.has_path(G, n, '1'):
				for p in nx.dijkstra_path(G, n, '1')[:-1]:
					if 'fontcolor' in G.nodes[p]:
						pattern.append(G.nodes[p]['label'].split(r'\n')[1])
					else:
						label = G.nodes[p]['label'].split(r'\n')[:-1]
						pattern.append('%s:{%s}' % (label[0].split('(')[0], ', '.join(label[1:])))
			print('%d:' % i, ' '.join(pattern))
			i += 1
Example #6
	def get_patterns_a(self):
		leaves = []
		root_name = "%s//%s" % (root, root.data)
		for n in list(G.nodes()):
			if not nx.has_path(G, root_name, n):
				G.remove_node(n)
			elif list(G.successors(n)) == []:
				leaves.append(n)
		if leaves == []:
			print('\n******No Pattern******\n')
		else:
			print('\n******Patterns******\n')
			print('\nExtracted Pattern <%i>' % len(leaves))

		i = 0
		for n in leaves:
			pattern = []
			if nx.has_path(G, root_name, n):
				for p in nx.dijkstra_path(G, root_name, n):
					if 'fontcolor' in G.nodes[p]:
						pattern.append(G.nodes[p]['label'].split(r'\n')[1])
					elif G.nodes[p] == {}:
						pass
					else:
						label = G.nodes[p]['label'].split(r'\n')[:-1]
						pattern.append('<%s>:{%s}' % (label[0].split('(')[0], ', '.join(label[1:])))
			print('%d:' % i, '//'.join(pattern))
			i += 1
Example #7
def get_contigs_of_mates(node, bamfile, G):
    """ retrieves set of nodes mapped to by read pairs
        having one mate on node; discards isolated nodes
        because they tend to reflect irrelevant alignments
    """
    mate_tigs = set()
    if node[-1] == "'":
        node = node[:-1]
    try:    
        for hit in bamfile.fetch(node):
            nref = bamfile.getrname(hit.next_reference_id)
            if nref != node:
                mate_tigs.add(nref)

    except ValueError:
        pass
    source_name = node #re.sub('NODE_','EDGE_', node)

    # print "before removal", mate_tigs
    to_remove = set([])
    for nd in mate_tigs:
        # flip name from "NODE_" prefix back to "EDGE_"
        # differs between contigs set and graph node names
        nd_name = nd #re.sub('NODE_','EDGE_', nd)
        # check membership first: degree lookups on a missing node misbehave
        if (not G.has_node(nd_name)) or \
           (G.in_degree(nd_name) == 0 and G.out_degree(nd_name) == 0):
            to_remove.add(nd)
        # see if nd reachable by node or vice-versa
        # try both flipping to rc and switching source and target    
        elif not any([nx.has_path(G, source_name, nd_name), nx.has_path(G, rc_node(source_name),nd_name), 
          nx.has_path(G, nd_name, source_name), nx.has_path(G, nd_name, rc_node(source_name))]):
            to_remove.add(nd)
    mate_tigs -= to_remove
    # print "after removal", mate_tigs

    return mate_tigs
Example #8
def _apply_is(is_formulas, core_formulas):
    """
    Given a list of formulas, resolve transitivity by Is relation

    :param formula_nodes:
    :return:
    """
    graph = nx.Graph()
    explicit_sigs = set()
    equal_formulas = []
    for formula_node in is_formulas:
        assert isinstance(formula_node, FormulaNode)
        a_node, b_node = formula_node.children
        a_sig, b_sig = a_node.signature, b_node.signature

        if a_sig.return_type == 'number' or b_sig.return_type == 'number':
            equal_formula = FormulaNode(signatures['Equals'], [a_node, b_node])
            equal_formulas.append(equal_formula)

        if not isinstance(a_sig, VariableSignature) or not isinstance(b_sig, VariableSignature):
            continue

        graph.add_edge(a_sig, b_sig)
        p = re.compile("^([A-Z]+|[a-z])$")
        if p.match(a_sig.name):
            explicit_sigs.add(a_sig)
        if p.match(b_sig.name):
            explicit_sigs.add(b_sig)

    tester = lambda sig: sig in graph and any(nx.has_path(graph, sig, explicit_sig) for explicit_sig in explicit_sigs)
    getter = lambda sig: [explicit_sig for explicit_sig in explicit_sigs if nx.has_path(graph, sig, explicit_sig)][0]
    new_formula_nodes = [formula_node.replace_signature(tester, getter) for formula_node in core_formulas]
    new_formula_nodes = new_formula_nodes + equal_formulas
    return new_formula_nodes
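The core trick above, isolated: aliased signatures are joined in an undirected graph, and any signature is rewritten to an "explicit" one it can reach. A toy sketch with plain strings standing in for signatures (an assumption):

import networkx as nx

graph = nx.Graph()
graph.add_edge("x", "AB")  # x Is AB
graph.add_edge("y", "x")   # y Is x
explicit_sigs = {"AB"}
tester = lambda sig: sig in graph and any(nx.has_path(graph, sig, e) for e in explicit_sigs)
getter = lambda sig: next(e for e in explicit_sigs if nx.has_path(graph, sig, e))
print(tester("y"), getter("y"))  # True AB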
Example #9
def betweenness(G):
  deltas = {}
  B = {}
  n = len(G.nodes())
  count = 1
  for s in G.nodes():
    print('On node', count, 'of', n)
    count += 1
    sigmas = {}
    delta_s = {}
    preds = {}
    #get sigmas of s for each v:
    for v in G.nodes():
      if nx.has_path(G, s, v):
        pred_set = set()
        sigmas[v] = get_sigma(G, s, v, pred_set)
        preds[v] = pred_set
      else:
        sigmas[v] = 0
    #get successors for use in finding delta:
    successors = get_successors(preds)

    #get deltas of s for each edge in E:
    for e in G.edges():
      if e not in B:
        B[e] = 0
      if nx.has_path(G, s, e[0]):
        B[e] += get_delta(G, s, e, sigmas, successors)

  return B
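NetworkX ships a built-in that computes edge betweenness directly, which is handy as a cross-check for this hand-rolled version (normalization conventions may differ, so treat it as a sanity check rather than an exact match):

import networkx as nx

G = nx.karate_club_graph()
B_builtin = nx.edge_betweenness_centrality(G, normalized=False)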
Example #10
def road(road_file_path, comments='#'):
    G = nx.read_edgelist(road_file_path, comments=comments, nodetype=int)
    nodes = []
    start_node = random.choice(list(G.nodes()))
    queue = [start_node]
    added_nodes = 1
    seen = set()
    while added_nodes < MAX_ROAD_NODES and len(queue) > 0:
        curr = queue.pop()
        if curr in seen:
            continue
        else:
            nodes.append(curr)
            queue += G.neighbors(curr)
            seen.add(curr)
            added_nodes += 1

    # subgraph() returns a read-only view; copy it so edges can be removed below
    G = G.subgraph(nodes).copy()

    mapping = {}
    for i, node in enumerate(G.nodes()):
        x = i // 12
        y = i % 12
        mapping[node] = (x, y)
    #nx.relabel_nodes(G, mapping, copy=False)

    mapping2 = {}
    for i, node in enumerate(sorted(G.nodes())):
        mapping2[node] = i
    #nx.relabel_nodes(G, mapping2, copy=False)

    G.graph['name'] = 'road'

    pos = nx.kamada_kawai_layout(G, scale=graphscale)
    for u in G.nodes():
        G.nodes[u]['pos'] = pos[u]

    done = False
    for i in range(MAX_ROAD_ATTEMPTS):
        n1, n2 = sample(list(G.nodes()), 2)
        if not nx.has_path(G, n1, n2):
            continue
        sp = nx.shortest_path(G, n1, n2)
        if len(sp) < 8 or len(sp) > 30:
            continue
        index = random.choice(range(len(sp) // 4, 3 * len(sp) // 4))
        u, v = sp[index], sp[index + 1]
        G.remove_edge(u, v)
        if not nx.has_path(G, u, v):
            G.add_edge(u, v)
            continue
        fp = nx.shortest_path(G, u, v)
        if len(fp) > 8:
            G.add_edge(u, v)
            continue
        # print(n1, n2, u, v, sp, fp)
        G.add_edge(u, v)
        set_init_road_path(G, n1, n2, u, v)
        return G
Example #11
def er_network(p=0.5):
    G = nx.grid_2d_graph(11, 11)
    for u in G.nodes():
        for v in G.nodes():
            if u == nest and v == target:
                continue
            if v == nest and u == target:
                continue
            if u != v:
                if random() <= p:
                    G.add_edge(u, v)
                else:
                    if G.has_edge(u, v):
                        G.remove_edge(u, v)
    if not nx.has_path(G, nest, target):
        return None
    short_path = nx.shortest_path(G, nest, target)
    if len(short_path) <= 3:
        return None
    # print(short_path)
    idx = choice(range(1, len(short_path) - 1))
    # print(idx)
    G.remove_edge(short_path[idx], short_path[idx + 1])
    for i in range(idx):
        P.append((short_path[i], short_path[i + 1]))
    for i in range(idx + 1, len(short_path) - 1):
        P.append((short_path[i], short_path[i + 1]))
    # print(P)

    if not nx.has_path(G, nest, target):
        return None

    for i, u in enumerate(G.nodes()):
        M[i] = u
        Minv[u] = i
        pos[u] = [u[0], u[1]]  # position is the same as the label.

        if (u == nest) or (u == target):  # fixed: was u[0] == nest
            node_size.append(100)
            node_color.append('r')
        else:
            node_size.append(10)
            node_color.append('k')

    for u, v in G.edges():
        G[u][v]['weight'] = MIN_PHEROMONE
        if (u, v) in P or (v, u) in P:
            edge_color.append('g')
            edge_width.append(10)
        else:
            edge_color.append('k')
            edge_width.append(1)

    for i, (u, v) in enumerate(G.edges()):
        Ninv[(u, v)] = i
        N[i] = (u, v)
        Ninv[(v, u)] = i

    return G
Example #12
def test_TR(DAG, TR):
    missing_edges = []
    for node1 in DAG.nodes():
        for node2 in DAG.nodes(): #iterates over all pairs of nodes in the DAG
            if dl.age_check(DAG, node1, node2): #ensure that there could possibly be a path from node1 to node2
                if nx.has_path(DAG, node1, node2): #tests whether there is a path between these two nodes in the original DAG
                    if not nx.has_path(TR, node1, node2): 
                        missing_edges.append([node1, node2]) #if there is no longer a path between these two pairs of nodes in the transitive reduction...
    return missing_edges #...then these two edges are stored and printed
Example #13
def four_chain(three_chain_list, DAG_TC): #Uses the transitive completion of the DAG to extend each three chain into the four chains it supports
    four_chain_list = []
    for three_chain in three_chain_list: #Iterates over every 3 chain
        [node1, node2, node3] = three_chain
        for node in DAG_TC.nodes(): #Iterates over every node in the DAG
            if dl.age_check(DAG_TC, node1, node): #If a node has birthdays between two of the nodes in the 3 chain, it could be possible to find a 4 chain that has this node added in to the 3 chain
                if dl.age_check(DAG_TC, node, node2): 
                    if nx.has_path(DAG_TC, node1, node):
                        if nx.has_path(DAG_TC, node, node2): 
                            four_chain_list.append([node1, node, node2, node3]) #If a three chain can be formed, add it to the list
    return four_chain_list
Example #14
def three_chain(DAG_TC): #Uses the transitive completion of the DAG to find all of the three chains in the DAG
    three_chain_list = []
    for edge in DAG_TC.edges(): #Iterates over every edge in the TC, which is also every 2 chain
        [node1, node2] = edge
        for node in DAG_TC.nodes():
            if dl.age_check(DAG_TC, node1, node): #If a node has birthdays between each end of the 2 chain, it could be possible to find a 3 chain that has this node in between the ends of the 2 chain
                if dl.age_check(DAG_TC, node, node2):
                    if nx.has_path(DAG_TC, node1, node): #check if there is a path to the middle node from the 1st
                        if nx.has_path(DAG_TC, node, node2): #check if there is a path from the middle node to the 2nd
                            three_chain_list.append([node1, node, node2]) #If a three chain can be formed, add it to the list
    return three_chain_list
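A quick demo with the external dl.age_check stubbed out (here it simply compares integer node ids, which is an assumption about its contract):

import networkx as nx

class dl:  # stand-in for the external helper module
    @staticmethod
    def age_check(DAG, a, b):
        return a < b

DAG_TC = nx.transitive_closure(nx.DiGraph([(1, 2), (2, 3), (3, 4)]))
print(three_chain(DAG_TC))  # e.g. [[1, 2, 3], [1, 2, 4], [1, 3, 4], [2, 3, 4]]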
Example #15
def createMinMaxGraphByWeight(**kwargs):
    ## first need to find the pairs with the maximum occurrence, then we work down from there
    ## until all of the nodes are included, ranking pairs by the 'weight' attribute

    weight = kwargs.get('weight', "weight")
    input_graph = kwargs.get('input_graph')
    sumDistance = calc_sum_distance(input_graph)

    #first create a graph that is complete
    new_graph = createCompleteGraphByDistance(input_graph=input_graph.copy(), weight='weight')

    output_graph = nx.Graph(is_directed=False)
    output_graph.add_nodes_from(input_graph.nodes(data=True)) ## copy just the nodes

    pairsHash={}

    for e in new_graph.edges():
        d = new_graph.get_edge_data(*e)
        fromAssemblage = e[0]
        toAssemblage = e[1]
        key = fromAssemblage+"*"+toAssemblage
        value = new_graph[fromAssemblage][toAssemblage]['weight']
        pairsHash[key]=value

    for key, value in sorted(pairsHash.items(), key=operator.itemgetter(1), reverse=True):
        ass1, ass2 = key.split("*")
        edgesToAdd = {}
        if not nx.has_path(output_graph, ass1, ass2):
            edgesToAdd[key] = value
            ## check to see if any other pairs NOT already represented have the same value
            for p in pairsHash:
                if pairsHash[p] == value:
                    k1, k2 = p.split("*")
                    if not nx.has_path(output_graph, k1, k2):
                        edgesToAdd[p] = pairsHash[p]
            ## now add all of the edges with the same value if they don't already exist as paths
            for newEdge in edgesToAdd:
                a1, a2 = newEdge.split("*")
                key = a1 + "*" + a2
                distance = edgeDistance[key]
                if distance in [0, None, False]:
                    weight = 0.000000000001  ## avoid a divide-by-zero
                else:
                    weight = 1 / distance
                normalized_weight = distance / sumDistance
                nx.add_path(output_graph, [a1, a2], normalized_weight=normalized_weight,
                            unnormalized_weight=weight, distance=distance, weight=weight)
    ## now remove all of the non-linked nodes from the output graph.
    to_remove = [n for n, deg in output_graph.degree() if deg < 1]
    output_graph.remove_nodes_from(to_remove)

    return output_graph.copy()
Example #16
    def _path(self, start_obj, end_obj):
        graph = self._graph
        di_graph = self._di_graph
        if start_obj.fqtn == end_obj.fqtn:
            # trivial path: start and end share the same fqtn
            path = [start_obj.fqtn]
        elif nx.has_path(di_graph, start_obj.fqtn, end_obj.fqtn):
            path = nx.shortest_path(
                di_graph, start_obj.fqtn, end_obj.fqtn, 'weight')
        else:
            path = nx.shortest_path(
                graph, start_obj.fqtn, end_obj.fqtn, 'weight')
        return path
Example #17
def interval_size(DAG, start, end):

    i = 0
    # print('checking interval from %s to %s' % (start, end))
    for node in DAG.nodes():
        if nx.has_path(DAG, start, node) and nx.has_path(DAG, node, end):
            i += 1
            # print('found node %s in interval %s to %s' % (node, start, end))
    return i
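A quick check on a toy DAG (names are assumptions); every node on some start-to-end path is counted, endpoints included:

import networkx as nx

DAG = nx.DiGraph([(0, 1), (1, 3), (0, 2), (2, 3)])
print(interval_size(DAG, 0, 3))  # 4: nodes 0, 1, 2 and 3 all lie in the interval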
Example #18
def transitive_reduction(G):
    """
    Returns a transitive reduction of a graph.  The original graph
    is not modified.

    A transitive reduction H of G has a path from x to y if and
    only if there was a path from x to y in G.  Deleting any edge
    of H destroys this property.  A transitive reduction is not
    unique in general.  A transitive reduction has the same
    transitive closure as the original graph.

    A transitive reduction of a complete graph is a tree.  A
    transitive reduction of a tree is itself.

    >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 3), (2, 4), (3, 4)])
    >>> H = transitive_reduction(G)
    >>> list(H.edges())
    [(1, 2), (2, 3), (3, 4)]
    """
    H = G.copy()
    for a, b, w in G.edges_iter(data=True):
        # Try deleting the edge, see if we still have a path
        # between the vertices
        H.remove_edge(a, b)
        if not nx.has_path(H, a, b):  # we shouldn't have deleted it
            H.add_edge(a, b, w)
    return H
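Recent NetworkX versions provide this directly for DAGs (networkx.transitive_reduction), which makes a good cross-check; note the built-in does not copy edge attributes:

import networkx as nx

G = nx.DiGraph([(1, 2), (1, 3), (2, 3), (2, 4), (3, 4)])
H = nx.transitive_reduction(G)
print(sorted(H.edges()))  # [(1, 2), (2, 3), (3, 4)]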
Example #19
def is_destination_reachable_from_source(noc_rg, source_node, destination_node):
    """
    checks if destination is reachable from the local port of the source node
    the search starts from the local port
    :param noc_rg: NoC routing graph
    :param source_node: source node id
    :param destination_node: destination node id
    :return: True if there is a path else, False
    """
    # the source port should be an input port, since this is the input of the router
    # (which will be connected to the PE's output port)
    source = str(source_node) + 'LI'
    # the destination port should be an output port, since this is the output of the router to the PE
    # (which will be connected to the PE's input port)
    destination = str(destination_node) + 'LO'
    if has_path(noc_rg, source, destination):
        if Config.RotingType == 'MinimalPath':
            path_length = shortest_path_length(noc_rg, source, destination)
            minimal_hop_count = manhattan_distance(source_node, destination_node)
            return (path_length // 2) == minimal_hop_count
        else:
            return True
    else:
        return False
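A toy two-router routing graph illustrating the assumed '<id>LI' (local input) and '<id>LO' (local output) port naming; the real graph, Config and manhattan_distance come from the surrounding project:

import networkx as nx

noc_rg = nx.DiGraph([('0LI', '0EO'), ('0EO', '1WI'), ('1WI', '1LO')])
print(nx.has_path(noc_rg, '0LI', '1LO'))                   # True
print(nx.shortest_path_length(noc_rg, '0LI', '1LO') // 2)  # 1 hop, matching the length/2 convention above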
Example #20
def find_disconnected_groups(graph):
	
	# create a dictionary of the nodes of the graph with their colour
	node_colours = {}
	for i in graph.nodes():
		node_colours[i] = None

	colour_counter = 0
	groups = {}

	# while all the nodes are uncoloured
	while None in node_colours.values():
		for i in graph.nodes():
			# if it is uncoloured, colour it and find
			# all of the nodes connected to it and colour them
			if node_colours[i] is None:
				node_colours[i] = colour_counter
				groups[colour_counter] = [i]
				for j in graph.nodes():
					if nx.has_path(graph, i, j):
						node_colours[j] = node_colours[i]
						if j not in groups[colour_counter]:
							groups[colour_counter].append(j)
				colour_counter += 1
				# print(node_colours)

	# the number of groups
	n_groups = max(groups.keys()) + 1

	return groups
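For undirected graphs, nx.connected_components produces the same grouping in near-linear time instead of the quadratic has_path sweep above; a hedged equivalent:

import networkx as nx

def find_disconnected_groups_fast(graph):
    # one list per connected component, keyed by a running colour counter
    return {i: list(comp) for i, comp in enumerate(nx.connected_components(graph))}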
Example #21
    def _singleSegment(self, nodes):
        # disjoint line or a bent line at 45 degrees appearing as a dichotomous tree but an error due to
        # improper binarization, so remove them and do not account for statistics
        listOfPerms = list(itertools.combinations(nodes, 2))
        if type(nodes[0]) == int:
            modulus = [[start - end] for start, end in listOfPerms]
            dists = [abs(i[0]) for i in modulus]
        else:
            dims = len(nodes[0])
            modulus = [[start[dim] - end[dim] for dim in range(0, dims)] for start, end in listOfPerms]
            dists = [sum(modulus[i][dim] * modulus[i][dim] for dim in range(0, dims)) for i in range(0, len(modulus))]
        if len(list(nx.articulation_points(self._subGraphSkeleton))) == 1 and set(dists) != {1}:
            # each node is connected to one or two other nodes which are not a distance of 1 implies there is a
            # branch point with two end points in a single dichotomous tree
            for sourceOnTree, item in listOfPerms:
                if nx.has_path(self._subGraphSkeleton, sourceOnTree, item) and sourceOnTree != item:
                    simplePaths = list(nx.all_simple_paths(self._subGraphSkeleton, source=sourceOnTree, target=item))
                    simplePath = simplePaths[0]
                    countBranchNodesOnPath = sum([1 for point in simplePath if point in nodes])
                    if countBranchNodesOnPath == 2:
                        curveLength = self._getLengthAndRemoveTracedPath(simplePath)
                        self.isolatedEdgeInfoDict[sourceOnTree, item] = curveLength
        else:
            # each node is connected to one or two other nodes implies it is a line
            endPoints = [k for (k, v) in self._nodeDegreeDict.items() if v == 1]
            sourceOnLine = endPoints[0]
            targetOnLine = endPoints[1]
            simplePath = nx.shortest_path(self._subGraphSkeleton, source=sourceOnLine, target=targetOnLine)
            curveLength = self._getLengthAndRemoveTracedPath(simplePath)
            self.isolatedEdgeInfoDict[sourceOnLine, targetOnLine] = curveLength
Example #22
def generic_product_rule(g, op):
    sel1 = random.sample(list(g.nodes()), 2)
    if nx.has_path(g, *sel1):
        g.add_edge(*sel1)
        return sel1
    sel2 = random.sample(list(g.nodes()), 2)
    if nx.has_path(g, *sel2):
        g.add_edge(*sel2)
        return sel2
    elif op(len(nx.node_connected_component(g, sel2[0])) * len(nx.node_connected_component(g, sel2[1])),
            len(nx.node_connected_component(g, sel1[0])) * len(nx.node_connected_component(g, sel1[1]))):
        g.add_edge(*sel2)
        return sel2
    else:
        g.add_edge(*sel1)
        return sel1
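A hypothetical driver: an Achlioptas-style process where op decides which candidate edge to keep (operator.lt keeps the pair whose component-size product is smaller):

import random
from operator import lt
import networkx as nx

g = nx.empty_graph(50)
for _ in range(60):
    generic_product_rule(g, lt)
print(max(len(c) for c in nx.connected_components(g)))  # size of the largest component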
Example #23
def get_ontology_paths(basic_ontology, from_type, to_obj):
    """
    type-to-type ontology path

    :param ontology:
    :param from_type:
    :param to_obj:
    :return:
    """
    assert from_type.name in basic_ontology.types
    assert to_obj.type.name in basic_ontology.types


    graph = basic_ontology.ontology_graph.copy()
    assert isinstance(graph, nx.DiGraph)

    for function in basic_ontology.functions.values():
        if function.valence > 1:
            graph.remove_node(function.id)

    """
    if from_type is to_obj:
        paths = [cycle for cycle in nx.simple_cycles(basic_ontology.ontology_graph) if from_type.id in cycle]
    """
    if not nx.has_path(graph, from_type.id, to_obj.type.id):
        paths = []
    elif from_type == to_obj.type:
        paths = [[from_type.id]]

    else:
        paths = list(nx.all_simple_paths(graph, from_type.id, to_obj.type.id))

    path_dict = {key: OntologyPath(basic_ontology, [basic_ontology.get_by_id(id_) for id_ in path] + [to_obj], key)
                 for key, path in enumerate(paths)}
    return path_dict
Example #24
    def currentLeader(self, switch):
        for c in sorted(list(self.controllers)):
            if c not in self.graph:
                self.graph.add_node(c)
        for c in sorted(list(self.controllers)):
            if nx.has_path(self.graph, c, switch):
                return c  # find the first connected controller
Example #25
def main(fName="cppzk.txt"):
    g = nx.Graph()
    for eachLine in open(fName):
        fields = eachLine.split()
        g.add_edge(fields[0], fields[1])
         
#     keyConns = [["ASPA0085", "ARGA0082"], ["ARGA0082", "GLUA0194"]]
    keyConns = [["ASPA0085", "GLUA0194"]]
#     keyConns = [["ASPA0085", "ARGA0082"], ["ARGA0082", "GLUA0194"], ["ASPA0085", "GLUA0194"]]

    keyAtoms = {"ASPA0085":["OD1", "OD2"], "ARGA0082":["NE", "NH1", "NH2"], "GLUA0194":["OE1", "OE2"]}
    
    for eachConn in keyConns:
        sourceRes = eachConn[0]
        targetRes = eachConn[1]
        
        for eachSourceAtom in keyAtoms[sourceRes]:
            sourceAtom = sourceRes + eachSourceAtom
            if sourceAtom not in g.nodes(): continue
            for eachTargetAtom in keyAtoms[targetRes]:
                targetAtom = targetRes + eachTargetAtom
                if targetAtom not in g.nodes(): continue
                
                if nx.has_path(g, sourceAtom, targetAtom):
                    print("Path between %13s%13s" % (sourceAtom, targetAtom), end=" ")
                    print(nx.shortest_path(g, sourceAtom, targetAtom))
Example #26
def number_of_hidden_nodes(G):
    path_list = []
    
    for j in range(6):
        if(G.has_node(j)):
            if (nx.has_path(G,j,6)):
                for path in nx.shortest_simple_paths(G, j, 6):
                    path_list = np.append(path_list, (len(path)-2))
            
    for j in range(6):
        if(G.has_node(j)):
            if (nx.has_path(G,j,7)):
                for path in nx.shortest_simple_paths(G, j, 7):
                    path_list = np.append(path_list, (len(path)-2))
    
    # floor at zero; np.max(value, 0) would treat the 0 as an axis argument
    return max(int(np.max(path_list)), 0)
Example #27
def get_my_global_efficiency(filename):
	threshold = 0
	f = open(filename[:-4] + '_global_efficiency.dat', 'w')
	g = open(filename[:-4] + '_node_global_efficiency.dat', 'w')
	print(f)
	print(g)
	f.write('threshold\tglob_effic\n')
	g.write('node\tthreshold\tnode_glob_effc\n')
	for i in range(0, 101):
		threshold = float(i) / 100
		G = get_my_threshold_matrix(filename, threshold)
		global_efficiency = 0.
		for node_i in G:
			sum_inverse_dist = 0.
			for node_j in G:
				if node_i != node_j:
					if nx.has_path(G, node_i, node_j):
						sum_inverse_dist += 1. / nx.shortest_path_length(G, node_i, node_j)
			g.write('%d\t%f\t%f\n' % ((node_i + 1), threshold, (sum_inverse_dist / nx.number_of_nodes(G)))) ##?
			global_efficiency += sum_inverse_dist / (nx.number_of_nodes(G) - 1.)
		g.write("\n")
		global_efficiency = global_efficiency / nx.number_of_nodes(G)
		f.write("%f\t%f\n" % (threshold, global_efficiency))
	f.close()
	g.close()
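NetworkX's built-in nx.global_efficiency computes a closely related quantity (the average inverse shortest-path length over node pairs) and is useful as a sanity check for one thresholded graph:

import networkx as nx

G = nx.karate_club_graph()
print(nx.global_efficiency(G))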
Example #28
def _check_path(dpid1, dpid2):
  if dpid1 == dpid2:
    return True
  else:
    g = _graph_for_link('lldp')
    return nx.has_path(g, dpid1, dpid2) if all(i in g.nodes() for i in [dpid1, dpid2]) else log.info(
      'not all nodes in g')
Example #29
    def add_edge(self, start, end, **kwargs):
        """
        Add an edge between two nodes.

        The nodes will be automatically added if they are not present in the network.

        Parameters
        ----------
        start: tuple
               Both the start and end nodes should specify the time slice as
               (node_name, time_slice). Here, node_name can be any hashable
               python object while the time_slice is an integer value,
               which denotes the time slice that the node belongs to.

        end: tuple
               Both the start and end nodes should specify the time slice as
               (node_name, time_slice). Here, node_name can be any hashable
               python object while the time_slice is an integer value,
               which denotes the time slice that the node belongs to.

        Examples
        --------
        >>> from pgmpy.models import DynamicBayesianNetwork as DBN
        >>> model = DBN()
        >>> model.add_nodes_from(['D', 'I'])
        >>> model.add_edge(('D',0), ('I',0))
        >>> model.edges()
        [(('D', 1), ('I', 1)), (('D', 0), ('I', 0))]
        """
        try:
            if len(start) != 2 or len(end) !=2:
                raise ValueError('Nodes must be of type (node, time_slice).')
            elif not isinstance(start[1], int) or not isinstance(end[1], int):
                raise ValueError('Nodes must be of type (node, time_slice).')
            elif start[1] == end[1]:
                start = (start[0], 0)
                end = (end[0], 0)
            elif start[1] == end[1] - 1:
                start = (start[0], 0)
                end = (end[0], 1)
            elif start[1] > end[1]:
                raise NotImplementedError('Edges in backward direction are not allowed.')
            elif start[1] != end[1]:
                raise ValueError("Edges over multiple time slices is not currently supported")
        except TypeError:
            raise ValueError('Nodes must be of type (node, time_slice).')

        if start == end:
            raise ValueError('Self Loops are not allowed')
        elif start in super(DynamicBayesianNetwork, self).nodes() and end \
                in super(DynamicBayesianNetwork, self).nodes() and \
                nx.has_path(self, end, start):
            raise ValueError(
                 'Loops are not allowed. Adding the edge from ({start} --> {end}) forms a loop.'.format(
                     start=str(start), end=str(end)))

        super(DynamicBayesianNetwork, self).add_edge(start, end, **kwargs)

        if start[1] == end[1]:
            super(DynamicBayesianNetwork, self).add_edge((start[0], 1 - start[1]), (end[0], 1 - end[1]))
Example #30
def TR_from_examining_edges(DAG):
    # freeze the edge list first: edges are removed and re-added inside the loop
    for source, target in list(DAG.edges()):
        DAG.remove_edge(source, target)
        if not nx.has_path(DAG, source, target):
            DAG.add_edge(source, target)
    return DAG
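A quick demo on the same DAG used in the transitive_reduction docstring above:

import networkx as nx

DAG = nx.DiGraph([(1, 2), (1, 3), (2, 3), (2, 4), (3, 4)])
print(sorted(TR_from_examining_edges(DAG).edges()))  # [(1, 2), (2, 3), (3, 4)]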
Example #31
    def step(self, action):
        assert self.current_node is not None
        assert self.destination is not None
        assert type(self.remaining_time) in [int, float]
        assert self.remaining_time >= 0
        current_node = self.current_node
        destination = self.destination
        remaining_time = self.remaining_time
        action_space = self.get_action_space(current_node)
        assert action_space.contains(action)
        _, next_node = action

        done = False
        reward = 0.0
        self.num_step += 1

        if current_node == destination:
            obs = (current_node, destination, remaining_time
                   )  # if we are already at the destination, we do nothing
            done = True
        else:
            act_time = self.graph[current_node][next_node]['time']
            act_cost = self.graph[current_node][next_node]['cost']
            reward += (-act_cost)
            remaining_time -= act_time
            obs = (next_node, destination, remaining_time)
            arrive = (next_node == destination)
            miss_ddl = (remaining_time < 0)

            if miss_ddl:
                time_penalty = self.time_radius
                cost_penalty = self.cost_radius
                if nx.has_path(self.graph, current_node, destination):
                    time_penalty = nx.shortest_path_length(self.graph,
                                                           source=current_node,
                                                           target=destination,
                                                           weight='time')
                    cost_penalty = nx.shortest_path_length(self.graph,
                                                           source=current_node,
                                                           target=destination,
                                                           weight='cost')
                reward += (-time_penalty * self.miss_deadline_penalty -
                           cost_penalty)

            if miss_ddl or arrive:
                done = True
            current_node = next_node

        self.last_action = action
        self.last_reward = reward
        self.episode_total_reward += reward

        # update the current state
        self.current_node = current_node
        self.remaining_time = remaining_time

        # if we are stuck in some node, then we are done
        next_action_space = self.get_action_space(current_node)
        if next_action_space.sample() is None:
            if done is False:
                done = True
                # if we get stuck, then are doomed to be late
                time_penalty = self.time_radius
                cost_penalty = self.cost_radius
                reward += (-time_penalty * self.miss_deadline_penalty -
                           cost_penalty)

        return (obs, reward, done, {})
Example #32
def city_directions():
    """
    Purpose:    To create a path between any two USA cities using the NA roads dataset
    Input:      None
    Output:     The path distance in miles OR -1 if no path exists
    """
    # data looks like this: [ "cityname1", "cityname2" ]
    citySource, cityDest = request.args.get("cityArgs", None).split(',')
    citySource = (unquote(citySource)).title()
    cityDest = (unquote(cityDest)).title()
    source_target_list = []
    # only run if both source and destination cities are populated with text
    if citySource and cityDest:
        source_target_list = load_city_docs(citySource, cityDest)
        # identify the source city
        source_city = source_target_list[0]
        source_city_coords = tuple(source_city['geometry']['coordinates'])
        source_city_name = source_city['properties']['name']
        # locate the nearest road segment(s) to source city
        nearest_roads_to_source = [
            list(linestring.coords)
            for linestring in list(usrails_and_roads_DF.iloc[list(
                usrail_and_roads_SI.nearest((source_city_coords[0],
                                             source_city_coords[1],
                                             source_city_coords[0],
                                             source_city_coords[1]),
                                            num_results=1))].geometry)
        ]

        target_city = source_target_list[1]
        target_city_coords = tuple(target_city['geometry']['coordinates'])
        target_city_name = target_city['properties']['name']
        # find the closest road segment(s) to the target city
        nearest_roads_to_target = [
            list(linestring.coords)
            for linestring in list(usrails_and_roads_DF.iloc[list(
                usrail_and_roads_SI.nearest((target_city_coords[0],
                                             target_city_coords[1],
                                             target_city_coords[0],
                                             target_city_coords[1]),
                                            num_results=1))].geometry)
        ]

        # adds the source city to the NA roads graph
        if source_city_coords not in US_road_graph:
            add_city_to_graph(source_city_coords, source_city_name,
                              nearest_roads_to_source, US_road_graph)

        # adds the target city to the NA roads graph
        if target_city_coords not in US_road_graph:
            add_city_to_graph(target_city_coords, target_city_name,
                              nearest_roads_to_target, US_road_graph)

        # if a path exists between the two cities, create a geojson feature of it
        if has_path(US_road_graph, source_city_coords, target_city_coords):
            total_distance, path = single_source_dijkstra(US_road_graph,
                                                          source_city_coords,
                                                          target_city_coords,
                                                          weight=distance)
            path_geojson = {
                'type': 'feature',
                'properties': {
                    'source': source_city_name,
                    'destination': target_city_name,
                    'distance': total_distance
                },
                'geometry': {
                    'type': 'LineString',
                    'coordinates': [list(point) for point in path]
                }
            }
            # load the path into a GeoDataFrame for processing
            path_df = GeoDataFrame.from_features([path_geojson],
                                                 crs="epsg:4326")
            # use the buffer method to produce a Polygon of 0.2 degrees thickness surrounding the path
            buffered_path = (path_df.buffer(0.2)).to_crs(crs="epsg:4326")
            # create a dataframe from the buffered path
            buffered_path_df = GeoDataFrame(buffered_path,
                                            geometry=buffered_path.geometry)
            buffered_path_df[0] = None
            # perform a spatial join of the buffered path and the ufo sightings, earthquakes, etc dataframe.
            #   This will return all disasters within 0.2 degrees of the path
            join_results = GeoDataFrame(
                sjoin(disasters_DF, buffered_path_df, lsuffix="left"))
            # from here, dump the path, the buffered path, and the disasters 0.2 degrees from the path to files
            #   for the front end to visualize
            dump(
                path_geojson,
                open(
                    './Assignments/A05/assets/api/data/shortest_paths/' +
                    source_city_name + '_' + target_city_name + '.geojson',
                    'w'))
            dump(
                loads(buffered_path.to_json()),
                open(
                    './Assignments/A05/assets/api/data/shortest_paths/buffered.geojson',
                    'w'))
            dump(
                loads(join_results.to_json(show_bbox=False)),
                open(
                    './Assignments/A05/assets/api/data/shortest_paths/closest_points.geojson',
                    'w'))
            return str(total_distance)
        else:
            return "-1"
Example #33
    import time
    import numpy as np

    start_s3 = time.perf_counter()  # time.clock() was removed in Python 3.8

    d_source = np.zeros((len(source), len(source)))
    jac_source = np.zeros((len(source), len(source)))

    dummy = 9999

    for i in range(len(source)):
        if i % 100 == 0:
            print('Shortest Path:' + str(i) + '/' + str(len(source)) + '---' +
                  'Time Elapsed:' + str((time.perf_counter() - start_s3)))
        for j in range(len(source)):

            if nx.has_path(G, source[i], source[j]):
                d_source[i, j] = nx.shortest_path_length(G,
                                                         source=source[i],
                                                         target=source[j])
            else:
                d_source[i, j] = dummy  # unconnected components

            a = set([k[0] for k in community[i]])
            b = set([k[0] for k in community[j]])

            c = a.intersection(b)
            jac_source[i, j] = float(len(c)) / (len(a) + len(b) - len(c))

    mixed_source = d_source / jac_source

    print('Time Elapsed--- ' + str((time.perf_counter() - start_s3)))
Example #34
def entropy(G, sources=None, sinks=None):
    """
    Compute entropy, equations from [AwGB90]_.

    Entropy is a measure of uncertainty in a random variable.
    In a water distribution network model, the random variable is
    flow in the pipes and entropy can be used to measure alternate flow paths
    when a network component fails.  A network that carries maximum entropy
    flow is considered reliable with multiple alternate paths.

    Parameters
    ----------
    G : NetworkX or WNTR graph
        Entropy is computed using a directed graph based on pipe flow direction.
        The 'weight' of each link is equal to the flow rate.

    sources : list of strings, optional (default = all reservoirs)
        List of node names to use as sources.

    sinks : list of strings, optional (default = all nodes)
        List of node names to use as sinks.

    Returns
    -------
    A tuple which includes:
        - A pandas Series that contains entropy for each node
        - System entropy (float)
    """

    if not G.is_directed():
        return

    if sources is None:
        sources = [key for key,value in nx.get_node_attributes(G,'type').items() if value == 'Reservoir' ]

    if sinks is None:
        sinks = G.nodes()

    S = {}
    Q = {}
    for nodej in sinks:
        if nodej in sources:
            S[nodej] = 0 # nodej is the source
            continue

        sp = [] # simple path
        if G.nodes[nodej]['type'] == 'Junction':
            for source in sources:
                if nx.has_path(G, source, nodej):
                    simple_paths = _all_simple_paths(G,source,target=nodej)
                    sp = sp + ([p for p in simple_paths])
                    # all_simple_paths was modified to check 'has_path' in the
                    # loop, but this is still slow for large networks
                    # what if the network was skeletonized based on series pipes
                    # that have the same flow direction?
                    # what about duplicating paths that have pipes in series?
                #print j, nodeid, len(sp)

        if len(sp) == 0:
            S[nodej] = np.nan # nodej is not connected to any sources
            continue

        sp = np.array(sp)

        # Uj = set of nodes on the upstream ends of links incident on node j
        Uj = G.predecessors(nodej)
        # qij = flow in link from node i to node j
        qij = []
        # aij = number of equivalent independent paths through the link from node i to node j
        aij = []
        for nodei in Uj:
            mask = np.array([nodei in path for path in sp])
            # NDij = number of paths through the link from node i to node j
            NDij = sum(mask)
            if NDij == 0:
                continue
            temp = sp[mask]
            # MDij = links in the NDij path
            MDij = [(t[idx],t[idx+1]) for t in temp for idx in range(len(t)-1)]

            flow = 0
            for link in G[nodei][nodej].keys():
                flow = flow + G[nodei][nodej][link]['weight']
            qij.append(flow)

            # dk = degree of link k in MDij
            dk = Counter()
            for elem in MDij:
                # divide by the number of links between two nodes
                dk[elem] += 1/len(G[elem[0]][elem[1]].keys())
            V = np.array(list(dk.values()))
            aij.append(NDij*(1-float(sum(V - 1))/sum(V)))

        Q[nodej] = sum(qij) # Total flow into node j

        # Equation 7
        S[nodej] = 0
        for idx in range(len(qij)):
            if qij[idx]/Q[nodej] > 0:
                S[nodej] = S[nodej] - \
                    qij[idx]/Q[nodej]*math.log(qij[idx]/Q[nodej]) + \
                    qij[idx]/Q[nodej]*math.log(aij[idx])

    Q0 = sum(nx.get_edge_attributes(G, 'weight').values())

    # Equation 3
    S_ave = 0
    for nodej in sinks:
        if not np.isnan(S[nodej]):
            if nodej not in sources:
                if Q[nodej]/Q0 > 0:
                    S_ave = S_ave + \
                        (Q[nodej]*S[nodej])/Q0 - \
                        Q[nodej]/Q0*math.log(Q[nodej]/Q0)
                        
    S = pd.Series(S) # convert S to a series
    
    return [S, S_ave]
Example #35
G = nx.from_pandas_edgelist(df_edges, 'from_node', 'to_node', True, nx.DiGraph)
# G.add_edges_from(nx.from_pandas_edgelist(df_edges[df_edges['BA_NumberOfLanes'] > 0], 'B_node', 'A_node', True, nx.DiGraph).edges)

# adding the node attributes
for i in G.nodes():
    try:
        G.nodes[i]['x_coord'] = df_nodes.x_coord[i]
        G.nodes[i]['y_coord'] = df_nodes.y_coord[i]
        G.nodes[i]['pos'] = (G.nodes[i]['x_coord'], G.nodes[i]['y_coord'])  # for drawing
        G.nodes[i]['node_type'] = df_nodes.node_type[i]  # could be used in future to filter out "fatal" issues,
                                                         # e.g. a path exists to an external node that only has inbound travel lanes
    except KeyError:
        print(i, " not on node list")
    
    # add other attributes as needed

validPaths = 0
for i in G.nodes():
    if i < 3034:   # Hack to select only low numbered nodes (e.g., centroids in a typical network)
        toCheck = list(G.nodes())
        toCheck.remove(i)
        for j in toCheck:
            if j < 3034:   # Hack to select only low numbered nodes (e.g., centroids in a typical network)
                if nx.has_path(G,i,j):
                    validPaths = validPaths + 1
                    if validPaths % 1000 == 0:
                        print(validPaths)
                else:
                    print(i, j, nx.has_path(G, i, j))
print(validPaths," valid paths")
Example #36
def compute_single_sp(G_gt_, G_prop_, kd_idx_dic_prop, kdtree_prop,
                      x_coord='x', y_coord='y',
                      weight='length', query_radius=5,
                      length_buffer=0.05, make_plots=False, verbose=False):
    '''Single SP metric
    return 1 if within length_buffer
    return 0 if path is outside length_buffer or DNE for either gt or prop
    return -1 if path between randomly chosen nodes DNE for both graphs'''

    # choose random ground truth source and target nodes
    [source_gt, target_gt] = np.random.choice(
        list(G_gt_.nodes()), size=2, replace=False)
    if verbose:
        print("source_gt:", source_gt, "target_gt:", target_gt)
    # source_gt, target_gt = 10002, 10039
    x_s_gt, y_s_gt = G_gt_.nodes[source_gt][x_coord], G_gt_.nodes[source_gt][y_coord]
    x_t_gt, y_t_gt = G_gt_.nodes[target_gt][x_coord], G_gt_.nodes[target_gt][y_coord]

    # if verbose:
    #    print ("x_s_gt:", x_s_gt)
    #    print ("y_s_gt:", y_s_gt)

    # get route.  If it does not exists, set len = -1
    if not nx.has_path(G_gt_, source_gt, target_gt):
        len_gt = -1
    else:
        len_gt = nx.dijkstra_path_length(
            G_gt_, source_gt, target_gt, weight=weight)

    # get nodes in prop graph
    # see if source, target node exists in proposal
    source_p_l, _ = apls_utils.nodes_near_point(x_s_gt, y_s_gt,
                                                kdtree_prop, kd_idx_dic_prop,
                                                x_coord=x_coord, y_coord=y_coord,
                                                radius_m=query_radius)
    target_p_l, _ = apls_utils.nodes_near_point(x_t_gt, y_t_gt,
                                                kdtree_prop, kd_idx_dic_prop,
                                                x_coord=x_coord, y_coord=y_coord,
                                                radius_m=query_radius)

    # if either source or target does not exists, set prop_len as -1
    if (len(source_p_l) == 0) or (len(target_p_l) == 0):
        len_prop = -1

    else:
        source_p, target_p = source_p_l[0], target_p_l[0]
        x_s_p, y_s_p = G_prop_.nodes[source_p][x_coord], G_prop_.nodes[source_p][y_coord]
        x_t_p, y_t_p = G_prop_.nodes[target_p][x_coord], G_prop_.nodes[target_p][y_coord]

        # get route
        if not nx.has_path(G_prop_, source_p, target_p):
            len_prop = -1
        else:
            len_prop = nx.dijkstra_path_length(
                G_prop_, source_p, target_p, weight=weight)

    # path length difference, as a percentage
    perc_diff = np.abs((len_gt - len_prop) / len_gt)
    # check path lengths
    # if both paths do not exist, skip
    if (len_gt == -1) and (len_prop == -1):
        match = -1
    # if one is positive and one negative, return 0
    elif (np.sign(len_gt) != np.sign(len_prop)):
        match = 0
    # else, compare lengths
    elif perc_diff > length_buffer:
        match = 0
    else:
        match = 1

    if verbose:
        # print ("source_gt:", source_gt, "target_gt:", target_gt)
        print("len_gt:", len_gt)
        print("len_prop:", len_prop)
        print("perc_diff:", perc_diff)

    if make_plots:

        # plot G_gt_init
        plt.close('all')
        # plot initial graph
        if len_gt != -1:
            fig, ax = osmnx_funcs.plot_graph_route(G_gt_, nx.shortest_path(
                G_gt_, source=source_gt, target=target_gt, weight=weight))
        else:
            fig, ax = osmnx_funcs.plot_graph(G_gt_, axis_off=True)
        ax.set_title("Ground Truth, L = " + str(np.round(len_gt, 2)))
        # draw a circle (this doesn't work unless it's a PatchCollection!)
        patches = [Circle((x_s_gt, y_s_gt), query_radius, alpha=0.3),
                   Circle((x_t_gt, y_t_gt), query_radius, alpha=0.3)]
        p = PatchCollection(patches, alpha=0.4, color='orange')
        ax.add_collection(p)
        # also a simple point
        ax.scatter([x_s_gt], [y_s_gt], c='green', s=6)
        ax.scatter([x_t_gt], [y_t_gt], c='red', s=6)

        # plot proposal graph
        if len_prop != -1:
            fig, ax1 = osmnx_funcs.plot_graph_route(G_prop_, nx.shortest_path(
                G_prop_, source=source_p, target=target_p, weight=weight))
        else:
            fig, ax1 = osmnx_funcs.plot_graph(G_prop_, axis_off=True)
        ax1.set_title("Proposal, L = " + str(np.round(len_prop, 2)))
        # draw patches from ground truth!
        patches = [Circle((x_s_gt, y_s_gt), query_radius, alpha=0.3),
                   Circle((x_t_gt, y_t_gt), query_radius, alpha=0.3)]
        p = PatchCollection(patches, alpha=0.4, color='orange')
        ax1.add_collection(p)
        if len_prop != -1:
            # also a simple point
            ax1.scatter([x_s_p], [y_s_p], c='green', s=6)
            ax1.scatter([x_t_p], [y_t_p], c='red', s=6)

    return match
Example #37
def heuristic_algorithm(src, dst, graph_c, b, k):
    U = []
    S = []

    if nx.has_path(graph_c, source=src, target=dst):
        p = nx.shortest_path(graph_c, source=src, target=dst)
        U.append((p, graph_c))
        S.append(p)

    while (len(U) > 0):
        (p, G) = U.pop(0)
        for i in range(0, b):

            G_c = nx.Graph()
            G_c.add_edges_from(G.edges())

            edge_index = random.randrange(len(p) - 1)

            u = p[edge_index]
            v = p[edge_index + 1]

            t = random.random()
            r = 0.25 * len(p)

            toRemove = []
            for edge in G_c.edges():
                ea = edge[0]
                eb = edge[1]  # renamed from a, b: must not shadow the branching factor b

                if nx.has_path(G_c, ea, u):
                    path_a_u = nx.shortest_path(G_c, ea, u)
                    a_u_len = len(path_a_u) + t - 1

                    path_a_v = nx.shortest_path(G_c, ea, v)
                    a_v_len = len(path_a_v) + (1 - t) - 1

                    path_b_u = nx.shortest_path(G_c, eb, u)
                    b_u_len = len(path_b_u) + t - 1

                    path_b_v = nx.shortest_path(G_c, eb, v)
                    b_v_len = len(path_b_v) + (1 - t) - 1

                    if a_u_len <= r or a_v_len <= r or b_u_len <= r or b_v_len <= r:
                        # G_c.remove_edge(ea, eb)
                        toRemove.append((ea, eb))

            for edge in toRemove:
                G_c.remove_edge(edge[0], edge[1])

            if G_c.has_edge(u, v):
                G_c.remove_edge(u, v)

            if nx.has_path(G_c, src, dst):
                p_c = nx.shortest_path(G_c, src, dst)
                U.append((p_c, G_c))

                if acceptable(p_c, S):
                    S.append(p_c)

            if len(S) == k:
                return S
    return S
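A hypothetical driver on a small grid, with the external acceptable() stubbed to accept any path not already collected (an assumption about its contract):

import random
import networkx as nx

def acceptable(p_c, S):  # stub for the external path-diversity test
    return p_c not in S

G = nx.grid_2d_graph(6, 6)
paths = heuristic_algorithm((0, 0), (5, 5), G, b=3, k=4)
print(len(paths))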
Example #38
    #stop the training if the stop criteria is met
    if stop == 1:
        print('stopping')
        #cool_plot(G_new)
        break

    #find edge-removal progress
    N_edges_new = G.number_of_edges()
    delta_edges = N_edges_old - N_edges_new
    loop.update(delta_edges)

    #get the shortest connections for all of the pins
    temp = []
    for j in range(len(poss_conn_all)):
        if nx.has_path(G, poss_conn_all[j, 0], poss_conn_all[j, 1]):
            temp.append(
                nx.dijkstra_path_length(G, poss_conn_all[j, 0],
                                        poss_conn_all[j, 1]))
        else:
            temp.append(float("inf"))
    if i == 0:
        large_re_log = temp
    else:
        large_re_log = np.vstack((large_re_log, temp))
    if i % 10 == 0:
        if i == 0:
            re_real_log = find_resistance(G, poss_conn_all)
        else:
            re_real_log = np.vstack(
                (re_real_log, find_resistance(G, poss_conn_all)))
Example #39
    def check_intercept_cluster(self, c):
        #find center of each cluster, as well as its endpoints using a convex hull
        clust = self.cluster_points[c]
        label = self.labels[self.cluster_labels[c][0]]
        clust_hull = CH(clust, qhull_options="QJ")
        endpointsIndeces = clust_hull.vertices
        avgPoint = get_average_point(clust)
        self.clust_centers.append(avgPoint)
        max_dist = 0
        #use endpoints to calculate radius of cluster
        for e in endpointsIndeces:
            point = clust[e]
            if dist(point,avgPoint) > max_dist:
                max_dist = dist(point,avgPoint)
        #Test 1: Distance ratios

        #create graph of points in the cluster
        clust_graph = nx.Graph()
        for i in range(len(clust)):
            clust_graph.add_node(i)
        for i in range(len(clust)):
            for n in range(i,len(clust)):
                clust_graph.add_edge(i,n, dist= dist(clust[i],clust[n]))

        #create sub-graph of close points to use for geodesic distances
        sub_graph = [(u,v,d) for (u,v,d) in clust_graph.edges(data=True) if d['dist'] <=max_dist / 3]
        clust_graph = nx.Graph()
        clust_graph.add_edges_from(sub_graph)
        max_ratio = 0

        #calculate both euclidean and geodesic distances for each pair of endpoints
        for e1 in endpointsIndeces:
            p1 = clust[e1]
            for e2 in endpointsIndeces:
                # guard membership first: endpoints with no short incident edges are
                # absent from the rebuilt sub-graph, and has_path would raise for them
                if e1 != e2 and e1 in clust_graph and e2 in clust_graph \
                        and nx.has_path(clust_graph, source=e1, target=e2):
                    p2 = clust[e2]
                    path= nx.shortest_path(clust_graph,source=e1,target=e2,weight='dist')
                    geo_dist = 0
                    for i in range((len(path)-1)):
                        d = dist(clust[path[i]],clust[path[i+1]])
                        geo_dist = geo_dist + d
                    euclid_dist = dist(p1,p2)
                    ratio = geo_dist/euclid_dist
                    if ratio > max_ratio:
                        max_ratio = ratio
        #if the ratio is above the threshold mark it as an intercept cluster
        if(max_ratio > self.distance_ratio_threshold and label not in self.dist_intercept_clusters):
            self.dist_intercept_clusters.append(label)
        
        #Test 2: Sphericity

        #add the buffered radius to list of radii for future use
        self.clust_radii.append(max_dist*self.radius_buffer)
        
        #calculate the sphericity as the ratio of the hull volume to the volume
        #of the sphere centered at the cluster center with radius max_dist
        hull_vol = clust_hull.volume
        sphere_vol = 4 / 3 * math.pi * (max_dist ** 3)
        sphericity = hull_vol / sphere_vol
        # if this ratio is above the threshold, mark it as an intercept cluster
        if sphericity > self.sphericity_threshold:
            self.sphere_intercept_clusters.append(label)
        #print(str(max_ratio) +"  "+str(sphericity))
        
        #calculate spherictity
        #printProgressBar(c,len(self.cluster_points)-1)

        #Test 3: planar distance
        clust_plane = Plane.best_fit(clust)
        avg_planar_dist = 0
        for p in clust:
            avg_planar_dist = avg_planar_dist + clust_plane.distance_point(p)
        avg_planar_dist = (avg_planar_dist / len(clust)) / max_dist
        if avg_planar_dist > self.planar_threshold:
            self.planar_intercept_clusters.append(label)
        data = {
            'label': int(label),
            'sphericity': float(sphericity),
            'planar_dist': avg_planar_dist,
            'dist_ratio': float(max_ratio),
            'center': [float(avgPoint[0]), float(avgPoint[1]), float(avgPoint[2])],
            'adjacent_clusters': []
        }
        self.clust_data.append(data)
Example #40
def hasRelativePose(self, frame1, frame2):
    if frame1 not in self.graph or frame2 not in self.graph:
        return False
    return nx.has_path(self.graph, frame1, frame2)
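
# A minimal illustration of the check above (the frame names are hypothetical):

import networkx as nx

graph = nx.Graph()
graph.add_edge("camera", "base")  # each edge is a known relative pose
graph.add_edge("base", "lidar")

# A relative pose is recoverable iff both frames exist and share a component.
print(nx.has_path(graph, "camera", "lidar"))  # True
print("gps" in graph and nx.has_path(graph, "camera", "gps"))  # False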
Example #41
        end = input("Enter any caracter to continue : ")
    elif selection == '3':
        print("the successor of the element of the graph are : ")
        print(G.succ)

        print("the predecessors of the element of the graph are : ")
        print(G.pred)
        end = input("Enter any caracter to continue : ")
    elif selection == '4':
        if (nx.is_eulerian(G) == True):
            print("the graph is eulerien")
        else:
            print("the graph is not eulerien")
        n1 = int(input("Enter the origin : "))
        n2 = int(input("Enter the destination of the path : "))
        if (nx.has_path(G, n1, n2) == True):
            print("The path exist : ")
            paths = sorted(nx.all_simple_paths(G, n1, n2))
            print(paths)
        else:
            print("The path does not exist")
        end = input("Enter any caracter to continue : ")
    elif selection == '5':
        cycles = list(nx.simple_cycles(G))  #compute the cycle list once instead of three times
        if len(cycles) == 0:
            print("the graph has no cycle")
        else:
            print("the graph has : " + str(len(cycles)) + " cycle(s) ")
            li = cycles
            print(li)
            i = 0
Example #42
def connected_double_edge_swap(G, nswap=1, _window_threshold=3):
    """Attempts the specified number of double-edge swaps in the graph ``G``.

    A double-edge swap removes two randomly chosen edges ``(u, v)`` and ``(x,
    y)`` and creates the new edges ``(u, x)`` and ``(v, y)``::

     u--v            u  v
            becomes  |  |
     x--y            x  y

    If either ``(u, x)`` or ``(v, y)`` already exist, then no swap is performed
    so the actual number of swapped edges is always *at most* ``nswap``.

    Parameters
    ----------
    G : graph
       An undirected graph

    nswap : integer (optional, default=1)
       Number of double-edge swaps to perform

    _window_threshold : integer

       The window size below which connectedness of the graph will be checked
       after each swap.

       The "window" in this function is a dynamically updated integer that
       represents the number of swap attempts to make before checking if the
       graph remains connected. It is an optimization used to decrease the
       running time of the algorithm in exchange for increased complexity of
       implementation.

       If the window size is below this threshold, then the algorithm checks
       after each swap if the graph remains connected by checking if there is a
       path joining the two nodes whose edge was just removed. If the window
       size is above this threshold, then the algorithm performs all the swaps
       in the window and only then checks if the graph is still connected.

    Returns
    -------
    int
       The number of successful swaps

    Raises
    ------

    NetworkXError

       If the input graph is not connected, or if the graph has fewer than four
       nodes.

    Notes
    -----

    The initial graph ``G`` must be connected, and the resulting graph is
    connected. The graph ``G`` is modified in place.

    References
    ----------
    .. [1] C. Gkantsidis and M. Mihail and E. Zegura,
           The Markov chain simulation method for generating connected
           power law random graphs, 2003.
           http://citeseer.ist.psu.edu/gkantsidis03markov.html
    """
    if not nx.is_connected(G):
        raise nx.NetworkXError("Graph not connected")
    if len(G) < 4:
        raise nx.NetworkXError("Graph has less than four nodes.")
    n = 0
    swapcount = 0
    deg = G.degree()
    # Label key for nodes
    dk = list(n for n, d in G.degree())
    cdf = nx.utils.cumulative_distribution(list(d for n, d in G.degree()))
    window = 1
    while n < nswap:
        wcount = 0
        swapped = []
        # If the window is small, we just check each time whether the graph is
        # connected by checking if the nodes that were just separated are still
        # connected.
        if window < _window_threshold:
            # This Boolean keeps track of whether there was a failure or not.
            fail = False
            while wcount < window and n < nswap:
                # Pick two random edges without creating the edge list. Choose
                # source nodes from the discrete degree distribution.
                (ui, xi) = nx.utils.discrete_sequence(2, cdistribution=cdf)
                # If the source nodes are the same, skip this pair.
                if ui == xi:
                    continue
                # Convert an index to a node label.
                u = dk[ui]
                x = dk[xi]
                # Choose targets uniformly from neighbors.
                v = random.choice(list(G.neighbors(u)))
                y = random.choice(list(G.neighbors(x)))
                # If the target nodes are the same, skip this pair.
                if v == y:
                    continue
                if x not in G[u] and y not in G[v]:
                    G.remove_edge(u, v)
                    G.remove_edge(x, y)
                    G.add_edge(u, x)
                    G.add_edge(v, y)
                    swapped.append((u, v, x, y))
                    swapcount += 1
                n += 1
                # If G remains connected...
                if nx.has_path(G, u, v):
                    wcount += 1
                # Otherwise, undo the changes.
                else:
                    G.add_edge(u, v)
                    G.add_edge(x, y)
                    G.remove_edge(u, x)
                    G.remove_edge(v, y)
                    swapcount -= 1
                    fail = True
            # If one of the swaps failed, reduce the window size.
            if fail:
                window = int(math.ceil(window / 2))
            else:
                window += 1
        # If the window is large, then there is a good chance that a bunch of
        # swaps will work. It's quicker to do all those swaps first and then
        # check if the graph remains connected.
        else:
            while wcount < window and n < nswap:
                # Pick two random edges without creating the edge list. Choose
                # source nodes from the discrete degree distribution.
                (ui, xi) = nx.utils.discrete_sequence(2, cdistribution=cdf)
                # If the source nodes are the same, skip this pair.
                if ui == xi:
                    continue
                # Convert an index to a node label.
                u = dk[ui]
                x = dk[xi]
                # Choose targets uniformly from neighbors.
                v = random.choice(list(G.neighbors(u)))
                y = random.choice(list(G.neighbors(x)))
                # If the target nodes are the same, skip this pair.
                if v == y:
                    continue
                if x not in G[u] and y not in G[v]:
                    G.remove_edge(u, v)
                    G.remove_edge(x, y)
                    G.add_edge(u, x)
                    G.add_edge(v, y)
                    swapped.append((u, v, x, y))
                    swapcount += 1
                n += 1
                wcount += 1
            # If the graph remains connected, increase the window size.
            if nx.is_connected(G):
                window += 1
            # Otherwise, undo the changes from the previous window and decrease
            # the window size.
            else:
                while swapped:
                    (u, v, x, y) = swapped.pop()
                    G.add_edge(u, v)
                    G.add_edge(x, y)
                    G.remove_edge(u, x)
                    G.remove_edge(v, y)
                    swapcount -= 1
                window = int(math.ceil(window / 2))
    return swapcount
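
# A usage sketch for the function above (illustrative; assumes networkx,
# random and math are imported, as the function itself requires):

import networkx as nx

G = nx.cycle_graph(8)
before = sorted(d for _, d in G.degree())
swaps = connected_double_edge_swap(G, nswap=10)
after = sorted(d for _, d in G.degree())

assert before == after     # double-edge swaps preserve the degree sequence
assert nx.is_connected(G)  # and this variant also preserves connectedness
print(swaps, "successful swaps")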
Example #43
            else:
                if skipfunc.search(line): skipfuncs.add(line)
                else: G.add_edge(fields[3], fields[1], kind=' calls function ')
        if fields[2] == ' static variable ':
            G.add_edge(fields[1], fields[3], kind=fields[2])
            statics.add(fields[3])
        if fields[2] == ' known thread unsafe function ':
            G.add_edge(fields[1],
                       fields[3],
                       kind=' known thread unsafe function ')
            statics.add(fields[3])
fileinput.close()

for tfunc in sorted(toplevelfuncs):
    for static in sorted(statics):
        if nx.has_path(G, tfunc, static):
            path = nx.shortest_path(G, tfunc, static)

            print("Non-const static variable '" + re.sub(farg, "()", static) +
                  "' is accessed in call stack '", end=' ')
            for i in range(0, len(path) - 1):
                print(re.sub(farg, "()", path[i]) +
                      G[path[i]][path[i + 1]]['kind'], end=' ')
            print(re.sub(farg, "()", path[i + 1]) + "' ,", end=' ')
            for key in G[tfunc].keys():
                if 'kind' in G[tfunc][key] and G[tfunc][key]['kind'] == ' overrides function ':
                    print("'" + re.sub(farg, "()", tfunc) + "' overrides '" +
                          re.sub(farg, "()", key) + "'", end=' ')
            print()
Example #44
def find_transitive_relations(graph, new, relTypes=['A', 'I']):
    trels = []
    for relType in relTypes:
        #print 'relation: ', relType
        relGraph = filter_edge_types(graph, relType)
        relNew = filter_edge_types(new, relType)
        for (fr, to) in relNew.edges():
            # a duplicate edge, skip it and save time
            if relGraph.has_edge(fr, to):
                continue

            #print 'considering ', fr, to
            if nx.has_path(relGraph, fr, to):
                trels.append((fr, to, relType))

            #try:
            #p = nx.shortest_path(relGraph, fr, to)
            #except nx.NetworkXNoPath:
            #continue
            #else:
            #if len(p) > 2: # this is for the same edges
            #trels.append((fr, to, relType))
    return trels


#end

#a = load_BMG_to_networkx('g1.bmg')
#b = load_BMG_to_networkx('g2.bmg')
#print find_transitive_relations(a, b)

#a = load_BMG_to_networkx('SA_ET_crosstalk_JA_mali_space.bmg')
#reset_edge_colors(a)
#s = export_to_BMG(a)
#fp = open('test.bmg', 'w')
#fp.write(s)
#fp.close()

#a = load_BMG_to_networkx('graph1.bmg')
#b = load_BMG_to_networkx('graph2.bmg')
#e = merge(a,b)
#fp = open('graph123.bmg', 'w')
#fp.write(export_to_BMG(e))
#fp.close()

#g = merge_incremental_graph(a, b)
#s = export_to_BMG(g)
#fp = open('graph12.bmg', 'w')
#fp.write(s)
#fp.close()

#rels = [('component_eds1', 'component_sa', 'A'),
#('component_eds1', 'component_pr', 'A'),
#('component_npr1', 'component_pr', 'A'),
#('component_eds5', 'component_pr1', 'A'),
#('component_ja', 'component_pdf1.2', 'A'),
#('component_ja', 'component_vsp', 'A'),
#('component_ja', 'component_thi2.1', 'A'),
#('component_sa', 'component_tga2', 'A'),
#('component_sa', 'component_pr', 'A'),
#('component_sa', 'component_pr1', 'A'),
#('component_ethylene', 'component_hls1', 'A'),
#('component_ethylene', 'component_pdf1.2', 'A'),
#('component_ethylene', 'component_arf2', 'A'),
#('component_ethylene', 'component_ein2', 'A'),
#('component_ethylene', 'component_ein3', 'A'),
#('component_ethylene', 'component_erf1', 'A'),
#('component_sid2', 'component_pr1', 'A'),
#('component_sid2', 'component_pr', 'A'),
#('component_ein2', 'component_erf1', 'A'),
#('component_ctr1', 'component_erf1', 'A'),
#('component_mapk', 'component_ein2', 'A'),
#('component_mapk', 'component_ein3', 'A'),
#('component_mapk', 'component_erf1', 'A'),
#('component_pad4', 'component_npr1', 'A'),
#('component_pad4', 'component_sa', 'A')]

#a = load_BMG_to_networkx('before_nov+triplets.bmg')
#g = colour_relations(a, rels)
Example #45
    def _join(self, t1, t2, translate):
        """
        Get the least upper bound of t1 and t2.

        :param t1:
        :param t2:
        :return:
        """

        # Trivial cases
        t1 = translate(t1)
        t2 = translate(t2)

        if t1 == t2:
            return t1
        if isinstance(t1, TopType):
            return t2
        elif isinstance(t2, TopType):
            return t1

        if isinstance(t1, TypeVariableReference) and not isinstance(t2, TypeVariableReference):
            return t1
        elif isinstance(t2, TypeVariableReference) and not isinstance(t1, TypeVariableReference):
            return t2

        # consult the graph
        t1_cls = self._abstract(t1)
        t2_cls = self._abstract(t2)

        if t1_cls in BASE_LATTICE and t2_cls in BASE_LATTICE:
            queue = [ t1_cls ]
            while queue:
                n = queue[0]
                queue = queue[1:]

                if networkx.has_path(BASE_LATTICE, n, t2_cls):
                    return self._concretize(n, t1, t2, translate)
                # go up
                queue.extend(BASE_LATTICE.predecessors(n))

        # handling Struct
        if t1_cls is Struct and t2_cls is Struct:
            fields = { }
            for offset in sorted(set(itertools.chain(t1.fields.keys(), t2.fields.keys()))):
                if offset in t1.fields and offset in t2.fields:
                    v = self._join(t1.fields[offset], t2.fields[offset], translate)
                elif offset in t1.fields:
                    v = t1.fields[offset]
                elif offset in t2.fields:
                    v = t2.fields[offset]
                else:
                    raise Exception("Impossible")
                fields[offset] = v
            return Struct(fields)

        if t1_cls is Pointer64 and t2_cls is Struct:
            # swap them
            t1, t1_cls, t2, t2_cls = t2, t2_cls, t1, t1_cls
        if t1_cls is Struct and len(t1.fields) == 1 and 0 in t1.fields:
            if t1.fields[0].size == 8 and t2_cls is Pointer64:
                # they are equivalent
                # e.g., struct {0: int64} == ptr64(int8)
                # return t2 since t2 is more specific
                return t2
            elif t1.fields[0].size == 4 and t2_cls is Pointer32:
                return t2

        # import ipdb; ipdb.set_trace()
        return TopType()
Example #46
min_cycle = min(cyc_len_list)
avg_cycle = sum(cyc_len_list) / len(cyc_len_list)

print("max cycleLength: ", max_cycle)
print("avg cycleLength: ", avg_cycle)
print("min cycleLength: ", min_cycle)

flattened_cycles = []
for cycle in cyc_list:
    flattened_cycles.extend(cycle)
cyc_item_list = [i[0] for i in cyc_list]
terminal_paths = []
for node in term_list:
    for target in cyc_item_list:
        if nx.has_path(DG, node, target):
            # only the shortest path is used below, so take the first yielded
            # path instead of materializing every simple path
            shortest = next(nx.shortest_simple_paths(DG, node, target))
            terminal_paths.append(shortest)

tail_length_list = []
for path in terminal_paths:
    count = 0
    for val in path:
        if val not in flattened_cycles:
            count += 1
    tail_length_list.append(count)

max_tail = max(tail_length_list)
avg_tail = sum(tail_length_list) / len(tail_length_list)

print("max tailLength: ", max_tail)
Example #47
plt.legend(handles=[black_line, green_line, blue_line], loc = 'lower right')
plt.axis('off')
plt.savefig("DirectedGraph.png") # save as png.
plt.show() # display
# This is a bit crowded, may need to get rid of some unnecessary edges?

# Find the heaviest path!
def get_weight(path):
    total = 0
    for i in range(len(path) - 1):  # sum over every edge on the path
        total += DG[path[i]][path[i + 1]]['weight']
    return total
heaviest_paths = []
for i in range(len(test)):
    for j in range(len(test)):
        if i < j and nx.has_path(DG, i, j):
            heaviest_paths.append(max(nx.all_simple_paths(DG, i, j),
                                      key=get_weight))
heaviest_path = max((path for path in heaviest_paths),
                    key=lambda path: get_weight(path))
for path in heaviest_paths:
    print(str(path) + ' weighs: ' + str(get_weight(path)))
print(str(heaviest_path) + " is the heaviest")
	
# Get the element id of the one we want to choose
# right now the criteria is the soonest 80%+ pick that is not cold or none
pick = data.loc[ (~data['selectionID'].isin(["unselectable"])) & (data['perc'] > 80) & (~data['temp'].isin(['None']))]
#,'Cold']))]
pick = pick.loc[min(pick.index), 'selectionID']

### Connect to website and click the link of soonest and/or hottest/ most likely pick ###
def defense():
    '''for key,value in voltages.iteritems():
		weight[key[2]]=value'''
    initial_l = {}
    initial_l_edges = {}
    initial_l = nx.load_centrality(H, normalized=True, weight='cable')
    #bc=sorted(nx.edge_betweenness_centrality(H,normalized=True,weight='cable',reverse=True)
    #print initial_l_edges
    #nodes_to_remove=nx.load_centrality(H)
    nodes_to_remove = nx.out_degree_centrality(H)
    #nodes_to_remove=nx.in_degree_centrality(H)
    #nodes_to_remove=nx.closeness_centrality(H,distance='length',normalized=True)
    #nodes_to_remove=nx.betweenness_centrality(H,weight='cable',normalized=True)
    #hub,authorities=nx.hits(H)
    lamda = 2
    for m, n in initial_l.items():
        initial_l[m] = lamda * n
    m = 0
    remove = []
    remove_nodes = []
    remove_edges = []
    pp = 0
    #High_centrality={}
    '''for key,value in sorted(nodes_to_remove.iteritems(), key=lambda (k,v): (v,k),reverse=True):
		m+=1
		if m>=80 and m<=100:
			remove_nodes.append(key)
	rn=random.choice(remove_nodes)
	remove.append(rn)
	print rn'''
    '''appending 100 nodes that are to be removed from network'''
    for key, value in sorted(nodes_to_remove.items(),
                             key=lambda kv: (kv[1], kv[0]),
                             reverse=True):
        m += 1
        if m <= 1000:
            remove.append(key)
    in_wcc = len(max(nx.weakly_connected_components(H), key=len))
    '''n_wcc=nx.number_weakly_connected_components(H)
	in_scc=len(max(nx.strongly_connected_components(H), key=len))
	n_scc=nx.number_strongly_connected_components(H)'''
    g = 0
    successor = []
    trans_l = {}
    '''removing the nodes which are selected'''
    while g == 0:
        rr = list(remove)
        ee = list(remove_edges)
        for r in rr:
            if r in set(generators):
                generators.remove(r)
            if V.nodes[r]['color'] == 'red' or r in set(defended):
                remove.remove(r)
        for e in ee:
            if V.edges[e[0], e[1]]['color'] == 'red':
                remove_edges.remove(e)
        del rr[:]
        del ee[:]
        H.remove_edges_from(remove_edges)
        for ind, eee in enumerate(remove_edges):
            if remove_edges[ind][1] not in set(remove):
                remove.append(remove_edges[ind][1])
        for nn in remove:
            for x in list(H.successors(nn)):
                if x not in set(successor) and x not in set(remove):
                    successor.append(x)
        H.remove_nodes_from(remove)
        for n in list(V.in_edges(remove)):
            V[n[0]][n[1]]['color'] = 'red'
            edges_data[(n[0], n[1])][6] = 'r-'
        for oe in list(V.out_edges(remove)):
            V[oe[0]][oe[1]]['color'] = 'red'
            edges_data[(oe[0], oe[1])][6] = 'r-'
        for l in remove:
            V.nodes[l]['color'] = 'red'
            nodes_data[l][5] = 'ro'
        for ed in remove_edges:
            V.edges[ed[0], ed[1]]['color'] = 'red'
            edges_data[(ed[0], ed[1])][6] = 'r-'
        del remove[:]
        del remove_edges[:]
        '''removing components not containing a generator'''
        '''the inner loop is for cascade caused by removed nodes'''
        a = 0
        while a == 0:
            # filter out generators, already-failed nodes, and defended nodes
            # (rebuilding the list avoids the stale-index bug of popping while iterating)
            successor = [rz for rz in successor
                         if rz not in generators
                         and V.nodes[rz]['color'] != 'red'
                         and rz not in set(defended)]
            for su in successor:
                itera = 0
                # loop variable renamed from g to gen so the outer while-flag g is not clobbered
                for gen in generators:
                    if su == gen:
                        itera += 1
                        break
                    elif nx.has_path(H, gen, su):
                        itera += 1
                        break
                if itera == 0:
                    remove.append(su)
            del successor[:]
            if len(remove) != 0:
                #dont have to pop generators here cause they are not getting added in the first place
                remove = [r for r in remove if V.nodes[r]['color'] != 'red']
                for nn in remove:
                    for x in list(H.successors(nn)):
                        if x not in set(successor) and x not in set(remove):
                            successor.append(x)
                H.remove_nodes_from(remove)
                for n in list(V.in_edges(remove)):
                    V[n[0]][n[1]]['color'] = 'red'
                    edges_data[(n[0], n[1])][6] = 'r-'
                for oe in list(V.out_edges(remove)):
                    V[oe[0]][oe[1]]['color'] = 'red'
                    edges_data[(oe[0], oe[1])][6] = 'r-'
                for l in remove:
                    V.nodes[l]['color'] = 'red'
                    nodes_data[l][5] = 'ro'
            else:
                a += 1
            del remove[:]
        '''checking for nodes which are overloaded and appending them into remove list'''
        trans_l = nx.load_centrality(H, normalized=True, weight='cable')
        #trans_l_edges=nx.edge_betweenness_centrality(H,normalized=True,weight='cable')
        b = 0
        for key, value in trans_l.items():
            if trans_l[key] > initial_l[key] and key not in generators:
                remove.append(key)
                b += 1
        '''for ky,vl in trans_l_edges.iteritems():
			if trans_l_edges[ky]>initial_l_edges[ky]:
				print (ky," ",vl," ")
				remove_edges.append(ky)
				mm+=1'''
        trans_l.clear()
        #trans_l_edges.clear()
        if b == 0:
            break
    h = 0
    for cc, vv in nodes_data.items():
        if vv[5] == 'ro':
            h += 1
    #print "node removed",rn
    f_wcc = len(max(nx.weakly_connected_components(H), key=len))
    #f_scc=len(max(nx.strongly_connected_components(H), key=len))
    #nx.write_graphml(V,"outdegreelambda.graphml")
    print "cascade size %f" % ((float(in_wcc) - float(f_wcc)) / float(in_wcc))
    print "%d wcc %d" % (in_wcc, f_wcc)
    print "no. of nodes effected after removing 100 nodes", h
    '''print "%d numberwcc %d"%(n_wcc,nx.number_weakly_connected_components(H))
	print "%d scc %d"%(in_scc,f_scc)
	print "%d numberscc %d"%(n_scc,nx.number_strongly_connected_components(H))'''
    '''creating a csv file containing the nodes and edges with cascaded nodes colored as red'''
    o = []
    with open('../../NS_project/Code/outcentrality_vertices.csv',
              'w', newline='') as csvfile:
        nodewriter = csv.writer(csvfile, delimiter=',')
        header = ['v_id', 'lon', 'lat', 'color']
        nodewriter.writerow(header)
        for da, it in nodes_data.items():
            for l in range(0, 3):
                o.append(str(it[l]))
            o.append(str(it[5]))
            nodewriter.writerow(o)
            del o[:]
    with open('../../NS_project/Code/outcentrality_edges.csv',
              'w', newline='') as csvfile1:
        edgewriter = csv.writer(csvfile1, delimiter=',')
        header1 = ['l_id', 'v_id_1', 'v_id_2', 'color']
        edgewriter.writerow(header1)
        for db, itt in edges_data.items():
            for l in range(0, 3):
                o.append(str(itt[l]))
            o.append(str(itt[6]))
            edgewriter.writerow(o)
            del o[:]
Example #49
    def can_be_applied(graph, candidate, expr_index, sdfg, strict=False):
        in_array = graph.nodes()[candidate[RedundantSecondArray._in_array]]
        out_array = graph.nodes()[candidate[RedundantSecondArray._out_array]]

        in_desc = in_array.desc(sdfg)
        out_desc = out_array.desc(sdfg)

        # Ensure in degree is one (only one source, which is in_array)
        if graph.in_degree(out_array) != 1:
            return False

        # Make sure that the candidate is a transient variable
        if not out_desc.transient:
            return False

        # 1. Get edge e1 and extract/validate subsets for arrays A and B
        e1 = graph.edges_between(in_array, out_array)[0]
        a_subset, b1_subset = _validate_subsets(e1, sdfg.arrays)

        if strict:
            # In strict mode, make sure the memlet covers the removed array
            if not b1_subset:
                return False
            subset = copy.deepcopy(b1_subset)
            subset.squeeze()
            shape = [sz for sz in out_desc.shape if sz != 1]
            if any(m != a for m, a in zip(subset.size(), shape)):
                return False

            # NOTE: Library node check
            # The transformation must not apply in strict mode if out_array is
            # not a view, is input to a library node, and an access or a view
            # of in_desc is also output to the same library node.
            # The reason is that the application of the transformation will lead
            # to in_desc being both input and output of the library node.
            # We do not know if this is safe.

            # First find the true in_desc (in case in_array is a view).
            true_in_desc = in_desc
            if isinstance(in_desc, data.View):
                e = sdutil.get_view_edge(graph, in_array)
                if not e:
                    return False
                true_in_desc = sdfg.arrays[e.dst.data]

            if not isinstance(out_desc, data.View):

                edges_to_check = []
                for a in graph.out_edges(out_array):
                    if isinstance(a.dst, nodes.LibraryNode):
                        edges_to_check.append(a)
                    elif (isinstance(a.dst, nodes.AccessNode)
                          and isinstance(sdfg.arrays[a.dst.data], data.View)):
                        for b in graph.out_edges(a.dst):
                            edges_to_check.append(graph.memlet_path(b)[-1])

                for a in edges_to_check:
                    if isinstance(a.dst, nodes.LibraryNode):
                        for b in graph.out_edges(a.dst):
                            if isinstance(b.dst, nodes.AccessNode):
                                desc = sdfg.arrays[b.dst.data]
                                if isinstance(desc, data.View):
                                    e = sdutil.get_view_edge(graph, b.dst)
                                    if not e:
                                        return False
                                    desc = sdfg.arrays[e.dst.data]
                                    if desc is true_in_desc:
                                        return False

            # In strict mode, check if the state has two or more access nodes
            # for in_array and at least one of them is a write access. There
            # might be a RW, WR, or WW dependency.
            accesses = [
                n for n in graph.nodes() if isinstance(n, nodes.AccessNode)
                and n.desc(sdfg) == in_desc and n is not in_array
            ]
            if len(accesses) > 0:
                if (graph.in_degree(in_array) > 0
                        or any(graph.in_degree(a) > 0 for a in accesses)):
                    # We need to ensure that a data race will not happen if we
                    # remove in_array.
                    # First, we simplify the graph
                    G = helpers.simplify_state(graph)
                    # Loop over the accesses
                    for a in accesses:
                        subsets_intersect = False
                        for e in graph.in_edges(a):
                            _, subset = _validate_subsets(e,
                                                          sdfg.arrays,
                                                          dst_name=a.data)
                            res = subsets.intersects(a_subset, subset)
                            if res == True or res is None:
                                subsets_intersect = True
                                break
                        if not subsets_intersect:
                            continue
                        try:
                            has_bward_path = nx.has_path(G, a, in_array)
                        except NodeNotFound:
                            has_bward_path = nx.has_path(graph.nx, a, in_array)
                        try:
                            has_fward_path = nx.has_path(G, in_array, a)
                        except NodeNotFound:
                            has_fward_path = nx.has_path(graph.nx, in_array, a)
                        # If there is no path between the access nodes
                        # (disconnected components), then it is definitely
                        # possible to have data races. Abort.
                        if not (has_bward_path or has_fward_path):
                            return False
                        # If there is a forward path then a must not be a direct
                        # successor of in_array.
                        if has_fward_path and a in G.successors(in_array):
                            for src, _ in G.in_edges(a):
                                if src is in_array:
                                    continue
                                if (nx.has_path(G, in_array, src)
                                        and src != out_array):
                                    continue
                                return False

        # Make sure that both arrays are using the same storage location
        # and are of the same type (e.g., Stream->Stream)
        if in_desc.storage != out_desc.storage:
            return False
        if type(in_desc) != type(out_desc):
            if isinstance(in_desc, data.View):
                # Case View -> Access
                # If the View points to the Access (and has a different shape?)
                # then we should (probably) not remove the Access.
                e = sdutil.get_view_edge(graph, in_array)
                if e and e.dst is out_array and in_desc.shape != out_desc.shape:
                    return False
                # Check that the View's immediate ancestors are Accesses.
                # Otherwise, the application of the transformation will result
                # in an ambiguous View.
                view_ancestors_desc = [
                    e.src.desc(sdfg)
                    if isinstance(e.src, nodes.AccessNode) else None
                    for e in graph.in_edges(in_array)
                ]
                if any([
                        not desc or isinstance(desc, data.View)
                        for desc in view_ancestors_desc
                ]):
                    return False
            elif isinstance(out_desc, data.View):
                # Case Access -> View
                # If the View points to the Access and has the same shape,
                # it can be removed
                e = sdutil.get_view_edge(graph, out_array)
                if e and e.src is in_array and in_desc.shape == out_desc.shape:
                    return True
                return False
            else:
                # Something else, for example, Stream
                return False
        else:
            # Two views connected to each other
            if isinstance(in_desc, data.View):
                return False

        # Find occurrences in this and other states
        occurrences = []
        for state in sdfg.nodes():
            occurrences.extend([
                n for n in state.nodes()
                if isinstance(n, nodes.AccessNode) and n.desc(sdfg) == out_desc
            ])
        for isedge in sdfg.edges():
            if out_array.data in isedge.data.free_symbols:
                occurrences.append(isedge)

        if len(occurrences) > 1:
            return False

        # Check whether the data copied from the first datanode covers
        # the subsets of all the output edges of the second datanode.
        # We assume the following pattern: A -- e1 --> B -- e2 --> others

        # 2. Iterate over the e2 edges
        for e2 in graph.out_edges(out_array):
            # 2-a. Extract/validate subsets for array B and others
            try:
                b2_subset, _ = _validate_subsets(e2, sdfg.arrays)
            except NotImplementedError:
                return False
            # 2-b. Check where b1_subset covers b2_subset
            if not b1_subset.covers(b2_subset):
                return False
            # 2-c. Validate subsets in memlet tree
            # (should not be needed for valid SDFGs)
            path = graph.memlet_tree(e2)
            for e3 in path:
                if e3 is not e2:
                    try:
                        _validate_subsets(e3,
                                          sdfg.arrays,
                                          src_name=out_array.data)
                    except NotImplementedError:
                        return False

        return True
Example #50
            print "fourgrams"
            for grams in fourgrams2:
                try:
                    entval = grams[0] + ' ' + grams[1] + ' ' + grams[
                        2] + ' ' + grams[3]
                    #print entval

                    if entval in G:
                        #print entval
                        if count <= 2:
                            entity_list.append(str(entval))
                            count = count + 1

                        for x in range(0, len(keyword)):
                            if nx.has_path(G,
                                           source=keyword[x].lower(),
                                           target=entval):
                                if x == 3 or x == 4:
                                    key_len[x] = key_len[x] + len(
                                        nx.shortest_path(
                                            G,
                                            source=keyword[x].lower(),
                                            target=entval))
                                    key_div[x] = key_div[x] + 2.01
                                elif x == 5:
                                    key_len[x] = key_len[x] + len(
                                        nx.shortest_path(
                                            G,
                                            source=keyword[x].lower(),
                                            target=entval))
                                    key_div[x] = key_div[x] + 1.65
def path_complete_condition(G):
    return all(nx.has_path(G, x, y) for x, y in transmit_node_pairs)
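
# A self-contained illustration of the predicate above (transmit_node_pairs is
# a free variable in the original; here it is assumed explicitly):

import networkx as nx

transmit_node_pairs = [(0, 2), (1, 3)]

def path_complete_condition(G):
    return all(nx.has_path(G, x, y) for x, y in transmit_node_pairs)

G = nx.path_graph(4)               # 0-1-2-3
print(path_complete_condition(G))  # True
G.remove_edge(1, 2)
print(path_complete_condition(G))  # False: 0 can no longer reach 2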
Example #52
"""
--- Day 7: Handy Haversacks ---
https://adventofcode.com/2020/day/7
"""
from aocd import data
from collections import deque
import networkx as nx

g = nx.DiGraph()
for line in data.splitlines():
    left, right = line.split("s contain ")
    rights = right.split(", ")
    for right in rights:
        n, right = right.split(None, 1)
        right = right.rstrip("s.")
        n = 0 if n == "no" else int(n)
        g.add_edge(left, right, weight=n)

print("part a:", sum(nx.has_path(g, b, "shiny gold bag") for b in g.nodes) - 1)

b = -1
q = deque([(1, "shiny gold bag")])
while q:
    w, b0 = q.popleft()
    b += w
    q.extend((w * g[b0][b1]["weight"], b1) for b1 in g[b0])

print("part b:", b)
print('################################')
################################################################
## Create Paths
################################################################
print('################################')
print('Loading Paths')
print('################################')
f = open(sFileName, 'w')
l = 0
sline = 'ID|Cost|StartAt|EndAt|Path|Measure'
if nVSet:
    print('0', sline)
f.write(sline + '\n')
for sNode0 in G.nodes():
    for sNode1 in G.nodes():
        if sNode0 != sNode1 and \
            nx.has_path(G, sNode0, sNode1) and \
            nx.shortest_path_length(G, \
              source=sNode0, \
              target=sNode1, \
              weight='DistanceMiles') < nMaxPath:
            l += 1
            sID = '{:.0f}'.format(l)
            spath = ','.join(nx.shortest_path(G, \
              source=sNode0, \
              target=sNode1, \
              weight='DistanceMiles'))
            slength = '{:.6f}'.format(\
              nx.shortest_path_length(G, \
              source=sNode0, \
              target=sNode1, \
              weight='DistanceMiles'))
Example #54
    def _get_dag(self):
        g = nx.DiGraph()
        dag_path = '{}/.dag'.format(Settings.training_data_path)
        if os.path.exists(dag_path):
            g_file = json.load(open(dag_path, 'r'))
            g.add_nodes_from(g_file['nodes'])
            for u, v in g_file['edges']:
                g.add_edge(u, v)

            return g

        g.add_nodes_from(str(x[1]) for x in self._tpl.get_templates())
        edges = []
        times = {}
        for i in tqdm(range(Settings.n_training_data)):
            self._tpl.init('{}/{}.csv'.format(Settings.training_data_path, i),
                           True)
            cls = Clustering(self._tpl, self._top)
            root_cause = cls.get_root_cause()
            mapping = cls.get_node_to_log_mapping()
            if root_cause is None:
                continue
            top = cls.cluster_by_topology(root_cause['node'])
            '''
            for u, v in top.edges:
                for logu in mapping[u]:
                    for logv in mapping[v]:
                        s = str(logu['template'])
                        t = str(logv['template'])
                        if s == t:
                            continue
                        if (s, t) not in times:
                            times[s, t] = 0
                        times[s, t] += 1
                        if times[s, t] >= 9:
                            edges.append((s, t, times[s, t]))
            '''
            s = str(root_cause['template'])
            for node in top.nodes:
                for log in mapping[node]:
                    t = str(log['template'])
                    if s == t:
                        continue
                    if (s, t) not in times:
                        times[s, t] = 0
                    times[s, t] += 1
                    if times[s, t] >= 3:
                        edges.append((s, t, times[s, t]))

        edges.sort(key=lambda x: x[2], reverse=True)

        for u, v, w in edges:
            try:
                if not nx.has_path(g, v, u):
                    g.add_edge(u, v)
            except nx.NodeNotFound:
                g.add_edge(u, v)

        json.dump({
            'edges': list(g.edges),
            'nodes': list(g.nodes)
        }, open(dag_path, 'w'))

        return g
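
# The has_path check in the loop above is a standard way to keep an
# incrementally built graph acyclic: adding u -> v can only close a cycle if v
# already reaches u. The same pattern in isolation (a minimal sketch, with
# membership tests in place of the NodeNotFound handler):

import networkx as nx

g = nx.DiGraph()
for u, v in [("a", "b"), ("b", "c"), ("c", "a")]:
    # reject any edge whose reverse reachability would close a cycle
    if u not in g or v not in g or not nx.has_path(g, v, u):
        g.add_edge(u, v)

print(list(g.edges()))  # ("c", "a") is rejected because a already reaches c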
Example #55
def validate(order):
    assert isinstance(order, list)
    assert set(order) == set(DG)
    for u, v in combinations(order, 2):
        assert not nx.has_path(DG, v, u)
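
# A sketch of how a check like this pairs with a topological sort (assuming DG
# is a DiGraph in scope, as in the original):

import networkx as nx
from itertools import combinations

DG = nx.DiGraph([(1, 2), (2, 3), (1, 3)])
order = list(nx.topological_sort(DG))
validate(order)  # in a valid order, no later node can reach an earlier one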
Example #56
    def _meet(self, t1, t2, translate):
        """
        Get the greatest lower bound of t1 and t2.

        :param t1:
        :param t2:
        :return:
        """

        t1 = translate(t1)
        t2 = translate(t2)

        if t1 == t2:
            return t1
        elif isinstance(t1, BottomType):
            return t2
        elif isinstance(t2, BottomType):
            return t1

        if isinstance(t1, TypeVariableReference) and not isinstance(
                t2, TypeVariableReference):
            return t1
        elif isinstance(t2, TypeVariableReference) and not isinstance(
                t1, TypeVariableReference):
            return t2

        # consult the graph
        t1_cls = self._abstract(t1)
        t2_cls = self._abstract(t2)

        if t1_cls in self._base_lattice and t2_cls in self._base_lattice:
            queue = [t1_cls]
            while queue:
                n = queue[0]
                queue = queue[1:]

                if networkx.has_path(self._base_lattice, t2_cls, n):
                    return self._concretize(n, t1, t2, self._meet, translate)
                # go down
                queue.extend(self._base_lattice.successors(n))

        # handling Struct
        if t1_cls is Struct and t2_cls is Struct:
            fields = {}
            for offset in sorted(
                    set(itertools.chain(t1.fields.keys(), t2.fields.keys()))):
                if offset in t1.fields and offset in t2.fields:
                    v = self._meet(t1.fields[offset], t2.fields[offset],
                                   translate)
                elif offset in t1.fields:
                    v = t1.fields[offset]
                elif offset in t2.fields:
                    v = t2.fields[offset]
                else:
                    raise Exception("Impossible")
                fields[offset] = v
            return Struct(fields=fields)

        # single element and single-element struct
        if issubclass(t2_cls, Int) and t1_cls is Struct:
            # swap them
            t1, t1_cls, t2, t2_cls = t2, t2_cls, t1, t1_cls
        if issubclass(t1_cls, Int) and t2_cls is Struct and len(
                t2.fields) == 1 and 0 in t2.fields:
            # e.g., char & struct {0: char}
            return Struct(fields={0: self._meet(t1, t2.fields[0], translate)})

        ptr_class = self._pointer_class()

        # Struct and Pointers
        if t1_cls is ptr_class and t2_cls is Struct:
            # swap them
            t1, t1_cls, t2, t2_cls = t2, t2_cls, t1, t1_cls
        if t1_cls is Struct and len(t1.fields) == 1 and 0 in t1.fields:
            if t1.fields[0].size == 8 and t2_cls is Pointer64:
                # they are equivalent
                # e.g., struct {0: int64} == ptr64(int8)
                # return t2 since t2 is more specific
                return t2
            elif t1.fields[0].size == 4 and t2_cls is Pointer32:
                return t2

        # import ipdb; ipdb.set_trace()
        return BottomType()
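
# Both _meet above and the _join shown earlier reduce the lattice walk to
# has_path queries. The same idea on a toy lattice (illustrative only, not the
# actual BASE_LATTICE):

import networkx as nx

# edges point from more general to more specific types
lattice = nx.DiGraph([("top", "int64"), ("int64", "int32"), ("int32", "bottom")])

def join(a, b):
    # walk upward from a; the first node that can still reach b bounds both
    queue = [a]
    while queue:
        n = queue.pop(0)
        if nx.has_path(lattice, n, b):
            return n
        queue.extend(lattice.predecessors(n))
    return "top"

print(join("int32", "int64"))  # int64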
Example #57
    def apply(cls,
              stack,
              model,
              feeders=100,
              equipment_type=None,
              selection=('Random', 15),
              seed=0,
              placement_folder=''):
        subset = []
        if selection is None:
            return model

        # TODO: Apply placements to feeders selectively. Currently applying to all equipment in the distribution system

        # Currently just including random selections. Need to also include and document other selection options
        if selection[0] == 'Reclosers':
            # Get a subset of nodes at the end of Reclosers. This algorithm finds the closest goabs to the feeder head (in topological order)
            # without common ancestry, i.e. no goab should be upstream of another goab. If this is not possible,
            # the number of reclosers is decreased

            # Percentage of feeders that each Reclosers number is used for, e.g. if selection[1] = 4 and feeders = [50,40,30,20], 50% of feeders have the 1st goab, 40% of feeders have the second, etc.
            feeders_str = str(feeders)
            if isinstance(feeders, list):
                feeders_str = ''
                for f in feeders:
                    feeders_str = feeders_str + str(f) + '-'
                feeders_str = feeders_str.strip('-')

            file_name = str(feeders_str) + '_Node_' + selection[0] + '-' + str(
                selection[1]) + '-' + str(selection[2]) + '.json'
            all_goabs = {}
            random.seed(seed)

            tmp_network = Network()
            tmp_network.build(model, 'st_mat')
            tmp_network.set_attributes(model)
            tmp_network.remove_open_switches(model)
            tmp_network.rebuild_digraph(model, 'st_mat')
            sorted_elements = []
            for element in nx.topological_sort(tmp_network.digraph):
                sorted_elements.append(element)
            for i in model.models:
                if isinstance(
                        i,
                        Line) and i.is_recloser is not None and i.is_recloser:
                    is_open = False
                    for wire in i.wires:
                        if wire.is_open:
                            is_open = True
                    if is_open:
                        continue
                    if hasattr(
                            i, 'feeder_name'
                    ) and i.feeder_name is not None and i.feeder_name != 'subtransmission':
                        if i.feeder_name in all_goabs:
                            all_goabs[i.feeder_name].append(i.name)
                        else:
                            all_goabs[i.feeder_name] = [i.name]

            goab_key_list = list(all_goabs.keys())
            random.seed(seed)
            random.shuffle(goab_key_list)
            goab_counter = 0
            for key in goab_key_list:
                goab_counter += 1
                feeder_goabs_dic = {
                }  # Topological sorting done by node. This matches goabs to their end-node
                for goab in all_goabs[key]:
                    feeder_goabs_dic[
                        model[goab].
                        to_element] = goab  # shouldn't have multiple switches ending at the same node
                feeder_goabs = []
                feeder_goab_ends = []
                for element in sorted_elements:
                    if element in feeder_goabs_dic:
                        feeder_goabs.append(feeder_goabs_dic[element])
                        feeder_goab_ends.append(element)
                connectivity_matrix = [[
                    False for i in range(len(feeder_goabs))
                ] for j in range(len(feeder_goabs))]
                for i in range(len(feeder_goabs)):
                    goab1 = feeder_goab_ends[i]
                    for j in range(i + 1, len(feeder_goabs)):
                        goab2 = feeder_goab_ends[j]
                        if goab1 == goab2:
                            continue
                        connected = nx.has_path(tmp_network.digraph, goab1,
                                                goab2)
                        connectivity_matrix[i][j] = connected
                        if connected:
                            connectivity_matrix[j][i] = connected

                selected_goabs = []
                num_goabs = selection[2]
                finished = False
                if num_goabs == 0:
                    finished = True
                while not finished:
                    for i in range(len(feeder_goabs)):
                        current_set = set([i])
                        for j in range(i + 1, len(feeder_goabs)):
                            skip_this_one = False
                            for k in current_set:
                                if connectivity_matrix[j][
                                        k]:  #i.e. see if the candidate node has anything in common upstream or downstream
                                    skip_this_one = True
                                    break
                            if skip_this_one:
                                continue
                            current_set.add(j)
                            if len(current_set) == num_goabs:
                                break
                        if len(current_set) == num_goabs:
                            finished = True
                            for k in current_set:
                                selected_goabs.append(feeder_goabs[k])
                            break
                    if not finished:
                        num_goabs -= 1

                for i in range(min(len(selected_goabs), selection[1])):
                    if goab_counter / float(
                            len(goab_key_list)) * 100 <= feeders[i]:
                        subset.append(model[selected_goabs[i]].to_element)

        if selection[0] == 'Random':
            class_equipment_type = locate(equipment_type)
            all_equipment = []
            for i in model.models:
                if isinstance(i, class_equipment_type):
                    all_equipment.append(i.name)

            if len(selection) == 3:
                random.seed(seed)
                random.shuffle(all_equipment)
                start_pos = math.floor(
                    len(all_equipment) * float(selection[1]) / 100.0)
                end_pos = math.floor(
                    len(all_equipment) * float(selection[2]) / 100.0)
                subset = all_equipment[start_pos:end_pos]
                file_name = str(feeders) + '_' + equipment_type.split(
                    '.')[-1] + '_' + selection[0] + '-' + str(
                        selection[1]) + '-' + str(
                            selection[2]) + '_' + str(seed) + '.json'
            if len(selection) == 2:
                random.seed(seed)
                subset = random.sample(
                    all_equipment,
                    math.floor(
                        len(all_equipment) * float(selection[1]) / 100.0))
                file_name = str(feeders) + '_' + equipment_type.split(
                    '.')[-1] + '_' + selection[0] + '-' + str(
                        selection[1]) + '_' + str(seed) + '.json'

        if not os.path.exists(placement_folder):
            os.makedirs(placement_folder)

        with open(os.path.join(placement_folder, file_name), "w") as f:
            f.write(json_tricks.dumps(subset, sort_keys=True, indent=4))

        return model
Example #58
        def create_connected_graph(role_scores, concepts, root_id, dependent,
                                   aligns, source):
            #role_scores: amr x amr x rel
            graph = nx.MultiDiGraph()
            n = len(concepts)
            role_scores = role_scores.view(n, n, -1)
            max_non_score, max_non_score_id = role_scores[:, :, 1:].max(-1)
            max_non_score_id = max_non_score_id + 1
            non_score = role_scores[:, :, 0]
            active_cost = non_score - max_non_score  #so lowest cost edge gets to active first
            candidates = []
            # add all nodes
            for h_id in range(n):
                h, h_v = get_uni_var(concepts, h_id)
                graph.add_node(h_v,
                               value=h,
                               align=aligns[h_id],
                               gold=True,
                               dep=dependent[h_id])

            constant_links = {}
            normal_edged_links = {}
            # add all pairs of edges
            for h_id in range(n):
                for d_id in range(n):
                    if h_id != d_id:
                        h, h_v = get_uni_var(concepts, h_id)
                        d, d_v = get_uni_var(concepts, d_id)
                        r = rel_dict.getLabel(max_non_score_id[h_id,
                                                               d_id].item())
                        #r_inver = PSDGraph.get_inversed_edge(r)
                        # relations in rel_dict are forward argx, opx, sntx top
                        # and all backward relations.
                        # normalize mod should already be done in rel_dict
                        # we should normalize it here when connecting; make sure not to consider the same relation twice
                        # make sure -arg is fixed at last
                        if not PSDGraph.is_must_arg_functor(
                                r) and PSDGraph.is_arg_functor(r):
                            # we should add -arg if r is in args list
                            if PSDGraph.is_inversed_edge(r):
                                r_inver = PSDGraph.get_inversed_edge(r)
                                # r_inver here is forward
                                if PSDGraph.check_oblig_args_in_vallex(
                                        r_inver, d):
                                    r_inver = r_inver + "-arg"
                                    r = PSDGraph.get_inversed_edge(r_inver)
                                else:
                                    pass
                            else:
                                # if forward, not considering -arg, because forward is top, -args or .members
                                # r is forward, then r_inver is backward
                                r_inver = PSDGraph.get_inversed_edge(r)
                        else:
                            # if not arg functor, not considering -arg
                            r_inver = PSDGraph.get_inversed_edge(r)
                        # TODO: for mwe, compound ner
                        if active_cost[h_id, d_id] < 0:
                            normal_edged_links.setdefault((h_v, r), []).append(
                                (active_cost[h_id, d_id], d_v, r_inver))
                        else:
                            candidates.append(
                                (active_cost[h_id,
                                             d_id], (h_v, d_v, r, r_inver)))

            max_edge_per_node = 1 if not self.training else 100
            for h_v, r in normal_edged_links:
                sorted_list = sorted(normal_edged_links[h_v, r],
                                     key=lambda j: j[0])
                for _, d_v, r_inver in sorted_list[:max_edge_per_node]:
                    #     if graph.has_edge(h_v, d_v):
                    #         continue
                    graph.add_edge(h_v, d_v, key=r, role=r)
                    graph.add_edge(d_v, h_v, key=r_inver, role=r_inver)
                for cost, d_v, r_inver in sorted_list[
                        max_edge_per_node:]:  #remaining
                    candidates.append((cost, (h_v, d_v, r, r_inver)))

            candidates = sorted(candidates, key=lambda j: j[0])

            for _, (h_v, d_v, r, r_inver) in candidates:
                if nx.is_strongly_connected(graph):
                    break
                if not nx.has_path(graph, h_v, d_v):
                    graph.add_edge(h_v, d_v, key=r, role=r, force_connect=True)
                    graph.add_edge(d_v,
                                   h_v,
                                   key=r_inver,
                                   role=r_inver,
                                   force_connect=True)

            _, root_v = get_uni_var(concepts, root_id)
            h_v = BOS_WORD
            root_symbol = PSDUniversal.TOP_PSDUniversal()
            graph.add_node(h_v, value=root_symbol, align=-1, gold=True, dep=1)
            graph.add_edge(h_v, root_v, key=":top", role=":top")
            graph.add_edge(root_v, h_v, key=":top-of", role=":top-of")

            for n, d in graph.nodes(True):
                le, pos, sense = d["value"].le, d["value"].pos, d[
                    "value"].sense
                # here align is the token index list
                # last saving for not assigned sense. mainly for frames check
                if get_sense:
                    sense = self.fragment_to_node_converter.fix_sense(
                        pos, le, sense)

                anchors = []
                # convert the token index into character offsets using the
                # input source data
                tok_index = d["align"]
                if 0 <= tok_index < len(source[ANCHOR_IND_SOURCE_BATCH]):
                    anchors.extend(source[ANCHOR_IND_SOURCE_BATCH][tok_index])

                d["value"] = PSDUniversal(pos, le, sense, anchors)
                d["anchors"] = anchors

            if not nx.is_strongly_connected(graph):
                logger.warning(
                    "not connected after contraction: %s, %s, %s, %s, %s, %s, %s, %s",
                    self.graph_to_quadruples(graph),
                    graph_to_amr(graph), candidates, constant_links,
                    graph.nodes(), graph.edges(), normal_edged_links, concepts)
            return graph, self.graph_to_quadruples(graph)
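
# A minimal toy sketch (illustrative names only, not part of the class above)
# of the per-(head, role) selection used by normal_edged_links: keep the
# cheapest dependent for each (head, role) pair and pool the rest as fallback
# candidates for the connectivity repair.
#
#   links = {("eat", "PAT"): [(-2.0, "apple", "PAT-of"),
#                             (-0.5, "cake", "PAT-of")]}
#   candidates = []
#   for (h, r), deps in links.items():
#       deps.sort(key=lambda t: t[0])
#       kept, rest = deps[:1], deps[1:]   # max_edge_per_node == 1
#       candidates.extend((cost, (h, d, r, ri)) for cost, d, ri in rest)
#   # kept       -> [(-2.0, "apple", "PAT-of")]
#   # candidates -> [(-0.5, ("eat", "cake", "PAT", "PAT-of"))]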
Example #59
            G.add_edge(fields[1], fields[3], kind=' overrides function ')
        else:
            if not skipfunc.search(line):
                G.add_edge(fields[3],
                           fields[1],
                           kind=' calls override function ')
                if epfunc.search(fields[1]):
                    epfuncs.add(fields[1])
f.close()

#for epfunc in sorted(epfuncs): print(epfunc)
print("Callstacks for top level functions calling EventSetupRecord::get<>()")
print()

callstacks = set()
for tfunc in sorted(toplevelfuncs):
    for epfunc in sorted(epfuncs):
        if nx.has_path(G, tfunc, epfunc):
            path = nx.shortest_path(G, tfunc, epfunc)
            cs = ""
            previous = str("")
            for p in path:
                stripped = re.sub(farg, "()", p)
                if previous != stripped:
                    cs += stripped + "; "
                    previous = stripped
            callstacks.add(cs)
            break

for cs in sorted(callstacks):
    print(cs)
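
# A minimal sketch (toy names, not from the real call database) of the
# has_path/shortest_path pattern used above:
#
#   import networkx as nx
#   G = nx.DiGraph()
#   G.add_edge("ModuleA::produce()", "helper()")
#   G.add_edge("helper()", "EventSetupRecord::get<>()")
#   if nx.has_path(G, "ModuleA::produce()", "EventSetupRecord::get<>()"):
#       print("; ".join(nx.shortest_path(G, "ModuleA::produce()",
#                                        "EventSetupRecord::get<>()")))
#   # -> ModuleA::produce(); helper(); EventSetupRecord::get<>()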
Example #60
def generate_expanded_graph(graph_in):
    """Generates an expanded graph based on node parameterization

    Parameterization is controlled using the `iterables` field of the
    pipeline elements.  Thus if there are two nodes with iterables a=[1,2]
    and b=[3,4] this procedure will generate a graph with sub-graphs
    parameterized as (a=1,b=3), (a=1,b=4), (a=2,b=3) and (a=2,b=4).
    """
    logger.debug("PE: expanding iterables")
    graph_in = _remove_nonjoin_identity_nodes(graph_in, keep_iterables=True)
    # standardize the iterables as {(field, function)} dictionaries
    for node in graph_in.nodes_iter():
        if node.iterables:
            _standardize_iterables(node)
    allprefixes = list('abcdefghijklmnopqrstuvwxyz')

    # the iterable nodes
    inodes = _iterable_nodes(graph_in)
    logger.debug("Detected iterable nodes %s" % inodes)
    # while there is an iterable node, expand the iterable node's
    # subgraphs
    while inodes:
        inode = inodes[0]
        logger.debug("Expanding the iterable node %s..." % inode)

        # the join successor nodes of the current iterable node
        jnodes = [node for node in graph_in.nodes_iter()
                  if hasattr(node, 'joinsource') and
                  inode.name == node.joinsource and
                  nx.has_path(graph_in, inode, node)]

        # excise the join in-edges. save the excised edges in a
        # {jnode: {source name: (destination name, edge data)}}
        # dictionary
        jedge_dict = {}
        for jnode in jnodes:
            in_edges = jedge_dict[jnode] = {}
            edges2remove = []
            for src, dest, data in graph_in.in_edges_iter(jnode, True):
                in_edges[src._id] = data
                edges2remove.append((src, dest))

            for src, dest in edges2remove:
                graph_in.remove_edge(src, dest)
                logger.debug("Excised the %s -> %s join node in-edge."
                             % (src, dest))

        if inode.itersource:
            # the itersource is a (node name, fields) tuple
            src_name, src_fields = inode.itersource
            # convert a single field to a list
            if isinstance(src_fields, string_types):
                src_fields = [src_fields]
            # find the unique iterable source node in the graph
            try:
                iter_src = next((node for node in graph_in.nodes_iter()
                                 if node.name == src_name and
                                 nx.has_path(graph_in, node, inode)))
            except StopIteration:
                raise ValueError("The node %s itersource %s was not found"
                                 " among the iterable predecessor nodes"
                                 % (inode, src_name))
            logger.debug("The node %s has iterable source node %s"
                         % (inode, iter_src))
            # look up the iterables for this particular itersource descendant
            # using the iterable source ancestor values as a key
            iterables = {}
            # the source node iterables values
            src_values = [getattr(iter_src.inputs, field) for field in src_fields]
            # if there is one source field, then the key is the source value,
            # otherwise the key is the tuple of source values
            if len(src_values) == 1:
                key = src_values[0]
            else:
                key = tuple(src_values)
            # The itersource iterables is a {field: lookup} dictionary, where the
            # lookup is a {source key: iteration list} dictionary. Look up the
            # current iterable value using the predecessor itersource input values.
            iter_dict = dict([(field, lookup[key]) for field, lookup in
                              inode.iterables if key in lookup])
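            # e.g. with inode.iterables == [('n', {1: [7, 8], 2: [9]})] and
            # key == 1, iter_dict becomes {'n': [7, 8]} (a hypothetical
            # illustration, not data from a real workflow)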
            # convert the iterables to the standard {field: function} format

            def make_field_func(*pair):
                return pair[0], lambda: pair[1]
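            # the factory freezes each pair's value via its arguments; a bare
            # `lambda: pair[1]` written directly in the comprehension below
            # would late-bind and return the last pair's value for every field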

            iterables = dict([make_field_func(*pair) for pair in iter_dict.items()])
        else:
            iterables = inode.iterables.copy()
        inode.iterables = None
        logger.debug('node: %s iterables: %s' % (inode, iterables))

        # collect the subnodes to expand
        subnodes = list(dfs_preorder(graph_in, inode))
        prior_prefix = []
        for s in subnodes:
            prior_prefix.extend(re.findall(r'\.(.)I', s._id))
        prior_prefix = sorted(prior_prefix)
        if not prior_prefix:
            iterable_prefix = 'a'
        else:
            if prior_prefix[-1] == 'z':
                raise ValueError('Too many iterables in the workflow')
            iterable_prefix =\
                allprefixes[allprefixes.index(prior_prefix[-1]) + 1]
        logger.debug('subnodes: %s' % subnodes)

        # append a suffix to the iterable node id
        inode._id += ('.' + iterable_prefix + 'I')

        # merge the iterated subgraphs
        subgraph = graph_in.subgraph(subnodes)
        graph_in = _merge_graphs(graph_in, subnodes,
                                 subgraph, inode._hierarchy + inode._id,
                                 iterables, iterable_prefix, inode.synchronize)

        # reconnect the join nodes
        for jnode in jnodes:
            # the {node id: edge data} dictionary for edges connecting
            # to the join node in the unexpanded graph
            old_edge_dict = jedge_dict[jnode]
            # collect the expanded replicates of each join in-edge source node
            expansions = defaultdict(list)
            for node in graph_in.nodes_iter():
                for src_id, edge_data in list(old_edge_dict.items()):
                    if node._id.startswith(src_id):
                        expansions[src_id].append(node)
            for in_id, in_nodes in list(expansions.items()):
                logger.debug("The join node %s input %s was expanded"
                             " to %d nodes." % (jnode, in_id, len(in_nodes)))
            # preserve the node iteration order by sorting on the node id
            for in_nodes in list(expansions.values()):
                in_nodes.sort(key=lambda node: node._id)

            # the number of join source replicates.
            iter_cnt = count_iterables(iterables, inode.synchronize)
            # make new join node fields to connect to each replicated
            # join in-edge source node.
            slot_dicts = [jnode._add_join_item_fields() for _ in range(iter_cnt)]
            # for each join in-edge, connect every expanded source node
            # which matches on the in-edge source name to the destination
            # join node. Qualify each edge connect join field name by
            # appending the next join slot index, e.g. the connect
            # from two expanded nodes from field 'out_file' to join
            # field 'in' are qualified as ('out_file', 'in1') and
            # ('out_file', 'in2'), resp. This preserves connection port
            # integrity.
            for old_id, in_nodes in list(expansions.items()):
                # reconnect each replication of the current join in-edge
                # source
                for in_idx, in_node in enumerate(in_nodes):
                    olddata = old_edge_dict[old_id]
                    newdata = deepcopy(olddata)
                    # the (source, destination) field tuples
                    connects = newdata['connect']
                    # the join fields connected to the source
                    join_fields = [field for _, field in connects
                                   if field in jnode.joinfield]
                    # the {field: slot fields} maps assigned to the input
                    # node, e.g. {'image': 'imageJ3', 'mask': 'maskJ3'}
                    # for the third join source expansion replicate of a
                    # join node with join fields image and mask
                    slots = slot_dicts[in_idx]
                    for con_idx, connect in enumerate(connects):
                        src_field, dest_field = connect
                        # qualify a join destination field name
                        if dest_field in slots:
                            slot_field = slots[dest_field]
                            connects[con_idx] = (src_field, slot_field)
                            logger.debug("Qualified the %s -> %s join field"
                                         " %s as %s." %
                                         (in_node, jnode, dest_field, slot_field))
                    graph_in.add_edge(in_node, jnode, newdata)
                    logger.debug("Connected the join node %s subgraph to the"
                                 " expanded join point %s" % (jnode, in_node))

        # nx.write_dot(graph_in, '%s_post.dot' % node)
        # the remaining iterable nodes
        inodes = _iterable_nodes(graph_in)

    for node in graph_in.nodes():
        if node.parameterization:
            node.parameterization = [param for _, param in
                                     sorted(node.parameterization)]
    logger.debug("PE: expanding iterables ... done")

    return _remove_nonjoin_identity_nodes(graph_in)