Code example #1
File: graph_search.py Project: parkerclayton/pyMARS
def graph_search(nx_graph, target_species):
    """Search nodal graph and generate list of species to exclude

    Parameters
    ----------
    nx_graph : obj
        networkx graph object of solution
    target_species : list
        List of target species to search from

    Returns
    -------
    essential_nodes : list
        List of names of essential species
    """

    if len(target_species) > 1:
        essential_nodes = list()
        for target in target_species:
            essential = list(nx.dfs_preorder_nodes(nx_graph, target))
            for sp in essential:
                if sp not in essential_nodes:
                    essential_nodes.append(sp)
    else:
        essential_nodes = list(nx.dfs_preorder_nodes(nx_graph, target_species[0]))

    return essential_nodes
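
A minimal usage sketch (the graph and species names are made up for illustration; `nx` is assumed to be the imported networkx module):

import networkx as nx

solution_graph = nx.DiGraph()
solution_graph.add_edges_from([('CH4', 'O2'), ('O2', 'CO2'), ('CH4', 'H2O')])
print(graph_search(solution_graph, ['CH4']))
# ['CH4', 'O2', 'CO2', 'H2O'] -- every species reachable from the target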
Code example #2
File: type_utils.py Project: Yelp/environment_tools
def convert_location_type(location, source_type, desired_type):
    """ Converts the provided location into the desired location_type

    This will perform a DFS on the location graph to find connected components
    to the supplied node and then filter by the desired location_type.
    Basically if we consider our datacenter layout a DAG, then this method will
    search any nodes connected to the source location looking for the proper
    type.

    Examples:
    Assume available_location_types() is ['ecosystem', 'region', 'habitat'],
    and the location graph is:
     - prod
       - uswest1-prod
         - uswest1aprod
         - uswest1bprod

    # convert a habitat to the containing ecosystem
    convert_location_type('uswest1aprod', 'habitat', 'ecosystem') -> ['prod']
    # convert a region to the member habitats
    convert_location_type('uswest1-prod', 'region', 'habitat') ->
        ['uswest1aprod', 'uswest1bprod']

    :param location: A string that represents a location, e.g. "devc"
    :param source_type: A string that should exist inside the list returned
        by available_location_types. This is the type of the provided location
        and is optional. This exists because the names in the DAG may not be
        unique across all levels, and providing this type will disambiguate
        between which "devc" you mean (ecosystem or habitat).
    :param desired_type: A string that should exist inside the
        list returned by available_location_types. This is the desired type
        that the caller wants.
    :returns: locations, A list of locations that are of the desired location_type.
        These will be connected components filtered by type. Note that
        these results are sorted for calling consistency before returning.
    :rtype: list of strings
    """
    search_node = '{0}_{1}'.format(location, source_type)

    direction = compare_types(desired_type, source_type)
    candidates = set()
    if direction < 0:
        # We are converting "up", and have to walk the tree backwards
        reversed_graph = nx.reverse(location_graph())
        candidates |= set(nx.dfs_preorder_nodes(reversed_graph, search_node))
    else:
        candidates |= set(nx.dfs_preorder_nodes(location_graph(), search_node))

    # Only return results that are of the correct type
    result = filter(lambda x: x.endswith('_' + desired_type), candidates)
    return sorted([loc[:loc.rfind('_')] for loc in result])
Code example #3
File: part1.py Project: jim-bo/silp2
def _validate_comp_pre(RG):
    ''' validates connection at a certain level '''

    # use topological sort to find root.
    root = nx.topological_sort(RG)[0]

    # try to solve each node.
    for p in nx.dfs_preorder_nodes(RG, source=root):

        # dive down.
        if RG.node[p]['graph'] != None:
            _validate_comp_pre(RG.node[p]['graph'])

        # skip if child.
        if len(RG.successors(p)) == 0:
            continue

        # check children.
        for q in RG.successors(p):

            # get sets.
            pcomp = RG.node[p]['comp']
            qcomp = RG.node[q]['comp']

            # compute cuts.
            cutGIVEN = RG[p][q]['cut']
            cutTEST = pcomp.intersection(qcomp)

            # test cut.
            if cutGIVEN != cutTEST:
                print "bad cut"
                print cutGIVEN, cutTEST
                print p, q
                sys.exit()
Code example #4
File: __init__.py Project: hoonto/reeb
def triangulate_rectangular_contours(landscape):
    """
    Given an enumerated contour landscape tree, returns a list of the triangles in the landscape metaphor.
    """
    root_node = [n for n in landscape if landscape.in_degree(n) == 0][0]
    triangles = []

    # handle the root node
    child_node = landscape.successors(root_node)[0]
    outer_inds = landscape.node[root_node]["contour"].indices
    inner_inds = landscape.node[child_node]["contour"].indices
    triangles.extend(triangulate_nested_rectangle(outer_inds, inner_inds))

    # now work through the remaining nodes
    dfs_nodes = nx.dfs_preorder_nodes(landscape, root_node)
    dfs_nodes.next()
    for node in dfs_nodes:
        # we only do something if this is not a leaf
        is_leaf = landscape.out_degree(node) == 0
        if not is_leaf:
            outer_saddle_contour = landscape.node[node]["contour"]
            for inner_saddle_contour in outer_saddle_contour.children:
                child = inner_saddle_contour.child
                child_contour = landscape.node[child]["contour"]
                inner_saddle_indices = inner_saddle_contour.indices
                child_indices = child_contour.indices

                if child_contour.type == "leaf":
                    triangles.extend(triangulate_nested_point(inner_saddle_indices, child_indices[0]))
                else:
                    triangles.extend(triangulate_nested_rectangle(inner_saddle_indices, child_indices))

    return triangles
Code example #5
File: dgm.py Project: DLunin/pygraphmodels
 def reachable(self, source, observed):
     """
     Finds a set of reachable (in the sense of d-separation) nodes in graph.
     :param self: target graph
     :param source: source node name
     :param observed: a sequence of observed nodes
     :return: a set of reachable nodes
     """
     # A: ancestors of the observed nodes (nodes that can reach some observed z)
     A = set(sum([list(nx.dfs_preorder_nodes(self.reverse(), z)) for z in observed], []))
     Z = observed
     L = [(source, 'up')]
     V = set()
     result = set()
     while len(L) > 0:
         x, d = L.pop()
         if (x, d) in V:
             continue
         if x not in Z:
             result.add((x, d))
         V.add((x, d))
         if d == 'up' and x not in Z:
             for y in self.predecessors_iter(x):
                 L.append((y, 'up'))
             for y in self.successors_iter(x):
                 L.append((y, 'down'))
         elif d == 'down':
             if x in A:
                 for y in self.predecessors_iter(x):
                     L.append((y, 'up'))
             if x not in Z:
                 for y in self.successors_iter(x):
                     L.append((y, 'down'))
     result = set([x[0] for x in result])
     return result - {source}
Code example #6
File: eval.py Project: martinpilat/dag-evaluate
def process_boosters(dag):

    dag_nx = utils.dag_to_nx(dag)

    processed_dag = dict()
    sub_dags = []
    for k, spec in dag.items():
        if spec[1][0] == 'booBegin':
            input_name = spec[0]
            for node in nx.dfs_preorder_nodes(dag_nx, k):
                node_type = dag[node][1][0]
                if node == k:
                    continue
                if node_type == 'booster':
                    sub_dags.append(dag[node][1][2])
                if node_type == 'booEnd':
                    sub_dags = [normalize_dag(sd) for sd in sub_dags]
                    processed_dag[k] = (input_name, ['booster', {'sub_dags': sub_dags}], dag[node][2])
                    sub_dags = []
                    break
        elif spec[1][0] in ['booster', 'booEnd']:
            continue
        else:
            processed_dag[k] = spec

    return processed_dag
Code example #7
 def calc_ProjFeatures(self):
     # Add edges to the projection graph
     for node in self.gM.nodes():
         neighbours = self.G.neighbors(node)
         for item in neighbours:
             if self.gM.has_node(item) and node != item:
                 self.gM.add_edge(node, item)

     # Initialize and calculate features
     closed = []
     self.gM_connComp = 0
     self.gM_maxDeg = 0
     self.gM_sizeMaxComp = 0

     for node in self.gM.nodes():
         if node not in closed:
             pre = list(nx.dfs_preorder_nodes(self.gM, node))
             closed = closed + pre
             self.gM_connComp = self.gM_connComp + 1
             if len(pre) > self.gM_sizeMaxComp:
                 self.gM_sizeMaxComp = len(pre)
         if self.gM_maxDeg < self.gM.degree(node):
             self.gM_maxDeg = self.gM.degree(node)
     return
Code example #8
File: _mcy.py Project: argriffing/raoteh
def _get_node_to_pset_same_transition_matrix(T, root, P,
        node_to_allowed_states=None):
    T_bfs = nx.bfs_tree(T, root)
    preorder_nodes = list(nx.dfs_preorder_nodes(T, root))
    sorted_states = sorted(P)

    # Put the tree into sparse boolean csr form.
    tree_csr_indices, tree_csr_indptr = _digraph_to_bool_csr(
            T_bfs, preorder_nodes)

    # Put the transition matrix into sparse boolean csr form.
    trans_csr_indices, trans_csr_indptr = _digraph_to_bool_csr(
            P, sorted_states)

    # Define the state mask.
    state_mask = _define_state_mask(
            node_to_allowed_states, preorder_nodes, sorted_states)

    # Update the state mask.
    pyfelscore.mcy_get_node_to_pset(
            tree_csr_indices,
            tree_csr_indptr,
            trans_csr_indices,
            trans_csr_indptr,
            state_mask)

    # Convert the updated state mask into a node_to_pset dict.
    node_to_pset = _state_mask_to_dict(
            state_mask, preorder_nodes, sorted_states)

    # Return the node_to_pset dict.
    return node_to_pset
Code example #9
File: dag.py Project: 4c656554/networkx
def transitive_closure(G):
    """ Returns transitive closure of a directed graph

    The transitive closure of G = (V,E) is a graph G+ = (V,E+) such that
    for all v,w in V there is an edge (v,w) in E+ if and only if there
    is a non-null path from v to w in G.

    Parameters
    ----------
    G : NetworkX DiGraph
        Graph

    Returns
    -------
    TC : NetworkX DiGraph
        Graph

    Raises
    ------
    NetworkXNotImplemented
        If G is not directed

    References
    ----------
    .. [1] http://www.ics.uci.edu/~eppstein/PADS/PartialOrder.py

    """
    TC = nx.DiGraph()
    TC.add_nodes_from(G.nodes())
    TC.add_edges_from(G.edges())
    for v in G:
        TC.add_edges_from((v, u) for u in nx.dfs_preorder_nodes(G, source=v)
                          if v != u)
    return TC
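
A quick sanity check on a made-up three-node chain (assuming networkx is imported as nx):

import networkx as nx

G = nx.DiGraph([(1, 2), (2, 3)])
TC = transitive_closure(G)
print(sorted(TC.edges()))
# [(1, 2), (1, 3), (2, 3)] -- the edge (1, 3) is added because 3 is reachable from 1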
Code example #10
File: boundaries.py Project: tboch/mocpy
    def _retrieve_skycoords(V):
        coords_l = []
        # Accessing the borders one by one. At this step, V_subgraphs contains a list of cycles
        # (i.e. one describing the external border of the MOC component and several describing the holes
        # found in the MOC component).
        V_subgraphs = nx.connected_component_subgraphs(V)
        for v in V_subgraphs:
            # Compute the MST for each cycle
            v = nx.convert_node_labels_to_integers(v)
            mst = nx.minimum_spanning_tree(v)
            # Get one end of the span tree by looping over its node and checking if the degree is one
            src = None
            for (node, deg) in mst.degree():
                if deg == 1:
                    src = node
                    break

            # Get the unordered lon and lat
            ra = np.asarray(list(nx.get_node_attributes(v, 'ra').values()))
            dec = np.asarray(list(nx.get_node_attributes(v, 'dec').values()))
            coords = np.vstack((ra, dec)).T
            # Get the ordering from the MST
            ordering = np.asarray(list(nx.dfs_preorder_nodes(mst, src)))
            # Order the coords
            coords = coords[ordering]
            # Get a skycoord containing N coordinates computed from the Nx2 `coords` array
            coords = SkyCoord(coords, unit="deg")
            coords_l.append(coords)

        return coords_l
Code example #11
File: dgm.py Project: DLunin/pygraphmodels
def descendants(G, x):
    """
    Set of all descendants of node in a graph, not including itself.
    :param G: target graph
    :param x: target node
    :return: set of descendants
    """
    return set(nx.dfs_preorder_nodes(G, x)) - {x}
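
For example, on a hypothetical four-node DAG:

import networkx as nx

G = nx.DiGraph([('a', 'b'), ('b', 'c'), ('a', 'd')])
print(descendants(G, 'a'))  # {'b', 'c', 'd'}
print(descendants(G, 'c'))  # set()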
Code example #12
 def recalculateDependent(self, node, returnResult=False):
     if self.dependencyGraph.has_node(node):
         generator = dfs_preorder_nodes(self.dependencyGraph, node)
         next(generator)  # skip the first node, that is us
         nodelist = list(generator)  # make a list, we need it twice
         result = [self.recalculateNode(node) for node in nodelist]
         return (nodelist, result) if returnResult else nodelist  # return which ones were re-calculated, so gui can be updated
     return (list(), list()) if returnResult else list()
Code example #13
File: treenet.py Project: binarybana/samcnet
    def propose(self):
        self.oldgraph = self.graph.copy()
        self.memo_entropy = None

        g = self.graph
        if g.edges():
            scheme = np.random.randint(3)
        else:
            scheme = np.random.randint(2)

        nodes = g.nodes()

        if scheme == 0: # Perturb probabilities
            n1 = np.random.randint(len(nodes))
            if g.predecessors(n1):
                g.node[n1]['delta'] = st.beta.rvs(0.4,0.4)
                g.node[n1]['eta'] = st.beta.rvs(0.4,0.4)
                g.node[n1]['marginal'] = np.nan
            else: 
                g.node[n1]['delta'] = np.nan
                g.node[n1]['eta'] = np.nan
                g.node[n1]['marginal'] = np.random.rand()

        if scheme == 1: # Add or change edge 'backwards'
            while True:
                random.shuffle(nodes) #inefficient
                n0 = nodes[0]
                n1 = nodes[1]
                if g.predecessors(n1):
                    g.remove_edge(g.predecessors(n1)[0], n1)
                g.add_edge(n0, n1)

                if nx.is_directed_acyclic_graph(g):
                    break
                else:
                    g = self.graph = self.oldgraph.copy()

            g.node[n1]['delta'] = st.beta.rvs(0.4,0.4)
            g.node[n1]['eta'] = st.beta.rvs(0.4,0.4)
            g.node[n1]['marginal'] = np.nan

        if scheme == 2: # Remove edge
            edges = g.edges()
            edge = edges[np.random.randint(len(edges))]
            n1 = edge[1]
            g.remove_edge(*edge)

            g.node[n1]['delta'] = np.nan
            g.node[n1]['eta'] = np.nan
            g.node[n1]['marginal'] = np.random.rand()

        #print len(g.edges())
        trav = nx.dfs_preorder_nodes(g, n1)  # FIXME: this may be a problem, dfs_preorder was not what I thought it was before
        trav.next()
        for node in trav:
            g.node[node]['marginal'] = np.nan #invalidate downstream cached marginals
Code example #14
def kosaraju_strongly_connected_components(G, source=None):
    """Generate nodes in strongly connected components of graph.

    Parameters
    ----------
    G : NetworkX Graph
        A directed graph.

    Returns
    -------
    comp : generator of sets
        A generator of sets of nodes, one for each strongly connected
        component of G.

    Raises
    ------
    NetworkXNotImplemented:
        If G is undirected.

    Examples
    --------
    Generate a sorted list of strongly connected components, largest first.

    >>> G = nx.cycle_graph(4, create_using=nx.DiGraph())
    >>> nx.add_cycle(G, [10, 11, 12])
    >>> [len(c) for c in sorted(nx.kosaraju_strongly_connected_components(G),
    ...                         key=len, reverse=True)]
    [4, 3]

    If you only want the largest component, it's more efficient to
    use max instead of sort.

    >>> largest = max(nx.kosaraju_strongly_connected_components(G), key=len)

    See Also
    --------
    connected_components
    weakly_connected_components

    Notes
    -----
    Uses Kosaraju's algorithm.

    """
    with nx.utils.reversed(G):
        post = list(nx.dfs_postorder_nodes(G, source=source))

    seen = set()
    while post:
        r = post.pop()
        if r in seen:
            continue
        c = nx.dfs_preorder_nodes(G, r)
        new = {v for v in c if v not in seen}
        yield new
        seen.update(new)
Code example #15
def order_intervals(intervals, start):
	""" computes the intersection graph of the intervals, takes the data attribute
		of the intervals as nodes, hence intervals belonging to the same segment
		correspond to one node in the graph.
		returns a traversal starting from start. """
	G = dict(((i.data, []) for i in intervals)) 

	#right to left sweep according to left endpoints
	#linear time with proper sorting
	stack = []
	for i in sorted(intervals, reverse = True, key = lambda i: i.endpoints):
		l, r = i.endpoints

		while stack and r>stack[-1].endpoints[1]:
			i2 = stack.pop()

		if stack and r>= stack[-1].endpoints[0]:
			G[i.data].append(stack[-1].data)
			G[stack[-1].data].append(i.data)

		stack.append(i)

	#left to right sweep according to right endpoints
	stack = []
	for i in sorted(intervals, key = lambda i: (i.endpoints[1], i.endpoints[0])):
		l, r = i.endpoints

		while stack and l<stack[-1].endpoints[0]:
			i2 = stack.pop()

		if stack and l<= stack[-1].endpoints[1]:
			G[i.data].append(stack[-1].data)
			G[stack[-1].data].append(i.data)
		stack.append(i)

	order = list(nx.dfs_preorder_nodes(G,start))

	if not len(order) == len(G):
		seen = set(order)
		for v in G:
			if v in seen: continue
			raise IntervalException(list(nx.dfs_preorder_nodes(G,v)))
	return order
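
A minimal sketch of how this might be called, assuming a hypothetical Interval class carrying the endpoints and data attributes the function expects (the function also relies on nx.dfs_preorder_nodes accepting a plain adjacency dict, which networkx's DFS does):

import networkx as nx

class Interval:
    def __init__(self, endpoints, data):
        self.endpoints = endpoints  # (left, right) pair
        self.data = data            # segment identifier

intervals = [Interval((0, 2), 'a'), Interval((1, 3), 'b')]
print(order_intervals(intervals, 'a'))
# ['a', 'b'] -- 'b' overlaps 'a', so it is reached in the traversal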
Code example #16
 def getStructure(self):
     nodes = []
     for n in nx.dfs_preorder_nodes(self.structure, self.root):
         p = self.structure.in_edges(n)
         if len(p) == 0:
             p = ""
         else:
             p = p[0][0]
         nodes.append((n, self.structure.node[n]['name'], p))
     return nodes
Code example #17
def transitiveReduction(g):
    ''' from http://stackoverflow.com/questions/17078696/im-trying-to-perform-the-transitive-reduction-of-directed-graph-in-python'''
    for n1 in g.nodes_iter():
        if g.has_edge(n1, n1):
            g.remove_edge(n1, n1)
        for n2 in g.successors(n1):
            for n3 in g.successors(n2):
                for n4 in nx.dfs_preorder_nodes(g, n3):
                    if g.has_edge(n1, n4):
                        g.remove_edge(n1, n4) 
Code example #18
File: data.py Project: ngraetz/dismod_mr
    def describe(self, data_type):
        G = self.hierarchy
        df = self.get_data(data_type)

        for n in nx.dfs_postorder_nodes(G, 'all'):
            G.node[n]['cnt'] = len(df[df['area']==n].index) + pl.sum([G.node[c]['cnt'] for c in G.successors(n)])
            G.node[n]['depth'] = nx.shortest_path_length(G, 'all', n)
        
        for n in nx.dfs_preorder_nodes(G, 'all'):
            if G.node[n]['cnt'] > 0:
                print ' *'*G.node[n]['depth'], n, int(G.node[n]['cnt'])
Code example #19
File: board.py Project: p7k/battleship
 def _find_ship_and_adjacents(self, i):
     """DFS work to find adjacent ships comprising the new larger ship."""
     ship, adj_decks, adj_ships = [i], [], []
     for adj_tile in self._adjacent_tiles(i):
         if adj_tile in self._decks:
             adj_decks.append(adj_tile)
             adj_ship = list(networkx.dfs_preorder_nodes(self._decks,
                                                         adj_tile))
             adj_ships.append(adj_ship)
             ship.extend(adj_ship)
     return sorted(ship), adj_decks, adj_ships
Code example #20
File: _mcy.py Project: argriffing/raoteh
def _esd_get_node_to_set(T, root,
        node_to_allowed_states=None, P_default=None):
    # Construct the bfs tree, preserving transition matrices on the edges.
    T_bfs = nx.DiGraph()
    for na, nb in nx.bfs_edges(T, root):
        T_bfs.add_edge(na, nb)
        edge_object = T[na][nb]
        P = edge_object.get('P', None)
        if P is not None:
            T_bfs[na][nb]['P'] = P

    # Get the ordered list of nodes in preorder.
    preorder_nodes = list(nx.dfs_preorder_nodes(T, root))

    # Get the set of all states in all transition matrices.
    state_set = set()
    for na, nb in T_bfs.edges():
        edge_object = T_bfs[na][nb]
        P = edge_object.get('P', P_default)
        state_set.update(set(P))
    sorted_states = sorted(state_set)

    # Put the tree into sparse boolean csr form.
    tree_csr_indices, tree_csr_indptr = _digraph_to_bool_csr(
            T_bfs, preorder_nodes)

    # Define the state mask.
    state_mask = _define_state_mask(
            node_to_allowed_states, preorder_nodes, sorted_states)

    # Construct the edge-specific transition matrix as an ndim-3 numpy array.
    esd_transitions = _get_esd_transitions(
            T_bfs, preorder_nodes, sorted_states, P_default=P_default)

    # Backward pass to update the state mask.
    pyfelscore.mcy_esd_get_node_to_pset(
            tree_csr_indices,
            tree_csr_indptr,
            esd_transitions,
            state_mask)

    # Forward pass to update the state mask.
    pyfelscore.esd_get_node_to_set(
            tree_csr_indices,
            tree_csr_indptr,
            esd_transitions,
            state_mask)

    # Convert the updated state mask into a node_to_set dict.
    node_to_set = _state_mask_to_dict(
            state_mask, preorder_nodes, sorted_states)

    # Return the node_to_set dict.
    return node_to_set
Code example #21
    def __init__(self,w):

        g=nx.DiGraph()
        g0=nx.Graph()

        n=math.factorial(w)
        self.n = n
        perms = []
        self.perms=perms
        for p in itertools.permutations(list(range(w))):
            perms.append(p)

        for x in range(0,w)  :
            for y in range(x+1,w+1):
                d = y-x
                for i in range(n):
                    for j in range(n):
                        match=True
                        #print d,x,y,i,j,w-d,"####"
                        for k in range(w-d):
                            #print d,x,y,i,j,k,k+d
                            if not perms[i][k]+d ==  perms[j][k+d]:
                                match=False
                        if match:
                            g.add_edge((x,i),(y,j),weight=d*d )
                            g0.add_edge((x,i),(y,j),weight=d*d )
                            #print (x,i),(y,j),d,"z"

        #for e in g.edges():
        #    print "e:",e

        n_backlinks=0

        backlinks={}

        for i in range(n):
            backlinks[i]=[]
            nn = nx.dfs_preorder_nodes(g,(0,i))
            #print "#",str((0,i))#,list(nn)
            sg = g0.subgraph(nn)
            #print "#",list(sg.nodes())
            t = nx.minimum_spanning_tree(sg)
            for a,b in t.edges():
                if b<a:
                    c=b
                    b=a
                    a=c
                if a==(0,i):
                    #print "x:",b[0]-a[0],a,b,perms[b[1]],perms[a[1]]
                    n_backlinks+=1
                    backlinks[i].append( (b[0]-a[0],b[1]) )

        self.backlinks = backlinks
        self.n_backlinks=n_backlinks
Code example #22
File: data.py Project: aflaxman/gbd
    def describe(self, data_type):
        G = self.hierarchy
        df = self.get_data(data_type)

        for n in nx.dfs_postorder_nodes(G, "all"):
            G.node[n]["cnt"] = len(df[df["area"] == n].index) + pl.sum([G.node[c]["cnt"] for c in G.successors(n)])
            G.node[n]["depth"] = nx.shortest_path_length(G, "all", n)

        for n in nx.dfs_preorder_nodes(G, "all"):
            if G.node[n]["cnt"] > 0:
                print " *" * G.node[n]["depth"], n, int(G.node[n]["cnt"])
Code example #23
def edges2oo(f):
    g=nx.Graph()
    ll={}
    scaffolds={}
    scaffold={}
    while True:
        l = f.readline()
        if not l: break
        if l[:6]=="#edge:": #continue
            c = l.strip().split()
            v=eval(" ".join(c[3:]))
            if v['contig']: ll[c[1][:-2]]=v['length']
            g.add_edge(c[1],c[2],v)
        if l[:3]=="cc:": #continue
            c = l.strip().split()
            scn=int(c[1])
            scl=eval(" ".join(c[3:]))
            scaffolds[scn]=scl
            for s in scl:
               scaffold[s]=scn 

    contigs=[]
    strand={}
    scaffold={}
    coords={}
    facing={}
    ccn=1
    for c in nx.connected_components(g):
#        print "#",len(c)
        ends=[]
        for cc in c:
            scaffold[cc]=ccn
            if g.degree(cc)==1:
                ends.append(cc)
        ccn+=1
        order = list(nx.dfs_preorder_nodes(g,ends[0]))

#        def traverse_and_layout(n,coords,facing,x,s,og,max_disp=False):
#    """Traverse the nodes in og, from node n.  Give node n position x.  s==1 for increasing coords, -1 for decreasing.  store in coords and facing the position and 'side' of each end respectively.  
#    Stop traversing if you hit max_disp (optional)."""
        greedy_chicagoan2.traverse_and_layout(ends[0],coords,facing,0,1,g)
        order1=[]
        for i in range(0,len(order),2):
            print(order)
            if not order[i][:-2]==order[i+1][:-2]:
                print("wtf", i,order)
                exit(0)
            if order[i][-1]=="5": 
                strand[order[i][:-2]]="+"
            else:
                strand[order[i][:-2]]="-"
            order1.append(order[i][:-2])
        contigs.append(order1)
    return(contigs,strand,ll,scaffolds,scaffold,coords)
Code example #24
def _depth_first_search(edge_list):
    '''
    This function does a depth first search of a graph with a random node
    selected as the root of a tree representing the graph.
    '''
    G_tmp = nx.Graph()
    G_tmp.add_weighted_edges_from(edge_list)
    edge_index = random.randint(0, len(edge_list) - 1)
    path_sequence = list(nx.dfs_preorder_nodes(G_tmp,
                                               edge_list[edge_index][0]))
    return path_sequence
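
Called with a weighted edge list, this returns one DFS preorder of the underlying graph, rooted at the first endpoint of a randomly chosen edge (a made-up example; the module-level networkx and random imports the snippet relies on are assumed):

edge_list = [(0, 1, 1.0), (1, 2, 2.0), (2, 3, 1.5)]
print(_depth_first_search(edge_list))
# e.g. [1, 0, 2, 3], depending on which edge is drawn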
Code example #25
def optimize_dead_ends(in_g, out_g):
  print "DEBUG: There are %s dead ends and %s odd-degree nodes in this graph." % (
      len(dead_ends(in_g)), len(odd_nodes(in_g)))
  for node in dead_ends(in_g):
    new_from = node
    new_to = next(n for n in networkx.dfs_preorder_nodes(
        in_g, node) if len(set(adjoining_streets(in_g,n))) > 1)
    print "DEBUG: I will optimize by adding an edge from %s to %s" % (
        in_g.node[new_from]['pretty_name'], in_g.node[new_to]['pretty_name'])
    add_artificial_edge(in_g, out_g, new_from, new_to)
  print "DEBUG: There are now %s dead ends and %s odd-degree nodes in this graph." % (
      len(dead_ends(out_g)), len(odd_nodes(out_g)))
Code example #26
    def _partition(self):
        """
        Return all the atoms of the I-diagram for `dist`.

        Parameters
        ----------
        dist : distribution
            The distribution to compute the I-diagram of.
        """
        rvs = self.dist.get_rv_names()
        if not rvs:
            rvs = tuple(range(self.dist.outcome_length()))

        self._lattice = poset_lattice(rvs)
        rlattice = self._lattice.reverse()
        Hs = {}
        Is = {}
        atoms = {}
        new_atoms = {}

        # Entropies
        for node in self._lattice:
            Hs[node] = self._measure(self.dist, node) # pylint: disable=no-member

        # Subset-sum type thing, basically co-information calculations.
        for node in self._lattice:
            Is[node] = sum((-1)**(len(rv)+1)*Hs[rv] for rv in nx.dfs_preorder_nodes(self._lattice, node))

        # Mobius inversion of the above, resulting in the Shannon atoms.
        for node in list(nx.topological_sort(self._lattice))[:-1]:
            kids = islice(nx.dfs_preorder_nodes(rlattice, node), 1, None)
            atoms[node] = Is[node] - sum(atoms[child] for child in kids)

        # get the atom indices in proper format
        for atom, value in atoms.items():
            a_rvs = tuple((_,) for _ in atom)
            a_crvs = tuple(sorted(set(rvs) - set(atom)))
            new_atoms[(a_rvs, a_crvs)] = value

        self.atoms = new_atoms
Code example #27
File: _mc0_dense.py Project: argriffing/raoteh
def get_node_to_distn_esd(T, root, node_to_pmap, nstates,
        root_distn=None, P_default=None):
    """

    """
    # Construct the bfs tree, preserving transition matrices on the edges.
    T_bfs = nx.DiGraph()
    for na, nb in nx.bfs_edges(T, root):
        T_bfs.add_edge(na, nb)
        edge_object = T[na][nb]
        P = edge_object.get('P', None)
        if P is not None:
            T_bfs[na][nb]['P'] = P

    # Get the ordered list of nodes in preorder.
    preorder_nodes = list(nx.dfs_preorder_nodes(T, root))
    nnodes = len(preorder_nodes)

    # Put node_to_pmap into a dense array.
    subtree_probability = np.empty((nnodes, nstates), dtype=float)
    for nb_index, nb in enumerate(preorder_nodes):
        subtree_probability[nb_index] = node_to_pmap[nb]

    # Put the tree into sparse boolean csr form.
    tree_csr_indices, tree_csr_indptr = _density.digraph_to_bool_csr(
            T_bfs, preorder_nodes)

    # Construct the edge-specific transition matrix as an ndim-3 numpy array.
    esd_transitions = _density.get_esd_transitions(
            T_bfs, preorder_nodes, nstates, P_default=P_default)

    # Define the prior distribution at the root.
    if root_distn is None:
        root_distn = np.ones(nstates, dtype=float)

    # Get the posterior distribution at each node.
    node_to_distn_array = np.empty((nnodes, nstates), dtype=float)
    pyfelscore.mc0_esd_get_node_to_distn(
            tree_csr_indices,
            tree_csr_indptr,
            esd_transitions,
            root_distn,
            subtree_probability,
            node_to_distn_array)

    # Convert the ndarray back into a dict.
    node_to_distn = {}
    for na_index, na in enumerate(preorder_nodes):
        node_to_distn[na] = node_to_distn_array[na_index]

    # Return the dict.
    return node_to_distn
Code example #28
File: 62_reactome_tree.py Project: ivanamihalek/tcga
def genes_in_subgraph(cursor, graph, parent_id):
	genes = []
	# this is the whole subtree
	descendants = [pid for pid in nx.dfs_preorder_nodes(graph, parent_id) if count_successors(graph, pid) == 0]

	desc_id_string = ",".join([quotify(d) for d in descendants])
	qry = "select distinct(ensembl_gene_id) from  ensembl2reactome "
	qry += "where reactome_pathway_id in (%s)" % desc_id_string
	ret = error_intolerant_search(cursor,qry)
	if not ret:
		print("possible problem in Reactome: no associated genes found for ", desc_id_string)
		return []
	return [r[0] for r in ret]
Code example #29
File: rosmatch.py Project: orenlivne/euler
    def max_repeated(self, k):
        '''Return the node of maximum prefix length with #leaves >= k under it.'''
        g = self._g
        num_leaves = [0] * g.number_of_nodes()
        for node in nx.dfs_postorder_nodes(g):
            num_leaves[node] = 1 if g.out_degree(node) == 0 else sum(num_leaves[child] for child in g.successors_iter(node))

        prefix_len = np.zeros((g.number_of_nodes(),), dtype=int)
        for node in nx.dfs_preorder_nodes(g):
            node_prefix_len = prefix_len[node]
            for child, e_attr in g[node].iteritems():
                prefix_len[child] = node_prefix_len + e_attr['weight'][1]

        try:
            return max(it.ifilter(lambda x: x[0][1] >= k, ((v, k) for k, v in enumerate(zip(prefix_len, num_leaves)))))[1]
        except ValueError:
            return None
Code example #30
File: logistic.py Project: vmady/bonspy
    def _add_state(self, g):
        for node in nx.dfs_preorder_nodes(g, tuple()):
            if node == tuple():
                state = {}
            elif node[-1] is None:
                parent = g.predecessors(node)[0]
                state = g.node[parent]['state']
            else:
                state = {feat: value for feat, value in zip(self.features, node)}

            g.node[node]['state'] = state

        return g
Code example #31
def kosaraju_strongly_connected_components(G,source=None):
    """Return nodes in strongly connected components of graph.

    Parameters
    ----------
    G : NetworkX Graph
       A directed graph.

    Returns
    -------
    comp : list of lists
       A list of nodes for each component of G.
       The list is ordered from largest connected component to smallest.

    See Also       
    --------
    connected_components

    Notes
    -----
    Uses Kosaraju's algorithm.
    """
    components=[]
    G=G.reverse(copy=False)
    post=list(nx.dfs_postorder_nodes(G,source=source))
    G=G.reverse(copy=False)
    seen={}
    while post:
        r=post.pop()
        if r in seen:
            continue
        c=nx.dfs_preorder_nodes(G,r)
        new=[v for v in c if v not in seen]
        seen.update([(u,True) for u in new])
        components.append(new)
    components.sort(key=len,reverse=True)            
    return components            
Code example #32
def kosaraju_strongly_connected_components(G,source=None):
    """Generate nodes in strongly connected components of graph.

    Parameters
    ----------
    G : NetworkX Graph
       A directed graph.

    Returns
    -------
    comp : generator of lists
       A list of nodes for each component of G.

    Raises
    ------
    NetworkXNotImplemented: If G is undirected.

    See Also
    --------
    connected_components

    Notes
    -----
    Uses Kosaraju's algorithm.
    """
    with nx.utils.reversed(G):
        post = list(nx.dfs_postorder_nodes(G, source=source))

    seen = {}
    while post:
        r = post.pop()
        if r in seen:
            continue
        c = nx.dfs_preorder_nodes(G,r)
        new=[v for v in c if v not in seen]
        seen.update([(u,True) for u in new])
        yield new
Code example #33
    def get_best_paths(self, npaths=-1):
        """
        Extract npaths paths leading to the highest values of the cost function
        :param npaths: an integer specifying number of paths to generate (default all path)
        :return: generator returning the list of nodes.
        """
        # set the default number of paths if npaths is negative
        npaths = (len(self.landscape) if npaths < 0 else npaths)

        # Get npaths nodes with the largest cost function, which represent the end of policies
        best_nodes = heapq.nlargest(
            npaths,
            ((self.get_cost_function(prop), node) for node, prop in self.landscape.node.items())
        )

        # Extract the nodes
        best_nodes = zip(*best_nodes)[1]

        for end_node in best_nodes:
            # extract list of nodes using depth first search
            # Note: depth first search must yield the same result as breadth first search
            nodes = list(nx.dfs_preorder_nodes(self.landscape, end_node))
            nodes.reverse()
            yield nodes
Code example #34
    def extract_conjunctive_queries(r_graph):
        import networkx as nx
        import rdflib
        from rdflib import URIRef, BNode, Literal
        from rdflib.namespace import RDF
        from rdflib.extras.external_graph_libs import rdflib_to_networkx_multidigraph

        join_nodes = r_graph.subjects(RDF.type,
                                      URIRef("http://www.dfki.de/voc#Join"))

        #        nx_r_graph = rdflib_to_networkx_multidigraph(r_graph, edge_attrs=lambda s, p, o: {'e': r_graph.qname(p)})
        nx_r_graph = rdflib_to_networkx_multidigraph(
            r_graph, edge_attrs=lambda s, p, o: {'e': p})
        #        draw_graph(nx_r_graph)

        nx_conjunctive_query_patterns = list()

        for join_node in join_nodes:
            nx_conjunctive_query_patterns.append(
                nx.subgraph(nx_r_graph,
                            list(nx.dfs_preorder_nodes(nx_r_graph,
                                                       join_node))).copy())

        return nx_conjunctive_query_patterns
Code example #35
    def _build_graph(self, target, commands=False, outs=False):
        import networkx
        from dvc import dvcfile
        from dvc.repo.graph import get_pipeline
        from dvc.utils import parse_target

        path, name, tag = parse_target(target)
        target_stage = dvcfile.Dvcfile(self.repo, path, tag=tag).stages[name]
        G = get_pipeline(self.repo.pipelines, target_stage)

        nodes = set()
        for stage in networkx.dfs_preorder_nodes(G, target_stage):
            if commands:
                if stage.cmd is None:
                    continue
                nodes.add(stage.cmd)
            elif outs:
                for out in stage.outs:
                    nodes.add(str(out))
            else:
                nodes.add(stage.addressing)

        edges = []
        for from_stage, to_stage in networkx.edge_dfs(G, target_stage):
            if commands:
                if to_stage.cmd is None:
                    continue
                edges.append((from_stage.cmd, to_stage.cmd))
            elif outs:
                for from_out in from_stage.outs:
                    for to_out in to_stage.outs:
                        edges.append((str(from_out), str(to_out)))
            else:
                edges.append((from_stage.addressing, to_stage.addressing))

        return list(nodes), edges, networkx.is_tree(G)
Code example #36
 def calc_all_pairs(self, num_relations=3):
     """
     Given a connected graph, calculate all possible pairs of
     paths, i.e. all simple paths
     :return:
     """
     nodes = list(nx.dfs_preorder_nodes(self.connected_family, 0))
     all_pairs = []
     for node_a, node_b in it.combinations(nodes, 2):
         for path in nx.all_simple_paths(self.connected_family,
                                         node_a,
                                         node_b,
                                         cutoff=num_relations):
             path_len = len(path)
             if path_len == num_relations + 1:
                 min_path = [path[0], path[-1]]
                 if self.connected_family.has_edge(min_path[0],
                                                   min_path[1]):
                     all_pairs.append((node_a, node_b, path, min_path))
             else:
                 continue
     # calculate path stats
     path_lens = [len(path_pairs[2]) - 1 for path_pairs in all_pairs]
     max_paths = max(path_lens)
     min_paths = min(path_lens)
     path_stats = {
         'max_path': max_paths,
         'min_path': min_paths,
         'num_path': len(all_pairs),
         'path_counts': [path_lens.count(pi) for pi in range(min_paths, max_paths + 1)]
     }
     return all_pairs, path_stats
Code example #37
def peo(tree):
    """ Returns a perfect elimination order and corresponding cliques, separators, histories, , rests for tree.

    Args:
        tree (NetworkX graph): A junction tree.

    Returns:
       tuple: A tuple of form (C, S, H, A, R), where the elemenst are lists of Cliques, Separators, Histories, , Rests, from a perfect elimination order.
    """
    # C = list(nx.dfs_preorder_nodes(tree, tree.nodes()[0])) # nx < 2.x
    C = list(nx.dfs_preorder_nodes(tree, list(tree.nodes)[0]))  # nx > 2.x
    S = [set() for j in range(len(C))]
    H = [set() for j in range(len(C))]
    R = [set() for j in range(len(C))]
    A = [set() for j in range(len(C) - 1)]
    S[0] = None
    H[0] = C[0]
    R[0] = C[0]
    for j in range(1, len(C)):
        H[j] = H[j - 1] | C[j]
        S[j] = H[j - 1] & C[j]
        A[j - 1] = H[j - 1] - S[j]
        R[j] = C[j] - H[j - 1]
    return (C, S, H, A, R)
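
For instance, on a two-clique junction tree whose nodes are frozensets (made up here; the nodes must be hashable sets for the intersection and difference operations to work):

import networkx as nx

tree = nx.Graph()
c1, c2 = frozenset({1, 2}), frozenset({2, 3})
tree.add_edge(c1, c2)
C, S, H, A, R = peo(tree)
print(S[1], R[1])
# frozenset({2}) frozenset({3}) -- the separator and rest when the second clique is processed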
Code example #38
def apprAlgorithm(G):
    # Find a minimum spanning tree T of G
    T = nx.minimum_spanning_tree(G, weight='weight')

    dfs = nx.dfs_preorder_nodes(T, '0')
    listnode = []
    for item in dfs:
        listnode += [item]

    # Create the hamiltonian tour
    L = nx.Graph()
    L.add_nodes_from(G.nodes(data=True))

    cost = 0
    weight = nx.get_edge_attributes(G, 'weight')
    for index, item in enumerate(listnode):
        if index < len(G) - 1:
            L.add_edge(item, listnode[index + 1])
            cost += G[str(item)][str(listnode[index + 1])]['weight']
        else:
            L.add_edge(item, listnode[0])
            cost += G[str(item)][str(listnode[0])]['weight']

    return (cost, T, L, listnode)
Code example #39
File: grammars.py Project: satanson/polytracker
 def verify(self, test_disconnection: bool = True):
     for prod in self.productions.values():
         for rule in prod:
             for v in rule.sequence:
                 if isinstance(v, str):
                     if v not in self:
                         raise MissingProductionError(f"Production {prod.name} references {v}, which is not in the grammar")
                     elif prod.name not in self.used_by[v]:
                         raise CorruptedGrammarError(
                             f"Production {prod.name} references {v} but that is not "
                             'recorded in the "used by" table: '
                             f"{self.used_by[prod.name]!r}"
                         )
         for user in self.used_by[prod.name]:
             if user not in self:
                 raise CorruptedGrammarError(
                     f"Production {prod.name} is used by {user}, but {user} production is not in the grammar"
                 )
         # if not self.used_by[prod.name] and self.start is not prod:
         #     print(f"Warning: Production {prod.name} is never used")
     for prod_name in self.used_by.keys():
         if prod_name not in self:
             raise CorruptedGrammarError(f'Production {prod_name} is in the "used by" table, but not in the grammar')
     if self.start is not None and test_disconnection:
         # make sure there is a path from start to every other production
         graph = self.dependency_graph()
         visited = set(node for node in nx.dfs_preorder_nodes(graph, source=self.start))
         if len(visited) < len(self.productions):
             not_visited_prods = set(node for node in self.productions.values() if node not in visited)
             # it's okay if the unvisited productions aren't able to produce terminals
             not_visited = [node.name for node in not_visited_prods if node.can_produce_terminal]
             if not_visited:
                 raise DisconnectedGrammarError(
                     "These productions are not accessible from the start production "
                     f"{self.start.name}: {', '.join(not_visited)}"
                 )
Code example #40
    def local_independencies(self, variables):
        """
        Returns an instance of Independencies containing the local independencies
        of each of the variables.

        Parameters
        ----------
        variables: str or array like
            variables whose local independencies are to be found.

        Examples
        --------
        >>> from pgmpy.models import DAG
        >>> student = DAG()
        >>> student.add_edges_from([('diff', 'grade'), ('intel', 'grade'),
        >>>                         ('grade', 'letter'), ('intel', 'SAT')])
        >>> ind = student.local_independencies('grade')
        >>> ind
        (grade _|_ SAT | diff, intel)
        """

        independencies = Independencies()
        for variable in (
            variables if isinstance(variables, (list, tuple)) else [variables]
        ):
            non_descendents = (
                set(self.nodes())
                - {variable}
                - set(nx.dfs_preorder_nodes(self, variable))
            )
            parents = set(self.get_parents(variable))
            if non_descendents - parents:
                independencies.add_assertions(
                    [variable, non_descendents - parents, parents]
                )
        return independencies
Code example #41
def approximation(g):
    # n is the number of vertices.
    n = g.number_of_nodes()

    # You might want to use the function "nx.minimum_spanning_tree(g)"
    # which returns a Minimum Spanning Tree of the graph g

    # You also might want to use the command "list(nx.dfs_preorder_nodes(graph, 0))"
    # which gives a list of vertices of the given graph in depth-first preorder.
    msp = nx.minimum_spanning_tree(g)
    dfs_nodes = list(nx.dfs_preorder_nodes(msp, 0))

    prev = -1
    ans = 0
    for node in dfs_nodes:
        if (prev == -1):
            prev = node
        else:
            ans += g[prev][node]['weight']
            prev = node

    ans += g[prev][dfs_nodes[0]]['weight']

    return ans
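
As a small check on a made-up three-node triangle (node labels must include 0, where the DFS starts):

import networkx as nx

g = nx.Graph()
g.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (0, 2, 2)])
print(approximation(g))
# 4 -- the MST is 0-1-2, and the preorder tour 0-1-2-0 costs 1 + 1 + 2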
Code example #42
File: mtgt.py Project: TemporalInept/Lituus
 def findall(self, ntype, source='root', attr=None, val=None):
     """
      finds all nodes in the tree of the type ntype starting at source with
      attribute attr (if set) having value val (if set)
     :param ntype: node type to find
     :param source: the source to start the search from
     :param attr: the attribute key the node will have
     :param val: the val that the given attribute key will have
     :return: a list of node ids
     """
     if val and not attr:
         raise lts.LituusException(lts.ETREE, "attr required with val")
     found = []
     for node in nx.dfs_preorder_nodes(self._t, source):
         if node_type(node) == ntype and not node == source:
             if attr:
                 if attr in self._t.node[node]:
                     if val:
                         if self._t.node[node][attr] == val:
                             found.append(node)
                     else:
                         found.append(node)
             else: found.append(node)
     return found
Code example #43
def unconstrained_bridge_augmentation(G):
    """Finds an optimal 2-edge-augmentation of G using the fewest edges.

    This is an implementation of the algorithm detailed in [1]_.
    The basic idea is to construct a meta-graph of bridge-ccs, connect leaf
    nodes of the trees to connect the entire graph, and finally connect the
    leafs of the tree in dfs-preorder to bridge connect the entire graph.

    Parameters
    ----------
    G : NetworkX graph
       An undirected graph.

    Yields
    ------
    edge : tuple
        Edges in the bridge augmentation of G

    Notes
    -----
    Input: a graph G.
    First find the bridge components of G and collapse each bridge-cc into a
    node of a meta-graph C, which is guaranteed to be a forest of trees.

    C contains p "leafs" --- nodes with exactly one incident edge.
    C contains q "isolated nodes" --- nodes with no incident edges.

    Theorem: If p + q > 1, then at least :math:`ceil(p / 2) + q` edges are
        needed to bridge connect C. This algorithm achieves this min number.

    The method first adds enough edges to make G into a tree and then pairs
    leafs in a simple fashion.

    Let n be the number of trees in C. Let v(i) be an isolated vertex in the
    i-th tree if one exists, otherwise it is a pair of distinct leafs nodes
    in the i-th tree. Alternating edges from these sets (i.e.  adding edges
    A1 = [(v(i)[0], v(i + 1)[1]), v(i + 1)[0], v(i + 2)[1])...]) connects C
    into a tree T. This tree has p' = p + 2q - 2(n -1) leafs and no isolated
    vertices. A1 has n - 1 edges. The next step finds ceil(p' / 2) edges to
    biconnect any tree with p' leafs.

    Convert T into an arborescence T' by picking an arbitrary root node with
    degree >= 2 and directing all edges away from the root. Note the
    implementation implicitly constructs T'.

    The leafs of T are the nodes with no existing edges in T'.
    Order the leafs of T' by DFS preorder. Then break this list in half
    and add the zipped pairs to A2.

    The set A = A1 + A2 is the minimum augmentation in the metagraph.

    To convert this to edges in the original graph, map each meta-edge back to
    an arbitrary pair of nodes in the corresponding bridge components.

    References
    ----------
    .. [1] Eswaran, Kapali P., and R. Endre Tarjan. (1975) Augmentation problems.
        http://epubs.siam.org/doi/abs/10.1137/0205044

    See Also
    --------
    :func:`bridge_augmentation`
    :func:`k_edge_augmentation`

    Example
    -------
    >>> G = nx.path_graph((1, 2, 3, 4, 5, 6, 7))
    >>> sorted(unconstrained_bridge_augmentation(G))
    [(1, 7)]
    >>> G = nx.path_graph((1, 2, 3, 2, 4, 5, 6, 7))
    >>> sorted(unconstrained_bridge_augmentation(G))
    [(1, 3), (3, 7)]
    >>> G = nx.Graph([(0, 1), (0, 2), (1, 2)])
    >>> G.add_node(4)
    >>> sorted(unconstrained_bridge_augmentation(G))
    [(1, 4), (4, 0)]
    """
    # -----
    # Mapping of terms from (Eswaran and Tarjan):
    #     G = G_0 - the input graph
    #     C = G_0' - the bridge condensation of G. (This is a forest of trees)
    #     A1 = A_1 - the edges to connect the forest into a tree
    #         leaf = pendant - a node with degree of 1

    #     alpha(v) = maps the node v in G to its meta-node in C
    #     beta(x) = maps the meta-node x in C to any node in the bridge
    #         component of G corresponding to x.

    # find the 2-edge-connected components of G
    bridge_ccs = list(nx.connectivity.bridge_components(G))
    # condense G into a forest C
    C = collapse(G, bridge_ccs)

    # Choose pairs of distinct leaf nodes in each tree. If this is not
    # possible then make a pair using the single isolated node in the tree.
    vset1 = [
        tuple(cc) * 2  # case1: an isolated node
        if len(cc) == 1 else sorted(
            cc, key=C.degree)[0:2]  # case2: pair of leaf nodes
        for cc in nx.connected_components(C)
    ]
    if len(vset1) > 1:
        # Use this set to construct edges that connect C into a tree.
        nodes1 = [vs[0] for vs in vset1]
        nodes2 = [vs[1] for vs in vset1]
        A1 = list(zip(nodes1[1:], nodes2))
    else:
        A1 = []
    # Connect each tree in the forest to construct an arborescence
    T = C.copy()
    T.add_edges_from(A1)

    # If there are only two leaf nodes, we simply connect them.
    leafs = [n for n, d in T.degree() if d == 1]
    if len(leafs) == 1:
        A2 = []
    elif len(leafs) == 2:
        A2 = [tuple(leafs)]
    else:
        # Choose an arbitrary non-leaf root
        root = next(n for n, d in T.degree() if d > 1)
        # order the leaves of C by (induced directed) preorder
        v2 = [n for n in nx.dfs_preorder_nodes(T, root) if T.degree(n) == 1]
        # connecting first half of the leafs in pre-order to the second
        # half will bridge connect the tree with the fewest edges.
        half = int(math.ceil(len(v2) / 2.0))
        A2 = list(zip(v2[:half], v2[-half:]))

    # collect the edges used to augment the original forest
    aug_tree_edges = A1 + A2

    # Construct the mapping (beta) from meta-nodes to regular nodes
    inverse = defaultdict(list)
    for k, v in C.graph['mapping'].items():
        inverse[v].append(k)
    # sort so we choose minimum degree nodes first
    inverse = {
        mu: sorted(mapped, key=lambda u: (G.degree(u), u))
        for mu, mapped in inverse.items()
    }

    # For each meta-edge, map back to an arbitrary pair in the original graph
    G2 = G.copy()
    for mu, mv in aug_tree_edges:
        # Find the first available edge that doesn't exist and return it
        for u, v in it.product(inverse[mu], inverse[mv]):
            if not G2.has_edge(u, v):
                G2.add_edge(u, v)
                yield u, v
                break
Code example #44
File: test_dfs.py Project: lpp1985/lpp_Script
 def test_preorder_nodes(self):
     assert_equal(list(nx.dfs_preorder_nodes(self.G, source=0)),
                  [0, 1, 2, 4, 3])
     assert_equal(list(nx.dfs_preorder_nodes(self.D)), [0, 1, 2, 3])
Code example #45
def build_pipeline_graph(input: DataType, output: DataType, registry, max_list_depth=3, max_pipeline_width=3) -> "PipelineBuilder":
    """
    Creates a `PipelineBuilder` instance that generates all pipelines
    from `input` to `output` types.

    ##### Parameters

    - `input`: type descriptor for the desired input.
    - `output`: type descriptor for the desired output.
    - `registry`: list of available classes to build the pipelines.
    - `max_list_depth`: maximum nesting depth of the `List[T]` wrappers added to the registry.
    - `max_pipeline_width`: maximum number of types that may be available at any single point in the pipeline.
    """
    
    # First we will unpack the input and output type and
    # store them in actual lists for easier use

    if isinstance(input, Tuple):
        input_type = list(input.inner)
    else:
        input_type = [input]

    if isinstance(output, Tuple):
        output_type = list(output.inner)
    else:
        output_type = [output]

    logger.info(f"input_type={input_type}")
    logger.info(f"output_type={output_type}")

    # Before starting, let's create all the List[T] wrappers up to 
    # `max_list_depth` and add them to `registry`, so that they are available later
    for algorithm in list(registry):
        for _ in range(max_list_depth):
            algorithm = make_list_wrapper(algorithm)
            registry.append(algorithm)

    # We will also need an index to quickly find out which algorithms
    # accept each input type
    index = defaultdict(set)

    for algorithm in registry:
        types = _get_annotations(algorithm).input
        types = list(types.inner) if isinstance(types, Tuple) else [types]

        for t in types:
            index[t].add(algorithm)

    logger.info(f"Built algorithm index with {len(index)} entries and {len(registry)} total algorithms.")

    # The graph contains all the algorithms, each algorithm is connected
    # to all those nodes that it can process, which are nodes whose output
    # type is a superset of what the algorithm requires.
    G = Graph()

    # For each node stored in the graph, we will store also the full list
    # of all inputs and outputs that we can guarantee are available at this point.
    # Initially we add the `Start` node, which produces all of the inputs,
    # and the `End` node which consumes all the outputs.
    start_node = PipelineStart(input_type)
    end_node = PipelineEnd(output_type)
    G.add_edge(GraphSpace.Start, start_node)
    G.add_edge(end_node, GraphSpace.End)

    # We will apply a BFS algorithm at this point. We will make sure
    # that once a node is processed, all the algorithms to which it could
    # potentially connect are stored in the graph.
    # Initially the `Start` node is the only one open.
    open_nodes = [start_node]
    closed_nodes = set()

    while open_nodes:
        # This is the next node we will need to connect.
        node = open_nodes.pop(0)

        if node in closed_nodes:
            continue

        # When leaving this node we can guarantee that we have the types in this list.
        types = node.output
        logger.info(f"Processing node={node}")

        # We will need this method to check if all of the input types of an algorithm are
        # guaranteed at this point, i.e., if they are available in `types`,
        # or at least a conforming type is.
        def type_is_guaranteed(input_type):
            for other_type in types:
                if conforms(other_type, input_type):
                    return True

            return False

        # In this point we have to identify all the algorithms that could continue
        # from this point on. These are all the algorithms whose input expects a subset
        # of the types that we already have.
        potential_algorithms = set()

        for t in types:
            potential_algorithms |= index[t]

        for algorithm in potential_algorithms:
            annotations = _get_annotations(algorithm)
            algorithm_input_types = list(annotations.input.inner) if isinstance(annotations.input, Tuple) else [annotations.input]
            algorithm_output_types = list(annotations.output.inner) if isinstance(annotations.output, Tuple) else [annotations.output]
            logger.info(f"Analyzing algorithm={algorithm.__name__} with inputs={algorithm_input_types} and outputs={algorithm_output_types}")

            if any(not type_is_guaranteed(input_type) for input_type in algorithm_input_types):
                logger.info(f"Skipping algorithm={algorithm.__name__}")
                continue
                    
            # At this point we can add the current algorithm to the graph.
            # First, we make the current algorithm "consume" the input types,
            # hence, the output types produced at this point are the output types
            # this algorithm provides plus any input type not consumed so far.
            output_types = sorted(set([t for t in types if t not in algorithm_input_types] + algorithm_output_types), key=str)

            if len(output_types) > max_pipeline_width:
                continue
            
            # We add this node to the graph and we mark that it consumes the inputs,
            # so that later when sampling we can correctly align all the types.
            # When building the node, we can get a `ValueError` if the internal
            # grammar cannot be built; in that case, we simply skip it
            try:
                new_node = PipelineNode(algorithm=algorithm, input=types, output=output_types)
                G.add_node(new_node)
                G.add_edge(node, new_node)
                open_nodes.append(new_node)
                logger.info(f"Adding node={algorithm.__name__} producing types={output_types}")
            except ValueError as e:
                logger.warning(f"Node={algorithm.__name__} cannot be built. Error={e}.")           

        # Let's check if we can add the `End` node.
        if all(type_is_guaranteed(t) for t in output_type):
            G.add_edge(node, end_node)
            logger.info("Connecting to end node")
            
        closed_nodes.add(node)

    # Once done we have to check if the `End` node was at some point included in the graph.
    # Otherwise that means there is no possible path.
    if GraphSpace.End not in G:
        raise TypeError(
            "No pipelines can be constructed from input:%r to output:%r."
            % (input_type, output_type)
        )

    # Now we remove all nodes that don't participate in any path
    # leading to `End`.
    reachable_from_end = set(nx.dfs_preorder_nodes(G.reverse(False), GraphSpace.End))
    unreachable_nodes = set(G.nodes) - reachable_from_end
    G.remove_nodes_from(unreachable_nodes)

    # If the node `Start` was removed, that means the graph is disconnected.
    if GraphSpace.Start not in G:
        raise TypeError(
            "No pipelines can be constructed from input:%r to output:%r."
            % (input_type, output_type)
        )

    return PipelineBuilder(G, registry)
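
# A minimal sketch (not from the original project) of the reverse-DFS pruning idiom
# used above: keep only the nodes that lie on some path reaching the sink.
import networkx as nx

G = nx.DiGraph([("start", "a"), ("a", "end"), ("start", "b")])  # "b" is a dead end
reachable_from_end = set(nx.dfs_preorder_nodes(G.reverse(copy=False), "end"))
G.remove_nodes_from(set(G.nodes) - reachable_from_end)
print(sorted(G.nodes))  # ['a', 'end', 'start']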
Code Example #46
File: Main3.py Project: aybikeu/RL
                           state_dict[(first_state_id, 'resource')], first_state_id)

    # This is not the ordered schedule but rather the roads cleared up to this state
    cleared_roads = [i for i, val in enumerate(state_dict[(first_state_id, 'debris')]) if val == 0]
    Schedule.extend(cleared_roads)


    # G2 is the original graph, whereas G_restored is rebuilt from the cleared roads
    for cr in Schedule:
        ed = [edge for edge, edge_id in ActionList.items() if edge_id == cr][0]
        G2[ed[0]][ed[1]]['debris'] = 0
        G_restored.add_edge(ed[0], ed[1])

    reachable_nodes = [] #list for reachable nodes
    for s in supply_nodes:
        reachable_nodes.extend(list(nx.dfs_preorder_nodes(G_restored, s)))

    actions = funcs2.initializeActionSpace(reachable_nodes, G, ActionList)  # admissible action indices into ActionList

    # sas_vec.append(first_state.ID)
    while remaining_demand:

        #Choose action
        eligible_actions = actions - set(Schedule)
        action = random.choice(list(eligible_actions))
        Schedule.append(action)

        ## Vertex collapse - condense the network
        # For large instances, computing shortest paths can be expensive
        try:
            betw_centrality_service[first_state.ID]
Code Example #47
File: mol.py Project: mstieffe/deepMap
    def aa_seq(self, order="dfs", train=True):
        mol_atoms_heavy = [a for a in self.atoms if a.type.mass >= 2.0]
        atom_seq_dict_heavy = {}
        atom_seq_dict_hydrogens = {}
        atom_predecessors_dict = {}
        cg_seq = self.cg_seq(order=order, train=train)
        for bead, predecessor_beads in cg_seq:
            bead_atoms = bead.atoms
            heavy_atoms = [a for a in bead_atoms if a.type.mass >= 2.0]
            hydrogens = [a for a in bead_atoms if a.type.mass < 2.0]
            predecessor_atoms = list(itertools.chain.from_iterable([b.atoms for b in set(predecessor_beads)]))
            predecessor_atoms_heavy = [a for a in predecessor_atoms if a.type.mass >= 2.0]
            predecessor_atoms_hydrogens = [a for a in predecessor_atoms if a.type.mass < 2.0]

            #find start atom
            psble_start_nodes = []
            n_heavy_neighbors = []
            for a in heavy_atoms:
                n_heavy_neighbors.append(len(list(nx.all_neighbors(self.G_heavy, a))))
                for n in nx.all_neighbors(self.G_heavy, a):
                    if n in predecessor_atoms_heavy:
                        psble_start_nodes.append(a)
            if psble_start_nodes:
                #start_atom = np.random.choice(psble_start_nodes)
                #weird bonds in cg sPS... therefore just take first one...
                start_atom = psble_start_nodes[0]
            else:
                start_atom = heavy_atoms[np.array(n_heavy_neighbors).argmin()]
            #else:
            #    start_atom = heavy_atoms[0]

            #sequence through atoms of bead
            if order == "bfs":
                edges = list(nx.bfs_edges(self.G.subgraph(heavy_atoms), start_atom))
                atom_seq = [start_atom] + [e[1] for e in edges]
            elif order == "random":
                atom_seq = [start_atom]
                pool = []
                for n in range(1, len(heavy_atoms)):
                    pool += list(nx.neighbors(self.G.subgraph(heavy_atoms), atom_seq[-1]))
                    pool = list(set(pool))
                    next_atom = np.random.choice(pool)  # renamed to avoid shadowing the builtin `next`
                    while next_atom in atom_seq:
                        next_atom = np.random.choice(pool)
                    pool.remove(next_atom)
                    atom_seq.append(next_atom)
            else:
                atom_seq = list(nx.dfs_preorder_nodes(self.G.subgraph(heavy_atoms), start_atom))
            #hydrogens = self.hydrogens[:]
            np.random.shuffle(hydrogens)
            #atom_seq = atom_seq + hydrogens

            #atom_seq = []
            for n in range(0, len(atom_seq)):
                atom_predecessors_dict[atom_seq[n]] = predecessor_atoms_heavy + atom_seq[:n]
            for n in range(0, len(hydrogens)):
                atom_predecessors_dict[hydrogens[n]] = mol_atoms_heavy + predecessor_atoms_hydrogens + hydrogens[:n]

            atom_seq_dict_heavy[bead] = atom_seq
            atom_seq_dict_hydrogens[bead] = hydrogens


        return cg_seq, atom_seq_dict_heavy, atom_seq_dict_hydrogens, atom_predecessors_dict
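
# A small sketch (toy graph, not from deepMap) contrasting the "bfs" and "dfs"
# orders used in `aa_seq` above:
import networkx as nx

G = nx.Graph([(0, 1), (0, 2), (1, 3)])
bfs_seq = [0] + [v for _, v in nx.bfs_edges(G, 0)]  # [0, 1, 2, 3]
dfs_seq = list(nx.dfs_preorder_nodes(G, 0))         # [0, 1, 3, 2]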
Code Example #48
import random

import networkx as nx
from networkx.algorithms.approximation import min_weighted_dominating_set

# NOTE: `average_pairwise_distance` is assumed to come from the project's utils module.


def solve(G):
    """
    The idea behind this solver: find shortest paths (via Dijkstra's) between each pair of vertices in the
    dominating set. Put all the vertices of the dominating set into a new graph G_prime, and add edges between
    each pair, introducing new vertices as necessary to ensure connectivity. Finally, return the MST of G_prime,
    followed by an extra-vertex optimization pass.

    Args:
        G: networkx.Graph

    Returns:
        T: networkx.Graph
    """

    dominatingSet = min_weighted_dominating_set(G, weight="weight")

    # apsp holds the all-pairs shortest paths computed with NetworkX's built-in Dijkstra.
    apsp = dict(
        nx.algorithms.shortest_paths.weighted.all_pairs_dijkstra(
            G, weight="weight"))

    # G_prime, the new graph below, shall consist of all vertices within the dominating set along with their shortest path edge weights in
    # between, bringing in new vertices as necessary.
    G_prime = nx.Graph()
    G_prime.add_nodes_from(dominatingSet)

    # extra_vertices collects the new vertices that must be added to G_prime to
    # keep the dominating-set nodes connected.
    extra_vertices = set()

    for node in dominatingSet:
        for node2 in dominatingSet:
            shortest_path = apsp[node][1][node2]
            # First, identify new vertices to be thrown into G prime.
            for vertex in shortest_path:
                if vertex not in dominatingSet:
                    G_prime.add_node(
                        vertex
                    )  # adding a node that is already present has no effect
                    extra_vertices.add(
                        vertex
                    )  # track the extra vertices added for connectivity
            # Next, identify new edges to be thrown into G prime. Adding edges more than once has no effect.
            for i in range(len(shortest_path) - 1):
                origin_vertex = shortest_path[i]
                terminus_vertex = shortest_path[i + 1]
                w = G.get_edge_data(origin_vertex, terminus_vertex)['weight']
                G_prime.add_edge(origin_vertex, terminus_vertex, weight=w)

    final_edges = list(
        nx.minimum_spanning_edges(G_prime,
                                  algorithm='kruskal',
                                  weight='weight',
                                  keys=True,
                                  data=True,
                                  ignore_nan=False))

    T = nx.Graph()
    T.add_nodes_from(dominatingSet)
    T.add_nodes_from(extra_vertices)
    T.add_edges_from(final_edges)

    current_average = average_pairwise_distance(T)
    last_average = 4000  # sentinel chosen larger than any expected average
    print(current_average)

    # Until adding more edges doesn't improve the average pairwise cost
    while current_average < last_average:
        last_average = current_average
        # For every node in T
        for node in nx.dfs_preorder_nodes(T, source=list(T.nodes)[0]):
            neighbors = nx.algorithms.traversal.breadth_first_search.bfs_tree(
                G, node, reverse=False, depth_limit=1)
            # Get one of its neighbors NOT in T
            for node2 in neighbors:
                # and add the edge between that vertex and its neighbor
                # if it decreases the average pairwise cost.
                if node2 not in T and G.get_edge_data(node, node2)\
                and G[node][node2]['weight'] < current_average:
                    T.add_node(node2)
                    T.add_edge(node,
                               node2,
                               weight=G.get_edge_data(node, node2)['weight'])
                    new_average = average_pairwise_distance(T)
                    if new_average > current_average:
                        T.remove_node(node2)
                        #T.remove_edge(node, node2)
                    else:
                        current_average = new_average
                        print("Adding an edge between", node, "and", node2,
                              "yields average", new_average)

    print("Tree vertices:", list(T))

    return T
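
# Hedged usage sketch for `solve` on a tiny weighted graph; `average_pairwise_distance`
# is assumed to be importable from the project's utils module.
G_demo = nx.Graph()
G_demo.add_weighted_edges_from([(0, 1, 1.0), (1, 2, 2.0), (0, 2, 5.0), (2, 3, 1.0)])
T_demo = solve(G_demo)  # a low-cost subtree touching a dominating set of G_demo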
    def nodeatk(self, graph_org, centrality, wt):
        graph = graph_org.copy()
        ncc2flag = 0
        criticaln = 0
        criticallcc = 0
        sf2 = 0
        efr2 = 0

        ###### initialization
        tempcc = [len(xind) for xind in nx.weakly_connected_components(graph)]
        lcc = [max(tempcc)]  # largest connected component
        ncc = [len(tempcc)]  # no. of connected comp.

        sf = [self.resoutput.get_servicefactor(graph)]  # service factor
        efr = [self.resoutput.get_edgerobustness(graph)]  # edge flow robust
        tempsf, tempefr = sf[0], efr[0]  # fallbacks if the graph runs out of edges

        while (len(graph.nodes) > 2):

            if centrality == "out degree":
                nodeimpscore = dict(graph.out_degree(weight=wt))
            else:
                nodeimpscore = nx.betweenness_centrality(graph, weight=wt)

            nodeselected = max(nodeimpscore, key=nodeimpscore.get)
            recchildnodes = list(nx.dfs_preorder_nodes(graph, nodeselected))
            recchildnodes.remove(nodeselected)

            ##### node removal at primary level
            graph.remove_node(nodeselected)

            #### effect of input on output- transitive property

            for childnode in recchildnodes:
                indeg = graph.in_degree(childnode, weight="weight")

                try:
                    tempratio = indeg / graph.nodes[childnode]['indegwt']
                except (KeyError, ZeroDivisionError):
                    continue

                for (nodest, nodeed) in graph.edges(childnode):
                    graph[nodest][nodeed]['weight'] = tempratio * (
                        graph[nodest][nodeed]['weight'])

                ##### node removal at secondary level
#                if indeg ==0:
#                    graph.remove_node(childnode)

### collect metrics
            tempcc = [
                len(xind) for xind in nx.weakly_connected_components(graph)
            ]
            lcc.append(max(tempcc))
            ncc.append(len(tempcc))

            if len(graph.edges()) > 1:
                tempefr = self.resoutput.get_edgerobustness(graph)
                tempsf = self.resoutput.get_servicefactor(graph)

            sf.append(tempsf)
            efr.append(tempefr)

            ####### critical values when graph gets disconnected into 2 components first time
            if len(tempcc) == 2 and ncc2flag == 0:
                ncc2flag = 1
                criticaln = len(lcc)
                criticallcc = max(tempcc)
                sf2 = tempsf
                efr2 = tempefr

        return lcc, ncc, sf, efr, criticaln, criticallcc, sf2, efr2
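
        # Note (illustrative, not project code): the `recchildnodes` gathered above
        # are exactly the DFS descendants of the removed node, i.e.
        #   set(nx.dfs_preorder_nodes(graph, nodeselected)) - {nodeselected}
        #   == nx.descendants(graph, nodeselected)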
        def get_nodeattack(graph_org):

            criticaln = 0
            criticallcc = 0
            sf2 = 0
            efr2 = 0
            ncc2flag = 0
            graph = graph_org.copy()

            tempcc = [
                len(xind) for xind in nx.weakly_connected_components(graph)
            ]
            lcc = [max(tempcc)]  # largest connected component
            ncc = [len(tempcc)]  # no. of connected comp.

            sf = [self.resoutput.get_servicefactor(graph)]  # service factor
            efr = [self.resoutput.get_edgerobustness(graph)]  # edge flow robust
            tempsf, tempefr = sf[0], efr[0]  # fallbacks if the graph runs out of edges

            while (len(graph.nodes) > 2):
                nodeselected = np.random.choice(graph.nodes())
                recchildnodes = list(nx.dfs_preorder_nodes(
                    graph, nodeselected))
                recchildnodes.remove(nodeselected)

                ##### node removal at primary level
                graph.remove_node(nodeselected)

                #### effect of input on output- transitive property

                for childnode in recchildnodes:
                    indeg = graph.in_degree(childnode, weight="weight")

                    try:
                        tempratio = indeg / graph.nodes[childnode]['indegwt']
                    except (KeyError, ZeroDivisionError):
                        continue

                    for (nodest, nodeed) in graph.edges(childnode):
                        graph[nodest][nodeed]['weight'] = tempratio * (
                            graph[nodest][nodeed]['weight'])

                    ##### node removal at secondary level
#                    if indeg ==0:
#                        graph.remove_node(childnode)

#### collecting metrics
                tempcc = [
                    len(xind) for xind in nx.weakly_connected_components(graph)
                ]
                try:
                    lcc.append(max(tempcc))
                except ValueError:  # graph has no components left
                    break

                ncc.append(len(tempcc))

                if len(graph.edges()) > 1:
                    tempsf = self.resoutput.get_servicefactor(graph)
                    tempefr = self.resoutput.get_edgerobustness(graph)

                sf.append(tempsf)
                efr.append(tempefr)

                ####### critical values when graph gets disconnected into 2 components first time
                if len(tempcc) == 2 and ncc2flag == 0:
                    ncc2flag = 1
                    criticaln = len(lcc)
                    criticallcc = max(tempcc)
                    sf2 = tempsf
                    efr2 = tempefr

            return lcc, ncc, sf, efr, criticaln, criticallcc, sf2, efr2
Code Example #51
 def maximal_cliques(self):
     """ return the list of maximal cliques in the model """
     #return list(self.tree.nodes())
     return list(nx.dfs_preorder_nodes(self.tree))
Code Example #52
def _reproduce_stages(G,
                      stages,
                      node,
                      downstream=False,
                      ignore_build_cache=False,
                      **kwargs):
    r"""Derive the evaluation of the given node for the given graph.

    When you _reproduce a stage_, you want to _evaluate the descendants_
    to know if it makes sense to _recompute_ it. A post-order search
    will give us an ordered list of the nodes we want.

    For example, let's say that we have the following pipeline:

                               E
                              / \
                             D   F
                            / \   \
                           B   C   G
                            \ /
                             A

    The derived evaluation of D would be: [A, B, C, D]

    When the `downstream` option is specified, the desired effect
    is to derive the evaluation starting from the given stage up to its
    ancestors. However, `networkx.ancestors` returns a set without
    any guarantee of order, so we are going to reverse the graph and
    use a pre-order search with the given stage as the starting point.

                   E                                   A
                  / \                                 / \
                 D   F                               B   C   G
                / \   \        --- reverse -->        \ /   /
               B   C   G                               D   F
                \ /                                     \ /
                 A                                       E

    The derived evaluation of _downstream_ B would be: [B, D, E]
    """

    import networkx as nx

    if downstream:
        # NOTE (py3 only):
        # Python's `deepcopy` defaults to pickle/unpickle the object.
        # Stages are complex objects (with references to `repo`, `outs`,
        # and `deps`) that cause struggles when you try to serialize them.
        # We need to create a copy of the graph itself, and then reverse it,
        # instead of using graph.reverse() directly because it calls
        # `deepcopy` underneath -- unless copy=False is specified.
        pipeline = nx.dfs_preorder_nodes(G.copy().reverse(copy=False), node)
    else:
        pipeline = nx.dfs_postorder_nodes(G, node)

    result = []
    for n in pipeline:
        try:
            ret = _reproduce_stage(stages, n, **kwargs)

            if len(ret) != 0 and ignore_build_cache:
                # NOTE: we are walking our pipeline from the top to the
                # bottom. If one stage is changed, it will be reproduced,
                # which tells us that we should force reproducing all of
                # the other stages down below, even if their direct
                # dependencies didn't change.
                kwargs["force"] = True

            result += ret
        except Exception as ex:
            raise ReproductionError(stages[n].relpath, ex)
    return result
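
# Sketch reproducing the traversal orders from the docstring above on the same toy
# pipeline (edges point from a stage to its dependencies):
import networkx as nx

G = nx.DiGraph([("E", "D"), ("E", "F"), ("D", "B"), ("D", "C"),
                ("F", "G"), ("B", "A"), ("C", "A")])
list(nx.dfs_postorder_nodes(G, "D"))                     # ['A', 'B', 'C', 'D']
list(nx.dfs_preorder_nodes(G.reverse(copy=False), "B"))  # ['B', 'D', 'E']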
Code Example #53
File: testImm.py Project: neela23/DataAnalysis
import networkx as nx
import scipy.io as sc
import matplotlib.pyplot as plt

A = nx.read_edgelist('test.edges')
nx.draw(A)
plt.draw()
#plt.show()

firstNode = list(A.nodes)[0]
print(firstNode)
dfs_list = list(nx.dfs_preorder_nodes(A, firstNode))

for node in dfs_list:
    print('node:', node)
Code Example #54
import networkx as nx


def generar_grafo(nombre_artista):
    G = nx.DiGraph()

    # add nodes (26)
    G.add_node("Ava Max")
    G.add_node("Dua Lipa")
    G.add_node("Melanie Martinez")
    G.add_node("Little Mix")
    G.add_node("Lady Gaga")
    G.add_node("Bebe Rexha")
    G.add_node("Camila Cabello")
    G.add_node("Anne Marie")
    G.add_node("Ariana Grande")
    G.add_node("Halsey")
    G.add_node("Zara Larsson")
    G.add_node("Sabrina Carpenter")
    G.add_node("Demi Lovato")
    G.add_node("Bea Miller")
    G.add_node("Hailee Steinfeld")
    G.add_node("Meghan Trainor")
    G.add_node("Clean Bandit")
    G.add_node("5SOS")
    G.add_node("MGK")
    G.add_node("Jonas Brothers")
    G.add_node("Sam Smith")
    G.add_node("Sia")
    G.add_node("Diplo")
    G.add_node("Zedd")
    G.add_node("Charlotte Lawrence")
    G.add_node("Doja Cat")

    # print("Nodes:", G.nodes())

    # Add edges
    G.add_edge("Camila Cabello","5SOS")
    G.add_edge("Camila Cabello","Clean Bandit")
    G.add_edge("Dua Lipa","Anne Marie")
    G.add_edge("Dua Lipa","Camila Cabello")
    G.add_edge("5SOS","Diplo")
    G.add_edge("Halsey","Lady Gaga")
    G.add_edge("Halsey","Dua Lipa")
    G.add_edge("Halsey","Ava Max")
    G.add_edge("Halsey","Sia")
    G.add_edge("Sia","MGK")
    G.add_edge("Sia","Melanie Martinez")
    G.add_edge("Diplo","Halsey")
    G.add_edge("Diplo","Doja Cat")
    G.add_edge("Melanie Martinez","Halsey")
    G.add_edge("Melanie Martinez","Ariana Grande")
    G.add_edge("Demi Lovato","Melanie Martinez")
    G.add_edge("Demi Lovato","Sam Smith")
    G.add_edge("Lady Gaga","Demi Lovato")
    G.add_edge("Lady Gaga","Bea Miller")
    G.add_edge("Lady Gaga","MGK")
    G.add_edge("Lady Gaga","Zedd")
    G.add_edge("Lady Gaga","Anne Marie")
    G.add_edge("Bea Miller","Meghan Trainor")
    G.add_edge("Bea Miller","Little Mix")
    G.add_edge("Ava Max","Bebe Rexha")
    G.add_edge("Zara Larsson","Zedd")
    G.add_edge("Zedd", "Sia")
    G.add_edge("Clean Bandit", "Doja Cat")
    G.add_edge("Clean Bandit", "Ava Max")
    G.add_edge("Little Mix", "Zara Larsson")
    G.add_edge("Little Mix", "Meghan Trainor")
    G.add_edge("Little Mix", "Sabrina Carpenter")
    G.add_edge("Meghan Trainor", "Dua Lipa")
    G.add_edge("Sabrina Carpenter", "Doja Cat")
    G.add_edge("Sabrina Carpenter", "Charlotte Lawrence")
    G.add_edge("Hailee Steinfeld", "Little Mix")
    G.add_edge("Doja Cat", "Lady Gaga")
    G.add_edge("Charlotte Lawrence", "Zara Larsson")
    G.add_edge("Ariana Grande", "Sam Smith")
    G.add_edge("Sam Smith", "Charlotte Lawrence")
    G.add_edge("Bebe Rexha", "Hailee Steinfeld")
    G.add_edge("Bebe Rexha", "Meghan Trainor")
    G.add_edge("Bebe Rexha", "Ariana Grande")
    G.add_edge("Jonas Brothers", "5SOS")
    G.add_edge("Anne Marie", "Jonas Brothers")
    G.add_edge("MGK", "Anne Marie")
    G.add_edge("MGK", "Jonas Brothers")
    G.add_edge("MGK", "5SOS")
    

    # DFS pre-order of the graph
    return list(nx.dfs_preorder_nodes(G, nombre_artista))
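
# Example call (illustrative): DFS pre-order starting from a given artist.
print(generar_grafo("Halsey"))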

    
    def linkatk(self, graph_org, centrality, wt, dfact):
        graph = graph_org.copy()
        ncc2flag = 0
        criticaln = 0
        criticallcc = 0
        sf2 = 0
        efr2 = 0

        for u, v, w in graph.edges(data=True):
            graph[u][v]['newweight'] = w['weight']

        ##### initialization
        tempcc = [len(xind) for xind in nx.weakly_connected_components(graph)]
        lcc = [max(tempcc)]  # largest connected component
        ncc = [len(tempcc)]  # no. of connected comp.

        sf = [self.resoutput.get_servicefactor(graph)]  # service factor
        efr = [self.resoutput.get_edgerobustness(graph)]  # edge flow robust
        tempefr = efr[0]  # fallback if the graph runs out of edges

        while (len(graph.edges) > 1):

            ranks = centrality(graph, weight=wt)
            edges_sorted = sorted(graph.edges(),
                                  key=lambda n: ranks[n],
                                  reverse=True)  # sorts in descending

            edge_selected = edges_sorted[0]
            nodeinc = edge_selected[1]
            recchildnodes = list(nx.dfs_preorder_nodes(graph, nodeinc))

            graph[edge_selected[0]][edge_selected[1]]['newweight'] = graph[
                edge_selected[0]][edge_selected[1]]['newweight'] - 0.1

            ##### comparing weights with service acceptable level
            if graph[edge_selected[0]][
                    edge_selected[1]]['newweight'] < dfact * (
                        graph[edge_selected[0]][edge_selected[1]]['weight']):
                graph.remove_edge(*edge_selected)

                #### effect of input on output- transitive property
                for childnode in recchildnodes:
                    indeg = graph.in_degree(childnode, weight="newweight")

                    try:
                        tempratio = indeg / graph.nodes[childnode]['indegwt']
                    except (KeyError, ZeroDivisionError):
                        continue

                    for (nodest, nodeed) in graph.edges(childnode):
                        graph[nodest][nodeed]['newweight'] = tempratio * (
                            graph[nodest][nodeed]['newweight'])

                    ##### node removal at secondary level
#                    if indeg ==0:
#                        graph.remove_node(childnode)
            else:
                #### effect of input on output- transitive property
                for childnode in recchildnodes:
                    indeg = graph.in_degree(childnode, weight="newweight")

                    try:
                        tempratio = indeg / graph.nodes[childnode]['indegwt']
                    except (KeyError, ZeroDivisionError):
                        continue

                    for (nodest, nodeed) in graph.edges(childnode):
                        graph[nodest][nodeed]['newweight'] = tempratio * (
                            graph[nodest][nodeed]['newweight'])

                    ##### node removal at secondary level
#                    if indeg ==0:
#                        graph.remove_node(childnode)

##### collecting metrics
            tempcc = [
                len(xind) for xind in nx.weakly_connected_components(graph)
            ]
            lcc.append(max(tempcc))
            ncc.append(len(tempcc))

            tempsf = self.resoutput.get_servicefactor(graph)
            sf.append(tempsf)

            if len(graph.edges()) > 1:
                tempefr = self.resoutput.get_edgerobustness(graph)

            efr.append(tempefr)

            ####### critical values when graph disconnected into 2 components first time
            if len(tempcc) == 2 and ncc2flag == 0:
                ncc2flag = 1
                criticaln = len(lcc)
                criticallcc = max(tempcc)
                sf2 = tempsf
                efr2 = tempefr

        return lcc, ncc, sf, efr, criticaln, criticallcc, sf2, efr2
Code Example #56
def build_pipelines(input, output, registry) -> "PipelineBuilder":
    """
    Creates a `PipelineBuilder` instance that generates all pipelines
    from `input` to `output` types.

    ##### Parameters

    - `input`: type descriptor for the desired input.
    - `output`: type descriptor for the desired output.
    - `registry`: list of available classes to build the pipelines.
    """

    # warnings.warn(
    #     "This method is deprecated and not under use by AutoGOAL's"
    #     " internal API anymore, use `build_pipeline_graph` instead.",
    #     category=DeprecationWarning,
    #     stacklevel=2,
    # )

    list_pairs = set()
    types_queue = []

    if isinstance(input, Tuple):
        types_queue.extend(input.inner)
    else:
        types_queue.append(input)

    if isinstance(output, Tuple):
        types_queue.extend(output.inner)
    else:
        types_queue.append(output)

    types_seen = set()

    while types_queue:
        output_type = types_queue.pop(0)

        def build(internal_output, depth):
            if internal_output in types_seen:
                return

            for other_clss in registry:
                annotations = _get_annotations(other_clss)

                if annotations in list_pairs:
                    continue

                other_input = annotations.input
                other_output = annotations.output

                if other_input == other_output:
                    continue

                if not conforms(internal_output, other_input):
                    continue

                other_wrapper = build_composite_list(other_input, other_output, depth)
                list_pairs.add(annotations)
                registry.append(other_wrapper)
                types_queue.append(_get_annotations(other_wrapper).output)

        depth = 0

        while isinstance(output_type, List):
            if output_type.depth() >= MAX_LIST_DEPTH:
                break

            depth += 1

            output_type = output_type.inner
            build(output_type, depth)
            types_seen.add(output_type)

            logger.debug("Output type: %r", output_type)

    list_tuples = set()

    def connect_tuple_wrappers(node, output_type):
        if not isinstance(output_type, Tuple):
            return

        for index in range(0, len(output_type.inner)):
            internal_input = output_type.inner[index]

            for other_clss in registry:
                annotations = _get_annotations(other_clss)
                other_input = annotations.input

                if not (conforms(internal_input, other_input) and other_clss != node):
                    continue

                # `other_class` has input compatible with one element in the Tuple
                # build the output `Tuple[..., internal_output, ...]` of the wrapper class
                internal_output = annotations.output
                output_tuple = list(output_type.inner)
                output_tuple[index] = internal_output
                output_tuple_type = Tuple(*output_tuple)

                # dynamic class representing the wrapper algorithm
                if (index, output_type, output_tuple_type) in list_tuples:
                    continue

                other_wrapper = build_composite_tuple(
                    index, output_type, output_tuple_type
                )
                list_tuples.add((index, output_type, output_tuple_type))
                registry.append(other_wrapper)

                open_nodes.append(other_wrapper)
                G.add_edge(node, other_wrapper)

    G = Graph()

    open_nodes = []
    closed_nodes = set()

    # Enqueue open nodes
    for clss in registry:
        if conforms(input, _get_annotations(clss).input):
            open_nodes.append(clss)
            G.add_edge(GraphSpace.Start, clss)

    connect_tuple_wrappers(GraphSpace.Start, input)

    if GraphSpace.Start not in G:
        raise TypeError("There are no classes compatible with input type:%r." % input)

    while open_nodes:
        clss = open_nodes.pop(0)

        if clss in closed_nodes:
            continue

        closed_nodes.add(clss)
        output_type = _get_annotations(clss).output

        for other_clss in registry:
            other_input = _get_annotations(other_clss).input
            if conforms(output_type, other_input) and other_clss != clss:
                open_nodes.append(other_clss)
                G.add_edge(clss, other_clss)

        connect_tuple_wrappers(clss, output_type)

        if conforms(output_type, output):
            G.add_edge(clss, GraphSpace.End)

    if GraphSpace.End not in G:
        raise TypeError(
            "No pipelines can be constructed from input:%r to output:%r."
            % (input, output)
        )

    reachable_from_end = set(nx.dfs_preorder_nodes(G.reverse(False), GraphSpace.End))
    unreachable_nodes = set(G.nodes) - reachable_from_end
    G.remove_nodes_from(unreachable_nodes)

    if GraphSpace.Start not in G:
        raise TypeError(
            "No pipelines can be constructed from input:%r to output:%r."
            % (input, output)
        )

    return PipelineBuilder(G, registry)
Code Example #57
File: demography.py Project: earlyjohn/momi
 def leaves_subtended_by(self):
     return {
         v: self.leaves & set(nx.dfs_preorder_nodes(self, v))
         for v in self
     }
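
# The underlying idiom (toy example, not momi code): leaves under each vertex of a
# rooted tree whose edges point from parent to child.
import networkx as nx

T = nx.DiGraph([("root", "x"), ("root", "y"), ("x", "a"), ("x", "b")])
leaves = {v for v in T if T.out_degree(v) == 0}
subtended = {v: leaves & set(nx.dfs_preorder_nodes(T, v)) for v in T}
print(subtended["x"])  # {'a', 'b'}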
Code Example #58
File: graph.py Project: searich/net-analysis
def domain_view(multi_graph: nx.MultiDiGraph,
                root_domain: str) -> nx.MultiDiGraph:
    """Returns the subgraph rooted at the given domain."""
    return multi_graph.subgraph(nx.dfs_preorder_nodes(multi_graph,
                                                      root_domain))
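
# Hypothetical usage: take the view rooted at "example.com"; unrelated roots are excluded.
import networkx as nx

g = nx.MultiDiGraph()
g.add_edge("example.com", "cdn.example.com")
g.add_edge("example.com", "api.example.com")
g.add_edge("other.org", "example.com")
print(sorted(domain_view(g, "example.com").nodes))
# ['api.example.com', 'cdn.example.com', 'example.com']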
Code Example #59
# Opcode tables for classifying x86 branch instructions; `cond_mnem1`, `nodes`,
# and `g` are assumed to be defined earlier in the source project (Python 2 code).
cond_mnem2 = [('\x0f' + chr(x)) for x in range(0x80, 0x90)]
uncond_mnem1 = [0xe9, 0xeb, 0xc3]
uncond_mnem2 = ['\xff\x04', '\xff\x05']

for n in nodes:
    if n.id is not None:
        if ord(n.code[-1]) in uncond_mnem1 or (len(n.code) > 1 and n.code[-2:]
                                               in uncond_mnem2):
            conditional[n.id] = False
        elif ord(n.code[-1]) in cond_mnem1 or (len(n.code) > 1
                                               and n.code[-2:] in cond_mnem2):
            conditional[n.id] = True
        else:
            conditional[n.id] = True

trav = nx.dfs_preorder_nodes(g, 0)
chains = []
visited = set()

# gather all contiguous pieces: "chains" of blocks
for t in trav:
    chain = []

    if t in visited:
        continue

    c = t
    while True:
        #print c
        if c in visited:
            idxs = filter(lambda i: chains[i][0] == c, range(len(chains)))
Code Example #60
import numpy as np
import networkx as nx
from scipy.spatial.transform import Rotation as R
from sklearn.neighbors import NearestNeighbors

# `file` is assumed to be a list of pose lines of the form "id x y z qx qy qz qw".
l = len(file)
pos = np.zeros((l, 3))
rotM = np.zeros((l, 3, 3))
quat = np.zeros((l, 4))

for i, line in enumerate(file):
    line_list = line.split()
    pos[i] = np.array(line_list[1:4])
    quat[i] = np.array(line_list[4:])
    r = R.from_quat(quat[i])
    rotM[i] = r.as_matrix().copy()

clf = NearestNeighbors(n_neighbors=2).fit(pos)
G = clf.kneighbors_graph()
T = nx.from_scipy_sparse_matrix(G)  # on networkx >= 3.0: nx.from_scipy_sparse_array(G)
# one candidate visiting order per possible start point
paths = [list(nx.dfs_preorder_nodes(T, i)) for i in range(len(pos))]
mindist = np.inf
minidx = 0

for i in range(len(pos)):
    p = paths[i]  # order of nodes
    ordered = pos[p]  # ordered nodes
    # find cost of that order by the sum of euclidean distances between points (i) and (i+1)
    cost = (((ordered[:-1] - ordered[1:])**2).sum(1)).sum()
    if cost < mindist:
        mindist = cost
        minidx = i
opt_order = paths[minidx]

posop = pos[opt_order]