Example #1
    def lineage(self, ind, format='list', v=False):
        try:
            if ind in self.name2ox:
                ind = self.name2ox[ind]
            else:
                ind = int(ind)

            if format == 'list':
                out = []
                for i in nx.dfs_successors(self.tree, ind):
                    out += [[
                        self.tree.nodes[i]['rank'], self.tree.nodes[i]['name']
                    ]]
                return out

            if format == 'dict':
                out = {'no rank': []}
                for i in nx.dfs_successors(self.tree, ind):
                    rank = self.tree.nodes[i]['rank']
                    name = self.tree.nodes[i]['name']
                    if rank == 'no rank':
                        out[rank] += [name]
                    else:
                        out[rank] = name
                return out

        except Exception:
            if v:
                sys.stderr.write('FAILED QUERY: %s\n' % ind)
            return None
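A quick way to see the shape that lineage iterates over: nx.dfs_successors returns a dict mapping each visited node that has children to the list of those children in DFS order, so iterating the dict skips the final leaf. A minimal sketch (node IDs and attributes are made up for illustration):

import networkx as nx

t = nx.DiGraph()
t.add_node(1, rank='species', name='H. sapiens')
t.add_node(2, rank='genus', name='Homo')
t.add_edge(1, 2)
for i in nx.dfs_successors(t, 1):  # keys: only node 1; node 2 is a leaf
    print(t.nodes[i]['rank'], t.nodes[i]['name'])  # species H. sapiens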
Example #2
 def test_dls_successor(self):
     result = nx.dfs_successors(self.G, source=4, depth_limit=3)
     assert ({n: set(v) for n, v in result.items()} ==
             {2: {1, 7}, 3: {2}, 4: {3, 5}, 5: {6}})
     result = nx.dfs_successors(self.D, source=7, depth_limit=2)
     assert ({n: set(v) for n, v in result.items()} ==
             {8: {9}, 2: {3}, 7: {8, 2}})
Example #3
 def dls_test_successor(self):
     result = nx.dfs_successors(self.G, source=4, depth_limit=3)
     assert_equal({n: set(v) for n, v in result.items()},
                  {2: {1, 7}, 3: {2}, 4: {3, 5}, 5: {6}})
     result = nx.dfs_successors(self.D, source=7, depth_limit=2)
     assert_equal({n: set(v) for n, v in result.items()},
                  {8: {9}, 2: {3}, 7: {8, 2}})
Example #4
 def test_successor(self):
     assert (nx.dfs_successors(self.G, source=0) == {
         0: [1],
         1: [2],
         2: [4],
         4: [3]
     })
     assert nx.dfs_successors(self.D) == {0: [1], 2: [3]}
Example #5
 def test_successor(self):
     assert_equal(nx.dfs_successors(self.G, source=0), {
         0: [1],
         1: [2],
         2: [4],
         4: [3]
     })
     assert_equal(nx.dfs_successors(self.D), {0: [1], 2: [3]})
Example #6
 def dls_test_successor(self):
     result = nx.dfs_successors(self.G, source=4, depth_limit=3)
     assert_equal(dict((n, set(v)) for n, v in result.items()), {
         2: set([1, 7]),
         3: set([2]),
         4: set([3, 5]),
         5: set([6])
     })
     result = nx.dfs_successors(self.D, source=7, depth_limit=2)
     assert_equal(dict((n, set(v)) for n, v in result.items()), {
         8: set([9]),
         2: set([3]),
         7: set([8, 2])
     })
Example #7
def obtain_ancestry_path(G, strainID):
    """Given a strain ID, obtain its ancestors in reverse chronical order along with 
  the plasmids to a list.
  If the strainID is not in the graph G, then an empty list [] will be returned."""
    ancestry_path = []

    try:
        ancestor_dic = nx.dfs_successors(G, strainID)

        key = strainID
        while key:
            v = ancestor_dic.get(key)
            if v:  # This key does have an ancestor
                plasmids_dic = G.edges[key, v[0]]
                plasmids = []
                if plasmids_dic:  # check the dict is not empty
                    plasmids = plasmids_dic['plasmids']
                ancestry_path.append((plasmids, v[0]))
                key = v[0]
            else:
                key = v
    except KeyError:
        pass

    return ancestry_path
Example #8
def create_subgraph(G, node):
    edges = nx.dfs_successors(G, node)
    nodes = []
    for k, v in edges.items():
        nodes.append(k)
        nodes.extend(v)
    return G.subgraph(nodes)
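For instance, this keeps the source node together with everything reachable from it, while upstream nodes are excluded. A small sketch with arbitrary labels:

import networkx as nx

G = nx.DiGraph([('a', 'b'), ('b', 'c'), ('x', 'a')])
sub = create_subgraph(G, 'a')
print(sorted(sub.nodes()))  # ['a', 'b', 'c'] -- 'x' points into 'a' and is left out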
Example #9
def amr_subgraphs_optimized(g, n_min=1, n_max=None):  # g: AMRGraph object
    """ -> connected subgraphs whose number of nodes is >= n_min & <= n_max """
    output = defaultdict(list)
    # PROXY_AFP_ENG_20021112_0467.11 - a cyclic graph
    if not nx.is_directed_acyclic_graph(g):
        print('The input graph is not directed acyclic.')
        return output

    amr_root = list(g.successors('@'))[0]
    order = list(nx.dfs_preorder_nodes(g, amr_root))
    #    print(order)
    if not n_max:
        n_max = len(g.nodes())
    # assumption: n_min < n_max
    for i in range(n_min, n_max + 1):
        #        print(i)
        for n in order:
            #            pool = list(nx.dfs_preorder_nodes(g,'s',depth_limit=i-1))
            pool = set(y for v in nx.dfs_successors(g, n, depth_limit=i - 1).values()
                       for y in v)
            #            print(n,pool)
            if len(pool) < i - 1:
                continue
            for ns in itertools.combinations(pool, i - 1):
                sg = g.subgraph((n, ) + ns).copy()
                if nx.is_connected(sg.to_undirected()):
                    amr_root = list(nx.topological_sort(sg))[0]
                    sg.add_edge('@', amr_root, label='')
                    sg = AMRGraph(sg)
                    sg.meta = '# connected subgraph of {} nodes'.format(i)
                    output[i].append(sg)
    return output
Example #10
    def graph_plot_dyadic_Pr(self):
        source = []
        out = []
        test = nx.dfs_successors(self.graph)
        odd_nodes = {key: val for key, val in test.items() if key % 2 == 1}

        # Generate the 2 axis for plots
        source = [
            i / (2**(np.floor(np.log2(i)) + 1)) for i in odd_nodes.keys()
        ]
        out = [i / (2**(np.floor(np.log2(i)) + 1)) for i in odd_nodes.values()]

        fig, ax = plt.subplots()
        # ax.set_xlim([2,max_to_iter])

        # Always draw y=x
        ax.scatter(source,
                   out,
                   s=25,
                   cmap=plt.cm.coolwarm,
                   zorder=10,
                   marker='o')
        lims = [
            np.min([ax.get_xlim(), ax.get_ylim()]),  # min of both axes
            np.max([ax.get_xlim(), ax.get_ylim()]),  # max of both axes
        ]
        # ax.plot(lims, lims, 'k--', alpha=0.75, zorder=0,)
        ax.set_aspect('equal')
        ax.set_xlim(lims)
        ax.set_ylim(lims)
        ax.xaxis.set_major_locator(MaxNLocator(integer=True))
        fig.savefig('graph_plot_Pr_dyadic' + str(self.n) + '.png', dpi=300)
Example #11
def nodes_below(net, u):
    yield u

    u_successors = nx.dfs_successors(net, u)
    nodes_below_u = chain.from_iterable(u_successors.values())
    for v in nodes_below_u:
        yield v
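On Python 3 the same generator can be written more compactly with yield from; a sketch assuming the same imports (networkx as nx, chain from itertools):

def nodes_below_compact(net, u):
    yield u
    yield from chain.from_iterable(nx.dfs_successors(net, u).values())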
Example #12
    def explain(self) -> Dict[str, Any]:
        with self.lock:
            nodes = {}
            edges = nx.dfs_successors(self.tree.G, source=Tree.ROOT)

            for node_id in self.tree.G.nodes:
                node = self.tree.get_node(node_id)
                nodes[node_id] = {
                    'label': node.label,
                    'details': node.explanations
                }

            def transform_node(node_id: int):
                children = []
                element = {
                    'id': str(node_id),
                    **nodes[node_id]
                }
                try:
                    for child in edges[node_id]:
                        children.append(transform_node(child))
                except KeyError:
                    pass
                if len(children) > 0:
                    element['children'] = children
                return element

            hierarchy = transform_node(Tree.ROOT)
            return hierarchy
Example #13
def getSubgraph(graph, node):
    a = nx.dfs_successors(graph, node)
    if a == {}:
        return [node]
    else:
        return [node] + getSubgraph(graph, a[node][0]) + getSubgraph(
            graph, a[node][1])
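Note that getSubgraph assumes every non-leaf node has exactly two children (a[node][0] and a[node][1]); on a node with one child it raises IndexError, and extra children are silently missed. A hedged generalization that walks any number of children:

def get_subgraph_nodes(graph, node):
    succ = nx.dfs_successors(graph, node)
    out = [node]
    for child in succ.get(node, []):
        out += get_subgraph_nodes(graph, child)
    return out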
Example #14
def evalSymbReg(individual, points):
    # Transform the tree expression in a callable function
    func = toolbox.compile(expr=individual)

    # traversing the syntax tree to discover options and interactions

    nodes, edges, labels = gp.graph(expr=individual)

    g = nx.Graph()
    g.add_nodes_from(nodes)
    g.add_edges_from(edges)

    pos = graphviz_layout(g, prog="dot")
    nx.draw_networkx_nodes(g, pos)
    nx.draw_networkx_edges(g, pos)
    nx.draw_networkx_labels(g, pos, labels)
    plt.show()

    degrees = g.degree()
    primitives = nx.dfs_successors(g)


    L = g.size()
    # for i in range(L):
    #     if degrees[i] == 1:

    sqerrors = []
    for point in points:
        x1 = point[0]
        x2 = point[1]
        sqerrors.append((func(x1,x2) - x1 ** 4 - x1 ** 3 - x2 ** 2 - x2) ** 2)
    return math.fsum(sqerrors) / len(points),
Example #15
    def _create_root_subgraph(self, graph, name):
        """ Returns a sub graph with module as root node

        @:parameter graph <pygraphviz.AGraph>
        @:parameter name <string> (A module name)
        @:returns <DiGraph>
        """
        edges = nx.dfs_successors(nx.DiGraph(graph), name)
        nodes = set([])
        if self._exclude_states:
            for node in edges.keys():
                state = self.get_state(node)
                if state not in self._exclude_states:
                    nodes.add(node)
            for subnodes in edges.values():
                for node in subnodes:
                    state = self.get_state(node)
                    if state not in self._exclude_states:
                        nodes.add(node)
        else:
            for k, v in edges.items():
                nodes.add(k)
                nodes.update(v)
        graph = self._graph.subgraph(nodes)
        clean_graph(graph)
        return graph
Example #16
File: GO.py  Project: cwt1/BioTK-1
def _get_ancestry_table(g):
    rows = []
    for n in g.nodes():
        for ancestors in nx.dfs_successors(g, n).values():
            for ancestor in ancestors:
                rows.append((ancestor, n))
    return pd.DataFrame(rows, columns=["Ancestor", "Descendant"])
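A small usage sketch; the edge direction (term pointing to its parent, as is conventional for GO graphs) is an assumption here:

import networkx as nx

g = nx.DiGraph([('GO:b', 'GO:a'), ('GO:c', 'GO:b')])
print(_get_ancestry_table(g))
# pairs every term with each term reachable above it:
# (GO:a, GO:b), (GO:b, GO:c), (GO:a, GO:c)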
Example #17
def downtrace(nodeid, graph):
    children_list = []
    thesevens = ('species', 'genus', 'family', 'order', 'class', 'phylum',
                 'kingdom', 'superkingdom'
                 )  # These are the levels we like to report
    dfs_dict = nx.dfs_successors(
        graph,
        source=nodeid)  # Get all successors starting from our node in this graph
    ranks = {}
    for key, value in dfs_dict.items():
        for node in value:
            ranks[graph.nodes[node]['level']] = ranks.get(
                graph.nodes[node]['level'], 0) + 1
    ranks = list(ranks.keys())
    child_rank = ''
    for item in thesevens[::-1]:
        if item in ranks:
            child_rank = item
            break
    if child_rank != '':
        for key, value in dfs_dict.items():
            for node in value:
                if graph.nodes[node][
                        'level'] == child_rank:  # Keep only those at the anchor level
                    children_list.append(node)
        return children_list
    else:
        return children_list
Example #18
    def node_topological_descendants(self,
                                     source_node_asset_id,
                                     target_asset_type=None):
        """
        Return list of IDs of nodes that are descendants of given source node
        in topological order.


        Parameters
        ----------
        source_node_asset_id : int
            The ID of the source ProjectAsset/node.
        target_asset_type : None or str
            If given, the type of asset to return IDs for ('dataset',
            'transformation', or 'visualization').

        """
        G = self._graph
        dfs = nx.dfs_successors(G, source=source_node_asset_id)
        topological_ordering = sorted({x for v in dfs.itervalues() for x in v})
        if target_asset_type is None:
            return topological_ordering
        else:
            filtered_topological_ordering = list()
            for node in topological_ordering:
                if G.node[node]['type'] == target_asset_type:
                    filtered_topological_ordering.append(node)
            return filtered_topological_ordering
Example #19
def find_cluster_members(G, node_idx, epsilon):
    """
    Given a minimum spanning tree G from HDBSCAN, this function finds all elements that would belong
    to the same cluster as the node with index node_idx if we ran DBSCAN with the provided epsilon parameter.
    """

    # First, keep going up the tree until the inverse of lambda (edge weight) is smaller than the epsilon
    child = node_idx
    parent = None
    while True:
        if len(list(G.predecessors(child))) == 0:
            break

        parent = list(G.predecessors(child))[0]
        if 1 / G.edges[parent, child]['weight'] < epsilon:
            child = parent
            continue
        else:
            break

    # Now that we have the parent, extract all the parent and all the children as a subgraph
    nodes = set([parent])

    search = nx.dfs_successors(G, parent)
    for value in search.values():
        nodes = nodes.union(value)

    return nodes
Example #20
def doit():
    """ Does the work """

    color_graph = nx.DiGraph()

    # build a graph consisting of dag edges (a,b) where a is a type of bag that is in b
    with open("input.txt", "r") as file_descriptor:
        reader = csv.reader(file_descriptor)
        for row in reader:
            first_color = re.match(r"^(.*) bags contain", row[0]).group(1)
            if re.match(
                r"^(.*) bags contain no other bags", row[0]
            ):  # this is an end node in the graph
                color_graph.add_node(first_color)
            else:  # this contains bags!
                match = re.match(r"^(.*) bags contain (\d) (.*) bag", row[0])
                found_color = match.group(3)
                the_weight = match.group(2)
                color_graph.add_edge(found_color, first_color, weight=the_weight)

            for another_bag in row[1 : len(row)]:
                match = re.match(r"^ (\d) (.*) bag", another_bag)
                found_color = match.group(2)
                the_weight = match.group(1)
                color_graph.add_edge(found_color, first_color, weight=the_weight)

    shiny_gold_graph = nx.dfs_successors(color_graph, "shiny gold")

    bags = set()
    for key in shiny_gold_graph.keys():
        bags.add(key)
        for value in shiny_gold_graph[key]:
            bags.add(value)
    print(len(bags) - 1)
    print(howmanysuccessors(color_graph, "shiny gold") - 1)
Example #21
def queryCourse(coursename):
    #should return the nested json file rooted at the course.
    d = nx.dfs_successors(G, coursename)

    #file = open("course.json","w")

    return None
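The body above computes d but never uses it. A sketch of what the comment describes, nesting the successors dict into a JSON-serializable structure; the helper below is my guess at the intent, not the project's code:

def nest(d, node):
    return {child: nest(d, child) for child in d.get(node, [])}

def query_course_nested(coursename):
    d = nx.dfs_successors(G, coursename)
    return {coursename: nest(d, coursename)}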
Example #22
def filterGraph(pairs, node):
    k_graph = nx.from_pandas_edgelist(pairs, 'subject', 'object', create_using=nx.MultiDiGraph())
    edges = nx.dfs_successors(k_graph, node)
    nodes = []
    for k, v in edges.items():
        nodes.append(k)
        nodes.extend(v)
    subgraph = k_graph.subgraph(nodes)
    layout = nx.random_layout(k_graph)
    nx.draw_networkx(
        subgraph,
        node_size=1000,
        arrowsize=20,
        linewidths=1.5,
        pos=layout,
        edge_color='red',
        edgecolors='black',
        node_color='white'
        )
    labels = dict(zip((list(zip(pairs.subject, pairs.object))), pairs['relation'].tolist()))
    edges = tuple(subgraph.out_edges(data=False))
    sublabels = {k: labels[k] for k in edges}
    nx.draw_networkx_edge_labels(subgraph, pos=layout, edge_labels=sublabels, font_color='red')
    plt.axis('off')
    plt.savefig('church_knowledge_graph.png')  # save before show(), which clears the figure
    plt.show()
Example #23
    def test_make_agent_walk_along_nx_graph(self):
        nx_skeleton = networkx_utils.NxSkeleton()
        coords = np.array([[100, 100, 100], [100, 101, 100],
                           [100, 102, 101], [100, 103, 102], [100, 104, 103], [100, 105, 104]])
        edgelist = [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)]
        nx_skeleton.initialize_from_edgelist(coords, edgelist)

        nm = neuron_maze.NeuronMaze()
        nm.make_agent_walk_along_nx_graph(nx_skeleton, 0, 5)
        self.assertEqual(nm.hero.visited_positions[1], tuple(coords[1]))
        first_direction = Vector3(0, 1, 0)
        self.assertEqual(nm.directions.index(first_direction), nm.hero.taken_actions[0])


        current_nx_graph = nm.nx_skeletons.nx_graph_dic[1]
        nx_skeleton = networkx_utils.NxSkeleton(nx_graph=current_nx_graph)

        source = networkx_utils.get_nodes_with_a_specific_degree(current_nx_graph,
                                                                  degree_value=1)
        source_node = source[0]
        number_of_steps = 5
        successor_dic = nx.dfs_successors(current_nx_graph, source=source[0])
        for steps in range(number_of_steps):
            source = successor_dic[source[0]]
        target_node = source[0]
        print('source node', source_node)
        print('target node', target_node)
        nm = neuron_maze.NeuronMaze()
        nm.make_agent_walk_along_nx_graph(nx_skeleton, source_node, target_node)
Example #24
 def node_dependents(self, node_asset_id, reverse=False):
     G = self._graph
     dfs = nx.dfs_successors(G, source=node_asset_id)
     topological_ordering = sorted({x for v in dfs.itervalues() for x in v})
     if reverse:
         topological_ordering = topological_ordering[::-1]
     return topological_ordering
Example #25
def get_subdags(dag, n_workers, worker_offset):
    if n_workers > 1 and worker_offset >= n_workers:
        raise ValueError(
            "n-workers is less than the worker-offset given! "
            "Either decrease --n-workers or decrease --worker-offset!")

    # Get connected subdags and sort by nodes
    if n_workers > 1:
        root_nodes = sorted([k for k, v in dag.in_degree().items() if v == 0])
        nodes = set()
        found = set()
        for idx, root_node in enumerate(root_nodes):
            # Flatten the nested list
            children = itertools.chain(
                *nx.dfs_successors(dag, root_node).values())
            # This is the only obvious way of ensuring that all nodes are included
            # in exactly 1 subgraph
            found.add(root_node)
            if idx % n_workers == worker_offset:
                nodes.add(root_node)
                for child in children:
                    if child not in found:
                        nodes.add(child)
                        found.add(child)
            else:
                for child in children:
                    found.add(child)
        subdags = dag.subgraph(list(nodes))
        logger.info(
            "Building and testing sub-DAGs %i in each group of %i, which is %i packages",
            worker_offset, n_workers, len(subdags.nodes()))
    else:
        subdags = dag

    return subdags
Example #26
 def leaves_below(nxg, node):
     assert isinstance(nxg, nx.DiGraph)
     return set(
         sum(
             ([vv for vv in v if nxg.out_degree(vv) == 0]
              for k, v in nx.dfs_successors(nxg, node).items()),
             [],
         ))
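Treated as a standalone function, this collects exactly the leaves reachable from the given node. A quick sketch:

import networkx as nx

G = nx.DiGraph([(1, 2), (1, 3), (3, 4)])
print(leaves_below(G, 1))  # {2, 4} -- only reachable nodes with out-degree 0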
Example #27
def list_dependencies(G, source_filter_function):
    source_nodes = filter(source_filter_function, G.nodes())

    for n in source_nodes:
        print('\n\n----------------------------------------')
        print('Dependencies of %s:' % n)
        for d in sorted(nx.dfs_successors(G, n)):
            print(d)
Example #28
	def cheminEchappeBombe(self,i0,j0):
		G = self.graphe
		chemin = nx.dfs_successors(G, source=(i0,j0),depth_limit=2)
		cle = list(chemin.keys())
		first = cle[0]
		sec = chemin[first][0]
		
		return first,sec
Example #29
 def prune(self, images):
     ''' Prune not in images '''
     nodes = {}
     for image in images:
         nodes[image] = []
         nodes.update(networkx.dfs_successors(self.graph, source=image))
     nodes = set([item for key, values in nodes.items() for item in [key]+values])
     self.graph.remove_nodes_from(set(self.graph.nodes()) - set(nodes))
Example #30
def type_of_node(G, node_1, node_2):
    successors = nx.dfs_successors(G)
    if (nx.has_path(G, source=node_1, target=node_2)):  #node_1 comes first
        return 1
    elif (nx.has_path(G, source=node_2, target=node_1)):  #node_2 comes first
        return 2
    else:  #not same lineage
        return 0
Example #31
	def get_nounPharse_short(self, nounhead):
		npIDs=[]
		if(self.udgraph.node[nounhead]['pos'] in ['NOUN','PROPN']):
			allsuccessors = nx.dfs_successors(self.udgraph,nounhead)

			flag = True
			parents = [nounhead]
			
			while len(parents)>0:
				temp = []
				for parent in parents:
					if parent in allsuccessors.keys():
						for child in allsuccessors[parent]:
							if (parent==nounhead and self.udgraph[parent][child]['relation'] not in ['acl','acl:relcl','cc','conj','appos','punct','amod']):
							#or (parent!=nounhead and self.udgraph[parent][child]['relation'] not in ['acl','acl:relcl','appos','dobj','punct']):
								#if parent!=nounhead or self.udgraph[parent][child]['relation'] not in []:
								npIDs.append(child)
								temp.append(child)
				parents = temp
			
			'''
			for parent,child in allsuccessors.items():
				print(str(parent))
				print(child)
			'''		
			#raw_input(" ")

			#for value in allsuccessors.values():
			#	npIDs.extend(value)
			#print(npIDs)

		npIDs.append(nounhead)
		npTokens =[]
		npIDs.sort()
		#print(npIDs)

		start = 0
		for i in range(0,len(npIDs)):
			if self.udgraph.node[npIDs[i]]['pos']=='ADP':
				start = i+1
		npIDs = npIDs[start:]

		flag = False
		for i in range(1,len(npIDs)):
			if npIDs[i]-npIDs[i-1] != 1:
				flag = True

		if flag == True:
			npIDs = []
			npIDs.append(nounhead)


		for npID in npIDs:
			npTokens.append(self.udgraph.node[npID]['token'])
			
		nounPhrase = (' ').join(npTokens)

		return nounPhrase
Example #32
def gather_relevant_nodes(G, s):
    subnodes = nx.dfs_successors(G, source=s)
    ret = []
    for l in subnodes.values():
        ret.extend(l)
    for node in ret:
        prednodes = nx.dfs_predecessors(G, node)
        for l in prednodes.keys():
            ret.append(l)
    return ret
Example #33
 def cover_set(self, node, rho, target_set):
     cover_set = []
     cover = nx.dfs_successors(self.G, node, rho)
     # print(cover)
     for x in cover.keys():
         for n in cover[x]:
             # print(n)
             if n in target_set:
                 cover_set.append(n)
     return cover_set
Example #34
def all_successors(graph,node):
    """
    Given a networkx graph and a node string return a list of all successors of the node
    i.e. every node J such that there is a path from the input node to J
    """
    d = nx.dfs_successors(graph,node)
    return_list = []
    for key in d:
        return_list = return_list + d[key]
    return list(set(return_list))
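For a DiGraph this computes the same reachable set that nx.descendants returns directly:

import networkx as nx

G = nx.DiGraph([('a', 'b'), ('b', 'c')])
assert set(all_successors(G, 'a')) == nx.descendants(G, 'a')  # both give {'b', 'c'}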
Example #35
 def successors(self):
     ''' Add attribute successors to all nodes '''
     successors = {}
     for image in self.graph.nodes_iter():
         successors[image] = len(set([
             successor
             for values in networkx.dfs_successors(self.graph, source=image).values()
             for successor in values
         ]))
     networkx.set_node_attributes(self.graph, 'successors', successors)
Example #36
 def cover_nodes(self, node):
     cover = nx.dfs_successors(self.G, node, self.rho2 - self.rho1)
     sub_nodes = []
     if node in self.D1:
         sub_nodes.append(node)
     for k in cover.keys():
         for v in cover[k]:
             if v in self.D1:
                 sub_nodes.append(v)
     return sub_nodes
Example #37
def generate_dep_graph(l,filename, preprocessing=True, colors=True, write_gml=True):
    p=filter(lambda x: x.has_key('package'), l)
    #TODO: handling of install tags
    u=map(lambda x: x.strip(), filter(lambda x: x.has_key('request'), l)[0]['upgrade'].split(','))

    G=nx.DiGraph()
    versions=collect_versions(p)

    #****************************************************************************************
    #ADD DEPENDENCIES
    #****************************************************************************************
    #implicit
    for e in versions:
        if len(versions[e])>1:
            for v in ifilter(lambda x: x is not None, versions[e]):
                G.add_edge(tuple2str(e,None),tuple2str(e,v),property='implicit',
                           graphics=gml_edge_graphics('implicit'))
    #explicit
    for r in p:
        v=(r['package'], r['version'])
        for cudf_property in cudf_properties:
            add(G,r,cudf_property,versions)

    if preprocessing:
        #****************************************************************************************
        #COLLECT RELEVANT VERSIONS
        #****************************************************************************************
        relevant_versions=[]
        for e in u:
            e=e.strip()
            if versions[e]:
                #TODO
                #tuple2str(e,reduce(lambda x,y: x if LooseVersion(x)>
                #         LooseVersion(y) else y, versions[e]))
                relevant_versions.append(e)
            else:
                relevant_versions.append(tuple2str(e,versions[e]))

        successors=set()
        for v in relevant_versions:
            ret = []
            for e,k in nx.dfs_successors(G,v).iteritems():
                ret.append(e)
                ret.extend(k)
            successors.update(ret)
        G=G.subgraph(list(successors)).copy()

        if colors:
            color_nodes(G,p,u,successors)
    if write_gml:
        nx.write_gml(G,filename)
    return G
Example #38
    def test_get_feasible_history(self):

        # This transition matrix is on a 4x4 grid.
        P = _mc0.get_example_transition_matrix()

        # Define a very sparse tree.
        T = nx.Graph()
        T.add_weighted_edges_from([
            (0, 12, 1.0),
            (0, 23, 2.0),
            (0, 33, 1.0),
            ])

        # Define the known states
        node_to_state = {
                12 : 11,
                23 : 14,
                33 : 41}

        # Define a root at a node with a known state,
        # so that we can avoid specifying a distribution at the root.
        root = 12
        T_aug = _sample_mcx.get_feasible_history(T, node_to_state,
                root=root, P_default=P)

        # The unweighted and weighted tree size should be unchanged.
        assert_allclose(
                T.size(weight='weight'), T_aug.size(weight='weight'))

        # Check that for each node in the initial tree,
        # all adjacent edges in the augmented tree have the same state.
        # Furthermore if the state of the node in the initial tree is known,
        # check that the adjacent edges share this known state.
        for a in T:
            states = set()
            for b in T_aug.neighbors(a):
                states.add(T_aug[a][b]['state'])
            assert_equal(len(states), 1)
            state = _util.get_first_element(states)
            if a in node_to_state:
                assert_equal(node_to_state[a], state)

        # Check that every adjacent edge pair is a valid transition.
        successors = nx.dfs_successors(T_aug, root)
        for a, b in nx.bfs_edges(T_aug, root):
            if b in successors:
                for c in successors[b]:
                    ab = T_aug[a][b]['state']
                    bc = T_aug[b][c]['state']
                    assert_(ab in P)
                    assert_(bc in P[ab])
                    assert_(P[ab][bc]['weight'] > 0)
Example #39
 def getClusterMembers(self):
     '''
     '''
     G = self.G
     topnodes = [node for node in G.pred if not G.pred[node]]
     clusterMembers = defaultdict(list)
     for tnode in topnodes:
         dfs = nx.dfs_successors(G, tnode)
         for node in dfs:
             for mem in dfs[node]:
                 if int(mem.split('_')[0]) == 0:
                     clusterMembers[tnode].append(mem)
     return clusterMembers
Example #40
def get_history_root_state_and_transitions(T, root=None):
    """

    Parameters
    ----------
    T : undirected weighted networkx tree with edges annotated with states
        A sampled history of states and substitution times on the tree.
    root : integer, optional
        The root of the tree.
        If not specified, an arbitrary root will be used.

    Returns
    -------
    root_state : integer
        The state at the root.
    transition_counts : directed weighted networkx graph
        A networkx graph that tracks the number of times
        each transition type appears in the history.

    """

    # Bookkeeping.
    degrees = T.degree()

    # Pick a root with only one neighbor if no root was specified.
    if root is None:
        root = _util.get_arbitrary_tip(T, degrees)

    # The root must have a well defined state.
    # This means that it cannot be adjacent to edges with differing states.
    root_states = [T[root][b]['state'] for b in T[root]]
    if len(set(root_states)) != 1:
        raise ValueError('the root does not have a well defined state')
    root_state = root_states[0]

    # Count the state transitions.
    transition_counts = nx.DiGraph()
    successors = nx.dfs_successors(T, root)
    for a, b in nx.bfs_edges(T, root):
        if degrees[b] == 2:
            c = _util.get_first_element(successors[b])
            sa = T[a][b]['state']
            sb = T[b][c]['state']
            if sa != sb:
                if transition_counts.has_edge(sa, sb):
                    transition_counts[sa][sb]['weight'] += 1
                else:
                    transition_counts.add_edge(sa, sb, weight=1)

    # Return the statistics.
    return root_state, transition_counts
Example #41
def rec_dfs_successor(G, node_name):
    """Recursively determines the successors of a node, returns a flat-list of such successors"""
    temp_affected_nodes = nx.dfs_successors(G, node_name).get(node_name, [])
    temp = []
    for temp_node in temp_affected_nodes:
        temp_list = rec_dfs_successor(G, temp_node)
        temp.append(temp_list)
    temp_affected_nodes.append(temp)

    if temp_affected_nodes:
        temp_affected_nodes = flatten_list(temp_affected_nodes)
        return list(set(temp_affected_nodes))
    else:
        return []
Example #42
File: symd.py  Project: xym-tool/symd
def print_dependency_tree():
    """
    For each module, print the dependency tree for imported modules
    :return: None
    """
    print('\n=== Module Dependency Trees ===')
    for node_name in G.nodes_iter():
        if G.node[node_name][TAG_ATTR] != UNKNOWN_TAG:
            dg = nx.dfs_successors(G, node_name)
            plist = []
            print(augment_format_string(node_name, '\n%s:') % node_name)
            if len(dg):
                imports = dg[node_name]
                print_dependents(dg, plist, imports)
Example #43
def dconn(gDir, path, conds):
	"""Check whether the path d-connects start and end nodes."""
	for idx in range(len(path) - 2):
		p1 = path[idx]
		p2 = path[idx + 1]
		p3 = path[idx + 2]
		if (gDir.has_edge(p1, p2) and gDir.has_edge(p3, p2)): # p1 -> p2 <- p3
			if (p2 not in conds):
				if not (len(set(nx.dfs_successors(gDir, p2)).intersection(conds)) > 0):					
					return False
		else:
			det = gDir.node[p2]["determines"]
			if (p2 in conds) or ((det is not None) and (det <= conds)):
				return False	
	return True
Example #44
    def build(self):
        ''' Build '''
        queue_out = queue.Queue(maxsize=META['limits']['threads'])
        queue_in = queue.Queue()
        for _ in range(META['limits']['threads']):
            ThreadBuild(queue_out, queue_in).start()

        active = []
        while self.graph:
            degree = self.graph.in_degree()
            images = [image for image in degree if image not in active and degree[image] == 0]
            images = sorted(images, reverse=True,
                            key=lambda image: self.graph.node[image]['successors'])

            for image in images:
                try:
                    queue_out.put(image, block=False)
                except queue.Full:
                    break
                active.append(image)

            build = queue_in.get()
            active.remove(build.name)
            print('\n\033[33m### Build log: {0:s} ###\033[0m'.format(build.name))
            for line in build.log:
                if 'stream' in line:
                    print(line['stream'], end='')
                else:
                    print(line)
            if build.state == 'finished':
                print('\033[32m### Build finished: {0:s} ###\033[0m\n'.format(build.name))
                self.graph.remove_node(build.name)
            elif build.state == 'failed':
                print('\033[31m### Build failed: {0:s} ###\033[0m\n'.format(build.name))
                successors = networkx.dfs_successors(self.graph, source=build.name)
                if not successors:
                    self.graph.remove_node(build.name)
                else:
                    self.graph.remove_nodes_from(set(
                        [image for key, values in successors.items() for image in [key]+values]
                    ))
            else:
                raise RuntimeError('State is unknown: {0:s} {1:s}'.format(build.name, build.state))
            queue_in.task_done()
Example #45
File: joins.py  Project: maheshpj/qbe
def joins_from_successors(join_subgraph, root):
    """
    Creates a dfs successors tree and use it to create joins with root
    returns all inner joins list
    Example:
       root = 'a'        
       successors = {'a': ['c'], 
                     'c': ['j', 'f'], 
                     'f': ['i'], 
                     'j': ['k']} 
                     
       from a
        inner join c on a.id = c.id
            inner join j on j.id = c.id
                inner join k on k.id = j.id
            inner join f on f.id = c.id
                inner join i on i.id = f.id         
    """    
    successors = nx.dfs_successors(join_subgraph, root)
    return create_joins(successors, root, [])
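The successors dict shown in the docstring can be reproduced directly; exact list ordering depends on adjacency insertion order, so treat this as a sketch:

import networkx as nx

jg = nx.Graph([('a', 'c'), ('c', 'j'), ('c', 'f'), ('j', 'k'), ('f', 'i')])
print(nx.dfs_successors(jg, 'a'))
# e.g. {'a': ['c'], 'c': ['j', 'f'], 'j': ['k'], 'f': ['i']}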
Example #46
    def descendant_nodes(self, u):
        """
        Returns a list with all the descendants of u.

        EXAMPLE::
        
            >>> network = PhyloNetwork(eNewick="((,(3,4)#1)2,#1)1;")
            >>> network.nodes()
            ... ['_5', '_4', '_3', '_2', '_1', '#1']
            >>> network.node_by_taxa('2')
            ... '_2'
            >>> network.descendant_nodes('_2')
            ... ['_5', '_4', '#1', '_3', '_2']
            >>> network.strict_descendant_nodes('_2')
            ... ['_3', '_2']
            >>> network.descendant_taxa('_2')
            ... ['4', '3', '2']
            
        """
        return sum(list(dfs_successors(self, u).values()), []) + [u]
Example #47
def directLeafConnection(G, H):
    nodes = H.nodes()
    counter = 0
    while not udah_belom(H):
        if time.time()-waktu1 > 300:
            return H
        for x in nodes:
            counter += 1
            x_neigh = H.neighbors(x)
            if len(x_neigh) > 2:
                for y in x_neigh:
                    dummyGraph = H.copy()
    
                    dummyGraph.node[x]['neighbor_color'].remove(dummyGraph.node[y]['color'])        #remove neighbor color
                    dummyGraph.node[y]['neighbor_color'].remove(dummyGraph.node[x]['color'])        #remove neighbor color
                    dummyGraph.remove_edge(x,y)
    
                    tree = nx.dfs_successors(dummyGraph, x)
                    anak_anak = nx.dfs_predecessors(dummyGraph, x)
                    for anak in anak_anak:
                        if anak not in tree:
                            if validColor(dummyGraph, anak, y):
                                dummyGraph.add_edge(anak,y, weight=G[anak][y]['weight'])
                                addNeighborColor(dummyGraph, y, anak)
                                H = dummyGraph.copy()
                                #drawHraph(H)
                                break
            if counter%100000==0:
                print('iteration:', counter, 'in Direct Leaf Connection')
            if counter == 500001:
                #os.system('say "Redo from scratch"')
                mst = kruskal_mst(G)
                H = G.copy()
                H.remove_edges_from(H.edges())
                H.add_edges_from(mst)

                H = directLeafConnection(G, H)
                return H
    return H
Example #48
        def getAtomByMolIndex(self,index):
                """Returns the Atom by it's index in the molecule where the index is defined as the number of bonds away from the anchor atom 
                (i.e. the atom at index 1 is the atom directly connected to the anchor atom)

                Parameters
                ----------
                index : int
                    The molecular index of the Atom object one wishes to retrieve where the index is defined as the number of bonds away from the anchor atom.

                Returns
                -------
                Atom Object
                    The Atom object located at the specified index. If the index is greater than the number of atoms in the molecule minus one,
                    the function returns None, as this is out of the range of the atom list.
                """
                if(index>(len(self.atoms)-1)):
                    return None
                successor_dict= ntwkx.dfs_successors(self.graph,source=self.anchorAtom.atomID)
                currentID = self.anchorAtom.atomID
                for i in range(index):
                    currentID = successor_dict[currentID][0]
                return self.getAtomByID(currentID)
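The loop walks the first-successor chain: each step takes successor_dict[currentID][0], the first child found by DFS, so on branched molecules the index follows one arbitrary branch. A sketch of that traversal on a bare graph (atom IDs made up):

import networkx as nx

g = nx.Graph([(0, 1), (1, 2), (2, 3)])
succ = nx.dfs_successors(g, source=0)
current = 0
for _ in range(2):  # index = 2
    current = succ[current][0]
print(current)  # 2: two bonds away from the anchor atom (node 0)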
Example #49
#########
import networkx as nx
G = nx.DiGraph()
G.add_edge(1,2)
G.add_edge(2,3)
G.add_edge(2,7)
G.add_edge(3,4)
G.add_edge(3,6)
G.add_edge(4,5)
paths = nx.dfs_successors(G, 1)
print(paths.keys())
print(paths[1])
print(paths[2])
print(paths[3])
print(paths[4])
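For reference, assuming NetworkX's default traversal order (which follows adjacency insertion order), this should print:

# dict_keys([1, 2, 3, 4])
# [2]
# [3, 7]
# [4, 6]
# [5]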
Example #50
def nest_graph(g, source):
    "take graph and source elt, return nested dictionary"
    d = networkx.dfs_successors(g, source)
    return {source: nest_successor_dict(d.get(source, ()), d)}
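nest_graph relies on a nest_successor_dict helper that is not shown here. A plausible sketch, under the assumption that the helper recursively turns the flat successors dict into nested dictionaries (its behavior is inferred, not taken from the source):

def nest_successor_dict(children, d):
    # Hypothetical helper: map each child to the nested dict of its own children.
    return {c: nest_successor_dict(d.get(c, ()), d) for c in children}

# nest_graph(networkx.DiGraph([('a', 'b'), ('b', 'c')]), 'a')
# would then return {'a': {'b': {'c': {}}}}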
Example #51
def minibatch_(functions, clf,scaler,w, loss__,mse,hinge1,hinge2,full_image,img_nr,alphas,learning_rate,subsamples, mode):
    X_p, y_p, inv = get_data_from_img_nr(class_,img_nr, subsamples)
    if X_p != []:
        boxes = []
        ground_truth = inv[0][2]
        img_nr = inv[0][0]
        print img_nr
        if less_features:
            X_p = [fts[0:features_used] for fts in X_p]
        if os.path.isfile('/var/node436/local/tstahl/Coords_prop_windows/'+ (format(img_nr, "06d")) +'.txt'):
            f = open('/var/node436/local/tstahl/Coords_prop_windows/'+ (format(img_nr, "06d")) +'.txt', 'r')
        else:
            print 'warning'
        for line, y in zip(f, inv):
            tmp = line.split(',')
            coord = []
            for s in tmp:
                coord.append(float(s))
            boxes.append([coord, y[2]])
        #assert(len(boxes)<500)
        boxes, y_p, X_p = sort_boxes(boxes, y_p, X_p, 0,5000)
        
        if os.path.isfile('/var/node436/local/tstahl/GroundTruth/%s/%s.txt'%(class_,format(img_nr, "06d"))):
            gr = open('/var/node436/local/tstahl/GroundTruth/%s/%s.txt'%(class_,format(img_nr, "06d")), 'r')
        else:
            gr = []
        ground_truths = []
        for line in gr:
           tmp = line.split(',')
           ground_truth = []
           for s in tmp:
              ground_truth.append(int(s))
           ground_truths.append(ground_truth)
        
        #prune boxes
        pruned_x = []
        pruned_y = []
        pruned_boxes = []
        if prune:
            for i, y_ in enumerate(y_p):
                if y_ > 0:
                    pruned_x.append(X_p[i])
                    pruned_y.append(y_p[i])
                    pruned_boxes.append(boxes[i])
        else:
            pruned_x = X_p
            pruned_y = y_p
            pruned_boxes = boxes
        
        if subsampling and len(pruned_boxes) > subsamples:
            pruned_x = pruned_x[0:subsamples]
            pruned_y = pruned_y[0:subsamples]
            pruned_boxes = pruned_boxes[0:subsamples]
            
            
        # create_tree
        G, levels = create_tree(pruned_boxes)
        
        #prune tree to only have levels which fully cover the image, tested
        if prune_fully_covered:
            nr_levels_covered = 100
            total_size = surface_area(pruned_boxes, levels[0])
            for level in levels:
                sa = surface_area(pruned_boxes, levels[level])
                sa_co = sa/total_size
                if sa_co != 1.0:
                    G.remove_nodes_from(levels[level])
                else:
                    nr_levels_covered = level
            levels = {k: levels[k] for k in range(0,nr_levels_covered + 1)}
            
        # prune levels, speedup + performance 
        levels_tmp = {k:v for k,v in levels.iteritems() if k<prune_tree_levels}
        levels_gone = {k:v for k,v in levels.iteritems() if k>=prune_tree_levels}
        levels = levels_tmp
        #prune tree as well, for patches training
        for trash_level in levels_gone.values():
            G.remove_nodes_from(trash_level)
        
        coords = []
        features = []
        f_c = []
        f = []
        
        #either subsampling or prune_fully_covered
        #assert(subsampling != prune_fully_covered)
        
        if subsampling:
            if os.path.isfile('/var/node436/local/tstahl/Features_prop_windows/upper_levels/%s_%s_%s.txt'%(class_,format(img_nr, "06d"),subsamples)):
                f_c = open('/var/node436/local/tstahl/Features_prop_windows/upper_levels/%s_%s_%s.txt'%(class_,format(img_nr, "06d"),subsamples), 'r+')
            else:
                if mode == 'extract_train' or mode == 'extract_test':                
                    print 'coords for %s with %s samples have to be extracted'%(img_nr,subsamples)
                    f_c = open('/var/node436/local/tstahl/Features_prop_windows/upper_levels/%s_%s_%s.txt'%(class_,format(img_nr, "06d"),subsamples), 'w')
                    for level in levels:
                        levl_boxes = extract_coords(levels[level], pruned_boxes)
                        if levl_boxes != []:
                            for lvl_box in levl_boxes:
                                if lvl_box not in coords:
                                    coords.append(lvl_box)
                                    f_c.write('%s,%s,%s,%s'%(lvl_box[0],lvl_box[1],lvl_box[2],lvl_box[3]))
                                    f_c.write('\n')
                    f_c.close()
                    print 'features for %s with %s samples have to be extracted'%(img_nr,subsamples)
                    os.system('export PATH=$PATH:/home/koelma/impala/lib/x86_64-linux-gcc')
                    os.system('export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/koelma/impala/third.13.03/x86_64-linux/lib')
                    #print "EuVisual /var/node436/local/tstahl/Images/%s.jpg /var/node436/local/tstahl/Features_prop_windows/Features_upper/sheep_%s_%s.txt --eudata /home/koelma/EuDataBig --imageroifile /var/node436/local/tstahl/Features_prop_windows/upper_levels/sheep_%s_%s.txt"%((format(img_nr, "06d")),format(img_nr, "06d"),subsamples,format(img_nr, "06d"),subsamples)
                    os.system("EuVisual /var/node436/local/tstahl/Images/%s.jpg /var/node436/local/tstahl/Features_prop_windows/Features_upper/%s_%s_%s.txt --eudata /home/koelma/EuDataBig --imageroifile /var/node436/local/tstahl/Features_prop_windows/upper_levels/%s_%s_%s.txt"%(class_,(format(img_nr, "06d")),format(img_nr, "06d"),subsamples,class_,format(img_nr, "06d"),subsamples))
                    if os.path.isfile('/var/node436/local/tstahl/Features_prop_windows/upper_levels/%s_%s_%s.txt'%(class_,format(img_nr, "06d"),subsamples)):
                        f_c = open('/var/node436/local/tstahl/Features_prop_windows/upper_levels/%s_%s_%s.txt'%(class_,format(img_nr, "06d"),subsamples), 'r')
                    else:
                        f_c = []
            coords = []
                
            if os.path.isfile('/var/node436/local/tstahl/Features_prop_windows/Features_upper/%s_%s_%s.txt'%(class_,format(img_nr, "06d"),subsamples)):
                f = open('/var/node436/local/tstahl/Features_prop_windows/Features_upper/%s_%s_%s.txt'%(class_,format(img_nr, "06d"),subsamples), 'r') 
                
                
        elif prune_fully_covered:
            if os.path.isfile('/var/node436/local/tstahl/Features_prop_windows/upper_levels/sheep_%s_fully_cover_tree.txt'%(format(img_nr, "06d"))):
                f_c = open('/var/node436/local/tstahl/Features_prop_windows/upper_levels/sheep_%s_fully_cover_tree.txt'%(format(img_nr, "06d")), 'r+')
                
                
            else:
                if mode == 'extract_train' or mode == 'extract_test':                
                    print 'coords for %s with fully_cover_tree samples have to be extracted'%(img_nr)
                    f_c = open('/var/node436/local/tstahl/Features_prop_windows/upper_levels/sheep_%s_fully_cover_tree.txt'%(format(img_nr, "06d")), 'w')
                    for level in levels:
                        levl_boxes = extract_coords(levels[level], pruned_boxes)
                        if levl_boxes != []:
                            for lvl_box in levl_boxes:
                                if lvl_box not in coords:
                                    coords.append(lvl_box)
                                    f_c.write('%s,%s,%s,%s'%(lvl_box[0],lvl_box[1],lvl_box[2],lvl_box[3]))
                                    f_c.write('\n')
                    f_c.close()
                    print 'features for %s with fully_cover_tree samples have to be extracted'%(img_nr)
                    os.system('export PATH=$PATH:/home/koelma/impala/lib/x86_64-linux-gcc')
                    os.system('export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/koelma/impala/third.13.03/x86_64-linux/lib')
                    #print "EuVisual /var/node436/local/tstahl/Images/%s.jpg /var/node436/local/tstahl/Features_prop_windows/Features_upper/sheep_%s_%s.txt --eudata /home/koelma/EuDataBig --imageroifile /var/node436/local/tstahl/Features_prop_windows/upper_levels/sheep_%s_%s.txt"%((format(img_nr, "06d")),format(img_nr, "06d"),subsamples,format(img_nr, "06d"),subsamples)
                    print "EuVisual /var/node436/local/tstahl/Images/%s.jpg /var/node436/local/tstahl/Features_prop_windows/Features_upper/sheep_%s_fully_cover_tree.txt --eudata /home/koelma/EuDataBig --imageroifile /var/node436/local/tstahl/Features_prop_windows/upper_levels/sheep_%s_fully_cover_tree.txt"%((format(img_nr, "06d")),format(img_nr, "06d"),format(img_nr, "06d"))
                    os.system("EuVisual /var/node436/local/tstahl/Images/%s.jpg /var/node436/local/tstahl/Features_prop_windows/Features_upper/sheep_%s_fully_cover_tree.txt --eudata /home/koelma/EuDataBig --imageroifile /var/node436/local/tstahl/Features_prop_windows/upper_levels/sheep_%s_fully_cover_tree.txt"%((format(img_nr, "06d")),format(img_nr, "06d"),format(img_nr, "06d")))
                    if os.path.isfile('/var/node436/local/tstahl/Features_prop_windows/upper_levels/sheep_%s_fully_cover_tree.txt'%(format(img_nr, "06d"))):
                        f_c = open('/var/node436/local/tstahl/Features_prop_windows/upper_levels/sheep_%s_fully_cover_tree.txt'%(format(img_nr, "06d")), 'r')
                    else:
                        f_c = []
            coords = []
                
            if os.path.isfile('/var/node436/local/tstahl/Features_prop_windows/Features_upper/sheep_%s_fully_cover_tree.txt'%(format(img_nr, "06d"))):
                f = open('/var/node436/local/tstahl/Features_prop_windows/Features_upper/sheep_%s_fully_cover_tree.txt'%(format(img_nr, "06d")), 'r') 
                        
                
        else:
            if os.path.isfile('/var/node436/local/tstahl/Features_prop_windows/Features_upper/sheep%s.txt'%(format(img_nr, "06d"))):
                f = open('/var/node436/local/tstahl/Features_prop_windows/Features_upper/sheep%s.txt'%(format(img_nr, "06d")), 'r') 
            if os.path.isfile('/var/node436/local/tstahl/Features_prop_windows/upper_levels/sheep%s.txt'%(format(img_nr, "06d"))):
                f_c = open('/var/node436/local/tstahl/Features_prop_windows/upper_levels/sheep%s.txt'%(format(img_nr, "06d")), 'r+')
                
        if f_c != []:
            for i,line in enumerate(f_c):
                str_ = line.rstrip('\n').split(',')
                cc = []
                for s in str_:
                   cc.append(float(s))
                coords.append(cc)
        if f != []:
            for i,line in enumerate(f):
                str_ = line.rstrip('\n').split(',')  
                ff = []
                for s in str_:
                   ff.append(float(s))
                features.append(ff)
        #assert len(coords) == len(features)
        
        # append x,y of intersections
        if learn_intersections:
            for inters,coord in zip(features,coords):
#                if inters not in pruned_x:
                pruned_x.append(inters)
                ol = 0.0
                ol = get_intersection_count(coord, ground_truths)
                pruned_y.append(ol)
                
        if mode == 'mean_variance':
            print 'normalizing'
            sum_x += np.array(pruned_x).sum(axis=0)
            n_samples += len(pruned_x)
            sum_sq_x +=  (np.array(pruned_x)**2).sum(axis=0)
            scaler.partial_fit(pruned_x)  # Don't cheat - fit only on training data
            return sum_x,n_samples,sum_sq_x, scaler
            
        if less_features:
            features = [fts[0:features_used] for fts in features]
        #normalize
        norm_x = []
        if normalize and (mode != 'extract_train' and mode != 'extract_test'):
#            for p_x in pruned_x:
#                norm_x.append((p_x-mean)/variance)
            norm_x = scaler.transform(pruned_x)
            if features != []:
                features = scaler.transform(features)
        else:
            norm_x = pruned_x
        data = (G, levels, pruned_y, norm_x, pruned_boxes, ground_truths, alphas)
        sucs = nx.dfs_successors(G)
        
        predecs = nx.dfs_predecessors(G)
        
        #preprocess: node - children
        children = {}
        last = -1
        for node,children_ in zip(sucs.keys(),sucs.values()):
            if node != last+1:
                for i in range(last+1,node):
                    children[i] = []
                children[node] = children_
            elif node == last +1:
                children[node] = children_
            last = node
        if mode == 'training':
            if alphas[0] == 0: #if we don't learn the proposals, we learn just the levels: better, because every level has same importance and faster
                print 'training levels', img_nr
                for level in levels:
                    print 'level' , level
                    if img_nr in functions:
                        if level in functions[img_nr]:
                            function = functions[img_nr][level]
                        else:
                            function = []
                    else:
                        functions[img_nr] = {}
                        function = []
                    w, function = tree_level_regression(class_,function,levels,level,features,coords,scaler,w,norm_x,pruned_y,None,predecs,children,pruned_boxes,learning_rate,alphas,img_nr,jans_idea)
                    if level not in functions[img_nr]:
                        functions[img_nr][level] = function
                return w, len(pruned_y), len(levels)
            else: #if we learn proposals, levels with more proposals have more significance...., slow - need to change
                print 'training patches', img_nr
                print predecs
                nodes = list(G.nodes())
                for node in nodes:
                    print node
                    if node == 0:
                        w = learn_root(w,norm_x[0],pruned_y[0],learning_rate,alphas)
                    else:
                        for num,n in enumerate(levels.values()):
                            if node in n:
                                level = num
                                break
                        if img_nr in functions:
                            if level in functions[img_nr]:
                                function = functions[img_nr][level]
                            else:
                                function = []
                        else:
                            functions[img_nr] = {}
                            function = []
                        #w, function = tree_level_regression(class_,function,levels,level,features,coords,scaler,w,norm_x,pruned_y,node,predecs,children,pruned_boxes,learning_rate,alphas,img_nr)
                        w, function = constrained_regression(class_,function,features,coords,scaler,w,norm_x,pruned_y,node,predecs,children,pruned_boxes,learning_rate,alphas,img_nr,squared_hinge_loss)
                        #TODO: train regressor/classifier that predicts/chooses level. Features: level, number of proposals, number of intersections, avg size of proposal, predictions(for regressor), etc.
                        if level not in functions[img_nr]:
                            functions[img_nr][level] = function
                return w, len(pruned_y), len(G.nodes())
        elif mode == 'scikit_train':
            clf.partial_fit(norm_x,pruned_y)
            return clf
        elif mode == 'loss_train':
            if alphas[0] == 0: #levels
                loss__.append(tree_level_loss(class_,features,coords,scaler, w, data, predecs, children,img_nr,-1,functions))
                return loss__
            else:
                loss__.append(loss(class_,squared_hinge_loss,features,coords,scaler,w, data, predecs, children,img_nr, -1))
        elif mode == 'loss_test' or mode == 'loss_eval':
            print mode, loss__
            if alphas[0] == 0: #levels
                loss__.append(tree_level_loss(class_,features,coords,scaler, w, data, predecs, children,img_nr,-1,functions))
                cpl = max(0, np.dot(w,np.array(norm_x[0]).T))
                full_image.append([pruned_y[0],cpl])
                return loss__,full_image
            else:
                loss__.append(loss(class_,squared_hinge_loss,features,coords,scaler,w, data, predecs, children,img_nr, -1))
                cpl = max(0, np.dot(w,np.array(norm_x[0]).T))
                full_image.append([pruned_y[0],cpl])
                return loss__,full_image
        elif mode == 'loss_scikit_test' or mode == 'loss_scikit_train':
            loss__.append(((clf.predict(norm_x) - pruned_y)**2).sum())
            return loss__ 
        elif mode == 'levels_train' or mode == 'levels_test':
            preds = []
            for i,x_ in enumerate(norm_x):
                preds.append(np.dot(w, x_))
            cpls = []
            truelvls = []
            used_boxes_ = []
            total_size = surface_area(pruned_boxes, levels[0])
            fully_covered_score = 0.0
            fully_covered_score_lvls = 0.0
            covered_levels = []
            print mode, len(levels)
            for level in levels:
                function = functions[img_nr][level]
                cpl,used_boxes,_ = count_per_level([],class_,features,coords,scaler,w, preds, img_nr, pruned_boxes,levels[level], '',function)
                # clipp negative predictions
                cpl = max(0,cpl)
                if used_boxes != []:
                    used_boxes_.append(used_boxes[0][1])
                tru = y_p[0]
                cpls.append(cpl)
                sa = surface_area(pruned_boxes, levels[level])
                sa_co = sa/total_size
                if sa_co == 1.0:
                   fully_covered_score += cpl
                   fully_covered_score_lvls += 1
                   covered_levels.append(cpl)
                truelvls.append(tru)
            return cpls,truelvls
Example #52
File: _mcx.py  Project: argriffing/raoteh
def get_node_to_pset(T, root, node_to_state=None, P_default=None):
    """
    For each node, get the set of states that give positive subtree likelihood.

    This function is analogous to get_node_to_pmap.

    Parameters
    ----------
    T : undirected unweighted acyclic networkx graph
        A tree whose edges are optionally annotated
        with edge-specific state transition probability matrix P.
    root : integer
        The root node.
    node_to_state : dict, optional
        A sparse map from a node to its known state if any.
        Nodes in this map are assumed to have completely known state.
        Nodes not in this map are assumed to have completely missing state.
        If this map is not provided,
        all state information will be assumed to be completely missing.
        Entries of this dict that correspond to nodes not in the tree
        will be silently ignored.
    P_default : networkx directed weighted graph, optional
        Sparse transition matrix to be used for edges
        which are not annotated with an edge-specific transition matrix.

    Returns
    -------
    node_to_pset : dict
        A map from a node to the set of states with positive subtree likelihood.

    """
    # Handle the trivial single-node tree as a special case.
    if len(set(T)) == 1:
        if root not in T:
            raise ValueError('unrecognized root')
        if (node_to_state is not None) and (root in node_to_state):
            root_state = node_to_state[root]
            root_pset = {root_state}
        else:
            all_states = set(P_default)
            root_pset = all_states
        return {root : root_pset}

    # Bookkeeping.
    successors = nx.dfs_successors(T, root)
    predecessors = nx.dfs_predecessors(T, root)

    # Compute the map from node to set.
    node_to_pset = {}
    for nb in nx.dfs_postorder_nodes(T, root):

        # If a parent node is available, get a set of states
        # involved in the transition matrix associated with the parent edge.
        # A more complicated implementation would use only the sink
        # states of that transition matrix.
        na_set = None
        if nb in predecessors:
            na = predecessors[nb]
            P = T[na][nb].get('P', P_default)
            na_set = set(P)

        # If the state of the current node is known,
        # define the set containing only that state.
        nb_set = None
        if (node_to_state is not None) and (nb in node_to_state):
            nb_set = {node_to_state[nb]}

        # If a child node is available, get the set of states
        # that have transition to child states
        # for which the child subtree likelihoods are positive.
        nc_set = None
        if nb in successors:
            for nc in successors[nb]:
                allowed_set = set()
                P = T[nb][nc].get('P', P_default)
                for sb, sc in P.edges():
                    if sc in node_to_pset[nc]:
                        allowed_set.add(sb)
                if nc_set is None:
                    nc_set = allowed_set
                else:
                    nc_set.intersection_update(allowed_set)

        # Take the intersection of informative constraints due to
        # possible parent transitions,
        # possible direct constraints on the node state,
        # and possible child node state constraints.
        pset = None
        for constraint_set in (na_set, nb_set, nc_set):
            if constraint_set is not None:
                if pset is None:
                    pset = constraint_set
                else:
                    pset.intersection_update(constraint_set)

        # This value should not be None unless there has been some problem.
        if pset is None:
            raise ValueError('internal error')

        # Define the pset for the node.
        node_to_pset[nb] = pset

    # Return the node_to_pset map.
    return node_to_pset
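A minimal usage sketch for get_node_to_pset as defined above; the two-state transition matrix, three-node tree, and observed leaf are made-up illustration data:

import networkx as nx

# Sparse two-state transition matrix as a directed weighted graph.
P = nx.DiGraph()
P.add_weighted_edges_from([(0, 0, 0.9), (0, 1, 0.1), (1, 1, 1.0)])

# A tiny tree rooted at 0 with children 1 and 2; leaf 1 is observed
# in state 1, every other node has missing state.
T = nx.Graph([(0, 1), (0, 2)])
node_to_pset = get_node_to_pset(T, 0, node_to_state={1: 1}, P_default=P)
# node 1 is pinned to {1}; nodes 0 and 2 still allow {0, 1}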
def cheapestLeafConnection(G, H):
    nodes = H.nodes()
    candidates = []
    tmp_weight = 0
    counter = 0
    while not udah_belom(H):  # udah_belom: Indonesian for "done yet?"
        #counter = 0
        for x in nodes:
            candidates=[]
            counter += 1
            x_neigh = list(H.neighbors(x))
            if len(x_neigh) > 2:
                for y in x_neigh:
                    #make a copy of current mst
                    dummyGraph = H.copy()
                    
                    #remove edge
                    removeNeighborColor(dummyGraph, x, y)
                    tmp_weight = G[x][y]['weight']      #save weight
                    dummyGraph.remove_edge(x,y)
    
                    tree = nx.dfs_successors(dummyGraph, x)
                    anak_anak = nx.dfs_predecessors(dummyGraph, x)  # anak_anak: Indonesian for "children"
                    for anak in anak_anak:
                        if anak not in tree:            #if leaf
                            if validColor(dummyGraph, anak, y):
                                tmp = (anak, y, G[anak][y]['weight'])
                                candidates.append(tmp)
                    dummyGraph.add_edge(x, y, weight = tmp_weight)
                    addNeighborColor(dummyGraph, x, y)

            if len(candidates)>0:
                candidates = sorted(candidates, key = lambda z: z[2])
                fro, tom, wei = candidates[0]
                dummyGraph.add_edge(fro,tom, weight=wei)
                removeNeighborColor(dummyGraph, x, tom)
                dummyGraph.remove_edge(x,tom)
                addNeighborColor(dummyGraph, fro, tom)
                H = dummyGraph.copy()
                #drawGraph(H)
            elif (counter%10000) ==0:
                if counter%100000==0:
                    print('no candidates,', counter, 'iterations in Cheapest Leaf Connection')
                #drawGraph(dummyGraph)
                for x in nodes:
                    x_neigh = list(dummyGraph.neighbors(x))
                    if len(x_neigh)==1:
                        dummyGraph.remove_edge(x, x_neigh[0])
                        removeNeighborColor(dummyGraph, x, x_neigh[0])
                        for y in dummyGraph.neighbors(x_neigh[0]):
                            if validColor(dummyGraph, x, y) and len(list(dummyGraph.neighbors(y))) < 3:
                                dummyGraph.add_edge(x,y)
                                addNeighborColor(dummyGraph, x, y)
                                H = dummyGraph.copy()
                                break
                        break
            elif counter == 250001:
                print('Using Cheapest Leaf Connection failed miserably =(')
                #os.system('say "Using candidates failed miserably..."')
                print('Try using Direct Leaf Connection')
                #os.system('say "Try using direct leaf connection"')
                H = directLeafConnection(G, dummyGraph)
                return H
    return H
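The leaf test buried in the loop above is worth isolating: nx.dfs_successors only lists internal nodes of the DFS tree as keys, so any node that appears in nx.dfs_predecessors but not in the successors dict is a leaf. A small sketch:

import networkx as nx

G = nx.path_graph(4)                    # 0 - 1 - 2 - 3
tree = nx.dfs_successors(G, 0)          # {0: [1], 1: [2], 2: [3]}
anak_anak = nx.dfs_predecessors(G, 0)   # {1: 0, 2: 1, 3: 2}
leaves = [n for n in anak_anak if n not in tree]
print(leaves)                           # [3]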
Example #55
0
File: ddg.py Project: holycrap872/angr
    def _construct(self):
        """
        Construct the data dependence graph.

        We track the following types of dependence:
        - (Intra-IRSB) temporary variable dependencies
        - Register dependencies
        - Memory dependencies, although it's very limited. See below.

        We track the following types of memory access:
        - (Intra-functional) Stack read/write.
            Trace changes of stack pointers inside a function, and the dereferences of stack pointers.
        - (Inter-functional) Stack read/write.
        - (Global) Static memory positions.
            Keep a map of all accessible memory positions to their source statements per function. After that, we
            traverse the CFG and link each pair of reads/writes together in the order of control-flow.

        We do not track the following types of memory access:
        - Symbolic memory access
            They cannot be tracked under fastpath mode (the mode used to generate the CFG) anyway.
        """

        # TODO: Here we are assuming that there is only one node whose address is the entry point. Otherwise it should
        # TODO: be fixed.
        initial_node = self._cfg.get_any_node(self._start)

        # Initialize the worklist
        worklist = list(networkx.dfs_successors(self._cfg.graph, initial_node))
        # Also create a set for our worklist for fast inclusion test
        worklist_set = set(worklist)

        # A dict storing defs set
        # variable -> locations
        live_defs_per_node = {}

        while worklist:
            # Pop out a node
            node = worklist[0]
            worklist = worklist[1:]
            worklist_set.remove(node)

            # Grab all final states. There are usually more than one (one state for each successor), and we gotta
            # process all of them
            final_states = node.final_states

            if node in live_defs_per_node:
                live_defs = live_defs_per_node[node]
            else:
                live_defs = {}
                live_defs_per_node[node] = live_defs

            successing_nodes = list(self._cfg.graph.successors(node))
            for state in final_states:
                if state.scratch.jumpkind == 'Ijk_FakeRet' and len(final_states) > 1:
                    # Skip fakerets if there are other control flow transitions available
                    continue

                # TODO: Match the jumpkind
                # TODO: Support cases where IP is undecidable
                corresponding_successors = [n for n in successing_nodes if n.addr == state.se.any_int(state.ip)]
                if not corresponding_successors:
                    continue
                successing_node = corresponding_successors[0]

                new_defs = self._track(state, live_defs)

                if successing_node in live_defs_per_node:
                    defs_for_next_node = live_defs_per_node[successing_node]
                else:
                    defs_for_next_node = {}
                    live_defs_per_node[successing_node] = defs_for_next_node

                changed = False
                for var, code_loc_set in new_defs.items():
                    if var not in defs_for_next_node:
                        l.debug('%s New var %s', state.ip, var)
                        defs_for_next_node[var] = code_loc_set
                        changed = True

                    else:
                        for code_loc in code_loc_set:
                            if code_loc not in defs_for_next_node[var]:
                                l.debug('%s New code location %s', state.ip, code_loc)
                                defs_for_next_node[var].add(code_loc)
                                changed = True

                if changed:
                    # Put all reachable successors back to our worklist again
                    if successing_node not in worklist_set:
                        worklist.append(successing_node)
                        worklist_set.add(successing_node)

                    all_successors_dict = networkx.dfs_successors(self._cfg._graph, source=successing_node)
                    for successors in all_successors_dict.values():
                        for s in successors:
                            if s not in worklist_set:
                                worklist.append(s)
                                worklist_set.add(s)
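The re-seeding step works because networkx.dfs_successors returns a dict mapping each visited node to its DFS children, so flattening the values enumerates everything reachable from the source. A minimal standalone sketch on a made-up graph:

import networkx

G = networkx.DiGraph([(0, 1), (1, 2), (1, 3), (3, 4)])
all_successors_dict = networkx.dfs_successors(G, source=1)
reachable = {s for successors in all_successors_dict.values()
             for s in successors}
print(reachable)  # {2, 3, 4}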
Example #56
0
 def get_all_successors(self, basic_block):
     return networkx.dfs_successors(self._graph, basic_block)
Example #57
0
File: vsa_ddg.py Project: chen93/angr
    def _explore(self):
        """
        Starting from the start_node, explore the entire VFG, and perform the following:
        - Generate def-use chains for all registers and memory addresses using a worklist
        """

        # TODO: The worklist algorithm can definitely use some optimizations. This is left as future work.

        # The worklist holds individual VFGNodes that comes from the VFG
        # Initialize the worklist with all nodes in VFG
        worklist = list(self._vfg.graph.nodes())
        # Set up a set of worklist for fast inclusion test
        worklist_set = set(worklist)

        # A dict storing defs set
        # variable -> locations
        live_defs_per_node = { }

        while worklist:
            # Pop out a node
            node = worklist[0]
            worklist_set.remove(node)
            worklist = worklist[1:]

            # Grab all final states. There are usually more than one (one state for each successor), and we gotta
            # process all of them
            final_states = node.final_states

            if node in live_defs_per_node:
                live_defs = live_defs_per_node[node]
            else:
                live_defs = { }
                live_defs_per_node[node] = live_defs

            successing_nodes = list(self._vfg.graph.successors(node))
            for state in final_states:
                if state.history.jumpkind == 'Ijk_FakeRet' and len(final_states) > 1:
                    # Skip fakerets if there are other control flow transitions available
                    continue

                # TODO: Match the jumpkind
                # TODO: Support cases where IP is undecidable
                corresponding_successors = [ n for n in successing_nodes if n.addr == state.se.eval(state.ip) ]
                if not corresponding_successors:
                    continue
                successing_node = corresponding_successors[0]

                new_defs = self._track(state, live_defs)

                if successing_node in live_defs_per_node:
                    defs_for_next_node = live_defs_per_node[successing_node]
                else:
                    defs_for_next_node = { }
                    live_defs_per_node[successing_node] = defs_for_next_node

                changed = False
                for var, code_loc_set in new_defs.items():
                    if var not in defs_for_next_node:
                        defs_for_next_node[var] = code_loc_set
                        changed = True

                    else:
                        for code_loc in code_loc_set:
                            if code_loc not in defs_for_next_node[var]:
                                defs_for_next_node[var].add(code_loc)
                                changed = True

                if changed:
                    # Put all reachable successors back to our worklist again
                    if successing_node not in worklist_set:
                        worklist.append(successing_node)
                        worklist_set.add(successing_node)
                    all_successors_dict = networkx.dfs_successors(self._vfg.graph, source=successing_node)
                    for successors in all_successors_dict.values():
                        for s in successors:
                            if s not in worklist_set:
                                worklist.append(s)
                                worklist_set.add(s)
Example #58
0
File: _mcx.py Project: argriffing/raoteh
def get_node_to_pmap(T, root,
        node_to_state=None, P_default=None, node_to_set=None):
    """
    For each node, construct the map from state to subtree likelihood.

    Parameters
    ----------
    T : undirected unweighted acyclic networkx graph
        A tree whose edges are optionally annotated
        with edge-specific state transition probability matrix P.
    root : integer
        The root node.
    node_to_state : dict, optional
        A sparse map from a node to its known state if any.
        Nodes in this map are assumed to have completely known state.
        Nodes not in this map are assumed to have completely missing state.
        If this map is not provided,
        all state information will be assumed to be completely missing.
        Entries of this dict that correspond to nodes not in the tree
        will be silently ignored.
    P_default : networkx directed weighted graph, optional
        Sparse transition matrix to be used for edges
        which are not annotated with an edge-specific transition matrix.
    node_to_set : dict, optional
        Maps nodes to possible states.

    Returns
    -------
    node_to_pmap : dict
        A map from a node to a map from a state to a subtree likelihood.

    """
    # Get the possible states for each node,
    # after accounting for the rooted tree shape
    # and the edge-specific transition matrix sparsity patterns
    # and the observed states.
    if node_to_set is None:
        node_to_pset = get_node_to_pset(T, root,
                node_to_state=node_to_state, P_default=P_default)
        node_to_set = _mc0.get_node_to_set(T, root,
                node_to_pset, P_default=P_default)

    # Bookkeeping.
    successors = nx.dfs_successors(T, root)

    # For each node, get a sparse map from state to subtree likelihood.
    node_to_pmap = {}
    for node in nx.dfs_postorder_nodes(T, root):

        # Build the pmap.
        pmap = {}
        for node_state in node_to_set[node]:

            # Add the subtree likelihood to the node state pmap.
            cprob = 1.0
            for n in successors.get(node, []):
                P = T[node][n].get('P', P_default)
                nprob = 0.0
                allowed_states = set(P[node_state]) & set(node_to_pmap[n])
                if not allowed_states:
                    raise ValueError('internal error')
                for s in allowed_states:
                    a = P[node_state][s]['weight']
                    b = node_to_pmap[n][s]
                    nprob += a * b
                cprob *= nprob
            pmap[node_state] = cprob

        # Add the map from state to subtree likelihood.
        node_to_pmap[node] = pmap

    # Return the map from node to the map from state to subtree likelihood.
    return node_to_pmap
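A minimal usage sketch for get_node_to_pmap as defined above. Passing node_to_set explicitly sidesteps the _mc0 helper, which is not shown in this snippet; the tree, matrix, and state sets are made-up illustration data:

import networkx as nx

P = nx.DiGraph()
P.add_weighted_edges_from([(0, 0, 0.9), (0, 1, 0.1), (1, 1, 1.0)])

T = nx.Graph([(0, 1)])  # root 0 with a single child 1

pmap = get_node_to_pmap(T, 0, P_default=P,
                        node_to_set={0: {0, 1}, 1: {1}})
print(pmap[0])  # {0: 0.1, 1: 1.0} -- subtree likelihood per root state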
    def reinitialize_world(self, nx_graph_id=None, center_node=False, start_node=False, number_of_initial_steps=0):
        # assert nx_graph_id is None and node_id is not None,'node id can only be specified when also nx_graph id is specified'
        self.hero.reset()
        blocked_objects = []
        if 'nx_skeleton_filename' in self.settings:
            # Select random skeleton
            node_to_jump = None
            if nx_graph_id is None:
                nx_graph_id = choice(list(self.nx_skeletons.nx_graph_dic.keys()))
            current_graph = self.nx_skeletons.nx_graph_dic[nx_graph_id]
            if center_node:
                node_to_jump = networkx_utils.get_center_node_from_nx_graph(current_graph)
                if not len(node_to_jump) == 1:
                    node_to_jump = choice(list(current_graph.nodes()))
                else:
                    node_to_jump = node_to_jump[0][0]
            if start_node:
                assert center_node is False, 'either start_node or center_node has to be False'
                node_to_jump = networkx_utils.get_nodes_with_a_specific_degree(current_graph)
                if len(node_to_jump) == 2:
                    node_to_jump = node_to_jump[0]
            if node_to_jump is None:
                node_to_jump = choice(list(current_graph.nodes()))

            if number_of_initial_steps > 0 and current_graph.number_of_nodes()-2 > number_of_initial_steps:
                successor_dic = nx.dfs_successors(current_graph, source=node_to_jump)
                source = [node_to_jump]
                source_node = node_to_jump
                # Save the starting position for later to make sure that it is
                # not added as an endpoint in the game
                source_pos = current_graph.nodes[node_to_jump]['position']
                source_pos = Point3(source_pos[0], source_pos[1], source_pos[2])
                blocked_objects.append(source_pos)
                for steps in range(number_of_initial_steps):
                    source = successor_dic[source[0]]
                target_node = source[0]
                self.make_agent_walk_along_nx_graph(current_graph, source_node, target_node)
                # Remove potential endnode objects from the source node

                # print 'hero walked already a bit', self.hero.visited_positions
                # print 'hero walked already a bit', self.hero.taken_actions
                # print 'hero walked already a bit', self.hero.observed_observations
            else:
                pos = self.nx_graph.nodes[node_to_jump]['position']

                self.hero.jump(pos)
        else:
            self.jump()

        #Get id for position
        pos = self.hero.position
        if 'volumetric_objects_filename' in self.settings:
            seg_id = self.volumetric_object_matrix[pos.x, pos.y, pos.z]
            # new_reward_matrix = self.reward_matrix_ori.copy()
            # new_observation_matrix = self.reward_matrix_ori.copy()
            # new_reward_matrix[self.volumetric_object_matrix != seg_id] = 0
            # self.reward_matrix = new_reward_matrix
            # self.observation_matrix = new_observation_matrix
            # self.observation_matrix = new_reward_matrix
        else:
            seg_id = None
        # #Reinitialize the skeletons
        # for obj in self.removed_objects:
        #     self.objects.append(obj)
        self.objects = []
        self.removed_objects = []
        self.objects_eaten['skeleton'] = 0
        self.skeletons_to_objects(seg_id=seg_id, blocked_objects=blocked_objects)
        self.games_played += 1
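The initial-walk idiom above (repeatedly stepping through the dfs_successors chain from the start node) reduces to a few lines; a sketch on a made-up path-shaped skeleton:

import networkx as nx

skeleton = nx.path_graph(6)                 # 0 - 1 - 2 - 3 - 4 - 5
successor_dic = nx.dfs_successors(skeleton, source=0)
source = [0]
for steps in range(3):                      # number_of_initial_steps = 3
    source = successor_dic[source[0]]
print(source[0])                            # ends on node 3 (the target_node)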