Code example #1
File: graph_util.py Project: SpringRi/phd
def GraphToInputTarget(
    graph: nx.DiGraph) -> typing.Tuple[nx.DiGraph, nx.DiGraph]:
  """Returns 2 graphs with input and target feature vectors for training.

  Args:
    graph: An `nx.DiGraph` instance.

  Returns:
    The input `nx.DiGraph` instance.
    The target `nx.DiGraph` instance.

  Raises:
    ValueError: unknown node type
  """

  def CreateFeature(data_dict: typing.Dict[str, typing.Any],
                    feature_names: typing.List[str]):
    return np.hstack([np.array(data_dict[feature], dtype=float) for feature in
                      feature_names])

  def ToOneHot(indices: typing.Iterator[int], max_value: int, axis: int = -1):
    one_hot = np.eye(max_value)[indices]
    if axis not in (-1, one_hot.ndim):
      one_hot = np.moveaxis(one_hot, -1, axis)
    return one_hot

  input_node_fields = ("pos", "weight", "start", "end")
  input_edge_fields = ("distance",)
  target_node_fields = ("solution",)
  target_edge_fields = ("solution",)

  input_graph = graph.copy()
  target_graph = graph.copy()

  solution_length = 0
  # Set node features.
  for node_index, node_feature in graph.nodes(data=True):
    input_graph.add_node(
        node_index, features=CreateFeature(node_feature, input_node_fields))
    target_node = ToOneHot(
        CreateFeature(node_feature, target_node_fields).astype(int), 2)[0]
    target_graph.add_node(node_index, features=target_node)
    solution_length += int(node_feature["solution"])
  solution_length /= graph.number_of_nodes()

  # Set edge features.
  for receiver, sender, features in graph.edges(data=True):
    input_graph.add_edge(
        sender, receiver, features=CreateFeature(features, input_edge_fields))
    target_edge = ToOneHot(
        CreateFeature(features, target_edge_fields).astype(int), 2)[0]
    target_graph.add_edge(sender, receiver, features=target_edge)

  # Set graph features.
  input_graph.graph["features"] = np.array([0.0])
  target_graph.graph["features"] = np.array([solution_length], dtype=float)

  return input_graph, target_graph
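
A hedged usage sketch for GraphToInputTarget, assuming the function above is in scope together with its `np`/`typing`/`nx` module imports. The toy graph carries the attributes the function reads ("pos", "weight", "start", "end", "solution" on nodes; "distance" and "solution" on edges); every value here is invented for illustration.

import networkx as nx

g = nx.DiGraph()
g.add_node(0, pos=(0.0, 0.0), weight=1.0, start=1, end=0, solution=1)
g.add_node(1, pos=(1.0, 0.0), weight=2.0, start=0, end=1, solution=1)
g.add_edge(0, 1, distance=1.0, solution=1)

input_graph, target_graph = GraphToInputTarget(g)
print(input_graph.nodes[0]["features"])   # hstacked pos/weight/start/end values
print(target_graph.nodes[0]["features"])  # one-hot encoding of "solution"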
Code example #2
    def GraphToInputTarget(
            cls, graph: nx.DiGraph) -> typing.Tuple[nx.DiGraph, nx.DiGraph]:
        """Returns 2 graphs with input and target feature vectors for training.

        Args:
          graph: An `nx.DiGraph` instance.

        Returns:
          The input `nx.DiGraph` instance.
          The target `nx.DiGraph` instance.

        Raises:
          ValueError: unknown node type
        """
        input_node_fields = ("pos", "weight", "start", "end")
        input_edge_fields = ("distance", )
        target_node_fields = ("solution", )
        target_edge_fields = ("solution", )

        input_graph = graph.copy()
        target_graph = graph.copy()

        solution_length = 0
        # Set node features.
        for node_index, node_feature in graph.nodes(data=True):
            input_graph.add_node(node_index,
                                 features=cls.CreateFeature(
                                     node_feature, input_node_fields))
            target_node = cls.ToOneHot(
                cls.CreateFeature(node_feature,
                                  target_node_fields).astype(int), 2)[0]
            target_graph.add_node(node_index, features=target_node)
            solution_length += int(node_feature["solution"])
        solution_length /= graph.number_of_nodes()

        # Set edge features.
        for receiver, sender, features in graph.edges(data=True):
            input_graph.add_edge(
                sender,
                receiver,
                features=cls.CreateFeature(features, input_edge_fields),
            )
            target_edge = cls.ToOneHot(
                cls.CreateFeature(features, target_edge_fields).astype(int),
                2)[0]
            target_graph.add_edge(sender, receiver, features=target_edge)

        # Set graph features.
        input_graph.graph["features"] = np.array([0.0])
        target_graph.graph["features"] = np.array([solution_length],
                                                  dtype=float)

        return input_graph, target_graph
Code example #3
def fp_bgp_simulate(dg: DiGraph, c_as: int, s_as_set: set):
    g = dg.copy()
    transform(g, c_as)
    for i in range(30):
        print(i)
        fp_bgp_advertise(g)
    routes = {}
    for a in s_as_set:
        if a not in g.nodes:
            print("a not in g.nodes")
            continue
        a_rib = g.nodes[a]['rib']
        assert isinstance(a_rib, PyTricia)
        r = a_rib.get("1.1.1.1/24")
        if r is None:
            print("route is None %s %s" % (str(c_as), str(a)))
            continue
        p = a
        r = []
        while p != c_as:
            r.append(p)
            p = g.nodes[p]['rib'].get("1.1.1.1/24")
            assert p is not None
            p = p[0]
        r.append(c_as)
        routes[a] = r
    return routes
Code example #4
def find_outdag(igraph: networkx.DiGraph) -> List[str]:
    """
    Finds the maximal directed acyclic subgraph that is closed under the successors operation.
    Essentially, these components are the "output cascades" which can be exploited by various algorithms, e.g.
    the computation of basins of attraction.

    **arguments**:
        * *igraph*: interaction graph

    **returns**:
        * *names*: the outdag

    **example**::

        >>> find_outdag(igraph)
        ["v7", "v8", "v9"]
    """

    graph = igraph.copy()
    sccs = networkx.strongly_connected_components(graph)
    sccs = [list(x) for x in sccs]
    candidates = [scc[0] for scc in sccs if len(scc) == 1]
    candidates = [x for x in candidates if not graph.has_edge(x, x)]
    sccs = [
        scc for scc in sccs if len(scc) > 1 or graph.has_edge(scc[0], scc[0])
    ]

    graph.add_node("!")
    for scc in sccs:
        graph.add_edge(scc[0], "!")

    outdag = [x for x in candidates if not networkx.has_path(graph, x, "!")]

    return outdag
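
A hedged usage sketch for find_outdag with an invented interaction graph: v1 and v2 form a cycle and are excluded, while the downstream cascade v3, v4 is returned.

import networkx

igraph = networkx.DiGraph()
igraph.add_edges_from([("v1", "v2"), ("v2", "v1"),   # a cycle, not part of the outdag
                       ("v2", "v3"), ("v3", "v4")])  # acyclic output cascade
print(find_outdag(igraph))  # ['v3', 'v4'] (order may vary)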
Code example #5
File: trees.py Project: estnltk/grammar_experiments
def get_valid_paths(graph: nx.DiGraph, rule: Rule):
    new_graph = graph.copy()

    new_graph.remove_nodes_from([START, END])
    new_graph.add_nodes_from([START, END])

    entries, exits = [], []

    for node in new_graph.nodes():
        if node[-1] == rule.rhs[0]:
            entries.append(node)
        if node[-1] == rule.rhs[-1]:
            exits.append(node)

    new_graph.add_edges_from([(START, i) for i in entries])
    new_graph.add_edges_from([(i, END) for i in exits])
    paths = [[START]]
    dones_paths = []
    etalon = rule.rhs[:]

    while paths:
        current_path = paths.pop(0)
        for succ in new_graph.successors(current_path[-1]):
            if len(current_path) <= len(etalon):
                if succ[-1] == etalon[len(current_path) - 1]:
                    paths.append(current_path + [succ])
            elif len(current_path) == len(etalon) + 1 and succ == END:
                dones_paths.append(current_path[1:])
    return dones_paths
Code example #6
def brute_force_decomposition(arborescence_stubs: ArborescenceDecomposition, graph: nx.DiGraph) -> List[
    ArborescenceDecomposition]:
    """
    Takes a set of initial arborescences and returns a brute-forced list of all possible arborescence decompositions.
    :param arborescence_stubs: the initial set of Arborescence objects (initialized with an arc to the destination)
    :param graph:
    :return: a list of lists, each representing a possible arborescence decomposition
    """
    assert len(arborescence_stubs) > 0
    try:
        arborescence = next(a for a in arborescence_stubs if len(a.vertices) < len(graph))
    except StopIteration:
        # print("Found an arc-disjoint arborescence decomposition.")
        return [arborescence_stubs]

    variants = brute_force_arborescence(arborescence, graph)
    # print(f"Found {len(variants)} variants for arborescence-stub number {arborescence_stubs.index(arborescence)}.")

    decompositions = []
    # TODO: do some parallelization here
    for variant in variants:
        # print(f"found arborescence: {variant}")
        # print(f"doing next arborescence with ars: {arcs}")
        reduced_graph = graph.copy()
        reduced_graph.remove_edges_from(variant.arcs)
        decompositions.extend(
            # replace the stub with the complete arborescence and recursively call procedure again
            brute_force_decomposition(
                ArborescenceDecomposition([variant if a == arborescence else a for a in arborescence_stubs]),
                reduced_graph))
    return decompositions
Code example #7
def __analyze_first_tasks(process_graph: DiGraph) -> list:
    """
    Extracts the first tasks of the process

    Parameters
    ----------
    process_graph : Networkx di-graph

    Returns
    -------
    list of tasks
    """
    temp_process_graph = process_graph.copy()
    for node in list(temp_process_graph.nodes):
        if process_graph.nodes[node]['type'] not in ['start', 'end', 'task']:
            preds = list(temp_process_graph.predecessors(node))
            succs = list(temp_process_graph.successors(node))
            temp_process_graph.add_edges_from(
                list(itertools.product(preds, succs)))
            temp_process_graph.remove_node(node)
    graph_data = pd.DataFrame.from_dict(dict(temp_process_graph.nodes.data()),
                                        orient='index')
    start = graph_data[graph_data.type.isin(['start'])]
    start = start.index.tolist()[0]  # start node id
    in_tasks = [
        temp_process_graph.nodes[x]['name']
        for x in temp_process_graph.successors(start)
    ]
    return in_tasks
Code example #8
def flat_to_hierarchical(f_graph: DiGraph):
    h_graph = f_graph.copy()
    for edge in h_graph.edges:
        edge_data = h_graph[edge[0]][edge[1]]
        if 'edges' not in edge_data:
            edge_data['edges'] = []
        sub_edge = {
            'source': edge[0],
            'target': edge[1],
            'predicted': edge_data['predicted']
        }
        if edge_data['predicted'] is True:
            sub_edge['prediction_score'] = edge_data['prediction_score']
            sub_edge['applied_methods'] = edge_data['applied_methods']
        edge_data['edges'].append(sub_edge)

        if h_graph.has_edge(edge[1], edge[0]):
            sub_edge_data = h_graph[edge[1]][edge[0]]
            sub_edge = {
                'source': edge[1],
                'target': edge[0],
                'predicted': sub_edge_data['predicted']
            }
            if sub_edge_data['predicted'] is True:
                sub_edge['prediction_score'] = sub_edge_data['prediction_score']
                sub_edge['applied_methods'] = sub_edge_data['applied_methods']
            edge_data['edges'].append(sub_edge)
            h_graph.remove_edge(edge[1], edge[0])
        if edge_data['predicted'] is True:
            del edge_data['prediction_score']
            del edge_data['applied_methods']
        del edge_data['predicted']
    return h_graph
Code example #9
def print_stats(cfg: nx.DiGraph, entry_node: Optional[str] = None) -> None:
    """Print CFG statistics."""
    if entry_node and entry_node not in cfg:
        logging.warning(
            'Entry point %s does not exist in the CFG. '
            'Skipping...', entry_node)
        return

    if entry_node:
        descendants = nx.descendants(cfg, entry_node)
        descendants.add(entry_node)
        reachable_cfg = cfg.copy()
        reachable_cfg.remove_nodes_from(n for n in cfg if n not in descendants)
    else:
        reachable_cfg = cfg

    print()
    if entry_node:
        print('%s stats' % entry_node)

    num_bbs = reachable_cfg.number_of_nodes()
    print('  num. basic blocks: %d' % num_bbs)

    num_edges = reachable_cfg.size()
    print('  num. edges: %d' % num_edges)

    num_indirect_calls = get_num_indirect_calls(reachable_cfg)
    print('  num. indirect calls: %d' % num_indirect_calls)

    if entry_node:
        eccentricity = nx.eccentricity(reachable_cfg, v=entry_node)
        print('  eccentricity: %d' % eccentricity)

        longest_path = get_longest_path(reachable_cfg, entry_node)
        print('  longest path from %s: %s' % (entry_node, longest_path))
Code example #10
def tarjan_condition(edge: Tuple[int, int], graph: nx.DiGraph,
                     edges_used: Set[Tuple[int, int]], connectivity: int,
                     destination: str) -> bool:
    """
    Checks the condition for arborescence decomposition construction according to Tarjan (1974).
    :param edge: the edge for which the condition should be checked
    :param graph: the original graph of the arborescence decomposition
    :param edges_used: all edges used for arborescences up to this point
    :param connectivity: the desired connectivity, i.e. the number of arborescences that still need to be constructed
    :param destination: the destination vertex of the decomposition
    :return: a boolean, indicating if the condition is fulfilled
    """
    if connectivity == 0:
        return True

    reduced_graph = graph.copy()
    reduced_graph.remove_edges_from(edges_used)
    reduced_graph.remove_edge(edge[0],
                              edge[1])  # TODO: not mentioned in Tarjan?

    for i in range(1, connectivity + 1):
        reduced_graph.add_edge(edge[0], f"helper_node_{i}")
        reduced_graph.add_edge(f"helper_node_{i}", destination)

    return nx.edge_connectivity(reduced_graph, edge[0],
                                edge[1]) >= connectivity
Code example #11
def get_all_pairs_edge_disjoint_shortest_paths(g: nx.DiGraph):
    edge_disjoint_shortest_paths = {}

    for source in g.nodes:
        shortest_paths = {
            target: node_list_to_edge_list(path)
            for target, path in nx.shortest_path(g, source,
                                                 weight="weight").items()
        }
        ws = {node: len(shortest_paths[node]) for node in g.nodes}
        t = nx.DiGraph()
        t.add_nodes_from(g.nodes)
        t.add_edges_from(
            itertools.chain(*map(lambda path: node_list_to_edge_list(path),
                                 shortest_paths.values())))
        for target in g.nodes:
            edge_disjoint_shortest_paths[source] = (
                target,
                get_edge_disjoint_shortest_paths(
                    g.copy().to_directed(),
                    source,
                    target,
                    shortest_paths,
                    ws,
                    t.to_directed(),
                ),
            )

    return edge_disjoint_shortest_paths
Code example #12
    def prune_graph_simple(self, graph: nx.DiGraph,
                           flow: Tuple[int, int]) -> nx.DiGraph:
        """
        Remove cycles between flow source and sink. Uses distances to give a
        partial topological order then removes edges that take us in the wrong
        direction. Simple but removes more paths than necessary.
        Args:
            graph: graph to DAGify
            flow: source and sink of the flow

        Returns:
            A DAG with source at the start and sink at the end
        """
        graph = graph.copy()

        # first calculate distance to sink for each vertex
        distances = collections.defaultdict(int)
        distance_results = nx.shortest_path_length(graph,
                                                   source=None,
                                                   target=flow[1],
                                                   weight='route_weight')
        distances.update(distance_results)

        # now we prune edges that take us further from the destination so
        # that there are no cycles
        for (src, dst) in list(graph.edges()):
            if distances[dst] >= distances[src]:
                graph.remove_edge(src, dst)
        return graph
Code example #13
def add_random_edges(win_graph: nx.DiGraph, candidates: set) -> nx.DiGraph:
    """Chooses a random pair of nodes that aren't connected to each other, and then connects them,
    never adding edges that would result in a cycle, until the graph is a complete win-graph.
    """
    to_add = candidates.difference(set(win_graph.nodes))
    win_graph = win_graph.copy()
    candidates = set(win_graph.nodes).union(to_add)
    edge_list = []
    for u in candidates:
        for v in candidates:
            if u != v:
                edge_list.append((u, v))

    random.shuffle(edge_list)

    # Existing edges don't have to be checked
    for edge in win_graph.edges:
        edge_list.remove(edge)

    for c1, c2 in edge_list:
        try:
            win_graph.add_edge(c1, c2)
            nx.find_cycle(win_graph)
            win_graph.remove_edge(c1, c2)
        except nx.NetworkXNoCycle:
            pass

    return win_graph
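
A hedged usage sketch for add_random_edges with made-up candidate names: starting from one recorded win, it fills in every remaining pair while keeping the win-graph acyclic.

import random
import networkx as nx

random.seed(0)  # only to make the sketch reproducible

win_graph = nx.DiGraph()
win_graph.add_edge("alice", "bob")  # alice already beat bob

complete = add_random_edges(win_graph, {"alice", "bob", "carol"})
print(sorted(complete.edges))                  # one edge per pair of candidates
print(nx.is_directed_acyclic_graph(complete))  # True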
Code example #14
def rand_rewire_graph(g: nx.DiGraph, p: float) -> Tuple[nx.DiGraph, Set[str]]:
    """
    Re-wire given share of edges randomly by either
      - re-assigning an edge to another node pair (50%)
      - swap labels of two edges (50%)
    """
    g = g.copy()
    nodes = list(g.nodes)
    labels_mod = set()

    for i in range(round(len(g.nodes) * p)):
        edges = list(g.edges)

        # swap edge labels
        if random.random() < .5:
            u, v = random.choice(edges)
            x, y = random.choice(edges)

            tmp = g[u][v]['label']
            g[u][v]['label'] = g[x][y]['label']
            g[x][y]['label'] = tmp

            labels_mod.add(tmp)

        # re-wire edge
        else:
            u, v = random.choice(edges)
            x, y = random.choice(nodes), random.choice(nodes)

            g.add_edge(x, y, label=g[u][v]['label'])
            g.remove_edge(u, v)

            labels_mod.add(g[x][y]['label'])

    return g, labels_mod
Code example #15
    def __init__(
            self,
            graph: DiGraph,
            mu: float,
            beta: float,
            stochastic: bool = True,
            reconnection_policy: ReconnectionPolicy = ReconnectionPolicy.NONE,
            default_delay=0,
            weight_transfer=False):
        self.graph = graph.copy()

        self.mu = mu
        self.beta = beta
        self.stochastic = stochastic
        self.reconnection_policy = reconnection_policy
        self.default_delay = default_delay
        self.weight_transfer = weight_transfer

        self.defaulted_density = []
        self.nodes_per_sector = self.group_by_sector(self.graph.nodes)

        self.iteration = 0

        if default_delay != 0:
            self.init_default_delay()
Code example #16
File: day20.py Project: etfrogers/AdventOfCode
def convert_to_tree(graph: nx.DiGraph):
    if len(graph.nodes) == 1:
        return graph.copy()
    tree = nx.dag_to_branching(graph)
    for v, source in tree.nodes(data='source'):
        tree.nodes[v]['regex'] = graph.nodes[source]['regex']
    return tree
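
A hedged usage sketch for convert_to_tree: an invented three-node DAG whose nodes carry the "regex" attribute that the function copies onto the branching produced by nx.dag_to_branching.

import networkx as nx

g = nx.DiGraph()
g.add_node(0, regex="a")
g.add_node(1, regex="b")
g.add_node(2, regex="c")
g.add_edges_from([(0, 1), (1, 2), (0, 2)])  # two distinct paths reach node 2

tree = convert_to_tree(g)
# node 2 is duplicated in the branching (one copy per path), each copy keeping its regex
print(sorted(data["regex"] for _, data in tree.nodes(data=True)))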
Code example #17
File: network_generator.py Project: blandfort/clep
def overlay_samples(
        data: pd.DataFrame,
        information_graph: nx.DiGraph,
        summary: bool = False,
) -> Union[nx.DiGraph, Tuple[nx.DiGraph, pd.DataFrame, Set]]:
    """Overlay the data onto the information graph by adding edges between patients and information nodes."""
    patient_label_mapping = {patient: label for patient, label in zip(data.index, data['label'])}
    value_mapping = {0: 'no_change', 1: 'up_reg', -1: 'down_reg'}

    overlay_graph = information_graph.copy()

    data_copy = data.drop(columns='label')
    values_data = data_copy.values

    summary_data = pd.DataFrame(0, index=data_copy.index, columns=["positive_relation", "negative_relation"])
    linked_genes = set()
    edges_to_remove = []

    for index, value_list in enumerate(tqdm(values_data, desc='Adding patients to the network: ')):
        for column, value in enumerate(value_list):
            patient = data_copy.index[index]
            gene = data_copy.columns[column]

            # Avoid mangled duplicates from pandas
            if "." in gene:
                if gene.split(".")[0] in data_copy.columns:
                    gene = gene.split(".")[0]

            # Ignore features with score of 0
            if value == 0:
                continue

            # Skip if gene is not in the knowledge graph
            if gene in information_graph.nodes:
                if overlay_graph.has_edge(patient, gene):
                    if overlay_graph.get_edge_data(patient, gene)['relation'] != value_mapping[value]:
                        if (patient, gene) not in edges_to_remove:
                            edges_to_remove.append((patient, gene))
                    continue
                linked_genes.add(gene)
                overlay_graph.add_edge(patient, gene, relation=value_mapping[value],
                                       label=patient_label_mapping[patient])
            if summary:
                summary_data.at[patient, VALUE_TO_COLNAME[value]] += 1

    # Remove patient-gene triples that have conflicting duplicates in the data
    for patient, gene in edges_to_remove:
        logger.warning(f"{patient}-{gene} triple is being discarded due to conflicting data")
        overlay_graph.remove_edge(patient, gene)

    if summary:
        non_conn_pats = summary_data[(summary_data['positive_relation'] == 0) & (summary_data['negative_relation'] == 0)]

        if len(non_conn_pats) > 0:
            logger.warning(f'{len(non_conn_pats)} samples is/are not connected to any genes.')

        return overlay_graph, summary_data, linked_genes
    else:
        return overlay_graph
Code example #18
def run_density(graph: DiGraph, mu, beta, policy, delay, max_iterations):
    g = graph.copy()

    sn = SecNet(g, mu, beta, reconnection_policy=policy, default_delay=delay)

    sn.run(max_iterations)

    return sn.defaulted_density
Code example #19
File: config.py Project: timgates42/partridge
def reroot_graph(G: nx.DiGraph, node: str) -> nx.DiGraph:
    """Return a copy of the graph rooted at the given node"""
    G = G.copy()
    for n, successors in list(nx.bfs_successors(G, source=node)):
        for s in successors:
            G.add_edge(s, n, **G.edges[n, s])
            G.remove_edge(n, s)
    return G
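
A hedged usage sketch for reroot_graph: every edge reachable from the chosen node is reversed so that the copy points toward it. The node names are purely illustrative.

import networkx as nx

G = nx.DiGraph([("a", "b"), ("b", "c")])
print(list(reroot_graph(G, "a").edges()))  # [('b', 'a'), ('c', 'b')]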
Code example #20
File: graph_utils.py Project: pavel-slepenkov/duro
def copy_graph_without_attributes(source_graph: nx.DiGraph,
                                  attributes: List) -> nx.DiGraph:
    graph = source_graph.copy()
    for node in graph:
        for attribute in attributes:
            if attribute in graph.nodes[node]:
                del graph.nodes[node][attribute]
    return graph
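
A hedged usage sketch for copy_graph_without_attributes; the node names and attribute keys are invented for the illustration.

import networkx as nx

g = nx.DiGraph()
g.add_node("table_a", last_created=1620000000, interval="24h")
g.add_node("table_b", interval="1h")
g.add_edge("table_a", "table_b")

stripped = copy_graph_without_attributes(g, ["last_created"])
print(dict(stripped.nodes(data=True)))  # 'last_created' removed, 'interval' kept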
Code example #21
def topological_sort_grouping(dag_graph: nx.DiGraph) -> List:
    # copy the graph
    graph_copy = dag_graph.copy()
    res = []
    while graph_copy:
        zero_indegree = [v for v, d in graph_copy.in_degree() if d == 0]
        res.append(zero_indegree)
        graph_copy.remove_nodes_from(zero_indegree)
    return res
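
A hedged usage sketch for topological_sort_grouping: the DAG's nodes are grouped into levels whose members have no dependencies on one another, so each group could be processed in parallel.

import networkx as nx

dag = nx.DiGraph([(1, 3), (2, 3), (3, 4), (3, 5)])
print(topological_sort_grouping(dag))  # [[1, 2], [3], [4, 5]]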
Code example #22
def create_undirected_graph(graph: DiGraph) -> Graph:
    copied_directed = graph.copy()

    copied_directed.add_node("0")
    for node in copied_directed.nodes:
        if copied_directed.in_degree(node) == 0 and node != "0":
            copied_directed.add_edge("0", node)

    return Graph(copied_directed)
Code example #23
def new_fas(G: nx.DiGraph, process_id=0):
    ret = G.copy()

    while True:
        try:
            cycle = nx.find_cycle(ret)
            u, v = cycle[0]
            ret.remove_edge(u, v)
        except nx.NetworkXNoCycle:
            break
    return ret
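
A hedged usage sketch for new_fas: one edge of the 1 -> 2 -> 3 -> 1 cycle is removed, after which the copy is acyclic (a greedy feedback-arc-set style heuristic). The graph is invented.

import networkx as nx

g = nx.DiGraph([(1, 2), (2, 3), (3, 1), (3, 4)])
acyclic = new_fas(g)
print(nx.is_directed_acyclic_graph(acyclic))            # True
print(g.number_of_edges() - acyclic.number_of_edges())  # 1 edge removed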
Code example #24
def remove_node_attributes(G: nx.DiGraph, attributes: Union[str, Iterable[str]]):
    """
    Return a copy of the graph with the given attributes
    deleted from all nodes.
    """
    G = G.copy()
    for _, data in G.nodes(data=True):
        for attribute in setwrap(attributes):
            if attribute in data:
                del data[attribute]
    return G
Code example #25
def to_hasse(graph: DiGraph) -> DiGraph:
    _graph = graph.copy()
    for pair_x, _, pair_z in get_transitive(_graph):
        node_x, _ = pair_x
        _, node_z = pair_z
        with suppress(NetworkXError):
            _graph.remove_edge(node_x, node_z)
    for node_x, node_y in get_reflexive(_graph):
        _graph.remove_edge(node_x, node_y)

    return _graph
Code example #26
File: topological_sort.py Project: ybc1991/Cirq
def random_topological_sort(dag: networkx.DiGraph) -> Iterable[Any]:
    remaining_dag = dag.copy()
    frontier = list(node for node in remaining_dag.nodes()
                    if not remaining_dag.pred[node])
    while frontier:
        random.shuffle(frontier)
        node = frontier.pop()
        succ = remaining_dag.succ[node]
        remaining_dag.remove_node(node)
        frontier.extend(new_node for new_node in succ
                        if not remaining_dag.pred[new_node])
        yield node
    assert not remaining_dag
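
A hedged usage sketch for random_topological_sort: drawing one random topological order of an invented DAG.

import random
import networkx

dag = networkx.DiGraph([("a", "c"), ("b", "c"), ("c", "d")])
print(list(random_topological_sort(dag)))  # e.g. ['a', 'b', 'c', 'd']; 'a' and 'b' may swap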
Code example #27
File: graph_lib.py Project: skearnes/scripture-graph
def add_use_edges(digraph: nx.DiGraph, threshold: float) -> pd.DataFrame:
    """Adds suggested edges to the graph using USE embedding similarity."""
    model_url = "https://tfhub.dev/google/universal-sentence-encoder-large/5"
    graph = digraph.copy()
    remove_topic_nodes(graph)
    embeddings = get_embeddings(graph, model_url=model_url, batch_size=1000)
    similarity = angular_cosine(embeddings)
    similarity[similarity < threshold] = 0.0
    nonzero = get_nonzero_edges(graph, similarity)
    mask = ~nonzero.exists
    suggested = nonzero[mask].copy()
    _add_suggested_edges(digraph, suggested, kind="use")
    return suggested
Code example #28
    def normalize_graph(self,
                        graph: nx.DiGraph,
                        verbose: bool = False) -> nx.DiGraph:
        graph = graph.copy()

        unvisited_nodes = Queue()
        unvisited_nodes.queue = deque(graph.nodes.keys())

        # init: dependencies parsing & POS
        for node_id in unvisited_nodes.queue:
            node = graph.nodes[node_id]
            prepare_node(self.parser, node)

        # decomposition rules
        def run_rules(node_id, is_use_preserved=True):
            is_decomposed = False
            for rule in self.rules:
                preserved_tokens = None
                if is_use_preserved:
                    # todo: preserved words just for no-operational?
                    preserved_tokens = self._preserved_tokens  #if (not isinstance(rule, op_norm_rules.OperationDecomposeRule)) else None
                decomposed, added_nodes_ids = rule.decompose(
                    node_id, graph, preserved_tokens=preserved_tokens)
                if decomposed:
                    for id in added_nodes_ids:
                        unvisited_nodes.put(id)
                    is_decomposed = True
                    if verbose:
                        copy = graph.copy()
                        self._update_labels_from_doc(graph=copy)
                        _logger.info(
                            f"{rule}{' -reserved' if is_use_preserved else ''} (node: {node_id})\t{Decomposition.from_graph(graph=copy).to_string()}"
                        )
            return is_decomposed

        while not unvisited_nodes.empty():
            node_id = unvisited_nodes.get()
            run_rules(node_id, is_use_preserved=True)
            run_rules(node_id, is_use_preserved=False)

        # update "label" from "doc" if needed
        self._update_labels_from_doc(graph=graph)

        # re-order operation chain
        self.reorder_operations(graph)

        # re-order graph alphabetically
        self.reorder(graph)

        # todo: reorder args: intersection, union, ... args order
        return graph
Code example #29
def brute_force_arborescence(arborescence_stub: Arborescence, graph: nx.DiGraph) -> List[Arborescence]:
    """
    Creates all possible arborescences on basis of the given arborescence and the available set of arcs in the graph.
    :param arborescence_stub: the initial Arborescence stub on which to create all possible arborescences
    :param graph:
    :return: a set of all possible arborescences
    """
    assert len(arborescence_stub.vertices) > 1 and not len(arborescence_stub.vertices) > len(graph)
    # print(f"filling the following arborescence: {arborescence_stub}")

    if len(arborescence_stub.vertices) == len(graph):
        if len(arborescence_stub.arcs) != len(graph) - 1:  # fundamental property of trees
            return []
        else:
            return [arborescence_stub]

    else:
        candidate_arcs = [arc for arc in graph.edges if
                          arc[0] not in arborescence_stub.vertices and arc[1] in arborescence_stub.vertices]
        if len(candidate_arcs) == 0:
            return []
        powerset = list(itertools.chain.from_iterable(
            itertools.combinations(candidate_arcs, r) for r in range(1, len(candidate_arcs) + 1, 1)))

        # Only sets that add a vertex exactly one time are valid candidates
        candidates = []
        for candidate_set in powerset:
            incident_vertices = set()
            valid = True
            for arc in candidate_set:
                if arc[0] in incident_vertices:
                    # print(f"removing the candidate: {candidate_set}")
                    valid = False
                    break
                incident_vertices.add(arc[0])
            if valid:
                candidates.append(candidate_set)

        # print(f"{len(candidates)} possible arc-combinations for the next step.")
        results = []
        for candidate_set in candidates:
            arborescence = copy(arborescence_stub)
            arborescence.add_arcs(candidate_set)
            # reduced_arcs = [arc for arc in arcs if arc not in candidate_set]
            # print(f"current candidate set: {candidate_set}")
            # print(f"one step deeper with arcs: {reduced_arcs}")
            reduced_graph = graph.copy()
            reduced_graph.remove_edges_from(candidate_arcs)
            results.extend(brute_force_arborescence(arborescence, reduced_graph))

        return results
Code example #30
def find_id_groups(G: nx.DiGraph) -> Generator:
    G = G.copy()
    remove_all_import_package_ids(G)
    fun_ids, var_ids = find_fun_ids_and_var_ids(G)
    fun_groups = find_function_usage_groups(G, fun_ids)
    var_groups = find_variable_usage_groups(G, var_ids)
    comps = set(frozenset(s) for s in fun_groups).union(set(frozenset(s) for s in var_groups))
    flat_comps = [e for s in comps for e in s]
    assert(len(flat_comps) == len(set(flat_comps)))
    # validate all comps are valid: have one variable dependent
    for c in comps:
        t = type_of_newly_defined_variable(G, set(c))
        if t:
            yield c, t
Code example #31
File: network.py Project: xdansun/pysparrow
    def __init__(self, path, version="0"):
        g = DiGraph()
        gaged_reaches = []
        db = openFile(path, "r")
        table = db.getNode("/", "networks/network" + str(version))
        reaches = {}
        # read data out of file
        for row in table:
            if str(row["ComID"]) != "-1":
                reaches[row["ComID"]] = Reach(self, row)
            else:
                reaches[row["ComID"]] = "-1"
                g.add_edge(Reach(self, row), "-1")
            if row["MonitoredFlag"] == "1":
                gaged_reaches.append(row["ComID"])
        db.close()
        # make network
        for comid in reaches.keys():
            to_comID = reaches[comid]._ToComID
            if to_comID != "-1":
                g.add_edge(reaches[comid], reaches[to_comID])
            else:
                g.add_edge(reaches[comid], -1)
        self._g_unbroken = g.copy()
        self._g_unbroken_reverse = self._g_unbroken.reverse()

        # break upstream of monitored reaches
        for i in gaged_reaches:
            if i != "-1":
                up = g.predecessors(reaches[i])
                for j in up:
                    if j != "-1":
                        g.remove_edge(j, reaches[i])
                    else:
                        g.remove_edge(j, "-1")
        self._g = g
        self._g_rev = g.reverse()
        self._version = str(version)
        self._path = str(path)
        self._reaches = reaches
        db.close()
Code example #32
class VariableDictionary(SequenceDict):
    """Ordered Dictionary to hold variable values. It maintains a dependency graph
    to check for cycles and to recalculate the necessary values when one of the fields is updated"""   
    expression = Expression()
    def __init__(self, *args, **kwargs):
        self.valueView = VariableDictionaryView(self)
        self.dependencyGraph = DiGraph()
        self.globaldict = dict()
        super(VariableDictionary, self).__init__(*args, **kwargs)

    def __getstate__(self):
        return dict((key, value) for key, value in self.__dict__.items() if key not in ['globaldict'])

    def __reduce__(self):
        theclass, theitems, inst_dict = super(VariableDictionary, self).__reduce__()
        inst_dict.pop('globaldict', None)
        return theclass, theitems, inst_dict

    def setGlobaldict(self, globaldict):
        self.globaldict = globaldict 
                
    def calculateDependencies(self):
        self.dependencyGraph = DiGraph()   # clear the old dependency graph in case parameters got removed
        for name, var in self.items():
            if hasattr(var, 'strvalue'):
                try:
                    var.value, dependencies = self.expression.evaluate(var.strvalue, self.valueView, listDependencies=True)
                    self.addDependencies(self.dependencyGraph, dependencies, name)
                    var.strerror = None
                except (KeyError, ValueError, ZeroDivisionError) as e:
                    logging.getLogger(__name__).warning( str(e) )
                    var.strerror = str(e)
                except Exception as e:
                    errstr = "Unable to evaluate the expression '{0}' for variable '{1}'.".format(var.strvalue,var.name)
                    logging.getLogger(__name__).warning( errstr )
                    var.strerror = errstr
            else:
                var.strerror = None
        self.recalculateAll()
        
    def merge(self, variabledict, globaldict=None, overwrite=False, linkNewToParent=False ):
        if globaldict is not None:
            self.globaldict = globaldict
        for name in list(self.keys()):
            if name not in variabledict:
                self.pop(name)
        for name, var in variabledict.items():
            if var.type in ['parameter', 'address'] and (name not in self or overwrite):
                self[name] = copy.deepcopy(var)
                if linkNewToParent:
                    self[name].useParentValue = True
        self.sortToMatch( list(variabledict.keys()) )        
        self.calculateDependencies()
                
    def __setitem__(self, key, value):
        super(VariableDictionary, self).__setitem__(key, value)
        if hasattr(value, 'strvalue'):
            self.setStrValue( key, value.strvalue )

    def __deepcopy__(self, memo):
        new = type(self)()
        new.globaldict = self.globaldict
        new.update( (name, copy.deepcopy(value)) for name, value in list(self.items()))
        new.dependencyGraph = self.dependencyGraph
        #calculateDependencies()
        return new
                
    def addDependencies(self, graph, dependencies, name):
        """add all the dependencies to name"""
        for dependency in dependencies:
            self.addEdgeNoCycle(graph, dependency, name)        
                
    def addEdgeNoCycle(self, graph, first, second ):
        """add the dependency to the graph, raise CyclicDependencyException in case of cyclic dependencies"""
        graph.add_edge(first, second)
        cycles = simple_cycles( graph )
        for cycle in cycles:
            raise CyclicDependencyException(cycle)
                
    def setStrValueIndex(self, index, strvalue):
        return self.setStrValue( self.keyAt(index), strvalue)
        
    def setStrValue(self, name, strvalue):
        """update the variable value with strvalue and recalculate as necessary"""  
        var = self[name]
        try:
            result, dependencies = self.expression.evaluate(strvalue, self.valueView, listDependencies=True )
            graph = self.dependencyGraph.copy()            # make a copy of the graph. In case of cyclic dependencies we do not want o leave any changes
            for edge in list(graph.in_edges([name])):      # remove all the inedges, dependencies from other variables might be gone
                graph.remove_edge(*edge)
            self.addDependencies(graph, dependencies, name) # add all new dependencies
            var.value = result
            var.strvalue = strvalue
            self.dependencyGraph = graph
            var.strerror = None
        except KeyError as e:
            var.strerror = str(e)
        except Exception as e:
            errstr = "Unable to evaluate the expression '{0}' for variable '{1}'.".format(strvalue,name)
            logging.getLogger(__name__).warning( errstr )
            var.strerror = errstr
        return self.recalculateDependent(name)

    def setParentStrValue(self, name, strvalue):
        var = self[name]
        try:
            result, dependencies = self.expression.evaluate(strvalue, self.valueView, listDependencies=True )
            graph = self.dependencyGraph.copy()            # make a copy of the graph. In case of cyclic dependencies we do not want o leave any changes
            for edge in list(graph.in_edges([name])):      # remove all the inedges, dependencies from other variables might be gone
                graph.remove_edge(*edge)
            self.addDependencies(graph, dependencies, name) # add all new dependencies
            var.parentValue = result
            var.parentStrvalue = strvalue
            self.dependencyGraph = graph
            var.strerror = None
        except KeyError as e:
            var.strerror = str(e)
        except Exception as e:
            errstr = 'Unable to evaluate the expression \'{0}\' for variable \'{1}\'.'.format(strvalue,name)
            logging.getLogger(__name__).warning( errstr )
            var.strerror = errstr
        return self.recalculateDependent(name)

    def setValue(self, name, value):
        """update the variable value with value and recalculate as necessary.
        This is done using existing dependencies."""
        var = self[name]
        try:
            var.value = value
            var.strvalue = ""
            var.strerror = None
        except KeyError as e:
            var.strerror = str(e)
        return self.recalculateDependent(name, returnResult=True)
        
    def setParentValue(self, name, value):
        """update the variable value with value and recalculate as necessary.
        This is done using existing dependencies."""
        var = self[name]
        try:
            var.parentValue = value
            var.parentStrvalue = ""
            var.strerror = None
        except KeyError as e:
            var.strerror = str(e)
        return self.recalculateDependent(name, returnResult=True)

    def setEncodingIndex(self, index, encoding):
        self.at(index).encoding = None if encoding == 'None' else str(encoding)
    
    def setEnabledIndex(self, index, enabled):
        self.at(index).enabled = enabled
       
    def recalculateDependent(self, node, returnResult=False):
        if self.dependencyGraph.has_node(node):
            generator = dfs_preorder_nodes(self.dependencyGraph, node)
            next(generator )   # skip the first, that is us
            nodelist = list(generator)  # make a list, we need it twice 
            result = [ self.recalculateNode(node) for node in nodelist ]                
            return (nodelist, result) if returnResult else nodelist     # return which ones were re-calculated, so gui can be updated 
        return (list(), list()) if returnResult else list()

    def recalculateNode(self, node):
        if node in self:
            var = self[node]
            if hasattr(var, 'strvalue'):
                try:
                    var.value = self.expression.evaluate(var.strvalue, self.valueView)
                    var.strerror = None
                except (KeyError, ValueError) as e:
                    var.strerror = str(e)
                except Exception as e:
                    errstr = 'Unable to evaluate the expression \'{0}\'.'.format(var.strvalue)
                    logging.getLogger(__name__).warning( errstr )
                    var.strerror = errstr
            else:
                logging.getLogger(__name__).warning("variable {0} does not have strvalue. Value is {1}".format(var, var.value))
            return var.value
        return None
            
    def recalculateAll(self):
        g = self.dependencyGraph.reverse()
        for node, indegree in g.in_degree():
            if indegree==0:
                for calcnode in dfs_postorder_nodes(g, node):
                    self.recalculateNode(calcnode)
                    
    def bareDictionaryCopy(self):
        return SequenceDict( self )