def inject_remove_node(self, lhs_node_id): """Inject a new node removal to the rule. This method removes from `p` all the nodes that map to the node with the id `lhs_node_id`. In addition, all the nodes from `rhs` that are mapped by the nodes removed in `p` are also removed. Parameters ---------- lhs_node_id Id of the node in `lhs` that should be removed by the rule. """ # remove corresponding nodes from p and rhs p_keys = keys_by_value(self.p_lhs, lhs_node_id) for k in p_keys: if k in self.p.nodes(): primitives.remove_node(self.p, k) if self.p_rhs[k] in self.rhs.nodes(): primitives.remove_node(self.rhs, self.p_rhs[k]) affected_nodes = keys_by_value(self.p_rhs, self.p_rhs[k]) for node in affected_nodes: del self.p_rhs[node] del self.p_lhs[k] return
def remove_node_rhs(self, n):
    """Remove a node from a rhs.

    Deletes the rhs node `n` together with all of its preimages in
    `p`: each `p` node mapping to `n` is removed from `p`, and its
    entries in the `p -> lhs` and `p -> rhs` homomorphisms are dropped.
    """
    for preimage in keys_by_value(self.p_rhs, n):
        primitives.remove_node(self.p, preimage)
        self.p_rhs.pop(preimage)
        self.p_lhs.pop(preimage)
    primitives.remove_node(self.rhs, n)
def subgraph(graph, nodes):
    """Get a subgraph induced by a set nodes.

    :param graph: graph to restrict.
    :param nodes: collection of node ids to keep.
    :return: a deep copy of `graph` restricted to `nodes`.
    """
    restricted = copy.deepcopy(graph)
    # Collect the nodes to drop from the original graph, then remove
    # them from the copy (iterating the original avoids mutating the
    # graph being traversed).
    to_drop = [n for n in graph.nodes() if n not in nodes]
    for n in to_drop:
        remove_node(restricted, n)
    return restricted
def _remove_node_rhs(self, node_id):
    """Remove a node from the `rhs`.

    This method removes a given node from the `rhs`; if there exist
    nodes from `p` that map to this node, they are removed as well,
    together with their entries in the `p -> lhs` and `p -> rhs`
    homomorphisms.
    """
    preimages = keys_by_value(self.p_rhs, node_id)
    for p_node in preimages:
        primitives.remove_node(self.p, p_node)
        self.p_rhs.pop(p_node)
        self.p_lhs.pop(p_node)
    primitives.remove_node(self.rhs, node_id)
def remove_node(self, n): """Remove a node in the graph.""" # remove corresponding nodes from p and rhs p_keys = keys_by_value(self.p_lhs, n) for k in p_keys: if k in self.p.nodes(): primitives.remove_node(self.p, k) if self.p_rhs[k] in self.rhs.nodes(): primitives.remove_node(self.rhs, self.p_rhs[k]) affected_nodes = keys_by_value(self.p_rhs, self.p_rhs[k]) for node in affected_nodes: del self.p_rhs[node] del self.p_lhs[k] return
def _get_rule_liftings(hierarchy, origin_id, rule, instance,
                       p_typing=None, ignore=None):
    """Compute the liftings of a restrictive rule to the graphs above the origin.

    For every graph of `hierarchy` reachable from `origin_id` by reversed
    typing edges (except `origin_id` itself and anything in `ignore`),
    builds a lifted rule by pulling back the rule's `lhs` and `p` along
    the typing into the origin, then prunes the lifted `p` according to
    the controlling relation `p_typing`.

    Parameters
    ----------
    hierarchy
        Hierarchy whose graphs type `origin_id`.
    origin_id
        Id of the graph the rule is applied to.
    rule
        The (restrictive) rule being applied.
    instance
        Matching of `rule.lhs` into the origin graph.
    p_typing : dict, optional
        Per-graph controlling relation restricting which `p` instances
        are kept in the lifted rules.
    ignore : list, optional
        Graph ids for which no lifting should be computed.

    Returns
    -------
    dict
        Maps each graph id to a dict with the lifted "rule", its
        "instance", and the homomorphisms "l_g_l" and "p_g_p" back to
        the original rule.
    """
    if ignore is None:
        ignore = []
    if p_typing is None:
        p_typing = {}
    liftings = {}
    # Only restrictive rules (removals/clones) propagate upwards.
    if rule.is_restrictive():
        for graph in hierarchy.bfs_tree(origin_id, reverse=True):
            if graph not in ignore:
                if graph != origin_id:
                    # find the lifting to a graph
                    if hierarchy.is_graph(graph):
                        origin_typing = hierarchy.get_typing(graph, origin_id)
                        # Compute L_G
                        l_g, l_g_g, l_g_l = pullback(
                            hierarchy.get_graph(graph), rule.lhs,
                            hierarchy.get_graph(origin_id),
                            origin_typing, instance)
                        # Compute canonical P_G
                        canonical_p_g, p_g_l_g, p_g_p = pullback(
                            l_g, rule.p, rule.lhs, l_g_l, rule.p_lhs)
                        # Remove controlled things from P_G
                        if graph in p_typing.keys():
                            # Re-key the controlling relation by L_G nodes
                            # (p_typing is keyed by nodes of `graph`).
                            l_g_factorization = {
                                keys_by_value(l_g_g, k)[0]: v
                                for k, v in p_typing[graph].items()
                            }
                            p_g_nodes_to_remove = set()
                            for n in canonical_p_g.nodes():
                                l_g_node = p_g_l_g[n]
                                # If corresponding L_G node is specified in
                                # the controlling relation, remove all
                                # the instances of P nodes not mentioned
                                # in this relations
                                if l_g_node in l_g_factorization.keys():
                                    p_nodes = l_g_factorization[l_g_node]
                                    if p_g_p[n] not in p_nodes:
                                        del p_g_p[n]
                                        del p_g_l_g[n]
                                        p_g_nodes_to_remove.add(n)
                            # Deferred removal: cannot delete while
                            # iterating canonical_p_g.nodes().
                            for n in p_g_nodes_to_remove:
                                primitives.remove_node(canonical_p_g, n)
                        liftings[graph] = {
                            "rule": Rule(p=canonical_p_g, lhs=l_g,
                                         p_lhs=p_g_l_g),
                            "instance": l_g_g,
                            "l_g_l": l_g_l,
                            "p_g_p": p_g_p
                        }
    return liftings
def _propagate_rule_up(graph, origin_typing, rule, instance, p_origin,
                       p_typing, inplace=False):
    """Propagate the restrictive part of a rule to a graph typed by the origin.

    Replays the rule's node removals, clones, node-attr removals, edge
    removals and edge-attr removals on `graph`, using `origin_typing` to
    locate the affected instances.

    Parameters
    ----------
    graph
        Graph to rewrite.
    origin_typing : dict
        Typing of `graph`'s nodes by the origin graph.
    rule
        Rule applied to the origin.
    instance : dict
        Matching of `rule.lhs` in the origin graph.
    p_origin : dict
        Mapping of `rule.p` nodes to origin nodes.
    p_typing : dict or None
        Controlling relation: maps a node of `graph` to the subset of
        `p` nodes it should follow when cloning.
    inplace : bool
        If True, `graph` is modified in place.

    Returns
    -------
    tuple
        (graph_prime, graph_prime_graph, graph_prime_origin): the rewritten
        graph, its homomorphism to the original `graph`, and its typing by
        the origin.
    """
    if inplace is True:
        graph_prime = graph
    else:
        graph_prime = copy.deepcopy(graph)

    if p_typing is None:
        p_typing = {}

    lhs_removed_nodes = rule.removed_nodes()
    lhs_removed_node_attrs = rule.removed_node_attrs()
    p_removed_edges = rule.removed_edges()
    p_removed_edge_attrs = rule.removed_edge_attrs()
    lhs_cloned_nodes = rule.cloned_nodes()

    graph_prime_graph = id_of(graph.nodes())
    graph_prime_origin = copy.deepcopy(origin_typing)

    # Replay node removals: drop every instance of a removed lhs node.
    for lhs_node in rule.lhs.nodes():
        origin_node = instance[lhs_node]
        g_nodes = keys_by_value(origin_typing, origin_node)
        for node in g_nodes:
            if lhs_node in lhs_removed_nodes:
                primitives.remove_node(graph_prime, node)
                del graph_prime_graph[node]
                del graph_prime_origin[node]
            else:
                graph_prime_origin[node] = origin_node

    # Replay clones: the first p-copy reuses the node, the rest are cloned.
    for lhs_node, p_nodes in lhs_cloned_nodes.items():
        nodes_to_clone = keys_by_value(origin_typing, instance[lhs_node])
        for node in nodes_to_clone:
            if node in p_typing.keys():
                # Controlled propagation: follow only the selected p nodes.
                p_nodes = p_typing[node]
            for i, p_node in enumerate(p_nodes):
                if i == 0:
                    graph_prime_origin[node] = p_origin[p_node]
                    graph_prime_graph[node] = node
                else:
                    new_name = primitives.clone_node(graph_prime, node)
                    graph_prime_origin[new_name] = p_origin[p_node]
                    graph_prime_graph[new_name] = node
            if len(p_nodes) == 0:
                # No p copy selected at all: the node disappears.
                primitives.remove_node(graph_prime, node)

    # Replay node attribute removals.
    for lhs_node, attrs in lhs_removed_node_attrs.items():
        nodes_to_remove_attrs = keys_by_value(
            origin_typing, instance[lhs_node])
        for node in nodes_to_remove_attrs:
            primitives.remove_node_attrs(graph_prime, node, attrs)

    # Replay edge removals on every pair of instances.
    for p_u, p_v in p_removed_edges:
        us = keys_by_value(graph_prime_origin, p_origin[p_u])
        vs = keys_by_value(graph_prime_origin, p_origin[p_v])
        for u in us:
            for v in vs:
                if (u, v) in graph_prime.edges():
                    primitives.remove_edge(graph_prime, u, v)

    # Replay edge attribute removals.
    for (p_u, p_v), attrs in p_removed_edge_attrs.items():
        us = keys_by_value(origin_typing, p_origin[p_u])
        vs = keys_by_value(origin_typing, p_origin[p_v])
        for u in us:
            for v in vs:
                # BUGFIX: was `primitives.removed_edge_attrs`, which does
                # not exist (cf. `remove_edge_attrs` used elsewhere in this
                # module) and raised AttributeError whenever a rule removed
                # edge attributes.
                primitives.remove_edge_attrs(graph_prime, u, v, attrs)

    return (graph_prime, graph_prime_graph, graph_prime_origin)
def _propagate_rule_to(graph, origin_typing, rule, instance, p_origin,
                       inplace=False):
    """Propagate the restrictive part of a rule to a graph typed by the origin.

    Same replay as `_propagate_rule_up` (node removals, clones, attr and
    edge removals) but without a controlling `p_typing` relation, and the
    typing `graph_prime_origin` is built from scratch.

    Parameters
    ----------
    graph
        Graph to rewrite.
    origin_typing : dict
        Typing of `graph`'s nodes by the origin graph.
    rule
        Rule applied to the origin.
    instance : dict
        Matching of `rule.lhs` in the origin graph.
    p_origin : dict
        Mapping of `rule.p` nodes to origin nodes.
    inplace : bool
        If True, `graph` is modified in place.

    Returns
    -------
    tuple
        (graph_prime, graph_prime_graph, graph_prime_origin).
    """
    if inplace is True:
        graph_prime = graph
    else:
        graph_prime = copy.deepcopy(graph)

    lhs_removed_nodes = rule.removed_nodes()
    lhs_removed_node_attrs = rule.removed_node_attrs()
    p_removed_edges = rule.removed_edges()
    p_removed_edge_attrs = rule.removed_edge_attrs()
    lhs_cloned_nodes = rule.cloned_nodes()

    graph_prime_graph = id_of(graph.nodes())
    graph_prime_origin = dict()

    # Replay node removals: drop every instance of a removed lhs node.
    for lhs_node in rule.lhs.nodes():
        origin_node = instance[lhs_node]
        g_nodes = keys_by_value(
            origin_typing, origin_node)
        for node in g_nodes:
            if lhs_node in lhs_removed_nodes:
                primitives.remove_node(
                    graph_prime, node)
                del graph_prime_graph[node]
            else:
                graph_prime_origin[node] = origin_node

    # Replay clones: the first p-copy reuses the node, the rest are cloned.
    for lhs_node, p_nodes in lhs_cloned_nodes.items():
        nodes_to_clone = keys_by_value(origin_typing, instance[lhs_node])
        for node in nodes_to_clone:
            for i, p_node in enumerate(p_nodes):
                if i == 0:
                    graph_prime_origin[node] = p_origin[p_node]
                    graph_prime_graph[node] = node
                else:
                    new_name = primitives.clone_node(
                        graph_prime, node)
                    graph_prime_origin[new_name] = p_origin[p_node]
                    graph_prime_graph[new_name] = node

    # Replay node attribute removals.
    for lhs_node, attrs in lhs_removed_node_attrs.items():
        nodes_to_remove_attrs = keys_by_value(
            origin_typing, instance[lhs_node])
        for node in nodes_to_remove_attrs:
            primitives.remove_node_attrs(
                graph_prime, node, attrs)

    # Replay edge removals on every pair of instances.
    for p_u, p_v in p_removed_edges:
        us = keys_by_value(graph_prime_origin, p_origin[p_u])
        vs = keys_by_value(graph_prime_origin, p_origin[p_v])
        for u in us:
            for v in vs:
                if (u, v) in graph_prime.edges():
                    primitives.remove_edge(
                        graph_prime, u, v)

    # Replay edge attribute removals.
    for (p_u, p_v), attrs in p_removed_edge_attrs.items():
        us = keys_by_value(origin_typing, p_origin[p_u])
        vs = keys_by_value(origin_typing, p_origin[p_v])
        for u in us:
            for v in vs:
                # BUGFIX: was `primitives.removed_edge_attrs`, which does
                # not exist (cf. `remove_edge_attrs` used elsewhere in this
                # module) and raised AttributeError whenever a rule removed
                # edge attributes.
                primitives.remove_edge_attrs(
                    graph_prime, u, v, attrs)

    return (graph_prime, graph_prime_graph, graph_prime_origin)
def pullback_complement(a, b, d, a_b, b_d, inplace=False):
    """Find the final pullback complement from a->b->d.

    Makes changes to d inplace.

    Given total homomorphisms a->b and a monic b->d, constructs the
    graph `c` with homomorphisms a->c and c->d completing the square.
    Returns the tuple (c, a_c, c_d).
    """
    check_homomorphism(a, b, a_b, total=True)
    check_homomorphism(b, d, b_d, total=True)

    # The construction below is only valid when b->d is injective.
    if not is_monic(b_d):
        raise InvalidHomomorphism(
            "Second homomorphism is not monic, "
            "cannot find final pullback complement!"
        )

    if inplace is True:
        c = d
    else:
        c = copy.deepcopy(d)

    a_c = dict()
    c_d = id_of(c.nodes())

    # Remove/clone nodes
    for b_node in b.nodes():
        # Preimages of b_node in `a` decide its fate in `c`.
        a_keys = keys_by_value(a_b, b_node)
        # Remove nodes
        if len(a_keys) == 0:
            remove_node(c, b_d[b_node])
            del c_d[b_d[b_node]]
        # Keep nodes
        elif len(a_keys) == 1:
            a_c[a_keys[0]] = b_d[b_node]
        # Clone nodes
        else:
            # First preimage keeps the original node; the rest get clones.
            i = 1
            for k in a_keys:
                if i == 1:
                    a_c[k] = b_d[b_node]
                    c_d[b_d[b_node]] = b_d[b_node]
                else:
                    new_name = clone_node(c, b_d[b_node])
                    a_c[k] = new_name
                    c_d[new_name] = b_d[b_node]
                i += 1

    # Remove edges
    for (b_n1, b_n2) in b.edges():
        a_keys_1 = keys_by_value(a_b, b_n1)
        a_keys_2 = keys_by_value(a_b, b_n2)
        if len(a_keys_1) > 0 and len(a_keys_2) > 0:
            for k1 in a_keys_1:
                for k2 in a_keys_2:
                    # Edges present in b but absent between the chosen
                    # preimages in a must be removed from c.
                    if d.is_directed():
                        if (k1, k2) not in a.edges() and\
                           (a_c[k1], a_c[k2]) in c.edges():
                            remove_edge(c, a_c[k1], a_c[k2])
                    else:
                        if (k1, k2) not in a.edges() and\
                           (k2, k1) not in a.edges():
                            if (a_c[k1], a_c[k2]) in d.edges() or\
                               (a_c[k2], a_c[k1]) in d.edges():
                                remove_edge(c, a_c[k1], a_c[k2])

    # Remove node attrs
    for a_node in a.nodes():
        # Attributes present in b but not in a are stripped from c.
        attrs_to_remove = dict_sub(
            b.node[a_b[a_node]],
            a.node[a_node]
        )
        remove_node_attrs(c, a_c[a_node], attrs_to_remove)
        # removed_node_attrs[a_c[a_node]] = attrs_to_remove

    # Remove edge attrs
    for (n1, n2) in a.edges():
        # Same for edge attributes.
        attrs_to_remove = dict_sub(
            get_edge(b, a_b[n1], a_b[n2]),
            get_edge(a, n1, n2)
        )
        remove_edge_attrs(c, a_c[n1], a_c[n2], attrs_to_remove)
        # removed_edge_attrs[(a_c[n1], a_c[n2])] = attrs_to_remove

    return (c, a_c, c_d)
def unfold_nugget(hie, nug_id, ag_id, mm_id, test=False):
    """Unfold a nugget with conflicts to create multiple nuggets.

    Splits shared loci/states of the nugget `nug_id` into one port per
    connected component (agent/region/residue), enumerates the maximal
    globally non-conflicting subsets of (port, action) edges, and builds
    one new nugget per subset via multi pullback-pushout.

    Parameters
    ----------
    hie
        Hierarchy containing the nugget, the action graph `ag_id` and
        the metamodel `mm_id`.
    nug_id
        Id of the nugget to unfold.
    ag_id
        Id of the action graph typing the nugget.
    mm_id
        Id of the metamodel typing the nugget.
    test : bool
        If True, return typings by the old nugget (for testing) instead
        of typings by the action graph and metamodel.

    Returns
    -------
    list
        If `test`, pairs (new_nugget, typing_by_old_nugget); otherwise
        triples (new_nugget, ag_typing, mm_typing).

    Changes w.r.t. the previous revision: removed a leftover debug
    `print` in `_complete_subsets` and the unused local
    `conflicting_edges` in `_valid_subsets` (computed but never read).
    """
    nug_gr = copy.deepcopy(hie.node[nug_id].graph)
    mm_typing = copy.deepcopy(hie.get_typing(nug_id, mm_id))
    ag_typing = copy.deepcopy(hie.get_typing(nug_id, ag_id))

    # create one new locus for each linked agent, region or residue linked to
    # a locus
    new_ports = {}  # new_port remember the loci/state it is created from
    old_ports = []
    non_comp_neighbors = {}
    for node in nug_gr.nodes():
        # move the state test to explicit "is_equal" nodes
        if mm_typing[node] == "state" and "val" in nug_gr.node[node]:
            for val in nug_gr.node[node]["val"]:
                id_prefix = "{}_{}".format(val, node)
                test_id = unique_node_id(nug_gr, id_prefix)
                add_node(nug_gr, test_id, {"val": val})
                mm_typing[test_id] = "is_equal"
                add_edge(nug_gr, test_id, node)
                # for testing: mirror the is_equal node in the action
                # graph and the real (non-copied) nugget
                if test:
                    ag = hie.node[ag_id].graph
                    ag_test_id = unique_node_id(ag, id_prefix)
                    add_node(ag, ag_test_id, {"val": val})
                    add_edge(ag, ag_test_id, ag_typing[node])
                    hie.edge[ag_id][mm_id].mapping[ag_test_id] = "is_equal"
                    real_nugget = hie.node[nug_id].graph
                    old_test_id = unique_node_id(real_nugget, id_prefix)
                    add_node(real_nugget, old_test_id, {"val": val})
                    add_edge(real_nugget, old_test_id, node)
                    hie.edge[nug_id][ag_id].mapping[old_test_id] = ag_test_id

        if mm_typing[node] in ["locus", "state"]:
            comp_neighbors = [
                comp for comp in nug_gr.successors(node)
                if mm_typing[comp] in ["agent", "region", "residue"]]
            other_neighbors = [
                other for other in (nug_gr.successors(node) +
                                    nug_gr.predecessors(node))
                if other not in comp_neighbors]
            old_ports.append(node)
            # One fresh port per component neighbor, wired to all the
            # non-component neighbors of the original locus/state.
            for comp in comp_neighbors:
                id_prefix = "{}_{}".format(node, comp)
                port_id = unique_node_id(nug_gr, id_prefix)
                add_node(nug_gr, port_id)
                mm_typing[port_id] = mm_typing[node]
                ag_typing[port_id] = ag_typing[node]
                new_ports[port_id] = node
                add_edge(nug_gr, port_id, comp)
                for other in other_neighbors:
                    # mod/is_equal act on the port; everything else is
                    # acted on by the port
                    if mm_typing[other] in ["mod", "is_equal"]:
                        add_edge(nug_gr, other, port_id)
                    else:
                        add_edge(nug_gr, port_id, other)
                non_comp_neighbors[port_id] = set(other_neighbors)

    # remove the old potentially shared between agents/region/residues loci
    for port in old_ports:
        remove_node(nug_gr, port)
        del mm_typing[port]
        del ag_typing[port]

    # associate the components nodes (agent,region, residue) to the ports
    components = {}
    for port in new_ports:
        components[port] = _agents_of_components(nug_gr, mm_typing, port)

    def _nonconflicting(port1, action_node1, port2, action_node2):
        """Decide whether two (port, action) edges can coexist."""
        typ1 = mm_typing[action_node1]
        typ2 = mm_typing[action_node2]
        if port1 == port2:
            if typ1 == typ2:
                return False
            if mm_typing[port1] == "state":
                return True
            if {typ1, typ2} & {"is_free", "is_bnd"}:
                return False
            different_loci = set(nug_gr.predecessors(action_node1)) !=\
                set(nug_gr.predecessors(action_node2))
            return different_loci
        elif action_node1 != action_node2:
            return True
        elif typ1 in ["mod", "is_equal", "is_free"]:
            return False
        else:
            return new_ports[port1] != new_ports[port2]

    def replace(node):
        """identify is_equal and mod nodes with same values"""
        if mm_typing[node] == "is_equal":
            return ("is_equal", str(nug_gr.node[node]["val"]))
        if mm_typing[node] == "mod":
            return ("mod", str(nug_gr.node[node]["val"]))
        return node

    def reduce_subsets(set_list):
        return set_list

    def subset_up_to_equivalence(set1, set2):
        set1 = {frozenset(map(replace, s)) for s in set1}
        set2 = {frozenset(map(replace, s)) for s in set2}
        return set1.issubset(set2)

    def replace2(node):
        """identify is_equal and mod nodes with same values"""
        if mm_typing[node] == "is_equal":
            return ("is_equal", str(nug_gr.node[node]["val"]),
                    frozenset(nug_gr.successors(node)))
        if mm_typing[node] == "mod":
            return ("mod", str(nug_gr.node[node]["val"]),
                    frozenset(nug_gr.successors(node)))
        return node

    def _equivalent_actions(act1, act2, edge_list):
        l1 = [(port, replace(node)) for (port, node) in edge_list
              if node == act1]
        l2 = [(port, replace(node)) for (port, node) in edge_list
              if node == act2]
        return l1 == l2

    def _equivalent_edge(p1, a1, p2, a2):
        return p1 == p2 and replace2(a1) == replace2(a2)

    def _valid_subsets(memo_dict, set_list):
        """build non conflicting sets of sets of nodes"""
        if set_list == []:
            return [[]]
        memo_key = frozenset(set_list)
        if memo_key in memo_dict:
            return memo_dict[memo_key]
        (port, a_node) = set_list[0]
        nonconflicting_sets =\
            [(port2, a_node2) for (port2, a_node2) in set_list[1:]
             if _nonconflicting(port, a_node, port2, a_node2)]
        equivalent_edges = [
            (p2, n2) for (p2, n2) in set_list
            if p2 == port and _equivalent_actions(a_node, n2, set_list)]
        new_set_list = [
            (p2, n2) for (p2, n2) in set_list[1:]
            if p2 != port or not _equivalent_actions(a_node, n2, set_list)]
        cond1 = (len([node for (_, node) in set_list[1:]
                      if node == a_node]) == 0 and
                 all(replace(n2) == replace(a_node)
                     for (p2, n2) in set_list[1:] if p2 == port))
        if nonconflicting_sets == new_set_list or cond1:
            memo_dict[memo_key] =\
                [sub + [(port, a_node)]
                 for sub in _valid_subsets(memo_dict, nonconflicting_sets)]
            return memo_dict[memo_key]
        else:
            without_current_edge = _valid_subsets(memo_dict, new_set_list)

            def conflict_with_removed_edges(edge_list):
                return all(
                    any(not _nonconflicting(p1, a_node1, p2, a_node2)
                        for (p2, a_node2) in edge_list)
                    for (p1, a_node1) in equivalent_edges)

            # with_conflict = list(filter(conflict_with_current_edge, without_current_edge))
            with_conflict = list(
                filter(conflict_with_removed_edges, without_current_edge))
            memo_dict[memo_key] =\
                with_conflict +\
                [sub + [(port, a_node)]
                 for sub in _valid_subsets(memo_dict, nonconflicting_sets)]
            return memo_dict[memo_key]

    def _complete_subsets(set_list):
        # BUGFIX: removed leftover debug `print(set_list)` that wrote to
        # stdout on every call.
        return [components[port] | {a_node} for (port, a_node) in set_list]

    def _remove_uncomplete_actions(set_list):
        """remove actions and test which are not connected to enough
        components"""
        labels = {node: 0 for node in nug_gr.nodes()}
        for nodes in set_list:
            for node in nodes:
                labels[node] += 1
        to_remove = set()
        for node in nug_gr.nodes():
            # binds need both of their endpoints; tests/mods need at
            # least one
            if (mm_typing[node] in ["bnd", "brk", "is_bnd"] and
                    labels[node] < 2):
                to_remove.add(node)
            if (mm_typing[node] in ["is_free", "mod", "is_equal"] and
                    labels[node] < 1):
                to_remove.add(node)
        return [nodes for nodes in set_list if not nodes & to_remove]

    port_action_list = [(port, a_node)
                        for (port, a_nodes) in non_comp_neighbors.items()
                        for a_node in a_nodes]

    # build globally non conflicting subsets and remove the uncomplete actions
    memo_dict = {}
    valid_ncss = {
        frozenset(map(
            frozenset,
            _remove_uncomplete_actions(_complete_subsets(set_list))))
        for set_list in _valid_subsets(memo_dict, port_action_list)}
    maximal_valid_ncss = valid_ncss

    # add the nodes that where not considered at all
    # because they are not connected to a locus or state
    nodes_with_ports = set.union(
        set.union(*(list(non_comp_neighbors.values()) + [set()])),
        set.union(*(list(components.values()) + [set()])))
    nodes_without_ports = set(nug_gr.nodes()) - nodes_with_ports

    # build the nuggets and add them to the hierarchy
    # as children of the old one for testing
    def _graph_of_ncs(ncs):
        sub_graphs = [(subgraph(nug_gr, nodes),
                       {node: node for node in nodes})
                      for nodes in ncs]
        sub_graphs.append((subgraph(nug_gr, nodes_without_ports),
                           {node: node for node in nodes_without_ports}))
        return multi_pullback_pushout(nug_gr, sub_graphs)

    valid_graphs = map(_graph_of_ncs, maximal_valid_ncss)
    new_nuggets = []
    for (new_nugget, new_typing) in valid_graphs:
        if test:
            typing_by_old_nugget = {}
            for node in new_nugget.nodes():
                if new_typing[node] in hie.node[nug_id].graph.nodes():
                    typing_by_old_nugget[node] = new_typing[node]
                else:
                    typing_by_old_nugget[node] = new_ports[new_typing[node]]
            new_nuggets.append((new_nugget, typing_by_old_nugget))
        else:
            new_ag_typing = compose_homomorphisms(ag_typing, new_typing)
            new_mm_typing = compose_homomorphisms(mm_typing, new_typing)
            new_nuggets.append((new_nugget, new_ag_typing, new_mm_typing))
    return new_nuggets
def compose_splices(hie, ag_id, mm_id, splices_list, new_rule_name):
    """Build a rewriting rule of the action graph from a list of chosen
    splice variants, each one represented as a subgraph of the action graph.

    The resulting rule is added to the hierarchy `hie` under a fresh id
    derived from `new_rule_name`, typed by the action graph `ag_id`.
    """
    known_agents = []
    # Accumulators for the rule under construction: lhs grows with each
    # newly seen agent; ppp is the preserved part.
    lhs = nx.DiGraph()
    ppp = nx.DiGraph()
    p_lhs = {}
    lhs_ag = {}
    action_graph = hie.node[ag_id].graph
    for spl in splices_list:
        mm_typing = hie.get_typing(spl, mm_id)
        ag_typing = hie.get_typing(spl, ag_id)
        splg = hie.node[spl].graph
        agents = [ag_typing[node] for node in splg
                  if mm_typing[node] == "agent"]
        if len(agents) != 1:
            raise ValueError("there must be exactly one agent in a splice")
        components = _components_of_agent(action_graph,
                                          hie.edge[ag_id][mm_id].mapping,
                                          agents[0])
        new_agent = action_graph.subgraph(components)
        newagent_ag = {n: n for n in new_agent.nodes()}

        # If no locus at all is present, we add them all to the variant
        if all(mm_typing[node] != "locus" for node in splg):
            ag_mm = hie.edge[ag_id][mm_id].mapping
            new_splg = copy.deepcopy(new_agent)
            for node in new_agent:
                if (ag_mm[node] != "locus" and
                        node not in [ag_typing[n] for n in splg]):
                    remove_node(new_splg, node)
            splg = new_splg
            ag_typing = {node: node for node in new_splg}
            mm_typing = compose_homomorphisms(ag_mm, ag_typing)

        if agents[0] not in known_agents:
            # First splice for this agent: extend lhs with the agent's
            # component subgraph via pullback-pushout.
            known_agents.append(agents[0])
            (new_lhs, lhs_newlhs, newagent_newlhs, newlhs_ag) =\
                pullback_pushout(lhs, new_agent, action_graph, lhs_ag,
                                 newagent_ag)
            ppp_newlhs = compose_homomorphisms(lhs_newlhs, p_lhs)
        else:
            # Agent already in lhs: reuse the current accumulators.
            new_lhs = lhs
            newlhs_ag = lhs_ag
            ppp_newlhs = p_lhs

        # Map each splice node to its (unique) image in the new lhs.
        splg_newlhs = {}
        for node in splg:
            imgs = keys_by_value(newlhs_ag, ag_typing[node])
            if len(imgs) != 1:
                raise ValueError("node {} should have exactly one"
                                 " image in new_agent ({})".format(node,
                                                                   imgs))
            splg_newlhs[node] = imgs[0]
        (tmp, tmp_ppp, tmp_splg) = pullback(ppp, splg, new_lhs,
                                            ppp_newlhs, splg_newlhs)
        # Only the loci shared between ppp and the splice are glued.
        loci_nodes = [
            node for node in tmp.nodes()
            if (compose_homomorphisms(mm_typing, tmp_splg)[node] ==
                "locus")]
        loci_graph = tmp.subgraph(loci_nodes)
        loci_graph_id = {node: node for node in loci_graph.nodes()}
        locigraph_splg = compose_homomorphisms(tmp_splg, loci_graph_id)
        locigraph_ppp = compose_homomorphisms(tmp_ppp, loci_graph_id)
        (new_ppp, ppp_newppp, splg_newppp) = pushout(loci_graph, ppp,
                                                     splg, locigraph_ppp,
                                                     locigraph_splg)
        newppp_newlhs = {}
        # maybe test but conflict should not happen
        for node in ppp.nodes():
            newppp_newlhs[ppp_newppp[node]] = ppp_newlhs[node]
        for node in splg.nodes():
            newppp_newlhs[splg_newppp[node]] = splg_newlhs[node]
        # Commit the accumulators for the next splice.
        ppp = new_ppp
        lhs = new_lhs
        p_lhs = newppp_newlhs
        lhs_ag = newlhs_ag

    lhs_mm_typing = compose_homomorphisms(hie.edge[ag_id][mm_id].mapping,
                                          lhs_ag)
    (lhs_loci, lhsloci_lhs) = subgraph_by_types(lhs, ["locus"],
                                                lhs_mm_typing)
    (final_ppp, _, _, finalppp_lhs) = pullback_pushout(lhs_loci, ppp, lhs,
                                                       lhsloci_lhs, p_lhs)
    # Identity rule on the preserved part: rhs == p.
    rule = Rule(final_ppp, lhs, final_ppp, finalppp_lhs)
    rule_id = hie.unique_graph_id(new_rule_name)
    rule_name = tree.get_valid_name(hie, ag_id, new_rule_name)
    hie.add_rule(rule_id, rule, {"name": rule_name})
    hie.add_rule_typing(rule_id, ag_id, lhs_ag,
                        compose_homomorphisms(lhs_ag, finalppp_lhs))
def pullback_complement(a, b, d, a_b, b_d, inplace=False):
    """Find the final pullback complement from a->b->d.

    Makes changes to d inplace.

    Given total homomorphisms a->b and a monic b->d, constructs the
    graph `c` with homomorphisms a->c and c->d completing the square.
    Returns the tuple (c, a_c, c_d).
    """
    check_homomorphism(a, b, a_b, total=True)
    check_homomorphism(b, d, b_d, total=True)

    # The construction below is only valid when b->d is injective.
    if not is_monic(b_d):
        raise InvalidHomomorphism("Second homomorphism is not monic, "
                                  "cannot find final pullback complement!")

    if inplace is True:
        c = d
    else:
        c = copy.deepcopy(d)

    a_c = dict()
    c_d = id_of(c.nodes())

    # Remove/clone nodes
    for b_node in b.nodes():
        # Preimages of b_node in `a` decide its fate in `c`.
        a_keys = keys_by_value(a_b, b_node)
        # Remove nodes
        if len(a_keys) == 0:
            remove_node(c, b_d[b_node])
            del c_d[b_d[b_node]]
        # Keep nodes
        elif len(a_keys) == 1:
            a_c[a_keys[0]] = b_d[b_node]
        # Clone nodes
        else:
            # First preimage keeps the original node; the rest get clones.
            i = 1
            for k in a_keys:
                if i == 1:
                    a_c[k] = b_d[b_node]
                    c_d[b_d[b_node]] = b_d[b_node]
                else:
                    new_name = clone_node(c, b_d[b_node])
                    a_c[k] = new_name
                    c_d[new_name] = b_d[b_node]
                i += 1

    # Remove edges
    for (b_n1, b_n2) in b.edges():
        a_keys_1 = keys_by_value(a_b, b_n1)
        a_keys_2 = keys_by_value(a_b, b_n2)
        if len(a_keys_1) > 0 and len(a_keys_2) > 0:
            for k1 in a_keys_1:
                for k2 in a_keys_2:
                    # Edges present in b but absent between the chosen
                    # preimages in a must be removed from c.
                    if d.is_directed():
                        if (k1, k2) not in a.edges() and\
                           (a_c[k1], a_c[k2]) in c.edges():
                            remove_edge(c, a_c[k1], a_c[k2])
                    else:
                        if (k1, k2) not in a.edges() and\
                           (k2, k1) not in a.edges():
                            if (a_c[k1], a_c[k2]) in d.edges() or\
                               (a_c[k2], a_c[k1]) in d.edges():
                                remove_edge(c, a_c[k1], a_c[k2])

    # Remove node attrs
    for a_node in a.nodes():
        # Attributes present in b but not in a are stripped from c.
        attrs_to_remove = dict_sub(b.node[a_b[a_node]],
                                   a.node[a_node])
        remove_node_attrs(c, a_c[a_node], attrs_to_remove)
        # removed_node_attrs[a_c[a_node]] = attrs_to_remove

    # Remove edge attrs
    for (n1, n2) in a.edges():
        # Same for edge attributes.
        attrs_to_remove = dict_sub(get_edge(b, a_b[n1], a_b[n2]),
                                   get_edge(a, n1, n2))
        remove_edge_attrs(c, a_c[n1], a_c[n2], attrs_to_remove)
        # removed_edge_attrs[(a_c[n1], a_c[n2])] = attrs_to_remove

    return (c, a_c, c_d)