def merge_graphs(hie, g_id, name1, name2, mapping, new_name):
    """Merge two graphs based on an identity relation between their nodes.

    We first build a span from the given relation; the new graph is then
    computed as the pushout.
    """
    new_name = get_valid_name(hie, g_id, new_name)
    id1 = child_from_name(hie, g_id, name1)
    id2 = child_from_name(hie, g_id, name2)
    g1 = hie.node[id1].graph
    g2 = hie.node[id2].graph

    # build the span from the relation
    if hie.directed:
        g0 = nx.DiGraph()
    else:
        g0 = nx.Graph()
    left_mapping = {}
    right_mapping = {}
    for (n1, n2) in mapping:
        new_node = unique_node_id(g0, n2)
        prim.add_node(g0, new_node)
        left_mapping[new_node] = n1
        right_mapping[new_node] = n2

    # compute the pushout
    (new_graph, g1_new_graph, g2_new_graph) = \
        pushout(g0, g1, g2, left_mapping, right_mapping)
    new_id = hie.unique_graph_id(new_name)
    new_attrs = _new_merged_graph_attrs(hie, id1, id2, new_name)
    hie.add_graph(new_id, new_graph, new_attrs)

    # recover the typings of the new pushout graph
    g1_typings = {t: hie.edge[id1][t] for t in hie.successors(id1)}
    g2_typings = {t: hie.edge[id2][t] for t in hie.successors(id2)}
    new_typings = typings_of_pushout(g1, g2, new_graph, g1_new_graph,
                                     g2_new_graph, g1_typings, g2_typings)
    for (typ_id, (typ_mapping, typ_total)) in new_typings.items():
        hie.add_typing(new_id, typ_id, typ_mapping, total=typ_total)

    # recover the typings of children by the new pushout graph
    new_id1 = _copy_graph(hie, new_id)
    for child in all_children(hie, id1):
        hie.add_edge(child, new_id1)
        tmp_typ = Typing(g1_new_graph,
                         total=hie.edge[child][id1].all_total())
        hie.edge[child][new_id1] = tmp_typ * hie.edge[child][id1]

    new_id2 = _copy_graph(hie, new_id)
    for child in all_children(hie, id2):
        hie.add_edge(child, new_id2)
        tmp_typ = Typing(g2_new_graph,
                         total=hie.edge[child][id2].all_total())
        hie.edge[child][new_id2] = tmp_typ * hie.edge[child][id2]

    _merge_hierarchy(hie, hie, new_id, new_id1)
    _merge_hierarchy(hie, hie, new_id, new_id2)
    hie.remove_node(new_id1)
    hie.remove_node(new_id2)
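
# The pushout used by merge_graphs glues g1 and g2 along the span g0 built
# from the relation.  Below is a minimal, self-contained sketch of that
# gluing for the simple case of injective span maps and no attribute
# handling; `toy_pushout` and its argument names are illustrative only, not
# the library's `pushout`.
import networkx as nx


def toy_pushout(g0, g1, g2, left, right):
    """Glue g1 and g2 along g0, with left: g0 -> g1 and right: g0 -> g2."""
    g1_map = {n: n for n in g1.nodes()}
    # rename g2 nodes that clash with g1 names but are not shared via g0
    g2_map = {n: ("%s_copy" % n if n in g1 else n) for n in g2.nodes()}
    for shared in g0.nodes():
        # both images of a shared node collapse to a single glued node
        g2_map[right[shared]] = g1_map[left[shared]]
    result = nx.DiGraph()
    result.add_nodes_from(set(g1_map.values()) | set(g2_map.values()))
    result.add_edges_from((g1_map[u], g1_map[v]) for u, v in g1.edges())
    result.add_edges_from((g2_map[u], g2_map[v]) for u, v in g2.edges())
    return result, g1_map, g2_map


# Example: g1 = a -> b, g2 = b -> c, relation [("b", "b")] gives a -> b -> c.
_g1 = nx.DiGraph([("a", "b")])
_g2 = nx.DiGraph([("b", "c")])
_g0 = nx.DiGraph()
_g0.add_node("b")
_merged, _, _ = toy_pushout(_g0, _g1, _g2, {"b": "b"}, {"b": "b"})
assert set(_merged.edges()) == {("a", "b"), ("b", "c")}
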
def concat(p1, p2):
    p2_in_typing = {
        typ_gr: compose_homomorphisms(typ_map, p2.in_morph)
        for (typ_gr, typ_map) in p2.typings.items()
    }
    p1_out_typing = {
        typ_gr: compose_homomorphisms(typ_map, p1.out_morph)
        for (typ_gr, typ_map) in p1.typings.items()
    }
    matchings = find_match(p2.in_pat, p1.out_pat, p2_in_typing,
                           p1_out_typing, p1.typing_graphs, decr_types=True)
    new_patterns = []
    for i, matching in enumerate(matchings):
        out1_p2 = compose_homomorphisms(p2.in_morph, matching)
        (p12, p1_p12, p2_p12) = pushout(p1.out_pat, p1.graph, p2.graph,
                                        p1.out_morph, out1_p2)
        in1_p12 = compose_homomorphisms(p1_p12, p1.in_morph)
        in2_p12 = compose_homomorphisms(p2_p12, p2.in_morph)
        out2_p12 = compose_homomorphisms(p2_p12, p2.out_morph)

        new_typings = {}
        for (typ_id, typ_map) in p1.typings.items():
            if typ_id not in new_typings.keys():
                new_typings[typ_id] = {}
            for node in p1.graph.nodes():
                if node in typ_map.keys():
                    new_typings[typ_id][p1_p12[node]] = typ_map[node]
        for (typ_id, typ_map) in p2.typings.items():
            if typ_id not in new_typings.keys():
                new_typings[typ_id] = {}
            for node in p2.graph.nodes():
                if node in typ_map.keys():
                    new_typings[typ_id][p2_p12[node]] = typ_map[node]

        (new_in, _, _, newin_p12) = pullback_pushout(p1.in_pat, p2.in_pat,
                                                     p12, in1_p12, in2_p12)
        # typing graphs of the composed pattern: union of both patterns'
        # typing graphs
        new_typing_graphs = dict(p1.typing_graphs)
        new_typing_graphs.update(p2.typing_graphs)
        new_pattern = Pattern("%s_%s_%s" % (p1.name, p2.name, i),
                              p12, new_in, newin_p12,
                              p2.out_morph, out2_p12,
                              new_typings, new_typing_graphs)
        new_patterns.append(new_pattern)
    return new_patterns
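
# `compose_homomorphisms(second, first)` is used throughout this module to
# chain node maps; its effect is ordinary function composition on
# dictionaries.  A sketch of that behaviour under an assumed name (not the
# library's actual code):
def compose_node_maps(second, first):
    """Return the map node -> second[first[node]], restricted to nodes whose
    intermediate image is defined in `second`."""
    return {node: second[img]
            for node, img in first.items() if img in second}


# e.g. composing a nugget -> action-graph map with an action-graph -> kami
# map types the nugget node directly by kami:
assert compose_node_maps({"agent_1": "agent"}, {"x": "agent_1"}) == \
    {"x": "agent"}
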
def new_action_graph(hie, nug_typings):
    """Replace the partial action graph by a total one.

    Nugget nodes that are not yet typed by the action graph are added to it
    (used for import from INDRA).
    """
    nugs = nug_typings.keys()
    ag = hie.node["action_graph"].graph
    ag_kami = {}
    for nug in nugs:
        nug_kami = hie.edge[nug]["kami"].mapping
        nug_ag = nug_typings[nug]
        for nug_node, ag_node in nug_ag.items():
            ag_kami[ag_node] = nug_kami[nug_node]

    for nug in nugs:
        graph = hie.node[nug].graph
        nug_kami = hie.edge[nug]["kami"].mapping
        nug_ag = nug_typings[nug]
        bot = nx.DiGraph()
        bot.add_nodes_from(nug_ag.keys())
        bot_nug = {node: node for node in bot.nodes()}
        bot_ag = {node: nug_ag[node] for node in bot.nodes()}
        (new_ag, ag_newag, nug_newag) = pushout(bot, ag, graph,
                                                bot_ag, bot_nug)
        for other_nug in nug_typings:
            if other_nug != nug:
                nug_typings[other_nug] =\
                    compose_homomorphisms(ag_newag, nug_typings[other_nug])
        nug_typings[nug] = nug_newag
        newag_kami = {}
        for node in ag:
            newag_kami[ag_newag[node]] = ag_kami[node]
        for node in graph:
            newag_kami[nug_newag[node]] = nug_kami[node]
        hie.remove_edge(nug, "kami")
        ag = new_ag
        ag_kami = newag_kami

    hie.remove_graph("action_graph")
    hie.add_graph("action_graph", ag, {"name": "action_graph"})
    hie.add_typing("action_graph", "kami", ag_kami, total=True)
    for nug in nug_typings:
        hie.add_typing(nug, "action_graph", nug_typings[nug], total=True)
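
# Illustration of the typing update performed in the loop above: when the
# pushout renames an action-graph node, every other nugget's typing is
# re-targeted through the renaming.  The names below are made up for the
# example; the dict comprehension mirrors what compose_homomorphisms does.
_example_ag_newag = {"site_1": "site_1_a"}        # old ag node -> new ag node
_example_old_typing = {"x": "site_1"}             # some other nugget's typing
_example_new_typing = {n: _example_ag_newag[t]
                       for n, t in _example_old_typing.items()}
assert _example_new_typing == {"x": "site_1_a"}
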
def apply_to(self, graph, instance, inplace=False):
    """Perform graph rewriting with the rule.

    Parameters
    ----------
    graph : nx.(Di)Graph
        Graph to rewrite with the rule.
    instance : dict
        Instance of the `lhs` pattern in the graph, defined by a
        dictionary where keys are nodes of `lhs` and values are
        nodes of the graph.
    inplace : bool, optional
        If `True`, the rewriting is performed in-place by applying
        primitive transformations to the graph object; otherwise the
        result of the rewriting is a new graph object.
        Default value is `False`.

    Returns
    -------
    g_prime : nx.(Di)Graph
        Result of the rewriting. If parameter `inplace` was `True`,
        `g_prime` is exactly the (transformed) input graph object
        `graph`.
    rhs_g_prime : dict
        Matching of the `rhs` in `g_prime`: a dictionary where keys
        are nodes of `rhs` and values are nodes of `g_prime`.
    """
    g_m, p_g_m, g_m_g = pullback_complement(
        self.p, self.lhs, graph, self.p_lhs, instance, inplace
    )
    g_prime, g_m_g_prime, rhs_g_prime = pushout(
        self.p, g_m, self.rhs, p_g_m, self.p_rhs, inplace)
    return (g_prime, rhs_g_prime)
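
# The two calls above are the standard double-pushout (DPO) steps: first cut
# out what the rule deletes (pullback complement), then glue in what it adds
# (pushout).  Below is a minimal, self-contained sketch of those two steps
# for the restricted case of an injective match and a rule without cloning
# or merging; the `toy_*` names are illustrative, not the library's
# implementations, and attributes are ignored.
import networkx as nx


def toy_restriction(p, lhs, graph, p_lhs, instance):
    """Remove from `graph` the matched lhs items not preserved in p."""
    g_m = graph.copy()
    p_g_m = {n: instance[p_lhs[n]] for n in p.nodes()}        # p -> g_m
    kept_nodes = set(p_g_m.values())
    kept_edges = {(p_g_m[u], p_g_m[v]) for u, v in p.edges()}
    for u, v in lhs.edges():
        if (instance[u], instance[v]) not in kept_edges:
            g_m.remove_edge(instance[u], instance[v])
    for n in lhs.nodes():
        if instance[n] not in kept_nodes:
            # dangling edges to unmatched context nodes are simply dropped
            g_m.remove_node(instance[n])
    return g_m, p_g_m


def toy_addition(p, g_m, rhs, p_g_m, p_rhs):
    """Add to `g_m` the rhs items that do not come from p (fresh names
    assumed for new rhs nodes)."""
    g_prime = g_m.copy()
    rhs_g_prime = {p_rhs[n]: p_g_m[n] for n in p.nodes()}     # rhs -> g_prime
    for n in rhs.nodes():
        if n not in rhs_g_prime:
            g_prime.add_node(n)
            rhs_g_prime[n] = n
    for u, v in rhs.edges():
        g_prime.add_edge(rhs_g_prime[u], rhs_g_prime[v])
    return g_prime, rhs_g_prime
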
def _rewrite_base(hierarchy, graph_id, rule, instance,
                  lhs_typing, rhs_typing, inplace=False):
    g_m, p_g_m, g_m_g =\
        pullback_complement(rule.p, rule.lhs,
                            hierarchy.node[graph_id].graph,
                            rule.p_lhs, instance, inplace)

    g_prime, g_m_g_prime, r_g_prime = pushout(rule.p, g_m, rule.rhs,
                                              p_g_m, rule.p_rhs, inplace)

    relation_updates = []
    for related_g in hierarchy.adjacent_relations(graph_id):
        relation_updates.append((graph_id, related_g))

    updated_homomorphisms = dict()

    for typing_graph in hierarchy.successors(graph_id):
        new_hom = copy.deepcopy(
            hierarchy.edge[graph_id][typing_graph].mapping)
        removed_nodes = set()
        new_nodes = dict()

        for node in rule.lhs.nodes():
            p_keys = keys_by_value(rule.p_lhs, node)
            # nodes that were removed
            if len(p_keys) == 0:
                removed_nodes.add(instance[node])
            elif len(p_keys) == 1:
                if typing_graph not in rhs_typing.keys() or\
                   rule.p_rhs[p_keys[0]] not in rhs_typing[typing_graph].keys():
                    if r_g_prime[rule.p_rhs[p_keys[0]]] in new_hom.keys():
                        removed_nodes.add(r_g_prime[rule.p_rhs[p_keys[0]]])
            # nodes that were cloned
            elif len(p_keys) > 1:
                for k in p_keys:
                    if typing_graph in rhs_typing.keys() and\
                       rule.p_rhs[k] in rhs_typing[typing_graph].keys():
                        new_nodes[r_g_prime[rule.p_rhs[k]]] =\
                            list(rhs_typing[typing_graph][rule.p_rhs[k]])[0]
                    else:
                        removed_nodes.add(r_g_prime[rule.p_rhs[k]])

        for node in rule.rhs.nodes():
            p_keys = keys_by_value(rule.p_rhs, node)
            # nodes that were added
            if len(p_keys) == 0:
                if typing_graph in rhs_typing.keys():
                    if node in rhs_typing[typing_graph].keys():
                        new_nodes[node] = list(
                            rhs_typing[typing_graph][node])[0]
            # nodes that were merged
            elif len(p_keys) > 1:
                for k in p_keys:
                    removed_nodes.add(p_g_m[k])
                # assign new type of node
                if typing_graph in rhs_typing.keys():
                    if node in rhs_typing[typing_graph].keys():
                        new_type = list(rhs_typing[typing_graph][node])
                        new_nodes[r_g_prime[node]] = new_type

        # update homomorphisms
        for n in removed_nodes:
            if n in new_hom.keys():
                del new_hom[n]
        new_hom.update(new_nodes)

        updated_homomorphisms.update({
            (graph_id, typing_graph): new_hom
        })

    return {
        "graph": (g_m, p_g_m, g_m_g, g_prime, g_m_g_prime, r_g_prime),
        "homomorphisms": updated_homomorphisms,
        "relations": relation_updates
    }
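
# `keys_by_value` above is used to find the preimages of an lhs/rhs node in
# the rule's preserved part p.  A sketch consistent with that usage (an
# assumed reimplementation, not necessarily the library's exact code):
def preimages(mapping, value):
    """All keys of `mapping` that map to `value`."""
    return [key for key, img in mapping.items() if img == value]


# preimages(rule.p_lhs, node) == []         -> the node is deleted
# preimages(rule.p_lhs, node) has >1 key    -> the node is cloned
# preimages(rule.p_rhs, node) has >1 key    -> several nodes are merged
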
def compose_splices(hie, ag_id, mm_id, splices_list, new_rule_name):
    """Build a rewriting rule of the action graph from a list of chosen
    splice variants, each one represented as a subgraph of the action graph.
    """
    known_agents = []
    lhs = nx.DiGraph()
    ppp = nx.DiGraph()
    p_lhs = {}
    lhs_ag = {}
    action_graph = hie.node[ag_id].graph
    for spl in splices_list:
        mm_typing = hie.get_typing(spl, mm_id)
        ag_typing = hie.get_typing(spl, ag_id)
        splg = hie.node[spl].graph
        agents = [ag_typing[node] for node in splg
                  if mm_typing[node] == "agent"]
        if len(agents) != 1:
            raise ValueError("there must be exactly one agent in a splice")
        components = _components_of_agent(action_graph,
                                          hie.edge[ag_id][mm_id].mapping,
                                          agents[0])
        new_agent = action_graph.subgraph(components)
        newagent_ag = {n: n for n in new_agent.nodes()}

        # if no locus at all is present, we add them all to the variant
        if all(mm_typing[node] != "locus" for node in splg):
            ag_mm = hie.edge[ag_id][mm_id].mapping
            new_splg = copy.deepcopy(new_agent)
            for node in new_agent:
                if (ag_mm[node] != "locus" and
                        node not in [ag_typing[n] for n in splg]):
                    remove_node(new_splg, node)
            splg = new_splg
            ag_typing = {node: node for node in new_splg}
            mm_typing = compose_homomorphisms(ag_mm, ag_typing)

        if agents[0] not in known_agents:
            known_agents.append(agents[0])
            (new_lhs, lhs_newlhs, newagent_newlhs, newlhs_ag) =\
                pullback_pushout(lhs, new_agent, action_graph,
                                 lhs_ag, newagent_ag)
            ppp_newlhs = compose_homomorphisms(lhs_newlhs, p_lhs)
        else:
            new_lhs = lhs
            newlhs_ag = lhs_ag
            ppp_newlhs = p_lhs

        splg_newlhs = {}
        for node in splg:
            imgs = keys_by_value(newlhs_ag, ag_typing[node])
            if len(imgs) != 1:
                raise ValueError("node {} should have exactly one"
                                 " image in new_agent ({})".format(node,
                                                                   imgs))
            splg_newlhs[node] = imgs[0]
        (tmp, tmp_ppp, tmp_splg) = pullback(ppp, splg, new_lhs,
                                            ppp_newlhs, splg_newlhs)
        loci_nodes = [
            node for node in tmp.nodes()
            if compose_homomorphisms(mm_typing, tmp_splg)[node] == "locus"
        ]
        loci_graph = tmp.subgraph(loci_nodes)
        loci_graph_id = {node: node for node in loci_graph.nodes()}
        locigraph_splg = compose_homomorphisms(tmp_splg, loci_graph_id)
        locigraph_ppp = compose_homomorphisms(tmp_ppp, loci_graph_id)
        (new_ppp, ppp_newppp, splg_newppp) = pushout(loci_graph, ppp, splg,
                                                     locigraph_ppp,
                                                     locigraph_splg)
        newppp_newlhs = {}
        # maybe test, but a conflict should not happen here
        for node in ppp.nodes():
            newppp_newlhs[ppp_newppp[node]] = ppp_newlhs[node]
        for node in splg.nodes():
            newppp_newlhs[splg_newppp[node]] = splg_newlhs[node]
        ppp = new_ppp
        lhs = new_lhs
        p_lhs = newppp_newlhs
        lhs_ag = newlhs_ag

    lhs_mm_typing = compose_homomorphisms(hie.edge[ag_id][mm_id].mapping,
                                          lhs_ag)
    (lhs_loci, lhsloci_lhs) = subgraph_by_types(lhs, ["locus"],
                                                lhs_mm_typing)
    (final_ppp, _, _, finalppp_lhs) = pullback_pushout(lhs_loci, ppp, lhs,
                                                       lhsloci_lhs, p_lhs)
    rule = Rule(final_ppp, lhs, final_ppp, finalppp_lhs)
    rule_id = hie.unique_graph_id(new_rule_name)
    rule_name = tree.get_valid_name(hie, ag_id, new_rule_name)
    hie.add_rule(rule_id, rule, {"name": rule_name})
    hie.add_rule_typing(rule_id, ag_id, lhs_ag,
                        compose_homomorphisms(lhs_ag, finalppp_lhs))
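
# `pullback(ppp, splg, new_lhs, ...)` above intersects the preserved part
# with the splice over their common image in the new lhs.  A minimal sketch
# of a graph pullback for total node maps (`toy_pullback` is illustrative
# only; the library version also handles partial maps and attributes):
import networkx as nx


def toy_pullback(g1, g2, g3, g1_g3, g2_g3):
    """Nodes are pairs agreeing in g3; edges are pairs of matching edges."""
    result = nx.DiGraph()
    result.add_nodes_from((x, y) for x in g1.nodes() for y in g2.nodes()
                          if g1_g3[x] == g2_g3[y])
    nodes = list(result.nodes())
    result.add_edges_from(((u1, u2), (v1, v2))
                          for (u1, u2) in nodes for (v1, v2) in nodes
                          if g1.has_edge(u1, v1) and g2.has_edge(u2, v2))
    g1_map = {pair: pair[0] for pair in result.nodes()}
    g2_map = {pair: pair[1] for pair in result.nodes()}
    return result, g1_map, g2_map
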