def get_canonical_curies_dict(curie: Union[str, List[str]], log: ARAXResponse) -> Dict[str, Dict[str, str]]:
    curies = convert_string_or_list_to_list(curie)
    try:
        synonymizer = NodeSynonymizer()
        log.debug(f"Sending NodeSynonymizer.get_canonical_curies() a list of {len(curies)} curies")
        canonical_curies_dict = synonymizer.get_canonical_curies(curies)
        log.debug("Got response back from NodeSynonymizer")
    except Exception:
        tb = traceback.format_exc()
        error_type, error, _ = sys.exc_info()
        log.error(f"Encountered a problem using NodeSynonymizer: {tb}", error_code=error_type.__name__)
        return {}
    else:
        if canonical_curies_dict is not None:
            unrecognized_curies = {input_curie for input_curie in canonical_curies_dict
                                   if not canonical_curies_dict.get(input_curie)}
            if unrecognized_curies:
                log.warning(f"NodeSynonymizer did not return canonical info for: {unrecognized_curies}")
            return canonical_curies_dict
        else:
            log.error("NodeSynonymizer returned None", error_code="NodeNormalizationIssue")
            return {}
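# A minimal usage sketch (not part of ARAX): illustrates the shape of the mapping returned by
# get_canonical_curies_dict(). The curie below is only an example; 'log' is assumed to be an
# ARAXResponse instance, and the 'preferred_curie'/'preferred_category' keys are the ones
# consumed by the helpers that follow.
#
#   canonical_info = get_canonical_curies_dict("CHEMBL.COMPOUND:CHEMBL112", log)
#   # canonical_info might look like:
#   # {"CHEMBL.COMPOUND:CHEMBL112": {"preferred_curie": "...", "preferred_category": "...", ...}}
#   # Unrecognized input curies map to None/empty info and trigger the warning above.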
def get_canonical_curies_list(curie: Union[str, List[str]], log: ARAXResponse) -> List[str]:
    curies = convert_to_list(curie)
    try:
        synonymizer = NodeSynonymizer()
        log.debug(f"Sending NodeSynonymizer.get_canonical_curies() a list of {len(curies)} curies")
        canonical_curies_dict = synonymizer.get_canonical_curies(curies)
        log.debug("Got response back from NodeSynonymizer")
    except Exception:
        tb = traceback.format_exc()
        error_type, error, _ = sys.exc_info()
        log.error(f"Encountered a problem using NodeSynonymizer: {tb}", error_code=error_type.__name__)
        return []
    else:
        if canonical_curies_dict is not None:
            recognized_input_curies = {input_curie for input_curie in canonical_curies_dict
                                       if canonical_curies_dict.get(input_curie)}
            unrecognized_curies = set(curies).difference(recognized_input_curies)
            if unrecognized_curies:
                log.warning(f"NodeSynonymizer did not return canonical info for: {unrecognized_curies}")
            canonical_curies = {canonical_curies_dict[recognized_curie].get('preferred_curie')
                                for recognized_curie in recognized_input_curies}
            # Include any original curies we weren't able to find a canonical version for
            canonical_curies.update(unrecognized_curies)
            if not canonical_curies:
                log.error("Final list of canonical curies is empty. This shouldn't happen!",
                          error_code="CanonicalCurieIssue")
            return list(canonical_curies)
        else:
            log.error("NodeSynonymizer returned None", error_code="NodeNormalizationIssue")
            return []
def get_preferred_categories(curie: Union[str, List[str]], log: ARAXResponse) -> Optional[List[str]]:
    curies = convert_to_list(curie)
    synonymizer = NodeSynonymizer()
    log.debug(f"Sending NodeSynonymizer.get_canonical_curies() a list of {len(curies)} curies")
    canonical_curies_dict = synonymizer.get_canonical_curies(curies)
    log.debug("Got response back from NodeSynonymizer")
    if canonical_curies_dict is not None:
        recognized_input_curies = {input_curie for input_curie in canonical_curies_dict
                                   if canonical_curies_dict.get(input_curie)}
        unrecognized_curies = set(curies).difference(recognized_input_curies)
        if unrecognized_curies:
            log.warning(f"NodeSynonymizer did not recognize: {unrecognized_curies}")
        preferred_categories = {canonical_curies_dict[recognized_curie].get('preferred_category')
                                for recognized_curie in recognized_input_curies}
        if preferred_categories:
            return list(preferred_categories)
        else:
            log.warning("Unable to find any preferred categories; will default to biolink:NamedThing")
            return ["biolink:NamedThing"]
    else:
        log.error("NodeSynonymizer returned None", error_code="NodeNormalizationIssue")
        return []
def _add_inverted_predicates(qg: QueryGraph, log: ARAXResponse) -> QueryGraph:
    # For now, we'll consider BOTH predicates in an inverse pair (TODO: later tailor to what we know is in KG2)
    qedge = next(qedge for qedge in qg.edges.values())
    response = requests.get("https://raw.githubusercontent.com/biolink/biolink-model/master/biolink-model.yaml")
    if response.status_code == 200:
        qedge.predicate = eu.convert_to_list(qedge.predicate)
        biolink_model = yaml.safe_load(response.text)
        inverse_predicates = set()
        for predicate in qedge.predicate:
            english_predicate = predicate.split(":")[-1].replace("_", " ")  # Converts to 'subclass of' format
            biolink_predicate_info = biolink_model["slots"].get(english_predicate)
            if biolink_predicate_info and "inverse" in biolink_predicate_info:
                english_inverse_predicate = biolink_predicate_info["inverse"]
                machine_inverse_predicate = f"biolink:{english_inverse_predicate.replace(' ', '_')}"
                inverse_predicates.add(machine_inverse_predicate)
                log.debug(f"Found inverse predicate for {predicate}: {machine_inverse_predicate}")
        qedge.predicate = list(set(qedge.predicate).union(inverse_predicates))
    else:
        log.warning(f"Cannot check for inverse predicates: Failed to load Biolink Model yaml file. "
                    f"(Page gave status {response.status_code}.)")
    return qg
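# A standalone sketch of the same inverse-predicate lookup, shown for a single predicate.
# It assumes network access to the biolink-model.yaml URL used above; lookup_inverse() is a
# hypothetical helper for illustration only and is not part of ARAX.
from typing import Optional as _Optional

import requests as _requests
import yaml as _yaml

_BIOLINK_MODEL_URL = "https://raw.githubusercontent.com/biolink/biolink-model/master/biolink-model.yaml"

def lookup_inverse(predicate: str) -> _Optional[str]:
    # Fetch the Biolink Model and look up this predicate's 'inverse' slot annotation
    biolink_model = _yaml.safe_load(_requests.get(_BIOLINK_MODEL_URL, timeout=10).text)
    english_predicate = predicate.split(":")[-1].replace("_", " ")  # e.g. 'subclass of'
    slot_info = biolink_model["slots"].get(english_predicate) or {}
    english_inverse = slot_info.get("inverse")
    return f"biolink:{english_inverse.replace(' ', '_')}" if english_inverse else None

# Example: lookup_inverse("biolink:subclass_of") should yield "biolink:superclass_of"
# (per the Biolink Model's 'inverse' annotation), mirroring what _add_inverted_predicates() adds.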
def _send_query_to_kp(self, query_graph: QueryGraph, log: ARAXResponse) -> Dict[str, any]:
    # Send query to their API (stripping down qnodes/qedges to only the properties they like)
    stripped_qnodes = []
    for qnode_key, qnode in query_graph.nodes.items():
        stripped_qnode = {'id': qnode_key, 'type': qnode.category}
        if qnode.id:
            stripped_qnode['curie'] = qnode.id
        stripped_qnodes.append(stripped_qnode)
    qedge_key = next(qedge_key for qedge_key in query_graph.edges)  # Our query graph is single-edge
    qedge = query_graph.edges[qedge_key]
    stripped_qedge = {'id': qedge_key,
                      'source_id': qedge.subject,
                      'target_id': qedge.object,
                      'type': list(self.accepted_edge_types)[0]}
    source_stripped_qnode = next(qnode for qnode in stripped_qnodes if qnode['id'] == qedge.subject)
    input_curies = eu.convert_string_or_list_to_list(source_stripped_qnode['curie'])
    combined_response = dict()
    for input_curie in input_curies:  # Until we have batch querying, ping them one-by-one for each input curie
        log.debug(f"Sending {qedge_key} query to {self.kp_name} for {input_curie}")
        source_stripped_qnode['curie'] = input_curie
        kp_response = requests.post(self.kp_query_endpoint,
                                    json={'message': {'query_graph': {'nodes': stripped_qnodes,
                                                                      'edges': [stripped_qedge]}}},
                                    headers={'accept': 'application/json'})
        if kp_response.status_code != 200:
            log.warning(f"{self.kp_name} KP API returned response of {kp_response.status_code}")
        else:
            kp_response_json = kp_response.json()
            if kp_response_json.get('results'):
                if not combined_response:
                    combined_response = kp_response_json
                else:
                    combined_response['knowledge_graph']['nodes'] += kp_response_json['knowledge_graph']['nodes']
                    combined_response['knowledge_graph']['edges'] += kp_response_json['knowledge_graph']['edges']
                    combined_response['results'] += kp_response_json['results']
    return combined_response
def get_curie_names(curie: Union[str, List[str]], log: ARAXResponse) -> Dict[str, str]:
    curies = convert_to_list(curie)
    synonymizer = NodeSynonymizer()
    log.debug(f"Looking up names for {len(curies)} input curies using NodeSynonymizer")
    synonymizer_info = synonymizer.get_normalizer_results(curies)
    curie_to_name_map = dict()
    if synonymizer_info:
        recognized_input_curies = {input_curie for input_curie in synonymizer_info
                                   if synonymizer_info.get(input_curie)}
        unrecognized_curies = set(curies).difference(recognized_input_curies)
        if unrecognized_curies:
            log.warning(f"NodeSynonymizer did not recognize: {unrecognized_curies}")
        input_curies_without_matching_node = set()
        for input_curie in recognized_input_curies:
            equivalent_nodes = synonymizer_info[input_curie]["nodes"]
            # Find the 'node' in the synonymizer corresponding to this curie
            input_curie_nodes = [node for node in equivalent_nodes if node["identifier"] == input_curie]
            if not input_curie_nodes:
                # Try looking for slight variation (KG2 vs. SRI discrepancy): "KEGG:C02700" vs. "KEGG.COMPOUND:C02700"
                input_curie_stripped = input_curie.replace(".COMPOUND", "")
                input_curie_nodes = [node for node in equivalent_nodes if node["identifier"] == input_curie_stripped]
            # Record the name for this input curie
            if input_curie_nodes:
                curie_to_name_map[input_curie] = input_curie_nodes[0].get("label")
            else:
                input_curies_without_matching_node.add(input_curie)
        if input_curies_without_matching_node:
            log.warning(f"No matching nodes found in NodeSynonymizer for these input curies: "
                        f"{input_curies_without_matching_node}. Cannot determine their specific names.")
    else:
        log.error("NodeSynonymizer returned None", error_code="NodeNormalizationIssue")
    return curie_to_name_map
def check_for_canonical_predicates(kg: QGOrganizedKnowledgeGraph, kp_name: str,
                                   log: ARAXResponse) -> QGOrganizedKnowledgeGraph:
    non_canonical_predicates_used = set()
    biolink_helper = BiolinkHelper()
    for qedge_id, edges in kg.edges_by_qg_id.items():
        for edge in edges.values():
            canonical_predicate = biolink_helper.get_canonical_predicates(edge.predicate)[0]
            if canonical_predicate != edge.predicate:
                non_canonical_predicates_used.add(edge.predicate)
                _ = flip_edge(edge, canonical_predicate)
    if non_canonical_predicates_used:
        log.warning(f"{kp_name}: Found edges in {kp_name}'s answer that use non-canonical "
                    f"predicates: {non_canonical_predicates_used}. I corrected these.")
    return kg
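# Hypothetical illustration of the check above (not ARAX code): get_canonical_predicates()
# returns a list, so its first element is compared against the edge's predicate, and any edge
# whose predicate is not in canonical form gets flipped. For instance, an edge using
# biolink:treated_by would be flagged, since its canonical form (per the Biolink Model's
# canonical-direction annotations) is biolink:treats.
#
#   helper = BiolinkHelper()
#   helper.get_canonical_predicates("biolink:treated_by")[0]   # expected: "biolink:treats"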
def _answer_query_using_plover(qg: QueryGraph,
                               log: ARAXResponse) -> Tuple[Dict[str, Dict[str, Set[Union[str, int]]]], int]:
    rtxc = RTXConfiguration()
    rtxc.live = "Production"
    log.debug("Sending query to Plover")
    response = requests.post(f"{rtxc.plover_url}/query",
                             json=qg.to_dict(),
                             headers={'accept': 'application/json'})
    if response.status_code == 200:
        log.debug("Got response back from Plover")
        return response.json(), response.status_code
    else:
        log.warning(f"Plover returned a status code of {response.status_code}. Response was: {response.text}")
        return dict(), response.status_code
def update_results_with_overlay_edge(subject_knode_key: str, object_knode_key: str, kedge_key: str,
                                     message: Message, log: ARAXResponse):
    try:
        new_edge_binding = EdgeBinding(id=kedge_key)
        for result in message.results:
            for qedge_key in result.edge_bindings.keys():
                if kedge_key not in set([x.id for x in result.edge_bindings[qedge_key]]):
                    if qedge_key not in message.query_graph.edges:
                        log.warning("Encountered a result edge binding which does not exist in the query graph")
                        continue
                    subject_nodes = [x.id for x in result.node_bindings[message.query_graph.edges[qedge_key].subject]]
                    object_nodes = [x.id for x in result.node_bindings[message.query_graph.edges[qedge_key].object]]
                    result_nodes = set(subject_nodes).union(set(object_nodes))
                    if subject_knode_key in result_nodes and object_knode_key in result_nodes:
                        result.edge_bindings[qedge_key].append(new_edge_binding)
    except Exception:
        tb = traceback.format_exc()
        log.error(f"Error encountered when modifying results with overlay edge "
                  f"({subject_knode_key})-{kedge_key}-({object_knode_key}):\n{tb}",
                  error_code="UncaughtError")
def get_node_pairs_to_overlay(subject_qnode_key: str, object_qnode_key: str, query_graph: QueryGraph,
                              knowledge_graph: KnowledgeGraph, log: ARAXResponse) -> Set[Tuple[str, str]]:
    """
    This function determines which combinations of subject/object nodes in the KG need to be overlayed
    (e.g., have a virtual edge added between them). It makes use of Resultify to determine which combinations
    of subject and object nodes may actually appear together in the same results. (See issue #1069.)
    If it fails to narrow down the node pairs for whatever reason, it defaults to returning all possible
    combinations of subject/object nodes.
    """
    log.debug(f"Narrowing down {subject_qnode_key}--{object_qnode_key} node pairs to overlay")
    kg_nodes_by_qg_id = get_node_ids_by_qg_id(knowledge_graph)
    kg_edges_by_qg_id = get_edge_ids_by_qg_id(knowledge_graph)
    # Grab the portion of the QG already 'expanded' (aka, present in the KG)
    sub_query_graph = QueryGraph(nodes={key: qnode for key, qnode in query_graph.nodes.items()
                                        if key in set(kg_nodes_by_qg_id)},
                                 edges={key: qedge for key, qedge in query_graph.edges.items()
                                        if key in set(kg_edges_by_qg_id)})
    # Compute results using Resultify so we can see which nodes appear in the same results
    resultifier = ARAXResultify()
    sub_response = ARAXResponse()
    sub_response.envelope = Response()
    sub_response.envelope.message = Message()
    sub_message = sub_response.envelope.message
    sub_message.query_graph = sub_query_graph
    sub_message.knowledge_graph = KnowledgeGraph(nodes=knowledge_graph.nodes.copy(),
                                                 edges=knowledge_graph.edges.copy())
    resultify_response = resultifier.apply(sub_response, {})
    # Figure out which node pairs appear together in one or more results
    if resultify_response.status == 'OK':
        node_pairs = set()
        for result in sub_message.results:
            subject_curies_in_this_result = {node_binding.id
                                             for key, node_binding_list in result.node_bindings.items()
                                             for node_binding in node_binding_list if key == subject_qnode_key}
            object_curies_in_this_result = {node_binding.id
                                            for key, node_binding_list in result.node_bindings.items()
                                            for node_binding in node_binding_list if key == object_qnode_key}
            pairs_in_this_result = set(itertools.product(subject_curies_in_this_result,
                                                         object_curies_in_this_result))
            node_pairs = node_pairs.union(pairs_in_this_result)
        log.debug(f"Identified {len(node_pairs)} node pairs to overlay (with help of Resultify)")
        if node_pairs:
            return node_pairs
    # Back up to using the old (O(n^2)) method of all combinations of subject/object nodes in the KG
    log.warning("Failed to narrow down node pairs to overlay; defaulting to all possible combinations")
    return set(itertools.product(kg_nodes_by_qg_id[subject_qnode_key], kg_nodes_by_qg_id[object_qnode_key]))
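# Illustration of the O(n^2) fallback above, using hypothetical node IDs: if the Resultify-based
# narrowing fails, every subject/object combination is returned.
#
#   import itertools
#   subject_kg_nodes = {"CHEMBL.COMPOUND:CHEMBL112", "CHEMBL.COMPOUND:CHEMBL25"}
#   object_kg_nodes = {"MONDO:0005148"}
#   fallback_pairs = set(itertools.product(subject_kg_nodes, object_kg_nodes))
#   # -> {("CHEMBL.COMPOUND:CHEMBL112", "MONDO:0005148"), ("CHEMBL.COMPOUND:CHEMBL25", "MONDO:0005148")}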
def _answer_query_using_plover(qg: QueryGraph,
                               log: ARAXResponse) -> Tuple[Dict[str, Dict[str, Union[set, dict]]], int]:
    rtxc = RTXConfiguration()
    rtxc.live = "Production"
    # First prep the query graph (requires some minor additions for Plover)
    dict_qg = qg.to_dict()
    dict_qg["include_metadata"] = True  # Ask Plover to return node/edge objects (not just IDs)
    dict_qg["respect_predicate_symmetry"] = True  # Ignore direction for symmetric predicates, enforce for asymmetric
    # Allow subclass_of reasoning for qnodes with a small number of curies
    for qnode in dict_qg["nodes"].values():
        if qnode.get("ids") and len(qnode["ids"]) < 5:
            if "allow_subclasses" not in qnode or qnode["allow_subclasses"] is None:
                qnode["allow_subclasses"] = True
    # Then send the actual query
    response = requests.post(f"{rtxc.plover_url}/query",
                             json=dict_qg,
                             timeout=60,
                             headers={'accept': 'application/json'})
    if response.status_code == 200:
        log.debug("Got response back from Plover")
        return response.json(), response.status_code
    else:
        log.warning(f"Plover returned a status code of {response.status_code}. Response was: {response.text}")
        return dict(), response.status_code
def get_curie_synonyms(curie: Union[str, List[str]], log: ARAXResponse) -> List[str]:
    curies = convert_string_or_list_to_list(curie)
    try:
        synonymizer = NodeSynonymizer()
        log.debug(f"Sending NodeSynonymizer.get_equivalent_nodes() a list of {len(curies)} curies")
        equivalent_curies_dict = synonymizer.get_equivalent_nodes(curies, kg_name="KG2")
        log.debug("Got response back from NodeSynonymizer")
    except Exception:
        tb = traceback.format_exc()
        error_type, error, _ = sys.exc_info()
        log.error(f"Encountered a problem using NodeSynonymizer: {tb}", error_code=error_type.__name__)
        return []
    else:
        if equivalent_curies_dict is not None:
            curies_missing_info = {curie for curie in equivalent_curies_dict
                                   if not equivalent_curies_dict.get(curie)}
            if curies_missing_info:
                log.warning(f"NodeSynonymizer did not find any equivalent curies for: {curies_missing_info}")
            equivalent_curies = {curie for curie_dict in equivalent_curies_dict.values() if curie_dict
                                 for curie in curie_dict}
            all_curies = equivalent_curies.union(set(curies))  # Make sure even curies without synonyms are included
            return sorted(list(all_curies))
        else:
            log.error("NodeSynonymizer returned None", error_code="NodeNormalizationIssue")
            return []
def sort_kps_for_asyncio(kp_names: Union[List[str], Set[str]], log: ARAXResponse) -> List[str]:
    # Order KPs such that those with longer requests will tend to be kicked off earlier
    kp_names = set(kp_names)
    asyncio_start_order = ["infores:connections-hypothesis",
                           "infores:biothings-explorer",
                           "infores:biothings-multiomics-biggim-drug-response",
                           "infores:biothings-multiomics-clinical-risk",
                           "infores:biothings-multiomics-wellness",
                           "infores:spoke",
                           "infores:biothings-tcga-mut-freq",
                           "infores:icees-dili",
                           "infores:icees-asthma",
                           "infores:cohd",
                           "infores:molepro",
                           "infores:rtx-kg2",
                           "infores:genetics-data-provider",
                           "infores:arax-normalized-google-distance",
                           "infores:arax-drug-treats-disease"]
    unordered_kps = kp_names.difference(set(asyncio_start_order))
    if unordered_kps:
        log.warning(f"Selected KP(s) don't have asyncio start ordering specified: {unordered_kps}")
        asyncio_start_order = list(unordered_kps) + asyncio_start_order
    ordered_kps = [kp for kp in asyncio_start_order if kp in kp_names]
    return ordered_kps
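# Minimal usage sketch (hypothetical KP selection): KPs that tend to take longer are kicked off
# first, per the asyncio_start_order list above. 'log' is assumed to be an ARAXResponse instance.
#
#   sort_kps_for_asyncio({"infores:rtx-kg2", "infores:cohd", "infores:biothings-explorer"}, log)
#   # -> ["infores:biothings-explorer", "infores:cohd", "infores:rtx-kg2"]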
def _pre_process_query_graph(self, query_graph: QueryGraph, log: ARAXResponse) -> QueryGraph:
    for qnode_key, qnode in query_graph.nodes.items():
        # Convert node types to preferred format and verify we can do this query
        formatted_qnode_categories = {self.node_category_overrides_for_kp.get(qnode_category, qnode_category)
                                      for qnode_category in eu.convert_string_or_list_to_list(qnode.category)}
        accepted_qnode_categories = formatted_qnode_categories.intersection(self.accepted_node_categories)
        if not accepted_qnode_categories:
            log.error(f"{self.kp_name} can only be used for queries involving {self.accepted_node_categories} "
                      f"and QNode {qnode_key} has category '{qnode.category}'",
                      error_code="UnsupportedQueryForKP")
            return query_graph
        else:
            qnode.category = list(accepted_qnode_categories)[0]
        # Convert curies to equivalent curies accepted by the KP (depending on qnode type)
        if qnode.id:
            equivalent_curies = eu.get_curie_synonyms(qnode.id, log)
            desired_curies = [curie for curie in equivalent_curies
                              if curie.startswith(f"{self.kp_preferred_prefixes[qnode.category]}:")]
            if desired_curies:
                qnode.id = desired_curies if len(desired_curies) > 1 else desired_curies[0]
                log.debug(f"Converted qnode {qnode_key} curie to {qnode.id}")
            else:
                log.warning(f"Could not convert qnode {qnode_key} curie(s) to preferred prefix "
                            f"({self.kp_preferred_prefixes[qnode.category]})")
    return query_graph
def create_results(qg: QueryGraph, kg: QGOrganizedKnowledgeGraph, log: ARAXResponse,
                   overlay_fet: bool = False, rank_results: bool = False,
                   qnode_key_to_prune: Optional[str] = None) -> Response:
    regular_format_kg = convert_qg_organized_kg_to_standard_kg(kg)
    resultifier = ARAXResultify()
    prune_response = ARAXResponse()
    prune_response.envelope = Response()
    prune_response.envelope.message = Message()
    prune_message = prune_response.envelope.message
    prune_message.query_graph = qg
    prune_message.knowledge_graph = regular_format_kg

    if overlay_fet:
        log.debug("Using FET to assess quality of intermediate answers in Expand")
        connected_qedges = [qedge for qedge in qg.edges.values()
                            if qedge.subject == qnode_key_to_prune or qedge.object == qnode_key_to_prune]
        qnode_pairs_to_overlay = {(qedge.subject if qedge.subject != qnode_key_to_prune else qedge.object,
                                   qnode_key_to_prune)
                                  for qedge in connected_qedges}
        for qnode_pair in qnode_pairs_to_overlay:
            pair_string_id = f"{qnode_pair[0]}-->{qnode_pair[1]}"
            log.debug(f"Overlaying FET for {pair_string_id} (from Expand)")
            fet_qedge_key = f"FET{pair_string_id}"
            try:
                overlayer = ARAXOverlay()
                params = {"action": "fisher_exact_test",
                          "subject_qnode_key": qnode_pair[0],
                          "object_qnode_key": qnode_pair[1],
                          "virtual_relation_label": fet_qedge_key}
                overlayer.apply(prune_response, params)
            except Exception as error:
                exception_type, exception_value, exception_traceback = sys.exc_info()
                log.warning(f"An uncaught error occurred when overlaying with FET during Expand's pruning: {error}: "
                            f"{repr(traceback.format_exception(exception_type, exception_value, exception_traceback))}")
            if prune_response.status != "OK":
                log.warning(f"FET produced an error when Expand tried to use it to prune the KG. "
                            f"Log was: {prune_response.show()}")
                log.debug("Will continue pruning without overlaying FET")
                # Get rid of any FET edges that might be in the KG/QG, since this step failed
                remove_edges_with_qedge_key(prune_response.envelope.message.knowledge_graph, fet_qedge_key)
                qg.edges.pop(fet_qedge_key, None)
                prune_response.status = "OK"  # Clear this so we can continue without overlaying
            else:
                if fet_qedge_key in qg.edges:
                    qg.edges[fet_qedge_key].option_group_id = f"FET_VIRTUAL_GROUP_{pair_string_id}"
                else:
                    log.warning("Attempted to overlay FET from Expand, but it didn't work. Pruning without it.")

    # Create results and rank them as appropriate
    log.debug("Calling Resultify from Expand for pruning")
    resultifier.apply(prune_response, {})
    if rank_results:
        try:
            log.debug("Ranking Expand's intermediate pruning results")
            ranker = ARAXRanker()
            ranker.aggregate_scores_dmk(prune_response)
        except Exception as error:
            exception_type, exception_value, exception_traceback = sys.exc_info()
            log.error(f"An uncaught error occurred when attempting to rank results during Expand's pruning: "
                      f"{error}: {repr(traceback.format_exception(exception_type, exception_value, exception_traceback))}. "
                      f"Log was: {prune_response.show()}", error_code="UncaughtARAXiError")
            # Give any unranked results a score of 0
            for result in prune_response.envelope.message.results:
                if result.score is None:
                    result.score = 0
    return prune_response
def _validate_and_pre_process_input(qg: QueryGraph, valid_bte_inputs_dict: Dict[str, Set[str]],
                                    enforce_directionality: bool, use_synonyms: bool,
                                    log: ARAXResponse) -> Tuple[str, str]:
    # Make sure we have a valid one-hop query graph
    if len(qg.edges) != 1 or len(qg.nodes) != 2:
        log.error(f"BTE can only accept one-hop query graphs (your QG has {len(qg.nodes)} nodes and "
                  f"{len(qg.edges)} edges)", error_code="InvalidQueryGraph")
        return "", ""
    qedge_key = next(qedge_key for qedge_key in qg.edges)
    qedge = qg.edges[qedge_key]

    # Make sure at least one of our qnodes has a curie
    qnodes_with_curies = [qnode_key for qnode_key, qnode in qg.nodes.items() if qnode.id]
    if not qnodes_with_curies:
        log.error(f"Neither qnode for qedge {qedge_key} has a curie specified. BTE requires that at least one of "
                  f"them has a curie. Your query graph is: {qg.to_dict()}", error_code="UnsupportedQueryForKP")
        return "", ""

    # Figure out which query node is input vs. output
    if enforce_directionality:
        input_qnode_key = qedge.subject
        output_qnode_key = qedge.object
    else:
        input_qnode_key = next(qnode_key for qnode_key, qnode in qg.nodes.items() if qnode.id)
        output_qnode_key = list(set(qg.nodes).difference({input_qnode_key}))[0]
        log.warning(f"BTE cannot do bidirectional queries; the query for this edge will be directed, going: "
                    f"{input_qnode_key}-->{output_qnode_key}")
    input_qnode = qg.nodes[input_qnode_key]
    output_qnode = qg.nodes[output_qnode_key]

    # Make sure predicate is allowed
    if qedge.predicate not in valid_bte_inputs_dict['predicates'] and qedge.predicate is not None:
        log.error(f"BTE does not accept predicate '{qedge.predicate}'. Valid options are "
                  f"{valid_bte_inputs_dict['predicates']}", error_code="InvalidInput")
        return "", ""

    # Process qnode types (convert to preferred format, make sure allowed)
    input_qnode.category = [eu.convert_string_to_pascal_case(node_category)
                            for node_category in eu.convert_string_or_list_to_list(input_qnode.category)]
    output_qnode.category = [eu.convert_string_to_pascal_case(node_category)
                             for node_category in eu.convert_string_or_list_to_list(output_qnode.category)]
    qnodes_missing_type = [qnode_key for qnode_key in [input_qnode_key, output_qnode_key]
                           if not qg.nodes[qnode_key].category]
    if qnodes_missing_type:
        log.error(f"BTE requires every query node to have a category. QNode(s) missing a category: "
                  f"{', '.join(qnodes_missing_type)}", error_code="InvalidInput")
        return "", ""
    invalid_qnode_categories = [node_category for qnode in [input_qnode, output_qnode]
                                for node_category in qnode.category
                                if node_category not in valid_bte_inputs_dict['node_categories']]
    if invalid_qnode_categories:
        log.error(f"BTE does not accept QNode category(s): {', '.join(invalid_qnode_categories)}. Valid options are "
                  f"{valid_bte_inputs_dict['node_categories']}", error_code="InvalidInput")
        return "", ""

    # Sub in curie synonyms as appropriate
    if use_synonyms:
        qnodes_with_curies = [qnode for qnode in [input_qnode, output_qnode] if qnode.id]
        for qnode in qnodes_with_curies:
            synonymized_curies = eu.get_curie_synonyms(qnode.id, log)
            qnode.id = synonymized_curies

    # Make sure our input node curies are in list form and use prefixes BTE prefers
    input_curie_list = eu.convert_string_or_list_to_list(input_qnode.id)
    input_qnode.id = [eu.convert_curie_to_bte_format(curie) for curie in input_curie_list]

    return input_qnode_key, output_qnode_key
def assess(self, message):
    #### Define a default response
    response = ARAXResponse()
    self.response = response
    self.message = message
    response.debug("Assessing the QueryGraph for basic information")

    #### Get shorter handles
    query_graph = message.query_graph
    nodes = query_graph.nodes
    edges = query_graph.edges

    #### Store number of nodes and edges
    self.n_nodes = len(nodes)
    self.n_edges = len(edges)
    response.debug(f"Found {self.n_nodes} nodes and {self.n_edges} edges")

    #### Handle impossible cases
    if self.n_nodes == 0:
        response.error("QueryGraph has 0 nodes. At least 1 node is required",
                       error_code="QueryGraphZeroNodes")
        return response
    if self.n_nodes == 1 and self.n_edges > 0:
        response.error("QueryGraph may not have edges if there is only one node",
                       error_code="QueryGraphTooManyEdges")
        return response
    #if self.n_nodes == 2 and self.n_edges > 1:
    #    response.error("QueryGraph may not have more than 1 edge if there are only 2 nodes", error_code="QueryGraphTooManyEdges")
    #    return response

    #### Loop through nodes computing some stats
    node_info = {}
    self.node_category_map = {}
    for key, qnode in nodes.items():
        node_info[key] = {'key': key, 'node_object': qnode, 'has_id': False, 'category': qnode.category,
                          'has_category': False, 'is_set': False, 'n_edges': 0, 'n_links': 0,
                          'is_connected': False, 'edges': [], 'edge_dict': {}}
        if qnode.id is not None:
            node_info[key]['has_id'] = True

            #### If the user did not specify a category, but there is a curie, try to figure out the category
            if node_info[key]['category'] is None:
                synonymizer = NodeSynonymizer()
                curie = qnode.id
                curies_list = qnode.id
                if isinstance(qnode.id, list):
                    curie = qnode.id[0]
                else:
                    curies_list = [qnode.id]
                canonical_curies = synonymizer.get_canonical_curies(curies=curies_list,
                                                                    return_all_categories=True)
                if curie in canonical_curies and 'preferred_type' in canonical_curies[curie]:
                    node_info[key]['has_category'] = True
                    node_info[key]['category'] = canonical_curies[curie]['preferred_type']

        if qnode.category is not None:
            node_info[key]['has_category'] = True

        #if qnode.is_set is not None: node_info[key]['is_set'] = True

        if key is None:
            response.error("QueryGraph has a node with null key. This is not permitted",
                           error_code="QueryGraphNodeWithNoId")
            return response

        #### Remap the node categories from unsupported to supported
        if qnode.category is not None:
            qnode.category = self.remap_node_category(qnode.category)

        #### Store lookup of categories
        warning_counter = 0
        if qnode.category is None or (isinstance(qnode.category, list) and len(qnode.category) == 0):
            if warning_counter == 0:
                #response.debug("QueryGraph has nodes with no category. This may cause problems with results inference later")
                pass
            warning_counter += 1
            self.node_category_map['unknown'] = key
        else:
            category = qnode.category
            if isinstance(qnode.category, list):
                category = qnode.category[0]  # FIXME this is a hack prior to proper list handling
            self.node_category_map[category] = key

    #### Loop through edges computing some stats
    edge_info = {}
    self.edge_predicate_map = {}
    unique_links = {}

    #### Ignore special informational edges for now.
    virtual_edge_predicates = {'has_normalized_google_distance_with': 1,
                               'has_fisher_exact_test_p-value_with': 1,
                               'has_jaccard_index_with': 1,
                               'probably_treats': 1,
                               'has_paired_concept_frequency_with': 1,
                               'has_observed_expected_ratio_with': 1,
                               'has_chi_square_with': 1}

    for key, qedge in edges.items():
        predicate = qedge.predicate
        if isinstance(predicate, list):
            if len(predicate) == 0:
                predicate = None
            else:
                predicate = predicate[0]  # FIXME Hack before dealing with predicates as lists!
        if predicate is not None and predicate in virtual_edge_predicates:
            continue

        edge_info[key] = {'key': key, 'has_predicate': False, 'subject': qedge.subject,
                          'object': qedge.object, 'predicate': None}
        if predicate is not None:
            edge_info[key]['has_predicate'] = True
            edge_info[key]['predicate'] = predicate

        if key is None:
            response.error("QueryGraph has an edge with null key. This is not permitted",
                           error_code="QueryGraphEdgeWithNoKey")
            return response

        #### Create a unique node link string
        link_string = ','.join(sorted([qedge.subject, qedge.object]))
        if link_string not in unique_links:
            node_info[qedge.subject]['n_links'] += 1
            node_info[qedge.object]['n_links'] += 1
            unique_links[link_string] = 1
            #print(link_string)

        node_info[qedge.subject]['n_edges'] += 1
        node_info[qedge.object]['n_edges'] += 1
        node_info[qedge.subject]['is_connected'] = True
        node_info[qedge.object]['is_connected'] = True
        node_info[qedge.subject]['edges'].append(edge_info[key])
        node_info[qedge.object]['edges'].append(edge_info[key])
        node_info[qedge.subject]['edge_dict'][key] = edge_info[key]
        node_info[qedge.object]['edge_dict'][key] = edge_info[key]

        #### Store lookup of predicates
        warning_counter = 0
        edge_predicate = 'any'
        if predicate is None:
            if warning_counter == 0:
                response.debug("QueryGraph has edges with no predicate. This may cause problems with results inference later")
            warning_counter += 1
        else:
            edge_predicate = predicate

        #### It's not clear yet whether we need to store the whole sentence or just the predicate
        #predicate_encoding = f"{node_info[qedge.subject]['predicate']}---{edge_predicate}---{node_info[qedge.object]['predicate']}"
        predicate_encoding = edge_predicate
        self.edge_predicate_map[predicate_encoding] = key

    #### Loop through the nodes again, trying to identify the start_node and the end_node
    singletons = []
    for node_id, node_data in node_info.items():
        if node_data['n_links'] < 2:
            singletons.append(node_data)
        elif node_data['n_links'] > 2:
            self.is_bifurcated_graph = True
            response.warning("QueryGraph appears to have a fork in it. This might cause trouble")

    #### If this doesn't produce any singletons, then try curie-based selection
    if len(singletons) == 0:
        for node_id, node_data in node_info.items():
            if node_data['has_id']:
                singletons.append(node_data)

    #### If this doesn't produce any singletons, then we don't know how to continue
    if len(singletons) == 0:
        response.error("Unable to understand the query graph", error_code="QueryGraphCircular")
        return response

    #### Try to identify the start_node and the end_node
    start_node = singletons[0]
    if len(nodes) == 1:
        # Just a single node, fine
        pass
    elif len(singletons) < 2:
        response.warning("QueryGraph appears to be circular or has a strange geometry. This might cause trouble")
    elif len(singletons) > 2:
        response.warning("QueryGraph appears to have a fork in it. This might cause trouble")
    else:
        if singletons[0]['has_id'] is True and singletons[1]['has_id'] is False:
            start_node = singletons[0]
        elif singletons[0]['has_id'] is False and singletons[1]['has_id'] is True:
            start_node = singletons[1]
        else:
            start_node = singletons[0]
    #### Hmm, that's not very robust against odd graphs. This needs work. FIXME

    self.node_info = node_info
    self.edge_info = edge_info
    self.start_node = start_node

    current_node = start_node
    node_order = [start_node]
    edge_order = []
    edges = current_node['edges']
    debug = False
    while True:
        if debug:
            tmp = {'astate': '1', 'current_node': current_node, 'node_order': node_order,
                   'edge_order': edge_order, 'edges': edges}
            print(json.dumps(ast.literal_eval(repr(tmp)), sort_keys=True, indent=2))
            print('==================================================================================')
            tmp = input()

        if len(edges) == 0:
            break

        #if len(edges) > 1:
        if current_node['n_links'] > 1:
            response.error(f"Help, two edges at A583. Don't know what to do: {current_node['n_links']}",
                           error_code="InternalErrorA583")
            return response

        edge_order.append(edges[0])
        previous_node = current_node
        if edges[0]['subject'] == current_node['key']:
            current_node = node_info[edges[0]['object']]
        elif edges[0]['object'] == current_node['key']:
            current_node = node_info[edges[0]['subject']]
        else:
            response.error("Help, edge error A584. Don't know what to do", error_code="InternalErrorA584")
            return response
        node_order.append(current_node)

        edges = current_node['edges']
        new_edges = []
        for edge in edges:
            key = edge['key']
            if key not in previous_node['edge_dict']:
                new_edges.append(edge)
        edges = new_edges
        if len(edges) == 0:
            break

    self.node_order = node_order
    self.edge_order = edge_order

    # Create a text rendering of the QueryGraph geometry for matching against a template
    self.query_graph_templates = {'simple': '',
                                  'detailed': {'n_nodes': len(node_order), 'components': []}}
    node_index = 0
    edge_index = 0
    for node in node_order:
        component_id = f"n{node_index:02}"
        content = ''
        component = {'component_type': 'node', 'component_id': component_id, 'has_id': node['has_id'],
                     'has_category': node['has_category'], 'category_value': None}
        self.query_graph_templates['detailed']['components'].append(component)
        if node['has_id']:
            content = 'id'
        elif node['has_category'] and node['node_object'].category is not None:
            content = f"category={node['node_object'].category}"
            component['category_value'] = node['node_object'].category
        elif node['has_category']:
            content = 'category'
        template_part = f"{component_id}({content})"
        self.query_graph_templates['simple'] += template_part

        # Since queries with intermediate nodes that are not is_set=true tend to blow up,
        # for now, make them is_set=true unless explicitly set to false
        if node_index > 0 and node_index < (self.n_nodes - 1):
            if 'is_set' not in node or node['is_set'] is None:
                node['node_object'].is_set = True
                response.warning(f"Setting unspecified is_set to true for {node['key']} because this will probably lead to a happier result")
            elif node['is_set'] is True:
                response.debug(f"Value for is_set is already true for {node['key']} so that's good")
            elif node['is_set'] is False:
                #response.info(f"Value for is_set is set to false for intermediate node {node['key']}. This could lead to weird results. Consider setting it to true")
                response.info(f"Value for is_set is false for intermediate node {node['key']}. Setting to true because this will probably lead to a happier result")
                node['node_object'].is_set = True
            #else:
            #    response.error(f"Unrecognized value is_set='{node['is_set']}' for {node['key']}. This should be true or false")

        node_index += 1
        if node_index < self.n_nodes:
            #### Extract the has_predicate and predicate_value from the edges of the node
            #### This could fail if there are two edges coming out of the node FIXME
            has_predicate = False
            predicate_value = None
            if 'edges' in node:
                for related_edge in node['edges']:
                    if related_edge['subject'] == node['key']:
                        has_predicate = related_edge['has_predicate']
                        if has_predicate is True and 'predicate' in related_edge:
                            predicate_value = related_edge['predicate']
            component_id = f"e{edge_index:02}"
            template_part = f"-{component_id}()-"
            self.query_graph_templates['simple'] += template_part
            component = {'component_type': 'edge', 'component_id': component_id, 'has_id': False,
                         'has_predicate': has_predicate, 'predicate_value': predicate_value}
            self.query_graph_templates['detailed']['components'].append(component)
            edge_index += 1

    response.debug(f"The QueryGraph reference template is: {self.query_graph_templates['simple']}")

    #### Return the response
    return response
def _answer_query_using_CHP_client(self, query_graph: QueryGraph, log: ARAXResponse) -> QGOrganizedKnowledgeGraph:
    qedge_key = next(qedge_key for qedge_key in query_graph.edges)
    log.debug(f"Processing query results for edge {qedge_key} by using CHP client")
    final_kg = QGOrganizedKnowledgeGraph()
    gene_label_list = ['gene']
    drug_label_list = ['drug', 'chemicalsubstance']
    # use for checking the requirement
    source_pass_nodes = None
    source_category = None
    target_pass_nodes = None
    target_category = None

    qedge = query_graph.edges[qedge_key]
    source_qnode_key = qedge.subject
    target_qnode_key = qedge.object
    source_qnode = query_graph.nodes[source_qnode_key]
    target_qnode = query_graph.nodes[target_qnode_key]

    # check if both ends of edge have no curie
    if (source_qnode.id is None) and (target_qnode.id is None):
        log.error(f"Both ends of edge {qedge_key} are None", error_code="BadEdge")
        return final_kg

    # check if the query nodes are drug or disease
    if source_qnode.id is not None:
        if type(source_qnode.id) is str:
            source_pass_nodes = [source_qnode.id]
        else:
            source_pass_nodes = source_qnode.id
        has_error, pass_nodes, not_pass_nodes = self._check_id(source_qnode.id, log)
        if has_error:
            return final_kg
        else:
            if len(not_pass_nodes) == 0 and len(pass_nodes) != 0:
                source_pass_nodes = pass_nodes
            elif len(not_pass_nodes) != 0 and len(pass_nodes) != 0:
                source_pass_nodes = pass_nodes
                if len(not_pass_nodes) == 1:
                    log.warning(f"The curie id of {not_pass_nodes[0]} is not allowable based on CHP client")
                else:
                    log.warning(f"The curie ids of these nodes {not_pass_nodes} are not allowable based on CHP client")
            else:
                if type(source_qnode.id) is str:
                    log.error(f"The curie id of {source_qnode.id} is not allowable based on CHP client",
                              error_code="NotAllowable")
                    return final_kg
                else:
                    log.error(f"The curie ids of {source_qnode.id} are not allowable based on CHP client",
                              error_code="NotAllowable")
                    return final_kg
    else:
        category = source_qnode.category[0].replace('biolink:', '').replace('_', '').lower()
        source_category = category
        if (category in drug_label_list) or (category in gene_label_list):
            source_category = category
        else:
            log.error(f"The category of query node {source_qnode_key} is unsatisfiable. It has to be drug/chemical_substance or gene",
                      error_code="CategoryError")
            return final_kg

    if target_qnode.id is not None:
        if type(target_qnode.id) is str:
            target_pass_nodes = [target_qnode.id]
        else:
            target_pass_nodes = target_qnode.id
        has_error, pass_nodes, not_pass_nodes = self._check_id(target_qnode.id, log)
        if has_error:
            return final_kg
        else:
            if len(not_pass_nodes) == 0 and len(pass_nodes) != 0:
                target_pass_nodes = pass_nodes
            elif len(not_pass_nodes) != 0 and len(pass_nodes) != 0:
                target_pass_nodes = pass_nodes
                if len(not_pass_nodes) == 1:
                    log.warning(f"The curie id of {not_pass_nodes[0]} is not allowable based on CHP client")
                else:
                    log.warning(f"The curie ids of these nodes {not_pass_nodes} are not allowable based on CHP client")
            else:
                if type(target_qnode.id) is str:
                    log.error(f"The curie id of {target_qnode.id} is not allowable based on CHP client",
                              error_code="CategoryError")
                    return final_kg
                else:
                    log.error(f"The curie ids of {target_qnode.id} are not allowable based on CHP client",
                              error_code="CategoryError")
                    return final_kg
    else:
        category = target_qnode.category[0].replace('biolink:', '').replace('_', '').lower()
        target_category = category
        if (category in drug_label_list) or (category in gene_label_list):
            target_category = category
        else:
            log.error(f"The category of query node {target_qnode_key} is unsatisfiable. It has to be drug/chemical_substance or gene",
                      error_code="CategoryError")
            return final_kg

    if (source_pass_nodes is None) and (target_pass_nodes is None):
        return final_kg

    elif (source_pass_nodes is not None) and (target_pass_nodes is not None):
        source_dict = dict()
        target_dict = dict()

        if source_pass_nodes[0] in self.allowable_drug_curies:
            source_category_temp = 'drug'
        else:
            source_category_temp = 'gene'
        if target_pass_nodes[0] in self.allowable_drug_curies:
            target_category_temp = 'drug'
        else:
            target_category_temp = 'gene'
        if source_category_temp == target_category_temp:
            log.error(f"The query nodes in both ends of edge are the same type which is {source_category_temp}",
                      error_code="CategoryError")
            return final_kg
        else:
            for (source_curie, target_curie) in itertools.product(source_pass_nodes, target_pass_nodes):
                if source_category_temp == 'drug':
                    source_curie_temp = source_curie.replace('CHEMBL.COMPOUND:', 'CHEMBL:')
                    # Let's build a simple single query
                    q = build_query(genes=[target_curie],
                                    therapeutic=source_curie_temp,
                                    disease='MONDO:0007254',
                                    outcome=('EFO:0000714', '>=', self.CHP_survival_threshold))
                    response = self.client.query(q)
                    max_probability = self.client.get_outcome_prob(response)
                    swagger_edge_key, swagger_edge = self._convert_to_swagger_edge(
                        target_curie, source_curie, "paired_with", max_probability)
                else:
                    target_curie_temp = target_curie.replace('CHEMBL.COMPOUND:', 'CHEMBL:')
                    # Let's build a simple single query
                    q = build_query(genes=[source_curie],
                                    therapeutic=target_curie_temp,
                                    disease='MONDO:0007254',
                                    outcome=('EFO:0000714', '>=', self.CHP_survival_threshold))
                    response = self.client.query(q)
                    max_probability = self.client.get_outcome_prob(response)
                    swagger_edge_key, swagger_edge = self._convert_to_swagger_edge(
                        source_curie, target_curie, "paired_with", max_probability)

                source_dict[source_curie] = source_qnode_key
                target_dict[target_curie] = target_qnode_key

                # Finally add the current edge to our answer knowledge graph
                final_kg.add_edge(swagger_edge_key, swagger_edge, qedge_key)

        # Add the nodes to our answer knowledge graph
        if len(source_dict) != 0:
            for source_curie in source_dict:
                swagger_node_key, swagger_node = self._convert_to_swagger_node(source_curie)
                final_kg.add_node(swagger_node_key, swagger_node, source_dict[source_curie])
        if len(target_dict) != 0:
            for target_curie in target_dict:
                swagger_node_key, swagger_node = self._convert_to_swagger_node(target_curie)
                final_kg.add_node(swagger_node_key, swagger_node, target_dict[target_curie])

        return final_kg

    elif source_pass_nodes is not None:
        source_dict = dict()
        target_dict = dict()

        if source_pass_nodes[0] in self.allowable_drug_curies:
            source_category_temp = 'drug'
        else:
            source_category_temp = 'gene'
        if target_category in drug_label_list:
            target_category_temp = 'drug'
        else:
            target_category_temp = 'gene'
        if source_category_temp == target_category_temp:
            log.error(f"The query nodes in both ends of edge are the same type which is {source_category_temp}",
                      error_code="CategoryError")
            return final_kg
        else:
            if source_category_temp == 'drug':
                for source_curie in source_pass_nodes:
                    genes = [curie for curie in self.allowable_gene_curies
                             if self.synonymizer.get_canonical_curies(curie)[curie] is not None
                             and target_category in [category.replace('biolink:', '').replace('_', '').lower()
                                                     for category in list(self.synonymizer.get_canonical_curies(curie, return_all_categories=True)[curie]['all_categories'].keys())]]
                    therapeutic = source_curie.replace('CHEMBL.COMPOUND:', 'CHEMBL:')
                    disease = 'MONDO:0007254'
                    outcome = ('EFO:0000714', '>=', self.CHP_survival_threshold)

                    queries = []
                    for gene in genes:
                        queries.append(build_query(genes=[gene],
                                                   therapeutic=therapeutic,
                                                   disease=disease,
                                                   outcome=outcome))

                    # use the query_all endpoint to run the batch of queries
                    res = self.client.query_all(queries)

                    for result, gene in zip(res["message"], genes):
                        prob = self.client.get_outcome_prob(result)
                        swagger_edge_key, swagger_edge = self._convert_to_swagger_edge(
                            gene, source_curie, "paired_with", prob)
                        source_dict[source_curie] = source_qnode_key
                        target_dict[gene] = target_qnode_key
                        # Finally add the current edge to our answer knowledge graph
                        final_kg.add_edge(swagger_edge_key, swagger_edge, qedge_key)
            else:
                for source_curie in source_pass_nodes:
                    genes = [source_curie]
                    therapeutic = [curie.replace('CHEMBL.COMPOUND:', 'CHEMBL:') for curie in self.allowable_drug_curies
                                   if self.synonymizer.get_canonical_curies(curie.replace('CHEMBL:', 'CHEMBL.COMPOUND:'))[curie.replace('CHEMBL:', 'CHEMBL.COMPOUND:')] is not None
                                   and target_category in [category.replace('biolink:', '').replace('_', '').lower()
                                                           for category in list(self.synonymizer.get_canonical_curies(curie.replace('CHEMBL:', 'CHEMBL.COMPOUND:'), return_all_categories=True)[curie.replace('CHEMBL:', 'CHEMBL.COMPOUND:')]['all_categories'].keys())]]
                    disease = 'MONDO:0007254'
                    outcome = ('EFO:0000714', '>=', self.CHP_survival_threshold)

                    queries = []
                    for drug in therapeutic:
                        queries.append(build_query(genes=genes,
                                                   therapeutic=drug,
                                                   disease=disease,
                                                   outcome=outcome))

                    # use the query_all endpoint to run the batch of queries
                    res = self.client.query_all(queries)

                    for result, drug in zip(res["message"], therapeutic):
                        drug = drug.replace('CHEMBL:', 'CHEMBL.COMPOUND:')
                        prob = self.client.get_outcome_prob(result)
                        swagger_edge_key, swagger_edge = self._convert_to_swagger_edge(
                            source_curie, drug, "paired_with", prob)
                        source_dict[source_curie] = source_qnode_key
                        target_dict[drug] = target_qnode_key
                        # Finally add the current edge to our answer knowledge graph
                        final_kg.add_edge(swagger_edge_key, swagger_edge, qedge_key)

        # Add the nodes to our answer knowledge graph
        if len(source_dict) != 0:
            for source_curie in source_dict:
                swagger_node_key, swagger_node = self._convert_to_swagger_node(source_curie)
                final_kg.add_node(swagger_node_key, swagger_node, source_dict[source_curie])
        if len(target_dict) != 0:
            for target_curie in target_dict:
                swagger_node_key, swagger_node = self._convert_to_swagger_node(target_curie)
                final_kg.add_node(swagger_node_key, swagger_node, target_dict[target_curie])

        return final_kg

    else:
        source_dict = dict()
        target_dict = dict()

        if target_pass_nodes[0] in self.allowable_drug_curies:
            target_category_temp = 'drug'
        else:
            target_category_temp = 'gene'
        if source_category in drug_label_list:
            source_category_temp = 'drug'
        else:
            source_category_temp = 'gene'
        if source_category_temp == target_category_temp:
            log.error(f"The query nodes in both ends of edge are the same type which is {source_category_temp}",
                      error_code="CategoryError")
            return final_kg
        else:
            if target_category_temp == 'drug':
                for target_curie in target_pass_nodes:
                    genes = [curie for curie in self.allowable_gene_curies
                             if self.synonymizer.get_canonical_curies(curie)[curie] is not None
                             and source_category in [category.replace('biolink:', '').replace('_', '').lower()
                                                     for category in list(self.synonymizer.get_canonical_curies(curie, return_all_categories=True)[curie]['all_categories'].keys())]]
                    therapeutic = target_curie.replace('CHEMBL.COMPOUND:', 'CHEMBL:')
                    disease = 'MONDO:0007254'
                    outcome = ('EFO:0000714', '>=', self.CHP_survival_threshold)

                    queries = []
                    for gene in genes:
                        queries.append(build_query(genes=[gene],
                                                   therapeutic=therapeutic,
                                                   disease=disease,
                                                   outcome=outcome))

                    # use the query_all endpoint to run the batch of queries
                    res = self.client.query_all(queries)

                    for result, gene in zip(res["message"], genes):
                        prob = self.client.get_outcome_prob(result)
                        swagger_edge_key, swagger_edge = self._convert_to_swagger_edge(
                            gene, target_curie, "paired_with", prob)
                        source_dict[gene] = source_qnode_key
                        target_dict[target_curie] = target_qnode_key
                        # Finally add the current edge to our answer knowledge graph
                        final_kg.add_edge(swagger_edge_key, swagger_edge, qedge_key)
            else:
                for target_curie in target_pass_nodes:
                    genes = [target_curie]
                    therapeutic = [curie.replace('CHEMBL.COMPOUND:', 'CHEMBL:') for curie in self.allowable_drug_curies
                                   if self.synonymizer.get_canonical_curies(curie.replace('CHEMBL:', 'CHEMBL.COMPOUND:'))[curie.replace('CHEMBL:', 'CHEMBL.COMPOUND:')] is not None
                                   and source_category in [category.replace('biolink:', '').replace('_', '').lower()
                                                           for category in list(self.synonymizer.get_canonical_curies(curie.replace('CHEMBL:', 'CHEMBL.COMPOUND:'), return_all_categories=True)[curie.replace('CHEMBL:', 'CHEMBL.COMPOUND:')]['all_categories'].keys())]]
                    disease = 'MONDO:0007254'
                    outcome = ('EFO:0000714', '>=', self.CHP_survival_threshold)

                    queries = []
                    for drug in therapeutic:
                        queries.append(build_query(genes=genes,
                                                   therapeutic=drug,
                                                   disease=disease,
                                                   outcome=outcome))

                    # use the query_all endpoint to run the batch of queries
                    res = self.client.query_all(queries)

                    for result, drug in zip(res["message"], therapeutic):
                        drug = drug.replace('CHEMBL:', 'CHEMBL.COMPOUND:')
                        prob = self.client.get_outcome_prob(result)
                        swagger_edge_key, swagger_edge = self._convert_to_swagger_edge(
                            target_curie, drug, "paired_with", prob)
                        source_dict[drug] = source_qnode_key
                        target_dict[target_curie] = target_qnode_key
                        # Finally add the current edge to our answer knowledge graph
                        final_kg.add_edge(swagger_edge_key, swagger_edge, qedge_key)

        # Add the nodes to our answer knowledge graph
        if len(source_dict) != 0:
            for source_curie in source_dict:
                swagger_node_key, swagger_node = self._convert_to_swagger_node(source_curie)
                final_kg.add_node(swagger_node_key, swagger_node, source_dict[source_curie])
        if len(target_dict) != 0:
            for target_curie in target_dict:
                swagger_node_key, swagger_node = self._convert_to_swagger_node(target_curie)
                final_kg.add_node(swagger_node_key, swagger_node, target_dict[target_curie])

        return final_kg