Example #1
    def _create_swagger_attributes(object_type, property_names, neo4j_object):
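        # Build NodeAttribute (for nodes) or EdgeAttribute (for edges) objects from the given Neo4j
        # properties, parsing lists/dicts/booleans stored as strings and skipping empty values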
        new_attributes = []
        for property_name in property_names:
            property_value = neo4j_object.get(property_name)
            if type(property_value) is str:
                # Extract any lists, dicts, and booleans that are stored within strings
                if (property_value.startswith('[') and property_value.endswith(']')) or \
                        (property_value.startswith('{') and property_value.endswith('}')):
                    property_value = ast.literal_eval(property_value)
                elif property_value.lower() in ("true", "false"):
                    # ast.literal_eval only accepts 'True'/'False', so convert booleans directly
                    property_value = (property_value.lower() == "true")

            if property_value is not None and property_value != {} and property_value != []:
                swagger_attribute = NodeAttribute() if object_type == "node" else EdgeAttribute()
                swagger_attribute.name = property_name

                # Figure out whether this is a url and store it appropriately
                if type(property_value) is str and (
                        property_value.startswith("http:")
                        or property_value.startswith("https:")):
                    swagger_attribute.url = property_value
                else:
                    swagger_attribute.value = property_value
                new_attributes.append(swagger_attribute)

        return new_attributes
Example #2
 def _create_ngd_edge(self, ngd_value: float, source_id: str,
                      target_id: str) -> Edge:
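     # Build a virtual edge of the configured NGD type between the two nodes,
     # attaching the NGD value as a single edge attribute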
     ngd_edge = Edge()
     ngd_edge.type = self.ngd_edge_type
     ngd_edge.source_id = source_id
     ngd_edge.target_id = target_id
     ngd_edge.id = f"NGD:{source_id}--{ngd_edge.type}--{target_id}"
     ngd_edge.provided_by = "ARAX"
     ngd_edge.is_defined_by = "ARAX"
     ngd_edge.edge_attributes = [
         EdgeAttribute(name=self.ngd_edge_attribute_name,
                       type=self.ngd_edge_attribute_type,
                       value=ngd_value,
                       url=self.ngd_edge_attribute_url)
     ]
     return ngd_edge
Example #3
    def predict_drug_treats_disease(self):
        """
        Iterate over all the edges in the knowledge graph, add the drug-disease treatment probability for appropriate edges
        on the edge_attributes
        :return: response
        """
        parameters = self.parameters
        self.response.debug(f"Computing drug disease treatment probability based on a machine learning model")
        self.response.info(f"Computing drug disease treatment probability based on a machine learning model: See [this publication](https://doi.org/10.1101/765305) for more details about how this is accomplished.")

        attribute_name = "probability_treats"
        attribute_type = "EDAM:data_0951"
        value = 0  # this will be the default value. If the model returns 0, or the default is there, don't include that edge
        url = "https://doi.org/10.1101/765305"

        # if you want to add virtual edges, identify the source/targets, decorate the edges, add them to the KG, and then add one to the QG corresponding to them
        if 'virtual_relation_label' in parameters:
            source_curies_to_decorate = set()
            target_curies_to_decorate = set()
            # identify the nodes that we should be adding virtual edges for
            for node in self.message.knowledge_graph.nodes:
                if hasattr(node, 'qnode_ids'):
                    if parameters['source_qnode_id'] in node.qnode_ids:
                        if "drug" in node.type or "chemical_substance" in node.type:  # this is now NOT checked by ARAX_overlay
                            source_curies_to_decorate.add(node.id)
                    if parameters['target_qnode_id'] in node.qnode_ids:
                        if "disease" in node.type or "phenotypic_feature" in node.type:  # this is now NOT checked by ARAX_overlay
                            target_curies_to_decorate.add(node.id)

            added_flag = False  # check to see if any edges were added
            # iterate over all pairs of these nodes, add the virtual edge, decorate with the correct attribute

            for (source_curie, target_curie) in itertools.product(source_curies_to_decorate, target_curies_to_decorate):
                # create the edge attribute if it can be
                # loop over all equivalent curies and take the highest probability

                max_probability = 0
                converted_source_curie = self.convert_to_trained_curies(source_curie)
                converted_target_curie = self.convert_to_trained_curies(target_curie)
                if converted_source_curie is None or converted_target_curie is None:
                    continue
                res = list(itertools.product(converted_source_curie, converted_target_curie))
                if len(res) != 0:
                    all_probabilities = self.pred.prob_all(res)
                    if isinstance(all_probabilities, list):
                        max_probability = max([p for p in all_probabilities if np.isfinite(p)], default=0)  # default=0 avoids an error when no finite values are returned

                value = max_probability

                #probability = self.pred.prob_single('ChEMBL:' + source_curie[22:], target_curie)  # FIXME: when this was trained, it was ChEMBL:123, not CHEMBL.COMPOUND:CHEMBL123
                #if probability and np.isfinite(probability):  # finite, that's ok, otherwise, stay with default
                #    value = probability[0]
                edge_attribute = EdgeAttribute(type=attribute_type, name=attribute_name, value=str(value), url=url)  # populate the edge attribute
                if edge_attribute and value != 0:
                    added_flag = True
                    # make the edge, add the attribute

                    # edge properties
                    now = datetime.now()
                    edge_type = "probably_treats"
                    qedge_ids = [parameters['virtual_relation_label']]
                    relation = parameters['virtual_relation_label']
                    is_defined_by = "ARAX"
                    defined_datetime = now.strftime("%Y-%m-%d %H:%M:%S")
                    provided_by = "ARAX"
                    confidence = None
                    weight = None  # TODO: could make the actual value of the attribute
                    source_id = source_curie
                    target_id = target_curie

                    # now actually add the virtual edges in
                    id = f"{relation}_{self.global_iter}"
                    self.global_iter += 1
                    edge = Edge(id=id, type=edge_type, relation=relation, source_id=source_id,
                                target_id=target_id,
                                is_defined_by=is_defined_by, defined_datetime=defined_datetime,
                                provided_by=provided_by,
                                confidence=confidence, weight=weight, edge_attributes=[edge_attribute], qedge_ids=qedge_ids)
                    self.message.knowledge_graph.edges.append(edge)

            # Now add a q_edge to the query_graph since I've added an extra edge to the KG
            if added_flag:
                edge_type = "probably_treats"
                relation = parameters['virtual_relation_label']
                qedge_id = parameters['virtual_relation_label']
                q_edge = QEdge(id=relation, type=edge_type, relation=relation,
                               source_id=parameters['source_qnode_id'], target_id=parameters['target_qnode_id'])  # TODO: ok to make the id and type the same thing?
                self.message.query_graph.edges.append(q_edge)
            return self.response

        else:  # you want to add it for each edge in the KG
            # iterate over KG edges, add the information
            try:
                # map curies to types
                curie_to_type = dict()
                for node in self.message.knowledge_graph.nodes:
                    curie_to_type[node.id] = node.type
                # then iterate over the edges and decorate if appropriate
                for edge in self.message.knowledge_graph.edges:
                    # Make sure the edge_attributes are not None
                    if not edge.edge_attributes:
                        edge.edge_attributes = []  # should be an array, but why not a list?
                    # now go and actually get the treatment probability
                    source_curie = edge.source_id
                    target_curie = edge.target_id
                    source_types = curie_to_type[source_curie]
                    target_types = curie_to_type[target_curie]
                    if (("drug" in source_types) or ("chemical_substance" in source_types)) and (("disease" in target_types) or ("phenotypic_feature" in target_types)):
                        temp_value = 0
                        # loop over all pairs of equivalent curies and take the highest probability

                        max_probability = 0
                        converted_source_curie = self.convert_to_trained_curies(source_curie)
                        converted_target_curie = self.convert_to_trained_curies(target_curie)
                        if converted_source_curie is None or converted_target_curie is None:
                            continue
                        res = list(itertools.product(converted_source_curie, converted_target_curie))
                        if len(res) != 0:
                            all_probabilities = self.pred.prob_all(res)
                            if isinstance(all_probabilities, list):
                                max_probability = max([p for p in all_probabilities if np.isfinite(p)], default=0)  # default=0 avoids an error when no finite values are returned

                        value = max_probability

                        #probability = self.pred.prob_single('ChEMBL:' + source_curie[22:], target_curie)  # FIXME: when this was trained, it was ChEMBL:123, not CHEMBL.COMPOUND:CHEMBL123
                        #if probability and np.isfinite(probability):  # finite, that's ok, otherwise, stay with default
                        #    value = probability[0]
                    elif (("drug" in target_types) or ("chemical_substance" in target_types)) and (("disease" in source_types) or ("phenotypic_feature" in source_types)):
                        #probability = self.pred.prob_single('ChEMBL:' + target_curie[22:], source_curie)  # FIXME: when this was trained, it was ChEMBL:123, not CHEMBL.COMPOUND:CHEMBL123
                        #if probability and np.isfinite(probability):  # finite, that's ok, otherwise, stay with default
                        #    value = probability[0]

                        max_probability = 0
                        converted_source_curie = self.convert_to_trained_curies(source_curie)
                        converted_target_curie = self.convert_to_trained_curies(target_curie)
                        if converted_source_curie is None or converted_target_curie is None:
                            continue
                        res = list(itertools.product(converted_target_curie, converted_source_curie))
                        if len(res) != 0:
                            all_probabilities = self.pred.prob_all(res)
                            if isinstance(all_probabilities, list):
                                max_probability = max([p for p in all_probabilities if np.isfinite(p)], default=0)  # default=0 avoids an error when no finite values are returned

                        value = max_probability

                    else:
                        continue
                    if value != 0:
                        edge_attribute = EdgeAttribute(type=attribute_type, name=attribute_name, value=str(value), url=url)  # populate the attribute
                        edge.edge_attributes.append(edge_attribute)  # append it to the list of attributes
            except:
                tb = traceback.format_exc()
                error_type, error, _ = sys.exc_info()
                self.response.error(tb, error_code=error_type.__name__)
                self.response.error(f"Something went wrong adding the drug disease treatment probability")
            else:
                self.response.info(f"Drug disease treatment probability successfully added to edges")

            return self.response
Example #4
    def make_edge_attribute_from_curies(self,
                                        source_curie,
                                        target_curie,
                                        source_name="",
                                        target_name="",
                                        default=0.,
                                        name=""):
        """
        Generic function to make an edge attribute
        :source_curie: CURIE of the source node for the edge under consideration
        :target_curie: CURIE of the target node for the edge under consideration
        :source_name: text name of the source node (in case the KP doesn't understand the CURIE)
        :target_name: text name of the target node (in case the KP doesn't understand the CURIE)
        :default: default value of the edge attribute
        :name: name of the KP functionality you want to apply
        """
        try:
            # edge attributes
            name = name
            type = "EDAM:data_0951"
            url = "http://cohd.smart-api.info/"
            value = default

            node_curie_to_type = self.node_curie_to_type
            source_type = node_curie_to_type[source_curie]
            target_type = node_curie_to_type[target_curie]
            # figure out which knowledge provider to use  # TODO: should handle this in a more structured fashion, does there exist a standardized KP API format?
            KP_to_use = None
            for KP in self.who_knows_about_what:
                # see which KP's can label both sources of information
                if self.in_common(
                        source_type,
                        self.who_knows_about_what[KP]) and self.in_common(
                            target_type, self.who_knows_about_what[KP]):
                    KP_to_use = KP
            if KP_to_use == 'COHD':
                # convert CURIE to OMOP identifiers
                # source_OMOPs = [str(x['omop_standard_concept_id']) for x in COHD.get_xref_to_OMOP(source_curie, 1)]
                res = self.cohdIndex.get_concept_ids(source_curie)
                if len(res) != 0:
                    source_OMOPs = res
                else:
                    source_OMOPs = []
                # target_OMOPs = [str(x['omop_standard_concept_id']) for x in COHD.get_xref_to_OMOP(target_curie, 1)]
                res = self.cohdIndex.get_concept_ids(target_curie)
                if len(res) != 0:
                    target_OMOPs = res
                else:
                    target_OMOPs = []
                # for domain in ["Condition", "Drug", "Procedure"]:
                #     source_OMOPs.update([str(x['concept_id']) for x in COHD.find_concept_ids(source_name, domain=domain, dataset_id=3)])
                #     target_OMOPs.update([str(x['concept_id']) for x in COHD.find_concept_ids(target_name, domain=domain, dataset_id=3)])
                #################################################
                # FIXME: this was the old way
                # FIXME: Super hacky way to get around the fact that COHD can't map CHEMBL drugs
                # if source_curie.split('.')[0] == 'CHEMBL':
                #     source_OMOPs = [str(x['concept_id']) for x in
                #                     COHD.find_concept_ids(source_name, domain="Drug", dataset_id=3)]
                # if target_curie.split('.')[0] == 'CHEMBL':
                #     target_OMOPs = [str(x['concept_id']) for x in
                #                     COHD.find_concept_ids(target_name, domain="Drug", dataset_id=3)]

                # uniquify everything
                # source_OMOPs = list(set(source_OMOPs))
                # target_OMOPs = list(set(target_OMOPs))

                # Decide how to handle the response from the KP
                if name == 'paired_concept_frequency':
                    # sum up all frequencies  #TODO check with COHD people to see if this is kosher
                    frequency = default
                    # for (omop1, omop2) in itertools.product(source_OMOPs, target_OMOPs):
                    #     freq_data_list = self.cohdIndex.get_paired_concept_freq(omop1, omop2, 3) # use the hierarchical dataset
                    #     if len(freq_data_list) != 0:
                    #         freq_data = freq_data_list[0]
                    #         temp_value = freq_data['concept_frequency']
                    #         if temp_value > frequency:
                    #             frequency = temp_value
                    omop_pairs = [f"{omop1}_{omop2}" for omop1, omop2 in itertools.product(source_OMOPs, target_OMOPs)]
                    if len(omop_pairs) != 0:
                        res = self.cohdIndex.get_paired_concept_freq(
                            concept_id_pair=omop_pairs,
                            dataset_id=3)  # use the hierarchical dataset
                        if len(res) != 0:
                            # results from get_paired_concept_freq are sorted in decreasing order
                            maximum_concept_frequency = res[0]['concept_frequency']
                            frequency = maximum_concept_frequency
                    # decorate the edges
                    value = frequency

                elif name == 'observed_expected_ratio':
                    # should probably take the largest obs/exp ratio  # TODO: check with COHD people to see if this is kosher
                    # FIXME: the ln_ratio can be negative, so I should probably account for this, but the object model doesn't like -np.inf
                    value = float("-inf")  # FIXME: unclear in object model if attribute type dictates value type, or if value always needs to be a string

                    ###############################
                    # The following code was an experiment to see if it would speed things up, leaving it out for now since it's difficult to quantify if it does speed things up given the cacheing
                    #if len(source_OMOPs) < len(target_OMOPs):
                    #    for omop1 in source_OMOPs:
                    #        omop_to_ln_ratio = dict()
                    #        response = COHD.get_obs_exp_ratio(omop1, domain="", dataset_id=3)  # use the hierarchical dataset
                    #        if response:
                    #            for res in response:
                    #                omop_to_ln_ratio[str(res['concept_id_2'])] = res['ln_ratio']
                    #        for omop2 in target_OMOPs:
                    #            if omop2 in omop_to_ln_ratio:
                    #                temp_value = omop_to_ln_ratio[omop2]
                    #                if temp_value > value:
                    #                    value = temp_value
                    #else:
                    #    for omop1 in target_OMOPs:
                    #        omop_to_ln_ratio = dict()
                    #        response = COHD.get_obs_exp_ratio(omop1, domain="", dataset_id=3)  # use the hierarchical dataset
                    #        if response:
                    #            for res in response:
                    #                omop_to_ln_ratio[str(res['concept_id_2'])] = res['ln_ratio']
                    #        for omop2 in source_OMOPs:
                    #            if omop2 in omop_to_ln_ratio:
                    #                temp_value = omop_to_ln_ratio[omop2]
                    #                if temp_value > value:
                    #                    value = temp_value
                    ###################################

                    # for (omop1, omop2) in itertools.product(source_OMOPs, target_OMOPs):
                    #     #print(f"{omop1},{omop2}")
                    #     response = self.cohdIndex.get_obs_exp_ratio(omop1, concept_id_2=omop2, domain="", dataset_id=3)  # use the hierarchical dataset
                    #     # response is a list, since this function is overloaded and can omit concept_id_2, take the first element
                    #     if response and 'ln_ratio' in response[0]:
                    #         temp_val = response[0]['ln_ratio']
                    #         if temp_val > value:
                    #             value = temp_val
                    omop_pairs = [f"{omop1}_{omop2}" for omop1, omop2 in itertools.product(source_OMOPs, target_OMOPs)]
                    if len(omop_pairs) != 0:
                        res = self.cohdIndex.get_obs_exp_ratio(
                            concept_id_pair=omop_pairs,
                            domain="",
                            dataset_id=3)  # use the hierarchical dataset
                        if len(res) != 0:
                            # results from get_obs_exp_ratio are sorted in decreasing order
                            maximum_ln_ratio = res[0]['ln_ratio']
                            value = maximum_ln_ratio

                elif name == 'chi_square':
                    value = float("inf")
                    # for (omop1, omop2) in itertools.product(source_OMOPs, target_OMOPs):
                    #     response = self.cohdIndex.get_chi_square(omop1, concept_id_2=omop2, domain="", dataset_id=3)  # use the hierarchical dataset
                    #     # response is a list, since this function is overloaded and can omit concept_id_2, take the first element
                    #     if response and 'p-value' in response[0]:
                    #         temp_val = response[0]['p-value']
                    #         if temp_val < value:  # looking at p=values, so lower is better
                    #             value = temp_val
                    omop_pairs = [f"{omop1}_{omop2}" for omop1, omop2 in itertools.product(source_OMOPs, target_OMOPs)]
                    if len(omop_pairs) != 0:
                        res = self.cohdIndex.get_chi_square(
                            concept_id_pair=omop_pairs,
                            domain="",
                            dataset_id=3)  # use the hierarchical dataset
                        if len(res) != 0:
                            # results from get_chi_square are sorted so the smallest p-value comes first
                            minimum_pvalue = res[0]['p-value']
                            value = minimum_pvalue

                # create the edge attribute
                edge_attribute = EdgeAttribute(
                    type=type, name=name, value=str(value), url=url
                )  # populate the edge attribute # FIXME: unclear in object model if attribute type dictates value type, or if value always needs to be a string
                return edge_attribute
            else:
                return None
        except:
            tb = traceback.format_exc()
            error_type, error, _ = sys.exc_info()
            self.response.error(tb, error_code=error_type.__name__)
            self.response.error(
                f"Something went wrong when adding the edge attribute from {KP_to_use}."
            )
Example #5
    def fisher_exact_test(self):
        """
        Perform Fisher's exact test to expand or decorate the knowledge graph
        :return: response
        """

        self.response.info(
            f"Performing Fisher's Exact Test to add p-values as edge attributes on virtual edges"
        )

        # check the input parameters
        if 'source_qnode_id' not in self.parameters:
            self.response.error(
                f"The argument 'source_qnode_id' is required for fisher_exact_test function"
            )
            return self.response
        else:
            source_qnode_id = self.parameters['source_qnode_id']
        if 'virtual_relation_label' not in self.parameters:
            self.response.error(
                f"The argument 'virtual_relation_label' is required for fisher_exact_test function"
            )
            return self.response
        else:
            virtual_relation_label = str(
                self.parameters['virtual_relation_label'])
        if 'target_qnode_id' not in self.parameters:
            self.response.error(
                f"The argument 'target_qnode_id' is required for fisher_exact_test function"
            )
            return self.response
        else:
            target_qnode_id = self.parameters['target_qnode_id']
        rel_edge_id = self.parameters.get('rel_edge_id')
        top_n = int(self.parameters['top_n']) if 'top_n' in self.parameters else None
        cutoff = float(self.parameters['cutoff']) if 'cutoff' in self.parameters else None

        # initialize some variables
        nodes_info = {}
        edge_expand_kp = []
        source_node_list = []
        target_node_dict = {}
        size_of_target = {}
        source_node_exist = False
        target_node_exist = False
        query_edge_id = set()
        rel_edge_type = set()
        source_node_type = None
        target_node_type = None

        ## Check if source_qnode_id and target_qnode_id are in the Query Graph
        try:
            if len(self.message.query_graph.nodes) != 0:
                for node in self.message.query_graph.nodes:
                    if node.id == source_qnode_id:
                        source_node_exist = True
                        source_node_type = node.type
                    elif node.id == target_qnode_id:
                        target_node_exist = True
                        target_node_type = node.type
                    else:
                        pass
            else:
                self.response.error(f"There is no query node in QG")
                return self.response
        except:
            tb = traceback.format_exc()
            error_type, error, _ = sys.exc_info()
            self.response.error(tb, error_code=error_type.__name__)
            self.response.error(
                f"Something went wrong with retrieving nodes in message QG")
            return self.response

        if source_node_exist:
            if target_node_exist:
                pass
            else:
                self.response.error(
                    f"No query node with target qnode id {target_qnode_id} detected in QG for Fisher's Exact Test"
                )
                return self.response
        else:
            self.response.error(
                f"No query node with source qnode id {source_qnode_id} detected in QG for Fisher's Exact Test"
            )
            return self.response

        ## Check if there is a query edge connected to both source_qnode_id and target_qnode_id in the Query Graph
        try:
            if len(self.message.query_graph.edges) != 0:
                for edge in self.message.query_graph.edges:
                    if edge.source_id == source_qnode_id and edge.target_id == target_qnode_id and edge.relation is None:
                        query_edge_id.add(edge.id)  # only actual query edges are added
                    elif edge.source_id == target_qnode_id and edge.target_id == source_qnode_id and edge.relation is None:
                        query_edge_id.add(edge.id)  # only actual query edges are added
                    else:
                        continue
            else:
                self.response.error(f"There is no query edge in Query Graph")
                return self.response
        except:
            tb = traceback.format_exc()
            error_type, error, _ = sys.exc_info()
            self.response.error(tb, error_code=error_type.__name__)
            self.response.error(
                f"Something went wrong with retrieving edges in message QG")
            return self.response

        if len(query_edge_id) != 0:
            if rel_edge_id:
                if rel_edge_id in query_edge_id:
                    pass
                else:
                    self.response.error(
                        f"No query edge with qedge id {rel_edge_id} connected to both source node with qnode id {source_qnode_id} and target node with qnode id {target_qnode_id} detected in QG for Fisher's Exact Test"
                    )
                    return self.response
            else:
                pass
        else:
            self.response.error(
                f"No query edge connected to both source node with qnode id {source_qnode_id} and target node with qnode id {target_qnode_id} detected in QG for Fisher's Exact Test"
            )
            return self.response

        ## loop over all nodes in KG and collect their node information
        try:
            count = 0
            for node in self.message.knowledge_graph.nodes:
                nodes_info[node.id] = {
                    'count': count,
                    'qnode_ids': node.qnode_ids,
                    'type': node.type[0],
                    'edge_index': []
                }
                count = count + 1
        except:
            tb = traceback.format_exc()
            error_type, error, _ = sys.exc_info()
            self.response.error(tb, error_code=error_type.__name__)
            self.response.error(
                f"Something went wrong with retrieving nodes in message KG")
            return self.response

        ## loop over all edges in KG and create source node list and target node dict based on source_qnode_id, target_qnode_id as well as rel_edge_id (optional, otherwise all edges are considered)
        try:
            count = 0
            for edge in self.message.knowledge_graph.edges:
                if edge.provided_by != "ARAX":

                    nodes_info[edge.source_id]['edge_index'].append(count)
                    nodes_info[edge.target_id]['edge_index'].append(count)

                    if rel_edge_id:
                        if rel_edge_id in edge.qedge_ids:
                            if source_qnode_id in nodes_info[edge.source_id]['qnode_ids']:
                                edge_expand_kp.append(edge.is_defined_by)
                                rel_edge_type.update([edge.type])
                                source_node_list.append(edge.source_id)
                                target_node_dict.setdefault(edge.target_id, set()).add(edge.source_id)
                            else:
                                edge_expand_kp.append(edge.is_defined_by)
                                rel_edge_type.update([edge.type])
                                source_node_list.append(edge.target_id)
                                target_node_dict.setdefault(edge.source_id, set()).add(edge.target_id)
                        else:
                            pass
                    else:
                        if source_qnode_id in nodes_info[edge.source_id]['qnode_ids']:
                            if target_qnode_id in nodes_info[edge.target_id]['qnode_ids']:
                                edge_expand_kp.append(edge.is_defined_by)
                                source_node_list.append(edge.source_id)
                                target_node_dict.setdefault(edge.target_id, set()).add(edge.source_id)
                            else:
                                pass
                        elif target_qnode_id in nodes_info[edge.source_id]['qnode_ids']:
                            if source_qnode_id in nodes_info[edge.target_id]['qnode_ids']:
                                edge_expand_kp.append(edge.is_defined_by)
                                source_node_list.append(edge.target_id)
                                target_node_dict.setdefault(edge.source_id, set()).add(edge.target_id)
                            else:
                                pass
                        else:
                            pass

                else:
                    pass

                count = count + 1  ## record edge position in message.knowledge_graph

        except:
            tb = traceback.format_exc()
            error_type, error, _ = sys.exc_info()
            self.response.error(tb, error_code=error_type.__name__)
            self.response.error(
                f"Something went wrong with retrieving edges in message KG")
            return self.response

        source_node_list = list(
            set(source_node_list))  ## remove the duplicate source node id

        ## check if there is no source node in message KG
        if len(source_node_list) == 0:
            self.response.error(
                f"No source node found in message KG for Fisher's Exact Test")
            return self.response

        ## check if there is no target node in message KG
        if len(target_node_dict) == 0:
            self.response.error(
                f"No target node found in message KG for Fisher's Exact Test")
            return self.response

        ## check that the source node type is specified in QG. If not, throw an error
        if source_node_type is None:
            self.response.error(
                f"Source node with qnode id {source_qnode_id} was set to None in Query Graph. Please specify the node type"
            )
            return self.response
        else:
            pass

        ## check that the target node type is specified in QG. If not, throw an error
        if target_node_type is None:
            self.response.error(
                f"Target node with qnode id {target_qnode_id} was set to None in Query Graph. Please specify the node type"
            )
            return self.response
        else:
            pass

        ## check how many KPs were used in the message KG. If more than one, the one with the max number of edges connected to both source nodes and target nodes is used
        if len(collections.Counter(edge_expand_kp)) == 1:
            kp = edge_expand_kp[0]
        else:
            occurrences = collections.Counter(edge_expand_kp)
            max_index = max((count, index) for index, count in enumerate(occurrences.values()))[1]  # if more than one KP has the maximum number of edges, the one encountered last is chosen
            kp = list(occurrences.keys())[max_index]
            self.response.debug(f"{occurrences}")
            self.response.warning(
                f"More than one knowledge provider was detected to be used for expanding the edges connected to both source node with qnode id {source_qnode_id} and target node with qnode id {target_qnode_id}"
            )
            self.response.warning(
                f"The knowledge provider {kp} was used to calculate Fisher's exact test because it has the maximum number of edges connected to both source node with qnode id {source_qnode_id} and target node with qnode id {target_qnode_id}"
            )

        ## Print out some information used to calculate FET
        if len(source_node_list) == 1:
            self.response.debug(
                f"{len(source_node_list)} source node with qnode id {source_qnode_id} and node type {source_node_type} was found in message KG and used to calculate Fisher's Exact Test"
            )
        else:
            self.response.debug(
                f"{len(source_node_list)} source nodes with qnode id {source_qnode_id} and node type {source_node_type} was found in message KG and used to calculate Fisher's Exact Test"
            )
        if len(target_node_dict) == 1:
            self.response.debug(
                f"{len(target_node_dict)} target node with qnode id {target_qnode_id} and node type {target_node_type} was found in message KG and used to calculate Fisher's Exact Test"
            )
        else:
            self.response.debug(
                f"{len(target_node_dict)} target nodes with qnode id {target_qnode_id} and node type {target_node_type} was found in message KG and used to calculate Fisher's Exact Test"
            )

        # find all nodes with the same type as the 'source_qnode_id' nodes in the specified KP ('ARAX/KG1','ARAX/KG2','BTE') that are adjacent to the target nodes
        if kp == "ARAX/KG1":
            # query adjacent node in one DSL command by providing a list of query nodes to add_qnode()
            if rel_edge_id:
                if len(rel_edge_type) == 1:  # if the edge with rel_edge_id has only one type, we use this rel_edge_type to find all source nodes in the KP
                    self.response.debug(
                        f"{kp} and edge relation type {list(rel_edge_type)[0]} were used to calculate total adjacent nodes in Fisher's Exact Test"
                    )
                    result = self.query_size_of_adjacent_nodes(
                        node_curie=list(target_node_dict.keys()),
                        adjacent_type=source_node_type,
                        kp=kp,
                        rel_type=list(rel_edge_type)[0],
                        use_cypher_command=True)
                else:  # if the edge with rel_edge_id has more than one type, we ignore the edge type and use all types to find all source nodes in KP
                    self.response.warning(
                        f"The edges with specified qedge id {rel_edge_id} have more than one type, so the edge type is ignored and all types are used to calculate Fisher's Exact Test"
                    )
                    self.response.debug(
                        f"{kp} was used to calculate total adjacent nodes in Fisher's Exact Test"
                    )
                    result = self.query_size_of_adjacent_nodes(
                        node_curie=list(target_node_dict.keys()),
                        adjacent_type=source_node_type,
                        kp=kp,
                        rel_type=None,
                        use_cypher_command=True)
            else:  # if no rel_edge_id is specified, we ignore the edge type and use all types to find all source nodes in KP
                self.response.debug(
                    f"{kp} was used to calculate total adjacent nodes in Fisher's Exact Test"
                )
                result = self.query_size_of_adjacent_nodes(
                    node_curie=list(target_node_dict.keys()),
                    adjacent_type=source_node_type,
                    kp=kp,
                    rel_type=None,
                    use_cypher_command=True)

            if result is None:
                return self.response  ## Something wrong happened for querying the adjacent nodes
            else:
                size_of_target = result
        else:
            # query adjacent node for query nodes one by one in parallel
            if rel_edge_id:
                if len(rel_edge_type) == 1:  # if the edge with rel_edge_id has only one type, we use this rel_edge_type to find all source nodes in the KP
                    self.response.debug(
                        f"{kp} and edge relation type {list(rel_edge_type)[0]} were used to calculate total adjacent nodes in Fisher's Exact Test"
                    )
                    parameter_list = [
                        (node, source_node_type, kp, list(rel_edge_type)[0])
                        for node in list(target_node_dict.keys())
                    ]
                else:  # if the edge with rel_edge_id has more than one type, we ignore the edge type and use all types to find all source nodes in KP
                    self.response.warning(
                        f"The edges with specified qedge id {rel_edge_id} have more than one type, so the edge type is ignored and all types are used to calculate Fisher's Exact Test"
                    )
                    self.response.debug(
                        f"{kp} was used to calculate total adjacent nodes in Fisher's Exact Test"
                    )
                    parameter_list = [(node, source_node_type, kp, None)
                                      for node in list(target_node_dict.keys())
                                      ]
            else:  # if no rel_edge_id is specified, we ignore the edge type and use all types to find all source nodes in KP
                self.response.debug(
                    f"{kp} was used to calculate total adjacent nodes in Fisher's Exact Test"
                )
                parameter_list = [(node, source_node_type, kp, None)
                                  for node in list(target_node_dict.keys())]

            ## get the count of all nodes with the type of 'source_qnode_id' nodes in KP for each target node in parallel
            try:
                with multiprocessing.Pool() as executor:
                    target_count_res = [
                        elem for elem in executor.map(
                            self._query_size_of_adjacent_nodes_parallel,
                            parameter_list)
                    ]
                    executor.close()
            except:
                tb = traceback.format_exc()
                error_type, error, _ = sys.exc_info()
                self.response.error(tb, error_code=error_type.__name__)
                self.response.error(
                    f"Something went wrong with querying adjacent nodes in parallel"
                )
                return self.response

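            # A list element means a worker returned error messages instead of an adjacent-node count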
            if any([type(elem) is list for elem in target_count_res]):
                for msg in [
                        elem2 for elem1 in target_count_res
                        if type(elem1) is list for elem2 in elem1
                ]:
                    if type(msg) is tuple:
                        self.response.error(msg[0], error_code=msg[1])
                    else:
                        self.response.error(msg)
                return self.response  ## Something wrong happened for querying the adjacent nodes
            else:
                for node, count in zip(target_node_dict, target_count_res):
                    size_of_target[node] = count

        ## Based on the KP detected in the message KG, find the total number of nodes with the same type as the source node
        if kp == 'ARAX/KG1':
            size_of_total = self.size_of_given_type_in_KP(
                node_type=source_node_type, use_cypher_command=True,
                kg='KG1')  ## Try cypher query first
            if size_of_total is not None:
                if size_of_total != 0:
                    self.response.debug(
                        f"ARAX/KG1 and cypher query were used to calculate the total number of nodes with the same type as the source node in Fisher's Exact Test"
                    )
                    self.response.debug(
                        f"Total {size_of_total} nodes with node type {source_node_type} were found in ARAX/KG1"
                    )
                    pass
                else:
                    size_of_total = self.size_of_given_type_in_KP(
                        node_type=source_node_type,
                        use_cypher_command=False,
                        kg='KG1'
                    )  ## If cypher query fails, then try kgNodeIndex
                    if size_of_total == 0:
                        self.response.error(
                            f"KG1 has 0 nodes with the same type as the source node with qnode id {source_qnode_id}"
                        )
                        return self.response
                    else:
                        self.response.debug(
                            f"ARAX/KG1 and kgNodeIndex were used to calculate the total number of nodes with the same type as the source node in Fisher's Exact Test"
                        )
                        self.response.debug(
                            f"Total {size_of_total} nodes with node type {source_node_type} were found in ARAX/KG1"
                        )
                        pass
            else:
                return self.response  ## Something wrong happened for querying total number of node with the same type of source node

        elif kp == 'ARAX/KG2':
            ## check KG1 first as KG2 might have many duplicates. If KG1 is 0, then check KG2
            size_of_total = self.size_of_given_type_in_KP(
                node_type=source_node_type, use_cypher_command=True,
                kg='KG1')  ## Try cypher query first
            if size_of_total is not None:
                if size_of_total != 0:
                    self.response.warning(
                        f"Although ARAX/KG2 was found to have the maximum number of edges connected to both {source_qnode_id} and {target_qnode_id}, ARAX/KG1 and cypher query were used to find the total number of nodes with the same type as the source node with qnode id {source_qnode_id}, since KG2 might have many duplicates"
                    )
                    self.response.debug(
                        f"Total {size_of_total} nodes with node type {source_node_type} were found in ARAX/KG1"
                    )
                    pass
                else:
                    size_of_total = self.size_of_given_type_in_KP(
                        node_type=source_node_type,
                        use_cypher_command=False,
                        kg='KG1'
                    )  ## If cypher query fails, then try kgNodeIndex
                    if size_of_total is not None:
                        if size_of_total != 0:
                            self.response.warning(
                                f"Although ARAX/KG2 was found to have the maximum number of edges connected to both {source_qnode_id} and {target_qnode_id}, ARAX/KG1 and kgNodeIndex were used to find the total number of nodes with the same type as the source node with qnode id {source_qnode_id}, since KG2 might have many duplicates"
                            )
                            self.response.debug(
                                f"Total {size_of_total} nodes with node type {source_node_type} were found in ARAX/KG1"
                            )
                            pass
                        else:
                            size_of_total = self.size_of_given_type_in_KP(
                                node_type=source_node_type,
                                use_cypher_command=False,
                                kg='KG2')
                            if size_of_total is None:
                                return self.response  ## Something wrong happened for querying total number of node with the same type of source node
                            elif size_of_total == 0:
                                self.response.error(
                                    f"KG2 has 0 nodes with the same type as the source node with qnode id {source_qnode_id}"
                                )
                                return self.response
                            else:
                                self.response.debug(
                                    f"ARAX/KG2 and kgNodeIndex were used to calculate the total number of nodes with the same type as the source node in Fisher's Exact Test"
                                )
                                self.response.debug(
                                    f"Total {size_of_total} nodes with node type {source_node_type} were found in ARAX/KG2"
                                )
                                pass
                    else:
                        return self.response  ## Something wrong happened for querying total number of node with the same type of source node
            else:
                return self.response  ## Something wrong happened for querying total number of node with the same type of source node
        else:
            self.response.error(
                f"Currently only KG1 or KG2 can be used to calculate Fisher's exact test"
            )
            return self.response

        size_of_query_sample = len(source_node_list)

        self.response.debug(f"Computing Fisher's Exact Test P-value")
        # calculate FET p-value for each target node in parallel
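        # Each tuple holds a target node id plus the four cells of its 2x2 contingency table:
        #   (target node id,
        #    KG source nodes connected to it,
        #    other source-type nodes in the KP adjacent to it,
        #    KG source nodes not connected to it,
        #    remaining source-type nodes in the KP)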
        parameter_list = [
            (node, len(target_node_dict[node]),
             size_of_target[node] - len(target_node_dict[node]),
             size_of_query_sample - len(target_node_dict[node]),
             (size_of_total - size_of_target[node]) -
             (size_of_query_sample - len(target_node_dict[node])))
            for node in target_node_dict
        ]

        try:
            with multiprocessing.Pool() as executor:
                FETpvalue_list = [
                    elem for elem in executor.map(
                        self._calculate_FET_pvalue_parallel, parameter_list)
                ]
                executor.close()
        except:
            tb = traceback.format_exc()
            error_type, error, _ = sys.exc_info()
            self.response.error(tb, error_code=error_type.__name__)
            self.response.error(
                f"Something went wrong with computing Fisher's Exact Test P-value"
            )
            return self.response

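        # A list element means a worker returned error messages instead of a (node, p-value) pair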
        if any([type(elem) is list for elem in FETpvalue_list]):
            for msg in [
                    elem2 for elem1 in FETpvalue_list if type(elem1) is list
                    for elem2 in elem1
            ]:
                if type(msg) is tuple:
                    self.response.error(msg[0], error_code=msg[1])
                else:
                    self.response.error(msg)
            return self.response
        else:
            output = dict(FETpvalue_list)

        # check if the results need to be filtered
        output = dict(sorted(output.items(), key=lambda x: x[1]))
        if cutoff:
            output = dict(filter(lambda x: x[1] < cutoff, output.items()))
        else:
            pass
        if top_n:
            output = dict(list(output.items())[:top_n])
        else:
            pass

        # add the virtual edge with FET result to message KG
        self.response.debug(
            f"Adding virtual edge with FET result to message KG")

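        # One virtual edge per (source node, target node) pair that passed the cutoff/top_n filtering;
        # each enumerated tuple is (virtual relation label, FET p-value, source node curie, target node curie)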
        virtual_edge_list = [
            Edge(id=f"{value[0]}_{index}",
                 type='has_fisher_exact_test_p-value_with',
                 relation=value[0],
                 source_id=value[2],
                 target_id=value[3],
                 is_defined_by="ARAX",
                 defined_datetime=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                 provided_by="ARAX",
                 confidence=None,
                 weight=None,
                 edge_attributes=[
                     EdgeAttribute(type="data:1669",
                                   name="fisher_exact_test_p-value",
                                   value=str(value[1]),
                                   url=None)
                 ],
                 qedge_ids=[value[0]]) for index, value in enumerate(
                     [(virtual_relation_label, output[adj], node, adj)
                      for adj in target_node_dict if adj in output.keys()
                      for node in target_node_dict[adj]], 1)
        ]

        self.message.knowledge_graph.edges.extend(virtual_edge_list)

        count = len(virtual_edge_list)

        self.response.debug(
            f"{count} new virtual edges were added to message KG")

        # add the virtual edge to message QG
        if count > 0:
            self.response.debug(f"Adding virtual edge to message QG")
            edge_type = "has_fisher_exact_test_p-value_with"
            q_edge = QEdge(id=virtual_relation_label,
                           type=edge_type,
                           relation=virtual_relation_label,
                           source_id=source_qnode_id,
                           target_id=target_qnode_id)
            self.message.query_graph.edges.append(q_edge)
            self.response.debug(f"One virtual edge was added to message QG")

        return self.response
 def _create_icees_edge_attribute(self, p_value):
     return EdgeAttribute(name=self.icees_attribute_name,
                          value=p_value,
                          type=self.icees_attribute_type)
    def compute_ngd(self):
        """
        Iterate over all the edges in the knowledge graph, compute the normalized google distance and stick that info
        on the edge_attributes
        :default: The default value to set for NGD if it returns a nan
        :return: response
        """
        if self.response.status != 'OK':  # Catches any errors that may have been logged during initialization
            self._close_database()
            return self.response
        parameters = self.parameters
        self.response.debug(f"Computing NGD")
        self.response.info(
            f"Computing the normalized Google distance: weighting edges based on source/target node "
            f"co-occurrence frequency in PubMed abstracts")

        self.response.info(
            "Converting CURIE identifiers to human readable names")
        node_curie_to_name = dict()
        try:
            for node in self.message.knowledge_graph.nodes:
                node_curie_to_name[node.id] = node.name
        except:
            tb = traceback.format_exc()
            error_type, error, _ = sys.exc_info()
            self.response.error(f"Something went wrong when converting names")
            self.response.error(tb, error_code=error_type.__name__)

        name = "normalized_google_distance"
        type = "EDAM:data_2526"
        value = self.parameters['default_value']
        url = "https://arax.rtx.ai/api/rtx/v1/ui/#/PubmedMeshNgd"

        # if you want to add virtual edges, identify the source/targets, decorate the edges, add them to the KG, and then add one to the QG corresponding to them
        if 'virtual_relation_label' in parameters:
            source_curies_to_decorate = set()
            target_curies_to_decorate = set()
            curies_to_names = dict()
            # identify the nodes that we should be adding virtual edges for
            for node in self.message.knowledge_graph.nodes:
                if hasattr(node, 'qnode_ids'):
                    if parameters['source_qnode_id'] in node.qnode_ids:
                        source_curies_to_decorate.add(node.id)
                        curies_to_names[node.id] = node.name
                    if parameters['target_qnode_id'] in node.qnode_ids:
                        target_curies_to_decorate.add(node.id)
                        curies_to_names[node.id] = node.name

            # Convert these curies to their canonicalized curies (needed for the local NGD system)
            canonicalized_curie_map = self._get_canonical_curies_map(
                list(source_curies_to_decorate.union(
                    target_curies_to_decorate)))
            self.load_curie_to_pmids_data(canonicalized_curie_map.values())
            added_flag = False  # check to see if any edges were added
            num_computed_total = 0
            num_computed_slow = 0
            self.response.debug(
                f"Looping through node pairs and calculating NGD values")
            # iterate over all pairs of these nodes, add the virtual edge, decorate with the correct attribute
            for (source_curie,
                 target_curie) in itertools.product(source_curies_to_decorate,
                                                    target_curies_to_decorate):
                # create the edge attribute if it can be
                source_name = curies_to_names[source_curie]
                target_name = curies_to_names[target_curie]
                num_computed_total += 1
                canonical_source_curie = canonicalized_curie_map.get(
                    source_curie, source_curie)
                canonical_target_curie = canonicalized_curie_map.get(
                    target_curie, target_curie)
                ngd_value = self.calculate_ngd_fast(canonical_source_curie,
                                                    canonical_target_curie)
                if ngd_value is None:
                    ngd_value = self.NGD.get_ngd_for_all(
                        [source_curie, target_curie],
                        [source_name, target_name])
                    self.response.debug(
                        f"Had to use eUtils to compute NGD between {source_name} "
                        f"({canonical_source_curie}) and {target_name} ({canonical_target_curie}). "
                        f"Value is: {ngd_value}")
                    num_computed_slow += 1
                # Use the computed NGD when it is finite; otherwise fall back to the default value
                value = ngd_value if np.isfinite(ngd_value) else self.parameters['default_value']
                edge_attribute = EdgeAttribute(
                    type=type, name=name, value=str(value),
                    url=url)  # populate the NGD edge attribute
                if edge_attribute:
                    added_flag = True
                    # make the edge, add the attribute

                    # edge properties
                    now = datetime.now()
                    edge_type = "has_normalized_google_distance_with"
                    qedge_ids = [parameters['virtual_relation_label']]
                    relation = parameters['virtual_relation_label']
                    is_defined_by = "ARAX"
                    defined_datetime = now.strftime("%Y-%m-%d %H:%M:%S")
                    provided_by = "ARAX"
                    confidence = None
                    weight = None  # TODO: could make the actual value of the attribute
                    source_id = source_curie
                    target_id = target_curie

                    # now actually add the virtual edges in
                    id = f"{relation}_{self.global_iter}"
                    self.global_iter += 1
                    edge = Edge(id=id,
                                type=edge_type,
                                relation=relation,
                                source_id=source_id,
                                target_id=target_id,
                                is_defined_by=is_defined_by,
                                defined_datetime=defined_datetime,
                                provided_by=provided_by,
                                confidence=confidence,
                                weight=weight,
                                edge_attributes=[edge_attribute],
                                qedge_ids=qedge_ids)
                    self.message.knowledge_graph.edges.append(edge)

            # Now add a q_edge to the query_graph since I've added an extra edge to the KG
            if added_flag:
                #edge_type = parameters['virtual_edge_type']
                edge_type = "has_normalized_google_distance_with"
                relation = parameters['virtual_relation_label']
                q_edge = QEdge(id=relation,
                               type=edge_type,
                               relation=relation,
                               source_id=parameters['source_qnode_id'],
                               target_id=parameters['target_qnode_id'])
                self.message.query_graph.edges.append(q_edge)

            self.response.info(f"NGD values successfully added to edges")
            num_computed_fast = num_computed_total - num_computed_slow
            percent_computed_fast = round(
                (num_computed_fast / num_computed_total) * 100)
            self.response.debug(
                f"Used fastNGD for {percent_computed_fast}% of edges "
                f"({num_computed_fast} of {num_computed_total})")
        else:  # you want to add it for each edge in the KG
            # iterate over KG edges, add the information
            try:
                # Map all nodes to their canonicalized curies in one batch (need canonical IDs for the local NGD system)
                canonicalized_curie_map = self._get_canonical_curies_map(
                    [node.id for node in self.message.knowledge_graph.nodes])
                self.load_curie_to_pmids_data(canonicalized_curie_map.values())
                num_computed_total = 0
                num_computed_slow = 0
                self.response.debug(
                    f"Looping through edges and calculating NGD values")
                for edge in self.message.knowledge_graph.edges:
                    # Make sure the edge_attributes are not None
                    if not edge.edge_attributes:
                        edge.edge_attributes = [
                        ]  # should be an array, but why not a list?
                    # now go and actually get the NGD
                    source_curie = edge.source_id
                    target_curie = edge.target_id
                    source_name = node_curie_to_name[source_curie]
                    target_name = node_curie_to_name[target_curie]
                    num_computed_total += 1
                    canonical_source_curie = canonicalized_curie_map.get(
                        source_curie, source_curie)
                    canonical_target_curie = canonicalized_curie_map.get(
                        target_curie, target_curie)
                    ngd_value = self.calculate_ngd_fast(
                        canonical_source_curie, canonical_target_curie)
                    if ngd_value is None:
                        ngd_value = self.NGD.get_ngd_for_all(
                            [source_curie, target_curie],
                            [source_name, target_name])
                        self.response.debug(
                            f"Had to use eUtils to compute NGD between {source_name} "
                            f"({canonical_source_curie}) and {target_name} ({canonical_target_curie}). "
                            f"Value is: {ngd_value}")
                        num_computed_slow += 1
                    # Use the computed NGD when it is finite; otherwise fall back to the default value
                    value = ngd_value if np.isfinite(ngd_value) else self.parameters['default_value']
                    ngd_edge_attribute = EdgeAttribute(
                        type=type, name=name, value=str(value),
                        url=url)  # populate the NGD edge attribute
                    edge.edge_attributes.append(
                        ngd_edge_attribute
                    )  # append it to the list of attributes
            except:
                tb = traceback.format_exc()
                error_type, error, _ = sys.exc_info()
                self.response.error(tb, error_code=error_type.__name__)
                self.response.error(
                    f"Something went wrong adding the NGD edge attributes")
            else:
                self.response.info(f"NGD values successfully added to edges")
                num_computed_fast = num_computed_total - num_computed_slow
                percent_computed_fast = round(
                    (num_computed_fast / num_computed_total) * 100)
                self.response.debug(
                    f"Used fastNGD for {percent_computed_fast}% of edges "
                    f"({num_computed_fast} of {num_computed_total})")
            self._close_database()
            return self.response
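
The fast path above looks up lists of PubMed IDs for each canonicalized CURIE (load_curie_to_pmids_data / calculate_ngd_fast) and presumably scores each pair with the standard normalized Google distance formula. Below is a minimal, self-contained sketch of that formula over two PMID sets; the corpus-size constant and the toy PMID values are assumptions for illustration, not values from the example.

import math
from typing import Optional, Set

def ngd_from_pmid_sets(pmids_x: Set[str], pmids_y: Set[str],
                       corpus_size: float = 3.5e7) -> Optional[float]:
    """Normalized Google distance between two sets of PubMed IDs.

    corpus_size approximates the number of indexed PubMed articles; it is an
    assumed constant here, not a value taken from the example above.
    """
    n_x, n_y = len(pmids_x), len(pmids_y)
    n_xy = len(pmids_x & pmids_y)
    if n_x == 0 or n_y == 0 or n_xy == 0:
        return None  # mirrors the example's behavior of falling back to a slower method
    numerator = max(math.log(n_x), math.log(n_y)) - math.log(n_xy)
    denominator = math.log(corpus_size) - min(math.log(n_x), math.log(n_y))
    return numerator / denominator

# Toy usage with hypothetical PMID sets:
print(ngd_from_pmid_sets({"PMID:1", "PMID:2", "PMID:3"}, {"PMID:2", "PMID:3", "PMID:4"}))
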
Example #8
0
    def compute_ngd(self):
        """
        Iterate over all the edges in the knowledge graph, compute the normalized google distance and stick that info
        on the edge_attributes
        :default: The default value to set for NGD if it returns a nan
        :return: response
        """
        parameters = self.parameters
        self.response.debug(f"Computing NGD")
        self.response.info(f"Computing the normalized Google distance: weighting edges based on source/target node "
                           f"co-occurrence frequency in PubMed abstracts")

        self.response.info("Converting CURIE identifiers to human readable names")
        node_curie_to_name = dict()
        try:
            for node in self.message.knowledge_graph.nodes:
                node_curie_to_name[node.id] = node.name
        except:
            tb = traceback.format_exc()
            error_type, error, _ = sys.exc_info()
            self.response.error(f"Something went wrong when converting names")
            self.response.error(tb, error_code=error_type.__name__)


        self.response.warning(f"Utilizing API calls to NCBI eUtils, so this may take a while...")
        name = "normalized_google_distance"
        type = "data:2526"
        value = self.parameters['default_value']
        url = "https://arax.rtx.ai/api/rtx/v1/ui/#/PubmedMeshNgd"
        ngd_method_counts = {"fast": 0, "slow": 0}

        # if you want to add virtual edges, identify the source/targets, decorate the edges, add them to the KG, and then add one to the QG corresponding to them
        if 'virtual_relation_label' in parameters:
            source_curies_to_decorate = set()
            target_curies_to_decorate = set()
            curies_to_names = dict()
            # identify the nodes that we should be adding virtual edges for
            for node in self.message.knowledge_graph.nodes:
                if hasattr(node, 'qnode_ids'):
                    if parameters['source_qnode_id'] in node.qnode_ids:
                        source_curies_to_decorate.add(node.id)
                        curies_to_names[node.id] = node.name
                    if parameters['target_qnode_id'] in node.qnode_ids:
                        target_curies_to_decorate.add(node.id)
                        curies_to_names[node.id] = node.name
            added_flag = False  # check to see if any edges were added
            # iterate over all pairs of these nodes, add the virtual edge, decorate with the correct attribute
            for (source_curie, target_curie) in itertools.product(source_curies_to_decorate, target_curies_to_decorate):
                # create the edge attribute if it can be
                source_name = curies_to_names[source_curie]
                target_name = curies_to_names[target_curie]
                self.response.debug(f"Computing NGD between {source_name} and {target_name}")
                ngd_value, method_used = self.NGD.get_ngd_for_all_fast([source_curie, target_curie], [source_name, target_name])
                ngd_method_counts[method_used] += 1
                # Use the computed NGD when it is finite; otherwise fall back to the default value
                value = ngd_value if np.isfinite(ngd_value) else self.parameters['default_value']
                edge_attribute = EdgeAttribute(type=type, name=name, value=str(value), url=url)  # populate the NGD edge attribute
                if edge_attribute:
                    added_flag = True
                    # make the edge, add the attribute

                    # edge properties
                    now = datetime.now()
                    edge_type = "has_normalized_google_distance_with"
                    qedge_ids = [parameters['virtual_relation_label']]
                    relation = parameters['virtual_relation_label']
                    is_defined_by = "ARAX"
                    defined_datetime = now.strftime("%Y-%m-%d %H:%M:%S")
                    provided_by = "ARAX"
                    confidence = None
                    weight = None  # TODO: could make the actual value of the attribute
                    source_id = source_curie
                    target_id = target_curie

                    # now actually add the virtual edges in
                    id = f"{relation}_{self.global_iter}"
                    self.global_iter += 1
                    edge = Edge(id=id, type=edge_type, relation=relation, source_id=source_id,
                                target_id=target_id,
                                is_defined_by=is_defined_by, defined_datetime=defined_datetime,
                                provided_by=provided_by,
                                confidence=confidence, weight=weight, edge_attributes=[edge_attribute], qedge_ids=qedge_ids)
                    self.message.knowledge_graph.edges.append(edge)

            # Now add a q_edge to the query_graph since I've added an extra edge to the KG
            if added_flag:
                #edge_type = parameters['virtual_edge_type']
                edge_type = "has_normalized_google_distance_with"
                relation = parameters['virtual_relation_label']
                q_edge = QEdge(id=relation, type=edge_type, relation=relation,
                               source_id=parameters['source_qnode_id'],
                               target_id=parameters['target_qnode_id'])
                self.message.query_graph.edges.append(q_edge)
        else:  # you want to add it for each edge in the KG
            # iterate over KG edges, add the information
            try:
                for edge in self.message.knowledge_graph.edges:
                    # Make sure the edge_attributes are not None
                    if not edge.edge_attributes:
                        edge.edge_attributes = []  # should be an array, but why not a list?
                    # now go and actually get the NGD
                    source_curie = edge.source_id
                    target_curie = edge.target_id
                    source_name = node_curie_to_name[source_curie]
                    target_name = node_curie_to_name[target_curie]
                    ngd_value, method_used = self.NGD.get_ngd_for_all_fast([source_curie, target_curie], [source_name, target_name])
                    ngd_method_counts[method_used] += 1
                    # Use the computed NGD when it is finite; otherwise fall back to the default value
                    value = ngd_value if np.isfinite(ngd_value) else self.parameters['default_value']
                    ngd_edge_attribute = EdgeAttribute(type=type, name=name, value=str(value), url=url)  # populate the NGD edge attribute
                    edge.edge_attributes.append(ngd_edge_attribute)  # append it to the list of attributes
            except:
                tb = traceback.format_exc()
                error_type, error, _ = sys.exc_info()
                self.response.error(tb, error_code=error_type.__name__)
                self.response.error(f"Something went wrong adding the NGD edge attributes")
            else:
                self.response.info(f"NGD values successfully added to edges")
                self.response.debug(f"Used fast NGD for {ngd_method_counts['fast']} edges, back-up NGD method for {ngd_method_counts['slow']}")

            return self.response
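
This second variant relies on NCBI eUtils for PubMed occurrence counts rather than a local lookup, which is why it warns that the computation may take a while and tracks fast/slow method counts. A rough sketch of retrieving such counts from the eUtils esearch endpoint follows; it illustrates the general approach rather than the internals of get_ngd_for_all_fast, and the search terms are hypothetical. The resulting marginal and joint counts can be plugged into the NGD formula sketched earlier for the local-database variant.

import requests

EUTILS_ESEARCH = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi"

def pubmed_count(term: str) -> int:
    """Number of PubMed records matching a search term, via eUtils esearch."""
    params = {"db": "pubmed", "term": term, "retmode": "json", "retmax": 0}
    response = requests.get(EUTILS_ESEARCH, params=params, timeout=30)
    response.raise_for_status()
    return int(response.json()["esearchresult"]["count"])

# Hypothetical terms standing in for the source/target node names used in the example:
n_x = pubmed_count("ibuprofen")
n_y = pubmed_count("headache")
n_xy = pubmed_count("(ibuprofen) AND (headache)")
print(n_x, n_y, n_xy)
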
Example #9
0
    def compute_jaccard(self):
        message = self.message
        parameters = self.parameters
        self.response.debug(f"Computing Jaccard distance and adding this information as virtual edges")
        self.response.info(f"Computing Jaccard distance and adding this information as virtual edges")

        self.response.info("Getting all relevant nodes")
        # TODO: should I check that they're connected to the start node, or just assume that they are?
        # TODO: For now, assume that they are
        try:
            intermediate_nodes = set()
            end_node_to_intermediate_node_set = dict()  # keys will be end node curies, values will be tuples the (intermediate curie ids, edge_type)
            for node in message.knowledge_graph.nodes:
                if parameters['intermediate_node_id'] in node.qnode_ids:
                    intermediate_nodes.add(node.id)  # add the intermediate node by its identifier
                # also look for the source node id
                if parameters['start_node_id'] in node.qnode_ids:
                    source_node_id = node.id
                if parameters['end_node_id'] in node.qnode_ids:
                    end_node_to_intermediate_node_set[node.id] = set()

            # now iterate over the edges to look for the ones we need to add  # TODO: Here, I won't care which direction the edges are pointing
            for edge in message.knowledge_graph.edges:
                if edge.source_id in intermediate_nodes:  # if source is intermediate
                    if edge.target_id in end_node_to_intermediate_node_set:
                        end_node_to_intermediate_node_set[edge.target_id].add((edge.source_id, edge.type))  # add source
                elif edge.target_id in intermediate_nodes:  # if target is intermediate
                    if edge.source_id in end_node_to_intermediate_node_set:
                        end_node_to_intermediate_node_set[edge.source_id].add((edge.target_id, edge.type))  # add target

            # now compute the actual jaccard indexes
            denom = len(intermediate_nodes)
            end_node_to_jaccard = dict()
            for end_node_id in end_node_to_intermediate_node_set:
                # TODO: add code here if you care about edge types
                numerator = len(end_node_to_intermediate_node_set[end_node_id])
                jacc = numerator / float(denom)
                end_node_to_jaccard[end_node_id] = jacc

            # now add them all as virtual edges

            # edge properties
            j_iter = 0
            now = datetime.now()
            #edge_type = parameters['virtual_edge_type']
            edge_type = 'has_jaccard_index_with'
            qedge_ids = [parameters['virtual_relation_label']]
            relation = parameters['virtual_relation_label']
            is_defined_by = "ARAX"
            defined_datetime = now.strftime("%Y-%m-%d %H:%M:%S")
            provided_by = "ARAX"
            confidence = None
            weight = None  # TODO: could make the jaccard index the weight
            try:
                source_id = source_node_id
            except:
                tb = traceback.format_exc()
                error_type, error, _ = sys.exc_info()
                self.response.warning(
                    f"Source node id: {parameters['start_node_id']} not found in the KG. Perhaps the KG is empty?")
                #self.response.error(tb, error_code=error_type.__name__)

            # edge attribute properties
            description = f"Jaccard index based on intermediate query nodes {parameters['intermediate_node_id']}"
            attribute_type = 'data:1772'
            name = "jaccard_index"
            url = None

            # now actually add the virtual edges in
            for end_node_id, value in end_node_to_jaccard.items():
                edge_attribute = EdgeAttribute(type=attribute_type, name=name, value=value, url=url)
                id = f"J{j_iter}"
                j_iter += 1
                target_id = end_node_id
                edge = Edge(id=id, type=edge_type, relation=relation, source_id=source_id, target_id=target_id,
                            is_defined_by=is_defined_by, defined_datetime=defined_datetime, provided_by=provided_by,
                            confidence=confidence, weight=weight, edge_attributes=[edge_attribute], qedge_ids=qedge_ids)
                message.knowledge_graph.edges.append(edge)

            # Now add a q_edge to the query_graph since I've added an extra edge to the KG
            q_edge = QEdge(id=relation, type=edge_type, relation=relation, source_id=parameters['start_node_id'], target_id=parameters['end_node_id'])  # TODO: ok to make the id and type the same thing?
            self.message.query_graph.edges.append(q_edge)

            return self.response
        except:
            tb = traceback.format_exc()
            error_type, error, _ = sys.exc_info()
            self.response.error(f"Something went wrong when computing the Jaccard index")
            self.response.error(tb, error_code=error_type.__name__)
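
The Jaccard index between two neighbor sets A and B is |A ∩ B| / |A ∪ B|. Because the code above assumes every intermediate node is connected to the start node (see its TODO comments), dividing each end node's intermediate count by the total number of intermediate nodes is equivalent to dividing by the size of the union. A minimal set-based sketch follows; the node identifiers are hypothetical.

from typing import Set

def jaccard_index(neighbors_a: Set[str], neighbors_b: Set[str]) -> float:
    """Jaccard index |A ∩ B| / |A ∪ B| between two sets of neighbor node IDs."""
    union = neighbors_a | neighbors_b
    if not union:
        return 0.0
    return len(neighbors_a & neighbors_b) / len(union)

# Hypothetical neighbor sets for a start node and one candidate end node:
start_neighbors = {"CHEMBL.COMPOUND:CHEMBL25", "CHEMBL.COMPOUND:CHEMBL112", "CHEMBL.COMPOUND:CHEMBL521"}
end_neighbors = {"CHEMBL.COMPOUND:CHEMBL112", "CHEMBL.COMPOUND:CHEMBL521"}
print(jaccard_index(start_neighbors, end_neighbors))  # 2 shared / 3 total = 0.666...
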
Example #10
0
    def test1(self):

        #### Create the response object and fill it with attributes about the response
        response = Response()
        response.context = "http://translator.ncats.io"
        response.id = "http://rtx.ncats.io/api/v1/response/1234"
        response.type = "medical_translator_query_response"
        response.tool_version = "RTX 0.4"
        response.schema_version = "0.5"
        response.datetime = datetime.datetime.now().strftime(
            "%Y-%m-%d %H:%M:%S")
        response.original_question_text = "what proteins are affected by sickle cell anemia"
        response.restated_question_text = "Which proteins are affected by sickle cell anemia?"
        response.result_code = "OK"
        response.message = "1 result found"

        #### Create a disease node
        node1 = Node()
        node1.id = "http://omim.org/entry/603903"
        node1.type = "disease"
        node1.name = "sickle cell anemia"
        node1.accession = "OMIM:603903"
        node1.description = "A disease characterized by chronic hemolytic anemia..."

        #### Create a protein node
        node2 = Node()
        node2.id = "https://www.uniprot.org/uniprot/P00738"
        node2.type = "protein"
        node2.name = "Haptoglobin"
        node2.symbol = "HP"
        node2.accession = "UNIPROT:P00738"
        node2.description = "Haptoglobin captures, and combines with free plasma hemoglobin..."

        #### Create a node attribute
        node2attribute1 = NodeAttribute()
        node2attribute1.type = "comment"
        node2attribute1.name = "Complex_description"
        node2attribute1.value = "The Hemoglobin/haptoglobin complex is composed of a haptoglobin dimer bound to two hemoglobin alpha-beta dimers"
        node2.node_attributes = [node2attribute1]

        #### Create an edge between these 2 nodes
        edge1 = Edge()
        edge1.type = "is_caused_by_a_defect_in"
        edge1.source_id = node1.id
        edge1.target_id = node2.id
        edge1.confidence = 1.0

        #### Add an origin and property for the edge
        origin1 = Origin()
        origin1.id = "https://api.monarchinitiative.org/api/bioentity/disease/OMIM:603903/genes/"
        origin1.type = "Monarch_BioLink_API_Relationship"

        #### Add an attribute
        attribute1 = EdgeAttribute()
        attribute1.type = "PubMed_article"
        attribute1.name = "Orthopaedic Manifestations of Sickle Cell Disease"
        attribute1.value = None
        attribute1.url = "https://www.ncbi.nlm.nih.gov/pubmed/29309293"
        origin1.attribute_list = [attribute1]
        edge1.origin_list = [origin1]

        #### Create the first result (potential answer)
        result1 = Result()
        result1.id = "http://rtx.ncats.io/api/v1/response/1234/result/2345"
        result1.text = "A free text description of this result"
        result1.confidence = 0.932

        #### Create a ResultGraph object and put the list of nodes and edges into it
        result_graph = ResultGraph()
        result_graph.node_list = [node1, node2]
        result_graph.edge_list = [edge1]

        #### Put the ResultGraph into the first result (potential answer)
        result1.result_graph = result_graph

        #### Put the first result (potential answer) into the response
        result_list = [result1]
        response.result_list = result_list

        print(response)