def calculate2(pred, truth, full_matrix=True, method='default', pseudo_counts=None):
    '''Calculate the score for SubChallenge 2

    :param pred: predicted co-clustering matrix
    :param truth: true co-clustering matrix
    :param full_matrix: logical for whether to use the full matrix or just the upper triangular matrices when calculating the score
    :param method: scoring metric to use; the default ('default') is the average of the normalized pseudoV, Pearson and MCC scores
    :param pseudo_counts: number of pseudo counts to add to the matrices
    :return: subchallenge 2 score for the predicted co-clustering matrix
    '''
    larger_is_worse_methods = ['pseudoV', 'sym_pseudoV'] # methods where a larger score is worse

    pc_pred = add_pseudo_counts(np.copy(pred), num=pseudo_counts) # add pseudo counts to the matrices
    pc_truth = add_pseudo_counts(np.copy(truth), num=pseudo_counts) # use np.copy so that the original values are not altered
    ncluster = add_pseudo_counts(mb.get_ccm('NCluster', truth), num=pseudo_counts) # predicted CCM when every mutation is in its own cluster
    onecluster = add_pseudo_counts(mb.get_ccm('OneCluster', truth), num=pseudo_counts) # predicted CCM when all mutations are in the same cluster

    func_dict = {
        "orig": calculate2_orig,
        "sqrt": calculate2_sqrt,
        "pseudoV": calculate2_pseudoV,
        "sym_pseudoV": calculate2_sym_pseudoV,
        "spearman": calculate2_spearman,
        "pearson": calculate2_pearson,
        "aupr": calculate2_aupr,
        "mcc": calculate2_mcc
    }

    func = func_dict.get(method, None)
    if func is None:
        scores = []

        for m in ['pseudoV', 'pearson', 'mcc']:
            score = func_dict[m](pc_pred, pc_truth, full_matrix=full_matrix)

            # normalize the scores to be between (worst of OneCluster and NCluster scores) and (Truth score)
            ncluster_score = func_dict[m](ncluster, pc_truth, full_matrix=full_matrix)
            onecluster_score = func_dict[m](onecluster, pc_truth, full_matrix=full_matrix)
            if m in larger_is_worse_methods: # normalize scores where a larger score is worse
                # normalize the scores to be between 0 and 1 where 1 is the true matrix
                # and zero is the worse score of the NCluster matrix and the OneCluster matrix
                worst_score = max(ncluster_score, onecluster_score)
                score = 1 - (score / worst_score)
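                # e.g. if worst_score = 10 and score = 2, the normalized score is 1 - 2/10 = 0.8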
            else: # normalize scores where a smaller score is worse
                worst_score = min(ncluster_score, onecluster_score)
                score = (score - worst_score) / (1 - worst_score)
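                # e.g. if worst_score = 0.2 and score = 0.6, the normalized score is (0.6 - 0.2) / (1 - 0.2) = 0.5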
            scores.append(score)

        return np.mean(scores)
    else:
        score = func(pc_pred, pc_truth, full_matrix=full_matrix)
        ncluster_score = func(ncluster, pc_truth, full_matrix=full_matrix)
        onecluster_score = func(onecluster, pc_truth, full_matrix=full_matrix)
        if method in larger_is_worse_methods:
            # normalize the scores to be between 0 and 1, where 1 is the true matrix and zero is
            # the worse score of the NCluster matrix and the OneCluster matrix - similar to above
            worst_score = max(ncluster_score, onecluster_score)
            score = 1 - (score / worst_score)
        else:
            worst_score = min(ncluster_score, onecluster_score)
            score = (score - worst_score) / (1 - worst_score)
        return score
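

# A minimal usage sketch (an illustration, not part of the original module):
# assuming `pred` and `truth` are square NumPy co-clustering matrices with
# entries in [0, 1], produced elsewhere in the pipeline:
#
#     pred = np.loadtxt('pred_ccm.txt')    # hypothetical input file
#     truth = np.loadtxt('truth_ccm.txt')  # hypothetical input file
#     print(calculate2(pred, truth))                    # default: mean of normalized pseudoV, Pearson, MCC
#     print(calculate2(pred, truth, method='pseudoV'))  # single normalized metric

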
def calculate3(pred_ccm, pred_ad, truth_ccm, truth_ad, method="sym_pseudoV_nc", weights=None, verbose=False, pseudo_counts=True, full_matrix=True, in_mat=2):
    """Calculate the score for subchallenge 3 using the given metric or a weighted average of the
    given metrics, if more than one are specified.

    :param pred_ccm: predicted co-clustering matrix
    :param pred_ad: predicted ancestor-descendant matrix
    :param truth_ccm: true co-clustering matrix
    :param truth_ad: true ancestor-descendant matrix
    :param method: method to use when evaluating the submission, or a list of methods to use
    :param weights: weights to use when averaging the scores of different methods.
        Only used if 'method' is a list, in which case it must be a list of numbers of the same length as 'method'.
    :param verbose: boolean for whether to display information about the score calculations
    :param pseudo_counts: whether to add pseudo counts to the matrices; if an integer, the number of pseudo counts to add
    :param full_matrix: boolean for whether to use the full CCM/AD matrix when calculating the score
    :param in_mat: number representing which matrices to use in calculating the SC3 scoring metric
        Options:
            1 - use all input matrices, i.e. CCM, ADM, ADM^T and CM
            2 - use all except the co-clustering matrix (CCM)
            3 - use all except the ancestor-descendant matrix (ADM)
            4 - use all except ADM^T
            5 - use all except cousin matrix (CM)
    :return: score for the given submission to subchallenge 3 using the given metric
    """
    larger_is_worse_methods = ['sym_pseudoV_nc', 'sym_pseudoV', 'pseudoV_nc', 'pseudoV', "simpleKL_nc", 'simpleKL'] # methods where a larger score is worse

    if pseudo_counts:
        if isinstance(pseudo_counts, int) and not isinstance(pseudo_counts, bool): # bool is a subclass of int, so pseudo_counts=True must not be treated as a count
            pc_pred_ccm, pc_pred_ad = add_pseudo_counts(np.copy(pred_ccm), np.copy(pred_ad), num=pseudo_counts) # add pseudo counts to the matrices
            pc_truth_ccm, pc_truth_ad = add_pseudo_counts(np.copy(truth_ccm), np.copy(truth_ad), num=pseudo_counts) # use np.copy so that original values are not altered
            ncluster_ccm, ncluster_ad = add_pseudo_counts(mb.get_ccm('NClusterOneLineage', truth_ccm), mb.get_ad('NClusterOneLineage', truth_ad), num=pseudo_counts) # predicted matrices for each mutation being in their own cluster
            onecluster_ccm, onecluster_ad = add_pseudo_counts(mb.get_ccm('OneCluster', truth_ccm), mb.get_ad('OneCluster', truth_ad), num=pseudo_counts) # predicted matrices for all mutations being in the same cluster
        else:
            pc_pred_ccm, pc_pred_ad = add_pseudo_counts(np.copy(pred_ccm), np.copy(pred_ad))
            pc_truth_ccm, pc_truth_ad = add_pseudo_counts(np.copy(truth_ccm), np.copy(truth_ad))
            ncluster_ccm, ncluster_ad = add_pseudo_counts(mb.get_ccm('NClusterOneLineage', truth_ccm), mb.get_ad('NClusterOneLineage', truth_ad))
            onecluster_ccm, onecluster_ad = add_pseudo_counts(mb.get_ccm('OneCluster', truth_ccm), mb.get_ad('OneCluster', truth_ad))
    else:
        pc_pred_ccm, pc_pred_ad, pc_truth_ccm, pc_truth_ad = pred_ccm, pred_ad, truth_ccm, truth_ad
        ncluster_ccm, ncluster_ad = mb.get_ccm('NClusterOneLineage', truth_ccm), mb.get_ad('NClusterOneLineage', truth_ad)
        onecluster_ccm, onecluster_ad = mb.get_ccm('OneCluster', truth_ccm), mb.get_ad('OneCluster', truth_ad)

    if isinstance(method, list):
        res = [calculate3_onemetric(pc_pred_ccm, pc_pred_ad, pc_truth_ccm, pc_truth_ad,
                                    method=m, verbose=verbose, full_matrix=full_matrix, in_mat=in_mat) for m in method] # calculate the score for each method

        # normalize the scores to be between (worst of NCluster score and OneCluster score) and (Truth score)
        ncluster_score = [calculate3_onemetric(ncluster_ccm, ncluster_ad, pc_truth_ccm, pc_truth_ad,
                                               method=m, verbose=verbose, full_matrix=full_matrix, in_mat=in_mat) for m in method]
        onecluster_score = [calculate3_onemetric(onecluster_ccm, onecluster_ad, pc_truth_ccm, pc_truth_ad,
                                                 method=m, verbose=verbose, full_matrix=full_matrix, in_mat=in_mat) for m in method]
        for i in range(len(method)):
            if method[i] in larger_is_worse_methods: # normalization for methods where a larger score is worse
                worst_score = max(ncluster_score[i], onecluster_score[i]) # worst of NCluster and OneCluster scores
                res[i] = 1 - (res[i] / worst_score) # normalize the score
            else: # normalization for methods where a smaller score is worse
                worst_score = min(ncluster_score[i], onecluster_score[i])
                res[i] = (res[i] - worst_score) / (1 - worst_score)

        if weights is None: # if weights are not specified or if they cannot be normalized then default to equal weights
            weights = [1] * len(method)
        elif sum(weights) == 0:
            warnings.warn('Weights sum to zero so they are invalid, defaulting to equal weights') # requires 'import warnings' at module level
            weights = [1] * len(method)

        weights = np.array(weights) / float(sum(weights)) # normalize the weights
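        # e.g. weights [2, 1] become [2/3, 1/3], so the final score is a convex combination of the per-metric scores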
        score = sum(np.multiply(res, weights))
    else:
        score = calculate3_onemetric(pc_pred_ccm, pc_pred_ad, pc_truth_ccm, pc_truth_ad,
                                     method=method, verbose=verbose, full_matrix=full_matrix, in_mat=in_mat)

        # normalize the score to be between (worst of NCluster score and OneCluster score) and (Truth score) - similar to above
        ncluster_score = calculate3_onemetric(ncluster_ccm, ncluster_ad, pc_truth_ccm, pc_truth_ad,
                                              method=method, verbose=verbose, full_matrix=full_matrix, in_mat=in_mat)
        onecluster_score = calculate3_onemetric(onecluster_ccm, onecluster_ad, pc_truth_ccm, pc_truth_ad,
                                                method=method, verbose=verbose, full_matrix=full_matrix, in_mat=in_mat)
        if method in larger_is_worse_methods:
            worst_score = max(ncluster_score, onecluster_score)
            score = 1 - (score / worst_score)
        else:
            worst_score = min(ncluster_score, onecluster_score)
            score = (score - worst_score) / (1 - worst_score)
    return score
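
# A minimal usage sketch (an illustration, not part of the original module):
# assuming CCM/AD matrices built by the rest of the pipeline, a weighted
# average of two metrics could be requested like this:
#
#     score = calculate3(pred_ccm, pred_ad, truth_ccm, truth_ad,
#                        method=['sym_pseudoV_nc', 'pseudoV_nc'],
#                        weights=[2, 1])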