Code example #1
import numpy as np

# `Evaluation`, `calc_entropy`, and `calc_conditional_entropy` are assumed to be
# defined elsewhere in the surrounding module.
def evaluate_entropy_metrics(joint_prob: np.ndarray,
                             evaluation: Evaluation,
                             is_sampled_graph: bool = False):
    """Evaluates the entropy (information theoretics based) goodness of partition metrics.

    Parameters
    ----------
    joint_prob : np.ndarray
        the normalized contingency table
    evaluation : Evaluation
        stores the evaluation metrics
    is_sampled_graph : bool, optional
        True if the evaluation is for a sampled graph. Default is False.
    """
    # compute the information-theoretic metrics from the marginal distributions
    marginal_prob_b2 = np.sum(joint_prob, 0)  # marginal over the algorithm's blocks (column sums)
    marginal_prob_b1 = np.sum(joint_prob, 1)  # marginal over the true blocks (row sums)
    # indices of blocks with non-zero probability (empty blocks are skipped)
    idx_truth = np.nonzero(marginal_prob_b1)
    idx_alg = np.nonzero(marginal_prob_b2)
    evaluation = calc_entropy(marginal_prob_b1, marginal_prob_b2, idx_truth,
                              idx_alg, evaluation, is_sampled_graph)
    evaluation = calc_conditional_entropy(joint_prob, marginal_prob_b1,
                                          marginal_prob_b2, idx_truth, idx_alg,
                                          evaluation, is_sampled_graph)

    # Missed information = H(truth | algorithm) / H(truth);
    # erroneous information = H(algorithm | truth) / H(algorithm).
    # Guard against division by zero when an entropy term is zero.
    if is_sampled_graph:
        if evaluation.sampled_graph_entropy_truth > 0:
            fraction_missed_info = (
                evaluation.sampled_graph_entropy_truth_given_algorithm /
                evaluation.sampled_graph_entropy_truth)
        else:
            fraction_missed_info = 0
        if evaluation.sampled_graph_entropy_algorithm > 0:
            fraction_err_info = (
                evaluation.sampled_graph_entropy_algorithm_given_truth /
                evaluation.sampled_graph_entropy_algorithm)
        else:
            fraction_err_info = 0

        evaluation.sampled_graph_missed_info = fraction_missed_info
        evaluation.sampled_graph_erroneous_info = fraction_err_info
    else:
        if evaluation.entropy_truth > 0:
            fraction_missed_info = evaluation.entropy_truth_given_algorithm / evaluation.entropy_truth
        else:
            fraction_missed_info = 0
        if evaluation.entropy_algorithm > 0:
            fraction_err_info = evaluation.entropy_algorithm_given_truth / evaluation.entropy_algorithm
        else:
            fraction_err_info = 0

        evaluation.missed_info = fraction_missed_info
        evaluation.erroneous_info = fraction_err_info

    print('Fraction of missed information: {}'.format(
        abs(fraction_missed_info)))
    print('Fraction of erroneous information: {}'.format(
        abs(fraction_err_info)))
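
A minimal usage sketch, assuming only NumPy: it builds the normalized contingency table `joint_prob` from two block assignments and shows the intended call. The node labels, block assignments, and the `evaluation` object in the final comment are hypothetical, made up purely for illustration.

import numpy as np

# Hypothetical ground-truth and algorithm block assignments for 8 nodes.
true_blocks = np.array([0, 0, 1, 1, 1, 2, 2, 2])
alg_blocks = np.array([0, 0, 1, 1, 2, 2, 2, 2])

# Contingency table: counts[i, j] = number of nodes in true block i
# that the algorithm placed in block j.
num_true_blocks = true_blocks.max() + 1
num_alg_blocks = alg_blocks.max() + 1
contingency = np.zeros((num_true_blocks, num_alg_blocks))
np.add.at(contingency, (true_blocks, alg_blocks), 1)

# Normalize so the entries sum to 1, matching the `joint_prob` expected above.
joint_prob = contingency / contingency.sum()

# joint_prob would then be passed together with an Evaluation instance, e.g.
# evaluate_entropy_metrics(joint_prob, evaluation)

np.add.at performs unbuffered in-place accumulation, so repeated (truth, algorithm) index pairs are all counted without an explicit Python loop.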