Example 1
import copy

# The metric classes are assumed to live in the framework's metrics module, the
# same one imported by the Gini test in Example 3 at the end of this file.
from Base.Evaluation.metrics import (Coverage_Item, Coverage_User, Gini_Diversity,
                                     Shannon_Entropy, Diversity_MeanInterList,
                                     Diversity_Herfindahl, Novelty,
                                     AveragePopularity, MAP, MRR)

# EvaluatorMetrics is the Enum listing every supported metric; it is defined in
# the enclosing Evaluator module and is not shown in this excerpt.


def _create_empty_metrics_dict(cutoff_list, n_items, n_users, URM_train, URM_test, ignore_items, ignore_users, diversity_similarity_object):
    """Build, for each cutoff, a fresh dict mapping every metric name to its
    accumulator object (or to 0.0 for metrics accumulated as plain floats)."""

    empty_dict = {}

    # global_RMSE_object = RMSE(URM_train + URM_test)

    for cutoff in cutoff_list:

        cutoff_dict = {}

        # Stateful metrics (coverage, diversity, novelty, ...) get a dedicated
        # accumulator object; everything else defaults to a 0.0 scalar below.
        for metric in EvaluatorMetrics:
            if metric == EvaluatorMetrics.COVERAGE_ITEM:
                cutoff_dict[metric.value] = Coverage_Item(n_items, ignore_items)

            elif metric == EvaluatorMetrics.DIVERSITY_GINI:
                cutoff_dict[metric.value] = Gini_Diversity(n_items, ignore_items)

            elif metric == EvaluatorMetrics.SHANNON_ENTROPY:
                cutoff_dict[metric.value] = Shannon_Entropy(n_items, ignore_items)

            elif metric == EvaluatorMetrics.COVERAGE_USER:
                cutoff_dict[metric.value] = Coverage_User(n_users, ignore_users)

            elif metric == EvaluatorMetrics.DIVERSITY_MEAN_INTER_LIST:
                cutoff_dict[metric.value] = Diversity_MeanInterList(n_items, cutoff)

            elif metric == EvaluatorMetrics.DIVERSITY_HERFINDAHL:
                cutoff_dict[metric.value] = Diversity_Herfindahl(n_items, ignore_items)

            elif metric == EvaluatorMetrics.NOVELTY:
                cutoff_dict[metric.value] = Novelty(URM_train)

            elif metric == EvaluatorMetrics.AVERAGE_POPULARITY:
                cutoff_dict[metric.value] = AveragePopularity(URM_train)

            elif metric == EvaluatorMetrics.MAP:
                cutoff_dict[metric.value] = MAP()

            elif metric == EvaluatorMetrics.MRR:
                cutoff_dict[metric.value] = MRR()

            # elif metric == EvaluatorMetrics.RMSE:
            #     cutoff_dict[metric.value] = global_RMSE_object

            elif metric == EvaluatorMetrics.DIVERSITY_SIMILARITY:
                # Only instantiated when a similarity object is provided; a deep
                # copy keeps each cutoff's counters independent.
                if diversity_similarity_object is not None:
                    cutoff_dict[metric.value] = copy.deepcopy(diversity_similarity_object)

            else:
                # All remaining metrics are accumulated as plain floats.
                cutoff_dict[metric.value] = 0.0

        empty_dict[cutoff] = cutoff_dict

    return empty_dict
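
A minimal usage sketch for the per-cutoff builder above. The scipy.sparse URM matrices and the concrete sizes are illustrative assumptions only; in the framework this helper is presumably called internally by the Evaluator with the real data splits.

import numpy as np
import scipy.sparse as sps

n_users, n_items = 100, 50
URM_train = sps.random(n_users, n_items, density=0.05, format="csr")
URM_test = sps.random(n_users, n_items, density=0.02, format="csr")

results_dict = _create_empty_metrics_dict(cutoff_list=[5, 10],
                                          n_items=n_items,
                                          n_users=n_users,
                                          URM_train=URM_train,
                                          URM_test=URM_test,
                                          ignore_items=np.array([]),
                                          ignore_users=np.array([]),
                                          diversity_similarity_object=None)

# One accumulator dict per cutoff: results_dict[5] and results_dict[10]
# each map metric names to metric objects or 0.0 scalars.
print(results_dict[5].keys())
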
Example 2
# (Same imports as in Example 1.)

def create_empty_metrics_dict(n_items, n_users, URM_train, ignore_items,
                              ignore_users, cutoff,
                              diversity_similarity_object):
    """Single-cutoff variant: build one flat dict mapping every metric name to
    its accumulator object (or to 0.0 for metrics accumulated as plain floats)."""

    empty_dict = {}

    # from Base.Evaluation.ResultMetric import ResultMetric
    # empty_dict = ResultMetric()

    for metric in EvaluatorMetrics:
        if metric == EvaluatorMetrics.COVERAGE_ITEM:
            empty_dict[metric.value] = Coverage_Item(n_items, ignore_items)

        elif metric == EvaluatorMetrics.DIVERSITY_GINI:
            empty_dict[metric.value] = Gini_Diversity(n_items, ignore_items)

        elif metric == EvaluatorMetrics.SHANNON_ENTROPY:
            empty_dict[metric.value] = Shannon_Entropy(n_items, ignore_items)

        elif metric == EvaluatorMetrics.COVERAGE_USER:
            empty_dict[metric.value] = Coverage_User(n_users, ignore_users)

        elif metric == EvaluatorMetrics.DIVERSITY_MEAN_INTER_LIST:
            empty_dict[metric.value] = Diversity_MeanInterList(n_items, cutoff)

        elif metric == EvaluatorMetrics.DIVERSITY_HERFINDAHL:
            empty_dict[metric.value] = Diversity_Herfindahl(
                n_items, ignore_items)

        elif metric == EvaluatorMetrics.NOVELTY:
            empty_dict[metric.value] = Novelty(URM_train)

        elif metric == EvaluatorMetrics.AVERAGE_POPULARITY:
            empty_dict[metric.value] = AveragePopularity(URM_train)

        elif metric == EvaluatorMetrics.MAP:
            empty_dict[metric.value] = MAP()

        elif metric == EvaluatorMetrics.MRR:
            empty_dict[metric.value] = MRR()

        elif metric == EvaluatorMetrics.DIVERSITY_SIMILARITY:
            if diversity_similarity_object is not None:
                empty_dict[metric.value] = copy.deepcopy(
                    diversity_similarity_object)
        else:
            empty_dict[metric.value] = 0.0

    return empty_dict
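
The flat variant above skips the per-cutoff nesting and returns a single metric-name-to-accumulator mapping. A brief usage sketch under the same assumptions as the previous one (illustrative sizes, scipy.sparse URM):

import numpy as np
import scipy.sparse as sps

n_users, n_items = 100, 50
URM_train = sps.random(n_users, n_items, density=0.05, format="csr")

single_cutoff_dict = create_empty_metrics_dict(n_items=n_items,
                                               n_users=n_users,
                                               URM_train=URM_train,
                                               ignore_items=np.array([]),
                                               ignore_users=np.array([]),
                                               cutoff=10,
                                               diversity_similarity_object=None)

# Flat mapping: metric name -> accumulator object or 0.0
print(single_cutoff_dict.keys())
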
Example 3

import unittest

import numpy as np


# Hypothetical test-case wrapper: the excerpt only shows the test method, so
# the class name used here is illustrative.
class MetricsTestCase(unittest.TestCase):

    def test_Gini_Index(self):

        from Base.Evaluation.metrics import Gini_Diversity

        n_items = 1000

        gini_index = Gini_Diversity(n_items, ignore_items=np.array([]))

        gini_index.recommended_counter = np.ones(n_items)
        assert np.isclose(1.0, gini_index.get_metric_value(), atol=1e-2), "Gini_Index metric incorrect"

        gini_index.recommended_counter = np.ones(n_items)*1e-12
        gini_index.recommended_counter[0] = 1.0
        assert np.isclose(0.0, gini_index.get_metric_value(), atol=1e-2), "Gini_Index metric incorrect"
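
The two assertions probe the boundary cases of the Gini diversity index: a uniform recommendation counter (every item recommended equally often) should score close to 1.0, while concentrating essentially all recommendations on a single item should score close to 0.0. The same check as a standalone sketch, using only the calls already shown in the test:

import numpy as np

from Base.Evaluation.metrics import Gini_Diversity

n_items = 1000
gini = Gini_Diversity(n_items, ignore_items=np.array([]))

gini.recommended_counter = np.ones(n_items)          # perfectly even exposure
print(gini.get_metric_value())                       # expected close to 1.0

gini.recommended_counter = np.ones(n_items) * 1e-12
gini.recommended_counter[0] = 1.0                    # one item dominates
print(gini.get_metric_value())                       # expected close to 0.0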