# Assumes a local `rankings` helper module providing truncate_term_rankings()
import rankings


def evaluate(self, test_rankings, top_values=(10,)):
    """Score the agreement between the reference class rankings and the
    test rankings, truncated to each cutoff in top_values."""
    scores = {}
    for top in top_values:
        # Compare only the top terms of each ranking at this cutoff
        trunc_classes = rankings.truncate_term_rankings(self.class_rankings, top)
        trunc_test = rankings.truncate_term_rankings(test_rankings, top)
        sim = self.agreement_measure.similarity(trunc_classes, trunc_test)
        scores["terms-%03d" % top] = sim
    return scores
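# The agreement_measure above is assumed to expose a
# similarity(rankings1, rankings2) method that compares two *lists* of
# term rankings and returns a value in [0, 1]. A minimal sketch of one
# such measure, pairing rankings positionally and averaging the Jaccard
# similarity of their term sets; the class name and the positional
# pairing strategy are illustrative assumptions, not the measure used
# by the original code.
class PositionalJaccard:
    def similarity(self, rankings1, rankings2):
        pairs = list(zip(rankings1, rankings2))
        if not pairs:
            return 0.0
        total = 0.0
        for r1, r2 in pairs:
            s1, s2 = set(r1), set(r2)
            # Jaccard similarity of the two term sets
            total += len(s1 & s2) / float(len(s1 | s2)) if s1 | s2 else 1.0
        return total / len(pairs)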
def evaluate(self, test_rankings, top_values=(10,)):
    """Score the mean pairwise diversity (1 - similarity) over all pairs
    of test rankings, truncated to each cutoff in top_values."""
    scores = {}
    k = len(test_rankings)
    for top in top_values:
        trunc_rankings = rankings.truncate_term_rankings(test_rankings, top)
        pairs = 0
        diversity = 0.0
        # Accumulate dissimilarity over every unordered pair of rankings
        for i in range(k):
            for j in range(i + 1, k):
                diversity += 1.0 - self.metric.similarity(trunc_rankings[i], trunc_rankings[j])
                pairs += 1
        # Guard against division by zero when fewer than two rankings exist
        scores["div-%03d" % top] = diversity / pairs if pairs > 0 else 0.0
    return scores
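# Unlike agreement_measure, the metric above is assumed to compare two
# *individual* term rankings and return a similarity in [0, 1]. A
# minimal sketch using plain Jaccard similarity over the truncated term
# sets; the class name is an illustrative assumption.
class JaccardSimilarity:
    def similarity(self, ranking1, ranking2):
        s1, s2 = set(ranking1), set(ranking2)
        if not s1 and not s2:
            # Two empty rankings are treated as identical
            return 1.0
        return len(s1 & s2) / float(len(s1 | s2))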