def main():
    """Compare rankings parsed from a comma-separated list of files.

    Command-line interface:
        --listfiles          comma-separated list of input file paths
        --out                path the comparison results are written to
        --comparison_method  'kendall' (Kendall tau) or 'sets'
                             (average precision); any other value leaves
                             'result' unset in each comparison dict

    Each consecutive pair of files is compared and the list of
    comparison dicts ({'file1', 'file2', 'result'}) is handed to
    output_results().
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--listfiles', dest='listfilenames')
    parser.add_argument('--out', dest='outfile')
    parser.add_argument('--comparison_method', dest='method')
    args = parser.parse_args()

    # Build one record per file up front.  The original used
    # map() + xrange() indexing, which breaks on Python 3 (map returns
    # an iterator; xrange is gone) -- a comprehension works on both.
    list_of_files = [
        {'filename': filename, 'ranking': parse_file(filename)}
        for filename in args.listfilenames.split(',')
    ]

    # Compare each consecutive pair of rankings (file[i] vs file[i+1]).
    list_ranking_comparisons = []
    for first, second in zip(list_of_files, list_of_files[1:]):
        comparison_result = {
            'file1': first['filename'],
            'file2': second['filename'],
        }
        if args.method == 'kendall':
            comparison_result['result'] = calculate_kendalltau(
                first['ranking'], second['ranking'])
        elif args.method == 'sets':
            comparison_result['result'] = calculate_average_precision(
                first['ranking'], second['ranking'])
        list_ranking_comparisons.append(comparison_result)

    output_results(list_ranking_comparisons, args.outfile)
def test_calculate_average_precision(self):
    """Average precision for the fixture rankings lies in [0.89, 0.892]."""
    average_precision = calculate_average_precision(
        self.relevant_entries, self.retrieved_entries)
    # Chained comparison expresses the interval check in one expression.
    assert 0.89 <= average_precision <= 0.892