# --- Baseline-with-filters experiment ----------------------------------------
# For each (corpus edition, filter subset) pair: score every document pair with
# the filtered baseline similarity, write the predictions to disk, compute the
# MAP, and append a formatted row to the results table `restable`.
#
# Fix: the original re-executed these project imports inside the loop body on
# every iteration; they are hoisted here once (behavior is identical — Python
# caches modules — but the intent is clearer).
from plasem_taln import filters_baseline_similarity, comparator
from plasem_semeval import write_scores_to_file, sorted_scores_from_semeval_relevancy
from plasem_algostruct import mean_average_precision

similarity = filters_baseline_similarity
methodname = 'baseline_filters'
caption = 'Semeval - Scores MAP - Méthodes de référence avec filtres'

# One experiment per (edition, filter subset) pair.
# NOTE(review): `product` is presumably itertools.product imported earlier in
# the file — confirm.
parameters = list(product(corpora, filters_partition))
parameters_description = ('Édition', 'Filtres', 'Score MAP')
# Column formatters for the result rows: edition verbatim, filters via
# get_filters_descr.
description_functions = [lambda x: x, get_filters_descr]

for corpus, *rest in parameters:
    # rest[0] is the collection of filter keys selected for this run.
    context['filters'] = [filters[key] for key in rest[0]]
    comp = comparator(context, similarity)
    scores = make_score_tree(doctrees[corpus], comp.getscore)
    # Persist predictions so the official scorer can be run on them.
    predfile = getpredfilename(methodname, corpus, *rest)
    write_scores_to_file(scores, predfile)
    MAP = mean_average_precision(
        sorted_scores_from_semeval_relevancy(relevancy[corpus],
                                             scores).values())
    restable.append([
        *(description_functions[i](value)
          for i, value in enumerate((corpus, *rest))),
        '%.2f' % (100 * MAP)
    ])
    # Tail of an enclosing function whose `def` lies above this chunk:
    # map each key of `sscores` (presumably an org/question id — confirm
    # against the function header) to the average precision of its values.
    result = {
        org: average_precision(values)
        for org, values in sscores.items()
    }
    return result

# Per-corpus relevancy lookup, precomputed once for every edition.
# NOTE(review): `relevdict` is not consumed in this visible chunk —
# presumably used further down the file.
relevdict = {
    corpus: relevancy_dict_relevancy(relevancy[corpus])
    for corpus in corpora
}

# Grid search over (corpus, filter subset): run the filtered baseline
# similarity, score it with MAP, and keep — per corpus — the best MAP,
# the winning filter keys, the per-question APs, and the raw scores.
for corpus, *rest in parameters:
    # rest[0] is the collection of filter keys selected for this run.
    context['filters'] = [filters[key] for key in rest[0]]
    compfilters = comparator(context, filters_baseline_similarity)
    scores = make_score_tree(doctrees[corpus], compfilters.getscore)
    # Persist predictions so the official scorer can be run on them.
    predfile = getpredfilename(methodname, corpus, *rest)
    write_scores_to_file(scores, predfile)
    MAP = mean_average_precision(
        sorted_scores_from_semeval_relevancy(relevancy[corpus],
                                             scores).values())
    # Track the best-performing filter subset for this corpus.
    if MAP > best[corpus]:
        best[corpus] = MAP
        bestmeth[corpus] = rest[0]
        bestaps[corpus] = avgtree(scores, relevancy[corpus])
        bestscores[corpus] = scores
    # Commented-out result-table row (continues past this chunk):
    # restable.append([*(description_functions[i](value)
    # for i, value in enumerate((corpus, *rest))),