import pytrec_eval


def keep_qrels_topics(run, qrels):
    """
    Returns a new run composed only of the entries of the input run that belong to topics contained in the input qrels.
    Important: entries of the new run are shallow copies of entries of the input run.

    :param run: the run to filter
    :type run: TrecRun
    :param qrels: the judgements whose topics are kept
    :type qrels: QRels
    :return: TrecRun
    """
    new_run = {topic_id: entries
               for topic_id, entries in run.entries.items()
               if topic_id in qrels.allJudgements}
    return pytrec_eval.TrecRun(new_run, run.name + '_only_qrels_topics')
def loadAll(runFilenames):
    """Load all runs in the list runFilenames and return a list of TrecRun objects."""
    return [pytrec_eval.TrecRun(name) for name in runFilenames]
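# A minimal usage sketch, not part of the original code. Assumptions: a 'qrels.txt'
# judgement file and run files under 'runs/' exist, and pytrec_eval.evaluate(...)[0]
# returns the aggregated score, as it is used in the grid search below. The sketch
# loads all runs, restricts each one to judged topics, and prints its NDCG.
if __name__ == '__main__':
    import glob

    qrels = pytrec_eval.QRels('qrels.txt')
    for run in loadAll(sorted(glob.glob('runs/*'))):
        filtered = keep_qrels_topics(run, qrels)
        ndcg = pytrec_eval.evaluate(filtered, qrels, [pytrec_eval.ndcg])[0]
        print(filtered.name, ndcg)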
            # (fragment of a grid search over lambda1/lambda2; this `continue` sits inside
            # the per-topic loop and skips topics that cannot be scored)
            continue
        sdm_doc_score_map = get_scores(sdm_docs[str(topic_number)])
        cur_topic_subgraph = scores[str(topic_number)]
        doc_count = 1
        # Rank the topic's documents by centrality score, highest first.
        for doc, score in sorted(cur_topic_subgraph.items(), key=lambda item: item[1], reverse=True):
            sdm_score = float(sdm_doc_score_map[doc])
            try:
                centrality_score = normalize(score, cur_topic_subgraph.values())
            except Exception:
                # Normalization is undefined (e.g. all documents share the same score).
                centrality_score = 0
            # Linear interpolation of the SDM score and the normalized centrality score.
            combined_score = lambda1 * sdm_score + lambda2 * centrality_score
            temp_results.append(str(topic_number) + " Q0 " + doc + " " + str(doc_count)
                                + " " + str(combined_score) + " STANDARD")
            doc_count += 1

    # Write the interpolated run in TREC format and evaluate it against the qrels.
    with open("temp_file.test", "w") as outfile:
        outfile.write("\n".join(temp_results))
    run = pytrec_eval.TrecRun('temp_file.test')
    qrels = pytrec_eval.QRels('qrels_file.test')
    curr_result = pytrec_eval.evaluate(run, qrels, [pytrec_eval.ndcg])[0]

    # Keep the run of the best (lambda1, lambda2) combination seen so far.
    if curr_result > best_score:
        if best_results is not None:
            best_results.clear()
        best_score = curr_result
        best_results = list(temp_results)
    print("Run completed with lambda1=" + str(lambda1) + ", lambda2=" + str(lambda2)
          + " and NDCG=" + str(curr_result) + ". Took: " + str(time() - start_load) + " s")

# After the grid search, persist the best run.
for result in best_results:
    output.append(result)
with open("results_file.test", "w") as outfile:
    outfile.write("\n".join(output))
print("Results took %.2f seconds to run." % (time() - start_project))
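# The `normalize` helper used above is not shown in this fragment. A plausible,
# purely illustrative definition is min-max normalization over the topic's score
# values; the ZeroDivisionError it raises when all scores are equal matches the
# fallback to 0 in the grid-search loop. This is an assumption, not the actual code.
def normalize(score, all_scores):
    """Min-max normalize `score` with respect to the collection `all_scores`."""
    lo, hi = min(all_scores), max(all_scores)
    return (score - lo) / (hi - lo)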