}

# Calculate absolute k and l trim parameters from the fractional
# command-line values.
# NOTE(review): assumes args.d_core_k / args.d_core_l are fractions of
# args.nr_terms — confirm against the argument parser. Also note Python's
# round() uses banker's rounding (half-to-even).
d_core_k = round(args.d_core_k * args.nr_terms)
d_core_l = round(args.d_core_l * args.nr_terms)

# Read in topics via Pyserini. Yields (topic_num, topic) pairs;
# topic appears to be the query text/id used directly below.
topics = utils.read_topics_and_ids_from_file(
    f'resources/topics-and-qrels/{args.topics}')

# Build, trim, and (optionally) TextRank-rank one query graph per topic,
# then score candidate documents against it.
for topic_num, topic in tqdm(topics):  # tqdm(topics.items()):
    query_num = str(topic_num)
    query_id = topic  # ['title']
    # Build the query-side graph for this topic.
    query_graph = Graph(query_id, f'query_article_{query_num}')
    query_graph.build(**build_arguments)
    query_graph.trim(d_core_k, d_core_l)  # Vary trim parameter here

    # Recalculate node weights using TextRank when requested via CLI flag.
    if args.textrank:
        query_graph.rank()

    # Create new ranking: docid -> similarity score (filled in the inner
    # loop below); addition_types tracks per-doc metadata — presumably how
    # each score contribution was added; verify against the loop body.
    ranking = {}
    addition_types = {}

    # Loop over candidate documents and calculate similarity score.
    # NOTE(review): this file is re-read on every topic iteration even
    # though the path is loop-invariant — consider hoisting the read
    # above the topic loop.
    qid_docids = utils.read_docids_from_file(
        f'resources/candidates/{args.candidates}')
    for docid in qid_docids[query_num]:
        # Create graph object.