def init(self, force=False):
    base_path = util.path_dataset(self)
    idxs = [self.index, self.index_stem, self.doc_store]
    self._init_indices_parallel(idxs, self._init_iter_collection(), force)

    # Split the 2013/2014 qrels into train/valid/test files by query ID.
    train_qrels = os.path.join(base_path, 'train.qrels.txt')
    valid_qrels = os.path.join(base_path, 'valid.qrels.txt')
    test_qrels = os.path.join(base_path, 'test.qrels.txt')
    if (force or not os.path.exists(train_qrels) or not os.path.exists(valid_qrels)) and self._confirm_dua():
        source_stream = util.download_stream(**_FILES['qrels_2013'], encoding='utf8')
        source_stream2 = util.download_stream(**_FILES['qrels_2014'], encoding='utf8')
        with util.finialized_file(train_qrels, 'wt') as trainf, \
             util.finialized_file(valid_qrels, 'wt') as validf, \
             util.finialized_file(test_qrels, 'wt') as testf:
            for stream in (source_stream, source_stream2):
                for line in stream:
                    cols = line.strip().split()
                    # Query IDs are compared as ints for both years.
                    if int(cols[0]) in VALIDATION_QIDS:
                        validf.write(' '.join(cols) + '\n')
                    elif int(cols[0]) in TEST_QIDS:
                        testf.write(' '.join(cols) + '\n')
                    else:
                        trainf.write(' '.join(cols) + '\n')

    # All topics (2013 and 2014) go into a single topics.txt file.
    all_queries = os.path.join(base_path, 'topics.txt')
    if (force or not os.path.exists(all_queries)) and self._confirm_dua():
        source_stream = util.download_stream(**_FILES['queries_2013'], encoding='utf8')
        source_stream2 = util.download_stream(**_FILES['queries_2014'], encoding='utf8')
        queries = []
        for stream in (source_stream, source_stream2):
            for _id, _query in trec.parse_query_mbformat(stream):
                nid = _id.replace('MB', '').strip()
                queries.append([nid, _query])
        plaintext.write_tsv(all_queries, queries)
def init(self, force=False):
    idxs = [self.index, self.index_stem, self.doc_store]
    self._init_indices_parallel(idxs, self._init_iter_collection(), force)
    base_path = util.path_dataset(self)

    train_qrels = os.path.join(base_path, 'train.qrels.txt')
    valid_qrels = os.path.join(base_path, 'valid.qrels.txt')
    if (force or not os.path.exists(train_qrels) or not os.path.exists(valid_qrels)) and self._confirm_dua():
        source_stream = util.download_stream(
            'https://ciir.cs.umass.edu/downloads/Antique/antique-train.qrel',
            encoding='utf8')
        with util.finialized_file(train_qrels, 'wt') as tf, \
             util.finialized_file(valid_qrels, 'wt') as vf:
            for line in source_stream:
                cols = line.strip().split()
                if cols[0] in VALIDATION_QIDS:
                    vf.write(' '.join(cols) + '\n')
                else:
                    tf.write(' '.join(cols) + '\n')

    train_queries = os.path.join(base_path, 'train.queries.txt')
    valid_queries = os.path.join(base_path, 'valid.queries.txt')
    if (force or not os.path.exists(train_queries) or not os.path.exists(valid_queries)) and self._confirm_dua():
        source_stream = util.download_stream(
            'https://ciir.cs.umass.edu/downloads/Antique/antique-train-queries.txt',
            encoding='utf8')
        train, valid = [], []
        for cols in plaintext.read_tsv(source_stream):
            if cols[0] in VALIDATION_QIDS:
                valid.append(cols)
            else:
                train.append(cols)
        plaintext.write_tsv(train_queries, train)
        plaintext.write_tsv(valid_queries, valid)

    test_qrels = os.path.join(base_path, 'test.qrels.txt')
    if (force or not os.path.exists(test_qrels)) and self._confirm_dua():
        util.download(
            'https://ciir.cs.umass.edu/downloads/Antique/antique-test.qrel',
            test_qrels)

    test_queries = os.path.join(base_path, 'test.queries.txt')
    if (force or not os.path.exists(test_queries)) and self._confirm_dua():
        util.download(
            'https://ciir.cs.umass.edu/downloads/Antique/antique-test-queries.txt',
            test_queries)
def init(self, force=False):
    base_path = util.path_dataset(self)
    idxs = [self.index, self.index_stem, self.doc_store]
    self._init_indices_parallel(idxs, self._init_iter_collection(), force)

    qrels_file = os.path.join(base_path, 'qrels.robust2004.txt')
    if (force or not os.path.exists(qrels_file)) and self._confirm_dua():
        util.download(**_FILES['qrels'], file_name=qrels_file)

    # Split the full qrels into one file per fold; read the source qrels at most once.
    all_qrels = None
    for fold in FOLDS:
        fold_qrels_file = os.path.join(base_path, f'{fold}.qrels')
        if force or not os.path.exists(fold_qrels_file):
            if all_qrels is None:
                all_qrels = trec.read_qrels_dict(qrels_file)
            fold_qrels = {qid: dids for qid, dids in all_qrels.items() if qid in FOLDS[fold]}
            trec.write_qrels_dict(fold_qrels_file, fold_qrels)

    query_file = os.path.join(base_path, 'topics.txt')
    if (force or not os.path.exists(query_file)) and self._confirm_dua():
        query_file_stream = util.download_stream(**_FILES['queries'], encoding='utf8')
        with util.finialized_file(query_file, 'wt') as f:
            plaintext.write_tsv(f, trec.parse_query_format(query_file_stream))
def _init_qrels(self, subset, qrels_files, force=False, expected_md5=None):
    qrelsf = os.path.join(util.path_dataset(self), f'{subset}.qrels')
    if (force or not os.path.exists(qrelsf)) and self._confirm_dua():
        qrels = itertools.chain(*(
            trec.read_qrels(util.download_stream(f, 'utf8', expected_md5=expected_md5))
            for f in qrels_files))
        trec.write_qrels(qrelsf, qrels)
def _init_topics(self, subset, topic_files, qid_prefix=None, encoding=None,
                 xml_prefix=None, force=False, expected_md5=None):
    topicf = os.path.join(util.path_dataset(self), f'{subset}.topics')
    if (force or not os.path.exists(topicf)) and self._confirm_dua():
        topics = []
        for topic_file in topic_files:
            topic_file_stream = util.download_stream(topic_file, encoding, expected_md5=expected_md5)
            for t, qid, text in trec.parse_query_format(topic_file_stream, xml_prefix):
                if qid_prefix is not None:
                    qid = qid.replace(qid_prefix, '')
                topics.append((t, qid, text))
        plaintext.write_tsv(topicf, topics)
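# A minimal, hypothetical sketch (not part of the library) of how a dataset's init()
# could wire the two helper methods above together. The URLs, the 'MB' qid prefix, and
# the subset name 'test' below are placeholders for illustration only.
def init(self, force=False):
    idxs = [self.index, self.index_stem, self.doc_store]
    self._init_indices_parallel(idxs, self._init_iter_collection(), force)
    # One qrels file and one topics file per subset; both helpers confirm the DUA and
    # skip the download when the target file already exists and force is False.
    self._init_qrels('test', ['https://example.org/test.qrels'], force=force)
    self._init_topics('test', ['https://example.org/test.topics.xml'],
                      qid_prefix='MB', encoding='utf8', force=force)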
def _init_iter_collection(self):
    files = {
        '2020-04-10': {
            'comm_use_subset': ('https://ai2-semanticscholar-cord-19.s3-us-west-2.amazonaws.com/2020-04-10/comm_use_subset.tar.gz', "253cecb4fee2582a611fb77a4d537dc5"),
            'noncomm_use_subset': ('https://ai2-semanticscholar-cord-19.s3-us-west-2.amazonaws.com/2020-04-10/noncomm_use_subset.tar.gz', "734b462133b3c00da578a909f945f4ae"),
            'custom_license': ('https://ai2-semanticscholar-cord-19.s3-us-west-2.amazonaws.com/2020-04-10/custom_license.tar.gz', "2f1c9864348025987523b86d6236c40b"),
            'biorxiv_medrxiv': ('https://ai2-semanticscholar-cord-19.s3-us-west-2.amazonaws.com/2020-04-10/biorxiv_medrxiv.tar.gz', "c12acdec8b3ad31918d752ba3db36121"),
        },
        '2020-05-01': {
            'comm_use_subset': ('https://ai2-semanticscholar-cord-19.s3-us-west-2.amazonaws.com/2020-05-01/comm_use_subset.tar.gz', "af4202340182209881d3d8cba2d58a24"),
            'noncomm_use_subset': ('https://ai2-semanticscholar-cord-19.s3-us-west-2.amazonaws.com/2020-05-01/noncomm_use_subset.tar.gz', "9cc25b9e8674197446e7cbd4381f643b"),
            'custom_license': ('https://ai2-semanticscholar-cord-19.s3-us-west-2.amazonaws.com/2020-05-01/custom_license.tar.gz', "1cb6936a7300a31344cd8a5ecc9ca778"),
            'biorxiv_medrxiv': ('https://ai2-semanticscholar-cord-19.s3-us-west-2.amazonaws.com/2020-05-01/biorxiv_medrxiv.tar.gz', "9d6c6dc5d64b01e528086f6652b3ccb7"),
            'arxiv': ('https://ai2-semanticscholar-cord-19.s3-us-west-2.amazonaws.com/2020-05-01/arxiv.tar.gz', "f10890174d6f864f306800d4b02233bc"),
        },
    }
    metadata = {
        '2020-04-10': ('https://ai2-semanticscholar-cord-19.s3-us-west-2.amazonaws.com/2020-04-10/metadata.csv', "42a21f386be86c24647a41bedde34046"),
        '2020-05-01': ('https://ai2-semanticscholar-cord-19.s3-us-west-2.amazonaws.com/2020-05-01/metadata.csv', "b1d2e409026494e0c8034278bacd1248"),
    }
    meta_url, meta_md5 = metadata[self.config['date']]

    fulltexts = {}
    with contextlib.ExitStack() as stack:
        # Download and keep open all full-text archives for the configured snapshot date.
        for fid, (file, md5) in files[self.config['date']].items():
            fulltexts[fid] = stack.enter_context(util.download_tmp(file, tarf=True, expected_md5=md5))
        meta = pd.read_csv(util.download_stream(meta_url, expected_md5=meta_md5))
        for _, row in meta.iterrows():
            did = str(row['cord_uid'])
            title = str(row['title'])
            doi = str(row['doi'])
            abstract = str(row['abstract'])
            date = str(row['publish_time'])
            body = ''
            heads = ''
            # Prefer the PMC XML parse; fall back to the PDF parse when it is unavailable.
            if row['has_pmc_xml_parse']:
                path = os.path.join(row['full_text_file'], 'pmc_json', row['pmcid'] + '.xml.json')
                data = json.load(fulltexts[row['full_text_file']].extractfile(path))
                if 'body_text' in data:
                    body = '\n'.join(b['text'] for b in data['body_text'])
                    heads = '\n'.join(set(b['section'] for b in data['body_text']))
            elif row['has_pdf_parse']:
                path = os.path.join(row['full_text_file'], 'pdf_json', row['sha'].split(';')[0].strip() + '.json')
                data = json.load(fulltexts[row['full_text_file']].extractfile(path))
                if 'body_text' in data:
                    body = '\n'.join(b['text'] for b in data['body_text'])
                    heads = '\n'.join(set(b['section'] for b in data['body_text']))
            contents = f'{title}\n\n{abstract}\n\n{body}\n\n{heads}'
            doc = indices.RawDoc(did, text=contents, title=title, abstract=abstract,
                                 title_abs=f'{title}\n\n{abstract}', body=body, doi=doi, date=date)
            yield doc
def init(self, force=False):
    idxs = [self.index_stem, self.doc_store]
    self._init_indices_parallel(idxs, self._init_iter_collection(), force)
    base_path = util.path_dataset(self)

    # Queries: each missing output file registers a writer that filters the shared
    # query iterator, so the source tarball is only read once.
    needs_queries = []
    if force or not os.path.exists(os.path.join(base_path, 'train.queries.tsv')):
        needs_queries.append(lambda it: plaintext.write_tsv(
            os.path.join(base_path, 'train.queries.tsv'),
            ((qid, txt) for file, qid, txt in it if file == 'queries.train.tsv' and qid not in MINI_DEV)))
    if force or not os.path.exists(os.path.join(base_path, 'minidev.queries.tsv')):
        needs_queries.append(lambda it: plaintext.write_tsv(
            os.path.join(base_path, 'minidev.queries.tsv'),
            ((qid, txt) for file, qid, txt in it if file == 'queries.train.tsv' and qid in MINI_DEV)))
    if force or not os.path.exists(os.path.join(base_path, 'dev.queries.tsv')):
        needs_queries.append(lambda it: plaintext.write_tsv(
            os.path.join(base_path, 'dev.queries.tsv'),
            ((qid, txt) for file, qid, txt in it if file == 'queries.dev.tsv')))
    if force or not os.path.exists(os.path.join(base_path, 'eval.queries.tsv')):
        needs_queries.append(lambda it: plaintext.write_tsv(
            os.path.join(base_path, 'eval.queries.tsv'),
            ((qid, txt) for file, qid, txt in it if file == 'queries.eval.tsv')))
    if needs_queries and self._confirm_dua():
        with util.download_tmp(_SOURCES['queries']) as f, \
             tarfile.open(fileobj=f) as tarf, \
             contextlib.ExitStack() as ctxt:
            def _extr_subf(subf):
                for qid, txt in plaintext.read_tsv(io.TextIOWrapper(tarf.extractfile(subf))):
                    yield subf, qid, txt
            query_iter = [_extr_subf('queries.train.tsv'),
                          _extr_subf('queries.dev.tsv'),
                          _extr_subf('queries.eval.tsv')]
            query_iter = tqdm(itertools.chain(*query_iter), desc='queries')
            query_iters = util.blocking_tee(query_iter, len(needs_queries))
            for fn, it in zip(needs_queries, query_iters):
                ctxt.enter_context(util.CtxtThread(functools.partial(fn, it)))

    # Qrels: the training qrels are split into train / minidev.
    file = os.path.join(base_path, 'train.qrels')
    if (force or not os.path.exists(file)) and self._confirm_dua():
        stream = util.download_stream(_SOURCES['train-qrels'], 'utf8')
        with util.finialized_file(file, 'wt') as out:
            for qid, _, did, score in plaintext.read_tsv(stream):
                if qid not in MINI_DEV:
                    trec.write_qrels(out, [(qid, did, score)])

    file = os.path.join(base_path, 'minidev.qrels')
    if (force or not os.path.exists(file)) and self._confirm_dua():
        stream = util.download_stream(_SOURCES['train-qrels'], 'utf8')
        with util.finialized_file(file, 'wt') as out:
            for qid, _, did, score in plaintext.read_tsv(stream):
                if qid in MINI_DEV:
                    trec.write_qrels(out, [(qid, did, score)])

    file = os.path.join(base_path, 'dev.qrels')
    if (force or not os.path.exists(file)) and self._confirm_dua():
        stream = util.download_stream(_SOURCES['dev-qrels'], 'utf8')
        with util.finialized_file(file, 'wt') as out:
            for qid, _, did, score in plaintext.read_tsv(stream):
                trec.write_qrels(out, [(qid, did, score)])

    # Training triples.
    file = os.path.join(base_path, 'train.mspairs.gz')
    if not os.path.exists(file) and os.path.exists(os.path.join(base_path, 'qidpidtriples.train.full')):
        # legacy
        os.rename(os.path.join(base_path, 'qidpidtriples.train.full'), file)
    if (force or not os.path.exists(file)) and self._confirm_dua():
        util.download(_SOURCES['qidpidtriples.train.full'], file)

    # Top-1000 runs (optional; skipped when init_skip_msrun is set).
    if not self.config['init_skip_msrun']:
        for file_name, subf in [('dev.msrun', 'top1000.dev'),
                                ('eval.msrun', 'top1000.eval'),
                                ('train.msrun', 'top1000.train.txt')]:
            file = os.path.join(base_path, file_name)
            if (force or not os.path.exists(file)) and self._confirm_dua():
                run = {}
                with util.download_tmp(_SOURCES[file_name]) as f, \
                     tarfile.open(fileobj=f) as tarf:
                    for qid, did, _, _ in tqdm(plaintext.read_tsv(io.TextIOWrapper(tarf.extractfile(subf)))):
                        if qid not in run:
                            run[qid] = {}
                        run[qid][did] = 0.
                if file_name == 'train.msrun':
                    minidev = {qid: dids for qid, dids in run.items() if qid in MINI_DEV}
                    with self.logger.duration('writing minidev.msrun'):
                        trec.write_run_dict(os.path.join(base_path, 'minidev.msrun'), minidev)
                    run = {qid: dids for qid, dids in run.items() if qid not in MINI_DEV}
                with self.logger.duration(f'writing {file_name}'):
                    trec.write_run_dict(file, run)

    # TREC DL 2019 topics, run, and qrels.
    query_path = os.path.join(base_path, 'trec2019.queries.tsv')
    if (force or not os.path.exists(query_path)) and self._confirm_dua():
        stream = util.download_stream(_SOURCES['trec2019.queries'], 'utf8')
        plaintext.write_tsv(query_path, plaintext.read_tsv(stream))

    msrun_path = os.path.join(base_path, 'trec2019.msrun')
    if (force or not os.path.exists(msrun_path)) and self._confirm_dua():
        run = {}
        with util.download_stream(_SOURCES['trec2019.msrun'], 'utf8') as stream:
            for qid, did, _, _ in plaintext.read_tsv(stream):
                if qid not in run:
                    run[qid] = {}
                run[qid][did] = 0.
        with util.finialized_file(msrun_path, 'wt') as f:
            trec.write_run_dict(f, run)

    qrels_path = os.path.join(base_path, 'trec2019.qrels')
    if not os.path.exists(qrels_path) and self._confirm_dua():
        util.download(_SOURCES['trec2019.qrels'], qrels_path)

    # judgedtrec2019: restrict topics and run to queries with relevance judgments.
    qrels_path = os.path.join(base_path, 'judgedtrec2019.qrels')
    if not os.path.exists(qrels_path):
        os.symlink('trec2019.qrels', qrels_path)

    query_path = os.path.join(base_path, 'judgedtrec2019.queries.tsv')
    judged_qids = util.Lazy(lambda: trec.read_qrels_dict(qrels_path).keys())
    if force or not os.path.exists(query_path):
        with util.finialized_file(query_path, 'wt') as f:
            for qid, qtext in plaintext.read_tsv(os.path.join(base_path, 'trec2019.queries.tsv')):
                if qid in judged_qids():
                    plaintext.write_tsv(f, [(qid, qtext)])

    msrun_path = os.path.join(base_path, 'judgedtrec2019.msrun')
    if (force or not os.path.exists(msrun_path)) and self._confirm_dua():
        with util.finialized_file(msrun_path, 'wt') as f:
            for qid, dids in trec.read_run_dict(os.path.join(base_path, 'trec2019.msrun')).items():
                if qid in judged_qids():
                    trec.write_run_dict(f, {qid: dids})

    # A subset of dev that only contains queries that have relevance judgments
    judgeddev_path = os.path.join(base_path, 'judgeddev')
    judged_qids = util.Lazy(lambda: trec.read_qrels_dict(os.path.join(base_path, 'dev.qrels')).keys())
    if not os.path.exists(f'{judgeddev_path}.qrels'):
        os.symlink('dev.qrels', f'{judgeddev_path}.qrels')
    if not os.path.exists(f'{judgeddev_path}.queries.tsv'):
        with util.finialized_file(f'{judgeddev_path}.queries.tsv', 'wt') as f:
            for qid, qtext in plaintext.read_tsv(os.path.join(base_path, 'dev.queries.tsv')):
                if qid in judged_qids():
                    plaintext.write_tsv(f, [(qid, qtext)])
    # dev.msrun is only produced when msrun initialization is not skipped.
    if not self.config['init_skip_msrun']:
        if not os.path.exists(f'{judgeddev_path}.msrun'):
            with util.finialized_file(f'{judgeddev_path}.msrun', 'wt') as f:
                for qid, dids in trec.read_run_dict(os.path.join(base_path, 'dev.msrun')).items():
                    if qid in judged_qids():
                        trec.write_run_dict(f, {qid: dids})

    # train10: a 10% sample of the training data (query IDs divisible by 10).
    if not self.config['init_skip_train10']:
        file = os.path.join(base_path, 'train10.queries.tsv')
        if not os.path.exists(file):
            with util.finialized_file(file, 'wt') as fout:
                for qid, qtext in self.logger.pbar(plaintext.read_tsv(os.path.join(base_path, 'train.queries.tsv')),
                                                   desc='filtering queries for train10'):
                    if int(qid) % 10 == 0:
                        plaintext.write_tsv(fout, [(qid, qtext)])

        file = os.path.join(base_path, 'train10.qrels')
        if not os.path.exists(file):
            with util.finialized_file(file, 'wt') as fout, \
                 open(os.path.join(base_path, 'train.qrels'), 'rt') as fin:
                for line in self.logger.pbar(fin, desc='filtering qrels for train10'):
                    qid = line.split()[0]
                    if int(qid) % 10 == 0:
                        fout.write(line)

        if not self.config['init_skip_msrun']:
            file = os.path.join(base_path, 'train10.msrun')
            if not os.path.exists(file):
                with util.finialized_file(file, 'wt') as fout, \
                     open(os.path.join(base_path, 'train.msrun'), 'rt') as fin:
                    for line in self.logger.pbar(fin, desc='filtering msrun for train10'):
                        qid = line.split()[0]
                        if int(qid) % 10 == 0:
                            fout.write(line)

        file = os.path.join(base_path, 'train10.mspairs.gz')
        if not os.path.exists(file):
            with gzip.open(file, 'wt') as fout, \
                 gzip.open(os.path.join(base_path, 'train.mspairs.gz'), 'rt') as fin:
                for qid, did1, did2 in self.logger.pbar(plaintext.read_tsv(fin),
                                                        desc='filtering mspairs for train10'):
                    if int(qid) % 10 == 0:
                        plaintext.write_tsv(fout, [(qid, did1, did2)])

    # train_med: the medical subset of the training queries.
    if not self.config['init_skip_train_med']:
        med_qids = util.Lazy(lambda: {
            qid.strip() for qid in util.download_stream(
                'https://raw.githubusercontent.com/Georgetown-IR-Lab/covid-neural-ir/master/med-msmarco-train.txt',
                'utf8', expected_md5="dc5199de7d4a872c361f89f08b1163ef")})

        file = os.path.join(base_path, 'train_med.queries.tsv')
        if not os.path.exists(file):
            with util.finialized_file(file, 'wt') as fout:
                for qid, qtext in self.logger.pbar(plaintext.read_tsv(os.path.join(base_path, 'train.queries.tsv')),
                                                   desc='filtering queries for train_med'):
                    if qid in med_qids():
                        plaintext.write_tsv(fout, [(qid, qtext)])

        file = os.path.join(base_path, 'train_med.qrels')
        if not os.path.exists(file):
            with util.finialized_file(file, 'wt') as fout, \
                 open(os.path.join(base_path, 'train.qrels'), 'rt') as fin:
                for line in self.logger.pbar(fin, desc='filtering qrels for train_med'):
                    qid = line.split()[0]
                    if qid in med_qids():
                        fout.write(line)

        if not self.config['init_skip_msrun']:
            file = os.path.join(base_path, 'train_med.msrun')
            if not os.path.exists(file):
                with util.finialized_file(file, 'wt') as fout, \
                     open(os.path.join(base_path, 'train.msrun'), 'rt') as fin:
                    for line in self.logger.pbar(fin, desc='filtering msrun for train_med'):
                        qid = line.split()[0]
                        if qid in med_qids():
                            fout.write(line)

        file = os.path.join(base_path, 'train_med.mspairs.gz')
        if not os.path.exists(file):
            with gzip.open(file, 'wt') as fout, \
                 gzip.open(os.path.join(base_path, 'train.mspairs.gz'), 'rt') as fin:
                for qid, did1, did2 in self.logger.pbar(plaintext.read_tsv(fin),
                                                        desc='filtering mspairs for train_med'):
                    if qid in med_qids():
                        plaintext.write_tsv(fout, [(qid, did1, did2)])
def _init_iter_collection(self):
    strm = util.download_stream(
        'https://ciir.cs.umass.edu/downloads/Antique/antique-collection.txt', 'utf8')
    for did, text in plaintext.read_tsv(strm):
        yield indices.RawDoc(did, text)
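# A small, hypothetical sanity-check helper (not part of the library): count the
# documents yielded by a dataset's collection iterator without building any indices.
# `ds` is assumed to be an already-constructed dataset object exposing the
# _init_iter_collection() generator shown above.
def _count_collection_docs(ds):
    # The generator streams documents lazily, so this downloads/parses as it iterates.
    return sum(1 for _ in ds._init_iter_collection())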