def main():
    """Encode passages (or queries) with a trained encoder and save the embeddings."""
    args = encode_args()

    if args.fp16:
        import apex
        apex.amp.register_half_function(torch, 'einsum')

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device(
            "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        torch.distributed.init_process_group(backend='nccl')

    if not args.predict_file:
        raise ValueError("`predict_file` must be specified.")

    bert_config = AutoConfig.from_pretrained(args.model_name)
    if "roberta" in args.model_name:
        model = RobertaCtxEncoder(bert_config, args)
    else:
        model = CtxEncoder(bert_config, args)
    tokenizer = AutoTokenizer.from_pretrained(args.model_name)

    eval_dataset = EmDataset(
        tokenizer, args.predict_file, args.max_q_len, args.max_c_len,
        args.is_query_embed)
    eval_dataloader = DataLoader(
        eval_dataset, batch_size=args.predict_batch_size, collate_fn=em_collate,
        pin_memory=True, num_workers=args.num_workers)

    assert args.init_checkpoint != "", "--init_checkpoint is required"
    model = load_saved(model, args.init_checkpoint, exact=False)
    model.to(device)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex "
                "to use fp16 training.")
        model = amp.initialize(model, opt_level=args.fp16_opt_level)

    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    embeds = predict(model, eval_dataloader)
    print(embeds.size())
    np.save(args.embed_save_path, embeds.cpu().numpy())
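
# The array saved above is what the retrieval side loads back into FAISS.
# A minimal sketch of that round trip, mirroring the IndexFlatIP construction
# used in init_retrieval below (the path argument is a hypothetical default):
def _load_embeds_into_faiss(embed_path="corpus_embeds.npy"):
    import faiss
    import numpy as np
    xb = np.load(embed_path).astype("float32")
    index = faiss.IndexFlatIP(xb.shape[1])  # exact inner-product search
    index.add(xb)
    return index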
def init_reader(args):
    """Load the span-extraction reader (ELECTRA-large) and its tokenizer."""
    qa_config = AutoConfig.from_pretrained('google/electra-large-discriminator')
    qa_tokenizer = AutoTokenizer.from_pretrained('google/electra-large-discriminator')

    # QAModel reads the encoder name from args, so temporarily point it at
    # ELECTRA and restore the retriever's model name afterwards.
    retriever_name = args.model_name
    args.model_name = "google/electra-large-discriminator"
    reader = QAModel(qa_config, args)
    reader = load_saved(reader, args.reader_path, False)
    cuda = torch.device('cuda')
    reader.to(cuda)
    reader = amp.initialize(reader, opt_level='O1')
    reader.eval()
    args.model_name = retriever_name
    return reader, qa_tokenizer
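
# Illustrative usage sketch for the reader returned above. The tokenizer call
# is standard Hugging Face; the forward call is an assumption, since QAModel's
# exact interface is project-specific.
def _read_span_example(reader, qa_tokenizer, question, passage):
    encoded = qa_tokenizer(question, passage, return_tensors="pt",
                           truncation=True, max_length=512)
    encoded = {k: v.cuda() for k, v in encoded.items()}
    with torch.no_grad():
        outputs = reader(encoded)  # hypothetical: adapt to QAModel's interface
    return outputs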
def init_retrieval(args):
    print("Initializing retrieval module...")
    bert_config = AutoConfig.from_pretrained(args.model_name)
    tokenizer = AutoTokenizer.from_pretrained(args.model_name)
    retriever = RobertaRetriever(bert_config, args)
    retriever = load_saved(retriever, args.model_path, exact=False)
    cuda = torch.device('cuda')
    retriever.to(cuda)
    retriever = amp.initialize(retriever, opt_level='O1')
    retriever.eval()

    print("Loading index...")
    # Exact inner-product search over 768-dim passage embeddings.
    index = faiss.IndexFlatIP(768)
    xb = np.load(args.indexpath).astype('float32')
    index.add(xb)
    if args.index_gpu != -1:
        # Optionally move the index to a GPU for faster search.
        res = faiss.StandardGpuResources()
        index = faiss.index_cpu_to_gpu(res, args.index_gpu, index)

    print("Loading documents...")
    with open(args.corpus_dict) as f:
        id2doc = json.load(f)
    print("Index ready...")
    return retriever, index, id2doc, tokenizer
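
# Illustrative end-to-end lookup against the components returned above.
# encode_query is a hypothetical stand-in for the retriever's query encoder
# (assumed to return a [1, 768] numpy array); index.search and the
# string-keyed id2doc mapping follow directly from the code above, since
# json.load produces string keys.
def _retrieve_topk(retriever, index, id2doc, tokenizer, question, k=10):
    q_emb = encode_query(retriever, tokenizer, question)  # hypothetical helper
    scores, ids = index.search(q_emb.astype('float32'), k)
    return [id2doc[str(i)] for i in ids[0]]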
parser.add_argument("--stop-drop", default=0, type=float) parser.add_argument('--hnsw', action="store_true") args = parser.parse_args() logger.info("Loading data...") ds_items = [json.loads(_) for _ in open(args.raw_data).readlines()] # filter if args.only_eval_ans: ds_items = [_ for _ in ds_items if _["answer"][0] not in ["yes", "no"]] logger.info("Loading trained model...") bert_config = AutoConfig.from_pretrained(args.model_name) tokenizer = AutoTokenizer.from_pretrained(args.model_name) model = RobertaRetriever(bert_config, args) model = load_saved(model, args.model_path, exact=False) simple_tokenizer = SimpleTokenizer() cuda = torch.device('cuda') model.to(cuda) from apex import amp model = amp.initialize(model, opt_level='O1') model.eval() logger.info("Building index...") d = 768 xb = np.load(args.indexpath).astype('float32') if args.hnsw: if path.exists("data/hotpot_index/wiki_index_hnsw.index"): index = faiss.read_index("index/wiki_index_hnsw.index")
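
# The fragment above only covers reading a cached HNSW index. When the cache
# is absent, an index has to be built from the saved embeddings first. A
# minimal sketch with stock faiss; the M/efConstruction values and the metric
# choice are assumptions, not the project's settings (some pipelines instead
# append an extra dimension to reduce max-inner-product search to L2). Plain
# IndexFlatIP gives exact search; HNSW trades a little recall for much faster
# queries.
def _build_hnsw_index(xb, save_path):
    index = faiss.IndexHNSWFlat(xb.shape[1], 32, faiss.METRIC_INNER_PRODUCT)
    index.hnsw.efConstruction = 200  # build-time beam width (assumed value)
    index.add(xb)
    faiss.write_index(index, save_path)
    return index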
def main():
    args = train_args()
    if args.fp16:
        import apex
        apex.amp.register_half_function(torch, 'einsum')

    # Encode the run configuration into the output directory name.
    date_curr = date.today().strftime("%m-%d-%Y")
    model_name = (
        f"{args.prefix}-seed{args.seed}-bsz{args.train_batch_size}"
        f"-fp16{args.fp16}-lr{args.learning_rate}-decay{args.weight_decay}"
        f"-warm{args.warmup_ratio}-valbsz{args.predict_batch_size}"
        f"-shared{args.shared_encoder}-multi{args.multi_vector}"
        f"-scheme{args.scheme}")
    args.output_dir = os.path.join(args.output_dir, date_curr, model_name)
    tb_logger = SummaryWriter(args.output_dir.replace("logs", "tflogs"))

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        print(f"output directory {args.output_dir} already exists and is not empty.")
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir, exist_ok=True)

    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO,
        handlers=[logging.FileHandler(os.path.join(args.output_dir, "log.txt")),
                  logging.StreamHandler()])
    logger = logging.getLogger(__name__)
    logger.info(args)

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device(
            "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device %s n_gpu %d distributed training %r",
                device, n_gpu, bool(args.local_rank != -1))

    if args.accumulate_gradients < 1:
        raise ValueError("Invalid accumulate_gradients parameter: {}, "
                         "should be >= 1".format(args.accumulate_gradients))
    args.train_batch_size = int(args.train_batch_size / args.accumulate_gradients)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    bert_config = AutoConfig.from_pretrained(args.model_name)
    if "roberta" in args.model_name:
        model = RobertaRetriever(bert_config, args)
    else:
        if args.multi_vector > 1:
            logger.info("Using multi-vector encoder...")
            model = MhopRetrieverMultiVector(bert_config, args)
        else:
            model = MhopRetriever(bert_config, args)

    tokenizer = AutoTokenizer.from_pretrained(args.model_name)
    collate_fc = partial(mhop_collate, pad_id=tokenizer.pad_token_id)
    if args.do_train and args.max_c_len > bert_config.max_position_embeddings:
        raise ValueError(
            "Cannot use sequence length %d because the BERT model "
            "was only trained up to sequence length %d" %
            (args.max_c_len, bert_config.max_position_embeddings))

    eval_dataset = MhopDataset(
        tokenizer, args.predict_file, args.max_q_len, args.max_q_sp_len,
        args.max_c_len)
    eval_dataloader = DataLoader(
        eval_dataset, batch_size=args.predict_batch_size, collate_fn=collate_fc,
        pin_memory=True, num_workers=args.num_workers)
    logger.info(f"Num of dev batches: {len(eval_dataloader)}")

    if args.init_checkpoint != "":
        model = load_saved(model, args.init_checkpoint)
    model.to(device)
    print(f"number of trainable parameters: "
          f"{sum(p.numel() for p in model.parameters() if p.requires_grad)}")

    if args.do_train:
        # Apply weight decay to all parameters except biases and LayerNorm weights.
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_parameters = [
            {'params': [p for n, p in model.named_parameters()
                        if not any(nd in n for nd in no_decay)],
             'weight_decay': args.weight_decay},
            {'params': [p for n, p in model.named_parameters()
                        if any(nd in n for nd in no_decay)],
             'weight_decay': 0.0}]
        optimizer = Adam(optimizer_parameters,
                         lr=args.learning_rate, eps=args.adam_epsilon)

        if args.fp16:
            from apex import amp
            model, optimizer = amp.initialize(
                model, optimizer, opt_level=args.fp16_opt_level)
    else:
        if args.fp16:
            from apex import amp
            model = amp.initialize(model, opt_level=args.fp16_opt_level)

    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    if args.do_train:
        global_step = 0  # gradient update step
        batch_step = 0   # forward batch count
        best_mrr = 0
        train_loss_meter = AverageMeter()
        model.train()
        train_dataset = MhopDataset(
            tokenizer, args.train_file, args.max_q_len, args.max_q_sp_len,
            args.max_c_len, train=True)
        train_dataloader = DataLoader(
            train_dataset, batch_size=args.train_batch_size, pin_memory=True,
            collate_fn=collate_fc, num_workers=args.num_workers, shuffle=True)

        t_total = (len(train_dataloader) // args.gradient_accumulation_steps
                   * args.num_train_epochs)
        warmup_steps = int(t_total * args.warmup_ratio)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)

        logger.info('Start training....')
        for epoch in range(int(args.num_train_epochs)):
            for batch in tqdm(train_dataloader):
                batch_step += 1
                batch = move_to_cuda(batch)
                loss = mhop_loss(model, batch, args)
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()
                train_loss_meter.update(loss.item())

                if (batch_step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        torch.nn.utils.clip_grad_norm_(
                            amp.master_params(optimizer), args.max_grad_norm)
                    else:
                        torch.nn.utils.clip_grad_norm_(
                            model.parameters(), args.max_grad_norm)
                    optimizer.step()
                    scheduler.step()
                    model.zero_grad()
                    global_step += 1

                    tb_logger.add_scalar('batch_train_loss',
                                         loss.item(), global_step)
                    tb_logger.add_scalar('smoothed_train_loss',
                                         train_loss_meter.avg, global_step)

                    # Optional mid-epoch evaluation.
                    if args.eval_period != -1 and global_step % args.eval_period == 0:
                        mrrs = predict(args, model, eval_dataloader, device, logger)
                        mrr = mrrs["mrr_avg"]
                        logger.info("Step %d Train loss %.2f MRR %.2f on epoch=%d" % (
                            global_step, train_loss_meter.avg, mrr * 100, epoch))
                        if best_mrr < mrr:
                            logger.info("Saving model with best MRR %.2f -> MRR %.2f on epoch=%d" % (
                                best_mrr * 100, mrr * 100, epoch))
                            torch.save(model.state_dict(), os.path.join(
                                args.output_dir, "checkpoint_best.pt"))
                            model = model.to(device)
                            best_mrr = mrr

            # End-of-epoch evaluation and checkpointing.
            mrrs = predict(args, model, eval_dataloader, device, logger)
            mrr = mrrs["mrr_avg"]
            logger.info("Step %d Train loss %.2f MRR-AVG %.2f on epoch=%d" % (
                global_step, train_loss_meter.avg, mrr * 100, epoch))
            for k, v in mrrs.items():
                tb_logger.add_scalar(k, v * 100, epoch)
            torch.save(model.state_dict(), os.path.join(
                args.output_dir, "checkpoint_last.pt"))
            if best_mrr < mrr:
                logger.info("Saving model with best MRR %.2f -> MRR %.2f on epoch=%d" % (
                    best_mrr * 100, mrr * 100, epoch))
                torch.save(model.state_dict(), os.path.join(
                    args.output_dir, "checkpoint_best.pt"))
                best_mrr = mrr

        logger.info("Training finished!")

    elif args.do_predict:
        acc = predict(args, model, eval_dataloader, device, logger)
        logger.info(f"test performance {acc}")
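
# mhop_loss is defined elsewhere in the project. For reference, a minimal
# sketch of the in-batch-negative contrastive objective that dense retrievers
# of this kind typically optimize; names and shapes here are assumptions, not
# the project's exact formulation.
def _inbatch_contrastive_loss(q_embeds, c_embeds):
    # q_embeds: [B, H] query vectors; c_embeds: [B, H] aligned positive
    # passage vectors. Every other row of c_embeds acts as a negative.
    scores = torch.mm(q_embeds, c_embeds.t())                 # [B, B] similarities
    target = torch.arange(scores.size(0), device=scores.device)
    return torch.nn.functional.cross_entropy(scores, target)  # diagonal = positives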