def main(config_path):
    logger.info('------------Match-LSTM Train--------------')
    logger.info('loading config file...')
    global_config = read_config(config_path)

    # set random seed
    seed = global_config['model']['global']['random_seed']
    torch.manual_seed(seed)

    enable_cuda = global_config['train']['enable_cuda']
    device = torch.device("cuda" if enable_cuda else "cpu")
    if torch.cuda.is_available() and not enable_cuda:
        logger.warning("CUDA is available, you can enable CUDA in config file")
    elif not torch.cuda.is_available() and enable_cuda:
        raise ValueError("CUDA is not available, please disable CUDA in config file")

    logger.info('reading squad dataset...')
    dataset = SquadDataset(global_config)

    logger.info('constructing model...')
    model = MatchLSTMModel(global_config).to(device)
    criterion = MyNLLLoss()

    # optimizer
    optimizer_choose = global_config['train']['optimizer']
    optimizer_lr = global_config['train']['learning_rate']
    optimizer_param = filter(lambda p: p.requires_grad, model.parameters())

    if optimizer_choose == 'adamax':
        optimizer = optim.Adamax(optimizer_param)
    elif optimizer_choose == 'adadelta':
        optimizer = optim.Adadelta(optimizer_param)
    elif optimizer_choose == 'adam':
        optimizer = optim.Adam(optimizer_param)
    elif optimizer_choose == 'sgd':
        optimizer = optim.SGD(optimizer_param, lr=optimizer_lr)
    else:
        raise ValueError('optimizer "%s" in config file not recognized' % optimizer_choose)

    # load existing model weight if present
    weight_path = global_config['data']['model_path']
    if os.path.exists(weight_path):
        logger.info('loading existing weight...')
        map_location = (lambda storage, loc: storage.cuda()) if enable_cuda \
            else (lambda storage, loc: storage)
        weight = torch.load(weight_path, map_location=map_location)
        # weight = pop_dict_keys(weight, ['pointer', 'init_ptr_hidden'])  # partial initial weight
        model.load_state_dict(weight, strict=False)

    # training arguments
    logger.info('start training...')
    train_batch_size = global_config['train']['batch_size']
    valid_batch_size = global_config['train']['valid_batch_size']

    # batch_train_data = dataset.get_dataloader_train(train_batch_size)
    # batch_dev_data = dataset.get_dataloader_dev(valid_batch_size)
    batch_train_data = list(dataset.get_batch_train(train_batch_size))
    batch_dev_data = list(dataset.get_batch_dev(valid_batch_size))

    clip_grad_max = global_config['train']['clip_grad_norm']
    enable_char = global_config['model']['encoder']['enable_char']
    best_valid_f1 = None

    # every epoch
    for epoch in range(global_config['train']['epoch']):
        # train
        model.train()  # set training = True to enable dropout
        sum_loss = train_on_model(model=model,
                                  criterion=criterion,
                                  optimizer=optimizer,
                                  batch_data=batch_train_data,
                                  epoch=epoch,
                                  clip_grad_max=clip_grad_max,
                                  device=device,
                                  enable_char=enable_char,
                                  batch_char_func=dataset.gen_batch_with_char)
        logger.info('epoch=%d, sum_loss=%.5f' % (epoch, sum_loss))

        # evaluate
        with torch.no_grad():
            model.eval()  # set training = False to disable dropout
            valid_score_em, valid_score_f1, valid_loss = eval_on_model(
                model=model,
                criterion=criterion,
                batch_data=batch_dev_data,
                epoch=epoch,
                device=device,
                enable_char=enable_char,
                batch_char_func=dataset.gen_batch_with_char)
        logger.info("epoch=%d, ave_score_em=%.2f, ave_score_f1=%.2f, sum_loss=%.5f" %
                    (epoch, valid_score_em, valid_score_f1, valid_loss))

        # save model on best f1 score
        if best_valid_f1 is None or valid_score_f1 > best_valid_f1:
            save_model(model,
                       epoch=epoch,
                       model_weight_path=global_config['data']['model_path'],
                       checkpoint_path=global_config['data']['checkpoint_path'])
            logger.info("saving model weight on epoch=%d" % epoch)
            best_valid_f1 = valid_score_f1

    logger.info('finished.')
def train(config_path):
    logger.info('------------MODEL TRAIN--------------')
    logger.info('loading config file...')
    global_config = read_config(config_path)

    # set random seed
    seed = global_config['global']['random_seed']
    torch.manual_seed(seed)

    enable_cuda = global_config['train']['enable_cuda']
    device = torch.device("cuda" if enable_cuda else "cpu")
    if torch.cuda.is_available() and not enable_cuda:
        logger.warning("CUDA is available, you can enable CUDA in config file")
    elif not torch.cuda.is_available() and enable_cuda:
        raise ValueError("CUDA is not available, please disable CUDA in config file")

    logger.info('reading dataset...')
    dataset = Dataset(global_config)

    logger.info('constructing model...')
    dataset_h5_path = global_config['data']['dataset_h5']
    model = MatchLSTMPlus(dataset_h5_path)
    model = model.to(device)
    criterion = MyNLLLoss()
    optimizer_param = filter(lambda p: p.requires_grad, model.parameters())

    model_rerank = None
    rank_k = global_config['global']['rank_k']
    if global_config['global']['enable_rerank']:
        model_rerank = ReRanker(dataset_h5_path)
        model_rerank = model_rerank.to(device)
        criterion = torch.nn.NLLLoss()
        optimizer_param = filter(lambda p: p.requires_grad, model_rerank.parameters())

    # optimizer
    optimizer_choose = global_config['train']['optimizer']
    optimizer_lr = global_config['train']['learning_rate']
    if optimizer_choose == 'adamax':
        optimizer = optim.Adamax(optimizer_param)
    elif optimizer_choose == 'adadelta':
        optimizer = optim.Adadelta(optimizer_param)
    elif optimizer_choose == 'adam':
        optimizer = optim.Adam(optimizer_param)
    elif optimizer_choose == 'sgd':
        optimizer = optim.SGD(optimizer_param, lr=optimizer_lr)
    else:
        raise ValueError('optimizer "%s" in config file not recognized' % optimizer_choose)

    # load existing model weights if present
    map_location = (lambda storage, loc: storage.cuda()) if enable_cuda \
        else (lambda storage, loc: storage)
    weight_path = global_config['data']['model_path']
    if os.path.exists(weight_path):
        logger.info('loading existing weight...')
        weight = torch.load(weight_path, map_location=map_location)
        # weight = pop_dict_keys(weight, ['pointer', 'init_ptr_hidden'])  # partial initial weight
        model.load_state_dict(weight, strict=False)

    rerank_weight_path = global_config['data']['rerank_model_path']
    if global_config['global']['enable_rerank'] and os.path.exists(rerank_weight_path):
        logger.info('loading existing rerank weight...')
        weight = torch.load(rerank_weight_path, map_location=map_location)
        model_rerank.load_state_dict(weight, strict=False)

    # training arguments
    logger.info('start training...')
    train_batch_size = global_config['train']['batch_size']
    valid_batch_size = global_config['train']['valid_batch_size']
    num_workers = global_config['global']['num_data_workers']
    batch_train_data = dataset.get_dataloader_train(train_batch_size, num_workers)
    batch_dev_data = dataset.get_dataloader_dev(valid_batch_size, num_workers)

    clip_grad_max = global_config['train']['clip_grad_norm']
    best_avg = 0.

    # every epoch
    for epoch in range(global_config['train']['epoch']):
        # train
        model.train()  # set training = True to enable dropout
        if global_config['global']['enable_rerank']:
            model_rerank.train()
        sum_loss = train_on_model(model=model,
                                  criterion=criterion,
                                  optimizer=optimizer,
                                  batch_data=batch_train_data,
                                  epoch=epoch,
                                  clip_grad_max=clip_grad_max,
                                  device=device,
                                  model_rerank=model_rerank,
                                  rank_k=rank_k)
        logger.info('epoch=%d, sum_loss=%.5f' % (epoch, sum_loss))

        # evaluate
        with torch.no_grad():
            model.eval()  # set training = False to disable dropout
            if global_config['global']['enable_rerank']:
                model_rerank.eval()
            valid_score_em, valid_score_f1, valid_loss = eval_on_model(
                model=model,
                criterion=criterion,
                batch_data=batch_dev_data,
                epoch=epoch,
                device=device,
                model_rerank=model_rerank,
                rank_k=rank_k)
        valid_avg = (valid_score_em + valid_score_f1) / 2
        logger.info("epoch=%d, ave_score_em=%.2f, ave_score_f1=%.2f, sum_loss=%.5f" %
                    (epoch, valid_score_em, valid_score_f1, valid_loss))

        # save model on best average score
        if valid_avg > best_avg:
            if model_rerank is not None:
                save_model(model_rerank,
                           epoch=epoch,
                           model_weight_path=global_config['data']['rerank_model_path'],
                           checkpoint_path=global_config['data']['checkpoint_path'])
                logger.info("saving rerank model weight on epoch=%d" % epoch)
            else:
                save_model(model,
                           epoch=epoch,
                           model_weight_path=global_config['data']['model_path'],
                           checkpoint_path=global_config['data']['checkpoint_path'])
                logger.info("saving model weight on epoch=%d" % epoch)
            best_avg = valid_avg

    logger.info('finished.')
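# `save_model` is imported from the repo's utilities and is not defined in this
# section. A minimal sketch of the behaviour the training loops above rely on
# (hypothetical helper name; the real implementation may snapshot per-epoch
# checkpoints differently):
def save_model_sketch(model, epoch, model_weight_path, checkpoint_path):
    """Persist the current weights and record which epoch produced them."""
    torch.save(model.state_dict(), model_weight_path)
    with open(checkpoint_path, 'a') as checkpoint_f:
        checkpoint_f.write('epoch=%d\n' % epoch)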
def test(config_path, out_path):
    logger.info('------------MODEL PREDICT--------------')
    logger.info('loading config file...')
    global_config = read_config(config_path)

    # set random seed
    seed = global_config['global']['random_seed']
    torch.manual_seed(seed)

    # set default GPU
    os.environ["CUDA_VISIBLE_DEVICES"] = str(global_config['train']["gpu_id"])

    enable_cuda = global_config['test']['enable_cuda']
    device = torch.device("cuda" if enable_cuda else "cpu")
    if torch.cuda.is_available() and not enable_cuda:
        logger.warning("CUDA is available, you can enable CUDA in config file")
    elif not torch.cuda.is_available() and enable_cuda:
        raise ValueError("CUDA is not available, please disable CUDA in config file")

    torch.set_grad_enabled(False)  # make sure all tensors below have requires_grad=False

    logger.info('reading squad dataset...')
    dataset = SquadDataset(global_config)

    logger.info('constructing model...')
    model_choose = global_config['global']['model']
    dataset_h5_path = global_config['data']['dataset_h5']
    if model_choose == 'base':
        model_config = read_config('config/base_model.yaml')
        model = BaseModel(dataset_h5_path, model_config)
    elif model_choose == 'match-lstm':
        model = MatchLSTM(dataset_h5_path)
    elif model_choose == 'match-lstm+':
        model = MatchLSTMPlus(dataset_h5_path, global_config['preprocess']['use_domain_tag'])
    elif model_choose == 'r-net':
        model = RNet(dataset_h5_path)
    elif model_choose == 'm-reader':
        model = MReader(dataset_h5_path)
    else:
        raise ValueError('model "%s" in config file not recognized' % model_choose)

    model = model.to(device)
    model.eval()  # set training = False to disable dropout

    # load model weight
    logger.info('loading model weight...')
    model_weight_path = global_config['data']['model_path']
    assert os.path.exists(model_weight_path), \
        "not found model weight file on '%s'" % model_weight_path
    map_location = (lambda storage, loc: storage.cuda()) if enable_cuda \
        else (lambda storage, loc: storage)
    weight = torch.load(model_weight_path, map_location=map_location)
    model.load_state_dict(weight, strict=False)

    # forward
    logger.info('forwarding...')
    batch_size = global_config['test']['batch_size']
    num_workers = global_config['global']['num_data_workers']
    if 'test_path' not in global_config['data']['dataset']:
        batch_test_data = dataset.get_dataloader_dev(batch_size, num_workers)
    else:
        batch_test_data = dataset.get_dataloader_test(batch_size, num_workers)

    # either just evaluate the score or write the answers to file
    if out_path is None:
        criterion = MyNLLLoss()
        score_em, score_f1, sum_loss = eval_on_model(model=model,
                                                     criterion=criterion,
                                                     batch_data=batch_test_data,
                                                     epoch=None,
                                                     device=device)
        logger.info("test: ave_score_em=%.2f, ave_score_f1=%.2f, sum_loss=%.5f" %
                    (score_em, score_f1, sum_loss))
    else:
        # context_right_space = dataset.get_all_ct_right_space_dev()
        context_right_space = dataset.get_all_ct_right_space_test()
        predict_ans = predict_on_model(model=model,
                                       batch_data=batch_test_data,
                                       device=device,
                                       id_to_word_func=dataset.sentence_id2word,
                                       right_space=context_right_space)
        # samples_id = dataset.get_all_samples_id_dev()
        samples_id = dataset.get_all_samples_id_test()
        ans_with_id = dict(zip(samples_id, predict_ans))

        logger.info('writing predict answer to file %s' % out_path)
        with open(out_path, 'w') as f:
            json.dump(ans_with_id, f)

    logger.info('finished.')
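# `read_config` is shared by every entry point in this file but defined
# elsewhere in the repo. A minimal sketch under the assumption that configs
# are plain YAML (hypothetical helper name; requires PyYAML):
import yaml

def read_config_sketch(config_path):
    """Load a YAML config file into a nested dict."""
    with open(config_path, 'r') as config_f:
        return yaml.safe_load(config_f)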
def test(config_path, out_path):
    logger.info('------------MODEL PREDICT--------------')
    logger.info('loading config file...')
    global_config = read_config(config_path)

    # set random seed
    seed = global_config['global']['random_seed']
    torch.manual_seed(seed)

    enable_cuda = global_config['test']['enable_cuda']
    device = torch.device("cuda" if enable_cuda else "cpu")
    if torch.cuda.is_available() and not enable_cuda:
        logger.warning("CUDA is available, you can enable CUDA in config file")
    elif not torch.cuda.is_available() and enable_cuda:
        raise ValueError("CUDA is not available, please disable CUDA in config file")

    torch.set_grad_enabled(False)  # make sure all tensors below have requires_grad=False

    logger.info('reading dataset...')
    dataset = Dataset(global_config)

    logger.info('constructing model...')
    dataset_h5_path = global_config['data']['dataset_h5']
    model = MatchLSTMPlus(dataset_h5_path)
    model = model.to(device)
    model.eval()  # set training = False to disable dropout
    criterion = MyNLLLoss()

    model_rerank = None
    rank_k = global_config['global']['rank_k']
    if global_config['global']['enable_rerank']:
        model_rerank = ReRanker(dataset_h5_path)
        model_rerank = model_rerank.to(device)
        model_rerank.eval()
        criterion = torch.nn.NLLLoss()

    # load model weight
    logger.info('loading model weight...')
    map_location = (lambda storage, loc: storage.cuda()) if enable_cuda \
        else (lambda storage, loc: storage)
    model_weight_path = global_config['data']['model_path']
    assert os.path.exists(model_weight_path), \
        "not found model weight file on '%s'" % model_weight_path
    weight = torch.load(model_weight_path, map_location=map_location)
    model.load_state_dict(weight, strict=False)

    if global_config['global']['enable_rerank']:
        rerank_weight_path = global_config['data']['rerank_model_path']
        assert os.path.exists(rerank_weight_path), \
            "not found rerank model weight file on '%s'" % rerank_weight_path
        logger.info('loading rerank model weight...')
        weight = torch.load(rerank_weight_path, map_location=map_location)
        model_rerank.load_state_dict(weight, strict=False)

    # forward
    logger.info('forwarding...')
    batch_size = global_config['test']['batch_size']
    num_workers = global_config['global']['num_data_workers']
    batch_dev_data = dataset.get_dataloader_dev(batch_size, num_workers)

    # either just evaluate the score or write the answers to file
    if out_path is None:
        score_em, score_f1, sum_loss = eval_on_model(model=model,
                                                     criterion=criterion,
                                                     batch_data=batch_dev_data,
                                                     epoch=None,
                                                     device=device,
                                                     model_rerank=model_rerank,
                                                     rank_k=rank_k)
        logger.info("test: ave_score_em=%.2f, ave_score_f1=%.2f, sum_loss=%.5f" %
                    (score_em, score_f1, sum_loss))
    else:
        context_right_space = dataset.get_all_ct_right_space_dev()
        predict_ans = predict_on_model(model=model,
                                       batch_data=batch_dev_data,
                                       device=device,
                                       id_to_word_func=dataset.sentence_id2word,
                                       right_space=context_right_space,
                                       model_rerank=model_rerank,
                                       rank_k=rank_k)
        samples_id = dataset.get_all_samples_id_dev()
        ans_with_id = dict(zip(samples_id, predict_ans))

        logger.info('writing predict answer to file %s' % out_path)
        with open(out_path, 'w') as f:
            json.dump(ans_with_id, f)

    logger.info('finished.')
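# A typical command-line entry point dispatching to the train/test functions
# above. This is a sketch, not the repo's actual run script; the argument
# names and default config path are assumptions:
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Match-LSTM train / predict')
    parser.add_argument('mode', choices=['train', 'test'])
    parser.add_argument('--config', default='config/global_config.yaml')
    parser.add_argument('--output', default=None,
                        help='path for the predicted-answer JSON (test mode only)')
    args = parser.parse_args()
    if args.mode == 'train':
        train(args.config)
    else:
        test(args.config, args.output)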
def train(config_path):
    logger.info('------------MODEL TRAIN--------------')
    logger.info('loading config file...')
    global_config = read_config(config_path)

    # set random seed
    seed = global_config['global']['random_seed']
    torch.manual_seed(seed)

    # set default GPU
    os.environ["CUDA_VISIBLE_DEVICES"] = str(global_config['train']["gpu_id"])

    enable_cuda = global_config['train']['enable_cuda']
    device = torch.device("cuda" if enable_cuda else "cpu")
    if torch.cuda.is_available() and not enable_cuda:
        logger.warning("CUDA is available, you can enable CUDA in config file")
    elif not torch.cuda.is_available() and enable_cuda:
        raise ValueError("CUDA is not available, please disable CUDA in config file")

    logger.info('reading squad dataset...')
    dataset = SquadDataset(global_config)

    logger.info('constructing model...')
    model_choose = global_config['global']['model']
    dataset_h5_path = global_config['data']['dataset_h5']
    if model_choose == 'base':
        model_config = read_config('config/base_model.yaml')
        model = BaseModel(dataset_h5_path, model_config)
    elif model_choose == 'match-lstm':
        model = MatchLSTM(dataset_h5_path)
    elif model_choose == 'match-lstm+':
        model = MatchLSTMPlus(dataset_h5_path, global_config['preprocess']['use_domain_tag'])
    elif model_choose == 'r-net':
        model = RNet(dataset_h5_path)
    elif model_choose == 'm-reader':
        model = MReader(dataset_h5_path)
    else:
        raise ValueError('model "%s" in config file not recognized' % model_choose)

    model = model.to(device)
    criterion = MyNLLLoss()

    # optimizer
    optimizer_choose = global_config['train']['optimizer']
    optimizer_lr = global_config['train']['learning_rate']
    optimizer_param = filter(lambda p: p.requires_grad, model.parameters())

    if optimizer_choose == 'adamax':
        optimizer = optim.Adamax(optimizer_param)
    elif optimizer_choose == 'adadelta':
        optimizer = optim.Adadelta(optimizer_param)
    elif optimizer_choose == 'adam':
        optimizer = optim.Adam(optimizer_param)
    elif optimizer_choose == 'sgd':
        optimizer = optim.SGD(optimizer_param, lr=optimizer_lr)
    else:
        raise ValueError('optimizer "%s" in config file not recognized' % optimizer_choose)

    # load existing model weight if present
    weight_path = global_config['data']['model_path']
    if os.path.exists(weight_path):
        logger.info('loading existing weight...')
        map_location = (lambda storage, loc: storage.cuda()) if enable_cuda \
            else (lambda storage, loc: storage)
        weight = torch.load(weight_path, map_location=map_location)
        # weight = pop_dict_keys(weight, ['pointer', 'init_ptr_hidden'])  # partial initial weight
        model.load_state_dict(weight, strict=False)

    # training arguments
    logger.info('start training...')
    train_batch_size = global_config['train']['batch_size']
    valid_batch_size = global_config['train']['valid_batch_size']
    num_workers = global_config['global']['num_data_workers']
    batch_train_data = dataset.get_dataloader_train(train_batch_size, num_workers)
    batch_dev_data = dataset.get_dataloader_dev(valid_batch_size, num_workers)

    clip_grad_max = global_config['train']['clip_grad_norm']
    best_avg = 0.

    # every epoch
    for epoch in range(global_config['train']['epoch']):
        # train
        model.train()  # set training = True to enable dropout
        sum_loss = train_on_model(model=model,
                                  criterion=criterion,
                                  optimizer=optimizer,
                                  batch_data=batch_train_data,
                                  epoch=epoch,
                                  clip_grad_max=clip_grad_max,
                                  device=device)
        logger.info('epoch=%d, sum_loss=%.5f' % (epoch, sum_loss))

        # evaluate
        with torch.no_grad():
            model.eval()  # set training = False to disable dropout
            valid_score_em, valid_score_f1, valid_loss = eval_on_model(
                model=model,
                criterion=criterion,
                batch_data=batch_dev_data,
                epoch=epoch,
                device=device)
        valid_avg = (valid_score_em + valid_score_f1) / 2
        logger.info("epoch=%d, ave_score_em=%.2f, ave_score_f1=%.2f, sum_loss=%.5f" %
                    (epoch, valid_score_em, valid_score_f1, valid_loss))

        # save model on best average score
        if valid_avg > best_avg:
            save_model(model,
                       epoch=epoch,
                       model_weight_path=global_config['data']['model_path'],
                       checkpoint_path=global_config['data']['checkpoint_path'])
            logger.info("saving model weight on epoch=%d" % epoch)
            best_avg = valid_avg

    logger.info('pretraining finished.')

    if global_config['global']['finetune']:
        batch_train_data = dataset.get_dataloader_train2(train_batch_size, num_workers)
        batch_dev_data = dataset.get_dataloader_dev2(valid_batch_size, num_workers)

        for epoch in range(global_config['train']['finetune_epoch']):
            # train
            model.train()  # set training = True to enable dropout
            sum_loss = train_on_model(model=model,
                                      criterion=criterion,
                                      optimizer=optimizer,
                                      batch_data=batch_train_data,
                                      epoch=epoch,
                                      clip_grad_max=clip_grad_max,
                                      device=device)
            logger.info('finetune epoch=%d, sum_loss=%.5f' % (epoch, sum_loss))

            # evaluate
            with torch.no_grad():
                model.eval()  # set training = False to disable dropout
                valid_score_em, valid_score_f1, valid_loss = eval_on_model(
                    model=model,
                    criterion=criterion,
                    batch_data=batch_dev_data,
                    epoch=epoch,
                    device=device)
            valid_avg = (valid_score_em + valid_score_f1) / 2
            logger.info("finetune epoch=%d, ave_score_em=%.2f, ave_score_f1=%.2f, sum_loss=%.5f" %
                        (epoch, valid_score_em, valid_score_f1, valid_loss))

            # save model on best average score
            if valid_avg > best_avg:
                save_model(model,
                           epoch=epoch,
                           model_weight_path=global_config['data']['model_path'],
                           checkpoint_path=global_config['data']['checkpoint_path'])
                logger.info("saving model weight on epoch=%d" % epoch)
                best_avg = valid_avg

    if global_config['global']['finetune2']:
        batch_train_data = dataset.get_dataloader_train3(train_batch_size, num_workers)
        batch_dev_data = dataset.get_dataloader_dev3(valid_batch_size, num_workers)

        for epoch in range(global_config['train']['finetune_epoch2']):
            # train
            model.train()  # set training = True to enable dropout
            sum_loss = train_on_model(model=model,
                                      criterion=criterion,
                                      optimizer=optimizer,
                                      batch_data=batch_train_data,
                                      epoch=epoch,
                                      clip_grad_max=clip_grad_max,
                                      device=device)
            logger.info('finetune2 epoch=%d, sum_loss=%.5f' % (epoch, sum_loss))

            # evaluate
            with torch.no_grad():
                model.eval()  # set training = False to disable dropout
                valid_score_em, valid_score_f1, valid_loss = eval_on_model(
                    model=model,
                    criterion=criterion,
                    batch_data=batch_dev_data,
                    epoch=epoch,
                    device=device)
            valid_avg = (valid_score_em + valid_score_f1) / 2
            logger.info("finetune2 epoch=%d, ave_score_em=%.2f, ave_score_f1=%.2f, sum_loss=%.5f" %
                        (epoch, valid_score_em, valid_score_f1, valid_loss))

            # save model on best average score
            if valid_avg > best_avg:
                save_model(model,
                           epoch=epoch,
                           model_weight_path=global_config['data']['model_path'],
                           checkpoint_path=global_config['data']['checkpoint_path'])
                logger.info("saving model weight on epoch=%d" % epoch)
                best_avg = valid_avg

    logger.info('finished.')
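# `train_on_model` is the per-epoch loop shared by all training functions
# above. The core per-batch step it performs is a standard clipped-gradient
# update; a minimal sketch (hypothetical function and variable names):
def train_step_sketch(model, criterion, optimizer, batch_input, batch_target,
                      clip_grad_max):
    """One forward/backward pass with gradient-norm clipping."""
    optimizer.zero_grad()
    pred = model(*batch_input)  # span log-probabilities
    loss = criterion(pred, batch_target)
    loss.backward()
    torch.nn.utils.clip_grad_norm_(model.parameters(), clip_grad_max)
    optimizer.step()
    return loss.item()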
def main(config_path, out_path):
    logger.info('------------Match-LSTM Evaluate--------------')
    logger.info('loading config file...')
    global_config = read_config(config_path)

    # set random seed
    seed = global_config['model']['global']['random_seed']
    torch.manual_seed(seed)

    enable_cuda = global_config['test']['enable_cuda']
    device = torch.device("cuda" if enable_cuda else "cpu")
    if torch.cuda.is_available() and not enable_cuda:
        logger.warning("CUDA is available, you can enable CUDA in config file")
    elif not torch.cuda.is_available() and enable_cuda:
        raise ValueError("CUDA is not available, please disable CUDA in config file")

    torch.set_grad_enabled(False)  # make sure all tensors below have requires_grad=False

    logger.info('reading squad dataset...')
    dataset = SquadDataset(global_config)

    logger.info('constructing model...')
    model = MatchLSTMModel(global_config).to(device)
    model.eval()  # set training = False to disable dropout

    # load model weight
    logger.info('loading model weight...')
    model_weight_path = global_config['data']['model_path']
    assert os.path.exists(model_weight_path), \
        "not found model weight file on '%s'" % model_weight_path
    map_location = (lambda storage, loc: storage.cuda()) if enable_cuda \
        else (lambda storage, loc: storage)
    weight = torch.load(model_weight_path, map_location=map_location)
    model.load_state_dict(weight, strict=False)

    # forward
    logger.info('forwarding...')
    enable_char = global_config['model']['encoder']['enable_char']
    batch_size = global_config['test']['batch_size']
    # batch_dev_data = dataset.get_dataloader_dev(batch_size)
    batch_dev_data = list(dataset.get_batch_dev(batch_size))

    # either just evaluate the score or write the answers to file
    if out_path is None:
        criterion = MyNLLLoss()
        score_em, score_f1, sum_loss = eval_on_model(model=model,
                                                     criterion=criterion,
                                                     batch_data=batch_dev_data,
                                                     epoch=None,
                                                     device=device,
                                                     enable_char=enable_char,
                                                     batch_char_func=dataset.gen_batch_with_char)
        logger.info("test: ave_score_em=%.2f, ave_score_f1=%.2f, sum_loss=%.5f" %
                    (score_em, score_f1, sum_loss))
    else:
        predict_ans = predict_on_model(model=model,
                                       batch_data=batch_dev_data,
                                       device=device,
                                       enable_char=enable_char,
                                       batch_char_func=dataset.gen_batch_with_char,
                                       id_to_word_func=dataset.sentence_id2word)
        samples_id = dataset.get_all_samples_id_dev()
        ans_with_id = dict(zip(samples_id, predict_ans))

        logger.info('writing predict answer to file %s' % out_path)
        with open(out_path, 'w') as f:
            json.dump(ans_with_id, f)

    logger.info('finished.')
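# The EM/F1 scores logged above follow the SQuAD convention. For reference, a
# minimal sketch of token-level F1 between one prediction and one gold answer
# (simplified: it skips the lowercasing and article/punctuation stripping the
# official evaluation script applies):
from collections import Counter

def f1_score_sketch(prediction, ground_truth):
    pred_tokens = prediction.split()
    gold_tokens = ground_truth.split()
    num_same = sum((Counter(pred_tokens) & Counter(gold_tokens)).values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(gold_tokens)
    return 2 * precision * recall / (precision + recall)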