from typing import List
import os
import pickle
import random
import tarfile
import time
import itertools

import numpy as np
import torch
from tqdm import tqdm
from termcolor import colored

# Project-local classes and helpers (Config, Instance, NNCRF, batching_list_instances,
# predict_with_constraints, hard_constraint_predict, get_optimizer, lr_decay, train_one,
# evaluate_model, print_report, write_results) are assumed to come from the repository's
# own modules.


def update_train_insts(config: Config, train_insts: List[List[Instance]], model_names) -> List[List[Instance]]:
    # Assign predictions from the model trained on one fold to the instances of the other fold.
    if config.variant == "hard":
        print("\n\n[Data Info] Assigning labels for the HARD approach")
    else:
        print("\n\n[Data Info] Performing marginal decoding to assign the marginals")
    train_batches = [batching_list_instances(config, insts) for insts in train_insts]
    for fold_id, folded_train_insts in enumerate(train_insts):
        model = NNCRF(config)
        model_name = model_names[fold_id]
        model.load_state_dict(torch.load(model_name))
        # Decode the opposite fold (1 - fold_id, so exactly two folds are assumed) with the
        # model trained on this fold, setting new label ids (hard labels or marginals).
        predict_with_constraints(config=config,
                                 model=model,
                                 fold_batches=train_batches[1 - fold_id],
                                 folded_insts=train_insts[1 - fold_id])
    print("\n\n")
    return train_insts
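# --- Hypothetical usage sketch (an assumption, not part of the original code) ---
# Illustrates where update_train_insts fits in a two-fold self-training loop: train one
# model per fold with train_one, then let each model relabel the opposite fold. The
# function name run_self_training is a placeholder introduced here; train_one,
# batching_list_instances, and the config fields are taken from the surrounding code.
def run_self_training(config: Config, train_insts: List[List[Instance]],
                      dev_insts: List[Instance]) -> List[List[Instance]]:
    dev_batches = batching_list_instances(config, dev_insts)
    for round_id in range(config.num_outer_iterations):
        train_batches = [batching_list_instances(config, insts) for insts in train_insts]
        model_names = []
        for fold_id in range(len(train_insts)):
            model_name = f"{config.model_folder}/lstm_crf_{fold_id}.m"
            model_names.append(model_name)
            train_one(config=config, train_batches=train_batches[fold_id],
                      dev_insts=dev_insts, dev_batches=dev_batches, model_name=model_name)
        # Each fold's model relabels the opposite fold (hard labels or marginals,
        # depending on config.variant).
        train_insts = update_train_insts(config, train_insts, model_names)
    return train_insts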
def train_model(config: Config, epoch: int, train_insts: List[Instance], dev_insts: List[Instance],
                test_insts: List[Instance]):
    model = NNCRF(config)
    optimizer = get_optimizer(config, model)
    train_num = len(train_insts)
    print("number of instances: %d" % (train_num))
    print(colored("[Shuffled] Shuffle the training instance ids", "red"))
    random.shuffle(train_insts)

    batched_data = batching_list_instances(config, train_insts)
    dev_batches = batching_list_instances(config, dev_insts)
    test_batches = batching_list_instances(config, test_insts)

    best_dev = [-1, 0]
    best_test = [-1, 0]

    model_folder = config.model_folder
    res_folder = "results"
    if os.path.exists(model_folder):
        raise FileExistsError(
            f"The folder {model_folder} exists. Please either delete it or create a new one "
            f"to avoid overriding it.")
    model_name = model_folder + "/lstm_crf.m"
    config_name = model_folder + "/config.conf"
    res_name = res_folder + "/lstm_crf.results"
    print("[Info] The model will be saved to: %s.tar.gz" % (model_folder))
    if not os.path.exists(model_folder):
        os.makedirs(model_folder)
    if not os.path.exists(res_folder):
        os.makedirs(res_folder)

    for i in range(1, epoch + 1):
        epoch_loss = 0
        start_time = time.time()
        model.zero_grad()
        if config.optimizer.lower() == "sgd":
            optimizer = lr_decay(config, optimizer, i)
        for index in np.random.permutation(len(batched_data)):
            model.train()
            loss = model(*batched_data[index])
            epoch_loss += loss.item()
            loss.backward()
            optimizer.step()
            model.zero_grad()
        end_time = time.time()
        print("Epoch %d: %.5f, Time is %.2fs" % (i, epoch_loss, end_time - start_time), flush=True)

        model.eval()
        dev_metrics = evaluate_model(config, model, dev_batches, "dev", dev_insts)
        test_metrics = evaluate_model(config, model, test_batches, "test", test_insts)
        # Model selection here is driven by test F1; the corresponding dev F1 is recorded alongside it.
        if test_metrics[1][2] > best_test[0]:
            print("saving the best model...")
            best_dev[0] = dev_metrics[1][2]
            best_dev[1] = i
            best_test[0] = test_metrics[1][2]
            best_test[1] = i
            torch.save(model.state_dict(), model_name)
            # Save the corresponding config as well.
            with open(config_name, 'wb') as f:
                pickle.dump(config, f)
            print('Exact\n')
            print_report(test_metrics[-2])
            print('Overlap\n')
            print_report(test_metrics[-1])
            write_results(res_name, test_insts)
            print("Archiving the best Model...")
            with tarfile.open(model_folder + "/" + model_folder + ".tar.gz", "w:gz") as tar:
                tar.add(model_folder, arcname=os.path.basename(model_folder))
        model.zero_grad()

    print("Finished archiving the models")
    print("The best dev: %.2f" % (best_dev[0]))
    print("The corresponding test: %.2f" % (best_test[0]))
    print("Final testing.")
    model.load_state_dict(torch.load(model_name))
    model.eval()
    evaluate_model(config, model, test_batches, "test", test_insts)
    write_results(res_name, test_insts)
def train_model(config: Config, epoch: int, train_insts: List[Instance], dev_insts: List[Instance],
                test_insts: List[Instance]):
    model = NNCRF(config)
    optimizer = get_optimizer(config, model)
    train_num = len(train_insts)
    print("number of instances: %d" % (train_num))
    print(colored("[Shuffled] Shuffle the training instance ids", "red"))
    random.shuffle(train_insts)

    batched_data = batching_list_instances(config, train_insts)
    dev_batches = batching_list_instances(config, dev_insts)
    test_batches = batching_list_instances(config, test_insts)

    best_dev = [-1, 0]
    best_test = [-1, 0]

    model_folder = config.model_folder
    res_folder = "results"
    if os.path.exists("model_files/" + model_folder):
        raise FileExistsError(
            f"The folder model_files/{model_folder} exists. Please either delete it or create a new one "
            f"to avoid overriding it.")
    model_path = f"model_files/{model_folder}/lstm_crf.m"
    config_path = f"model_files/{model_folder}/config.conf"
    res_path = f"{res_folder}/{model_folder}.results"
    print("[Info] The model will be saved to: %s.tar.gz" % (model_folder))
    os.makedirs(f"model_files/{model_folder}", exist_ok=True)  # create the model folder; do not raise an error if it exists
    os.makedirs(res_folder, exist_ok=True)

    no_incre_dev = 0
    for i in tqdm(range(1, epoch + 1), desc="Epoch"):
        epoch_loss = 0
        start_time = time.time()
        model.zero_grad()
        if config.optimizer.lower() == "sgd":
            optimizer = lr_decay(config, optimizer, i)
        for index in tqdm(np.random.permutation(len(batched_data)), desc="--training batch", total=len(batched_data)):
            model.train()
            loss = model(*batched_data[index])
            epoch_loss += loss.item()
            loss.backward()
            optimizer.step()
            model.zero_grad()
        end_time = time.time()
        print("Epoch %d: %.5f, Time is %.2fs" % (i, epoch_loss, end_time - start_time), flush=True)

        model.eval()
        dev_metrics = evaluate_model(config, model, dev_batches, "dev", dev_insts)
        test_metrics = evaluate_model(config, model, test_batches, "test", test_insts)
        if dev_metrics[2] > best_dev[0]:
            print("saving the best model...")
            no_incre_dev = 0
            best_dev[0] = dev_metrics[2]
            best_dev[1] = i
            best_test[0] = test_metrics[2]
            best_test[1] = i
            torch.save(model.state_dict(), model_path)
            # Save the corresponding config as well.
            with open(config_path, 'wb') as f:
                pickle.dump(config, f)
            write_results(res_path, test_insts)
        else:
            no_incre_dev += 1
        model.zero_grad()
        if no_incre_dev >= config.max_no_incre:
            print("early stop because there are %d epochs not increasing f1 on dev" % no_incre_dev)
            break

    print("Archiving the best Model...")
    with tarfile.open(f"model_files/{model_folder}/{model_folder}.tar.gz", "w:gz") as tar:
        tar.add(f"model_files/{model_folder}", arcname=os.path.basename(model_folder))

    print("Finished archiving the models")
    print("The best dev: %.2f" % (best_dev[0]))
    print("The corresponding test: %.2f" % (best_test[0]))
    print("Final testing.")
    model.load_state_dict(torch.load(model_path))
    model.eval()
    evaluate_model(config, model, test_batches, "test", test_insts)
    write_results(res_path, test_insts)
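# --- Hypothetical usage sketch (an assumption, not part of the original code) ---
# Shows how a model saved by the trainer above might be restored for evaluation: the pickled
# Config is read back from config.conf and the weights from lstm_crf.m, mirroring the paths
# the trainer writes to. The function name load_saved_model is a placeholder introduced here.
def load_saved_model(model_folder: str) -> NNCRF:
    config_path = f"model_files/{model_folder}/config.conf"
    model_path = f"model_files/{model_folder}/lstm_crf.m"
    # Restore the configuration that was pickled alongside the weights.
    with open(config_path, 'rb') as f:
        saved_config = pickle.load(f)
    model = NNCRF(saved_config)
    model.load_state_dict(torch.load(model_path, map_location="cpu"))
    model.eval()
    return model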
def train_model(config: Config, train_insts: List[List[Instance]], dev_insts: List[Instance],
                test_insts: List[Instance]):
    train_num = sum([len(insts) for insts in train_insts])
    print(f"[Training Info] number of instances: {train_num:d}")

    dev_batches = batching_list_instances(config, dev_insts)
    test_batches = batching_list_instances(config, test_insts)

    best_dev = [-1, 0]
    best_test = [-1, 0]

    model_folder = config.model_folder
    res_folder = "results"
    # if os.path.exists(model_folder):
    #     raise FileExistsError(f"The folder {model_folder} exists. Please either delete it or create a new one "
    #                           f"to avoid override.")
    print(f"[Training Info] The model will be saved to: {model_folder}.tar.gz")
    if not os.path.exists(model_folder):
        os.makedirs(model_folder)
    if not os.path.exists(res_folder):
        os.makedirs(res_folder)

    num_outer_iterations = config.num_outer_iterations
    for iter in range(num_outer_iterations):
        print(f"[Training Info] Running the {iter}th outer iteration.")

        # Train one model per fold.
        model_names = []  # model names for each fold
        train_batches = [batching_list_instances(config, insts) for insts in train_insts]
        for fold_id, folded_train_insts in enumerate(train_insts):
            print(f"[Training Info] Training fold {fold_id}.")
            model_name = model_folder + f"/lstm_crf_{fold_id}.m"
            model_names.append(model_name)
            train_one(config=config, train_batches=train_batches[fold_id],
                      dev_insts=dev_insts, dev_batches=dev_batches, model_name=model_name)

        # Assign hard predictions to the other fold.
        print("\n\n[Data Info] Assigning labels for the HARD approach")
        for fold_id, folded_train_insts in enumerate(train_insts):
            model = NNCRF(config)
            model_name = model_names[fold_id]
            model.load_state_dict(torch.load(model_name))
            # Decode the opposite fold (1 - fold_id) and set new label ids on its instances.
            hard_constraint_predict(config=config, model=model,
                                    fold_batches=train_batches[1 - fold_id],
                                    folded_insts=train_insts[1 - fold_id])
        print("\n\n")

        # Train the final model on all (re-labelled) folds.
        print("[Training Info] Training the final model")
        all_train_insts = list(itertools.chain.from_iterable(train_insts))
        model_name = model_folder + "/final_lstm_crf.m"
        config_name = model_folder + "/config.conf"
        res_name = res_folder + "/lstm_crf.results"
        all_train_batches = batching_list_instances(config=config, insts=all_train_insts)
        model = train_one(config=config, train_batches=all_train_batches, dev_insts=dev_insts,
                          dev_batches=dev_batches, model_name=model_name, config_name=config_name,
                          test_insts=test_insts, test_batches=test_batches, result_filename=res_name)
        print("Archiving the best Model...")
        with tarfile.open(model_folder + "/" + model_folder + ".tar.gz", "w:gz") as tar:
            tar.add(model_folder, arcname=os.path.basename(model_folder))

        # print("The best dev: %.2f" % (best_dev[0]))
        # print("The corresponding test: %.2f" % (best_test[0]))
        # print("Final testing.")
        model.load_state_dict(torch.load(model_name))
        model.eval()
        evaluate_model(config, model, test_batches, "test", test_insts)
        write_results(res_name, test_insts)
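# --- Hypothetical driver sketch (an assumption, not part of the original code) ---
# Illustrates how the fold-based train_model above expects its training data: a list of folds,
# here a simple two-way split, matching the `1 - fold_id` indexing used when folds relabel each
# other. The names split_into_two_folds and all_insts are placeholders introduced here.
def split_into_two_folds(all_insts: List[Instance]) -> List[List[Instance]]:
    random.shuffle(all_insts)
    mid = len(all_insts) // 2
    return [all_insts[:mid], all_insts[mid:]]

# Example (placeholder variables assumed to be prepared elsewhere in the project):
# train_insts = split_into_two_folds(all_insts)
# train_model(config, train_insts, dev_insts, test_insts)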