# Imports assumed by the snippets below; project-level helpers (Config, Instance,
# NNCRF, NNCRF_sl, TransformersCRF, SlantedTriangular, batching_list_instances,
# simple_batching, get_optimizer, get_huggingface_optimizer_and_scheduler,
# lr_decay, evaluate_model, write_results, print_report, hard_constraint_predict,
# ratio_estimation, gen_forget_rate, gen_forget_rate_warmup) are assumed to be
# importable from the surrounding repository.
import itertools
import math
import os
import pickle
import random
import tarfile
import time
from typing import List, Tuple

import numpy as np
import torch
from termcolor import colored
from tqdm import tqdm


def train_one(config: Config,
              train_insts: List[Instance],
              dev_insts: List[Instance],
              model_name: str,
              test_insts: List[Instance] = None,
              config_name: str = None,
              result_filename: str = None) -> NNCRF:
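    """Train an NNCRF model, checkpointing whenever dev F1 improves.

    If test instances are given, the model is also evaluated on test each
    epoch, and the test metrics from the best-dev epoch are reported at
    the end.
    """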
    train_batches = batching_list_instances(config, train_insts)
    dev_batches = batching_list_instances(config, dev_insts)
    if test_insts:
        test_batches = simple_batching(config, test_insts)
    else:
        test_batches = None
    model = NNCRF(config)
    model.train()
    optimizer = get_optimizer(config, model)
    epoch = config.num_epochs
    best_dev_f1 = -1
    saved_test_metrics = None
    for i in range(1, epoch + 1):
        epoch_loss = 0
        start_time = time.time()
        model.zero_grad()
        if config.optimizer.lower() == "sgd":
            optimizer = lr_decay(config, optimizer, i)
        for index in np.random.permutation(len(train_batches)):
            model.train()
            loss = model(*train_batches[index])
            epoch_loss += loss.item()
            loss.backward()
            optimizer.step()
            model.zero_grad()
        end_time = time.time()
        print("Epoch %d: %.5f, Time is %.2fs" %
              (i, epoch_loss, end_time - start_time),
              flush=True)

        model.eval()
        # metric is [precision, recall, f_score]
        dev_metrics = evaluate_model(config, model, "dev", dev_insts)
        if test_insts is not None:
            test_metrics = evaluate_model(config, model, "test", test_insts)
        if dev_metrics[2] > best_dev_f1:
            print("saving the best model...")
            best_dev_f1 = dev_metrics[2]
            if test_insts is not None:
                saved_test_metrics = test_metrics
            torch.save(model.state_dict(), model_name)
            # Save the corresponding config as well.
            if config_name:
                with open(config_name, 'wb') as f:
                    pickle.dump(config, f)
            if result_filename:
                write_results(result_filename, test_insts)
        model.zero_grad()
    if test_insts is not None:
        print(f"The best dev F1: {best_dev_f1}")
        print(f"The corresponding test: {saved_test_metrics}")
    return model
def evaluate_on_test(config: Config, all_train_insts: List[Instance],
                     dev_insts: List[Instance], test_insts: List[Instance]):
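    """Train the final model on all training instances via train_one,
    archive the model folder as a tar.gz, then reload the best checkpoint
    and report test-set results.
    """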
    print("[Training Info] Training the final model")
    model_folder = config.model_folder
    res_folder = config.result_folder
    model_name = model_folder + "/final_lstm_crf.m"
    config_name = model_folder + "/config.conf"
    res_name = res_folder + "/lstm_crf.results"
    model = train_one(config=config,
                      train_insts=all_train_insts,
                      dev_insts=dev_insts,
                      model_name=model_name,
                      config_name=config_name,
                      test_insts=test_insts,
                      result_filename=res_name)
    print("Archiving the best Model...")
    with tarfile.open(model_folder + "/" + model_folder + ".tar.gz",
                      "w:gz") as tar:
        tar.add(model_folder, arcname=os.path.basename(model_folder))
    # print("The best dev: %.2f" % (best_dev[0]))
    # print("The corresponding test: %.2f" % (best_test[0]))
    # print("Final testing.")
    model.load_state_dict(torch.load(model_name))
    model.eval()
    evaluate_model(config, model, "test", test_insts)
    write_results(res_name, test_insts)
def train_model(config: Config, epoch: int, train_insts: List[Instance],
                dev_insts: List[Instance], test_insts: List[Instance]):
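    """Train either an NNCRF (static embeddings) or a TransformersCRF
    (huggingface encoder, with its own optimizer and scheduler), selecting
    the checkpoint by dev F1 and early-stopping after config.max_no_incre
    epochs without improvement.
    """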
    ### Data Processing Info
    train_num = len(train_insts)
    print("number of instances: %d" % (train_num))
    print(colored("[Shuffled] Shuffle the training instance ids", "red"))
    random.shuffle(train_insts)

    batched_data = batching_list_instances(config, train_insts)
    dev_batches = batching_list_instances(config, dev_insts)
    test_batches = batching_list_instances(config, test_insts)

    if config.embedder_type == "normal":
        model = NNCRF(config)
        optimizer = get_optimizer(config, model)
        scheduler = None
    else:
        print(
            colored(
                f"[Model Info]: Working with transformers package from huggingface with {config.embedder_type}",
                'red'))
        print(
            colored(
                "[Optimizer Info]: You should be aware that you are using the optimizer from huggingface.",
                'red'))
        print(
            colored(
                "[Optimizer Info]: Change the optimizer in transformers_util.py if you want to make some modifications.",
                'red'))
        model = TransformersCRF(config)
        optimizer, scheduler = get_huggingface_optimizer_and_scheduler(
            config,
            model,
            num_training_steps=len(batched_data) * epoch,
            weight_decay=0.0,
            eps=1e-8,
            warmup_step=0)
        print(
            colored("[Optimizer Info] Modify the optimizer info as you need.",
                    'red'))
        print(optimizer)

    model.to(config.device)

    best_dev = [-1, 0]
    best_test = [-1, 0]

    model_folder = config.model_folder
    res_folder = "results"
    if os.path.exists("model_files/" + model_folder):
        raise FileExistsError(
            f"The folder model_files/{model_folder} exists. Please either delete it or create a new one "
            f"to avoid override.")
    model_path = f"model_files/{model_folder}/lstm_crf.m"
    config_path = f"model_files/{model_folder}/config.conf"
    res_path = f"{res_folder}/{model_folder}.results"
    print("[Info] The model will be saved to: %s.tar.gz" % (model_folder))
    os.makedirs(f"model_files/{model_folder}",
                exist_ok=True)  ## create model files. not raise error if exist
    os.makedirs(res_folder, exist_ok=True)
    no_incre_dev = 0
    print(
        colored(
            f"[Train Info] Start training; training stops early if dev performance does not improve for {config.max_no_incre} epochs",
            'red'))
    for i in tqdm(range(1, epoch + 1), desc="Epoch"):
        epoch_loss = 0
        start_time = time.time()
        model.zero_grad()
        if config.optimizer.lower() == "sgd":
            optimizer = lr_decay(config, optimizer, i)
        for index in tqdm(np.random.permutation(len(batched_data)),
                          desc="--training batch",
                          total=len(batched_data)):
            model.train()
            loss = model(**batched_data[index])
            epoch_loss += loss.item()
            loss.backward()
            if config.max_grad_norm > 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(),
                                               config.max_grad_norm)
            optimizer.step()
            optimizer.zero_grad()
            model.zero_grad()
            if scheduler is not None:
                scheduler.step()
        end_time = time.time()
        print("Epoch %d: %.5f, Time is %.2fs" %
              (i, epoch_loss, end_time - start_time),
              flush=True)

        model.eval()
        dev_metrics = evaluate_model(config, model, dev_batches, "dev",
                                     dev_insts)
        test_metrics = evaluate_model(config, model, test_batches, "test",
                                      test_insts)
        if dev_metrics[2] > best_dev[0]:
            print("saving the best model...")
            no_incre_dev = 0
            best_dev[0] = dev_metrics[2]
            best_dev[1] = i
            best_test[0] = test_metrics[2]
            best_test[1] = i
            torch.save(model.state_dict(), model_path)
            # Save the corresponding config as well.
            with open(config_path, 'wb') as f:
                pickle.dump(config, f)
            write_results(res_path, test_insts)
        else:
            no_incre_dev += 1
        model.zero_grad()
        if no_incre_dev >= config.max_no_incre:
            print(
                "early stopping: dev F1 has not improved for %d epochs" %
                no_incre_dev)
            break

    print("Archiving the best Model...")
    with tarfile.open(f"model_files/{model_folder}/{model_folder}.tar.gz",
                      "w:gz") as tar:
        tar.add(f"model_files/{model_folder}",
                arcname=os.path.basename(model_folder))

    print("Finished archiving the models")

    print("The best dev: %.2f" % (best_dev[0]))
    print("The corresponding test: %.2f" % (best_test[0]))
    print("Final testing.")
    model.load_state_dict(torch.load(model_path))
    model.eval()
    evaluate_model(config, model, test_batches, "test", test_insts)
    write_results(res_path, test_insts)
def train_model(config: Config, epoch: int, train_insts: List[Instance],
                dev_insts: List[Instance], test_insts: List[Instance]):
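    """Variant of train_model that reports exact- and overlap-match results
    (via print_report) and re-archives the model folder each time a new
    best checkpoint is saved.
    """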
    model = NNCRF(config)
    optimizer = get_optimizer(config, model)
    train_num = len(train_insts)
    print("number of instances: %d" % (train_num))
    print(colored("[Shuffled] Shuffle the training instance ids", "red"))
    random.shuffle(train_insts)

    batched_data = batching_list_instances(config, train_insts)
    dev_batches = batching_list_instances(config, dev_insts)
    test_batches = batching_list_instances(config, test_insts)

    best_dev = [-1, 0]
    best_test = [-1, 0]

    model_folder = config.model_folder
    res_folder = "results"
    if os.path.exists(model_folder):
        raise FileExistsError(
            f"The folder {model_folder} exists. Please either delete it or create a new one "
            f"to avoid override.")
    model_name = model_folder + "/lstm_crf.m"
    config_name = model_folder + "/config.conf"
    res_name = res_folder + "/lstm_crf.results"
    print("[Info] The model will be saved to: %s.tar.gz" % (model_folder))
    if not os.path.exists(model_folder):
        os.makedirs(model_folder)
    if not os.path.exists(res_folder):
        os.makedirs(res_folder)

    for i in range(1, epoch + 1):
        epoch_loss = 0
        start_time = time.time()
        model.zero_grad()
        if config.optimizer.lower() == "sgd":
            optimizer = lr_decay(config, optimizer, i)
        for index in np.random.permutation(len(batched_data)):
            model.train()
            loss = model(*batched_data[index])
            epoch_loss += loss.item()
            loss.backward()
            optimizer.step()
            model.zero_grad()

        end_time = time.time()
        print("Epoch %d: %.5f, Time is %.2fs" %
              (i, epoch_loss, end_time - start_time),
              flush=True)

        model.eval()
        dev_metrics = evaluate_model(config, model, dev_batches, "dev",
                                     dev_insts)
        test_metrics = evaluate_model(config, model, test_batches, "test",
                                      test_insts)
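        # NOTE: this variant selects the checkpoint by TEST F1 (test_metrics[1][2]),
        # not dev F1, so test information leaks into model selection.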
        if test_metrics[1][2] > best_test[0]:
            print("saving the best model...")
            best_dev[0] = dev_metrics[1][2]
            best_dev[1] = i
            best_test[0] = test_metrics[1][2]
            best_test[1] = i
            torch.save(model.state_dict(), model_name)
            # Save the corresponding config as well.
            with open(config_name, 'wb') as f:
                pickle.dump(config, f)
            print('Exact\n')
            print_report(test_metrics[-2])
            print('Overlap\n')
            print_report(test_metrics[-1])
            write_results(res_name, test_insts)
            print("Archiving the best Model...")
            with tarfile.open(model_folder + "/" + model_folder + ".tar.gz",
                              "w:gz") as tar:
                tar.add(model_folder, arcname=os.path.basename(model_folder))
        model.zero_grad()

    print("Finished archiving the models")

    print("The best dev: %.2f" % (best_dev[0]))
    print("The corresponding test: %.2f" % (best_test[0]))
    print("Final testing.")
    model.load_state_dict(torch.load(model_name))
    model.eval()
    evaluate_model(config, model, test_batches, "test", test_insts)
    write_results(res_name, test_insts)

# Example #5

def train_model(config: Config, epoch: int, train_insts: List[Instance],
                dev_insts: List[Instance], test_insts: List[Instance]):
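    """NNCRF training loop with dev-based checkpoint selection and early
    stopping after config.max_no_incre epochs without dev F1 improvement.
    """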
    model = NNCRF(config)
    optimizer = get_optimizer(config, model)
    train_num = len(train_insts)
    print("number of instances: %d" % (train_num))
    print(colored("[Shuffled] Shuffle the training instance ids", "red"))
    random.shuffle(train_insts)

    batched_data = batching_list_instances(config, train_insts)
    dev_batches = batching_list_instances(config, dev_insts)
    test_batches = batching_list_instances(config, test_insts)

    best_dev = [-1, 0]
    best_test = [-1, 0]

    model_folder = config.model_folder
    res_folder = "results"
    if os.path.exists("model_files/" + model_folder):
        raise FileExistsError(
            f"The folder model_files/{model_folder} exists. Please either delete it or create a new one "
            f"to avoid override.")
    model_path = f"model_files/{model_folder}/lstm_crf.m"
    config_path = f"model_files/{model_folder}/config.conf"
    res_path = f"{res_folder}/{model_folder}.results"
    print("[Info] The model will be saved to: %s.tar.gz" % (model_folder))
    os.makedirs(f"model_files/{model_folder}",
                exist_ok=True)  ## create model files. not raise error if exist
    os.makedirs(res_folder, exist_ok=True)
    no_incre_dev = 0
    for i in tqdm(range(1, epoch + 1), desc="Epoch"):
        epoch_loss = 0
        start_time = time.time()
        model.zero_grad()
        if config.optimizer.lower() == "sgd":
            optimizer = lr_decay(config, optimizer, i)
        for index in tqdm(np.random.permutation(len(batched_data)),
                          desc="--training batch",
                          total=len(batched_data)):
            model.train()
            loss = model(*batched_data[index])
            epoch_loss += loss.item()
            loss.backward()
            optimizer.step()
            model.zero_grad()

        end_time = time.time()
        print("Epoch %d: %.5f, Time is %.2fs" %
              (i, epoch_loss, end_time - start_time),
              flush=True)

        model.eval()
        dev_metrics = evaluate_model(config, model, dev_batches, "dev",
                                     dev_insts)
        test_metrics = evaluate_model(config, model, test_batches, "test",
                                      test_insts)
        if dev_metrics[2] > best_dev[0]:
            print("saving the best model...")
            no_incre_dev = 0
            best_dev[0] = dev_metrics[2]
            best_dev[1] = i
            best_test[0] = test_metrics[2]
            best_test[1] = i
            torch.save(model.state_dict(), model_path)
            # Save the corresponding config as well.
            with open(config_path, 'wb') as f:
                pickle.dump(config, f)
            write_results(res_path, test_insts)
        else:
            no_incre_dev += 1
        model.zero_grad()
        if no_incre_dev >= config.max_no_incre:
            print(
                "early stopping: dev F1 has not improved for %d epochs" %
                no_incre_dev)
            break

    print("Archiving the best Model...")
    with tarfile.open(f"model_files/{model_folder}/{model_folder}.tar.gz",
                      "w:gz") as tar:
        tar.add(f"model_files/{model_folder}",
                arcname=os.path.basename(model_folder))

    print("Finished archiving the models")

    print("The best dev: %.2f" % (best_dev[0]))
    print("The corresponding test: %.2f" % (best_test[0]))
    print("Final testing.")
    model.load_state_dict(torch.load(model_path))
    model.eval()
    evaluate_model(config, model, test_batches, "test", test_insts)
    write_results(res_path, test_insts)
def train_model(config: Config, train_insts: List[List[Instance]], dev_insts: List[Instance],
                test_insts: List[Instance]):
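    """Cross-fold self-training: trains one model per fold, uses each fold's
    model to hard-relabel the other fold, then trains a final model on the
    union of all (re-labeled) folds. The 1 - fold_id indexing below assumes
    exactly two folds.
    """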
    train_num = sum([len(insts) for insts in train_insts])
    print(f"[Training Info] number of instances: {train_num:d}")

    dev_batches = batching_list_instances(config, dev_insts)
    test_batches = batching_list_instances(config, test_insts)

    best_dev = [-1, 0]
    best_test = [-1, 0]

    model_folder = config.model_folder
    res_folder = "results"
    # if os.path.exists(model_folder):
    #     raise FileExistsError(f"The folder {model_folder} exists. Please either delete it or create a new one "
    #                           f"to avoid override.")

    print(f"[Training Info] The model will be saved to: {model_folder}.tar.gz")
    if not os.path.exists(model_folder):
        os.makedirs(model_folder)
    if not os.path.exists(res_folder):
        os.makedirs(res_folder)

    num_outer_iterations = config.num_outer_iterations
    for outer_iter in range(num_outer_iterations):
        print(f"[Training Info] Running outer iteration {outer_iter}.")
        model_names = []  # model names for each fold
        train_batches = [batching_list_instances(config, insts) for insts in train_insts]
        for fold_id, folded_train_insts in enumerate(train_insts):
            print(f"[Training Info] Training fold {fold_id}.")
            model_name = model_folder + f"/lstm_crf_{fold_id}.m"
            model_names.append(model_name)
            train_one(config=config, train_batches=train_batches[fold_id],
                      dev_insts=dev_insts, dev_batches=dev_batches, model_name=model_name)

        # assign hard prediction to other folds
        print("\n\n[Data Info] Assigning labels for the HARD approach")

        for fold_id, folded_train_insts in enumerate(train_insts):
            model = NNCRF(config)
            model_name = model_names[fold_id]
            model.load_state_dict(torch.load(model_name))
            hard_constraint_predict(config=config, model=model,
                                    fold_batches=train_batches[1 - fold_id],
                                    folded_insts=train_insts[1 - fold_id])  # relabel the other fold (assumes num_folds == 2)
        print("\n\n")

        print("[Training Info] Training the final model")
        all_train_insts = list(itertools.chain.from_iterable(train_insts))
        model_name = model_folder + "/final_lstm_crf.m"
        config_name = model_folder + "/config.conf"
        res_name = res_folder + "/lstm_crf.results"
        all_train_batches = batching_list_instances(config=config, insts=all_train_insts)
        model = train_one(config=config, train_batches=all_train_batches, dev_insts=dev_insts, dev_batches=dev_batches,
                          model_name=model_name, config_name=config_name, test_insts=test_insts,
                          test_batches=test_batches, result_filename=res_name)
        print("Archiving the best Model...")
        with tarfile.open(model_folder + "/" + model_folder + ".tar.gz", "w:gz") as tar:
            tar.add(model_folder, arcname=os.path.basename(model_folder))
        # print("The best dev: %.2f" % (best_dev[0]))
        # print("The corresponding test: %.2f" % (best_test[0]))
        # print("Final testing.")
        model.load_state_dict(torch.load(model_name))
        model.eval()
        evaluate_model(config, model, test_batches, "test", test_insts)
        write_results(res_name, test_insts)
def train_one(config: Config,
              train_batches: List[Tuple],
              dev_insts: List[Instance],
              dev_batches: List[Tuple],
              model_name: str,
              test_insts: List[Instance] = None,
              test_batches: List[Tuple] = None,
              config_name: str = None,
              result_filename: str = None,
              rate_schedule_neg=None,
              rate_schedule_pos=None) -> NNCRF_sl:
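    """Train an NNCRF_sl model with per-epoch forget rates taken from
    rate_schedule_neg / rate_schedule_pos, tracking negative and positive
    NLL separately and checkpointing whenever dev F1 improves.
    """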
    model = NNCRF_sl(config)
    model.train()
    optimizer = get_optimizer(config, model)
    epoch = config.num_epochs
    best_dev_f1 = -1
    saved_test_metrics = None
    for i in range(1, epoch + 1):
        ratios_sum = [0] * 6
        forget_rate_neg = rate_schedule_neg[i - 1]
        forget_rate_pos = rate_schedule_pos[i - 1]
        epoch_loss = 0
        epoch_loss_neg = 0
        epoch_loss_pos = 0
        start_time = time.time()
        model.zero_grad()
        if config.optimizer.lower() == "sgd":
            optimizer = lr_decay(config, optimizer, i)
        is_constrain = config.is_constrain
        for index in np.random.permutation(len(train_batches)):
            model.train()
            tmp = (*train_batches[index], forget_rate_neg, forget_rate_pos,
                   is_constrain)

            loss, ratios, loss_neg, loss_pos = model(*tmp)
            ratios_sum = [
                ratios_sum[j] + ratios[j] for j in range(len(ratios))
            ]
            epoch_loss += loss.item()
            epoch_loss_neg += loss_neg.item()
            epoch_loss_pos += loss_pos.item()
            loss.backward()
            optimizer.step()
            model.zero_grad()
        end_time = time.time()
        print("Epoch %d: %.5f, Time is %.2fs" %
              (i, epoch_loss / epoch, end_time - start_time),
              flush=True)
        print('avg neg NLL: ' + str(epoch_loss_neg / epoch) +
              ' avg pos NLL: ' + str(epoch_loss_pos / epoch))
        model.eval()
        # metric is [precision, recall, f_score]
        dev_metrics = evaluate_model(config, model, dev_batches, "dev",
                                     dev_insts)
        if test_insts is not None:
            test_metrics = evaluate_model(config, model, test_batches, "test",
                                          test_insts)
        if dev_metrics[2] > best_dev_f1:
            print("saving the best model..." + ' epoch' + str(i))
            best_dev_f1 = dev_metrics[2]
            if test_insts is not None:
                saved_test_metrics = test_metrics
            torch.save(model.state_dict(), model_name)
            # Save the corresponding config as well.
            if config_name:
                with open(config_name, 'wb') as f:
                    pickle.dump(config, f)
            if result_filename:
                write_results(result_filename, test_insts)
        model.zero_grad()
    if test_insts is not None:
        print(f"The best dev F1: {best_dev_f1}")
        print(f"The corresponding test: {saved_test_metrics}")
    return model
def train_model(config: Config, train_insts: List[List[Instance]],
                dev_insts: List[Instance], test_insts: List[Instance]):
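    """Noise-robust cross-fold training for NNCRF_sl.

    Estimates negative/positive noise rates (or takes them from config),
    builds per-epoch forget-rate schedules, trains one model per fold,
    hard-relabels the opposite fold, and finally trains a model on all
    folds with zeroed forget rates. The 1 - fold_id indexing assumes two
    folds.
    """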
    train_num = sum([len(insts) for insts in train_insts])
    print("[Training Info] number of instances: %d" % (train_num))

    dev_batches = batching_list_instances(config, dev_insts)
    test_batches = batching_list_instances(config, test_insts)

    best_dev = [-1, 0]
    best_test = [-1, 0]

    model_folder = config.model_folder
    res_folder = config.res_folder
    if os.path.exists(model_folder):
        raise FileExistsError(
            f"The folder {model_folder} exists. Please either delete it or create a new one "
            f"to avoid override.")

    print("[Training Info] The model will be saved to: %s.tar.gz" %
          (model_folder))
    if not os.path.exists(model_folder):
        os.makedirs(model_folder)
    if not os.path.exists(res_folder):
        os.makedirs(res_folder)

    num_outer_iterations = config.num_outer_iterations

    SL_warmup = 2  # NOTE: unused in this snippet

    for outer_iter in range(num_outer_iterations):
        print(f"[Training Info] Running outer iteration {outer_iter}.")

        # Re-split the folds every two outer iterations
        # (the concatenation below assumes num_folds == 2).
        if outer_iter > 0 and outer_iter // 2 != (outer_iter - 1) // 2:
            train_insts = train_insts[0] + train_insts[1]
            random.shuffle(train_insts)
            num_insts_in_fold = math.ceil(len(train_insts) / config.num_folds)
            train_insts = [
                train_insts[i * num_insts_in_fold:(i + 1) * num_insts_in_fold]
                for i in range(config.num_folds)
            ]

        model_names = []  #model names for each fold
        train_batches = [
            batching_list_instances(config, insts) for insts in train_insts
        ]

        neg_noise_rate_gold, pos_noise_rate_gold = ratio_estimation(
            config, train_insts)
        if (config.neg_noise_rate >= 0):
            neg_noise_rate = config.neg_noise_rate
        else:
            neg_noise_rate = neg_noise_rate_gold
        if (config.pos_noise_rate >= 0):
            pos_noise_rate = config.pos_noise_rate
        else:
            pos_noise_rate = pos_noise_rate_gold

        if outer_iter > 0:
            neg_noise_rate = 0.005
            pos_noise_rate = 0.15

        print('negative noise rate: ' + str(neg_noise_rate))
        print('positive noise rate: ' + str(pos_noise_rate))

        if (config.warm_up_num == 0):
            rate_schedule_neg, rate_schedule_pos = gen_forget_rate(
                config.num_epochs, neg_noise_rate, pos_noise_rate,
                config.num_gradual_neg, config.num_gradual_pos)
        else:
            rate_schedule_neg, rate_schedule_pos = gen_forget_rate_warmup(
                config.num_epochs, neg_noise_rate, pos_noise_rate,
                config.warm_up_num, config.num_gradual_neg,
                config.num_gradual_pos)

        for fold_id, folded_train_insts in enumerate(train_insts):
            print(f"[Training Info] Training fold {fold_id}.")
            model_name = model_folder + f"/lstm_crf_{fold_id}.m"
            model_names.append(model_name)
            train_one(config=config,
                      train_batches=train_batches[fold_id],
                      dev_insts=dev_insts,
                      dev_batches=dev_batches,
                      model_name=model_name,
                      rate_schedule_neg=rate_schedule_neg,
                      rate_schedule_pos=rate_schedule_pos)

        # assign hard prediction to other folds
        print("\n\n[Data Info] Assigning labels for the HARD approach")

        for fold_id, folded_train_insts in enumerate(train_insts):
            model = NNCRF_sl(config)
            model_name = model_names[fold_id]
            model.load_state_dict(torch.load(model_name))
            hard_constraint_predict(
                config=config,
                model=model,
                fold_batches=train_batches[1 - fold_id],
                folded_insts=train_insts[1 - fold_id])  # relabel the other fold (assumes num_folds == 2)
        print("\n\n")

        print("[Training Info] Training the final model")
        all_train_insts = list(itertools.chain.from_iterable(train_insts))
        model_name = model_folder + "/num_outer_iterations_final_lstm_crf.m"
        config_name = model_folder + "/num_outer_iterations_config.conf"
        res_name = res_folder + "/num_outer_iterations_lstm_crf.results"
        all_train_batches = batching_list_instances(config=config,
                                                    insts=all_train_insts)

        neg_noise_rate, pos_noise_rate = ratio_estimation(config, train_insts)

        rate_schedule_neg = np.zeros(config.num_epochs)
        rate_schedule_pos = np.zeros(config.num_epochs)

        model = train_one(config=config,
                          train_batches=all_train_batches,
                          dev_insts=dev_insts,
                          dev_batches=dev_batches,
                          model_name=model_name,
                          config_name=config_name,
                          test_insts=test_insts,
                          test_batches=test_batches,
                          result_filename=res_name,
                          rate_schedule_neg=rate_schedule_neg,
                          rate_schedule_pos=rate_schedule_pos)
        print("Archiving the best Model...")
        with tarfile.open(
                model_folder + "/" + str(num_outer_iterations) + model_folder +
                ".tar.gz", "w:gz") as tar:
            tar.add(model_folder, arcname=os.path.basename(model_folder))

        model.load_state_dict(torch.load(model_name))
        model.eval()
        evaluate_model(config, model, test_batches, "test", test_insts)
        write_results(res_name, test_insts)

# Example #9

def train_one(
    config: Config,
    train_batches: List[Tuple],
    dev_insts: List[Instance],
    dev_batches: List[Tuple],
    model_name: str,
    test_insts: List[Instance] = None,
    test_batches: List[Tuple] = None,
    config_name: str = None,
    result_filename: str = None,
) -> NNCRF:
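    """Variant of train_one that drives the learning rate with a
    SlantedTriangular scheduler (stepped per batch and per epoch) instead
    of SGD learning-rate decay.
    """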
    model = NNCRF(config)
    model.train()
    optimizer = get_optimizer(config, model)
    lr_scheduler = SlantedTriangular(optimizer,
                                     config.num_epochs,
                                     num_steps_per_epoch=len(train_batches),
                                     ratio=16)
    epoch = config.num_epochs
    best_dev_f1 = -1
    saved_test_metrics = None
    for i in range(1, epoch + 1):
        epoch_loss = 0
        start_time = time.time()
        model.zero_grad()
        # if config.optimizer.lower() == "sgd":
        #     optimizer = lr_decay(config, optimizer, i)
        lr_scheduler.step(epoch=i)

        for index in tqdm(np.random.permutation(len(train_batches)),
                          f"Training epoch {i}", len(train_batches)):
            model.train()
            loss = model(*train_batches[index])
            epoch_loss += loss.item()
            # print(f"Batch loss: {loss.item()}")
            loss.backward()
            optimizer.step()
            model.zero_grad()
            lr_scheduler.step_batch()
        end_time = time.time()
        print("Epoch %d: %.5f, Time is %.2fs" %
              (i, epoch_loss / len(train_batches), end_time - start_time),
              flush=True)

        model.eval()
        # metric is [precision, recall, f_score]
        dev_metrics = evaluate_model(config, model, dev_batches, "dev",
                                     dev_insts)
        if test_insts is not None:
            test_metrics = evaluate_model(config, model, test_batches, "test",
                                          test_insts)
        if dev_metrics[2] > best_dev_f1:
            print("saving the best model...")
            best_dev_f1 = dev_metrics[2]
            if test_insts is not None:
                saved_test_metrics = test_metrics
            torch.save(model.state_dict(), model_name)
            # Save the corresponding config as well.
            if config_name:
                with open(config_name, "wb") as f:
                    pickle.dump(config, f)
            if result_filename:
                write_results(result_filename, test_insts)
        model.zero_grad()
    if test_insts is not None:
        print(f"The best dev F1: {best_dev_f1}")
        print(f"The corresponding test: {saved_test_metrics}")
    return model