Example No. 1
def train_single_model(train_data: List[InputExample],
                       eval_data: List[InputExample],
                       dev32_data: List[InputExample],
                       pattern_iter_output_dir: str,
                       model: TransformerModelWrapper,
                       config: TrainConfig,
                       eval_config: EvalConfig):
    """
    Train a single model.
    :param train_data: the training examples to use
    :param eval_data: the evaluation examples to use
    :param dev32_data: the dev32 examples to use
    :param pattern_iter_output_dir: the output directory for this pattern and iteration
    :param model: the model to train
    :param config: the training config
    :param eval_config: the evaluation config
    :return: a dictionary containing the global step, average loss and (optionally) results on the train set
    """

    results_dict = {}

    results_dict['train_set_before_training'] = evaluate(model, train_data, eval_config)['scores']['acc']

    if not train_data:
        logger.warning('Training method was called without training examples')
    else:
        global_step, tr_loss = model.train(
            pattern_iter_output_dir=pattern_iter_output_dir,
            eval_config=eval_config,
            train_data=train_data,
            dev32_data=dev32_data,
            eval_data=eval_data,
            per_gpu_train_batch_size=config.per_gpu_train_batch_size,
            n_gpu=config.n_gpu,
            num_train_epochs=config.num_train_epochs,
            max_steps=config.max_steps,
            gradient_accumulation_steps=config.gradient_accumulation_steps,
            weight_decay=config.weight_decay,
            learning_rate=config.learning_rate,
            adam_epsilon=config.adam_epsilon,
            warmup_steps=config.warmup_steps,
            max_grad_norm=config.max_grad_norm,
            alpha=config.alpha
        )
        results_dict['global_step'] = global_step
        results_dict['average_loss'] = tr_loss

    model = TransformerModelWrapper.from_pretrained(pattern_iter_output_dir)
    results_dict['train_set_after_training'] = evaluate(model, train_data, eval_config)['scores']['acc']
    return results_dict
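A minimal call sketch for the function above, mirroring the positional call order used in the later examples; init_model, the config objects, and the output path are assumptions borrowed from those examples rather than a verified API:

# Hypothetical driver; the data lists and config objects are assumed to be loaded elsewhere
wrapper = init_model(model_config)  # WrapperConfig -> TransformerModelWrapper
results = train_single_model(
    train_data, eval_data, dev32_data,
    "output/p0-i1",                  # pattern_iter_output_dir (illustrative)
    wrapper, train_config, eval_config,
)
# 'global_step' and 'average_loss' are present only when train_data is non-empty
print(results["train_set_before_training"], results["train_set_after_training"])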
Example No. 2
def train_pet_ensemble(
    model_config: WrapperConfig,
    train_config: TrainConfig,
    eval_config: EvalConfig,
    pattern_ids: List[Union[str, int]],
    output_dir: str,
    ipet_data_dir: str = None,
    repetitions: int = 3,
    train_data: List[InputExample] = None,
    unlabeled_data: List[InputExample] = None,
    dev_data: List[InputExample] = None,
    test_data: List[InputExample] = None,
    do_train: bool = True,
    do_eval: bool = True,
    save_unlabeled_logits: bool = False,
    seed: int = 42,
    overwrite_dir: bool = False,
    save_model=False,
    local_rank=-1,
):
    """
    Train and evaluate an ensemble of PET models without knowledge distillation.

    :param model_config: the model configuration to use
    :param train_config: the training configuration to use
    :param eval_config: the evaluation configuration to use
    :param pattern_ids: the ids of all PVPs to use
    :param output_dir: the output directory
    :param ipet_data_dir: optional directory containing additional training data for iPET
    :param repetitions: the number of training repetitions
    :param train_data: the training examples to use
    :param unlabeled_data: the unlabeled examples to use
    :param dev_data: the development examples to use
    :param test_data: the test examples to use
    :param do_train: whether to perform training
    :param do_eval: whether to perform evaluation
    :param save_unlabeled_logits: whether logits for unlabeled examples should be saved in a file ``logits.txt``. This
           is required for both iPET and knowledge distillation.
    :param seed: the random seed to use
    :param overwrite_dir: whether to reuse an existing output directory instead of skipping it
    :param save_model: whether to keep the trained model files (``.bin``) in the output directory after training
    :param local_rank: the local rank for distributed training, or -1 for non-distributed training
    """

    results = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
    set_seed(seed)

    for pattern_id in pattern_ids:
        for iteration in range(repetitions):

            model_config.pattern_id = pattern_id
            results_dict = {}

            shots = 0 if train_data is None else len(train_data)
            pattern_iter_output_dir = "{}/{}shots-{}-i{}-seed{}".format(
                output_dir, shots, pattern_name(pattern_id), iteration, seed)

            if os.path.exists(pattern_iter_output_dir) and not overwrite_dir:
                logger.warning(
                    f"Path {pattern_iter_output_dir} already exists, skipping it..."
                )
                continue

            if not os.path.exists(pattern_iter_output_dir) and local_rank in [
                    -1, 0
            ]:
                os.makedirs(pattern_iter_output_dir)

            wrapper = init_model(model_config)

            # Training
            if do_train:
                if ipet_data_dir:
                    p = os.path.join(
                        ipet_data_dir,
                        "{}-i{}-train.bin".format(pattern_name(pattern_id),
                                                  iteration))
                    ipet_train_data = InputExample.load_examples(p)
                    for example in ipet_train_data:
                        example.logits = None
                else:
                    ipet_train_data = None

                results_dict.update(
                    train_single_model(
                        wrapper,
                        train_data,
                        train_config,
                        pattern_iter_output_dir,
                        dev_data,
                        eval_config,
                        ipet_train_data=ipet_train_data,
                        unlabeled_data=unlabeled_data,
                        return_train_set_results=False,
                        local_rank=local_rank,
                    ))

                with open(os.path.join(pattern_iter_output_dir, "results.txt"),
                          "w") as fh:
                    fh.write(str(results_dict))

                if local_rank in [-1, 0]:
                    logger.info("Saving trained model at {}...".format(
                        pattern_iter_output_dir))
                    train_config.save(
                        os.path.join(pattern_iter_output_dir,
                                     "train_config.json"))
                    eval_config.save(
                        os.path.join(pattern_iter_output_dir,
                                     "eval_config.json"))
                    logger.info("Saving complete")

                    if save_unlabeled_logits:
                        logits = evaluate(wrapper,
                                          unlabeled_data,
                                          eval_config,
                                          local_rank=local_rank)["logits"]
                        save_logits(
                            os.path.join(pattern_iter_output_dir,
                                         "logits.txt"), logits)

                if not do_eval:
                    wrapper.model = None
                    wrapper = None
                    torch.cuda.empty_cache()

            # Evaluation
            if do_eval:
                logger.info("Starting evaluation...")
                try:
                    wrapper = TransformerModelWrapper.from_pretrained(
                        pattern_iter_output_dir)
                except OSError:
                    warnings.warn(
                        "No model found saved, proceeding with current model instead of best"
                    )
                    pass

                for split, eval_data in {
                        "dev": dev_data,
                        "test": test_data
                }.items():
                    if eval_data is None:
                        continue
                    eval_result = evaluate(wrapper,
                                           eval_data,
                                           eval_config,
                                           priming_data=train_data,
                                           local_rank=local_rank)

                    if local_rank in [-1, 0]:
                        save_predictions(
                            os.path.join(pattern_iter_output_dir,
                                         "predictions.jsonl"), wrapper,
                            eval_result)
                        save_logits(
                            os.path.join(pattern_iter_output_dir,
                                         "eval_logits.txt"),
                            eval_result["logits"])

                    scores = eval_result["scores"]
                    logger.info(
                        "--- {} result (pattern_id={}, iteration={}) ---".
                        format(split, pattern_id, iteration))
                    logger.info(scores)

                    results_dict[f"{split}_set_after_training"] = scores
                    with open(
                            os.path.join(pattern_iter_output_dir,
                                         "results.json"), "w") as fh:
                        json.dump(results_dict, fh)

                    for metric, value in scores.items():
                        results[split][metric][pattern_id].append(value)

                wrapper.model = None
                wrapper = None
                torch.cuda.empty_cache()

    if do_eval:
        logger.info("=== OVERALL RESULTS ===")
        results_to_log = _write_results(
            os.path.join(output_dir, "result_test.txt"), results)
    else:
        logger.info("=== ENSEMBLE TRAINING COMPLETE ===")
        results_to_log = None

    if do_train and not save_model:
        outputs = os.listdir(pattern_iter_output_dir)
        for item in outputs:
            if item.endswith(".bin"):
                os.remove(os.path.join(pattern_iter_output_dir, item))

    return results_to_log
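A sketch of how the ensemble trainer above might be driven; the *_examples variables are placeholders for previously loaded InputExample lists, and the output path is illustrative:

# Hypothetical call; configs and data are assumed to be built elsewhere
results_to_log = train_pet_ensemble(
    model_config, train_config, eval_config,
    pattern_ids=[0, 1, 2],
    output_dir="output/pet",
    repetitions=3,
    train_data=train_examples,
    unlabeled_data=unlabeled_examples,
    dev_data=dev_examples,
    test_data=test_examples,
    do_train=True,
    do_eval=True,
    save_unlabeled_logits=True,  # required later for iPET or knowledge distillation
    seed=42,
)
# results[split][metric][pattern_id] collects one score per repetition before _write_results aggregates them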
Example No. 3
def train_pet_ensemble(model_config: WrapperConfig,
                       train_config: TrainConfig,
                       eval_config: EvalConfig,
                       pattern_ids: List[int],
                       output_dir: str,
                       ipet_data_dir: str = None,
                       repetitions: int = 3,
                       train_data: List[InputExample] = None,
                       unlabeled_data: List[InputExample] = None,
                       eval_data: List[InputExample] = None,
                       do_train: bool = True,
                       do_eval: bool = True,
                       save_unlabeled_logits: bool = False,
                       seed: int = 42):
    """
    Train and evaluate an ensemble of PET models without knowledge distillation.

    :param model_config: the model configuration to use
    :param train_config: the training configuration to use
    :param eval_config: the evaluation configuration to use
    :param pattern_ids: the ids of all PVPs to use
    :param output_dir: the output directory
    :param ipet_data_dir: optional directory containing additional training data for iPET
    :param repetitions: the number of training repetitions
    :param train_data: the training examples to use
    :param unlabeled_data: the unlabeled examples to use
    :param eval_data: the evaluation examples to use
    :param do_train: whether to perform training
    :param do_eval: whether to perform evaluation
    :param save_unlabeled_logits: whether logits for unlabeled examples should be saved in a file ``logits.txt``. This
           is required for both iPET and knowledge distillation.
    :param seed: the random seed to use
    """

    results = defaultdict(lambda: defaultdict(list))
    set_seed(seed)

    for pattern_id in pattern_ids:
        for iteration in range(repetitions):

            model_config.pattern_id = pattern_id
            results_dict = {}

            pattern_iter_output_dir = "{}/p{}-i{}".format(
                output_dir, pattern_id, iteration)

            if os.path.exists(pattern_iter_output_dir):
                logger.warning(
                    f"Path {pattern_iter_output_dir} already exists, skipping it..."
                )
                continue

            if not os.path.exists(pattern_iter_output_dir):
                os.makedirs(pattern_iter_output_dir)

            wrapper = init_model(model_config)

            # Training
            if do_train:
                if ipet_data_dir:
                    p = os.path.join(
                        ipet_data_dir,
                        'p{}-i{}-train.bin'.format(pattern_id, iteration))
                    ipet_train_data = InputExample.load_examples(p)
                    for example in ipet_train_data:
                        example.logits = None
                else:
                    ipet_train_data = None

                results_dict.update(
                    train_single_model(wrapper,
                                       train_data,
                                       train_config,
                                       eval_config,
                                       ipet_train_data=ipet_train_data,
                                       unlabeled_data=unlabeled_data))

                with open(os.path.join(pattern_iter_output_dir, 'results.txt'),
                          'w') as fh:
                    fh.write(str(results_dict))

                logger.info("Saving trained model at {}...".format(
                    pattern_iter_output_dir))
                wrapper.save(pattern_iter_output_dir)
                train_config.save(
                    os.path.join(pattern_iter_output_dir, 'train_config.json'))
                eval_config.save(
                    os.path.join(pattern_iter_output_dir, 'eval_config.json'))
                logger.info("Saving complete")

                if save_unlabeled_logits:
                    logits = evaluate(wrapper, unlabeled_data,
                                      eval_config)['logits']
                    save_logits(
                        os.path.join(pattern_iter_output_dir, 'logits.txt'),
                        logits)

                if not do_eval:
                    wrapper.model = None
                    wrapper = None
                    torch.cuda.empty_cache()

            # Evaluation
            if do_eval:
                logger.info("Starting evaluation...")
                if not wrapper:
                    wrapper = TransformerModelWrapper.from_pretrained(
                        pattern_iter_output_dir)

                eval_result = evaluate(wrapper,
                                       eval_data,
                                       eval_config,
                                       priming_data=train_data)

                save_predictions(
                    os.path.join(pattern_iter_output_dir, 'predictions.jsonl'),
                    wrapper, eval_result)
                save_logits(
                    os.path.join(pattern_iter_output_dir, 'eval_logits.txt'),
                    eval_result['logits'])

                scores = eval_result['scores']
                logger.info(
                    "--- RESULT (pattern_id={}, iteration={}) ---".format(
                        pattern_id, iteration))
                logger.info(scores)

                results_dict['test_set_after_training'] = scores
                with open(
                        os.path.join(pattern_iter_output_dir, 'results.json'),
                        'w') as fh:
                    json.dump(results_dict, fh)

                for metric, value in scores.items():
                    results[metric][pattern_id].append(value)

                wrapper.model = None
                wrapper = None
                torch.cuda.empty_cache()

    if do_eval:
        logger.info("=== OVERALL RESULTS ===")
        _write_results(os.path.join(output_dir, 'result_test.txt'), results)
    else:
        logger.info("=== ENSEMBLE TRAINING COMPLETE ===")
Example No. 4
def train_pet(train_data: List[InputExample],
              eval_data: List[InputExample],
              dev32_data: List[InputExample],
              model_config: WrapperConfig,
              train_config: TrainConfig,
              eval_config: EvalConfig,
              pattern_ids: List[int],
              output_dir: str,
              repetitions: int = 3,
              do_train: bool = True,
              do_eval: bool = True,
              seed: int = 42
              ):

    """
    Train and evaluate a new PET model for a given task.

    :param model_config: the model configuration for each model corresponding to an individual PVP
    :param train_config: the training configuration for each model corresponding to an individual PVP
    :param eval_config: the evaluation configuration for each model corresponding to an individual PVP
    :param pattern_ids: the ids of all PVPs to use
    :param output_dir: the output directory
    :param repetitions: the number of training repetitions for each model corresponding to an individual PVP
    :param train_data: the training examples to use
    :param dev32_data: the dev32 examples to use
    :param eval_data: the evaluation examples to use
    :param do_train: whether to perform training
    :param do_eval: whether to perform evaluation
    :param seed: the random seed to use
    """

    results = defaultdict(lambda: defaultdict(list))
    dev32_results = defaultdict(lambda: defaultdict(list))
    # set_seed(seed)

    assert model_config.task_type == "single_task"

    for pattern_id in pattern_ids:  # only one pattern is used

        model_config.pattern_id = pattern_id
        results_dict = {}

        pattern_iter_output_dir = "{}/p{}-i{}".format(output_dir, pattern_id, 1)

        # if os.path.exists(pattern_iter_output_dir):
        #     logger.warning(f"Path {pattern_iter_output_dir} already exists, skipping it...")
        #     continue

        if not os.path.exists(pattern_iter_output_dir):
            os.makedirs(pattern_iter_output_dir)

        wrapper = init_model(model_config)  # initialize a model

        # Training
        if do_train:
            # run multi-epoch training and store the results in results_dict
            results_dict.update(train_single_model(train_data, eval_data, dev32_data, pattern_iter_output_dir, \
                                                   wrapper, train_config, eval_config))

            with open(os.path.join(pattern_iter_output_dir, 'results.txt'), 'w') as fh:
                fh.write(str(results_dict))

            train_config.save(os.path.join(pattern_iter_output_dir, 'train_config.json'))
            eval_config.save(os.path.join(pattern_iter_output_dir, 'eval_config.json'))
            logger.info("Saving complete")

            if not do_eval:
                wrapper.model = None
                wrapper = None
                torch.cuda.empty_cache()

        # Evaluation
        if do_eval:
            logger.info("Starting evaluation...")
            logger.info("Single: Task {} 's Test examples number: {}".format(model_config.task_name, len(eval_data)))

            logger.info("************Test Example:**************")
            logger.info("text_a={}".format(eval_data[0].text_a))
            logger.info("text_b={}".format(eval_data[0].text_b))
            logger.info("task={}".format(eval_data[0].task))
            logger.info("label={}".format(eval_data[0].label))
            logger.info("**********************************")

            # if not wrapper:
            wrapper = TransformerModelWrapper.from_pretrained(pattern_iter_output_dir)

            eval_result = evaluate(wrapper, eval_data, eval_config)
            # dev32_result = evaluate(wrapper, dev32_data, eval_config)

            save_predictions(os.path.join(pattern_iter_output_dir, 'eval_predictions.jsonl'), wrapper, eval_result)
            save_logits(os.path.join(pattern_iter_output_dir, 'eval_logits.txt'), eval_result['logits'])

            # save_predictions(os.path.join(pattern_iter_output_dir, 'dev32_predictions.jsonl'), wrapper, dev32_result)
            # save_logits(os.path.join(pattern_iter_output_dir, 'dev32_logits.txt'), dev32_result['logits'])

            logger.info("--- RESULT (pattern_id={}, Task={}) ---".format(pattern_id, model_config.task_name))
            logger.info("eval_results:")
            logger.info(eval_result['scores'])
            # logger.info("dev32_results:")
            # logger.info(dev32_result['scores'])

            # results_dict['eval_set_after_training'] = eval_result['scores']
            # results_dict['dev32_set_after_training'] = dev32_result['scores']
            # with open(os.path.join(pattern_iter_output_dir, 'results.json'), 'w') as fh:
            #     json.dump(results_dict, fh)
            #
            # for metric, value in eval_result['scores'].items():
            #     results[metric][pattern_id].append(value)
            #
            # for metric, value in dev32_result['scores'].items():
            #     dev32_results[metric][pattern_id].append(value)

            wrapper.model = None
            wrapper = None
            torch.cuda.empty_cache()
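The result containers in these examples are nested defaultdicts keyed by metric and pattern id; a minimal sketch of how the aggregation that is commented out at the end of this example would populate them (the metric name and value below are illustrative only):

from collections import defaultdict

results = defaultdict(lambda: defaultdict(list))
scores = {"acc": 0.71}  # illustrative stand-in for eval_result['scores']
pattern_id = 0
for metric, value in scores.items():
    results[metric][pattern_id].append(value)
# results == {'acc': {0: [0.71]}}; repeated runs append further values per pattern id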
Example No. 5
def train_pet_cross(train_data: List[InputExample],
              # eval_data: List[InputExample],
              dev32_data: List[InputExample],
              model_config: WrapperConfig,
              train_config: TrainConfig,
              eval_config: EvalConfig,
              pattern_ids: List[int],
              output_dir: str,
              repetitions: int = 3,
              do_train: bool = True,
              do_eval: bool = True,
              seed: int = 42
              ):

    """
    Train and evaluate a new PET model for a given task.

    :param model_config: the model configuration for each model corresponding to an individual PVP
    :param train_config: the training configuration for each model corresponding to an individual PVP
    :param eval_config: the evaluation configuration for each model corresponding to an individual PVP
    :param pattern_ids: the ids of all PVPs to use
    :param output_dir: the output directory
    :param repetitions: the number of training repetitions for each model corresponding to an individual PVP
    :param train_data: the training examples to use
    :param dev32_data: the dev32 examples to use
    :param do_train: whether to perform training
    :param do_eval: whether to perform evaluation
    :param seed: the random seed to use
    """

    results = defaultdict(lambda: defaultdict(list))
    dev32_results = defaultdict(lambda: defaultdict(list))
    # set_seed(seed)

    assert model_config.task_type == "cross_task"
    # in the cross-task setting, task_name is the group name, so collect all tasks within the group
    tasks = groups[model_config.task_name]

    for pattern_id in pattern_ids:  # only one pattern is selected

        model_config.pattern_id = pattern_id
        results_dict = {}

        pattern_iter_output_dir = "{}/p{}-i{}".format(output_dir, pattern_id, 1)

        # if os.path.exists(pattern_iter_output_dir):
        #     logger.warning(f"Path {pattern_iter_output_dir} already exists, skipping it...")
        #     continue

        if not os.path.exists(pattern_iter_output_dir):
            os.makedirs(pattern_iter_output_dir)

        wrapper = init_model(model_config)  # initialize a model

        # Training
        if do_train:
            # run multi-epoch training and store the results in results_dict
            # edit by wjn: eval_data -> None
            results_dict.update(train_single_model(train_data, None, dev32_data, pattern_iter_output_dir, \
                                                   wrapper, train_config, eval_config))

            with open(os.path.join(pattern_iter_output_dir, 'results.txt'), 'w') as fh:
                fh.write(str(results_dict))

            train_config.save(os.path.join(pattern_iter_output_dir, 'train_config.json'))
            eval_config.save(os.path.join(pattern_iter_output_dir, 'eval_config.json'))
            logger.info("Saving complete")

            if not do_eval:
                wrapper.model = None
                wrapper = None
                torch.cuda.empty_cache()

        # Evaluation
        if do_eval:
            logger.info("Starting evaluation...")

            # if not wrapper:
            wrapper = TransformerModelWrapper.from_pretrained(pattern_iter_output_dir)
            cross_data_dir = "data/k-shot-cross/"
            # added by wjn
            ## in the cross-task setting, evaluate every task in the current group separately
            for task_name in tasks:
                eval_data = load_examples(
                    task_name, cross_data_dir + data_to_name[task_name] + "/" + str(model_config.k) + "-" + str(seed),
                    TEST_SET, num_examples=-1, num_examples_per_label=None)
                logger.info("Group {}: Task {} 's Test examples number: {}".format(model_config.task_name, task_name, len(eval_data)))

                logger.info("************Test Example:**************")
                logger.info("text_a={}".format(eval_data[0].text_a))
                logger.info("text_b={}".format(eval_data[0].text_b))
                logger.info("task={}".format(eval_data[0].task))
                logger.info("label={}".format(eval_data[0].label))
                logger.info("**********************************")

                # update the metrics for the current task in the group
                eval_config.metrics = METRICS.get(task_name, DEFAULT_METRICS)  # metrics for the cross-task group
                eval_result = evaluate(wrapper, eval_data, eval_config)
            # dev32_result = evaluate(wrapper, dev32_data, eval_config)

                save_predictions(os.path.join(pattern_iter_output_dir, 'eval_predictions.jsonl'), wrapper, eval_result)
                save_logits(os.path.join(pattern_iter_output_dir, 'eval_logits.txt'), eval_result['logits'])

                # save_predictions(os.path.join(pattern_iter_output_dir, 'dev32_predictions.jsonl'), wrapper, dev32_result)
                # save_logits(os.path.join(pattern_iter_output_dir, 'dev32_logits.txt'), dev32_result['logits'])

                logger.info("--- RESULT (pattern_id={}, Group={}, Task={}) ---".format(pattern_id, model_config.task_name, task_name))
                logger.info("eval_results:")
                logger.info(eval_result['scores'])
                # logger.info("dev32_results:")
                # logger.info(dev32_result['scores'])

            # results_dict['eval_set_after_training'] = eval_result['scores']
            # # results_dict['dev32_set_after_training'] = dev32_result['scores']
            # with open(os.path.join(pattern_iter_output_dir, 'results.json'), 'w') as fh:
            #     json.dump(results_dict, fh)
            #
            # for metric, value in eval_result['scores'].items():
            #     results[metric][pattern_id].append(value)
            #
            # for metric, value in dev32_result['scores'].items():
            #     dev32_results[metric][pattern_id].append(value)

            wrapper.model = None
            wrapper = None
            torch.cuda.empty_cache()
Example No. 6
def train_pet(train_data: List[InputExample],
              eval_data: List[InputExample],
              dev32_data: List[InputExample],
              model_config: WrapperConfig,
              train_config: TrainConfig,
              eval_config: EvalConfig,
              pattern_ids: List[int],
              output_dir: str,
              repetitions: int = 3,
              do_train: bool = True,
              do_eval: bool = True,
              seed: int = 42
              ):
    """
    Train and evaluate a new PET model for a given task.

    :param model_config: the model configuration for each model corresponding to an individual PVP
    :param train_config: the training configuration for each model corresponding to an individual PVP
    :param eval_config: the evaluation configuration for each model corresponding to an individual PVP
    :param pattern_ids: the ids of all PVPs to use
    :param output_dir: the output directory
    :param repetitions: the number of training repetitions for each model corresponding to an individual PVP
    :param train_data: the training examples to use
    :param dev32_data: the dev32 examples to use
    :param eval_data: the evaluation examples to use
    :param do_train: whether to perform training
    :param do_eval: whether to perform evaluation
    :param seed: the random seed to use
    """

    results = defaultdict(lambda: defaultdict(list))
    dev32_results = defaultdict(lambda: defaultdict(list))
    set_seed(seed)

    for pattern_id in pattern_ids:
        for iteration in range(repetitions):

            model_config.pattern_id = pattern_id
            results_dict = {}

            pattern_iter_output_dir = "{}/p{}-i{}".format(output_dir, pattern_id, iteration)

            if os.path.exists(pattern_iter_output_dir):
                logger.warning(f"Path {pattern_iter_output_dir} already exists, skipping it...")
                continue

            if not os.path.exists(pattern_iter_output_dir):
                os.makedirs(pattern_iter_output_dir)

            wrapper = init_model(model_config)

            # Training
            if do_train:

                results_dict.update(train_single_model(train_data, eval_data, dev32_data, pattern_iter_output_dir, \
                                                       wrapper, train_config, eval_config))

                with open(os.path.join(pattern_iter_output_dir, 'results.txt'), 'w') as fh:
                    fh.write(str(results_dict))

                train_config.save(os.path.join(pattern_iter_output_dir, 'train_config.json'))
                eval_config.save(os.path.join(pattern_iter_output_dir, 'eval_config.json'))
                logger.info("Saving complete")

                if not do_eval:
                    wrapper.model = None
                    wrapper = None
                    torch.cuda.empty_cache()

            # Evaluation
            if do_eval:
                logger.info("Starting evaluation...")

                # if not wrapper:
                wrapper = TransformerModelWrapper.from_pretrained(pattern_iter_output_dir)

                eval_result = evaluate(wrapper, eval_data, eval_config)
                dev32_result = evaluate(wrapper, dev32_data, eval_config)

                save_predictions(os.path.join(pattern_iter_output_dir, 'eval_predictions.jsonl'), wrapper, eval_result)
                save_logits(os.path.join(pattern_iter_output_dir, 'eval_logits.txt'), eval_result['logits'])

                save_predictions(os.path.join(pattern_iter_output_dir, 'dev32_predictions.jsonl'), wrapper,
                                 dev32_result)
                save_logits(os.path.join(pattern_iter_output_dir, 'dev32_logits.txt'), dev32_result['logits'])

                logger.info("--- RESULT (pattern_id={}, iteration={}) ---".format(pattern_id, iteration))
                logger.info("eval_results:")
                logger.info(eval_result['scores'])
                logger.info("dev32_results:")
                logger.info(dev32_result['scores'])

                results_dict['eval_set_after_training'] = eval_result['scores']
                results_dict['dev32_set_after_training'] = dev32_result['scores']
                with open(os.path.join(pattern_iter_output_dir, 'results.json'), 'w') as fh:
                    json.dump(results_dict, fh)

                for metric, value in eval_result['scores'].items():
                    results[metric][pattern_id].append(value)

                for metric, value in dev32_result['scores'].items():
                    dev32_results[metric][pattern_id].append(value)

                wrapper.model = None
                wrapper = None
                torch.cuda.empty_cache()

    if do_eval:
        logger.info("=== OVERALL RESULTS ===")
        _write_results(os.path.join(output_dir, 'result_test.txt'), results, dev32_results)
    else:
        logger.info("=== ENSEMBLE TRAINING COMPLETE ===")