Example #1
def train_single_model(train_data: List[InputExample],
                       eval_data: List[InputExample],
                       dev32_data: List[InputExample],
                       pattern_iter_output_dir: str,
                       model: TransformerModelWrapper,
                       config: TrainConfig,
                       eval_config: EvalConfig,
                       use_debias: bool = False):
    """
    Train a single model.
    :param model: the model to train
    :param train_data: the training examples to use
    :param config: the training config
    :param eval_config: the evaluation config
    :return: a dictionary containing the global step, average loss and (optionally) results on the train set
    """

    results_dict = {}

    # results_dict['train_set_before_training'] = evaluate(model, train_data, eval_config)['scores']['acc']

    if not train_data:
        logger.warning('Training method was called without training examples')
    else:
        global_step, tr_loss = model.train(
            pattern_iter_output_dir=pattern_iter_output_dir,
            eval_config=eval_config,
            train_data=train_data,
            dev32_data=dev32_data,
            eval_data=eval_data,
            per_gpu_train_batch_size=config.per_gpu_train_batch_size,
            n_gpu=config.n_gpu,
            num_train_epochs=config.num_train_epochs,
            max_steps=config.max_steps,
            gradient_accumulation_steps=config.gradient_accumulation_steps,
            weight_decay=config.weight_decay,
            learning_rate=config.learning_rate,
            adam_epsilon=config.adam_epsilon,
            warmup_steps=config.warmup_steps,
            max_grad_norm=config.max_grad_norm,
            alpha=config.alpha,
            use_debias=use_debias
        )
        results_dict['global_step'] = global_step
        results_dict['average_loss'] = tr_loss

    # Load the trained model
    # model = TransformerModelWrapper.from_pretrained(pattern_iter_output_dir)
    # results_dict['train_set_after_training'] = evaluate(model, train_data, eval_config)['scores']['acc']
    return results_dict
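
Since this variant takes arguments the other examples lack (dev32_data, use_debias, pattern_iter_output_dir), a hypothetical call site may help; it assumes model, the three example lists, and both config objects have already been constructed elsewhere in the project:

results = train_single_model(train_data, eval_data, dev32_data,
                             pattern_iter_output_dir, model,
                             train_config, eval_config, use_debias=True)
# The dict may be empty if no training examples were supplied, so read defensively.
logger.info("finished after %s steps, average loss %s",
            results.get('global_step'), results.get('average_loss'))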
Example #2
def train_single_model(
    model: TransformerModelWrapper,
    train_data: List[InputExample],
    config: TrainConfig,
    output_dir,
    dev_data: List[InputExample] = None,
    eval_config: EvalConfig = None,
    ipet_train_data: List[InputExample] = None,
    unlabeled_data: List[InputExample] = None,
    return_train_set_results: bool = True,
    local_rank=-1,
):
    """
    Train a single model.

    :param model: the model to train
    :param train_data: the training examples to use
    :param config: the training config
    :param eval_config: the evaluation config
    :param ipet_train_data: an optional list of iPET training examples to use
    :param unlabeled_data: an optional list of unlabeled examples to use
    :param return_train_set_results: whether results on the train set before and after training should be computed and
           returned
    :return: a dictionary containing the global step, average loss and (optionally) results on the train set
    """

    device = torch.device(config.device if config.device else
                          "cuda" if torch.cuda.is_available() else "cpu")
    if not ipet_train_data:
        ipet_train_data = []

    results_dict = {}

    model.model.to(device)

    if train_data and return_train_set_results:
        results_dict["train_set_before_training"] = evaluate(
            model, train_data, eval_config,
            local_rank=local_rank)["scores"]["acc"]

    all_train_data = train_data + ipet_train_data

    if dev_data is not None and eval_config is not None:
        eval_kwargs = {
            "eval_data": dev_data,
            "device": device,
            "per_gpu_eval_batch_size": eval_config.per_gpu_eval_batch_size,
            "n_gpu": eval_config.n_gpu,
            "decoding_strategy": eval_config.decoding_strategy,
            "priming": eval_config.priming,
            "local_rank": local_rank,
            "metrics": eval_config.metrics,
        }
    else:
        eval_kwargs = None

    if not all_train_data and not config.use_logits:
        logger.warning("Training method was called without training examples")
    else:
        global_step, tr_loss = model.train(
            all_train_data,
            device,
            per_gpu_train_batch_size=config.per_gpu_train_batch_size,
            per_gpu_unlabeled_batch_size=config.per_gpu_unlabeled_batch_size,
            n_gpu=config.n_gpu,
            num_train_epochs=config.num_train_epochs,
            max_steps=config.max_steps,
            min_steps=config.min_steps,
            gradient_accumulation_steps=config.gradient_accumulation_steps,
            weight_decay=config.weight_decay,
            learning_rate=config.learning_rate,
            adam_epsilon=config.adam_epsilon,
            warmup_steps=config.warmup_steps,
            max_grad_norm=config.max_grad_norm,
            logging_steps=config.logging_steps,
            logging_number=config.logging_number,
            unlabeled_data=(unlabeled_data
                            if config.lm_training or config.use_logits
                            else None),
            lm_training=config.lm_training,
            use_logits=config.use_logits,
            alpha=config.alpha,
            temperature=config.temperature,
            output_dir=output_dir,
            eval_kwargs=eval_kwargs,
            local_rank=local_rank,
        )
        results_dict["global_step"] = global_step
        results_dict["average_loss"] = tr_loss

    if train_data and return_train_set_results:
        results_dict["train_set_after_training"] = evaluate(
            model, train_data, eval_config,
            local_rank=local_rank)["scores"]["acc"]

    return results_dict
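
One detail worth noting in this variant is the device-selection idiom near the top: an explicit config.device wins, otherwise CUDA is used when available. Restated as a standalone, runnable helper (the name select_device is ours, not the project's):

import torch

def select_device(preferred=None):
    # Honor an explicit setting; otherwise fall back to CUDA when available.
    return torch.device(preferred if preferred
                        else "cuda" if torch.cuda.is_available() else "cpu")

print(select_device())       # cuda on a GPU machine, cpu otherwise
print(select_device("cpu"))  # always cpu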
Example #3
File: modeling.py Project: dwright37/pet
def train_single_model(model: TransformerModelWrapper,
                       train_data: List[InputExample],
                       config: TrainConfig,
                       eval_config: EvalConfig = None,
                       ipet_train_data: List[InputExample] = None,
                       unlabeled_data: List[InputExample] = None,
                       return_train_set_results: bool = True):
    """
    Train a single model.

    :param model: the model to train
    :param train_data: the training examples to use
    :param config: the training config
    :param eval_config: the evaluation config
    :param ipet_train_data: an optional list of iPET training examples to use
    :param unlabeled_data: an optional list of unlabeled examples to use
    :param return_train_set_results: whether results on the train set before and after training should be computed and
           returned
    :return: a dictionary containing the global step, average loss and (optionally) results on the train set
    """

    device = torch.device(config.device if config.device else
                          "cuda" if torch.cuda.is_available() else "cpu")
    if not ipet_train_data:
        ipet_train_data = []

    results_dict = {}

    model.model.to(device)

    if train_data and return_train_set_results:
        results_dict['train_set_before_training'] = evaluate(
            model, train_data, eval_config)['scores']['acc']

    all_train_data = train_data + ipet_train_data

    if not all_train_data and not config.use_logits:
        logger.warning('Training method was called without training examples')
    else:
        global_step, tr_loss = model.train(
            all_train_data,
            device,
            per_gpu_train_batch_size=config.per_gpu_train_batch_size,
            per_gpu_unlabeled_batch_size=config.per_gpu_unlabeled_batch_size,
            n_gpu=config.n_gpu,
            num_train_epochs=config.num_train_epochs,
            max_steps=config.max_steps,
            gradient_accumulation_steps=config.gradient_accumulation_steps,
            weight_decay=config.weight_decay,
            learning_rate=config.learning_rate,
            adam_epsilon=config.adam_epsilon,
            warmup_steps=config.warmup_steps,
            max_grad_norm=config.max_grad_norm,
            unlabeled_data=(unlabeled_data
                            if config.lm_training or config.use_logits
                            else None),
            lm_training=config.lm_training,
            use_logits=config.use_logits,
            alpha=config.alpha,
            temperature=config.temperature,
            mlm_logits=config.mlm_logits)
        results_dict['global_step'] = global_step
        results_dict['average_loss'] = tr_loss

    if train_data and return_train_set_results:
        results_dict['train_set_after_training'] = evaluate(
            model, train_data, eval_config)['scores']['acc']

    return results_dict
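
The use_logits and temperature parameters in the last two variants point at distillation-style training on soft labels, as in PET's final-model training stage. Below is a minimal sketch of what a temperature-scaled soft-label loss typically looks like; it is an illustrative assumption, not the repository's actual training loop:

import torch
import torch.nn.functional as F

def soft_label_loss(student_logits, teacher_logits, temperature=2.0):
    # Soften both distributions with the temperature, then take the
    # cross-entropy of the student against the teacher's soft labels.
    teacher_probs = F.softmax(teacher_logits / temperature, dim=-1)
    student_log_probs = F.log_softmax(student_logits / temperature, dim=-1)
    return -(teacher_probs * student_log_probs).sum(dim=-1).mean()

student = torch.randn(4, 3)  # batch of 4 examples, 3 labels
teacher = torch.randn(4, 3)
print(soft_label_loss(student, teacher).item())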