Example #1
def main(args):
    # Make sure the position embeddings cover the full source sequence length.
    if args.src_seq_length > args.max_position_embeddings:
        args.max_position_embeddings = args.src_seq_length
    # Only the seq2seq-style tasks are handled by this entry point.
    if args.task.lower() in ['gigaword', 'blank']:
        finetune(args,
                 train_valid_datasets_provider, {},
                 end_of_epoch_callback_provider=metrics_func_provider,
                 forward_step=seq2seq_forward_step)
    else:
        raise NotImplementedError(args.task)
Example #2
def main(args):
    if args.src_seq_length > args.max_position_embeddings:
        args.max_position_embeddings = args.src_seq_length
    if args.task.lower() in [
            'cnn_dm', 'cnn_dm_original', 'gigaword', 'blank',
            'squad_generation', 'xsum', 'squad', 'squad_v1', 'extraction',
            'cmrc'
    ]:
        finetune(args,
                 train_valid_datasets_provider, {},
                 end_of_epoch_callback_provider=metrics_func_provider,
                 forward_step=seq2seq_forward_step)
    else:
        raise NotImplementedError(args.task)
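Both entry points above follow the same pattern: grow the position embeddings so the source sequence fits, then dispatch only a whitelist of task names and raise NotImplementedError for anything else. The sketch below isolates that pattern so it can be run on its own; the helper names and the plain argparse.Namespace are assumptions for illustration, while finetune, seq2seq_forward_step and the dataset/metrics providers come from the surrounding task modules and are not reproduced here.

from argparse import Namespace

# Task names accepted by the dispatcher in Example #2.
SEQ2SEQ_TASKS = {
    'cnn_dm', 'cnn_dm_original', 'gigaword', 'blank', 'squad_generation',
    'xsum', 'squad', 'squad_v1', 'extraction', 'cmrc'
}


def ensure_position_embeddings(args):
    # Grow max_position_embeddings so the source sequence always fits.
    if args.src_seq_length > args.max_position_embeddings:
        args.max_position_embeddings = args.src_seq_length
    return args


def is_seq2seq_task(task):
    # True if the task would be dispatched rather than raising NotImplementedError.
    return task.lower() in SEQ2SEQ_TASKS


args = Namespace(task='xsum', src_seq_length=1024, max_position_embeddings=512)
ensure_position_embeddings(args)
assert args.max_position_embeddings == 1024
assert is_seq2seq_task(args.task)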
Example #3
def main(args):
    model_kwargs = {}
    # WSC with cloze evaluation (and no negative sampling) is finetuned as a
    # language-modeling task; every other task goes through the PVP/processor path.
    if args.task.lower() == 'wsc' and args.cloze_eval and not args.wsc_negative:
        from tasks.language_model.finetune import lm_forward_step
        finetune(args,
                 train_valid_datasets_provider,
                 model_kwargs,
                 end_of_epoch_callback_provider=metrics_func_provider,
                 forward_step=lm_forward_step)
    else:
        processor = PROCESSORS[args.task.lower()](args)
        pvp = PVPS[args.task.lower()]
        model_kwargs["model_type"] = "cloze" if args.cloze_eval else "classification"
        model_kwargs["multi_token"] = pvp.is_multi_token
        model_kwargs["num_labels"] = len(processor.get_labels())
        finetune(args,
                 train_valid_datasets_provider,
                 model_kwargs,
                 end_of_epoch_callback_provider=metrics_func_provider)
Example #4
def main(args):
    model_kwargs = {}
    processor = PROCESSORS[args.task.lower()](args)
    pvp = PVPS[args.task.lower()](args,
                                  None,
                                  processor.get_labels(),
                                  args.seq_length,
                                  pattern_id=args.pattern_id,
                                  is_multi_token=args.multi_token,
                                  num_prompt_tokens=args.num_prompt_tokens)
    if args.continuous_prompt:
        model_kwargs["spell_length"] = pvp.spell_length
    # WSC with cloze evaluation (and no negative sampling) is finetuned as a
    # language-modeling task; every other task uses the PVP-based setup below.
    if args.task.lower() == 'wsc' and args.cloze_eval and not args.wsc_negative:
        from tasks.language_model.finetune import lm_forward_step
        finetune(args,
                 train_valid_datasets_provider,
                 model_kwargs,
                 end_of_epoch_callback_provider=metrics_func_provider,
                 forward_step=lm_forward_step)
    else:
        # Cloze evaluation takes the multi-token flag from the PVP; otherwise it
        # depends on whether the task is one of the multiple-choice datasets.
        if args.cloze_eval:
            multi_token = pvp.is_multi_token
        else:
            multi_token = args.task.lower() in MULTI_CHOICE_DATASETS
        args.multi_token = multi_token
        if not multi_token:
            model_kwargs["model_type"] = "multiple_choice" if args.cloze_eval else "classification"
            model_kwargs["multi_token"] = False
            model_kwargs["num_labels"] = len(processor.get_labels())
        else:
            model_kwargs["model_type"] = "multiple_choice"
            model_kwargs["multi_token"] = True
            model_kwargs["num_labels"] = 1
        finetune(args,
                 train_valid_datasets_provider,
                 model_kwargs,
                 end_of_epoch_callback_provider=metrics_func_provider)
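The else-branch above is effectively a small decision table for model_kwargs. Below is a hypothetical, self-contained restatement of just that logic so it can be checked without processors, PVPs, or the training stack; is_multi_token stands in for pvp.is_multi_token, labels for processor.get_labels(), and in_multi_choice_datasets for membership in MULTI_CHOICE_DATASETS.

def build_model_kwargs(cloze_eval, is_multi_token, in_multi_choice_datasets, labels):
    # Mirrors the else-branch of Example #4: pick the model type, the
    # multi-token flag, and the label count from the task configuration.
    multi_token = is_multi_token if cloze_eval else in_multi_choice_datasets
    if not multi_token:
        return {
            "model_type": "multiple_choice" if cloze_eval else "classification",
            "multi_token": False,
            "num_labels": len(labels),
        }
    return {"model_type": "multiple_choice", "multi_token": True, "num_labels": 1}


# A plain classification task without cloze evaluation:
print(build_model_kwargs(False, False, False, ["0", "1"]))
# -> {'model_type': 'classification', 'multi_token': False, 'num_labels': 2}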
Example #5
def main(args):
    """Main program."""
    # No dataset provider and no extra model kwargs are passed here.
    finetune(args,
             None, {},
             end_of_epoch_callback_provider=metrics_func_provider,
             forward_step=lm_forward_step)