def cli_main():
    """
    MODIFIED: task defaults to gec
    """
    parser = options.get_generation_parser(default_task='gec')
    args = options.parse_args_and_arch(parser)
    main(args)
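These cli_main wrappers (here and in Examples #7-#10) all assume the same surrounding module context: fairseq's options helpers are imported and a module-level main(args) entry point is defined elsewhere in the same file. A minimal sketch of that assumed scaffolding, not part of the original example:

# Assumed context for the snippet above (names taken from the snippet):
from fairseq import options  # provides get_generation_parser, parse_args_and_arch

def main(args):
    ...  # generation entry point, defined elsewhere in the same module

if __name__ == '__main__':
    cli_main()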
Example #2
def generate_main(data_dir, extra_flags=None):
    generate_parser = options.get_generation_parser()
    generate_args = options.parse_args_and_arch(
        generate_parser,
        [
            data_dir,
            '--path',
            os.path.join(data_dir, 'checkpoint_last.pt'),
            '--beam',
            '3',
            '--batch-size',
            '64',
            '--max-len-b',
            '5',
            '--gen-subset',
            'valid',
            '--no-progress-bar',
            '--print-alignment',
        ] + (extra_flags or []),
    )

    # evaluate model in batch mode
    generate.main(generate_args)

    # evaluate model interactively: input='-' makes the interactive entry
    # point read source sentences from stdin, which is temporarily swapped
    # for an in-memory stream below
    generate_args.buffer_size = 0
    generate_args.input = '-'
    generate_args.max_sentences = None
    orig_stdin = sys.stdin
    sys.stdin = StringIO('h e l l o\n')
    interactive.main(generate_args)
    sys.stdin = orig_stdin
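The manual sys.stdin swap above is not exception-safe: if interactive.main raises, stdin stays replaced. A sketch of the same redirection using unittest.mock.patch from the standard library, which restores stdin even on error:

from io import StringIO
from unittest import mock

# Equivalent stdin redirection that always restores sys.stdin on exit:
with mock.patch('sys.stdin', StringIO('h e l l o\n')):
    interactive.main(generate_args)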
Example #3
def train_translation_model(data_dir, arch, extra_flags=None):
    train_parser = options.get_training_parser()
    train_args = options.parse_args_and_arch(
        train_parser,
        [
            '--task',
            'translation',
            data_dir,
            '--save-dir',
            data_dir,
            '--arch',
            arch,
            '--optimizer',
            'nag',
            '--lr',
            '0.05',
            '--max-tokens',
            '500',
            '--max-epoch',
            '1',
            '--no-progress-bar',
            '--distributed-world-size',
            '1',
            '--source-lang',
            'in',
            '--target-lang',
            'out',
        ] + (extra_flags or []),
    )
    train.main(train_args)
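A hypothetical invocation of this helper for illustration: fconv_iwslt_de_en is a real fairseq architecture name, but the extra flag is an assumption, and data_dir must already contain data binarized for the 'in'/'out' language pair:

import tempfile

# Hypothetical usage sketch (not from the original example):
with tempfile.TemporaryDirectory('test_fconv') as data_dir:
    # preprocessing that writes binarized 'in'/'out' data into data_dir
    # would have to happen here first
    train_translation_model(data_dir, 'fconv_iwslt_de_en',
                            extra_flags=['--log-format', 'json'])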
Example #4
def cli_main():
    parser = options.get_training_parser()
    args = options.parse_args_and_arch(parser)

    if args.distributed_init_method is None:
        distributed_utils.infer_init_method(args)

    if args.distributed_init_method is not None:
        # distributed training
        distributed_main(args.device_id, args)
    elif args.distributed_world_size > 1:
        # fallback for single node with multiple GPUs
        port = random.randint(10000, 20000)
        args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
        args.distributed_rank = None  # set based on device id
        if max(args.update_freq) > 1 and args.ddp_backend != 'no_c10d':
            print('| NOTE: you may get better performance with: --ddp-backend=no_c10d')
        torch.multiprocessing.spawn(
            fn=distributed_main,
            args=(args, ),
            nprocs=args.distributed_world_size,
        )
    else:
        # single GPU training
        main(args)
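torch.multiprocessing.spawn passes each worker's process index as the first positional argument, ahead of the entries of args, which is why distributed_main above accepts a device id first. A standalone sketch of that calling convention:

import torch.multiprocessing as mp

def worker(rank, greeting):
    # spawn() prepends the process index (0 .. nprocs-1) to args
    print(f'{greeting} from worker {rank}')

if __name__ == '__main__':
    mp.spawn(fn=worker, args=('hello',), nprocs=2)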
Example #5
def eval_lm_main(data_dir):
    eval_lm_parser = options.get_eval_lm_parser()
    eval_lm_args = options.parse_args_and_arch(
        eval_lm_parser,
        [
            data_dir,
            '--path',
            os.path.join(data_dir, 'checkpoint_last.pt'),
            '--no-progress-bar',
        ],
    )
    eval_lm.main(eval_lm_args)
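This helper expects checkpoint_last.pt to already exist in data_dir; in these examples that file comes from a prior training run saving into the same directory via --save-dir (see train_language_model in Example #6 below). A hypothetical end-to-end chain, with the arch name assumed:

# Hypothetical sketch: training writes checkpoint_last.pt into data_dir,
# which eval_lm_main then loads via --path.
train_language_model(data_dir, 'fconv_lm')
eval_lm_main(data_dir)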
Example #6
def train_language_model(data_dir, arch):
    train_parser = options.get_training_parser()
    train_args = options.parse_args_and_arch(
        train_parser,
        [
            '--task',
            'language_modeling',
            data_dir,
            '--arch',
            arch,
            '--optimizer',
            'nag',
            '--lr',
            '0.1',
            '--criterion',
            'adaptive_loss',
            '--adaptive-softmax-cutoff',
            '5,10,15',
            '--decoder-layers',
            '[(850, 3)] * 2 + [(1024,4)]',
            '--decoder-embed-dim',
            '280',
            '--max-tokens',
            '500',
            '--tokens-per-sample',
            '500',
            '--save-dir',
            data_dir,
            '--max-epoch',
            '1',
            '--no-progress-bar',
            '--distributed-world-size',
            '1',
            '--ddp-backend',
            'no_c10d',
        ],
    )
    train.main(train_args)
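The --decoder-layers value here is a Python expression for a list of (channels, kernel_size) tuples rather than a plain literal; fairseq's convolutional models evaluate such expression-valued strings when the model is built (via eval, to the best of my knowledge; treat that detail as an assumption). The expression expands like so:

# The flag's string value is an ordinary Python expression:
layers = eval('[(850, 3)] * 2 + [(1024,4)]')
print(layers)  # [(850, 3), (850, 3), (1024, 4)]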
Example #7
def cli_main():
    parser = options.get_eval_lm_parser()
    args = options.parse_args_and_arch(parser)
    main(args)
Example #8
def cli_main():
    parser = options.get_generation_parser(interactive=True)
    args = options.parse_args_and_arch(parser)
    main(args)
Example #9
def cli_main():
    parser = options.get_generation_parser()
    args = options.parse_args_and_arch(parser)
    main(args)
Example #10
def cli_main():
    parser = get_lm_scorer_parser()
    args = options.parse_args_and_arch(parser)

    main(args)
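Taken together, these examples drive parse_args_and_arch in two ways: with no argument list it reads sys.argv (the cli_main wrappers in Examples #1, #4, #7-#10), and with an explicit list of strings it builds args programmatically (the test helpers in Examples #2, #3, #5, #6). Plain argparse supports the same split, sketched here for illustration:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--beam', type=int, default=5)

cli_args = parser.parse_args()                  # reads sys.argv
list_args = parser.parse_args(['--beam', '3'])  # explicit flag list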