예제 #1
0
def parse_args():
    """Build the ImageNet-training argument parser and parse sys.argv.

    Argument registration is delegated to the model, fit, data, DALI,
    and data-augmentation modules; each one attaches its own options to
    the shared parser before parsing.
    """
    parser = argparse.ArgumentParser(
        description="Train classification models on ImageNet",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Registration order is preserved from the original implementation.
    for register in (
        models.add_model_args,
        fit.add_fit_args,
        data.add_data_args,
        dali.add_dali_args,
        data.add_data_aug_args,
    ):
        register(parser)
    return parser.parse_args()
예제 #2
0
def get_args():
    """Assemble the command-line parser and return the parsed namespace."""
    parser = ArgumentParser()

    # Base flags that do not belong to any helper module.
    base = parser.add_argument_group('base')
    base.add_argument(
        '--nmr',
        action='store_true',
        help='determine whether use nmr data or not',
    )
    base.add_argument(
        '--drop_smile',
        action='store_true',
        help='If true, do not use SMILES data for prediction',
    )

    # Each helper registers its own option group on the shared parser.
    for add_group in (
        add_data_util_args,
        add_model_args,
        add_progress_args,
        add_training_args,
        add_learning_rate_option,
    ):
        add_group(parser)

    return parser.parse_args()
예제 #3
0
def get_training_args():
    """Build the full training argument parser and return parsed args.

    Uses a two-pass parse: ``parse_known_args`` first extracts
    ``--model-type`` so that model-specific options can be registered,
    then a final ``parse_args`` validates the complete command line.

    Returns:
        argparse.Namespace with all training options.
    """
    parser = argparse.ArgumentParser()
    # Dynet
    dynn.command_line.add_dynet_args(parser)
    # Data
    data_group = parser.add_argument_group("Data arguments")
    data_group.add_argument("--dataset",
                            default="sst",
                            choices=["sst", "amazon"])
    data_group.add_argument("--data-dir", default="data")
    data_group.add_argument("--reprocess-data", action="store_true")
    data_group.add_argument("--lowercase", action="store_true")
    # Optimization
    optim_group = parser.add_argument_group("Optimization arguments")
    optim_group.add_argument("--batch-size", default=150, type=int)
    optim_group.add_argument("--max-tokens-per-batch", default=4000, type=int)
    optim_group.add_argument("--n-epochs", default=10, type=int)
    optim_group.add_argument("--patience", default=2, type=int)
    optim_group.add_argument("--lr", default=0.001, type=float)
    optim_group.add_argument("--lr-decay", default=0.1, type=float)
    # Model
    model_group = parser.add_argument_group("Model arguments")
    model_group.add_argument("--model-type",
                             default="sopa",
                             choices=models.supported_model_types)
    model_group.add_argument("--model-file", default="sopa_sst.npz")
    model_group.add_argument("--pretrained-embeds", default=None, type=str)
    model_group.add_argument("--freeze-embeds", action="store_true")
    model_group.add_argument("--normalize-embeds", action="store_true")
    # Misc
    misc_group = parser.add_argument_group("Miscellaneous arguments")
    # BUGFIX: --verbose was registered on model_group despite sitting under
    # the "Misc" section; attach it to misc_group so --help groups it
    # correctly (parse results are unaffected).
    misc_group.add_argument("--verbose", action="store_true")
    misc_group.add_argument("--log-file", default=None, type=str)
    misc_group.add_argument("--n-explain", default=10, type=int)
    misc_group.add_argument("--n-top-contrib", default=10, type=int)
    # Parse args to get model type
    args, _ = parser.parse_known_args()
    # Add model specific arguments
    models.add_model_args(args.model_type, parser)
    # Parse again to get all arguments
    args = parser.parse_args()
    return args
예제 #4
0
def run_training_pipeline(parser):
    """Register training CLI options, build the model, and run training.

    The optimizer, scheduler, and model choices come from the command
    line when any of the three flags is given; when all three are absent
    they are read from the YAML config file instead.

    Args:
        parser: argparse.ArgumentParser to extend with training options.
    """
    parser.add_argument("-f",
                        "--config",
                        default=False,
                        type=str,
                        help="Path to a YAML config file.")
    parser.add_argument(
        "--optimizer",
        default=False,
        type=str,
        help="Optimizer to be used during training.",
    )
    parser.add_argument(
        "--scheduler",
        default=False,
        type=str,
        help="LR scheduler to be used during training.",
    )
    parser.add_argument(
        "--model",
        default=False,
        type=str,
        # BUGFIX: removed duplicated word ("we we") from the help text.
        help="The estimator architecture we wish to use.",
    )
    # First pass: only the flags above are known yet; ignore the rest.
    args, _ = parser.parse_known_args()

    if not args.optimizer and not args.scheduler and not args.model:
        # No component chosen on the CLI — fall back to the YAML config.
        optimizer, scheduler, model = get_main_args_from_yaml(args)
    else:
        optimizer = args.optimizer
        scheduler = args.scheduler
        model = args.model

    # Register component-specific options now that the choices are known.
    parser = add_optimizer_args(parser, optimizer)
    parser = add_scheduler_args(parser, scheduler)
    parser = add_model_args(parser, model)
    parser = add_trainer_specific_args(parser)
    hparams = load_yaml_args(parser=parser, log=log)

    set_seed(hparams.seed)
    model = build_model(hparams)
    trainer = setup_training(hparams)

    if hparams.load_weights:
        model.load_weights(hparams.load_weights)

    log.info(f"{model.__class__.__name__} train starting:")
    trainer.fit(model)
예제 #5
0
    # LR-scheduler patience knobs — presumably consumed by a
    # ReduceLROnPlateau-style scheduler; verify against the scheduler setup.
    parser.add_argument('--patience_factor', default=2, type=int)

    parser.add_argument('--max_patience', default=64, type=int)

    parser.add_argument('--min_lr', default=1e-6, type=float)

    parser.add_argument('--threshold', default=1e-4, type=float)

    # optim.lr_scheduler.common
    parser.add_argument('--gamma', default=0.25, type=float)

    parser.add_argument('--early_stopping', default=128, type=int)

    # Print a model summary before training when set.
    parser.add_argument(
        '--model_summary',
        default=False,
        action='store_true',
    )

    # development
    parser.add_argument('--devrun', default=False, action='store_true')

    parser.add_argument('--nosave', default=False, action='store_true')

    # First pass extracts the model choice so model-specific options can
    # be registered before the final, strict parse below.
    tmp_args, _ = parser.parse_known_args()
    parser = add_model_args(parser, tmp_args)

    args = parser.parse_args()

    main(args)