Example #1
def argParser():
    """
    This function creates a parser object which parses all the flags from the command line
    We can access the parsed command line values using the args object returned by this function
    Usage:
        First field is the flag name.
        dest=NAME is the name to reference when using the parameter (args.NAME)
        default is the default value of the parameter
    Example:
        > python run.py --batch-size 100
        args.batch_size <-- 100
    """
    # parser = argparse.ArgumentParser()
    parser = HyperOptArgumentParser(strategy='random_search')

    # trainer arguments
    parser.add_argument("--gpu", dest="gpu", default='0', help="GPU number")
    parser.add_argument("--mode", dest="mode", default='train', help="Mode is one of 'train', 'test'")
    parser.add_argument("--encode", dest="encode", default=0, type=int, help="encode is 0 or 1, default 0")
    parser.add_argument("--ntrials", dest="ntrials", default=20, type=int, help="Number of trials to run for hyperparameter tuning")

    # model-specific arguments
    # (non-tunable)
    parser.add_argument("--model", dest="model", default="baseline_lstm", help="Name of model to use")
    parser.add_argument("--epochs", dest="epochs", type=int, default=10, help="Number of epochs to train for")
    parser.add_argument("--patience", dest="patience", type=int, default=10, help="Learning rate decay scheduler patience, number of epochs")

    # (tunable arguments)
    parser.opt_list("--batch-size", dest="batch_size", type=int, default=100, help="Size of the minibatch",
        tunable=False, options=[32, 64, 128, 256])
    parser.opt_range("--learning-rate", dest="learning_rate", type=float, default=1e-3, help="Learning rate for training",
        tunable=True, low=1e-3, high=1e-1, nb_samples=4)
    parser.opt_list("--hidden-size", dest="hidden_size", type=int, default=100, help="Dimension of hidden layers",
        tunable=False, options=[32, 64, 128, 256])
    parser.opt_list('--optimizer', dest="optimizer", type=str, default='SGD', help='Optimizer to use (default: SGD)',
        tunable=False, options=['SGD', 'Adam'])
    parser.opt_range('--weight-decay', dest="weight_decay", type=float, default=1e-5,
        help='Weight decay for L2 regularization.',
        tunable=True, low=1e-6, high=1e-1, nb_samples=10)
    parser.opt_list('--frame-freq', dest="frame_freq", type=int, default=5,
        help='Frequency for sub-sampling frames from a video',
        tunable=True, options=[10, 30, 60, 75, 100])
    # (tcn-only arguments)
    parser.opt_list('--dropout', dest="dropout", type=float, default=0.05, help='Dropout applied to layers (default: 0.05)',
        tunable=True, options=[0.05, 0.1, 0.3, 0.5, 0.7])
    parser.opt_list('--levels', dest="levels", type=int, default=8, help='# of levels for TCN (default: 8)',
        tunable=True, options=[6, 8, 10, 12])
    # LSTM only arguments
    parser.opt_list('--num_layers', dest="num_layers", type=int, default=1, help='# of layers in LSTM (default: 1)',
        tunable=True, options=[1, 2, 3, 4, 5])

    # program arguments (dataset and logger paths)
    parser.add_argument("--raw_data_path", dest="raw_data_path", default="/mnt/disks/disk1/raw", help="Path to raw dataset")
    parser.add_argument('--proc_data_path', dest="proc_data_path", default="/mnt/disks/disk1/processed", help="Path to processed dataset")
    parser.add_argument("--log", dest="log", default='', help="Unique log directory name under log/. If the name is empty, do not store logs")
    parser.add_argument("--checkpoint", dest="checkpoint", type=str, default="", help="Path to the .pth checkpoint file. Used to continue training from checkpoint")

    # parse the command line args
    args = parser.parse_args()
    return args
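
The tunable flags above only define a search space; a driver still has to sample and run trials. A minimal sketch, assuming test_tube's API where the parsed namespace exposes .trials(n), with train() as a hypothetical entry point:

def train(trial_args):
    # hypothetical trial runner: build and fit a model from one sampled config
    print(trial_args.learning_rate, trial_args.weight_decay, trial_args.frame_freq)

if __name__ == '__main__':
    args = argParser()
    # draw args.ntrials samples over the flags marked tunable=True
    for trial in args.trials(args.ntrials):
        train(trial)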
Example #2
def get_args():
    parser = HyperOptArgumentParser()
    parser.add_argument('--work_dir',
                        metavar='DIR',
                        default="./work_dir",
                        type=str,
                        help='path to save output')
    parser.add_argument('--proj_name', type=str)
    parser.add_argument('--name', type=str)
    parser.add_argument('--gpus', type=str, default='7', help='how many gpus')
    parser.add_argument('--dist_bd',
                        type=str,
                        default='dp',
                        choices=('dp', 'ddp', 'ddp2'),
                        help='supports three options dp, ddp, ddp2')
    parser.add_argument('--use_16bit',
                        dest='use_16bit',
                        action='store_true',
                        help='if true uses 16 bit precision')
    parser.add_argument('--eval',
                        '--evaluate',
                        dest='evaluate',
                        action='store_true',
                        help='evaluate model on validation set')
    parser.add_argument('--seed', default=1, type=int)
    parser.add_argument('--load_mem', action='store_true')
    parser.add_argument('--track_grad_norm', action='store_true')

    parser = CGCNModel.add_model_specific_args(parser)
    return parser.parse_args()
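
CGCNModel.add_model_specific_args follows the common Lightning convention of letting each model extend a shared parser; a sketch of that convention, with hypothetical flags since CGCNModel's real ones are not part of this snippet:

class CGCNModel:
    @staticmethod
    def add_model_specific_args(parser):
        # hypothetical flags, only to illustrate the extend-and-return pattern
        parser.add_argument('--hidden_dim', type=int, default=128)
        parser.opt_list('--lr', type=float, default=1e-3,
                        tunable=True, options=[1e-4, 1e-3, 1e-2])
        return parser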
Example #3
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('-s',
                        '--sweep',
                        action='store_true',
                        help='Run a hyperparameter sweep over all options')

    # DataModule args
    parser = MNISTDataModule.add_argparse_args(parser)

    # Trainer args (https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#trainer-flags)
    parser = Trainer.add_argparse_args(parser)
    # Set some sane defaults
    for x in parser._actions:
        if x.dest == 'gpus':
            x.default = 1
        if x.dest == 'max_epochs':
            x.default = 100

    # TestTube args - hyperparam parser & slurm info
    parser = HyperOptArgumentParser(strategy='grid_search',
                                    add_help=False,
                                    parents=[parser])
    parser.add_argument('--test_tube_exp_name', default='sweep_test')
    parser.add_argument('--log_path', default='./pytorch-slurm')

    # LightningModule args (hyperparameters)
    parser = MNISTClassifier.add_model_specific_args(parser)

    args = parser.parse_args()
    return args
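
The loop over parser._actions leans on a private argparse attribute; set_defaults is the public mechanism and behaves the same, since parser-level defaults always override argument-level defaults. A self-contained sketch:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--gpus', type=int, default=0)
parser.add_argument('--max_epochs', type=int, default=1000)
parser.set_defaults(gpus=1, max_epochs=100)   # overrides the defaults above
assert parser.parse_args([]).gpus == 1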
Example #4
def parse_args():
    def path(p):
        return os.path.abspath(os.path.expanduser(p))

    parser = HyperOptArgumentParser()
    parser.add_argument("--dataset", type=str, default="trec_web.1-200.asrc")
    parser.add_argument("--asrc-path", type=path, default=None)
    parser.add_argument("--log-path", type=path, default="robustness_log")
    parser.add_argument("--test", action='store_true')
    parser.add_argument("--fp16", action='store_true')
    parser.add_argument("--gpu-num", type=int, default=1)
    parser.add_argument("--model",
                        type=str,
                        choices=["bert", "mp", "conv_knrm", "all"],
                        default="all")
    parser.add_argument("--exp",
                        type=str,
                        default="weight_decay",
                        choices=["dropout", "weight_decay"])
    parser.add_argument("--saved-preprocessor",
                        type=path,
                        default="preprocessor")
    parser.opt_list("--weight-decay",
                    type=float,
                    tunable=True,
                    options=[
                        0.0001, 0.001, 0.01, 0.02, 0.04, 0.06, 0.08, 0.1, 0.12,
                        0.14, 0.16, 0.18, 0.20
                    ])
    args = parser.parse_args()
    return args
Example #5
def main():
    parser = HyperOptArgumentParser(
        description='Train a PyTorch Lightning model on the Yeast dataset',
        strategy='random_search'
    )

    parser.opt_list('--nb_layers', default=2, type=int, tunable=False, options=[2, 4, 8])
    parser.opt_range('--layer_size', default=20, type=int, tunable=False, low=10, high=200, nb_samples=10, help="size of the hidden layer")

    parser.add_argument('--model', default="model.ptl", help="path to save the model")
    parser.add_argument('--train', default="yeast_train.svm", help="path to the training data")
    parser.add_argument('--val', default="yeast_test.svm", help="path to the validation data")

    hparams = parser.parse_args()
    hparams.optimize_parallel_cpu(train_main, nb_trials=20, nb_workers=8)
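
optimize_parallel_cpu runs the supplied function once per sampled trial across 8 worker processes; a hedged sketch of the expected entry point, assuming each worker is invoked with just the sampled hparams namespace:

def train_main(hparams):
    # hypothetical trial body: build a model from one sampled configuration,
    # fit on hparams.train, validate on hparams.val, save to hparams.model
    print('nb_layers:', hparams.nb_layers, 'layer_size:', hparams.layer_size)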
Example #6
 def test_preprocessing_noise(self):
     dataset = MaestroDataset('/Volumes/Elements/Datasets/maestro-v2.0.0',
                              item_length=176400,
                              sampling_rate=44100,
                              mode='validation',
                              max_file_count=20,
                              shuffle_with_seed=123)
     parser = HyperOptArgumentParser(strategy='random_search',
                                     add_help=False)
     #parser = ArgumentParser()
     parser = ContrastivePredictiveSystem.add_model_specific_args(
         parser, root_dir='../')
     hparams = parser.parse_args()
     preprocessing = PreprocessingModule(hparams)
     pass
Example #7
def load_yaml_args(parser: HyperOptArgumentParser, log):
    """ Function that load the args defined in a YAML file and replaces the values
        parsed by the HyperOptArgumentParser """
    old_args = vars(parser.parse_args())
    configs = old_args.get("config")
    if configs:
        yaml_file = yaml.load(open(configs).read(), Loader=yaml.FullLoader)
        for key, value in yaml_file.items():
            if key in old_args:
                old_args[key] = value
            else:
                raise Exception(
                    "{} argument defined in {} is not valid!".format(
                        key, configs))
    else:
        log.warning("We recommend the usage of YAML files to keep track \
            of the hyperparameter during testing and training.")
    return TTNamespace(**old_args)
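
The loader only overrides flags that already exist on the parser; unknown YAML keys raise. A hedged round-trip sketch with hypothetical flag names:

import yaml

# keys must match flags already registered on the parser
with open('config.yaml', 'w') as f:
    yaml.safe_dump({'batch_size': 128, 'learning_rate': 1e-3}, f)
# hparams = load_yaml_args(parser, log)  # with --config config.yaml on argv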
Example #8
def get_all_params(search_type='grid_search', args=None):

    # Raise an error if the user passed other command line arguments
    # (they could override configs in confusing ways)
    if args is not None and len(args) != 8:
        raise ValueError(
            'No command line arguments allowed other than config file names')
    elif args is None and len(sys.argv[1:]) != 8:
        raise ValueError(
            'No command line arguments allowed other than config file names')

    # Create parser
    parser = HyperOptArgumentParser(strategy=search_type)
    parser.add_argument('--data_config', type=str)
    parser.add_argument('--model_config', type=str)
    parser.add_argument('--training_config', type=str)
    parser.add_argument('--compute_config', type=str)

    namespace, extra = parser.parse_known_args(args)

    # Add arguments from all configs
    configs = [
        namespace.data_config, namespace.model_config,
        namespace.training_config, namespace.compute_config
    ]
    for config in configs:
        config_json = commentjson.load(open(config, 'r'))
        for (key, value) in config_json.items():
            add_to_parser(parser, key, value)

    # Add save/user dirs
    parser.add_argument('--save_dir', default=get_user_dir('save'), type=str)
    parser.add_argument('--data_dir', default=get_user_dir('data'), type=str)

    # Add parameters dependent on previous inputs
    namespace, extra = parser.parse_known_args(args)
    add_dependent_params(parser, namespace)

    return parser.parse_args(args)
Example #9
def get_all_params():

    # raise error if user has other command line arguments specified
    if len(sys.argv[1:]) != 6:
        raise ValueError(
            'No command line arguments allowed other than config file names')

    def add_to_parser(parser, arg_name, value):
        if arg_name == 'expt_ids' or arg_name == 'expt_ids_to_keep':
            # treat expt_ids differently, want to parse full lists as one
            if isinstance(value, list):
                value = ';'.join(value)
            parser.add_argument('--' + arg_name, default=value)
        elif isinstance(value, list):
            parser.opt_list('--' + arg_name, options=value, tunable=True)
        else:
            parser.add_argument('--' + arg_name, default=value)

    # create parser
    parser = HyperOptArgumentParser(strategy='grid_search')
    parser.add_argument('--data_config', type=str)
    parser.add_argument('--model_config', type=str)
    parser.add_argument('--train_config', type=str)

    namespace, extra = parser.parse_known_args()

    # add arguments from all configs
    configs = [
        namespace.data_config, namespace.model_config, namespace.train_config
    ]
    for config in configs:
        config_dict = yaml.safe_load(open(config))
        for (key, value) in config_dict.items():
            add_to_parser(parser, key, value)

    return parser.parse_args()
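
Here the YAML value's type picks the parser call: scalars become fixed flags, lists become grid-search axes, and the expt_ids keys are joined into a single ';'-separated string. A runnable illustration with a hypothetical config:

import yaml

cfg = yaml.safe_load("""
n_layers: 2
learning_rate: [0.001, 0.0001]
expt_ids: [e1, e2]
""")
for key, value in cfg.items():
    if key.startswith('expt_ids') or not isinstance(value, list):
        print(key, '-> add_argument (fixed)')
    else:
        print(key, '-> opt_list (grid axis)')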
Example #10
def train(hparams):
    # `exp` and `train` are referenced below; minimal setup reconstructed
    # from the --test_tube_exp_name and --log_path flags
    exp = Experiment(name=hparams.test_tube_exp_name, save_dir=hparams.log_path)
    exp.argparse(hparams)

    # define tensorflow graph
    x = tf.placeholder(dtype=tf.int32, name='x')
    y = tf.placeholder(dtype=tf.int32, name='y')
    out = x * y

    sess = tf.Session()

    # Run the tf op
    for train_step in range(0, 100):
        output = sess.run(out, feed_dict={x: hparams.x_val, y: hparams.y_val})
        exp.log({'fake_err': output})

    # save exp when we're done
    exp.save()


# set up our argparser and make the y_val tunable
parser = HyperOptArgumentParser(strategy='random_search')
parser.add_argument('--test_tube_exp_name', default='my_test')
parser.add_argument('--log_path', default='/Users/waf/Desktop/test')
parser.opt_list('--y_val', default=12, options=[1, 2, 3, 4], tunable=True)
parser.opt_list('--x_val', default=12, options=[20, 12, 30, 45], tunable=True)
hyperparams = parser.parse_args()

# optimize on 4 gpus at the same time
# each gpu will get 1 experiment with a set of hyperparams
hyperparams.optimize_parallel_gpu(train,
                                  gpu_ids=['1', '0', '3', '2'],
                                  nb_trials=4,
                                  nb_workers=4)
Example #11
# module purge
# module load icc/2018.1.163-GCC-6.4.0-2.28
# module load OpenMPI/2.1.2
# module load goolfc/2017b
# module load TensorFlow/1.7.0-Python-3.6.3
# MPIRUNFILE=/share/apps/software/Compiler/intel/2018.1.163-GCC-6.4.0-2.28/OpenMPI/2.1.2/bin/mpirun


if __name__ == '__main__':
    # Set up our argparser and make the y_val tunable.
    hyper_parser = HyperOptArgumentParser(strategy='random_search')
    hyper_parser.add_argument('--test_tube_exp_name', default='my_test')
    hyper_parser.add_argument('--log_path', default='.')
    hyper_parser.add_argument('--seed', default=42, type=int)
    hyper_parser = esnnparms(hyper_parser)
    hyperparams = hyper_parser.parse_args()
    print(hyperparams)
    # Enable cluster training.
    cluster = SlurmCluster(
        hyperparam_optimizer=hyperparams,
        log_path=hyperparams.log_path,
        python_cmd='python3',
        enable_log_err=True,
        enable_log_out=True
        #test_tube_exp_name=hyperparams.test_tube_exp_name
    )

    # Email results if your hpc supports it.
    cluster.notify_job_status(
        email='*****@*****.**', on_done=False, on_fail=True)
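
The snippet stops before anything is submitted; a hedged sketch of the dispatch step, using test_tube's optimize_parallel_cluster_gpu (one SLURM job per trial) with train as a hypothetical entry point:

    cluster.per_experiment_nb_gpus = 1
    cluster.optimize_parallel_cluster_gpu(
        train,
        nb_trials=4,
        job_name=hyperparams.test_tube_exp_name,
    )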
Example #12
def get_args(command=None):
    # parser = argparse.ArgumentParser()
    parser = HyperOptArgumentParser(strategy="random_search", add_help=False)
    # Analysis args
    # parser.add_argument("--bert_model", default='bert-base-uncased', type=str, help="bert model name")
    parser.add_argument(
        "--bert_model",
        default="distilbert-base-uncased",
        type=str,
        help="bert model name",
    )
    parser.add_argument(
        "--emb_file",
        default="~/checkpoint/bert_vectors/",
        type=str,
        help="location of embedding file",
    )
    parser.add_argument(
        "--data_loc",
        default="~/checkpoint/dialog_metric/convai2_data/",
        type=str,
        help="location of data dump",
    )
    parser.add_argument("--data_name",
                        default="convai2",
                        type=str,
                        help="convai2/cornell_movie")
    parser.add_argument("--tok_file",
                        default="na",
                        type=str,
                        help="tokens and word dict file")
    parser.add_argument("--pca_file",
                        default="na",
                        type=str,
                        help="pca saved weights file")
    parser.opt_list(
        "--learn_down",
        default=False,
        action="store_true",
        options=[True, False],
        tunable=False,
    )
    parser.opt_list(
        "--fix_down",
        default=False,
        action="store_true",
        options=[True, False],
        tunable=False,
    )
    parser.add_argument(
        "--trained_bert_suffix",
        default="ep_10_lm",
        type=str,
        help="folder to look for trained bert",
    )
    parser.add_argument("--tc", default=False, action="store_true")
    parser.opt_list(
        "--downsample",
        default=True,
        action="store_true",
        options=[True, False],
        tunable=False,
    )
    parser.opt_list("--down_dim",
                    type=int,
                    default=300,
                    options=[100, 300, 400],
                    tunable=False)
    parser.add_argument("--load_fine_tuned", default=True, action="store_true")
    # parser.add_argument("--fine_tune_model", default="~/checkpoint/dialog_metric/cleaned/bert_lm",type=str)
    parser.add_argument(
        "--fine_tune_model",
        default="~/checkpoint/dialog_metric/cleaned/distilbert_lm",
        type=str,
    )
    # Experiment ID
    parser.add_argument("--id", default="ruber_bs", type=str)
    # Model training args
    parser.add_argument("--device", default="cuda", type=str, help="cuda/cpu")
    parser.add_argument(
        "--model",
        default="models.TransitionPredictorMaxPoolLearnedDownsample",
        type=str,
        help="full model name path",
    )
    parser.opt_list(
        "--optim",
        default="adam,lr=0.0001",
        type=str,
        help="optimizer",
        options=["adam,lr=0.001", "adam,lr=0.01", "adam,lr=0.0001"],
        tunable=False,
    )
    parser.add_argument("--epochs",
                        default=10,
                        type=int,
                        help="number of epochs")
    parser.add_argument("--margin", default=0.5, type=float, help="margin")
    parser.add_argument(
        "--train_mode",
        default="ref_score",
        type=str,
        help="ref_score/cont_score/all/nce",
    )
    parser.add_argument("--num_nce",
                        type=int,
                        default=5,
                        help="number of nce samples per scheme")
    parser.add_argument(
        "--model_save_dir",
        default="~/checkpoint/dialog_metric/",
        type=str,
        help="model save dir",
    )
    parser.add_argument(
        "--model_load_path",
        default="~/checkpoint/dialog_metric/",
        type=str,
        help="if there is a need of different load path",
    )
    parser.add_argument("--batch_size",
                        default=64,
                        type=int,
                        help="batch size")
    parser.add_argument(
        "--load_model",
        default=False,
        action="store_true",
        help="load model from previous checkpoint",
    )
    parser.add_argument(
        "--logger_dir",
        default="./",
        type=str,
        help="log directory (must be created)",
    )
    parser.add_argument("--log_interval",
                        default=100,
                        type=int,
                        help="log interval")
    parser.add_argument("--watch_model",
                        default=False,
                        action="store_true",
                        help="wandb watch model")
    parser.add_argument(
        "--vector_mode",
        default=True,
        action="store_true",
        help="if false, train with word representations",
    )
    parser.add_argument(
        "--remote_logging",
        default=False,
        action="store_true",
        help="wandb remote loggin on or off",
    )
    parser.add_argument("--wandb_project", default="dialog-metric", type=str)
    parser.add_argument("--bidirectional", default=False, action="store_true")
    parser.add_argument("--dataloader_threads", default=8, type=int)
    parser.add_argument("--exp_data_folder",
                        default="na",
                        help="exp data folder")
    parser.add_argument("--num_workers",
                        default=4,
                        type=int,
                        help="dataloader num workers")
    parser.opt_list(
        "--clip",
        default=0.5,
        type=float,
        help="gradient clipping",
        options=[0.0, 0.5, 1.0],
        tunable=False,
    )
    parser.opt_list(
        "--dropout",
        default=0.2,
        type=float,
        help="gradient clipping",
        options=[0.0, 0.2],
        tunable=False,
    )
    parser.opt_list(
        "--decoder_hidden",
        default=200,
        type=int,
        help="decoder hidden values",
        options=[100, 200, 500, 700],
        tunable=False,
    )
    parser.add_argument("--gpus",
                        type=str,
                        default="-1",
                        help="how many gpus to use in the node")
    parser.add_argument("--debug",
                        default=False,
                        action="store_true",
                        help="if true, set debug modes")
    ## Evaluation args
    parser.add_argument(
        "--corrupt_type",
        default="rand_utt",
        type=str,
        help=
        "all/word_drop/word_order/word_repeat/rand_utt/model_false/rand_back/only_semantics/only_syntax/context_corrupt",
    )
    parser.add_argument(
        "--corrupt_context_type",
        default="rand",
        type=str,
        help="rand/drop/shuffle/model_true/model_false/progress/none",
    )
    parser.add_argument("--drop_per",
                        default=0.50,
                        type=float,
                        help="drop percentage")
    parser.add_argument("--eval_val",
                        default=False,
                        action="store_true",
                        help="only eval val set")
    parser.add_argument(
        "--model_response_pre",
        default="na",
        type=str,
        help="model response file prefix",
    )
    parser.add_argument(
        "--load_model_responses",
        default=True,
        action="store_true",
        help="load model responses",
    )
    parser.add_argument(
        "--corrupt_model_names",
        default="seq2seq",
        type=str,
        help="comma separated models",
    )
    parser.add_argument(
        "--restore_version",
        default=-1,
        type=int,
        help="if > -1, restore training from the given version",
    )

    # Baseline args
    parser.add_argument("--train_baseline",
                        default="na",
                        help="ruber/bilstm",
                        type=str)
    ## RUBER
    parser.add_argument(
        "--word2vec_context_size",
        default=3,
        type=int,
        help="context size for word2vec training",
    )
    parser.add_argument("--word2vec_embedding_dim",
                        default=300,
                        type=int,
                        help="embedding dim")
    parser.add_argument("--word2vec_epochs",
                        default=100,
                        type=int,
                        help="word2vec training epochs")
    parser.add_argument(
        "--word2vec_out",
        default="~/checkpoint/dialog_metric/ruber/w2v.pt",
        type=str,
        help="word2vec output location",
    )
    parser.add_argument("--word2vec_lr",
                        default=0.001,
                        type=float,
                        help="word2vec lr")
    parser.add_argument("--word2vec_batchsize", default=512, type=int)
    parser.add_argument("--ruber_ref_pooling_type",
                        default="max_min",
                        type=str,
                        help="max_min/avg")
    parser.add_argument("--ruber_unref_pooling_type",
                        default="max",
                        type=str,
                        help="max/mean")
    parser.add_argument("--ruber_load_emb",
                        action="store_true",
                        help="load trained word2vec")
    parser.add_argument("--ruber_lstm_dim",
                        default=300,
                        type=int,
                        help="dimensions of ruber encoder")
    parser.add_argument("--ruber_mlp_dim",
                        default=200,
                        type=int,
                        help="dimensions of ruber encoder")
    parser.add_argument("--ruber_dropout",
                        default=0.1,
                        type=float,
                        help="ruber dropout")
    parser.add_argument("--num_words", default=-1, type=int)

    ## Data collection args
    parser.add_argument("--agent",
                        type=str,
                        default="kvmemnn",
                        help="repeat/ir/seq2seq")
    parser.add_argument("--mode",
                        type=str,
                        default="train",
                        help="train/test/valid")
    parser.add_argument(
        "--models",
        type=str,
        default="seq2seq,repeat",
        help="comma separated model values",
    )
    parser.add_argument("--response_file",
                        type=str,
                        default="~/Projects/online_dialog_eval/elisa_data/")
    parser.add_argument(
        "--mf",
        type=str,
        default=
        "/checkpoint/parlai/zoo/convai2/seq2seq_naacl2019_abibaseline/model",
        help="only for special cases",
    )
    parser.add_argument(
        "--only_data",
        action="store_true",
        default=False,
        help="only extract and store dialog data",
    )

    ## SLURM args
    parser.add_argument(
        "--slurm_log_path",
        type=str,
        default="~/checkpoint/dialog_metrics/ckpt/",
        help="slurm log path",
    )
    parser.add_argument("--per_experiment_nb_gpus",
                        type=int,
                        default=1,
                        help="number of gpus")
    parser.add_argument("--per_experiment_nb_cpus",
                        type=int,
                        default=16,
                        help="number of cpus")
    parser.add_argument("--nb_gpu_nodes",
                        type=int,
                        default=1,
                        help="number of gpu nodes")
    parser.add_argument("--job_time",
                        type=str,
                        default="23:59:00",
                        help="time")
    parser.add_argument("--gpu_type",
                        type=str,
                        default="volta",
                        help="gpu type")
    parser.add_argument("--gpu_partition",
                        type=str,
                        default="learnfair",
                        help="gpu type")
    parser.add_argument(
        "--nb_hopt_trials",
        type=int,
        default=1,
        help="how many grid search trials to run",
    )
    parser.add_argument("--train_per_check", type=float, default=1.0)
    parser.add_argument("--val_per_check", type=float, default=1.0)
    parser.add_argument("--test_per_check", type=float, default=1.0)
    parser.add_argument(
        "--use_cluster",
        action="store_true",
        default=False,
        help="activate cluster mode",
    )
    ## Inference args
    parser.add_argument("--model_name",
                        type=str,
                        default="na",
                        help="model name")
    parser.add_argument("--model_version",
                        type=str,
                        default="version_0",
                        help="model version")
    parser.add_argument("--use_ddp", action="store_true", default=False)
    parser.add_argument("--human_eval", action="store_true", default=False)
    parser.add_argument(
        "--human_eval_file",
        type=str,
        default="~/checkpoint/dialog_metric/controllable_dialogs.csv",
    )
    parser.add_argument("--results_file",
                        type=str,
                        default="test_results.jsonl")
    ## Corruption args
    parser.add_argument(
        "--corrupt_pre",
        type=str,
        default="~/checkpoint/dialog_metric/convai2_data/convai2_test_",
    )
    parser.add_argument("--corrupt_ne", type=int, default=1)
    parser.add_argument("--test_suffix", type=str, default="true_response")
    parser.add_argument("--test_column", type=str, default="true_response")

    if command:
        return parser.parse_args(command.split(" "))
    else:
        return parser.parse_args()
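
Because get_args can split a command string, argument sets are easy to build programmatically, e.g. in tests; a short usage sketch with flags defined above:

args = get_args('--data_name convai2 --epochs 5 --debug')
assert args.debug and args.epochs == 5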
Example #13
 def hparams(self):
     parser = HyperOptArgumentParser()
     parser = AdamW.add_optim_specific_args(parser)
     return parser.parse_args()
Example #14
 def hparams(self):
     parser = HyperOptArgumentParser()
     parser = WarmupConstant.add_scheduler_specific_args(parser)
     return parser.parse_args()
Example #15
    # Batching
    parser.add_argument("--batch_size",
                        default=6,
                        type=int,
                        help="Batch size to be used.")
    parser.add_argument(
        "--accumulate_grad_batches",
        default=2,
        type=int,
        help=("Accumulated gradients runs K small batches of size N before "
              "doing a backwards pass."),
    )

    parser.add_argument("--num_nodes", type=int, default=1)

    # gpu args
    parser.add_argument("--gpus", type=int, default=0, help="How many gpus")
    parser.add_argument(
        "--val_percent_check",
        default=1.0,
        type=float,
        help=
        ("If you don't want to use the entire dev set (for debugging or "
         "if it's huge), set how much of the dev set you want to use with this flag."
         ),
    )

    parser = BERTClassifier.add_model_specific_args(parser)
    hparams = parser.parse_args()
    main(hparams)
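
main itself is not shown; a hedged sketch of what it typically does in this pattern, assuming BERTClassifier takes the parsed hparams and the Trainer flags match this (older) pytorch_lightning API:

def main(hparams):
    from pytorch_lightning import Trainer
    model = BERTClassifier(hparams)
    trainer = Trainer(
        gpus=hparams.gpus,
        num_nodes=hparams.num_nodes,
        accumulate_grad_batches=hparams.accumulate_grad_batches,
        val_percent_check=hparams.val_percent_check,  # older PL flag name
    )
    trainer.fit(model)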
Example #16
# In paper, ant was dropped into 9 envs, but its S,A were same. No transfer
# learning yet.

exp = Experiment("meta learning shared hierarchies", save_dir="logs")

parser = HyperOptArgumentParser(strategy="random_search")
parser.opt_list(
    "--batch_size",
    default=128,
    type=int,
    tunable=True,
    options=[2**n for n in range(5, 10)],
)

args = parser.parse_args()

args.max_steps = 1000
args.subpolicy_duration = 200
args.num_policies = 10
args.max_buffer_size = 1_000_000
args.env_names = ["Ant-v2"]

exp.argparse(args)

State = Any
Action = Any
Timestep = int


class MasterPolicy(nn.Module):
Example #17
def test_add_dependent_params(tmpdir):

    # -----------------
    # ae
    # -----------------
    # arch params correctly added to parser
    parser = HyperOptArgumentParser(strategy='grid_search')
    utils.add_to_parser(parser, 'model_class', 'ae')
    utils.add_to_parser(parser, 'model_type', 'conv')
    utils.add_to_parser(parser, 'n_ae_latents', 32)
    utils.add_to_parser(parser, 'n_input_channels', 2)
    utils.add_to_parser(parser, 'y_pixels', 32)
    utils.add_to_parser(parser, 'x_pixels', 32)
    utils.add_to_parser(parser, 'ae_arch_json', None)
    utils.add_to_parser(parser, 'approx_batch_size', 200)
    utils.add_to_parser(parser, 'mem_limit_gb', 10)
    namespace, _ = parser.parse_known_args([])
    utils.add_dependent_params(parser, namespace)
    assert '--architecture_params' in parser.opt_args

    # linear autoencoder
    parser = HyperOptArgumentParser(strategy='grid_search')
    utils.add_to_parser(parser, 'model_class', 'ae')
    utils.add_to_parser(parser, 'model_type', 'linear')
    utils.add_to_parser(parser, 'n_ae_latents', 32)
    utils.add_to_parser(parser, 'n_input_channels', 2)
    utils.add_to_parser(parser, 'y_pixels', 32)
    utils.add_to_parser(parser, 'x_pixels', 32)
    utils.add_to_parser(parser, 'ae_arch_json', None)
    utils.add_to_parser(parser, 'approx_batch_size', 200)
    utils.add_to_parser(parser, 'mem_limit_gb', 10)
    namespace, _ = parser.parse_known_args([])
    utils.add_dependent_params(parser, namespace)
    print(parser)
    assert namespace.model_type == 'linear'
    assert namespace.n_latents == '32'

    # raise exception when max latents exceeded
    # parser = HyperOptArgumentParser(strategy='grid_search')
    # utils.add_to_parser(parser, 'model_class', 'ae')
    # utils.add_to_parser(parser, 'n_ae_latents', 100000)
    # utils.add_to_parser(parser, 'n_input_channels', 2)
    # utils.add_to_parser(parser, 'y_pixels', 32)
    # utils.add_to_parser(parser, 'x_pixels', 32)
    # utils.add_to_parser(parser, 'ae_arch_json', None)
    # utils.add_to_parser(parser, 'approx_batch_size', 200)
    # utils.add_to_parser(parser, 'mem_limit_gb', 10)
    # namespace, _ = parser.parse_known_args([])
    # with pytest.raises(ValueError):
    #     utils.add_dependent_params(parser, namespace)

    # -----------------
    # vae
    # -----------------
    # arch params correctly added to parser
    parser = HyperOptArgumentParser(strategy='grid_search')
    utils.add_to_parser(parser, 'model_class', 'vae')
    utils.add_to_parser(parser, 'model_type', 'conv')
    utils.add_to_parser(parser, 'n_ae_latents', 32)
    utils.add_to_parser(parser, 'n_input_channels', 2)
    utils.add_to_parser(parser, 'y_pixels', 32)
    utils.add_to_parser(parser, 'x_pixels', 32)
    utils.add_to_parser(parser, 'ae_arch_json', None)
    utils.add_to_parser(parser, 'approx_batch_size', 200)
    utils.add_to_parser(parser, 'mem_limit_gb', 10)
    namespace, _ = parser.parse_known_args([])
    utils.add_dependent_params(parser, namespace)
    assert '--architecture_params' in parser.opt_args

    # -----------------
    # neural
    # -----------------
    # make tmp hdf5 file
    path = tmpdir.join('data.hdf5')
    idx_data = {
        'i0': np.array([0, 1, 2]),
        'i1': np.array([3, 4, 5]),
        'i2': np.array([6, 7, 8])
    }
    with h5py.File(path, 'w') as f:
        group0 = f.create_group('regions')
        # groupa = f.create_group('neural')
        group1 = group0.create_group('indxs')
        group1.create_dataset('i0', data=idx_data['i0'])
        group1.create_dataset('i1', data=idx_data['i1'])
        group1.create_dataset('i2', data=idx_data['i2'])

    # subsample idxs not added to parser when not requested
    parser = HyperOptArgumentParser(strategy='grid_search')
    utils.add_to_parser(parser, 'model_class', 'neural-ae')
    utils.add_to_parser(parser, 'subsample_method', 'none')
    namespace, _ = parser.parse_known_args([])
    utils.add_dependent_params(parser, namespace)
    assert '--subsample_idxs_name' not in parser.opt_args

    # subsample idxs added to parser when requested (all datasets)
    parser = HyperOptArgumentParser(strategy='grid_search')
    utils.add_to_parser(parser, 'data_dir', tmpdir)
    utils.add_to_parser(parser, 'lab', '')
    utils.add_to_parser(parser, 'expt', '')
    utils.add_to_parser(parser, 'animal', '')
    utils.add_to_parser(parser, 'session', '')
    utils.add_to_parser(parser, 'model_class', 'neural-ae')
    utils.add_to_parser(parser, 'subsample_method', 'single')
    utils.add_to_parser(parser, 'subsample_idxs_dataset', 'all')
    namespace, _ = parser.parse_known_args([])
    utils.add_dependent_params(parser, namespace)
    assert '--subsample_idxs_name' in parser.opt_args
    parser_vals = parser.opt_args['--subsample_idxs_name'].opt_values.keys()
    assert sorted(['i0', 'i1', 'i2']) == sorted(parser_vals)

    # subsample idxs added to parser when requested (single dataset)
    parser = HyperOptArgumentParser(strategy='grid_search')
    utils.add_to_parser(parser, 'data_dir', tmpdir)
    utils.add_to_parser(parser, 'lab', '')
    utils.add_to_parser(parser, 'expt', '')
    utils.add_to_parser(parser, 'animal', '')
    utils.add_to_parser(parser, 'session', '')
    utils.add_to_parser(parser, 'model_class', 'neural-ae')
    utils.add_to_parser(parser, 'subsample_method', 'single')
    utils.add_to_parser(parser, 'subsample_idxs_dataset', 'i0')
    namespace, _ = parser.parse_known_args([])
    utils.add_dependent_params(parser, namespace)
    parser.parse_args([])
    assert parser.parsed_args['subsample_idxs_name'] == 'i0'

    # raise exception when dataset is not a string
    parser = HyperOptArgumentParser(strategy='grid_search')
    utils.add_to_parser(parser, 'model_class', 'neural-ae')
    utils.add_to_parser(parser, 'subsample_method', 'single')
    utils.add_to_parser(parser, 'subsample_idxs_dataset', ['i0', 'i1'])
    namespace, _ = parser.parse_known_args([])
    with pytest.raises(ValueError):
        utils.add_dependent_params(parser, namespace)