Example #1
 def add_optim_specific_args(
     parser: HyperOptArgumentParser, ) -> HyperOptArgumentParser:
     """
     Functions that parses Optimizer specific arguments and adds 
         them to the Namespace
     :param parent_parser: 
     """
     parser = super(RAdam, RAdam).add_optim_specific_args(parser)
     parser.add_argument("--b1",
                         default=0.9,
                         type=float,
                         help="Adams beta parameters (b1, b2).")
     parser.add_argument("--b2",
                         default=0.999,
                         type=float,
                         help="Adams beta parameters (b1, b2).")
     parser.add_argument("--eps",
                         default=1e-6,
                         type=float,
                         help="Adams epsilon.")
     parser.add_argument("--weight_decay",
                         default=0.0,
                         type=float,
                         help="Weight decay.")
     parser.add_argument(
         "--degenerated_to_sgd",
         default=False,
         help=
         "If this flag is on, the degenerated_to_sgd RAdam parameter is set to True.",
         action="store_true",
     )
     return parser
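Most of the optimizer snippets in this listing repeat the pattern above: a class-level hook takes a HyperOptArgumentParser, registers its flags, and returns it. A minimal sketch of how such a hook is driven, assuming RAdam is the optimizer wrapper class the method is defined on (not shown in this snippet):

from test_tube import HyperOptArgumentParser

# hypothetical driver for the hook above
parser = HyperOptArgumentParser(strategy="random_search", add_help=False)
parser = RAdam.add_optim_specific_args(parser)
hparams = parser.parse_args(["--b1", "0.95", "--degenerated_to_sgd"])
print(hparams.b1, hparams.degenerated_to_sgd)  # 0.95 True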
Example #2
File: adam.py Project: Unbabel/caption
 def add_optim_specific_args(
     parser: HyperOptArgumentParser, ) -> HyperOptArgumentParser:
     """
     Functions that parses Optimizer specific arguments and adds 
         them to the Namespace
     :param parser: 
     """
     parser = super(Adam, Adam).add_optim_specific_args(parser)
     parser.add_argument("--b1",
                         default=0.9,
                         type=float,
                         help="Adams beta parameters (b1, b2).")
     parser.add_argument("--b2",
                         default=0.999,
                         type=float,
                         help="Adams beta parameters (b1, b2).")
     parser.add_argument("--eps",
                         default=1e-6,
                         type=float,
                         help="Adams epsilon.")
     parser.add_argument("--weight_decay",
                         default=0.0,
                         type=float,
                         help="Weight decay.")
     parser.add_argument(
         "--amsgrad",
         default=False,
         help="Whether to use the AMSGrad variant of this algorithm from the "
         "paper: 'On the Convergence of Adam and Beyond'",
         action="store_true",
     )
     return parser
Example #3
File: train.py Project: agatan/vampire
 def add_model_specific_args(parent: HyperOptArgumentParser, root_dir):
     parser = HyperOptArgumentParser(strategy=parent.strategy, parents=[parent])
     parser.add_argument(
         "--data_root",
         default=os.path.join(root_dir, "livedoor-news-corpus", "text"),
     )
     parser.add_argument("--max_vocab", default=30000, type=int)
     parser.add_argument("--latent_dim", default=80, type=int)
     parser.add_argument("--encoder_num_layers", default=2, type=int)
     return parser
Example #4
 def add_scheduler_specific_args(
     parser: HyperOptArgumentParser, ) -> HyperOptArgumentParser:
     """
     Functions that parses scheduler specific arguments and adds 
         them to the Namespace
     :param parser: 
     """
     parser.add_argument("--last_epoch",
                         default=-1,
                         type=int,
                         help="Scheduler last epoch step")
     return parser
Example #5
File: train.py Project: agatan/vampire
def main():
    parent_parser = HyperOptArgumentParser(strategy="grid_search", add_help=False)
    logdir = "logs"
    parent_parser.add_argument(
        "--test_tube_save_path", default=os.path.join(logdir, "test_tube_data")
    )
    parent_parser.add_argument(
        "--model_save_path", default=os.path.join(logdir, "model_weights")
    )
    parent_parser.add_argument(
        "--experiment_name", default=os.path.join(logdir, "vampire")
    )
    parser = VAMPIRE.add_model_specific_args(parent_parser, ".")
    hparams = parser.parse_args()

    model = VAMPIRE(hparams)

    exp = Experiment(
        name=hparams.experiment_name,
        save_dir=hparams.test_tube_save_path,
        autosave=False,
    )
    exp.argparse(hparams)
    exp.save()

    trainer = Trainer(experiment=exp, fast_dev_run=False)
    trainer.fit(model)
Example #6
File: adamax.py Project: Unbabel/caption
 def add_optim_specific_args(
     parser: HyperOptArgumentParser, ) -> HyperOptArgumentParser:
     """
     Functions that parses Optimizer specific arguments and adds 
         them to the Namespace
     :param parser: 
     """
     parser = super(Adamax, Adamax).add_optim_specific_args(parser)
     parser.add_argument("--b1",
                         default=0.9,
                         type=float,
                         help="Adams beta parameters (b1, b2).")
     parser.add_argument("--b2",
                         default=0.999,
                         type=float,
                         help="Adams beta parameters (b1, b2).")
     parser.add_argument("--eps",
                         default=1e-6,
                         type=float,
                         help="Adams epsilon.")
     parser.add_argument("--weight_decay",
                         default=0.0,
                         type=float,
                         help="Weight decay.")
     return parser
Example #7
 def test_preprocessing_noise(self):
     dataset = MaestroDataset('/Volumes/Elements/Datasets/maestro-v2.0.0',
                              item_length=176400,
                              sampling_rate=44100,
                              mode='validation',
                              max_file_count=20,
                              shuffle_with_seed=123)
     parser = HyperOptArgumentParser(strategy='random_search',
                                     add_help=False)
     parser = ContrastivePredictiveSystem.add_model_specific_args(
         parser, root_dir='../')
     hparams = parser.parse_args()
     preprocessing = PreprocessingModule(hparams)
Example #8
def main(force_test=False):
    mp.set_start_method("spawn", force=True)
    parser = (argparse.ArgumentParser(description='Order embedding arguments')
              if not HYPERPARAM_SEARCH else HyperOptArgumentParser(
                  strategy='grid_search'))

    utils.parse_optimizer(parser)
    parse_encoder(parser)
    args = parser.parse_args()
    print(args)
    args.n_workers = 1

    if force_test:
        args.test = True

    # Currently due to parallelism in multi-gpu training, this code performs
    # sequential hyperparameter tuning.
    # All gpus are used for every run of training in hyperparameter search.
    if HYPERPARAM_SEARCH:
        for i, hparam_trial in enumerate(
                args.trials(HYPERPARAM_SEARCH_N_TRIALS)):
            print("Running hyperparameter search trial", i)
            print(hparam_trial)
            train_loop(hparam_trial)
    else:
        train_loop(args)
Example #9
 def add_optim_specific_args(
     parser: HyperOptArgumentParser, ) -> HyperOptArgumentParser:
     """
     Functions that parses Optimizer specific arguments and adds 
         them to the Namespace
     :param parser: 
     """
     parser.opt_list(
         "--learning_rate",
         default=5e-5,
         type=float,
         tunable=True,
         options=[1e-05, 3e-05, 5e-05, 8e-05, 1e-04],
         help="Optimizer learning rate.",
     )
     return parser
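opt_list registers a tunable option; the sampled values are drawn when trials are generated from the parsed namespace, as Example #8's args.trials(...) call shows. A minimal sketch of that loop for the hook above:

from test_tube import HyperOptArgumentParser

parser = HyperOptArgumentParser(strategy="random_search")
parser.opt_list("--learning_rate", default=5e-5, type=float, tunable=True,
                options=[1e-05, 3e-05, 5e-05, 8e-05, 1e-04])
hparams = parser.parse_args([])
for trial in hparams.trials(3):  # three sampled hyperparameter sets
    print(trial.learning_rate)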
Example #10
def test_add_to_parser():

    parser = HyperOptArgumentParser(strategy='grid_search')
    utils.add_to_parser(parser, 'test0', '4')
    utils.add_to_parser(parser, 'test1', '5')
    utils.add_to_parser(parser, 'test2', [1, 2, 3])

    namespace, _ = parser.parse_known_args(['--test0', '3'])

    # single argument
    assert namespace.test0 == '3'  # user defined arg
    assert namespace.test1 == '5'  # default arg

    # list argument
    assert namespace.test2 is None
    assert parser.opt_args['--test2'].opt_values == [1, 2, 3]
    assert parser.opt_args['--test2'].tunable
Example #11
 def add_scheduler_specific_args(
     parser: HyperOptArgumentParser, ) -> HyperOptArgumentParser:
     """
     Functions that parses Optimizer specific arguments and adds 
         them to the Namespace
     :param parent_parser: 
     """
     parser = super(WarmupConstant,
                    WarmupConstant).add_scheduler_specific_args(parser)
     parser.add_argument(
         "--warmup_steps",
         type=int,
         default=1,
         help=
         "Linearly increases learning rate from 0*learning_rate to 1*learning_rate over warmup_steps.",
     )
     return parser
Example #12
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('-s',
                        '--sweep',
                        action='store_true',
                        help='Run a hyperparameter sweep over all options')

    # DataModule args
    parser = MNISTDataModule.add_argparse_args(parser)

    # Trainer args (https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#trainer-flags)
    parser = Trainer.add_argparse_args(parser)
    # Set some sane defaults
    for x in parser._actions:
        if x.dest == 'gpus':
            x.default = 1
        if x.dest == 'max_epochs':
            x.default = 100

    # TestTube args - hyperparam parser & slurm info
    parser = HyperOptArgumentParser(strategy='grid_search',
                                    add_help=False,
                                    parents=[parser])
    parser.add_argument('--test_tube_exp_name', default='sweep_test')
    parser.add_argument('--log_path', default='./pytorch-slurm')

    # LightningModule args (hyperparameters)
    parser = MNISTClassifier.add_model_specific_args(parser)

    args = parser.parse_args()
    return args
Example #13
    def add_model_specific_args(parent_parser):
        parser = HyperOptArgumentParser(parents=[parent_parser],
                                        add_help=False)

        # want to optimize this parameter
        #parser.opt_list('--batch_size', type=int, default=16, options=[16, 10, 8], tunable=False)
        parser.opt_list('--learning_rate',
                        type=float,
                        default=0.001,
                        options=[1e-3, 1e-4, 1e-5],
                        tunable=True)
        parser.add_argument('--batch_size', type=int, default=10)
        # fixed arguments
        parser.add_argument('--output_img_freq', type=int, default=100)
        parser.add_argument('--unfreeze_epoch_no', type=int, default=0)

        parser.add_argument('--mse_loss', default=False, action='store_true')
        return parser
Example #14
def get_default_parser(strategy, root_dir):

    possible_model_names = list(AVAILABLE_MODELS.keys())
    parser = HyperOptArgumentParser(strategy=strategy, add_help=False)
    add_default_args(parser,
                     root_dir,
                     possible_model_names=possible_model_names,
                     rand_seed=SEED)
    return parser
Example #15
 def hparams(self):
     parser = HyperOptArgumentParser()
     # metric mode and monitor are hparams required by COMET models
     # and lightning trainer.
     parser.add_argument("--monitor", default="slot_error_rate")
     parser.add_argument("--metric_mode", default="min")
     parser = TransformerTagger.add_model_specific_args(parser)
     hparams, _ = parser.parse_known_args([])
     return hparams
Example #16
def add_data_args(parser: HyperOptArgumentParser) -> HyperOptArgumentParser:
    """
    Functions that parses dataset specific arguments/hyperparameters.
    :param hparams: HyperOptArgumentParser obj.

    Returns:
        - updated parser
    """
    parser.add_argument(
        "--data_type",
        default="csv",
        type=str,
        help="The type of the file containing the training/dev/test data.",
        choices=["csv"],
    )
    parser.add_argument(
        "--train_path",
        default="data/dummy_train.csv",
        type=str,
        help="Path to the file containing the train data.",
    )
    parser.add_argument(
        "--dev_path",
        default="data/dummy_test.csv",
        type=str,
        help="Path to the file containing the dev data.",
    )
    parser.add_argument(
        "--test_path",
        default="data/dummy_test.csv",
        type=str,
        help="Path to the file containing the test data.",
    )
    parser.add_argument(
        "--loader_workers",
        default=0,
        type=int,
        help="How many subprocesses to use for data loading. 0 means that \
            the data will be loaded in the main process.",
    )
    return parser
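Since add_data_args is a free function rather than a classmethod, composing it is direct. A minimal sketch, assuming the function above is in scope:

from test_tube import HyperOptArgumentParser

parser = HyperOptArgumentParser(add_help=False)
parser = add_data_args(parser)
hparams = parser.parse_args(["--train_path", "data/my_train.csv"])
print(hparams.train_path, hparams.loader_workers)  # data/my_train.csv 0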
Example #17
    def add_model_specific_args(
        cls, parser: HyperOptArgumentParser
    ) -> HyperOptArgumentParser:
        """ Parser for Estimator specific arguments/hyperparameters. 
        :param parser: HyperOptArgumentParser obj

        Returns:
            - updated parser
        """
        parser.add_argument(
            "--learning_rate", default=3e-05, type=float, help="Learning rate.",
        )
        # Data Args:
        parser.add_argument(
            "--train_csv",
            default="data/train_data.csv",
            type=str,
            help="Path to the file containing the train data.",
        )
        parser.add_argument(
            "--dev_csv",
            default="data/valid_data.csv",
            type=str,
            help="Path to the file containing the dev data.",
        )
        parser.add_argument(
            "--test_csv",
            default="data/valid_data.csv",
            type=str,
            help="Path to the file containing the test data.",
        )
        parser.add_argument(
            "--loader_workers",
            default=8,
            type=int,
            help="How many subprocesses to use for data loading. 0 means that \
                the data will be loaded in the main process.",
        )
        return parser
Example #18
 def add_scheduler_specific_args(
     parser: HyperOptArgumentParser,
 ) -> HyperOptArgumentParser:
     """
     Functions that parses Optimizer specific arguments and adds 
         them to the Namespace
     :param parent_parser: 
     """
     parser = super(LinearWarmup, LinearWarmup).add_scheduler_specific_args(parser)
     parser.add_argument(
         "--warmup_steps",
         type=int,
         default=1,
         help="Linearly increases learning rate from 0 to 1 over warmup_steps.",
     )
     parser.add_argument(
         "--num_training_steps",
         type=int,
         default=sys.maxsize,
         help="Linearly decreases learning rate from 1*learning_rate to 0*learning_rate over \
             remaining t_total - warmup_steps steps.",
     )
     return parser
Example #19
def load_yaml_args(parser: HyperOptArgumentParser, log):
    """ Function that load the args defined in a YAML file and replaces the values
        parsed by the HyperOptArgumentParser """
    old_args = vars(parser.parse_args())
    configs = old_args.get("config")
    if configs:
        with open(configs) as config_file:
            yaml_file = yaml.load(config_file, Loader=yaml.FullLoader)
        for key, value in yaml_file.items():
            if key in old_args:
                old_args[key] = value
            else:
                raise Exception(
                    "{} argument defined in {} is not valid!".format(
                        key, configs))
    else:
        log.warning("We recommend the usage of YAML files to keep track "
                    "of the hyperparameters during testing and training.")
    return TTNamespace(**old_args)
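load_yaml_args looks up a "config" key in the parsed args, so the parser must define --config, and every YAML key must match an already-defined flag. A minimal usage sketch (the file name and logger are assumptions):

import logging
from test_tube import HyperOptArgumentParser

log = logging.getLogger(__name__)

parser = HyperOptArgumentParser()
parser.add_argument("--config", default=None, type=str)
parser.add_argument("--learning_rate", default=3e-05, type=float)

# running with `--config hparams.yaml` where the file contains
# `learning_rate: 0.0001` replaces the default; an unknown key raises
hparams = load_yaml_args(parser, log)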
Example #20
def get_all_params():

    # raise an error unless exactly the three config-file flags and their values are given
    if len(sys.argv[1:]) != 6:
        raise ValueError(
            'No command line arguments allowed other than config file names')

    def add_to_parser(parser, arg_name, value):
        if arg_name == 'expt_ids' or arg_name == 'expt_ids_to_keep':
            # treat expt_ids differently, want to parse full lists as one
            if isinstance(value, list):
                value = ';'.join(value)
            parser.add_argument('--' + arg_name, default=value)
        elif isinstance(value, list):
            parser.opt_list('--' + arg_name, options=value, tunable=True)
        else:
            parser.add_argument('--' + arg_name, default=value)

    # create parser
    parser = HyperOptArgumentParser(strategy='grid_search')
    parser.add_argument('--data_config', type=str)
    parser.add_argument('--model_config', type=str)
    parser.add_argument('--train_config', type=str)

    namespace, extra = parser.parse_known_args()

    # add arguments from all configs
    configs = [
        namespace.data_config, namespace.model_config, namespace.train_config
    ]
    for config in configs:
        config_dict = yaml.safe_load(open(config))
        for (key, value) in config_dict.items():
            add_to_parser(parser, key, value)

    return parser.parse_args()
Example #21
    def add_model_specific_args(parent_parser, root_dir):  # pragma: no cover
        """
        Parameters you define here will be available to your model through self.hparams
        :param parent_parser:
        :param root_dir:
        :return:
        """
        parser = HyperOptArgumentParser(strategy=parent_parser.strategy,
                                        parents=[parent_parser])

        # param overwrites
        # parser.set_defaults(gradient_clip=5.0)

        # network params
        parser.add_argument('--in_features', default=28 * 28, type=int)
        parser.add_argument('--out_features', default=10, type=int)
        parser.add_argument(
            '--hidden_dim', default=50000,
            type=int)  # use 500 for CPU, 50000 for GPU to see speed difference
        parser.opt_list('--drop_prob',
                        default=0.2,
                        options=[0.2, 0.5],
                        type=float,
                        tunable=False)

        # data
        parser.add_argument('--data_root',
                            default=os.path.join(root_dir, 'mnist'),
                            type=str)

        # training params (opt)
        parser.opt_list('--learning_rate',
                        default=0.001 * 8,
                        type=float,
                        options=[0.0001, 0.0005, 0.001, 0.005],
                        tunable=False)
        parser.opt_list('--optimizer_name',
                        default='adam',
                        type=str,
                        options=['adam'],
                        tunable=False)

        # if using 2 nodes with 4 gpus each, the batch size here (256) will be 256 / (2*4) = 32 per gpu
        parser.opt_list(
            '--batch_size',
            default=256 * 8,
            type=int,
            options=[32, 64, 128, 256],
            tunable=False,
            help=
            'batch size will be divided over all the gpus being used across all nodes'
        )
        return parser
Example #22
    cluster.optimize_parallel_cluster_gpu(main,
                                          nb_trials=hyperparams.nb_hopt_trials,
                                          job_name=hyperparams.experiment_name)


if __name__ == '__main__':

    # use default args
    root_dir = os.path.dirname(os.path.realpath(__file__))
    demo_log_dir = os.path.join(root_dir, 'pt_lightning_demo_logs')

    checkpoint_dir = os.path.join(demo_log_dir, 'model_weights')
    test_tube_dir = os.path.join(demo_log_dir, 'test_tube_data')
    slurm_out_dir = os.path.join(demo_log_dir, 'slurm_scripts')

    parent_parser = HyperOptArgumentParser(strategy='grid_search',
                                           add_help=False)

    # cluster args not defined inside the model
    parent_parser.add_argument('--gpu_partition',
                               type=str,
                               help='consult your cluster manual')

    # TODO: make 1 param
    parent_parser.add_argument('--per_experiment_nb_gpus',
                               type=int,
                               default=2,
                               help='how many gpus to use in a node')
    parent_parser.add_argument('--gpus',
                               type=str,
                               default='-1',
                               help='how many gpus to use in the node')
Example #23
from model import GNNModel
from test_tube import Experiment
from test_tube import HyperOptArgumentParser
from pytorch_lightning.callbacks import ModelCheckpoint
import os
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.logging import TestTubeLogger
import uuid 
from datetime import datetime 
import random 




if __name__ == '__main__':
    parser = HyperOptArgumentParser()
    # runtime params 
    parser.add_argument('--dataset', type=str, choices=['cora','citeseer','pubmed','amazon'], default='cora') 
    parser.add_argument('--percentage', type=float) 
    parser.add_argument('--log-dir', type=str, default='log')
    parser.add_argument('--ckpt-name', type=str)
    parser.add_argument('--seed', type=int, default=2020) 
    parser.add_argument('--opt', default='adam', choices=['adam','lbfgs'])
    parser.add_argument("--gpus", type=str, default='0',
            help="gpu")
    parser.add_argument("--lr", type=float, default=1e-2,
            help="learning rate")
    parser.add_argument("--weight-decay", type=float, default=5e-6,
            help="Weight for L2 loss")
    parser.add_argument("--n-epochs", type=int, default=200,
            help="number of training epochs")
Example #24
    def add_model_specific_args(parent_parser, root_dir):  # pragma: no cover
        """
        Parameters you define here will be available to your model through self.hparams
        """
        parser = HyperOptArgumentParser(strategy=parent_parser.strategy,
                                        parents=[parent_parser])

        # param overwrites
        # parser.set_defaults(gradient_clip=5.0)

        # network params
        parser.opt_list('--local',
                        default=3,
                        options=[3, 5, 7],
                        type=int,
                        tunable=True)
        parser.opt_list('--n_kernels',
                        default=32,
                        options=[32, 50, 100],
                        type=int,
                        tunable=True)
        parser.add_argument('-w_kernel', type=int, default=1)
        parser.opt_list('--d_model',
                        type=int,
                        default=512,
                        options=[512],
                        tunable=False)
        parser.opt_list('--d_inner',
                        type=int,
                        default=2048,
                        options=[2048],
                        tunable=False)
        parser.opt_list('--d_k',
                        type=int,
                        default=64,
                        options=[64],
                        tunable=False)
        parser.opt_list('--d_v',
                        type=int,
                        default=64,
                        options=[64],
                        tunable=False)
        parser.opt_list('--n_head',
                        type=int,
                        default=8,
                        options=[8],
                        tunable=False)
        parser.opt_list('--n_layers',
                        type=int,
                        default=6,
                        options=[6],
                        tunable=False)
        parser.opt_list('--drop_prob',
                        type=float,
                        default=0.1,
                        options=[0.1, 0.2, 0.5],
                        tunable=False)

        # arguments from dataset
        parser.add_argument('--data_name', type=str)
        parser.add_argument('--data_dir', default='./data', type=str)

        parser.add_argument('--n_multiv', type=int)
        parser.opt_list('--window',
                        default=64,
                        type=int,
                        options=[32, 64, 128],
                        tunable=True)
        parser.opt_list('--horizon',
                        default=3,
                        type=int,
                        options=[3, 6, 12, 24],
                        tunable=True)

        # training params (opt)
        parser.opt_list('--learning_rate',
                        default=0.005,
                        type=float,
                        options=[0.0001, 0.0005, 0.001, 0.005, 0.008],
                        tunable=True)
        parser.opt_list('--optimizer_name',
                        default='adam',
                        type=str,
                        options=['adam'],
                        tunable=False)
        parser.opt_list('--criterion',
                        default='mse_loss',
                        type=str,
                        options=['l1_loss', 'mse_loss'],
                        tunable=False)

        # if using 2 nodes with 4 gpus each, the batch size here (256) will be 256 / (2*4) = 32 per gpu
        parser.opt_list(
            '--batch_size',
            default=16,
            type=int,
            options=[16, 32, 64, 128, 256],
            tunable=False,
            help=
            'batch size will be divided over all the gpus being used across all nodes'
        )
        return parser
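A detail worth noting in Example #24: opt_list entries with tunable=False are held at their default during a sweep, while tunable=True entries are varied. A sketch of that behavior, under the assumption that trial generation only samples tunable options:

from test_tube import HyperOptArgumentParser

parser = HyperOptArgumentParser(strategy="grid_search")
parser.opt_list("--local", default=3, type=int, options=[3, 5, 7], tunable=True)
parser.opt_list("--d_model", default=512, type=int, options=[512], tunable=False)
hparams = parser.parse_args([])
for trial in hparams.trials(3):
    print(trial.local, trial.d_model)  # local varies over 3/5/7, d_model stays 512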
Example #25
    # ------------------------
    # 4 START TRAINING
    # ------------------------
    trainer.fit(model)


if __name__ == '__main__':

    # dirs
    root_dir = os.path.dirname(os.path.realpath(__file__))
    demo_log_dir = os.path.join(root_dir, 'pt_lightning_demo_logs')
    checkpoint_dir = os.path.join(demo_log_dir, 'model_weights')
    test_tube_dir = os.path.join(demo_log_dir, 'test_tube_data')

    # although we use HyperOptArgumentParser, we are using it only as argparse right now
    parent_parser = HyperOptArgumentParser(strategy='grid_search',
                                           add_help=False)

    # gpu args
    parent_parser.add_argument('--gpus',
                               type=str,
                               default='-1',
                               help='how many gpus to use in the node. '
                               'value -1 uses all the gpus on the node')
    parent_parser.add_argument('--test_tube_save_path',
                               type=str,
                               default=test_tube_dir,
                               help='where to save logs')
    parent_parser.add_argument('--model_save_path',
                               type=str,
                               default=checkpoint_dir,
                               help='where to save model')
Example #26
        save_top_k=hparams.save_top_k,
        verbose=True,
        monitor=hparams.monitor,
        period=1,
        mode=hparams.metric_mode,
    )
    trainer.checkpoint_callback = checkpoint_callback

    trainer.fit(model)


if __name__ == "__main__":

    parser = HyperOptArgumentParser(
        strategy="random_search",
        description="Minimalist BERT Classifier",
        add_help=True,
    )
    parser.add_argument("--seed", type=int, default=3, help="Training seed.")
    parser.add_argument(
        "--save_top_k",
        default=1,
        type=int,
        help=
        "The best k models according to the quantity monitored will be saved.",
    )
    # Early Stopping
    parser.add_argument("--monitor",
                        default="val_acc",
                        type=str,
                        help="Quantity to monitor.")
Example #27
    def add_model_specific_args(
            cls, parser: HyperOptArgumentParser) -> HyperOptArgumentParser:
        """ Parser for Estimator specific arguments/hyperparameters. 
        :param parser: HyperOptArgumentParser obj

        Returns:
            - updated parser
        """
        parser = super(Tagger, Tagger).add_model_specific_args(parser)
        parser.add_argument(
            "--tag_set",
            type=str,
            default="L,U,T",
            help="Task tags we want to use.\
                 Note that the 'default' label should appear first",
        )
        # Loss
        parser.add_argument(
            "--loss",
            default="cross_entropy",
            type=str,
            help="Loss function to be used.",
            choices=["cross_entropy"],
        )
        parser.add_argument(
            "--class_weights",
            default="ignore",
            type=str,
            help=
            'Weights for each of the classes we want to tag (e.g: "1.0,7.0,8.0").',
        )
        ## Data args:
        parser.add_argument(
            "--data_type",
            default="csv",
            type=str,
            help="The type of the file containing the training/dev/test data.",
            choices=["csv"],
        )
        parser.add_argument(
            "--train_path",
            default="data/dummy_train.csv",
            type=str,
            help="Path to the file containing the train data.",
        )
        parser.add_argument(
            "--dev_path",
            default="data/dummy_test.csv",
            type=str,
            help="Path to the file containing the dev data.",
        )
        parser.add_argument(
            "--test_path",
            default="data/dummy_test.csv",
            type=str,
            help="Path to the file containing the test data.",
        )
        parser.add_argument(
            "--loader_workers",
            default=0,
            type=int,
            help=("How many subprocesses to use for data loading. 0 means that"
                  "the data will be loaded in the main process."),
        )
        # Metric args:
        parser.add_argument(
            "--ignore_first_title",
            default=False,
            help="When used, this flag ignores T tags in the first position.",
            action="store_true",
        )
        parser.add_argument(
            "--ignore_last_tag",
            default=False,
            help="When used, this flag ignores S tags in the last position.",
            action="store_true",
        )
        return parser
Example #28
import torch
import torch.nn.functional as F
from test_tube import Experiment, HyperOptArgumentParser, HyperParamOptimizer
from torch import distributions, nn
from torch.autograd import Variable
from torch.nn import Linear
from torch.optim import Adam

from utils import ReplayBuffer, Step, np_to_var

# In the paper, the ant was dropped into 9 envs, but its state and action
# spaces were the same. No transfer learning yet.

exp = Experiment("meta learning shared hierarchies", save_dir="logs")

parser = HyperOptArgumentParser(strategy="random_search")
parser.opt_list(
    "--batch_size",
    default=128,
    type=int,
    tunable=True,
    options=[2**n for n in range(5, 10)],
)

args = parser.parse_args()

args.max_steps = 1000
args.subpolicy_duration = 200
args.num_policies = 10
args.max_buffer_size = 1_000_000
args.env_names = ["Ant-v2"]
Example #29
        # distributed_backend="dp",
        # overfit_pct=0.01
    )

    trainer.fit(model)


if __name__ == "__main__":

    SEED = 2538
    torch.manual_seed(SEED)
    np.random.seed(SEED)

    # use default args given by lightning
    root_dir = os.getcwd()
    parent_parser = HyperOptArgumentParser(strategy="random_search",
                                           add_help=False)
    add_default_args(parent_parser, root_dir, rand_seed=SEED)
    parent_parser.add_argument(
        "--dist_backend",
        type=str,
        default="dp",
        help=
        "When using multiple GPUs set Trainer(distributed_backend=dp) (or ddp)",
    )
    # allow model to overwrite or extend args
    parser = AutoregressiveFaceVAE.add_model_specific_args(
        parent_parser, root_dir)

    hyperparams = parser.parse_args()
    print(hyperparams)
    # train model
Example #30
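This last snippet begins inside the training callback; a plausible opening for it, assuming the callback receives one sampled hyperparameter set and logs to its own Experiment (the name and save_dir come from the argparser below):

import tensorflow as tf
from test_tube import Experiment, HyperOptArgumentParser

def train(hparams, *args):
    # *args absorbs anything extra test_tube passes alongside the trial
    exp = Experiment(name=hparams.test_tube_exp_name,
                     save_dir=hparams.log_path)
    exp.argparse(hparams)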
    # define tensorflow graph
    x = tf.placeholder(dtype=tf.int32, name='x')
    y = tf.placeholder(dtype=tf.int32, name='y')
    out = x * y

    sess = tf.Session()

    # Run the tf op
    for train_step in range(0, 100):
        output = sess.run(out, feed_dict={x: hparams.x_val, y: hparams.y_val})
        exp.log({'fake_err': output})

    # save exp when we're done
    exp.save()


# set up our argparser and make the y_val tunable
parser = HyperOptArgumentParser(strategy='random_search')
parser.add_argument('--test_tube_exp_name', default='my_test')
parser.add_argument('--log_path', default='/Users/waf/Desktop/test')
parser.opt_list('--y_val', default=12, options=[1, 2, 3, 4], tunable=True)
parser.opt_list('--x_val', default=12, options=[20, 12, 30, 45], tunable=True)
hyperparams = parser.parse_args()

# optimize on 4 gpus at the same time
# each gpu will get 1 experiment with a set of hyperparams
hyperparams.optimize_parallel_gpu(train,
                                  gpu_ids=['1', '0', '3', '2'],
                                  nb_trials=4,
                                  nb_workers=4)