Example #1
def parse():
    parser = argparse.ArgumentParser()
    parser.add_argument('-gpu', type=int, default=None, help='GPU to use')
    # OPTIMIZATION PARAMETERS
    opt_group = parser.add_argument_group('OPTIMIZATION PARAMETERS')
    opt_group.add_argument('-epochs', type=int, default=5000)
    opt_group.add_argument('-lr',
                           type=float,
                           default=0.001,
                           help='Step size for gradient descent.')
    opt_group.add_argument(
        '-eval_metric',
        type=str,
        default='loop_dev_loss',
        help='Metric for model selection and early stopping.')
    opt_group.add_argument(
        '-patience',
        type=int,
        default=5,
        help=
        'How many epochs to allow for no improvement in eval metric before early stopping.'
    )
    opt_group.add_argument(
        '-warmup',
        type=int,
        default=0,
        help='Number of epochs to wait before enacting early stopping policy.')
    opt_group.add_argument(
        '-skip_eval_sim',
        action='store_true',
        help='Skip running the simulator during the evaluation phase of training.')

    #################
    # DATA PARAMETERS
    data_group = parser.add_argument_group('DATA PARAMETERS')
    data_group.add_argument(
        '-nsteps',
        type=int,
        default=32,
        help='Number of steps for open loop during training.')
    data_group.add_argument('-system',
                            type=str,
                            default='Reno_ROM40',
                            choices=list(systems.keys()),
                            help='select particular dataset with keyword')
    data_group.add_argument(
        '-nsim',
        type=int,
        default=10000,
        help='Number of time steps for full dataset. (ntrain + ndev + ntest) '
        'train, dev, and test will be split evenly from contiguous, sequential, '
        'non-overlapping chunks of nsim datapoints, e.g. first nsim/3 are train, '
        'next nsim/3 are dev and next nsim/3 simulation steps are test points. '
        'None will use a default nsim from the selected dataset or emulator')
    data_group.add_argument('-norm',
                            nargs='+',
                            default=['U', 'D', 'Y'],
                            choices=['U', 'D', 'Y'],
                            help='List of sequences to max-min normalize')
    data_group.add_argument(
        '-batch_type',
        default='batch',
        choices=['mh', 'batch'],
        help='option for creating batches of time series data')

    ##################
    # MODEL PARAMETERS
    model_group = parser.add_argument_group('MODEL PARAMETERS')
    model_group.add_argument('-ssm_type',
                             type=str,
                             choices=['blackbox', 'block'],
                             default='blackbox')
    model_group.add_argument(
        '-residual',
        type=int,
        choices=[0, 1],
        default=0,
        help=
        'Whether to model state space output as residual from previous state')
    model_group.add_argument('-xoe',
                             type=str,
                             choices=[k for k in operators],
                             default='addmul',
                             help='Block aggregation operator for fX and fE')
    model_group.add_argument('-xou',
                             type=str,
                             choices=[k for k in operators],
                             default='addmul',
                             help='Block aggregation operator for fX and fU')
    model_group.add_argument('-xod',
                             type=str,
                             choices=[k for k in operators],
                             default='addmul',
                             help='Block aggregation operator for fX and fD')
    model_group.add_argument('-xmin',
                             type=float,
                             default=-0.2,
                             help='Constraint on minimum state value')
    model_group.add_argument('-xmax',
                             type=float,
                             default=1.2,
                             help='Constraint on maximum state value')
    model_group.add_argument(
        '-dxudmin',
        type=float,
        default=-0.05,
        help='Constraint on contribution of U and D to state')
    model_group.add_argument(
        '-dxudmax',
        type=float,
        default=0.05,
        help='Constraint on contribution of U and D to state')
    model_group.add_argument(
        '-koopman',
        type=int,
        default=0,
        help=
        'Whether to enforce regularization so that fy is inverse of state estimator'
    )

    ##################
    # fxud PARAMETERS
    fxud_group = parser.add_argument_group('fxud PARAMETERS')
    fxud_group.add_argument('-fxud',
                            type=str,
                            default='mlp',
                            choices=[k for k in blocks],
                            help='Main transition dynamics block type.')
    fxud_group.add_argument('-fxud_hidden',
                            type=int,
                            default=20,
                            help='fxud hidden state dimension.')
    fxud_group.add_argument(
        '-fxud_layers',
        type=int,
        default=2,
        help='Number of hidden layers of single time-step state transition')
    fxud_group.add_argument('-fxud_map',
                            type=str,
                            choices=[k for k in maps],
                            default='linear',
                            help='Linear map fxud uses as subcomponents')
    fxud_group.add_argument('-fxud_bias',
                            action='store_true',
                            help='Whether to use bias in the fxud network.')
    fxud_group.add_argument('-fxud_act',
                            choices=[k for k in activations],
                            default='softexp',
                            help='Activation function for fxud network')
    fxud_group.add_argument('-fxud_sigma_min', type=float, default=0.1)
    fxud_group.add_argument('-fxud_sigma_max', type=float, default=1.0)

    ##################
    # FX PARAMETERS
    fx_group = parser.add_argument_group('FX PARAMETERS')
    fx_group.add_argument('-fx',
                          type=str,
                          default='mlp',
                          choices=[k for k in blocks],
                          help='Main transition dynamics block type.')
    fx_group.add_argument('-fx_hidden',
                          type=int,
                          default=20,
                          help='fx hidden state dimension.')
    fx_group.add_argument(
        '-fx_layers',
        type=int,
        default=2,
        help='Number of hidden layers of single time-step state transition')
    fx_group.add_argument('-fx_map',
                          type=str,
                          choices=[k for k in maps],
                          default='linear',
                          help='Linear map fx uses as subcomponents')
    fx_group.add_argument('-fx_bias',
                          action='store_true',
                          help='Whether to use bias in the fx network.')
    fx_group.add_argument('-fx_act',
                          choices=[k for k in activations],
                          default='softexp',
                          help='Activation function for fx network')
    fx_group.add_argument('-fx_sigma_min', type=float, default=0.1)
    fx_group.add_argument('-fx_sigma_max', type=float, default=1.0)

    ##################
    # FU PARAMETERS
    fu_group = parser.add_argument_group('fu PARAMETERS')
    fu_group.add_argument('-fu',
                          type=str,
                          default='mlp',
                          choices=[k for k in blocks],
                          help='Main transition dynamics block type.')
    fu_group.add_argument('-fu_hidden',
                          type=int,
                          default=20,
                          help='fu hidden state dimension.')
    fu_group.add_argument(
        '-fu_layers',
        type=int,
        default=2,
        help='Number of hidden layers of single time-step state transition')
    fu_group.add_argument('-fu_map',
                          type=str,
                          choices=[k for k in maps],
                          default='linear',
                          help='Linear map fu uses as subcomponents')
    fu_group.add_argument('-fu_bias',
                          action='store_true',
                          help='Whether to use bias in the fu network.')
    fu_group.add_argument('-fu_act',
                          choices=[k for k in activations],
                          default='softexp',
                          help='Activation function for fu network')
    fu_group.add_argument('-fu_sigma_min', type=float, default=0.1)
    fu_group.add_argument('-fu_sigma_max', type=float, default=1.0)

    ##################
    # fd PARAMETERS
    fd_group = parser.add_argument_group('fd PARAMETERS')
    fd_group.add_argument('-fd',
                          type=str,
                          default='mlp',
                          choices=[k for k in blocks],
                          help='Main transition dynamics block type.')
    fd_group.add_argument('-fd_hidden',
                          type=int,
                          default=20,
                          help='fd hidden state dimension.')
    fd_group.add_argument('-fd_layers',
                          type=int,
                          default=2,
                          help='Number of hidden layers for fd')
    fd_group.add_argument('-fd_map',
                          type=str,
                          choices=[k for k in maps],
                          default='linear',
                          help='Linear map fd uses as subcomponents')
    fd_group.add_argument('-fd_bias',
                          action='store_true',
                          help='Whether to use bias in the fd network.')
    fd_group.add_argument('-fd_act',
                          choices=[k for k in activations],
                          default='softexp',
                          help='Activation function for fd network')
    fd_group.add_argument('-fd_sigma_min', type=float, default=0.1)
    fd_group.add_argument('-fd_sigma_max', type=float, default=1.0)

    ##################
    # fe PARAMETERS
    fe_group = parser.add_argument_group('fe PARAMETERS')
    fe_group.add_argument('-fe',
                          type=str,
                          default=None,
                          choices=[k for k in blocks],
                          help='Main transition dynamics block type.')
    fe_group.add_argument('-fe_hidden',
                          type=int,
                          default=20,
                          help='fe hidden state dimension.')
    fe_group.add_argument('-fe_layers',
                          type=int,
                          default=2,
                          help='Number of hidden layers for fe')
    fe_group.add_argument('-fe_map',
                          type=str,
                          choices=[k for k in maps],
                          default='linear',
                          help='Linear map fe uses as subcomponents')
    fe_group.add_argument('-fe_bias',
                          action='store_true',
                          help='Whether to use bias in the fe network.')
    fe_group.add_argument('-fe_act',
                          choices=[k for k in activations],
                          default='softexp',
                          help='Activation function for fe network')
    fe_group.add_argument('-fe_sigma_min', type=float, default=0.1)
    fe_group.add_argument('-fe_sigma_max', type=float, default=1.0)

    ##################
    # fy PARAMETERS
    fy_group = parser.add_argument_group('fy PARAMETERS')
    fy_group.add_argument('-fy',
                          type=str,
                          default='mlp',
                          choices=[k for k in blocks],
                          help='Main transition dynamics block type.')
    fy_group.add_argument('-fy_hidden',
                          type=int,
                          default=20,
                          help='fy hidden state dimension.')
    fy_group.add_argument('-fy_layers',
                          type=int,
                          default=2,
                          help='Number of hidden layers for fy')
    fy_group.add_argument('-fy_map',
                          type=str,
                          choices=[k for k in maps],
                          default='linear',
                          help='Linear map fy uses as subcomponents')
    fy_group.add_argument('-fy_bias',
                          action='store_true',
                          help='Whether to use bias in the fy network.')
    fy_group.add_argument('-fy_act',
                          choices=[k for k in activations],
                          default='softexp',
                          help='Activation function for fy network')
    fy_group.add_argument('-fy_sigma_min', type=float, default=0.1)
    fy_group.add_argument('-fy_sigma_max', type=float, default=1.0)

    ##################
    # STATE ESTIMATOR PARAMETERS
    est_group = parser.add_argument_group('STATE ESTIMATOR PARAMETERS')
    est_group.add_argument('-est',
                           type=str,
                           choices=[k for k in estimators],
                           default='mlp')
    est_group.add_argument('-est_keys',
                           nargs='+',
                           default=['Yp'],
                           help='Keys defining input to the state estimator.')
    est_group.add_argument(
        '-est_input_window',
        type=int,
        default=1,
        help=
        "Number of previous time steps measurements to include in state estimator input"
    )
    est_group.add_argument('-est_hidden',
                           type=int,
                           default=20,
                           help='estimator hidden state dimension.')
    est_group.add_argument(
        '-est_layers',
        type=int,
        default=2,
        help='Number of hidden layers for state estimator network')
    est_group.add_argument(
        '-est_map',
        type=str,
        choices=[k for k in maps],
        default='linear',
        help='Linear map state estimator uses as subcomponents')
    est_group.add_argument(
        '-est_bias',
        action='store_true',
        help='Whether to use bias in the state estimator network.')
    est_group.add_argument(
        '-est_act',
        choices=[k for k in activations],
        default='softexp',
        help='Activation function for state estimator network')
    est_group.add_argument('-est_sigma_min', type=float, default=0.1)
    est_group.add_argument('-est_sigma_max', type=float, default=1.0)

    ##################
    # Weight PARAMETERS
    weight_group = parser.add_argument_group('WEIGHT PARAMETERS')
    weight_group.add_argument('-Q_con_x',
                              type=float,
                              default=1.0,
                              help='Hidden state constraints penalty weight.')
    weight_group.add_argument(
        '-Q_dx',
        type=float,
        default=0.2,
        help='Penalty weight on hidden state difference in one time step.')
    weight_group.add_argument('-Q_sub',
                              type=float,
                              default=0.2,
                              help='Linear maps regularization weight.')
    weight_group.add_argument('-Q_y',
                              type=float,
                              default=1.0,
                              help='Output tracking penalty weight')
    weight_group.add_argument(
        '-Q_e',
        type=float,
        default=1.0,
        help='State estimator hidden prediction penalty weight')
    weight_group.add_argument(
        '-Q_con_fdu',
        type=float,
        default=0.0,
        help='Penalty weight on control actions and disturbances.')

    ####################
    # LOGGING PARAMETERS
    log_group = parser.add_argument_group('LOGGING PARAMETERS')
    log_group.add_argument(
        '-savedir',
        type=str,
        default='test',
        help="Directory where the trained model and plots will be saved.")
    log_group.add_argument('-verbosity',
                           type=int,
                           default=1,
                           help="How many epochs in between status updates")
    log_group.add_argument(
        '-exp',
        type=str,
        default='test',
        help='All runs will be grouped under this experiment name.')
    log_group.add_argument(
        '-location',
        type=str,
        default='mlruns',
        help='Directory where mlflow experiment tracking data is written')
    log_group.add_argument(
        '-run',
        type=str,
        default='neuromancer',
        help='Name describing what the experiment run was about.')
    log_group.add_argument('-logger',
                           type=str,
                           choices=['mlflow', 'stdout'],
                           default='stdout',
                           help='Logging setup to use')
    log_group.add_argument(
        '-train_visuals',
        action='store_true',
        help='Whether to create visuals, e.g. animations during training loop')
    log_group.add_argument(
        '-trace_movie',
        action='store_true',
        help='Whether to plot an animation of the simulated and true dynamics')
    return parser
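
A minimal usage sketch for the parser above, assuming the registry dictionaries referenced in its choices (systems, operators, blocks, maps, activations, estimators) are imported at module level as in the original script; the override values are illustrative only.

if __name__ == '__main__':
    # Build the parser and override a few defaults; all other flags keep their defaults.
    parser = parse()
    args = parser.parse_args(['-epochs', '1000', '-lr', '0.0005', '-nsteps', '16'])
    print(args.epochs, args.lr, args.nsteps, args.ssm_type)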
Example #2
def get_parser(parser=None):
    if parser is None:
        parser = get_base_parser()

    # optimization parameters
    opt_group = parser.add_argument_group("OPTIMIZATION PARAMETERS")
    opt_group.add_argument("-epochs", type=int, default=100)
    opt_group.add_argument("-lr",
                           type=float,
                           default=0.001,
                           help="Step size for gradient descent.")
    opt_group.add_argument(
        "-eval_metric",
        type=str,
        default="loop_dev_ref_loss",
        help="Metric for model selection and early stopping.",
    )
    opt_group.add_argument(
        "-patience",
        type=int,
        default=30,
        help=
        "How many epochs to allow for no improvement in eval metric before early stopping.",
    )
    opt_group.add_argument(
        "-warmup",
        type=int,
        default=100,
        help="Number of epochs to wait before enacting early stopping policy.",
    )
    opt_group.add_argument(
        "-skip_eval_sim",
        action="store_true",
        help="Skip running the simulator during the evaluation phase of training.",
    )

    # data parameters
    data_group = parser.add_argument_group("DATA PARAMETERS")
    data_group.add_argument(
        "-system",
        type=str,
        default='Reno_full',
        choices=list(systems.keys()),
        help="select particular dataset with keyword",
    )
    data_group.add_argument(
        "-nsim",
        type=int,
        default=18000,
        help="Number of time steps for full dataset. (ntrain + ndev + ntest) "
        "train, dev, and test will be split evenly from contiguous, sequential, "
        "non-overlapping chunks of nsim datapoints, e.g. first nsim/3 are train, "
        "next nsim/3 are dev and next nsim/3 simulation steps are test points. "
        "None will use a default nsim from the selected dataset or emulator",
    )
    data_group.add_argument(
        "-nsteps",
        type=int,
        default=8,
        help="Number of steps for open loop during training.",
    )
    data_group.add_argument(
        "-norm",
        nargs="+",
        default=["U", "D", "Y"],
        choices=["U", "D", "Y"],
        help="List of sequences to max-min normalize",
    )
    data_group.add_argument("-data_seed",
                            type=int,
                            default=408,
                            help="Random seed used for simulated data")

    # model parameters
    model_group = parser.add_argument_group("MODEL PARAMETERS")
    model_group.add_argument(
        "-ssm_type",
        type=str,
        choices=["blackbox", "hw", "hammerstein", "blocknlin", "linear"],
        default="hammerstein",
    )
    model_group.add_argument("-nx_hidden",
                             type=int,
                             default=6,
                             help="Number of hidden states per output")
    model_group.add_argument(
        "-n_layers",
        type=int,
        default=2,
        help="Number of hidden layers of single time-step state transition",
    )
    model_group.add_argument(
        "-state_estimator",
        type=str,
        choices=["rnn", "mlp", "linear", "residual_mlp", "fully_observable"],
        default="mlp",
    )
    model_group.add_argument(
        "-estimator_input_window",
        type=int,
        default=8,
        help=
        "Number of previous time steps measurements to include in state estimator input",
    )
    model_group.add_argument(
        "-nonlinear_map",
        type=str,
        default="mlp",
        choices=["mlp", "rnn", "pytorch_rnn", "linear", "residual_mlp"],
    )
    model_group.add_argument(
        "-bias",
        action="store_true",
        help="Whether to use bias in the neural network models.",
    )
    model_group.add_argument(
        "-activation",
        choices=list(activations.keys()),
        default="gelu",
        help="Activation function for neural networks",
    )
    model_group.add_argument(
        "-seed",
        type=int,
        default=408,
        help="Random seed used for weight initialization.")

    # linear parameters
    linear_group = parser.add_argument_group("LINEAR PARAMETERS")
    linear_group.add_argument("-linear_map",
                              type=str,
                              choices=list(slim.maps.keys()),
                              default="linear")
    linear_group.add_argument("-sigma_min", type=float, default=0.9)
    linear_group.add_argument("-sigma_max", type=float, default=1.0)

    # weight parameters
    weight_group = parser.add_argument_group("WEIGHT PARAMETERS")
    weight_group.add_argument(
        "-Q_con_x",
        type=float,
        default=1.0,
        help="Hidden state constraints penalty weight.",
    )
    weight_group.add_argument(
        "-Q_dx",
        type=float,
        default=1.0,
        help="Penalty weight on hidden state difference in one time step.",
    )
    weight_group.add_argument("-Q_sub",
                              type=float,
                              default=0.1,
                              help="Linear maps regularization weight.")
    weight_group.add_argument("-Q_y",
                              type=float,
                              default=1.0,
                              help="Output tracking penalty weight")
    weight_group.add_argument(
        "-Q_e",
        type=float,
        default=1.0,
        help="State estimator hidden prediction penalty weight",
    )
    weight_group.add_argument(
        "-Q_con_fdu",
        type=float,
        default=0.0,
        help="Penalty weight on control actions and disturbances.",
    )

    return parser
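
A minimal usage sketch for get_parser above, passing a fresh ArgumentParser so the example does not depend on get_base_parser() (not shown here); it assumes argparse and the registries used in the defaults (systems, activations, slim) are imported at module level, and the overrides are illustrative only.

if __name__ == "__main__":
    # Extend a fresh parser with the argument groups defined above, then parse overrides.
    parser = get_parser(argparse.ArgumentParser())
    args = parser.parse_args(["-epochs", "200", "-ssm_type", "blackbox"])
    print(args.epochs, args.ssm_type, args.nx_hidden)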
Example #3
def parse():
    parser = argparse.ArgumentParser()
    parser.add_argument('-gpu', type=int, default=None,
                        help="GPU to use")
    # OPTIMIZATION PARAMETERS
    opt_group = parser.add_argument_group('OPTIMIZATION PARAMETERS')
    opt_group.add_argument('-epochs', type=int, default=100)
    opt_group.add_argument('-lr', type=float, default=0.001,
                           help='Step size for gradient descent.')
    opt_group.add_argument('-clip', type=float, default=2.0,
                           help='Value to clip norm of gradients.')
    opt_group.add_argument('-eval_metric', type=str, default='loop_dev_loss',
                           help='Metric for model selection and early stopping.')
    opt_group.add_argument('-patience', type=int, default=5,
                           help='How many epochs to allow for no improvement in eval metric before early stopping.')
    opt_group.add_argument('-warmup', type=int, default=0,
                           help='Number of epochs to wait before enacting early stopping policy.')
    opt_group.add_argument('-skip_eval_sim', action='store_true',
                           help='Skip running the simulator during the evaluation phase of training.')
    opt_group.add_argument('-lr_scheduler', action='store_true',
                           help='Whether to use a reduce-on-plateau learning rate scheduler that halves the learning rate at each update')

    #################
    # DATA PARAMETERS
    data_group = parser.add_argument_group('DATA PARAMETERS')
    data_group.add_argument('-nsteps', type=int, default=32,
                            help='Number of steps for open loop during training.')
    data_group.add_argument('-system', type=str, default='fsw_phase_2', choices=list(systems.keys()),
                            help='select particular dataset with keyword')
    data_group.add_argument('-nsim', type=int, default=100000000000,
                            help='Number of time steps for full dataset. (ntrain + ndev + ntest) '
                                 'train, dev, and test will be split evenly from contiguous, sequential, '
                                 'non-overlapping chunks of nsim datapoints, e.g. first nsim/3 are train, '
                                 'next nsim/3 are dev and next nsim/3 simulation steps are test points. '
                                 'None will use a default nsim from the selected dataset or emulator')
    data_group.add_argument('-norm', nargs='+', default=['U', 'D', 'Y'], choices=['U', 'D', 'Y'],
                            help='List of sequences to max-min normalize')
    data_group.add_argument('-trainset', type=str, choices=list(datasplits.keys()), default='pid',
                            help='Weld type data to use for training.')
    
    ##################
    # MODEL PARAMETERS
    model_group = parser.add_argument_group('MODEL PARAMETERS')
    model_group.add_argument('-ssm_type', type=str, choices=['blackbox', 'hw', 'hammerstein', 'blocknlin', 'linear'],
                             default='blocknlin')
    model_group.add_argument('-nx_hidden', type=int, default=48, help='Number of hidden states per output')
    model_group.add_argument('-n_layers', type=int, default=5, help='Number of hidden layers of single time-step state transition')
    model_group.add_argument('-state_estimator', type=str,
                             choices=['rnn', 'mlp', 'linear', 'residual_mlp'], default='mlp')
    model_group.add_argument('-estimator_input_window', type=int, default=10,
                             help="Number of previous time steps measurements to include in state estimator input")
    model_group.add_argument('-linear_map', type=str, choices=list(slim.maps.keys()),
                             default='linear')
    model_group.add_argument('-nonlinear_map', type=str, default='residual_mlp',
                             choices=['mlp', 'rnn', 'pytorch_rnn', 'linear', 'residual_mlp'])
    model_group.add_argument('-bias', type=int, default=0, choices=[0, 1], help='Whether to use bias in the neural network models.')
    model_group.add_argument('-activation', choices=list(activations.keys()), default='gelu',
                             help='Activation function for neural networks')
    model_group.add_argument('-timedelay', type=int, default=8, help='Number of time-delayed features of the SSM')

    ##################
    # Weight PARAMETERS
    weight_group = parser.add_argument_group('WEIGHT PARAMETERS')
    weight_group.add_argument('-Q_con_x', type=float,  default=1.0, help='Hidden state constraints penalty weight.')
    weight_group.add_argument('-Q_dx', type=float,  default=0.2,
                              help='Penalty weight on hidden state difference in one time step.')
    weight_group.add_argument('-Q_sub', type=float,  default=0.2, help='Linear maps regularization weight.')
    weight_group.add_argument('-Q_y', type=float,  default=1.0, help='Output tracking penalty weight')
    weight_group.add_argument('-Q_e', type=float,  default=1.0, help='State estimator hidden prediction penalty weight')
    weight_group.add_argument('-Q_con_fdu', type=float,  default=0.0, help='Penalty weight on control actions and disturbances.')

    ####################
    # LOGGING PARAMETERS
    log_group = parser.add_argument_group('LOGGING PARAMETERS')
    log_group.add_argument('-savedir', type=str, default='test',
                           help="Directory where the trained model and plots will be saved.")
    log_group.add_argument('-verbosity', type=int, default=1,
                           help="How many epochs in between status updates")
    log_group.add_argument('-exp', type=str, default='test',
                           help='All runs will be grouped under this experiment name.')
    log_group.add_argument('-location', type=str, default='mlruns',
                           help='Directory where mlflow experiment tracking data is written')
    log_group.add_argument('-run', type=str, default='neuromancer',
                           help='Name describing what the experiment run was about.')
    log_group.add_argument('-logger', type=str, choices=['mlflow', 'stdout'], default='stdout',
                           help='Logging setup to use')
    log_group.add_argument('-train_visuals', action='store_true',
                           help='Whether to create visuals, e.g. animations during training loop')
    log_group.add_argument('-trace_movie', action='store_true',
                           help='Whether to plot an animation of the simulated and true dynamics')
    return parser
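
A minimal usage sketch for the parser above, assuming systems, datasplits, slim.maps, and activations are imported at module level as in the original script; the overrides are illustrative only.

if __name__ == '__main__':
    # Build the parser and override a few model hyperparameters.
    parser = parse()
    args = parser.parse_args(['-epochs', '200', '-nx_hidden', '32', '-timedelay', '4'])
    print(args.epochs, args.nx_hidden, args.timedelay, args.nonlinear_map)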
Example #4
def parse():
    parser = argparse.ArgumentParser()
    parser.add_argument('-gpu', type=int, default=None,
                        help="GPU to use")
    # OPTIMIZATION PARAMETERS
    opt_group = parser.add_argument_group('OPTIMIZATION PARAMETERS')
    opt_group.add_argument('-epochs', type=int, default=0)
    opt_group.add_argument('-lr', type=float, default=0.001,
                           help='Step size for gradient descent.')
    opt_group.add_argument('-eval_metric', type=str, default='loop_dev_loss',
                           help='Metric for model selection and early stopping.')
    opt_group.add_argument('-patience', type=int, default=5,
                           help='How many epochs to allow for no improvement in eval metric before early stopping.')
    opt_group.add_argument('-warmup', type=int, default=0,
                           help='Number of epochs to wait before enacting early stopping policy.')
    opt_group.add_argument('-skip_eval_sim', action='store_true',
                           help='Skip running the simulator during the evaluation phase of training.')

    #################
    # DATA PARAMETERS
    data_group = parser.add_argument_group('DATA PARAMETERS')
    data_group.add_argument('-nsteps', type=int, default=32,
                            help='Number of steps for open loop during training.')
    data_group.add_argument('-system', type=str, default='flexy_air', choices=list(systems.keys()),
                            help='select particular dataset with keyword')
    data_group.add_argument('-nsim', type=int, default=10000,
                            help='Number of time steps for full dataset. (ntrain + ndev + ntest) '
                                 'train, dev, and test will be split evenly from contiguous, sequential, '
                                 'non-overlapping chunks of nsim datapoints, e.g. first nsim/3 are train, '
                                 'next nsim/3 are dev and next nsim/3 simulation steps are test points. '
                                 'None will use a default nsim from the selected dataset or emulator')
    data_group.add_argument('-norm', nargs='+', default=['U', 'D', 'Y'], choices=['U', 'D', 'Y'],
                            help='List of sequences to max-min normalize')
    data_group.add_argument('-batch_type', default='batch', choices=['mh', 'batch'], help='option for creating batches of time series data')
    
    ##################
    # MODEL PARAMETERS
    model_group = parser.add_argument_group('MODEL PARAMETERS')
    model_group.add_argument('-model_file', type=str, default='../datasets/Flexy_air/best_model_flexy1.pth')
    model_group.add_argument('-ssm_type', type=str, choices=['blackbox', 'block'],
                             default='blackbox')
    model_group.add_argument('-xmin', type=float, default=-0.2, help='Constraint on minimum state value')
    model_group.add_argument('-xmax', type=float, default=1.2, help='Constraint on maximum state value')
    model_group.add_argument('-dxudmin', type=float, default=-0.05,
                             help='Constraint on contribution of U and D to state')
    model_group.add_argument('-dxudmax', type=float, default=0.05,
                             help='Constraint on contribution of U and D to state')
    model_group.add_argument('-koopman', type=int, default=0,
                             help='Whether to enforce regularization so that fy is inverse of state estimator')

    ##################
    # Weight PARAMETERS
    weight_group = parser.add_argument_group('WEIGHT PARAMETERS')
    weight_group.add_argument('-Q_con_x', type=float,  default=1.0, help='Hidden state constraints penalty weight.')
    weight_group.add_argument('-Q_dx', type=float,  default=0.2,
                              help='Penalty weight on hidden state difference in one time step.')
    weight_group.add_argument('-Q_sub', type=float,  default=0.2, help='Linear maps regularization weight.')
    weight_group.add_argument('-Q_y', type=float,  default=1.0, help='Output tracking penalty weight')
    weight_group.add_argument('-Q_e', type=float,  default=1.0, help='State estimator hidden prediction penalty weight')
    weight_group.add_argument('-Q_con_fdu', type=float,  default=0.0, help='Penalty weight on control actions and disturbances.')

    ####################
    # LOGGING PARAMETERS
    log_group = parser.add_argument_group('LOGGING PARAMETERS')
    log_group.add_argument('-savedir', type=str, default='test',
                           help="Directory where the trained model and plots will be saved.")
    log_group.add_argument('-verbosity', type=int, default=1,
                           help="How many epochs in between status updates")
    log_group.add_argument('-exp', type=str, default='test',
                           help='All runs will be grouped under this experiment name.')
    log_group.add_argument('-location', type=str, default='mlruns',
                           help='Directory where mlflow experiment tracking data is written')
    log_group.add_argument('-run', type=str, default='neuromancer',
                           help='Name describing what the experiment run was about.')
    log_group.add_argument('-logger', type=str, choices=['mlflow', 'stdout'], default='stdout',
                           help='Logging setup to use')
    log_group.add_argument('-train_visuals', action='store_true',
                           help='Whether to create visuals, e.g. animations during training loop')
    log_group.add_argument('-trace_movie', action='store_true',
                           help='Whether to plot an animation of the simulated and true dynamics')
    return parser
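
A minimal usage sketch for the parser above, which appears intended for evaluating a pretrained model (epochs defaults to 0 and -model_file points at a saved checkpoint); the overrides are illustrative only.

if __name__ == '__main__':
    # Build the parser and override the open-loop rollout length and state constraint.
    parser = parse()
    args = parser.parse_args(['-nsteps', '64', '-xmax', '1.0'])
    print(args.nsteps, args.xmax, args.model_file)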