def arg_dpc_problem(prefix=''):
    """
    Command line parser for DPC problem definition arguments

    :param prefix: (str) Optional prefix for command line arguments to resolve naming conflicts when multiple parsers
                         are bundled as parents.
    :return: (arg.ArgParse) A command line parser
    """
    parser = arg.ArgParser(prefix=prefix, add_help=False)
    dpc_group = parser.group("DPC")
    # Each entry pairs a flag name with the keyword arguments for .add();
    # the flags are registered in the listed order.
    option_specs = [
        ("-nsteps", dict(type=int, default=10,
                         help="prediction horizon.")),  # tuned values: 1, 2
        ("-nx_hidden", dict(type=int, default=20,
                            help="Number of hidden states")),
        ("-n_layers", dict(type=int, default=4,
                           help="Number of hidden layers")),
        ("-bias", dict(action="store_true",
                       help="Whether to use bias in the neural network block component models.")),
        ("-epochs", dict(type=int, default=2000,
                         help='Number of training epochs')),
        ("-lr", dict(type=float, default=0.001,
                     help="Step size for gradient descent.")),
        ("-patience", dict(type=int, default=100,
                           help="How many epochs to allow for no improvement in eval metric before early stopping.")),
        ("-warmup", dict(type=int, default=10,
                         help="Number of epochs to wait before enacting early stopping policy.")),
    ]
    for flag, kwargs in option_specs:
        dpc_group.add(flag, **kwargs)
    return parser
def arg_dpc_problem(prefix=''):
    """
    Command line parser for DPC problem definition arguments

    :param prefix: (str) Optional prefix for command line arguments to resolve naming conflicts when multiple parsers
                         are bundled as parents.
    :return: (arg.ArgParse) A command line parser
    """
    parser = arg.ArgParser(prefix=prefix, add_help=False)
    dpc_group = parser.group("DPC")
    dpc_group.add("-nsteps", type=int, default=1,
                  help="prediction horizon.")
    # Objective weights (tuned values noted inline).
    dpc_group.add("-Qx", type=float, default=1.0,
                  help="state weight.")  # tuned value: 1.0
    dpc_group.add("-Qu", type=float, default=1.0,
                  help="control action weight.")  # tuned value: 1.0,  unstable for paper: 0.5
    dpc_group.add("-Qn", type=float, default=1.0,
                  help="terminal penalty weight.")  # tuned value: 1.0,  unstable for paper: 0.0
    dpc_group.add("-Q_sub", type=float, default=0.0,
                  help="regularization weight.")
    dpc_group.add("-Q_con_x", type=float, default=10.0,
                  help="state constraints penalty weight.")  # tuned value: 10.0
    dpc_group.add("-Q_con_u", type=float, default=20.0,
                  help="Input constraints penalty weight.")  # tuned value: 20.0
    # Network architecture.
    dpc_group.add("-nx_hidden", type=int, default=20,
                  help="Number of hidden states")
    dpc_group.add("-n_layers", type=int, default=4,
                  help="Number of hidden layers")
    dpc_group.add("-bias", action="store_true",
                  help="Whether to use bias in the neural network block component models.")
    # Data handling.
    dpc_group.add("-norm", nargs="+", default=[], choices=["U", "D", "Y", "X"],
                  help="List of sequences to max-min normalize")
    dpc_group.add("-data_seed", type=int, default=408,
                  help="Random seed used for simulated data")
    # Training loop settings.
    dpc_group.add("-epochs", type=int, default=1000,
                  help='Number of training epochs')
    dpc_group.add("-lr", type=float, default=0.001,
                  help="Step size for gradient descent.")
    dpc_group.add("-patience", type=int, default=100,
                  help="How many epochs to allow for no improvement in eval metric before early stopping.")
    dpc_group.add("-warmup", type=int, default=10,
                  help="Number of epochs to wait before enacting early stopping policy.")
    return parser
Exemple #3
0
import os
from neuromancer import arg
import argparse
from neuromancer import datasets

# Thin CLI: only a -gpu flag is parsed here; all other options are forwarded
# to the spawned training runs below.
parser = argparse.ArgumentParser()
parser.add_argument('-gpu', type=int, default=None)
args = parser.parse_args()
if args.gpu is not None:
    # NOTE(review): `gpu` is built here but never referenced in the visible
    # portion of this script -- presumably meant to be spliced into the
    # os.system command below; confirm against the full file.
    gpu = f'-gpu {args.gpu}'

# Bundle the standard neuromancer argument groups so every flag with a fixed
# choice set can be enumerated.
p = arg.ArgParser(parents=[
    arg.log(),
    arg.opt(),
    arg.data(),
    arg.loss(),
    arg.lin(),
    arg.ssm()
])
# Map each flag that declares explicit choices to those choices, excluding
# '-norm' (it is passed explicitly in the command line below).
options = {
    k: v.choices
    for k, v in p._option_string_actions.items()
    if v.choices is not None and k != '-norm'
}
systems = list(datasets.systems.keys())
# Launch one short (1-epoch) system-id training run per (flag, choice) pair,
# cycling through the available systems via (i*j) modulo the system count.
for i, (k, v) in enumerate(options.items()):
    for j, opt in enumerate(v):
        print(k, opt)
        code = os.system(
            f'python ../train_scripts/system_id.py -norm Y '
            f'{k} {opt} -epochs 1 -nsteps 8 -verbosity 1 -nsim 128 -system {systems[(i*j) % len(systems)]}'
Exemple #4
0
        "Y_min": 0.7 * np.ones([nsim, ny]),
        "U_max": np.ones([nsim, nu]),
        "U_min": np.zeros([nsim, nu]),
        "R": psl.Periodic(nx=ny, nsim=nsim, numPeriods=20, xmax=0.9, xmin=0.8)[:nsim, :]
        # 'Y_ctrl_': psl.WhiteNoise(nx=ny, nsim=nsim, xmax=[1.0] * ny, xmin=[0.0] * ny)
    })
    # indices of controlled states, e.g. [0, 1, 3] out of 5 outputs
    dataset.ctrl_outputs = args.controlled_outputs
    return dataset


if __name__ == "__main__":
    # for available systems in PSL library check: psl.systems.keys()
    system = 'CSTR'         # keyword of selected system
    parser = arg.ArgParser(parents=[arg.log(), arg.log(prefix='sysid_'),
                                    arg.opt(), arg.data(system=system),
                                    arg.loss(), arg.lin(), arg.policy(),
                                    arg.ctrl_loss(), arg.freeze()])
    path = './test/CSTR_best_model.pth'
    parser.add('-model_file', type=str, default=path,
               help='Path to pytorch pickled model.')

    args = parser.parse_args()
    args.savedir = 'test_control'

    log_constructor = MLFlowLogger if args.logger == 'mlflow' else BasicLogger
    metrics = ["nstep_dev_loss", "loop_dev_loss", "best_loop_dev_loss",
               "nstep_dev_ref_loss", "loop_dev_ref_loss"]
    logger = log_constructor(args=args, savedir=args.savedir, verbosity=args.verbosity, stdout=metrics)

    print({k: str(getattr(args, k)) for k in vars(args) if getattr(args, k)})
    device = f"cuda:{args.gpu}" if args.gpu is not None else "cpu"
Exemple #5
0
    if umax is not None:
        # Broadcast the input upper bound across the horizon and overlay it
        # as a dashed black line on the control-action subplot.
        u_max = umax * np.ones([nstep + 1, umax.shape[0]])
        ax[1].plot(u_max, 'k--', linewidth=2)
    ax[1].set(xlabel='time')
    ax[1].grid()
    ax[1].set_xlim(0, nstep)
    plt.tight_layout()
    if save_path is not None:
        plt.savefig(save_path + '/closed_loop_quadcopter_dpc.pdf')


if __name__ == "__main__":
    """
    # # #  Arguments
    """
    parser = arg.ArgParser(parents=[arg.log(), arg_dpc_problem()])
    args, grps = parser.parse_arg_groups()
    args.bias = True
    """
    # # # 3D quadcopter model 
    """

    # Discrete time model of a quadcopter
    A = np.array(
        [[1., 0., 0., 0., 0., 0., 0.1, 0., 0., 0., 0., 0.],
         [0., 1., 0., 0., 0., 0., 0., 0.1, 0., 0., 0., 0.],
         [0., 0., 1., 0., 0., 0., 0., 0., 0.1, 0., 0., 0.],
         [0.0488, 0., 0., 1., 0., 0., 0.0016, 0., 0., 0.0992, 0., 0.],
         [0., -0.0488, 0., 0., 1., 0., 0., -0.0016, 0., 0., 0.0992, 0.],
         [0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0.0992],
         [0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
Exemple #6
0
                disturbances_max_influence_lb,
                disturbances_max_influence_ub,
            ]

    # Hand the assembled loss terms and constraint penalties back to the caller.
    return objectives, constraints


if __name__ == "__main__":

    # for available systems in PSL library check: psl.systems.keys()
    # for available datasets in PSL library check: psl.datasets.keys()
    system = 'aero'  # keyword of selected system
    parser = arg.ArgParser(parents=[
        arg.log(),
        arg.opt(),
        arg.data(system=system),
        arg.loss(),
        arg.lin(),
        arg.ssm()
    ])

    grp = parser.group('OPTIMIZATION')
    grp.add("-eval_metric",
            type=str,
            default="loop_dev_ref_loss",
            help="Metric for model selection and early stopping.")
    args, grps = parser.parse_arg_groups()
    print({k: str(getattr(args, k)) for k in vars(args) if getattr(args, k)})

    device = f"cuda:{args.gpu}" if args.gpu is not None else "cpu"

    log_constructor = MLFlowLogger if args.logger == 'mlflow' else BasicLogger