Example #1
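For reference, a minimal sketch of the imports these excerpts rely on; the project-local modules (diRNN, lstm, dnb, train, nlsdp) and helpers such as get_ic_response and test_and_save_model are used below but their import paths are not shown in the original, so they are only listed in a comment.

# Assumed third-party imports for the excerpts below.
import numpy as np
import torch
import matplotlib.pyplot as plt
from matplotlib import cm
from scipy import io

# Project-local modules and helpers referenced below (import paths are assumptions):
# diRNN, lstm, dnb, train, nlsdp, get_ic_response, test_and_save_model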
def plot_val(name,
             width=64,
             lstm_width=49,
             nu=1,
             ny=2,
             layers=2,
             train_batches=50,
             nl=None,
             T=1000,
             plot_str='k',
             x0=None,
             LSTM=False):
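    """Load a trained pendulum model (diRNN, or the LSTM baseline when LSTM=True)
    and plot its response from the initial states x0 in the output phase plane."""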

    path = "./experimental_results/pendulum/"
    non_lin = torch.relu if nl is None else nl

    data = io.loadmat(path + name + ".mat")

    if LSTM:
        model = lstm.lstm(nu, lstm_width, ny, layers)
        model.load_state_dict(torch.load(path + "p_" + name))
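        # Alias the output layer so the LSTM and diRNN models expose the same attribute (assumed intent).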
        model.output_layer = model.output
    else:
        model = diRNN.diRNN(nu,
                            width,
                            ny,
                            layers,
                            nBatches=train_batches,
                            nl=non_lin,
                            learn_init_state=False)
        model.load_state_dict(torch.load(path + "p_" + name))

    u, yest = get_ic_response(model, x0, T=T, batches=1000)
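    # Phase-plane plot: output channel 0 against output channel 1.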
    plt.plot(yest[:, 0].detach().numpy().T, yest[:, 1].detach().numpy().T,
             plot_str)
    plt.pause(0.01)
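A hypothetical call for orientation only; the checkpoint name follows the dl2_gamma / validation-set naming pattern that appears in the later excerpts and is not taken from the original script.

# Hypothetical usage (checkpoint name and arguments are illustrative only):
# plot_val("dl2_gamma1.0_val1", width=64, T=1000, plot_str='b')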
            # Optimize over models with weights confined to a spectral norm ball (i.e. the approach of Miller and Hardt)
            name = "spectral_norm_sub{:d}_val{:d}".format(subject, val_set)
            model = dnb.dnbRNN(nu, width, ny, layers, nBatches=9, init_var=1.3)
            model.project_norm_ball(eps)
            log, best_model = train.train_model(model,
                                                train_loader=train_loader,
                                                val_loader=val_loader,
                                                test_loader=test_loader,
                                                options=solver_options,
                                                LMIs=model.norm_ball_lmi(eps))

            test_and_save_model(name, best_model, train_loader, val_loader,
                                test_loader, log)

            # Train Contracting model
            name = "contracting_sub{:d}_val{:d}".format(subject, val_set)
            model = diRNN.diRNN(nu, width, ny, layers, nBatches=9)
            # model.project_l2(mu=mu, epsilon=eps)
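            # Initialize the weights so that the contraction LMI below is feasible with margin eps (assumed purpose of init_l2).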
            model.init_l2(mu=mu, epsilon=eps, init_var=1.3)

            log, best_model = train.train_model(model,
                                                train_loader=train_loader,
                                                val_loader=val_loader,
                                                test_loader=test_loader,
                                                options=solver_options,
                                                LMIs=model.contraction_lmi(
                                                    mu=mu, epsilon=eps))

            test_and_save_model(name, best_model, train_loader, val_loader,
                                test_loader, log)
        # Options for the solver
        solver_options = nlsdp.make_stochastic_nlsdp_options(
            max_epochs=max_epochs,
            lr=lr,
            mu0=10,
            lr_decay=lr_decay,
            patience=patience)

        for gamma in [0.5, 1, 1.5, 2.5, 5]:
            # Train l2 gain bounded implicit model ------------------------------------------------------------------------
            name = "dl2_gamma{:1.2f}_sub{:d}_val{:d}".format(
                gamma, subject, val_set)

            print("training model: " + name)
            model = diRNN.diRNN(nu,
                                width,
                                ny,
                                layers,
                                nBatches=9,
                                nl=torch.tanh)

            # Add a small offset (init_offset) to eps so the LMI is strictly feasible after initialization
            model.init_dl2(epsilon=eps + init_offset,
                           gamma=gamma,
                           init_var=init_var,
                           custom_seed=val_set + this_seed)
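            # Train with train_model_ipm, enforcing the l2-gain LMI for this value of gamma.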
            log, best_model = train.train_model_ipm(model,
                                                    train_loader=train_loader,
                                                    val_loader=val_loader,
                                                    test_loader=test_loader,
                                                    options=solver_options,
                                                    LMIs=model.dl2_lmi(
                                                        gamma=gamma,
Example #4
            nu = train_loader.nu
            ny = train_loader.ny

            # Options for the solver
            solver_options = nlsdp.make_stochastic_nlsdp_options(
                max_epochs=max_epochs,
                lr=0.1E-4,
                mu0=2000,
                lr_decay=0.96,
                patience=10)

            # Train Contracting implicit model ------------------------------------------------------------------------
            name = "contracting_sub{:d}_val{:d}".format(subject, val_set)
            model = diRNN.diRNN(nu,
                                width,
                                ny,
                                layers,
                                nBatches=dataset_options["train_batch_size"] -
                                1)
            model.init_l2(mu=mu, epsilon=eps, init_var=1.2)

            log, best_model = train.train_model(model,
                                                train_loader=train_loader,
                                                val_loader=val_loader,
                                                test_loader=test_loader,
                                                options=solver_options,
                                                LMIs=model.contraction_lmi(
                                                    mu=mu, epsilon=eps))

            # eModel = best_model.make_explicit()
            # compare_exp_imp(eModel, best_model, train_loader)
            test_and_save_model(name, best_model, train_loader, val_loader,
                                test_loader, log)
Example #5
    ss = 0.5
    plot_sim_step_response(step_size=ss, T=T, Ts=0.1, meas_sd=0.01)

    gammas = [0.2, 0.5, 1.0, 5.0, 10.0, 50.0]
    colors = cm.hot(np.linspace(0.25, 0.75, len(gammas)))

    for val_set in range(1, 9):

        for (cid, gamma) in enumerate(gammas):
            color = colors[cid]
            plot_args = {"color": color, "linestyle": '-'}
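            # Load the dl2 model trained with this gamma on this validation split and plot its step response.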
            model_c = diRNN.diRNN(nu,
                                  width,
                                  ny,
                                  layers,
                                  nBatches=train_batches,
                                  nl=torch.relu,
                                  learn_init_state=False)
            model_c.load_state_dict(
                torch.load(
                    "./experimental_results/pendulum/p_dl2_gamma{:1.1f}_val{:d}"
                    .format(gamma, val_set)))
            plot_step_response(model_c, step_size=ss, T=T, plot_args=plot_args)

        plot_args = {"color": 'g', "linestyle": '-'}
        model_c = diRNN.diRNN(nu,
                              width,
                              ny,
                              layers,
                              nBatches=train_batches,