Example #1
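This snippet appears to be the tail of an open-loop training script, truncated at the top mid-way through a list of constraints: it assembles a constrained Problem, then trains and evaluates it with a Trainer wired to a VisualizerOpen and an MHOpenLoopSimulator. The API names across all four examples match NeuroMANCER-style example scripts.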
            ]

    ##########################################
    ########### OPTIMIZE SOLUTION ############
    ##########################################
    # assemble the differentiable optimization problem and its optimizer
    model = Problem(objectives, constraints, components).to(device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr)
    # visualization callback for training and evaluation plots
    visualizer = VisualizerOpen(dataset,
                                dynamics_model,
                                args.verbosity,
                                args.savedir,
                                training_visuals=args.train_visuals,
                                trace_movie=args.trace_movie)
    # simulator = OpenLoopSimulator(model=model, dataset=dataset, eval_sim=not args.skip_eval_sim)
    simulator = MHOpenLoopSimulator(model=model,
                                    dataset=dataset,
                                    eval_sim=not args.skip_eval_sim)
    # trainer with early stopping on the chosen evaluation metric
    trainer = Trainer(model,
                      dataset,
                      optimizer,
                      logger=logger,
                      visualizer=visualizer,
                      simulator=simulator,
                      epochs=args.epochs,
                      eval_metric=args.eval_metric,
                      patience=args.patience,
                      warmup=args.warmup)
    best_model = trainer.train()
    trainer.evaluate(best_model)
    logger.clean_up()
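A recurring pattern here is the Problem(objectives, constraints, components) call, which folds objective terms and soft-constraint penalties into one trainable loss. The sketch below is a hypothetical, library-free illustration of that idea in plain PyTorch; every variable name in it is an illustrative stand-in, not part of the library API.

import torch

# Hypothetical sketch: objective terms and soft-constraint penalties summed
# into a single scalar training loss, as a Problem-style aggregation would do.
y_pred = torch.randn(10, 1, requires_grad=True)  # stand-in for predicted outputs
u_pred = torch.randn(10, 1, requires_grad=True)  # stand-in for predicted inputs
y_ref, u_max = torch.zeros(10, 1), torch.ones(10, 1)

objective = torch.mean((y_pred - y_ref) ** 2)          # reference-tracking term
penalty = torch.mean(torch.relu(u_pred - u_max) ** 2)  # input upper-bound penalty
loss = objective + penalty                             # aggregated loss
loss.backward()                                        # gradients reach both terms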
Example #2
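An excerpt from what the logger tags as a 'dpc_ref' (differentiable predictive control, reference tracking) script: device and optimizer setup, a Trainer that early-stops on the nstep_dev_loss metric, training and testing, and the truncated start of closed-loop plotting via cl_simulate.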
                         savedir=args.savedir,
                         verbosity=args.verbosity,
                         stdout=metrics)
    logger.args.system = 'dpc_ref'
    # device and optimizer
    device = f"cuda:{args.gpu}" if args.gpu is not None else "cpu"
    model = model.to(device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr)

    # trainer
    trainer = Trainer(
        model,
        train_data,
        dev_data,
        test_data,
        optimizer,
        logger=logger,
        epochs=args.epochs,
        patience=args.patience,
        eval_metric='nstep_dev_loss',
        warmup=args.warmup,
    )
    # Train control policy
    best_model = trainer.train()
    best_outputs = trainer.test(best_model)
    """
    # # #  Plots and Analysis
    """
    # plot closed loop trajectories from different initial conditions
    cl_simulate(A,
                B,
                C,
Example #3
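A closed-loop variant, truncated at the top mid-way through a list of input-bound penalty constraints: the Problem is assembled, selected submodules are frozen or unfrozen via freeze_weight/unfreeze_weight, and training runs against a ClosedLoopSimulator with a VisualizerClosedLoop.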
        inputs_lower_bound_penalty, inputs_upper_bound_penalty
    ]

    ##########################################
    ########### OPTIMIZE SOLUTION ############
    ##########################################
    model = Problem(objectives, constraints, components).to(device)
    # optionally freeze/unfreeze named submodules before training
    freeze_weight(model, module_names=args.freeze)
    unfreeze_weight(model, module_names=args.unfreeze)
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr)
    plot_keys = ['Y_pred', 'U_pred']  # variables to be plotted
    visualizer = VisualizerClosedLoop(dataset,
                                      dynamics_model,
                                      plot_keys,
                                      args.verbosity,
                                      savedir=args.savedir)
    # simulate the trained model in closed loop against the dynamics model
    emulator = dynamics_model
    simulator = ClosedLoopSimulator(model=model,
                                    dataset=dataset,
                                    emulator=emulator)
    trainer = Trainer(model,
                      dataset,
                      optimizer,
                      logger=logger,
                      visualizer=visualizer,
                      simulator=simulator,
                      epochs=args.epochs)
    best_model = trainer.train()
    trainer.evaluate(best_model)
    logger.clean_up()
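Example #3's freeze_weight/unfreeze_weight calls suggest selective gradient control by module name. Below is a minimal sketch of that mechanism in plain PyTorch, assuming prefix matching on parameter names; set_trainable and everything else in it are hypothetical stand-ins, not the library's own functions.

import torch

# Hypothetical sketch of selective freezing: toggle requires_grad on
# parameters whose names start with a given module-name prefix.
def set_trainable(model: torch.nn.Module, prefixes, trainable: bool):
    for name, param in model.named_parameters():
        if any(name.startswith(p) for p in prefixes):
            param.requires_grad_(trainable)

net = torch.nn.Sequential(torch.nn.Linear(4, 8), torch.nn.ReLU(), torch.nn.Linear(8, 1))
set_trainable(net, ["0"], False)  # freeze the first Linear layer ("0.weight", "0.bias")
# Hand only still-trainable parameters to AdamW, mirroring the optimizer setup above.
optimizer = torch.optim.AdamW((p for p in net.parameters() if p.requires_grad), lr=1e-3)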
Example #4
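Closed-loop policy training on a system the logger tags as 'integrator': a ClosedLoopSimulator couples the model, dataset, emulator, and policy; the Trainer uses patience and warmup; the snippet ends mid-call while plotting closed-loop trajectories with cl_simulate.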
    logger.args.system = 'integrator'
    # device and optimizer
    device = f"cuda:{args.gpu}" if args.gpu is not None else "cpu"
    model = model.to(device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr)
    # simulator
    simulator = ClosedLoopSimulator(model=model,
                                    dataset=dataset,
                                    emulator=dynamics_model,
                                    policy=policy)
    # trainer
    trainer = Trainer(
        model,
        dataset,
        optimizer,
        logger=logger,
        simulator=simulator,
        epochs=args.epochs,
        patience=args.patience,
        warmup=args.warmup,
    )
    # Train control policy
    best_model = trainer.train()
    """
    # # #  Plots and Analysis
    """
    # plot closed loop trajectories
    cl_simulate(A,
                B,
                policy.net,
                nstep=40,
                x0=1.5 * np.ones([2, 1]),
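The patience and warmup arguments passed to Trainer in Examples #2 and #4 imply early stopping: ignore the first warmup epochs, then stop once the evaluation metric fails to improve for patience consecutive epochs. Here is a self-contained sketch of that behavior in plain PyTorch; the toy model, data, and the use of training loss as a stand-in for a dev metric are all hypothetical.

import torch

# Hypothetical sketch of patience/warmup early stopping. A toy model and the
# training loss stand in for the real model and its dev-set evaluation metric.
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
X, y = torch.randn(256, 4), torch.randn(256, 1)

best_loss, best_state, bad_epochs = float("inf"), None, 0
patience, warmup = 10, 5
for epoch in range(200):
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(X), y)
    loss.backward()
    optimizer.step()
    if epoch < warmup:
        continue  # skip early, noisy epochs when judging improvement
    if loss.item() < best_loss:
        best_loss = loss.item()
        best_state = {k: v.detach().clone() for k, v in model.state_dict().items()}
        bad_epochs = 0
    else:
        bad_epochs += 1
        if bad_epochs >= patience:
            break  # no improvement for `patience` epochs in a row: stop
if best_state is not None:
    model.load_state_dict(best_state)  # analogous to best_model = trainer.train()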