Example #1
def get_loss(args, dataset, components):
    estimator, dynamics = components
    estimator_loss = Objective([f'X_pred_{dynamics.name}', f'x0_{estimator.name}'],
                               lambda X_pred, x0: F.mse_loss(X_pred[-1, :-1, :], x0[1:]),
                               weight=args.Q_e, name='arrival_cost')
    regularization = Objective([f'reg_error_{estimator.name}', f'reg_error_{dynamics.name}'],
                               lambda reg1, reg2: reg1 + reg2, weight=args.Q_sub, name='reg_error')
    reference_loss = Objective([f'Y_pred_{dynamics.name}', 'Yf'], F.mse_loss, weight=args.Q_y,
                               name='ref_loss')
    state_smoothing = Objective([f'X_pred_{dynamics.name}'], lambda x: F.mse_loss(x[1:], x[:-1]), weight=args.Q_dx,
                                name='state_smoothing')
    observation_lower_bound_penalty = Objective([f'Y_pred_{dynamics.name}'],
                                                lambda x: torch.mean(F.relu(-x + args.xmin)), weight=args.Q_con_x,
                                                name='y_low_bound_error')
    observation_upper_bound_penalty = Objective([f'Y_pred_{dynamics.name}'],
                                                lambda x: torch.mean(F.relu(x - args.xmax)), weight=args.Q_con_x,
                                                name='y_up_bound_error')

    objectives = [regularization, reference_loss, estimator_loss]
    constraints = [state_smoothing, observation_lower_bound_penalty, observation_upper_bound_penalty]

    if args.ssm_type != 'blackbox':
        if 'U' in dataset.data:
            inputs_max_influence_lb = Objective([f'fU_{dynamics.name}'], lambda x: torch.mean(F.relu(-x + args.dxudmin)),
                                                weight=args.Q_con_fdu,
                                                name='input_influence_lb')
            inputs_max_influence_ub = Objective([f'fU_{dynamics.name}'], lambda x: torch.mean(F.relu(x - args.dxudmax)),
                                                weight=args.Q_con_fdu, name='input_influence_ub')
            constraints += [inputs_max_influence_lb, inputs_max_influence_ub]
        if 'D' in dataset.data:
            disturbances_max_influence_lb = Objective([f'fD_{dynamics.name}'], lambda x: torch.mean(F.relu(-x + args.dxudmin)),
                                                      weight=args.Q_con_fdu, name='dist_influence_lb')
            disturbances_max_influence_ub = Objective([f'fD_{dynamics.name}'], lambda x: torch.mean(F.relu(x - args.dxudmax)),
                                                      weight=args.Q_con_fdu, name='dist_influence_ub')
            constraints += [disturbances_max_influence_lb, disturbances_max_influence_ub]
    return objectives, constraints
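
A minimal usage sketch for the function above (assumptions: the `get_components` helper, `args`, `dataset`, and `device` objects seen in the later examples; the `Problem` and optimizer wiring mirrors Example #4):

# Hypothetical driver code: assemble the returned loss terms into a trainable Problem.
estimator, dynamics = get_components(args, dataset)
components = [estimator, dynamics]
objectives, constraints = get_loss(args, dataset, components)
model = Problem(objectives, constraints, components).to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr)
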
Example #2
                     residual=args.residual)

    # linearizer = ....

    components = [estimator, dynamics_model]

    ##########################################
    ########## MULTI-OBJECTIVE LOSS ##########
    ##########################################
    xmin = -0.2
    xmax = 1.2
    dxudmin = -0.05
    dxudmax = 0.05
    estimator_loss = Objective(
        ['X_pred', 'x0'],
        lambda X_pred, x0: F.mse_loss(X_pred[-1, :-1, :], x0[1:]),
        weight=args.Q_e,
        name='arrival_cost')
    regularization = Objective([f'reg_error_estim', f'reg_error_dynamics'],
                               lambda reg1, reg2: reg1 + reg2,
                               weight=args.Q_sub,
                               name='reg_error')
    reference_loss = Objective(['Y_pred_dynamics', 'Yf'],
                               F.mse_loss,
                               weight=args.Q_y,
                               name='ref_loss')
    state_smoothing = Objective(['X_pred_dynamics'],
                                lambda x: F.mse_loss(x[1:], x[:-1]),
                                weight=args.Q_dx,
                                name='state_smoothing')
    observation_lower_bound_penalty = Objective(
        ['Y_pred_dynamics'],
        lambda x: torch.mean(F.relu(-x + xmin)),
        weight=args.Q_con_x,
        name='y_low_bound_error')
Example #3
                   name='policy')

    share_weights(dynamics_model_ctrl, dynamics_model)
    share_weights(estimator_ctrl, estimator)

    components = [
        estimator, dynamics_model, estimator_ctrl, policy, dynamics_model_ctrl
    ]

    ##########################################
    ########## MULTI-OBJECTIVE LOSS ##########
    ##########################################
    regularization = Objective(
        [
            'reg_error_policy', 'reg_error_dynamics_ctrl',
            'reg_error_dynamics', 'reg_error_estim_ctrl', 'reg_error_estim'
        ],
        lambda reg1, reg2, reg3, reg4, reg5: reg1 + reg2 + reg3 + reg4 + reg5,
        weight=args.Q_sub)
    system_ID_loss = Objective(
        ['Y_pred_dynamics', 'Yf'],
        lambda pred, ref: F.mse_loss(pred[:, :, :1], ref[:, :, :1]),
        weight=args.Q_r,
        name='system_id_loss')
    reference_loss = Objective(
        ['Y_pred_dynamics_ctrl', 'Rf'],
        lambda pred, ref: F.mse_loss(pred[:, :, :1], ref),
        weight=args.Q_r,
        name='ref_loss')
    control_smoothing = Objective(['U_pred_policy'],
                                  lambda x: F.mse_loss(x[1:], x[:-1]),
                                  weight=args.Q_du,
                                  name='control_smoothing')
Example #4
    ##########################################
    ########## PROBLEM COMPONENTS ############
    ##########################################
    print(dataset.dims)
    estimator, dynamics_model = get_components(args, dataset)
    components = [estimator, dynamics_model]

    ##########################################
    ########## MULTI-OBJECTIVE LOSS ##########
    ##########################################
    objectives, constraints = get_loss(args, dataset, components)
    if args.koopman:
        components.append(Decoder(dynamics_model.fy))
        autoencoder_loss = Objective(['Yp', 'yhat'],
                                     lambda Y, yhat: F.mse_loss(Y[-1], yhat),
                                     name='inverse')
        objectives.append(autoencoder_loss)
    ##########################################
    ########## OPTIMIZE SOLUTION ############
    ##########################################
    model = Problem(objectives, constraints, components).to(device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr)
    visualizer = VisualizerOpen(dataset,
                                dynamics_model,
                                args.verbosity,
                                args.savedir,
                                training_visuals=args.train_visuals,
                                trace_movie=args.trace_movie)
    simulator = OpenLoopSimulator(model=model,
                                  dataset=dataset,
Example #5
    signal_generator = WhiteNoisePeriodicGenerator(args.nsteps, args.ny, xmax=(0.8, 0.7), xmin=0.2,
                                                   min_period=1, max_period=20, name='Y_ctrl_', device=device).to(device)
    # reference_generator = PeriodicGenerator(args.nsteps, args.ny, xmax=0.7, xmin=0.3,
    #                                                min_period=1, max_period=20, name='R')
    # dynamics_generator = SignalGeneratorDynamics(dynamics_model, estimator, args.nsteps, xmax=1.0, xmin=0.0, name='Y_ctrl_')

    noise_generator = NoiseGenerator(ratio=0.05, keys=['Y_pred_dynamics'], name='_noise', device=device).to(device)

    # components = [dynamics_generator, estimator, policy, dynamics_model]
    components = [signal_generator, estimator, policy, dynamics_model, noise_generator]
    # components = [signal_generator, reference_generator, estimator, policy, dynamics_model]

    ##########################################
    ########## MULTI-OBJECTIVE LOSS ##########
    ##########################################
    regularization = Objective(['reg_error_policy'], lambda reg: reg,
                               weight=args.Q_sub).to(device)
    reference_loss = Objective(['Y_pred_dynamics', 'Rf'], lambda pred, ref: F.mse_loss(pred[:, :, :1], ref),
                               weight=args.Q_r, name='ref_loss').to(device)
    control_smoothing = Objective(['U_pred_policy'], lambda x: F.mse_loss(x[1:], x[:-1]),
                                  weight=args.Q_du, name='control_smoothing').to(device)
    observation_lower_bound_penalty = Objective(['Y_pred_dynamics', 'Y_minf'],
                                                lambda x, xmin: torch.mean(F.relu(-x[:, :, :1] + xmin)),
                                                weight=args.Q_con_y, name='observation_lower_bound').to(device)
    observation_upper_bound_penalty = Objective(['Y_pred_dynamics', 'Y_maxf'],
                                                lambda x, xmax: torch.mean(F.relu(x[:, :, :1] - xmax)),
                                                weight=args.Q_con_y, name='observation_upper_bound').to(device)
    inputs_lower_bound_penalty = Objective(['U_pred_policy', 'U_minf'], lambda x, xmin: torch.mean(F.relu(-x + xmin)),
                                           weight=args.Q_con_u, name='input_lower_bound').to(device)
    inputs_upper_bound_penalty = Objective(['U_pred_policy', 'U_maxf'], lambda x, xmax: torch.mean(F.relu(x - xmax)),
                                           weight=args.Q_con_u, name='input_upper_bound').to(device)
Example #6
def get_objective_terms(args, policy):
    if args.noise:
        output_key = "Y_pred_dynamics_noise"
    else:
        output_key = "Y_pred_dynamics"

    reference_loss = Objective(
        [output_key, "Rf"],
        lambda pred, ref: F.mse_loss(pred[:, :, args.controlled_outputs], ref),
        weight=args.Q_r,
        name="ref_loss",
    )
    regularization = Objective(
        [f"reg_error_{policy.name}"], lambda reg: reg, weight=args.Q_sub, name="reg_loss",
    )
    control_smoothing = Objective(
        [f"U_pred_{policy.name}"],
        lambda x: F.mse_loss(x[1:], x[:-1]),
        weight=args.Q_du,
        name="control_smoothing",
    )
    observation_lower_bound_penalty = Objective(
        [output_key, "Y_minf"],
        lambda x, xmin: torch.mean(F.relu(-x[:, :, args.controlled_outputs] + xmin)),
        weight=args.Q_con_y,
        name="observation_lower_bound",
    )
    observation_upper_bound_penalty = Objective(
        [output_key, "Y_maxf"],
        lambda x, xmax: torch.mean(F.relu(x[:, :, args.controlled_outputs] - xmax)),
        weight=args.Q_con_y,
        name="observation_upper_bound",
    )
    inputs_lower_bound_penalty = Objective(
        [f"U_pred_{policy.name}", "U_minf"],
        lambda x, xmin: torch.mean(F.relu(-x + xmin)),
        weight=args.Q_con_u,
        name="input_lower_bound",
    )
    inputs_upper_bound_penalty = Objective(
        [f"U_pred_{policy.name}", "U_maxf"],
        lambda x, xmax: torch.mean(F.relu(x - xmax)),
        weight=args.Q_con_u,
        name="input_upper_bound",
    )

    # Constraints tightening
    if args.con_tighten:
        observation_lower_bound_penalty = Objective(
            [output_key, "Y_minf"],
            lambda x, xmin: torch.mean(F.relu(-x[:, :, args.controlled_outputs] + xmin + args.tighten)),
            weight=args.Q_con_y,
            name="observation_lower_bound",
        )
        observation_upper_bound_penalty = Objective(
            [output_key, "Y_maxf"],
            lambda x, xmax: torch.mean(F.relu(x[:, :, args.controlled_outputs] - xmax + args.tighten)),
            weight=args.Q_con_y,
            name="observation_upper_bound",
        )
        inputs_lower_bound_penalty = Objective(
            [f"U_pred_{policy.name}", "U_minf"],
            lambda x, xmin: torch.mean(F.relu(-x + xmin + args.tighten)),
            weight=args.Q_con_u,
            name="input_lower_bound",
        )
        inputs_upper_bound_penalty = Objective(
            [f"U_pred_{policy.name}", "U_maxf"],
            lambda x, xmax: torch.mean(F.relu(x - xmax + args.tighten)),
            weight=args.Q_con_u,
            name="input_upper_bound",
        )

    # Loss clipping
    if args.loss_clip:
        reference_loss = Objective(
            [output_key, "Rf", "Y_minf", "Y_maxf"],
            lambda pred, ref, xmin, xmax: F.mse_loss(
                pred[:, :, args.controlled_outputs] * torch.gt(ref, xmin).int() * torch.lt(ref, xmax).int(),
                ref * torch.gt(ref, xmin).int() * torch.lt(ref, xmax).int(),
            ),
            weight=args.Q_r,
            name="ref_loss",
        )

    objectives = [regularization, reference_loss]
    constraints = [
        observation_lower_bound_penalty,
        observation_upper_bound_penalty,
        inputs_lower_bound_penalty,
        inputs_upper_bound_penalty,
    ]

    return objectives, constraints
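
A small standalone sketch (plain PyTorch, illustrative values only) of the loss-clipping trick used above: reference entries outside [xmin, xmax] are masked to zero, so only in-bound targets contribute to the MSE:

import torch
import torch.nn.functional as F

pred = torch.tensor([0.5, 0.9, 1.5])
ref = torch.tensor([0.4, 1.3, 1.4])                            # 1.3 and 1.4 exceed xmax
xmin, xmax = torch.tensor(0.0), torch.tensor(1.0)

mask = torch.gt(ref, xmin).int() * torch.lt(ref, xmax).int()   # 1 inside the bounds, 0 outside
clipped_loss = F.mse_loss(pred * mask, ref * mask)             # only the first entry is penalized
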
Example #7
def get_objective_terms(args, dataset, estimator, dynamics_model):
    xmin = -0.2
    xmax = 1.2
    dxudmin = -0.05
    dxudmax = 0.05
    estimator_loss = Objective(
        [f"X_pred_{dynamics_model.name}", f"x0_{estimator.name}"],
        lambda X_pred, x0: F.mse_loss(X_pred[-1, :-1, :], x0[1:]),
        weight=args.Q_e,
        name="arrival_cost",
    )
    regularization = Objective(
        [f"reg_error_{estimator.name}", f"reg_error_{dynamics_model.name}"],
        lambda reg1, reg2: reg1 + reg2,
        weight=args.Q_sub,
        name="reg_error",
    )
    reference_loss = Objective([f"Y_pred_{dynamics_model.name}", "Yf"],
                               F.mse_loss,
                               weight=args.Q_y,
                               name="ref_loss")
    state_smoothing = Objective(
        [f"X_pred_{dynamics_model.name}"],
        lambda x: F.mse_loss(x[1:], x[:-1]),
        weight=args.Q_dx,
        name="state_smoothing",
    )
    observation_lower_bound_penalty = Objective(
        [f"Y_pred_{dynamics_model.name}"],
        lambda x: torch.mean(F.relu(-x + xmin)),
        weight=args.Q_con_x,
        name="y_low_bound_error",
    )
    observation_upper_bound_penalty = Objective(
        [f"Y_pred_{dynamics_model.name}"],
        lambda x: torch.mean(F.relu(x - xmax)),
        weight=args.Q_con_x,
        name="y_up_bound_error",
    )

    objectives = [regularization, reference_loss, estimator_loss]
    constraints = [
        state_smoothing,
        observation_lower_bound_penalty,
        observation_upper_bound_penalty,
    ]

    if args.ssm_type != "blackbox":
        if "U" in dataset.data:
            inputs_max_influence_lb = Objective(
                [f"fU_{dynamics_model.name}"],
                lambda x: torch.mean(F.relu(-x + dxudmin)),
                weight=args.Q_con_fdu,
                name="input_influence_lb",
            )
            inputs_max_influence_ub = Objective(
                [f"fU_{dynamics_model.name}"],
                lambda x: torch.mean(F.relu(x - dxudmax)),
                weight=args.Q_con_fdu,
                name="input_influence_ub",
            )
            constraints += [inputs_max_influence_lb, inputs_max_influence_ub]
        if "D" in dataset.data:
            disturbances_max_influence_lb = Objective(
                [f"fD_{dynamics_model.name}"],
                lambda x: torch.mean(F.relu(-x + dxudmin)),
                weight=args.Q_con_fdu,
                name="dist_influence_lb",
            )
            disturbances_max_influence_ub = Objective(
                [f"fD_{dynamics_model.name}"],
                lambda x: torch.mean(F.relu(x - dxudmax)),
                weight=args.Q_con_fdu,
                name="dist_influence_ub",
            )
            constraints += [
                disturbances_max_influence_lb,
                disturbances_max_influence_ub,
            ]

    return objectives, constraints
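
For reference, a tiny standalone sketch (plain PyTorch, illustrative values) of the ReLU penalty terms used throughout these examples: each term is zero while the prediction respects its bound and grows linearly with the size of the violation:

import torch
import torch.nn.functional as F

x = torch.tensor([-0.5, 0.0, 0.5, 1.5])
xmin, xmax = -0.2, 1.2

lower_penalty = torch.mean(F.relu(-x + xmin))   # only -0.5 violates the lower bound -> 0.3 / 4
upper_penalty = torch.mean(F.relu(x - xmax))    # only 1.5 violates the upper bound -> 0.3 / 4
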
Example #8
    },
    nsteps=args.nsteps,
    bias=args.bias,
    linear_map=linmap,
    nonlin=activation,
    hsizes=[args.nx_hidden] * args.n_layers,
    input_keys=['Yp'],
    name='policy',
)
"""
# # #  DPC objectives and constraints
"""
# objectives
regulation_loss = Objective(
    [f'Y_pred_{dynamics_model.name}'],
    lambda x: F.mse_loss(x, torch.zeros_like(x)),  # quadratic penalty on the state, per the x^T*Qx*x cost
    weight=args.Qx,
    name="x^T*Qx*x loss",
)
action_loss = Objective(
    [f"U_pred_{policy.name}"],
    lambda x: F.mse_loss(x, torch.zeros_like(x)),  # quadratic penalty on the control action, per the u^T*Qu*u cost
    weight=args.Qu,
    name="u^T*Qu*u loss",
)
# regularization
regularization = Objective(
    [f"reg_error_{policy.name}"],
    lambda reg: reg,
    weight=args.Q_sub,
    name="reg_loss",
)