Example #1
import torch
import torch.nn.functional as F

# NOTE: Objective is provided by the surrounding codebase; it is not defined here.
def ge(x, xmin, weight=1.0, penalty=F.relu, p=2):
    """
    High-level wrapper for the greater-than-or-equal constraint: x >= xmin

    :param x: (str or dict, {str: torch.Tensor}) constrained variable
    :param xmin: (str or dict, {str: torch.Tensor}) lower bound
    :param weight: weight of the penalty
    :param penalty: type of the penalty
    :param p: order of the penalty
    :return: Objective object
    """
    if isinstance(x, str):
        a = x
    elif isinstance(x, dict):
        a = list(x.keys())[0]
    else:
        raise TypeError('argument must be a string or dictionary')
    if isinstance(xmin, str):
        b = xmin
    elif isinstance(xmin, dict):
        b = list(xmin.keys())[0]
    else:
        raise TypeError('argument must be a string or dictionary')
    expression = Objective(
        [a, b],
        lambda x, xmin: torch.mean(penalty(-x + xmin)**p),
        weight=weight)
    return expression
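A minimal standalone sketch of the penalty term this wrapper builds, shown with hypothetical sample tensors (Objective itself is not needed to reproduce the arithmetic):

import torch
import torch.nn.functional as F

# hypothetical values for the constrained variable and its lower bound
x = torch.tensor([0.5, 2.0, -1.0])
xmin = torch.tensor([1.0, 1.0, 1.0])

# ge() penalizes violations of x >= xmin:
# relu(xmin - x) is zero where the constraint holds, positive otherwise
loss = torch.mean(F.relu(-x + xmin) ** 2)  # default penalty=F.relu, p=2
print(loss)  # tensor(1.4167): mean of [0.25, 0.00, 4.00]
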
Example #2
import torch.nn.functional as F

# NOTE: Objective is provided by the surrounding codebase; it is not defined here.
def equals(x1, x2, weight=1.0, penalty=F.mse_loss):
    """
    High-level wrapper for the equality constraint: x1 = x2

    :param x1: (str or dict, {str: torch.Tensor}) left-hand side
    :param x2: (str or dict, {str: torch.Tensor}) right-hand side
    :param weight: weight of the penalty
    :param penalty: type of the penalty
    :return: Objective object
    """
    if isinstance(x1, str):
        a = x1
    elif isinstance(x1, dict):
        a = list(x1.keys())[0]
    else:
        raise TypeError('argument must be a string or dictionary')
    if isinstance(x2, str):
        b = x2
    elif isinstance(x2, dict):
        b = list(x2.keys())[0]
    else:
        raise TypeError('argument must be a string or dictionary')
    expression = Objective([a, b], penalty, weight=weight)
    return expression
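Again as a standalone sketch with hypothetical tensors, the default penalty reduces to a plain MSE between the two variables:

import torch
import torch.nn.functional as F

# hypothetical values for the two variables being equated
x1 = torch.tensor([1.0, 2.0, 3.0])
x2 = torch.tensor([1.0, 2.5, 2.0])

# equals() with the default penalty is mean((x1 - x2)**2)
loss = F.mse_loss(x1, x2)
print(loss)  # tensor(0.4167): mean of [0.00, 0.25, 1.00]
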
Example #3
        dynamics_model_ctrl
    ]

    # component variables
    input_keys = list(set.union(*[set(comp.input_keys)
                                  for comp in components]))
    output_keys = list(
        set.union(*[set(comp.output_keys) for comp in components]))
    dataset_keys = list(set(dataset.train_data.keys()))
    plot_keys = {'Y_ctrl', 'U_ctrl'}  # variables to be plotted

    ##########################################
    ########## MULTI-OBJECTIVE LOSS ##########
    ##########################################
    regularization = Objective(['policy_reg_error'],
                               lambda reg: reg,
                               weight=args.Q_sub)
    reference_loss = Objective(['Y_ctrl', 'Rf'], F.mse_loss, weight=args.Q_r)
    system_id_loss = Objective(['Y_id', 'Yf'], F.mse_loss, weight=args.Q_y)
    observation_lower_bound_penalty = Objective(
        ['Y_ctrl', 'Y_minf'],
        lambda x, xmin: torch.mean(F.relu(-x + xmin)),
        weight=args.Q_con_y)
    observation_upper_bound_penalty = Objective(
        ['Y_ctrl', 'Y_maxf'],
        lambda x, xmax: torch.mean(F.relu(x - xmax)),
        weight=args.Q_con_y)
    inputs_lower_bound_penalty = Objective(
        ['U_ctrl', 'U_minf'],
        lambda x, xmin: torch.mean(F.relu(-x + xmin)),
        weight=args.Q_con_u)
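To make the sign conventions of the bound penalties above concrete, a standalone check with hypothetical tensors (note relu(-x + xmin) equals relu(xmin - x)):

import torch
import torch.nn.functional as F

# hypothetical outputs and box bounds
y = torch.tensor([0.2, 0.9, 1.5])
y_min = torch.tensor([0.0, 0.0, 0.0])
y_max = torch.tensor([1.0, 1.0, 1.0])

lower = torch.mean(F.relu(-y + y_min))  # tensor(0.): no lower-bound violations
upper = torch.mean(F.relu(y - y_max))   # tensor(0.1667): only y = 1.5 violates
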
Example #4
    components = [estimator, policy, dynamics_model]

    # component variables
    input_keys = list(set.union(*[set(comp.input_keys)
                                  for comp in components]))
    output_keys = list(
        set.union(*[set(comp.output_keys) for comp in components]))
    dataset_keys = list(set(dataset.train_data.keys()))
    plot_keys = ['Y_pred', 'U_pred']  # variables to be plotted

    ##########################################
    ########## MULTI-OBJECTIVE LOSS ##########
    ##########################################
    regularization = Objective(['policy_reg_error'],
                               lambda reg: reg,
                               weight=args.Q_sub)
    reference_loss = Objective(['Y_pred', 'Rf'], F.mse_loss, weight=args.Q_r)
    control_smoothing = Objective(['U_pred'],
                                  lambda x: F.mse_loss(x[1:], x[:-1]),
                                  weight=args.Q_du)
    observation_lower_bound_penalty = Objective(
        ['Y_pred', 'Y_minf'],
        lambda x, xmin: torch.mean(F.relu(-x + xmin)),
        weight=args.Q_con_y)
    observation_upper_bound_penalty = Objective(
        ['Y_pred', 'Y_maxf'],
        lambda x, xmax: torch.mean(F.relu(x - xmax)),
        weight=args.Q_con_y)
    inputs_lower_bound_penalty = Objective(
        ['U_pred', 'U_minf'],
        lambda x, xmin: torch.mean(F.relu(-x + xmin)),
        weight=args.Q_con_u)
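The control_smoothing term above compares the control sequence against a one-step-shifted copy of itself; a standalone sketch with a hypothetical sequence:

import torch
import torch.nn.functional as F

# hypothetical control trajectory, shape (time steps, features)
u = torch.tensor([[0.0], [0.5], [0.4], [1.0]])

# penalize large step-to-step changes in the control signal
loss = F.mse_loss(u[1:], u[:-1])
print(loss)  # tensor(0.2067): mean of [0.25, 0.01, 0.36]
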
Example #5
    # component variables
    input_keys = list(set.union(*[set(comp.input_keys)
                                  for comp in components]))
    output_keys = list(
        set.union(*[set(comp.output_keys) for comp in components]))
    dataset_keys = list(set(dataset.train_data.keys()))
    plot_keys = ['X', 'Z']  # variables to be plotted

    ##########################################
    ########## MULTI-OBJECTIVE LOSS ##########
    ##########################################
    # min_W ||z-z_ref||^2
    # s.t:  A * z - b + E * x <= 0
    #       z = f_W(x)
    regularization = Objective(['sol_map_reg_error'],
                               lambda reg1: reg1,
                               weight=args.Q_sub)
    quadratic_loss = Objective(['Z', 'Z_ref'], F.mse_loss, weight=args.Q_z)
    ineq_constraint = Objective(
        ['Z', 'X'],
        lambda z, x: torch.mean(
            F.relu(torch.mm(A, z.T) - b + torch.mm(E, x.T))),
        weight=args.Q_con_z)

    objectives = [regularization, quadratic_loss]
    constraints = [ineq_constraint]

    ##########################################
    ########## OPTIMIZE SOLUTION ############
    ##########################################
    model = Problem(objectives, constraints, components).to(device)
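For the inequality constraint A * z - b + E * x <= 0, a standalone numeric check of the penalty, with hypothetical problem data (A, E, b, z, x below are made up for illustration):

import torch
import torch.nn.functional as F

# hypothetical problem data: A is (m, nz), E is (m, nx), b is (m, 1)
A = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
E = torch.tensor([[0.5], [0.5]])
b = torch.tensor([[1.0], [1.0]])

# a batch of candidate solutions z (batch, nz) and parameters x (batch, nx)
z = torch.tensor([[0.8, 1.5]])
x = torch.tensor([[1.0]])

# positive residuals are constraint violations; relu keeps only those
residual = torch.mm(A, z.T) - b + torch.mm(E, x.T)
loss = torch.mean(F.relu(residual))
print(loss)  # tensor(0.6500): mean of [0.3, 1.0]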