Example #1
def constraint_variable_to_interval(variable, LB, UB):
    # constrain variable to the open interval (LB, UB)
    p1 = Constraint(ConstraintType('GREATER'))  # variable > LB
    p1.monomials = [Monomial(1, variable)]
    p1.scalar = LB
    p2 = Constraint(ConstraintType('LESS'))  # variable < UB
    p2.monomials = [Monomial(1, variable)]
    p2.scalar = UB
    return [p1, p2]
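For context, a minimal usage sketch, assuming Constraint, ConstraintType, and Monomial are importable from the same constraint module used throughout these examples (the import path is hypothetical):

# from properties.constraints import Constraint, ConstraintType, Monomial  # hypothetical path
bounds = constraint_variable_to_interval('x', 0.0, 1.0)
# bounds[0] encodes  x > 0.0,  bounds[1] encodes  x < 1.0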
Example #2
 def __init__(self, n_worker=4):
     # initialize "clean" query
     self.clear()
     self.n_worker = n_worker
     self.eq_type_map = {
         ConstraintType('EQUALITY'): MarabouCore.Equation.EQ,
         ConstraintType('LESS_EQ'): MarabouCore.Equation.LE,
         ConstraintType('GREATER_EQ'): MarabouCore.Equation.GE
     }
Example #3
 def add_marabou_eq(self, coeffs, variables, eq_type, scalar):
     if eq_type in [ConstraintType('LESS'), ConstraintType('GREATER')]:
         raise NotImplementedError
         # TODO: apply epsilon conversion by adding a slack variable = epsilon
         # to convert from a strict inequality to a non-strict one
     elif eq_type == ConstraintType('NOT_EQUAL'):
         raise NotImplementedError
     assert (len(coeffs) == len(variables))
     eq = MarabouCore.Equation(self.eq_type_map[eq_type])
     for i in range(len(coeffs)):
         eq.addAddend(coeffs[i], variables[i])
     eq.setScalar(scalar)
     self.ipq.addEquation(eq)
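The TODO above refers to a standard workaround for solvers that accept only non-strict inequalities: tighten the bound by a small epsilon. A minimal sketch of that conversion, assuming the ConstraintType class used above (EPSILON and the helper name are illustrative, not part of the original code):

EPSILON = 1e-6  # assumed tolerance; choosing it too large changes the feasible set

def strict_to_nonstrict(eq_type, scalar):
    # x < s  becomes  x <= s - EPSILON
    if eq_type == ConstraintType('LESS'):
        return ConstraintType('LESS_EQ'), scalar - EPSILON
    # x > s  becomes  x >= s + EPSILON
    if eq_type == ConstraintType('GREATER'):
        return ConstraintType('GREATER_EQ'), scalar + EPSILON
    return eq_type, scalar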
Example #4
    def set_input_output(self, condensed=False):
        # find input variables of the model
        input_idx = [l.input.name
                     for l in self.layers].index(self.model.input.name)
        self.model_input_vars = self.input_vars[input_idx].copy()

        # find output variables of the model
        output_layer_idx = [l.output.name
                            for l in self.layers].index(self.model.output.name)
        output_layer_activ_func = self.activations[output_layer_idx]
        if (condensed) and output_layer_activ_func == "linear":
            self.model_output_vars = self.output_vars[output_layer_idx].copy()
            # no additional constraint
        else:
            output_layer_before_activ_func = self.output_vars[output_layer_idx]
            output_layer_after_activ_func = [
                getNewVariable() for _ in self.output_vars[output_layer_idx]
            ]
            self.model_output_vars = output_layer_after_activ_func
            if output_layer_activ_func == "linear":
                w = np.eye(len(output_layer_before_activ_func))
                A = np.hstack((w, -w))
                b = np.zeros(2 * len(output_layer_before_activ_func))
                x = np.array(output_layer_before_activ_func +
                             output_layer_after_activ_func)
                self.constraints.append(
                    MatrixConstraint(ConstraintType('EQUALITY'), A=A, x=x,
                                     b=b))
            else:
                for v_in, v_out in zip(output_layer_before_activ_func,
                                       output_layer_after_activ_func):
                    self.constraints.append(
                        ReluConstraint(varin=v_in, varout=v_out))
Example #5
 def additionConstraint(self, op):
     """
     Function to generate equations corresponding to addition
     Arguments:
         op: (tf.op) representing add operation
     """
     input_ops = [i.op for i in op.inputs]
     assert len(input_ops) == 2
     input1 = input_ops[0]
     input2 = input_ops[1]
     assert self.isVariable(input1)
     if self.isVariable(input2):
         outputVars = self.getValues(op).reshape(-1, 1)
         input1Vars = self.getValues(input1).reshape(-1, 1)
         input2Vars = self.getValues(input2).reshape(-1, 1)
         assert len(input1Vars) == len(input2Vars)
         assert len(outputVars) == len(input1Vars)
         # x + y = z   -->   [I, I, -I] [x; y; z] = 0
         A = np.hstack((np.eye(len(input1Vars)), np.eye(len(input2Vars)),
                        -np.eye(len(outputVars))))
         x_constraint = np.vstack((input1Vars, input2Vars, outputVars))
         b_constraint = np.zeros((len(outputVars), 1))
         c = MatrixConstraint(ConstraintType('EQUALITY'),
                              A=A,
                              x=x_constraint,
                              b=b_constraint)
         self.constraints.append(c)
     else:
         self.biasAddConstraint(op)
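The [I, I, -I] encoding can be sanity-checked with plain NumPy: stacking [x; y; x + y] must satisfy A z = 0.

import numpy as np

n = 3
x = np.random.rand(n, 1)
y = np.random.rand(n, 1)
z = x + y                                          # values the output variables should take
A = np.hstack((np.eye(n), np.eye(n), -np.eye(n)))  # [I, I, -I]
assert np.allclose(A @ np.vstack((x, y, z)), 0)    # x + y - z = 0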
Example #6
    def biasAddConstraint(self, op):
        """
        Function to generate equations corresponding to bias addition
        Arguments:
            op: (tf.op) representing bias add operation
        """
        ### Get variables and constants of inputs ###
        input_ops = [i.op for i in op.inputs]
        assert len(input_ops) == 2
        inputValues = [self.getValues(i) for i in input_ops]
        outputValues = self.getValues(op)
        inputVars = inputValues[0].reshape(-1, 1)
        inputConsts = inputValues[1].reshape(-1, 1)
        # broadcast the bias over the flattened batch; tile along rows so
        # the result stays a column vector
        inputConsts = np.tile(inputConsts,
                              (len(inputVars) // len(inputConsts), 1))
        outputVars = outputValues.reshape(-1, 1)
        assert len(inputVars) == len(outputVars) and len(outputVars) == len(
            inputConsts)
        ### END getting inputs ###

        # x + b = y  --> x - y = -b
        # [I -I] [x; y] = -b
        # (nx2n)x(2nx1) = (nx1)
        n = inputVars.shape[0]
        A = np.hstack((np.eye(n), -np.eye(n)))
        x = np.vstack((inputVars, outputVars))
        c = MatrixConstraint(ConstraintType('EQUALITY'),
                             A=A,
                             x=x,
                             b=-inputConsts)
        self.constraints.append(c)
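A quick NumPy check of both the bias tiling and the [I, -I] encoding (note the (reps, 1) tile shape, which keeps the bias a column vector):

import numpy as np

b = np.array([[1.0], [2.0]])                  # bias for 2 output units
x = np.random.rand(6, 1)                      # flattened batch: 3 samples x 2 units
b_tiled = np.tile(b, (len(x) // len(b), 1))   # shape (6, 1)
y = x + b_tiled
A = np.hstack((np.eye(6), -np.eye(6)))        # [I, -I]
assert np.allclose(A @ np.vstack((x, y)), -b_tiled)  # x - y = -b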
Example #7
 def setup_euler_constraint(self, dx_vec, dt):
     for x, dx, next_x in zip(self.states.reshape(-1), dx_vec,
                              self.next_states.reshape(-1)):
         # next_x = x + dt*dx
         c = Constraint(ConstraintType('EQUALITY'))
         c.monomials = [
             Monomial(1, x),
             Monomial(dt, dx),
             Monomial(-1, next_x)
         ]
         self.euler_constraints.append(c)
Example #8
 def setup_euler_constraints(self):
     for x, dx, next_x in zip(self.states.reshape(-1), self.dx,
                              self.next_states.reshape(-1)):
         # next_x = x + dx*dt
         c = Constraint(ConstraintType('EQUALITY'))
         c.monomials = [
             Monomial(1, x),
             Monomial(self.dt, dx),
             Monomial(-1, next_x)
         ]
         self.constraints.append(c)
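The monomials encode next_x = x + dt*dx as 1*x + dt*dx - 1*next_x = 0; a one-line numeric check:

x, dx, dt = 2.0, 0.5, 0.1
next_x = x + dt * dx                               # forward-Euler step
assert abs(1 * x + dt * dx + (-1) * next_x) < 1e-12  # matches the monomial encoding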
Example #9
def matrix_equality_constraint(varsin, varsout):
    """
    preconditions: 
    varsin, varsout are vertical vectors (nx1), of type 2D np array or similar
    """
    # x = y (want in form Az = b)
    # z := [x; y]
    # A = [I, -I]
    # b = 0
    # [I, -I][x; y] = 0  -> x - y = 0
    A = np.hstack((np.eye(len(varsin)), -np.eye(len(varsout))))
    z = np.vstack((varsin, varsout))
    b = np.zeros((A.shape[0], 1))
    return MatrixConstraint(ConstraintType('EQUALITY'), A=A, x=z, b=b)
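A quick NumPy check that the returned system A z = b holds exactly when varsin equals varsout:

import numpy as np

varsin = np.array([[1.0], [2.0]])
A = np.hstack((np.eye(2), -np.eye(2)))                          # [I, -I]
assert np.allclose(A @ np.vstack((varsin, varsin)), 0)          # x - y = 0 when x == y
assert not np.allclose(A @ np.vstack((varsin, varsin + 1)), 0)  # violated otherwise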
Example #10
def equality_constraint(varsin, varsout):
    """
    If you need a list of scalar constraints instead of a single matrix constraint.
    """
    assert (len(varsin) == len(varsout))
    if len(varsin) > 1:
        mc = matrix_equality_constraint(varsin, varsout)
        return matrix_to_scalar(mc)
    else:
        mono1 = Monomial(1, varsin[0][0])
        mono2 = Monomial(-1, varsout[0][0])
        return Constraint(ConstraintType('EQUALITY'),
                          monomials=[mono1, mono2],
                          scalar=0)
Example #11
    def read_equations(self):
        for i in range(self.n_eq):
            eq_vars = self.f['/eq/vars%d' % (i + 1)][()]
            for v in eq_vars:
                if v not in self.var_dict.keys():
                    self.var_dict[v] = getNewVariable('xd')

            # use the builtin float (np.float is deprecated)
            coeffs = self.f['/eq/coeffs%d' % (i + 1)][()].astype(float)
            b = self.f['/eq/scalar%d' % (i + 1)][()].astype(float)[0]
            monomial_list = [
                Monomial(c, self.var_dict[v]) for (c, v) in zip(coeffs, eq_vars)
            ]
            self.eq_list.append(
                Constraint(ConstraintType('EQUALITY'), monomial_list, b))
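read_equations implies an HDF5 layout with 1-indexed datasets /eq/vars{i}, /eq/coeffs{i}, /eq/scalar{i}. A hedged sketch of producing a file in that format with h5py (the layout is inferred from the reader above, not from separate documentation):

import h5py
import numpy as np

with h5py.File('eqs.h5', 'w') as f:
    # one equation: 2*v1 + 3*v2 = 5
    f['/eq/vars1'] = np.array([b'v1', b'v2'])  # variable names
    f['/eq/coeffs1'] = np.array([2.0, 3.0])    # matching coefficients
    f['/eq/scalar1'] = np.array([5.0])         # right-hand side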
Example #12
    def read_inequalities(self):
        for i in range(self.n_ineq):
            left_var = self.f['/ineq/varleft%d' % (i + 1)][()][0]
            right_var = self.f['/ineq/varright%d' % (i + 1)][()][0]
            for v in [left_var, right_var]:
                if v not in self.var_dict.keys():
                    self.var_dict[v] = getNewVariable('xd')

            # add left_var <= right_var
            monomial_list = [
                Monomial(1, self.var_dict[left_var]),
                Monomial(-1, self.var_dict[right_var])
            ]
            self.ineq_list.append(
                Constraint(ConstraintType('LESS_EQ'), monomial_list, 0))
Example #13
    def mulConstraint(self, op):
        """
        Function to generate equations corresponding to elementwise matrix multiplication 
        Arguments:
            op: (tf.op) representing elementwise multiplication operation
        TODO: this is unecessarily verbose
        """
        ### Get variables and constants of inputs ###
        input_ops = [i.op for i in op.inputs]
        inputValues = [self.getValues(i) for i in input_ops]
        outputValues = self.getValues(op)
        assert not (self.isVariable(input_ops[0])
                    and self.isVariable(input_ops[1]))
        if self.isVariable(input_ops[0]):
            #convention = "xW"
            x = inputValues[0]
            W = inputValues[1]
        elif self.isVariable(input_ops[1]):
            #convention = "Wx"
            W = inputValues[0]
            x = inputValues[1]
        else:
            raise NotImplementedError(
                "Multiplying two constants not supported")
        W = W.reshape(-1)
        x = x.reshape(-1)
        if x.shape != W.shape:
            # broadcast
            W = np.tile(W, len(x) // len(W))
        assert x.shape == W.shape
        y = outputValues.reshape(-1)
        assert x.shape == y.shape
        ### END getting inputs ###

        ### Generate actual equations ###
        # elementwise product: y_i = w_i * x_i
        # [diag(w), -I] [x; y] = 0
        # A is n x 2n, [x; y] is 2n x 1
        n = len(x)
        A = np.hstack((np.diag(W), -np.eye(n)))
        x_constraint = np.append(x, y).reshape(-1, 1)
        b_constraint = np.zeros((n, 1))
        c = MatrixConstraint(ConstraintType('EQUALITY'),
                             A=A,
                             x=x_constraint,
                             b=b_constraint)
        self.constraints.append(c)
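A NumPy check of the elementwise encoding (the diag form is what the shape assertions in the method require):

import numpy as np

w = np.array([2.0, 3.0])
x = np.array([1.0, 4.0])
y = w * x                                    # elementwise product: [2, 12]
A = np.hstack((np.diag(w), -np.eye(2)))      # [diag(w), -I], shape (2, 4)
z = np.append(x, y).reshape(-1, 1)           # [x; y]
assert np.allclose(A @ z, 0)                 # w_i * x_i - y_i = 0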
Example #14
    def cnf_conversion_helper(self, constraint):
        """
        Take a constraint of the form
        5x + 6y - 5 R 0 and turn it into

        Y == 5x + 6y - 5
        (so that we can later assert Y >= 0, or max(Y, something_else) >= 0)
        """
        # turn into a >= inequality, e.g. 5x + 6y >= 5
        geq_comp = constraint.get_geq()
        # define new var: Y = 5x + 6y - 5
        new_var_constraint = copy.deepcopy(geq_comp)
        new_var_constraint.type = ConstraintType('EQUALITY')
        Y = self.get_new_var()
        # 5x + 6y - Y == 5  <=>  Y == 5x + 6y - 5
        new_var_constraint.monomials += [Monomial(-1, Y)]
        return [new_var_constraint, Y]
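A concrete instance of the rewrite: for 5x + 6y >= 5 the helper introduces the equality 5x + 6y - Y = 5 (i.e., Y = 5x + 6y - 5), so asserting Y >= 0 recovers the original inequality. A numeric spot check:

x, y = 1.0, 1.0
Y = 5 * x + 6 * y - 5                     # Y = 6.0
assert 5 * x + 6 * y - Y == 5             # the introduced equality constraint
assert (Y >= 0) == (5 * x + 6 * y >= 5)   # Y >= 0 iff the original inequality holds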
Example #15
def test_marabou_interface(alpha,
                           prop_desc,
                           n_invar,
                           with_relu=False,
                           with_max=False):
    # create controller object; this is just a placeholder, modified below
    model = load_model(
        "../OverApprox/models/single_pend_nn_controller_lqr_data.h5")
    controller = KerasController(keras_model=model)

    # rewrite to make a simple controller that is always equal to alpha*x
    controller.control_outputs = [['c']]
    controller.state_inputs = [['xc']]
    fake_constraint = []
    if with_relu:
        alpha_times_x = 'var1'
        monomial_list = [
            Monomial(alpha, controller.state_inputs[0][0]),
            Monomial(-1, alpha_times_x)
        ]
        fake_constraint.append(
            Constraint(ConstraintType('EQUALITY'), monomial_list, 0.0))
        relu_constraint = [
            ReluConstraint(varin=alpha_times_x,
                           varout=controller.control_outputs[0][0])
        ]
        controller.constraints = relu_constraint + fake_constraint
        controller.relus = relu_constraint
    elif with_max:
        alpha_times_x = 'var1'
        monomial_list = [
            Monomial(alpha, controller.state_inputs[0][0]),
            Monomial(-1, alpha_times_x)
        ]
        fake_constraint.append(
            Constraint(ConstraintType('EQUALITY'), monomial_list, 0.0))
        max_second_arg = 'var2'
        fake_constraint.append(
            Constraint(ConstraintType('EQUALITY'),
                       [Monomial(1, max_second_arg)], -1 / 2))
        max_constraint = [
            MaxConstraint(varsin=[alpha_times_x, max_second_arg],
                          varout=controller.control_outputs[0][0])
        ]
        controller.constraints = max_constraint + fake_constraint
        controller.relus = []
    else:
        monomial_list = [
            Monomial(-1, controller.control_outputs[0][0]),
            Monomial(alpha, controller.state_inputs[0][0])
        ]
        fake_constraint = [
            Constraint(ConstraintType('EQUALITY'), monomial_list, 0.0)
        ]
        controller.constraints = fake_constraint
        controller.relus = []

    # create overt dynamics object; this is just a placeholder, modified below
    overt_obj = OvertConstraint(
        "../OverApprox/models/single_pend_acceleration_overt.h5")

    # rewrite to make simple dynamics where dx = cd - 0.5
    overt_obj.control_vars = [['cd']]
    overt_obj.state_vars = [['x']]
    overt_obj.output_vars = [['dx']]
    monomial_list2 = [
        Monomial(1, overt_obj.control_vars[0][0]),
        Monomial(-1, overt_obj.output_vars[0][0])
    ]
    fake_constraint2 = [
        Constraint(ConstraintType('EQUALITY'), monomial_list2, 0.5)
    ]
    overt_obj.constraints = fake_constraint2

    simple_dynamics = Dynamics(np.array(['x']), np.array(['cd']))
    next_states = simple_dynamics.next_states.reshape(1, )

    # x_next = x + dt*dx
    dt = 1
    c1 = Constraint(ConstraintType('EQUALITY'))
    c1.monomials = [
        Monomial(1, overt_obj.state_vars[0][0]),
        Monomial(dt, overt_obj.output_vars[0][0]),
        Monomial(-1, next_states[0])
    ]

    simple_dynamics.constraints = [c1] + overt_obj.constraints

    print(len(simple_dynamics.constraints))
    print(len(controller.constraints))

    # create transition relation using controller and dynamics
    tr = TFControlledTransitionRelation(dynamics_obj=simple_dynamics,
                                        controller_obj=controller)

    # initial set
    init_set = {overt_obj.state_vars[0][0]: (0., 1.)}

    # build the transition system as an (S, I(S), TR) tuple
    ts = TransitionSystem(states=tr.states,
                          initial_set=init_set,
                          transition_relation=tr)

    # property on the state variable, e.g. x < 0.105
    p = Constraint(ConstraintType(prop_desc["type"]))
    p.monomials = [Monomial(1, overt_obj.state_vars[0][0])]
    p.scalar = prop_desc["scalar"]
    prop = ConstraintProperty([p], [overt_obj.state_vars[0][0]])

    # solver
    solver = MarabouWrapper()
    algo = BMC(ts=ts, prop=prop, solver=solver)
    result, vals, stats = algo.check_invariant_until(n_invar)
    return result.name
Example #16
    b1 = np.random.rand(2,1)
    output = tf.nn.relu(tf.matmul(W1,x) + b1)
    W2 = np.random.rand(1,2)
    b2 = np.random.rand(1,1)
    output = tf.nn.relu(tf.matmul(W2,output) + b2)
    sess.run(tf.global_variables_initializer()) # actually sets Variable values to values specified

# smoosh all tf.Variables to tf.Constants, put into new graph
new_graph = smoosh_to_const(sess, output.op.name)
# create controller object with network
controller = TFController(tf_sess=tf.Session(graph=new_graph), inputNames=[x.op.name], outputName=output.op.name)

# create a super simple plant directly using constraint objects
dynamics = Dynamics(states=np.array([["x"], ["y"]]), controls=["u"], fun=np.sin)
# x' = relu(x + u)   ->   x + u - z = 0 , x' = relu(z)
c1 = Constraint(ConstraintType('EQUALITY'))
c1.monomials = [Monomial(1, "x"), Monomial(1,"u"), Monomial(-1,"z")]
c3 = ReluConstraint(varin="z", varout="x'")
# y' = y  ->  y - y' = 0
c2 = Constraint(ConstraintType('EQUALITY'))
c2.monomials = [Monomial(1,"y"), Monomial(-1, "y'")]
dynamics.constraints = [c1,c2,c3]

# create transition relation using controller and dynamics
tr = TFControlledTransitionRelation(dynamics_obj=dynamics,
                                    controller_obj=controller)

# initial set
init_set = {"x": (1.1,2), "y": (-1,1)}

# build the transition system as an (S, I(S), TR) tuple
Example #17
    def matMulConstraint(self, op):
        """
        Function to generate constraints corresponding to matrix multiplication
        Arguments:
            op: (tf.op) representing matrix multiplication operation
        """

        ### Get variables and constants of inputs ###
        input_ops = [i.op for i in op.inputs]
        if self.isVariable(input_ops[0]):
            convention = "xW"
        elif self.isVariable(input_ops[1]):
            convention = "Wx"
        else:
            raise NotImplementedError
        inputValues = [self.getValues(i) for i in input_ops]
        outputValues = self.getValues(op)
        aTranspose = op.node_def.attr['transpose_a'].b
        bTranspose = op.node_def.attr['transpose_b'].b
        a = inputValues[0]
        b = inputValues[1]
        if aTranspose:
            a = np.transpose(a)
        if bTranspose:
            b = np.transpose(b)
        assert (a.shape[0], b.shape[1]) == outputValues.shape
        assert a.shape[1] == b.shape[0]
        ### END getting inputs ###

        ### Generate actual constraints ###
        # Wx = y
        # [W, -I] [x; y] = 0
        # W \in mxn
        # I \in mxm
        # x \in nx1
        # y \in mx1
        if convention == "xW":
            x = a
            W = b
            # take transpose of W and store: from xW = y to W^T x^T = y^T to [W.T, -I] [x^T; y^T] = 0
            A = np.hstack((W.T, -np.eye(W.shape[1])))
            constraint_x = np.vstack((x.T, outputValues.T))
            constraint_b = np.zeros((W.shape[1], 1))
            c = MatrixConstraint(ConstraintType('EQUALITY'),
                                 A=A,
                                 x=constraint_x,
                                 b=constraint_b)
            self.constraints.append(c)
        elif convention == "Wx":
            W = a
            x = b
            # Wx = y -> [W, -I] [x; y] = 0
            A = np.hstack((W, -np.eye(W.shape[0])))
            constraint_x = np.vstack((x, outputValues))
            constraint_b = np.zeros((W.shape[0], 1))
            c = MatrixConstraint(ConstraintType('EQUALITY'),
                                 A=A,
                                 x=constraint_x,
                                 b=constraint_b)
            self.constraints.append(c)
        else:
            # unreachable: convention was set to "xW" or "Wx" above
            raise NotImplementedError
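The "xW" branch relies on (xW)^T = W^T x^T; a NumPy check of the transposed encoding for a single-row x:

import numpy as np

x = np.random.rand(1, 3)                       # row-vector input
W = np.random.rand(3, 2)
y = x @ W                                      # shape (1, 2)
A = np.hstack((W.T, -np.eye(W.shape[1])))      # [W^T, -I], shape (2, 5)
z = np.vstack((x.T, y.T))                      # [x^T; y^T], shape (5, 1)
assert np.allclose(A @ z, 0)                   # W^T x^T - y^T = 0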
Example #18
    def set_constraint(self, condensed=False):
        """
        This function assigned input and output variables to each layer.
        The construct the equality matrix constraints that prescribe the behavior of that model
        If there is any RNN layer, variables are built for all rollouts, i.e. n_input = n_output = n_input_1 * n_t
        The output of each layer is connected via the input of the next layer via the activation function.
        If the activation is relu, a reluconstraint is built. If the activation is linear, an identity maping is built.
        The later is somewhat redundant, but more expressive.

        if condensed=True, instead of creating identity mapping for layers with linear activation,
        the output of the layer is directly assigned to the input of the next layer.
        No new variables are created for the next layer inputs.
        """
        n_t = self.n_time
        for i in range(len(self.layers)):
            l, s_in, s_out = (self.layers[i], self.input_sizes[i],
                              self.output_sizes[i])

            # assign input variables: with the condensed option and a linear
            # activation, the previous layer's output variables are reused
            if condensed and (i > 0) and (self.activations[i - 1] == "linear"):
                x_in = self.output_vars[-1]  # assign last layer output
            else:
                x_in = [getNewVariable()
                        for _ in range(s_in * n_t)]  # assign new variables.

            # assign fresh output variables
            x_out = [getNewVariable() for _ in range(s_out * n_t)]
            self.input_vars.append(x_in)
            self.output_vars.append(x_out)

            # setup equality constraints depending on the layer type
            if isinstance(l, Dense):
                self.type.append("Dense")
                A, b = self.dense_constraint(n_t, *l.get_weights())
            elif isinstance(l, SimpleRNN):
                self.type.append("SimpleRNN")
                A, b = self.rnn_constraint(n_t, *l.get_weights())
            else:
                raise NotImplementedError(
                    "only Dense and SimpleRNN layers are supported.")

            # add constraint to the list of constraints. variables include all input and output variables.
            x = np.array(x_in + x_out)  # concatenate two lists
            self.constraints.append(
                MatrixConstraint(ConstraintType('EQUALITY'), A=A, x=x, b=b))

        # setup activation function equality (for linear) or inequality (for relu) constraints.
        for i in range(len(self.layers) - 1):
            assert len(self.input_vars[i + 1]) == len(self.output_vars[i])
            if self.activations[i] == "relu":
                for v_in, v_out in zip(self.output_vars[i],
                                       self.input_vars[i + 1]):
                    self.constraints.append(
                        ReluConstraint(varin=v_in, varout=v_out))
            elif self.activations[i] == "linear":
                if condensed:
                    pass  # no mapping is necessary
                else:
                    w = np.eye(len(self.output_vars[i]))
                    A = np.hstack((w, -w))
                    b = np.zeros(2 * len(self.output_vars[i]))
                    x = np.array(self.input_vars[i + 1] + self.output_vars[i])
                    self.constraints.append(
                        MatrixConstraint(ConstraintType('EQUALITY'),
                                         A=A,
                                         x=x,
                                         b=b))
            else:
                raise IOError("Activation %s is not supported" %
                              self.activations[i])
Example #19
    def convert_to_CNF(self, DNF_complements):
        """
        Converts complements of constraints in DNF to CNF
        using Max.
        Populates self.constraint_complements.
        """
        CNF_complements = []
        n_clauses = len(DNF_complements)
        if n_clauses == 0:
            pass

        elif n_clauses == 1:
            # define new var: Y = 5x + 6y - 5
            Y_definition, Y = self.cnf_conversion_helper(DNF_complements[0])
            # we want Y >= 0
            Y_ineq = Constraint(ConstraintType('GREATER_EQ'),
                                monomials=[Monomial(1, Y)],
                                scalar=0)
            CNF_complements.extend([Y_definition, Y_ineq])

        else:  # nclauses > 1
            # handle first ineq
            Y_def, Y = self.cnf_conversion_helper(DNF_complements[0])
            CNF_complements.append(Y_def)
            # then disjunct all the complements using MaxConstraint
            for c in DNF_complements[1:]:
                # take complement, and turn into >= inequality
                Z_def, Z = self.cnf_conversion_helper(c)
                # chain the disjunction: max(Y, Z) >= 0 ...
                Q = self.get_new_var()
                ###########################################################
                # represent max via ReLU: max(Y, Z) = relu(Y - Z) + Z
                YmZ = self.get_new_var()
                YmZdef = Constraint(ConstraintType('EQUALITY'),
                                    monomials=[
                                        Monomial(1, Y),
                                        Monomial(-1, Z),
                                        Monomial(-1, YmZ)
                                    ],
                                    scalar=0)
                RYmZ = self.get_new_var()
                RYmZdef = ReluConstraint(varin=YmZ, varout=RYmZ)
                # Q = relu(Y-Z) + Z
                max_constraint = Constraint(ConstraintType('EQUALITY'),
                                            monomials=[
                                                Monomial(1, RYmZ),
                                                Monomial(1, Z),
                                                Monomial(-1, Q)
                                            ],
                                            scalar=0)
                ###########################################################
                # max_constraint = MaxConstraint((Y,Z), Q) # version with max
                # CNF_complements.extend([Z_def, max_constraint]) # version with max
                ############################################################
                CNF_complements.extend(
                    [Z_def, YmZdef, RYmZdef, max_constraint])
                Y = Q
            # Q >= 0
            geq0 = Constraint(ConstraintType('GREATER_EQ'),
                              monomials=[Monomial(1, Q)],
                              scalar=0)
            CNF_complements.append(geq0)

        self.constraint_complements = CNF_complements
        return CNF_complements
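The ReLU rewrite of max used above rests on the identity max(Y, Z) = relu(Y - Z) + Z; a NumPy spot check:

import numpy as np

relu = lambda t: np.maximum(t, 0.0)
for Y, Z in [(3.0, -1.0), (-2.0, 5.0), (0.0, 0.0)]:
    assert relu(Y - Z) + Z == max(Y, Z)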
Example #20
# initial set
x1_init_set = (0.5, 1)
x2_init_set = (-0.5, 0.5)
init_set = {states[0]: x1_init_set, states[1]: x2_init_set}

# build the transition system as an (S, I(S), TR) tuple
ts = TransitionSystem(states=tr.states,
                      initial_set=init_set,
                      transition_relation=tr)

# solver
solver = GurobiPyWrapper()  # alternatively: MarabouWrapper()

prop_list = []
p1 = Constraint(ConstraintType('GREATER'))
p1.monomials = [Monomial(1, states[0])]
p1.scalar = 0.3
prop_list.append(p1)

p2 = Constraint(ConstraintType('LESS'))
p2.monomials = [Monomial(1, states[0])]
p2.scalar = 1.15
prop_list.append(p2)

# p3 = Constraint(ConstraintType('GREATER'))
# p3.monomials = [Monomial(1, states[1])]
# p3.scalar = -1.1
# prop_list.append(p3)
# p4 = Constraint(ConstraintType('LESS'))
Example #21
theta2d = states[3]

controls = overt_obj_1.control_vars
acceleration_1 = overt_obj_1.output_vars[0]
acceleration_2 = overt_obj_2.output_vars[0]

double_pendulum_dynamics = Dynamics(np.array(states).reshape(-1, 1), np.array(controls).reshape(-1, 1))
next_states = double_pendulum_dynamics.next_states.reshape(4,)

print(states, controls, acceleration_1, acceleration_2, next_states)

dt = 0.01

# theta1_next = theta1 + dt*theta1d
c1 = Constraint(ConstraintType('EQUALITY'))
c1.monomials = [Monomial(1, theta1), Monomial(dt, theta1d), Monomial(-1, next_states[0])]
print(c1.monomials)

# theta2_next = theta2 + dt*theta2d
c2 = Constraint(ConstraintType('EQUALITY'))
c2.monomials = [Monomial(1, theta2), Monomial(dt, theta2d), Monomial(-1, next_states[1])]
print(c2.monomials)

# theta1d_next = theta1d + dt*acceleration_1
c3 = Constraint(ConstraintType('EQUALITY'))
c3.monomials = [Monomial(1, theta1d), Monomial(dt, acceleration_1), Monomial(-1, next_states[2])]
print(c3.monomials)

# theta2d_next = theta2d + dt*acceleration_2