Example 1
    def additionConstraint(self, op):
        """
        Function to generate equations corresponding to addition
        Arguments:
            op: (tf.op) representing add operation
        """
        input_ops = [i.op for i in op.inputs]
        assert len(input_ops) == 2
        input1 = input_ops[0]
        input2 = input_ops[1]
        assert self.isVariable(input1)
        if self.isVariable(input2):
            outputVars = self.getValues(op).reshape(-1, 1)
            input1Vars = self.getValues(input1).reshape(-1, 1)
            input2Vars = self.getValues(input2).reshape(-1, 1)
            assert len(input1Vars) == len(input2Vars)
            assert len(outputVars) == len(input1Vars)
            # x + y = z   -->   [I, I, -I] [x; y; z] = 0
            A = np.hstack((np.eye(len(input1Vars)), np.eye(len(input2Vars)),
                           -np.eye(len(outputVars))))
            x_constraint = np.vstack((input1Vars, input2Vars, outputVars))
            b_constraint = np.zeros((len(outputVars), 1))
            c = MatrixConstraint(ConstraintType('EQUALITY'),
                                 A=A,
                                 x=x_constraint,
                                 b=b_constraint)
            self.constraints.append(c)
        else:
            self.biasAddConstraint(op)
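A quick standalone way to sanity-check this encoding (a sketch using only NumPy, independent of MatrixConstraint) is to verify that any point with z = x + y lies in the null space of the [I, I, -I] block matrix:

import numpy as np

n = 3
x = np.arange(n, dtype=float).reshape(-1, 1)
y = np.ones((n, 1))
z = x + y  # the addition being encoded

# x + y = z  -->  [I, I, -I] [x; y; z] = 0
A = np.hstack((np.eye(n), np.eye(n), -np.eye(n)))
assert np.allclose(A @ np.vstack((x, y, z)), np.zeros((n, 1)))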
Example 2
    def set_input_output(self, condensed=False):
        # find input variables of the model
        input_idx = [l.input.name
                     for l in self.layers].index(self.model.input.name)
        self.model_input_vars = self.input_vars[input_idx].copy()

        # find output variables of the model
        output_layer_idx = [l.output.name
                            for l in self.layers].index(self.model.output.name)
        output_layer_activ_func = self.activations[output_layer_idx]
        if (condensed) and output_layer_activ_func == "linear":
            self.model_output_vars = self.output_vars[output_layer_idx].copy()
            # no additional constraint
        else:
            output_layer_before_activ_func = self.output_vars[output_layer_idx]
            output_layer_after_active_func = [
                getNewVariable() for _ in self.output_vars[output_layer_idx]
            ]
            self.model_output_vars = output_layer_after_active_func
            if output_layer_activ_func == "linear":
                w = np.eye(len(output_layer_before_activ_func))
                A = np.hstack((w, -w))
                b = np.zeros(len(output_layer_before_activ_func))
                x = np.array(output_layer_before_activ_func +
                             output_layer_after_active_func)
                self.constraints.append(
                    MatrixConstraint(ConstraintType('EQUALITY'), A=A, x=x,
                                     b=b))
            else:
                for v_in, v_out in zip(output_layer_before_activ_func,
                                       output_layer_after_active_func):
                    self.constraints.append(
                        ReluConstraint(varin=v_in, varout=v_out))
Example 3
    def biasAddConstraint(self, op):
        """
        Function to generate equations corresponding to bias addition
        Arguments:
            op: (tf.op) representing bias add operation
        """
        ### Get variables and constants of inputs ###
        input_ops = [i.op for i in op.inputs]
        assert len(input_ops) == 2
        inputValues = [self.getValues(i) for i in input_ops]
        outputValues = self.getValues(op)
        inputVars = inputValues[0].reshape(-1, 1)
        inputConsts = inputValues[1].reshape(-1, 1)
        # broadcasting
        inputConsts = np.tile(inputConsts, len(inputVars) // len(inputConsts))
        outputVars = outputValues.reshape(-1, 1)
        assert len(inputVars) == len(outputVars) and len(outputVars) == len(
            inputConsts)
        ### END getting inputs ###

        # x + b = y  --> x - y = -b
        # [I -I] [x; y] = -b
        # (nx2n)x(2nx1) = (nx1)
        n = inputVars.shape[0]
        A = np.hstack((np.eye(n), -np.eye(n)))
        x = np.vstack((inputVars, outputVars))
        c = MatrixConstraint(ConstraintType('EQUALITY'),
                             A=A,
                             x=x,
                             b=-inputConsts)
        self.constraints.append(c)
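The bias encoding admits the same kind of standalone NumPy check (a sketch with arbitrary values, not tied to the class above): for any x and bias b, the stacked point [x; x + b] satisfies [I, -I] [x; y] = -b:

import numpy as np

n = 4
x = np.random.rand(n, 1)
bias = np.full((n, 1), 0.5)
y = x + bias

# x + b = y  -->  [I, -I] [x; y] = -b
A = np.hstack((np.eye(n), -np.eye(n)))
assert np.allclose(A @ np.vstack((x, y)), -bias)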
Example 4
    def convert_MatrixConstraint(self, c: MatrixConstraint):
        """
        Convert a constraint of type MatrixConstraint (Ax = b)
        to a list of 1D constraints in smtlib.
        """
        constraints = []
        m = c.A.shape[0]
        c.b = c.b.reshape(-1, 1)
        for row in range(m):
            constraints += self.convert_constraint_helper(
                c.A[row, :], c.x.flatten(), c.type.__repr__(), c.b[row][0])
        return constraints
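convert_constraint_helper belongs to the surrounding class and is not shown, but the row-splitting idea is simple: row i of Ax = b is the scalar constraint sum_j A[i, j] * x[j] = b[i], which maps to one smtlib assertion. The standalone sketch below is hypothetical (its row_to_smtlib helper and output format are illustrative, not the repo's actual helper):

import numpy as np

def row_to_smtlib(a_row, names, rel, rhs):
    # render one row of A x (rel) b, e.g. (= (+ (* 2.0 x) (* -1.0 y)) 0.0)
    terms = " ".join("(* %s %s)" % (coeff, name)
                     for coeff, name in zip(a_row, names))
    return "(%s (+ %s) %s)" % (rel, terms, rhs)

A = np.array([[2.0, -1.0], [1.0, 1.0]])
b = np.array([0.0, 3.0])
for i in range(A.shape[0]):
    print(row_to_smtlib(A[i, :], ["x", "y"], "=", b[i]))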
Example 5
def matrix_equality_constraint(varsin, varsout):
    """
    preconditions: 
    varsin, varsout are vertical vectors (nx1), of type 2D np array or similar
    """
    # x = y (want in form Az = b)
    # z := [x; y]
    # A = [I, -I]
    # b = 0
    # [I, -I][x; y] = 0  -> x - y = 0
    A = np.hstack((np.eye(len(varsin)), -np.eye(len(varsout))))
    z = np.vstack((varsin, varsout))
    b = np.zeros((A.shape[0], 1))
    return MatrixConstraint(ConstraintType('EQUALITY'), A=A, x=z, b=b)
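Usage mirrors how c4 is built in Example 10: pass two equally sized object-dtype column vectors of variable names, and the helper returns a constraint pinning them together elementwise (a sketch, assuming the MatrixConstraint import from the examples above):

import numpy as np

varsin = np.array([["x0"], ["x1"]], dtype="object")
varsout = np.array([["y0"], ["y1"]], dtype="object")
c = matrix_equality_constraint(varsin, varsout)
# c.A is the 2x4 block [I, -I], c.x stacks [x0; x1; y0; y1], c.b is 0,
# i.e. the scalar constraints x0 - y0 = 0 and x1 - y1 = 0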
Example 6
    def mulConstraint(self, op):
        """
        Function to generate equations corresponding to elementwise matrix multiplication 
        Arguments:
            op: (tf.op) representing elementwise multiplication operation
        TODO: this is unecessarily verbose
        """
        ### Get variables and constants of inputs ###
        input_ops = [i.op for i in op.inputs]
        inputValues = [self.getValues(i) for i in input_ops]
        outputValues = self.getValues(op)
        assert not (self.isVariable(input_ops[0])
                    and self.isVariable(input_ops[1]))
        if self.isVariable(input_ops[0]):
            #convention = "xW"
            x = inputValues[0]
            W = inputValues[1]
        elif self.isVariable(input_ops[1]):
            #convention = "Wx"
            W = inputValues[0]
            x = inputValues[1]
        else:
            raise NotImplementedError(
                "Multiplying two constants is not supported")
        W = W.reshape(-1)
        x = x.reshape(-1)
        if x.shape != W.shape:
            # broadcast
            W = np.tile(W, len(x) // len(W))
        assert x.shape == W.shape
        y = outputValues.reshape(-1)
        assert x.shape == y.shape
        ### END getting inputs ###

        ### Generate actual equations ###
        # w .* x = y  (elementwise)
        # [diag(w), -I] [x; y] = 0
        #     nx2n        2nx1
        n = len(x)
        A = np.hstack((np.diag(W), -np.eye(n)))
        x_constraint = np.vstack((x.reshape(-1, 1), y.reshape(-1, 1)))
        b_constraint = np.zeros((n, 1))
        c = MatrixConstraint(ConstraintType('EQUALITY'),
                             A=A,
                             x=x_constraint,
                             b=b_constraint)
        self.constraints.append(c)
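With the diagonal form the same null-space check applies (a standalone NumPy sketch): for any w and x, the point [x; w * x] satisfies [diag(w), -I] [x; y] = 0:

import numpy as np

n = 3
w = np.array([2.0, -1.0, 0.5])
x = np.random.rand(n)
y = w * x  # elementwise product

# w .* x = y  -->  [diag(w), -I] [x; y] = 0
A = np.hstack((np.diag(w), -np.eye(n)))
stacked = np.vstack((x.reshape(-1, 1), y.reshape(-1, 1)))
assert np.allclose(A @ stacked, np.zeros((n, 1)))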
Example 7
def feasible_problem():
    c1 = Constraint('EQUALITY',
                    monomials=[Monomial(1., "theta@0"),
                               Monomial(-2., "x@1")],
                    scalar=1.3)
    c2 = MatrixConstraint('LESS_EQ',
                          A=np.random.rand(3, 3),
                          x=["x", "theta", "x2"],
                          b=np.random.rand(3, ))
    # NOTE: FOR GUROBI INTERFACE, variable stringnames in x must be in a list NOT a numpy array, inside MatrixConstraint
    c3 = MaxConstraint(varsin=['t', 'b'], varout="theta")
    c4 = ReluConstraint(varin="bob", varout="alice")

    solver = GurobiPyWrapper()

    solver.assert_init({'theta': [7, 9], 'bob': [-10, 1]})

    solver.assert_constraints([c1, c2, c3, c4])

    result, vals, stats = solver.check_sat()
Example 8
    def set_constraint(self, condensed=False):
        """
        This function assigned input and output variables to each layer.
        The construct the equality matrix constraints that prescribe the behavior of that model
        If there is any RNN layer, variables are built for all rollouts, i.e. n_input = n_output = n_input_1 * n_t
        The output of each layer is connected via the input of the next layer via the activation function.
        If the activation is relu, a reluconstraint is built. If the activation is linear, an identity maping is built.
        The later is somewhat redundant, but more expressive.

        if condensed=True, instead of creating identity mapping for layers with linear activation,
        the output of the layer is directly assigned to the input of the next layer.
        No new variables are created for the next layer inputs.
        """
        n_t = self.n_time
        for i in range(len(self.layers)):
            l, s_in, s_out = self.layers[i], self.input_sizes[
                i], self.output_sizes[i]

            # assign all input and output variables.
            # with the condensed option and a linear activation, the output variables of the previous layer are reused as the input of the current layer
            if (condensed) and (i > 0) and (self.activations[i - 1]
                                            == "linear"):
                x_in = self.output_vars[-1]  # assign last layer output
            else:
                x_in = [getNewVariable()
                        for _ in range(s_in * n_t)]  # assign new variables.

            # assign new output variables.
            x_out = [getNewVariable() for _ in range(s_out * n_t)]
            self.input_vars.append(x_in)
            self.output_vars.append(x_out)

            # setup equality constraints depending on the layer type
            if isinstance(l, Dense):
                self.type.append("Dense")
                A, b = self.dense_constraint(n_t, *l.get_weights())
            elif isinstance(l, SimpleRNN):
                self.type.append("SimpleRNN")
                A, b = self.rnn_constraint(n_t, *l.get_weights())
            else:
                raise (NotImplementedError(
                    "only Dense and SimpleRNN layers are supported."))

            # add constraint to the list of constraints. variables include all input and output variables.
            x = np.array(x_in + x_out)  # concatenate two lists
            self.constraints.append(
                MatrixConstraint(ConstraintType('EQUALITY'), A=A, x=x, b=b))

        # setup activation function equality (for linear) or inequality (for relu) constraints.
        for i in range(len(self.layers) - 1):
            assert len(self.input_vars[i + 1]) == len(self.output_vars[i])
            if self.activations[i] == "relu":
                for v_in, v_out in zip(self.output_vars[i],
                                       self.input_vars[i + 1]):
                    self.constraints.append(
                        ReluConstraint(varin=v_in, varout=v_out))
            elif self.activations[i] == "linear":
                if condensed:
                    pass  # no mapping is necessary
                else:
                    w = np.eye(len(self.output_vars[i]))
                    A = np.hstack((w, -w))
                    b = np.zeros(len(self.output_vars[i]))
                    x = np.array(self.input_vars[i + 1] + self.output_vars[i])
                    self.constraints.append(
                        MatrixConstraint(ConstraintType('EQUALITY'),
                                         A=A,
                                         x=x,
                                         b=b))
            else:
                raise (IOError("Activation %s is not supported" %
                               self.activations[i]))
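dense_constraint and rnn_constraint are not shown above. Assuming the usual Keras convention x_out = x_in W + b, a Dense layer rewrites to [W^T, -I] [x_in; x_out] = -b, which is the (A, b) pair the loop expects. The sketch below is a hypothetical single-time-step (n_t = 1) version; the repo's actual builder may tile differently over rollouts:

import numpy as np

def dense_constraint_sketch(W, bias):
    # encode x_out = W^T x_in + bias as A [x_in; x_out] = b
    s_in, s_out = W.shape
    A = np.hstack((W.T, -np.eye(s_out)))  # s_out x (s_in + s_out)
    b = -bias.reshape(-1)
    return A, b

W = np.random.rand(4, 2)  # Keras Dense kernel, shape s_in x s_out
bias = np.random.rand(2)
A, b = dense_constraint_sketch(W, bias)
x_in = np.random.rand(4)
x_out = W.T @ x_in + bias
assert np.allclose(A @ np.concatenate((x_in, x_out)), b)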
Example 9
    def matMulConstraint(self, op):
        """
        Function to generate constraints corresponding to matrix multiplication
        Arguments:
            op: (tf.op) representing matrix multiplication operation
        """

        ### Get variables and constants of inputs ###
        input_ops = [i.op for i in op.inputs]
        if self.isVariable(input_ops[0]):
            convention = "xW"
        elif self.isVariable(input_ops[1]):
            convention = "Wx"
        else:
            raise NotImplementedError
        inputValues = [self.getValues(i) for i in input_ops]
        outputValues = self.getValues(op)
        aTranspose = op.node_def.attr['transpose_a'].b
        bTranspose = op.node_def.attr['transpose_b'].b
        a = inputValues[0]
        b = inputValues[1]
        if aTranspose:
            a = np.transpose(a)
        if bTranspose:
            b = np.transpose(b)
        assert (a.shape[0], b.shape[1]) == outputValues.shape
        assert a.shape[1] == b.shape[0]
        ### END getting inputs ###

        ### Generate actual constraints ###
        # Wx = y
        # [W, -I] [x; y] = 0
        # W \in mxn
        # I \in mxm
        # x \in nx1
        # y \in mx1
        if convention == "xW":
            x = a
            W = b
            # take transpose of W and store: from xW = y to W^T x^T = y^T to [W.T, -I] [x^T; y^T] = 0
            A = np.hstack((W.T, -np.eye(W.shape[1])))
            constraint_x = np.vstack((x.T, outputValues.T))
            constraint_b = np.zeros((W.shape[1], 1))
            c = MatrixConstraint(ConstraintType('EQUALITY'),
                                 A=A,
                                 x=constraint_x,
                                 b=constraint_b)
            self.constraints.append(c)
        elif convention == "Wx":
            W = a
            x = b
            # Wx = y -> [W, -I] [x; y] = 0
            A = np.hstack((W, -np.eye(W.shape[0])))
            constraint_x = np.vstack((x, outputValues))
            constraint_b = np.zeros((W.shape[0], 1))
            c = MatrixConstraint(ConstraintType('EQUALITY'),
                                 A=A,
                                 x=constraint_x,
                                 b=constraint_b)
            self.constraints.append(c)
        else:
            raise NotImplementedError("Unknown convention: %s" % convention)
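As with the other encodings, the "Wx" branch can be checked numerically in isolation (a NumPy sketch): for any W and x, the stacked point [x; Wx] satisfies [W, -I] [x; y] = 0:

import numpy as np

m, n = 2, 3
W = np.random.rand(m, n)
x = np.random.rand(n, 1)
y = W @ x

# Wx = y  -->  [W, -I] [x; y] = 0
A = np.hstack((W, -np.eye(m)))
assert np.allclose(A @ np.vstack((x, y)), np.zeros((m, 1)))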
Example 10
print(f.define_atom("A", "(< y 5)"))

print(f.negate("(< y 5)"))

c1 = Constraint('LESS_EQ', [Monomial(-6, "x"), Monomial(5, "y")], -2)
print(f.convert_Constraint(c1))

c2 = MaxConstraint(['v1', 'v2'], 'v3')
print(f.convert_MaxConstraint(c2))

c3 = ReluConstraint('p', 'q')
print(f.convert_ReluConstraint(c3))

c4 = MatrixConstraint('EQUALITY',
                      A=np.random.rand(2, 2),
                      x=np.array([['x'], ['y']], dtype='object'),
                      b=np.zeros((2, 1)))
print(f.convert_MatrixConstraint(c4))

c5 = NLConstraint('EQUALITY', "v1", "sin", "x")
print(f.convert_NLConstraint(c5))

print(f.declare_list([c1, c1, c2, c2, c3, c3, c4]))

print('\n'.join(f.declare_conjunction([c1, c2, c3, c4])[0]))


def linear_plant_test():
    """
    linear plant test
    # phi