Example #1
0
    def mulEquations(self, idx, op):
        """
        Function to generate equations corresponding to mul
        Arguments:
            idx: index passed through to getValues to locate cached values
            op: (tf.op) representing  mul operation
        """

        input_ops = [i.op for i in op.inputs]
        assert len(input_ops) == 2
        input1 = input_ops[0]
        input2 = input_ops[1]
        # Exactly one input may be a variable; otherwise the product is
        # nonlinear and cannot be expressed as a Marabou equation.
        assert (self.isVariable(input1) and not self.isVariable(input2)) or (
            not self.isVariable(input1) and self.isVariable(input2))
        curVars = self.getValues(idx, op).reshape(-1)
        prevVars1 = self.getValues(idx, input1).reshape(-1)
        prevVars2 = self.getValues(idx, input2).reshape(-1)

        assert len(prevVars1) == len(prevVars2)
        assert len(curVars) == len(prevVars1)
        # Orient the pair as (variables, constants) so one loop covers both
        # branches instead of the duplicated loop bodies.
        if self.isVariable(input1):
            variables, constants = prevVars1, prevVars2
        else:
            variables, constants = prevVars2, prevVars1
        # Encode c * x_in - x_out = 0 for every element.
        for i in range(len(curVars)):
            e = MarabouUtils.Equation()
            e.addAddend(constants[i], variables[i])
            e.addAddend(-1, curVars[i])
            e.setScalar(0.0)
            self.addEquation(e)
Example #2
0
    def mulEquations(self, op):
        """Function to generate equations corresponding to multiply and divide operations

        Args:
            op: (tf.op) representing an element-wise multiply or divide operation

        :meta private:
        """
        # Get inputs and outputs
        assert len(op.inputs) == 2
        # getValues returns (values, isVariable): Marabou variable numbers
        # when isVariable is True, constants otherwise.
        input1, input1_isVar = self.getValues(op.inputs[0].op)
        input2, input2_isVar = self.getValues(op.inputs[1].op)

        # For linear equations, both inputs cannot be variables
        assert not input1_isVar or not input2_isVar

        # If multiplying by 1, no need for new equations
        if input1_isVar and np.all(input2 == 1.0):
            self.varMap[op] = input1
            return
        if input2_isVar and np.all(input1 == 1.0):
            self.varMap[op] = input2
            return

        # Broadcast and flatten. Assert that lengths are all the same
        input1, input2 = np.broadcast_arrays(input1, input2)
        outputVars = self.makeNewVariables(op)
        # scalars/sgnVar/sgnScalar fold a following bias-add into these
        # equations -- presumably from a fused Add consumer; confirm in
        # getScalars.
        scalars, sgnVar, sgnScalar = self.getScalars(op, outputVars)
        input1 = input1.flatten()
        input2 = input2.flatten()
        outputVars = outputVars.flatten()
        scalars = scalars.flatten()
        assert len(input1) == len(input2)
        assert len(outputVars) == len(input1)

        # Handle divide by negating power
        power = 1.0
        if op.node_def.op in ["RealDiv"]:
            power = -1.0

        # Equations
        if input1_isVar:
            for i in range(len(outputVars)):
                # sgnVar * c^power * x_in - x_out = -sgnScalar * scalar
                e = MarabouUtils.Equation()
                e.addAddend(sgnVar * input2[i]**power, input1[i])
                e.addAddend(-1, outputVars[i])
                e.setScalar(-sgnScalar * scalars[i])
                self.addEquation(e)
        else:
            if power == -1.0:
                # c / x is nonlinear in x and cannot be encoded.
                raise RuntimeError(
                    "Dividing a constant by a variable is not allowed")
            for i in range(len(outputVars)):
                e = MarabouUtils.Equation()
                e.addAddend(sgnVar * input1[i], input2[i])
                e.addAddend(-1, outputVars[i])
                e.setScalar(-sgnScalar * scalars[i])
                self.addEquation(e)
Example #3
0
File: tot_net.py  Project: grese/scad_tot
    def set_expected_category(
            self,
            y_index: int,
            allowed_misclassifications: AllowedMisclassifications = None):
        '''Sets up the Marabou output query

        Adds one LE constraint per competing output: out_other <= out_expected,
        skipping the expected label and any explicitly allowed labels.

        Args:
            y_index (int): The index of the expected label
            allowed_misclassifications (AllowedMisclassifications, optional): Additional labels which are allowed. Defaults to None.
        '''
        n_outputs = self.num_outputs
        assert (y_index < n_outputs)
        if allowed_misclassifications is None:
            allowed_classes = tuple()
        else:
            allowed_classes = allowed_misclassifications.get_allowed_classes(
                y=y_index)
        expected_var = self.get_output_var(y_index)
        for other_y in range(n_outputs):
            if other_y == y_index or other_y in allowed_classes:
                continue
            eq = MarabouUtils.Equation(EquationType=MarabouCore.Equation.LE)
            eq.addAddend(1, self.get_output_var(other_y))
            eq.addAddend(-1, expected_var)
            eq.setScalar(0)
            self.network.addEquation(eq)
 def createVarEpsilonEquation(self, var, epsilon, val):
     """Add the linear equation var - epsilon = val (i.e. var = val + epsilon)."""
     equation = MarabouUtils.Equation()
     for coefficient, variable in ((1, var), (-1, epsilon)):
         equation.addAddend(coefficient, variable)
     equation.setScalar(val)
     self.addEquation(equation)
Example #5
0
    def processBiasAddRelations(self):
        """
        Either add an equation representing a bias add,
        Or eliminate one of the two variables in every other relation

        Each relation (x, xprime, c) encodes x = xprime + c. Where x is not
        referenced anywhere else, x is eliminated by substituting xprime + c
        into every equation; otherwise an explicit equation is emitted.
        """
        biasAddUpdates = dict()
        # All variables appearing on either side of any bias-add relation;
        # used to detect an x that occurs in more than one relation.
        participations = [rel[0] for rel in self.biasAddRelations] + \
                            [rel[1] for rel in self.biasAddRelations]
        for (x, xprime, c) in self.biasAddRelations:
            # x = xprime + c
            # replace x only if it does not occur anywhere else in the system
            if self.lowerBoundExists(x) or self.upperBoundExists(x) or \
                    self.participatesInPLConstraint(x) or \
                    len([p for p in participations if p == x]) > 1:
                # x is referenced elsewhere: keep it and encode
                # x - xprime = c explicitly.
                e = MarabouUtils.Equation()
                e.addAddend(1.0, x)
                e.addAddend(-1.0, xprime)
                e.setScalar(c)
                self.addEquation(e)
            else:
                # x is only used here: schedule substitution and pin the
                # now-unused variable to zero so it stays determined.
                biasAddUpdates[x] = (xprime, c)
                self.setLowerBound(x, 0.0)
                self.setUpperBound(x, 0.0)

        # Apply the scheduled substitutions to every existing equation.
        for equ in self.equList:
            participating = equ.getParticipatingVariables()
            for x in participating:
                if x in biasAddUpdates:  # if a variable to remove is part of this equation
                    xprime, c = biasAddUpdates[x]
                    equ.replaceVariable(x, xprime, c)
Example #6
0
    def matMulEquations(self, op):
        """
        Function to generate equations corresponding to matrix multiplication
        Arguments:
            op: (tf.op) representing matrix multiplication operation
        """

        ### Get variables and constants of inputs ###
        input_ops = [i.op for i in op.inputs]
        prevValues = [self.getValues(i) for i in input_ops]
        curValues = self.getValues(op)
        aTranspose = op.node_def.attr['transpose_a'].b
        bTranspose = op.node_def.attr['transpose_b'].b
        # A holds the variable operand, B the constant weight matrix
        # -- presumably; confirm against getValues.
        A = prevValues[0]
        B = prevValues[1]
        if aTranspose:
            A = np.transpose(A)
        if bTranspose:
            B = np.transpose(B)
        assert (A.shape[0], B.shape[1]) == curValues.shape
        assert A.shape[1] == B.shape[0]
        m, n = curValues.shape
        p = A.shape[1]
        ### END getting inputs ###

        ### Generate actual equations ###
        # One equation per output element: sum_k B[k][j]*A[i][k] - out = 0
        for i in range(m):
            for j in range(n):
                # (removed dead `e = []` that was immediately overwritten)
                e = MarabouUtils.Equation()
                for k in range(p):
                    e.addAddend(B[k][j], A[i][k])
                e.addAddend(-1, curValues[i][j])
                e.setScalar(0.0)
                self.addEquation(e)
    def mulEquations2(self, op):
        """
        Function to generate equations corresponding to mul
        Arguments:
            op: (tf.op) representing  mul operation
        """

        # Fetch the variable input, the constant input, and the output.
        input_ops = [i.op for i in op.inputs]
        assert len(input_ops) == 2
        prevVars = self.getValues(input_ops[0]).reshape(-1)
        prevConsts = self.getValues(input_ops[1]).reshape(-1)
        curVars = self.getValues(op).reshape(-1)
        assert len(prevVars) == len(curVars) == len(prevConsts)

        # Encode c * x_in - x_out = 0 element-wise.
        for const, inVar, outVar in zip(prevConsts, prevVars, curVars):
            e = MarabouUtils.Equation()
            e.addAddend(const, inVar)
            e.addAddend(-1, outVar)
            e.setScalar(0.0)
            self.addEquation(e)
    def sparseTensorDenseMatMulEquations(self, op):
        """
        Function to generate equations corresponding sparseTensorDenseMatMul
        Arguments:
            op: (tf.op) representing sparseTensorDenseMatMul operation
        """

        ### Get variables and constants of inputs ###
        input_ops = [i.op for i in op.inputs]
        prevValues = [self.getValues(i) for i in input_ops]
        curValues = self.getValues(op)
        # SparseTensorDenseMatMul inputs: sparse indices, sparse values,
        # sparse shape, and the dense matrix b.
        a_indices = prevValues[0]
        a_values = prevValues[1]
        a_shape = prevValues[2]
        b = prevValues[3]
        # assert (A.shape[0], B.shape[1]) == curValues.shape
        # assert A.shape[1] == B.shape[0]
        ### END getting inputs ###

        # NOTE(review): only a_values[0], b[0][i] and curValues[0][i] are
        # used and a_indices/a_shape are ignored, so each equation has a
        # single addend. This looks correct only for a 1x1 sparse tensor --
        # confirm against the intended use before relying on it.
        for i in range(len(curValues)):
            e = MarabouUtils.Equation()
            e.addAddend(a_values[0], b[0][i])

            e.addAddend(-1, curValues[0][i])
            e.setScalar(0.0)
            self.addEquation(e)
Example #9
0
    def evaluateSingleOutput(self, epsilon, network, prediction, output):
        """Bound weight perturbations and query whether `output` can beat
        `prediction`.

        Epsilon variables on columns other than the two compared outputs are
        pinned to zero; the rest are bounded by +/-epsilon, their absolute
        values summed and capped by epsilon, and the solver is invoked.
        """
        outputVars = network.outputVars[0]
        abs_epsilons = []
        relevant = (prediction, output)
        for layer_key in network.matMulLayers.keys():
            n, m = network.matMulLayers[layer_key]['vals'].shape
            print(n, m)
            for i in range(n):
                for j in range(m):
                    epsilon_var = network.epsilons[i][j]
                    if j in relevant:
                        network.setUpperBound(epsilon_var, epsilon)
                        network.setLowerBound(epsilon_var, -epsilon)
                        abs_epsilons.append(
                            self.epsilonABS(network, epsilon_var))
                    else:
                        network.setUpperBound(epsilon_var, 0)
                        network.setLowerBound(epsilon_var, 0)

        # sum(|eps_i|) <= epsilon
        e = MarabouUtils.Equation(
            EquationType=MarabouUtils.MarabouCore.Equation.LE)
        for abs_eps in abs_epsilons:
            e.addAddend(1, abs_eps)
        e.setScalar(epsilon)
        network.addEquation(e)

        # out[prediction] - out[output] <= 0, i.e. the other label wins.
        MarabouUtils.addInequality(
            network, [outputVars[prediction], outputVars[output]], [1, -1], 0)
        return network.solve(verbose=True)
    def mulEquations(self, node, makeEquations):
        """Generate Marabou equations for an ONNX Mul node.

        Assumes the first input holds Marabou variables and the second a
        constant multiplier stored in constantMap.

        Args:
            node: (node) ONNX node representing the Mul operation
            makeEquations: (bool) True if we should create variables/equations
        """
        nodeName = node.output[0]

        # Get the inputs
        inputName1, inputName2 = node.input
        shape1 = self.shapeMap[inputName1]

        # NOTE(review): the output shape is taken directly from the first
        # input; no NumPy-style broadcasting against input2 is performed.
        outShape = shape1
        self.shapeMap[nodeName] = outShape
        if not makeEquations:
            return

        # multiple is presumably a scalar constant -- TODO confirm callers
        # never store an array here.
        multiple = self.constantMap[inputName2]
        input1 = self.varMap[inputName1].reshape(-1)
        outputVariables = self.makeNewVariables(nodeName).reshape(-1)

        # Encode multiple * x_in - x_out = 0 element-wise.
        for inVar, outVar in zip(input1, outputVariables):
            e = MarabouUtils.Equation()
            e.addAddend(multiple, inVar)
            e.addAddend(-1, outVar)
            e.setScalar(0.0)
            self.addEquation(e)
Example #11
0
def input50p(net, pa, emptyArray, env):
    """Constrain the network inputs to a 50%-resource-occupation scenario.

    Steps the environment, copies the observed state into per-variable
    bounds (forcing the resource columns into [0.1, 1]), and requires each
    of the first five outputs to be <= output 5.

    Returns the observed job log.
    """
    # NOTE(review): num_of_new_jobs is read from module scope and `pa` is
    # unused -- confirm this is intentional.
    for _ in range(num_of_new_jobs):
        env.step(5)
    print(("Job queue occupation is {}/5\nJob backlog occupation is {}/60 ").format(
        len(env.job_slot.slot), env.job_backlog.curr_size))
    jobLog = env.observe()
    resource_cols = np.concatenate(
        [emptyArray[:, 0:10], emptyArray[:, 60:70]]).ravel()
    varNumArr = np.concatenate(emptyArray).ravel()
    LowerBoundList = np.concatenate(jobLog).ravel()
    UpperBoundList = np.concatenate(jobLog).ravel()
    # Resource variables are constrained to [0.1, 1]; the previously
    # separate lower/upper loops over the same slices are merged.
    for i in np.nditer(resource_cols[:100]):
        LowerBoundList[i] = 0.1
        UpperBoundList[i] = 1
    for i in np.nditer(resource_cols[200:300]):
        LowerBoundList[i] = 0.1
        UpperBoundList[i] = 1
    for varNum, lb, ub in zip(varNumArr, LowerBoundList, UpperBoundList):
        net.setLowerBound(varNum, lb)
        net.setUpperBound(varNum, ub)

    # output[i] - output[5] <= 0 for the first five outputs.
    # (the old `for outputVar, i in enumerate(...)` had the index/value
    # names swapped; the index was never used)
    for outVar in net.outputVars[0][0:5]:
        eq = MarabouUtils.Equation(EquationType=MarabouCore.Equation.LE)
        eq.addAddend(-1, net.outputVars[0][5])
        eq.addAddend(1, outVar)
        eq.setScalar(0)
        net.addEquation(eq)

    return jobLog
 def subEquations(self, op):
     """
     Function to generate equations corresponding to subtraction
     Arguments:
         op: (tf.op) representing sub operation
     """
     input_ops = [i.op for i in op.inputs]
     assert len(input_ops) == 2
     minuend_op, subtrahend_op = input_ops
     assert self.isVariable(minuend_op)
     if not self.isVariable(subtrahend_op):
         # Subtracting a constant is handled as a bias-add.
         self.biasAddEquations(op)
         return
     curVars = self.getValues(op).reshape(-1)
     prevVars1 = self.getValues(minuend_op).reshape(-1)
     prevVars2 = self.getValues(subtrahend_op).reshape(-1)
     assert len(prevVars1) == len(prevVars2)
     assert len(curVars) == len(prevVars1)
     # Encode a - b - out = 0 element-wise.
     for out, a, b in zip(curVars, prevVars1, prevVars2):
         e = MarabouUtils.Equation()
         e.addAddend(1, a)
         e.addAddend(-1, b)
         e.addAddend(-1, out)
         e.setScalar(0.0)
         self.addEquation(e)
    def conv2DEquations(self, op):
        """
        Function to generate equations corresponding to 2D convolution operation
        Arguments:
            op: (tf.op) representing conv2D operation

        One linear equation is emitted per output element; only batch index
        0 of the input/output tensors is used.
        """

        ### Get variables and constants of inputs ###
        # First input holds the (variable) activations in NHWC layout,
        # second the constant filter weights (HWCN).
        input_ops = [i.op for i in op.inputs]
        prevValues = [self.getValues(i) for i in input_ops]
        curValues = self.getValues(op)
        padding = op.node_def.attr['padding'].s.decode()
        strides = list(op.node_def.attr['strides'].list.i)
        prevValues, prevConsts = prevValues[0], prevValues[1]
        _, out_height, out_width, out_channels = curValues.shape
        _, in_height, in_width, in_channels = prevValues.shape
        filter_height, filter_width, filter_channels, num_filters = prevConsts.shape
        assert filter_channels == in_channels
        assert out_channels == num_filters
        # Use padding to determine top and left offsets
        # See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/quantized_conv_ops.cc#L51
        if padding == 'SAME':
            pad_top = (
                (out_height - 1) * strides[1] + filter_height - in_height) // 2
            pad_left = (
                (out_width - 1) * strides[2] + filter_width - in_width) // 2
        elif padding == 'VALID':
            pad_top = ((out_height - 1) * strides[1] + filter_height -
                       in_height + 1) // 2
            pad_left = ((out_width - 1) * strides[2] + filter_width -
                        in_width + 1) // 2
        else:
            raise NotImplementedError
        ### END getting inputs ###

        ### Generate actual equations ###
        # There is one equation for every output variable
        for i in range(out_height):
            for j in range(out_width):
                for k in range(out_channels
                               ):  # Out_channel corresponds to filter number
                    e = MarabouUtils.Equation()
                    # The equation convolves the filter with the specified input region
                    # Iterate over the filter
                    for di in range(filter_height):
                        for dj in range(filter_width):
                            for dk in range(filter_channels):

                                h_ind = int(strides[1] * i + di - pad_top)
                                w_ind = int(strides[2] * j + dj - pad_left)
                                # Padding positions fall outside the input
                                # and contribute nothing (implicit zeros).
                                if h_ind < in_height and h_ind >= 0 and w_ind < in_width and w_ind >= 0:
                                    var = prevValues[0][h_ind][w_ind][dk]
                                    c = prevConsts[di][dj][dk][k]
                                    e.addAddend(c, var)

                    # Add output variable
                    e.addAddend(-1, curValues[0][i][j][k])
                    e.setScalar(0.0)
                    self.addEquation(e)
Example #14
0
 def set_expected_category(self, y_index, allowed_misclassifications=None):
     """Constrain the query so some non-allowed output beats y_index.

     Adds out_other <= out_expected (LE) for every output except the
     expected label and any allowed misclassifications.

     Args:
         y_index (int): index of the expected label
         allowed_misclassifications (list, optional): labels also accepted
     """
     # Avoid the mutable-default-argument pitfall (was `=[]`).
     if allowed_misclassifications is None:
         allowed_misclassifications = []
     n_outputs = self.get_num_outputs()
     assert(y_index < n_outputs)
     other_ys = [y for y in range(n_outputs) if (y != y_index) and (y not in allowed_misclassifications)]
     for other_y in other_ys:
         eq = MarabouUtils.Equation(EquationType=MarabouCore.Equation.LE)
         eq.addAddend(1, self.get_output_var(other_y))
         eq.addAddend(-1, self.get_output_var(y_index))
         eq.setScalar(0)
         self.network.addEquation(eq)
    def gemmEquations(self, node, makeEquations):
        """
        Function to generate equations corresponding to Gemm (general matrix multiplication)
        Arguments:
            node: (node) representing the Gemm operation
            makeEquations: (bool) True if we need to create new variables and write Marabou equations
        """
        nodeName = node.output[0]

        # Get inputs: variables, weight matrix constant, bias constant.
        inputName1, inputName2, inputName3 = node.input
        shape1 = self.shapeMap[inputName1]
        shape2 = self.shapeMap[inputName2]
        shape3 = self.shapeMap[inputName3]
        input1 = self.varMap[inputName1]
        input2 = self.constantMap[inputName2]
        input3 = self.constantMap[inputName3]

        self.shapeMap[nodeName] = self.shapeMap[inputName3]
        if makeEquations:

            # Pad shape if needed so input1 is 2D with batch first and the
            # bias aligns with it. NOTE(review): transA/transB/alpha/beta
            # Gemm attributes are not consulted here -- confirm inputs are
            # always pre-oriented.
            if len(shape1) == 1:
                shape1 = [1] + shape1
                input1 = input1.reshape(shape1)
            elif shape1[1] == 1:
                # Column vector: treat as a row vector instead.
                shape1 = shape1[::-1]
                input1 = input1.reshape(shape1)
            if len(shape3) == 1:
                shape3 = [1] + shape3
                input3 = input3.reshape(shape3)
            if shape1[0] != shape3[0]:
                shape3 = shape3[::-1]
                input3 = input3.reshape(shape3)

            # Assume that first input is variables, second is Matrix for MatMul, and third is bias addition
            assert shape1[-1] == shape2[0]
            assert shape1[0] == shape3[0]
            assert shape2[1] == shape3[1]

            # Create new variables
            self.shapeMap[nodeName] = self.shapeMap[node.input[2]]
            outputVariables = self.makeNewVariables(nodeName)
            outputVariables = outputVariables.reshape(shape3)
            # Generate equations: sum_k W[k][j]*x[i][k] - out[i][j] = -bias[i][j]
            for i in range(shape1[0]):
                for j in range(shape2[1]):
                    e = MarabouUtils.Equation()
                    for k in range(shape1[1]):
                        e.addAddend(input2[k][j], input1[i][k])

                    # Put output variable as the last addend last
                    e.addAddend(-1, outputVariables[i][j])
                    e.setScalar(-input3[i][j])
                    self.addEquation(e)
    def batchNorm(self, node, makeEquations):
        """Function to generate equations for a BatchNormalization

        Encodes, per channel i:
            out = scale[i] / sqrt(var[i] + eps) * (in - mean[i]) + bias[i]
        as one linear Marabou equation per element.

        Args:
            node (node): ONNX node representing the BatchNormalization operation
            makeEquations (bool): True if variables and equations should be created

        :meta private:
        """

        nodeName = node.output[0]
        inputName = node.input[0]
        self.shapeMap[nodeName] = self.shapeMap[inputName]

        # Get attributes
        epsilon = None
        for attr in node.attribute:
            if attr.name == "epsilon":
                epsilon = get_attribute_value(attr)
        if epsilon is None:
            # The epsilon attribute is optional; fall back to the ONNX
            # spec default instead of crashing on None below.
            epsilon = 1e-05

        # Get inputs
        scales = self.constantMap[node.input[1]].reshape(-1)
        biases = self.constantMap[node.input[2]].reshape(-1)
        input_means = self.constantMap[node.input[3]].reshape(-1)
        input_variances = self.constantMap[node.input[4]].reshape(-1)

        if not makeEquations:
            return

        numChannels = len(scales)

        # Get variables, grouped by channel
        inputVars = self.varMap[inputName].reshape(numChannels, -1)
        outputVars = self.makeNewVariables(nodeName).reshape(numChannels, -1)
        assert (inputVars.shape == outputVars.shape)

        numInputs = inputVars.shape[1]

        for i in range(numChannels):
            for j in range(numInputs):
                # Add equation
                # To know this computation,
                # refer to https://github.com/onnx/onnx/blob/master/docs/Operators.md#batchnormalization.
                e = MarabouUtils.Equation()
                e.addAddend(-1, outputVars[i][j])
                e.addAddend(
                    1 / np.sqrt(input_variances[i] + epsilon) * scales[i],
                    inputVars[i][j])
                e.setScalar(input_means[i] /
                            np.sqrt(input_variances[i] + epsilon) * scales[i] -
                            biases[i])
                self.addEquation(e)
    def evaluateEpsilon(self, epsilon, network):
        """Bound all weight-perturbation variables by +/-epsilon, cap the sum
        of their absolute values by epsilon, require outputs 0 and 1 to beat
        outputs 2-4 by correct_diff, and solve.

        Returns (sat, vals) when a counterexample exists, else (unsat, vals).
        """
        outputVars = network.outputVars
        abs_epsilons = list()
        n, m = network.epsilons.shape
        print(n, m)
        for i in range(n):
            for j in range(m):
                epsilon_var = network.epsilons[i][j]
                network.setUpperBound(epsilon_var, epsilon)
                network.setLowerBound(epsilon_var, -epsilon)
                abs_epsilon_var = self.epsilonABS(network, epsilon_var)
                abs_epsilons.append(abs_epsilon_var)

        # sum(|eps_i|) <= epsilon
        e = MarabouUtils.Equation(
            EquationType=MarabouUtils.MarabouCore.Equation.LE)
        for i in range(len(abs_epsilons)):
            e.addAddend(1, abs_epsilons[i])
        e.setScalar(epsilon)
        network.addEquation(e)
        # Replaced six copy-pasted addInequality calls with loops:
        # out[winner] - out[loser] <= correct_diff for winners 0,1 and
        # losers 2,3,4 (same call order as before).
        for i in range(outputVars.shape[0]):
            for winner in (0, 1):
                for loser in (2, 3, 4):
                    MarabouUtils.addInequality(
                        network,
                        [outputVars[i][winner], outputVars[i][loser]],
                        [1, -1], self.correct_diff)
        vals = network.solve(verbose=True)
        if vals[0]:
            return sat, vals
        else:
            return unsat, vals
Example #18
0
    def matMulEquations(self, op):
        """Function to generate equations corresponding to matrix multiplication

        Args:
            op: (tf.op) representing matrix multiplication operation

        :meta private:
        """
        # Each operand comes back with a flag saying whether it holds
        # Marabou variables or plain constants.
        assert len(op.inputs) == 2
        A, A_isVar = self.getValues(op.inputs[0].op)
        B, B_isVar = self.getValues(op.inputs[1].op)

        # At most one side may be variables, or the product is nonlinear.
        assert not (A_isVar and B_isVar)

        # Apply requested transpositions before any shape checks.
        if op.node_def.attr['transpose_a'].b:
            A = np.transpose(A)
        if op.node_def.attr['transpose_b'].b:
            B = np.transpose(B)

        # Output variables and any fused bias-add scalars.
        outputVars = self.makeNewVariables(op)
        scalars, sgnVar, sgnScalar = self.getScalars(op, outputVars)

        # Validate shapes for C = A @ B.
        assert (A.shape[0], B.shape[1]) == outputVars.shape
        assert A.shape[1] == B.shape[0]
        m, n = outputVars.shape
        p = A.shape[1]

        # One equation per output element; the constant side supplies the
        # coefficient, the variable side supplies the variable number.
        for i in range(m):
            for j in range(n):
                eq = MarabouUtils.Equation()
                for k in range(p):
                    if A_isVar:
                        eq.addAddend(sgnVar * B[k][j], A[i][k])
                    else:
                        eq.addAddend(sgnVar * A[i][k], B[k][j])
                eq.addAddend(-1, outputVars[i][j])
                eq.setScalar(-sgnScalar * scalars[i][j])
                self.addEquation(eq)
Example #19
0
    def addInequality(self, vars, coeffs, scalar):
        r"""Function to add inequality constraint to network

        .. math::
            \sum_i vars_i * coeffs_i \le scalar

        Args:
            vars (list of int): Variable numbers
            coeffs (list of float): Coefficients
            scalar (float): Right hand side constant of inequality
        """
        # Raw docstring (r"") so \sum / \le are not invalid escape sequences.
        assert len(vars) == len(coeffs)
        e = MarabouUtils.Equation(MarabouCore.Equation.LE)
        for coeff, var in zip(coeffs, vars):
            e.addAddend(coeff, var)
        e.setScalar(scalar)
        self.addEquation(e)
Example #20
0
def test(net, pa, emptyArray):
    # 100% resource occupation - with 5 pending big jobs (20 tu) *(10,10)
    # Build a scenario with pa.num_nw jobs, all of length 20 and size 10x10.
    num_of_new_jobs = pa.num_nw
    nw_len_seqs = np.zeros((1, 5), dtype='int32')
    nw_len_seqs.fill(20)
    nw_size_seq = np.zeros((1, num_of_new_jobs, 2), dtype='int32')
    nw_size_seq.fill(10)

    env = Env(pa,
              nw_len_seqs=nw_len_seqs,
              nw_size_seqs=nw_size_seq,
              render=False,
              end="all_done")

    for i in range(num_of_new_jobs):
        env.step(5)
    print(("Job queue occupation is {}/5\nJob backlog occupation is {}/60 "
           ).format(len(env.job_slot.slot), env.job_backlog.curr_size))
    jobLog = env.observe()
    env.plot_state()

    # emptyArray maps grid positions to Marabou variable numbers --
    # presumably; columns 0-9 and 60-69 are the two resource images.
    resource_cols = np.concatenate([emptyArray[:, 0:10],
                                    emptyArray[:, 60:70]]).ravel()
    varNumArr = np.concatenate(emptyArray).ravel()
    LowerBoundList = np.concatenate(jobLog).ravel()
    UpperBoundList = np.concatenate(jobLog).ravel()
    # NOTE(review): job_cols is computed but never used below.
    job_cols = np.concatenate([emptyArray[:, 10:60],
                               emptyArray[:, 70:120]]).ravel()

    # Force every resource variable into [0.1, 1].
    for i in np.nditer(resource_cols):
        LowerBoundList[i] = 0.1
    for i in np.nditer(resource_cols):
        UpperBoundList[i] = 1
    for var in zip(varNumArr, LowerBoundList, UpperBoundList):
        net.setLowerBound(var[0], var[1])
        net.setUpperBound(var[0], var[2])

    # Require each of the first five outputs to be >= output 5.
    # NOTE(review): in `for outputVar, i in ...`, outputVar is actually the
    # enumerate index and i the output variable; only i is used.
    for outputVar, i in enumerate(net.outputVars[0][0:5]):
        eq = MarabouUtils.Equation(EquationType=MarabouCore.Equation.GE)
        eq.addAddend(-1, net.outputVars[0][5])
        eq.addAddend(1, i)
        eq.setScalar(0)
        net.addEquation(eq)

    return jobLog
    def addInequality(self, vars, coeffs, scalar, isProperty=False):
        r"""Function to add inequality constraint to network

        .. math::
            \sum_i vars_i * coeffs_i \le scalar

        Args:
            vars (list of int): Variable numbers
            coeffs (list of float): Coefficients
            scalar (float): Right hand side constant of inequality
            isProperty (bool): If true, this constraint can be removed later by clearProperty() method
        """
        # Raw docstring (r"") so \sum / \le are not invalid escape sequences.
        assert len(vars) == len(coeffs)
        e = MarabouUtils.Equation(MarabouCore.Equation.LE)
        for coeff, var in zip(coeffs, vars):
            e.addAddend(coeff, var)
        e.setScalar(scalar)
        self.addEquation(e, isProperty)
Example #22
0
def test(net, pa, emptyArray):
    # 0p resource occupance, 1 big job in backlog
    num_of_new_jobs = pa.num_nw + pa.backlog_size
    nw_len_seqs = np.zeros((1, num_of_new_jobs), dtype='int32')
    nw_size_seq = np.zeros((1, num_of_new_jobs, 2), dtype='int32')
    env = Env(pa,
              nw_len_seqs=nw_len_seqs,
              nw_size_seqs=nw_size_seq,
              render=False,
              end="all_done")
    # NOTE(review): the job arrays are mutated AFTER being passed to Env;
    # this only takes effect if Env keeps references rather than copies --
    # confirm against the Env constructor.
    nw_len_seqs[0][0] = 15
    nw_size_seq[0][0] = [10, 10]

    for i in range(num_of_new_jobs):
        env.step(5)
    print(("Job queue occupation is {}/5\nJob backlog occupation is {}/60 "
           ).format(len(env.job_slot.slot), env.job_backlog.curr_size))
    jobLog = env.observe()
    env.plot_state()

    # emptyArray maps grid positions to Marabou variable numbers --
    # presumably; columns 0-9 and 60-69 are the two resource images.
    resource_cols = np.concatenate([emptyArray[:, 0:10],
                                    emptyArray[:, 60:70]]).ravel()
    varNumArr = np.concatenate(emptyArray).ravel()
    LowerBoundList = np.concatenate(jobLog).ravel()
    UpperBoundList = np.concatenate(jobLog).ravel()
    # NOTE(review): resource_cols and job_cols are computed but never used.
    job_cols = np.concatenate([emptyArray[:, 10:60],
                               emptyArray[:, 70:120]]).ravel()

    # Pin every input variable to its observed value.
    for var in zip(varNumArr, LowerBoundList, UpperBoundList):
        net.setLowerBound(var[0], var[1])
        net.setUpperBound(var[0], var[2])

    # Keep outputs in a large finite range.
    for outputVar, i in enumerate(net.outputVars[0]):
        net.setLowerBound(i, -2000000)
        net.setUpperBound(i, 2000000)

    # Require outputs 1-5 each to be >= output 0.
    # NOTE(review): outputVar is the enumerate index, i the variable.
    for outputVar, i in enumerate(net.outputVars[0][1:6]):
        eq = MarabouUtils.Equation(EquationType=MarabouCore.Equation.GE)
        eq.addAddend(-1, net.outputVars[0][0])
        eq.addAddend(1, i)
        eq.setScalar(0)
        net.addEquation(eq)

    return jobLog
    def matMulEquations(self, op):
        """
        Function to generate equations corresponding to matrix multiplication
        Arguments:
            op: (tf.op) representing matrix multiplication operation

        The second operand is a dict of weight variables, their concrete
        values, and per-weight epsilon (perturbation) variables; each weight
        variable is tied to value + epsilon before being used.
        """
        ### Get variables and constants of inputs ###
        self.numOfLayers += 1

        input_ops = [i.op for i in op.inputs]
        prevValues = [self.getValues(i) for i in input_ops]
        curValues = self.getValues(op)
        self.matMulLayers[self.numOfLayers] = prevValues[1]

        aTranspose = op.node_def.attr['transpose_a'].b
        bTranspose = op.node_def.attr['transpose_b'].b
        A = prevValues[0]
        variables = prevValues[1]['vars']
        values = prevValues[1]['vals']
        epsilons = prevValues[1]['epsilons']
        if aTranspose:
            A = np.transpose(A)
        if bTranspose:
            variables = np.transpose(variables)
        assert (A.shape[0], variables.shape[1]) == curValues.shape
        assert A.shape[1] == variables.shape[0]
        m, n = curValues.shape
        p = A.shape[1]
        ### END getting inputs ###

        ### Generate actual equations ###
        # One equation per output element; each referenced weight variable
        # is first pinned to its value plus its epsilon perturbation.
        for i in range(m):
            for j in range(n):
                # (removed dead `e = []` and a bare no-op `e.addAddend`
                # statement that called nothing)
                e = MarabouUtils.Equation()
                for k in range(p):
                    self.createVarEpsilonEquation(variables[k][j],
                                                  epsilons[k][j], values[k][j])
                    e.addAddend(A[i][k], variables[k][j])
                e.addAddend(-1, curValues[i][j])
                e.setScalar(0.0)
                self.addEquation(e)
    def evaluateEpsilon(self, epsilon, network, prediction):
        """For each sample, bound weight perturbations on the columns of its
        top-2 predicted classes by +/-epsilon (zero elsewhere), cap the sum
        of absolute perturbations by epsilon, require the runner-up output
        to reach the top output, and solve.

        Returns (sat|unsat, solver stats, runner-up class per sample).
        """
        outputVars = network.outputVars
        abs_epsilons = list()
        preds = list()
        # Classes sorted by descending predicted score, per sample.
        predIndices = np.flip(np.argsort(prediction, axis=1), axis=1)
        for i in range(outputVars.shape[0]):
            preds.append((predIndices[i][0], predIndices[i][1]))
        # Hoisted out of the inner loop: previously the flattened list was
        # rebuilt for every (i, j) membership test.
        relevant_cols = set(chain.from_iterable(preds))
        n, m = network.epsilons.shape
        print(n, m)
        for i in range(n):
            for j in range(m):
                epsilon_var = network.epsilons[i][j]
                if j in relevant_cols:
                    network.setUpperBound(epsilon_var, epsilon)
                    network.setLowerBound(epsilon_var, -epsilon)
                    abs_epsilon_var = self.epsilonABS(network, epsilon_var)
                    abs_epsilons.append(abs_epsilon_var)
                else:
                    network.setUpperBound(epsilon_var, 0)
                    network.setLowerBound(epsilon_var, 0)

        # sum(|eps_i|) <= epsilon
        e = MarabouUtils.Equation(
            EquationType=MarabouUtils.MarabouCore.Equation.LE)
        for i in range(len(abs_epsilons)):
            e.addAddend(1, abs_epsilons[i])
        e.setScalar(epsilon)
        network.addEquation(e)

        # out[top] - out[runner-up] <= 0 for each sample.
        for i in range(outputVars.shape[0]):
            MarabouUtils.addInequality(
                network,
                [outputVars[i][preds[i][0]], outputVars[i][preds[i][1]]],
                [1, -1], 0)

        options = Marabou.createOptions(numWorkers=6, dnc=False)
        stats = network.solve(verbose=False, options=options)
        newOut = predIndices[:, 1]
        if stats[0]:
            return sat, stats, newOut
        else:
            return unsat, stats, newOut
 def processBiasAddRelations(self):
     """
     Either add an equation representing a bias add,
     Or eliminate one of the two variables in every other relation

     This variant unconditionally adds an equation for every relation;
     the variable-elimination path is commented out.
     """
     # NOTE(review): biasAddUpdates is populated nowhere in this variant.
     biasAddUpdates = dict()
     # participations = [rel[0] for rel in self.biasAddRelations] + \
     #                     [rel[1] for rel in self.biasAddRelations]
     for (x, xprime, c) in self.biasAddRelations:
         # x = xprime + c
         # replace x only if it does not occur anywhere else in the system
         # if self.lowerBoundExists(x) or self.upperBoundExists(x) or \
         #         self.participatesInPLConstraint(x) or \
         #         len([p for p in participations if p == x]) > 1:
         e = MarabouUtils.Equation()
         e.addAddend(1, x)
         e.addAddend(-1, xprime)
         # NOTE(review): c is added as an addend, i.e. treated as a
         # variable number, whereas the sibling implementation uses
         # setScalar(c) -- confirm c is a variable in this codebase.
         e.addAddend(-1, c)
         e.setScalar(0.0)
         self.addEquation(e)
def k_test(filename, k, download_time, bitrate):
    """Build and solve a k-step Marabou query for the bitrate-selection network.

    Unrolls the network over k steps, pins the environment inputs
    (previous bitrate, buffer level, throughput/download-time history,
    next chunk sizes, chunks remaining) to concrete values or linear
    relations, then asks Marabou whether the output for the queried
    bitrate dominates every other bitrate output at every step.

    Args:
        filename: network file path, forwarded to create_network
        k: number of unrolled steps
        download_time: fixed past-chunk download time used at every step
        bitrate: index of the bitrate whose output must dominate

    Returns:
        whatever utils.handle_results produces for this query
    """
    QUERY_BITRATE = bitrate
    DOWNLOAD_TIME = download_time
    network, input_op_names, output_op_name = create_network(filename, k)
    inputVars = network.inputVars
    outputVars = network.outputVars
    # One logit per bitrate per unrolled step
    assert (len(outputVars) % utils.A_DIM == 0)


    all_inputs, used_inputs, unused_inputs, last_chunk_bit_rate, current_buffer_size, past_chunk_throughput, \
    past_chunk_download_time, next_chunk_sizes, number_of_chunks_left = utils.prep_input_for_query(inputVars, k)

    all_outputs = utils.prep_outputs_for_query(outputVars, k)

    # One auxiliary variable per step, fixed exactly to DOWNLOAD_TIME
    past_chunk_download_time_eps = []
    for j in range(k):
        eps = network.getNewVariable()
        # network.userDefineInputVars.append(eps)
        # 0-4 SECONDS
        network.setLowerBound(eps, DOWNLOAD_TIME)  #-MARABOU_ERR)
        network.setUpperBound(eps, DOWNLOAD_TIME)  #+MARABOU_ERR)  # max : 4s
        past_chunk_download_time_eps.append(eps)

    # Per-bitrate chunk-size envelopes; index 0 is the lowest bitrate
    chunk_size_lower_bounds = [.1, .3, .5, .8, 1.2, 1.93]
    chunk_size_upper_bounds = [.2, .45, .71, 1.1, 1.75, 2.4]
    first_chunk_size = network.getNewVariable()
    # NOTE(review): both bounds use chunk_size_lower_bounds[1], pinning the
    # first chunk to an exact size; if an interval was intended the upper
    # bound should use chunk_size_upper_bounds[1] -- confirm.
    network.setLowerBound(first_chunk_size, chunk_size_lower_bounds[1])
    network.setUpperBound(first_chunk_size, chunk_size_lower_bounds[1])

    # Inputs the query does not constrain are clamped to zero
    for var in unused_inputs:
        l = 0
        u = 0
        network.setLowerBound(var, l)
        network.setUpperBound(var, u)

    for j in range(k):

        # last_chunk_bit_rate
        # one of VIDEO_BIT_RATE[bit_rate] / float(np.max(VIDEO_BIT_RATE))
        for var in last_chunk_bit_rate[j]:
            if j == 0:
                # First step: previous bitrate is fixed to index 1
                l = utils.VIDEO_BIT_RATE[1] / utils.VIDEO_BIT_RATE[-1]
                u = utils.VIDEO_BIT_RATE[1] / utils.VIDEO_BIT_RATE[-1]
                network.setLowerBound(var, l)
                network.setUpperBound(var, u)
            else:
                # Later steps: previous bitrate is the queried bitrate
                l = utils.VIDEO_BIT_RATE[QUERY_BITRATE] / utils.VIDEO_BIT_RATE[
                    -1]
                u = utils.VIDEO_BIT_RATE[QUERY_BITRATE] / utils.VIDEO_BIT_RATE[
                    -1]
                network.setLowerBound(var, l)
                network.setUpperBound(var, u)

        # current_buffer_size
        # Fixed buffer level at every step
        for var in current_buffer_size[j]:
            l = 0.4  #
            u = 0.4  #
            network.setLowerBound(var, l)
            network.setUpperBound(var, u)

        # past_chunk_throughput
        # History window: each step shifts the previous window left by one
        # slot and appends throughput = 0.1 * chunk_size / DOWNLOAD_TIME
        i = 0
        a = [0, 0, 0, 0, 0, 0, 0, 0]  # debug trace of which slots got data
        for var in past_chunk_throughput[j]:
            eq = MarabouUtils.Equation(EquationType=MarabouCore.Equation.EQ)
            eq.addAddend(1, var)
            if j == 0:
                if i == (utils.S_LEN) - 1:
                    eq.addAddend(-0.1 / DOWNLOAD_TIME, first_chunk_size)
                    a[i] = 'f'
                else:
                    # NOTE(review): addAddend(0, 0) is a zero-coefficient
                    # no-op on variable 0; the slot is simply forced to 0
                    eq.addAddend(0, 0)  # 0
            else:
                if i == (utils.S_LEN) - 1:
                    eq.addAddend(-0.1 / DOWNLOAD_TIME,
                                 next_chunk_sizes[j - 1][QUERY_BITRATE])
                    a[i] = 'f'
                else:
                    # Shifted copy of the previous step's window
                    eq.addAddend(-1, past_chunk_throughput[j - 1][i + 1])

            eq.setScalar(0)
            network.addEquation(eq)
            i += 1
        # past_chunk_download_time
        # Slots within the last j+1 positions carry the fixed download time;
        # older slots are forced to zero
        i = 0
        a = [0, 0, 0, 0, 0, 0, 0, 0]

        for var in past_chunk_download_time[j]:
            # l = 0.1
            # u = 40 => 4s
            eq = MarabouUtils.Equation(EquationType=MarabouCore.Equation.EQ)
            eq.addAddend(-1, var)
            if i >= (utils.S_LEN - j) - 1:
                eq.addAddend(1, past_chunk_download_time_eps[j])
                a[i] = 1
            else:
                # Zero-coefficient no-op; slot forced to 0 (see NOTE above)
                eq.addAddend(0, 0)  # 0
            eq.setScalar(0)
            network.addEquation(eq)
            i += 1
        print("past_chunk_download_time")
        print(a)

        # next_chunk_sizes
        # Lowest bitrate gets an interval; every higher bitrate is tied
        # proportionally to it via VIDEO_BIT_RATE ratios
        i = 0

        assert len(next_chunk_sizes[j]) == len(utils.VIDEO_BIT_RATE)
        for var in next_chunk_sizes[j]:
            # All sizes
            # chunk_size = utils.VIDEO_BIT_RATE[size_i]
            # print("chunk_size", chunk_size)
            if i == 0:
                l = chunk_size_lower_bounds[0]  # chunk_size
                u = chunk_size_upper_bounds[0]  # chunk_size
                network.setLowerBound(var, l)
                network.setUpperBound(var, u)
            else:
                eq = MarabouUtils.Equation(
                    EquationType=MarabouCore.Equation.EQ)
                eq.addAddend(-1, var)
                eq.addAddend(utils.VIDEO_BIT_RATE[i] / utils.VIDEO_BIT_RATE[0],
                             next_chunk_sizes[j][0])
                eq.setScalar(0)
                network.addEquation(eq)
            i += 1

        # number_of_chunks_left
        # Normalized count, decreasing by 1/k each step
        for var in number_of_chunks_left[j]:
            l = ((k - j) - 1) / (k)
            u = ((k - j) - 1) / (k)
            network.setLowerBound(var, l)
            network.setUpperBound(var, u)

    # Keep all outputs finite for the solver
    for j in range(len(outputVars)):
        network.setLowerBound(outputVars[j], -1e6)
        network.setUpperBound(outputVars[j], 1e6)

    # Query condition: the chosen bitrate's logit >= every other logit
    for network_output in all_outputs:
        print("=============")
        for bit_rate_var in network_output:
            if bit_rate_var == network_output[QUERY_BITRATE]:
                continue
            eq = MarabouUtils.Equation(EquationType=MarabouCore.Equation.GE)
            eq.addAddend(
                1, network_output[QUERY_BITRATE])  # HD > rest of bit rates
            eq.addAddend(-1, bit_rate_var)
            eq.setScalar(0)
            network.addEquation(eq)
            # print(network_output[-1],">",bit_rate_var )

    print("\nMarabou results:\n")

    vals, stats = network.solve(verbose=True)

    print("all_inputs = ", all_inputs)
    print("used_inputs = ", used_inputs)
    result = utils.handle_results("rebuf_bitrate" + str(QUERY_BITRATE), k,
                                  DOWNLOAD_TIME, vals, last_chunk_bit_rate,
                                  current_buffer_size, past_chunk_throughput,
                                  past_chunk_download_time, next_chunk_sizes,
                                  number_of_chunks_left, all_outputs)
    return result
示例#27
0
    -3.77124648, -7.53252938, 6.1390369, -7.75015215, 0.80588465
]])

# # Set input bounds
# Pin each input to an L_inf ball of radius `delta` around the point `x`
# (x, delta, large, inputVars, outputVars, network come from earlier in this script)
for var in inputVars:
    network.setLowerBound(var, x[var] - delta)
    network.setUpperBound(var, x[var] + delta)

# Set output bounds
# Keep every raw output finite for the solver
for var in outputVars:
    network.setLowerBound(var, -large)
    network.setUpperBound(var, large)

# Auxiliary variable: new_var = outputVars[9] - outputVars[7], forced >= 0,
# i.e. the query demands output 9 dominates output 7
new_var = network.getNewVariable()
network.setLowerBound(new_var, 0)

# NOTE(review): other call sites pass MarabouCore.Equation.EQ directly;
# here the nested EquationType enum is used -- confirm both resolve to EQ
equation1 = MarabouUtils.Equation(MarabouCore.Equation.EquationType.EQ)
equation1.addAddend(1, outputVars[7])
equation1.addAddend(-1, outputVars[9])
equation1.addAddend(1, new_var)
equation1.setScalar(0)

network.addEquation(equation1)

# Expose the difference variable as the single network output
network.outputVars = np.array([[new_var]])

# network.evaluateWithMarabou(np.array([x]))
# # Call to C++ Marabou solver
# options = Marabou.createOptions(dnc=True, numWorkers=6, initialDivides=2, verbosity=0)
# The query is serialized to disk instead of being solved here
network.saveQuery("query_20_0.3")
示例#28
0
def export_marabou(view):
    """Translate a ViewIO graph into a MarabouNetwork.

    Allocates one Marabou variable per node (inputs first, because
    Marabou relies on variable-creation order for its inputs), encodes
    node semantics (weighted sums, ReLUs, absolute values), converts the
    view's free-standing equations, and records a bidirectional
    node <-> variable mapping in ``nn.translate``.

    Returns:
        the populated MarabouNetwork
    """
    nn = MarabouNetwork()

    # Inputs must be allocated before all other nodes: Marabou does not
    # preserve input order, so creation order is the ordering contract.
    ordering = view.inputs + list(set(view.nodes) - set(view.inputs))
    trans = {node: nn.getNewVariable() for node in ordering}
    assert len(trans) == len(view.nodes)

    # Encode each node's bounds and semantics in Marabou form.
    for node in view.nodes:
        var = trans[node]
        lower, upper = node.limit
        if lower is not None:
            nn.setLowerBound(var, lower)
        if upper is not None:
            nn.setUpperBound(var, upper)

        if isinstance(node, nodes.NodeSum):
            # sum_i c_i * v_i - var = -scalar
            eq = MarabouUtils.Equation()
            for coeff, src in node.inputs:
                eq.addAddend(coeff, trans[src])
            eq.addAddend(-1, var)
            eq.setScalar(-node.scalar)
            nn.addEquation(eq)
        elif isinstance(node, nodes.NodeReLU):
            pair = (trans[node.input], var)
            nn.addRelu(*pair)
            if node.relaxed:
                nn.relaxedReluList.append(pair)
        elif isinstance(node, nodes.NodeAbs):
            pair = (trans[node.input], var)
            nn.addAbsConstraint(*pair)
        elif type(node) == nodes.Node:
            pass  # plain node: bounds only, no constraint
        else:
            assert False, "Unknown node type %r" % (node, )

    # Convert the view's equations, mapping comparator enums across APIs.
    comparator_map = {
        equations.Equation.Comparator.GE: MarabouCore.Equation.GE,
        equations.Equation.Comparator.LE: MarabouCore.Equation.LE,
        equations.Equation.Comparator.EQ: MarabouCore.Equation.EQ,
    }
    for equation in view.equations:
        eq = MarabouUtils.Equation(comparator_map[equation.comparator])
        for coeff, src in equation.terms:
            eq.addAddend(coeff, trans[src])
        eq.setScalar(equation.scalar)
        nn.addEquation(eq)

    # Register inputs/outputs with Marabou.
    nn.inputVars = [np.array([trans[n] for n in view.inputs])]
    nn.outputVars = np.array([trans[n] for n in view.outputs])

    # Bidirectional lookup: node -> variable and variable -> node
    # (node objects and int variables never collide as dict keys).
    nn.translate = {}
    for node, var in trans.items():
        nn.translate[node] = var
        nn.translate[var] = node

    # Note: this method adds nodes to the input. Use nn.realInputs
    mitigate_marabou_constant_nodes_bug(view, nn)

    return nn
示例#29
0
    def conv2DEquations(self, op):
        """Function to generate equations corresponding to 2D convolution operation

        Args:
            op: (tf.op) representing conv2D operation

        :meta private:
        """

        # Get input variables and constants
        assert len(op.inputs) == 2
        inputVars = self.varMap[op.inputs[0].op]
        filters = self.constantMap[op.inputs[1].op]

        # Make new variables for output
        outputVars = self.makeNewVariables(op)

        # Extract attributes
        padding = op.node_def.attr['padding'].s.decode().upper()
        strides = list(op.node_def.attr['strides'].list.i)
        data_format = op.node_def.attr['data_format'].s.decode().upper()

        # Handle different data formats
        # (the strides vector is indexed to match the chosen layout)
        if data_format == 'NHWC':
            out_num, out_height, out_width, out_channels = outputVars.shape
            in_num, in_height, in_width, in_channels = inputVars.shape
            strides_height = strides[1]
            strides_width = strides[2]
        elif data_format == 'NCHW':
            out_num, out_channels, out_height, out_width = outputVars.shape
            in_num, in_channels, in_height, in_width = inputVars.shape
            strides_height = strides[2]
            strides_width = strides[3]
        else:
            raise NotImplementedError(
                "Network uses %s data format. Only 'NHWC' and 'NCHW' are currently supported"
                % data_format)

        # Assert that dimensions match up
        # (TF filter tensors are HWCN regardless of data_format)
        filter_height, filter_width, filter_channels, num_filters = filters.shape
        assert out_num == in_num
        assert filter_channels == in_channels
        assert out_channels == num_filters

        # Use padding to determine top and left offsets
        if padding == 'SAME':
            pad_top = max(
                (out_height - 1) * strides_height + filter_height - in_height,
                0) // 2
            pad_left = max(
                (out_width - 1) * strides_width + filter_width - in_width,
                0) // 2
        elif padding == 'VALID':
            pad_top = 0
            pad_left = 0
        else:
            raise NotImplementedError(
                "Network uses %s for conv padding. Only 'SAME' and 'VALID' padding are supported"
                % padding)

        # Try to get scalar values in case this operation is followed by BiasAddition
        # NOTE(review): sgnVar/sgnScalar appear to carry a sign flip applied to
        # the folded bias/output terms; exact semantics come from getScalars --
        # confirm there.
        scalars, sgnVar, sgnScalar = self.getScalars(op, outputVars)

        # Generate equations
        # There is one equation for every output variable:
        #   sum(filter * input_patch) - out = -scalar
        for n in range(out_num):
            for i in range(out_height):
                for j in range(out_width):
                    for k in range(
                            out_channels
                    ):  # Out_channel also corresponds to filter number
                        e = MarabouUtils.Equation()

                        # The equation convolves the filter with the specified input region
                        # Iterate over the filter
                        for di in range(filter_height):
                            for dj in range(filter_width):
                                for dk in range(filter_channels):

                                    # Get 2D location of filter with respect to input variables
                                    h_ind = strides_height * i + di - pad_top
                                    w_ind = strides_width * j + dj - pad_left

                                    # Build equation when h_ind and w_ind are valid
                                    # (positions falling into zero padding contribute nothing)
                                    if h_ind < in_height and h_ind >= 0 and w_ind < in_width and w_ind >= 0:
                                        if data_format == 'NHWC':
                                            var = inputVars[n][h_ind][w_ind][
                                                dk]
                                            c = filters[di][dj][dk][k] * sgnVar
                                            e.addAddend(c, var)
                                        else:
                                            var = inputVars[n][dk][h_ind][
                                                w_ind]
                                            c = filters[di][dj][dk][k] * sgnVar
                                            e.addAddend(c, var)

                        # Add output variable
                        if data_format == 'NHWC':
                            e.addAddend(-1, outputVars[n][i][j][k])
                            e.setScalar(-sgnScalar * scalars[n][i][j][k])
                            self.addEquation(e)
                        else:
                            e.addAddend(-1, outputVars[n][k][i][j])
                            e.setScalar(-sgnScalar * scalars[n][k][i][j])
                            self.addEquation(e)
示例#30
0
    def addEquations(self, op):
        """Function to generate equations corresponding to all types of add/subtraction operations

        Args:
            op: (tf.op) representing addition or subtraction operations

        :meta private:
        """
        # A preceding matmul may have already folded this addition in
        if op in self.varMap:
            return

        # Fetch both operands (value array + variable/constant flag) and
        # allocate the output variables
        assert len(op.inputs) == 2
        input1, input1_isVar = self.getValues(op.inputs[0].op)
        input2, input2_isVar = self.getValues(op.inputs[1].op)
        outputVars = self.makeNewVariables(op).flatten()

        # BiasAdd with NCHW layout adds the bias along the channel axis,
        # so reshape the bias for correct broadcasting
        if op.node_def.op == 'BiasAdd':
            data_format = 'NHWC'
            if 'data_format' in op.node_def.attr:
                data_format = op.node_def.attr['data_format'].s.decode().upper()
            if data_format == 'NCHW':
                input2 = input2.reshape((1, len(input2), 1, 1))

        # Broadcast both operands to a common shape, then flatten;
        # lengths must agree with the output
        input1, input2 = np.broadcast_arrays(input1, input2)
        input1 = input1.flatten()
        input2 = input2.flatten()
        assert len(input1) == len(input2)
        assert len(outputVars) == len(input1)

        # Subtraction only negates the second operand's sign
        sgn1 = 1
        sgn2 = 1
        if op.node_def.op in ["Sub"]:
            sgn2 = -1

        # At least one operand must be a variable; a fully-constant op is
        # handled earlier in makeGraphEquations
        assert input1_isVar or input2_isVar

        # One linear equation per output element. Variable operands become
        # addends; a constant operand moves (negated) to the scalar side:
        #   w1*x1 + ... + wk*xk - x_out = -b
        for i in range(len(outputVars)):
            e = MarabouUtils.Equation()
            scalar = 0.0
            if input1_isVar:
                e.addAddend(sgn1, input1[i])
            else:
                scalar = -sgn1 * input1[i]
            if input2_isVar:
                e.addAddend(sgn2, input2[i])
            else:
                scalar = -sgn2 * input2[i]
            e.addAddend(-1, outputVars[i])
            e.setScalar(scalar)
            self.addEquation(e)