Example #1
def encode_ranking_layer(prev_neurons, layerIndex, netPrefix):
    order_constrs = []

    n = len(prev_neurons)
    outs = [Variable(layerIndex, i, netPrefix, 'o') for i in range(n)]
    # !!! careful: row and column indices are swapped in the Variable names,
    # i.e. entry p_ij of the matrix is printed as p_j_i,
    # but for calculation the permute matrix is stored as an array of rows (as in math)
    permute_matrix = [[
        Variable(j, i, netPrefix, 'pi', type='Int') for j in range(n)
    ] for i in range(n)]

    # perm_matrix * prev_neurons = outs
    res_vars, permute_constrs = encode_binmult_matrix(prev_neurons, layerIndex,
                                                      netPrefix,
                                                      permute_matrix, outs)

    # o_i >= o_i+1
    for o, o_next in zip(outs, outs[1:]):
        order_constrs.append(Geq(o, o_next))

    # doubly stochastic
    one = Constant(1, netPrefix, layerIndex, 0)
    for i in range(len(prev_neurons)):
        # row stochastic
        permute_constrs.append(Linear(Sum(permute_matrix[i]), one))

    for j in range(len(prev_neurons)):
        # column stochastic
        permute_constrs.append(Linear(Sum([p[j] for p in permute_matrix]),
                                      one))

    constraints = permute_constrs + order_constrs
    return permute_matrix, (res_vars + outs), constraints
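The ranking layer above searches for a 0/1 doubly stochastic matrix whose product with prev_neurons is sorted in descending order. A minimal plain-Python sketch (not part of the original example, concrete values illustrative only) of what a satisfying assignment looks like:

prev = [0.3, 0.9, 0.1]
P = [[0, 1, 0],   # row i selects the i-th largest input
     [1, 0, 0],
     [0, 0, 1]]
outs = [sum(p * x for p, x in zip(row, prev)) for row in P]
assert outs == sorted(prev, reverse=True)        # o_i >= o_i+1 holds
assert all(sum(row) == 1 for row in P)           # row stochastic
assert all(sum(col) == 1 for col in zip(*P))     # column stochastic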
Example #2
    def test_variable(self):
        scoped_id = 'var_foo_1'
        self._registry.push_new_scope({'foo': (scoped_id, False)})
        v = Variable('foo')
        v_id = v.add_to_rules(self._rules, self._registry)
        self.assertEqual(v_id, scoped_id)
        self.assertEqual([], self._rules.instance_of_calls)
Example #3
    def test_multi_let(self):
        l = Literal('Int', 123)
        lt = Let([('x', Variable('y')), ('y', l)], Variable('x'))
        lt_id = lt.add_to_rules(self._rules, self._registry)
        l_id = self._registry.get_id_for(l)

        result = self._rules.infer()
        self.assertEqual('Int', result.get_type_by_id(lt_id))
Example #4
    def test_let_with_lambda(self):
        lm = Lambda(['x'], Variable('x'))
        var_id = Variable('id')
        app = Application(var_id, [Literal('Int', 123)])
        lt = Let([('id', lm)], app)

        lt_id = lt.add_to_rules(self._rules, self._registry)
        app_id = self._registry.get_id_for(app)
        self.assertIn((lt_id, app_id), self._rules.equal_calls)
Example #5
    def one_hot_comparison(oh1, oh2, net, layer, row, desired='different'):
        '''
        Compares two one-hot vectors and returns constraints that can only be satisfied
        if the vectors are equal/different
        :param oh1: one-hot vector
        :param oh2: one-hot vector
        :param net: netPrefix
        :param layer: layer of the net in which this operation takes place
        :param row: row of the net in which this operation takes place
        :param desired: keyword
            different - the constraints can only be satisfied if the vectors are different
            equal - the constraints can only be satisfied if the vectors are equal
        :return: a tuple of (deltas, diffs, constraints) where constraints are as described above and deltas, diffs
            are variables used in these constraints
        '''
        # requires that oh_i are one-hot vectors
        oh_deltas = []
        oh_diffs = []
        oh_constraints = []

        desired_result = 1
        if desired == 'different':
            desired_result = 1
        elif desired == 'equal':
            desired_result = 0

        terms = []
        x = 1
        for i, (oh1, oh2) in enumerate(zip(oh1, oh2)):
            constant = Constant(x, net, layer, row)
            terms.append(Multiplication(constant, oh1))
            terms.append(Neg(Multiplication(constant, oh2)))
            x *= 2

        sumvar = Variable(layer, row, net, 's', 'Int')
        oh_constraints.append(Linear(Sum(terms), sumvar))

        delta_gt = Variable(layer, row, net, 'dg', 'Int')
        delta_lt = Variable(layer, row, net, 'dl', 'Int')
        zero = Constant(0, net, layer, row)

        oh_constraints.append(Gt_Int(sumvar, zero, delta_gt))
        oh_constraints.append(Gt_Int(zero, sumvar, delta_lt))
        oh_constraints.append(
            Geq(Sum([delta_lt, delta_gt]),
                Constant(desired_result, net, layer, row)))

        oh_deltas.append(delta_gt)
        oh_deltas.append(delta_lt)

        oh_diffs.append(sumvar)

        return oh_deltas, oh_diffs, oh_constraints
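The powers-of-two weighting above makes the signed sum a unique fingerprint of where the 1 sits, so it is zero exactly when both one-hot vectors select the same index. A plain-Python sketch (illustration only, assuming genuinely one-hot inputs):

def weighted_diff(a, b):
    # mirrors the terms built above: sum of 2^i * (a_i - b_i)
    return sum((2 ** i) * (x - y) for i, (x, y) in enumerate(zip(a, b)))

assert weighted_diff([0, 1, 0], [0, 1, 0]) == 0    # equal vectors -> sum is 0
assert weighted_diff([0, 1, 0], [1, 0, 0]) != 0    # different vectors -> nonzero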
Example #6
def encode_relu_layer(prev_neurons, layerIndex, netPrefix):
    deltas = []
    outs = []
    ineqs = []
    for i, neuron in enumerate(prev_neurons):
        output = Variable(layerIndex, i, netPrefix, 'o')
        delta = Variable(layerIndex, i, netPrefix, 'd', 'Int')
        outs.append(output)
        deltas.append(delta)
        ineqs.append(Relu(neuron, output, delta))

    return outs, deltas, ineqs
Example #7
    def test_let(self):
        l = Literal('Int', 123)
        var_x = Variable('x')
        var_y = Variable('y')
        lt = Let([('x', var_y), ('y', l)], var_x)
        lt_id = lt.add_to_rules(self._rules, self._registry)
        l_id = self._registry.get_id_for(l)
        var_x_id = self._registry.get_id_for(var_x)
        var_y_id = self._registry.get_id_for(var_y)

        self.assertIn((lt_id, var_x_id), self._rules.equal_calls)
        self.assertIn(('var_x_2', var_y_id), self._rules.equal_calls)
        self.assertIn(('var_y_3', l_id), self._rules.equal_calls)
Example #8
    def test_let_with_lambda(self):
        ''' ML code:
        let id = \\x -> x
        in id 'foo'
        '''
        lm = Lambda(['x'], Variable('x'))
        var_id = Variable('id')
        app = Application(var_id, [Literal('String', 'foo')])
        lt = Let([('id', lm)], app)
        lt_id = lt.add_to_rules(self._rules, self._registry)

        result = self._rules.infer()
        self.assertEqual('String', result.get_type_by_id(lt_id))
Example #9
    def test_polymorphism(self):
        ''' ML code:
        let id = \\x -> x
        in (id id) 123
        '''
        lm = Lambda(['x'], Variable('x'))
        app1 = Application(Variable('id'), [Variable('id')])
        app2 = Application(app1, [Literal('Int', 123)])
        lt = Let([('id', lm)], app2)
        lt_id = lt.add_to_rules(self._rules, self._registry)

        result = self._rules.infer()
        self.assertEqual('Int', result.get_type_by_id(lt_id))
Example #10
    def number_comparison(n1, n2, net, layer, row, epsilon=0):
        '''
        Compares two arbitrary numbers and returns constraints such that one of the deltas equals 1 if the numbers
        are not equal
        :param n1: number
        :param n2: number
        :param net: netPrefix
        :param layer: layer of the net in which this operation takes place
        :param row: row of the net in which this operation takes place
        :return: a tuple of (deltas, diffs, constraints) where constraints are as described above and deltas, diffs
            are variables used in these constraints
        '''
        v_deltas = []
        v_diffs = []
        v_constraints = []

        delta_gt = Variable(layer, row, net, 'dg', 'Int')
        delta_lt = Variable(layer + 1, row, net, 'dl', 'Int')

        if epsilon > 0:
            eps = Constant(epsilon, net, layer + 1, row)
            diff_minus_eps = Variable(layer, row, net, 'x_m')
            diff_plus_eps = Variable(layer, row, net, 'x_p')

            v_constraints.append(
                Linear(Sum([n2, Neg(n1), Neg(eps)]), diff_minus_eps))
            v_constraints.append(Linear(Sum([n2, Neg(n1), eps]),
                                        diff_plus_eps))

            v_constraints.append(Greater_Zero(diff_minus_eps, delta_gt))
            v_constraints.append(Greater_Zero(Neg(diff_plus_eps), delta_lt))

            v_diffs.append(diff_minus_eps)
            v_diffs.append(diff_plus_eps)
        else:
            diff = Variable(layer, row, net, 'x')

            v_constraints.append(Linear(Sum([n1, Neg(n2)]), diff))
            v_constraints.append(Greater_Zero(diff, delta_gt))
            v_constraints.append(Greater_Zero(Neg(diff), delta_lt))

            v_diffs.append(diff)

        v_deltas.append(delta_gt)
        v_deltas.append(delta_lt)

        #v_constraints.append(Geq(Sum(v_deltas), Constant(desired_result, net, layer + 1, row)))

        return v_deltas, v_diffs, v_constraints
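With epsilon > 0, the two deltas flag the cases n2 - n1 > eps and n1 - n2 > eps; differences inside the eps band trigger neither. A plain-Python sketch of the eps branch (illustration only):

def compare(n1, n2, eps):
    return (n2 - n1 - eps > 0,       # condition behind delta_gt
            -(n2 - n1 + eps) > 0)    # condition behind delta_lt

assert compare(1.0, 2.0, 0.5) == (True, False)
assert compare(2.0, 1.0, 0.5) == (False, True)
assert compare(1.0, 1.1, 0.5) == (False, False)   # inside the eps band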
Example #11
def test_abs(number):
    input = Constant(number, '', 0, 0)
    delta = Variable(0, 0, '', 'd', 'Int')
    delta.update_bounds(0, 1)
    output = Variable(0, 0, '', 'a')

    vars = [delta, output]
    constrs = [Abs(input, output, delta)]

    model = create_gurobi_model(vars, constrs)
    model.optimize()

    res = model.getVarByName('a_0_0').X

    return vars, constrs, res
Example #12
def random_expression(max_vars: int, max_depth: int) -> Expression:
    if not 0 <= max_vars <= 26:
        raise ValueError("max_vars must be >= 0 and <= 26.")
    if max_depth <= 0:
        raise ValueError("max_depth must be > 0.")

    variables = random.choices(
        [Variable(c) for c in "abcdefghijklmnopqrstuvwxyz"], k=max_vars)
    literals = [literal_f, literal_t]

    def _random_expression(depth=0):
        r = random.randint(1, 100)
        simple_cutoff = round((depth / max_depth) * 100)
        if r <= simple_cutoff:
            if max_vars == 0 or random.randint(0, 1) == 0:
                return random.choice(literals)
            else:
                return random.choice(variables)
        elif r <= simple_cutoff + ((100 - simple_cutoff) / 3):
            return random.choice(unary_operations)(_random_expression(depth +
                                                                      1))
        else:
            return random.choice(binary_operations)(
                _random_expression(depth + 1), _random_expression(depth + 1))

    return _random_expression()
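The leaf cutoff grows linearly with depth and reaches 100 at max_depth, which is what guarantees the recursion terminates. A tiny illustration (not part of the example):

max_depth = 4
for depth in range(max_depth + 1):
    print(depth, round((depth / max_depth) * 100))   # prints 0, 25, 50, 75, 100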
Example #13
    def test_lambda_exprssion(self):
        lm = Lambda(['x'], Variable('x'))
        lmid = lm.add_to_rules(self._rules, self._registry)
        self.assertEqual(
            [(1, ('Fn_1', 'var_x_2', 'var_x_2'))],
            self._rules.specify_calls
        )
Example #14
    def test_lambda_exprssion(self):
        lm = Lambda(['x'], Variable('x'))
        lmid = lm.add_to_rules(self._rules, self._registry)

        result = self._rules.infer()
        self.assertEqual(
            ('Fn_1', 'var_x_2', 'var_x_2'),
            result.get_type_by_id(lmid)
        )
Example #15
def encode_sort_one_hot_layer(prev_neurons, layerIndex, netPrefix, mode):
    n = len(prev_neurons)
    one_hot_vec = [
        Variable(layerIndex, i, netPrefix, 'pi', type='Int') for i in range(n)
    ]

    top = Variable(layerIndex, 0, netPrefix, 'top')
    # one_hot_vec and top need to be enclosed in [] so that indexing in encode_binmult_matrix works
    res_vars, mat_constrs = encode_binmult_matrix(prev_neurons, 0, netPrefix,
                                                  [one_hot_vec], [top])

    oh_constraint = Linear(Sum(one_hot_vec),
                           Constant(1, netPrefix, layerIndex, 0))

    if fc.use_eps_maximum:
        eps = Constant(fc.epsilon, netPrefix, layerIndex, 0)
        order_constrs = [
            Impl(pi, 0, Sum([neuron, eps]), top)
            for neuron, pi in zip(prev_neurons, one_hot_vec)
        ]
        pretty_print([], order_constrs)
    else:
        order_constrs = [Geq(top, neuron) for neuron in prev_neurons]

    if fc.use_context_groups:
        context = TopKGroup(top, prev_neurons, 1)
        order_constrs.append(context)

    outs = None
    vars = None
    if mode == 'vector':
        outs = one_hot_vec
        vars = res_vars + [top]
    elif mode == 'out':
        outs = [top]
        vars = res_vars + one_hot_vec
    else:
        raise ValueError(
            'Unknown mode for encoding of sort_one_hot layer: {name}'.format(
                name=mode))

    return outs, vars, [oh_constraint] + mat_constrs + order_constrs
Example #16
def encode_partial_layer(top_k, prev_neurons, layerIndex, netPrefix):
    order_constrs = []

    n = len(prev_neurons)
    outs = [Variable(layerIndex, i, netPrefix, 'o') for i in range(top_k)]
    # !!! careful: row and column indices are swapped in the Variable names,
    # i.e. entry p_ij of the matrix is printed as p_j_i,
    # but for calculation the permute matrix is stored as an array of rows (as in math)
    partial_matrix = [[
        Variable(j, i, netPrefix, 'pi', type='Int') for j in range(n)
    ] for i in range(top_k)]

    # perm_matrix * prev_neurons = outs
    res_vars, permute_constrs = encode_binmult_matrix(prev_neurons, layerIndex,
                                                      netPrefix,
                                                      partial_matrix, outs)

    # almost doubly stochastic
    one = Constant(1, netPrefix, layerIndex, 0)
    for i in range(top_k):
        # row stochastic
        permute_constrs.append(Linear(Sum(partial_matrix[i]), one))

    set_vars = []
    for j in range(len(prev_neurons)):
        # almost column stochastic (<= 1)
        s = Variable(layerIndex, j, netPrefix, 'set', type='Int')
        set_vars.append(s)
        permute_constrs.append(Linear(Sum([p[j] for p in partial_matrix]), s))
        permute_constrs.append(Geq(one, s))

    # o_i >= o_i+1 (for top_k)
    for o, o_next in zip(outs, outs[1:]):
        order_constrs.append(Geq(o, o_next))

    # x_i <= o_k-1 for all i that are not inside the top_k
    for i, s in enumerate(set_vars):
        order_constrs.append(Impl(s, 0, prev_neurons[i], outs[-1]))

    constraints = permute_constrs + order_constrs
    return [partial_matrix, set_vars], (res_vars + outs), constraints
Example #17
        def add_absolute_value_constraints(radius, dimension, netPrefix,
                                           centered_inputs):
            ineqs = []
            additional_vars = []

            deltas = [
                Variable(0, i, netPrefix, 'd', 'Int') for i in range(dimension)
            ]
            abs_outs = [
                Variable(0, i, netPrefix, 'abs') for i in range(dimension)
            ]
            ineqs.append([
                Abs(ci, aout, d)
                for ci, aout, d in zip(centered_inputs, abs_outs, deltas)
            ])
            ineqs.append(Geq(radius, Sum(abs_outs)))

            additional_vars.append(deltas)
            additional_vars.append(abs_outs)

            return ineqs, additional_vars
Example #18
def encode_one_hot(prev_neurons, layerIndex, netPrefix):
    max_outs, deltas, ineqs = encode_maxpool_layer(prev_neurons, layerIndex,
                                                   netPrefix)
    max_out = max_outs[-1]

    outs = []
    diffs = []
    eqs = []
    one_hot_constraints = []

    for i, x in enumerate(prev_neurons):
        output = Variable(layerIndex, i, netPrefix, 'o', 'Int')
        diff = Variable(layerIndex, i, netPrefix, 'x')
        outs.append(output)
        diffs.append(diff)

        eqs.append(Linear(Sum([x, Neg(max_out)]), diff))
        one_hot_constraints.append(One_hot(diff, output))

    constraints = ineqs + eqs + one_hot_constraints
    return outs, (deltas + diffs + max_outs), constraints
Example #19
    def test_application(self):
        scoped_id = 'var_times2_1'
        self._registry.push_new_scope({'times2': (scoped_id, True)})
        v = Variable('times2')
        l = Literal('Int', 123)
        a = Application(v, [l])
        a_id = a.add_to_rules(self._rules, self._registry)
        v_id = self._registry.get_id_for(v)
        l_id = self._registry.get_id_for(l)

        self.assertIn((v_id, scoped_id), self._rules.instance_of_calls)
        self.assertIn((v_id, ('Fn_1', l_id, a_id)), self._rules.specify_calls)
Example #20
    def test_application(self):
        self._registry.push_new_scope({'times2': ('var_times2_1', True)})
        v = Variable('times2')
        l = Literal('Int', 123)
        a = Application(v, [l])
        a_id = a.add_to_rules(self._rules, self._registry)
        v_id = self._registry.get_id_for(v)
        l_id = self._registry.get_id_for(l)

        result = self._rules.infer()
        self.assertEqual('Int', result.get_type_by_id(l_id))
        self.assertEqual(None, result.get_type_by_id(a_id))
Example #21
    def test_mutual_recursion(self):
        '''
        Equivalent ML:

        let-rec f = if True then 123 else g
                g = f
        in f
        '''
        test = Literal('Bool', True)
        if_case = Literal('Int', 123)
        else_case = Application(Variable('g'), [])
        if_block = If(test, if_case, else_case)
        f_func = Lambda([], if_block)

        g_body = Application(Variable('f'), [])
        g_func = Lambda([], g_body)

        let_body = Variable('f')
        let_expr = Let([('f', f_func), ('g', g_func)], let_body)

        let_id = let_expr.add_to_rules(self._rules, self._registry)
        result = self._rules.infer()
        self.assertEqual(('Fn_0', 'Int'), result.get_full_type_by_id(let_id))
Example #22
def encode_inputs(lower_bounds, upper_bounds, netPrefix=''):
    vars = []
    for i, (l, h) in enumerate(zip(lower_bounds, upper_bounds)):
        input_var = Variable(0, i, netPrefix, 'i')
        input_var.setLo(l)
        input_var.setHi(h)
        vars.append(input_var)

    return vars
Example #23
def encode_maxpool_layer(prev_neurons, layerIndex, netPrefix):
    # last variable in outs is output of maxpool
    deltas = []
    outs = []
    ineqs = []

    if len(prev_neurons) == 1:
        # will create duplicate bounds for input_var, but this is needed
        # so that other encodings can access the output of this layer
        # through the outs list.
        return prev_neurons, deltas, ineqs

    current_neurons = prev_neurons
    depth = 0
    while len(current_neurons) >= 2:
        current_depth_outs = []
        for i in range(0, len(current_neurons), 2):
            if i + 1 >= len(current_neurons):
                out = current_neurons[i]
                current_depth_outs.append(out)
                # don't append to outs, as it already has a constraint
            else:
                out = Variable(layerIndex, 0, netPrefix, 'o_' + str(depth))
                delta = Variable(layerIndex, 0, netPrefix, out.name + '_d',
                                 'Int')
                ineq = Max(current_neurons[i], current_neurons[i + 1], out,
                           delta)
                ineqs.append(ineq)
                deltas.append(delta)
                current_depth_outs.append(out)
                outs.append(out)

        current_neurons = current_depth_outs
        depth += 1

    return outs, deltas, ineqs
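The constraints built above encode a pairwise tournament; a plain-Python sketch (illustration only) of the same reduction on concrete numbers:

def maxpool(values):
    current = list(values)
    while len(current) >= 2:
        nxt = []
        for i in range(0, len(current), 2):
            if i + 1 >= len(current):
                nxt.append(current[i])                    # odd element carried over
            else:
                nxt.append(max(current[i], current[i + 1]))
        current = nxt
    return current[0]

assert maxpool([3, 7, 2, 5, 9]) == 9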
Example #24
    def check_equivalence_layer(self, layer_idx):
        opt_vars = []
        opt_constrs = []
        if layer_idx == 0:
            opt_vars += self.input_layer.get_outvars()[:]
        else:
            a_outs = self.a_layers[layer_idx - 1].get_outvars()[:]
            b_outs = self.b_layers[layer_idx - 1].get_outvars()[:]
            opt_vars += a_outs + b_outs

            # at this stage we assume the previous layers to be equivalent
            for avar, bvar in zip(a_outs, b_outs):
                opt_constrs += [Linear(avar, bvar)]

        bounds = []

        for i, (a_var, a_constr, b_var, b_constr) in enumerate(
                zip(self.a_layers[layer_idx].get_optimization_vars(),
                    self.a_layers[layer_idx].get_optimization_constraints(),
                    self.b_layers[layer_idx].get_optimization_vars(),
                    self.b_layers[layer_idx].get_optimization_constraints())):
            diff = Variable(layer_idx, i, 'E', 'diff')
            diff_constr = Linear(Sum([a_var, Neg(b_var)]), diff)

            if i == 1:
                pretty_print(opt_vars + [a_var, b_var, diff],
                             opt_constrs + [a_constr, b_constr, diff_constr])

            lb, ub = self.optimize_variable(
                diff, opt_vars + [a_var, b_var, diff],
                opt_constrs + [a_constr, b_constr, diff_constr])
            diff.update_bounds(lb, ub)

            bounds.append((lb, ub))

        return bounds
Example #25
    def encode_inputs(self, lower_bounds, upper_bounds, netPrefix=''):
        vars = []
        for i, (l, h) in enumerate(zip(lower_bounds, upper_bounds)):
            input_var = Variable(0, i, netPrefix, 'i')
            input_var.setLo(l)
            input_var.setHi(h)
            vars.append(input_var)

        num_neurons = len(lower_bounds)
        return InputLayer(num_neurons, vars)
Example #26
    def add(self, *rules: str) -> "Ruleset":
        res = self.copy()
        for expr in rules:
            rule = expr if isinstance(expr, Term) else _(expr)
            if isinstance(rule, Imp):
                if (isinstance(rule.get_left(), VariadicOp)
                        and rule.get_left().commutes()
                        and rule.get_left().placeholder == "*"):
                    res.add_raw(
                        type(rule.get_left())((Variable("$@"), *rule.get_left().get_args())),
                        type(rule.get_left())((Variable("$@"), rule.get_right())))
                res.add_raw(rule.get_left(), rule.get_right())
            elif isinstance(rule, Equ):
                res.add_raw(rule.get_left(), rule.get_right(), True)
            elif isinstance(rule, Not):
                res.add_raw(rule.elem, Negative())
            else:
                res.add_raw(rule, Positive())
            # else:
            #     raise TypeError("Invalid rule: " + str(rule))
        return res
Example #27
def encode_linear_layer(prev_neurons, weights, numNeurons, layerIndex,
                        netPrefix):
    vars = []
    equations = []
    prev_num = len(prev_neurons)
    for i in range(0, numNeurons):
        var = Variable(layerIndex, i, netPrefix, 'x')
        vars.append(var)
        terms = [
            Multiplication(
                Constant(weights[row][i], netPrefix, layerIndex, row),
                prev_neurons[row]) for row in range(0, prev_num)
        ]
        terms.append(Constant(weights[-1][i], netPrefix, layerIndex, prev_num))
        equations.append(Linear(Sum(terms), var))

    return vars, equations
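The Linear constraints above express an affine map whose bias sits in the last row of weights; a plain-Python sketch (illustration only) of the same computation:

def linear_layer(prev, weights, num_neurons):
    prev_num = len(prev)
    return [
        sum(weights[row][i] * prev[row] for row in range(prev_num)) + weights[-1][i]
        for i in range(num_neurons)
    ]

# two inputs, one output: 1*0.5 + 2*(-1) + bias 3 == 1.5
assert linear_layer([1.0, 2.0], [[0.5], [-1.0], [3.0]], 1) == [1.5]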
Example #28
def encode_binmult_matrix(prev_neurons, layerIndex, netPrefix, matrix, outs):
    res_vars = []

    lin_constrs = []
    permute_constrs = []

    for i in range(len(outs)):
        res_vars_i = []
        for j, neuron in enumerate(prev_neurons):
            y = Variable(j, i, netPrefix, 'y')
            res_vars_i.append(y)

            # TODO: check indexes in BinMult for printing
            lin_constrs.append(BinMult(matrix[i][j], neuron, y))

        permute_constrs.append(Linear(Sum(res_vars_i), outs[i]))

        res_vars.append(res_vars_i)

    # lin_constrs must come before permute_constrs, s.t. interval arithmetic can tighten intervals;
    # as we have no dependency graph, the order of constraints is important
    return res_vars, (lin_constrs + permute_constrs)
Example #29
    def test_generic_mutual_recursion(self):
        '''
        Equivalent ML:

        let-rec f x = if True then x else g x
                g y = f y
        in g
        '''
        test = Literal('Bool', True)
        if_case = Variable('x')
        else_case = Application(Variable('g'), [Variable('x')])
        if_block = If(test, if_case, else_case)
        f_func = Lambda(['x'], if_block)

        g_body = Application(Variable('f'), [Variable('y')])
        g_func = Lambda(['y'], g_body)

        let_body = Variable('f')
        let_expr = Let([('f', f_func), ('g', g_func)], let_body)

        let_id = let_expr.add_to_rules(self._rules, self._registry)
        result = self._rules.infer()
        self.assertEqual(('Fn_1', 'a0', 'a0'), result.get_full_type_by_id(let_id))
Example #30
    def test_polymorphic_variable(self):
        scoped_id = 'var_foo_1'
        self._registry.push_new_scope({'foo': (scoped_id, True)})
        v = Variable('foo')
        v_id = v.add_to_rules(self._rules, self._registry)
        self.assertIn((v_id, scoped_id), self._rules.instance_of_calls)