Code Example #1
def encode_ranking_layer(prev_neurons, layerIndex, netPrefix):
    order_constrs = []

    n = len(prev_neurons)
    outs = [Variable(layerIndex, i, netPrefix, 'o') for i in range(n)]
    # !!! careful: row and column indices are swapped in the Variable index,
    # so p_ij in the matrix is printed as p_j_i,
    # but for the calculation the permutation matrix is stored as an array of rows (as in math)
    permute_matrix = [[
        Variable(j, i, netPrefix, 'pi', type='Int') for j in range(n)
    ] for i in range(n)]

    # perm_matrix * prev_neurons = outs
    res_vars, permute_constrs = encode_binmult_matrix(prev_neurons, layerIndex,
                                                      netPrefix,
                                                      permute_matrix, outs)

    # o_i >= o_(i+1)
    for o, o_next in zip(outs, outs[1:]):
        order_constrs.append(Geq(o, o_next))

    # doubly stochastic
    one = Constant(1, netPrefix, layerIndex, 0)
    for i in range(len(prev_neurons)):
        # row stochastic
        permute_constrs.append(Linear(Sum(permute_matrix[i]), one))

    for j in range(len(prev_neurons)):
        # column stochastic
        permute_constrs.append(Linear(Sum([p[j] for p in permute_matrix]),
                                      one))

    constraints = permute_constrs + order_constrs
    return permute_matrix, (res_vars + outs), constraints
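
Read as a constraint system (writing x for prev_neurons, P for permute_matrix and o for outs, and assuming the Int entries of P are restricted to 0/1 elsewhere in the encoding), the ranking layer above roughly states

\[
o = P x, \qquad o_i \ge o_{i+1}, \qquad \sum_j P_{ij} = 1 \;\; \forall i, \qquad \sum_i P_{ij} = 1 \;\; \forall j,
\]

so P is forced to be a permutation matrix that sorts the previous layer's outputs in descending order.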
Code Example #2
    def one_hot_comparison(oh1, oh2, net, layer, row, desired='different'):
        '''
        Compares two one-hot vectors and returns constraints that can only be satisfied
        if the vectors are equal/different (depending on the desired keyword)
        :param oh1: one-hot vector
        :param oh2: one-hot vector
        :param net: netPrefix
        :param layer: layer of the net, in which this operation takes place
        :param row: row of the net, in which this operation takes place
        :param desired: keyword
            different - the constraints can only be satisfied if the vectors are different
            equal - the constraints can only be satisfied if the vectors are equal
        :return: a tuple of (deltas, diffs, constraints) where constraints are as described above and deltas, diffs
            are variables used in these constraints
        '''
        # requires that oh_i are one-hot vectors
        oh_deltas = []
        oh_diffs = []
        oh_constraints = []

        desired_result = 1
        if desired == 'different':
            desired_result = 1
        elif desired == 'equal':
            desired_result = 0

        terms = []
        x = 1
        # interpret the one-hot vectors as binary numbers: s = sum_i 2^i * (oh1_i - oh2_i)
        for oh1_i, oh2_i in zip(oh1, oh2):
            constant = Constant(x, net, layer, row)
            terms.append(Multiplication(constant, oh1_i))
            terms.append(Neg(Multiplication(constant, oh2_i)))
            x *= 2

        sumvar = Variable(layer, row, net, 's', 'Int')
        oh_constraints.append(Linear(Sum(terms), sumvar))

        delta_gt = Variable(layer, row, net, 'dg', 'Int')
        delta_lt = Variable(layer, row, net, 'dl', 'Int')
        zero = Constant(0, net, layer, row)

        oh_constraints.append(Gt_Int(sumvar, zero, delta_gt))
        oh_constraints.append(Gt_Int(zero, sumvar, delta_lt))
        oh_constraints.append(
            Geq(Sum([delta_lt, delta_gt]),
                Constant(desired_result, net, layer, row)))

        oh_deltas.append(delta_gt)
        oh_deltas.append(delta_lt)

        oh_diffs.append(sumvar)

        return oh_deltas, oh_diffs, oh_constraints
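
A sketch of what these constraints express, assuming Gt_Int(a, b, delta) forces the binary delta to be 1 exactly when a > b:

\[
s = \sum_i 2^i \, (oh1_i - oh2_i), \qquad \delta_{gt} = 1 \Leftrightarrow s > 0, \qquad \delta_{lt} = 1 \Leftrightarrow s < 0, \qquad \delta_{gt} + \delta_{lt} \ge d,
\]

with d = 1 for desired='different' and d = 0 for desired='equal'. Because oh1 and oh2 are one-hot, s = 0 holds exactly when both vectors select the same index, so for d = 1 the constraints are satisfiable only if the vectors differ.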
Code Example #3
    def number_comparison(n1, n2, net, layer, row, epsilon=0):
        '''
        Compares two arbitrary numbers and returns constraints such that one of the deltas is equal to 1
        if the numbers are not equal
        :param n1: number
        :param n2: number
        :param net: netPrefix
        :param layer: layer of the net, in which this operation takes place
        :param row: row of the net, in which this operation takes place
        :param epsilon: tolerance; for epsilon > 0 the numbers only count as different if they differ by more than epsilon
        :return: a tuple of (deltas, diffs, constraints) where constraints are as described above and deltas, diffs
            are variables used in these constraints
        '''
        v_deltas = []
        v_diffs = []
        v_constraints = []

        delta_gt = Variable(layer, row, net, 'dg', 'Int')
        delta_lt = Variable(layer + 1, row, net, 'dl', 'Int')

        if epsilon > 0:
            eps = Constant(epsilon, net, layer + 1, row)
            diff_minus_eps = Variable(layer, row, net, 'x_m')
            diff_plus_eps = Variable(layer, row, net, 'x_p')

            v_constraints.append(
                Linear(Sum([n2, Neg(n1), Neg(eps)]), diff_minus_eps))
            v_constraints.append(Linear(Sum([n2, Neg(n1), eps]),
                                        diff_plus_eps))

            v_constraints.append(Greater_Zero(diff_minus_eps, delta_gt))
            v_constraints.append(Greater_Zero(Neg(diff_plus_eps), delta_lt))

            v_diffs.append(diff_minus_eps)
            v_diffs.append(diff_plus_eps)
        else:
            diff = Variable(layer, row, net, 'x')

            v_constraints.append(Linear(Sum([n1, Neg(n2)]), diff))
            v_constraints.append(Greater_Zero(diff, delta_gt))
            v_constraints.append(Greater_Zero(Neg(diff), delta_lt))

            v_diffs.append(diff)

        v_deltas.append(delta_gt)
        v_deltas.append(delta_lt)

        #v_constraints.append(Geq(Sum(v_deltas), Constant(desired_result, net, layer + 1, row)))

        return v_deltas, v_diffs, v_constraints
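
In formulas, and assuming Greater_Zero(t, delta) forces the binary delta to be 1 exactly when t > 0, the two branches encode

\[
\varepsilon = 0: \quad x = n_1 - n_2, \qquad \delta_{gt} = 1 \Leftrightarrow x > 0, \qquad \delta_{lt} = 1 \Leftrightarrow -x > 0,
\]
\[
\varepsilon > 0: \quad x_m = n_2 - n_1 - \varepsilon, \quad x_p = n_2 - n_1 + \varepsilon, \qquad \delta_{gt} = 1 \Leftrightarrow x_m > 0, \qquad \delta_{lt} = 1 \Leftrightarrow -x_p > 0,
\]

so in both cases one of the deltas equals 1 exactly when |n_1 - n_2| > epsilon.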
Code Example #4
def encode_sort_one_hot_layer(prev_neurons, layerIndex, netPrefix, mode):
    n = len(prev_neurons)
    one_hot_vec = [
        Variable(layerIndex, i, netPrefix, 'pi', type='Int') for i in range(n)
    ]

    top = Variable(layerIndex, 0, netPrefix, 'top')
    # one_hot_vec and top need to be enclosed in [], so that indexing in binmult_matrix works
    res_vars, mat_constrs = encode_binmult_matrix(prev_neurons, 0, netPrefix,
                                                  [one_hot_vec], [top])

    oh_constraint = Linear(Sum(one_hot_vec),
                           Constant(1, netPrefix, layerIndex, 0))

    if fc.use_eps_maximum:
        eps = Constant(fc.epsilon, netPrefix, layerIndex, 0)
        order_constrs = [
            Impl(pi, 0, Sum([neuron, eps]), top)
            for neuron, pi in zip(prev_neurons, one_hot_vec)
        ]
        pretty_print([], order_constrs)
    else:
        order_constrs = [Geq(top, neuron) for neuron in prev_neurons]

    if fc.use_context_groups:
        context = TopKGroup(top, prev_neurons, 1)
        order_constrs.append(context)

    outs = None
    vars = None
    if mode == 'vector':
        outs = one_hot_vec
        vars = res_vars + [top]
    elif mode == 'out':
        outs = [top]
        vars = res_vars + one_hot_vec
    else:
        raise ValueError(
            'Unknown mode for encoding of sort_one_hot layer: {name}'.format(
                name=mode))

    return outs, vars, [oh_constraint] + mat_constrs + order_constrs
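
A rough summary of this encoding (writing x for prev_neurons and pi for one_hot_vec, and assuming Impl(delta, 0, lhs, rhs) means delta = 0 implies lhs <= rhs, as the comment in encode_partial_layer suggests):

\[
top = \sum_i \pi_i x_i, \qquad \sum_i \pi_i = 1,
\]

together with either top >= x_i for all i (default), which forces top = max_i x_i, or, with fc.use_eps_maximum, pi_i = 0 implies x_i + eps <= top, i.e. the selected neuron has to exceed every other neuron by at least epsilon.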
Code Example #5
def encode_partial_layer(top_k, prev_neurons, layerIndex, netPrefix):
    order_constrs = []

    n = len(prev_neurons)
    outs = [Variable(layerIndex, i, netPrefix, 'o') for i in range(top_k)]
    # !!! careful: row and column indices are swapped in the Variable index,
    # so p_ij in the matrix is printed as p_j_i,
    # but for the calculation the permutation matrix is stored as an array of rows (as in math)
    partial_matrix = [[
        Variable(j, i, netPrefix, 'pi', type='Int') for j in range(n)
    ] for i in range(top_k)]

    # perm_matrix * prev_neurons = outs
    res_vars, permute_constrs = encode_binmult_matrix(prev_neurons, layerIndex,
                                                      netPrefix,
                                                      partial_matrix, outs)

    # almost doubly stochastic
    one = Constant(1, netPrefix, layerIndex, 0)
    for i in range(top_k):
        # row stochastic
        permute_constrs.append(Linear(Sum(partial_matrix[i]), one))

    set_vars = []
    for j in range(len(prev_neurons)):
        # almost column stochastic (<= 1)
        s = Variable(layerIndex, j, netPrefix, 'set', type='Int')
        set_vars.append(s)
        permute_constrs.append(Linear(Sum([p[j] for p in partial_matrix]), s))
        permute_constrs.append(Geq(one, s))

    # o_i >= o_(i+1) (for top_k)
    for o, o_next in zip(outs, outs[1:]):
        order_constrs.append(Geq(o, o_next))

    # x_i <= o_(k-1) (the smallest top-k output) for all i that are not inside the top k
    for i, s in enumerate(set_vars):
        order_constrs.append(Impl(s, 0, prev_neurons[i], outs[-1]))

    constraints = permute_constrs + order_constrs
    return [partial_matrix, set_vars], (res_vars + outs), constraints
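
In constraint form (x = prev_neurons, P = partial_matrix with k = top_k rows, s = set_vars, o = outs), the layer roughly encodes

\[
o = P x, \qquad \sum_j P_{ij} = 1 \;\; (i = 1, \dots, k), \qquad s_j = \sum_i P_{ij} \le 1, \qquad o_1 \ge \dots \ge o_k, \qquad s_j = 0 \Rightarrow x_j \le o_k,
\]

so o holds the k largest entries of x in descending order and s_j indicates whether x_j was selected.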
Code Example #6
        def add_direct_constraints(radius, dimension, centered_inputs):
            ineqs = []
            for i in range(2**dimension):
                terms = []
                for j in range(dimension):
                    neg = (i // 2**j) % 2
                    if neg > 0:
                        terms.append(Neg(centered_inputs[j]))
                    else:
                        terms.append(centered_inputs[j])

                ineqs.append(Geq(radius, Sum(terms)))

            return ineqs, []
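
This helper enumerates all 2^dimension sign patterns; writing z for the centered inputs, the generated inequalities are

\[
\sum_j \sigma_j z_j \le r \quad \text{for every } \sigma \in \{-1, +1\}^{dimension},
\]

which together are equivalent to the Manhattan ball constraint sum_j |z_j| <= r without any additional integer variables, at the price of exponentially many inequalities.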
Code Example #7
def encode_linear_layer(prev_neurons, weights, numNeurons, layerIndex,
                        netPrefix):
    vars = []
    equations = []
    prev_num = len(prev_neurons)
    for i in range(0, numNeurons):
        var = Variable(layerIndex, i, netPrefix, 'x')
        vars.append(var)
        terms = [
            Multiplication(
                Constant(weights[row][i], netPrefix, layerIndex, row),
                prev_neurons[row]) for row in range(0, prev_num)
        ]
        terms.append(Constant(weights[-1][i], netPrefix, layerIndex, prev_num))
        equations.append(Linear(Sum(terms), var))

    return vars, equations
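
With m = len(prev_neurons), the equations produced above are simply the affine layer

\[
x_i = \sum_{r=0}^{m-1} W_{r,i} \, prev_r + W_{m,i},
\]

i.e. the last row of the weight matrix is interpreted as the bias vector.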
Code Example #8
    def calc_cluster_boundary(self, c1, c2, epsilon):
        c1 = np.array(c1)
        c2 = np.array(c2)

        factors = c2 - c1
        constant = (epsilon / 2) * np.linalg.norm(c2 - c1)**2
        constant += (np.linalg.norm(c1)**2 - np.linalg.norm(c2)**2) / 2

        invars = self.input_layer.get_outvars()
        netPrefix, _, _ = invars[0].getIndex()
        zero = Constant(0, netPrefix, 0, 0)

        terms = [
            Multiplication(Constant(factor, netPrefix, 0, 0), i)
            for factor, i in zip(factors, invars)
        ]
        terms.append(Constant(constant, netPrefix, 0, 0))

        bound = [Geq(zero, Sum(terms))]

        return bound
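
The returned inequality can be derived from the condition that an input x is at least as close to c1 as to c2:

\[
\|x - c_1\|^2 \le \|x - c_2\|^2 \;\Longleftrightarrow\; (c_2 - c_1) \cdot x + \tfrac{1}{2}\left(\|c_1\|^2 - \|c_2\|^2\right) \le 0 .
\]

The additional term (epsilon / 2) * ||c2 - c1||^2 shifts this separating hyperplane from the midpoint between the centers towards c1, so feasible points have to lie at least (epsilon / 2) * ||c2 - c1|| on the c1 side of the midplane.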
Code Example #9
        def add_absolute_value_constraints(radius, dimension, netPrefix,
                                           centered_inputs):
            ineqs = []
            additional_vars = []

            deltas = [
                Variable(0, i, netPrefix, 'd', 'Int') for i in range(dimension)
            ]
            abs_outs = [
                Variable(0, i, netPrefix, 'abs') for i in range(dimension)
            ]
            ineqs.append([
                Abs(ci, aout, d)
                for ci, aout, d in zip(centered_inputs, abs_outs, deltas)
            ])
            ineqs.append(Geq(radius, Sum(abs_outs)))

            additional_vars.append(deltas)
            additional_vars.append(abs_outs)

            return ineqs, additional_vars
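
This variant needs only one absolute-value variable (plus one binary delta) per dimension instead of 2^dimension inequalities; assuming Abs(z, a, delta) forces a = |z|, the constraints are

\[
a_i = |z_i| \quad (i = 0, \dots, dimension - 1), \qquad \sum_i a_i \le r,
\]

again restricting the centered inputs z to the Manhattan ball of radius r.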
Code Example #10
def encode_one_hot(prev_neurons, layerIndex, netPrefix):
    max_outs, deltas, ineqs = encode_maxpool_layer(prev_neurons, layerIndex,
                                                   netPrefix)
    max_out = max_outs[-1]

    outs = []
    diffs = []
    eqs = []
    one_hot_constraints = []

    for i, x in enumerate(prev_neurons):
        output = Variable(layerIndex, i, netPrefix, 'o', 'Int')
        diff = Variable(layerIndex, i, netPrefix, 'x')
        outs.append(output)
        diffs.append(diff)

        eqs.append(Linear(Sum([x, Neg(max_out)]), diff))
        one_hot_constraints.append(One_hot(diff, output))

    constraints = ineqs + eqs + one_hot_constraints
    return outs, (deltas + diffs + max_outs), constraints
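
Sketch of the resulting constraints, assuming One_hot(diff, o) forces the binary o to be 1 exactly when diff >= 0 (the exact semantics are defined elsewhere):

\[
max\_out = \max_i x_i, \qquad diff_i = x_i - max\_out \le 0, \qquad o_i = 1 \Leftrightarrow diff_i \ge 0,
\]

so o_i = 1 exactly for the neuron(s) attaining the maximum of the previous layer.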
Code Example #11
def encode_binmult_matrix(prev_neurons, layerIndex, netPrefix, matrix, outs):
    res_vars = []

    lin_constrs = []
    permute_constrs = []

    for i in range(len(outs)):
        res_vars_i = []
        for j, neuron in enumerate(prev_neurons):
            y = Variable(j, i, netPrefix, 'y')
            res_vars_i.append(y)

            # TODO: check indexes in BinMult for printing
            lin_constrs.append(BinMult(matrix[i][j], neuron, y))

        permute_constrs.append(Linear(Sum(res_vars_i), outs[i]))

        res_vars.append(res_vars_i)

    # lin_constrs before permute_constrs, s.t. interval arithmetic can tighten intervals
    # as we have no dependency graph, order of constraints is important
    return res_vars, (lin_constrs + permute_constrs)
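
Written out, this encodes the matrix-vector product out = M x for a binary matrix M. Assuming BinMult(m, x, y) linearizes the product of a binary and a continuous variable (y = m * x), the constraints are

\[
y_{ij} = M_{ij} \, x_j, \qquad out_i = \sum_j y_{ij} .
\]

As the closing comment notes, the linearization constraints are emitted before the summation constraints so that the interval-arithmetic preprocessing can tighten the bounds of the y variables first.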
Code Example #12
    def check_equivalence_layer(self, layer_idx):
        opt_vars = []
        opt_constrs = []
        if layer_idx == 0:
            opt_vars += self.input_layer.get_outvars()[:]
        else:
            a_outs = self.a_layers[layer_idx - 1].get_outvars()[:]
            b_outs = self.b_layers[layer_idx - 1].get_outvars()[:]
            opt_vars += a_outs + b_outs

            # at this stage we assume the previous layers to be equivalent
            for avar, bvar in zip(a_outs, b_outs):
                opt_constrs += [Linear(avar, bvar)]

        bounds = []

        for i, (a_var, a_constr, b_var, b_constr) in enumerate(
                zip(self.a_layers[layer_idx].get_optimization_vars(),
                    self.a_layers[layer_idx].get_optimization_constraints(),
                    self.b_layers[layer_idx].get_optimization_vars(),
                    self.b_layers[layer_idx].get_optimization_constraints())):
            diff = Variable(layer_idx, i, 'E', 'diff')
            diff_constr = Linear(Sum([a_var, Neg(b_var)]), diff)

            if i == 1:
                pretty_print(opt_vars + [a_var, b_var, diff],
                             opt_constrs + [a_constr, b_constr, diff_constr])

            lb, ub = self.optimize_variable(
                diff, opt_vars + [a_var, b_var, diff],
                opt_constrs + [a_constr, b_constr, diff_constr])
            diff.update_bounds(lb, ub)

            bounds.append((lb, ub))

        return bounds
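
Per layer, this method roughly solves the following problem (assuming Linear(a, b) encodes the equality a = b): tie the outputs of the previous layer of both networks together and bound the neuron-wise deviation of the current layer,

\[
a^{(\ell - 1)} = b^{(\ell - 1)}, \qquad diff_i = a_i^{(\ell)} - b_i^{(\ell)}, \qquad lb_i \le diff_i \le ub_i,
\]

where the optimized bounds (lb_i, ub_i) are returned for every neuron i of layer layer_idx.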
Code Example #13
    def add_input_radius(self,
                         center,
                         radius,
                         metric='manhattan',
                         radius_mode='constant',
                         radius_lo=0):
        '''
        Constrains the input values, s.t. they have to lie within a ball around a specified center
        with a specified radius according to a specified metric.

        If radius_mode = 'variable' is chosen, the radius for which the difference of the top values is
        positive (i.e. the NNs are not equivalent) is minimized.
        -> For the found solutions of the radius there are counterexamples to equivalence
        -> For the calculated bounds, the NNs are equivalent (at least within bound - eps they should be)

        :param center: the center of the ball
        :param radius: the radius of the ball if radius_mode = 'constant', or the upper bound on the radius variable
            if radius_mode = 'variable'
        :param metric: either 'chebyshev' or 'manhattan' is supported
        :param radius_mode: 'constant' - the radius is a constant and the difference between the two NNs
            around the center with this radius can be optimized.
            'variable' - the radius is a variable and the radius for which the two NNs are equivalent can be optimized
        :param radius_lo: lower bound of the radius if radius_mode = 'variable' is selected
        :return:
        '''

        self.radius_mode = radius_mode

        def add_absolute_value_constraints(radius, dimension, netPrefix,
                                           centered_inputs):
            ineqs = []
            additional_vars = []

            deltas = [
                Variable(0, i, netPrefix, 'd', 'Int') for i in range(dimension)
            ]
            abs_outs = [
                Variable(0, i, netPrefix, 'abs') for i in range(dimension)
            ]
            ineqs.append([
                Abs(ci, aout, d)
                for ci, aout, d in zip(centered_inputs, abs_outs, deltas)
            ])
            ineqs.append(Geq(radius, Sum(abs_outs)))

            additional_vars.append(deltas)
            additional_vars.append(abs_outs)

            return ineqs, additional_vars

        def add_direct_constraints(radius, dimension, centered_inputs):
            ineqs = []
            for i in range(2**dimension):
                terms = []
                for j in range(dimension):
                    neg = (i // 2**j) % 2
                    if neg > 0:
                        terms.append(Neg(centered_inputs[j]))
                    else:
                        terms.append(centered_inputs[j])

                ineqs.append(Geq(radius, Sum(terms)))

            return ineqs, []

        if not metric in ['manhattan', 'chebyshev']:
            raise ValueError('Metric {m} is not supported!'.format(m=metric))

        invars = self.input_layer.get_outvars()
        dim = len(invars)

        if not len(center) == dim:
            raise ValueError(
                'Center has dimension {cdim}, but input has dimension {idim}'.
                format(cdim=len(center), idim=dim))

        for i, invar in enumerate(invars):
            invar.update_bounds(center[i] - radius, center[i] + radius)

        netPrefix, _, _ = invars[0].getIndex()
        additional_vars = []
        additional_ineqs = []
        r = None

        if radius_mode == 'constant':
            # need float as somehow gurobi can't handle float64 as type
            r = Constant(float(radius), netPrefix, 0, 0)
        elif radius_mode == 'variable':
            r = Variable(0, 0, netPrefix, 'r')
            r.update_bounds(float(radius_lo), float(radius))
            additional_vars.append(r)

            diff = self.equivalence_layer.get_outvars()[-1]
            additional_ineqs.append(
                Geq(diff, Constant(fc.not_equiv_tolerance, netPrefix, 0, 0)))
            #additional_ineqs.append(Geq(diff, Constant(0, netPrefix, 0, 0)))
        else:
            raise ValueError(
                'radius_mode: {} is not supported!'.format(radius_mode))

        if metric == 'chebyshev' and radius_mode == 'variable':
            for i, invar in enumerate(invars):
                center_i = Constant(float(center[i]), netPrefix, 0, 0)
                additional_ineqs.append(Geq(invar, Sum([center_i, Neg(r)])))
                additional_ineqs.append(Geq(Sum([center_i, r]), invar))

        if metric == 'manhattan':
            centered_inputs = []

            for i in range(dim):
                centered_inputs.append(
                    Sum([invars[i],
                         Neg(Constant(center[i], netPrefix, 0, i))]))

            if fc.manhattan_use_absolute_value:
                ineqs, constraint_vars = add_absolute_value_constraints(
                    r, dim, netPrefix, centered_inputs)
            else:
                ineqs, constraint_vars = add_direct_constraints(
                    r, dim, centered_inputs)

            additional_vars += constraint_vars
            additional_ineqs += ineqs

        self.input_layer.add_input_constraints(additional_ineqs,
                                               additional_vars)
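
Summarized (c = center, x = invars, r the radius constant or variable), the constraints added above are roughly

\[
c_i - r \le x_i \le c_i + r \qquad \text{and, for the Manhattan metric,} \qquad \sum_i |x_i - c_i| \le r .
\]

The per-dimension bounds are always installed with the constant radius via update_bounds and are repeated as explicit inequalities for the Chebyshev metric with a variable radius; in 'variable' mode the radius additionally satisfies radius_lo <= r <= radius and the top-level difference has to satisfy diff >= fc.not_equiv_tolerance, so minimizing r searches for the smallest radius that still admits a counterexample to equivalence.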
Code Example #14
def encode_equivalence_layer(outs1, outs2, mode='diff_zero'):
    def one_hot_comparison(oh1, oh2, net, layer, row, desired='different'):
        '''
        Compares two one-hot vectors and returns constraints that can only be satisfied
        if the vectors are equal/different (depending on the desired keyword)
        :param oh1: one-hot vector
        :param oh2: one-hot vector
        :param net: netPrefix
        :param layer: layer of the net, in which this operation takes place
        :param row: row of the net, in which this operation takes place
        :param desired: keyword
            different - the constraints can only be satisfied if the vectors are different
            equal - the constraints can only be satisfied if the vectors are equal
        :return: a tuple of (deltas, diffs, constraints) where constraints are as described above and deltas, diffs
            are variables used in these constraints
        '''
        # requires that oh_i are one-hot vectors
        oh_deltas = []
        oh_diffs = []
        oh_constraints = []

        desired_result = 1
        if desired == 'different':
            desired_result = 1
        elif desired == 'equal':
            desired_result = 0

        terms = []
        x = 1
        # interpret the one-hot vectors as binary numbers: s = sum_i 2^i * (oh1_i - oh2_i)
        for oh1_i, oh2_i in zip(oh1, oh2):
            constant = Constant(x, net, layer, row)
            terms.append(Multiplication(constant, oh1_i))
            terms.append(Neg(Multiplication(constant, oh2_i)))
            x *= 2

        sumvar = Variable(layer, row, net, 's', 'Int')
        oh_constraints.append(Linear(Sum(terms), sumvar))

        delta_gt = Variable(layer, row, net, 'dg', 'Int')
        delta_lt = Variable(layer, row, net, 'dl', 'Int')
        zero = Constant(0, net, layer, row)

        oh_constraints.append(Gt_Int(sumvar, zero, delta_gt))
        oh_constraints.append(Gt_Int(zero, sumvar, delta_lt))
        oh_constraints.append(
            Geq(Sum([delta_lt, delta_gt]),
                Constant(desired_result, net, layer, row)))

        oh_deltas.append(delta_gt)
        oh_deltas.append(delta_lt)

        oh_diffs.append(sumvar)

        return oh_deltas, oh_diffs, oh_constraints

    def number_comparison(n1, n2, net, layer, row, epsilon=0):
        '''
        Compares two arbitrary numbers and returns constraints such that one of the deltas is equal to 1
        if the numbers are not equal
        :param n1: number
        :param n2: number
        :param net: netPrefix
        :param layer: layer of the net, in which this operation takes place
        :param row: row of the net, in which this operation takes place
        :param epsilon: tolerance; for epsilon > 0 the numbers only count as different if they differ by more than epsilon
        :return: a tuple of (deltas, diffs, constraints) where constraints are as described above and deltas, diffs
            are variables used in these constraints
        '''
        v_deltas = []
        v_diffs = []
        v_constraints = []

        delta_gt = Variable(layer, row, net, 'dg', 'Int')
        delta_lt = Variable(layer + 1, row, net, 'dl', 'Int')

        if epsilon > 0:
            eps = Constant(epsilon, net, layer + 1, row)
            diff_minus_eps = Variable(layer, row, net, 'x_m')
            diff_plus_eps = Variable(layer, row, net, 'x_p')

            v_constraints.append(
                Linear(Sum([n2, Neg(n1), Neg(eps)]), diff_minus_eps))
            v_constraints.append(Linear(Sum([n2, Neg(n1), eps]),
                                        diff_plus_eps))

            v_constraints.append(Greater_Zero(diff_minus_eps, delta_gt))
            v_constraints.append(Greater_Zero(Neg(diff_plus_eps), delta_lt))

            v_diffs.append(diff_minus_eps)
            v_diffs.append(diff_plus_eps)
        else:
            diff = Variable(layer, row, net, 'x')

            v_constraints.append(Linear(Sum([n1, Neg(n2)]), diff))
            v_constraints.append(Greater_Zero(diff, delta_gt))
            v_constraints.append(Greater_Zero(Neg(diff), delta_lt))

            v_diffs.append(diff)

        v_deltas.append(delta_gt)
        v_deltas.append(delta_lt)

        #v_constraints.append(Geq(Sum(v_deltas), Constant(desired_result, net, layer + 1, row)))

        return v_deltas, v_diffs, v_constraints

    deltas = []
    diffs = []
    constraints = []

    if mode == 'diff_zero' or mode.startswith('epsilon_'):
        eps = 0
        if mode.startswith('epsilon_'):
            eps = float(mode.split('_')[-1])

        for i, (out1, out2) in enumerate(zip(outs1, outs2)):
            n_deltas, n_diffs, n_constraints = number_comparison(out1,
                                                                 out2,
                                                                 'E',
                                                                 0,
                                                                 i,
                                                                 epsilon=eps)

            deltas += n_deltas
            diffs += n_diffs
            constraints += n_constraints

        constraints.append(Geq(Sum(deltas), Constant(1, 'E', 1, 0)))
    elif mode in [
            'optimize_diff', 'optimize_diff_manhattan',
            'optimize_diff_chebyshev'
    ]:
        for i, (out1, out2) in enumerate(zip(outs1, outs2)):
            diff_i = Variable(0, i, 'E', 'diff')
            constraints.append(Linear(Sum([out1, Neg(out2)]), diff_i))

            diffs.append(diff_i)

        # only continues if mode is optimize_diff_manhattan or optimize_diff_chebyshev
        if mode.startswith('optimize_diff_'):
            abs_vals = []
            for i, diff in enumerate(diffs):
                abs_val_i = Variable(0, i, 'E', 'abs_d')
                abs_vals.append(abs_val_i)

                delta_i = Variable(0, i, 'E', 'd', 'Int')
                delta_i.update_bounds(0, 1)
                deltas.append(delta_i)

                constraints.append(Abs(diff, abs_val_i, delta_i))

            diffs.append(abs_vals)

            if mode == 'optimize_diff_manhattan':
                norm = Variable(1, 0, 'E', 'norm')

                constraints.append(Linear(Sum(abs_vals), norm))

                diffs.append(norm)

            elif mode == 'optimize_diff_chebyshev':
                partial_matrix, partial_vars, partial_constrs = encode_partial_layer(
                    1, abs_vals, 1, 'E')
                diffs.append(partial_vars)
                constraints.append(partial_constrs)
                deltas.append(partial_matrix)

                context_constraints = []
                if fc.use_context_groups:
                    # partial_vars = ([E_y_ij, ...] + [E_o_1_0])
                    context_constraints.append(
                        TopKGroup(partial_vars[-1], abs_vals, 1))

                constraints.append(context_constraints)

                # only for interface to norm optimization, otherwise would have to optimize E_o_1_0
                norm = Variable(1, 0, 'E', 'norm')
                constraints.append(Linear(partial_vars[-1], norm))

                diffs.append(norm)

    elif mode == 'diff_one_hot':
        # requires that outs_i are the pi_1_j of the respective permutation matrices
        # or that the inputs to this layer are one-hot vectors

        deltas, diffs, constraints = one_hot_comparison(outs1,
                                                        outs2,
                                                        'E',
                                                        0,
                                                        0,
                                                        desired='different')
    elif mode.startswith('ranking_top_'):
        # assumes outs1 = one-hot vector with maximum output of NN1
        # outs2 = (one-hot biggest, one-hot 2nd biggest, ...) of NN2

        k = int(mode.split('_')[-1])

        for i in range(k):
            k_deltas, k_diffs, k_constraints = one_hot_comparison(
                outs1, outs2[i], 'E', 0, i, desired='different')
            deltas += k_deltas
            diffs += k_diffs
            constraints += k_constraints
    elif mode.startswith('one_ranking_top_'):
        # assumes outs1 = permutation matrix of NN1
        # outs2 = outputs of NN2

        k = int(mode.split('_')[-1])

        matrix = outs1
        ordered2 = [Variable(0, i, 'E', 'o') for i in range(len(outs2))]

        res_vars, mat_constrs = encode_binmult_matrix(outs2, 0, 'E', matrix,
                                                      ordered2)

        order_constrs = []
        deltas = []
        for i in range(k, len(outs2)):
            delta_i = Variable(0, i, 'E', 'd', type='Int')
            deltas.append(delta_i)
            # o_1 < o_i <--> d = 1
            # 0 < o_i - o_1 <--> d = 1
            order_constrs.append(
                Greater_Zero(Sum([ordered2[i], Neg(ordered2[0])]), delta_i))

        order_constrs.append(Geq(Sum(deltas), Constant(1, 'E', 0, 0)))

        constraints = mat_constrs + order_constrs
        diffs = res_vars + ordered2
    elif mode.startswith('optimize_ranking_top_'):
        k = int(mode.split('_')[-1])

        matrix = outs1
        ordered2 = [Variable(0, i, 'E', 'o') for i in range(len(outs2))]

        res_vars, mat_constrs = encode_binmult_matrix(outs2, 0, 'E', matrix,
                                                      ordered2)

        order_constrs = []
        diffs = []
        for i in range(k, len(outs2)):
            diff_i = Variable(0, i, 'E', 'diff')
            diffs.append(diff_i)
            order_constrs.append(
                Linear(Sum([ordered2[i], Neg(ordered2[0])]), diff_i))

        constraints = mat_constrs + order_constrs
        deltas = res_vars + ordered2
    elif mode.startswith('partial_top_'):
        # assumes outs1 = [partial matrix, set-var] of NN1
        # assumes outs2 = outputs of NN2
        partial_matrix = outs1[0]
        one_hot_vec = partial_matrix[0]
        set_var = outs1[1]

        top = Variable(0, 0, 'E', 'top')
        # one_hot_vec and top need to be enclosed in [], so that indexing in binmult_matrix works
        res_vars, mat_constrs = encode_binmult_matrix(outs2, 0, 'E',
                                                      [one_hot_vec], [top])

        order_constrs = []
        for i in range(len(outs2)):
            order_constrs.append(
                Impl(set_var[i], 0, Sum([outs2[i], Neg(top)]),
                     Constant(0, 'E', 0, 0)))

        constraints = mat_constrs + order_constrs
        deltas = res_vars
        diffs = [top]
    elif mode.startswith('optimize_partial_top_'):
        # assumes outs1 = [partial matrix, set-var] of NN1
        # assumes outs2 = outputs of NN2
        partial_matrix = outs1[0]
        one_hot_vec = partial_matrix[0]
        set_var = outs1[1]

        top = Variable(0, 0, 'E', 'top')
        # one_hot_vec and top need to be enclosed in [], so that indexing in binmult_matrix works
        res_vars, mat_constrs = encode_binmult_matrix(outs2, 0, 'E',
                                                      [one_hot_vec], [top])

        order_constrs = []
        diffs = [Variable(0, i, 'E', 'diff') for i in range(len(outs2))]
        order_constrs.append(
            IndicatorToggle(
                set_var, 0,
                [Sum([outs2[i], Neg(top)]) for i in range(len(outs2))], diffs))

        max_diff_vec = [
            Variable(1, i, 'E', 'pi', 'Int') for i in range(len(diffs))
        ]
        max_diff = Variable(1, 0, 'E', 'max_diff')
        res_vars2, mat_constrs2 = encode_binmult_matrix(
            diffs, 1, 'Emax', [max_diff_vec], [max_diff])
        for diff in diffs:
            order_constrs.append(Geq(max_diff, diff))

        diffs.append(max_diff)

        constraints = mat_constrs + order_constrs + mat_constrs2
        deltas = res_vars + [top] + max_diff_vec + res_vars2

    elif mode.startswith('one_hot_partial_top_'):
        k = int(mode.split('_')[-1])
        # assumes outs1 = one hot vector of NN1
        # assumes outs2 = output of NN2
        one_hot_vec = outs1

        top = Variable(0, 0, 'E', 'top')
        # one_hot_vec and top need to be enclosed in [], so that indexing in binmult_matrix works
        res_vars, mat_constrs = encode_binmult_matrix(outs2, 0, 'Eoh',
                                                      [one_hot_vec], [top])

        partial_matrix, partial_vars, partial_constrs = encode_partial_layer(
            k, outs2, 1, 'E')

        context_constraints = []
        if fc.use_context_groups:
            context_constraints.append(ExtremeGroup(top, outs2))
            # partial_vars = ([E_y_ij, ...] + [E_o_1_0, E_o_1_1, ..., E_o_1_(k-1)])
            for i in range(1, k + 1):
                context_constraints.append(
                    TopKGroup(partial_vars[i - (k + 1)], outs2, i))

        diff = Variable(0, k, 'E', 'diff')
        diff_constr = Linear(Sum([partial_vars[-1], Neg(top)]), diff)

        deltas = [top] + res_vars + partial_matrix + partial_vars
        diffs = [diff]
        constraints = mat_constrs + partial_constrs + context_constraints + [
            diff_constr
        ]
    elif mode == 'one_hot_diff':
        # assumes outs1 = one hot vector of NN1
        # assumes outs2 = output of NN2
        one_hot_vec = outs1
        top = Variable(0, 0, 'E', 'top')
        # one_hot_vec and top need to be enclosed in [], so that indexing in binmult_matrix works
        res_vars, mat_constrs = encode_binmult_matrix(outs2, 0, 'E',
                                                      [one_hot_vec], [top])

        diffs = [Variable(0, i, 'E', 'diff') for i in range(len(outs2))]
        diff_constrs = [
            Linear(Sum([out, Neg(top)]), diff)
            for out, diff in zip(outs2, diffs)
        ]

        deltas = [top] + res_vars
        constraints = mat_constrs + diff_constrs
    else:
        raise ValueError('There is no \'' + mode +
                         '\' keyword for parameter mode')

    return deltas, diffs, constraints
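
For the default 'diff_zero' and 'epsilon_e' modes, the constraints built here amount to the negation of equivalence: with epsilon = 0 or the given tolerance,

\[
\exists\, i: \; |out1_i - out2_i| > \varepsilon \qquad \left(\text{enforced via } \sum_i (\delta_{gt,i} + \delta_{lt,i}) \ge 1\right),
\]

so the overall encoding is satisfiable exactly when the two networks disagree by more than epsilon on some admissible input. The optimize_* and *_top_* modes instead expose difference, norm or ranking variables that the caller can optimize or constrain.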