def gen_consistency_constraints(constraint: Constraint, counter: int):
    """
    Args:
        constraint: Consistency constraint on tensors
        counter: variable tracking

    Returns: Equality and consistency constraints on dimensions

    """

    all_constraints = []

    for i in range(1, MAX_TENSOR_RANK + 1):
        new_dims_rhs_1, counter = gen_tensor_dims(i, counter)
        new_dims_rhs_2, counter = gen_tensor_dims(i, counter)

        nat_constraints = gen_nat_constraints(new_dims_rhs_1 + new_dims_rhs_2)

        c_tensor_i = Conj([
            BinConstraintT(constraint.lhs, TensorType(new_dims_rhs_1), op_eq),
            BinConstraintT(constraint.rhs, TensorType(new_dims_rhs_2), op_eq)
        ] + [
            BinConstraintD(d1, d2, op_consistency)
            for d1, d2 in zip(new_dims_rhs_1, new_dims_rhs_2)
        ] + nat_constraints)

        all_constraints.append(c_tensor_i)

    return all_constraints, counter
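# Hedged sketch (not from the original module): dimension "consistency" in
# gradual typing lets Dyn match anything, while two static dimensions are
# consistent only when equal. _dims_consistent is a hypothetical plain-Python
# model of the relation that op_consistency encodes above.
def _dims_consistent(d1, d2) -> bool:
    DYN = "Dyn"  # stand-in for the module's Dyn sentinel
    return d1 == DYN or d2 == DYN or d1 == d2

assert _dims_consistent("Dyn", 3) and _dims_consistent(3, 3)
assert not _dims_consistent(3, 4)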
def create_equality_constraints_for_broadcasting(e1: TVar, e2: TVar, e11: TVar,
                                                 e12: TVar, d1: List[DVar],
                                                 d2: List[DVar],
                                                 d11: List[DVar],
                                                 d12: List[DVar]):
    """
    Create equality constraints for when no broadcasting occurs
    Args:
        e1: Input 1
        e2: Input 2
        e11: Broadcasted input 1
        e12: Broadcasted input 2
        d1: Variables that store dimensions for e1
        d2: Variables that store dimensions for e2
        d11: Variables that store dimensions for e11
        d12: Variables that store dimensions for e12

    Returns: Four equality constraints

    """

    e1_tensor = BinConstraintT(e1, TensorType(d1), op_eq)
    e11_tensor = BinConstraintT(e11, TensorType(d11), op_eq)
    e2_tensor = BinConstraintT(e2, TensorType(d2), op_eq)
    e12_tensor = BinConstraintT(e12, TensorType(d12), op_eq)
    return [e1_tensor, e11_tensor, e2_tensor, e12_tensor]
def transform_get_item_tensor(constraint, counter):
    """
    When the index is a tuple, then the output will be a tensor
    TODO: we have to check if this is the case for all HF models

    The cases we are covrering here are a tuple with one of:
     - slice with default argument
     - None

     None appends 1 to the input tensor dimensions
     so each occurrence of 'None' increases the rank by 1

     slice with default arguments does not change the rank
    """
    assert isinstance(constraint.index_tuple, tuple)

    # generate a result tensor of the expected size
    dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
    nat_constraints = gen_nat_constraints(dims)

    # generate a place-holder list of the right rank
    # where "slice" does not contribute to the rank and "None" does
    none_c = constraint.index_tuple.count(None)
    resulting_tensor_dims = (none_c + len(dims)) * [None]

    for i in range(len(constraint.index_tuple)):

        # place a 1 at each position indexed by None
        if constraint.index_tuple[i] is None:
            resulting_tensor_dims[i] = 1

        elif constraint.index_tuple[i] == slice(None, None, None):
            pass

        else:
            raise NotImplementedError('Method not yet implemented')

    # fill the remaining positions with the input dimensions, in order
    dim_index = 0
    for i in range(len(resulting_tensor_dims)):
        if resulting_tensor_dims[i] is None:
            resulting_tensor_dims[i] = dims[dim_index]
            dim_index += 1

    # check if the index is valid
    is_valid_index = valid_index_tensor(constraint.index_tuple, dims)

    # the resulting tensor must stay within the maximum supported rank (4)
    if len(resulting_tensor_dims) > 4:
        return F(), counter

    else:
        constraints = [
            BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
            BinConstraintT(constraint.res, TensorType(resulting_tensor_dims),
                           op_eq), *nat_constraints, is_valid_index
        ]
        return Conj(constraints), counter
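# Hedged illustration (not from the original module): a standalone model of how
# a tuple index changes rank, mirroring the placeholder logic above. It assumes
# every non-None entry is a default slice, as the NotImplementedError branch
# enforces: slice(None) keeps a dimension, None inserts a new dimension of 1.
def _demo_getitem_tuple_shape(input_shape, index_tuple):
    result = [1 if idx is None else None for idx in index_tuple]
    dims = iter(input_shape)
    return tuple(next(dims) if d is None else d for d in result)

# (2, 3) indexed by (None, slice(None), slice(None)) gains a leading 1
assert _demo_getitem_tuple_shape((2, 3), (None, slice(None), slice(None))) == (1, 2, 3)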
def generate_calc_product(constraint, counter):
    """
    Transform flatten constraints
    """
    start = constraint.start
    end = constraint.end
    dims = constraint.dims_to_flatten
    flattened = constraint.flattened
    n = len(dims)

    # the boundary check is evaluated eagerly, at constraint-generation time
    boundary_check = 0 <= start < end <= n

    c_boundary = T() if boundary_check else F()

    lhs = dims[0:start]
    rhs = dims[end:]
    mid = dims[start:end]

    all_possibilities = generate_all_int_dyn_dim_possibilities(mid)

    all_constraints = []

    for p in all_possibilities:
        p = list(p)
        # the possibility contains a dynamic dimension unless every
        # middle dimension is constrained to be non-Dyn
        contains_dyn = not all(c.op == op_neq for c in p)
        if contains_dyn:
            mid_var = [Dyn]
            total_constraints = lhs + mid_var + rhs
            if len(total_constraints) > 4:
                all_constraints.append(F())
            else:
                all_constraints.append(
                    Conj([
                        BinConstraintT(flattened,
                                       TensorType(lhs + mid_var + rhs), op_eq)
                    ] + p))
        else:
            new_var, counter = gen_dvar(counter)
            mid_eq_prod = Conj([
                BinConstraintD(new_var, Prod(mid), op_eq),
                BinConstraintD(new_var, Dyn, op_neq)
            ])
            mid_var = [new_var]
            total_constraints = lhs + mid_var + rhs
            if len(total_constraints) > 4:
                all_constraints.append(F())
            else:
                all_constraints.append(
                    Conj([
                        BinConstraintT(flattened,
                                       TensorType(lhs + mid_var +
                                                  rhs), op_eq), mid_eq_prod
                    ] + p))

    return Conj([Disj(all_constraints), c_boundary]), counter
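# Hedged numeric sketch (not from the original module): for fully static
# dimensions, the constraint above amounts to collapsing dims[start:end]
# into their product while the surrounding dimensions pass through.
def _demo_flatten_shape(shape, start, end):
    assert 0 <= start < end <= len(shape)
    mid = 1
    for dim in shape[start:end]:
        mid *= dim
    return shape[:start] + (mid,) + shape[end:]

assert _demo_flatten_shape((2, 3, 4, 5), 1, 3) == (2, 12, 5)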
def generate_broadcasting(constraint, counter):
    """
    Transform broadcasting constraints
    """
    e11, e12 = constraint.res1, constraint.res2
    e1, e2 = constraint.input1, constraint.input2

    e1_dyn = BinConstraintT(e1, Dyn, op_eq)
    e2_dyn = BinConstraintT(e2, Dyn, op_eq)

    # when one input is Dyn, the broadcasted results equal the inputs
    e1_equal_e11 = BinConstraintT(e1, e11, op_eq)
    e2_equal_e12 = BinConstraintT(e2, e12, op_eq)

    # dyn possibility
    e1_dyn_constraint = Conj([e1_dyn, e1_equal_e11, e2_equal_e12])
    e2_dyn_constraint = Conj([e2_dyn, e1_equal_e11, e2_equal_e12])

    # tensor possibility
    # generate dimensions to create tensors of rank 1
    final_tensor_1_constraint, _, _, nat_dims_1, counter = \
        gen_broadcasting_constraints(e1, e2, e11, e12, 1, counter)

    # generate dimensions to create tensors of rank 2
    final_tensor_2_constraint_no_padding, final_tensor_2_constraint_padding_arg1, \
        final_tensor_2_constraint_padding_arg2, nat_dims_2, counter = \
        gen_broadcasting_constraints(e1, e2, e11, e12, 2, counter)

    # generate dimensions to create tensors of rank 3
    final_tensor_3_constraint_no_padding, final_tensor_3_constraint_padding_arg1, \
        final_tensor_3_constraint_padding_arg2, nat_dims_3, counter = \
        gen_broadcasting_constraints(e1, e2, e11, e12, 3, counter)

    # generate dimensions to create tensors of rank 4
    final_tensor_4_constraint_no_padding, final_tensor_4_constraint_padding_arg1, \
        final_tensor_4_constraint_padding_arg2, nat_dims_4, counter = \
        gen_broadcasting_constraints(e1, e2, e11, e12, 4, counter)

    final_result = Disj([
        e1_dyn_constraint, e2_dyn_constraint, final_tensor_1_constraint,
        final_tensor_2_constraint_no_padding,
        final_tensor_2_constraint_padding_arg1,
        final_tensor_2_constraint_padding_arg2,
        final_tensor_3_constraint_no_padding,
        final_tensor_3_constraint_padding_arg1,
        final_tensor_3_constraint_padding_arg2,
        final_tensor_4_constraint_no_padding,
        final_tensor_4_constraint_padding_arg1,
        final_tensor_4_constraint_padding_arg2
    ])

    return Conj(
        [final_result, *nat_dims_1, *nat_dims_2, *nat_dims_3,
         *nat_dims_4]), counter
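# Hedged sketch (not from the original module): the rank-by-rank disjunction
# above enumerates what NumPy-style broadcasting computes directly. This
# standalone helper returns the broadcast shape of two static shapes, or
# None when they are incompatible.
def _demo_broadcast_shape(shape1, shape2):
    s1 = (1,) * (len(shape2) - len(shape1)) + tuple(shape1)
    s2 = (1,) * (len(shape1) - len(shape2)) + tuple(shape2)
    out = []
    for a, b in zip(s1, s2):
        if a == b or a == 1 or b == 1:
            out.append(max(a, b))
        else:
            return None
    return tuple(out)

assert _demo_broadcast_shape((3, 1), (2, 1, 4)) == (2, 3, 4)
assert _demo_broadcast_shape((3,), (4,)) is None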
def transform_get_item(constraint, counter):
    """
    Generate an equality of the form t = [a1, ..., an], then generate
    constraints that check whether the given index is valid for this
    particular tensor size. If the index is valid, generate a constraint
    to get the item. Note that the Dyn input case was already handled
    in the previous step.
    Args:
        constraint: GetItem which assumes we are getting an item from a tensor (not Dyn)
        counter: variable tracking
    Returns: simplified constraints for GetItem

    """
    dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
    nat_constraints = gen_nat_constraints(dims)

    is_valid_index = valid_index(constraint.index, dims)

    all_constraints = [
        BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
        *nat_constraints, is_valid_index
    ]

    # if the index is valid, we generate a constraint for getting an item
    # otherwise the failed index check makes this clause UNSAT
    if is_valid_index == T():
        all_constraints.append(
            BinConstraintD(constraint.res, dims[constraint.index], op_eq))

    return Conj(all_constraints), counter
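# Hedged sketch (not from the original module): a plausible reading of what
# valid_index checks, assuming the usual convention that an item index into
# a rank-n tensor must lie in [-n, n).
def _demo_valid_index(index, rank) -> bool:
    return -rank <= index < rank

assert _demo_valid_index(-1, 3) and not _demo_valid_index(3, 3)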
def generate_calc_conv(constraint, counter):
    """
    Transform convolution constraints
    """
    d, counter = gen_tensor_dims(4, counter)
    conv_result = TensorType([d[0], d[1], d[2], d[3]])

    # the convolution result is a tensor of rank 4
    c1 = BinConstraintT(constraint.conv_result, conv_result, op_eq)

    # the second dimension of the output is equal to the output channels
    c2 = Conj([
        BinConstraintD(d[1], constraint.c_out, op_eq),
        BinConstraintD(d[1], Dyn, op_neq)
    ])

    # the batch dimension of the input carries over to the output
    c3 = BinConstraintD(constraint.matching_constraint[0], d[0], op_eq)

    c4, c5 = calc_last_two_dims(constraint, d)

    leq_constraints = Conj([
        BinConstraintD(0, d[0], op_leq),
        BinConstraintD(0, d[1], op_leq),
        BinConstraintD(0, d[2], op_leq),
        BinConstraintD(0, d[3], op_leq)
    ])

    return Conj([c1, c2, c3, c4, c5, leq_constraints]), counter
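# Hedged numeric sketch (not from the original module): for static inputs,
# the last-two-dimension constraints from calc_last_two_dims should agree
# with PyTorch's Conv2d output-size formula,
# floor((in + 2*padding - dilation*(kernel - 1) - 1) / stride) + 1.
def _demo_conv_out_size(size, kernel, stride=1, padding=0, dilation=1):
    return (size + 2 * padding - dilation * (kernel - 1) - 1) // stride + 1

# a 3x3 kernel with stride 2 and padding 1 halves a 32-pixel side to 16
assert _demo_conv_out_size(32, kernel=3, stride=2, padding=1) == 16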
def generate_gub(constraint, counter):
    """
    Transform greatest upper bound for tensors. Results in equality and Greatest Upper Bound
    on dimensions
    """
    c1 = Conj([
        Disj([
            BinConstraintT(constraint.rhs1, Dyn, op_eq),
            BinConstraintT(constraint.rhs2, Dyn, op_eq)
        ]),
        BinConstraintT(constraint.res, Dyn, op_eq)
    ])

    [c2, c3, c4, c5], counter = gen_greatest_upper_bound(constraint, counter)

    return Disj([c1, c2, c3, c4, c5]), counter
def apply_padding(e1_var: TVar, e11: BinConstraintT, e2: BinConstraintT,
                  e12: BinConstraintT, d2: List[DVar], d11: List[DVar],
                  d12: List[DVar], counter: int):
    """
    We are considering the possibility where one input has less dimensions than
    another input, so we apply padding to the broadcasted results

    Args:
        e1_var: Variable representing the first input where padding will be
        e11: constraint of the form e11 = Tensortype[d1, ..., dn]
        e2:  constraint of the form e2 = Tensortype[d1, ..., dn]
        e12: constraint of the form e11 = Tensortype[d1, ..., dn]
        d2: Tensor variables for the second input
        d11: Tensor variables for the broadcasted first input
        d12: Tensor variables for the broadcasted second input
        counter: variable tracking

    Returns: A new constraint whose goal is to apply padding to the broadcasted result

    """

    res = []

    # pad the shorter input with None so we can pass it to the broadcasting helper function
    for i in range(1, len(d2)):

        d1, counter = gen_tensor_dims(i, counter)

        nat_constraints = gen_nat_constraints(d1 + d2 + d11 + d12)

        e1 = BinConstraintT(e1_var, TensorType(d1), op_eq)

        simulate_padding = [None] * (len(d2) - i)

        assert len(simulate_padding + d1) == len(d2)

        broadcast_padding = []

        # for every padding size, we also consider broadcasting
        for j in range(len(d2) - i):
            broadcast_padding.append(
                broadcast_dim(simulate_padding, d2, d11, d12, j, True))

        # we consider the possibilities for broadcasting for every dimension. Since we already
        # padded d1, we do not consider it while broadcasting
        all_broadcasting_possibilities = generate_all_broadcasting_possibilities_no_padding(
            d1, d2[(len(d2) - i):], d11[(len(d2) - i):], d12[(len(d2) - i):])
        # combine all constraints into a conjunction
        c = Conj([
            e1, e11, e2, e12, *broadcast_padding,
            all_broadcasting_possibilities, *nat_constraints
        ])
        res.append(c)

    return Disj(res), counter
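# Hedged sketch (not from the original module): the padding loop above plays
# the role of the usual broadcasting rule of left-padding the shorter shape
# with size-1 dimensions until the ranks match.
def _demo_left_pad(shape, rank):
    return (1,) * (rank - len(shape)) + tuple(shape)

assert _demo_left_pad((3, 4), 4) == (1, 1, 3, 4)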
def transform_transpose(constraint, counter):
    """
    Similar to a sequence of two index-selects
    """
    dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
    is_valid_index1 = valid_index(constraint.index1, dims)
    is_valid_index2 = valid_index(constraint.index2, dims)
    new_dims = copy.deepcopy(dims)
    nat_constraints = gen_nat_constraints(dims)

    if is_valid_index1 == T() and is_valid_index2 == T():
        new_dims[constraint.index1] = dims[constraint.index2]
        new_dims[constraint.index2] = dims[constraint.index1]

    transformed_constraint = Conj([
        BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
        *nat_constraints, is_valid_index1, is_valid_index2,
        BinConstraintT(constraint.output, TensorType(new_dims), op_eq)
    ])
    return transformed_constraint, counter
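# Hedged sketch (not from the original module): for a static shape, the
# transpose constraint reduces to swapping two entries, as torch.transpose does.
def _demo_transpose_shape(shape, i, j):
    out = list(shape)
    out[i], out[j] = out[j], out[i]
    return tuple(out)

assert _demo_transpose_shape((2, 3, 4), 0, 2) == (4, 3, 2)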
def gen_greatest_upper_bound(constraint: TGreatestUpperBound, counter: int):
    """
    Args:
        constraint: Greatest upper bound on tensors
        counter: variable tracking

    Returns: A set of equality constraints and DGreatestUpperBound constraints

    """

    all_constraints = []

    for i in range(1, MAX_TENSOR_RANK + 1):
        c = []
        dims1, counter = gen_tensor_dims(i, counter)
        c1tensor = TensorType(dims1)

        dims2, counter = gen_tensor_dims(i, counter)
        c2tensor = TensorType(dims2)

        dims3, counter = gen_tensor_dims(i, counter)
        c3tensor = TensorType(dims3)

        c += [BinConstraintT(constraint.rhs1, c1tensor, op_eq),
              BinConstraintT(constraint.rhs2, c2tensor, op_eq),
              BinConstraintT(constraint.res, c3tensor, op_eq)] + \
            gen_nat_constraints(dims1 + dims2 + dims3)

        assert len(c3tensor.__args__) == len(c1tensor.__args__) == len(
            c2tensor.__args__)
        for j in range(len(c3tensor.__args__)):
            c.append(
                DGreatestUpperBound(c3tensor.__args__[j], c1tensor.__args__[j],
                                    c2tensor.__args__[j]))

        all_constraints.append(Conj(c))
    return all_constraints, counter
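# Hedged sketch (not from the original module): a plausible plain-Python model
# of DGreatestUpperBound on two dimensions, reading Dyn as the least precise
# element: gub(Dyn, d) = d, gub(d, d) = d, and distinct static dims have none.
def _demo_dim_gub(d1, d2):
    DYN = "Dyn"  # stand-in for the module's Dyn sentinel
    if d1 == DYN:
        return d2
    if d2 == DYN:
        return d1
    return d1 if d1 == d2 else None

assert _demo_dim_gub("Dyn", 5) == 5 and _demo_dim_gub(5, 5) == 5
assert _demo_dim_gub(3, 4) is None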
def transform_index_select(constraint, counter):
    """
    The constraints consider the given tensor size, checks if the index is valid
    and if so, generates a constraint for replacing the input dimension
    with the required dimension
    """
    dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
    is_valid_index = valid_index(constraint.index, dims)
    nat_constraints = gen_nat_constraints(dims)

    # if the index is valid then replace the input dimension with the new dimension;
    # otherwise the failed index check leaves the clause UNSAT
    new_dims = copy.deepcopy(dims)
    if is_valid_index == T():
        new_dims[constraint.index] = constraint.dim_replace

    transformed_constraint = Conj([
        BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
        *nat_constraints, is_valid_index,
        BinConstraintT(constraint.output, TensorType(new_dims), op_eq)
    ])

    return transformed_constraint, counter
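# Hedged sketch (not from the original module): for a static shape,
# index_select keeps every dimension except the selected one, which becomes
# the number of selected indices (dim_replace above).
def _demo_index_select_shape(shape, dim, num_indices):
    out = list(shape)
    out[dim] = num_indices
    return tuple(out)

assert _demo_index_select_shape((2, 3, 4), 1, 7) == (2, 7, 4)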
def generate_calc_maxpool(constraint, counter):
    """
    Transform maxpool constraints
    """
    d, counter = gen_tensor_dims(4, counter)
    maxpool_result = TensorType([d[0], d[1], d[2], d[3]])

    # the maxpool result is a tensor of rank 4
    c1 = BinConstraintT(constraint.maxpool_result, maxpool_result, op_eq)

    # the batch and channel dimensions carry over from the input
    c2 = BinConstraintD(constraint.matching_constraint[1], d[1], op_eq)
    c3 = BinConstraintD(constraint.matching_constraint[0], d[0], op_eq)
    c4, c5 = calc_last_two_dims(constraint, d)

    leq_constraints = Conj([
        BinConstraintD(0, d[0], op_leq),
        BinConstraintD(0, d[1], op_leq),
        BinConstraintD(0, d[2], op_leq),
        BinConstraintD(0, d[3], op_leq)
    ])

    return Conj([c1, c2, c3, c4, c5, leq_constraints]), counter
def generate_reshape(constraint, counter):
    """
    Transform reshape constraints
    """
    d, counter = gen_tensor_dims(4, counter)

    d1, d2, d3, d4 = d

    target = constraint.target.__args__

    is_fully_static = all(dim != Dyn for dim in target)

    # dynamic tensor
    c1_dyn = BinConstraintT(constraint.src, Dyn, op_eq)
    c2_tensor1 = BinConstraintT(constraint.src, TensorType([d1]), op_eq)
    c2_tensor2 = BinConstraintT(constraint.src, TensorType([d1, d2]), op_eq)
    c2_tensor3 = BinConstraintT(constraint.src, TensorType([d1, d2, d3]),
                                op_eq)
    c2_tensor4 = BinConstraintT(constraint.src, TensorType([d1, d2, d3, d4]),
                                op_eq)

    d1_eq_dyn = BinConstraintD(d1, Dyn, op_eq)
    d1_neq_dyn = BinConstraintD(d1, Dyn, op_neq)

    d2_eq_dyn = BinConstraintD(d2, Dyn, op_eq)
    d2_neq_dyn = BinConstraintD(d2, Dyn, op_neq)

    d3_eq_dyn = BinConstraintD(d3, Dyn, op_eq)
    d3_neq_dyn = BinConstraintD(d3, Dyn, op_neq)

    d4_eq_dyn = BinConstraintD(d4, Dyn, op_eq)
    d4_neq_dyn = BinConstraintD(d4, Dyn, op_neq)

    nat_d1 = BinConstraintD(0, d1, op_leq)
    nat_d2 = BinConstraintD(0, d2, op_leq)
    nat_d3 = BinConstraintD(0, d3, op_leq)
    nat_d4 = BinConstraintD(0, d4, op_leq)

    if is_fully_static:
        # size 1 tensor
        c3_tensor1 = Disj([
            d1_eq_dyn,
            (Conj([d1_neq_dyn,
                   BinConstraintD(d1, Prod(target), op_eq)]))
        ])
        all_tensor_1 = Conj([c2_tensor1, c3_tensor1])

        # size 2 tensor
        all_tensor_2 = Conj(
            [c2_tensor2,
             gen_all_reshape_possibilities([d1, d2], target)])

        # size 3 tensor
        all_tensor_3 = Conj(
            [c2_tensor3,
             gen_all_reshape_possibilities([d1, d2, d3], target)])

        # size 4 tensor
        all_tensor_4 = Conj([
            c2_tensor4,
            gen_all_reshape_possibilities([d1, d2, d3, d4], target)
        ])

        return Conj([
            Disj([
                c1_dyn, all_tensor_1, all_tensor_2, all_tensor_3, all_tensor_4
            ]), nat_d1, nat_d2, nat_d3, nat_d4
        ]), counter

    # otherwise there must be exactly one occurrence of Dyn in the target
    else:
        new_target = [n for n in target if n != Dyn]

        # tensor 1
        c3_tensor1 = Disj([
            d1_eq_dyn,
            (Conj([d1_neq_dyn,
                   is_dim_div_by_target(new_target, d1)]))
        ])
        all_tensor_1 = Conj([c2_tensor1, c3_tensor1])

        # tensor 2
        c21 = Disj([d1_eq_dyn, d2_eq_dyn])
        c22 = Conj([
            d1_neq_dyn, d2_neq_dyn,
            is_dim_div_by_target(new_target, Prod([d1, d2]))
        ])
        all_tensor_2 = Conj([c2_tensor2, Disj([c21, c22])])

        # tensor 3
        c31 = Disj([d1_eq_dyn, d2_eq_dyn, d3_eq_dyn])
        c32 = Conj([
            d1_neq_dyn, d2_neq_dyn, d3_neq_dyn,
            is_dim_div_by_target(new_target, Prod([d1, d2, d3]))
        ])
        all_tensor_3 = Conj([c2_tensor3, Disj([c31, c32])])

        # tensor 4
        c41 = Disj([d1_eq_dyn, d2_eq_dyn, d3_eq_dyn, d4_eq_dyn])
        c42 = Conj([
            d1_neq_dyn, d2_neq_dyn, d3_neq_dyn, d4_neq_dyn,
            is_dim_div_by_target(new_target, Prod([d1, d2, d3, d4]))
        ])
        all_tensor_4 = Conj([c2_tensor4, Disj([c41, c42])])

        return Conj([
            Disj([
                c1_dyn, all_tensor_1, all_tensor_2, all_tensor_3, all_tensor_4
            ]), nat_d1, nat_d2, nat_d3, nat_d4
        ]), counter
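# Hedged numeric sketch (not from the original module) of the two cases above:
# with a fully static target the element counts must match exactly; with a
# single Dyn (like -1 in torch.reshape) the known target dims need only
# divide the source's element count.
from math import prod

def _demo_reshape_feasible(src_shape, target):
    DYN = "Dyn"  # stand-in for the module's Dyn sentinel
    known = prod(d for d in target if d != DYN)
    total = prod(src_shape)
    return total % known == 0 if DYN in target else total == known

assert _demo_reshape_feasible((2, 6), (3, 4))        # 12 == 12
assert _demo_reshape_feasible((2, 6), ("Dyn", 4))    # 12 divisible by 4
assert not _demo_reshape_feasible((2, 6), ("Dyn", 5))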
def generate_binconstraint_t(constraint, counter):
    """
    Transform binary constraints for tensors
    """

    # precision constraints
    if constraint.op == op_precision:
        if constraint.lhs == Dyn:
            return T(), counter
        elif isinstance(constraint.lhs, TensorType):
            is_fully_static = all(d != Dyn for d in constraint.lhs.__args__)
            if is_fully_static:
                return BinConstraintT(constraint.lhs, constraint.rhs,
                                      op_eq), counter
            else:
                new_dims = []

                for _ in range(len(constraint.lhs.__args__)):
                    dim, counter = gen_dvar(counter)
                    new_dims.append(dim)

                new_dim_constraints = [BinConstraintD(old_dim, new_dim, op_precision) for
                                       new_dim, old_dim in zip(new_dims, constraint.lhs.__args__)] + \
                                      [BinConstraintT(constraint.rhs, TensorType(new_dims), op_eq)] + \
                                      [BinConstraintD(1, new_dim, op_leq) for
                                       new_dim in new_dims]
                return Conj(new_dim_constraints), counter

    # matching
    elif constraint.op == op_matching:
        assert isinstance(constraint.rhs, TensorType)
        d1 = constraint.rhs.__args__[0]
        d2 = constraint.rhs.__args__[1]
        d3 = constraint.rhs.__args__[2]
        d4 = constraint.rhs.__args__[3]

        conj = [
            BinConstraintT(constraint.lhs, Dyn, op_eq),
            BinConstraintD(d1, Dyn, op_eq),
            BinConstraintD(d2, Dyn, op_eq),
            BinConstraintD(d3, Dyn, op_eq),
            BinConstraintD(d4, Dyn, op_eq)
        ]
        return Disj([
            Conj(conj),
            BinConstraintT(constraint.lhs, TensorType([d1, d2, d3, d4]), op_eq)
        ]), counter

    elif constraint.op == op_consistency:
        c_dyn = Disj([
            BinConstraintT(constraint.lhs, Dyn, op_eq),
            BinConstraintT(constraint.rhs, Dyn, op_eq)
        ])
        [c_tensor_1, c_tensor_2, c_tensor_3, c_tensor_4
         ], counter = gen_consistency_constraints(constraint, counter)

        return Disj([c_dyn, c_tensor_1, c_tensor_2, c_tensor_3,
                     c_tensor_4]), counter

    elif constraint.op == op_leq:
        assert isinstance(constraint.rhs, int)
        disj = []
        for i in range(1, constraint.rhs + 1):
            dims = []
            for j in range(1, i + 1):
                dim_var, counter = gen_dvar(counter)
                dims.append(dim_var)
            disj.append(BinConstraintT(constraint.lhs, TensorType(dims),
                                       op_eq))
        return Disj(disj), counter
    else:
        return constraint, counter
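# Hedged sketch (not from the original module): the op_leq branch above encodes
# "rank(lhs) <= k" as a disjunction over the concrete ranks 1..k; a direct
# plain-Python reading of that bound:
def _demo_rank_at_most(shape, k) -> bool:
    return 1 <= len(shape) <= k

assert _demo_rank_at_most((2, 3), 4) and not _demo_rank_at_most((1,) * 5, 4)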