Example No. 1
def view_inference_rule(n: Node, symbols, constraints, counter):
    """
    Similar to reshape but with an extra condition on the strides
    """
    assert isinstance(n.args[0], Node)

    # generate the new variable
    my_view, counter = gen_tvar(counter)
    symbols[n] = my_view

    src_var = symbols[n.args[0]]
    t2 = [symbols[elem] if isinstance(elem, Node) else elem for elem in n.args[1:]]  # target shape
    t2_type = []
    num_constraints = []

    for t in t2:
        if t == -1:
            var, counter = gen_dvar(counter)
            t2_type.append(var)
            num_constraints.append(BinConstraintD(var, Dyn, op_neq))

        else:
            num_constraints.append(BinConstraintD(t, Dyn, op_neq))
            t2_type.append(t)

    t2_type = TensorType(t2_type)  # type: ignore[assignment]

    c1 = BinConstraintT(my_view, t2_type, op_eq)
    c2 = CanReshape(src_var, t2_type)

    # TODO: add the extra check mentioned here:
    # https://pytorch.org/docs/stable/generated/torch.Tensor.view.html#torch.Tensor.view

    return [c1, c2] + num_constraints, counter  # type: ignore[operator]
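For intuition, here is a minimal, self-contained sketch (plain Python, not this module's API) of the element-count condition that CanReshape ultimately enforces for fully static shapes:

import math

def can_view(src_shape, target_shape):
    # a view/reshape between fully static shapes is only possible when
    # both shapes describe the same number of elements
    return math.prod(src_shape) == math.prod(target_shape)

assert can_view([2, 3, 4], [6, 4])      # 24 == 24
assert not can_view([2, 3, 4], [5, 5])  # 24 != 25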
Example No. 2
def generate_calc_product(constraint, counter):
    """
    Transform flatten constraints
    """
    start = constraint.start
    end = constraint.end
    dims = constraint.dims_to_flatten
    flattened = constraint.flattened
    n = len(constraint.dims_to_flatten)

    # this will be evaluated right here
    boundary_check = 0 <= start < end <= n

    c_boundary = T() if boundary_check else F()

    lhs = dims[0:start]
    rhs = dims[end:]
    mid = dims[start:end]

    all_possibilities = generate_all_int_dyn_dim_possibilities(mid)

    all_constraints = []

    for p in all_possibilities:
        p = list(p)
        # this tells us there is a dynamic variable
        contains_dyn = not all(c.op == op_neq for c in p)
        if contains_dyn:
            mid_var = [Dyn]
            total_constraints = lhs + mid_var + rhs
            if len(total_constraints) > 4:
                all_constraints.append(F())
            else:
                all_constraints.append(
                    Conj([
                        BinConstraintT(flattened,
                                       TensorType(lhs + mid_var + rhs), op_eq)
                    ] + p))
        else:
            new_var, counter = gen_dvar(counter)
            mid_eq_prod = Conj([
                BinConstraintD(new_var, Prod(mid), op_eq),
                BinConstraintD(new_var, Dyn, op_neq)
            ])
            mid_var = [new_var]
            total_constraints = lhs + mid_var + rhs
            if len(total_constraints) > 4:
                all_constraints.append(F())
            else:
                all_constraints.append(
                    Conj([
                        BinConstraintT(flattened,
                                       TensorType(lhs + mid_var +
                                                  rhs), op_eq), mid_eq_prod
                    ] + p))

    return Conj([Disj(all_constraints), c_boundary]), counter
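A worked reading of the fully static case, as a self-contained sketch (the slicing mirrors the lhs/mid/rhs split above; this is not the constraint API itself):

def flatten_shape(shape, start, end):
    # dims[start:end] collapse into their product, as in torch.flatten
    n = len(shape)
    assert 0 <= start < end <= n  # the boundary_check above
    mid = 1
    for d in shape[start:end]:
        mid *= d
    return shape[:start] + [mid] + shape[end:]

assert flatten_shape([2, 3, 4, 5], 1, 3) == [2, 12, 5]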
Example No. 3
def getitem_inference_rule(n: Node, symbols, constraints, counter):
    assert isinstance(n.args[0], Node)

    # dimension output case
    if isinstance(n.args[1], int):
        # create and store the new dimension variable
        get_item_output, counter = gen_dvar(counter)
        symbols[n] = get_item_output

        # retrieve arg variables
        get_item_arg = symbols[n.args[0]]
        assert isinstance(get_item_arg, TVar)

        # if the input is dynamic, we accept any index and return
        # a dynamic dimension as output
        input_dyn = BinConstraintT(get_item_arg, Dyn, op_eq)
        output_dyn = BinConstraintD(get_item_output, Dyn, op_eq)
        c1 = Conj([input_dyn, output_dyn])

        # if the input is a tensor,
        # generate a getItem constraint which will be expanded based on the
        # tensor dimension.

        c2 = [
            GetItem(i + 1, n.args[1], get_item_output, get_item_arg)
            for i in range(MAX_TENSOR_RANK)
        ]

        # since the output is a dimension, we make sure it's a natural number
        # added as a conjunction to the disjunction of c2
        c3 = BinConstraintD(0, get_item_output, op_leq)
        return [Disj([c1, Conj([Disj(c2), c3])])], counter

    # tensor output case
    elif isinstance(n.args[1], tuple):
        # create and store the new tensor variable
        get_item_output, counter = gen_tvar(counter)
        symbols[n] = get_item_output

        # retrieve arg variables
        get_item_arg = symbols[n.args[0]]
        assert isinstance(get_item_arg, TVar)

        input_dyn = BinConstraintT(get_item_arg, Dyn, op_eq)
        output_dyn = BinConstraintT(get_item_output, Dyn,
                                    op_eq)  # type: ignore[assignment]
        c1 = Conj([input_dyn, output_dyn])

        c2 = [
            GetItemTensor(i + 1, n.args[1], get_item_output, get_item_arg)
            for i in range(MAX_TENSOR_RANK)
        ]  # type: ignore[misc]

        return [Disj([c1, *c2])], counter

    else:
        raise RuntimeError('Method not yet implemented')
Example No. 4
def is_dim_div_by_target(target: List[int], dim: List[DVar]):
    """
    Generate constraints to check if the input dimension is divisible by the product of the target dimensions
    Args:
        target: Target dimensions
        dim:  Input dimensions

    Returns: Constraints to check divisibility

    """
    return BinConstraintD(BinConstraintD(dim, Prod(target), op_mod), 0, op_eq)
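Numerically, the constraint reads as dim % prod(target) == 0. A small sketch with concrete integers standing in for dimension variables:

import math

def is_div(dim, target):
    # the condition encoded by BinConstraintD(..., op_mod) above
    return dim % math.prod(target) == 0

assert is_div(24, [2, 3])       # 24 % 6 == 0
assert not is_div(25, [2, 3])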
Example No. 5
def add_linear_constraints(dims1, dims2, module_instance):
    assert len(dims1) == len(dims2)
    constraints = []
    for i in range(len(dims1)):
        if i == len(dims1) - 1:
            constraints.append(BinConstraintD(dims1[i], module_instance.in_features, op_consistency))
            constraints.append(BinConstraintD(dims2[i], module_instance.out_features, op_eq))
        else:
            constraints.append(BinConstraintD(dims1[i], dims2[i], op_eq))

    return constraints
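These constraints encode nn.Linear's shape behaviour: every dimension except the last is preserved, the last input dimension must be consistent with in_features, and the last output dimension becomes out_features. A concrete reference point using the standard torch API:

import torch

linear = torch.nn.Linear(in_features=10, out_features=7)
out = linear(torch.randn(4, 3, 10))
assert out.shape == torch.Size([4, 3, 7])  # only the last dim changes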
Example No. 6
def conv2d_inference_rule(n: Node, module_instance, symbols, constraints, counter):
    assert isinstance(n.args[0], Node)

    my_conv, counter = gen_tvar(counter)
    symbols[n] = my_conv
    input_var = symbols[n.args[0]]

    # dim vars
    [d1, d2, d3, d4], counter = gen_tensor_dims(MAX_TENSOR_RANK, counter)

    # c1 = Matching(input_var, TensorType([d1, d2, d3, d4]))
    c1 = BinConstraintT(input_var, TensorType([d1, d2, d3, d4]), op_matching)

    # c2 = DConsistency(module_instance.in_channels, d2)
    c2 = BinConstraintD(module_instance.in_channels, d2, op_consistency)

    c3 = CalcConv(my_conv, input_var,
                  module_instance.out_channels,
                  module_instance.kernel_size,
                  module_instance.padding,
                  module_instance.stride,
                  module_instance.dilation, [d1, d2, d3, d4])

    nat_constraints = gen_nat_constraints([d1, d2, d3, d4])

    return [c1, c2, c3, *nat_constraints], counter
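The rule matches the input against a rank-4 tensor, requires d2 to be consistent with in_channels, and defers the output-size arithmetic to CalcConv. As a concrete reference point for what that arithmetic must reproduce (standard torch API, not this module):

import torch

conv = torch.nn.Conv2d(in_channels=3, out_channels=8,
                       kernel_size=3, stride=2, padding=1)
out = conv(torch.randn(1, 3, 32, 32))
assert out.shape == torch.Size([1, 8, 16, 16])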
Example No. 7
def expand_inference_rule(n: Node, symbols, constraints, counter):
    """
    We generate the same constraints as we do for tensor addition, but we
    constrain the rank of this expression to be equal to len(n.args[1:]) so
    that only those cases get considered for the output
    """
    assert isinstance(n.args[0], Node)

    # define the output for expand
    expand, counter = gen_tvar(counter)
    symbols[n] = expand

    # the expand sizes are given as arguments rather than as a second node,
    # so we construct an argument variable to stand for them
    e1 = symbols[n.args[0]]
    e2, counter = gen_tvar(counter)

    e2_nat_constraints = []
    for arg in n.args[1:]:
        assert isinstance(arg, (Node, int))
        if isinstance(arg, Node):
            assert isinstance(symbols[arg], DVar)
            e2_nat_constraints.append(BinConstraintD(0, symbols[arg], op_leq))

    e2_constraint = BinConstraintT(e2, TensorType([arg if isinstance(arg, int) else symbols[arg] for arg in n.args[1:]]), op_eq)

    constraints, counter = gen_broadcasting_constraints(e1, e2, symbols, counter, expand)

    # constrain the output size
    dims, counter = gen_tensor_dims(len(n.args[1:]), counter)
    nat_constraints = gen_nat_constraints(dims)
    c = [BinConstraintT(expand, TensorType(dims), op_eq), *nat_constraints, e2_constraint, *e2_nat_constraints]
    constraints += c

    return constraints, counter
Example No. 8
def transform_get_item(constraint, counter):
    """
    generate an equality of the form:
    t = [a1, ..., an]
    then generate constraints that check if the given index is valid
    given this particular tensor size.
    If the index is valid, generate a constraint to get the item
    Note that we already handled the Dyn input case in the previous
    step.
    Args:
        constraint: GetItem which assumes we are getting an item from a tensor (not Dyn)
        counter: variable tracking
    Returns: simplified constraints for GetItem

    """
    dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
    nat_constraints = gen_nat_constraints(dims)

    is_valid_index = valid_index(constraint.index, dims)

    all_constraints = [
        BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
        *nat_constraints, is_valid_index
    ]

    # if the index is valid, we generate a constraint for getting an item
    # otherwise this clause will have been UNSAT due to the wrong index
    if is_valid_index == T():
        all_constraints.append(
            BinConstraintD(constraint.res, dims[constraint.index], op_eq))

    return Conj(all_constraints), counter
Example No. 9
def gen_consistency_constraints(constraint: Constraint, counter: int):
    """
    Args:
        constraint: Consistency constraint on tensors
        counter: for variable tracking

    Returns: Equality and consistency constraints on dimensions

    """

    all_constraints = []

    for i in range(1, MAX_TENSOR_RANK + 1):
        new_dims_rhs_1, counter = gen_tensor_dims(i, counter)
        new_dims_rhs_2, counter = gen_tensor_dims(i, counter)

        nat_constraints = gen_nat_constraints(new_dims_rhs_1 + new_dims_rhs_2)

        c_tensor_i = Conj([
            BinConstraintT(constraint.lhs, TensorType(new_dims_rhs_1), op_eq),
            BinConstraintT(constraint.rhs, TensorType(new_dims_rhs_2), op_eq)
        ] + [
            BinConstraintD(d1, d2, op_consistency)
            for d1, d2 in zip(new_dims_rhs_1, new_dims_rhs_2)
        ] + nat_constraints)

        all_constraints.append(c_tensor_i)

    return all_constraints, counter
Example No. 10
def bmm_inference_rule(n: Node, symbols, constraints, counter):
    """
    Constraints that match the inputs to size-3 tensors
    and combine their dimensions according to the rules
    of batch matrix multiplication
    """
    assert isinstance(n.args[0], Node)
    assert isinstance(n.args[1], Node)

    bmm_output, counter = gen_tvar(counter)
    symbols[n] = bmm_output

    bmm_input1 = symbols[n.args[0]]
    bmm_input2 = symbols[n.args[1]]

    dims_input1, counter = gen_tensor_dims(3, counter)
    dims_input2, counter = gen_tensor_dims(3, counter)

    inputs_dyn = Conj([
        BinConstraintT(bmm_input1, Dyn, op_eq),
        BinConstraintT(bmm_input2, Dyn, op_eq),
        BinConstraintT(bmm_output, Dyn, op_eq)
    ])

    input1_dyn = Conj([
        BinConstraintT(bmm_input1, Dyn, op_eq),
        BinConstraintT(bmm_input2, TensorType(dims_input2), op_eq),
        BinConstraintT(bmm_output,
                       TensorType([dims_input2[0], Dyn, dims_input2[2]]),
                       op_eq)
    ])

    input2_dyn = Conj([
        BinConstraintT(bmm_input2, Dyn, op_eq),
        BinConstraintT(bmm_input1, TensorType(dims_input1), op_eq),
        BinConstraintT(bmm_output,
                       TensorType([dims_input1[0], dims_input1[1], Dyn]),
                       op_eq)
    ])

    consistency_constraints = [
        BinConstraintD(dims_input1[0], dims_input2[0], op_consistency)
    ]

    batch_size, counter = gen_dvar(counter)

    inputs_are_tensors = Conj([
        BinConstraintT(bmm_input1, TensorType(dims_input1), op_eq),
        BinConstraintT(bmm_input2, TensorType(dims_input2), op_eq),
        BinConstraintT(
            bmm_output,
            TensorType([batch_size, dims_input1[1], dims_input2[2]]), op_eq),
        *consistency_constraints,
        DGreatestUpperBound(batch_size, dims_input1[0], dims_input2[0])
    ])

    return [Disj([inputs_dyn, input1_dyn, input2_dyn,
                  inputs_are_tensors])], counter
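The fully static disjunct mirrors torch.bmm's documented shape rule, (b, n, m) @ (b, m, p) -> (b, n, p), with the batch dimensions required to be consistent:

import torch

out = torch.bmm(torch.randn(10, 3, 4), torch.randn(10, 4, 5))
assert out.shape == torch.Size([10, 3, 5])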
Example No. 11
def generate_calc_conv(constraint, counter):
    d, counter = gen_tensor_dims(4, counter)
    conv_result = TensorType([d[0], d[1], d[2], d[3]])

    # the convolution result is a tensor of size 4
    c1 = BinConstraintT(constraint.conv_result, conv_result, op_eq)

    # the second dimension of the output is equal to the output channels
    c2 = Conj([
        BinConstraintD(d[1], constraint.c_out, op_eq),
        BinConstraintD(d[1], Dyn, op_neq)
    ])

    # the input corresponds to the output in the first dimension of the convolution
    c3 = BinConstraintD(constraint.matching_constraint[0], d[0], op_eq)

    c4, c5 = calc_last_two_dims(constraint, d)

    leq_constraints = Conj([
        BinConstraintD(0, d[0], op_leq),
        BinConstraintD(0, d[1], op_leq),
        BinConstraintD(0, d[2], op_leq),
        BinConstraintD(0, d[3], op_leq)
    ])

    return Conj([c1, c2, c3, c4, c5, leq_constraints]), counter
Example No. 12
def generate_binconstraint_d(constraint, counter):
    """
    Transform binary constraints for dimensions
    """
    if constraint.op == op_precision:
        if isinstance(constraint.lhs, int):
            return BinConstraintD(constraint.lhs, constraint.rhs,
                                  op_eq), counter
        elif constraint.lhs == Dyn:
            return T(), counter

    elif constraint.op == op_consistency:
        return Disj([
            BinConstraintD(constraint.lhs, constraint.rhs, op_eq),
            BinConstraintD(constraint.rhs, Dyn, op_eq),
            BinConstraintD(constraint.lhs, Dyn, op_eq)
        ]), counter

    else:
        return constraint, counter
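The op_consistency disjunction has a simple numeric reading: two dimensions are consistent iff they are equal or at least one of them is dynamic. A plain-Python sketch (Dyn here is a stand-in sentinel, not the module's Dyn):

Dyn = object()  # stand-in sentinel for the dynamic dimension

def consistent(d1, d2):
    return d1 == d2 or d1 is Dyn or d2 is Dyn

assert consistent(3, 3) and consistent(Dyn, 5) and consistent(7, Dyn)
assert not consistent(3, 4)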
Example No. 13
def lt_inference_rule(n: Node, symbols, constraints, counter):
    assert isinstance(n.args[0], (Node, int))
    assert isinstance(n.args[1], (Node, int))

    # This node will not be used again, so we do not generate a
    # constraint for the node itself, only for its operands.

    e1 = symbols[n.args[0]] if isinstance(n.args[0], Node) else n.args[0]
    e2 = symbols[n.args[1]] if isinstance(n.args[1], Node) else n.args[1]

    if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
        if isinstance(e1, TVar) and isinstance(e2, TVar):
            lt_tensor, counter = gen_tvar(counter)
            symbols[n] = lt_tensor
            return gen_broadcasting_constraints(e1, e2, symbols, counter,
                                                lt_tensor)

        elif isinstance(e1, DVar) and isinstance(e2, DVar):
            # This is meant to be used for flow analysis only
            lt_constraint = BinConstraintD(e1, e2, op_lt)

            my_lt, counter = gen_bvar(counter)
            equality_constraint = BinConstraintD(my_lt, lt_constraint, op_eq)
            return [equality_constraint], counter

        else:
            raise RuntimeError('Sort Mismatch')

    elif isinstance(n.args[0], Node) and not isinstance(n.args[1], Node):
        if isinstance(e1, DVar):
            # This is meant to be used for flow analysis only
            lt_constraint = BinConstraintD(e1, e2, op_lt)

            my_lt, counter = gen_bvar(counter)
            equality_constraint = BinConstraintD(my_lt, lt_constraint, op_eq)
            return [equality_constraint], counter
        else:
            raise NotImplementedError('Method not yet implemented')

    else:
        raise NotImplementedError('Method not yet implemented')
Example No. 14
def torch_dim_inference_rule(n: Node, symbols, constraints, counter):
    assert isinstance(n.args[0], Node)
    my_dim, counter = gen_dvar(counter)
    symbols[n] = my_dim
    input = symbols[n.args[0]]

    input_dyn = BinConstraintT(input, Dyn, op_eq)
    output_dyn = BinConstraintD(my_dim, Dyn, op_eq)

    c1 = []

    for i in range(1, MAX_TENSOR_RANK + 1):
        new_dims_rhs_1, counter = gen_tensor_dims(i, counter)

        c_tensor_i = Conj([
            BinConstraintT(input, TensorType(new_dims_rhs_1), op_eq),
            BinConstraintD(my_dim, i, op_eq)
        ])
        c1.append(c_tensor_i)

    return [Disj([Conj([input_dyn, output_dyn]), Disj(c1)])], counter
Example No. 15
def add_inference_rule(n: Node, symbols, constraints, counter):

    if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
        if isinstance(symbols[n.args[0]], TVar) and isinstance(
                symbols[n.args[1]], TVar):
            my_add, counter = gen_tvar(counter)
            symbols[n] = my_add
            e1 = symbols[n.args[0]]
            e2 = symbols[n.args[1]]

            return gen_broadcasting_constraints(e1, e2, symbols, counter,
                                                my_add)
        else:
            raise NotImplementedError('Method not yet implemented')

    elif isinstance(n.args[0], Node) and isinstance(n.args[1], int):
        if isinstance(symbols[n.args[0]], TVar):
            my_add, counter = gen_tvar(counter)
            symbols[n] = my_add
            e1 = symbols[n.args[0]]
            return [BinConstraintT(my_add, e1, op_eq)], counter
        elif isinstance(symbols[n.args[0]], DVar):
            my_add, counter = gen_dvar(counter)
            symbols[n] = my_add
            e1 = symbols[n.args[0]]

            # we will propagate the runtime value here since this is regular addition
            c = Conj([
                BinConstraintD(my_add, BinConstraintD(e1, n.args[1], op_add),
                               op_eq),
                BinConstraintD(0, my_add, op_leq)
            ])
            return [c], counter

        else:
            raise NotImplementedError('Method not yet implemented')

    else:
        # TODO generate add constraints for scalar addition
        raise NotImplementedError('Addition not yet implemented')
Example No. 16
def no_broadcast_dim_with_index(d1: List[DVar], d2: List[DVar], d3: List[DVar],
                                d4: List[DVar], i: int):
    """
    Args:
        d1: input 1
        d2: input 2
        d3: simulated broadcasting for input 1
        d4: simulated broadcasting for input 2
        i: the index of the dimension being considered

    Returns: Constraints for when no broadcasting occurs
    """
    return Conj([
        Disj([
            Conj([
                BinConstraintD(d1[i], 1, op_eq),
                BinConstraintD(d2[i], 1, op_eq)
            ]),
            Conj([
                BinConstraintD(d1[i], 1, op_neq),
                BinConstraintD(d2[i], 1, op_neq)
            ])
        ]),
        BinConstraintD(d1[i], d3[i], op_eq),
        BinConstraintD(d2[i], d4[i], op_eq)
    ])
Example No. 17
def generate_all_int_dyn_dim_possibilities(my_list: List[DVar]):
    """
    Generate all possibilities of being equal or not equal to dyn for my_list
    Args:
        my_list: List of tensor dimensions

    Returns: A list of a list of constraints. Each list of constraints corresponds to
    one possibility about the values of the dimension variables
    """
    # generate all possibilities of being equal or not equal to dyn for my_list
    eq_possibilities = [
        BinConstraintD(my_list[i], Dyn, op_eq) for i in range(len(my_list))
    ]
    neq_possibilities = [
        BinConstraintD(my_list[i], Dyn, op_neq) for i in range(len(my_list))
    ]
    d_possibilities = []

    for i in zip(eq_possibilities, neq_possibilities):
        d_possibilities.append(list(i))
    all_possibilities = list(itertools.product(*d_possibilities))
    return all_possibilities
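For n dimension variables this yields 2**n cases. A sketch with strings standing in for the eq/neq constraints:

import itertools

cases = list(itertools.product(*[[f'{d}==Dyn', f'{d}!=Dyn']
                                 for d in ('d1', 'd2')]))
assert len(cases) == 4  # (eq, eq), (eq, neq), (neq, eq), (neq, neq)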
Example No. 18
def gt_inference_rule(n: Node, symbols, constraints, counter):
    assert isinstance(n.args[0], (Node, int))
    assert isinstance(n.args[1], (Node, int))

    # This node will not be used again, so we do not generate a
    # constraint for the node itself, only for its operands.

    e1 = symbols[n.args[0]] if isinstance(n.args[0], Node) else n.args[0]
    e2 = symbols[n.args[1]] if isinstance(n.args[1], Node) else n.args[1]

    if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
        if isinstance(e1, TVar) and isinstance(e2, TVar):
            gt_tensor, counter = gen_tvar(counter)
            symbols[n] = gt_tensor
            return gen_broadcasting_constraints(e1, e2, symbols, counter,
                                                gt_tensor)

        elif isinstance(e1, DVar) and isinstance(e2, DVar):
            # This is meant to be used for flow analysis only
            gt_constraint = BinConstraintD(e1, e2, op_gt)

            my_gt, counter = gen_bvar(counter)
            equality_constraint = BinConstraintD(my_gt, gt_constraint, op_eq)
            return [equality_constraint], counter

        else:
            raise RuntimeError('Sort Mismatch')

    elif isinstance(n.args[0], Node) and not isinstance(n.args[1], Node):
        if isinstance(e1, DVar):
            # This is meant to be used for flow analysis only
            gt_constraint = BinConstraintD(e1, e2, op_gt)

            my_gt, counter = gen_bvar(counter)
            equality_constraint = BinConstraintD(my_gt, gt_constraint, op_eq)
            return [equality_constraint], counter

        elif isinstance(e1, TVar) and isinstance(e2, int):
            # then we made the wrong assumption about the argument being a tensor
            # so we should fix the assumption
            warnings.warn(
                f'Made the wrong assumption for node {n}. Correctness not guaranteed.'
            )

            new_e1, counter = gen_dvar(counter)
            symbols[n.args[0]] = new_e1

            gt_constraint = BinConstraintD(new_e1, e2, op_gt)

            my_gt, counter = gen_bvar(counter)
            equality_constraint = BinConstraintD(my_gt, gt_constraint, op_eq)
            return [equality_constraint], counter

        else:
            raise NotImplementedError('Method not yet implemented')

    else:
        raise NotImplementedError('Method not yet implemented')
Example No. 19
def size_inference_rule(n: Node, symbols, constraints, counter):
    """
    The constraint is just lhs = rhs.
    Ex: size = input_ids.size()
    """

    if len(n.args) == 1:
        # generate the new variable
        size, counter = gen_tvar(counter)
        symbols[n] = size
        input = symbols[n.args[0]]
        c = BinConstraintT(input, size, op_eq)
        return [c], counter

    elif len(n.args) == 2:
        # TODO: review this rule; should input = dyn; output = dyn be included here?
        if isinstance(n.args[1], int):
            # generate the new variable
            size_index, counter = gen_dvar(counter)
            symbols[n] = size_index
            input = symbols[n.args[0]]
            c2 = [
                GetItem(i + 1, n.args[1], size_index, input)
                for i in range(MAX_TENSOR_RANK)
            ]
            c3 = BinConstraintD(0, size_index, op_leq)

            input_dyn = BinConstraintT(input, Dyn, op_eq)
            output_dyn = BinConstraintD(size_index, Dyn, op_eq)
            c1 = Conj([input_dyn, output_dyn])

            return [Disj([c1, Conj([Disj(c2), c3])])], counter

        else:
            raise NotImplementedError

    else:
        raise NotImplementedError
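The two implemented cases mirror Tensor.size: with no index it returns the whole shape, and with an integer index it returns a single dimension:

import torch

t = torch.randn(2, 3)
assert t.size() == torch.Size([2, 3])
assert t.size(1) == 3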
Example No. 20
def eq_inference_rule(n: Node, symbols, constraints, counter):
    assert isinstance(n.args[0], (Node, int))
    assert isinstance(n.args[1], (Node, int))

    e1 = symbols[n.args[0]] if isinstance(n.args[0], Node) else n.args[0]
    e2 = symbols[n.args[1]] if isinstance(n.args[1], Node) else n.args[1]

    if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
        if isinstance(e1, TVar) and isinstance(e2, TVar):
            eq_tensor, counter = gen_tvar(counter)
            symbols[n] = eq_tensor
            return gen_broadcasting_constraints(e1, e2, symbols, counter,
                                                eq_tensor)

        elif isinstance(e1, DVar) and isinstance(e2, DVar):
            # This is meant to be used for flow analysis only
            eq_constraint = BinConstraintD(e1, e2, op_eq)

            my_eq, counter = gen_bvar(counter)
            equality_constraint = BinConstraintD(my_eq, eq_constraint, op_eq)
            return [equality_constraint], counter

        else:
            raise RuntimeError('Sort Mismatch')

    elif isinstance(n.args[0], Node) and not isinstance(n.args[1], Node):
        if isinstance(e1, DVar):
            # This is meant to be used for flow analysis only
            eq_constraint = BinConstraintD(e1, e2, op_eq)

            my_eq, counter = gen_bvar(counter)
            equality_constraint = BinConstraintD(my_eq, eq_constraint, op_eq)
            return [equality_constraint], counter
        else:
            raise NotImplementedError('Method not yet implemented')
    else:
        raise NotImplementedError('Method not yet implemented')
Example No. 21
def broadcasting_inference_rule(n: Node, symbols, constraints, counter):

    op_code = None
    if n.target == operator.add or n.target == torch.add:
        op_code = op_add
    elif n.target == operator.mul:
        op_code = op_mul

    if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
        if isinstance(symbols[n.args[0]], TVar) and isinstance(symbols[n.args[1]], TVar):
            my_output, counter = gen_tvar(counter)
            symbols[n] = my_output
            e1 = symbols[n.args[0]]
            e2 = symbols[n.args[1]]

            return gen_broadcasting_constraints(e1, e2, symbols, counter, my_output)
        else:
            raise NotImplementedError('Method not yet implemented')

    elif isinstance(n.args[0], Node) and isinstance(n.args[1], (int, float)):
        if isinstance(symbols[n.args[0]], TVar):
            my_output, counter = gen_tvar(counter)
            symbols[n] = my_output
            e1 = symbols[n.args[0]]
            return [BinConstraintT(my_output, e1, op_eq)], counter
        elif isinstance(symbols[n.args[0]], DVar):
            my_output, counter = gen_dvar(counter)
            symbols[n] = my_output
            e1 = symbols[n.args[0]]

            # we will propagate the runtime value here, since this is
            # ordinary scalar arithmetic
            c = Conj([BinConstraintD(my_output, BinConstraintD(e1, n.args[1], op_code), op_eq),
                      BinConstraintD(0, my_output, op_leq)])
            return [c], counter

    elif isinstance(n.args[1], Node) and isinstance(n.args[0], (int, float)):
        if isinstance(symbols[n.args[1]], TVar):
            my_output, counter = gen_tvar(counter)
            symbols[n] = my_output
            e2 = symbols[n.args[1]]
            return [BinConstraintT(my_output, e2, op_eq)], counter
        elif isinstance(symbols[n.args[1]], DVar):
            my_output, counter = gen_dvar(counter)
            symbols[n] = my_output
            e2 = symbols[n.args[1]]

            # we will propagate the runtime value here, since this is
            # ordinary scalar arithmetic
            c = Conj([BinConstraintD(my_output, BinConstraintD(e2, n.args[0], op_code), op_eq),
                      BinConstraintD(0, my_output, op_leq)])
            return [c], counter

        else:
            raise NotImplementedError('Method not yet implemented')

    else:
        # TODO generate add constraints for scalar addition
        raise NotImplementedError('Addition not yet implemented')
Example No. 22
def broadcast_dim(tensor_input1,
                  tensor_input2,
                  res1,
                  res2,
                  index,
                  padding=False):
    """
    Apply broadcasting to the 'index' dimension of tensor_input1.
    Args:
        tensor_input1: should represent [d1, ..., d_index, ...] where d_index = 1
        tensor_input2: represents the second input
        res1: broadcasted result 1
        res2: broadcasted result 2
        index: the index to broadcast
        padding: If padding was used, then tensor_input1[index] does not exist

    Returns: Constraints simulating broadcasting at dimension 'index'

    """
    if tensor_input1[index] is None:
        assert padding

    if not padding:
        # then the inputs are the same length so they all have dimensions at "index"
        return Conj([
            BinConstraintD(tensor_input1[index], 1, op_eq),
            BinConstraintD(res1[index], res2[index], op_eq),
            BinConstraintD(res2[index], tensor_input2[index], op_eq)
        ])

    else:
        # we don't set the input dimension to 1, since it doesn't exist.
        return Conj([
            BinConstraintD(res1[index], res2[index], op_eq),
            BinConstraintD(res2[index], tensor_input2[index], op_eq)
        ])
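These constraints encode one step of standard broadcasting: at a given index, a size-1 dimension stretches to match the other input, and a missing leading dimension (the padding case) behaves as if it were 1. A concrete reference point:

import torch

out = torch.randn(3, 1, 5) + torch.randn(4, 5)
assert out.shape == torch.Size([3, 4, 5])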
Example No. 23
def add_layer_norm_constraints(input_dim, normalized_dim):
    """
    The constraints say that the type has the form: [*, 1024, 1024]
    while the normalized_dim has the form [1024, 1024]
    Args:
        input_dim: Input shape of layer norm
        normalized_dim: normalized_dim parameter of the module instance

    """

    # in this case we return false since there's a pattern mismatch
    if len(normalized_dim) > len(input_dim):
        return [F()]

    else:
        constraints = []
        for i, n in zip(reversed(input_dim), reversed(normalized_dim)):
            constraints.append(BinConstraintD(i, n, op_consistency))
        return constraints
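The zip over the reversed lists compares trailing dimensions only, which matches LayerNorm's requirement that normalized_shape equal a suffix of the input shape:

input_dim = ['*', 1024, 1024]       # '*' stands in for a leading batch dim
normalized_dim = [1024, 1024]
pairs = list(zip(reversed(input_dim), reversed(normalized_dim)))
assert pairs == [(1024, 1024), (1024, 1024)]  # '*' is never compared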
Example No. 24
def gen_all_reshape_possibilities(list_of_dims, target):
    """
    Consider all possibilities for what the input dimensions could be (a number
    or dynamic), then generate the appropriate constraints using multiplication
    or mod, depending on the possibility.
    The possibilities we consider here are the cross product of being equal to
    dyn or not equal to dyn for the input. The target is fixed because at most
    one of its dimensions could be dyn; we have different cases for this.

    Args:
        list_of_dims: The input list of dimensions
        target: The tensor we want to reshape to

    Returns: A disjunction of transformed reshape constraints

    """
    all_possibilities = generate_all_int_dyn_dim_possibilities(list_of_dims)

    all_constraints = []

    for p in all_possibilities:
        to_multiply = []

        p = list(p)

        for constraint in p:
            assert isinstance(constraint, BinConstraintD)
            if constraint.op == op_neq:
                to_multiply.append(constraint.lhs)

        if not to_multiply:
            all_constraints.append(Conj(p))

        elif len(to_multiply) < len(list_of_dims):
            all_constraints.append(
                Conj(p + [is_target_div_by_dim(target, Prod(to_multiply))]))
        else:
            all_constraints.append(
                Conj(
                    p +
                    [BinConstraintD(Prod(list_of_dims), Prod(target), op_eq)]))

    return Disj(all_constraints)
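Why divisibility suffices when some inputs are dynamic: if the known (non-dyn) input dims multiply to k, a dynamic dim can absorb any remaining factor, so the reshape is satisfiable exactly when prod(target) is divisible by k. A numeric sketch:

import math

# input [d1, 3] with d1 == Dyn, target [2, 6]: prod(target) = 12 is
# divisible by the known factor 3, so d1 = 4 makes the reshape valid
target, known = [2, 6], [3]
assert math.prod(target) % math.prod(known) == 0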
Example No. 25
def generate_d_gub(constraint, counter):
    """
    Transform greatest upper bound for dimensions into equality constraints
    """
    c1 = Conj([
        BinConstraintD(constraint.rhs1, Dyn, op_eq),
        BinConstraintD(constraint.res, constraint.rhs2, op_eq)
    ])
    c2 = Conj([
        BinConstraintD(constraint.rhs2, Dyn, op_eq),
        BinConstraintD(constraint.res, constraint.rhs1, op_eq)
    ])
    c3 = Conj([
        BinConstraintD(constraint.rhs2, constraint.rhs1, op_eq),
        BinConstraintD(constraint.res, constraint.rhs1, op_eq)
    ])
    return Disj([c1, c2, c3]), counter
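A plain-Python reading of the three disjuncts (Dyn here is a stand-in sentinel): Dyn acts as the identity for the greatest upper bound, and equal dimensions are their own bound; any other combination is unsatisfiable.

Dyn = object()

def d_gub(d1, d2):
    if d1 is Dyn:
        return d2
    if d2 is Dyn:
        return d1
    assert d1 == d2  # otherwise no upper bound exists
    return d1

assert d_gub(Dyn, 5) == 5 and d_gub(4, Dyn) == 4 and d_gub(3, 3) == 3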
Example No. 26
def generate_calc_maxpool(constraint, counter):
    """
    Transform maxpool constraints
    """
    d, counter = gen_tensor_dims(4, counter)
    maxpool_result = TensorType([d[0], d[1], d[2], d[3]])

    # the maxpool result is a tensor of size 4
    c1 = BinConstraintT(constraint.maxpool_result, maxpool_result, op_eq)

    # the input corresponds to the output in the first and second dimension of maxpool
    c2 = BinConstraintD(constraint.matching_constraint[1], d[1], op_eq)
    c3 = BinConstraintD(constraint.matching_constraint[0], d[0], op_eq)
    c4, c5 = calc_last_two_dims(constraint, d)

    leq_constraints = Conj([
        BinConstraintD(0, d[0], op_leq),
        BinConstraintD(0, d[1], op_leq),
        BinConstraintD(0, d[2], op_leq),
        BinConstraintD(0, d[3], op_leq)
    ])

    return Conj([c1, c2, c3, c4, c5, leq_constraints]), counter
Example No. 27
def generate_binconstraint_t(constraint, counter):
    """
    Transform binary constraints for tensors
    """

    # precision constraints
    if constraint.op == op_precision:
        if constraint.lhs == Dyn:
            return T(), counter
        elif isinstance(constraint.lhs, TensorType):
        is_fully_static = all(d != Dyn for d in constraint.lhs.__args__)
            if is_fully_static:
                return BinConstraintT(constraint.lhs, constraint.rhs,
                                      op_eq), counter
            else:
                new_dims = []

                for _ in range(len(constraint.lhs.__args__)):
                    dim, counter = gen_dvar(counter)
                    new_dims.append(dim)

                new_dim_constraints = [BinConstraintD(old_dim, new_dim, op_precision) for
                                       new_dim, old_dim in zip(new_dims, constraint.lhs.__args__)] + \
                                      [BinConstraintT(constraint.rhs, TensorType(new_dims), op_eq)] + \
                                      [BinConstraintD(1, new_dim, op_leq) for
                                       new_dim in new_dims]
                return Conj(new_dim_constraints), counter

    # matching
    elif constraint.op == op_matching:
        assert isinstance(constraint.rhs, TensorType)
        d1 = constraint.rhs.__args__[0]
        d2 = constraint.rhs.__args__[1]
        d3 = constraint.rhs.__args__[2]
        d4 = constraint.rhs.__args__[3]

        conj = [
            BinConstraintT(constraint.lhs, Dyn, op_eq),
            BinConstraintD(d1, Dyn, op_eq),
            BinConstraintD(d2, Dyn, op_eq),
            BinConstraintD(d3, Dyn, op_eq),
            BinConstraintD(d4, Dyn, op_eq)
        ]
        return Disj([
            Conj(conj),
            BinConstraintT(constraint.lhs, TensorType([d1, d2, d3, d4]), op_eq)
        ]), counter

    elif constraint.op == op_consistency:
        c_dyn = Disj([
            BinConstraintT(constraint.lhs, Dyn, op_eq),
            BinConstraintT(constraint.rhs, Dyn, op_eq)
        ])
        [c_tensor_1, c_tensor_2, c_tensor_3, c_tensor_4
         ], counter = gen_consistency_constraints(constraint, counter)

        return Disj([c_dyn, c_tensor_1, c_tensor_2, c_tensor_3,
                     c_tensor_4]), counter

    elif constraint.op == op_leq:
        assert isinstance(constraint.rhs, int)
        disj = []
        for i in range(1, constraint.rhs + 1):
            dims = []
            for j in range(1, i + 1):
                dim_var, counter = gen_dvar(counter)
                dims.append(dim_var)
            disj.append(BinConstraintT(constraint.lhs, TensorType(dims),
                                       op_eq))
        return Disj(disj), counter
    else:
        return constraint, counter
Example No. 28
def calc_last_two_dims(constraint, d: List[DVar]):
    """
    Generates constraints for the last two dimensions of a convolution or a maxpool output
    Args:
        constraint: CalcConv or CalcMaxPool
        d: The list of output dimensions

    Returns: Constraints for calculating the last two dimensions of the output

    """

    assert isinstance(constraint, (CalcConv, CalcMaxPool))

    b3 = constraint.matching_constraint[2]
    b4 = constraint.matching_constraint[3]

    b3_dyn = Conj(
        [BinConstraintD(d[2], Dyn, op_eq),
         BinConstraintD(b3, Dyn, op_eq)])
    b4_dyn = Conj(
        [BinConstraintD(d[3], Dyn, op_eq),
         BinConstraintD(b4, Dyn, op_eq)])

    d3_not_dyn = Conj(
        [BinConstraintD(d[2], Dyn, op_neq),
         BinConstraintD(b3, Dyn, op_neq)])
    d4_not_dyn = Conj(
        [BinConstraintD(d[3], Dyn, op_neq),
         BinConstraintD(b4, Dyn, op_neq)])

    # transform parameters into tuples in case they are not already
    padding = (constraint.padding, constraint.padding) \
        if isinstance(constraint.padding, int) else constraint.padding
    kernel = (constraint.kernel, constraint.kernel) \
        if isinstance(constraint.kernel, int) else constraint.kernel
    stride = (constraint.stride, constraint.stride) \
        if isinstance(constraint.stride, int) else constraint.stride
    dilation = (constraint.dilation, constraint.dilation) \
        if isinstance(constraint.dilation, int) else constraint.dilation

    f1 = BinConstraintD(b3, BinConstraintD(2, padding[0], op_mul), op_add)
    f2 = BinConstraintD(dilation[0], BinConstraintD(kernel[0], 1, op_sub),
                        op_mul)
    f3 = BinConstraintD(
        BinConstraintD(BinConstraintD(f1, f2, op_sub), 1, op_sub), stride[0],
        op_div)
    f4 = BinConstraintD(f3, 1, op_add)

    c4 = Disj([b3_dyn, Conj([d3_not_dyn, BinConstraintD(d[2], f4, op_eq)])])

    f11 = BinConstraintD(b4, BinConstraintD(2, padding[1], op_mul), op_add)
    f22 = BinConstraintD(dilation[1], BinConstraintD(kernel[1], 1, op_sub),
                         op_mul)
    f33 = BinConstraintD(
        BinConstraintD(BinConstraintD(f11, f22, op_sub), 1, op_sub), stride[1],
        op_div)
    f44 = BinConstraintD(f33, 1, op_add)

    c5 = Disj([b4_dyn, Conj([d4_not_dyn, BinConstraintD(d[3], f44, op_eq)])])

    return c4, c5
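f1..f4 (and f11..f44) spell out PyTorch's documented output-size formula for one spatial dimension of Conv2d/MaxPool2d. As a runnable sketch (the // assumes the floor that PyTorch applies):

def conv_out_dim(b, padding, dilation, kernel, stride):
    return (b + 2 * padding - dilation * (kernel - 1) - 1) // stride + 1

# 32x32 input, kernel 3, stride 2, padding 1, dilation 1 -> 16
assert conv_out_dim(32, padding=1, dilation=1, kernel=3, stride=2) == 16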
Example No. 29
def generate_reshape(constraint, counter):
    """
    Transform reshape constraints
    """
    d, counter = gen_tensor_dims(4, counter)

    d1 = d[0]
    d2 = d[1]
    d3 = d[2]
    d4 = d[3]

    target = constraint.target.__args__

    is_fully_static = all(d != Dyn for d in target)

    # dynamic tensor
    c1_dyn = BinConstraintT(constraint.src, Dyn, op_eq)
    c2_tensor1 = BinConstraintT(constraint.src, TensorType([d1]), op_eq)
    c2_tensor2 = BinConstraintT(constraint.src, TensorType([d1, d2]), op_eq)
    c2_tensor3 = BinConstraintT(constraint.src, TensorType([d1, d2, d3]),
                                op_eq)
    c2_tensor4 = BinConstraintT(constraint.src, TensorType([d1, d2, d3, d4]),
                                op_eq)

    d1_eq_dyn = BinConstraintD(d1, Dyn, op_eq)
    d1_neq_dyn = BinConstraintD(d1, Dyn, op_neq)

    d2_eq_dyn = BinConstraintD(d2, Dyn, op_eq)
    d2_neq_dyn = BinConstraintD(d2, Dyn, op_neq)

    d3_eq_dyn = BinConstraintD(d3, Dyn, op_eq)
    d3_neq_dyn = BinConstraintD(d3, Dyn, op_neq)

    d4_eq_dyn = BinConstraintD(d4, Dyn, op_eq)
    d4_neq_dyn = BinConstraintD(d4, Dyn, op_neq)

    nat_d1 = BinConstraintD(0, d1, op_leq)
    nat_d2 = BinConstraintD(0, d2, op_leq)
    nat_d3 = BinConstraintD(0, d3, op_leq)
    nat_d4 = BinConstraintD(0, d4, op_leq)

    if is_fully_static:
        # size 1 tensor
        c3_tensor1 = Disj([
            d1_eq_dyn,
            (Conj([d1_neq_dyn,
                   BinConstraintD(d1, Prod(target), op_eq)]))
        ])
        all_tensor_1 = Conj([c2_tensor1, c3_tensor1])

        # size 2 tensor
        all_tensor_2 = Conj(
            [c2_tensor2,
             gen_all_reshape_possibilities([d1, d2], target)])

        # size 3 tensor
        all_tensor_3 = Conj(
            [c2_tensor3,
             gen_all_reshape_possibilities([d1, d2, d3], target)])

        # size 4 tensor
        all_tensor_4 = Conj([
            c2_tensor4,
            gen_all_reshape_possibilities([d1, d2, d3, d4], target)
        ])

        return Conj([
            Disj([
                c1_dyn, all_tensor_1, all_tensor_2, all_tensor_3, all_tensor_4
            ]), nat_d1, nat_d2, nat_d3, nat_d4
        ]), counter

    # then there must be exactly one occurrence of dyn
    else:
        new_target = []

        for n in target:
            if n != Dyn:
                new_target.append(n)

        # tensor 1
        c3_tensor1 = Disj([
            d1_eq_dyn,
            (Conj([d1_neq_dyn,
                   is_dim_div_by_target(new_target, d1)]))
        ])
        all_tensor_1 = Conj([c2_tensor1, c3_tensor1])

        # tensor 2
        c21 = Disj([d1_eq_dyn, d2_eq_dyn])
        c22 = Conj([
            d1_neq_dyn, d2_neq_dyn,
            is_dim_div_by_target(new_target, Prod([d1, d2]))
        ])
        all_tensor_2 = Conj([c2_tensor2, Disj([c21, c22])])

        # tensor 3
        c31 = Disj([d1_eq_dyn, d2_eq_dyn, d3_eq_dyn])
        c32 = Conj([
            d1_neq_dyn, d2_neq_dyn, d3_neq_dyn,
            is_dim_div_by_target(new_target, Prod([d1, d2, d3]))
        ])
        all_tensor_3 = Conj([c2_tensor3, Disj([c31, c32])])

        # tensor 4
        c41 = Disj([d1_eq_dyn, d2_eq_dyn, d3_eq_dyn, d4_eq_dyn])
        c42 = Conj([
            d1_neq_dyn, d2_neq_dyn, d3_neq_dyn, d4_neq_dyn,
            is_dim_div_by_target(new_target, Prod([d1, d2, d3, d4]))
        ])
        all_tensor_4 = Conj([c2_tensor4, Disj([c41, c42])])

        return Conj([
            Disj([
                c1_dyn, all_tensor_1, all_tensor_2, all_tensor_3, all_tensor_4
            ]), nat_d1, nat_d2, nat_d3, nat_d4
        ]), counter
Example No. 30
def arange_inference_rule(n: Node, symbols, constraints, counter):
    start = 0
    step = 1

    if len(n.args) == 1:
        end = symbols[n.args[0]]
    else:
        raise NotImplementedError('Not yet implemented')

    # int((end - start) / step)
    d1, counter = gen_dvar(counter)
    size_constraint = BinConstraintD(d1, BinConstraintD(BinConstraintD(end, start, op_sub), step, op_div), op_eq)
    arange, counter = gen_tvar(counter)
    symbols[n] = arange

    # if any of the parameters is Dyn, the output size is Dyn as well
    c1 = Disj([BinConstraintD(end, Dyn, op_eq),
               BinConstraintD(start, Dyn, op_eq),
               BinConstraintD(step, Dyn, op_eq)])
    c2 = BinConstraintD(d1, Dyn, op_eq)
    both_dyn = Conj([c1, c2])

    c11 = Conj([BinConstraintD(end, Dyn, op_neq),
                BinConstraintD(start, Dyn, op_neq),
                BinConstraintD(step, Dyn, op_neq)])
    c22 = BinConstraintD(d1, Dyn, op_neq)
    both_numbers = Conj([c11, c22, size_constraint])

    return [BinConstraintT(arange, TensorType([d1]), op_eq), Disj([both_dyn, both_numbers])], counter
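With the defaults handled here (start=0, step=1), the size constraint d1 == (end - start) / step reduces to d1 == end, which matches torch.arange:

import torch

assert torch.arange(5).shape == torch.Size([5])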