Example 1
def test_constraint_axis_exists():
    # Missing axis attribute
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    assert not support.is_operator_supported(op)
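These snippets call a module-level support object and rely on imports that the listing does not show. A plausible shared setup, assuming the ethosu.vela package layout these tests appear to come from (the exact module paths are an assumption, not shown in the listing), would be:
import numpy as np
from ethosu.vela.data_type import DataType
from ethosu.vela.operation import Op
from ethosu.vela.operation import Operation
from ethosu.vela.supported_operators import SupportedOperators
from ethosu.vela.tensor import create_const_tensor
from ethosu.vela.tensor import Tensor
from ethosu.vela.test import testutil

# Shared checker instance used by the assertions in the tests below
support = SupportedOperators()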
Example 2
def test_constraint_matching_dimensionality():
    # Mismatching dimensionality: 4D+2D=4D
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert not support.is_operator_supported(op)
Example 3
def test_constraint_concat_pass():
    # A working concat
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert support.is_operator_supported(op)
Example 4
def test_constraint_tconv_stride():
    # Strides must be 2
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1, "padding": b"SAME"}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)
Example 5
def test_constraint_bias_40bit():
    # Bias values must not exceed 40 bits
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 1, 1, 1], DataType.int64, "bias")
    bias.quant_values = np.array([0x01FF_FFFF_FFFF])
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)
Example 6
def test_constraint_weights_const():
    # Weight tensor must be constant (non-const weights are not supported)
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    weights = Tensor([64, 64, 1, 1], DataType.uint8, "weights")
    weights.quantization = testutil.default_quant_params()
    op.add_input_tensor(weights)
    assert not support.is_operator_supported(op)
Example 7
def test_constraint_valid_dimensions():
    # Mismatching dimension value:
    # ifm2 has w and h set to 2, which is not the concat axis and does not match ifm1 or ofm
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 2, 2, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert not support.is_operator_supported(op)
Example 8
def test_constraint_bias_type():
    # Bias must have a certain datatype
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 8, 8, 8], DataType.uint8, "bias")
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)
Example 9
def create_elemwise_op(
        op_type,
        name,
        ifm_shape,
        ifm2_shape,
        ofm_shape,
        datatype=DataType.uint8,
        ifm_quant=default_quant_params(),
        ifm2_quant=default_quant_params(),
        ofm_quant=default_quant_params(),
):
    # Creates elementwise operation with constant IFM/IFM2
    if datatype.size_in_bytes() == 1:
        np_type = np.uint8
    elif datatype.size_in_bytes() == 2:
        np_type = np.int16
    else:
        np_type = np.int32
    op = Operation(op_type, name)
    op.add_input_tensor(
        create_const_tensor(name + "_ifm",
                            ifm_shape,
                            datatype,
                            np.zeros(ifm_shape),
                            np_type,
                            quantization=ifm_quant))
    if ifm2_shape is not None:
        op.add_input_tensor(
            create_const_tensor(name + "_ifm2",
                                ifm2_shape,
                                datatype,
                                np.zeros(ifm2_shape),
                                np_type,
                                quantization=ifm2_quant))
    ofm = Tensor(ofm_shape, datatype, name + "_ofm")
    ofm.quantization = ofm_quant
    op.set_output_tensor(ofm)
    return op
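A minimal usage sketch, assuming Op.Add is available and that support is set up as in the note under Example 1; the operator choice and shapes are illustrative assumptions, not taken from the listing:
# Hypothetical example: elementwise Add with two constant 4D inputs
op = create_elemwise_op(Op.Add, "add_op", [1, 2, 2, 2], [1, 2, 2, 2], [1, 2, 2, 2])
result = support.is_operator_supported(op)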
Example 10
def create_strided_slice_op(in_shape, out_shape, start_offsets, end_offsets):
    # Builds a StridedSlice op with four inputs: data, begin, end and strides (all strides set to 1)
    qp = testutil.default_quant_params()
    in0 = Tensor(in_shape, DataType.uint8, "in")
    in0.quantization = qp
    in1 = create_const_tensor("begin", [len(start_offsets)], DataType.uint8, start_offsets, quantization=qp)
    in2 = create_const_tensor("end", [len(end_offsets)], DataType.uint8, end_offsets, quantization=qp)
    in3 = create_const_tensor("strides", [len(end_offsets)], DataType.uint8, len(end_offsets) * [1], quantization=qp)
    out = Tensor(out_shape, DataType.uint8, "out")
    out.quantization = qp
    attrs = {"ellipsis_mask": 0, "new_axis_mask": 0, "shrink_axis_mask": 0, "begin_mask": 0, "end_mask": 0}
    return testutil.create_op(Op.StridedSlice, [in0, in1, in2, in3], out, attrs=attrs)
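A usage sketch under assumed shapes and offsets (the values below are illustrative and chosen only to be self-consistent, not copied from the original tests):
# Hypothetical example: slice a 1x10x10x10 tensor down to 1x5x5x10
op = create_strided_slice_op([1, 10, 10, 10], [1, 5, 5, 10], [0, 2, 2, 0], [1, 7, 7, 10])
result = support.is_operator_supported(op)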
Example 11
def test_constraint_tconv_valid():
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[4, 4, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": b"VALID"}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op)
    # Invalid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[2, 2, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": b"VALID"}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)
Example 12
def create_op_with_quant_tensors(op_type,
                                 ifm_shape,
                                 ofm_shape,
                                 weights_shape=None,
                                 bias_shape=None,
                                 datatype=DataType.uint8):
    ifm = Tensor(ifm_shape, datatype, "in")
    ifm.quantization = default_quant_params()
    ofm = Tensor(ofm_shape, datatype, "out")
    ofm.quantization = default_quant_params()
    op = Operation(op_type, "op")
    op.add_input_tensor(ifm)
    op.set_output_tensor(ofm)
    # Optional weight tensor
    if weights_shape is not None:
        if datatype.size_in_bytes() == 1:
            np_type = np.uint8
        elif datatype.size_in_bytes() == 2:
            np_type = np.int16
        else:
            np_type = np.int32
        qp = default_quant_params()
        qp.zero_point = np.zeros(weights_shape)
        weights = create_const_tensor("weights",
                                      weights_shape,
                                      datatype,
                                      np.zeros(weights_shape),
                                      np_type,
                                      quantization=qp)
        op.add_input_tensor(weights)
    # Optional bias tensor
    if bias_shape is not None:
        qp = default_quant_params()
        qp.zero_point = np.zeros(bias_shape)
        bias = create_const_tensor("bias",
                                   bias_shape,
                                   DataType.int32,
                                   np.zeros(bias_shape),
                                   np.int32,
                                   quantization=qp)
        op.add_input_tensor(bias)
    return op
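A usage sketch mirroring the convolution tests above, with an assumed bias_shape argument to exercise the optional bias path; the exact shapes are illustrative:
# Hypothetical example: Conv2D with constant weights and an int32 bias
op = create_op_with_quant_tensors(
    Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1], bias_shape=[1])
op.attrs = {"stride_w": 1, "stride_h": 1}
result = support.is_operator_supported(op)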