Example 1
def test_constraint_splitv_inferred():
    # SplitV requires a maximum of one inferred shape (-1)
    qp = testutil.default_quant_params()
    op = testutil.create_op_with_quant_tensors(Op.SplitV, [1, 1, 1, 8], [1, 1, 1, 8])
    sizes = create_const_tensor("sizes", [1, 1, 1, 4], DataType.int16, [[[[0, -1, 2, -1]]]], np.int16, quantization=qp)
    op.add_input_tensor(sizes)
    assert not support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.SplitV, [1, 1, 1, 8], [1, 1, 1, 8])
    sizes = create_const_tensor("sizes", [1, 1, 1, 4], DataType.int16, [[[[0, 1, 2, -1]]]], np.int16, quantization=qp)
    op.add_input_tensor(sizes)
    assert support.is_operator_supported(op)
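All of the examples in this listing share the same module-level setup. A minimal sketch of the imports and the support checker they appear to rely on, assuming the ethos-u-vela project layout (the module paths are an assumption, not shown in the examples themselves):

import numpy as np

from ethosu.vela.data_type import DataType
from ethosu.vela.operation import Op
from ethosu.vela.supported_operators import SupportedOperators
from ethosu.vela.tensor import create_const_tensor
from ethosu.vela.tensor import QuantizationParameters
from ethosu.vela.tensor import Tensor
from ethosu.vela.test import testutil

# Shared operator-support checker queried by every example below
support = SupportedOperators()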
Example 2
def test_constraint_tconv_valid():
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[4, 4, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": b"VALID"}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op)
    # Invalid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[2, 2, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": b"VALID"}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)
Example 3
def test_constraint_beta_value_range():
    # beta must not be negative
    op = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 1, 1, 8])
    op.attrs["beta"] = -1.0
    assert not support.is_operator_supported(op)
    op.attrs["beta"] = 0.0
    assert support.is_operator_supported(op)
Example 4
def test_constraint_bias_type():
    # Bias must have a supported datatype (a uint8 bias is rejected)
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 8, 8, 8], DataType.uint8, "bias")
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)
Example 5
def test_constraint_axis_exists():
    # Missing axis attribute
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    assert not support.is_operator_supported(op)
Example 6
def test_constraint_weights_type():
    # Weight tensor must be 8-bit
    op = testutil.create_op_with_quant_tensors(
        Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1], datatype=DataType.int16
    )
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)
Example 7
def test_constraint_tconv_stride():
    # Strides must be 2
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1, "padding": b"SAME"}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)
Example 8
def test_constraint_tens_input_scalar():
    # Shapeless input is allowed only for certain op types:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # Invalid shapeless input due to op type:
    op = testutil.create_op_with_quant_tensors(Op.Relu, [], [1, 8, 8, 8])
    op.ifm.values = 0.5
    assert not support.is_operator_supported(op)
Example 9
def test_constraint_bias_40bit():
    # Bias values must not exceed 40 bits
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 1, 1, 1], DataType.int64, "bias")
    bias.quant_values = np.array([0x01FF_FFFF_FFFF])
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)
Example 10
def test_constraint_matching_dimensionality():
    # Mismatching dimensionality: 4D+2D=4D
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert not support.is_operator_supported(op)
Example 11
def test_constraint_matching_in_out_types():
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 2, "padding": b"SAME"}
    assert support.is_operator_supported(op)
    # Invalid: datatypes for ifm and ofm must match (default is uint8)
    op.ifm.dtype = DataType.int8
    assert not support.is_operator_supported(op)
Example 12
def test_constraint_weights_const():
    # The weight tensor must be constant
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    weights = Tensor([64, 64, 1, 1], DataType.uint8, "weights")
    weights.quantization = testutil.default_quant_params()
    op.add_input_tensor(weights)
    assert not support.is_operator_supported(op)
Example 13
def test_constraint_concat_pass():
    # A working concat
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert support.is_operator_supported(op)
Example 14
def test_constraint_filter_product_height_range_valid_pad():
    # Avg pool restrictions are dependent on padding:
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": b"VALID"}
    assert support.is_operator_supported(op)
    # VALID padding restricts filter W x H to 256x256
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)
Example 15
def test_constraint_valid_dimensions():
    # Mismatching dimension value:
    # ifm2 has w and h as 2, which is not the concat axis and doesn't match ifm1 or ofm
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 2, 2, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert not support.is_operator_supported(op)
Example 16
def test_constraint_filter_range():
    # Avg pool restrictions are dependent on padding:
    # SAME padding restricts both W and H to max 8
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 20, "filter_height": 20, "padding": b"SAME"}
    assert not support.is_operator_supported(op)
    # VALID padding limits are much larger
    op.attrs["padding"] = b"VALID"
    assert support.is_operator_supported(op)
Example 17
def test_constraint_resize():
    # IFM W and H == 1
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 1, 1, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # IFM == OFM
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # IFM x2 == OFM ; align_corners = False
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # IFM x2 -1 == OFM ; align_corners = True
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 7, 7, 8])
    op.attrs["align_corners"] = True
    assert support.is_operator_supported(op)
    # Invalid cases
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 20, 20, 8])
    assert not support.is_operator_supported(op)
    op.attrs["align_corners"] = True
    assert not support.is_operator_supported(op)
Example 18
def test_constraint_depth_multiplier():
    # Valid. Depth multiplier is 1, so no further constraints
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 1}
    assert support.is_operator_supported(op)
    # Invalid. Depth multiplier doesn't equal the ofm channel count
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
    assert not support.is_operator_supported(op)
    # Valid. Depth multiplier equals the ofm channel count
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
    assert support.is_operator_supported(op)
Example 19
def test_constraint_filter_product_height_range():
    # Max pool restrictions aren't dependent on padding
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": b"SAME"}
    assert support.is_operator_supported(op)
    # Restricts filter W x H to 256x256
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)
    # Doesn't matter if SAME or VALID
    op.attrs["padding"] = b"VALID"
    assert not support.is_operator_supported(op)
Example 20
def test_constraint_tens_quant_per_axis_is_supp():
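    # Per-axis quantization (per-channel zero points and scales) on the bias is supported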
    op = testutil.create_op_with_quant_tensors(
        Op.Conv2DBias, [1, 1, 1, 3], [1, 1, 1, 3], weights_shape=[1, 1, 1, 3], bias_shape=[1, 1, 1, 3]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)
    qp = QuantizationParameters()
    qp.zero_point = np.zeros((1, 3))
    qp.scale_f32 = np.ones((1, 3))
    op.bias.quantization = qp
    assert support.is_operator_supported(op)
Example 21
def test_constraint_tens_dimension():
    # Tensor dimensions must be in the inclusive range 1-65535
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 0], [1, 8, 8, 65536])
    assert not support.is_operator_supported(op)
Example 22
def test_constraint_tens_dtype():
    # Tensors can only be of type uint8, int8, int16 and int32
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.float32)
    assert not support.is_operator_supported(op)
Example 23
def test_constraint_tens_int32_ops():
    # For int32, only select op types are allowed:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert not support.is_operator_supported(op)
Example 24
def test_constraint_batch_size():
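    # Batch size must be 1 (a batch-2 IFM is rejected)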
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [2, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)
Example 25
def test_constraint_tens_shape_size():
    # Tensors cannot be > 4D
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 1, 8, 8, 8], [1, 1, 8, 8, 8])
    assert not support.is_operator_supported(op)
Example 26
def test_constraint_quant_scale_inf():
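    # A near-zero OFM scale makes the derived rescale ratio overflow to infinity
    # (rationale inferred from the test name; the test only shows that it is rejected)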
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
    op.ofm.quantization.scale_f32 = np.float32(1e-39)
    assert not support.is_operator_supported(op)
Example 27
def test_constraint_filter_type():
    # Filter width/height must be integers
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2.5, "filter_height": "2", "padding": b"SAME"}
    assert not support.is_operator_supported(op)
Example 28
def test_constraint_tens_no_dynamic():
    # Tensors cannot be dynamic (no shape, not a scalar)
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [])
    assert not support.is_operator_supported(op)
Example 29
def test_constraint_tens_defined_shape():
    # Tensor shapes cannot contain None
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, None, 8], [1, 8, 8, 8])
    assert not support.is_operator_supported(op)
Example 30
def test_constraint_matching_shapes():
    # Softmax requires the ifm and ofm shapes to match
    op = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 2, 2, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 1, 1, 8])
    assert support.is_operator_supported(op)
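Each example above is a self-contained pytest case. With the shared setup sketched after Example 1 in place, the whole suite runs under pytest, e.g. pytest -q test_supported_operators.py (the filename is an assumption).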