コード例 #1
0
def test_constraint_broadcast_shapes():
    """Elementwise broadcasting is only allowed from dimensions of size 1,
    and the OFM shape must carry the largest of the two input dims."""
    # BINARY CASE
    # Only allow broadcast to 1 dim, for 1 rank index
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 4], [1, 2, 4], [1, 2, 4])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 4], [1, 1, 4], [1, 2, 4])
    assert support.is_operator_supported(op)
    # Only allow broadcast to 1 dim, for 3 rank indexes
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 1, 1], [1, 4, 8, 16], [1, 4, 8, 16])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 8, 16], [1, 1, 1, 1], [1, 4, 8, 16])
    assert support.is_operator_supported(op)
    # One broadcast dim not 1
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 4], [1, 4, 4], [1, 4, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 4], [1, 2, 4], [1, 4, 4])
    assert not support.is_operator_supported(op)
    # OFM shape dim largest ifm/ifm2 shape dim
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4], [4, 4], [1, 4])
    assert not support.is_operator_supported(op)
    # Same check with the operand order swapped (previously this was an exact
    # duplicate of the case above, unlike every other pair in this test)
    op = testutil.create_elemwise_op(Op.Add, "op", [4, 4], [1, 4], [1, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 1, 16], [1, 1, 4, 1], [1, 4, 1, 16])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 4, 1], [1, 4, 1, 16], [1, 4, 1, 16])
    assert not support.is_operator_supported(op)
コード例 #2
0
def test_constraint_inputs_int32():
    """SHL requires both of its inputs to be int32."""
    # default datatype is uint8 -> rejected
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert not support.is_operator_supported(op)
    # explicit int32 inputs -> accepted
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # downgrading just the second input breaks the constraint again
    op.ifm2.dtype = DataType.int16
    assert not support.is_operator_supported(op)
コード例 #3
0
def test_constraint_alpha_valid():
    """LeakyRelu's alpha attribute must not be negative."""
    op = testutil.create_elemwise_op(Op.LeakyRelu, "op", [2, 2], None, [2, 2])
    for alpha, ok in ((0, True), (-1, False)):
        op.attrs["alpha"] = alpha
        if ok:
            assert support.is_operator_supported(op)
        else:
            assert not support.is_operator_supported(op)
コード例 #4
0
def test_constraint_tens_quant_scale():
    """Quantization scale cannot be infinite."""
    # (original comment had a typo: "infinit")
    qp = QuantizationParameters()
    qp.zero_point = 0
    qp.scale_f32 = np.inf
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm_quant=qp)
    assert not support.is_operator_supported(op)
コード例 #5
0
def test_constraint_tens_quant_per_axis_not_supp():
    """Array-valued (per-axis) quantization is rejected for elementwise ops."""
    quant = QuantizationParameters()
    quant.scale_f32 = np.ones((1, 3))
    quant.zero_point = np.zeros((1, 3))
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm_quant=quant)
    assert not support.is_operator_supported(op)
コード例 #6
0
def test_constraint_tens_input_scalar():
    """A shapeless (scalar) input is only valid for certain op types."""
    # Mul accepts a shapeless second input
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # Relu does not accept a shapeless input
    op = testutil.create_op_with_quant_tensors(Op.Relu, [], [1, 8, 8, 8])
    op.ifm.values = 0.5
    assert not support.is_operator_supported(op)
コード例 #7
0
File: test_lut.py  Project: DemonGiggle/vela
def test_optimize_high_level_cmd_stream_2K():
    """Exercise lut.optimize_high_level_cmd_stream with a mix of 256 byte
    and 2K LUTs, checking which LUT DMA commands survive the optimization
    and how LUT output addresses are shared/reused in SHRAM."""
    # Tests lut.optimize_high_level_cmd_stream, blending 256 byte and 2K luts
    arch = testutil.create_arch()
    shape = [1, 1, 1, 1]
    # u8 LUT op, should lead to DMA
    op0 = testutil.create_elemwise_op(Op.Add, "op0", shape, shape, shape)
    set_256_lut(op0, "lut0")
    # u8 LUT op, should lead to DMA
    op1 = testutil.create_elemwise_op(Op.Add, "op1", shape, shape, shape)
    set_256_lut(op1, "lut1")
    # u8 LUT op with different LUT, should lead to DMA
    op2 = testutil.create_elemwise_op(Op.Add, "op2", shape, shape, shape)
    set_256_lut(op2, "lut2")
    # u8 LUT op with same LUT as in op1, should not lead to DMA
    op3 = testutil.create_elemwise_op(Op.Add, "op3", shape, shape, shape)
    set_256_lut(op3, "lut1")
    # u8 LUT op with same LUT as in op2, should not lead to DMA
    op4 = testutil.create_elemwise_op(Op.Add, "op4", shape, shape, shape)
    set_256_lut(op4, "lut2")
    # 2K LUT op, should lead to DMA, and will overwrite all previous LUTs in SHRAM
    op5_2K = testutil.create_elemwise_op(Op.Add, "op5", shape, shape, shape)
    set_2K_lut(op5_2K, "lut5")
    # Another 2K LUT op, should lead to DMA, and will overwrite the previous LUT in SHRAM
    op6_2K = testutil.create_elemwise_op(Op.Add, "op6", shape, shape, shape)
    set_2K_lut(op6_2K, "lut6")
    # u8 LUT op with same LUT as in op1, should lead to DMA
    op7 = testutil.create_elemwise_op(Op.Add, "op7", shape, shape, shape)
    set_256_lut(op7, "lut1")

    op_list = [op0, op1, op2, op3, op4, op5_2K, op6_2K, op7]
    sg = process(arch, op_list)
    # Keep a handle on the pre-optimization command stream for address checks
    orig_cmd_list = sg.high_level_command_stream
    # NOTE(review): self-assignment is a no-op; orig_cmd_list aliases the same
    # list object. The later address asserts presumably rely on the optimizer
    # replacing sg.high_level_command_stream rather than mutating it — confirm.
    sg.high_level_command_stream = orig_cmd_list
    lut.optimize_high_level_cmd_stream(sg, arch)
    cmd_list = sg.high_level_command_stream
    # Check that only the needed DMA commands are left
    expected_dma_ops = [op0, op1, op2, op5_2K, op6_2K, op7]

    # Reduce both streams to just the LUT-related DMA commands
    cmd_list = filter_lut_cmds(cmd_list)
    orig_cmd_list = filter_lut_cmds(orig_cmd_list)

    for (cmd, op) in zip(cmd_list, expected_dma_ops):
        assert cmd.in_tensor == op.activation_lut
    # Check that lut0, lut1 and lut2 in op0, op1, op2 are stored on different addresses
    assert orig_cmd_list[0].out_tensor.address != orig_cmd_list[1].out_tensor.address
    assert orig_cmd_list[0].out_tensor.address != orig_cmd_list[2].out_tensor.address
    assert orig_cmd_list[1].out_tensor.address != orig_cmd_list[2].out_tensor.address
    # Check that lut1 in op1 and op3 have same address
    assert orig_cmd_list[1].out_tensor.address == orig_cmd_list[3].out_tensor.address
    # Check that lut2 in op2 and op4 have same address
    assert orig_cmd_list[2].out_tensor.address == orig_cmd_list[4].out_tensor.address
    # Check that lut-s for 16 bit (op5 and op6) are stored on same address
    assert orig_cmd_list[5].out_tensor.address == orig_cmd_list[6].out_tensor.address
コード例 #8
0
File: test_lut.py  Project: DemonGiggle/vela
def test_optimize_high_level_cmd_stream_1K():
    """Exercise lut.optimize_high_level_cmd_stream with a mix of 256 byte
    and 1K LUTs, checking which LUT DMA commands survive the optimization
    and how LUT output addresses are shared/reused in SHRAM."""
    # Tests lut.optimize_high_level_cmd_stream, blending 256 and 1K luts
    arch = testutil.create_arch()
    shape = [1, 1, 1, 1]
    # u8 LUT op, should lead to DMA
    op0 = testutil.create_elemwise_op(Op.Add, "op0", shape, shape, shape)
    set_256_lut(op0, "lut0")
    # u8 LUT op, should lead to DMA
    op1 = testutil.create_elemwise_op(Op.Add, "op1", shape, shape, shape)
    set_256_lut(op1, "lut1")
    # 1K LUT op with different LUT, should lead to DMA
    op2_1K = testutil.create_elemwise_op(Op.Add, "op2", shape, shape, shape)
    set_1K_lut(op2_1K, "lut2")
    # u8 LUT op with same LUT as in op1, should not lead to DMA
    op3 = testutil.create_elemwise_op(Op.Add, "op3", shape, shape, shape)
    set_256_lut(op3, "lut1")
    # 1K LUT op with same LUT as in op2, should not lead to DMA
    op4_1K = testutil.create_elemwise_op(Op.Add, "op4", shape, shape, shape)
    set_1K_lut(op4_1K, "lut2")
    # 1K LUT op, should lead to DMA, and will overwrite lut2
    # (renamed from op5_2K: this op is given a 1K LUT, not a 2K one —
    # the old name was a copy-paste from the 2K variant of this test)
    op5_1K = testutil.create_elemwise_op(Op.Add, "op5", shape, shape, shape)
    set_1K_lut(op5_1K, "lut5")
    # u8 LUT op, lut0 should still be present, should not lead to DMA
    op6 = testutil.create_elemwise_op(Op.Add, "op6", shape, shape, shape)
    set_256_lut(op6, "lut0")
    # 1K LUT op with same LUT as in op2, should lead to DMA
    op7 = testutil.create_elemwise_op(Op.Add, "op7", shape, shape, shape)
    set_1K_lut(op7, "lut2")

    op_list = [op0, op1, op2_1K, op3, op4_1K, op5_1K, op6, op7]
    sg = process(arch, op_list)
    # Keep a handle on the pre-optimization command stream for address checks
    orig_cmd_list = sg.high_level_command_stream
    sg.high_level_command_stream = orig_cmd_list
    lut.optimize_high_level_cmd_stream(sg, arch)
    cmd_list = sg.high_level_command_stream

    # Reduce both streams to just the LUT-related DMA commands
    cmd_list = filter_lut_cmds(cmd_list)
    orig_cmd_list = filter_lut_cmds(orig_cmd_list)

    # Check that only the needed DMA commands are left
    expected_dma_ops = [op0, op1, op2_1K, op5_1K, op7]
    for (cmd, op) in zip(cmd_list, expected_dma_ops):
        assert cmd.in_tensor == op.activation_lut
    # Check that lut0, lut1 and lut2 in op0, op1, op2 are stored on different addresses
    assert orig_cmd_list[0].out_tensor.address != orig_cmd_list[1].out_tensor.address
    assert orig_cmd_list[0].out_tensor.address != orig_cmd_list[2].out_tensor.address
    assert orig_cmd_list[1].out_tensor.address != orig_cmd_list[2].out_tensor.address
    # Check that lut1 in op1 and op3 have same address
    assert orig_cmd_list[1].out_tensor.address == orig_cmd_list[3].out_tensor.address
    # Check that lut2 in op2 and op4 and op7 have same address
    assert orig_cmd_list[2].out_tensor.address == orig_cmd_list[4].out_tensor.address
    assert orig_cmd_list[2].out_tensor.address == orig_cmd_list[7].out_tensor.address
コード例 #9
0
def test_constraint_unsigned_valid():
    """With unsigned inputs the output must be the same type or int32."""
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    # default uint8 output matches the inputs -> supported
    assert support.is_operator_supported(op)
    # signed 8/16 bit outputs are rejected
    for bad_dtype in (DataType.int8, DataType.int16):
        op.ofm.dtype = bad_dtype
        assert not support.is_operator_supported(op)
    # int32 output is the one signed type that is allowed
    op.ofm.dtype = DataType.int32
    assert support.is_operator_supported(op)
コード例 #10
0
def test_constraint_matching_quantization_parameters():
    """Input quantization parameters must match the output's."""
    mismatch_qp = QuantizationParameters()
    mismatch_qp.zero_point = 128
    mismatch_qp.scale_f32 = np.float32(1.5)
    # valid - all matching (uses default quant params)
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # invalid - ifm differs from ofm
    op.ifm.quantization = mismatch_qp
    assert not support.is_operator_supported(op)
    # invalid - ifm2 differs from ofm
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm2.quantization = mismatch_qp
    assert not support.is_operator_supported(op)
    # invalid - both inputs differ from ofm
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm.quantization = mismatch_qp
    op.ifm2.quantization = mismatch_qp
    assert not support.is_operator_supported(op)
    # valid - updating the ofm makes everything match again
    op.ofm.quantization = mismatch_qp
    assert support.is_operator_supported(op)
コード例 #11
0
def test_constraint_matching_either_shapes():
    """At least one input shape must match the output shape."""
    # BINARY CASE: (ifm, ifm2, ofm, supported)
    binary_cases = (
        ([1, 4], [4, 4], [4, 4], True),
        ([4, 4], [1, 4], [4, 4], True),
        ([4, 4], [4, 4], [2, 2], False),
        ([1, 4, 1, 16], [1, 1, 4, 1], [1, 4, 4, 16], False),
        ([1, 1, 4, 1], [1, 4, 1, 16], [1, 4, 4, 16], False),
    )
    for ifm_shape, ifm2_shape, ofm_shape, ok in binary_cases:
        op = testutil.create_elemwise_op(Op.Add, "op", ifm_shape, ifm2_shape, ofm_shape)
        if ok:
            assert support.is_operator_supported(op)
        else:
            assert not support.is_operator_supported(op)

    # UNARY CASE: with no second input this reduces to ifm shape == ofm shape
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2], None, [2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.CLZ, "op", [4, 4], None, [2, 2], datatype=DataType.int32)
    assert not support.is_operator_supported(op)
コード例 #12
0
def test_constraint_elemwise_batch_size():
    """Batch size >1 is only permitted when tensors have at most 2 dims."""
    cases = (([2, 2], True), ([1, 2, 2], True), ([2, 2, 2], False))
    # BINARY CASE
    for shape, ok in cases:
        op = testutil.create_elemwise_op(Op.Add, "op", shape, shape, shape)
        if ok:
            assert support.is_operator_supported(op)
        else:
            assert not support.is_operator_supported(op)
    # UNARY CASE (same rule, no second input)
    for shape, ok in cases:
        op = testutil.create_elemwise_op(Op.CLZ, "op", shape, None, shape, datatype=DataType.int32)
        if ok:
            assert support.is_operator_supported(op)
        else:
            assert not support.is_operator_supported(op)
コード例 #13
0
def test_constraint_matching_inputs_types():
    """Both inputs must share a data type (defaults give uint8 everywhere)."""
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    # force a mismatch on the second input only
    op.ifm2.dtype = DataType.int8
    assert not support.is_operator_supported(op)
コード例 #14
0
def test_constraint_tens_quant_none_check():
    """Every tensor needs quantization parameters; None on ifm2 is rejected."""
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm2_quant=None)
    assert not support.is_operator_supported(op)
コード例 #15
0
def test_constraint_tens_int32_ops():
    """int32 tensors are supported for only a subset of op types."""
    # Mul is one of the allowed int32 op types
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # Relu is not
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert not support.is_operator_supported(op)
コード例 #16
0
def test_constraint_matching_signed():
    """Signed inputs require the output to be signed as well."""
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int8)
    # an unsigned output with int8 inputs must be rejected
    op.ofm.dtype = DataType.uint8
    assert not support.is_operator_supported(op)
コード例 #17
0
def test_constraint_tens_output_scalar():
    """A scalar (shapeless) output is never supported."""
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [])
    # mark the shapeless ofm with a scalar value
    op.ofm.values = 0.5
    assert not support.is_operator_supported(op)