# Example 1
def create_strided_slice_op(in_shape, out_shape, start_offsets, end_offsets):
    """Build a StridedSlice operation with constant begin/end/strides inputs.

    Strides are fixed to 1 for every sliced dimension and all mask
    attributes are zero; IFM/OFM are uint8 tensors with default
    quantization parameters.
    """
    quant = testutil.default_quant_params()
    ifm = Tensor(in_shape, DataType.uint8, "in")
    ifm.quantization = quant
    begin = create_const_tensor("begin", [len(start_offsets)], DataType.uint8, start_offsets, quantization=quant)
    end = create_const_tensor("end", [len(end_offsets)], DataType.uint8, end_offsets, quantization=quant)
    strides = create_const_tensor(
        "strides", [len(end_offsets)], DataType.uint8, [1] * len(end_offsets), quantization=quant
    )
    ofm = Tensor(out_shape, DataType.uint8, "out")
    ofm.quantization = quant
    attrs = {"ellipsis_mask": 0, "new_axis_mask": 0, "shrink_axis_mask": 0, "begin_mask": 0, "end_mask": 0}
    return testutil.create_op(Op.StridedSlice, [ifm, begin, end, strides], ofm, attrs=attrs)
# Example 2
def test_constraint_splitv_inferred():
    # SplitV allows at most one size entry to be inferred (marked as -1).
    quant = testutil.default_quant_params()

    # Two inferred entries -> operator must be rejected.
    op = testutil.create_op_with_quant_tensors(Op.SplitV, [1, 1, 1, 8], [1, 1, 1, 8])
    split_sizes = create_const_tensor(
        "sizes", [1, 1, 1, 4], DataType.int16, [[[[0, -1, 2, -1]]]], np.int16, quantization=quant
    )
    op.add_input_tensor(split_sizes)
    assert not support.is_operator_supported(op)

    # Exactly one inferred entry -> operator is supported.
    op = testutil.create_op_with_quant_tensors(Op.SplitV, [1, 1, 1, 8], [1, 1, 1, 8])
    split_sizes = create_const_tensor(
        "sizes", [1, 1, 1, 4], DataType.int16, [[[[0, 1, 2, -1]]]], np.int16, quantization=quant
    )
    op.add_input_tensor(split_sizes)
    assert support.is_operator_supported(op)
# Example 3
def set_2K_lut(op, key):
    """Attach a 512-entry int32 (2KB) activation LUT to *op*.

    Seeding the RNG with *key* makes the generated table reproducible
    for a given key.  The exact `random.choices` call is kept so the
    value sequence is identical across callers relying on determinism.
    """
    random.seed(key)
    table = random.choices(range(512), k=512)
    lut = create_const_tensor(op.name + "_lut", [1, 1, 1, 512], DataType.int32, table, np.uint32, TensorPurpose.LUT)
    op.set_activation_lut(lut)
# Example 4
def set_256_lut(op, key):
    """Attach a 256-entry int8 activation LUT to *op*.

    Seeding the RNG with *key* makes the generated table reproducible
    for a given key.  The exact `random.choices` call is kept so the
    value sequence is identical across callers relying on determinism.
    """
    random.seed(key)
    table = random.choices(range(256), k=256)
    lut = create_const_tensor(op.name + "_lut", [1, 1, 1, 256], DataType.int8, table, np.uint8, TensorPurpose.LUT)
    op.set_activation_lut(lut)
# Example 5
def create_op_with_quant_tensors(op_type,
                                 ifm_shape,
                                 ofm_shape,
                                 weights_shape=None,
                                 bias_shape=None,
                                 datatype=DataType.uint8):
    """Create an operation whose IFM/OFM carry default quantization.

    Optionally appends a zero-filled weights tensor (numpy storage type
    chosen from the element byte width) and a zero-filled int32 bias
    tensor, each with its own default quantization and a zero-point
    array shaped like the tensor.
    """
    ifm = Tensor(ifm_shape, datatype, "in")
    ifm.quantization = default_quant_params()
    ofm = Tensor(ofm_shape, datatype, "out")
    ofm.quantization = default_quant_params()
    op = Operation(op_type, "op")
    op.add_input_tensor(ifm)
    op.set_output_tensor(ofm)

    # Optional weight tensor
    if weights_shape is not None:
        # Map element byte width -> numpy storage type (1 -> uint8, 2 -> int16, else int32).
        width = datatype.size_in_bytes()
        np_type = np.uint8 if width == 1 else (np.int16 if width == 2 else np.int32)
        qp = default_quant_params()
        qp.zero_point = np.zeros(weights_shape)
        op.add_input_tensor(
            create_const_tensor("weights", weights_shape, datatype, np.zeros(weights_shape), np_type, quantization=qp)
        )

    # Optional bias tensor
    if bias_shape is not None:
        qp = default_quant_params()
        qp.zero_point = np.zeros(bias_shape)
        op.add_input_tensor(
            create_const_tensor("bias", bias_shape, DataType.int32, np.zeros(bias_shape), np.int32, quantization=qp)
        )
    return op
# Example 6
def create_elemwise_op(
        op_type,
        name,
        ifm_shape,
        ifm2_shape,
        ofm_shape,
        datatype=DataType.uint8,
        ifm_quant=None,
        ifm2_quant=None,
        ofm_quant=None,
):
    """Create an elementwise operation with constant, zero-filled IFM/IFM2.

    IFM2 is omitted when ifm2_shape is None (unary elementwise ops).
    Quantization arguments default to fresh default_quant_params() per call.

    BUG FIX: the original declared `ifm_quant=default_quant_params()` (and
    likewise for ifm2/ofm) as default values.  Defaults are evaluated once
    at function-definition time, so every call — and every tensor — shared
    one mutable quantization object; mutating it in one test leaked into
    others.  None sentinels restore per-call isolation while keeping the
    interface backward compatible (explicit arguments behave as before).
    """
    if ifm_quant is None:
        ifm_quant = default_quant_params()
    if ifm2_quant is None:
        ifm2_quant = default_quant_params()
    if ofm_quant is None:
        ofm_quant = default_quant_params()
    # Map element byte width -> numpy storage type (1 -> uint8, 2 -> int16, else int32).
    if datatype.size_in_bytes() == 1:
        np_type = np.uint8
    elif datatype.size_in_bytes() == 2:
        np_type = np.int16
    else:
        np_type = np.int32
    op = Operation(op_type, name)
    op.add_input_tensor(
        create_const_tensor(name + "_ifm",
                            ifm_shape,
                            datatype,
                            np.zeros(ifm_shape),
                            np_type,
                            quantization=ifm_quant))
    if ifm2_shape is not None:
        op.add_input_tensor(
            create_const_tensor(name + "_ifm2",
                                ifm2_shape,
                                datatype,
                                np.zeros(ifm2_shape),
                                np_type,
                                quantization=ifm2_quant))
    ofm = Tensor(ofm_shape, datatype, name + "_ofm")
    ofm.quantization = ofm_quant
    op.set_output_tensor(ofm)
    return op