Example #1
    def callback(self, pre: tvm.relay.Expr, post: tvm.relay.Expr,
                 node_map: tvm.ir.container.Map) -> tvm.relay.Expr:
        params = ethosu_patterns.QnnDepthwiseConv2DParams(post.op.body)
        params.ifm.tensor = post.args[0]
        # Index of the channels dimension for each supported data layout.
        channels_map = {
            "NHWC": 3,
        }
        # Spatial (height, width) slice of the weights for each supported
        # weight layout.
        kernel_shape_map = {
            "HWOI": params.weights.shape[0:2],
        }

        # Transpose the weights from HWOI (as produced by the TFLite frontend)
        # to OHWI, the layout expected by ethosu_depthwise_conv2d.
        weights_values = params.weights.values
        weights_values_ohwi = np.moveaxis(weights_values, [0, 1, 2, 3],
                                          [1, 2, 0, 3])

        activation = "NONE"
        # Activations requiring a LUT are not currently supported, so pass an
        # empty LUT constant.
        lut = relay.const([], "int8")
        clip_min = 0
        clip_max = 0
        if params.activation:
            activation = ethosu_patterns.QnnDepthwiseConv2DParams.activation_map[
                params.activation.op.name]
            if activation == "CLIP":
                clip_min = int(params.activation.attrs.a_min)
                clip_max = int(params.activation.attrs.a_max)
        # Pack the biases and quantization scales into the per-channel
        # scale+bias encoding expected by the NPU.
        scale_bias = vela_api.pack_biases(
            biases=params.biases.tensor.data.asnumpy(),
            ifm_scale=params.ifm.q_params.scale_f32,
            ifm_dtype=np.dtype(params.ifm.dtype),
            weight_scales=params.weights.q_params.scale_f32,
            ofm_scale=params.ofm.q_params.scale_f32,
            is_activation_tanh_or_sigmoid=activation in ["TANH", "SIGMOID"],
        )

        ethosu_depthwise_conv2d = ethosu_ops.ethosu_depthwise_conv2d(
            post.args[0],  # IFM
            relay.const(weights_values_ohwi, params.weights.values.dtype),
            relay.const(scale_bias, "uint8"),
            lut,
            float(params.ifm.q_params.scale_f32),
            int(params.ifm.q_params.zero_point),
            int(params.weights.q_params.zero_point),
            float(params.ofm.q_params.scale_f32),
            int(params.ofm.q_params.zero_point),
            kernel_shape_map[str(params.weights.layout)],
            params.ofm.shape[channels_map[str(params.ofm.layout)]],
            strides=params.strides,
            padding=params.padding,
            dilation=params.dilation,
            activation=activation,
            clip_min=clip_min,
            clip_max=clip_max,
            upscale="NONE",
            ifm_layout=str(params.ifm.layout),
            ofm_layout=str(params.ofm.layout),
            ofm_dtype=str(params.ofm.dtype),
        )
        return ethosu_depthwise_conv2d
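
Example #1 is the body of a pattern-rewrite callback and is not runnable on its own: it relies on module-level imports (numpy as np, relay, ethosu_patterns, ethosu_ops, vela_api) and on a host class derived from tvm.relay.dataflow_pattern.DFPatternCallback. A minimal, hypothetical sketch of how such a callback is usually hosted and applied; the wildcard pattern is only a placeholder for the real composite-matching pattern:

from tvm.relay.dataflow_pattern import DFPatternCallback, rewrite, wildcard

class DepthwiseConv2DRewriter(DFPatternCallback):
    """Hypothetical host class for the callback shown in Example #1."""

    def __init__(self):
        super().__init__(require_type=True)
        # Placeholder pattern; the real rewriter matches the composite
        # function produced by the Ethos-U pattern table.
        self.pattern = wildcard()

    # def callback(self, pre, post, node_map): ...  (as shown above)

# Applying the rewriter to a module's main function:
# mod["main"] = rewrite(DepthwiseConv2DRewriter(), mod["main"])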
Example #2
def make_ethosu_depthwise_conv2d(
    ifm,
    channels,
    kernel_shape,
    padding,
    strides,
    dilation,
    activation="NONE",
    ifm_layout="NHWC",
    ofm_layout="NHWC",
    weight_dtype="int8",
    scale_bias_dtype="uint8",
    rounding_mode="TFL",
):
    # Derive the OHWI weight shape (depth multiplier 1) and normalize the
    # padding into an explicit (top, left, bottom, right) tuple.
    weight_shape = (channels, kernel_shape[0], kernel_shape[1], 1)
    padding = get_pad_tuple(padding, kernel_shape)

    # Generate random data for the packed scale/bias blob: one 10-byte entry
    # per output channel.
    scale_bias_data = generate_weights_data((weight_shape[0], 10),
                                            scale_bias_dtype)
    scale_bias = relay.const(scale_bias_data, dtype=scale_bias_dtype)
    weight_data = generate_weights_data(weight_shape, weight_dtype)
    weight = relay.const(weight_data, dtype=weight_dtype)
    depthwise = ethosu_ops.ethosu_depthwise_conv2d(
        ifm,
        weight,
        scale_bias,
        lut=relay.const([], dtype="int8"),
        ifm_scale=0.6,
        ifm_zero_point=11,
        weight_zero_point=13,
        ofm_scale=0.26,
        ofm_zero_point=15,
        kernel_shape=kernel_shape,
        ofm_channels=channels,
        strides=strides,
        padding=padding,
        dilation=dilation,
        activation=activation,
        clip_min=15 if activation == "CLIP" else 0,
        clip_max=105 if activation == "CLIP" else 0,
        rounding_mode=rounding_mode,
        upscale="NONE",
        ifm_layout=ifm_layout,
        ofm_layout=ofm_layout,
    )
    return depthwise
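
Example #2 is a test helper that builds a single ethosu_depthwise_conv2d call from randomly generated weight and packed scale/bias data; relay, ethosu_ops, generate_weights_data and get_pad_tuple are assumed to come from the surrounding test module. A small usage sketch with illustrative shapes (not taken from the original tests):

import tvm
from tvm import relay

ifm = relay.var("ifm", shape=(1, 8, 8, 3), dtype="int8")
depthwise = make_ethosu_depthwise_conv2d(
    ifm,
    channels=3,
    kernel_shape=(3, 3),
    padding="SAME",
    strides=(1, 1),
    dilation=(1, 1),
)
func = relay.Function(relay.analysis.free_vars(depthwise), depthwise)
mod = tvm.IRModule.from_expr(func)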
Example #3
    def callback(self, pre: tvm.relay.Expr, post: tvm.relay.Expr,
                 node_map: tvm.ir.container.Map) -> tvm.relay.Expr:
        params = ethosu_patterns.MeanParams(post.op.body)
        params.ifm.tensor = post.args[0]

        ifm_shape = params.ifm.shape
        ofm_shape = params.ofm.shape
        lut = relay.const([], "int8")
        axis = params.axis
        reduced_op = params.ifm.tensor

        # Enforce 4d input
        if len(ifm_shape) < 4:
            axis = [x + 1 for x in axis]
            if len(ifm_shape) == 3:
                ifm_shape = [1, params.height, params.width, ifm_shape[2]]
            else:
                ifm_shape = [1, params.height, params.width, 1]
            reduced_op = relay.reshape(reduced_op, ifm_shape)

        # The reduced spatial dimensions become the kernel/pool shape; a
        # dimension that is not reduced keeps a filter extent of 1.
        filter_height = ifm_shape[1] if 1 in axis else 1
        filter_width = ifm_shape[2] if 2 in axis else 1
        in_channels = out_channels = ifm_shape[-1]

        # If the height is greater than max kernel height, reshape the input
        # from [filter_height, filter_width] to [1, (filter_height*filter_width)]
        # only in the case the axis is [1, 2].
        if axis == [1, 2] and filter_height > 64:
            ifm_shape = (ifm_shape[0], 1, filter_height * filter_width,
                         in_channels)
            filter_width = filter_height * filter_width
            filter_height = 1
            reduced_op = relay.reshape(reduced_op, ifm_shape)

        if axis == [1, 2] and params.keepdims:
            # Lower the mean to a depthwise conv2d that sums the window with
            # all-ones weights (int16 accumulation), followed below by an
            # elementwise multiply by approximately 1/n.
            weight_scale = 1
            weight_values = np.ones(
                [out_channels, filter_height, filter_width, in_channels])
            scale_bias = vela_api.pack_biases(
                biases=np.zeros(ifm_shape[-1]),
                ifm_scale=params.ifm.q_params.scale_f32,
                ifm_dtype=np.dtype(params.ifm.dtype),
                weight_scales=np.array([weight_scale], dtype=np.float64),
                ofm_scale=params.ofm.q_params.scale_f32,
                is_activation_tanh_or_sigmoid=False,
            )

            reduced_op = ethosu_ops.ethosu_depthwise_conv2d(
                ifm=reduced_op,
                weight=relay.const(weight_values, params.ifm.dtype),
                scale_bias=relay.const(scale_bias, "uint8"),
                lut=lut,
                ifm_scale=float(params.ifm.q_params.scale_f32),
                ifm_zero_point=int(params.ifm.q_params.zero_point),
                weight_zero_point=0,
                ofm_scale=float(params.ofm.q_params.scale_f32),
                ofm_zero_point=int(params.ofm.q_params.zero_point),
                kernel_shape=(filter_height, filter_width),
                ofm_channels=out_channels,
                ofm_dtype="int16",
            )

            # The depthwise conv above produces the per-channel sum of n input
            # values; the multiply below scales it by approximately 1/n to get
            # the mean. A small epsilon is subtracted from the divisor when n
            # is even (rounding adjustment).
            n = int(filter_height * filter_width)
            eps = 1 / (256 * (n + 1)) if n % 2 == 0 else 0

            scalar_tensor = relay.const(np.ones([1, 1, 1, 1], dtype="int16"),
                                        dtype="int16")

            reduced_op = ethosu_ops.ethosu_binary_elementwise(
                ifm=reduced_op,
                ifm2=scalar_tensor,
                lut=lut,
                operator_type="MUL",
                ifm_scale=float(params.ofm.q_params.scale_f32),
                ifm_zero_point=int(params.ofm.q_params.zero_point),
                ifm2_scale=1 / (n - eps),
                ifm2_zero_point=0,
                ofm_scale=float(params.ofm.q_params.scale_f32),
                ofm_zero_point=int(params.ofm.q_params.zero_point),
                ifm_channels=out_channels,
                ifm2_channels=out_channels,
                reversed_operands=False,
                ofm_dtype="int8",
                rounding_mode="NATURAL",
            )
        elif (params.ifm.q_params.scale_f32 == params.ofm.q_params.scale_f32
              and params.ifm.q_params.zero_point
              == params.ofm.q_params.zero_point):
            # The input and output quantization parameters match, so the mean
            # can be lowered directly to an average pool.
            reduced_op = ethosu_ops.ethosu_pooling(
                ifm=reduced_op,
                lut=lut,
                pooling_type="AVG",
                ifm_scale=float(params.ifm.q_params.scale_f32),
                ifm_zero_point=0,
                ofm_scale=float(params.ofm.q_params.scale_f32),
                ofm_zero_point=0,
                pool_shape=(filter_height, filter_width),
                ofm_channels=out_channels,
                rounding_mode="TRUNCATE",
            )
        else:
            # Otherwise fold the division by the window size into the weight
            # scale and cancel the input zero point through the bias.
            weight_scale = 1 / (filter_height * filter_width)
            weight_values = np.ones(
                [out_channels, filter_height, filter_width, in_channels])
            bias = -1 * int(
                params.ifm.q_params.zero_point) * filter_height * filter_width

            scale_bias = vela_api.pack_biases(
                biases=np.ones([ifm_shape[-1]]) * bias,
                ifm_scale=params.ifm.q_params.scale_f32,
                ifm_dtype=np.dtype(params.ifm.dtype),
                weight_scales=np.array([weight_scale], dtype=np.float64),
                ofm_scale=params.ofm.q_params.scale_f32,
                is_activation_tanh_or_sigmoid=False,
            )
            reduced_op = ethosu_ops.ethosu_depthwise_conv2d(
                ifm=reduced_op,
                weight=relay.const(weight_values, params.ifm.dtype),
                scale_bias=relay.const(scale_bias, "uint8"),
                lut=lut,
                ifm_scale=float(params.ifm.q_params.scale_f32),
                ifm_zero_point=0,
                weight_zero_point=0,
                ofm_scale=float(params.ofm.q_params.scale_f32),
                ofm_zero_point=int(params.ofm.q_params.zero_point),
                kernel_shape=(filter_height, filter_width),
                ofm_channels=out_channels,
                rounding_mode="NATURAL",
            )

        # Reshape to original ofm shape
        if len(ofm_shape) < 4:
            reduced_op = relay.reshape(reduced_op, ofm_shape)

        return reduced_op
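
The keepdims branch of Example #3 decomposes a spatial mean into a window sum (a depthwise conv with all-ones weights) followed by a multiply by approximately 1/n. A small NumPy sketch of the arithmetic being relied on, in floating point only; the real lowering operates on quantized integer tensors and folds the zero points and scales into the NPU operators:

import numpy as np

ifm = np.random.rand(1, 4, 6, 3)                   # NHWC input
n = ifm.shape[1] * ifm.shape[2]                    # filter_height * filter_width

window_sum = ifm.sum(axis=(1, 2), keepdims=True)   # what the all-ones depthwise conv computes
mean_via_sum = window_sum * (1.0 / n)              # what the elementwise MUL computes

assert np.allclose(mean_via_sum, ifm.mean(axis=(1, 2), keepdims=True))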