Example #1
    def leaky_relu(g, x, negative_slope, inplace, op_scale, op_zero_point):
        # Unpack the quantized input into a plain float tensor; the scale,
        # zero point and axis returned alongside it are not needed here.
        x, _, _, _ = symbolic_helper.dequantize_helper(g, x)

        # Run the ordinary float-domain (opset 9) symbolic.
        output = opset9.leaky_relu(g, x, negative_slope, inplace)

        # Re-quantize the result with the operator's output scale and zero point.
        return symbolic_helper.quantize_helper(g, output, op_scale,
                                               op_zero_point)
Example #2
    def mul(g, x, y, op_scale, op_zero_point):
        # Older variant: `sym_help` is this module's alias for
        # torch.onnx.symbolic_helper, and in the original module the bare
        # `mul` below resolves to the float-domain symbolic (cf. opset9.mul
        # in Example #8), not to this quantized wrapper.
        x, _, _, _ = sym_help.dequantize_helper(g, x)
        y, _, _, _ = sym_help.dequantize_helper(g, y)

        output = mul(g, x, y)

        return sym_help.quantize_helper(g, output, op_scale, op_zero_point)
Example #3
    def conv2d(
        g,
        q_input,
        q_weight,
        bias,
        stride,
        padding,
        dilation,
        groups,
        op_scale,
        op_zero_point,
    ):
        input, input_scale, _, _ = symbolic_helper.dequantize_helper(
            g, q_input)
        weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(
            g, q_weight)
        # The bias is re-quantized against the input and weight scales, then
        # unpacked back into a float tensor for the opset 9 convolution.
        q_bias = symbolic_helper.requantize_bias_helper(
            g, bias, input_scale, weight_scale)
        bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)

        output = opset9.conv2d(g, input, weight, bias, stride, padding,
                               dilation, groups)

        return symbolic_helper.quantize_helper(g, output, op_scale,
                                               op_zero_point)
Example #4
def quantize_per_tensor(g, input, scale, zero_point, dtype):
    dtype = symbolic_helper._get_const(dtype, "i", "dtype")
    zero_point = g.op("Cast",
                      zero_point,
                      to_i=symbolic_helper.scalar_type_to_onnx[dtype])
    scale = g.op("Cast", scale, to_i=_C_onnx.TensorProtoDataType.FLOAT)
    return symbolic_helper.quantize_helper(g, input, scale, zero_point)
Example #5
    def hardswish(g, x, op_scale, op_zero_point):
        x, _, _, _ = symbolic_helper.dequantize_helper(g, x)

        output = opset9.hardswish(g, x)

        return symbolic_helper.quantize_helper(g, output, op_scale,
                                               op_zero_point)
Example #6
    def sigmoid(g, x, op_scale, op_zero_point):
        x, _, _, _ = symbolic_helper.dequantize_helper(g, x)

        output = opset9.sigmoid(g, x)

        return symbolic_helper.quantize_helper(g, output, op_scale,
                                               op_zero_point)
Example #7
def quantize_per_tensor(g, input, scale, zero_point, dtype):
    dtype = symbolic_helper._get_const(dtype, "i", "dtype")
    # TODO(justinchuby): Extract all the cast ops into a helper function.
    # Cast the zero point to the target quantized dtype and the scale to
    # float32 before quantizing.
    zero_point = g.op("Cast",
                      zero_point,
                      to_i=_type_utils.JitScalarType(dtype).onnx_type())
    scale = g.op("Cast", scale, to_i=_C_onnx.TensorProtoDataType.FLOAT)
    return symbolic_helper.quantize_helper(g, input, scale, zero_point)
Example #8
    def mul(g, x, y, op_scale, op_zero_point):
        x, _, _, _ = symbolic_helper.dequantize_helper(g, x)
        y, _, _, _ = symbolic_helper.dequantize_helper(g, y)

        output = opset9.mul(g, x, y)

        return symbolic_helper.quantize_helper(g, output, op_scale,
                                               op_zero_point)
Example #9
    def group_norm(g, x, num_groups, weight, bias, eps, op_scale,
                   op_zero_point):
        x, _, _, _ = symbolic_helper.dequantize_helper(g, x)

        output = opset9.group_norm(g, x, num_groups, weight, bias, eps, False)

        return symbolic_helper.quantize_helper(g, output, op_scale,
                                               op_zero_point)
Example #10
    def add_relu(g, x, y, op_scale, op_zero_point):
        x, _, _, _ = symbolic_helper.dequantize_helper(g, x)
        y, _, _, _ = symbolic_helper.dequantize_helper(g, y)

        output = opset9.add(g, x, y)
        output = opset9.relu(g, output)

        return symbolic_helper.quantize_helper(g, output, op_scale,
                                               op_zero_point)
Example #11
    def linear(g, q_input, q_weight, bias, op_scale, op_zero_point):
        # Older variant: dequantize_helper returned a 3-tuple here, and in the
        # original module the bare `linear` call resolves to the float-domain
        # symbolic (cf. opset9.linear in Example #14).
        input, input_scale, _ = sym_help.dequantize_helper(g, q_input)
        weight, weight_scale, _ = sym_help.dequantize_helper(g, q_weight)
        q_bias = sym_help.requantize_bias_helper(g, bias, input_scale, weight_scale)
        bias, _, _ = sym_help.dequantize_helper(g, q_bias)

        output = linear(g, input, weight, bias)

        return sym_help.quantize_helper(g, output, op_scale, op_zero_point)
Example #12
    def layer_norm(g, x, normalized_shape, weight, bias, eps, op_scale,
                   op_zero_point):
        x, _, _, _ = symbolic_helper.dequantize_helper(g, x)

        output = opset9.layer_norm(g, x, normalized_shape, weight, bias, eps,
                                   False)

        return symbolic_helper.quantize_helper(g, output, op_scale,
                                               op_zero_point)
Example #13
    def conv2d_relu(g, q_input, q_weight, bias, stride, padding, dilation,
                    groups, op_scale, op_zero_point):
        # Older variant: in the original module the bare `conv2d` and `relu`
        # calls resolve to the float-domain symbolics (cf. opset9 in Example #3).
        input, input_scale, _ = sym_help.dequantize_helper(g, q_input)
        weight, weight_scale, _ = sym_help.dequantize_helper(g, q_weight)
        q_bias = sym_help.requantize_bias_helper(g, bias, input_scale, weight_scale)
        bias, _, _ = sym_help.dequantize_helper(g, q_bias)

        output = conv2d(g, input, weight, bias, stride, padding, dilation, groups)
        output = relu(g, output)

        return sym_help.quantize_helper(g, output, op_scale, op_zero_point)
Example #14
    def linear(g, q_input, q_weight, bias, op_scale, op_zero_point):
        input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
        # `axis` carries the weight's per-channel quantization axis (if any) so
        # the bias can be re-quantized along the same axis.
        weight, weight_scale, _, axis = symbolic_helper.dequantize_helper(g, q_weight)
        q_bias = symbolic_helper.requantize_bias_helper(
            g, bias, input_scale, weight_scale, axis
        )
        bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)

        output = opset9.linear(g, input, weight, bias)

        return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)
Example #15
    def cat(
        g,
        q_inputs: _C.Value,
        dim: int,
        op_scale: _C.Value,
        op_zero_point: _C.Value,
    ) -> _C.Value:
        # Dequantize every tensor in the packed input list, concatenate in the
        # float domain, then re-quantize the result.
        unpacked_inputs = symbolic_helper._unpack_list(q_inputs)
        dequantized = [
            symbolic_helper.dequantize_helper(g, input)[0]
            for input in unpacked_inputs
        ]
        concatenated = g.op("Concat", *dequantized, axis_i=dim)
        return symbolic_helper.quantize_helper(g, concatenated, op_scale,
                                               op_zero_point)
Example #16
    def instance_norm(
        g,
        q_input,
        weight,
        bias,
        eps,
        op_scale,
        op_zero_point,
    ):
        input, _, _, _ = symbolic_helper.dequantize_helper(g, q_input)

        # Positional arguments map to opset9.instance_norm's running_mean,
        # running_var, use_input_stats, momentum, eps and cudnn_enabled.
        output = opset9.instance_norm(g, input, weight, bias, None, None,
                                      False, 0, eps, False)

        return symbolic_helper.quantize_helper(g, output, op_scale,
                                               op_zero_point)
Example #17
    def hardswish(g, x, op_scale, op_zero_point):
        # Older variant of Example #5: in the original module the bare
        # `hardswish` call resolves to the float-domain symbolic, not to this
        # quantized wrapper.
        x, _, _ = sym_help.dequantize_helper(g, x)

        output = hardswish(g, x)

        return sym_help.quantize_helper(g, output, op_scale, op_zero_point)
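Every example above follows the same shape: dequantize the quantized inputs, run the ordinary float-domain (opset 9) symbolic, and re-quantize the result with the operator's output scale and zero point. The sketch below shows how a symbolic written this way might be hooked into export via torch.onnx.register_custom_op_symbolic. It is a minimal sketch, not the library's own wiring: the op name "quantized::hardswish", the opset version 13, and the quantized_model / example_input placeholders are illustrative assumptions, and recent PyTorch releases already ship quantized symbolics like these, so an explicit registration is only needed for ops the exporter does not cover.

import torch
from torch.onnx import symbolic_helper
from torch.onnx import symbolic_opset9 as opset9


def quantized_hardswish(g, x, op_scale, op_zero_point):
    # Same dequantize -> float op -> re-quantize pattern as the examples above.
    x, _, _, _ = symbolic_helper.dequantize_helper(g, x)
    output = opset9.hardswish(g, x)
    return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)


# Hypothetical registration: map the quantized op to the symbolic above for
# opset 13, then export the already-quantized model as usual.
torch.onnx.register_custom_op_symbolic(
    "quantized::hardswish", quantized_hardswish, 13)
# torch.onnx.export(quantized_model, example_input, "model.onnx", opset_version=13)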