    def globals_generator(cls, gen, node, qrec, pnode, fnode) -> bool:
        if isinstance(pnode, FcParameters):
            gen_scales(gen, pnode, pnode, qrec)
            infos, infos_comment = np.array([0, 0, 0, 0, 0]), "no activation"
            fnode = pnode
            filt_q = qrec
        elif isinstance(pnode, LinearFusionParameters) and isinstance(
                fnode, FcParameters) and pnode.fusion_type == "linear_active":
            cnodes = pnode.contained_nodes()
            quants = [
                gen.G.quantization[NodeId(pnode, fnode)] for fnode in cnodes
            ]
            filt_q = quants[0]
            gen_scales(gen, pnode, cnodes[0], quants[0])
            infos, infos_comment = gen_act_infos(cnodes[1], quants[1])
        else:
            return False
        infos = np.append(infos, [0, 0, 0, 0])
        comment = str.format("BiasQ: {} ", 0) + infos_comment
        infos[5] = 0  # BiasQ

        if filt_q.cache.get('ne16'):
            conv_mul_bias = filt_q.cache.get('mul_biases_q')
            prenorm = conv_mul_bias.pre_normalization if isinstance(
                conv_mul_bias, MultMulBiasScaleQType) else 0
            pad_value = np.array(filt_q.in_qs[0].zero_point).astype(np.int16)
            pad_value1 = np.bitwise_and(pad_value, 0xFF)
            pad_value2 = np.bitwise_and(pad_value, 0xFF00) >> 8
            w_offset = -np.array(filt_q.in_qs[1].zero_point).astype(np.int32)
            w_offset1 = np.bitwise_and(w_offset, 0xFF)
            w_offset2 = np.bitwise_and(w_offset, 0xFF00) >> 8
            w_offset3 = np.bitwise_and(w_offset, 0xFF0000) >> 16
            w_offset4 = np.bitwise_and(w_offset, 0xFF000000) >> 24

            infos = np.append(
                infos,
                verify_scalar([
                    prenorm if prenorm else 0, pad_value1, pad_value2,
                    w_offset1, w_offset2, w_offset3, w_offset4
                ]))

        cname, file_name = gen_constant(gen, pnode, fnode, INFOS)
        const_info = ConstantInfo(file_name,
                                  QType.Pow2(bits=8, q=0, signed=True),
                                  contents=infos)
        gen.globals.append(
            GlobalArgInfo("int8",
                          cname,
                          gen.opts['default_global_home_location'],
                          gen.opts['default_global_exec_location'],
                          const_info=const_info,
                          comment=comment))
        return True
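# A minimal standalone sketch (plain Python ints, made-up values) of the
# NE16 zero-point packing above: the 16-bit pad value is split into two
# bytes and the negated 32-bit weight zero-point into four, little-endian,
# so the accelerator can reassemble them.
pad_value = 3                                  # example activation zero-point
pad_lo, pad_hi = pad_value & 0xFF, (pad_value >> 8) & 0xFF

w_offset = -128 & 0xFFFFFFFF                   # two's-complement view of -128
w_bytes = [(w_offset >> s) & 0xFF for s in (0, 8, 16, 24)]
assert w_bytes == [0x80, 0xFF, 0xFF, 0xFF]
assert w_bytes == list((-128).to_bytes(4, 'little', signed=True))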
# Example 2
    def globals_generator(cls, gen, node, qrec, pnode, fnode) -> bool:
        if isinstance(pnode, MatMulOpParameters):
            mul_node = pnode
            mul_qrec = qrec
            fnode = pnode
            infos, comment = np.array([0, 0, 0, 0, 0]), "no activation"
        elif isinstance(pnode, MatMulOpFusionParameters) and isinstance(fnode, MatMulOpParameters):
            cnodes = pnode.contained_nodes()
            quants = [gen.G.quantization[NodeId(
                pnode, fnode)] for fnode in cnodes]
            mul_node = cnodes[0]
            mul_qrec = quants[0]
            infos, comment = gen_act_infos(cnodes[1], quants[1])
        else:
            return False

        if len(mul_qrec.in_qs[1].scale) > 1:
            gen_scales(gen, pnode, mul_node, mul_qrec)
            pl_scale = 0
            pl_scalen = 0
        else:
            pl_scale = mul_qrec.cache['mul_biases_q'].qbiases[0]
            pl_scalen = mul_qrec.cache['mul_biases_q'].qnorms[0]

        infos = np.append(infos, [0, 0, pl_scale, pl_scalen])

        if mul_qrec.cache.get('ne16'):
            conv_mul_bias = mul_qrec.cache.get('mul_biases_q')
            prenorm = conv_mul_bias.pre_normalization if isinstance(conv_mul_bias, MultMulBiasScaleQType) else 0
            pad_value = np.array(mul_qrec.in_qs[0].zero_point).astype(np.int16)
            pad_value1 = np.bitwise_and(pad_value, 0xFF)
            pad_value2 = np.bitwise_and(pad_value, 0xFF00) >> 8
            w_offset = -np.array(mul_qrec.in_qs[1].zero_point).astype(np.int32)
            w_offset1 = np.bitwise_and(w_offset, 0xFF)
            w_offset2 = np.bitwise_and(w_offset, 0xFF00) >> 8
            w_offset3 = np.bitwise_and(w_offset, 0xFF0000) >> 16
            w_offset4 = np.bitwise_and(w_offset, 0xFF000000) >> 24

            infos = np.append(
                infos, verify_scalar([prenorm if prenorm else 0, pad_value1, pad_value2, w_offset1, w_offset2, w_offset3, w_offset4]))

        cname, file_name = gen_constant(gen, pnode, fnode, INFOS)
        const_info = ConstantInfo(file_name, QType.Pow2(bits=8, q=0, signed=True), contents=infos)
        gen.globals.append(GlobalArgInfo("int8", cname,
                           gen.opts['default_global_home_location'],
                           gen.opts['default_global_exec_location'],
                           const_info=const_info,
                           comment=comment))
        return True
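# Sketch of the qbiases/qnorms convention these generators rely on: a real
# scale factor is stored as a small integer multiplier plus a right shift,
# scale ~= qbias * 2**-qnorm. `quantize_scale` is a hypothetical helper for
# illustration only, not the nntool API.
import numpy as np

def quantize_scale(scale, bits=8):
    norm = int(np.floor(np.log2((2**(bits - 1) - 1) / scale)))
    qbias = int(round(scale * 2**norm))
    return qbias, norm

qbias, qnorm = quantize_scale(0.1234)
assert abs(qbias * 2.0**-qnorm - 0.1234) < 1e-3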
# Example 3
    def globals_generator(cls, gen, node, qrec, pnode, fnode) -> bool:
        if isinstance(pnode, (GlobalPoolingParameters, PoolingParameters,
                              GlobalSumPoolParameters)):
            compute_in_out_scale(qrec)
            infos, comment = np.array([
                qrec.cache['scale_mul_biases_q'].qbiases[0],
                qrec.cache['scale_mul_biases_q'].qnorms[0], 0, 0, 0
            ]), "no activation"
            fnode = pnode
            pool_q = qrec
        elif isinstance(pnode, ActivationFusion) and isinstance(
                fnode, (GlobalPoolingParameters, PoolingParameters)):
            cnodes = pnode.contained_nodes()
            quants = [
                gen.G.quantization[NodeId(pnode, fnode)] for fnode in cnodes
            ]
            pool_q = quants[0]
            infos, comment = gen_act_infos(cnodes[1], quants[1])
        else:
            return False
        infos = np.append(infos, [0, 0, 0, 0])
        if isinstance(fnode, GlobalSumPoolParameters):
            compute_in_out_scale(pool_q, in_idx=0, out_idx=0)
            infos[0] = 0
            infos[1] = 0
            infos[5] = pool_q.cache['scale_mul_biases_q'].qbiases[0]
            infos[6] = pool_q.cache['scale_mul_biases_q'].qnorms[0]

        cname, file_name = gen_constant(gen, pnode, fnode, INFOS)
        const_info = ConstantInfo(file_name,
                                  QType.Pow2(bits=8, q=0, signed=True),
                                  contents=infos)
        gen.globals.append(
            GlobalArgInfo("int8",
                          cname,
                          gen.opts['default_global_home_location'],
                          gen.opts['default_global_exec_location'],
                          const_info=const_info,
                          comment=comment))
        return True
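# `compute_in_out_scale` is an nntool helper; conceptually it folds the
# in/out scale ratio into the 'scale_mul_biases_q' qbias/qnorm pair read
# above, roughly: ratio = in_scale / out_scale; qbias, qnorm ~
# quantize_scale(ratio) (see the sketch after example #2).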
# Example 4
    def globals_generator(cls, gen, node, qrec, pnode, fnode) -> bool:
        if not cls.cache_values(node, qrec):
            return False
        in_q = qrec.in_qs[0]
        out_q = qrec.out_qs[0]
        comment = f'in q: {in_q} out_q: {out_q}'
        if qrec.cache['kernel_type'] == 'KOP_CONVERT_FP_FP_ZEROPOINT':
            bits = 8 if in_q.dtype in [np.int8, np.uint8] else 16
            if in_q.signed:
                offset = ((int(math.pow(2, bits)) + in_q.zero_point[0] -
                           out_q.zero_point[0]) %
                          int(math.pow(2, bits))).astype(out_q.dtype)
            else:
                offset = (int(math.pow(2, bits)) - in_q.zero_point[0] +
                          out_q.zero_point[0]).astype(out_q.dtype)
            contents = np.array(list(offset.tobytes()) + ([0] * 7),
                                dtype=np.uint8)
        elif qrec.cache['kernel_type'] == 'KOP_CONVERT_FP_FP':
            # no infos needed
            return True
        elif qrec.cache['kernel_type'] == 'KOP_CONVERT_FP_FP_SCALE':
            scale = in_q.scale / out_q.scale
            in_abs_zp = in_q.zero_point.astype(np.int32)
            out_abs_zp = out_q.zero_point.astype(np.int32)
            if out_q.bits > in_q.bits:
                zero_adjust = (np.round(-in_abs_zp * scale) +
                               out_abs_zp).astype(np.int32)
            else:
                zero_adjust = (-in_abs_zp +
                               np.round(out_abs_zp * 1 / scale)).astype(
                                   np.int32)

            zero_adjust = list(zero_adjust.tobytes())

            if len(scale) > 1:
                raise NotImplementedError(
                    'multiscale conversion not supported')
            scale = scale[0]
            if in_q.dtype_bits == 8 and out_q.dtype_bits == 16:
                # scale Q16 * Q8 OK
                scale_adjust = MultMulBiasScaleQType(scale=scale,
                                                     dtype=np.int16,
                                                     available_bits=16)
            else:
                scale_adjust = MultMulBiasScaleQType(scale=scale,
                                                     dtype=np.int8,
                                                     available_bits=8)
            qbias = list(scale_adjust.qbiases.tobytes())
            qbias = qbias + [0] * (2 - len(qbias))
            qnorm = list(scale_adjust.qnorms.tobytes())
            contents = np.array(zero_adjust + qbias + qnorm + [0],
                                dtype=np.int8)
        elif qrec.cache['kernel_type'] == 'KOP_CONVERT_FL_FP':
            qbias = list((1 / out_q.scale).astype(np.float32).tobytes())
            zero_adjust = list((out_q.zero_point.astype(np.int32) *
                                out_q.scale).astype(np.float32).tobytes())
            contents = np.array(zero_adjust + qbias, dtype=np.int8)
        elif qrec.cache['kernel_type'] == 'KOP_CONVERT_FP_FL':
            qbias = list((in_q.scale).astype(np.float32).tobytes())
            zero_adjust = list((-in_q.zero_point.astype(np.int32)).astype(
                np.float32).tobytes())
            contents = np.array(zero_adjust + qbias, dtype=np.int8)
        else:
            raise ValueError(f"strange dtype change in {pnode.name}")
        cname, file_name = gen_constant(gen, pnode, pnode, INFOS)
        const_info = ConstantInfo(file_name,
                                  QType.Pow2(bits=8, q=0, signed=True),
                                  contents=contents)

        gen.globals.append(
            GlobalArgInfo("int8",
                          cname,
                          gen.opts['default_global_home_location'],
                          gen.opts['default_global_exec_location'],
                          const_info=const_info,
                          comment=comment))
        return True
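# Worked check of the ZEROPOINT-only branch above: with equal in/out
# scales, requantization reduces to adding a constant in wrap-around
# arithmetic. Example numbers (int8 -> uint8, bits = 8), not from the code:
import numpy as np

in_zp, out_zp = -5, 123
offset = (2**8 + in_zp - out_zp) % 2**8        # signed-input branch
x = np.array([-5, 0, 100], dtype=np.int8)
converted = x.astype(np.uint8) + np.uint8(offset)
# reference: q_out = q_in - in_zp + out_zp
expected = (x.astype(np.int32) - in_zp + out_zp).astype(np.uint8)
assert np.array_equal(converted, expected)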
# Example 5
    def globals_generator(cls, gen, node, qrec, pnode, fnode) -> bool:
        names = {val: idx for idx, val in enumerate(RNNParameters.INPUT_NAMES)}
        w_q = qrec.in_qs[names['r_2_i_w']]
        out_q = qrec.out_qs[0]
        out_scale = qrec.cache["s_2_o_q"]
        assert len(w_q.zero_point) == 1
        assert len(out_scale.qbiases) == 1
        assert len(out_scale.qnorms) == 1
        if out_q.dtype == np.uint8:
            if qrec.cache.get('act_qtype'):
                min_val = qrec.cache['act_qtype'].quantize(-1)
                max_val = qrec.cache['act_qtype'].quantize(1)
            else:
                min_val = max_val = 0
            contents = np.array([
                min_val, max_val, (-w_q.zero_point[0]).astype(np.int8),
                out_q.zero_point[0], 0, out_scale.qbiases[0].astype(
                    np.int8), out_scale.qnorms[0].astype(np.int8), 0, 0
            ],
                                dtype=np.int8)
        else:
            out_zp = out_q.zero_point[0].astype(np.uint16)
            contents = np.array([
                0, 0, (-w_q.zero_point[0]).astype(np.int8), out_zp & 0xff,
                out_zp >> 8, out_scale.qbiases[0].astype(
                    np.int8), out_scale.qnorms[0].astype(
                        np.int8), qrec.cache["i_2_s_q"].pre_normalization,
                qrec.cache["s_2_s_q"].pre_normalization
            ],
                                dtype=np.int8)
        comment = f"A0: {1} B0: {-1}, ZP: {w_q.zero_point}, OutS: {out_scale.qbiases[0]}, OutN: {out_scale.qnorms[0]}"

        cname, file_name = gen_constant(gen, pnode, pnode, INFOS)
        const_info = ConstantInfo(file_name,
                                  QType.Pow2(bits=8, q=0, signed=True),
                                  contents=contents)

        gen.globals.append(
            GlobalArgInfo("int8",
                          cname,
                          gen.opts['default_global_home_location'],
                          gen.opts['default_global_exec_location'],
                          const_info=const_info,
                          comment=comment))

        state_scale = qrec.cache["s_2_s_q"]
        if node.rnn_same_inout_scale:
            contents = interleave(state_scale.qbiases, state_scale.qnorms)
        else:
            input_scale = qrec.cache["i_2_s_q"]
            contents = interleave(state_scale.qbiases, input_scale.qbiases,
                                  state_scale.qnorms, input_scale.qnorms)

        cname, file_name = gen_constant(gen, pnode, pnode, "scalenorm")
        const_info = ConstantInfo(file_name,
                                  QType.Pow2(bits=8, q=0, signed=False),
                                  contents=contents)
        gen.globals.append(
            GlobalArgInfo("uint8",
                          cname,
                          gen.opts['default_global_home_location'],
                          gen.opts['default_global_exec_location'],
                          const_info=const_info,
                          comment=f"{node.name} scales and norms"))
        if node.rnn_states_as_inputs:
            gen.globals.append(
                GlobalResetArgInfo(f"{node.name}_Reset", 'AT_MEM_L2',
                                   'AT_MEM_UNDEF'))
        return True
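# `interleave` is an nntool helper; a minimal equivalent for reference,
# assuming equal-length 1-D arrays. It alternates elements so that each
# channel's scale and norm bytes end up adjacent in the generated constant.
import numpy as np

def interleave_ref(*arrays):
    return np.stack(arrays, axis=-1).flatten()

assert list(interleave_ref(np.array([1, 2]), np.array([10, 20]))) == [1, 10, 2, 20]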
# Example 6
    def globals_generator(cls, gen, node, qrec, pnode, fnode) -> bool:
        names = {val: idx for idx, val in enumerate(GRUParameters.INPUT_NAMES)}
        scales = []
        weight_zero = None
        for gate in ['r', 'h', 'z']:
            input_order = ['r', 'w'] if gate == 'h' else ['w', 'r']
            for input_tensor in input_order:
                scale_name = f'{input_tensor}_2_{gate}_q'
                weight_name = f'{input_tensor}_2_{gate}_w'
                if weight_zero is None:
                    weight_zero = qrec.in_qs[names[weight_name]].zero_point[0]
                else:
                    assert weight_zero == qrec.in_qs[
                        names[weight_name]].zero_point[0]
                qscale = qrec.cache[scale_name]
                scales.append(qscale.qbiases)
                scales.append(qscale.qnorms)

        contents = interleave(*scales)

        cname, file_name = gen_constant(gen, pnode, pnode, "scalenorm")
        const_info = ConstantInfo(file_name,
                                  QType.Pow2(bits=8, q=0, signed=False),
                                  contents=contents)
        gen.globals.append(
            GlobalArgInfo("uint8",
                          cname,
                          gen.opts['default_global_home_location'],
                          gen.opts['default_global_exec_location'],
                          const_info=const_info,
                          comment=f"{node.name} scales and norms"))

        out_q = qrec.out_qs[0]

        sigmoid_table = interleave(SIGMOID_TABLE & 0xff,
                                   SIGMOID_TABLE >> 8).astype(np.int8)
        if out_q.dtype == np.uint8:
            contents = np.concatenate(
                (sigmoid_table,
                 np.array([-weight_zero.astype(np.int8), 0], dtype=np.int8)))
        else:
            contents = np.concatenate(
                (sigmoid_table,
                 np.array([
                     -weight_zero.astype(np.int8), qrec.cache['gate_prenorm']
                 ],
                          dtype=np.int8)))

        comment = (f"WZP: {weight_zero}")
        cname, file_name = gen_constant(gen, pnode, pnode, INFOS)
        const_info = ConstantInfo(file_name,
                                  QType.Pow2(bits=8, q=0, signed=True),
                                  contents=contents)

        gen.globals.append(
            GlobalArgInfo("int8",
                          cname,
                          gen.opts['default_global_home_location'],
                          gen.opts['default_global_exec_location'],
                          const_info=const_info,
                          comment=comment))

        if node.rnn_states_as_inputs:
            gen.globals.append(
                GlobalResetArgInfo(f"{node.name}_Reset", 'AT_MEM_L2',
                                   'AT_MEM_UNDEF'))
        return True
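# Standalone check of the LUT packing above: interleaving the low and high
# bytes of a uint16 table is exactly the little-endian byte view of that
# table. `table` is a stand-in, not the real SIGMOID_TABLE.
import numpy as np

table = np.arange(256, dtype=np.uint16) * 257  # stand-in 16-bit LUT
packed = np.stack((table & 0xFF, table >> 8), axis=-1).flatten().astype(np.uint8)
assert np.array_equal(packed, table.astype('<u2').view(np.uint8))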
def mult8_infos_generator(gen, node, qrec, pnode, fnode) -> bool:
    if fnode is not None:
        return False
    # if isinstance(pnode, Conv2DParameters):
    #     for_ne16 = qrec.cache.get('ne16')
    #     in_zero_point = qrec.in_qs[0].zero_point
    #     conv_mul_bias = qrec.cache.get('mul_biases_q')
    #     prenorm = conv_mul_bias.pre_normalization if isinstance(conv_mul_bias, MultMulBiasScaleQType) else 0
    #     act_infos(gen, pnode, pnode, None, None, prenorm=prenorm, extra1=0,
    #               for_ne16=for_ne16, in_zero_point=in_zero_point)
    # elif isinstance(pnode, (GlobalPoolingParameters, PoolingParameters)):
    #     compute_in_out_scale(qrec)
    #     act_infos(gen, pnode, pnode, None, qrec)
    elif isinstance(pnode, ActivationParameters):
        act_infos(gen, pnode, pnode, pnode, gen.G.quantization[NodeId(pnode)])
    # elif isinstance(pnode, ConvFusionParameters):
    #     cnodes = node.contained_nodes()
    #     quants = [gen.G.quantization[NodeId(node, fnode)] for fnode in cnodes]
    #     for_ne16 = any([qrec.cache.get('ne16') for qrec in quants])
    #     in_zero_point = quants[0].in_qs[0].zero_point
    #     for qrec in quants:
    #         compute_in_out_scale(qrec)
    #     if node.fusion_type.startswith('linear') or node.fusion_type.startswith('conv') or node.fusion_type.startswith('pool'):
    #         if node.fusion_type in ("pool_active"):
    #             act_infos(gen, pnode, cnodes[0], cnodes[1], quants[1],
    #                       extra1=0, for_ne16=for_ne16, in_zero_point=in_zero_point)
    #         else:
    #             conv_mul_bias = quants[0].cache.get('mul_biases_q')
    #             prenorm = conv_mul_bias.pre_normalization if isinstance(conv_mul_bias, MultMulBiasScaleQType) else 0
    #             if node.fusion_type in ("conv_active_pool", "conv_active", "linear_active"):
    #                 act_infos(gen, pnode, cnodes[0], cnodes[1], quants[1], prenorm=prenorm,
    #                           extra1=0, for_ne16=for_ne16, in_zero_point=in_zero_point)
    #             elif node.fusion_type == "conv_pool_active":
    #                 act_infos(gen, pnode, cnodes[0], cnodes[2], quants[2], prenorm=prenorm,
    #                           extra1=0, for_ne16=for_ne16, in_zero_point=in_zero_point)
    #             elif node.fusion_type == "conv_pool":
    #                 act_infos(gen, pnode, cnodes[0], None, None, prenorm=prenorm,
    #                           extra1=0, for_ne16=for_ne16)
    elif isinstance(pnode, MatrixMulParameters):
        compute_in_out_scale(qrec, in_idx=(0, 1), out_idx=0)
        act_infos(gen,
                  pnode,
                  pnode,
                  None,
                  None,
                  extra1=qrec.cache['scale_mul_biases_q'].qbiases[0],
                  extra2=qrec.cache['scale_mul_biases_q'].qnorms[0])
    elif isinstance(pnode, SoftMaxParameters):
        act_infos(gen, pnode, pnode, pnode, qrec)
    # elif isinstance(pnode, ActivationFusionBase):
    #     cnodes = node.contained_nodes()
    #     quants = [gen.G.quantization[NodeId(node, fnode)] for fnode in cnodes]
    #     for qrec in quants:
    #         compute_in_out_scale(qrec)
    #     if isinstance(cnodes[0], (GlobalPoolingParameters, PoolingParameters)):
    #         act_infos(gen, pnode, cnodes[0], cnodes[1], quants[1])
    #     else:
    #         return False
    #     return True
    elif isinstance(pnode, (MatMulOpParameters, MatMulOpFusionParameters)):
        if isinstance(pnode, MatMulOpFusionParameters):
            cnodes = node.contained_nodes()
            quants = [
                gen.G.quantization[NodeId(node, fnode)] for fnode in cnodes
            ]
            mul_node = cnodes[0]
            mul_qrec = quants[0]
            act_node = cnodes[1]
            act_qrec = quants[1]
        else:
            mul_node = pnode
            mul_qrec = qrec
            act_node = None
            act_qrec = None

        if len(pnode.in_dims) == 3 and len(mul_qrec.in_qs[0].scale) > 1:
            gen_scales(gen, pnode, mul_node, mul_qrec)
            extra3 = 0
            extra4 = 0
        else:
            extra3 = mul_qrec.cache['mul_biases_q'].qbiases[0]
            extra4 = mul_qrec.cache['mul_biases_q'].qnorms[0]

        act_infos(gen,
                  pnode,
                  mul_node,
                  act_node,
                  act_qrec,
                  extra3=extra3,
                  extra4=extra4)
    elif isinstance(pnode, QuantizeParameters):
        in_q = qrec.in_qs[0]
        out_q = qrec.out_qs[0]
        comment = f'in q: {in_q} out_q: {out_q}'
        if qrec.cache['kernel_type'] == 'KOP_CONVERT_FP_FP_ZEROPOINT':
            bits = 8 if in_q.dtype in [np.int8, np.uint8] else 16
            if in_q.signed:
                contents = ((int(math.pow(2, bits)) + in_q.zero_point[0] -
                             out_q.zero_point[0]) %
                            int(math.pow(2, bits))).astype(np.uint8)
            else:
                contents = (int(math.pow(2, bits)) - in_q.zero_point[0] +
                            out_q.zero_point[0]).astype(np.uint8)
        # if in_q.dtype == np.int8 and out_q.dtype == np.uint8:
        #     if not np.allclose(in_q.scale, out_q.scale):
        #         return False
        #     if not np.all(in_q.zero_point == (out_q.zero_point - 128)):
        #         return False
        #     contents = (
        #         (256 + in_q.zero_point[0] - out_q.zero_point[0]) % 256).astype(np.uint8)
        # elif in_q.dtype == np.uint8 and out_q.dtype == np.int8:
        #     if not np.allclose(in_q.scale, out_q.scale):
        #         return False
        #     if not np.all(in_q.zero_point == (out_q.zero_point - 128)):
        #         return False
        #     contents = (
        #         256 - in_q.zero_point[0] + out_q.zero_point[0]).astype(np.uint8)
        elif in_q.dtype == np.int8 and out_q.dtype == np.int16:
            if qrec.cache['kernel_type'] == 'KOP_CONVERT_FP_FP':
                return True
            raise NotImplementedError()
        elif in_q.dtype == np.int16 and out_q.dtype == np.int8:
            if qrec.cache['kernel_type'] == 'KOP_CONVERT_FP_FP':
                return True
            raise NotImplementedError()
        else:
            raise ValueError(f"strange dtype change in {pnode.name}")
        cname, file_name = gen_constant(gen, pnode, pnode, INFOS)
        const_info = ConstantInfo(file_name,
                                  QType.Pow2(bits=8, q=0, signed=True),
                                  contents=contents)

        gen.globals.append(
            GlobalArgInfo("int8",
                          cname,
                          gen.opts['default_global_home_location'],
                          gen.opts['default_global_exec_location'],
                          const_info=const_info,
                          comment=comment))
    else:
        return False
    return True
def act_infos(gen,
              pnode,
              fnode,
              act_params,
              act_q,
              extra1=0,
              extra2=0,
              extra3=0,
              extra4=0,
              extra5=None,
              extra6=None,
              prenorm=0,
              extra_name='',
              for_ne16=False,
              in_zero_point=0):
    if isinstance(pnode, FilterParameters):
        comment = str.format("BiasQ: {}", extra1)
    elif isinstance(pnode, MatrixAddParameters):
        comment = str.format(
            "In1Scale: {} In1ScaleN: {} OutScale: {} OutScaleN: {}", extra1,
            extra2, extra3, extra4)
    else:
        comment = ""

    if act_params is None:
        contents = np.array([0, 0, 0, 0, 0], dtype=np.int8)
    elif isinstance(act_params, ReluActivationParameters):
        compute_in_out_scale(act_q)
        actscale = act_q.cache['scale_mul_biases_q'].qbiases[0]
        actscalen = act_q.cache['scale_mul_biases_q'].qnorms[0]
        if act_params.upper_bound is None:  # or fnode is not None:
            if act_q.in_qs[0].zero_point == 0:
                contents = np.array([actscale, actscalen, 0, 0, 0],
                                    dtype=np.int8)
                if len(comment) == 0:
                    comment = "all 0"
            else:
                fac_1 = act_q.in_qs[0].zero_point
                contents = np.array([actscale, actscalen, fac_1, 0, 0],
                                    dtype=np.int8)
                comment += str.format(
                    "in: {:05f} out: {:05f} A0: {} B0: 0 C0: 0",
                    act_q.in_qs[0].scale[0], act_q.out_qs[0].scale[0],
                    fac_1[0])
        else:
            if act_q.in_qs[0].zero_point == 0:
                fac_1 = act_q.in_qs[0].quantize(act_params.upper_bound)
                contents = np.array([actscale, actscalen, fac_1, 0, 0],
                                    dtype=np.int8)
                comment += str.format(
                    "in: {:05f} out: {:05f} A0: {} B0: 0 C0: 0",
                    act_q.in_qs[0].scale[0], act_q.out_qs[0].scale[0],
                    fac_1[0])
            else:
                fac_1 = act_q.in_qs[0].zero_point
                fac_2 = act_q.in_qs[0].quantize(act_params.upper_bound)
                contents = np.array([actscale, actscalen, fac_1, fac_2, 0],
                                    dtype=np.int8)
                comment += str.format(
                    "in: {:05f} out: {:05f} A0: {} B0: {} C0: 0",
                    act_q.in_qs[0].scale[0], act_q.out_qs[0].scale[0],
                    fac_1[0], fac_2[0])
    elif isinstance(act_params, HSigmoidActivationParameters):
        # currently combines all scaling factors into one scale and shift
        assert act_q.in_qs[0].zero_point == 0 and act_q.out_qs[
            0].zero_point == 0, "asymmetric not supported"
        fac_1, upper_bound, _ = hsigmoid_mult_gen_factors(act_params, act_q)
        contents = np.array([
            act_q.cache['scale_mul_biases_q'].qbiases[0],
            act_q.cache['scale_mul_biases_q'].qnorms[0], upper_bound, fac_1, 1
        ],
                            dtype=np.int8)
        comment += str.format(
            "in: {:05f} out: {:05f} qbias: {} qnorm: {} A0: {} B0: {} C0: 1",
            act_q.in_qs[0].scale[0], act_q.out_qs[0].scale[0],
            act_q.cache['scale_mul_biases_q'].qbiases[0],
            act_q.cache['scale_mul_biases_q'].qnorms[0], upper_bound[0],
            fac_1[0])
    elif isinstance(act_params, HSwishActivationParameters):
        # currently combines all scaling factors into one scale and shift
        assert act_q.in_qs[0].zero_point == 0 and act_q.out_qs[
            0].zero_point == 0, "asymmetric not supported"
        fac_1, upper_bound, _ = hswish_mult_gen_factors(act_q)
        contents = np.array([
            act_q.cache['scale_mul_biases_q'].qbiases[0],
            act_q.cache['scale_mul_biases_q'].qnorms[0], upper_bound, fac_1, 1
        ],
                            dtype=np.int8)
        comment += str.format(
            "in: {:05f} out: {:05f} qbias: {} qnorm: {} A0: {} B0: {} C0: 1",
            act_q.in_qs[0].scale[0], act_q.out_qs[0].scale[0],
            act_q.cache['scale_mul_biases_q'].qbiases[0],
            act_q.cache['scale_mul_biases_q'].qnorms[0], upper_bound[0],
            fac_1[0])
    elif isinstance(act_params, SoftMaxParameters):
        assert act_q.in_qs[0].zero_point == 0 and act_q.out_qs[
            0].zero_point == 0, "asymmetric not supported"
        norm = 15 + np.ceil(np.log2(act_q.in_qs[0].scale))
        contents = np.array([norm, 0, 0, 0, 0], dtype=np.int8)
        comment += str.format("in: {:05f} out: {:05f} NORM: {}",
                              act_q.in_qs[0].scale[0],
                              act_q.out_qs[0].scale[0], int(norm[0]))
    elif isinstance(act_params, LeakyActivationParameters):
        assert act_q.in_qs[0].zero_point == 0 and act_q.out_qs[
            0].zero_point == 0, "asymmetric not supported"
        compute_in_out_scale(act_q)
        leak_factor_quant = leak_mult_gen_factor_q7(act_params)
        contents = np.array([
            act_q.cache['scale_mul_biases_q'].qbiases[0],
            act_q.cache['scale_mul_biases_q'].qnorms[0], leak_factor_quant, 0,
            0
        ],
                            dtype=np.int8)
        comment += str.format(
            "in: {:05f} out: {:05f} qbias: {} qnorm: {} A0: {} B0: x C0: x",
            act_q.in_qs[0].scale[0], act_q.out_qs[0].scale[0],
            act_q.cache['scale_mul_biases_q'].qbiases[0],
            act_q.cache['scale_mul_biases_q'].qnorms[0], leak_factor_quant)
    elif isinstance(act_params,
                    (SigmoidActivationParameters, TanHActivationParameters)):
        assert act_q.in_qs[0].zero_point == 0 and act_q.out_qs[
            0].zero_point == 0, "asymmetric not supported"
        compute_in_out_scale(
            act_q,
            extra_scale=QType.Pow2(bits=32, q=7, signed=True).scale /
            act_q.in_qs[0].scale)
        contents = np.array([
            act_q.cache['scale_mul_biases_q'].qbiases[0],
            act_q.cache['scale_mul_biases_q'].qnorms[0], 0, 0, 0
        ],
                            dtype=np.int8)
        comment += str.format(
            "in: {:05f} out: {:05f} qbias: {} qnorm: {} A0: x B0: x C0: x",
            act_q.in_qs[0].scale[0], act_q.out_qs[0].scale[0],
            act_q.cache['scale_mul_biases_q'].qbiases[0],
            act_q.cache['scale_mul_biases_q'].qnorms[0])
    else:
        raise NotImplementedError("activation type not implemented")

    if isinstance(pnode, (GlobalPoolingParameters, PoolingParameters)):
        contents = np.array([
            act_q.cache['scale_mul_biases_q'].qbiases[0],
            act_q.cache['scale_mul_biases_q'].qnorms[0], 0, 0, 0
        ],
                            dtype=np.int8)

    contents = np.append(contents, [extra1, extra2, extra3, extra4])
    if extra5 is not None:
        contents = np.append(contents, [extra5])
    if extra6 is not None:
        contents = np.append(contents, [extra6])

    if for_ne16:
        # append weights_offset and pad_val for ne16
        # TODO - default config maybe in future
        if isinstance(pnode, (ConvFusionParameters, LinearFusionParameters)):
            filt_q = gen.G.quantization[NodeId(pnode, fnode)]
        else:
            filt_q = gen.G.quantization[NodeId(pnode)]
        pad_value = np.array(in_zero_point).astype(np.int16)
        pad_value1 = np.bitwise_and(pad_value, 0xFF)
        pad_value2 = np.bitwise_and(pad_value, 0xFF00) >> 8
        w_offset = -np.array(filt_q.in_qs[1].zero_point).astype(np.int32)
        w_offset1 = np.bitwise_and(w_offset, 0xFF)
        w_offset2 = np.bitwise_and(w_offset, 0xFF00) >> 8
        w_offset3 = np.bitwise_and(w_offset, 0xFF0000) >> 16
        w_offset4 = np.bitwise_and(w_offset, 0xFF000000) >> 24

        contents = np.append(
            contents, [[prenorm] if prenorm else [0], pad_value1, pad_value2,
                       w_offset1, w_offset2, w_offset3, w_offset4])

    cname, file_name = gen_constant(gen, pnode, fnode, INFOS, extra_name)
    const_info = ConstantInfo(file_name,
                              QType.Pow2(bits=8, q=0, signed=True),
                              contents=contents)

    gen.globals.append(
        GlobalArgInfo("int8",
                      cname,
                      gen.opts['default_global_home_location'],
                      gen.opts['default_global_exec_location'],
                      const_info=const_info,
                      comment=comment))
# Example 9
    def globals_generator(cls, gen, node, qrec, pnode, fnode) -> bool:
        names = {
            val: idx
            for idx, val in enumerate(LSTMParameters.INPUT_NAMES)
        }
        scales = []
        weight_zero = None
        for gate in ['i', 'c', 'f', 'o']:
            for input_tensor in ['i', 'r']:
                scale_name = f'{input_tensor}_2_{gate}_q'
                weight_name = f'{input_tensor}_2_{gate}_w'
                if weight_zero is None:
                    weight_zero = qrec.in_qs[names[weight_name]].zero_point[0]
                else:
                    assert weight_zero == qrec.in_qs[
                        names[weight_name]].zero_point[0]
                qscale = qrec.cache[scale_name]
                scales.append(qscale.qbiases)
                scales.append(qscale.qnorms)

        contents = interleave(*scales)

        cname, file_name = gen_constant(gen, pnode, pnode, "scalenorm")
        const_info = ConstantInfo(file_name,
                                  QType.Pow2(bits=8, q=0, signed=False),
                                  contents=contents)
        gen.globals.append(
            GlobalArgInfo("uint8",
                          cname,
                          gen.opts['default_global_home_location'],
                          gen.opts['default_global_exec_location'],
                          const_info=const_info,
                          comment=f"{node.name} scales and norms"))

        out_q = qrec.out_qs[0]
        out_scale = qrec.cache["state_out_q"].qbiases[0]
        out_scalen = qrec.cache["state_out_q"].qnorms[0]
        cin_scale = qrec.cache["cell_in_q"].qbiases[0]
        cin_scalen = qrec.cache["cell_in_q"].qnorms[0]
        cout_scale = qrec.cache["cell_out_q"].qbiases[0]
        cout_scalen = qrec.cache["cell_out_q"].qnorms[0]
        out_zeropoint = out_q.zero_point[0]

        # define LSTM_NE16_W_ZEROPOINT   0
        # define LSTM_NE16_GATE_PRENORM  1
        # define LSTM_NE16_CIN_SCALE     (0 + LSTM_NE16_OUT_OFF)
        # define LSTM_NE16_CIN_SCALEN    (1 + LSTM_NE16_OUT_OFF)
        # define LSTM_NE16_COUT_SCALE    (2 + LSTM_NE16_OUT_OFF)
        # define LSTM_NE16_COUT_SCALEN   (3 + LSTM_NE16_OUT_OFF)
        # define LSTM_NE16_OUT_SCALE     (4 + LSTM_NE16_OUT_OFF)
        # define LSTM_NE16_OUT_SCALEN    (5 + LSTM_NE16_OUT_OFF)
        # define LSTM_NE16_OUT_ZEROPOINT (6 + LSTM_NE16_OUT_OFF)

        # define LSTM_NE16_INT_A0        (0 + LSTM_NE16_INT_OFF)
        # define LSTM_NE16_INT_B0        (1 + LSTM_NE16_INT_OFF)
        # define LSTM_NE16_INT_C0        (2 + LSTM_NE16_INT_OFF)

        sigmoid_table = interleave(SIGMOID_TABLE & 0xff,
                                   SIGMOID_TABLE >> 8).astype(np.int8)
        if out_q.dtype == np.uint8:
            # Maybe get rid of this
            if qrec.cache.get('act_qtype'):
                min_val = qrec.cache['act_qtype'].quantize(-1)
                max_val = qrec.cache['act_qtype'].quantize(1)
            else:
                min_val = max_val = 0
            contents = np.concatenate(
                (sigmoid_table,
                 np.array([
                     -weight_zero.astype(np.int8), qrec.cache['gate_prenorm'],
                     cin_scale.astype(np.int8),
                     cin_scalen.astype(np.int8),
                     cout_scale.astype(np.int8),
                     cout_scalen.astype(np.int8),
                     out_scale.astype(np.int8),
                     out_scalen.astype(np.int8),
                     out_zeropoint.astype(np.int8), 0, 0, 0, 0
                 ],
                          dtype=np.int8)))
        else:
            contents = np.concatenate(
                (sigmoid_table,
                 np.array([
                     -weight_zero.astype(np.int8),
                     qrec.cache['gate_prenorm'],
                     cin_scale.astype(np.int8),
                     cin_scalen.astype(np.int8),
                     cout_scale.astype(np.int8),
                     cout_scalen.astype(np.int8),
                     out_scale.astype(np.int8),
                     out_scalen.astype(np.int8),
                     out_zeropoint.astype(np.uint16) & 0xff,
                     out_zeropoint.astype(np.uint16) >> 8,
                 ],
                          dtype=np.int8)))

        comment = (
            f"WZP: {weight_zero}, Out: {out_scale}/{out_scalen}, Cin: {cin_scale}/{cin_scalen}, "
            f"Cout: {cout_scale}/{cout_scalen}, OZP: {out_zeropoint}")
        cname, file_name = gen_constant(gen, pnode, pnode, INFOS)
        const_info = ConstantInfo(file_name,
                                  QType.Pow2(bits=8, q=0, signed=True),
                                  contents=contents)

        gen.globals.append(
            GlobalArgInfo("int8",
                          cname,
                          gen.opts['default_global_home_location'],
                          gen.opts['default_global_exec_location'],
                          const_info=const_info,
                          comment=comment))

        if node.rnn_states_as_inputs:
            gen.globals.append(
                GlobalResetArgInfo(f"{node.name}_Reset", 'AT_MEM_L2',
                                   'AT_MEM_UNDEF'))
        return True