Example No. 1
 def matscale3(cls, in_tensors, qrec):
     assert qrec.in_qs[0].bits == qrec.in_qs[1].bits
     assert qrec.in_qs[1].bits == qrec.in_qs[2].bits
     if qrec.in_qs[0].bits == 8:
         q_calc = QType.Pow2(bits=32,
                             q=qrec.in_qs[0].q + qrec.in_qs[1].q +
                             qrec.in_qs[2].q,
                             signed=True)
         res = np.multiply(np.multiply(in_tensors[0],
                                       in_tensors[1],
                                       dtype=np.int32),
                           in_tensors[2],
                           dtype=np.int32)
         res = qrec.out_qs[0].reduce_from(res, q_calc)
     elif qrec.in_qs[0].bits == 16:
         q_calc = QType.Pow2(bits=32,
                             q=qrec.in_qs[0].q + qrec.in_qs[1].q,
                             signed=True)
         res = np.multiply(in_tensors[0], in_tensors[1], dtype=np.int32)
         res = qrec.out_qs[0].reduce_from(res, q_calc)
         q_calc = QType.Pow2(bits=32,
                             q=qrec.in_qs[2].q + qrec.out_qs[0].q,
                             signed=True)
         res = np.multiply(res, in_tensors[2], dtype=np.int32)
         res = qrec.out_qs[0].reduce_from(res, q_calc)
     else:
         raise ValueError("only 8 and 16 bits supported")
     return res
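
Note: the pattern above is the heart of all the Pow2 kernels. Multiplying Q7 by Q7 by Q7 operands accumulates in Q21 (the q values add), and reduce_from then brings the accumulator back down to the output format. A minimal self-contained sketch, assuming reduce_from behaves like a rounding right shift:

    import numpy as np

    def to_q(x, q):
        # encode a float as a signed fixed-point integer with q fractional bits
        return np.int32(round(x * (1 << q)))

    def reduce_q(acc, q_from, q_to):
        # rounding right shift from q_from down to q_to fractional bits,
        # assumed to mirror out_qs[0].reduce_from(res, q_calc)
        shift = q_from - q_to
        return (acc + (1 << (shift - 1))) >> shift

    a, b, c = to_q(0.5, 7), to_q(0.25, 7), to_q(1.5, 7)  # three Q7 operands
    acc = a * b * c                                      # product lands in Q21 (7+7+7)
    print(reduce_q(acc, 21, 7) / (1 << 7))               # 0.1875 == 0.5 * 0.25 * 1.5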
Example No. 2
    def execute(cls, params,
                in_tensors,
                qrec: QRec,
                **kwargs):

        in_tensors = qrec.prepare_inputs(params, in_tensors, ktype="symmetric")
        func = PIECEWISE_OPS[params.__class__]
        op = func['op']
        if func['is_mult']:
            i1 = in_tensors[0].astype(np.int32)
            i2 = in_tensors[1].astype(np.int32)
            res = op(i1, i2, np.int32)
            q_calc = QType.Pow2(
                bits=32, q=qrec.in_qs[0].q+qrec.in_qs[1].q, signed=True)
            res = qrec.out_qs[0].reduce_from(res, q_calc)
        else:
            off_in = abs(qrec.in_qs[0].q - qrec.in_qs[1].q)
            if qrec.in_qs[0].q > qrec.in_qs[1].q:
                i1 = at_norm(in_tensors[0].astype(np.int32), off_in)
                i2 = in_tensors[1].astype(np.int32)
            else:
                i1 = in_tensors[0].astype(np.int32)
                i2 = at_norm(in_tensors[1].astype(np.int32), off_in)
            res = op(i1, i2, None)
            q_calc = QType.Pow2(bits=32, q=min(qrec.in_qs[0].q, qrec.in_qs[1].q), signed=True)
            res = qrec.out_qs[0].reduce_from(res, q_calc)
        return qrec.get_outputs(params, [res], ktype="symmetric")
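
Note: the addition/subtraction branch first aligns both operands to the smaller Q format, since fixed-point addition is only defined when both sides share the same number of fractional bits. A small sketch, assuming at_norm is a rounding right shift:

    import numpy as np

    def at_norm_sketch(x, n):
        # rounding right shift; assumed to match the library's at_norm helper
        return (x + (1 << (n - 1))) >> n if n > 0 else x

    a = np.int32(round(0.75 * (1 << 10)))   # 0.75 in Q10
    b = np.int32(round(0.50 * (1 << 7)))    # 0.50 in Q7
    a7 = at_norm_sketch(a, 10 - 7)          # align a down to Q7
    print((a7 + b) / (1 << 7))              # 1.25, computed entirely in Q7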
Example No. 3
 def get_outputs(self,
                 params: Parameters,
                 output_tensors: Sequence[np.ndarray],
                 ktype: str = None) -> Sequence[np.ndarray]:
     if ktype == "symmetric":
         if isinstance(params, (MatrixAddParameters, MatrixSubParameters)):
             q_calc = QType.Pow2(bits=32,
                                 q=min(self.in_qs[0].q, self.in_qs[1].q),
                                 signed=True)
             output_tensors = [
                 self.out_qs[0].reduce_from(output_tensors[0], q_calc)
             ]
         elif isinstance(params,
                         (MatrixMulParameters, MatrixDivParameters)):
             q_calc = QType.Pow2(bits=32,
                                 q=self.in_qs[0].q + self.in_qs[1].q,
                                 signed=True)
             output_tensors = [
                 self.out_qs[0].reduce_from(output_tensors[0], q_calc)
             ]
         elif isinstance(
                 params,
                 GlobalPoolParameters) and params.pool_type == "sum":
             output_tensors = [
                 self.out_qs[0].reduce_from(output_tensors[0],
                                            self.in_qs[0])
             ]
         if self._auto_dequantize_outputs:
             return [
                 self.out_qs[idx].dequantize(output_tensor)
                 for idx, output_tensor in enumerate(output_tensors)
             ]
     return output_tensors
Example No. 4
 def _quantize(cls, params, in_qs, stats, **kwargs):
     force_out_qs, _ = cls.get_pow2_opts(**kwargs)
     force_out_q = force_out_qs and force_out_qs[0]
     out_q = QType.Pow2(16, 15, True)
     if force_out_q and force_out_q != out_q:
         return None
     return SymmetricQuantizationRecord(in_qs=in_qs, out_qs=[QType.Pow2(16, 15, True)])
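
Note: QType.Pow2(16, 15, True) is signed 16-bit Q15, which represents [-1.0, 1.0 - 2**-15]; that makes it the natural fixed output type for saturating activations such as tanh. A quick check, assuming quantization is plain scale-round-clip:

    import numpy as np

    vals = np.array([-1.0, 0.5, 1.0])
    q15 = np.clip(np.round(vals * (1 << 15)), -(1 << 15), (1 << 15) - 1).astype(np.int16)
    print(q15)               # [-32768  16384  32767]
    print(q15 / (1 << 15))   # [-1.0, 0.5, ~0.99997]: +1.0 saturates just below 1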
Example No. 5
def gen_ssd_globals(gen, node, qrec):
    qrec.set_scales(node)
    scores_q = qrec.in_qs[1]
    scores_scale, scores_norm = compute_mul_bias(scores_q.scale)

    cname_scales, file_name_scales = gen_constant(gen, node, node, SSD_SCALES)
    contents = np.array([qrec.scale_x_q.qbiases,
                         qrec.scale_x_anc_q.qbiases,
                         qrec.scale_y_q.qbiases,
                         qrec.scale_y_anc_q.qbiases,
                         qrec.scale_h_q.qbiases,
                         qrec.scale_w_q.qbiases,
                         qrec.scale_ao_q.qbiases,
                         scores_scale], dtype=np.int8)
    scale_info = ConstantInfo(file_name_scales, QType.Pow2(bits=8, q=0, signed=True), contents=contents)

    cname_norms, file_name_norms = gen_constant(gen, node, node, SSD_NORMS)
    contents = np.array([qrec.scale_x_q.qnorms,
                         qrec.scale_x_anc_q.qnorms,
                         qrec.scale_y_q.qnorms,
                         qrec.scale_y_anc_q.qnorms,
                         qrec.scale_h_q.qnorms,
                         qrec.scale_w_q.qnorms,
                         qrec.scale_ao_q.qnorms,
                         scores_norm], dtype=np.int8)
    norms_info = ConstantInfo(file_name_norms, QType.Pow2(bits=8, q=0, signed=True), contents=contents)

    score_threshold = scores_q.quantize(node.nms_score_threshold)
    cname_infos, file_name_infos = gen_constant(gen, node, node, INFOS)
    contents = np.array([round(node.nms_iou_threshold * 2**7),     # Q7
                         score_threshold,                          # Q0 [0:255]
                         node.max_detections,                      # Q0 [0:255]
                         node.max_classes_per_detection,           # Q0 [0:255]
                         node.max_bb_before_nms >> 8,
                         node.max_bb_before_nms], dtype=np.int8)   # max_bb = Infos[4]<<8 + Infos[5]
    ssd_infos = ConstantInfo(file_name_infos, QType.Pow2(bits=8, q=0, signed=True), contents=contents)

    gen.globals.append(GlobalArgInfo(qrec.scale_x_q.ctype, cname_scales,
                                     gen.opts['default_global_home_location'],
                                     gen.opts['default_global_exec_location'],
                                     const_info=scale_info))

    gen.globals.append(GlobalArgInfo(qrec.scale_x_q.shift_ctype, cname_norms,
                                     gen.opts['default_global_home_location'],
                                     gen.opts['default_global_exec_location'],
                                     const_info=norms_info))

    gen.globals.append(GlobalArgInfo('uint8', cname_infos,
                                     gen.opts['default_global_home_location'],
                                     gen.opts['default_global_exec_location'],
                                     const_info=ssd_infos))
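
Note: the qbiases/qnorms pairs written to SSD_SCALES and SSD_NORMS encode each float scale as an int8 mantissa plus a right shift, i.e. scale ≈ qbias * 2**-qnorm. A hypothetical stand-in for compute_mul_bias (the real helper's rounding policy may differ) illustrates the decomposition:

    def split_scale(scale, bits=8):
        # express scale as qbias * 2**-qnorm with qbias in `bits` signed bits
        qbias, qnorm = scale, 0
        while round(qbias * 2) < (1 << (bits - 1)) and qnorm < 31:
            qbias *= 2      # move precision into the integer mantissa
            qnorm += 1
        return int(round(qbias)), qnorm

    qbias, qnorm = split_scale(1 / 255)          # e.g. a typical score scale
    print(qbias, qnorm, qbias / (1 << qnorm))    # 64 14 0.00390625 ~= 1/255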
Example No. 6
    def _quantize(cls, params, in_qs, stats, **kwargs):
        force_out_qs, params_dtype = cls.get_pow2_opts(**kwargs)
        force_out_q = force_out_qs and force_out_qs[0]

        fusion = kwargs.get('fusion', None)

        cls.check_valid_ranges(params, stats, idx=0, dirs='out')
        # Default to the node's own stats; a fused ReLU overrides them below
        out_dtype = params_dtype
        range_out = stats['range_out'][0]
        if fusion:
            activation = fusion.contained_nodes()[1]
            if isinstance(activation, ReluActivationParameters):
                # Take stats from the activation after the convolution
                range_out = kwargs['all_stats'][NodeId(
                    fusion, activation)]['range_out'][0]
                out_dtype = np.int32

        in_q1 = deepcopy(in_qs[0]).scale_to_pow2()
        in_q2 = deepcopy(in_qs[1]).scale_to_pow2()
        biases_q = QType.Pow2(32, in_q1.q + in_q2.q, True)

        if force_out_q:
            o_q = force_out_q
        else:
            o_q = QType.from_min_max_pow2(range_out['min'],
                                          range_out['max'],
                                          dtype=out_dtype)
        if len(in_qs) == 3:
            return QRec.symmetric(in_qs=[in_q1, in_q2, biases_q], out_qs=[o_q])
        return QRec.symmetric(in_qs=[in_q1, in_q2], out_qs=[o_q])
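
Note: from_min_max_pow2 must pick the largest q whose integer part still covers the observed range. A plausible sketch of that choice (the real function's rounding may differ):

    import numpy as np

    def q_from_range(min_val, max_val, bits=16):
        # reserve enough integer bits for the range, spend the rest on fraction
        max_abs = max(abs(min_val), abs(max_val))
        int_bits = max(int(np.ceil(np.log2(max_abs))), 0) if max_abs > 0 else 0
        return bits - 1 - int_bits

    print(q_from_range(-3.2, 5.7))   # 12: Q12 in int16 reaches ~8.0, covering 5.7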
Example No. 7
def rnn_infos(gen, node, qrec):
    i_state_q = qrec.in_qs[node.INPUT_NAMES.index('i_state')]

    contents = []
    comments = []

    # info for activation (scale the act input to the proper scale)
    info, comment = INFOS_FUNCS[node.activation]("f", qrec.s_2_s_q, i_state_q)
    contents.append(info)
    comments.append(comment)

    # info for input scaling (only used with non SameInputStateScale kernels)
    info, comment = scale_infos("f", getattr(qrec, "i_2_a_q"))
    contents.append(info)
    comments.append(comment)

    # info for scaling the activation out to out scale (only used for non Hard activations kernels)
    info, comment = scale_infos("f", getattr(qrec, "s_2_o_q"))
    contents.append(info)
    comments.append(comment)

    cname, file_name = gen_constant(gen, node, node, INFOS)
    const_info = ConstantInfo(file_name,
                              QType.Pow2(bits=8, q=0, signed=True),
                              contents=np.hstack(tuple(contents)))

    gen.globals.append(
        GlobalArgInfo("int8",
                      cname,
                      gen.opts['default_global_home_location'],
                      gen.opts['default_global_exec_location'],
                      const_info=const_info,
                      comment=" ".join(comments)))
Example No. 8
    def execute(cls, params, in_tensors, qrec: QRec, **kwargs):

        in_tensors = [
            in_tensor.astype(np.int32) for in_tensor in qrec.prepare_inputs(
                params, in_tensors, ktype="symmetric")
        ]

        if isinstance(params, MatMulTransposedParameters):
            mat1, mat2 = in_tensors[0], np.transpose(in_tensors[1], (1, 0))
        else:
            mat1, mat2 = in_tensors[0], in_tensors[1]

        if len(in_tensors) > 2:
            biases = in_tensors[2]
            if len(biases.shape) == 1:
                if biases.shape[0] == mat1.shape[0]:
                    biases = np.expand_dims(biases, -1)
        else:
            biases = 0

        # expect biases in in_q1 + in_q2
        q_calc = QType.Pow2(bits=32,
                            q=qrec.in_qs[0].q + qrec.in_qs[1].q,
                            signed=True)
        out_tensor = np.matmul(mat1, mat2) + biases
        out_tensor = qrec.out_qs[0].reduce_from(out_tensor, q_calc)

        return qrec.get_outputs(params, [out_tensor], ktype="symmetric")
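
Note: the "expect biases in in_q1 + in_q2" comment follows from the multiply rule: Qa times Qb products accumulate in Q(a+b), so the bias must already be in that format for the add to be well defined. A tiny numeric check:

    import numpy as np

    q1 = q2 = 7
    mat1 = np.round(np.array([[0.5, -0.25]]) * (1 << q1)).astype(np.int32)
    mat2 = np.round(np.array([[1.0], [2.0]]) * (1 << q2)).astype(np.int32)
    bias = np.int32(round(0.125 * (1 << (q1 + q2))))   # bias directly in Q14
    acc = np.matmul(mat1, mat2) + bias                 # everything in Q14
    print(acc / (1 << (q1 + q2)))                      # [[0.125]] == 0.5*1.0 - 0.25*2.0 + 0.125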
Example No. 9
def gru_infos(gen, node, qrec):
    i_qtype = internal_qtype(qrec)
    contents = []
    comments = []
    r_to_int_scale = qrec.cache['r_WR_2_int_q'].qbiases[0]
    r_to_int_scalen = qrec.cache['r_WR_2_int_q'].qnorms[0]
    r_to_in_scale = qrec.cache['i_2_r_WR_q'].qbiases[0]
    r_to_in_scalen = qrec.cache['i_2_r_WR_q'].qnorms[0]
    z_to_int_scale = qrec.cache['z_WR_2_int_q'].qbiases[0]
    z_to_int_scalen = qrec.cache['z_WR_2_int_q'].qnorms[0]
    z_to_in_scale = qrec.cache['i_2_z_WR_q'].qbiases[0]
    z_to_in_scalen = qrec.cache['i_2_z_WR_q'].qnorms[0]
    ht_to_in_scale = qrec.cache['i_2_h_WR_q'].qbiases[0]
    ht_to_in_scalen = qrec.cache['i_2_h_WR_q'].qnorms[0]
    h_to_int_scale = qrec.cache['h_WR_2_int_q'].qbiases[0]
    h_to_int_scalen = qrec.cache['h_WR_2_int_q'].qnorms[0]

    # GRU_R_INFOS
    comments.append(str.format("r_to_int_scale: {} r_to_int_scalen: {} r_to_in_scale: {} r_to_in_scalen: {}",
                               r_to_int_scale, r_to_int_scalen, r_to_in_scale, r_to_in_scalen,))
    contents.append(np.array(
        [r_to_int_scale, r_to_int_scalen, r_to_in_scale, r_to_in_scalen], dtype=np.int8))

    # GRU_Z_INFOS
    comments.append(str.format("z_to_int_scale: {} z_to_int_scalen: {} z_to_in_scale: {} z_to_in_scalen: {}",
                               z_to_int_scale, z_to_int_scalen, z_to_in_scale, z_to_in_scalen,))
    contents.append(np.array(
        [z_to_int_scale, z_to_int_scalen, z_to_in_scale, z_to_in_scalen], dtype=np.int8))

    # GRU_HT_INFOS
    comments.append(str.format("ht_to_in_scale: {} ht_to_in_scalen: {}",
                               ht_to_in_scale, ht_to_in_scalen,))
    contents.append(np.array([ht_to_in_scale, ht_to_in_scalen], dtype=np.int8))

    # GRU_H_INFOS
    comments.append(str.format("h_to_int_scale: {} h_to_int_scalen: {}",
                               h_to_int_scale, h_to_int_scalen,))
    contents.append(np.array([h_to_int_scale, h_to_int_scalen], dtype=np.int8))

    three = i_qtype.quantize(np.array([3]))[0]
    six = i_qtype.quantize(np.array([6]))[0]
    sixth = i_qtype.quantize(np.array([1/6]))[0]

    comments.append(str.format("int_q: {} A0: {} B0: {} C0: {}",
                               i_qtype.q, six, three, sixth))
    contents.append(np.array([lowb(six), highb(six),
                              lowb(three), highb(three),
                              lowb(sixth), highb(sixth), i_qtype.q],
                             dtype=np.int8))

    cname, file_name = gen_constant(gen, node, node, INFOS)
    const_info = ConstantInfo(file_name, QType.Pow2(bits=8, q=0, signed=True),
                              contents=np.hstack(tuple(contents)))

    gen.globals.append(GlobalArgInfo("int8", cname,
                                     gen.opts['default_global_home_location'],
                                     gen.opts['default_global_exec_location'],
                                     const_info=const_info,
                                     comment=" ".join(comments)))
Example No. 10
 def matscale2(cls, in_tensors, qrec=None):
     assert qrec.in_qs[0].bits == qrec.in_qs[1].bits
     q_calc = QType.Pow2(bits=32,
                         q=qrec.in_qs[0].q + qrec.in_qs[1].q,
                         signed=True)
     res = np.multiply(in_tensors[0], in_tensors[1], dtype=np.int32)
     res = qrec.out_qs[0].reduce_from(res, q_calc)
     return res
Example No. 11
 def _quantize(cls, params, in_qs, stats, **kwargs):
     force_out_qs, _ = cls.get_pow2_opts(**kwargs)
     force_out_q = force_out_qs and force_out_qs[0]
     in_q = deepcopy(in_qs[0]).scale_to_pow2()
     in_q.set_forced()
     out_q = QType.Pow2(16, 15, True, forced=True)
     if force_out_q and force_out_q != out_q:
         return None
     return QRec.symmetric(in_qs=[in_q], out_qs=[out_q])
Example No. 12
    def globals_generator(cls, gen, node, qrec, pnode, fnode) -> bool:
        if isinstance(pnode, FcParameters):
            gen_scales(gen, pnode, pnode, qrec)
            infos, infos_comment = np.array([0, 0, 0, 0, 0]), "no activation"
            fnode = pnode
            filt_q = qrec
        elif isinstance(pnode, LinearFusionParameters) and isinstance(
                fnode, FcParameters) and pnode.fusion_type == "linear_active":
            cnodes = pnode.contained_nodes()
            quants = [
                gen.G.quantization[NodeId(pnode, fnode)] for fnode in cnodes
            ]
            filt_q = quants[0]
            gen_scales(gen, pnode, cnodes[0], quants[0])
            infos, infos_comment = gen_act_infos(cnodes[1], quants[1])
        else:
            return False
        infos = np.append(infos, [0, 0, 0, 0])
        comment = str.format("BiasQ: {}", 0) + infos_comment
        infos[5] = 0  # BiasQ

        if filt_q.cache.get('ne16'):
            conv_mul_bias = filt_q.cache.get('mul_biases_q')
            prenorm = conv_mul_bias.pre_normalization if isinstance(
                conv_mul_bias, MultMulBiasScaleQType) else 0
            pad_value = np.array(filt_q.in_qs[0].zero_point).astype(np.int16)
            pad_value1 = np.bitwise_and(pad_value, 0xFF)
            pad_value2 = np.bitwise_and(pad_value, 0xFF00) >> 8
            w_offset = -np.array(filt_q.in_qs[1].zero_point).astype(np.int32)
            w_offset1 = np.bitwise_and(w_offset, 0xFF)
            w_offset2 = np.bitwise_and(w_offset, 0xFF00) >> 8
            w_offset3 = np.bitwise_and(w_offset, 0xFF0000) >> 16
            w_offset4 = np.bitwise_and(w_offset, 0xFF000000) >> 24

            infos = np.append(
                infos,
                verify_scalar([
                    prenorm if prenorm else 0, pad_value1, pad_value2,
                    w_offset1, w_offset2, w_offset3, w_offset4
                ]))

        cname, file_name = gen_constant(gen, pnode, fnode, INFOS)
        const_info = ConstantInfo(file_name,
                                  QType.Pow2(bits=8, q=0, signed=True),
                                  contents=infos)
        gen.globals.append(
            GlobalArgInfo("int8",
                          cname,
                          gen.opts['default_global_home_location'],
                          gen.opts['default_global_exec_location'],
                          const_info=const_info,
                          comment=comment))
        return True
Example No. 13
    def execute(cls, params, in_tensors, qrec: QRec, **kwargs):
        in_tensor = qrec.prepare_inputs(params, in_tensors,
                                        ktype="symmetric")[0]
        out_q15 = tanh_lut(in_tensor.astype(np.int32) << 8)
        compute_in_out_scale(
            qrec,
            extra_scale=QType.Pow2(bits=32, q=7, signed=True).scale /
            qrec.in_qs[0].scale)
        scale_mul_biases_q = qrec.cache['scale_mul_biases_q']
        output = scale_mul_biases_q.apply_scales(out_q15 >> 8)

        return qrec.get_outputs(params, [output], ktype="symmetric")
Example No. 14
 def compute_scales(cls, params, qrec):
     if isinstance(params,
                   (SigmoidScaledSymmetricMult, TanHActivationParameters)):
         compute_in_out_scale(
             qrec,
             extra_scale=QType.Pow2(bits=32, q=7, signed=True).scale /
             qrec.in_qs[0].scale)
     elif isinstance(params, HSwishActivationParameters):
         compute_in_out_scale(qrec, extra_scale=qrec.in_qs[0].scale * 1 / 6)
     else:
         compute_in_out_scale(qrec)
     return qrec
Example No. 15
    def _quantize(cls, params, in_qs, stats, **kwargs):
        force_out_qs, _ = cls.get_mult_opts(**kwargs)
        force_out_q = force_out_qs and force_out_qs[0]
        if force_out_q:
            return None

        in_q, win_q, fft_twiddles_q, swap_table_q, rfft_twiddles_q, fft_out_q, spect_q = cls.get_spectrogram_in_out_q(
            in_qs[0], params)
        melcoeff_q = QType.Pow2(bits=16, signed=True, q=MFCC_COEFF_Q)
        mel_sparsity_table_q = QType.Pow2(bits=16, signed=False, q=0)
        dctmat_q = QType.Pow2(bits=16, signed=True, q=DCT_TWIDDLE_Q)
        if params.mel_type == "melspectrogram":
            out_q = QType.Pow2(bits=32, signed=True, q=16)
        elif params.mel_type == "logmelspectrogram":
            out_q = QType.Pow2(bits=16, signed=True, q=15 - params.quant_norm)
        else:
            out_q = QType.Pow2(bits=16,
                               signed=True,
                               q=15 - params.quant_norm - DCT_TWIDDLE_Q)

        return QRec.symmetric(in_qs=[
            in_q, win_q, fft_twiddles_q, swap_table_q, rfft_twiddles_q,
            mel_sparsity_table_q, melcoeff_q, dctmat_q
        ],
                              out_qs=[out_q],
                              fft_out_q=fft_out_q)
Example No. 16
    def globals_generator(cls, gen, node, qrec, pnode, fnode) -> bool:
        if isinstance(pnode, MatMulOpParameters):
            mul_node = pnode
            mul_qrec = qrec
            fnode = pnode
            infos, comment = np.array([0, 0, 0, 0, 0]), "no activation"
        elif isinstance(pnode, MatMulOpFusionParameters) and isinstance(fnode, MatMulOpParameters):
            cnodes = pnode.contained_nodes()
            quants = [gen.G.quantization[NodeId(
                pnode, fnode)] for fnode in cnodes]
            mul_node = cnodes[0]
            mul_qrec = quants[0]
            infos, comment = gen_act_infos(cnodes[1], quants[1])
        else:
            return False

        if len(mul_qrec.in_qs[1].scale) > 1:
            gen_scales(gen, pnode, mul_node, mul_qrec)
            pl_scale = 0
            pl_scalen = 0
        else:
            pl_scale = mul_qrec.cache['mul_biases_q'].qbiases[0]
            pl_scalen = mul_qrec.cache['mul_biases_q'].qnorms[0]

        infos = np.append(infos, [0, 0, pl_scale, pl_scalen])

        if mul_qrec.cache.get('ne16'):
            conv_mul_bias = mul_qrec.cache.get('mul_biases_q')
            prenorm = conv_mul_bias.pre_normalization if isinstance(conv_mul_bias, MultMulBiasScaleQType) else 0
            pad_value = np.array(mul_qrec.in_qs[0].zero_point).astype(np.int16)
            pad_value1 = np.bitwise_and(pad_value, 0xFF)
            pad_value2 = np.bitwise_and(pad_value, 0xFF00) >> 8
            w_offset = -np.array(mul_qrec.in_qs[1].zero_point).astype(np.int32)
            w_offset1 = np.bitwise_and(w_offset, 0xFF)
            w_offset2 = np.bitwise_and(w_offset, 0xFF00) >> 8
            w_offset3 = np.bitwise_and(w_offset, 0xFF0000) >> 16
            w_offset4 = np.bitwise_and(w_offset, 0xFF000000) >> 24

            infos = np.append(
                infos, verify_scalar([prenorm if prenorm else 0, pad_value1, pad_value2, w_offset1, w_offset2, w_offset3, w_offset4]))

        cname, file_name = gen_constant(gen, pnode, fnode, INFOS)
        const_info = ConstantInfo(file_name, QType.Pow2(bits=8, q=0, signed=True), contents=infos)
        gen.globals.append(GlobalArgInfo("int8", cname,
                           gen.opts['default_global_home_location'],
                           gen.opts['default_global_exec_location'],
                           const_info=const_info,
                           comment=comment))
        return True
Example No. 17
    def execute(cls, params,
                in_tensors,
                qrec: QuantizationRecordBase,
                **kwargs):
        in_tensor = qrec.prepare_inputs(params, in_tensors, ktype="symmetric")[0]

        calc_q = QType.Pow2(bits=32, q=qrec.in_qs[0].q + 15, signed=True)

        fac_1 = qrec.in_qs[0].quantize(np.array([params.offset]))
        fac_2 = (1 << 15) // 6
        upper_bound = qrec.in_qs[0].quantize(np.array([6.]))
        lower_bound = qrec.in_qs[0].quantize(np.array([0.]))
        in_tensor = in_tensor.astype(np.int32)
        in_tensor = np.multiply(np.minimum(np.maximum(in_tensor + fac_1, lower_bound),
                                           upper_bound), fac_2, dtype=np.int32)
        return qrec.get_outputs(params, [qrec.out_qs[0].reduce_from(in_tensor, calc_q)], ktype="symmetric")
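
Note: this kernel is a hard sigmoid evaluated entirely in integers: clamp(x + offset, 0, 6) * (1/6), where fac_2 = (1 << 15) // 6 is 1/6 in Q15, which is why calc_q sits at q_in + 15. A float-vs-fixed sketch under those assumptions (offset 3.0 is the usual hard-sigmoid constant, standing in for params.offset):

    import numpy as np

    q_in = 12
    x = np.array([-1.0, 0.0, 2.5])
    offset = 3.0                                    # stand-in for params.offset
    xq = np.round(x * (1 << q_in)).astype(np.int32)
    fac_1 = int(round(offset * (1 << q_in)))
    fac_2 = (1 << 15) // 6                          # 1/6 in Q15
    acc = np.minimum(np.maximum(xq + fac_1, 0), 6 << q_in) * fac_2  # Q(q_in + 15)
    print(acc / (1 << (q_in + 15)))                             # fixed-point result
    print(np.minimum(np.maximum(x + offset, 0.0), 6.0) / 6.0)   # float reference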
Example No. 18
def lstm_infos(gen, node, qrec):
    i_qtype = internal_qtype(qrec)
    contents = []
    comments = []
    for k, v in LSTM_INFOS_ORDER.items():
        info, comment = scale_infos(k, qrec.cache["r_2_%s_q" % k])
        contents.append(info)
        comments.append(comment)
    cin_scale = qrec.cache['cell_in_q'].qbiases[0]
    cin_scalen = qrec.cache['cell_in_q'].qnorms[0]
    cout_scale = qrec.cache['cell_out_q'].qbiases[0]
    cout_scalen = qrec.cache['cell_out_q'].qnorms[0]
    out_scale = qrec.cache['state_out_q'].qbiases[0]
    out_scalen = qrec.cache['state_out_q'].qnorms[0]
    comments.append(str.format("cin_scale: {} cin_scale_n: {} cout_scale: {} cout_scale_n: {}",
                               cin_scale, cin_scalen, cout_scale, cout_scalen,))

    comments.append(str.format("out_scale: {} out_scale_n: {}",
                               out_scale, out_scalen))
    contents.append(np.array([cin_scale, cin_scalen, cout_scale, cout_scalen,
                              out_scale, out_scalen], dtype=np.int8))

    three = i_qtype.quantize(np.array([3]))[0]
    six = i_qtype.quantize(np.array([6]))[0]
    sixth = i_qtype.quantize(np.array([1/6]))[0]

    comments.append(str.format("int_q: {} A0: {} B0: {} C0: {}",
                               i_qtype.q, six, three, sixth))
    contents.append(np.array([lowb(six), highb(six),
                              lowb(three), highb(three),
                              lowb(sixth), highb(sixth), i_qtype.q],
                             dtype=np.int8))

    for k in LSTM_INFOS_ORDER.keys():
        info, comment = scale_infos(k, qrec.cache["i_2_%s_q" % k])
        contents.append(info)
        comments.append(comment)

    cname, file_name = gen_constant(gen, node, node, INFOS)
    const_info = ConstantInfo(file_name, QType.Pow2(bits=8, q=0, signed=True),
                              contents=np.hstack(tuple(contents)))

    gen.globals.append(GlobalArgInfo("int8", cname,
                                     gen.opts['default_global_home_location'],
                                     gen.opts['default_global_exec_location'],
                                     const_info=const_info,
                                     comment=" ".join(comments)))
Example No. 19
    def _quantize(cls, params, in_qs, stats, **kwargs):
        force_out_qs, out_dtype = cls.get_mult_opts(**kwargs)
        force_out_q = force_out_qs and force_out_qs[0]
        fusion = kwargs.get('fusion', None)
        in_q = in_qs[0]
        if not fusion and in_q.dtype == np.int32:
            return None

        if isinstance(params, (HSwishActivationParameters, HSigmoidActivationParameters)):
            max_val = in_q.scale * pow(2, in_q.bits - 1)
            if max_val < 6:
                in_q = QType.from_min_max_sq(-6, 6, dtype=in_q.dtype, forced=True)
        elif isinstance(params, SigmoidActivationParameters):
            in_q = QType.from_min_max_sq(-8, 8, dtype=in_q.dtype, forced=True)

        if force_out_q:
            if force_out_q.signed != in_q.signed:
                return None
            if fusion and fusion.fusion_type in ['conv_active_pool', 'conv_active']:
                if not isinstance(params, (SigmoidActivationParameters, HTanHActivationParameters,
                                           HSwishActivationParameters, HSigmoidActivationParameters)):
                    in_q = deepcopy(force_out_q)
            o_q = deepcopy(force_out_q)
            # activation cannot move zeropoint unless it is a reduction step
            if o_q.zero_point != in_q.zero_point and in_q.dtype != np.int32:
                return None
        else:
            cls.check_valid_ranges(params, stats, idx=0, dirs='out')
            zero_point = in_q.zero_point if in_q.zero_point != 0 else None
            o_q = QType.from_min_max_sq(stats['range_out'][0]['min'],
                                        stats['range_out'][0]['max'],
                                        dtype=in_q.dtype,
                                        zero_point=zero_point)

        qrec = QRec.scaled(in_qs=[in_q], out_qs=[o_q])
        if isinstance(params, (SigmoidScaledSymmetricMult, TanHActivationParameters)):
            compute_in_out_scale(qrec, extra_scale=QType.Pow2(bits=32, q=7, signed=True).scale/qrec.in_qs[0].scale)
        elif isinstance(params, HSwishActivationParameters):
            compute_in_out_scale(qrec, extra_scale=qrec.in_qs[0].scale * 1/6)
        else:
            compute_in_out_scale(qrec)
        return qrec
Example No. 20
    def execute(cls, params,
                in_tensors,
                qrec: QRec,
                **kwargs):

        in_tensors = [in_tensor.astype(np.int32) for in_tensor in qrec.prepare_inputs(
            params, in_tensors, ktype="symmetric")]

        if len(in_tensors) > 2:
            biases = in_tensors[2]
            if len(biases.shape) == 1:
                biases = np.expand_dims(biases, -1)
        else:
            biases = 0
        # expect biases in in_q1 + in_q2
        q_calc = QType.Pow2(bits=32, q=qrec.in_qs[0].q + qrec.in_qs[1].q, signed=True)
        out_tensor = np.matmul(in_tensors[0], in_tensors[1]) + biases
        out_tensor = qrec.out_qs[0].reduce_from(out_tensor, q_calc)

        return qrec.get_outputs(params, [out_tensor], ktype="symmetric")
Example No. 21
    def get_spectrogram_in_out_q(cls, in_q, params):
        win_q = QType.Pow2(bits=16, signed=True, q=WINDOW_Q)
        fft_twiddles_q = QType.Pow2(bits=16, signed=True, q=FFT_TWIDDLES_Q)
        rfft_twiddles_q = QType.Pow2(bits=16, signed=True, q=FFT_TWIDDLES_Q)
        swap_table_q = QType.Pow2(bits=16, signed=False, q=0)
        in_q = QType.Pow2(bits=16,
                          signed=True,
                          q=int(-np.ceil(np.log2(in_q.scale))),
                          forced=True)
        if params.is_radix4():
            #in_q = QType.Pow2(bits=16, signed=True, q=12)
            fft_out_q = in_q.q - 2 * (int(np.log2(params.n_cfft) / 2) - 2) - 1
        else:
            #in_q = QType.Pow2(bits=16, signed=True, q=13)
            fft_out_q = in_q.q - (int(np.log2(params.n_cfft)) - 3) - 1

        fft_out_q = QType.Pow2(bits=16, signed=True, q=fft_out_q)
        if not params.magsquared:
            out_q = QType.Pow2(bits=32, signed=False, q=15)
        else:
            out_q = QType.Pow2(bits=32, signed=False, q=15)  #fft_out_q.q*2)
        return in_q, win_q, fft_twiddles_q, swap_table_q, rfft_twiddles_q, fft_out_q, out_q
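
Note: fft_out_q tracks how many fractional bits survive the transform: each butterfly stage can grow the magnitude, so fractional bits are surrendered stage by stage (two per radix-4 stage). The same bookkeeping as a standalone helper:

    import numpy as np

    def fft_out_q_sketch(in_q, n_cfft, radix4):
        # mirrors the expressions above, pulled out for readability
        if radix4:
            return in_q - 2 * (int(np.log2(n_cfft) / 2) - 2) - 1
        return in_q - (int(np.log2(n_cfft)) - 3) - 1

    print(fft_out_q_sketch(13, 512, False))   # Q13 in, 512-point radix-2 -> Q6 out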
Example No. 22
    def globals_generator(cls, gen, node, qrec, pnode, fnode) -> bool:
        if isinstance(pnode, (GlobalPoolingParameters, PoolingParameters,
                              GlobalSumPoolParameters)):
            compute_in_out_scale(qrec)
            infos, comment = np.array([
                qrec.cache['scale_mul_biases_q'].qbiases[0],
                qrec.cache['scale_mul_biases_q'].qnorms[0], 0, 0, 0
            ]), "no activation"
            fnode = pnode
            pool_q = qrec
        elif isinstance(pnode, ActivationFusion) and isinstance(
                fnode, (GlobalPoolingParameters, PoolingParameters)):
            cnodes = pnode.contained_nodes()
            quants = [
                gen.G.quantization[NodeId(pnode, fnode)] for fnode in cnodes
            ]
            pool_q = quants[0]
            infos, comment = gen_act_infos(cnodes[1], quants[1])
        else:
            return False
        infos = np.append(infos, [0, 0, 0, 0])
        if isinstance(fnode, GlobalSumPoolParameters):
            compute_in_out_scale(pool_q, in_idx=0, out_idx=0)
            infos[0] = 0
            infos[1] = 0
            infos[5] = pool_q.cache['scale_mul_biases_q'].qbiases[0]
            infos[6] = pool_q.cache['scale_mul_biases_q'].qnorms[0]

        cname, file_name = gen_constant(gen, pnode, fnode, INFOS)
        const_info = ConstantInfo(file_name,
                                  QType.Pow2(bits=8, q=0, signed=True),
                                  contents=infos)
        gen.globals.append(
            GlobalArgInfo("int8",
                          cname,
                          gen.opts['default_global_home_location'],
                          gen.opts['default_global_exec_location'],
                          const_info=const_info,
                          comment=comment))
        return True
Example No. 23
    def globals_generator(cls, gen, node, qrec, pnode, fnode) -> bool:
        names = {val: idx for idx, val in enumerate(RNNParameters.INPUT_NAMES)}
        w_q = qrec.in_qs[names['r_2_i_w']]
        out_q = qrec.out_qs[0]
        out_scale = qrec.cache["s_2_o_q"]
        assert len(w_q.zero_point) == 1
        assert len(out_scale.qbiases) == 1
        assert len(out_scale.qnorms) == 1
        if out_q.dtype == np.uint8:
            if qrec.cache['act_qtype']:
                min_val = qrec.cache['act_qtype'].quantize(-1)
                max_val = qrec.cache['act_qtype'].quantize(1)
            else:
                min_val = max_val = 0
            contents = np.array([
                min_val, max_val, (-w_q.zero_point[0]).astype(np.int8),
                out_q.zero_point[0], 0, out_scale.qbiases[0].astype(
                    np.int8), out_scale.qnorms[0].astype(np.int8), 0, 0
            ],
                                dtype=np.int8)
        else:
            out_zp = out_q.zero_point[0].astype(np.uint16)
            contents = np.array([
                0, 0, (-w_q.zero_point[0]).astype(np.int8), out_zp & 0xff,
                out_zp >> 8, out_scale.qbiases[0].astype(
                    np.int8), out_scale.qnorms[0].astype(
                        np.int8), qrec.cache["i_2_s_q"].pre_normalization,
                qrec.cache["s_2_s_q"].pre_normalization
            ],
                                dtype=np.int8)
        comment = f"A0: {1} B0: {-1}, ZP: {w_q.zero_point}, OutS: {out_scale.qbiases[0]}, OutN: {out_scale.qnorms[0]}"

        cname, file_name = gen_constant(gen, pnode, pnode, INFOS)
        const_info = ConstantInfo(file_name,
                                  QType.Pow2(bits=8, q=0, signed=True),
                                  contents=contents)

        gen.globals.append(
            GlobalArgInfo("int8",
                          cname,
                          gen.opts['default_global_home_location'],
                          gen.opts['default_global_exec_location'],
                          const_info=const_info,
                          comment=comment))

        state_scale = qrec.cache["s_2_s_q"]
        if node.rnn_same_inout_scale:
            contents = interleave(state_scale.qbiases, state_scale.qnorms)
        else:
            input_scale = qrec.cache["i_2_s_q"]
            contents = interleave(state_scale.qbiases, input_scale.qbiases,
                                  state_scale.qnorms, input_scale.qnorms)

        cname, file_name = gen_constant(gen, pnode, pnode, "scalenorm")
        const_info = ConstantInfo(file_name,
                                  QType.Pow2(bits=8, q=0, signed=False),
                                  contents=contents)
        gen.globals.append(
            GlobalArgInfo("uint8",
                          cname,
                          gen.opts['default_global_home_location'],
                          gen.opts['default_global_exec_location'],
                          const_info=const_info,
                          comment=f"{node.name} scales and norms"))
        if node.rnn_states_as_inputs:
            gen.globals.append(
                GlobalResetArgInfo(f"{node.name}_Reset", 'AT_MEM_L2',
                                   'AT_MEM_UNDEF'))
        return True
Example No. 24
def mult8_infos_generator(gen, node, qrec, pnode, fnode) -> bool:
    if fnode is not None:
        return False
    # if isinstance(pnode, Conv2DParameters):
    #     for_ne16 = qrec.cache.get('ne16')
    #     in_zero_point = qrec.in_qs[0].zero_point
    #     conv_mul_bias = qrec.cache.get('mul_biases_q')
    #     prenorm = conv_mul_bias.pre_normalization if isinstance(conv_mul_bias, MultMulBiasScaleQType) else 0
    #     act_infos(gen, pnode, pnode, None, None, prenorm=prenorm, extra1=0,
    #               for_ne16=for_ne16, in_zero_point=in_zero_point)
    # elif isinstance(pnode, (GlobalPoolingParameters, PoolingParameters)):
    #     compute_in_out_scale(qrec)
    #     act_infos(gen, pnode, pnode, None, qrec)
    elif isinstance(pnode, ActivationParameters):
        act_infos(gen, pnode, pnode, pnode, gen.G.quantization[NodeId(pnode)])
    # elif isinstance(pnode, ConvFusionParameters):
    #     cnodes = node.contained_nodes()
    #     quants = [gen.G.quantization[NodeId(node, fnode)] for fnode in cnodes]
    #     for_ne16 = any([qrec.cache.get('ne16') for qrec in quants])
    #     in_zero_point = quants[0].in_qs[0].zero_point
    #     for qrec in quants:
    #         compute_in_out_scale(qrec)
    #     if node.fusion_type.startswith('linear') or node.fusion_type.startswith('conv') or node.fusion_type.startswith('pool'):
    #         if node.fusion_type in ("pool_active"):
    #             act_infos(gen, pnode, cnodes[0], cnodes[1], quants[1],
    #                       extra1=0, for_ne16=for_ne16, in_zero_point=in_zero_point)
    #         else:
    #             conv_mul_bias = quants[0].cache.get('mul_biases_q')
    #             prenorm = conv_mul_bias.pre_normalization if isinstance(conv_mul_bias, MultMulBiasScaleQType) else 0
    #             if node.fusion_type in ("conv_active_pool", "conv_active", "linear_active"):
    #                 act_infos(gen, pnode, cnodes[0], cnodes[1], quants[1], prenorm=prenorm,
    #                           extra1=0, for_ne16=for_ne16, in_zero_point=in_zero_point)
    #             elif node.fusion_type == "conv_pool_active":
    #                 act_infos(gen, pnode, cnodes[0], cnodes[2], quants[2], prenorm=prenorm,
    #                           extra1=0, for_ne16=for_ne16, in_zero_point=in_zero_point)
    #             elif node.fusion_type == "conv_pool":
    #                 act_infos(gen, pnode, cnodes[0], None, None, prenorm=prenorm,
    #                           extra1=0, for_ne16=for_ne16)
    elif isinstance(pnode, MatrixMulParameters):
        compute_in_out_scale(qrec, in_idx=(0, 1), out_idx=0)
        act_infos(gen,
                  pnode,
                  pnode,
                  None,
                  None,
                  extra1=qrec.cache['scale_mul_biases_q'].qbiases[0],
                  extra2=qrec.cache['scale_mul_biases_q'].qnorms[0])
    elif isinstance(pnode, SoftMaxParameters):
        act_infos(gen, pnode, pnode, pnode, qrec)
    # elif isinstance(pnode, ActivationFusionBase):
    #     cnodes = node.contained_nodes()
    #     quants = [gen.G.quantization[NodeId(node, fnode)] for fnode in cnodes]
    #     for qrec in quants:
    #         compute_in_out_scale(qrec)
    #     if isinstance(cnodes[0], (GlobalPoolingParameters, PoolingParameters)):
    #         act_infos(gen, pnode, cnodes[0], cnodes[1], quants[1])
    #     else:
    #         return False
    #     return True
    elif isinstance(pnode, (MatMulOpParameters, MatMulOpFusionParameters)):
        if isinstance(pnode, MatMulOpFusionParameters):
            cnodes = node.contained_nodes()
            quants = [
                gen.G.quantization[NodeId(node, fnode)] for fnode in cnodes
            ]
            mul_node = cnodes[0]
            mul_qrec = quants[0]
            act_node = cnodes[1]
            act_qrec = quants[1]
        else:
            mul_node = pnode
            mul_qrec = qrec
            act_node = None
            act_qrec = None

        if len(pnode.in_dims) == 3 and len(mul_qrec.in_qs[0].scale) > 1:
            gen_scales(gen, pnode, mul_node, mul_qrec)
            extra3 = 0
            extra4 = 0
        else:
            extra3 = mul_qrec.cache['mul_biases_q'].qbiases[0]
            extra4 = mul_qrec.cache['mul_biases_q'].qnorms[0]

        act_infos(gen,
                  pnode,
                  mul_node,
                  act_node,
                  act_qrec,
                  extra3=extra3,
                  extra4=extra4)
    elif isinstance(pnode, QuantizeParameters):
        in_q = qrec.in_qs[0]
        out_q = qrec.out_qs[0]
        comment = f'in q: {in_q} out_q: {out_q}'
        if qrec.cache['kernel_type'] == 'KOP_CONVERT_FP_FP_ZEROPOINT':
            bits = 8 if in_q.dtype == np.int8 else 16
            if in_q.signed:
                contents = ((int(math.pow(2, bits)) + in_q.zero_point[0] -
                             out_q.zero_point[0]) %
                            int(math.pow(2, bits))).astype(np.uint8)
            else:
                contents = (int(math.pow(2, bits)) - in_q.zero_point[0] +
                            out_q.zero_point[0]).astype(np.uint8)
        # if in_q.dtype == np.int8 and out_q.dtype == np.uint8:
        #     if not np.allclose(in_q.scale, out_q.scale):
        #         return False
        #     if not np.all(in_q.zero_point == (out_q.zero_point - 128)):
        #         return False
        #     contents = (
        #         (256 + in_q.zero_point[0] - out_q.zero_point[0]) % 256).astype(np.uint8)
        # elif in_q.dtype == np.uint8 and out_q.dtype == np.int8:
        #     if not np.allclose(in_q.scale, out_q.scale):
        #         return False
        #     if not np.all(in_q.zero_point == (out_q.zero_point - 128)):
        #         return False
        #     contents = (
        #         256 - in_q.zero_point[0] + out_q.zero_point[0]).astype(np.uint8)
        elif in_q.dtype == np.int8 and out_q.dtype == np.int16:
            if qrec.cache['kernel_type'] == 'KOP_CONVERT_FP_FP':
                return True
            raise NotImplementedError()
        elif in_q.dtype == np.int16 and out_q.dtype == np.int8:
            if qrec.cache['kernel_type'] == 'KOP_CONVERT_FP_FP':
                return True
            raise NotImplementedError()
        else:
            raise ValueError(f"strange dtype change in {pnode.name}")
        cname, file_name = gen_constant(gen, pnode, pnode, INFOS)
        const_info = ConstantInfo(file_name,
                                  QType.Pow2(bits=8, q=0, signed=True),
                                  contents=contents)

        gen.globals.append(
            GlobalArgInfo("int8",
                          cname,
                          gen.opts['default_global_home_location'],
                          gen.opts['default_global_exec_location'],
                          const_info=const_info,
                          comment=comment))
    else:
        return False
    return True
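
Note: the KOP_CONVERT_FP_FP_ZEROPOINT branch works because converting between same-scale signed and unsigned types is just a constant offset added modulo 2**bits. A check for the common int8 to uint8 case:

    import numpy as np

    bits, zp_in, zp_out = 8, 0, 128          # symmetric int8 -> uint8 with zp 128
    offset = np.uint8((2**bits + zp_in - zp_out) % 2**bits)   # 128
    x = np.array([-128, -1, 0, 127], dtype=np.int8)
    print(x.view(np.uint8) + offset)         # [  0 127 128 255], wrapping mod 256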
Example No. 25
    def globals_generator(cls, gen, node, qrec, pnode, fnode) -> bool:
        names = {val: idx for idx, val in enumerate(GRUParameters.INPUT_NAMES)}
        scales = []
        weight_zero = None
        for gate in ['r', 'h', 'z']:
            input_order = ['r', 'w'] if gate == 'h' else ['w', 'r']
            for input_tensor in input_order:
                scale_name = f'{input_tensor}_2_{gate}_q'
                weight_name = f'{input_tensor}_2_{gate}_w'
                if weight_zero is None:
                    weight_zero = qrec.in_qs[names[weight_name]].zero_point[0]
                else:
                    assert weight_zero == qrec.in_qs[
                        names[weight_name]].zero_point[0]
                qscale = qrec.cache[scale_name]
                scales.append(qscale.qbiases)
                scales.append(qscale.qnorms)

        contents = interleave(*scales)

        cname, file_name = gen_constant(gen, pnode, pnode, "scalenorm")
        const_info = ConstantInfo(file_name,
                                  QType.Pow2(bits=8, q=0, signed=False),
                                  contents=contents)
        gen.globals.append(
            GlobalArgInfo("uint8",
                          cname,
                          gen.opts['default_global_home_location'],
                          gen.opts['default_global_exec_location'],
                          const_info=const_info,
                          comment=f"{node.name} scales and norms"))
        if node.rnn_states_as_inputs:
            gen.globals.append(
                GlobalResetArgInfo(f"{node.name}_Reset", 'AT_MEM_L2',
                                   'AT_MEM_UNDEF'))

        out_q = qrec.out_qs[0]

        sigmoid_table = interleave(SIGMOID_TABLE & 0xff,
                                   SIGMOID_TABLE >> 8).astype(np.int8)
        if out_q.dtype == np.uint8:
            contents = np.concatenate(
                (sigmoid_table,
                 np.array([-weight_zero.astype(np.int8), 0], dtype=np.int8)))
        else:
            contents = np.concatenate(
                (sigmoid_table,
                 np.array([
                     -weight_zero.astype(np.int8), qrec.cache['gate_prenorm']
                 ],
                          dtype=np.int8)))

        comment = (f"WZP: {weight_zero}")
        cname, file_name = gen_constant(gen, pnode, pnode, INFOS)
        const_info = ConstantInfo(file_name,
                                  QType.Pow2(bits=8, q=0, signed=True),
                                  contents=contents)

        gen.globals.append(
            GlobalArgInfo("int8",
                          cname,
                          gen.opts['default_global_home_location'],
                          gen.opts['default_global_exec_location'],
                          const_info=const_info,
                          comment=comment))

        if node.rnn_states_as_inputs:
            gen.globals.append(
                GlobalResetArgInfo(f"{node.name}_Reset", 'AT_MEM_L2',
                                   'AT_MEM_UNDEF'))
        return True
Example No. 26
    def step_kernel(cls, params: LSTMParameters, args: Mapping[str,
                                                               np.ndarray],
                    idx: int, input_tensor: np.ndarray, qrec):

        use_cifg = 'i_2_i_w' in args and args['i_2_i_w'][0] is None
        use_peephole = 'c_2_o_w' in args and args['c_2_o_w'][0] is not None
        use_layer_norm = 'f_norm' in args and args['f_norm'][0] is not None
        if use_cifg:
            raise NotImplementedError("cifg mode is not supported")
        if use_peephole:
            raise NotImplementedError("peephole mode is not supported")
        if use_layer_norm:
            raise NotImplementedError("layer norm mode is not supported")

        # INPUT vs WEIGHTS
        # For each cell: compute input_weight * input if there is an input
        input_gate_scratch = 0
        forget_gate_scratch = 0
        cell_scratch = 0
        output_gate_scratch = 0
        if idx < params.n_input_cells:
            input_gate_scratch += scale_lstm_input_input(
                qrec, args['i_2_i_w'][0].astype(np.int32).dot(
                    input_tensor[idx].astype(np.int32)), 0)
            forget_gate_scratch += scale_lstm_input_forget(
                qrec, args['i_2_f_w'][0].astype(np.int32).dot(
                    input_tensor[idx].astype(np.int32)), 0)
            cell_scratch += scale_lstm_input_cell(
                qrec, args['i_2_c_w'][0].astype(np.int32).dot(
                    input_tensor[idx].astype(np.int32)), 0)
            output_gate_scratch += scale_lstm_input_output(
                qrec, args['i_2_o_w'][0].astype(np.int32).dot(
                    input_tensor[idx].astype(np.int32)), 0)

        # Initialize scratch buffers with bias for regular lstm
        input_gate_scratch_state = args['i_b'][0].astype(np.int32)
        forget_gate_scratch_state = args['f_b'][0].astype(np.int32)
        cell_scratch_state = args['c_b'][0].astype(np.int32)
        output_gate_scratch_state = args['o_b'][0].astype(np.int32)

        # STATE vs WEIGHTS INITIALIZED WITH BIASES
        # For each cell: compute recurrent_weight * output_state
        input_gate_scratch_state += args['r_2_i_w'][0].astype(np.int32).dot(
            args['i_state'][0].astype(np.int32))
        forget_gate_scratch_state += args['r_2_f_w'][0].astype(np.int32).dot(
            args['i_state'][0].astype(np.int32))
        cell_scratch_state += args['r_2_c_w'][0].astype(np.int32).dot(
            args['i_state'][0].astype(np.int32))
        output_gate_scratch_state += args['r_2_o_w'][0].astype(np.int32).dot(
            args['i_state'][0].astype(np.int32))

        input_gate_scratch = scale_lstm_internal_input(
            qrec, input_gate_scratch_state + input_gate_scratch, 0)
        forget_gate_scratch = scale_lstm_internal_forget(
            qrec, forget_gate_scratch_state + forget_gate_scratch, 0)
        cell_scratch = scale_lstm_internal_cell(
            qrec, cell_scratch_state + cell_scratch, 0)
        output_gate_scratch = scale_lstm_internal_output(
            qrec, output_gate_scratch_state + output_gate_scratch, 0)

        # Apply activations in internal Q * 1
        input_gate_scratch = get_activation('sigmoid', params.hard_act)(
            input_gate_scratch, internal_qtype(qrec))

        forget_gate_scratch = get_activation('sigmoid', params.hard_act)(
            forget_gate_scratch, internal_qtype(qrec))

        output_gate_scratch = get_activation('sigmoid', params.hard_act)(
            output_gate_scratch, internal_qtype(qrec))

        cell_scratch = get_activation('tanh',
                                      params.hard_act)(cell_scratch,
                                                       internal_qtype(qrec))

        # cstate = cstate * Of + Og * Oi
        if params.hard_act:
            # Scale cell state to internal Q * 1
            cstate = scale_lstm_cellin(qrec,
                                       args['c_state'][0].astype(np.int32), 0)
            cstate = cstate * forget_gate_scratch + cell_scratch * input_gate_scratch
            # cstate now in (2 * Q) * 1
        else:
            # Multiply cstate [Scstate] * Of [Sq15] and scale to [Sq12]
            # Multiply Og [Sq15] * Oi [Sq15] --> [Sq30] >> 30-12 --> [Sq12]
            # cstate is now in q12 = internal_qtype
            cstate = scale_lstm_cellin(qrec, args['c_state'][0] * forget_gate_scratch, 0) \
                + ((cell_scratch * input_gate_scratch)
                   >> (15+(15-internal_qtype(qrec).q)))

        # if params.cell_clip > 0.0:
        #     args['c_state'] = abs_clip(args['c_state'], params.cell_clip)
        # if there is a clip value this should override the min max here
        # clip here

        args['c_state'][0] = scale_lstm_cellout(qrec, cstate, 0)

        if params.hard_act:
            two_qtype = QType.Pow2(
                internal_qtype(qrec).bits,
                internal_qtype(qrec).q * 2, True)
            cell_scratch = get_activation('tanh', params.hard_act)(cstate,
                                                                   two_qtype)
            # Assume scaling from internalq * 3 -> Q7 * 1
            output_gate_scratch *= cell_scratch
        else:
            cell_scratch = get_activation('tanh', params.hard_act)(
                cstate, internal_qtype(qrec))
            # output = Og[Sq15] * tanh(cell_scratch)[Sq15] -> [Sq30] >> 15 -> [Sq15]
            output_gate_scratch = (output_gate_scratch * cell_scratch) >> 15

        output = scale_lstm_output(qrec, output_gate_scratch, 0)
        output = qrec.out_qs[0].clip(output)

        use_projection_weight = 'proj_w' in args and args['proj_w'][
            0] is not None
        use_projection_bias = 'proj_b' in args and args['proj_b'][0] is not None

        if use_projection_weight or use_projection_bias:
            raise NotImplementedError("LSTMP is not yet supported by kernel")

        #args['i_state'][0] = qrec.scale_i_state(output_gate_scratch.copy(), 0, ktype="symmetric")
        args['i_state'][0] = output.copy()
        if params.lstm_output_c_state:
            return output, args['c_state'][0]
        return output, None
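
Note: the non-hard-activation path rests on a single identity: the product of two Q15 values is Q30, and shifting right by 15 returns it to Q15, exactly as in output = (Og * tanh(cstate)) >> 15 above. In isolation:

    import numpy as np

    q = 15
    og = np.int32(round(0.8 * (1 << q)))   # output gate in Q15
    ct = np.int32(round(0.5 * (1 << q)))   # tanh(cell state) in Q15
    print(((og * ct) >> q) / (1 << q))     # ~0.4 == 0.8 * 0.5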
Example No. 27
def get_qtype(qparam1, qparam2):
    try:
        bits_idx = STATS_BITS.index(qparam1 + qparam2)
    except ValueError:
        raise TuneError("bit width is not valid")
    return QType.Pow2(STATS_BITS[bits_idx], qparam2, True)
Example No. 28
def gen_act_infos(act_params, act_q):
    comment = ""
    if isinstance(act_params, ReluActivationParameters):
        compute_in_out_scale(act_q)
        actscale = act_q.cache['scale_mul_biases_q'].qbiases[0]
        actscalen = act_q.cache['scale_mul_biases_q'].qnorms[0]
        if act_params.upper_bound is None:  # or fnode is not None:
            if act_q.in_qs[0].zero_point == 0:
                contents = np.array([actscale, actscalen, 0, 0, 0],
                                    dtype=np.int8)
                if len(comment) == 0:
                    comment = "all 0"
            else:
                fac_1 = act_q.in_qs[0].zero_point
                contents = np.array([actscale, actscalen, fac_1, 0, 0],
                                    dtype=np.int8)
                comment += str.format(
                    "in: {:05f} out: {:05f} A0: {} B0: 0 C0: 0",
                    act_q.in_qs[0].scale[0], act_q.out_qs[0].scale[0],
                    fac_1[0])
        else:
            if act_q.in_qs[0].zero_point == 0:
                fac_1 = act_q.in_qs[0].quantize(act_params.upper_bound)
                contents = np.array([actscale, actscalen, fac_1, 0, 0],
                                    dtype=np.int8)
                comment += str.format(
                    "in: {:05f} out: {:05f} A0: {} B0: 0 C0: 0",
                    act_q.in_qs[0].scale[0], act_q.out_qs[0].scale[0],
                    fac_1[0])
            else:
                fac_1 = act_q.in_qs[0].zero_point
                fac_2 = act_q.in_qs[0].quantize(act_params.upper_bound)
                contents = np.array([actscale, actscalen, fac_1, fac_2, 0],
                                    dtype=np.int8)
                comment += str.format(
                    "in: {:05f} out: {:05f} A0: {} B0: {} C0: 0",
                    act_q.in_qs[0].scale[0], act_q.out_qs[0].scale[0],
                    fac_1[0], fac_2[0])
    elif isinstance(act_params, HSigmoidActivationParameters):
        # currently combines all scaling factors into one scale and shift
        assert act_q.in_qs[0].zero_point == 0 and act_q.out_qs[
            0].zero_point == 0, "asymmetric not supported"
        fac_1, upper_bound, _ = hsigmoid_mult_gen_factors(act_params, act_q)
        contents = np.array([
            act_q.cache['scale_mul_biases_q'].qbiases[0],
            act_q.cache['scale_mul_biases_q'].qnorms[0], upper_bound, fac_1, 1
        ],
                            dtype=np.int8)
        comment += str.format(
            "in: {:05f} out: {:05f} qbias: {} qnorm: {} A0: {} B0: {} C0: 1",
            act_q.in_qs[0].scale[0], act_q.out_qs[0].scale[0],
            act_q.cache['scale_mul_biases_q'].qbiases[0],
            act_q.cache['scale_mul_biases_q'].qnorms[0], upper_bound[0],
            fac_1[0])
    elif isinstance(act_params, HSwishActivationParameters):
        # currently combines all scaling factors into one scale and shift
        assert act_q.in_qs[0].zero_point == 0 and act_q.out_qs[
            0].zero_point == 0, "asymmetric not supported"
        fac_1, upper_bound, _ = hswish_mult_gen_factors(act_q)
        contents = np.array([
            act_q.cache['scale_mul_biases_q'].qbiases[0],
            act_q.cache['scale_mul_biases_q'].qnorms[0], upper_bound, fac_1, 1
        ],
                            dtype=np.int8)
        comment += str.format(
            "in: {:05f} out: {:05f} qbias: {} qnorm: {} A0: {} B0: {} C0: 1",
            act_q.in_qs[0].scale[0], act_q.out_qs[0].scale[0],
            act_q.cache['scale_mul_biases_q'].qbiases[0],
            act_q.cache['scale_mul_biases_q'].qnorms[0], upper_bound[0],
            fac_1[0])
    elif isinstance(act_params, SoftMaxParameters):
        assert act_q.in_qs[0].zero_point == 0 and act_q.out_qs[
            0].zero_point == 0, "asymmetric not supported"
        norm = 15 + np.ceil(np.log2(act_q.in_qs[0].scale))
        contents = np.array([norm, 0, 0, 0, 0], dtype=np.int8)
        comment += str.format("in: {:05f} out: {:05f} NORM: {}",
                              act_q.in_qs[0].scale[0],
                              act_q.out_qs[0].scale[0], int(norm[0]))
    elif isinstance(act_params, LeakyActivationParameters):
        assert act_q.in_qs[0].zero_point == 0 and act_q.out_qs[
            0].zero_point == 0, "asymmetric not supported"
        compute_in_out_scale(act_q)
        leak_factor_quant = leak_mult_gen_factor_q7(act_params)
        contents = np.array([
            act_q.cache['scale_mul_biases_q'].qbiases[0],
            act_q.cache['scale_mul_biases_q'].qnorms[0], leak_factor_quant, 0,
            0
        ],
                            dtype=np.int8)
        comment += str.format(
            "in: {:05f} out: {:05f} qbias: {} qnorm: {} A0: {} B0: x C0: x",
            act_q.in_qs[0].scale[0], act_q.out_qs[0].scale[0],
            act_q.cache['scale_mul_biases_q'].qbiases[0],
            act_q.cache['scale_mul_biases_q'].qnorms[0], leak_factor_quant)
    elif isinstance(act_params,
                    (SigmoidActivationParameters, TanHActivationParameters)):
        assert act_q.in_qs[0].zero_point == 0 and act_q.out_qs[
            0].zero_point == 0, "asymmetric not supported"
        compute_in_out_scale(
            act_q,
            extra_scale=QType.Pow2(bits=32, q=7, signed=True).scale /
            act_q.in_qs[0].scale)
        contents = np.array([
            act_q.cache['scale_mul_biases_q'].qbiases[0],
            act_q.cache['scale_mul_biases_q'].qnorms[0], 0, 0, 0
        ],
                            dtype=np.int8)
        comment += str.format(
            "in: {:05f} out: {:05f} qbias: {} qnorm: {} A0: x B0: x C0: x",
            act_q.in_qs[0].scale[0], act_q.out_qs[0].scale[0],
            act_q.cache['scale_mul_biases_q'].qbiases[0],
            act_q.cache['scale_mul_biases_q'].qnorms[0])
    else:
        raise NotImplementedError("activation type not implemented")

    return contents, comment
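Aside: across these generators the first two infos bytes are a (qbias, qnorm)
pair that encodes a real rescale as an 8-bit multiplier followed by an
arithmetic right shift. A minimal sketch of how such a pair is assumed to be
consumed (apply_scale is a hypothetical name; the library's at_norm also
rounds, which this plain shift omits):

import numpy as np

def apply_scale(x: np.ndarray, qbias: int, qnorm: int) -> np.ndarray:
    # out = (x * qbias) >> qnorm, widened to 32 bit to avoid overflow
    return (x.astype(np.int32) * qbias) >> qnorm

# a real scale of ~0.711 can be encoded as qbias=91, qnorm=7 (91 / 2**7)
print(apply_scale(np.array([100, -100], dtype=np.int8), 91, 7))  # [71 -72]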
Example no. 29
    def globals_generator(cls, gen, node, qrec, pnode, fnode) -> bool:
        if not cls.cache_values(node, qrec):
            return False
        in_q = qrec.in_qs[0]
        out_q = qrec.out_qs[0]
        comment = f'in q: {in_q} out_q: {out_q}'
        if qrec.cache['kernel_type'] == 'KOP_CONVERT_FP_FP_ZEROPOINT':
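            # same scale on both sides: the conversion reduces to a constant
            # offset between the two zero points, taken modulo the type range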
            bits = 8 if in_q.dtype in [np.int8, np.uint8] else 16
            if in_q.signed:
                offset = ((int(math.pow(2, bits)) + in_q.zero_point[0] -
                           out_q.zero_point[0]) %
                          int(math.pow(2, bits))).astype(out_q.dtype)
            else:
                offset = (int(math.pow(2, bits)) - in_q.zero_point[0] +
                          out_q.zero_point[0]).astype(out_q.dtype)
            contents = np.array(list(offset.tobytes()) + ([0] * 7),
                                dtype=np.uint8)
        elif qrec.cache['kernel_type'] == 'KOP_CONVERT_FP_FP':
            # no infos needed
            return True
        elif qrec.cache['kernel_type'] == 'KOP_CONVERT_FP_FP_SCALE':
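            # the scale changes too: fold the in/out scales into a single
            # multiplier and precompute the zero-point adjustment on the wider
            # of the two formats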
            scale = in_q.scale / out_q.scale
            in_abs_zp = in_q.zero_point.astype(np.int32)
            out_abs_zp = out_q.zero_point.astype(np.int32)
            if out_q.bits > in_q.bits:
                zero_adjust = (np.round(-in_abs_zp * scale) +
                               out_abs_zp).astype(np.int32)
            else:
                zero_adjust = (-in_abs_zp +
                               np.round(out_abs_zp * 1 / scale)).astype(
                                   np.int32)

            zero_adjust = list(zero_adjust.tobytes())

            if len(scale) > 1:
                raise NotImplementedError(
                    'multiscale conversion not supported')
            scale = scale[0]
            if in_q.dtype_bits == 8 and out_q.dtype_bits == 16:
                # scale Q16 * Q8 OK
                scale_adjust = MultMulBiasScaleQType(scale=scale,
                                                     dtype=np.int16,
                                                     available_bits=16)
            else:
                scale_adjust = MultMulBiasScaleQType(scale=scale,
                                                     dtype=np.int8,
                                                     available_bits=8)
            qbias = list(scale_adjust.qbiases.tobytes())
            qbias = qbias + [0] * (2 - len(qbias))
            qnorm = list(scale_adjust.qnorms.tobytes())
            contents = np.array(zero_adjust + qbias + qnorm + [0],
                                dtype=np.int8)
        elif qrec.cache['kernel_type'] == 'KOP_CONVERT_FL_FP':
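            # float to fixed point: add the output zero point expressed in
            # float units, then multiply by 1/out_scale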
            qbias = list((1 / out_q.scale).astype(np.float32).tobytes())
            zero_adjust = list((out_q.zero_point.astype(np.int32) *
                                out_q.scale).astype(np.float32).tobytes())
            contents = np.array(zero_adjust + qbias, dtype=np.int8)
        elif qrec.cache['kernel_type'] == 'KOP_CONVERT_FP_FL':
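            # fixed point to float: subtract the input zero point, then
            # multiply by in_scale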
            qbias = list((in_q.scale).astype(np.float32).tobytes())
            zero_adjust = list((-in_q.zero_point.astype(np.int32)).astype(
                np.float32).tobytes())
            contents = np.array(zero_adjust + qbias, dtype=np.int8)
        else:
            raise ValueError(f"strange dtype change in {pnode.name}")
        cname, file_name = gen_constant(gen, pnode, pnode, INFOS)
        const_info = ConstantInfo(file_name,
                                  QType.Pow2(bits=8, q=0, signed=True),
                                  contents=contents)

        gen.globals.append(
            GlobalArgInfo("int8",
                          cname,
                          gen.opts['default_global_home_location'],
                          gen.opts['default_global_exec_location'],
                          const_info=const_info,
                          comment=comment))
        return True
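To make the zeropoint-only branch concrete, a worked example with hypothetical
values: converting a signed int8 input with zero point -5 to an unsigned uint8
output with zero point 128, the scale staying the same:

import math
import numpy as np

bits, in_zp, out_zp = 8, -5, 128
# signed-input branch from globals_generator above
offset = (int(math.pow(2, bits)) + in_zp - out_zp) % int(math.pow(2, bits))
print(offset)  # 123
# the int8 zero point -5 has bit pattern 251 viewed as uint8; stepping back by
# the offset (mod 256) lands on the output zero point (whether the kernel adds
# or subtracts the stored offset is its own convention, not shown here)
print((int(np.int8(-5).astype(np.uint8)) - offset) % 256)  # 128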
Example no. 30
def act_infos(gen,
              pnode,
              fnode,
              act_params,
              act_q,
              extra1=0,
              extra2=0,
              extra3=0,
              extra4=0,
              extra_name=''):
    if isinstance(pnode, FilterParameters):
        comment = str.format("BiasQ: {}", extra1)
    elif isinstance(pnode, MatrixAddParameters):
        comment = str.format(
            "In1Scale: {} In1ScaleN: {} OutScale: {} OutScaleN: {}", extra1,
            extra2, extra3, extra4)
    else:
        comment = ""

    if act_params is None:
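        # no activation: only the caller-supplied extra fields are filled in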
        contents = np.array([0, 0, 0, 0, 0, extra1, extra2, extra3, extra4],
                            dtype=np.int8)
    elif isinstance(act_params, ReluActivationParameters):
        actscale = act_q.scale_mul_biases_q.qbiases[0]
        actscalen = act_q.scale_mul_biases_q.qnorms[0]
        if act_params.upper_bound is None:  # or fnode is not None:
            contents = np.array(
                [actscale, actscalen, 0, 0, 0, extra1, extra2, extra3, extra4],
                dtype=np.int8)
            if len(comment) == 0:
                comment = "all 0"
        else:
            fac_1 = act_q.in_qs[0].quantize(act_params.upper_bound)
            contents = np.array([
                actscale, actscalen, fac_1, 0, 0, extra1, extra2, extra3,
                extra4
            ],
                                dtype=np.int8)
            comment += str.format("in: {:05f} out: {:05f} A0: {} B0: 0 C0: 0",
                                  act_q.in_qs[0].scale[0],
                                  act_q.out_qs[0].scale[0], fac_1[0])
    elif isinstance(act_params, HSigmoidActivationParameters):
        # currently combines all scaling factors into one scale and shift
        fac_1, upper_bound, _ = hsigmoid_mult_gen_factors(act_params, act_q)
        contents = np.array([
            act_q.scale_mul_biases_q.qbiases[0],
            act_q.scale_mul_biases_q.qnorms[0], upper_bound, fac_1, 1, extra1,
            extra2, extra3, extra4
        ],
                            dtype=np.int8)
        comment += str.format(
            "in: {:05f} out: {:05f} qbias: {} qnorm: {} A0: {} B0: {} C0: 1",
            act_q.in_qs[0].scale[0], act_q.out_qs[0].scale[0],
            act_q.scale_mul_biases_q.qbiases[0],
            act_q.scale_mul_biases_q.qnorms[0], upper_bound[0], fac_1[0])
    elif isinstance(act_params, HSwishActivationParameters):
        # currently combines all scaling factors into one scale and shift
        fac_1, upper_bound, _ = hswish_mult_gen_factors(act_q)
        contents = np.array([
            act_q.scale_mul_biases_q.qbiases[0],
            act_q.scale_mul_biases_q.qnorms[0], upper_bound, fac_1, 1, extra1,
            extra2, extra3, extra4
        ],
                            dtype=np.int8)
        comment += str.format(
            "in: {:05f} out: {:05f} qbias: {} qnorm: {} A0: {} B0: {} C0: 1",
            act_q.in_qs[0].scale[0], act_q.out_qs[0].scale[0],
            act_q.scale_mul_biases_q.qbiases[0],
            act_q.scale_mul_biases_q.qnorms[0], upper_bound[0], fac_1[0])
    elif isinstance(act_params, SoftMaxParameters):
        norm = 15 + np.ceil(np.log2(act_q.in_qs[0].scale))
        contents = np.array([norm[0], 0, 0, 0, 0, extra1, extra2, extra3, extra4],
                            dtype=np.int8)
        comment += str.format("in: {:05f} out: {:05f} NORM: {}",
                              act_q.in_qs[0].scale[0],
                              act_q.out_qs[0].scale[0], int(norm[0]))
    elif isinstance(act_params, LeakyActivationParameters):
        act_q.set_scale()
        leak_factor_quant = leak_mult_gen_factor_q7(act_params)
        contents = np.array([
            act_q.scale_mul_biases_q.qbiases[0],
            act_q.scale_mul_biases_q.qnorms[0], leak_factor_quant, 0, 0,
            extra1, extra2, extra3, extra4
        ],
                            dtype=np.int8)
        comment += str.format(
            "in: {:05f} out: {:05f} qbias: {} qnorm: {} A0: {} B0: x C0: x",
            act_q.in_qs[0].scale[0], act_q.out_qs[0].scale[0],
            act_q.scale_mul_biases_q.qbiases[0],
            act_q.scale_mul_biases_q.qnorms[0], leak_factor_quant)
    else:
        raise NotImplementedError("activation type not implemented")

    if isinstance(pnode, (GlobalPoolParameters, PoolingParameters)):
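        # pooling nodes overwrite the activation infos set above: only the
        # in/out rescale and the extra fields are kept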
        contents = np.array([
            act_q.scale_mul_biases_q.qbiases[0],
            act_q.scale_mul_biases_q.qnorms[0], 0, 0, 0, extra1, extra2,
            extra3, extra4
        ],
                            dtype=np.int8)
        comment += str.format("in: {:05f} out: {:05f}",
                              act_q.in_qs[0].scale[0],
                              act_q.out_qs[0].scale[0])

    cname, file_name = gen_constant(gen, pnode, fnode, INFOS, extra_name)
    const_info = ConstantInfo(file_name,
                              QType.Pow2(bits=8, q=0, signed=True),
                              contents=contents)

    gen.globals.append(
        GlobalArgInfo("int8",
                      cname,
                      gen.opts['default_global_home_location'],
                      gen.opts['default_global_exec_location'],
                      const_info=const_info,
                      comment=comment))
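As a sanity check on the SoftMaxParameters branches above: for a Pow2-quantized
input with scale 2**-q, norm = 15 + ceil(log2(scale)) = 15 - q, which reads as
the left shift aligning a Qq input with a Q15 value (assuming, as the constant
15 suggests, that the softmax kernel works in Q15 internally):

import numpy as np

for q in (8, 12, 15):
    scale = np.array([2.0 ** -q])   # Pow2 input scale, shape (1,)
    norm = 15 + np.ceil(np.log2(scale))
    print(q, int(norm[0]))          # 8 -> 7, 12 -> 3, 15 -> 0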