Example #1
# ExponentPrecisionType, XnorPrecisionType, IntegerPrecisionType and
# FixedPrecisionType are hls4ml's internal precision types (the exact import
# path depends on the hls4ml version, e.g. hls4ml.model.types in recent releases).
def get_type(quantizer_config):
    '''Map a serialized QKeras quantizer config onto an hls4ml precision type.'''
    width = quantizer_config['config']['bits']
    integer = quantizer_config['config'].get('integer', 0)
    # power-of-two quantizers only need to store a signed exponent
    if quantizer_config['class_name'] == 'quantized_po2':
        return ExponentPrecisionType(width=width, signed=True)
    if width == integer:
        # no fractional bits: a single bit is XNOR (binary), otherwise a plain integer
        if width == 1:
            return XnorPrecisionType()
        else:
            return IntegerPrecisionType(width=width, signed=True)
    else:
        # QKeras' 'integer' excludes the sign bit; hls4ml's includes it, hence the +1
        return FixedPrecisionType(width=width,
                                  integer=integer + 1,
                                  signed=True)
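
A minimal usage sketch (not from the original source): the config dicts below mimic QKeras's serialized quantizer format, and get_type plus the hls4ml precision types are assumed to be importable in the same module.

# three illustrative QKeras quantizer configs and the branch each one takes
fixed_cfg  = {'class_name': 'quantized_bits', 'config': {'bits': 8, 'integer': 2}}
binary_cfg = {'class_name': 'quantized_bits', 'config': {'bits': 1, 'integer': 1}}
po2_cfg    = {'class_name': 'quantized_po2',  'config': {'bits': 4}}

t_fixed  = get_type(fixed_cfg)   # fixed point: width=8, integer=2+1=3, signed
t_binary = get_type(binary_cfg)  # bits == integer == 1 -> XNOR (binary) type
t_po2    = get_type(po2_cfg)     # power-of-two -> 4-bit signed exponent type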
Example #2
    def __init__(self, config):
        # 'config' is a serialized QKeras quantizer dict: {'class_name': ..., 'config': {...}}
        self.bits = config['config']['bits']
        # rebuild the callable QKeras quantizer from its serialized config
        self.quantizer_fn = get_quantizer(config)
        # power-of-two values only need a signed exponent of 'bits' width on the HLS side
        self.hls_type = ExponentPrecisionType(width=self.bits, signed=True)
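
For context, a small usage sketch: the class name QKerasPO2Quantizer is taken from Example #3 below, where this constructor is called for the auto_po2 scales, and the config dict follows the same serialized QKeras format as above.

po2_config = {'class_name': 'quantized_po2', 'config': {'bits': 4}}
scale_q = QKerasPO2Quantizer(po2_config)
# scale_q.bits          -> 4
# scale_q.quantizer_fn  -> the rebuilt QKeras quantized_po2 callable
# scale_q.hls_type      -> 4-bit signed ExponentPrecisionType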
Example #3
    def transform(self, model, node):
        # Factor the QKeras alpha scale out of the layer weights: the layer keeps the
        # unscaled quantized weights and an inserted ApplyAlpha node re-applies the scale.
        # Requires numpy (np) and tensorflow (tf) imported at module level.
        # The quantizer has to be applied to set the scale attribute;
        # it must be applied to the _unquantized_ weights to obtain the correct scale
        quantizer = node.weights['weight'].quantizer.quantizer_fn # get the QKeras quantizer
        weights = node.weights['weight'].data_unquantized # get the pre-quantization weights
        qweights = quantizer(tf.convert_to_tensor(weights))
        if isinstance(quantizer.scale, (int, float)):
            scale = np.ones(shape=node.get_output_variable().shape) * quantizer.scale
        else:
            scale = quantizer.scale.numpy()
        unscale = 1. / scale

        new_weights = unscale * qweights # divide out the scale; use the quantized weights for safety

        qcfg = quantizer.get_config()
        alpha = qcfg['alpha']
        # Set the alpha to 1 to avoid hitting this pass again
        qcfg['alpha'] = 1
        node.weights['weight'].quantizer.quantizer_fn = quantizer.from_config(qcfg)

        # update the weights also applying the hls4ml quantizer
        # this is only needed for the binary layers which encode -1 as 0
        node.weights['weight'].data = node.weights['weight'].quantizer(new_weights.numpy())

        # Move the biases from the Dense layer to the ApplyAlpha layer
        bias = node.weights['bias'].data
        bias_quantizer = None
        if hasattr(node.weights['bias'], 'quantizer'):
            bias_quantizer = node.weights['bias'].quantizer
        node.weights['bias'].data = np.zeros(bias.shape)

        has_w_quant = node.get_attr('weight_quantizer') is not None
        has_b_quant = node.get_attr('bias_quantizer') is not None
        if has_w_quant:
            node.attributes['weight_quantizer'].alpha = 1
        if has_b_quant:
            node.attributes['bias_quantizer'].alpha = 1

        # insert an ApplyAlpha (BatchNormalization-style scale) layer to apply the alpha scale
        if alpha == 'auto_po2':
            scale_bits = np.abs(np.log2(scale)).max().astype('int') + 1
            scale_t = ExponentPrecisionType(width=scale_bits, signed=True)
            scale_q = QKerasPO2Quantizer({'class_name' : 'quantized_po2', 'config': {'bits': scale_bits}})
        else:
            scale_t = FixedPrecisionType() # TODO: automate this
            scale_q = None

        attrs = {
            'name' : node.get_attr('name') + '_alpha',
            'class_name' : 'Alpha',
            'inputs' : node.outputs,
            'n_in' : node.get_attr('n_out'),
            'n_filt' : node.get_attr('n_filt', -1),
            'reuse_factor' : node.get_attr('reuse_factor'),
            'bias_t' : node.weights['bias'].type,
            'scale_t' : scale_t,
            'Trace' : node.get_attr('Trace', False)
        }
        alpha_layer = model.make_node('ApplyAlpha', node.name + '_alpha', attrs, node.outputs)

        alpha_layer.add_weights(scale, quantizer=scale_q)
        alpha_layer.add_bias(bias, quantizer=bias_quantizer)
        model.insert_node(alpha_layer)
        return True
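
A self-contained NumPy sketch (toy values, not from the source) of the arithmetic this pass relies on: the layer keeps the weights divided by the per-channel alpha scale, the inserted ApplyAlpha node multiplies the scale back in, and for auto_po2 scales the exponent width is derived exactly as in the scale_bits line above.

import numpy as np

# toy power-of-two channel scales, as an auto_po2 alpha would produce
scale = np.array([0.5, 2.0, 0.25, 1.0])
qweights = np.array([0.375, -1.5, 0.125, 0.75])  # toy quantized weights

new_weights = (1.0 / scale) * qweights   # what stays in the Dense/Conv layer
restored = scale * new_weights           # what ApplyAlpha re-applies downstream
assert np.allclose(restored, qweights)   # the factorization is exact

# exponent width needed to represent every scale, mirroring scale_bits above
scale_bits = int(np.abs(np.log2(scale)).max().astype('int') + 1)
print(scale_bits)  # 3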