Example #1
def get_type(quantizer_config):
    # Translate a serialized QKeras quantizer config into an hls4ml precision type
    width = quantizer_config['config']['bits']
    integer = quantizer_config['config'].get('integer', 0)
    if width == integer:
        if width == 1:
            return IntegerPrecisionType(width=1, signed=False)
        else:
            return IntegerPrecisionType(width=width, signed=True)
    else:
        return FixedPrecisionType(width=width + 1,
                                  integer=integer + 1,
                                  signed=True)
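
For orientation: the quantizer_config argument mirrors a serialized QKeras quantizer, i.e. a dict with a 'class_name' and a nested 'config'. A minimal, purely illustrative call (config values made up):

cfg = {'class_name': 'quantized_bits', 'config': {'bits': 8, 'integer': 3}}
t = get_type(cfg)
# bits != integer, so this variant returns
# FixedPrecisionType(width=9, integer=4, signed=True) -- one extra bit for the sign.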
Example #2
    def transform(self, model, node):
        # Rebuild the node's output precision so that it carries the configured
        # rounding/saturation modes, and mirror it onto the accumulator type if present.
        oldtype = node.get_output_variable().type.precision
        if isinstance(oldtype, IntegerPrecisionType):
            newtype = IntegerPrecisionType(oldtype.width, oldtype.signed)
        elif isinstance(oldtype, FixedPrecisionType):
            newtype = FixedPrecisionType(oldtype.width, oldtype.integer, oldtype.signed,
                                         self.rounding_mode, self.saturation_mode, self.saturation_bits)
        else:  # in case the precision is a string
            newtype = self.precision_string_modify(oldtype)
        node.get_output_variable().type.precision = newtype
        if node.get_attr('accum_t') is not None:
            node.set_attr('accum_t', newtype)
        return False
Example #3
def get_type(quantizer_config):
    # Translate a serialized QKeras quantizer config into an hls4ml precision type
    width = quantizer_config['config']['bits']
    integer = quantizer_config['config'].get('integer', 0)
    if quantizer_config['class_name'] == 'quantized_po2':
        return ExponentPrecisionType(width=width, signed=True)
    if width == integer:
        if width == 1:
            return XnorPrecisionType()
        else:
            return IntegerPrecisionType(width=width, signed=True)
    else:
        return FixedPrecisionType(width=width,
                                  integer=integer + 1,
                                  signed=True)
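
Unlike Example #1, this variant special-cases power-of-two quantizers and 1-bit all-integer configs, and it does not add an extra bit to the total width of the fixed-point type. Two illustrative calls with made-up configs:

cfg = {'class_name': 'quantized_po2', 'config': {'bits': 4}}
get_type(cfg)  # -> ExponentPrecisionType(width=4, signed=True)

cfg = {'class_name': 'quantized_bits', 'config': {'bits': 1, 'integer': 1}}
get_type(cfg)  # -> XnorPrecisionType()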
Example #4
    def __init__(self, config):
        self.quantizer_fn = get_quantizer(config)
        self.alpha = config['config'].get('alpha', None)
        if config['class_name'] == 'quantized_bits':
            self.bits = config['config']['bits']
            self.hls_type = get_type(config)
        # ! includes stochastic_ternary
        elif 'ternary' in config['class_name']:
            self.bits = 2
            self.hls_type = IntegerPrecisionType(width=2, signed=True)
        # ! includes stochastic_binary
        elif 'binary' in config['class_name']:
            self.bits = 1
            self.hls_type = XnorPrecisionType()
        else:
            print("Unsupported quantizer: " + config['class_name'])
            self.bits = 16
            self.hls_type = FixedPrecisionType(width=16,
                                               integer=6,
                                               signed=True)
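
Assuming this __init__ belongs to a small quantizer-wrapper class (called QuantizerWrapper below, a name chosen only for this sketch), a ternary config would resolve as follows:

q = QuantizerWrapper({'class_name': 'stochastic_ternary', 'config': {'alpha': 1}})
# 'ternary' appears in the class name, so:
#   q.bits == 2
#   q.hls_type == IntegerPrecisionType(width=2, signed=True)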
Example #5
    def transform(self, model, node):
        shape = node.get_input_variable().shape
        scale = np.full(shape, 0.5 / node.get_attr('threshold', 0.5))
        bias = np.zeros_like(scale)
        node.set_attr('threshold', 0.5)

        attrs = {
            'name' : node.get_attr('name') + '_scale',
            'class_name' : 'Alpha',
            'inputs' : node.get_input_node().outputs,
            'outputs' : node.inputs,
            'n_filt' : node.get_attr('n_filt', -1),
            'reuse_factor' : node.get_attr('reuse_factor'),
            # These should just be placeholders
            'bias_t' : IntegerPrecisionType(1),
            'scale_t' : FixedPrecisionType(16,6),
            'Trace' : node.get_attr('Trace', False)
        }

        layer = model.make_node('ApplyAlpha', node.name + '_scale', attrs, node.inputs.copy())
        layer.add_weights(scale)
        layer.add_bias(bias)
        model.insert_node(layer, before=node)
        return True
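
A quick worked example of the rescaling this pass sets up: for a per-element threshold t, the inserted scale layer multiplies the input by 0.5 / t, after which a fixed threshold of 0.5 is equivalent to the original one.

import numpy as np

t = 0.25
scale = np.full((4,), 0.5 / t)  # -> array([2., 2., 2., 2.])
# x > 0.25 on the original input is the same as (x * 2.0) > 0.5,
# which is why the node's 'threshold' attribute can be reset to 0.5.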
Example #6
    def transform(self, model, node):
        # The quantizer has to be applied to set the scale attribute
        # This must be applied to the _unquantized_ weights to obtain the correct scale
        quantizer = node.weights['weight'].quantizer.quantizer_fn # get QKeras quantizer
        weights = node.weights['weight'].data_unquantized # get weights
        qweights = quantizer(tf.convert_to_tensor(weights))
        if isinstance(quantizer.scale, (int, float)):
            scale = np.ones(shape=node.get_output_variable().shape) * quantizer.scale
        else:
            scale = quantizer.scale.numpy()
        unscale = 1. / scale

        new_weights = unscale * qweights # use the quantized weights for safety

        qcfg = quantizer.get_config()
        alpha = qcfg['alpha']
        # Set the alpha to 1 to avoid hitting this pass again
        qcfg['alpha'] = 1
        node.weights['weight'].quantizer.quantizer_fn = quantizer.from_config(qcfg)

        # update the weights also applying the hls4ml quantizer
        # this is only needed for the binary layers which encode -1 as 0
        node.weights['weight'].data = node.weights['weight'].quantizer(new_weights.numpy())

        # Move the biases from the Dense layer to the ApplyAlpha layer
        bias = node.weights['bias'].data
        bias_quantizer = None
        if hasattr(node.weights['bias'], 'quantizer'):
            bias_quantizer = node.weights['bias'].quantizer
        node.weights['bias'].data = np.zeros(bias.shape)

        has_w_quant = node.get_attr('weight_quantizer') is not None 
        has_b_quant = node.get_attr('bias_quantizer') is not None
        if has_w_quant: 
            node.attributes['weight_quantizer'].alpha = 1
        if has_b_quant:
            node.attributes['bias_quantizer'].alpha = 1

        # insert a Batch Normalization layer to apply the alpha scale
        if alpha == 'auto_po2':
            scale_bits = np.abs(np.log2(scale)).max().astype('int') + 1
            scale_t = ExponentPrecisionType(width=scale_bits, signed=True)
            scale_q = QKerasPO2Quantizer({'class_name' : 'quantized_po2', 'config': {'bits': scale_bits}})
        else:
            scale_t = FixedPrecisionType() # TODO: automate this
            scale_q = None

        attrs = {
            'name' : node.get_attr('name') + '_alpha',
            'class_name' : 'Alpha',
            'inputs' : node.outputs,
            'n_in' : node.get_attr('n_out'),
            'n_filt' : node.get_attr('n_filt', -1),
            'reuse_factor' : node.get_attr('reuse_factor'),
            'bias_t' : node.weights['bias'].type, 
            'scale_t' : scale_t,
            'Trace' : node.get_attr('Trace', False) 
        }
        alpha_layer = model.make_node('ApplyAlpha', node.name + '_alpha', attrs, node.outputs)

        alpha_layer.add_weights(scale, quantizer=scale_q)
        alpha_layer.add_bias(bias, quantizer=bias_quantizer)
        model.insert_node(alpha_layer)
        return True
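
In the auto_po2 branch above, the width of the power-of-two scale type is derived from the largest exponent present in the scale tensor, plus one. A small numeric sketch:

import numpy as np

scale = np.array([0.125, 4.0])  # exponents -3 and +2
scale_bits = np.abs(np.log2(scale)).max().astype('int') + 1  # -> 4
# scale_t would then be ExponentPrecisionType(width=4, signed=True)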
Example #7
    def transform(self, model, node):
        # The quantizer has to be applied to set the scale attribute
        # This must be applied to the _unquantized_ weights to obtain the correct scale
        quantizer = node.weights['weight'].quantizer.quantizer_fn  # get QKeras quantizer
        weights = node.weights['weight'].data_unquantized  # get weights
        qweights = quantizer(tf.convert_to_tensor(weights))
        scale = quantizer.scale.numpy()
        unscale = 1. / scale

        new_weights = unscale * qweights  # use the quantized weights for safety

        # Set the alpha to 1 to avoid hitting this pass again
        qcfg = quantizer.get_config()
        qcfg['alpha'] = 1
        node.weights['weight'].quantizer.quantizer_fn = quantizer.from_config(
            qcfg)

        # update the weights also applying the hls4ml quantizer
        # this is only needed for the binary layers which encode -1 as 0
        node.weights['weight'].data = node.weights['weight'].quantizer(
            new_weights.numpy())

        # Move the biases from the Dense layer to the ApplyAlpha layer
        bias = node.weights['bias'].data
        bias_quantizer = None
        if hasattr(node.weights['bias'], 'quantizer'):
            bias_quantizer = node.weights['bias'].quantizer
        node.weights['bias'].data = np.zeros(bias.shape)

        has_w_quant = node.get_attr('weight_quantizer') is not None
        has_b_quant = node.get_attr('bias_quantizer') is not None
        if has_w_quant:
            node.attributes['weight_quantizer'].alpha = 1
        if has_b_quant:
            node.attributes['bias_quantizer'].alpha = 1

        # insert a Batch Normalization layer to apply the alpha scale
        attrs = {
            'name': node.get_attr('name') + '_alpha',
            'class_name': 'Alpha',
            'inputs': node.outputs,
            'n_in': node.get_attr('n_out'),
            'n_filt': node.get_attr('n_filt') if node.get_attr('n_filt') is not None else -1,
            'reuse_factor': node.get_attr('reuse_factor'),
            'bias_t': node.weights['bias'].type,
            'scale_t': FixedPrecisionType()  # TODO automate this
        }
        alpha_layer = model.make_node('ApplyAlpha', node.name + '_alpha',
                                      attrs, node.outputs)
        alpha_layer.add_weights(scale, quantizer=None)
        alpha_layer.add_bias(bias, quantizer=bias_quantizer)
        model.insert_node(alpha_layer)
        return True