Example #1
 def __init__(self, bits=2):
     if bits == 1:
         hls_type = IntegerPrecisionType(width=1, signed=False)
     elif bits == 2:
         hls_type = IntegerPrecisionType(width=2)
     else:
         raise Exception('BinaryQuantizer supports 1 or 2 bits, but called with bits={}'.format(bits))
     super(BinaryQuantizer, self).__init__(bits, hls_type)
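A minimal usage sketch of the two branches above, assuming (as the other examples here suggest) that the base class simply stores the bits and hls_type passed to it:

q_xnor = BinaryQuantizer(bits=1)  # hls_type: unsigned 1-bit IntegerPrecisionType
q_bin = BinaryQuantizer(bits=2)   # hls_type: signed 2-bit IntegerPrecisionType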
Example #2
File: qkeras.py  Project: zzulb/hls4ml
 def __init__(self, config, xnor=False):
     self.bits = 1 if xnor else 2
     self.hls_type = IntegerPrecisionType(
         width=1, signed=False) if xnor else IntegerPrecisionType(
             width=2, signed=True)
     self.alpha = config['config']['alpha']
     # Use the QKeras quantizer to handle any stochastic / alpha stuff
     self.quantizer_fn = get_quantizer(config)
     # Then we use our BinaryQuantizer to convert to '0,1' format
     self.binary_quantizer = BinaryQuantizer(
         1) if xnor else BinaryQuantizer(2)
Example #3
    def transform(self, model, node):
        # Compute the required precision and update the variables
        # Number of bits for output is log2 of number of input nodes
        # Since this is the number of uint<1>'s which are summed
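        # e.g. n_in = 128 -> nbits = ceil(log2(128)) + 2 = 9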
        nbits = int(np.ceil(np.log2(node.attributes['n_in'])) + 2)
        out_type = IntegerPrecisionType(width=nbits)
        node.set_attr('accum_t', out_type)
        out_var = node.get_output_variable()
        out_var.type.precision = out_type

        quantized_data = None
        quantized_precision = None
        quantizer = node.get_attr('weight_quantizer')
        if quantizer.__class__.__name__ == 'BinaryQuantizer':
            quantized_precision = XnorPrecisionType()
        elif quantizer.__class__.__name__ == 'TernaryQuantizer':
            quantized_precision = IntegerPrecisionType(width=2)
        else:
            print('WARNING: Unknown quantizer - {}. Bailing out'.format(
                quantizer.__class__.__name__))
            return False
        quantizer.bits = quantized_precision.width
        quantizer.hls_type = quantized_precision
        quantized_data = quantizer(node.weights['weight'].data)

        weights = node.weights['weight']
        weights.data = quantized_data
        weights.type.name = 'weight{index}_t'.format(index=node.index)
        weights.update_precision(quantized_precision)

        bias = node.weights['bias']
        bias.data = np.zeros(shape=(node.get_attr('n_out')))
        bias.type.name = 'bias{index}_t'.format(index=node.index)
        bias.nzeros = 0
        bias.update_precision(quantized_precision)

        # If followed by a BatchNormalizationQuantizedTanh, update its input
        # precision and requantise its thresholds
        bd_out_nodes = node.get_output_nodes()
        for out_node in bd_out_nodes:
            if out_node.__class__.__name__ == 'BatchNormalizationQuantizedTanh':
                var_names = []
                if quantizer.__class__.__name__ == 'BinaryQuantizer':
                    var_names.append('threshold')
                elif quantizer.__class__.__name__ == 'TernaryQuantizer':
                    var_names.append('threshold_hi')
                    var_names.append('threshold_lo')
                for var_name in var_names:
                    threshold_var = out_node.weights[var_name]
                    threshold_var.update_precision(out_type)
                    threshold_var.data = np.floor(threshold_var.data)

        return False
Example #4
 def initialize(self):
     inp = self.get_input_variable()
     shape = inp.shape
     dims = inp.dim_names
     precision_bits = re.search('.+<(.+?)>',
                                inp.type.precision).group(1).split(',')
     if 'int' in str(inp.type.precision):
         W = int(precision_bits[0])
         I = W
         F = 0
     elif 'fixed' in str(inp.type.precision):
         W = int(precision_bits[0])
         I = int(precision_bits[1])
         F = W - I
     original_name = self.attributes.get('original_name')
     variance = self.model.get_weights_data(original_name,
                                            'moving_variance')
     mean = self.model.get_weights_data(original_name, 'moving_mean')
     gamma = self.model.get_weights_data(original_name, 'gamma')
     beta = self.model.get_weights_data(original_name, 'beta')
     epsilon = self.attributes.get('epsilon')
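     # The batch norm output gamma*(x - mean)/sqrt(variance + epsilon) + beta
     # changes sign at x = mean - beta*sqrt(variance + epsilon)/gamma, which is
     # used as the decision threshold below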
     threshold = mean - beta * np.sqrt(variance + epsilon) / gamma
     if self.get_attr('quantize') == 2:
         self.add_output_variable(shape,
                                  dims,
                                  precision=IntegerPrecisionType(
                                      width=1, signed=False))
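         # Snap the threshold onto the input's 2**-F grid so the fixed-point
         # comparison against the input is exact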
         threshold = np.floor(threshold * 2**F) / 2**F
         self.add_weights_variable(name='threshold',
                                   var_name='t{index}',
                                   data=threshold,
                                   type_name='threshold{index}_t',
                                   precision=inp.type.precision)
     elif self.get_attr('quantize') == 3:
         self.add_output_variable(shape,
                                  dims,
                                  precision=IntegerPrecisionType(width=2))
         threshold_hi = 0.5 / (gamma /
                               np.sqrt(variance + epsilon)) + threshold
         threshold_lo = -0.5 / (gamma /
                                np.sqrt(variance + epsilon)) + threshold
         threshold_hi = np.floor(threshold_hi * 2**F) / 2**F
         threshold_lo = np.floor(threshold_lo * 2**F) / 2**F
         self.add_weights_variable(name='threshold_hi',
                                   var_name='th{index}',
                                   data=threshold_hi,
                                   type_name='threshold_hi_{index}_t',
                                   precision=inp.type.precision)
         self.add_weights_variable(name='threshold_lo',
                                   var_name='tl{index}',
                                   data=threshold_lo,
                                   type_name='threshold_lo_{index}_t',
                                   precision=inp.type.precision)
Example #5
File: qkeras.py  Project: zzulb/hls4ml
def get_type(quantizer_config):
    width = quantizer_config['config']['bits']
    integer = quantizer_config['config'].get('integer', 0)
    if width == integer:
        if width == 1:
            return IntegerPrecisionType(width=1, signed=False)
        else:
            return IntegerPrecisionType(width=width, signed=True)
    else:
        return FixedPrecisionType(width=width + 1,
                                  integer=integer + 1,
                                  signed=True)
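A quick sketch of how this helper behaves; the quantizer_config dicts below are hypothetical stand-ins following the {'config': {'bits', 'integer'}} layout read above:

int_like = {'config': {'bits': 4, 'integer': 4}}    # bits == integer
fixed_like = {'config': {'bits': 6, 'integer': 0}}  # fractional bits present
print(get_type(int_like).width)    # 4, i.e. IntegerPrecisionType(width=4, signed=True)
print(get_type(fixed_like).width)  # 7, i.e. FixedPrecisionType(width=6+1, integer=0+1, signed=True)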
Example #6
 def initialize(self):
     inp = self.get_input_variable()
     shape = inp.shape
     dims = inp.dim_names
     precision = self.model.config.backend.convert_precision_string(
         inp.type.precision)
     W, I, F = precision.width, precision.integer, precision.fractional
     original_name = self.attributes.get('original_name')
     variance = self.model.get_weights_data(original_name,
                                            'moving_variance')
     mean = self.model.get_weights_data(original_name, 'moving_mean')
     gamma = self.model.get_weights_data(original_name, 'gamma')
     beta = self.model.get_weights_data(original_name, 'beta')
     mean_quantizer = self.get_attr('mean_quantizer')
     variance_quantizer = self.get_attr('variance_quantizer')
     gamma_quantizer = self.get_attr('gamma_quantizer')
     beta_quantizer = self.get_attr('beta_quantizer')
     mean = mean_quantizer(mean) if mean_quantizer is not None else mean
     variance = variance_quantizer(
         variance) if variance_quantizer is not None else variance
     gamma = gamma_quantizer(
         gamma) if gamma_quantizer is not None else gamma
     beta = beta_quantizer(beta) if beta_quantizer is not None else beta
     epsilon = self.attributes.get('epsilon')
     threshold = mean - beta * np.sqrt(variance + epsilon) / gamma
     if self.get_attr('quantize') == 2:
         self.add_output_variable(shape,
                                  dims,
                                  precision=XnorPrecisionType())
         threshold = np.floor(threshold * 2**F) / 2**F
         self.add_weights_variable(name='threshold',
                                   var_name='t{index}',
                                   data=threshold,
                                   type_name='threshold{index}_t',
                                   precision=inp.type.precision)
     elif self.get_attr('quantize') == 3:
         self.add_output_variable(shape,
                                  dims,
                                  precision=IntegerPrecisionType(width=2))
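         # For the ternary case the normalised output is compared against +/-0.5:
         # gamma*(x - mean)/sqrt(variance + epsilon) + beta = +/-0.5 shifts the
         # base threshold by +/-0.5*sqrt(variance + epsilon)/gamma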
         threshold_hi = 0.5 / (gamma /
                               np.sqrt(variance + epsilon)) + threshold
         threshold_lo = -0.5 / (gamma /
                                np.sqrt(variance + epsilon)) + threshold
         threshold_hi = np.floor(threshold_hi * 2**F) / 2**F
         threshold_lo = np.floor(threshold_lo * 2**F) / 2**F
         self.add_weights_variable(name='threshold_hi',
                                   var_name='th{index}',
                                   data=threshold_hi,
                                   type_name='threshold_hi_{index}_t',
                                   precision=inp.type.precision)
         self.add_weights_variable(name='threshold_lo',
                                   var_name='tl{index}',
                                   data=threshold_lo,
                                   type_name='threshold_lo_{index}_t',
                                   precision=inp.type.precision)
Example #7
 def transform(self, model, node):
     oldtype = node.get_output_variable().type.precision
     if isinstance(oldtype, IntegerPrecisionType):
         newtype = IntegerPrecisionType(oldtype.width, oldtype.signed)
     elif isinstance(oldtype, FixedPrecisionType):
         newtype = FixedPrecisionType(oldtype.width, oldtype.integer,
                                      oldtype.signed, self.rounding_mode,
                                      self.saturation_mode,
                                      self.saturation_bits)
     else: # in case the precision is a string
         newtype = self.precision_string_modify(oldtype)
     node.get_output_variable().type.precision = newtype
     if node.get_attr('accum_t') is not None:
         node.set_attr('accum_t', newtype)
     return False
Example #8
def parse_input_layer(keras_layer, input_names, input_shapes, data_reader, config):
    assert(keras_layer['class_name'] == 'InputLayer')

    layer = parse_default_keras_layer(keras_layer, input_names)

    layer['input_shape'] = keras_layer['config']['batch_input_shape'][1:]
    if keras_layer['config']['dtype'] == 'int32':
        layer['type_name'] = 'integer_input_t'
        layer['precision'] = IntegerPrecisionType(width=32)
    output_shape = keras_layer['config']['batch_input_shape']
    
    return layer, output_shape
Example #9
def get_type(quantizer_config):
    width = quantizer_config['config']['bits']
    integer = quantizer_config['config'].get('integer', 0)
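    # quantized_po2 maps to an exponent (power-of-two) type; a purely integer
    # quantizer (bits == integer) maps to XNOR for a single bit or a signed
    # integer otherwise; anything else maps to signed fixed point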
    if quantizer_config['class_name'] == 'quantized_po2':
        return ExponentPrecisionType(width=width, signed=True)
    if width == integer:
        if width == 1:
            return XnorPrecisionType()
        else:
            return IntegerPrecisionType(width=width, signed=True)
    else:
        return FixedPrecisionType(width=width,
                                  integer=integer + 1,
                                  signed=True)
Example #10
File: qkeras.py  Project: zzulb/hls4ml
 def __init__(self, config):
     self.quantizer_fn = get_quantizer(config)
     self.alpha = config['config']['alpha']
     if config['class_name'] == 'quantized_bits':
         self.bits = config['config']['bits']
         self.hls_type = get_type(config)
     # ! includes stochastic_ternary
     elif 'ternary' in config['class_name']:
         self.bits = 2
         self.hls_type = IntegerPrecisionType(width=2, signed=True)
     # ! includes stochastic_binary
     elif 'binary' in config['class_name']:
         self.bits = 1
         self.hls_type = IntegerPrecisionType(width=1, signed=False)
     #elif config['class_name'] == 'quantized_po2':
     #    self.bits = config['config']['bits']
     #    self.hls_type = Po2Type(width=self.bits, signed=True)
     else:
         print("Unsupported quantizer: " + config['class_name'])
         self.bits = 16
         self.hls_type = FixedPrecisionType(width=16,
                                            integer=6,
                                            signed=True)
Example #11
 def initialize(self):
     inp = self.get_input_variable()
     shape = inp.shape
     dims = inp.dim_names
     if self.get_attr('quantize') == 2:
         self.add_output_variable(shape,
                                  dims,
                                  precision=XnorPrecisionType())
     elif self.get_attr('quantize') == 3:
         self.add_output_variable(shape,
                                  dims,
                                  precision=IntegerPrecisionType(width=2))
     else:
         raise Exception(
             'Unsupported quantize attribute for BatchNormalizationQuantizedTanh: {}'
             .format(self.get_attr('quantize')))
Example #12
def parse_input_layer(keras_layer, input_names, input_shapes, data_reader, config):
    assert(keras_layer['class_name'] == 'InputLayer')

    layer = parse_default_keras_layer(keras_layer, input_names)

    layer['input_shape'] = keras_layer['config']['batch_input_shape'][1:]

    dtype = keras_layer['config']['dtype']
    if dtype.startswith('int') or dtype.startswith('uint'):
        layer['type_name'] = 'integer_input_t'
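        # e.g. dtype 'uint8' -> width 8, signed False; 'int32' -> width 32, signed True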
        width = int(dtype[dtype.index('int') + 3:])
        signed = (not dtype.startswith('u'))
        layer['precision'] = IntegerPrecisionType(width=width, signed=signed)
    # elif bool, q[u]int, ...

    output_shape = keras_layer['config']['batch_input_shape']
    
    return layer, output_shape
Example #13
    def transform(self, model, node):
        shape = node.get_input_variable().shape
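        # Rescale the input so that the threshold stored on the node can be
        # normalised to 0.5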
        scale = np.full(shape, 0.5 / node.get_attr('threshold', 0.5))
        bias = np.zeros_like(scale)
        node.set_attr('threshold', 0.5)

        attrs = {
            'name' : node.get_attr('name') + '_scale',
            'class_name' : 'Alpha',
            'inputs' : node.get_input_node().outputs,
            'outputs' : node.inputs,
            'n_filt' : node.get_attr('n_filt', -1),
            'reuse_factor' : node.get_attr('reuse_factor'),
            # These should just be placeholders
            'bias_t' : IntegerPrecisionType(1),
            'scale_t' : FixedPrecisionType(16,6),
            'Trace' : node.get_attr('Trace', False)
        }

        layer = model.make_node('ApplyAlpha', node.name + '_scale', attrs, node.inputs.copy())
        layer.add_weights(scale)
        layer.add_bias(bias)
        model.insert_node(layer, before=node)
        return True
Example #14
 def set_thresholds(self, scale, bias, ternary_threshold=0.5):
     inp = self.get_input_variable()
     shape = inp.shape
     dims = inp.dim_names
     precision = self.model.config.backend.convert_precision_string(
         inp.type.precision)
     W, I, F = precision.width, precision.integer, precision.fractional
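     # scale*x + bias changes sign at x = -bias/scale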
     threshold = -bias / scale
     if self.get_attr('quantize') == 2:
         self.add_output_variable(shape,
                                  dims,
                                  precision=XnorPrecisionType())
         threshold = np.floor(threshold * 2**F) / 2**F
         self.add_weights_variable(name='threshold',
                                   var_name='t{index}',
                                   data=threshold,
                                   type_name='threshold{index}_t',
                                   precision=inp.type.precision)
     elif self.get_attr('quantize') == 3:
         self.add_output_variable(shape,
                                  dims,
                                  precision=IntegerPrecisionType(width=2))
         threshold_hi = ternary_threshold / scale + threshold
         threshold_lo = -ternary_threshold / scale + threshold
         threshold_hi = np.floor(threshold_hi * 2**F) / 2**F
         threshold_lo = np.floor(threshold_lo * 2**F) / 2**F
         self.add_weights_variable(name='threshold_hi',
                                   var_name='th{index}',
                                   data=threshold_hi,
                                   type_name='threshold_hi_{index}_t',
                                   precision=inp.type.precision)
         self.add_weights_variable(name='threshold_lo',
                                   var_name='tl{index}',
                                   data=threshold_lo,
                                   type_name='threshold_lo_{index}_t',
                                   precision=inp.type.precision)
Example #15
File: core.py  Project: drankincms/hls4ml
 def __init__(self):
     super(TernaryQuantizer, self).__init__(2,
                                            IntegerPrecisionType(width=2))