Example #1
 def __init__(self, bits=2):
     if bits == 1:
         hls_type = IntegerPrecisionType(width=1, signed=False)
     elif bits == 2:
         hls_type = IntegerPrecisionType(width=2)
     else:
         raise Exception('BinaryQuantizer supports 1 or 2 bits, but called with bits={}'.format(bits))
     super(BinaryQuantizer, self).__init__(bits, hls_type)
Example #2
    def transform(self, model, node):
        # Compute the required precision and update the variables
        # Number of bits for output is log2 of number of input nodes
        # Since this is the number of uint<1>'s which are summed
        nbits = int(np.ceil(np.log2(node.attributes['n_in'])) + 2)
        out_type = IntegerPrecisionType(width=nbits)
        accum_t = NamedType('layer{}_accum_t'.format(node.index), out_type)
        node.set_attr('accum_t', accum_t)
        out_var = node.get_output_variable()
        out_var.type.precision = out_type

        quantized_data = None
        quantized_precision = None
        quantizer = node.get_attr('weight_quantizer')
        if quantizer.__class__.__name__ == 'BinaryQuantizer':
            quantized_precision = XnorPrecisionType()
        elif quantizer.__class__.__name__ == 'TernaryQuantizer':
            quantized_precision = IntegerPrecisionType(width=2)
        else:
            print('WARNING: Unknown quantizer - {}. Bailing out'.format(
                quantizer.__class__.__name__))
            return False
        quantizer.bits = quantized_precision.width
        quantizer.hls_type = quantized_precision
        quantized_data = quantizer(node.weights['weight'].data)

        weights = node.weights['weight']
        weights.data = quantized_data
        weights.type.name = 'weight{index}_t'.format(index=node.index)
        weights.update_precision(quantized_precision)

        bias = node.weights['bias']
        bias.data = np.zeros(shape=(node.get_attr('n_out')))
        bias.type.name = 'bias{index}_t'.format(index=node.index)
        bias.nzeros = 0
        bias.update_precision(quantized_precision)

        # If followed by BatchNormalizationQuantizedTanh, update its threshold
        # precision and requantise the threshold values
        bd_out_nodes = node.get_output_nodes()
        for out_node in bd_out_nodes:
            if isinstance(out_node, BatchNormalizationQuantizedTanh):
                var_names = []
                if quantizer.__class__.__name__ == 'BinaryQuantizer':
                    var_names.append('threshold')
                elif quantizer.__class__.__name__ == 'TernaryQuantizer':
                    var_names.append('threshold_hi')
                    var_names.append('threshold_lo')
                for var_name in var_names:
                    threshold_var = out_node.weights[var_name]
                    threshold_var.update_precision(out_type)
                    threshold_var.data = np.floor(threshold_var.data)

        return False
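The comment at the top of transform() sets the accumulator width from the fan-in; a minimal numeric sketch of that formula, assuming an illustrative fan-in of 64 (the value is not from the original):

import numpy as np

n_in = 64                                  # hypothetical number of summed 1-bit inputs
nbits = int(np.ceil(np.log2(n_in)) + 2)    # ceil(log2(64)) + 2 = 6 + 2 = 8
# -> the node's accum_t becomes IntegerPrecisionType(width=8)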
Example #3
    def transform(self, model, node):
        old_precision = node.get_output_variable().type.precision
        if isinstance(old_precision, IntegerPrecisionType):
            new_precision = IntegerPrecisionType(old_precision.width,
                                                 old_precision.signed)
        elif isinstance(old_precision, FixedPrecisionType):
            new_precision = FixedPrecisionType(old_precision.width,
                                               old_precision.integer,
                                               old_precision.signed,
                                               self.rounding_mode,
                                               self.saturation_mode,
                                               self.saturation_bits)
        else:  # in case the precision is a string
            new_precision = self.precision_string_modify(old_precision)

        out_var = node.get_output_variable()
        out_t = NamedType(out_var.type.name, new_precision)
        out_var.type = out_t
        node.attributes['result_t'] = out_t

        if node.get_attr('accum_t') is not None:
            accum_t = NamedType('layer{}_accum_t'.format(node.index),
                                new_precision)
            node.set_attr('accum_t', accum_t)
        return False
Example #4
    def init_gru(self, layer):
        reuse_factor = layer.model.config.get_reuse_factor(layer)
        layer.set_attr('recurrent_reuse_factor', reuse_factor)

        recurrent_bias = np.zeros(layer.weights['recurrent_weight'].shape[1])
        layer.add_weights_variable(name='recurrent_bias',
                                   var_name='br{index}',
                                   data=recurrent_bias)

        index_t = IntegerPrecisionType(width=1, signed=False)

        if 'table_t' not in layer.attributes:
            layer.set_attr('table_t', FixedPrecisionType(width=18, integer=8))
        if 'table_size' not in layer.attributes:
            layer.set_attr('table_size', 1024)
        if layer.model.config.is_resource_strategy(layer):
            n_in, n_out, n_in_recr, n_out_recr = self.get_layer_mult_size(
                layer)
            self.set_closest_reuse_factor(layer, n_in, n_out)
            self.set_closest_reuse_factor(layer,
                                          n_in_recr,
                                          n_out_recr,
                                          attribute='recurrent_reuse_factor')
            layer.weights['weight'].data = np.transpose(
                layer.weights['weight'].data)
            layer.weights['recurrent_weight'].data = np.transpose(
                layer.weights['recurrent_weight'].data)
            layer.set_attr('strategy', 'resource')
        else:
            layer.set_attr('strategy', 'latency')

        layer.set_attr('index_t', index_t)
Example #5
 def _convert_ac_type(cls, precision):
     '''
     Convert a precision string (e.g. "ac_fixed<16,6>") to the internal FixedPrecisionType, etc.
     '''
     bits = re.search('.+<(.+?)>', precision).group(1).split(',')
     signed = True  # default is signed
     sat_mode = None
     round_mode = None
     if 'fixed' in precision:
         W = int(bits[0])
         I = int(bits[1])
         fields = 2
         if len(bits) > 2:
             signed = bool(bits[2])
             fields = 3
     elif 'int' in precision:
         W = int(bits[0])
         I = W
         fields = 1
         if len(bits) > 1:
             signed = bool(bits[1])
             fields = 2
     if len(bits) > fields:
         round_mode = bits[fields]
     if len(bits) > fields + 1:
         sat_mode = bits[fields + 1]
     if 'fixed' in precision:
         return FixedPrecisionType(W, I, signed, round_mode, sat_mode)
     elif 'int' in precision:
         return IntegerPrecisionType(W, signed)
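A short sketch of what the parsing above yields for two assumed ac_* strings (the inputs are illustrative, not from the original):

import re

precision = 'ac_fixed<16,6,true,AC_RND,AC_SAT>'               # hypothetical input
bits = re.search('.+<(.+?)>', precision).group(1).split(',')  # ['16', '6', 'true', 'AC_RND', 'AC_SAT']
# -> FixedPrecisionType(16, 6, True, 'AC_RND', 'AC_SAT')
# 'ac_int<8>' would take the integer branch instead and, with no signedness field,
# fall back to the signed default: IntegerPrecisionType(8, True)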
Example #6
 def _convert_ap_type(cls, precision):
     '''
     Convert a precision string (e.g. "ap_fixed<16,6>") to the internal FixedPrecisionType, etc.
     '''
     bits = re.search('.+<(.+?)>', precision).group(1).split(',')
     sat_mode = None
     round_mode = None
     sat_bits = None
     if 'fixed' in precision:
         W = int(bits[0])
         I = int(bits[1])
         fields = 2
         signed = not ('u' in precision)
     elif 'int' in precision:
         W = int(bits[0])
         I = W
         fields = 1
         signed = not ('u' in precision)
     if len(bits) > fields:
         round_mode = bits[fields]
     if len(bits) > fields + 1:
         sat_mode = bits[fields + 1]
     if len(bits) > fields + 2:
         sat_bits = int(bits[fields + 2])
     if 'fixed' in precision:
         return FixedPrecisionType(W, I, signed, round_mode, sat_mode,
                                   sat_bits)
     elif 'int' in precision:
         return IntegerPrecisionType(W, signed)
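The ap_* variant differs mainly in how signedness and the optional saturation-bits field are read; two assumed inputs for illustration:

precision = 'ap_ufixed<18,8,AP_RND,AP_SAT>'        # hypothetical: the 'u' in the type name means unsigned
# -> FixedPrecisionType(18, 8, False, 'AP_RND', 'AP_SAT', None)
precision = 'ap_fixed<16,6,AP_RND_CONV,AP_SAT,2>'  # hypothetical: the fifth field fills sat_bits
# -> FixedPrecisionType(16, 6, True, 'AP_RND_CONV', 'AP_SAT', 2)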
Example #7
 def __init__(self, config, xnor=False):
     self.bits = 1 if xnor else 2
     self.hls_type = XnorPrecisionType() if xnor else IntegerPrecisionType(width=2, signed=True)
     self.alpha = config['config']['alpha']
     # Use the QKeras quantizer to handle any stochastic / alpha stuff
     self.quantizer_fn = get_quantizer(config)
     # Then we use our BinaryQuantizer to convert to '0,1' format
     self.binary_quantizer = BinaryQuantizer(1) if xnor else BinaryQuantizer(2)
Example #8
 def initialize(self):
     inp = self.get_input_variable()
     shape = inp.shape
     dims = inp.dim_names
     if self.get_attr('quantize') == 2:
         self.add_output_variable(shape, dims, precision=XnorPrecisionType())
     elif self.get_attr('quantize') == 3:
         self.add_output_variable(shape, dims, precision=IntegerPrecisionType(width=2))
     else:
         raise Exception('Unsupported quantize attribute for BatchNormalizationQuantizedTanh: {}'.format(self.get_attr('quantize')))
Example #9
    def add_bias(self, quantizer=None):
        data = self.model.get_weights_data(self.name, 'bias')
        precision = None
        type_name = None
        if data is None:
            data = np.zeros(self.get_output_variable().shape[-1])
            precision = IntegerPrecisionType(width=1, signed=False)
            type_name = 'bias{index}_t'
            quantizer = None  # Don't quantize non-existent bias

        self.add_weights_variable(name='bias', var_name='b{index}', type_name=type_name, precision=precision, data=data, quantizer=quantizer)
Example #10
def get_type(quantizer_config):
    width = quantizer_config['config']['bits']
    integer = quantizer_config['config'].get('integer', 0)
    if quantizer_config['class_name'] == 'quantized_po2':
        return ExponentPrecisionType(width=width, signed=True)
    if width == integer:
        if width == 1:
            return XnorPrecisionType()
        else:
            return IntegerPrecisionType(width=width, signed=True)
    else:
        return FixedPrecisionType(width=width, integer=integer+1, signed=True)
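A hedged usage sketch of get_type() with a hand-written quantizer config (the dictionary is an assumption shaped like the keys read above, not taken from a real model):

cfg = {'class_name': 'quantized_bits', 'config': {'bits': 6, 'integer': 0}}
out_type = get_type(cfg)
# width (6) != integer (0), so the final branch applies:
# FixedPrecisionType(width=6, integer=1, signed=True)
# A config with width == integer == 1 would instead return XnorPrecisionType().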
Example #11
 def init_dense(self, layer):
     index_t = IntegerPrecisionType(width=1, signed=False)
     compression = layer.model.config.get_compression(layer)
     if layer.model.config.is_resource_strategy(layer):
         self.set_target_reuse_factor(layer)
         self.set_closest_reuse_factor(layer)
         if compression:
             layer.set_attr('strategy', 'compressed')
             index_t = layer.get_weights('weight').type.index_precision
         else:
             layer.set_attr('strategy', 'resource')
     else:
         layer.set_attr('strategy', 'latency')
     layer.set_attr('index_t',
                    NamedType('layer{}_index'.format(layer.index), index_t))
Example #12
    def _add_variable(self, name, var_name, data, frac_width=10, quantize=False):
        # Wrapper for add_weights_variable with precision determination from data

        # automatically make the variable unsigned if data are all positive
        signed = (np.amin(data) < 0.)
        
        int_width = find_minimum_width(data, signed=signed)

        if quantize:
            precision = IntegerPrecisionType(width=int_width, signed=signed)
        else:
            width = int_width + frac_width
            precision = FixedPrecisionType(width=width, integer=int_width, signed=signed, rounding_mode='AP_RND', saturation_mode='AP_SAT')
            
        self.add_weights_variable(name=name, var_name=var_name, data=data, precision=precision)
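For a sense of the resulting types, a minimal sketch assuming find_minimum_width() reports 4 integer bits for some signed data (both numbers are illustrative):

int_width, frac_width = 4, 10   # int_width assumed from find_minimum_width(data, signed=True)
width = int_width + frac_width  # 14 bits total
# quantize=False -> FixedPrecisionType(width=14, integer=4, signed=True,
#                                      rounding_mode='AP_RND', saturation_mode='AP_SAT')
# quantize=True  -> IntegerPrecisionType(width=4, signed=True)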
Example #13
 def __init__(self, config):
     self.quantizer_fn = get_quantizer(config)
     self.alpha = config['config'].get('alpha', None)
     if config['class_name'] == 'quantized_bits':
         self.bits = config['config']['bits']
         self.hls_type = get_type(config)
     # ! includes stochastic_ternary
     elif 'ternary' in config['class_name']:
         self.bits = 2
         self.hls_type = IntegerPrecisionType(width=2, signed=True)
     # ! includes stochastic_binary
     elif 'binary' in config['class_name']:
         self.bits = 1
         self.hls_type = XnorPrecisionType()
     else:
         print("Unsupported quantizer: " + config['class_name'])
         self.bits = 16
         self.hls_type = FixedPrecisionType(width=16, integer=6, signed=True)
Example #14
def parse_input_layer(keras_layer, input_names, input_shapes, data_reader, config):
    assert(keras_layer['class_name'] == 'InputLayer')

    layer = parse_default_keras_layer(keras_layer, input_names)

    layer['input_shape'] = keras_layer['config']['batch_input_shape'][1:]

    dtype = keras_layer['config']['dtype']
    if dtype.startswith('int') or dtype.startswith('uint'):
        layer['type_name'] = 'integer_input_t'
        width = int(dtype[dtype.index('int') + 3:])
        signed = (not dtype.startswith('u'))
        layer['precision'] = IntegerPrecisionType(width=width, signed=signed)
    # elif bool, q[u]int, ...

    output_shape = keras_layer['config']['batch_input_shape']
    
    return layer, output_shape
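The dtype handling above is plain string slicing; a small worked example with an assumed 'uint8' input dtype:

dtype = 'uint8'                               # hypothetical Keras input dtype
width = int(dtype[dtype.index('int') + 3:])   # characters after 'int' -> 8
signed = not dtype.startswith('u')            # -> False
# -> layer['precision'] = IntegerPrecisionType(width=8, signed=False)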
Example #15
 def set_thresholds(self, scale, bias, ternary_threshold=0.5):
     inp = self.get_input_variable()
     shape = inp.shape
     dims = inp.dim_names
     precision = self.model.config.backend.convert_precision_string(inp.type.precision)
     W, I, F = precision.width, precision.integer, precision.fractional
     threshold = - bias / scale
     if self.get_attr('quantize') == 2:
         self.add_output_variable(shape, dims, precision=XnorPrecisionType())
         threshold = np.floor(threshold * 2**F) / 2**F
         self.add_weights_variable(name='threshold', var_name='t{index}', data=threshold, type_name='threshold{index}_t', precision=inp.type.precision)
     elif self.get_attr('quantize') == 3:
         self.add_output_variable(shape, dims, precision=IntegerPrecisionType(width=2))
         threshold_hi = ternary_threshold / scale + threshold
         threshold_lo = -ternary_threshold / scale + threshold
         threshold_hi = np.floor(threshold_hi * 2**F) / 2**F
         threshold_lo = np.floor(threshold_lo * 2**F) / 2**F
         self.add_weights_variable(name='threshold_hi', var_name='th{index}', data=threshold_hi, type_name='threshold_hi_{index}_t', precision=inp.type.precision)
         self.add_weights_variable(name='threshold_lo', var_name='tl{index}', data=threshold_lo, type_name='threshold_lo_{index}_t', precision=inp.type.precision)
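The flooring above snaps each threshold onto the grid of the input's fixed-point type; a small numeric sketch with an assumed fractional width F = 10 and an assumed threshold value:

import numpy as np

F = 10                                        # assumed fractional bits of the input type
threshold = 0.3337                            # assumed -bias/scale for one channel
snapped = np.floor(threshold * 2**F) / 2**F   # floor(341.7088) / 1024 = 341/1024 ≈ 0.3330
# i.e. each threshold is rounded down to the nearest value representable with F fractional bits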
Example #16
    def init_dense(self, layer):
        index_t = IntegerPrecisionType(width=1, signed=False)

        layer.set_attr('rfpad', 0)
        layer.set_attr('bfpad', 0)

        if layer.model.config.get_compression(layer):
            layer.set_attr('strategy', 'compressed')
        else:
            self.set_closest_reuse_factor(layer)
            self.gen_quartus_weight_array(layer)
            layer.set_attr('strategy', 'resource')

        if layer.model.config.is_resource_strategy(layer):
            if layer.model.config.get_compression(layer):
                index_t = layer.get_weights('weight').type.index_precision

        layer.set_attr('index_t',
                       NamedType('layer{}_index'.format(layer.index), index_t))
Example #17
    def initialize(self):
        if self.get_attr('data_format') == 'channels_last':
            shape = [self.attributes['out_height'], self.attributes['out_width'], self.attributes['n_filt']]
            dims = ['OUT_HEIGHT_{}'.format(self.index), 'OUT_WIDTH_{}'.format(self.index), 'N_FILT_{}'.format(self.index)]
        else:
            shape = [self.attributes['n_filt'], self.attributes['out_height'], self.attributes['out_width']]
            dims = ['N_FILT_{}'.format(self.index), 'OUT_HEIGHT_{}'.format(self.index), 'OUT_WIDTH_{}'.format(self.index)]
        self.add_output_variable(shape, dims)
        
        depthwise_data = self.model.get_weights_data(self.name, 'depthwise_kernel')
        pointwise_data = self.model.get_weights_data(self.name, 'pointwise_kernel')

        self.add_weights_variable(name='depthwise', var_name='d{index}', data=depthwise_data, quantizer=self.get_attr('depthwise_quantizer'))
        self.add_weights_variable(name='pointwise', var_name='p{index}', data=pointwise_data, quantizer=self.get_attr('pointwise_quantizer'))
        
        zero_bias_data = np.zeros((self.attributes['n_chan'],))
        precision = IntegerPrecisionType(width=1, signed=False)
        self.add_weights_variable(name='zero_bias', var_name='z{index}', data=zero_bias_data, precision=precision)

        self.add_bias(quantizer=self.get_attr('bias_quantizer'))
Example #18
 def __init__(self):
     super(TernaryQuantizer, self).__init__(2,
                                            IntegerPrecisionType(width=2))