Example #1
 def _convert_ac_type(cls, precision):
     '''
     Convert a precision string (e.g. "ac_fixed<16,6>") to the internal FixedPrecisionType, etc.
     '''
     bits = re.search('.+<(.+?)>', precision).group(1).split(',')
     signed = True  # default is signed
     sat_mode = None
     round_mode = None
     if 'fixed' in precision:
         W = int(bits[0])
         I = int(bits[1])
         fields = 2
         if len(bits) > 2:
             signed = bits[2].strip() == 'true'  # bool(bits[2]) would be True even for the string 'false'
             fields = 3
     elif 'int' in precision:
         W = int(bits[0])
         I = W
         fields = 1
         if len(bits) > 1:
             signed = bits[1].strip() == 'true'  # same caveat: bool() of any non-empty string is True
             fields = 2
     if len(bits) > fields:
         round_mode = bits[fields]
     if len(bits) > fields + 1:
         sat_mode = bits[fields + 1]
     if 'fixed' in precision:
         return FixedPrecisionType(W, I, signed, round_mode, sat_mode)
     elif 'int' in precision:
         return IntegerPrecisionType(W, signed)
Example #2
    def transform(self, model, node):
        old_precision = node.get_output_variable().type.precision
        if isinstance(old_precision, IntegerPrecisionType):
            new_precision = IntegerPrecisionType(old_precision.width,
                                                 old_precision.signed)
        elif isinstance(old_precision, FixedPrecisionType):
            new_precision = FixedPrecisionType(old_precision.width,
                                               old_precision.integer,
                                               old_precision.signed,
                                               self.rounding_mode,
                                               self.saturation_mode,
                                               self.saturation_bits)
        else:  # in case the precision is a string
            new_precision = self.precision_string_modify(old_precision)

        out_var = node.get_output_variable()
        out_t = NamedType(out_var.type.name, new_precision)
        out_var.type = out_t
        node.attributes['result_t'] = out_t

        if node.get_attr('accum_t') is not None:
            accum_t = NamedType('layer{}_accum_t'.format(node.index),
                                new_precision)
            node.set_attr('accum_t', accum_t)  # store the NamedType built above, not the bare precision
        return False
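
A toy illustration of what the rewrite changes, with a namedtuple standing in for FixedPrecisionType (the stand-in and the AP_RND/AP_SAT values are assumptions for the sketch, mirroring the constructor argument order used above):

from collections import namedtuple

# Hypothetical stand-in mirroring FixedPrecisionType's argument order
Fixed = namedtuple('Fixed', ['width', 'integer', 'signed',
                             'rounding_mode', 'saturation_mode', 'saturation_bits'])

old = Fixed(16, 6, True, None, None, None)
# The pass keeps width, integer bits and signedness, and injects the
# optimizer's rounding/saturation settings:
new = Fixed(old.width, old.integer, old.signed, 'AP_RND', 'AP_SAT', None)
print(new.rounding_mode, new.saturation_mode)  # AP_RND AP_SAT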
Example #3
 def _convert_ap_type(cls, precision):
     '''
     Convert a precision string (e.g. "ap_fixed<16,6>") to the internal FixedPrecisionType, etc.
     '''
     bits = re.search('.+<(.+?)>', precision).group(1).split(',')
     sat_mode = None
     round_mode = None
     sat_bits = None
     if 'fixed' in precision:
         W = int(bits[0])
         I = int(bits[1])
         fields = 2
         signed = 'u' not in precision  # ap_ufixed is unsigned
     elif 'int' in precision:
         W = int(bits[0])
         I = W
         fields = 1
         signed = 'u' not in precision  # ap_uint is unsigned
     if len(bits) > fields:
         round_mode = bits[fields]
     if len(bits) > fields + 1:
         sat_mode = bits[fields + 1]
     if len(bits) > fields + 2:
         sat_bits = int(bits[fields + 2])
     if 'fixed' in precision:
         return FixedPrecisionType(W, I, signed, round_mode, sat_mode,
                                   sat_bits)
     elif 'int' in precision:
         return IntegerPrecisionType(W, signed)
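
Unlike the ac_* parser in Example #1, signedness here comes from the type name itself rather than a template argument, and one extra trailing field can carry saturation bits. A quick check of the signedness rule (standalone, the function name is chosen for the sketch):

def is_signed_ap_type(precision):
    # ap_ufixed / ap_uint contain a 'u'; ap_fixed / ap_int do not
    return 'u' not in precision

assert is_signed_ap_type('ap_fixed<16,6>')
assert not is_signed_ap_type('ap_ufixed<16,6>')
assert not is_signed_ap_type('ap_uint<8>')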
Example #4
    def init_gru(self, layer):
        reuse_factor = layer.model.config.get_reuse_factor(layer)
        layer.set_attr('recurrent_reuse_factor', reuse_factor)

        recurrent_bias = np.zeros(layer.weights['recurrent_weight'].shape[1])
        layer.add_weights_variable(name='recurrent_bias',
                                   var_name='br{index}',
                                   data=recurrent_bias)

        index_t = IntegerPrecisionType(width=1, signed=False)

        if 'table_t' not in layer.attributes:
            layer.set_attr('table_t', FixedPrecisionType(width=18, integer=8))
        if 'table_size' not in layer.attributes:
            layer.set_attr('table_size', 1024)
        if layer.model.config.is_resource_strategy(layer):
            n_in, n_out, n_in_recr, n_out_recr = self.get_layer_mult_size(
                layer)
            self.set_closest_reuse_factor(layer, n_in, n_out)
            self.set_closest_reuse_factor(layer,
                                          n_in_recr,
                                          n_out_recr,
                                          attribute='recurrent_reuse_factor')
            layer.weights['weight'].data = np.transpose(
                layer.weights['weight'].data)
            layer.weights['recurrent_weight'].data = np.transpose(
                layer.weights['recurrent_weight'].data)
            layer.set_attr('strategy', 'resource')
        else:
            layer.set_attr('strategy', 'latency')

        layer.set_attr('index_t', index_t)
Example #5
 def init_activation(self, layer):
     if 'table_t' not in layer.attributes:
         layer.set_attr(
             'table_t',
             NamedType(name=layer.name + '_table_t',
                       precision=FixedPrecisionType(width=18, integer=8)))
     if 'table_size' not in layer.attributes:
         layer.set_attr('table_size', 1024)
Example #6
    def format(self, node):
        params = self._default_config_params(node)

        params['n_vertices'] = node.attributes['n_vertices']
        params['n_vertices_width'] = int(np.log2(params['n_vertices']))
        params['distance_width'] = 12
        params['distance_nint'] = min(4, params['distance_width'] - 6) # this is tuned
        params['log2_reuse'] = int(np.log2(params['reuse']))

        ## Define default precisions for various internal arrays (can be overridden from the config file)
        # We always give 10 bits for the fractional part
        fwidth = 10
        # Integer precision for aggr_t depends on how large the temporary sum for the weighted feature mean will be
        aggr_intw = max(params['log2_reuse'], params['n_vertices_width'] - params['log2_reuse']) + 3 # safety factor 2**3
        aggr_w = aggr_intw + fwidth
        # edge_weight_aggr_t does not need the safety factor
        ew_aggr_intw = aggr_intw - 3
        ew_aggr_w = ew_aggr_intw + fwidth
        # Integer precision for norm is fixed at 4
        norm_intw = 4
        norm_w = norm_intw + fwidth

        vspecs = [
            ('edge_weight', FixedPrecisionType(10, 0, signed=False)),
            ('edge_weight_aggr', FixedPrecisionType(ew_aggr_w, ew_aggr_intw, signed=False)),
            ('aggr', FixedPrecisionType(aggr_w, aggr_intw)),
            ('norm', FixedPrecisionType(norm_w, norm_intw, signed=False))
        ]
        precision_converter = APTypeConverter()
        for vname, default_precision in vspecs:
            params['{}_t'.format(vname)], type_name = node.model.config.get_precision(node, var=vname)
            if type_name.endswith('default_t'):
                params['{}_t'.format(vname)] = precision_converter.convert(default_precision).definition_cpp()

        params['output_t'] = node.get_output_variable().type.name

        if node.attributes['collapse'] in ['mean', 'max']:
            params['collapse_type'] = 'collapse_{}'.format(node.attributes['collapse'])
        else:
            params['collapse_type'] = 'no_collapse'

        params['mean_by_nvert'] = str(node.attributes['mean_by_nvert']).lower()

        self.get_transforms_config(node, params)

        return self.template[0].format(**params)
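
Plugging illustrative numbers into the width arithmetic above makes the defaults concrete (the values are chosen for the example, not taken from a real model):

import numpy as np

n_vertices, reuse, fwidth = 256, 4, 10       # illustrative inputs
n_vertices_width = int(np.log2(n_vertices))  # 8
log2_reuse = int(np.log2(reuse))             # 2
aggr_intw = max(log2_reuse, n_vertices_width - log2_reuse) + 3  # 9, incl. the 2**3 safety factor
aggr_w = aggr_intw + fwidth                  # 19 -> aggr_t: 19 bits, 9 integer
ew_aggr_w = (aggr_intw - 3) + fwidth         # 16 -> edge_weight_aggr_t drops the safety factor
norm_w = 4 + fwidth                          # 14 -> norm_t: fixed 4 integer bits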
Example #7
def get_type(quantizer_config):
    width = quantizer_config['config']['bits']
    integer = quantizer_config['config'].get('integer', 0)
    if quantizer_config['class_name'] == 'quantized_po2':
        return ExponentPrecisionType(width=width, signed=True)
    if width == integer:
        if width == 1:
            return XnorPrecisionType()
        else:
            return IntegerPrecisionType(width=width, signed=True)
    else:
        return FixedPrecisionType(width=width, integer=integer+1, signed=True)
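
The integer + 1 in the final branch makes room for the sign bit, since quantized_bits counts integer bits without the sign while the fixed-point integer width includes it. Illustrative configs and the types they map to (the dicts are made up for the example):

# Fractional quantizer -> fixed-point with one extra integer bit for the sign
cfg = {'class_name': 'quantized_bits', 'config': {'bits': 8, 'integer': 3}}
# get_type(cfg) -> FixedPrecisionType(width=8, integer=4, signed=True)

# bits == integer -> plain integer type
cfg = {'class_name': 'quantized_bits', 'config': {'bits': 8, 'integer': 8}}
# get_type(cfg) -> IntegerPrecisionType(width=8, signed=True)

# Power-of-two quantizer -> exponent-only representation
cfg = {'class_name': 'quantized_po2', 'config': {'bits': 4}}
# get_type(cfg) -> ExponentPrecisionType(width=4, signed=True)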
Example #8
def get_concat_type(itype1, itype2):
    newwidth = max(itype1.width, itype2.width)
    newint = max(itype1.integer, itype2.integer)
    if (itype1.signed ^ itype2.signed):  # XOR
        newint += 1
        newwidth += 1
    newrmode = itype1.rounding_mode if itype1.rounding_mode is not None else itype2.rounding_mode
    newsmode = itype1.saturation_mode if itype1.saturation_mode is not None else itype2.saturation_mode
    newsbits = itype1.saturation_bits if itype1.saturation_bits is not None else itype2.saturation_bits

    newsigned = itype1.signed or itype2.signed
    newtype = FixedPrecisionType(newwidth, newint, newsigned,
                                 newrmode, newsmode, newsbits)
    return newtype
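
A worked example: concatenating a signed 16-bit type with 6 integer bits and an unsigned 10-bit type with 4 integer bits. Because the signedness differs, one bit is added to both the integer part and the total width so the unsigned operand's full range still fits once the result becomes signed (plain arithmetic, values chosen for the sketch):

newwidth = max(16, 10)   # 16
newint = max(6, 4)       # 6
if True ^ False:         # one operand signed, one unsigned
    newint += 1          # 7
    newwidth += 1        # 17
# result: a signed type, 17 bits total with 7 integer bits; rounding and
# saturation modes are inherited from whichever input defines them first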
Example #9
    def _add_variable(self, name, var_name, data, frac_width=10, quantize=False):
        # Wrapper for add_weights_variable with precision determination from data

        # automatically make the variable unsigned if the data are all non-negative
        signed = (np.amin(data) < 0.)
        
        int_width = find_minimum_width(data, signed=signed)

        if quantize:
            precision = IntegerPrecisionType(width=int_width, signed=signed)
        else:
            width = int_width + frac_width
            precision = FixedPrecisionType(width=width, integer=int_width, signed=signed, rounding_mode='AP_RND', saturation_mode='AP_SAT')
            
        self.add_weights_variable(name=name, var_name=var_name, data=data, precision=precision)
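
find_minimum_width is not shown in this excerpt; here is a plausible sketch of what it computes, assuming it returns the fewest integer bits that can represent every value in data (my reading, not necessarily the library's exact implementation):

import numpy as np

def find_minimum_width(data, signed=True):
    # Smallest integer bit width covering the data range, with one extra
    # (conservative) bit for the sign when signed=True.
    max_abs = float(np.amax(np.abs(data)))
    if max_abs == 0:
        return 1
    bits = int(np.ceil(np.log2(max_abs + 1)))
    return bits + 1 if signed else bits

print(find_minimum_width(np.array([0., 3., 7.]), signed=False))  # 3
print(find_minimum_width(np.array([-4., 7.]), signed=True))      # 4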
Example #10
 def __init__(self, config):
     self.quantizer_fn = get_quantizer(config)
     self.alpha = config['config'].get('alpha', None)
     if config['class_name'] == 'quantized_bits':
         self.bits = config['config']['bits']
         self.hls_type = get_type(config)
     # ! includes stochastic_ternary
     elif 'ternary' in config['class_name']:
         self.bits = 2
         self.hls_type = IntegerPrecisionType(width=2, signed=True)
     # ! includes stochastic_binary
     elif 'binary' in config['class_name']:
         self.bits = 1
         self.hls_type = XnorPrecisionType()
     else:
         print("Unsupported quantizer: " + config['class_name'])
         self.bits = 16
         self.hls_type = FixedPrecisionType(width=16, integer=6, signed=True)
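
In use, this constructor maps each quantizer class name to a bit width and an HLS type; a few illustrative configs (made up for the example) and the fields they would produce:

cfg = {'class_name': 'stochastic_ternary', 'config': {'alpha': 1.0}}
# -> bits = 2, hls_type = IntegerPrecisionType(width=2, signed=True)
cfg = {'class_name': 'binary', 'config': {}}
# -> bits = 1, hls_type = XnorPrecisionType()
cfg = {'class_name': 'no_such_quantizer', 'config': {}}
# -> prints a warning, then falls back to bits = 16,
#    hls_type = FixedPrecisionType(width=16, integer=6, signed=True)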