Code example #1
class ApplyAlpha(BatchNormalization):
    ''' A custom layer to scale the output of a quantized layer.
        Inference computation uses BatchNormalization methods'''

    def initialize(self):
        inp = self.get_input_variable()
        shape = inp.shape
        dims = inp.dim_names
        self.add_output_variable(shape, dims)

    def add_weights(self, scale, quantizer=None):
        self.add_weights_variable(name='scale', var_name='s{index}', data=scale, quantizer=quantizer)

    def add_bias(self, bias, quantizer=None):
        self.add_weights_variable(name='bias', var_name='b{index}', data=bias, quantizer=quantizer)

# register the layer and its templates
register_layer('ApplyAlpha', ApplyAlpha)
# TODO ideally: for backend in backends
temps = templates.get_backend('Vivado')
temps.register_templates('ApplyAlpha',
                         temps.get_function_template('BatchNormalization'),
                         temps.get_config_template('BatchNormalization'),
                         temps.get_include_list('BatchNormalization'))

class QKerasFactorizeAlpha(OptimizerPass):
    '''OptimizerPass for extracting alpha "scale" from QKeras quantized layer.
       The weights of the Q{Dense, Conv} layer are scaled to the common data type,
       and an 'ApplyAlpha' layer is inserted to reapply the scale.
    '''
    def match(self, node):
        q_layer = node.__class__.__name__ in ["Dense", "Conv1D", "Conv2D"]
        has_w_quant = node.get_attr('weight_quantizer') is not None 
        has_b_quant = node.get_attr('bias_quantizer') is not None
        has_w_alpha, has_b_alpha = False, False
        if has_w_quant:
            has_w_alpha = node.get_attr('weight_quantizer').alpha is not None
        if has_b_quant:
            has_b_alpha = node.get_attr('bias_quantizer').alpha is not None
        return q_layer and (has_w_alpha or has_b_alpha)
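
# A toy illustration (numbers invented, not from this example) of the
# factorization the docstring describes: QKeras weights quantized with
# alpha != 1 can be written as w = alpha * q, where q fits the common data
# type; the inserted ApplyAlpha layer multiplies the scale alpha back in.
import numpy as np

w = np.array([0.5, -1.0, 1.5])
alpha = 0.5
q = w / alpha                       # [1., -2., 3.], representable in the common type
assert np.allclose(q * alpha, w)    # ApplyAlpha restores the original weights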
Code example #2
    def config_cpp(self):
        params = self._default_config_params()
        params['n_in'] = self.get_input_variable().size_cpp()

        return self._config_template.format(**params)


batchnorm_quantized_tanh_config_template = """struct config{index} : nnet::batchnorm_quantized_tanh_config {{
    static const unsigned n_in = {n_in};
    static const unsigned n_filt = {n_filt};
    static const unsigned io_type = nnet::{iotype};
    static const unsigned reuse_factor = {reuse};
}};\n"""

batchnorm_quantized_tanh_function_template = 'nnet::normalize_{quantize}_tanh<{input_t}, {config}>({input}, {output}, {threshold});'

# Register the layer types to the layer map
register_layer('BatchNormalizationQuantizedTanh',
               BatchNormalizationQuantizedTanh)

# Register the templates for config and function
templates.get_backend('Vivado').register_templates(
    'BatchNormalizationQuantizedTanh',
    batchnorm_quantized_tanh_function_template,
    batchnorm_quantized_tanh_config_template)


class MergeBatchNormAndQuantizedTanh(OptimizerPass):
    def match(self, node):
        is_match = (node.__class__.__name__ == 'Activation'
                    and node.get_attr('activation')
                    in ['binary_tanh', 'ternary_tanh']
                    and isinstance(node.get_input_node(), BatchNormalization))
        return is_match
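
# A toy sketch (constants invented; gamma > 0 assumed) of what the merged
# layer computes: BatchNormalization followed by binary_tanh collapses into
# a per-channel threshold test, which is the {threshold} argument passed to
# nnet::normalize_{quantize}_tanh in the function template above.
import numpy as np

gamma, beta, mean, var, eps = 1.5, 0.2, 0.1, 0.8, 1e-3
x = np.linspace(-2, 2, 9)

bn = gamma * (x - mean) / np.sqrt(var + eps) + beta
binary = np.where(bn >= 0, 1, -1)

# For gamma > 0, the sign of BN(x) flips exactly at this threshold:
threshold = mean - beta * np.sqrt(var + eps) / gamma
assert (binary == np.where(x >= threshold, 1, -1)).all()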
Code example #3
    def function_cpp(self):
        params = self._default_function_params()
        params['size'] = self.get_attr('size')
        params['output1'] = self.variables[self.outputs[0]].name
        params['output2'] = self.variables[self.outputs[1]].name
        return [self._function_template.format(**params)]

    def config_cpp(self):
        return None


clone_function_template = 'nnet::clone_stream<{input_t}, {output_t}, {size}>({input}, {output1}, {output2});'
clone_include_list = ['nnet_utils/nnet_stream.h']

# Register the layer types to the layer map
register_layer('Clone', Clone)

# Register the templates for config and function
templates.get_backend('Vivado').register_templates('Clone',
                                                   clone_function_template,
                                                   None, clone_include_list)


class CloneOutput(OptimizerPass):
    ''' Clones streams that are used multiple times '''
    def match(self, node):
        # We may have already inserted the Clone layer
        if node.__class__.__name__ == 'Clone':
            return False

        return True
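
# Purely illustrative Python analogue of nnet::clone_stream: a stream/FIFO
# can only be consumed once, so a node whose output feeds several consumers
# needs its stream duplicated. (SimpleQueue stands in for hls::stream here.)
from queue import SimpleQueue

def clone_stream(inp, out1, out2):
    while not inp.empty():
        v = inp.get()      # each element is read exactly once...
        out1.put(v)        # ...and written to both output streams
        out2.put(v)

inp, out1, out2 = SimpleQueue(), SimpleQueue(), SimpleQueue()
for v in [1, 2, 3]:
    inp.put(v)
clone_stream(inp, out1, out2)
assert [out1.get() for _ in range(3)] == [1, 2, 3]
assert [out2.get() for _ in range(3)] == [1, 2, 3]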
Code example #4
        self.add_output_variable(shape, dims)

    def function_cpp(self):
        params = self._default_function_params()
        params['size'] = np.prod(self.get_output_variable().shape)
        return [self._function_template.format(**params)]

    def config_cpp(self):
        return None

repack_function_template = 'nnet::repack_stream<{input_t}, {output_t}, {size}>({input}, {output});'
repack_include_list = ['nnet_utils/nnet_stream.h']

# Register the layer types to the layer map
register_layer('Repack', Repack)

# Register the templates for config and function
templates.get_backend('Vivado').register_templates('Repack', repack_function_template, None, repack_include_list)


class ReshapeStream(OptimizerPass):
    ''' Repacks stream for Reshape layer '''
    def match(self, node):
        return node.__class__.__name__ == 'Reshape'

    def transform(self, model, node):
        if model.config.backend.name != 'Vivado' or \
            model.config.get_config_value('IOType') != 'io_stream':
            return False
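
# A toy illustration (pack sizes invented) of what repacking means: the same
# flattened data flows through, only the number of elements per stream beat
# changes, so a Reshape costs no arithmetic in io_stream mode.
def repack(stream, new_pack):
    flat = [x for beat in stream for x in beat]
    return [flat[i:i + new_pack] for i in range(0, len(flat), new_pack)]

assert repack([[1, 2, 3], [4, 5, 6]], 2) == [[1, 2], [3, 4], [5, 6]]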
Code example #5
File: pointwise.py  Project: thaarres/hls4ml
    # Nothing to do, will pick up function and config from class name
    pass


pointwise_conv1d_function_template = 'nnet::pointwise_conv_1d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {b});'
pointwise_conv2d_function_template = 'nnet::pointwise_conv_2d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {b});'

sepconv1d_include_list = [
    'nnet_utils/nnet_conv1d.h', 'nnet_utils/nnet_sepconv1d_stream.h'
]
sepconv2d_include_list = [
    'nnet_utils/nnet_conv2d.h', 'nnet_utils/nnet_sepconv2d_stream.h'
]

# Register the layer types to the layer map
register_layer('PointwiseConv1D', PointwiseConv1D)
register_layer('PointwiseConv2D', PointwiseConv2D)

# Register the templates for config and function
for backend in ['Vivado', 'VivadoAccelerator']:
    templates.get_backend(backend).register_templates(
        'PointwiseConv1D', pointwise_conv1d_function_template,
        templates.get_backend(backend).get_config_template('Conv1D'),
        sepconv1d_include_list)

    templates.get_backend(backend).register_templates(
        'PointwiseConv2D', pointwise_conv2d_function_template,
        templates.get_backend(backend).get_config_template('Conv2D'),
        sepconv2d_include_list)
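
# For context, a pointwise convolution is a 1x1 convolution: each output
# pixel is a matrix product over the channel dimension, which is why it
# warrants dedicated function templates. Minimal numpy check (shapes
# invented, channels_last data format assumed).
import numpy as np

H, W, C_in, C_out = 4, 4, 3, 5
x = np.random.rand(H, W, C_in)
w = np.random.rand(C_in, C_out)
b = np.random.rand(C_out)

y = (x.reshape(-1, C_in) @ w + b).reshape(H, W, C_out)   # one matmul for all H*W pixels
assert y.shape == (4, 4, 5)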

Code example #6
        return self._config_template.format(**params)

repack_function_template = 'nnet::repack_stream<{input_t}, {output_t}, {size}>({input}, {output});'
repack_include_list = ['nnet_utils/nnet_stream.h']

broadcast_function_template = 'nnet::broadcast_stream<{input_t}, {output_t}, {config}>({input}, {output});'
broadcast_config_template = """struct config{index} : nnet::broadcast_config {{
static const unsigned in_width = {in_width};
static const unsigned in_height = {in_height};
static const unsigned n_chan = {n_chan};
static const unsigned n_dupl = {n_dupl};
}};\n"""
broadcast_include_list = ['nnet_utils/nnet_stream.h']

# Register the layer types to the layer map
register_layer('Repack', Repack)
register_layer('Broadcast', Broadcast)

# Register the templates for config and function
for backend in ['Vivado', 'VivadoAccelerator']:
    templates.get_backend(backend).register_templates('Repack', repack_function_template, None, repack_include_list)
    templates.get_backend(backend).register_templates('Broadcast', broadcast_function_template, broadcast_config_template, broadcast_include_list)


class ReshapeStream(OptimizerPass):
    ''' Repacks stream for Reshape layer '''
    def match(self, node):
        return node.__class__.__name__ == 'Reshape'

    def transform(self, model, node):
        if model.config.backend.name not in ['Vivado', 'VivadoAccelerator'] or \
            model.config.get_config_value('IOType') != 'io_stream':
            return False
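
# Rough analogue (sizes invented) of the broadcast template above: the input
# data is replicated n_dupl times on the output stream. The exact element
# ordering in nnet::broadcast_stream may differ; this only illustrates the
# n_dupl bookkeeping in broadcast_config.
def broadcast(stream, n_dupl):
    return stream * n_dupl

assert broadcast([1, 2, 3], 2) == [1, 2, 3, 1, 2, 3]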
Code example #7
File: bn_quant.py  Project: zwl1671/hls4ml
    def config_cpp(self):
        params = self._default_config_params()
        params['n_in'] = self.get_input_variable().size_cpp()

        return self._config_template.format(**params)


batchnorm_quantized_tanh_config_template = """struct config{index} : nnet::batchnorm_quantized_tanh_config {{
    static const unsigned n_in = {n_in};
    static const unsigned n_filt = {n_filt};
    static const unsigned io_type = nnet::{iotype};
    static const unsigned reuse_factor = {reuse};
}};\n"""

batchnorm_quantized_tanh_function_template = 'nnet::normalize_{quantize}_tanh<{input_t}, {config}>({input}, {output}, {threshold});'

# Register the layer types to the layer map
hls_model.register_layer('BatchNormalizationQuantizedTanh',
                         BatchNormalizationQuantizedTanh)

# Register the templates for config and function
templates.get_backend('Vivado').register_templates(
    'BatchNormalizationQuantizedTanh',
    batchnorm_quantized_tanh_function_template,
    batchnorm_quantized_tanh_config_template)


class MergeBatchNormAndQuantizedTanh(OptimizerPass):
    def match(self, node):
        is_match = (node.__class__.__name__ == 'Activation'
                    and node.get_attr('activation')
                    in ['binary_tanh', 'ternary_tanh']
                    and node.get_input_node().__class__.__name__
                    == 'BatchNormalization')
        return is_match