Example #1
batchnorm_quantized_tanh_config_template = """struct config{index} : nnet::batchnorm_quantized_tanh_config {{
    static const unsigned n_in = {n_in};
    static const unsigned n_filt = {n_filt};
    static const unsigned io_type = nnet::{iotype};
    static const unsigned reuse_factor = {reuse};
}};\n"""

batchnorm_quantized_tanh_function_template = 'nnet::normalize_{quantize}_tanh<{input_t}, {config}>({input}, {output}, {threshold});'

# Register the layer types to the layer map
register_layer('BatchNormalizationQuantizedTanh',
               BatchNormalizationQuantizedTanh)

# Register the templates for config and function
templates.get_backend('Vivado').register_templates(
    'BatchNormalizationQuantizedTanh',
    batchnorm_quantized_tanh_function_template,
    batchnorm_quantized_tanh_config_template)


class MergeBatchNormAndQuantizedTanh(OptimizerPass):
    def match(self, node):
        is_match = (node.__class__.__name__ == 'Activation'
                    and node.get_attr('activation')
                    in ['binary_tanh', 'ternary_tanh']
                    and isinstance(node.get_input_node(), BatchNormalization))
        return is_match

    def transform(self, model, node):
        bn_layer = node.get_input_node()
        # Remove the Activation layer
        model.remove_node(node, rewire=True)
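
A minimal registration sketch (not part of the example above): the pass class would typically be made visible to hls4ml's optimizer framework by name. The register_pass helper is assumed from hls4ml.model.optimizer, matching the hls4ml versions these snippets target, and the pass name string is illustrative.

# Hedged sketch: make the pass available so the model-level optimizer calls
# match() on every node and transform() wherever match() returns True.
# register_pass is assumed from hls4ml.model.optimizer; the name is illustrative.
from hls4ml.model.optimizer import register_pass

register_pass('merge_batch_norm_quantized_tanh', MergeBatchNormAndQuantizedTanh)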
Example #2
        dims = inp.dim_names
        self.add_output_variable(shape, dims)

    def add_weights(self, scale, bias):
        self.add_weights_variable(name='scale',
                                  var_name='s{index}',
                                  data=scale)
        self.add_weights_variable(name='bias', var_name='b{index}', data=bias)


# register the layer and its templates
register_layer('ApplyAlpha', ApplyAlpha)
# TODO ideally: for backend in backends
#temps = templates.get_backend('Vivado')
for backend in templates.backend_map:
    temp = templates.get_backend(backend)
    temp.register_templates('ApplyAlpha',
                            temp.get_function_template('BatchNormalization'),
                            temp.get_config_template('BatchNormalization'),
                            temp.get_include_list('BatchNormalization'))


class QKerasFactorizeAlpha(OptimizerPass):
    '''OptimizerPass for extracting alpha "scale" from QKeras quantized layer.
       The weights of the Q{Dense, Conv} layer are scaled to the common data type,
       and an 'ApplyAlpha' layer is inserted to reapply the scale.
    '''
    def match(self, node):
        q_layer = node.__class__.__name__ in ["Dense", "Conv1D", "Conv2D"]
        has_w_quant = node.get_attr('weight_quantizer') is not None
        has_b_quant = node.get_attr('bias_quantizer') is not None
Example #3
    def initialize(self):
        inp = self.get_input_variable()
        shape = inp.shape
        dims = inp.dim_names
        self.add_output_variable(shape, dims)

    def add_weights(self, scale, quantizer=None):
        self.add_weights_variable(name='scale', var_name='s{index}', data=scale, quantizer=quantizer)

    def add_bias(self, bias, quantizer=None):
        self.add_weights_variable(name='bias', var_name='b{index}', data=bias, quantizer=quantizer)

# register the layer and its templates
register_layer('ApplyAlpha', ApplyAlpha)
# TODO ideally: for backend in backends
temps = templates.get_backend('Vivado')
temps.register_templates('ApplyAlpha',
                         temps.get_function_template('BatchNormalization'),
                         temps.get_config_template('BatchNormalization'),
                         temps.get_include_list('BatchNormalization'))

class QKerasFactorizeAlpha(OptimizerPass):
    '''OptimizerPass for extracting alpha "scale" from QKeras quantized layer.
       The weights of the Q{Dense, Conv} layer are scaled to the common data type,
       and an 'ApplyAlpha' layer is inserted to reapply the scale.
    '''
    def match(self, node):
        q_layer = node.__class__.__name__ in ["Dense", "Conv1D", "Conv2D"]
        has_w_quant = node.get_attr('weight_quantizer') is not None 
        has_b_quant = node.get_attr('bias_quantizer') is not None
        has_w_alpha, has_b_alpha = False, False
        if has_w_quant:
            if hasattr(node.get_attr('weight_quantizer'), 'alpha'):
                w_alpha = node.get_attr('weight_quantizer').alpha
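
For context, a hedged end-to-end sketch of how these QKeras passes get exercised: converting a QKeras model through hls4ml runs the registered optimizer passes, including the alpha factorization above. Here qmodel is a hypothetical, already-trained QKeras model; config_from_keras_model and convert_from_keras_model are the public hls4ml API of the same era.

# Hedged usage sketch (qmodel and the output directory are placeholders).
import hls4ml

config = hls4ml.utils.config_from_keras_model(qmodel, granularity='name')
hls_model = hls4ml.converters.convert_from_keras_model(
    qmodel, hls_config=config, output_dir='hls4ml_prj', io_type='io_stream')
# Registered passes such as QKerasFactorizeAlpha run during conversion;
# compile() builds the C++ emulation library for numerical checks.
hls_model.compile()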
Example #4
        params['output2'] = self.variables[self.outputs[1]].name
        return [self._function_template.format(**params)]

    def config_cpp(self):
        return None


clone_function_template = 'nnet::clone_stream<{input_t}, {output_t}, {size}>({input}, {output1}, {output2});'
clone_include_list = ['nnet_utils/nnet_stream.h']

# Register the layer types to the layer map
register_layer('Clone', Clone)

# Register the templates for config and function
templates.get_backend('Vivado').register_templates('Clone',
                                                   clone_function_template,
                                                   None, clone_include_list)


class CloneOutput(OptimizerPass):
    ''' Clones streams that are used multiple times '''
    def match(self, node):
        # We may have already inserted the Clone layer
        if node.__class__.__name__ == 'Clone':
            return False

        return True

    def transform(self, model, node):
        if model.config.backend.name != 'Vivado' or \
            model.config.get_config_value('IOType') != 'io_stream':
            return False
Example #5
    def function_cpp(self):
        params = self._default_function_params()
        params['size'] = np.prod(self.get_output_variable().shape)
        return [self._function_template.format(**params)]

    def config_cpp(self):
        return None

repack_function_template = 'nnet::repack_stream<{input_t}, {output_t}, {size}>({input}, {output});'
repack_include_list = ['nnet_utils/nnet_stream.h']

# Register the layer types to the layer map
register_layer('Repack', Repack)

# Register the templates for config and function
templates.get_backend('Vivado').register_templates('Repack', repack_function_template, None, repack_include_list)


class ReshapeStream(OptimizerPass):
    ''' Repacks stream for Reshape layer '''
    def match(self, node):
        return node.__class__.__name__ == 'Reshape'

    def transform(self, model, node):
        if model.config.backend.name != 'Vivado' or \
            model.config.get_config_value('IOType') != 'io_stream':
            return False

        attrs = {
            'target_shape': node.get_attr('target_shape')
        }
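        # Hedged sketch of how the truncated transform above typically
        # concludes (assumed from the hls4ml model API of the same era, not
        # copied verbatim from the source): replace the Reshape node with a
        # Repack node that carries the target shape, then report a change.
        repack_layer = model.make_node('Repack', 'repack_' + node.name,
                                       attrs, node.inputs.copy())
        model.replace_node(node, repack_layer)
        return True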
Example #6
sepconv1d_include_list = [
    'nnet_utils/nnet_conv1d.h', 'nnet_utils/nnet_sepconv1d_stream.h'
]
sepconv2d_include_list = [
    'nnet_utils/nnet_conv2d.h', 'nnet_utils/nnet_sepconv2d_stream.h'
]

# Register the layer types to the layer map
register_layer('PointwiseConv1D', PointwiseConv1D)
register_layer('PointwiseConv2D', PointwiseConv2D)

# Register the templates for config and function
for backend in ['Vivado', 'VivadoAccelerator']:
    templates.get_backend(backend).register_templates(
        'PointwiseConv1D', pointwise_conv1d_function_template,
        templates.get_backend(backend).get_config_template('Conv1D'),
        sepconv1d_include_list)

    templates.get_backend(backend).register_templates(
        'PointwiseConv2D', pointwise_conv2d_function_template,
        templates.get_backend(backend).get_config_template('Conv2D'),
        sepconv2d_include_list)


class OptimizePointwiseConv(OptimizerPass):
    def match(self, node):
        return node.__class__.__name__ in ['Conv1D', 'Conv2D'] and \
            node.get_attr('filt_height', 1) == 1 and \
            node.get_attr('filt_width') == 1

    def transform(self, model, node):
Example #7
broadcast_function_template = 'nnet::broadcast_stream<{input_t}, {output_t}, {config}>({input}, {output});'
broadcast_config_template = """struct config{index} : nnet::broadcast_config {{
    static const unsigned in_width = {in_width};
    static const unsigned in_height = {in_height};
    static const unsigned n_chan = {n_chan};
    static const unsigned n_dupl = {n_dupl};
}};\n"""
broadcast_include_list = ['nnet_utils/nnet_stream.h']

# Register the layer types to the layer map
register_layer('Repack', Repack)
register_layer('Broadcast', Broadcast)

# Register the templates for config and function
for backend in ['Vivado', 'VivadoAccelerator']:
    templates.get_backend(backend).register_templates('Repack', repack_function_template, None, repack_include_list)
    templates.get_backend(backend).register_templates('Broadcast', broadcast_function_template, broadcast_config_template, broadcast_include_list)


class ReshapeStream(OptimizerPass):
    ''' Repacks stream for Reshape layer '''
    def match(self, node):
        return node.__class__.__name__ == 'Reshape'

    def transform(self, model, node):
        if model.config.backend.name not in ['Vivado', 'VivadoAccelerator'] or \
            model.config.get_config_value('IOType') != 'io_stream':
            return False

        attrs = {
            'target_shape': node.get_attr('target_shape')
Example #8
    static const unsigned io_type = nnet::{iotype};
    static const unsigned reuse_factor = {reuse};
}};\n"""

batchnorm_quantized_tanh_function_template = 'nnet::normalize_{quantize}_tanh<{input_t}, {config}>({input}, {output}, {threshold});'

# Register the layer types to the layer map
register_layer('BatchNormalizationQuantizedTanh',
               BatchNormalizationQuantizedTanh)

from hls4ml.templates.vivado_template import batchnorm_include_list

# Register the templates for config and function
for backend in ['Vivado', 'VivadoAccelerator']:
    templates.get_backend(backend).register_templates(
        'BatchNormalizationQuantizedTanh',
        batchnorm_quantized_tanh_function_template,
        batchnorm_quantized_tanh_config_template, batchnorm_include_list)


class MergeBatchNormAndQuantizedTanh(OptimizerPass):
    def match(self, node):
        is_match = (node.__class__.__name__ == 'Activation'
                    and node.get_attr('activation')
                    in ['binary', 'binary_tanh', 'ternary', 'ternary_tanh']
                    or node.__class__.__name__ == 'TernaryTanh')
        is_match = is_match and isinstance(node.get_input_node(),
                                           BatchNormalization)
        return is_match

    def transform(self, model, node):
        bn_layer = node.get_input_node()
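        # Not shown in this truncated snippet: the remainder of the pass folds
        # bn_layer's scale/bias into sign/threshold parameters of a
        # BatchNormalizationQuantizedTanh node, which then replaces both the
        # BatchNormalization layer and the activation node in the graph
        # (a description of intent inferred from the class and layer names).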