Example #1
    def __init__(self, config):
        self.config = config

        self.backend = get_backend(self.config.get('Backend', 'Vivado'))
        self.writer = get_writer(self.config.get('Backend', 'Vivado'))

        # Precision, reuse factor, strategy and compression settings are each
        # tracked at three scopes: model-wide, per layer type, per layer name
        self.model_precision = {}
        self.layer_type_precision = {}
        self.layer_name_precision = {}

        self.model_rf = None
        self.layer_type_rf = {}
        self.layer_name_rf = {}

        self.model_strategy = 'Latency'
        self.layer_type_strategy = {}
        self.layer_name_strategy = {}

        self.model_compression = False
        self.layer_type_compression = {}
        self.layer_name_compression = {}

        self.trace_output = self.get_config_value('TraceOutput', False)

        self._parse_hls_config()
        self._validate_hls_config()
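
The config argument above is the parsed hls4ml configuration dictionary. A minimal sketch of one follows, assuming the usual hls4ml YAML schema; every key besides 'Backend' is an assumption inferred from the attributes initialized above:

config = {
    'Backend': 'Vivado',  # consumed by get_backend()/get_writer()
    'HLSConfig': {
        'Model': {
            'Precision': 'ap_fixed<16,6>',  # model-wide default precision
            'ReuseFactor': 1,
            'Strategy': 'Latency',
        },
        # Optional per-layer-type / per-layer-name sections would be parsed
        # by _parse_hls_config() into the layer_type_* / layer_name_* dicts.
    },
}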
Example #2
    def __init__(self, config, data_reader, layer_list, inputs=None, outputs=None):
        self.config = HLSConfig(config)
        self.backend = get_backend(config.get('Backend', 'Vivado'))
        self.reader = data_reader
        # If not provided, assume layer_list[0] is the input and layer_list[-1] is the output
        self.inputs = inputs if inputs is not None else [layer_list[0]['name']]
        self.outputs = outputs if outputs is not None else [layer_list[-1]['name']]
        self.index = 0
        self.graph = OrderedDict()
        self.output_vars = {}
        self._top_function_lib = None
        self._make_graph(layer_list)
        self._optimize_model(self.config.optimizers)
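
The layer_list argument is the list of layer dictionaries produced by the frontend converter; the defaulting above takes the first entry as the model input and the last as the output. A minimal sketch, assuming this constructor belongs to HLSModel and showing only illustrative keys:

layer_list = [
    {'name': 'input1', 'class_name': 'InputLayer'},
    {'name': 'fc1', 'class_name': 'Dense'},
    {'name': 'softmax1', 'class_name': 'Activation'},
]
model = HLSModel(config, data_reader, layer_list)
# model.inputs == ['input1'], model.outputs == ['softmax1']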
Example #3
batchnorm_quantized_tanh_config_template = """struct config{index} : nnet::batchnorm_quantized_tanh_config {{
    static const unsigned n_in = {n_in};
    static const unsigned n_filt = {n_filt};
    static const unsigned io_type = nnet::{iotype};
    static const unsigned reuse_factor = {reuse};
}};\n"""

batchnorm_quantized_tanh_function_template = 'nnet::normalize_{quantize}_tanh<{input_t}, {config}>({input}, {output}, {threshold});'
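
# For illustration only: rendering the function template with made-up
# values shows the HLS call that ends up in the generated code:
example_call = batchnorm_quantized_tanh_function_template.format(
    quantize='binary', input_t='input_t', config='config4',
    input='layer4_out', output='layer5_out', threshold='t4')
# -> 'nnet::normalize_binary_tanh<input_t, config4>(layer4_out, layer5_out, t4);'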

# Register the new layer type in the layer map
hls_model.register_layer('BatchNormalizationQuantizedTanh',
                         BatchNormalizationQuantizedTanh)

# Register the templates for config and function
templates.get_backend('Vivado').register_templates(
    'BatchNormalizationQuantizedTanh',
    batchnorm_quantized_tanh_function_template,
    batchnorm_quantized_tanh_config_template)


class MergeBatchNormAndQuantizedTanh(OptimizerPass):
    def match(self, node):
        is_match = (node.__class__.__name__ == 'Activation'
                    and node.get_attr('activation')
                    in ['binary_tanh', 'ternary_tanh']
                    and node.get_input_node().__class__.__name__
                    == 'BatchNormalization')
        return is_match

    def transform(self, model, node):
        bn_layer = node.get_input_node()
        # Remove the Activation layer