Example #1
class BatchNormalization(Layer):
    _expected_attributes = [
        Attribute('n_in'),
        Attribute('n_filt', default=0),

        WeightAttribute('scale'),
        WeightAttribute('bias'),

        TypeAttribute('scale'),
        TypeAttribute('bias'),
    ]

    def initialize(self):
        inp = self.get_input_variable()
        shape = inp.shape
        dims = inp.dim_names
        self.add_output_variable(shape, dims)

        gamma = self.model.get_weights_data(self.name, 'gamma')
        beta = self.model.get_weights_data(self.name, 'beta')
        mean = self.model.get_weights_data(self.name, 'moving_mean')
        var = self.model.get_weights_data(self.name, 'moving_variance')

        scale = gamma / np.sqrt(var + self.get_attr('epsilon'))
        bias = beta - gamma * mean / np.sqrt(var + self.get_attr('epsilon'))

        self.add_weights_variable(name='scale', var_name='s{index}', data=scale)
        self.add_weights_variable(name='bias', var_name='b{index}', data=bias)
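The four stored BatchNormalization arrays are folded into a single scale and bias, so inference reduces to one multiply-add per element. A minimal standalone check of that algebra in plain NumPy (not part of the hls4ml API):

import numpy as np

x = np.array([0.5, -1.2, 3.0])
gamma, beta, mean, var, eps = 1.5, 0.1, 0.2, 0.9, 1e-3

# Folded parameters, exactly as computed in initialize() above
scale = gamma / np.sqrt(var + eps)
bias = beta - gamma * mean / np.sqrt(var + eps)

# scale * x + bias must match the textbook normalization formula
assert np.allclose(scale * x + bias,
                   gamma * (x - mean) / np.sqrt(var + eps) + beta)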
Example #2
class GlobalPooling1D(Layer):
    _expected_attributes = [
        Attribute('n_in'),
        Attribute('n_filt'),

        ChoiceAttribute('pool_op', ['Max', 'Average'])
    ]

    def initialize(self):
        shape = [self.attributes['n_filt']]
        dims = ['N_FILT_{}'.format(self.index)]
        self.add_output_variable(shape, dims)
        self.set_attr('pool_op', self.get_attr('class_name').split('Pooling')[0].replace('Global', ''))
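The last line derives pool_op from the Keras class name. A standalone sketch of that string manipulation:

for class_name in ('GlobalMaxPooling1D', 'GlobalAveragePooling1D'):
    pool_op = class_name.split('Pooling')[0].replace('Global', '')
    print(class_name, '->', pool_op)   # -> Max, Average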
Example #3
class Conv1D(Layer):
    _expected_attributes = [
        Attribute('in_width'),
        Attribute('out_width'),

        Attribute('n_chan'),
        Attribute('n_filt'),

        Attribute('filt_width'),
        Attribute('stride_width'),

        Attribute('pad_left'),
        Attribute('pad_right'),

        WeightAttribute('weight'),
        WeightAttribute('bias'),

        TypeAttribute('weight'),
        TypeAttribute('bias'),
    ]

    def initialize(self):
        if self.get_attr('data_format') == 'channels_last':
            shape = [self.attributes['out_width'], self.attributes['n_filt']]
            dims = ['N_OUTPUTS_{}'.format(self.index), 'N_FILT_{}'.format(self.index)]
        else:
            shape = [self.attributes['n_filt'], self.attributes['out_width']]
            dims = ['N_FILT_{}'.format(self.index), 'N_OUTPUTS_{}'.format(self.index)]

        self.add_output_variable(shape, dims)
        self.add_weights(quantizer = self.get_attr('weight_quantizer'))
        self.add_bias(quantizer = self.get_attr('bias_quantizer'))
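The expected width attributes are tied together by the usual convolution output arithmetic. The sketch below shows the relation (conv1d_out_width is a hypothetical helper; the real value is computed by the frontend converter):

def conv1d_out_width(in_width, filt_width, stride_width, pad_left, pad_right):
    # standard cross-correlation output size
    return (in_width + pad_left + pad_right - filt_width) // stride_width + 1

print(conv1d_out_width(in_width=100, filt_width=3, stride_width=1,
                       pad_left=1, pad_right=1))   # 100, i.e. 'same' padding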
Example #4
class Activation(Layer):
    _expected_attributes = [
        Attribute('n_in'),
        Attribute('activation', value_type=str),
        #Attribute('table_size', default=1024),
        
        #TypeAttribute('table')
    ]

    def initialize(self):
        inp = self.get_input_variable()
        shape = inp.shape
        dims = inp.dim_names
        self.add_output_variable(shape, dims)
        self.set_attr('n_in', self.get_input_variable().size())
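Note that n_in is reset to the flattened input size, since an element-wise activation ignores tensor structure; size() returns the total element count, i.e. the product of the shape. Illustration:

import numpy as np

shape = [8, 16]              # e.g. a (width, channels) input
n_in = int(np.prod(shape))   # 128 elements pass through the activation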
Example #5
class Pooling1D(Layer):
    _expected_attributes = [
        Attribute('n_in'),
        Attribute('n_out'),
        Attribute('n_filt'),

        Attribute('pool_width'),
        Attribute('stride_width'),

        Attribute('pad_left'),
        Attribute('pad_right'),

        ChoiceAttribute('pool_op', ['Max', 'Average'])
    ]

    def initialize(self):
        if self.get_attr('data_format') == 'channels_last':
            shape = [self.attributes['n_out'], self.attributes['n_filt']]
            dims = ['N_OUTPUTS_{}'.format(self.index), 'N_FILT_{}'.format(self.index)]
        else:
            shape = [self.attributes['n_filt'], self.attributes['n_out']]
            dims = ['N_FILT_{}'.format(self.index), 'N_OUTPUTS_{}'.format(self.index)]
        self.add_output_variable(shape, dims)
        self.set_attr('pool_op', self.get_attr('class_name').split('Pooling')[0])
        self.set_attr('implementation', self.model.config.get_conv_implementation(self).lower())
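Compared to the global variant in Example #2 there is no 'Global' prefix to strip, and an implementation strategy is recorded as well. The pool_op parse as a standalone sketch:

print('MaxPooling1D'.split('Pooling')[0])       # Max
print('AveragePooling1D'.split('Pooling')[0])   # Average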
Example #6
class ZeroPadding1D(Layer):
    _expected_attributes = [
        Attribute('in_width'),
        Attribute('out_width'),
        Attribute('n_chan'),

        Attribute('pad_left'),
        Attribute('pad_right'),
    ]

    def initialize(self):
        inp = self.get_input_variable()
        if self.get_attr('data_format') == 'channels_last':
            shape = [self.attributes['out_width'], self.attributes['n_chan']]
            dims = ['OUT_WIDTH_{}'.format(self.index), 'N_CHAN_{}'.format(self.index)]
        else:
            shape = [self.attributes['n_chan'], self.attributes['out_width']]
            dims = ['N_CHAN_{}'.format(self.index), 'OUT_WIDTH_{}'.format(self.index)]
        self.add_output_variable(shape, dims, precision=inp.type.precision)
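Padding only grows the width dimension, and the output keeps the input's precision. The width relation among the expected attributes (zeropad1d_out_width is a hypothetical helper; the converter supplies out_width):

def zeropad1d_out_width(in_width, pad_left, pad_right):
    return pad_left + in_width + pad_right

print(zeropad1d_out_width(in_width=100, pad_left=2, pad_right=2))   # 104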
Example #7
class Embedding(Layer):
    _expected_attributes = [
        Attribute('n_in'),
        Attribute('n_out'),
        Attribute('vocab_size'),

        WeightAttribute('embeddings'),
        TypeAttribute('embeddings'),
    ]

    def initialize(self):
        shape = self.get_input_variable().shape[:]
        shape += [self.attributes['n_out']]
        if len(shape) > 1:
            dims = ['N_LAYER_{}_{}'.format(i, self.index) for i in range(1, len(shape) + 1)]
        else:
            dims = ['N_LAYER_{}'.format(self.index)]
        self.add_output_variable(shape, dims)

        data = self.model.get_weights_data(self.name, 'embeddings')
        self.add_weights_variable(name='embeddings', var_name='e{index}', data=data)
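Unlike most layers here, Embedding appends a dimension rather than transforming one: every integer index becomes an n_out-wide vector. The shape arithmetic from initialize() as a sketch:

input_shape = [10]                        # a sequence of 10 token indices
n_out = 16                                # embedding width
output_shape = input_shape[:] + [n_out]   # [10, 16]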
Example #8
class Dense(Layer):
    _expected_attributes = [
        Attribute('n_in'),
        Attribute('n_out'),

        WeightAttribute('weight'),
        WeightAttribute('bias'),

        TypeAttribute('weight'),
        TypeAttribute('bias'),
    ]

    def initialize(self):
        shape = self.get_input_variable().shape[:]
        shape[-1] = self.attributes['n_out']
        if len(shape) > 1:
            dims = ['N_LAYER_{}_{}'.format(i, self.index) for i in range(1, len(shape) + 1)]
        else:
            dims = ['N_LAYER_{}'.format(self.index)]
        self.add_output_variable(shape, dims)
        self.add_weights(quantizer=self.get_attr('weight_quantizer'), compression=self.model.config.get_compression(self))
        self.add_bias(quantizer=self.get_attr('bias_quantizer'))
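Dense, by contrast, replaces the last dimension with n_out and leaves any leading dimensions intact; it is also the only layer in these examples that supports weight compression. Sketch:

input_shape = [10, 64]
n_out = 32
output_shape = input_shape[:]
output_shape[-1] = n_out   # [10, 32]: each 64-wide vector is mapped to 32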
Example #9
class SimpleRNN(Layer):
    _expected_attributes = [
        Attribute('n_out'),
        Attribute('activation', value_type=str),
        Attribute('return_sequences', value_type=bool, default=False),
        Attribute('return_state', value_type=bool, default=False),
        ChoiceAttribute('direction', ['forward', 'backward'], default='forward'),

        WeightAttribute('weight'),
        WeightAttribute('bias'),
        WeightAttribute('recurrent_weight'),

        TypeAttribute('weight'),
        TypeAttribute('bias'),
        TypeAttribute('recurrent_weight'),
    ]

    def initialize(self):
        if self.attributes['return_sequences']:
            shape = [self.attributes['n_timesteps'], self.attributes['n_out']]
            dims = ['N_TIME_STEPS_{}'.format(self.index), 'N_OUT_{}'.format(self.index)]
        else:
            shape = [self.attributes['n_out']]
            dims = ['N_OUT_{}'.format(self.index)]
        
        self.add_output_variable(shape, dims)

        if self.attributes['return_state']:
            state_shape = [self.attributes['n_out']]
            state_dims = ['N_OUT_{}'.format(self.index)]
            self.add_output_variable(state_shape, state_dims, out_name=self.outputs[1], var_name='layer{index}_h', type_name='layer{index}_h_t')
            self.add_output_variable(state_shape, state_dims, out_name=self.outputs[2], var_name='layer{index}_c', type_name='layer{index}_c_t')

        self.add_weights()
        self.add_bias()

        recurrent_weight = self.model.get_weights_data(self.name, 'recurrent_kernel')
        self.add_weights_variable(name='recurrent_weight', var_name='wr{index}', data=recurrent_weight)
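The output shape branches on return_sequences: either the full sequence of hidden states or only the final one. Sketch of the two cases:

n_timesteps, n_out = 20, 8
for return_sequences in (True, False):
    shape = [n_timesteps, n_out] if return_sequences else [n_out]
    print(return_sequences, shape)   # True -> [20, 8], False -> [8]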
Example #10
class SeparableConv1D(Layer):
    _expected_attributes = [
        Attribute('in_width'),
        Attribute('out_width'),

        Attribute('n_chan'),
        Attribute('n_filt'),

        Attribute('filt_width'),
        Attribute('stride_width'),

        Attribute('pad_left'),
        Attribute('pad_right'),
        
        WeightAttribute('depthwise'),
        WeightAttribute('pointwise'),
        WeightAttribute('bias'),

        TypeAttribute('depthwise'),
        TypeAttribute('pointwise'),
        TypeAttribute('bias'),
    ]

    def initialize(self):
        if self.get_attr('data_format') == 'channels_last':
            shape = [self.attributes['out_width'], self.attributes['n_filt']]
            dims = ['N_OUTPUTS_{}'.format(self.index), 'N_FILT_{}'.format(self.index)]
        else:
            shape = [self.attributes['n_filt'], self.attributes['out_width']]
            dims = ['N_FILT_{}'.format(self.index), 'N_OUTPUTS_{}'.format(self.index)]
        self.add_output_variable(shape, dims)
        
        depthwise_data = self.model.get_weights_data(self.name, 'depthwise_kernel')
        pointwise_data = self.model.get_weights_data(self.name, 'pointwise_kernel')

        self.add_weights_variable(name='depthwise', var_name='d{index}', data=depthwise_data, quantizer=self.get_attr('depthwise_quantizer'))
        self.add_weights_variable(name='pointwise', var_name='p{index}', data=pointwise_data, quantizer=self.get_attr('pointwise_quantizer'))
        
        zero_bias_data = np.zeros((self.attributes['n_chan'],))
        precision = IntegerPrecisionType(width=1, signed=False)
        self.add_weights_variable(name='zero_bias', var_name='z{index}', data=zero_bias_data, precision=precision)

        self.add_bias(quantizer=self.get_attr('bias_quantizer'))
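The depthwise/pointwise split is what makes the layer 'separable': one filter per input channel followed by a 1x1 channel mix, which shrinks the parameter count relative to a full Conv1D. A sketch of the counts, ignoring biases:

filt_width, n_chan, n_filt = 3, 16, 32
separable_params = filt_width * n_chan + n_chan * n_filt   # 48 + 512 = 560
full_params = filt_width * n_chan * n_filt                 # 1536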
Example #11
    def _register_layer_attributes(self):
        extended_attrs = {
            SimpleRNN: [
                Attribute('recurrent_reuse_factor', default=1),
                Attribute('static', value_type=bool, default=True)
            ],
            LSTM: [
                Attribute('recurrent_reuse_factor', default=1),
                Attribute('static', value_type=bool, default=True)
            ],
            GRU: [
                Attribute('recurrent_reuse_factor', default=1),
                Attribute('static', value_type=bool, default=True)
            ],
        }
        self.attribute_map.update(extended_attrs)
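This hook layers recurrent-specific defaults on top of whatever the backend already registers (see Example #13, where reuse_factor is attached to the Layer base class); keying the map on the layer class lets each RNN variant pick up both sets during attribute validation.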
Example #12
class Layer(object):
    _expected_attributes = [
        Attribute('index'),

        TypeAttribute('accum'),
        TypeAttribute('result'),
    ]

    @classproperty
    def expected_attributes(cls):
        """ Returns the expected attributes of a class. """
        all_attributes = []
        for base_cls in reversed(cls.mro()): # Iterate over all base classes in the hierarchy
            if cls == base_cls: # Skip adding attributes from self
                continue
            if hasattr(base_cls, '_expected_attributes'): # Only consider classes with '_expected_attributes' defined
                all_attributes.extend(base_cls._expected_attributes)
        all_attributes.extend(cls._expected_attributes)
        return all_attributes

    def __init__(self, model, name, attributes, inputs, outputs=None):
        if name == 'input':
            raise RuntimeError("No model layer should be named 'input' because that is a reserved;" + \
                               "layer name in ModelGraph; Please rename the layer in your model")
        self.model = model
        self.name = name
        self.index = model.next_layer()
        self.inputs = inputs
        self.outputs = outputs
        if self.outputs is None:
            self.outputs = [self.name]

        self.attributes = AttributeDict(self)
        self.attributes.update(attributes)

        self.set_attr('index', self.index)

        self.weights = WeightMapping(self.attributes)
        self.variables = VariableMapping(self.attributes)
        self.types = TypeMapping(self.attributes)

        accum_t = NamedType(*reversed(self.model.config.get_precision(self, 'accum')))
        self.set_attr('accum_t', accum_t)

        layer_config = self.model.config.get_layer_config(self)
        for config_key, config_value in layer_config.items():
            if config_key in self.attributes:
                print('WARNING: Config parameter "{}" overwrites an existing attribute in layer "{}" ({})'.format(config_key, self.name, self.class_name))
            if config_key.endswith('_t') and isinstance(config_value, str): #TODO maybe move this to __setitem__ of AttributeDict?
                precision = self.model.config.backend.convert_precision_string(config_value)
                config_value = NamedType(self.name + config_key, precision)
            self.attributes[config_key] = config_value

        self.initialize()
        self._validate_attributes()

    @property
    def class_name(self, include_wrapped=False):
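        # Accessed as a property, so include_wrapped always takes its default value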
        if include_wrapped:
            return self.__class__.__name__
        else:
            if hasattr(self, '_wrapped'):
                return self.__class__.__bases__[0].__name__
            else:
                return self.__class__.__name__

    def initialize(self):
        raise NotImplementedError

    def set_attr(self, key, value):
        self.attributes[key] = value

    def get_attr(self, key, default=None):
        return self.attributes.get(key, default)

    def _validate_attributes(self):
        all_attributes = {}
        for attr in self.expected_attributes:
            all_attributes[attr.name] = attr
        
        # Validate existing attributes
        for attr_name, attr_value in self.attributes.items():
            exp_attr = all_attributes.pop(attr_name, None)
            if exp_attr is not None:
                if not exp_attr.validate_value(attr_value):
                    raise Exception('Unexpected value of attribute "{}" of layer "{}" ({}). Expected {}, got {} ({})'
                        .format(attr_name, self.name, self.class_name, exp_attr.value_type, type(attr_value), attr_value))
            else:
                pass # TODO layer contains attribute that is not expected. we can log this for debugging
        
        # If any expected attributes remain, try adding their default values
        for attr_name, attr in all_attributes.items():
            if attr.default is not None:
                self.set_attr(attr_name, attr.default)
            else:
                raise Exception('Attribute "{}" of layer {} ({}) not set and no default value is specified.'.format(attr_name, self.name, self.class_name))

    def get_input_node(self, input_name=None):
        if input_name is not None:
            nodes = [node for node in self.model.graph.values() if input_name in node.outputs]
            if len(nodes) == 0:
                return None
            else:
                return nodes[0]
        else:
            return self.model.graph.get(self.inputs[0])

    def get_input_variable(self, input_name=None):
        if input_name is not None:
            return self.model.get_layer_output_variable(input_name)
        else:
            return self.model.get_layer_output_variable(self.inputs[0])

    def get_output_nodes(self, output_name=None):
        if output_name is None:
            output_name = self.outputs[0]
        return [node for node in self.model.graph.values() if node.inputs[0] == output_name]

    def get_output_variable(self, output_name=None):
        if output_name is not None:
            return self.variables[output_name]
        else:
            return next(iter(self.variables.values()))

    def get_weights(self, var_name=None):
        if var_name:
            return self.weights[var_name]

        return self.weights.values()

    def get_variables(self):
        return self.variables.values()

    def add_output_variable(self, shape, dim_names, out_name=None, var_name='layer{index}_out', type_name='layer{index}_t', precision=None):
        if out_name is None:
            out_name = self.outputs[0]

        if precision is None:
            precision, _ = self.model.config.get_precision(self, var='result')

        out = TensorVariable(shape, dim_names, var_name=var_name, type_name=type_name, precision=precision, index=self.index)

        self.set_attr(out_name, out)

    def add_weights(self, quantizer=None, compression=False):
        data = self.model.get_weights_data(self.name, 'kernel')

        self.add_weights_variable(name='weight', var_name='w{index}', data=data, quantizer=quantizer, compression=compression)

    def add_bias(self, quantizer=None):
        data = self.model.get_weights_data(self.name, 'bias')
        precision = None
        type_name = None
        if data is None:
            data = np.zeros(self.get_output_variable().shape[-1])
            precision = IntegerPrecisionType(width=1, signed=False)
            type_name = 'bias{index}_t'
            quantizer = None # Don't quantize non-existent bias

        self.add_weights_variable(name='bias', var_name='b{index}', type_name=type_name, precision=precision, data=data, quantizer=quantizer)

    def add_weights_variable(self, name, var_name=None, type_name=None, precision=None, data=None, quantizer=None, compression=False):
        if var_name is None:
            var_name = name + '{index}'

        if precision is None:
            precision, _ = self.model.config.get_precision(self, var=name)
        elif type_name is None:
            # If precision is specified but no type name is given, assign a dedicated
            # type name made from variable name and layer index
            type_name = name + '{index}_t'

        if type_name is None:
            _, type_name = self.model.config.get_precision(self, var=name)

        if data is None:
            data = self.model.get_weights_data(self.name, name)
        elif isinstance(data, six.string_types):
            data = self.model.get_weights_data(self.name, data)

        data_unquantized = data
        exponent_type = False
        if quantizer is not None:
            precision = quantizer.hls_type
            type_name = name + '{index}_t'
            data = quantizer(data)
            if isinstance(quantizer.hls_type, ExponentPrecisionType):
                exponent_type = True

        if compression:
            #TODO reuse factor may not be available here
            var = CompressedWeightVariable(var_name, type_name=type_name, precision=precision, quantizer=quantizer, data=data, reuse_factor=self.get_attr('reuse_factor', 1), index=self.index)
        elif exponent_type:
            var = ExponentWeightVariable(var_name, type_name=type_name, precision=precision, quantizer=quantizer, data=data, index=self.index)
        else:
            var = WeightVariable(var_name, type_name=type_name, precision=precision, quantizer=quantizer, data=data, index=self.index)

        var.data_unquantized = data_unquantized

        self.set_attr(name, var)

    def _default_function_params(self):
        params = {}
        params.update(self.attributes)
        params['config'] = 'config{}'.format(self.index)
        params['input_t'] = self.get_input_variable().type.name
        params['output_t'] = self.get_output_variable().type.name
        params['input'] = self.get_input_variable().name
        params['output'] = self.get_output_variable().name

        return params

    def _default_config_params(self):
        params = {}
        params.update(self.attributes)
        params['iotype'] = self.model.config.get_config_value('IOType')
        params['reuse'] = self.get_attr('reuse_factor')

        return params

    def get_layer_precision(self):
        precision = {}
        for data_type in self.types.values():
            precision[data_type.name] = data_type
        return precision

    def get_numbers_cpp(self):
        numbers = ''
        for k, v in self.get_output_variable().get_shape():
            numbers += '#define {} {}\n'.format(k,v)

        return numbers

    def precision_cpp(self):
        return 'typedef {precision} layer{index}_t;'.format(precision=self.get_output_variable().precision, index=self.index)
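The expected_attributes classproperty at the top of this class is what lets the per-layer _expected_attributes lists in Examples #1-#10 compose: base-class attributes come first, the concrete class's own last. A standalone sketch of the traversal with stand-in classes:

class Base:
    _expected_attributes = ['index']

class Child(Base):
    _expected_attributes = ['n_in']

def expected_attributes(cls):
    attrs = []
    for base_cls in reversed(cls.mro()):    # object, Base, Child
        if base_cls is cls:
            continue                        # the class itself is added last
        if hasattr(base_cls, '_expected_attributes'):
            attrs.extend(base_cls._expected_attributes)
    attrs.extend(cls._expected_attributes)
    return attrs

print(expected_attributes(Child))   # ['index', 'n_in']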
Example #13
    def __init__(self, name):
        super(FPGABackend, self).__init__(name)

        self.writer = get_writer(self.name)

        self.attribute_map = {Layer: [Attribute('reuse_factor', default=1)]}
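Because the map is keyed on the Layer base class, reuse_factor=1 becomes a default expectation for every layer this backend handles; subclass-specific entries, such as those registered in Example #11, are layered on top.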