def build(self, input_shape):
    self.kernel = self.add_weight(name='kernel',
                                  shape=(input_shape[1], input_shape[2], 1),
                                  initializer='ones',
                                  trainable=True)
    Layer.build(self, input_shape)
Example #2
    def __init__(self, **kwargs):
        self.config = kwargs.pop(str('config'), None)
        self.layer_type = self.class_name
        self.batch_size = self.config.getint('simulation', 'batch_size')
        self.dt = self.config.getfloat('simulation', 'dt')
        self.duration = self.config.getint('simulation', 'duration')
        self.tau_refrac = self.config.getfloat('cell', 'tau_refrac')
        self._v_thresh = self.config.getfloat('cell', 'v_thresh')
        self.v_thresh = None
        self.time = None
        self.mem = self.spiketrain = self.impulse = None
        self.refrac_until = None
        self.last_spiketimes = None

        allowed_kwargs = {'input_shape',
                          'batch_input_shape',
                          'batch_size',
                          'dtype',
                          'name',
                          'trainable',
                          'weights',
                          'input_dtype',  # legacy
                          }
        for kwarg in kwargs.copy():
            if kwarg not in allowed_kwargs:
                kwargs.pop(kwarg)
        Layer.__init__(self, **kwargs)
        self.stateful = True
Example #3
    def __init__(self,
                 size,
                 initializer='glorot_uniform',
                 regularizer=None,
                 name=None,
                 **kwargs):
        self.size = tuple(size)
        self.initializer = initializers.get(initializer)
        self.regularizer = regularizers.get(regularizer)

        if not name:
            prefix = 'shared_weight'
            name = prefix + '_' + str(K.get_uid(prefix))

        Layer.__init__(self, name=name, **kwargs)

        with K.name_scope(self.name):
            self.kernel = self.add_weight(shape=self.size,
                                          initializer=self.initializer,
                                          name='kernel',
                                          regularizer=self.regularizer)

        self.trainable = True
        self.built = True
        # self.sparse = sparse

        # input_tensor = self.kernel * 1.0

        self.is_placeholder = False
Example #4
    def __init__(self,
                 mask_value=0,
                 include_self=True,
                 flatten_indices_features=False,
                 **kwargs):
        Layer.__init__(self, **kwargs)
        self.mask_value = mask_value
        self.include_self = include_self
        self.flatten_indices_features = flatten_indices_features
Example #5
    def __init__(self, **kwargs):
        self.config = kwargs.pop(str('config'), None)
        self.layer_type = self.class_name
        self.dt = self.config.getfloat('simulation', 'dt')
        self.duration = self.config.getint('simulation', 'duration')
        self.tau_refrac = self.config.getfloat('cell', 'tau_refrac')
        # if 'v_thresh' in kwargs:
        #     self._v_thresh = kwargs['v_thresh']
        # else:
        #     self._v_thresh = self.config.getfloat('cell', 'v_thresh')
        self._v_thresh = self.config.getfloat('cell', 'v_thresh')
        self.v_thresh = None
        self.time = None
        self.mem = self.spiketrain = self.impulse = self.spikecounts = None
        self.refrac_until = self.max_spikerate = None
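        # Note: `bias_relaxation` and `clamp_var` below are assumed to be
        # module-level flags defined elsewhere (e.g. derived from the config);
        # they are not set anywhere in this snippet.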
        if bias_relaxation:
            self.b0 = None
        if clamp_var:
            self.spikerate = self.var = None

        import os
        from snntoolbox.utils.utils import get_abs_path
        path, filename = \
            get_abs_path(self.config.get('paths', 'filename_clamp_indices'),
                         self.config)
        if filename != '':
            filepath = os.path.join(path, filename)
            assert os.path.isfile(filepath), \
                "File with clamp indices not found at {}.".format(filepath)
            self.filename_clamp_indices = filepath
            self.clamp_idx = None

        self.payloads = self.config.getboolean('cell', 'payloads')
        self.payloads_sum = None
        self.online_normalization = self.config.getboolean(
            'normalization', 'online_normalization')

        allowed_kwargs = {'input_shape',
                          'batch_input_shape',
                          'batch_size',
                          'dtype',
                          'name',
                          'trainable',
                          'weights',
                          'input_dtype',  # legacy
                          }
        for kwarg in kwargs.copy():
            if kwarg not in allowed_kwargs:
                kwargs.pop(kwarg)
        Layer.__init__(self, **kwargs)
        self.stateful = True
Example #6
from typing import List

def interleave(base_layers: List[Layer], other_layer: Layer) -> List[Layer]:
    new_layers = []
    for layer in base_layers[:-1]:
        new_layers.append(layer)
        new_layers.append(other_layer.copy())
    new_layers.append(base_layers[-1])
    return new_layers
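A hedged usage sketch for interleave: Keras layers do not provide a copy()
method out of the box, so the sketch below assumes a small subclass that
clones itself from its config. The CopyableDropout class and the layer sizes
are illustrative, not part of the original snippet.

from keras.layers import Dense, Dropout

class CopyableDropout(Dropout):
    def copy(self):
        # clone this layer from its own serialized config
        return CopyableDropout.from_config(self.get_config())

blocks = [Dense(64, activation='relu') for _ in range(3)]
stack = interleave(blocks, CopyableDropout(0.2))
# -> [Dense, Dropout, Dense, Dropout, Dense]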
Example #7
def model_cnn(input_shape=(1800, 3, 1), output_dim=5,
              c_size=[32, 16, 8], k_size=[3, 3, 3], h_dim=[256, 32], d_p=[0.25, 0.25]):
    """Define CNN model
    returns model_cnn : CNN Keras model
    """

    m = Sequential()
    m.add(Layer(input_shape=input_shape, name='input'))
    
    for idx_n, n in enumerate(c_size):
        
        m.add(Conv2D(n, (k_size[idx_n], 1), padding='same', activation='relu', name='c_'+str(idx_n+1)))
        m.add(MaxPooling2D((2, 1), padding='same', name='p_'+str(idx_n+1)))
    
    m.add(Flatten(name='flatten'))
              
    for idx_n, n in enumerate(h_dim):
        m.add(Dense(n, activation='relu', name='h_'+str(idx_n+len(c_size)+1)))
        m.add(Dropout(d_p[idx_n], name='d_'+str(idx_n+len(c_size)+1)))
    
    m.add(Dense(output_dim, activation='relu', name='output'))
    sgd_lr, sgd_momentum, sgd_decay = (0.1, 0.8, 0.003)
    sgd = keras.optimizers.SGD(lr=sgd_lr,
                       momentum=sgd_momentum, 
                       decay=sgd_decay,
                       nesterov=False)
    m.compile(loss='binary_crossentropy',
              metrics=['categorical_accuracy'],
              optimizer=sgd)
              
    m.name = 'cnn'
    
    return m
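A minimal usage sketch for model_cnn, assuming the legacy Keras API that the
snippet itself targets (keras.optimizers.SGD with lr=). The random arrays,
batch size, and epoch count below are illustrative stand-ins, shaped to match
the default input_shape and output_dim:

import numpy as np

model = model_cnn()
x = np.random.rand(8, 1800, 3, 1).astype('float32')
y = np.random.randint(0, 2, size=(8, 5)).astype('float32')
model.fit(x, y, epochs=1, batch_size=4)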
Example #8
def AutoEncoder(input_dim, encoding_dim, drop, weights=None):

    # INPUT_DIM: input size
    # ENCODING_DIM: hidden layer size
    # DROP: dropout rate for regularization
    # WEIGHTS: optional path to pre-trained weights
    
    # We will create a sequential network
    net = Sequential()
    # Our input layer
    net.add(Layer(input_shape=(input_dim, )))
    # Lets add first hidden layer with relu activation
    net.add(Dense(encoding_dim, activation="relu"))
    # Add some dropout for regularization
    net.add(Dropout(drop))               
    # Second hidden layer 
    net.add(Dense(int(encoding_dim / 2), activation="relu"))
    net.add(Dropout(drop))
    # Third hidden layer
    net.add(Dense(int(encoding_dim / 2), activation='relu'))
    # Output layer
    net.add(Dense(input_dim, activation='linear'))
    
    # If trained weights provided load into the network
    if weights is not None:
        net.load_weights(weights)
    
    # Return the network
    return net
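A hedged usage sketch for AutoEncoder: the network is returned uncompiled, so
the caller compiles and fits it. The dimensions, optimizer, and loss below are
illustrative choices, not taken from the original snippet:

import numpy as np

ae = AutoEncoder(input_dim=30, encoding_dim=16, drop=0.1)
ae.compile(optimizer='adam', loss='mse')
x = np.random.rand(64, 30).astype('float32')
ae.fit(x, x, epochs=1, batch_size=16)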
Example #9
    def get_config(self):
        config = {
            'size': self.size,
            'initializer': initializers.serialize(self.initializer),
            'regularizer': regularizers.serialize(self.regularizer)
        }
        base_config = Layer.get_config(self)
        return dict(list(base_config.items()) + list(config.items()))
Example #10
    def getConfig(self):
        tmpLayerCfg = Layer().get_config()
        tmpLayerCfg['name'] = self.getName()
        return {
            'class_name': 'Layer',
            'name': self.getName(),
            'config': tmpLayerCfg
        }
Example #11
    def __init__(self,
                 size,
                 initializer='glorot_uniform',
                 regularizer=None,
                 name=None,
                 **kwargs):
        self.size = tuple(size)
        self.initializer = initializers.get(initializer)
        self.regularizer = regularizers.get(regularizer)

        if not name:
            prefix = 'shared_weight'
            name = prefix + '_' + str(K.get_uid(prefix))

        Layer.__init__(self, name=name, **kwargs)

        with K.name_scope(self.name):  # define the shared weight under this layer's name scope
            self.kernel = self.add_weight(shape=self.size,
                                          initializer=self.initializer,
                                          name='kernel',
                                          regularizer=self.regularizer)

        self.trainable = True
        self.built = True
        # self.sparse = sparse

        input_tensor = self.kernel * 1.0

        self.is_placeholder = False
        input_tensor._keras_shape = self.size

        input_tensor._uses_learning_phase = False
        input_tensor._keras_history = (self, 0, 0)

        Node(self,
             inbound_layers=[],
             node_indices=[],
             tensor_indices=[],
             input_tensors=[input_tensor],
             output_tensors=[input_tensor],
             input_masks=[None],
             output_masks=[None],
             input_shapes=[self.size],
             output_shapes=[self.size])
Example #12
    def __init__(self,
                 shape,
                 my_initializer='RandomNormal',
                 name=None,
                 mult=1.0,
                 **kwargs):
        self.shape = [1, *shape]
        self.my_initializer = my_initializer
        self.mult = mult

        if not name:
            prefix = 'param'
            name = '%s_%d' % (prefix, K.get_uid(prefix))
        Layer.__init__(self, name=name, **kwargs)

        # Create a trainable weight variable for this layer.
        with K.name_scope(self.name):
            self.kernel = self.add_weight(name='kernel',
                                          shape=self.shape,
                                          initializer=self.my_initializer,
                                          trainable=True)

        # prepare output tensor, which is essentially the kernel.
        output_tensor = self.kernel * self.mult
        output_tensor._keras_shape = self.shape
        output_tensor._uses_learning_phase = False
        output_tensor._keras_history = (self, 0, 0)
        output_tensor._batch_input_shape = self.shape

        self.trainable = True
        self.built = True
        self.is_placeholder = False

        # create new node
        Node(self,
             inbound_layers=[],
             node_indices=[],
             tensor_indices=[],
             input_tensors=[],
             output_tensors=[output_tensor],
             input_masks=[],
             output_masks=[None],
             input_shapes=[],
             output_shapes=[self.shape])
Example #13
    def __init__(self, **kwargs):
        self.config = kwargs.pop(str('config'), None)
        self.layer_type = self.class_name
        self.spikerates = None
        self.num_bits = self.config.getint('conversion', 'num_bits')

        allowed_kwargs = {
            'input_shape',
            'batch_input_shape',
            'batch_size',
            'dtype',
            'name',
            'trainable',
            'weights',
            'input_dtype',  # legacy
        }
        for kwarg in kwargs.copy():
            if kwarg not in allowed_kwargs:
                kwargs.pop(kwarg)
        Layer.__init__(self, **kwargs)
        self.stateful = True
Example #14
def segnet(shape=224):
    kernel = 3
    filter_size = 64
    pad = 1
    pool_size = 2
    model = Sequential()
    model.add(Layer(input_shape=(shape, shape, 3)))
    # encoder
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Conv2D(filter_size, (kernel, kernel), padding='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Conv2D(128, (kernel, kernel), padding='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Conv2D(256, (kernel, kernel), padding='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Conv2D(512, (kernel, kernel), padding='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    # decoder
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Conv2D(512, (kernel, kernel), padding='valid'))
    model.add(BatchNormalization())
    model.add(UpSampling2D(size=(pool_size, pool_size)))
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Conv2D(256, (kernel, kernel), padding='valid'))
    model.add(BatchNormalization())
    model.add(UpSampling2D(size=(pool_size, pool_size)))
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Conv2D(128, (kernel, kernel), padding='valid'))
    model.add(BatchNormalization())
    model.add(UpSampling2D(size=(pool_size, pool_size)))
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Conv2D(filter_size, (kernel, kernel), padding='valid'))
    model.add(BatchNormalization())
    model.add(Conv2D(
        2,
        (1, 1),
        padding='valid',
    ))
    model.outputHeight = model.output_shape[-2]
    model.outputWidth = model.output_shape[-3]
    model.add(Activation('softmax'))
    return model
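A quick, hedged sanity-check sketch for segnet: build the model with the
input size used above and inspect the output dimensions it records (attribute
names as defined in the snippet itself):

model = segnet(224)
model.summary()
print(model.outputHeight, model.outputWidth)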
Example #15
def build_CNN_model(seq_len=31, num_of_classes=13, embed_dim=128):
    model = Sequential()
    model.add(Layer(input_shape=(
        seq_len,
        embed_dim,
    )))
    model.add(Conv1D(64, kernel_size=3))
    model.add(Dropout(0.2))
    model.add(GlobalAveragePooling1D())
    model.add(Dense(num_of_classes, activation="softmax"))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
Example #16
    def __init__(self, layer: Layer, state, copy_weights=False) -> None:
        # W-square rule works with squared weights and no biases.
        if copy_weights:
            weights = layer.get_weights()
        else:
            weights = layer.weights
        if layer.use_bias:
            weights = weights[:-1]
        weights = [x**2 for x in weights]

        self._layer_wo_act_b = kgraph.copy_layer_wo_activation(
            layer,
            keep_bias=False,
            weights=weights,
            name_template="reversed_kernel_%s")
Example #17
    def __init__(self, embed_dim, from_inputs_features=None, pos_divisor=10000, keep_ndim=True, fix_range=None,
                 embeddings=['sin', 'cos'], **kwargs):
        """
        embed_dim: The output embedding will have embed_dim floats for sin and cos (separately).
        from_inputs_features: If not specified, a range over the length of the input sequence is used to
            generate the (integer) positions that are embedded by sines and cosines.
            If specified, it must be a list of coordinates into the last dimension of the input vector,
            which are taken as inputs to the positional embedding.
            The output size is then len(from_inputs_features)*embed_dim*len(embeddings).
            Has no effect when fix_range is set.
        pos_divisor: the division constant in the calculation.
        keep_ndim: if True, all embedded features are concatenated/flattened into one dimension, so the
            number of input dimensions is preserved.
        fix_range: if set, produces a sequence over a fixed range (the sequence length is not read from the
            input) and disables from_inputs_features.
        embeddings: a list of the functions to apply, chosen from 'sin', 'cos' and 'lin'.
        """
        Layer.__init__(self, **kwargs)
        self.pos_divisor = pos_divisor
        self.embed_dim = embed_dim
        self.keep_ndim = keep_ndim
        self.from_inputs_features = from_inputs_features
        self.fix_range = fix_range
        self.embeddings = embeddings
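As a point of reference for the docstring above, a minimal NumPy sketch in the
same spirit (embed_dim angles, sin and cos blocks concatenated) could look as
follows; the layer's exact behaviour, e.g. its handling of
from_inputs_features, may differ:

import numpy as np

def sincos_positions(seq_len, embed_dim, pos_divisor=10000):
    positions = np.arange(seq_len)[:, None]                # (seq_len, 1)
    dims = np.arange(embed_dim)[None, :]                   # (1, embed_dim)
    angles = positions / np.power(pos_divisor, dims / float(embed_dim))
    # one embed_dim-sized block per embedding function, concatenated
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=-1)

emb = sincos_positions(seq_len=10, embed_dim=8)            # shape (10, 16)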
Example #18
def model_softmax(input_dim=1800, output_dim=5, optimizer='adadelta'):
    """Define softmax network
    returns m: Keras model with softmax output
    """
    
    m = Sequential()
    m.add(Layer(input_shape=(input_dim,), name='input'))
    m.add(Dense(output_dim, activation='softmax', name='output'))
    
    m.compile(loss='binary_crossentropy',
              metrics=['categorical_accuracy'],
              optimizer=optimizer)
    
    m.name = 'softmax'
    
    return m
Example #19
def model_ann(input_dim=1800, output_dim=5, h_dim=256, optimizer='adadelta'):
    """Define shallow ANN model
    returns m: shallow ANN Keras model
    """

    m = Sequential()
    m.add(Layer(input_shape=(input_dim,), name='input'))
    m.add(Dense(h_dim, activation='relu', name='h_1'))
    m.add(Dense(output_dim, activation='softmax', name='output'))
    
    m.compile(loss='binary_crossentropy',
              metrics=['categorical_accuracy'],
              optimizer=optimizer)
    
    m.name = 'ann'
        
    return m
Example #20
def build_RNN_model(num_of_classes=13, embed_dim=128):
    model = Sequential()
    model.add(Layer(input_shape=(
        None,
        embed_dim,
    )))
    model.add(
        Bidirectional(GRU(64, recurrent_dropout=0.2, return_sequences=True)))
    # model.add(GaussianDropout(0.05))
    model.add(Dropout(0.2))
    model.add(GlobalAveragePooling1D())
    model.add(Dense(num_of_classes, activation="softmax"))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
Example #21
    def get_config(self):
        """Returns the config of the layer.

        A layer config is a Python dictionary (serializable)
        containing the configuration of a layer.
        The same layer can be reinstantiated later
        (without its trained weights) from this configuration.

        The config of a layer does not include connectivity
        information, nor the layer class name. These are handled
        by `Network` (one layer of abstraction above).

        # Returns
            Python dictionary.
        """
        config = {
            'size': self.size,
            'initializer': initializers.serialize(self.initializer),
            'regularizer': regularizers.serialize(self.regularizer)
        }
        base_config = Layer.get_config(self)
        return dict(list(base_config.items()) + list(config.items()))
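A hedged, self-contained illustration of the round trip the docstring
describes, using a throwaway custom layer (the Scale class below is a
placeholder, not the class the method above belongs to):

from keras.layers import Layer

class Scale(Layer):
    """Toy layer used only to demonstrate get_config()/from_config()."""

    def __init__(self, factor=2.0, **kwargs):
        super(Scale, self).__init__(**kwargs)
        self.factor = factor

    def call(self, inputs):
        return inputs * self.factor

    def get_config(self):
        config = {'factor': self.factor}
        base_config = Layer.get_config(self)
        return dict(list(base_config.items()) + list(config.items()))

layer = Scale(factor=3.0, name='scale_1')
restored = Scale.from_config(layer.get_config())   # same config, no weights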
Example #22
def create_whole_model(save_model=False):
    net = models.Sequential()
    net.add(Layer(input_shape=(3, image_height, image_width)))
    print_last_layer_info(net)

    for layer in encoding_layers():
        net.add(layer)
        print_last_layer_info(net)
    for layer in decoding_layers():
        net.add(layer)
        print_last_layer_info(net)
    for layer in create_classification_layer():
        net.add(layer)
        print_last_layer_info(net)
    if save_model:
        with open('segmentation_model.json', 'w') as outfile:
            outfile.write(json.dumps(json.loads(net.to_json()), indent=2))
    net.summary()
    plot_model(net, show_shapes=True, show_layer_names=True, rankdir='TB', to_file='net.png')
Example #23
def get_segnet_full():
    segnet_basic = Sequential()
    segnet_basic.add(Layer(input_shape=(height // 2, width // 2, 3)))

    segnet_basic.encoding_layers = get_full_encoding_layers()
    for l in segnet_basic.encoding_layers:
        segnet_basic.add(l)

    segnet_basic.decoding_layers = get_full_decoding_layers()
    for l in segnet_basic.decoding_layers:
        segnet_basic.add(l)

    segnet_basic.add(Conv2D(classes, (1, 1)))

    segnet_basic.add(
        Reshape((height * width, classes),
                input_shape=(height, width, classes)))
    segnet_basic.add(Activation('softmax'))

    return segnet_basic
Example #24
    def __init__(
        self,
        layer: Layer,
        _state,
        alpha=None,
        beta=None,
        bias: bool = True,
        copy_weights=False,
    ) -> None:
        alpha, beta = rutils.assert_infer_lrp_alpha_beta_param(
            alpha, beta, self)
        self._alpha = alpha
        self._beta = beta

        # prepare positive and negative weights for computing positive
        # and negative preactivations z in apply_accordingly.
        if copy_weights:
            weights = layer.get_weights()
            if not bias and layer.use_bias:
                weights = weights[:-1]
            positive_weights = [x * (x > 0) for x in weights]
            negative_weights = [x * (x < 0) for x in weights]
        else:
            weights = layer.weights
            if not bias and layer.use_bias:
                weights = weights[:-1]
            positive_weights = [x * iK.to_floatx(x > 0) for x in weights]
            negative_weights = [x * iK.to_floatx(x < 0) for x in weights]

        self._layer_wo_act_positive = kgraph.copy_layer_wo_activation(
            layer,
            keep_bias=bias,
            weights=positive_weights,
            name_template="reversed_kernel_positive_%s",
        )
        self._layer_wo_act_negative = kgraph.copy_layer_wo_activation(
            layer,
            keep_bias=bias,
            weights=negative_weights,
            name_template="reversed_kernel_negative_%s",
        )
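For context, the positive/negative weight split above is what the standard
LRP alpha-beta rule requires; in its usual form (hedged, the library's exact
stabilisation details may differ, and commonly with alpha - beta = 1) it reads:

R_j = \sum_k \Big( \alpha \, \frac{a_j w_{jk}^{+}}{\sum_{j'} a_{j'} w_{j'k}^{+}}
      - \beta \, \frac{a_j w_{jk}^{-}}{\sum_{j'} a_{j'} w_{j'k}^{-}} \Big) R_k

where a_j are the layer inputs, w^{+} = max(w, 0), and w^{-} = min(w, 0).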
Example #25
    def __init__(self, layer: Layer, state, copy_weights=False):
        # The z-plus rule only works with positive weights and
        # no biases.
        # TODO: assert that layer inputs are always >= 0
        if copy_weights:
            weights = layer.get_weights()
            if layer.use_bias:
                weights = weights[:-1]
            weights = [x * (x > 0) for x in weights]
        else:
            weights = layer.weights
            if layer.use_bias:
                weights = weights[:-1]
            weights = [x * iK.to_floatx(x > 0) for x in weights]

        self._layer_wo_act_b_positive = kgraph.copy_layer_wo_activation(
            layer,
            keep_bias=False,
            weights=weights,
            name_template="reversed_kernel_positive_%s",
        )
Example #26
    def __init__(self,
                 layer: Layer,
                 state,
                 copy_weights: bool = False) -> None:
        # The flat rule works with weights equal to one and
        # no biases.
        if copy_weights:
            weights = layer.get_weights()
            if layer.use_bias:
                weights = weights[:-1]
            weights = [np.ones_like(x) for x in weights]
        else:
            weights = layer.weights
            if layer.use_bias:
                weights = weights[:-1]
            weights = [K.ones_like(x) for x in weights]

        self._layer_wo_act_b = kgraph.copy_layer_wo_activation(
            layer,
            keep_bias=False,
            weights=weights,
            name_template="reversed_kernel_%s")
Example #27
    def __init__(
        self,
        layer: Layer,
        _state,
        alpha: Tuple[float, float] = (0.5, 0.5),
        beta: Tuple[float, float] = (0.5, 0.5),
        bias: bool = True,
        copy_weights: bool = False,
    ) -> None:
        self._alpha = alpha
        self._beta = beta

        # prepare positive and negative weights for computing positive
        # and negative preactivations z in apply_accordingly.
        if copy_weights:
            weights = layer.get_weights()
            if not bias and getattr(layer, "use_bias", False):
                weights = weights[:-1]
            positive_weights = [x * (x > 0) for x in weights]
            negative_weights = [x * (x < 0) for x in weights]
        else:
            weights = layer.weights
            if not bias and getattr(layer, "use_bias", False):
                weights = weights[:-1]
            positive_weights = [x * iK.to_floatx(x > 0) for x in weights]
            negative_weights = [x * iK.to_floatx(x < 0) for x in weights]

        self._layer_wo_act_positive = kgraph.copy_layer_wo_activation(
            layer,
            keep_bias=bias,
            weights=positive_weights,
            name_template="reversed_kernel_positive_%s",
        )
        self._layer_wo_act_negative = kgraph.copy_layer_wo_activation(
            layer,
            keep_bias=bias,
            weights=negative_weights,
            name_template="reversed_kernel_negative_%s",
        )
Example #28
    def __init__(self,
                 layer: Layer,
                 _state,
                 low=-1,
                 high=1,
                 copy_weights: bool = False) -> None:
        self._low = low
        self._high = high

        # This rule works with three variants of the layer, all without biases.
        # One is the original form and two with only the positive or
        # negative weights.
        if copy_weights:
            weights = layer.get_weights()
            if layer.use_bias:
                weights = weights[:-1]
            positive_weights = [x * (x > 0) for x in weights]
            negative_weights = [x * (x < 0) for x in weights]
        else:
            weights = layer.weights
            if layer.use_bias:
                weights = weights[:-1]
            positive_weights = [x * iK.to_floatx(x > 0) for x in weights]
            negative_weights = [x * iK.to_floatx(x < 0) for x in weights]

        self._layer_wo_act = kgraph.copy_layer_wo_activation(
            layer, keep_bias=False, name_template="reversed_kernel_%s")
        self._layer_wo_act_positive = kgraph.copy_layer_wo_activation(
            layer,
            keep_bias=False,
            weights=positive_weights,
            name_template="reversed_kernel_positive_%s",
        )
        self._layer_wo_act_negative = kgraph.copy_layer_wo_activation(
            layer,
            keep_bias=False,
            weights=negative_weights,
            name_template="reversed_kernel_negative_%s",
        )
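For context, the low/high bounds and the three weight variants above
correspond to the LRP z^B rule for bounded input domains; in its usual form
(hedged, the implementation's stabilisation details may differ) it reads:

R_i = \sum_j \frac{x_i w_{ij} - l_i w_{ij}^{+} - h_i w_{ij}^{-}}
             {\sum_{i'} \big( x_{i'} w_{i'j} - l_{i'} w_{i'j}^{+} - h_{i'} w_{i'j}^{-} \big)} \, R_j

where l_i and h_i are the low and high bounds of the input domain,
w^{+} = max(w, 0), and w^{-} = min(w, 0).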
Example #29
    def __init__(self, **kwargs):
        Layer.__init__(self, **kwargs)
        self.kernel = None
Example #30
    def __init__(self, **kwargs):
        Layer.__init__(self, **kwargs)