import tensorflow as tf
from tensorflow.keras.layers import Layer, Conv1D, SimpleRNNCell


class SNNLayer(Layer):
    @staticmethod
    def InvokeRNN(cell, inputs):
        state = [cell.get_initial_state(inputs, None, None)]
        for i in range(inputs.shape[1]):
            output, state = cell(inputs[:, i, :], state)
            n_output = tf.sigmoid(3 * output)
            yield (output, n_output)
            # refractory period: suppress the membrane state right after a spike
            state[0] = state[0] * tf.sigmoid(2 * (0.5 - n_output))

    def __init__(self, units, psc_length, **kwargs):
        super(SNNLayer, self).__init__(**kwargs)
        self.units = units
        self.psc_length = psc_length

    def build(self, input_shape):
        def add_layer(layer):
            # register the sub-layer's weights so they are trained as part of this layer
            for weight in layer.trainable_weights:
                self._trainable_weights.append(weight)

        if len(input_shape) != 3:
            raise ValueError("expected ndim=3")
        self.INPUT_LENGTH = input_shape[1]
        self.INPUT_WIDTH = input_shape[2]
        with tf.variable_scope(self.name):
            # PSC integration as Conv1D, has only 1 channel (=>PSC is uniform), shared among all synapses
            self.psc = Conv1D(1,
                              self.psc_length,
                              name='psc',
                              data_format='channels_last',
                              trainable=True)
            self.psc.build((None, input_shape[1], 1))
            # add psc weights to self
            add_layer(self.psc)
            self.psc_weights = self.psc.trainable_weights
        # RNN cell modelling the neuron(s); produces self.units outputs per step
        self.rnn = SimpleRNNCell(self.units, activation=None)
        self.rnn.build((None, 1, self.INPUT_WIDTH))
        add_layer(self.rnn)

    def call(self, inputs, **kwargs):
        # The same PSC is applied to all input channels
        syn_inputs = tf.concat(
            [self.psc(inputs[:, :, i:i + 1]) for i in range(self.INPUT_WIDTH)],
            axis=-1)
        # then the RNN units are called
        o = tf.stack([o for _, o in SNNLayer.InvokeRNN(self.rnn, syn_inputs)],
                     axis=1)
        return o
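A minimal usage sketch for the layer above, assuming graph-mode TensorFlow 1.x (matching the tf.variable_scope call) and an illustrative spike-train input of shape (batch, time, channels); all sizes below are hypothetical:

# hypothetical input: 32 samples, 100 time steps, 8 presynaptic channels
spikes = tf.placeholder(tf.float32, shape=(32, 100, 8))

snn = SNNLayer(units=1, psc_length=20)
snn.build((None, 100, 8))
# with Conv1D's default 'valid' padding the PSC convolution shortens the sequence,
# so the output has shape (32, 100 - 20 + 1, 1) = (32, 81, 1)
out = snn.call(spikes)   # call() invoked directly since build() was done by hand above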
Example #3
    def __init__(self, encoder=None, vocab_size=1, embedding_size=32, hidden_size=[64, 64]):
        """Initialization method.

        Args:
            encoder (IntegerEncoder): An index to vocabulary encoder.
            vocab_size (int): The size of the vocabulary.
            embedding_size (int): The size of the embedding layer.
            hidden_size (list): Amount of hidden neurons per cell.

        """

        logger.info('Overriding class: Generator -> StackedRNNGenerator.')

        # Overrides its parent class with any custom arguments if needed
        super(StackedRNNGenerator, self).__init__(name='G_stacked_rnn')

        # Creates a property for holding the used encoder
        self.encoder = encoder

        # Creates an embedding layer
        self.embedding = Embedding(vocab_size, embedding_size, name='embedding')

        # Creating a stack of RNN cells
        self.cells = [SimpleRNNCell(size, name=f'rnn_cell{i}')
                      for (i, size) in enumerate(hidden_size)]

        # Creates the RNN loop itself
        self.rnn = RNN(self.cells, name='rnn_layer',
                       return_sequences=True,
                       stateful=True)

        # Creates the linear (Dense) layer
        self.linear = Dense(vocab_size, name='out')

        logger.debug(f'Number of cells: {len(hidden_size)}')
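A hypothetical forward pass for this generator, under the assumption that its call method (not part of this excerpt) chains the embedding, the stacked-cell RNN and the output projection; shapes are shown for a batch of token ids:

    def call(self, x):
        # x: integer token ids, shape (batch, sequence)
        x = self.embedding(x)   # (batch, sequence, embedding_size)
        x = self.rnn(x)         # (batch, sequence, hidden_size[-1]); each cell feeds the next per time step
        x = self.linear(x)      # (batch, sequence, vocab_size) logits over the vocabulary
        return x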
Example #4
    def __init__(self, encoder=None, vocab_size=1, embedding_size=32, hidden_size=64):
        """Initialization method.

        Args:
            encoder (IntegerEncoder): An index to vocabulary encoder.
            vocab_size (int): The size of the vocabulary.
            embedding_size (int): The size of the embedding layer.
            hidden_size (int): The amount of hidden neurons.

        """

        logger.info('Overriding class: Generator -> RNNGenerator.')

        # Overrides its parent class with any custom arguments if needed
        super(RNNGenerator, self).__init__(name='G_rnn')

        # Creates a property for holding the used encoder
        self.encoder = encoder

        # Creates an embedding layer
        self.embedding = Embedding(vocab_size, embedding_size, name='embedding')

        # Creates a simple RNN cell
        self.cell = SimpleRNNCell(hidden_size, name='rnn_cell')

        # Creates the RNN loop itself
        self.rnn = RNN(self.cell, name='rnn_layer',
                       return_sequences=True,
                       stateful=True)

        # Creates the linear (Dense) layer
        self.linear = Dense(vocab_size, name='out')
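Note that the RNN wrapper above is created with stateful=True, so the hidden state carries over between consecutive batches. A small, illustrative fragment of the reset that is then typically needed when an unrelated sequence starts; generator, dataset, train_step and num_epochs are placeholders, while reset_states() is the standard Keras RNN method:

for epoch in range(num_epochs):
    # clear the carried-over hidden state before replaying the corpus from the start
    generator.rnn.reset_states()
    for batch in dataset:
        train_step(generator, batch)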
import tensorflow as tf
import tensorx as tx
from tensorflow.keras.layers import SimpleRNNCell


def test_rnn_cell():
    n_inputs = 3
    n_units = 4
    batch_size = 2
    inputs = tx.Input(n_units=n_inputs)

    rnn0 = tx.RNNCell(inputs, n_units)

    # Keras RNN cell
    rnn1 = SimpleRNNCell(n_units)
    state = rnn1.get_initial_state(inputs, batch_size=1)
    assert tx.tensor_equal(state, rnn0.previous_state[0]())

    inputs.value = tf.ones([batch_size, n_inputs])
    res1 = rnn1(inputs, (state, ))

    rnn1.kernel = rnn0.layer_state.w.weights
    rnn1.bias = rnn0.layer_state.w.bias
    rnn1.recurrent_kernel = rnn0.layer_state.u.weights

    res2 = rnn1(inputs, (state, ))
    assert not tx.tensor_equal(res1[0], res2[0])
    assert not tx.tensor_equal(res1[1], res2[1])

    res0 = rnn0()
    assert tx.tensor_equal(res2[0], res0)
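The kernel, recurrent_kernel and bias attributes swapped into rnn1 above are the parameters of the standard SimpleRNNCell recurrence, output = activation(x @ kernel + bias + state @ recurrent_kernel). A small standalone check of that formula, assuming TF 2.x eager execution and arbitrary sizes:

import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import SimpleRNNCell

cell = SimpleRNNCell(4)                    # default activation is tanh
x = tf.random.normal([2, 3])               # (batch, features)
h = tf.zeros([2, 4])                       # previous hidden state
out, _ = cell(x, [h])                      # weights are built on the first call

expected = np.tanh(x.numpy() @ cell.kernel.numpy()
                   + cell.bias.numpy()
                   + h.numpy() @ cell.recurrent_kernel.numpy())
assert np.allclose(out.numpy(), expected, atol=1e-5)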
Example #6
    def __init__(
        self,
        vocab_size: Optional[int] = 1,
        embedding_size: Optional[int] = 32,
        hidden_size: Optional[int] = 64,
    ):
        """Initialization method.

        Args:
            vocab_size: Vocabulary size.
            embedding_size: Embedding layer units.
            hidden_size: Hidden layer units.

        """

        logger.info("Overriding class: Base -> RNN.")

        super(RNN, self).__init__(name="rnn")

        # Embedding layer
        self.embedding = Embedding(vocab_size,
                                   embedding_size,
                                   name="embedding")

        # RNN cell
        self.cell = SimpleRNNCell(hidden_size, name="rnn_cell")

        # RNN layer
        self.rnn = RNNLayer(self.cell, name="rnn_layer", return_sequences=True)

        # Linear (dense) layer
        self.fc = Dense(vocab_size, name="out")

        logger.info("Class overrided.")
        logger.debug(
            "Embedding: %d | Hidden: %d | Output: %d.",
            embedding_size,
            hidden_size,
            vocab_size,
        )
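A quick, illustrative shape check of how the three layers compose for next-token prediction, assuming the model's forward pass (not shown in this excerpt) runs embedding, then rnn, then fc:

model = RNN(vocab_size=100, embedding_size=32, hidden_size=64)
tokens = tf.zeros([8, 20], dtype=tf.int32)     # (batch, sequence) of token ids

hidden = model.rnn(model.embedding(tokens))    # (8, 20, 64) since return_sequences=True
logits = model.fc(hidden)                      # (8, 20, 100): one distribution per position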